| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
jrior001/android_kernel_samsung_d2 | drivers/hv/ring_buffer.c | 794 | 11206 | /*
*
* Copyright (c) 2009, Microsoft Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
* Hank Janssen <hjanssen@microsoft.com>
* K. Y. Srinivasan <kys@microsoft.com>
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include "hyperv_vmbus.h"
/* #defines */
/* Amount of space available to write */
#define BYTES_AVAIL_TO_WRITE(r, w, z) \
(((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w)))
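/*
 * Worked example (illustrative numbers, not from the source): with a data
 * area of z = 4096 bytes, read index r = 100 and write index w = 300, the
 * writer may use 4096 - (300 - 100) = 3896 bytes; if the write index has
 * wrapped so that w = 100 and r = 300, only r - w = 200 bytes remain.
 * Note that hv_ringbuffer_write() below refuses to fill the ring
 * completely, so read index == write index always means "empty".
 */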
/*
*
* hv_get_ringbuffer_availbytes()
*
* Get the number of bytes available to read from, and space available
* to write to, the specified ring buffer
*/
static inline void
hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
u32 *read, u32 *write)
{
u32 read_loc, write_loc;
smp_read_barrier_depends();
/* Capture the read/write indices before they change */
read_loc = rbi->ring_buffer->read_index;
write_loc = rbi->ring_buffer->write_index;
*write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->ring_datasize);
*read = rbi->ring_datasize - *write;
}
/*
* hv_get_next_write_location()
*
* Get the next write location for the specified ring buffer
*
*/
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
u32 next = ring_info->ring_buffer->write_index;
return next;
}
/*
* hv_set_next_write_location()
*
* Set the next write location for the specified ring buffer
*
*/
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
u32 next_write_location)
{
ring_info->ring_buffer->write_index = next_write_location;
}
/*
* hv_get_next_read_location()
*
* Get the next read location for the specified ring buffer
*/
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
u32 next = ring_info->ring_buffer->read_index;
return next;
}
/*
* hv_get_next_readlocation_withoffset()
*
* Get the next read location + offset for the specified ring buffer.
* This allows the caller to skip 'offset' bytes past the current read
* location (e.g. over header data it has already consumed).
*/
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
u32 offset)
{
u32 next = ring_info->ring_buffer->read_index;
next += offset;
next %= ring_info->ring_datasize;
return next;
}
/*
*
* hv_set_next_read_location()
*
* Set the next read location for the specified ring buffer
*
*/
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
u32 next_read_location)
{
ring_info->ring_buffer->read_index = next_read_location;
}
/*
*
* hv_get_ring_buffer()
*
* Get the start of the ring buffer
*/
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
return (void *)ring_info->ring_buffer->buffer;
}
/*
*
* hv_get_ring_buffersize()
*
* Get the size of the ring buffer
*/
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
return ring_info->ring_datasize;
}
/*
*
* hv_get_ring_bufferindices()
*
* Get the ring buffer indices packed as a u64: the write index occupies
* the upper 32 bits (the lower 32, where the read index would go, are zero)
*
*/
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
return (u64)ring_info->ring_buffer->write_index << 32;
}
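/*
 * Example (illustrative value): a write_index of 0x320 packs to the u64
 * 0x0000032000000000. hv_ringbuffer_write() appends this value after each
 * packet so the reader can recover the previous packet-start indices.
 */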
/*
*
* hv_copyfrom_ringbuffer()
*
* Helper routine to copy data from the ring buffer to a destination
* buffer. Assumes there is enough data available. Handles wrap-around
* on the source side only.
*
*/
static u32 hv_copyfrom_ringbuffer(
struct hv_ring_buffer_info *ring_info,
void *dest,
u32 destlen,
u32 start_read_offset)
{
void *ring_buffer = hv_get_ring_buffer(ring_info);
u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
u32 frag_len;
/* wrap-around detected at the src */
if (destlen > ring_buffer_size - start_read_offset) {
frag_len = ring_buffer_size - start_read_offset;
memcpy(dest, ring_buffer + start_read_offset, frag_len);
memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
} else
memcpy(dest, ring_buffer + start_read_offset, destlen);
start_read_offset += destlen;
start_read_offset %= ring_buffer_size;
return start_read_offset;
}
/*
*
* hv_copyto_ringbuffer()
*
* Helper routine to copy data from a source buffer to the ring buffer.
* Assumes there is enough room. Handles wrap-around on the destination
* side only.
*
*/
static u32 hv_copyto_ringbuffer(
struct hv_ring_buffer_info *ring_info,
u32 start_write_offset,
void *src,
u32 srclen)
{
void *ring_buffer = hv_get_ring_buffer(ring_info);
u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
u32 frag_len;
/* wrap-around detected! */
if (srclen > ring_buffer_size - start_write_offset) {
frag_len = ring_buffer_size - start_write_offset;
memcpy(ring_buffer + start_write_offset, src, frag_len);
memcpy(ring_buffer, src + frag_len, srclen - frag_len);
} else
memcpy(ring_buffer + start_write_offset, src, srclen);
start_write_offset += srclen;
start_write_offset %= ring_buffer_size;
return start_write_offset;
}
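/*
 * Wrap-around example for the two copy helpers above (illustrative
 * numbers, not from the source): with ring_buffer_size = 4096 and
 * start_write_offset = 4000, copying srclen = 200 bytes splits into a
 * 96-byte fragment at offset 4000 and a 104-byte fragment at offset 0,
 * and the returned offset is (4000 + 200) % 4096 = 104.
 */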
/*
*
* hv_ringbuffer_get_debuginfo()
*
* Get various debug metrics for the specified ring buffer
*
*/
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info)
{
u32 bytes_avail_towrite;
u32 bytes_avail_toread;
if (ring_info->ring_buffer) {
hv_get_ringbuffer_availbytes(ring_info,
&bytes_avail_toread,
&bytes_avail_towrite);
debug_info->bytes_avail_toread = bytes_avail_toread;
debug_info->bytes_avail_towrite = bytes_avail_towrite;
debug_info->current_read_index =
ring_info->ring_buffer->read_index;
debug_info->current_write_index =
ring_info->ring_buffer->write_index;
debug_info->current_interrupt_mask =
ring_info->ring_buffer->interrupt_mask;
}
}
/*
*
* hv_get_ringbuffer_interrupt_mask()
*
* Get the interrupt mask for the specified ring buffer
*
*/
u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
{
return rbi->ring_buffer->interrupt_mask;
}
/*
*
* hv_ringbuffer_init()
*
* Initialize the ring buffer
*
*/
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
void *buffer, u32 buflen)
{
if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
return -EINVAL;
memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));
ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
ring_info->ring_buffer->read_index =
ring_info->ring_buffer->write_index = 0;
ring_info->ring_size = buflen;
ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);
spin_lock_init(&ring_info->ring_lock);
return 0;
}
/*
*
* hv_ringbuffer_cleanup()
*
* Cleanup the ring buffer
*
*/
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}
/*
*
* hv_ringbuffer_write()
*
* Write to the ring buffer
*
*/
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
struct scatterlist *sglist, u32 sgcount)
{
int i = 0;
u32 bytes_avail_towrite;
u32 bytes_avail_toread;
u32 totalbytes_towrite = 0;
struct scatterlist *sg;
u32 next_write_location;
u64 prev_indices = 0;
unsigned long flags;
for_each_sg(sglist, sg, sgcount, i)
{
totalbytes_towrite += sg->length;
}
totalbytes_towrite += sizeof(u64);
spin_lock_irqsave(&outring_info->ring_lock, flags);
hv_get_ringbuffer_availbytes(outring_info,
&bytes_avail_toread,
&bytes_avail_towrite);
/*
 * If there is only room for the packet, assume it is full.
 * Otherwise, the next time around, we think the ring buffer
 * is empty since the read index == write index.
 */
if (bytes_avail_towrite <= totalbytes_towrite) {
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
return -EAGAIN;
}
/* Write to the ring buffer */
next_write_location = hv_get_next_write_location(outring_info);
for_each_sg(sglist, sg, sgcount, i)
{
next_write_location = hv_copyto_ringbuffer(outring_info,
next_write_location,
sg_virt(sg),
sg->length);
}
/* Set previous packet start */
prev_indices = hv_get_ring_bufferindices(outring_info);
next_write_location = hv_copyto_ringbuffer(outring_info,
next_write_location,
&prev_indices,
sizeof(u64));
/* Make sure we flush all writes before updating the writeIndex */
wmb();
/* Now, update the write location */
hv_set_next_write_location(outring_info, next_write_location);
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
return 0;
}
/*
*
* hv_ringbuffer_peek()
*
* Read without advancing the read index
*
*/
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
void *buffer, u32 buflen)
{
u32 bytes_avail_towrite;
u32 bytes_avail_toread;
u32 next_read_location = 0;
unsigned long flags;
spin_lock_irqsave(&inring_info->ring_lock, flags);
hv_get_ringbuffer_availbytes(inring_info,
&bytes_avail_toread,
&bytes_avail_towrite);
/* Make sure there is something to read */
if (bytes_avail_toread < buflen) {
spin_unlock_irqrestore(&inring_info->ring_lock, flags);
return -EAGAIN;
}
/* Copy from the current read location without advancing the index */
next_read_location = hv_get_next_read_location(inring_info);
next_read_location = hv_copyfrom_ringbuffer(inring_info,
buffer,
buflen,
next_read_location);
spin_unlock_irqrestore(&inring_info->ring_lock, flags);
return 0;
}
/*
*
* hv_ringbuffer_read()
*
* Read and advance the read index
*
*/
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
u32 buflen, u32 offset)
{
u32 bytes_avail_towrite;
u32 bytes_avail_toread;
u32 next_read_location = 0;
u64 prev_indices = 0;
unsigned long flags;
if (buflen == 0)
return -EINVAL;
spin_lock_irqsave(&inring_info->ring_lock, flags);
hv_get_ringbuffer_availbytes(inring_info,
&bytes_avail_toread,
&bytes_avail_towrite);
/* Make sure there is something to read */
if (bytes_avail_toread < buflen) {
spin_unlock_irqrestore(&inring_info->ring_lock, flags);
return -EAGAIN;
}
next_read_location =
hv_get_next_readlocation_withoffset(inring_info, offset);
next_read_location = hv_copyfrom_ringbuffer(inring_info,
buffer,
buflen,
next_read_location);
next_read_location = hv_copyfrom_ringbuffer(inring_info,
&prev_indices,
sizeof(u64),
next_read_location);
/*
 * Make sure all reads are done before we update the read index since
 * the writer may start writing to the read area once the read index
 * is updated.
 */
mb();
/* Update the read index */
hv_set_next_read_location(inring_info, next_read_location);
spin_unlock_irqrestore(&inring_info->ring_lock, flags);
return 0;
}
| gpl-2.0 |
explora26/kernel-hikey-linaro | drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 1050 | 47604 | /*
* Copyright (C) 2003 - 2009 NetXen, Inc.
* Copyright (C) 2009 - QLogic Corporation.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
*
*/
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include "netxen_nic.h"
#include "netxen_nic_hw.h"
struct crb_addr_pair {
u32 addr;
u32 data;
};
#define NETXEN_MAX_CRB_XFORM 60
static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
#define NETXEN_ADDR_ERROR (0xffffffff)
#define crb_addr_transform(name) \
crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20
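/*
 * For reference, crb_addr_transform(XDMA) expands to:
 *
 *   crb_addr_xform[NETXEN_HW_PX_MAP_CRB_XDMA] =
 *       NETXEN_HW_CRB_HUB_AGT_ADR_XDMA << 20;
 *
 * i.e. each table slot maps an internal CRB hub/agent id to the upper
 * 12 address bits of its PCI CRB window (see netxen_decode_crb_addr()).
 */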
#define NETXEN_NIC_XDMA_RESET 0x8000ff
static void
netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
struct nx_host_rds_ring *rds_ring);
static int netxen_p3_has_mn(struct netxen_adapter *adapter);
static void crb_addr_transform_setup(void)
{
crb_addr_transform(XDMA);
crb_addr_transform(TIMR);
crb_addr_transform(SRE);
crb_addr_transform(SQN3);
crb_addr_transform(SQN2);
crb_addr_transform(SQN1);
crb_addr_transform(SQN0);
crb_addr_transform(SQS3);
crb_addr_transform(SQS2);
crb_addr_transform(SQS1);
crb_addr_transform(SQS0);
crb_addr_transform(RPMX7);
crb_addr_transform(RPMX6);
crb_addr_transform(RPMX5);
crb_addr_transform(RPMX4);
crb_addr_transform(RPMX3);
crb_addr_transform(RPMX2);
crb_addr_transform(RPMX1);
crb_addr_transform(RPMX0);
crb_addr_transform(ROMUSB);
crb_addr_transform(SN);
crb_addr_transform(QMN);
crb_addr_transform(QMS);
crb_addr_transform(PGNI);
crb_addr_transform(PGND);
crb_addr_transform(PGN3);
crb_addr_transform(PGN2);
crb_addr_transform(PGN1);
crb_addr_transform(PGN0);
crb_addr_transform(PGSI);
crb_addr_transform(PGSD);
crb_addr_transform(PGS3);
crb_addr_transform(PGS2);
crb_addr_transform(PGS1);
crb_addr_transform(PGS0);
crb_addr_transform(PS);
crb_addr_transform(PH);
crb_addr_transform(NIU);
crb_addr_transform(I2Q);
crb_addr_transform(EG);
crb_addr_transform(MN);
crb_addr_transform(MS);
crb_addr_transform(CAS2);
crb_addr_transform(CAS1);
crb_addr_transform(CAS0);
crb_addr_transform(CAM);
crb_addr_transform(C2C1);
crb_addr_transform(C2C0);
crb_addr_transform(SMB);
crb_addr_transform(OCM0);
crb_addr_transform(I2C0);
}
void netxen_release_rx_buffers(struct netxen_adapter *adapter)
{
struct netxen_recv_context *recv_ctx;
struct nx_host_rds_ring *rds_ring;
struct netxen_rx_buffer *rx_buf;
int i, ring;
recv_ctx = &adapter->recv_ctx;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
for (i = 0; i < rds_ring->num_desc; ++i) {
rx_buf = &(rds_ring->rx_buf_arr[i]);
if (rx_buf->state == NETXEN_BUFFER_FREE)
continue;
pci_unmap_single(adapter->pdev,
rx_buf->dma,
rds_ring->dma_size,
PCI_DMA_FROMDEVICE);
if (rx_buf->skb != NULL)
dev_kfree_skb_any(rx_buf->skb);
}
}
}
void netxen_release_tx_buffers(struct netxen_adapter *adapter)
{
struct netxen_cmd_buffer *cmd_buf;
struct netxen_skb_frag *buffrag;
int i, j;
struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
spin_lock_bh(&adapter->tx_clean_lock);
cmd_buf = tx_ring->cmd_buf_arr;
for (i = 0; i < tx_ring->num_desc; i++) {
buffrag = cmd_buf->frag_array;
if (buffrag->dma) {
pci_unmap_single(adapter->pdev, buffrag->dma,
buffrag->length, PCI_DMA_TODEVICE);
buffrag->dma = 0ULL;
}
for (j = 1; j < cmd_buf->frag_count; j++) {
buffrag++;
if (buffrag->dma) {
pci_unmap_page(adapter->pdev, buffrag->dma,
buffrag->length,
PCI_DMA_TODEVICE);
buffrag->dma = 0ULL;
}
}
if (cmd_buf->skb) {
dev_kfree_skb_any(cmd_buf->skb);
cmd_buf->skb = NULL;
}
cmd_buf++;
}
spin_unlock_bh(&adapter->tx_clean_lock);
}
void netxen_free_sw_resources(struct netxen_adapter *adapter)
{
struct netxen_recv_context *recv_ctx;
struct nx_host_rds_ring *rds_ring;
struct nx_host_tx_ring *tx_ring;
int ring;
recv_ctx = &adapter->recv_ctx;
if (recv_ctx->rds_rings == NULL)
goto skip_rds;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
vfree(rds_ring->rx_buf_arr);
rds_ring->rx_buf_arr = NULL;
}
kfree(recv_ctx->rds_rings);
skip_rds:
if (adapter->tx_ring == NULL)
return;
tx_ring = adapter->tx_ring;
vfree(tx_ring->cmd_buf_arr);
kfree(tx_ring);
adapter->tx_ring = NULL;
}
int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
{
struct netxen_recv_context *recv_ctx;
struct nx_host_rds_ring *rds_ring;
struct nx_host_sds_ring *sds_ring;
struct nx_host_tx_ring *tx_ring;
struct netxen_rx_buffer *rx_buf;
int ring, i;
struct netxen_cmd_buffer *cmd_buf_arr;
struct net_device *netdev = adapter->netdev;
tx_ring = kzalloc(sizeof(struct nx_host_tx_ring), GFP_KERNEL);
if (tx_ring == NULL)
return -ENOMEM;
adapter->tx_ring = tx_ring;
tx_ring->num_desc = adapter->num_txd;
tx_ring->txq = netdev_get_tx_queue(netdev, 0);
cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
if (cmd_buf_arr == NULL)
goto err_out;
tx_ring->cmd_buf_arr = cmd_buf_arr;
recv_ctx = &adapter->recv_ctx;
rds_ring = kcalloc(adapter->max_rds_rings,
sizeof(struct nx_host_rds_ring), GFP_KERNEL);
if (rds_ring == NULL)
goto err_out;
recv_ctx->rds_rings = rds_ring;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
switch (ring) {
case RCV_RING_NORMAL:
rds_ring->num_desc = adapter->num_rxd;
if (adapter->ahw.cut_through) {
rds_ring->dma_size =
NX_CT_DEFAULT_RX_BUF_LEN;
rds_ring->skb_size =
NX_CT_DEFAULT_RX_BUF_LEN;
} else {
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
rds_ring->dma_size =
NX_P3_RX_BUF_MAX_LEN;
else
rds_ring->dma_size =
NX_P2_RX_BUF_MAX_LEN;
rds_ring->skb_size =
rds_ring->dma_size + NET_IP_ALIGN;
}
break;
case RCV_RING_JUMBO:
rds_ring->num_desc = adapter->num_jumbo_rxd;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
rds_ring->dma_size =
NX_P3_RX_JUMBO_BUF_MAX_LEN;
else
rds_ring->dma_size =
NX_P2_RX_JUMBO_BUF_MAX_LEN;
if (adapter->capabilities & NX_CAP0_HW_LRO)
rds_ring->dma_size += NX_LRO_BUFFER_EXTRA;
rds_ring->skb_size =
rds_ring->dma_size + NET_IP_ALIGN;
break;
case RCV_RING_LRO:
rds_ring->num_desc = adapter->num_lro_rxd;
rds_ring->dma_size = NX_RX_LRO_BUFFER_LENGTH;
rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
break;
}
rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
if (rds_ring->rx_buf_arr == NULL)
/* free whatever was already allocated */
goto err_out;
INIT_LIST_HEAD(&rds_ring->free_list);
/*
* Now go through all of them, set reference handles
* and put them in the queues.
*/
rx_buf = rds_ring->rx_buf_arr;
for (i = 0; i < rds_ring->num_desc; i++) {
list_add_tail(&rx_buf->list,
&rds_ring->free_list);
rx_buf->ref_handle = i;
rx_buf->state = NETXEN_BUFFER_FREE;
rx_buf++;
}
spin_lock_init(&rds_ring->lock);
}
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
sds_ring->irq = adapter->msix_entries[ring].vector;
sds_ring->adapter = adapter;
sds_ring->num_desc = adapter->num_rxd;
for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
INIT_LIST_HEAD(&sds_ring->free_list[i]);
}
return 0;
err_out:
netxen_free_sw_resources(adapter);
return -ENOMEM;
}
/*
* netxen_decode_crb_addr() - utility to translate from internal Phantom CRB
* address to external PCI CRB address.
*/
static u32 netxen_decode_crb_addr(u32 addr)
{
int i;
u32 base_addr, offset, pci_base;
crb_addr_transform_setup();
pci_base = NETXEN_ADDR_ERROR;
base_addr = addr & 0xfff00000;
offset = addr & 0x000fffff;
for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) {
if (crb_addr_xform[i] == base_addr) {
pci_base = i << 20;
break;
}
}
if (pci_base == NETXEN_ADDR_ERROR)
return pci_base;
else
return pci_base + offset;
}
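/*
 * Decode example (illustrative value, not from the source): for
 * addr = 0x03b00120 the routine searches crb_addr_xform[] for base
 * 0x03b00000; if that base sits at table index i, the translated
 * address is (i << 20) + 0x120, otherwise NETXEN_ADDR_ERROR.
 */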
#define NETXEN_MAX_ROM_WAIT_USEC 100
static int netxen_wait_rom_done(struct netxen_adapter *adapter)
{
long timeout = 0;
long done = 0;
cond_resched();
while (done == 0) {
done = NXRD32(adapter, NETXEN_ROMUSB_GLB_STATUS);
done &= 2;
if (++timeout >= NETXEN_MAX_ROM_WAIT_USEC) {
dev_err(&adapter->pdev->dev,
"Timeout reached waiting for rom done");
return -EIO;
}
udelay(1);
}
return 0;
}
static int do_rom_fast_read(struct netxen_adapter *adapter,
int addr, int *valp)
{
NXWR32(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
NXWR32(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
if (netxen_wait_rom_done(adapter)) {
printk("Error waiting for rom done\n");
return -EIO;
}
/* reset abyte_cnt and dummy_byte_cnt */
NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
udelay(10);
NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
*valp = NXRD32(adapter, NETXEN_ROMUSB_ROM_RDATA);
return 0;
}
static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
u8 *bytes, size_t size)
{
int addridx;
int ret = 0;
for (addridx = addr; addridx < (addr + size); addridx += 4) {
int v;
ret = do_rom_fast_read(adapter, addridx, &v);
if (ret != 0)
break;
*(__le32 *)bytes = cpu_to_le32(v);
bytes += 4;
}
return ret;
}
int
netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
u8 *bytes, size_t size)
{
int ret;
ret = netxen_rom_lock(adapter);
if (ret < 0)
return ret;
ret = do_rom_fast_read_words(adapter, addr, bytes, size);
netxen_rom_unlock(adapter);
return ret;
}
int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
{
int ret;
if (netxen_rom_lock(adapter) != 0)
return -EIO;
ret = do_rom_fast_read(adapter, addr, valp);
netxen_rom_unlock(adapter);
return ret;
}
#define NETXEN_BOARDTYPE 0x4008
#define NETXEN_BOARDNUM 0x400c
#define NETXEN_CHIPNUM 0x4010
int netxen_pinit_from_rom(struct netxen_adapter *adapter)
{
int addr, val;
int i, n, init_delay = 0;
struct crb_addr_pair *buf;
unsigned offset;
u32 off;
/* resetall */
netxen_rom_lock(adapter);
NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xfeffffff);
netxen_rom_unlock(adapter);
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
(n != 0xcafecafe) ||
netxen_rom_fast_read(adapter, 4, &n) != 0) {
printk(KERN_ERR "%s: ERROR Reading crb_init area: "
"n: %08x\n", netxen_nic_driver_name, n);
return -EIO;
}
offset = n & 0xffffU;
n = (n >> 16) & 0xffffU;
} else {
if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
!(n & 0x80000000)) {
printk(KERN_ERR "%s: ERROR Reading crb_init area: "
"n: %08x\n", netxen_nic_driver_name, n);
return -EIO;
}
offset = 1;
n &= ~0x80000000;
}
if (n >= 1024) {
printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not"
" initialized.\n", __func__, n);
return -EIO;
}
buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
for (i = 0; i < n; i++) {
if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
kfree(buf);
return -EIO;
}
buf[i].addr = addr;
buf[i].data = val;
}
for (i = 0; i < n; i++) {
off = netxen_decode_crb_addr(buf[i].addr);
if (off == NETXEN_ADDR_ERROR) {
printk(KERN_ERR"CRB init value out of range %x\n",
buf[i].addr);
continue;
}
off += NETXEN_PCI_CRBSPACE;
if (off & 1)
continue;
/* skipping cold reboot MAGIC */
if (off == NETXEN_CAM_RAM(0x1fc))
continue;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
if (off == (NETXEN_CRB_I2C0 + 0x1c))
continue;
/* do not reset PCI */
if (off == (ROMUSB_GLB + 0xbc))
continue;
if (off == (ROMUSB_GLB + 0xa8))
continue;
if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
continue;
if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
continue;
if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
continue;
if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET)
continue;
if (off == (NETXEN_CRB_PEG_NET_1 + 0x18) &&
!NX_IS_REVISION_P3P(adapter->ahw.revision_id))
buf[i].data = 0x1020;
/* skip the function enable register */
if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION))
continue;
if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2))
continue;
if ((off & 0x0ff00000) == NETXEN_CRB_SMB)
continue;
}
init_delay = 1;
/*
 * After writing this register, HW needs time for CRB
 * to quiet down (else crb_window returns 0xffffffff).
 */
if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
init_delay = 1000;
if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
/* hold xdma in reset also */
buf[i].data = NETXEN_NIC_XDMA_RESET;
}
}
NXWR32(adapter, off, buf[i].data);
msleep(init_delay);
}
kfree(buf);
/* disable_peg_cache_all */
/* unreset_net_cache */
if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
val = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
}
/* p2dn replyCount */
NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
/* disable_peg_cache 0 */
NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8);
/* disable_peg_cache 1 */
NXWR32(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8);
/* peg_clr_all */
/* peg_clr 0 */
NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0);
NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0);
/* peg_clr 1 */
NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0);
NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0);
/* peg_clr 2 */
NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0);
NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0);
/* peg_clr 3 */
NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0);
NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0);
return 0;
}
static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
{
uint32_t i;
struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
__le32 entries = cpu_to_le32(directory->num_entries);
for (i = 0; i < entries; i++) {
__le32 offs = cpu_to_le32(directory->findex) +
(i * cpu_to_le32(directory->entry_size));
__le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
if (tab_type == section)
return (struct uni_table_desc *) &unirom[offs];
}
return NULL;
}
#define QLCNIC_FILEHEADER_SIZE (14 * 4)
static int
netxen_nic_validate_header(struct netxen_adapter *adapter)
{
const u8 *unirom = adapter->fw->data;
struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
u32 fw_file_size = adapter->fw->size;
u32 tab_size;
__le32 entries;
__le32 entry_size;
if (fw_file_size < QLCNIC_FILEHEADER_SIZE)
return -EINVAL;
entries = cpu_to_le32(directory->num_entries);
entry_size = cpu_to_le32(directory->entry_size);
tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
if (fw_file_size < tab_size)
return -EINVAL;
return 0;
}
static int
netxen_nic_validate_bootld(struct netxen_adapter *adapter)
{
struct uni_table_desc *tab_desc;
struct uni_data_desc *descr;
const u8 *unirom = adapter->fw->data;
__le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
NX_UNI_BOOTLD_IDX_OFF));
u32 offs;
u32 tab_size;
u32 data_size;
tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD);
if (!tab_desc)
return -EINVAL;
tab_size = cpu_to_le32(tab_desc->findex) +
(cpu_to_le32(tab_desc->entry_size) * (idx + 1));
if (adapter->fw->size < tab_size)
return -EINVAL;
offs = cpu_to_le32(tab_desc->findex) +
(cpu_to_le32(tab_desc->entry_size) * (idx));
descr = (struct uni_data_desc *)&unirom[offs];
data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
if (adapter->fw->size < data_size)
return -EINVAL;
return 0;
}
static int
netxen_nic_validate_fw(struct netxen_adapter *adapter)
{
struct uni_table_desc *tab_desc;
struct uni_data_desc *descr;
const u8 *unirom = adapter->fw->data;
__le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
NX_UNI_FIRMWARE_IDX_OFF));
u32 offs;
u32 tab_size;
u32 data_size;
tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW);
if (!tab_desc)
return -EINVAL;
tab_size = cpu_to_le32(tab_desc->findex) +
(cpu_to_le32(tab_desc->entry_size) * (idx + 1));
if (adapter->fw->size < tab_size)
return -EINVAL;
offs = cpu_to_le32(tab_desc->findex) +
(cpu_to_le32(tab_desc->entry_size) * (idx));
descr = (struct uni_data_desc *)&unirom[offs];
data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
if (adapter->fw->size < data_size)
return -EINVAL;
return 0;
}
static int
netxen_nic_validate_product_offs(struct netxen_adapter *adapter)
{
struct uni_table_desc *ptab_descr;
const u8 *unirom = adapter->fw->data;
int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ?
1 : netxen_p3_has_mn(adapter);
__le32 entries;
__le32 entry_size;
u32 tab_size;
u32 i;
ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
if (ptab_descr == NULL)
return -EINVAL;
entries = cpu_to_le32(ptab_descr->num_entries);
entry_size = cpu_to_le32(ptab_descr->entry_size);
tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
if (adapter->fw->size < tab_size)
return -EINVAL;
nomn:
for (i = 0; i < entries; i++) {
__le32 flags, file_chiprev, offs;
u8 chiprev = adapter->ahw.revision_id;
uint32_t flagbit;
offs = cpu_to_le32(ptab_descr->findex) +
(i * cpu_to_le32(ptab_descr->entry_size));
flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF));
file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
NX_UNI_CHIP_REV_OFF));
flagbit = mn_present ? 1 : 2;
if ((chiprev == file_chiprev) &&
((1ULL << flagbit) & flags)) {
adapter->file_prd_off = offs;
return 0;
}
}
if (mn_present && NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
mn_present = 0;
goto nomn;
}
return -EINVAL;
}
static int
netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter)
{
if (netxen_nic_validate_header(adapter)) {
dev_err(&adapter->pdev->dev,
"unified image: header validation failed\n");
return -EINVAL;
}
if (netxen_nic_validate_product_offs(adapter)) {
dev_err(&adapter->pdev->dev,
"unified image: product validation failed\n");
return -EINVAL;
}
if (netxen_nic_validate_bootld(adapter)) {
dev_err(&adapter->pdev->dev,
"unified image: bootld validation failed\n");
return -EINVAL;
}
if (netxen_nic_validate_fw(adapter)) {
dev_err(&adapter->pdev->dev,
"unified image: firmware validation failed\n");
return -EINVAL;
}
return 0;
}
static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
u32 section, u32 idx_offset)
{
const u8 *unirom = adapter->fw->data;
int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
idx_offset));
struct uni_table_desc *tab_desc;
__le32 offs;
tab_desc = nx_get_table_desc(unirom, section);
if (tab_desc == NULL)
return NULL;
offs = cpu_to_le32(tab_desc->findex) +
(cpu_to_le32(tab_desc->entry_size) * idx);
return (struct uni_data_desc *)&unirom[offs];
}
static u8 *
nx_get_bootld_offs(struct netxen_adapter *adapter)
{
u32 offs = NETXEN_BOOTLD_START;
if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
offs = cpu_to_le32((nx_get_data_desc(adapter,
NX_UNI_DIR_SECT_BOOTLD,
NX_UNI_BOOTLD_IDX_OFF))->findex);
return (u8 *)&adapter->fw->data[offs];
}
static u8 *
nx_get_fw_offs(struct netxen_adapter *adapter)
{
u32 offs = NETXEN_IMAGE_START;
if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
offs = cpu_to_le32((nx_get_data_desc(adapter,
NX_UNI_DIR_SECT_FW,
NX_UNI_FIRMWARE_IDX_OFF))->findex);
return (u8 *)&adapter->fw->data[offs];
}
static __le32
nx_get_fw_size(struct netxen_adapter *adapter)
{
if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
return cpu_to_le32((nx_get_data_desc(adapter,
NX_UNI_DIR_SECT_FW,
NX_UNI_FIRMWARE_IDX_OFF))->size);
else
return cpu_to_le32(
*(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]);
}
static __le32
nx_get_fw_version(struct netxen_adapter *adapter)
{
struct uni_data_desc *fw_data_desc;
const struct firmware *fw = adapter->fw;
__le32 major, minor, sub;
const u8 *ver_str;
int i, ret = 0;
if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
fw_data_desc = nx_get_data_desc(adapter,
NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF);
ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
cpu_to_le32(fw_data_desc->size) - 17;
for (i = 0; i < 12; i++) {
if (!strncmp(&ver_str[i], "REV=", 4)) {
ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
&major, &minor, &sub);
break;
}
}
if (ret != 3)
return 0;
return major + (minor << 8) + (sub << 16);
} else
return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
}
static __le32
nx_get_bios_version(struct netxen_adapter *adapter)
{
const struct firmware *fw = adapter->fw;
__le32 bios_ver, prd_off = adapter->file_prd_off;
if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
+ NX_UNI_BIOS_VERSION_OFF));
return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
(bios_ver >> 24);
} else
return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
}
int
netxen_need_fw_reset(struct netxen_adapter *adapter)
{
u32 count, old_count;
u32 val, version, major, minor, build;
int i, timeout;
u8 fw_type;
/* NX2031 firmware doesn't support heartbeat */
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 1;
if (adapter->need_fw_reset)
return 1;
/* last attempt had failed */
if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
return 1;
old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
for (i = 0; i < 10; i++) {
timeout = msleep_interruptible(200);
if (timeout) {
NXWR32(adapter, CRB_CMDPEG_STATE,
PHAN_INITIALIZE_FAILED);
return -EINTR;
}
count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
if (count != old_count)
break;
}
/* firmware is dead */
if (count == old_count)
return 1;
/* check if we have got newer or different file firmware */
if (adapter->fw) {
val = nx_get_fw_version(adapter);
version = NETXEN_DECODE_VERSION(val);
major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
if (version > NETXEN_VERSION_CODE(major, minor, build))
return 1;
if (version == NETXEN_VERSION_CODE(major, minor, build) &&
adapter->fw_type != NX_UNIFIED_ROMIMAGE) {
val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
fw_type = (val & 0x4) ?
NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE;
if (adapter->fw_type != fw_type)
return 1;
}
}
return 0;
}
#define NETXEN_MIN_P3_FW_SUPP NETXEN_VERSION_CODE(4, 0, 505)
int
netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter)
{
u32 flash_fw_ver, min_fw_ver;
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 0;
if (netxen_rom_fast_read(adapter,
NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
dev_err(&adapter->pdev->dev, "Unable to read flash fw"
"version\n");
return -EIO;
}
flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);
min_fw_ver = NETXEN_MIN_P3_FW_SUPP;
if (flash_fw_ver >= min_fw_ver)
return 0;
dev_info(&adapter->pdev->dev, "Flash fw[%d.%d.%d] is < min fw supported "
"[4.0.505]. Please update firmware on flash\n",
_major(flash_fw_ver), _minor(flash_fw_ver),
_build(flash_fw_ver));
return -EINVAL;
}
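/* Indexed by the NX_*_ROMIMAGE firmware type values; see nx_get_next_fwtype(). */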
static char *fw_name[] = {
NX_P2_MN_ROMIMAGE_NAME,
NX_P3_CT_ROMIMAGE_NAME,
NX_P3_MN_ROMIMAGE_NAME,
NX_UNIFIED_ROMIMAGE_NAME,
NX_FLASH_ROMIMAGE_NAME,
};
int
netxen_load_firmware(struct netxen_adapter *adapter)
{
u64 *ptr64;
u32 i, flashaddr, size;
const struct firmware *fw = adapter->fw;
struct pci_dev *pdev = adapter->pdev;
dev_info(&pdev->dev, "loading firmware from %s\n",
fw_name[adapter->fw_type]);
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1);
if (fw) {
__le64 data;
size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
ptr64 = (u64 *)nx_get_bootld_offs(adapter);
flashaddr = NETXEN_BOOTLD_START;
for (i = 0; i < size; i++) {
data = cpu_to_le64(ptr64[i]);
if (adapter->pci_mem_write(adapter, flashaddr, data))
return -EIO;
flashaddr += 8;
}
size = (__force u32)nx_get_fw_size(adapter) / 8;
ptr64 = (u64 *)nx_get_fw_offs(adapter);
flashaddr = NETXEN_IMAGE_START;
for (i = 0; i < size; i++) {
data = cpu_to_le64(ptr64[i]);
if (adapter->pci_mem_write(adapter,
flashaddr, data))
return -EIO;
flashaddr += 8;
}
size = (__force u32)nx_get_fw_size(adapter) % 8;
if (size) {
data = cpu_to_le64(ptr64[i]);
if (adapter->pci_mem_write(adapter,
flashaddr, data))
return -EIO;
}
} else {
u64 data;
u32 hi, lo;
size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
flashaddr = NETXEN_BOOTLD_START;
for (i = 0; i < size; i++) {
if (netxen_rom_fast_read(adapter,
flashaddr, (int *)&lo) != 0)
return -EIO;
if (netxen_rom_fast_read(adapter,
flashaddr + 4, (int *)&hi) != 0)
return -EIO;
/* hi, lo are already in host endian byteorder */
data = (((u64)hi << 32) | lo);
if (adapter->pci_mem_write(adapter,
flashaddr, data))
return -EIO;
flashaddr += 8;
}
}
msleep(1);
if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x18, 0x1020);
NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001e);
} else if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d);
else {
NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff);
NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 0);
}
return 0;
}
static int
netxen_validate_firmware(struct netxen_adapter *adapter)
{
__le32 val;
__le32 flash_fw_ver;
u32 file_fw_ver, min_ver, bios;
struct pci_dev *pdev = adapter->pdev;
const struct firmware *fw = adapter->fw;
u8 fw_type = adapter->fw_type;
u32 crbinit_fix_fw;
if (fw_type == NX_UNIFIED_ROMIMAGE) {
if (netxen_nic_validate_unified_romimage(adapter))
return -EINVAL;
} else {
val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
if ((__force u32)val != NETXEN_BDINFO_MAGIC)
return -EINVAL;
if (fw->size < NX_FW_MIN_SIZE)
return -EINVAL;
}
val = nx_get_fw_version(adapter);
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
min_ver = NETXEN_MIN_P3_FW_SUPP;
else
min_ver = NETXEN_VERSION_CODE(3, 4, 216);
file_fw_ver = NETXEN_DECODE_VERSION(val);
if ((_major(file_fw_ver) > _NETXEN_NIC_LINUX_MAJOR) ||
(file_fw_ver < min_ver)) {
dev_err(&pdev->dev,
"%s: firmware version %d.%d.%d unsupported\n",
fw_name[fw_type], _major(file_fw_ver), _minor(file_fw_ver),
_build(file_fw_ver));
return -EINVAL;
}
val = nx_get_bios_version(adapter);
netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
if ((__force u32)val != bios) {
dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
fw_name[fw_type]);
return -EINVAL;
}
if (netxen_rom_fast_read(adapter,
NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
dev_err(&pdev->dev, "Unable to read flash fw version\n");
return -EIO;
}
flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);
/* New fw from file is not allowed, if fw on flash is < 4.0.554 */
crbinit_fix_fw = NETXEN_VERSION_CODE(4, 0, 554);
if (file_fw_ver >= crbinit_fix_fw && flash_fw_ver < crbinit_fix_fw &&
NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
dev_err(&pdev->dev, "Incompatibility detected between driver "
"and firmware version on flash. This configuration "
"is not recommended. Please update the firmware on "
"flash immediately\n");
return -EINVAL;
}
/* check if flashed firmware is newer, only for no-mn and P2 case */
if (!netxen_p3_has_mn(adapter) ||
NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
if (flash_fw_ver > file_fw_ver) {
dev_info(&pdev->dev, "%s: firmware is older than flash\n",
fw_name[fw_type]);
return -EINVAL;
}
}
NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
return 0;
}
static void
nx_get_next_fwtype(struct netxen_adapter *adapter)
{
u8 fw_type;
switch (adapter->fw_type) {
case NX_UNKNOWN_ROMIMAGE:
fw_type = NX_UNIFIED_ROMIMAGE;
break;
case NX_UNIFIED_ROMIMAGE:
if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
fw_type = NX_FLASH_ROMIMAGE;
else if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
fw_type = NX_P2_MN_ROMIMAGE;
else if (netxen_p3_has_mn(adapter))
fw_type = NX_P3_MN_ROMIMAGE;
else
fw_type = NX_P3_CT_ROMIMAGE;
break;
case NX_P3_MN_ROMIMAGE:
fw_type = NX_P3_CT_ROMIMAGE;
break;
case NX_P2_MN_ROMIMAGE:
case NX_P3_CT_ROMIMAGE:
default:
fw_type = NX_FLASH_ROMIMAGE;
break;
}
adapter->fw_type = fw_type;
}
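/*
 * Resulting probe order (derived from the switch above): every board
 * tries the unified image first. P3P then falls straight back to flash;
 * P2 tries its MN image before flash; other P3 boards try the MN image
 * (when memory-near DIMMs are present per netxen_p3_has_mn()) and then
 * the CT image before finally falling back to the firmware on flash.
 */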
static int
netxen_p3_has_mn(struct netxen_adapter *adapter)
{
u32 capability, flashed_ver;
capability = 0;
/* NX2031 always had MN */
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 1;
netxen_rom_fast_read(adapter,
NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {
capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
if (capability & NX_PEG_TUNE_MN_PRESENT)
return 1;
}
return 0;
}
void netxen_request_firmware(struct netxen_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
int rc = 0;
adapter->fw_type = NX_UNKNOWN_ROMIMAGE;
next:
nx_get_next_fwtype(adapter);
if (adapter->fw_type == NX_FLASH_ROMIMAGE) {
adapter->fw = NULL;
} else {
rc = request_firmware(&adapter->fw,
fw_name[adapter->fw_type], &pdev->dev);
if (rc != 0)
goto next;
rc = netxen_validate_firmware(adapter);
if (rc != 0) {
release_firmware(adapter->fw);
msleep(1);
goto next;
}
}
}
void
netxen_release_firmware(struct netxen_adapter *adapter)
{
release_firmware(adapter->fw);
adapter->fw = NULL;
}
int netxen_init_dummy_dma(struct netxen_adapter *adapter)
{
u64 addr;
u32 hi, lo;
if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 0;
adapter->dummy_dma.addr = pci_alloc_consistent(adapter->pdev,
NETXEN_HOST_DUMMY_DMA_SIZE,
&adapter->dummy_dma.phys_addr);
if (adapter->dummy_dma.addr == NULL) {
dev_err(&adapter->pdev->dev,
"ERROR: Could not allocate dummy DMA memory\n");
return -ENOMEM;
}
addr = (uint64_t) adapter->dummy_dma.phys_addr;
hi = (addr >> 32) & 0xffffffff;
lo = addr & 0xffffffff;
NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi);
NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo);
return 0;
}
/*
* NetXen DMA watchdog control:
*
* Bit 0 : enabled => R/O: 1 watchdog active, 0 inactive
* Bit 1 : disable_request => 1 req disable dma watchdog
* Bit 2 : enable_request => 1 req enable dma watchdog
* Bit 3-31 : unused
*/
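/*
 * A minimal shutdown sketch using these bits (this is what
 * netxen_free_dummy_dma() below does): read NETXEN_DMA_WATCHDOG_CTRL;
 * if bit 0 is set, write back (ctrl | 0x2) to request disable, then
 * poll until bit 0 clears before freeing the dummy DMA buffer.
 */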
void netxen_free_dummy_dma(struct netxen_adapter *adapter)
{
int i = 100;
u32 ctrl;
if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
return;
if (!adapter->dummy_dma.addr)
return;
ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL);
if ((ctrl & 0x1) != 0) {
NXWR32(adapter, NETXEN_DMA_WATCHDOG_CTRL, (ctrl | 0x2));
while ((ctrl & 0x1) != 0) {
msleep(50);
ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL);
if (--i == 0)
break;
}
}
if (i) {
pci_free_consistent(adapter->pdev,
NETXEN_HOST_DUMMY_DMA_SIZE,
adapter->dummy_dma.addr,
adapter->dummy_dma.phys_addr);
adapter->dummy_dma.addr = NULL;
} else
dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n");
}
int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
{
u32 val = 0;
int retries = 60;
if (pegtune_val)
return 0;
do {
val = NXRD32(adapter, CRB_CMDPEG_STATE);
switch (val) {
case PHAN_INITIALIZE_COMPLETE:
case PHAN_INITIALIZE_ACK:
return 0;
case PHAN_INITIALIZE_FAILED:
goto out_err;
default:
break;
}
msleep(500);
} while (--retries);
NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
out_err:
dev_warn(&adapter->pdev->dev, "firmware init failed\n");
return -EIO;
}
static int
netxen_receive_peg_ready(struct netxen_adapter *adapter)
{
u32 val = 0;
int retries = 2000;
do {
val = NXRD32(adapter, CRB_RCVPEG_STATE);
if (val == PHAN_PEG_RCV_INITIALIZED)
return 0;
msleep(10);
} while (--retries);
if (!retries) {
printk(KERN_ERR "Receive Peg initialization not "
"complete, state: 0x%x.\n", val);
return -EIO;
}
return 0;
}
int netxen_init_firmware(struct netxen_adapter *adapter)
{
int err;
err = netxen_receive_peg_ready(adapter);
if (err)
return err;
NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
return err;
}
static void
netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
{
u32 cable_OUI;
u16 cable_len;
u16 link_speed;
u8 link_status, module, duplex, autoneg;
struct net_device *netdev = adapter->netdev;
adapter->has_link_events = 1;
cable_OUI = msg->body[1] & 0xffffffff;
cable_len = (msg->body[1] >> 32) & 0xffff;
link_speed = (msg->body[1] >> 48) & 0xffff;
link_status = msg->body[2] & 0xff;
duplex = (msg->body[2] >> 16) & 0xff;
autoneg = (msg->body[2] >> 24) & 0xff;
module = (msg->body[2] >> 8) & 0xff;
if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) {
printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n",
netdev->name, cable_OUI, cable_len);
} else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) {
printk(KERN_INFO "%s: unsupported cable length %d\n",
netdev->name, cable_len);
}
/* update link parameters */
if (duplex == LINKEVENT_FULL_DUPLEX)
adapter->link_duplex = DUPLEX_FULL;
else
adapter->link_duplex = DUPLEX_HALF;
adapter->module_type = module;
adapter->link_autoneg = autoneg;
adapter->link_speed = link_speed;
netxen_advert_link_change(adapter, link_status);
}
static void
netxen_handle_fw_message(int desc_cnt, int index,
struct nx_host_sds_ring *sds_ring)
{
nx_fw_msg_t msg;
struct status_desc *desc;
int i = 0, opcode;
while (desc_cnt > 0 && i < 8) {
desc = &sds_ring->desc_head[index];
msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
index = get_next_index(index, sds_ring->num_desc);
desc_cnt--;
}
opcode = netxen_get_nic_msg_opcode(msg.body[0]);
switch (opcode) {
case NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
netxen_handle_linkevent(sds_ring->adapter, &msg);
break;
default:
break;
}
}
static int
netxen_alloc_rx_skb(struct netxen_adapter *adapter,
struct nx_host_rds_ring *rds_ring,
struct netxen_rx_buffer *buffer)
{
struct sk_buff *skb;
dma_addr_t dma;
struct pci_dev *pdev = adapter->pdev;
buffer->skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
if (!buffer->skb)
return 1;
skb = buffer->skb;
if (!adapter->ahw.cut_through)
skb_reserve(skb, 2);
dma = pci_map_single(pdev, skb->data,
rds_ring->dma_size, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(pdev, dma)) {
dev_kfree_skb_any(skb);
buffer->skb = NULL;
return 1;
}
buffer->skb = skb;
buffer->dma = dma;
buffer->state = NETXEN_BUFFER_BUSY;
return 0;
}
static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
{
struct netxen_rx_buffer *buffer;
struct sk_buff *skb;
buffer = &rds_ring->rx_buf_arr[index];
pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
PCI_DMA_FROMDEVICE);
skb = buffer->skb;
if (!skb)
goto no_skb;
if (likely((adapter->netdev->features & NETIF_F_RXCSUM)
&& cksum == STATUS_CKSUM_OK)) {
adapter->stats.csummed++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
skb->ip_summed = CHECKSUM_NONE;
buffer->skb = NULL;
no_skb:
buffer->state = NETXEN_BUFFER_FREE;
return skb;
}
static struct netxen_rx_buffer *
netxen_process_rcv(struct netxen_adapter *adapter,
struct nx_host_sds_ring *sds_ring,
int ring, u64 sts_data0)
{
struct net_device *netdev = adapter->netdev;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
struct netxen_rx_buffer *buffer;
struct sk_buff *skb;
struct nx_host_rds_ring *rds_ring;
int index, length, cksum, pkt_offset;
if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
rds_ring = &recv_ctx->rds_rings[ring];
index = netxen_get_sts_refhandle(sts_data0);
if (unlikely(index >= rds_ring->num_desc))
return NULL;
buffer = &rds_ring->rx_buf_arr[index];
length = netxen_get_sts_totallength(sts_data0);
cksum = netxen_get_sts_status(sts_data0);
pkt_offset = netxen_get_sts_pkt_offset(sts_data0);
skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
if (!skb)
return buffer;
if (length > rds_ring->skb_size)
skb_put(skb, rds_ring->skb_size);
else
skb_put(skb, length);
if (pkt_offset)
skb_pull(skb, pkt_offset);
skb->protocol = eth_type_trans(skb, netdev);
napi_gro_receive(&sds_ring->napi, skb);
adapter->stats.rx_pkts++;
adapter->stats.rxbytes += length;
return buffer;
}
#define TCP_HDR_SIZE 20
#define TCP_TS_OPTION_SIZE 12
#define TCP_TS_HDR_SIZE (TCP_HDR_SIZE + TCP_TS_OPTION_SIZE)
static struct netxen_rx_buffer *
netxen_process_lro(struct netxen_adapter *adapter,
struct nx_host_sds_ring *sds_ring,
int ring, u64 sts_data0, u64 sts_data1)
{
struct net_device *netdev = adapter->netdev;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
struct netxen_rx_buffer *buffer;
struct sk_buff *skb;
struct nx_host_rds_ring *rds_ring;
struct iphdr *iph;
struct tcphdr *th;
bool push, timestamp;
int l2_hdr_offset, l4_hdr_offset;
int index;
u16 lro_length, length, data_offset;
u32 seq_number;
u8 vhdr_len = 0;
if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
rds_ring = &recv_ctx->rds_rings[ring];
index = netxen_get_lro_sts_refhandle(sts_data0);
if (unlikely(index >= rds_ring->num_desc))
return NULL;
buffer = &rds_ring->rx_buf_arr[index];
timestamp = netxen_get_lro_sts_timestamp(sts_data0);
lro_length = netxen_get_lro_sts_length(sts_data0);
l2_hdr_offset = netxen_get_lro_sts_l2_hdr_offset(sts_data0);
l4_hdr_offset = netxen_get_lro_sts_l4_hdr_offset(sts_data0);
push = netxen_get_lro_sts_push_flag(sts_data0);
seq_number = netxen_get_lro_sts_seq_number(sts_data1);
skb = netxen_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
if (!skb)
return buffer;
if (timestamp)
data_offset = l4_hdr_offset + TCP_TS_HDR_SIZE;
else
data_offset = l4_hdr_offset + TCP_HDR_SIZE;
skb_put(skb, lro_length + data_offset);
skb_pull(skb, l2_hdr_offset);
skb->protocol = eth_type_trans(skb, netdev);
if (skb->protocol == htons(ETH_P_8021Q))
vhdr_len = VLAN_HLEN;
iph = (struct iphdr *)(skb->data + vhdr_len);
th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2));
length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
csum_replace2(&iph->check, iph->tot_len, htons(length));
iph->tot_len = htons(length);
th->psh = push;
th->seq = htonl(seq_number);
length = skb->len;
if (adapter->flags & NETXEN_FW_MSS_CAP)
skb_shinfo(skb)->gso_size = netxen_get_lro_sts_mss(sts_data1);
netif_receive_skb(skb);
adapter->stats.lro_pkts++;
adapter->stats.rxbytes += length;
return buffer;
}
#define netxen_merge_rx_buffers(list, head) \
do { list_splice_tail_init(list, head); } while (0)
int
netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
{
struct netxen_adapter *adapter = sds_ring->adapter;
struct list_head *cur;
struct status_desc *desc;
struct netxen_rx_buffer *rxbuf;
u32 consumer = sds_ring->consumer;
int count = 0;
u64 sts_data0, sts_data1;
int opcode, ring = 0, desc_cnt;
while (count < max) {
desc = &sds_ring->desc_head[consumer];
sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
if (!(sts_data0 & STATUS_OWNER_HOST))
break;
desc_cnt = netxen_get_sts_desc_cnt(sts_data0);
opcode = netxen_get_sts_opcode(sts_data0);
switch (opcode) {
case NETXEN_NIC_RXPKT_DESC:
case NETXEN_OLD_RXPKT_DESC:
case NETXEN_NIC_SYN_OFFLOAD:
ring = netxen_get_sts_type(sts_data0);
rxbuf = netxen_process_rcv(adapter, sds_ring,
ring, sts_data0);
break;
case NETXEN_NIC_LRO_DESC:
ring = netxen_get_lro_sts_type(sts_data0);
sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
rxbuf = netxen_process_lro(adapter, sds_ring,
ring, sts_data0, sts_data1);
break;
case NETXEN_NIC_RESPONSE_DESC:
netxen_handle_fw_message(desc_cnt, consumer, sds_ring);
/* fall through */
default:
goto skip;
}
WARN_ON(desc_cnt > 1);
if (rxbuf)
list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
skip:
for (; desc_cnt > 0; desc_cnt--) {
desc = &sds_ring->desc_head[consumer];
desc->status_desc_data[0] =
cpu_to_le64(STATUS_OWNER_PHANTOM);
consumer = get_next_index(consumer, sds_ring->num_desc);
}
count++;
}
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
struct nx_host_rds_ring *rds_ring =
&adapter->recv_ctx.rds_rings[ring];
if (!list_empty(&sds_ring->free_list[ring])) {
list_for_each(cur, &sds_ring->free_list[ring]) {
rxbuf = list_entry(cur,
struct netxen_rx_buffer, list);
netxen_alloc_rx_skb(adapter, rds_ring, rxbuf);
}
spin_lock(&rds_ring->lock);
netxen_merge_rx_buffers(&sds_ring->free_list[ring],
&rds_ring->free_list);
spin_unlock(&rds_ring->lock);
}
netxen_post_rx_buffers_nodb(adapter, rds_ring);
}
if (count) {
sds_ring->consumer = consumer;
NXWRIO(adapter, sds_ring->crb_sts_consumer, consumer);
}
return count;
}
/* Process Command status ring */
int netxen_process_cmd_ring(struct netxen_adapter *adapter)
{
u32 sw_consumer, hw_consumer;
int count = 0, i;
struct netxen_cmd_buffer *buffer;
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
struct netxen_skb_frag *frag;
int done = 0;
struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
if (!spin_trylock_bh(&adapter->tx_clean_lock))
return 1;
sw_consumer = tx_ring->sw_consumer;
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
while (sw_consumer != hw_consumer) {
buffer = &tx_ring->cmd_buf_arr[sw_consumer];
if (buffer->skb) {
frag = &buffer->frag_array[0];
pci_unmap_single(pdev, frag->dma, frag->length,
PCI_DMA_TODEVICE);
frag->dma = 0ULL;
for (i = 1; i < buffer->frag_count; i++) {
frag++; /* Get the next frag */
pci_unmap_page(pdev, frag->dma, frag->length,
PCI_DMA_TODEVICE);
frag->dma = 0ULL;
}
adapter->stats.xmitfinished++;
dev_kfree_skb_any(buffer->skb);
buffer->skb = NULL;
}
sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
if (++count >= MAX_STATUS_HANDLE)
break;
}
tx_ring->sw_consumer = sw_consumer;
if (count && netif_running(netdev)) {
smp_mb();
if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
netif_wake_queue(netdev);
adapter->tx_timeo_cnt = 0;
}
/*
* If everything is freed up to consumer then check if the ring is full
* If the ring is full then check if more needs to be freed and
* schedule the call back again.
*
* This happens when there are 2 CPUs. One could be freeing and the
* other filling it. If the ring is full when we get out of here and
* the card has already interrupted the host then the host can miss the
* interrupt.
*
* There is still a possible race condition and the host could miss an
* interrupt. The card has to take care of this.
*/
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
spin_unlock_bh(&adapter->tx_clean_lock);
return done;
}
void
netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
struct nx_host_rds_ring *rds_ring)
{
struct rcv_desc *pdesc;
struct netxen_rx_buffer *buffer;
int producer, count = 0;
netxen_ctx_msg msg = 0;
struct list_head *head;
producer = rds_ring->producer;
head = &rds_ring->free_list;
while (!list_empty(head)) {
buffer = list_entry(head->next, struct netxen_rx_buffer, list);
if (!buffer->skb) {
if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
break;
}
count++;
list_del(&buffer->list);
/* make a rcv descriptor */
pdesc = &rds_ring->desc_head[producer];
pdesc->addr_buffer = cpu_to_le64(buffer->dma);
pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
producer = get_next_index(producer, rds_ring->num_desc);
}
if (count) {
rds_ring->producer = producer;
NXWRIO(adapter, rds_ring->crb_rcv_producer,
(producer-1) & (rds_ring->num_desc-1));
if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
/*
* Write a doorbell msg to tell phanmon of change in
* receive ring producer
* Only for firmware version < 4.0.0
*/
netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
netxen_set_msg_privid(msg);
netxen_set_msg_count(msg,
((producer - 1) &
(rds_ring->num_desc - 1)));
netxen_set_msg_ctxid(msg, adapter->portnum);
netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
NXWRIO(adapter, DB_NORMALIZE(adapter,
NETXEN_RCV_PRODUCER_OFFSET), msg);
}
}
}
static void
netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
struct nx_host_rds_ring *rds_ring)
{
struct rcv_desc *pdesc;
struct netxen_rx_buffer *buffer;
int producer, count = 0;
struct list_head *head;
if (!spin_trylock(&rds_ring->lock))
return;
producer = rds_ring->producer;
head = &rds_ring->free_list;
while (!list_empty(head)) {
buffer = list_entry(head->next, struct netxen_rx_buffer, list);
if (!buffer->skb) {
if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
break;
}
count++;
list_del(&buffer->list);
/* make a rcv descriptor */
pdesc = &rds_ring->desc_head[producer];
pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
pdesc->addr_buffer = cpu_to_le64(buffer->dma);
producer = get_next_index(producer, rds_ring->num_desc);
}
if (count) {
rds_ring->producer = producer;
NXWRIO(adapter, rds_ring->crb_rcv_producer,
(producer - 1) & (rds_ring->num_desc - 1));
}
spin_unlock(&rds_ring->lock);
}
void netxen_nic_clear_stats(struct netxen_adapter *adapter)
{
memset(&adapter->stats, 0, sizeof(adapter->stats));
}
| gpl-2.0 |
androidbftab1/bf-kernel-4.2 | drivers/staging/lustre/lustre/lov/lov_offset.c | 1306 | 8896 | /*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
* http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
* GPL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
* Copyright (c) 2011, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
#define DEBUG_SUBSYSTEM S_LOV
#include "../../include/linux/libcfs/libcfs.h"
#include "../include/obd_class.h"
#include "lov_internal.h"
/* compute object size given "stripeno" and the ost size */
u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size,
int stripeno)
{
unsigned long ssize = lsm->lsm_stripe_size;
unsigned long stripe_size;
u64 swidth;
u64 lov_size;
int magic = lsm->lsm_magic;
if (ost_size == 0)
return 0;
LASSERT(lsm_op_find(magic) != NULL);
lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, NULL, &swidth);
/* lov_do_div64(a, b) returns a % b, and a = a / b */
stripe_size = lov_do_div64(ost_size, ssize);
if (stripe_size)
lov_size = ost_size * swidth + stripeno * ssize + stripe_size;
else
lov_size = (ost_size - 1) * swidth + (stripeno + 1) * ssize;
return lov_size;
}
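/* Worked example (editor's illustration; the numbers are assumed, not
 * from the original source): take ssize = 1MB and 3 stripes, so
 * swidth = 3MB, and ask what file size is implied by stripe 1's object
 * being 2.5MB:
 *
 *   lov_do_div64(ost_size = 2.5MB, ssize) -> stripe_size = 0.5MB and
 *   ost_size becomes 2 (full chunks), so
 *   lov_size = 2 * 3MB + 1 * 1MB + 0.5MB = 7.5MB.
 *
 * With ost_size exactly 2MB the remainder is 0 and the else branch
 * gives (2 - 1) * 3MB + (1 + 1) * 1MB = 5MB, the end of stripe 1's
 * second chunk in the file.
 */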
/* we have an offset in a file backed by an lov and want to find out where
* that offset lands in our given stripe of the file. for the easy
* case where the offset is within the stripe, we just have to scale the
* offset down to make it relative to the stripe instead of the lov.
*
* the harder case is what to do when the offset doesn't intersect the
* stripe. callers will want start offsets clamped ahead to the start
* of the nearest stripe in the file. end offsets similarly clamped to the
* nearest ending byte of a stripe in the file:
*
* all this function does is move offsets to the nearest region of the
* stripe, and it does its work "mod" the full length of all the stripes.
* consider a file with 3 stripes:
*
* S E
* ---------------------------------------------------------------------
* | 0 | 1 | 2 | 0 | 1 | 2 |
* ---------------------------------------------------------------------
*
* to find stripe 1's offsets for S and E, it divides by the full stripe
* width and does its math in the context of a single set of stripes:
*
* S E
* -----------------------------------
* | 0 | 1 | 2 |
* -----------------------------------
*
* it'll notice that E is outside stripe 1 and clamp it to the end of the
* stripe, then multiply it back out by lov_off to give the real offsets in
* the stripe:
*
* S E
* ---------------------------------------------------------------------
* | 1 | 1 | 1 | 1 | 1 | 1 |
* ---------------------------------------------------------------------
*
* it would have done similarly and pulled S forward to the start of a 1
* stripe if, say, S had landed in a 0 stripe.
*
* this rounding isn't always correct. consider an E lov offset that lands
* on a 0 stripe, the "mod stripe width" math will pull it forward to the
* start of a 1 stripe, when in fact it wanted to be rounded back to the end
* of a previous 1 stripe. this logic is handled by callers and this is why:
*
* this function returns < 0 when the offset was "before" the stripe and
* was moved forward to the start of the stripe in question; 0 when it
* falls in the stripe and no shifting was done; > 0 when the offset
* was outside the stripe and was pulled back to its final byte. */
int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off,
int stripeno, u64 *obdoff)
{
unsigned long ssize = lsm->lsm_stripe_size;
u64 stripe_off, this_stripe, swidth;
int magic = lsm->lsm_magic;
int ret = 0;
if (lov_off == OBD_OBJECT_EOF) {
*obdoff = OBD_OBJECT_EOF;
return 0;
}
LASSERT(lsm_op_find(magic) != NULL);
lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &lov_off,
&swidth);
/* lov_do_div64(a, b) returns a % b, and a = a / b */
stripe_off = lov_do_div64(lov_off, swidth);
this_stripe = (u64)stripeno * ssize;
if (stripe_off < this_stripe) {
stripe_off = 0;
ret = -1;
} else {
stripe_off -= this_stripe;
if (stripe_off >= ssize) {
stripe_off = ssize;
ret = 1;
}
}
*obdoff = lov_off * ssize + stripe_off;
return ret;
}
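/* Worked example (editor's illustration, same assumed geometry as
 * above: ssize = 1MB, swidth = 3MB, stripeno = 1):
 *
 *   lov_off = 7.5MB: lov_do_div64() leaves stripe_off = 1.5MB and
 *   lov_off = 2; stripe_off - this_stripe(1MB) = 0.5MB < ssize, so
 *   *obdoff = 2 * 1MB + 0.5MB = 2.5MB and the function returns 0.
 *
 *   lov_off = 6.2MB lands in a "0" stripe: stripe_off = 0.2MB is below
 *   this_stripe, so the offset is clamped forward, *obdoff = 2MB (the
 *   start of stripe 1's next chunk in the object) and the function
 *   returns -1.
 */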
/* Given a whole-file size and a stripe number, give the file size which
* corresponds to the individual object of that stripe.
*
 * This behaves basically in the same way as lov_stripe_offset, except that
* file sizes falling before the beginning of a stripe are clamped to the end
* of the previous stripe, not the beginning of the next:
*
* S
* ---------------------------------------------------------------------
* | 0 | 1 | 2 | 0 | 1 | 2 |
* ---------------------------------------------------------------------
*
* if clamped to stripe 2 becomes:
*
* S
* ---------------------------------------------------------------------
* | 0 | 1 | 2 | 0 | 1 | 2 |
* ---------------------------------------------------------------------
*/
u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size,
int stripeno)
{
unsigned long ssize = lsm->lsm_stripe_size;
u64 stripe_off, this_stripe, swidth;
int magic = lsm->lsm_magic;
if (file_size == OBD_OBJECT_EOF)
return OBD_OBJECT_EOF;
LASSERT(lsm_op_find(magic) != NULL);
lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &file_size,
&swidth);
/* lov_do_div64(a, b) returns a % b, and a = a / b */
stripe_off = lov_do_div64(file_size, swidth);
this_stripe = (u64)stripeno * ssize;
if (stripe_off < this_stripe) {
/* Move to end of previous stripe, or zero */
if (file_size > 0) {
file_size--;
stripe_off = ssize;
} else {
stripe_off = 0;
}
} else {
stripe_off -= this_stripe;
if (stripe_off >= ssize) {
/* Clamp to end of this stripe */
stripe_off = ssize;
}
}
return (file_size * ssize + stripe_off);
}
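/* Worked example (editor's illustration, assumed geometry as above):
 * file_size = 6.2MB, stripeno = 1: stripe_off = 0.2MB falls before
 * this_stripe, so the size is clamped backward -- file_size becomes 1
 * and stripe_off = ssize, giving 1 * 1MB + 1MB = 2MB: stripe 1's
 * object holds exactly two full chunks of the 6.2MB file.
 */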
/* given an extent in an lov and a stripe, calculate the extent of the stripe
* that is contained within the lov extent. this returns true if the given
* stripe does intersect with the lov extent. */
int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno,
u64 start, u64 end, u64 *obd_start, u64 *obd_end)
{
int start_side, end_side;
start_side = lov_stripe_offset(lsm, start, stripeno, obd_start);
end_side = lov_stripe_offset(lsm, end, stripeno, obd_end);
CDEBUG(D_INODE, "[%llu->%llu] -> [(%d) %llu->%llu (%d)]\n",
start, end, start_side, *obd_start, *obd_end, end_side);
/* this stripe doesn't intersect the file extent when neither
 * the start nor the end intersected the stripe and obd_start and
 * obd_end got rounded up to the same value. */
if (start_side != 0 && end_side != 0 && *obd_start == *obd_end)
return 0;
/* as mentioned in the lov_stripe_offset commentary, end
* might have been shifted in the wrong direction. This
* happens when an end offset is before the stripe when viewed
* through the "mod stripe size" math. we detect it being shifted
* in the wrong direction and touch it up.
* interestingly, this can't underflow since end must be > start
* if we passed through the previous check.
* (should we assert for that somewhere?) */
if (end_side != 0)
(*obd_end)--;
return 1;
}
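/* Worked example (editor's illustration, assumed geometry as above):
 * for the extent [6.2MB, 7.5MB) against stripe 1, the start is clamped
 * forward to *obd_start = 2MB (start_side = -1) while the end maps
 * cleanly to *obd_end = 2.5MB (end_side = 0, so no touch-up), and the
 * function reports an intersection with object extent [2MB, 2.5MB).
 */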
/* compute which stripe number "lov_off" will be written into */
int lov_stripe_number(struct lov_stripe_md *lsm, u64 lov_off)
{
unsigned long ssize = lsm->lsm_stripe_size;
u64 stripe_off, swidth;
int magic = lsm->lsm_magic;
LASSERT(lsm_op_find(magic) != NULL);
lsm_op_find(magic)->lsm_stripe_by_offset(lsm, NULL, &lov_off, &swidth);
stripe_off = lov_do_div64(lov_off, swidth);
/* Puts stripe_off/ssize result into stripe_off */
lov_do_div64(stripe_off, ssize);
return stripe_off;
}
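/* Worked example (editor's illustration, assumed geometry as above):
 * lov_off = 7.5MB: the first lov_do_div64() leaves stripe_off = 1.5MB
 * (7.5MB mod swidth); dividing that by ssize = 1MB yields 1, so the
 * offset is written into stripe number 1.
 */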
| gpl-2.0 |
zparallax/amplitude_aosp_12 | drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v2.c | 1818 | 5047 | /* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/module.h>
#include "mpq_dvb_debug.h"
#include "mpq_dmx_plugin_common.h"
#define TSIF_COUNT 2
#define BAM_INPUT_COUNT 4
/* Max number of PID filters */
#define TSPP_MAX_PID_FILTER_NUM 128
/* Max number of section filters */
#define TSPP_MAX_SECTION_FILTER_NUM 64
static int mpq_tspp_dmx_start_filtering(struct dvb_demux_feed *feed)
{
MPQ_DVB_DBG_PRINT(
"%s(%d) executed\n",
__func__,
feed->pid);
/* Always feed sections/PES starting from a new one and
 * do not partially transfer data from an older one
*/
feed->pusi_seen = 0;
return 0;
}
static int mpq_tspp_dmx_stop_filtering(struct dvb_demux_feed *feed)
{
MPQ_DVB_DBG_PRINT(
"%s(%d) executed\n",
__func__,
feed->pid);
return 0;
}
/**
* Returns demux capabilities of TSPPv2 plugin
*
* @demux: demux device
 * @caps: Returned capabilities
*
* Return error code
*/
static int mpq_tspp_dmx_get_caps(struct dmx_demux *demux,
struct dmx_caps *caps)
{
struct dvb_demux *dvb_demux = demux->priv;
if ((dvb_demux == NULL) || (caps == NULL)) {
MPQ_DVB_ERR_PRINT(
"%s: invalid parameters\n",
__func__);
return -EINVAL;
}
caps->caps = DMX_CAP_PULL_MODE | DMX_CAP_VIDEO_INDEXING |
DMX_CAP_VIDEO_DECODER_DATA | DMX_CAP_TS_INSERTION |
DMX_CAP_SECURED_INPUT_PLAYBACK;
caps->num_decoders = MPQ_ADAPTER_MAX_NUM_OF_INTERFACES;
caps->num_demux_devices = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
caps->num_pid_filters = TSPP_MAX_PID_FILTER_NUM;
caps->num_section_filters = dvb_demux->filternum;
caps->num_section_filters_per_pid = dvb_demux->filternum;
caps->section_filter_length = DMX_FILTER_SIZE;
caps->num_demod_inputs = TSIF_COUNT;
caps->num_memory_inputs = BAM_INPUT_COUNT;
caps->max_bitrate = 320;
caps->demod_input_max_bitrate = 96;
caps->memory_input_max_bitrate = 80;
caps->num_cipher_ops = DMX_MAX_CIPHER_OPERATIONS_COUNT;
/* TSIF reports a 7-byte STC in units of 27MHz */
caps->max_stc = 0x00FFFFFFFFFFFFFF;
return 0;
}
/**
* Initialize a single demux device.
*
* @mpq_adapter: MPQ DVB adapter
* @mpq_demux: The demux device to initialize
*
* Return error code
*/
static int mpq_tspp_dmx_init(
struct dvb_adapter *mpq_adapter,
struct mpq_demux *mpq_demux)
{
int result;
MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
/* Set the kernel-demux object capabilities */
mpq_demux->demux.dmx.capabilities =
DMX_TS_FILTERING |
DMX_PES_FILTERING |
DMX_SECTION_FILTERING |
DMX_MEMORY_BASED_FILTERING |
DMX_CRC_CHECKING |
DMX_TS_DESCRAMBLING;
/* Set dvb-demux "virtual" function pointers */
mpq_demux->demux.priv = (void *)mpq_demux;
mpq_demux->demux.filternum = TSPP_MAX_SECTION_FILTER_NUM;
mpq_demux->demux.feednum = MPQ_MAX_DMX_FILES;
mpq_demux->demux.start_feed = mpq_tspp_dmx_start_filtering;
mpq_demux->demux.stop_feed = mpq_tspp_dmx_stop_filtering;
mpq_demux->demux.write_to_decoder = NULL;
mpq_demux->demux.decoder_fullness_init = NULL;
mpq_demux->demux.decoder_fullness_wait = NULL;
mpq_demux->demux.decoder_fullness_abort = NULL;
mpq_demux->demux.decoder_buffer_status = NULL;
mpq_demux->demux.reuse_decoder_buffer = NULL;
mpq_demux->demux.set_secure_mode = NULL;
mpq_demux->demux.oob_command = NULL;
mpq_demux->demux.convert_ts = NULL;
/* Initialize dvb_demux object */
result = dvb_dmx_init(&mpq_demux->demux);
if (result < 0) {
MPQ_DVB_ERR_PRINT("%s: dvb_dmx_init failed\n", __func__);
goto init_failed;
}
/* Now initialize the dmx-dev object */
mpq_demux->dmxdev.filternum = MPQ_MAX_DMX_FILES;
mpq_demux->dmxdev.demux = &mpq_demux->demux.dmx;
mpq_demux->dmxdev.capabilities = DMXDEV_CAP_DUPLEX;
mpq_demux->dmxdev.demux->set_source = mpq_dmx_set_source;
mpq_demux->dmxdev.demux->get_caps = mpq_tspp_dmx_get_caps;
result = dvb_dmxdev_init(&mpq_demux->dmxdev, mpq_adapter);
if (result < 0) {
MPQ_DVB_ERR_PRINT("%s: dvb_dmxdev_init failed (errno=%d)\n",
__func__,
result);
goto init_failed_dmx_release;
}
return 0;
init_failed_dmx_release:
dvb_dmx_release(&mpq_demux->demux);
init_failed:
return result;
}
static int __init mpq_dmx_tspp_plugin_init(void)
{
MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
return mpq_dmx_plugin_init(mpq_tspp_dmx_init);
}
static void __exit mpq_dmx_tspp_plugin_exit(void)
{
MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
mpq_dmx_plugin_exit();
}
module_init(mpq_dmx_tspp_plugin_init);
module_exit(mpq_dmx_tspp_plugin_exit);
MODULE_DESCRIPTION("Qualcomm demux TSPP version2 HW Plugin");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
Fevax/kernel_samsung_exynos5422 | drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2074 | 155170 | /*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
* Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <asm/uaccess.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"
/*
* Max interrupt hold-off timer value in us. Queues fall back to this value
* under extreme memory pressure so it's largish to give the system time to
* recover.
*/
#define MAX_SGE_TIMERVAL 200U
enum {
/*
* Physical Function provisioning constants.
*/
PFRES_NVI = 4, /* # of Virtual Interfaces */
PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr
*/
PFRES_NEQ = 256, /* # of egress queues */
PFRES_NIQ = 0, /* # of ingress queues */
PFRES_TC = 0, /* PCI-E traffic class */
PFRES_NEXACTF = 128, /* # of exact MPS filters */
PFRES_R_CAPS = FW_CMD_CAP_PF,
PFRES_WX_CAPS = FW_CMD_CAP_PF,
#ifdef CONFIG_PCI_IOV
/*
* Virtual Function provisioning constants. We need two extra Ingress
* Queues with Interrupt capability to serve as the VF's Firmware
 * Event Queue and Forwarded Interrupt Queue (when using MSI mode --
 * neither will have Free Lists associated with them). For each
* Ethernet/Control Egress Queue and for each Free List, we need an
* Egress Context.
*/
VFRES_NPORTS = 1, /* # of "ports" per VF */
VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
VFRES_TC = 0, /* PCI-E traffic class */
VFRES_NEXACTF = 16, /* # of exact MPS filters */
VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
#endif
};
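/* Editor's note on the VF accounting above (illustrative arithmetic):
 * with VFRES_NQSETS = 2, a VF needs its 2 data ingress queues plus the
 * Firmware Event Queue and the MSI-mode Forwarded Interrupt Queue,
 * hence VFRES_NIQFLINT = 2 + 2 = 4; and each Queue Set contributes an
 * Ethernet TX queue plus a Free List, each consuming an egress
 * context, hence VFRES_NEQ = 2 * 2 = 4.
 */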
/*
* Provide a Port Access Rights Mask for the specified PF/VF. This is very
* static and likely not to be useful in the long run. We really need to
* implement some form of persistent configuration which the firmware
* controls.
*/
static unsigned int pfvfres_pmask(struct adapter *adapter,
unsigned int pf, unsigned int vf)
{
unsigned int portn, portvec;
/*
* Give PF's access to all of the ports.
*/
if (vf == 0)
return FW_PFVF_CMD_PMASK_MASK;
/*
* For VFs, we'll assign them access to the ports based purely on the
* PF. We assign active ports in order, wrapping around if there are
* fewer active ports than PFs: e.g. active port[pf % nports].
* Unfortunately the adapter's port_info structs haven't been
* initialized yet so we have to compute this.
*/
if (adapter->params.nports == 0)
return 0;
portn = pf % adapter->params.nports;
portvec = adapter->params.portvec;
for (;;) {
/*
* Isolate the lowest set bit in the port vector. If we're at
 * the port number that we want, return that as the pmask;
 * otherwise mask that bit out of the port vector and
* decrement our port number ...
*/
unsigned int pmask = portvec ^ (portvec & (portvec-1));
if (portn == 0)
return pmask;
portn--;
portvec &= ~pmask;
}
/*NOTREACHED*/
}
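/* Worked example for the loop above (editor's illustration, assumed
 * values): portvec = 0b1010 (ports 1 and 3 active), pf = 5, two active
 * ports, so portn = 5 % 2 = 1.  The first pass isolates the lowest set
 * bit, 0b1010 ^ (0b1010 & 0b1001) = 0b0010; portn != 0, so that bit is
 * masked off and portn drops to 0.  The second pass isolates 0b1000
 * and returns it: this VF is granted access to port 3.
 */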
enum {
MAX_TXQ_ENTRIES = 16384,
MAX_CTRL_TXQ_ENTRIES = 1024,
MAX_RSPQ_ENTRIES = 16384,
MAX_RX_BUFFERS = 16384,
MIN_TXQ_ENTRIES = 32,
MIN_CTRL_TXQ_ENTRIES = 32,
MIN_RSPQ_ENTRIES = 128,
MIN_FL_ENTRIES = 16
};
/* Host shadow copy of ingress filter entry. This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
* firmware command. The use of bit-field structure elements is purely to
* remind ourselves of the field size limitations and save memory in the case
* where the filter table is large.
*/
struct filter_entry {
/* Administrative fields for filter.
*/
u32 valid:1; /* filter allocated and valid */
u32 locked:1; /* filter is administratively locked */
u32 pending:1; /* filter action is pending firmware reply */
u32 smtidx:8; /* Source MAC Table index for smac */
struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
/* The filter itself. Most of this is a straight copy of information
* provided by the extended ioctl(). Some fields are translated to
* internal forms -- for instance the Ingress Queue ID passed in from
* the ioctl() is translated into the Absolute Ingress Queue ID.
*/
struct ch_filter_specification fs;
};
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
CH_DEVICE(0xa000, 0), /* PE10K */
CH_DEVICE(0x4001, -1),
CH_DEVICE(0x4002, -1),
CH_DEVICE(0x4003, -1),
CH_DEVICE(0x4004, -1),
CH_DEVICE(0x4005, -1),
CH_DEVICE(0x4006, -1),
CH_DEVICE(0x4007, -1),
CH_DEVICE(0x4008, -1),
CH_DEVICE(0x4009, -1),
CH_DEVICE(0x400a, -1),
CH_DEVICE(0x4401, 4),
CH_DEVICE(0x4402, 4),
CH_DEVICE(0x4403, 4),
CH_DEVICE(0x4404, 4),
CH_DEVICE(0x4405, 4),
CH_DEVICE(0x4406, 4),
CH_DEVICE(0x4407, 4),
CH_DEVICE(0x4408, 4),
CH_DEVICE(0x4409, 4),
CH_DEVICE(0x440a, 4),
CH_DEVICE(0x440d, 4),
CH_DEVICE(0x440e, 4),
CH_DEVICE(0x5001, 4),
CH_DEVICE(0x5002, 4),
CH_DEVICE(0x5003, 4),
CH_DEVICE(0x5004, 4),
CH_DEVICE(0x5005, 4),
CH_DEVICE(0x5006, 4),
CH_DEVICE(0x5007, 4),
CH_DEVICE(0x5008, 4),
CH_DEVICE(0x5009, 4),
CH_DEVICE(0x500A, 4),
CH_DEVICE(0x500B, 4),
CH_DEVICE(0x500C, 4),
CH_DEVICE(0x500D, 4),
CH_DEVICE(0x500E, 4),
CH_DEVICE(0x500F, 4),
CH_DEVICE(0x5010, 4),
CH_DEVICE(0x5011, 4),
CH_DEVICE(0x5012, 4),
CH_DEVICE(0x5013, 4),
CH_DEVICE(0x5401, 4),
CH_DEVICE(0x5402, 4),
CH_DEVICE(0x5403, 4),
CH_DEVICE(0x5404, 4),
CH_DEVICE(0x5405, 4),
CH_DEVICE(0x5406, 4),
CH_DEVICE(0x5407, 4),
CH_DEVICE(0x5408, 4),
CH_DEVICE(0x5409, 4),
CH_DEVICE(0x540A, 4),
CH_DEVICE(0x540B, 4),
CH_DEVICE(0x540C, 4),
CH_DEVICE(0x540D, 4),
CH_DEVICE(0x540E, 4),
CH_DEVICE(0x540F, 4),
CH_DEVICE(0x5410, 4),
CH_DEVICE(0x5411, 4),
CH_DEVICE(0x5412, 4),
CH_DEVICE(0x5413, 4),
{ 0, }
};
#define FW_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
/*
* Normally we're willing to become the firmware's Master PF but will be happy
* if another PF has already become the Master and initialized the adapter.
* Setting "force_init" will cause this driver to forcibly establish itself as
* the Master PF and initialize the adapter.
*/
static uint force_init;
module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
/*
* Normally if the firmware we connect to has Configuration File support, we
* use that and only fall back to the old Driver-based initialization if the
* Configuration File fails for some reason. If force_old_init is set, then
* we'll always use the old Driver-based initialization sequence.
*/
static uint force_old_init;
module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
static int dflt_msg_enable = DFLT_MSG_ENABLE;
module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
/*
* The driver uses the best interrupt scheme available on a platform in the
* order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
* of these schemes the driver may consider as follows:
*
* msi = 2: choose from among all three options
* msi = 1: only consider MSI and INTx interrupts
* msi = 0: force INTx interrupts
*/
static int msi = 2;
module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
/*
* Queue interrupt hold-off timer values. Queues default to the first of these
* upon creation.
*/
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
"0..4 in microseconds");
static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
"thresholds 1..3 for queue interrupt packet counters");
/*
* Normally we tell the chip to deliver Ingress Packets into our DMA buffers
* offset by 2 bytes in order to have the IP headers line up on 4-byte
* boundaries. This is a requirement for many architectures which will throw
* a machine check fault if an attempt is made to access one of the 4-byte IP
* header fields on a non-4-byte boundary. And it's a major performance issue
* even on some architectures which allow it like some implementations of the
* x86 ISA. However, some architectures don't mind this and for some very
* edge-case performance sensitive applications (like forwarding large volumes
* of small packets), setting this DMA offset to 0 will decrease the number of
* PCI-E Bus transfers enough to measurably affect performance.
*/
static int rx_dma_offset = 2;
static bool vf_acls;
#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
/* Configure the number of PCI-E Virtual Functions which are to be instantiated
* on SR-IOV Capable Physical Functions.
*/
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif
/*
* The filter TCAM has a fixed portion and a variable portion. The fixed
* portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
* ports. The variable portion is 36 bits which can include things like Exact
* Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
* [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
* far exceed the 36-bit budget for this "compressed" header portion of the
* filter. Thus, we have a scarce resource which must be carefully managed.
*
* By default we set this up to mostly match the set of filter matching
* capabilities of T3 but with accommodations for some of T4's more
* interesting features:
*
* { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
* [Inner] VLAN (17), Port (3), FCoE (1) }
*/
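/* Editor's note: the default selection above totals
 * 1 + 3 + 8 + 17 + 3 + 1 = 33 bits, fitting the 36-bit compressed
 * header budget with 3 bits to spare.
 */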
enum {
TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};
static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
static struct dentry *cxgb4_debugfs_root;
static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };
static void link_report(struct net_device *dev)
{
if (!netif_carrier_ok(dev))
netdev_info(dev, "link down\n");
else {
static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
const char *s = "10Mbps";
const struct port_info *p = netdev_priv(dev);
switch (p->link_cfg.speed) {
case SPEED_10000:
s = "10Gbps";
break;
case SPEED_1000:
s = "1000Mbps";
break;
case SPEED_100:
s = "100Mbps";
break;
}
netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
fc[p->link_cfg.fc]);
}
}
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
struct net_device *dev = adapter->port[port_id];
/* Skip changes from disabled ports. */
if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
if (link_stat)
netif_carrier_on(dev);
else
netif_carrier_off(dev);
link_report(dev);
}
}
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
static const char *mod_str[] = {
NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
};
const struct net_device *dev = adap->port[port_id];
const struct port_info *pi = netdev_priv(dev);
if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
netdev_info(dev, "port module unplugged\n");
else if (pi->mod_type < ARRAY_SIZE(mod_str))
netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}
/*
* Configure the exact and hash address filters to handle a port's multicast
* and secondary unicast MAC addresses.
*/
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
u64 mhash = 0;
u64 uhash = 0;
bool free = true;
u16 filt_idx[7];
const u8 *addr[7];
int ret, naddr = 0;
const struct netdev_hw_addr *ha;
int uc_cnt = netdev_uc_count(dev);
int mc_cnt = netdev_mc_count(dev);
const struct port_info *pi = netdev_priv(dev);
unsigned int mb = pi->adapter->fn;
/* first do the secondary unicast addresses */
netdev_for_each_uc_addr(ha, dev) {
addr[naddr++] = ha->addr;
if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
naddr, addr, filt_idx, &uhash, sleep);
if (ret < 0)
return ret;
free = false;
naddr = 0;
}
}
/* next set up the multicast addresses */
netdev_for_each_mc_addr(ha, dev) {
addr[naddr++] = ha->addr;
if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
naddr, addr, filt_idx, &mhash, sleep);
if (ret < 0)
return ret;
free = false;
naddr = 0;
}
}
return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
uhash | mhash, sleep);
}
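/* Editor's note on the batching above (hedged summary): addresses are
 * staged in groups of up to ARRAY_SIZE(addr) = 7 per
 * t4_alloc_mac_filt() call, so e.g. 10 secondary unicast addresses go
 * out as 7 + 3.  Addresses that cannot get an exact-match filter slot
 * are folded into the uhash/mhash bitmaps, which the final
 * t4_set_addr_hash() call programs.
 */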
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
/*
* usecs to sleep while draining the dbfifo
*/
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
"usecs to sleep while draining the dbfifo");
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
* If @mtu is -1 it is left unchanged.
*/
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
int ret;
struct port_info *pi = netdev_priv(dev);
ret = set_addr_filters(dev, sleep_ok);
if (ret == 0)
ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
(dev->flags & IFF_PROMISC) ? 1 : 0,
(dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
sleep_ok);
return ret;
}
static struct workqueue_struct *workq;
/**
* link_start - enable a port
* @dev: the port to enable
*
* Performs the MAC and PHY actions needed to enable a port.
*/
static int link_start(struct net_device *dev)
{
int ret;
struct port_info *pi = netdev_priv(dev);
unsigned int mb = pi->adapter->fn;
/*
 * We do not set address filters and promiscuity here; the stack does
* that step explicitly.
*/
ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
!!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
if (ret == 0) {
ret = t4_change_mac(pi->adapter, mb, pi->viid,
pi->xact_addr_filt, dev->dev_addr, true,
true);
if (ret >= 0) {
pi->xact_addr_filt = ret;
ret = 0;
}
}
if (ret == 0)
ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
&pi->link_cfg);
if (ret == 0)
ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
return ret;
}
/* Clear a filter and release any of its resources that we own. This also
* clears the filter's "pending" status.
*/
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
/* If the new or old filter has loopback rewriting rules then we'll
* need to free any existing Layer Two Table (L2T) entries of the old
* filter rule. The firmware will handle freeing up any Source MAC
* Table (SMT) entries used for rewriting Source MAC Addresses in
* loopback rules.
*/
if (f->l2t)
cxgb4_l2t_release(f->l2t);
/* The zeroing of the filter rule below clears the filter valid,
* pending, locked flags, l2t pointer, etc. so it's all we need for
* this operation.
*/
memset(f, 0, sizeof(*f));
}
/* Handle a filter write/deletion reply.
*/
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
unsigned int idx = GET_TID(rpl);
unsigned int nidx = idx - adap->tids.ftid_base;
unsigned int ret;
struct filter_entry *f;
if (idx >= adap->tids.ftid_base && nidx <
(adap->tids.nftids + adap->tids.nsftids)) {
idx = nidx;
ret = GET_TCB_COOKIE(rpl->cookie);
f = &adap->tids.ftid_tab[idx];
if (ret == FW_FILTER_WR_FLT_DELETED) {
/* Clear the filter when we get confirmation from the
* hardware that the filter has been deleted.
*/
clear_filter(adap, f);
} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
idx);
clear_filter(adap, f);
} else if (ret == FW_FILTER_WR_FLT_ADDED) {
f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
f->pending = 0; /* asynchronous setup completed */
f->valid = 1;
} else {
/* Something went wrong. Issue a warning about the
* problem and clear everything out.
*/
dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
idx, ret);
clear_filter(adap, f);
}
}
}
/* Response queue handler for the FW event queue.
*/
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl)
{
u8 opcode = ((const struct rss_header *)rsp)->opcode;
rsp++; /* skip RSS header */
/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
*/
if (unlikely(opcode == CPL_FW4_MSG &&
((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
rsp++;
opcode = ((const struct rss_header *)rsp)->opcode;
rsp++;
if (opcode != CPL_SGE_EGR_UPDATE) {
dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
, opcode);
goto out;
}
}
if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
const struct cpl_sge_egr_update *p = (void *)rsp;
unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
struct sge_txq *txq;
txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
txq->restarts++;
if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
struct sge_eth_txq *eq;
eq = container_of(txq, struct sge_eth_txq, q);
netif_tx_wake_queue(eq->txq);
} else {
struct sge_ofld_txq *oq;
oq = container_of(txq, struct sge_ofld_txq, q);
tasklet_schedule(&oq->qresume_tsk);
}
} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
const struct cpl_fw6_msg *p = (void *)rsp;
if (p->type == 0)
t4_handle_fw_rpl(q->adap, p->data);
} else if (opcode == CPL_L2T_WRITE_RPL) {
const struct cpl_l2t_write_rpl *p = (void *)rsp;
do_l2t_write_rpl(q->adap, p);
} else if (opcode == CPL_SET_TCB_RPL) {
const struct cpl_set_tcb_rpl *p = (void *)rsp;
filter_rpl(q->adap, p);
} else
dev_err(q->adap->pdev_dev,
"unexpected CPL %#x on FW event queue\n", opcode);
out:
return 0;
}
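/* Editor's note (hedged reading of the handlers here): rsp is a __be64
 * pointer, so each increment skips 8 bytes.  An encapsulated message
 * arrives as [rss_header][cpl_fw4_msg header][inner rss_header][inner
 * CPL ...]; fwevtq_handler() above steps past the outer header and the
 * FW4 wrapper before re-reading the opcode, and uldrx_handler() below
 * does the equivalent with "rsp += 2".
 */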
/**
* uldrx_handler - response queue handler for ULD queues
* @q: the response queue that received the packet
* @rsp: the response queue descriptor holding the offload message
* @gl: the gather list of packet fragments
*
* Deliver an ingress offload packet to a ULD. All processing is done by
* the ULD, we just maintain statistics.
*/
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl)
{
struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
*/
if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
rsp += 2;
if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
rxq->stats.nomem++;
return -1;
}
if (gl == NULL)
rxq->stats.imm++;
else if (gl == CXGB4_MSG_AN)
rxq->stats.an++;
else
rxq->stats.pkts++;
return 0;
}
static void disable_msi(struct adapter *adapter)
{
if (adapter->flags & USING_MSIX) {
pci_disable_msix(adapter->pdev);
adapter->flags &= ~USING_MSIX;
} else if (adapter->flags & USING_MSI) {
pci_disable_msi(adapter->pdev);
adapter->flags &= ~USING_MSI;
}
}
/*
* Interrupt handler for non-data events used with MSI-X.
*/
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
struct adapter *adap = cookie;
u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
if (v & PFSW) {
adap->swintr = 1;
t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
}
t4_slow_intr_handler(adap);
return IRQ_HANDLED;
}
/*
* Name the MSI-X interrupts.
*/
static void name_msix_vecs(struct adapter *adap)
{
int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
/* non-data interrupts */
snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
/* FW events */
snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
adap->port[0]->name);
/* Ethernet queues */
for_each_port(adap, j) {
struct net_device *d = adap->port[j];
const struct port_info *pi = netdev_priv(d);
for (i = 0; i < pi->nqsets; i++, msi_idx++)
snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
d->name, i);
}
/* offload queues */
for_each_ofldrxq(&adap->sge, i)
snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
adap->port[0]->name, i);
for_each_rdmarxq(&adap->sge, i)
snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
adap->port[0]->name, i);
}
static int request_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
adap->msix_info[1].desc, &s->fw_evtq);
if (err)
return err;
for_each_ethrxq(s, ethqidx) {
err = request_irq(adap->msix_info[msi_index].vec,
t4_sge_intr_msix, 0,
adap->msix_info[msi_index].desc,
&s->ethrxq[ethqidx].rspq);
if (err)
goto unwind;
msi_index++;
}
for_each_ofldrxq(s, ofldqidx) {
err = request_irq(adap->msix_info[msi_index].vec,
t4_sge_intr_msix, 0,
adap->msix_info[msi_index].desc,
&s->ofldrxq[ofldqidx].rspq);
if (err)
goto unwind;
msi_index++;
}
for_each_rdmarxq(s, rdmaqidx) {
err = request_irq(adap->msix_info[msi_index].vec,
t4_sge_intr_msix, 0,
adap->msix_info[msi_index].desc,
&s->rdmarxq[rdmaqidx].rspq);
if (err)
goto unwind;
msi_index++;
}
return 0;
unwind:
while (--rdmaqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->rdmarxq[rdmaqidx].rspq);
while (--ofldqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->ofldrxq[ofldqidx].rspq);
while (--ethqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->ethrxq[ethqidx].rspq);
free_irq(adap->msix_info[1].vec, &s->fw_evtq);
return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
int i, msi_index = 2;
struct sge *s = &adap->sge;
free_irq(adap->msix_info[1].vec, &s->fw_evtq);
for_each_ethrxq(s, i)
free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
for_each_ofldrxq(s, i)
free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
for_each_rdmarxq(s, i)
free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
}
/**
* write_rss - write the RSS table for a given port
* @pi: the port
* @queues: array of queue indices for RSS
*
* Sets up the portion of the HW RSS table for the port's VI to distribute
* packets to the Rx queues in @queues.
*/
static int write_rss(const struct port_info *pi, const u16 *queues)
{
u16 *rss;
int i, err;
const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
if (!rss)
return -ENOMEM;
/* map the queue indices to queue ids */
for (i = 0; i < pi->rss_size; i++, queues++)
rss[i] = q[*queues].rspq.abs_id;
err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
pi->rss_size, rss, pi->rss_size);
kfree(rss);
return err;
}
/**
* setup_rss - configure RSS
* @adap: the adapter
*
* Sets up RSS for each port.
*/
static int setup_rss(struct adapter *adap)
{
int i, err;
for_each_port(adap, i) {
const struct port_info *pi = adap2pinfo(adap, i);
err = write_rss(pi, pi->rss);
if (err)
return err;
}
return 0;
}
/*
* Return the channel of the ingress queue with the given qid.
*/
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
qid -= p->ingr_start;
return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
/*
* Wait until all NAPI handlers are descheduled.
*/
static void quiesce_rx(struct adapter *adap)
{
int i;
for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
struct sge_rspq *q = adap->sge.ingr_map[i];
if (q && q->handler)
napi_disable(&q->napi);
}
}
/*
* Enable NAPI scheduling and interrupt generation for all Rx queues.
*/
static void enable_rx(struct adapter *adap)
{
int i;
for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
struct sge_rspq *q = adap->sge.ingr_map[i];
if (!q)
continue;
if (q->handler)
napi_enable(&q->napi);
/* 0-increment GTS to start the timer and enable interrupts */
t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
SEINTARM(q->intr_params) |
INGRESSQID(q->cntxt_id));
}
}
/**
* setup_sge_queues - configure SGE Tx/Rx/response queues
* @adap: the adapter
*
* Determines how many sets of SGE queues to use and initializes them.
* We support multiple queue sets per port if we have MSI-X, otherwise
* just one queue set per port.
*/
static int setup_sge_queues(struct adapter *adap)
{
int err, msi_idx, i, j;
struct sge *s = &adap->sge;
bitmap_zero(s->starving_fl, MAX_EGRQ);
bitmap_zero(s->txq_maperr, MAX_EGRQ);
if (adap->flags & USING_MSIX)
msi_idx = 1; /* vector 0 is for non-queue interrupts */
else {
err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
NULL, NULL);
if (err)
return err;
msi_idx = -((int)s->intrq.abs_id + 1);
}
err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
msi_idx, NULL, fwevtq_handler);
if (err) {
freeout: t4_free_sge_resources(adap);
return err;
}
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
struct port_info *pi = netdev_priv(dev);
struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
for (j = 0; j < pi->nqsets; j++, q++) {
if (msi_idx > 0)
msi_idx++;
err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
msi_idx, &q->fl,
t4_ethrx_handler);
if (err)
goto freeout;
q->rspq.idx = j;
memset(&q->stats, 0, sizeof(q->stats));
}
for (j = 0; j < pi->nqsets; j++, t++) {
err = t4_sge_alloc_eth_txq(adap, t, dev,
netdev_get_tx_queue(dev, j),
s->fw_evtq.cntxt_id);
if (err)
goto freeout;
}
}
j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
for_each_ofldrxq(s, i) {
struct sge_ofld_rxq *q = &s->ofldrxq[i];
struct net_device *dev = adap->port[i / j];
if (msi_idx > 0)
msi_idx++;
err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
&q->fl, uldrx_handler);
if (err)
goto freeout;
memset(&q->stats, 0, sizeof(q->stats));
s->ofld_rxq[i] = q->rspq.abs_id;
err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
s->fw_evtq.cntxt_id);
if (err)
goto freeout;
}
for_each_rdmarxq(s, i) {
struct sge_ofld_rxq *q = &s->rdmarxq[i];
if (msi_idx > 0)
msi_idx++;
err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
msi_idx, &q->fl, uldrx_handler);
if (err)
goto freeout;
memset(&q->stats, 0, sizeof(q->stats));
s->rdma_rxq[i] = q->rspq.abs_id;
}
for_each_port(adap, i) {
/*
* Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
* have RDMA queues, and that's the right value.
*/
err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
s->fw_evtq.cntxt_id,
s->rdmarxq[i].rspq.cntxt_id);
if (err)
goto freeout;
}
t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
return 0;
}
/*
* Returns 0 if new FW was successfully loaded, a positive errno if a load was
* started but failed, and a negative errno if flash load couldn't start.
*/
static int upgrade_fw(struct adapter *adap)
{
int ret;
u32 vers, exp_major;
const struct fw_hdr *hdr;
const struct firmware *fw;
struct device *dev = adap->pdev_dev;
char *fw_file_name;
switch (CHELSIO_CHIP_VERSION(adap->chip)) {
case CHELSIO_T4:
fw_file_name = FW_FNAME;
exp_major = FW_VERSION_MAJOR;
break;
case CHELSIO_T5:
fw_file_name = FW5_FNAME;
exp_major = FW_VERSION_MAJOR_T5;
break;
default:
dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
return -EINVAL;
}
ret = request_firmware(&fw, fw_file_name, dev);
if (ret < 0) {
dev_err(dev, "unable to load firmware image %s, error %d\n",
fw_file_name, ret);
return ret;
}
hdr = (const struct fw_hdr *)fw->data;
vers = ntohl(hdr->fw_ver);
if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
ret = -EINVAL; /* wrong major version, won't do */
goto out;
}
/*
* If the flash FW is unusable or we found something newer, load it.
*/
if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
vers > adap->params.fw_vers) {
dev_info(dev, "upgrading firmware ...\n");
ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
/*force=*/false);
if (!ret)
dev_info(dev,
"firmware upgraded to version %pI4 from %s\n",
&hdr->fw_ver, fw_file_name);
else
dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
} else {
/*
* Tell our caller that we didn't upgrade the firmware.
*/
ret = -EINVAL;
}
out: release_firmware(fw);
return ret;
}
/*
* Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
* The allocated memory is cleared.
*/
void *t4_alloc_mem(size_t size)
{
void *p = kzalloc(size, GFP_KERNEL);
if (!p)
p = vzalloc(size);
return p;
}
/*
 * Free memory allocated through t4_alloc_mem().
*/
static void t4_free_mem(void *addr)
{
if (is_vmalloc_addr(addr))
vfree(addr);
else
kfree(addr);
}
/* Send a Work Request to write the filter at a specified index. We construct
* a Firmware Filter Work Request to have the work done and put the indicated
* filter into "pending" mode which will prevent any further actions against
* it till we get a reply from the firmware on the completion status of the
* request.
*/
static int set_filter_wr(struct adapter *adapter, int fidx)
{
struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
struct sk_buff *skb;
struct fw_filter_wr *fwr;
unsigned int ftid;
/* If the new filter requires loopback Destination MAC and/or VLAN
* rewriting then we need to allocate a Layer 2 Table (L2T) entry for
* the filter.
*/
if (f->fs.newdmac || f->fs.newvlan) {
/* allocate L2T entry for new filter */
f->l2t = t4_l2t_alloc_switching(adapter->l2t);
if (f->l2t == NULL)
return -EAGAIN;
if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
f->fs.eport, f->fs.dmac)) {
cxgb4_l2t_release(f->l2t);
f->l2t = NULL;
return -ENOMEM;
}
}
ftid = adapter->tids.ftid_base + fidx;
skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
memset(fwr, 0, sizeof(*fwr));
/* It would be nice to put most of the following in t4_hw.c but most
* of the work is translating the cxgbtool ch_filter_specification
* into the Work Request and the definition of that structure is
* currently in cxgbtool.h which isn't appropriate to pull into the
* common code. We may eventually try to come up with a more neutral
* filter specification structure but for now it's easiest to simply
* put this fairly direct code in line ...
*/
fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
fwr->tid_to_iq =
htonl(V_FW_FILTER_WR_TID(ftid) |
V_FW_FILTER_WR_RQTYPE(f->fs.type) |
V_FW_FILTER_WR_NOREPLY(0) |
V_FW_FILTER_WR_IQ(f->fs.iq));
fwr->del_filter_to_l2tix =
htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
f->fs.newvlan == VLAN_REWRITE) |
V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
f->fs.newvlan == VLAN_REWRITE) |
V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
V_FW_FILTER_WR_PRIO(f->fs.prio) |
V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
fwr->ethtype = htons(f->fs.val.ethtype);
fwr->ethtypem = htons(f->fs.mask.ethtype);
fwr->frag_to_ovlan_vldm =
(V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
fwr->smac_sel = 0;
fwr->rx_chan_rx_rpl_iq =
htons(V_FW_FILTER_WR_RX_CHAN(0) |
V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
fwr->maci_to_matchtypem =
htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
V_FW_FILTER_WR_PORT(f->fs.val.iport) |
V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
fwr->ptcl = f->fs.val.proto;
fwr->ptclm = f->fs.mask.proto;
fwr->ttyp = f->fs.val.tos;
fwr->ttypm = f->fs.mask.tos;
fwr->ivlan = htons(f->fs.val.ivlan);
fwr->ivlanm = htons(f->fs.mask.ivlan);
fwr->ovlan = htons(f->fs.val.ovlan);
fwr->ovlanm = htons(f->fs.mask.ovlan);
memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
fwr->lp = htons(f->fs.val.lport);
fwr->lpm = htons(f->fs.mask.lport);
fwr->fp = htons(f->fs.val.fport);
fwr->fpm = htons(f->fs.mask.fport);
if (f->fs.newsmac)
memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
/* Mark the filter as "pending" and ship off the Filter Work Request.
* When we get the Work Request Reply we'll clear the pending status.
*/
f->pending = 1;
set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
t4_ofld_send(adapter, skb);
return 0;
}
/* Delete the filter at a specified index.
*/
static int del_filter_wr(struct adapter *adapter, int fidx)
{
struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
struct sk_buff *skb;
struct fw_filter_wr *fwr;
unsigned int len, ftid;
len = sizeof(*fwr);
ftid = adapter->tids.ftid_base + fidx;
skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
fwr = (struct fw_filter_wr *)__skb_put(skb, len);
t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
/* Mark the filter as "pending" and ship off the Filter Work Request.
* When we get the Work Request Reply we'll clear the pending status.
*/
f->pending = 1;
t4_mgmt_tx(adapter, skb);
return 0;
}
static inline int is_offload(const struct adapter *adap)
{
return adap->params.offload;
}
/*
* Implementation of ethtool operations.
*/
static u32 get_msglevel(struct net_device *dev)
{
return netdev2adap(dev)->msg_enable;
}
static void set_msglevel(struct net_device *dev, u32 val)
{
netdev2adap(dev)->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
"TxOctetsOK ",
"TxFramesOK ",
"TxBroadcastFrames ",
"TxMulticastFrames ",
"TxUnicastFrames ",
"TxErrorFrames ",
"TxFrames64 ",
"TxFrames65To127 ",
"TxFrames128To255 ",
"TxFrames256To511 ",
"TxFrames512To1023 ",
"TxFrames1024To1518 ",
"TxFrames1519ToMax ",
"TxFramesDropped ",
"TxPauseFrames ",
"TxPPP0Frames ",
"TxPPP1Frames ",
"TxPPP2Frames ",
"TxPPP3Frames ",
"TxPPP4Frames ",
"TxPPP5Frames ",
"TxPPP6Frames ",
"TxPPP7Frames ",
"RxOctetsOK ",
"RxFramesOK ",
"RxBroadcastFrames ",
"RxMulticastFrames ",
"RxUnicastFrames ",
"RxFramesTooLong ",
"RxJabberErrors ",
"RxFCSErrors ",
"RxLengthErrors ",
"RxSymbolErrors ",
"RxRuntFrames ",
"RxFrames64 ",
"RxFrames65To127 ",
"RxFrames128To255 ",
"RxFrames256To511 ",
"RxFrames512To1023 ",
"RxFrames1024To1518 ",
"RxFrames1519ToMax ",
"RxPauseFrames ",
"RxPPP0Frames ",
"RxPPP1Frames ",
"RxPPP2Frames ",
"RxPPP3Frames ",
"RxPPP4Frames ",
"RxPPP5Frames ",
"RxPPP6Frames ",
"RxPPP7Frames ",
"RxBG0FramesDropped ",
"RxBG1FramesDropped ",
"RxBG2FramesDropped ",
"RxBG3FramesDropped ",
"RxBG0FramesTrunc ",
"RxBG1FramesTrunc ",
"RxBG2FramesTrunc ",
"RxBG3FramesTrunc ",
"TSO ",
"TxCsumOffload ",
"RxCsumGood ",
"VLANextractions ",
"VLANinsertions ",
"GROpackets ",
"GROmerged ",
"WriteCoalSuccess ",
"WriteCoalFail ",
};
static int get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(stats_strings);
default:
return -EOPNOTSUPP;
}
}
#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)
static int get_regs_len(struct net_device *dev)
{
struct adapter *adap = netdev2adap(dev);
if (is_t4(adap->chip))
return T4_REGMAP_SIZE;
else
return T5_REGMAP_SIZE;
}
static int get_eeprom_len(struct net_device *dev)
{
return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct adapter *adapter = netdev2adap(dev);
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
if (adapter->params.fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
if (stringset == ETH_SS_STATS)
memcpy(data, stats_strings, sizeof(stats_strings));
}
/*
* port stats maintained per queue of the port. They should be in the same
* order as in stats_strings above.
*/
struct queue_port_stats {
u64 tso;
u64 tx_csum;
u64 rx_csum;
u64 vlan_ex;
u64 vlan_ins;
u64 gro_pkts;
u64 gro_merged;
};
static void collect_sge_port_stats(const struct adapter *adap,
const struct port_info *p, struct queue_port_stats *s)
{
int i;
const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
memset(s, 0, sizeof(*s));
for (i = 0; i < p->nqsets; i++, rx++, tx++) {
s->tso += tx->tso;
s->tx_csum += tx->tx_cso;
s->rx_csum += rx->stats.rx_cso;
s->vlan_ex += rx->stats.vlan_ex;
s->vlan_ins += tx->vlan_ins;
s->gro_pkts += rx->stats.lro_pkts;
s->gro_merged += rx->stats.lro_merged;
}
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
u64 *data)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
u32 val1, val2;
t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
data += sizeof(struct port_stats) / sizeof(u64);
collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
data += sizeof(struct queue_port_stats) / sizeof(u64);
if (!is_t4(adapter->chip)) {
t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
*data = val1 - val2;
data++;
*data = val2;
data++;
} else {
memset(data, 0, 2 * sizeof(u64));
data += 2; /* advance past the two zeroed WriteCoal slots */
}
}
/*
* Return a version number to identify the type of adapter. The scheme is:
* - bits 0..9: chip version
* - bits 10..15: chip revision
* - bits 16..23: register dump version
*/
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
return CHELSIO_CHIP_VERSION(ap->chip) |
(CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16);
}
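/* Editor's example (illustrative values): a version-5 chip at
 * release 0 with register dump version 1 yields
 * 5 | (0 << 10) | (1 << 16) = 0x10005.
 */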
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
unsigned int end)
{
u32 *p = buf + start;
for ( ; start <= end; start += sizeof(u32))
*p++ = t4_read_reg(ap, start);
}
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *buf)
{
static const unsigned int t4_reg_ranges[] = {
0x1008, 0x1108,
0x1180, 0x11b4,
0x11fc, 0x123c,
0x1300, 0x173c,
0x1800, 0x18fc,
0x3000, 0x30d8,
0x30e0, 0x5924,
0x5960, 0x59d4,
0x5a00, 0x5af8,
0x6000, 0x6098,
0x6100, 0x6150,
0x6200, 0x6208,
0x6240, 0x6248,
0x6280, 0x6338,
0x6370, 0x638c,
0x6400, 0x643c,
0x6500, 0x6524,
0x6a00, 0x6a38,
0x6a60, 0x6a78,
0x6b00, 0x6b84,
0x6bf0, 0x6c84,
0x6cf0, 0x6d84,
0x6df0, 0x6e84,
0x6ef0, 0x6f84,
0x6ff0, 0x7084,
0x70f0, 0x7184,
0x71f0, 0x7284,
0x72f0, 0x7384,
0x73f0, 0x7450,
0x7500, 0x7530,
0x7600, 0x761c,
0x7680, 0x76cc,
0x7700, 0x7798,
0x77c0, 0x77fc,
0x7900, 0x79fc,
0x7b00, 0x7c38,
0x7d00, 0x7efc,
0x8dc0, 0x8e1c,
0x8e30, 0x8e78,
0x8ea0, 0x8f6c,
0x8fc0, 0x9074,
0x90fc, 0x90fc,
0x9400, 0x9458,
0x9600, 0x96bc,
0x9800, 0x9808,
0x9820, 0x983c,
0x9850, 0x9864,
0x9c00, 0x9c6c,
0x9c80, 0x9cec,
0x9d00, 0x9d6c,
0x9d80, 0x9dec,
0x9e00, 0x9e6c,
0x9e80, 0x9eec,
0x9f00, 0x9f6c,
0x9f80, 0x9fec,
0xd004, 0xd03c,
0xdfc0, 0xdfe0,
0xe000, 0xea7c,
0xf000, 0x11190,
0x19040, 0x1906c,
0x19078, 0x19080,
0x1908c, 0x19124,
0x19150, 0x191b0,
0x191d0, 0x191e8,
0x19238, 0x1924c,
0x193f8, 0x19474,
0x19490, 0x194f8,
0x19800, 0x19f30,
0x1a000, 0x1a06c,
0x1a0b0, 0x1a120,
0x1a128, 0x1a138,
0x1a190, 0x1a1c4,
0x1a1fc, 0x1a1fc,
0x1e040, 0x1e04c,
0x1e284, 0x1e28c,
0x1e2c0, 0x1e2c0,
0x1e2e0, 0x1e2e0,
0x1e300, 0x1e384,
0x1e3c0, 0x1e3c8,
0x1e440, 0x1e44c,
0x1e684, 0x1e68c,
0x1e6c0, 0x1e6c0,
0x1e6e0, 0x1e6e0,
0x1e700, 0x1e784,
0x1e7c0, 0x1e7c8,
0x1e840, 0x1e84c,
0x1ea84, 0x1ea8c,
0x1eac0, 0x1eac0,
0x1eae0, 0x1eae0,
0x1eb00, 0x1eb84,
0x1ebc0, 0x1ebc8,
0x1ec40, 0x1ec4c,
0x1ee84, 0x1ee8c,
0x1eec0, 0x1eec0,
0x1eee0, 0x1eee0,
0x1ef00, 0x1ef84,
0x1efc0, 0x1efc8,
0x1f040, 0x1f04c,
0x1f284, 0x1f28c,
0x1f2c0, 0x1f2c0,
0x1f2e0, 0x1f2e0,
0x1f300, 0x1f384,
0x1f3c0, 0x1f3c8,
0x1f440, 0x1f44c,
0x1f684, 0x1f68c,
0x1f6c0, 0x1f6c0,
0x1f6e0, 0x1f6e0,
0x1f700, 0x1f784,
0x1f7c0, 0x1f7c8,
0x1f840, 0x1f84c,
0x1fa84, 0x1fa8c,
0x1fac0, 0x1fac0,
0x1fae0, 0x1fae0,
0x1fb00, 0x1fb84,
0x1fbc0, 0x1fbc8,
0x1fc40, 0x1fc4c,
0x1fe84, 0x1fe8c,
0x1fec0, 0x1fec0,
0x1fee0, 0x1fee0,
0x1ff00, 0x1ff84,
0x1ffc0, 0x1ffc8,
0x20000, 0x2002c,
0x20100, 0x2013c,
0x20190, 0x201c8,
0x20200, 0x20318,
0x20400, 0x20528,
0x20540, 0x20614,
0x21000, 0x21040,
0x2104c, 0x21060,
0x210c0, 0x210ec,
0x21200, 0x21268,
0x21270, 0x21284,
0x212fc, 0x21388,
0x21400, 0x21404,
0x21500, 0x21518,
0x2152c, 0x2153c,
0x21550, 0x21554,
0x21600, 0x21600,
0x21608, 0x21628,
0x21630, 0x2163c,
0x21700, 0x2171c,
0x21780, 0x2178c,
0x21800, 0x21c38,
0x21c80, 0x21d7c,
0x21e00, 0x21e04,
0x22000, 0x2202c,
0x22100, 0x2213c,
0x22190, 0x221c8,
0x22200, 0x22318,
0x22400, 0x22528,
0x22540, 0x22614,
0x23000, 0x23040,
0x2304c, 0x23060,
0x230c0, 0x230ec,
0x23200, 0x23268,
0x23270, 0x23284,
0x232fc, 0x23388,
0x23400, 0x23404,
0x23500, 0x23518,
0x2352c, 0x2353c,
0x23550, 0x23554,
0x23600, 0x23600,
0x23608, 0x23628,
0x23630, 0x2363c,
0x23700, 0x2371c,
0x23780, 0x2378c,
0x23800, 0x23c38,
0x23c80, 0x23d7c,
0x23e00, 0x23e04,
0x24000, 0x2402c,
0x24100, 0x2413c,
0x24190, 0x241c8,
0x24200, 0x24318,
0x24400, 0x24528,
0x24540, 0x24614,
0x25000, 0x25040,
0x2504c, 0x25060,
0x250c0, 0x250ec,
0x25200, 0x25268,
0x25270, 0x25284,
0x252fc, 0x25388,
0x25400, 0x25404,
0x25500, 0x25518,
0x2552c, 0x2553c,
0x25550, 0x25554,
0x25600, 0x25600,
0x25608, 0x25628,
0x25630, 0x2563c,
0x25700, 0x2571c,
0x25780, 0x2578c,
0x25800, 0x25c38,
0x25c80, 0x25d7c,
0x25e00, 0x25e04,
0x26000, 0x2602c,
0x26100, 0x2613c,
0x26190, 0x261c8,
0x26200, 0x26318,
0x26400, 0x26528,
0x26540, 0x26614,
0x27000, 0x27040,
0x2704c, 0x27060,
0x270c0, 0x270ec,
0x27200, 0x27268,
0x27270, 0x27284,
0x272fc, 0x27388,
0x27400, 0x27404,
0x27500, 0x27518,
0x2752c, 0x2753c,
0x27550, 0x27554,
0x27600, 0x27600,
0x27608, 0x27628,
0x27630, 0x2763c,
0x27700, 0x2771c,
0x27780, 0x2778c,
0x27800, 0x27c38,
0x27c80, 0x27d7c,
0x27e00, 0x27e04
};
static const unsigned int t5_reg_ranges[] = {
0x1008, 0x1148,
0x1180, 0x11b4,
0x11fc, 0x123c,
0x1280, 0x173c,
0x1800, 0x18fc,
0x3000, 0x3028,
0x3060, 0x30d8,
0x30e0, 0x30fc,
0x3140, 0x357c,
0x35a8, 0x35cc,
0x35ec, 0x35ec,
0x3600, 0x5624,
0x56cc, 0x575c,
0x580c, 0x5814,
0x5890, 0x58bc,
0x5940, 0x59dc,
0x59fc, 0x5a18,
0x5a60, 0x5a9c,
0x5b9c, 0x5bfc,
0x6000, 0x6040,
0x6058, 0x614c,
0x7700, 0x7798,
0x77c0, 0x78fc,
0x7b00, 0x7c54,
0x7d00, 0x7efc,
0x8dc0, 0x8de0,
0x8df8, 0x8e84,
0x8ea0, 0x8f84,
0x8fc0, 0x90f8,
0x9400, 0x9470,
0x9600, 0x96f4,
0x9800, 0x9808,
0x9820, 0x983c,
0x9850, 0x9864,
0x9c00, 0x9c6c,
0x9c80, 0x9cec,
0x9d00, 0x9d6c,
0x9d80, 0x9dec,
0x9e00, 0x9e6c,
0x9e80, 0x9eec,
0x9f00, 0x9f6c,
0x9f80, 0xa020,
0xd004, 0xd03c,
0xdfc0, 0xdfe0,
0xe000, 0x11088,
0x1109c, 0x1117c,
0x11190, 0x11204,
0x19040, 0x1906c,
0x19078, 0x19080,
0x1908c, 0x19124,
0x19150, 0x191b0,
0x191d0, 0x191e8,
0x19238, 0x19290,
0x193f8, 0x19474,
0x19490, 0x194cc,
0x194f0, 0x194f8,
0x19c00, 0x19c60,
0x19c94, 0x19e10,
0x19e50, 0x19f34,
0x19f40, 0x19f50,
0x19f90, 0x19fe4,
0x1a000, 0x1a06c,
0x1a0b0, 0x1a120,
0x1a128, 0x1a138,
0x1a190, 0x1a1c4,
0x1a1fc, 0x1a1fc,
0x1e008, 0x1e00c,
0x1e040, 0x1e04c,
0x1e284, 0x1e290,
0x1e2c0, 0x1e2c0,
0x1e2e0, 0x1e2e0,
0x1e300, 0x1e384,
0x1e3c0, 0x1e3c8,
0x1e408, 0x1e40c,
0x1e440, 0x1e44c,
0x1e684, 0x1e690,
0x1e6c0, 0x1e6c0,
0x1e6e0, 0x1e6e0,
0x1e700, 0x1e784,
0x1e7c0, 0x1e7c8,
0x1e808, 0x1e80c,
0x1e840, 0x1e84c,
0x1ea84, 0x1ea90,
0x1eac0, 0x1eac0,
0x1eae0, 0x1eae0,
0x1eb00, 0x1eb84,
0x1ebc0, 0x1ebc8,
0x1ec08, 0x1ec0c,
0x1ec40, 0x1ec4c,
0x1ee84, 0x1ee90,
0x1eec0, 0x1eec0,
0x1eee0, 0x1eee0,
0x1ef00, 0x1ef84,
0x1efc0, 0x1efc8,
0x1f008, 0x1f00c,
0x1f040, 0x1f04c,
0x1f284, 0x1f290,
0x1f2c0, 0x1f2c0,
0x1f2e0, 0x1f2e0,
0x1f300, 0x1f384,
0x1f3c0, 0x1f3c8,
0x1f408, 0x1f40c,
0x1f440, 0x1f44c,
0x1f684, 0x1f690,
0x1f6c0, 0x1f6c0,
0x1f6e0, 0x1f6e0,
0x1f700, 0x1f784,
0x1f7c0, 0x1f7c8,
0x1f808, 0x1f80c,
0x1f840, 0x1f84c,
0x1fa84, 0x1fa90,
0x1fac0, 0x1fac0,
0x1fae0, 0x1fae0,
0x1fb00, 0x1fb84,
0x1fbc0, 0x1fbc8,
0x1fc08, 0x1fc0c,
0x1fc40, 0x1fc4c,
0x1fe84, 0x1fe90,
0x1fec0, 0x1fec0,
0x1fee0, 0x1fee0,
0x1ff00, 0x1ff84,
0x1ffc0, 0x1ffc8,
0x30000, 0x30030,
0x30100, 0x30144,
0x30190, 0x301d0,
0x30200, 0x30318,
0x30400, 0x3052c,
0x30540, 0x3061c,
0x30800, 0x30834,
0x308c0, 0x30908,
0x30910, 0x309ac,
0x30a00, 0x30a04,
0x30a0c, 0x30a2c,
0x30a44, 0x30a50,
0x30a74, 0x30c24,
0x30d08, 0x30d14,
0x30d1c, 0x30d20,
0x30d3c, 0x30d50,
0x31200, 0x3120c,
0x31220, 0x31220,
0x31240, 0x31240,
0x31600, 0x31600,
0x31608, 0x3160c,
0x31a00, 0x31a1c,
0x31e04, 0x31e20,
0x31e38, 0x31e3c,
0x31e80, 0x31e80,
0x31e88, 0x31ea8,
0x31eb0, 0x31eb4,
0x31ec8, 0x31ed4,
0x31fb8, 0x32004,
0x32208, 0x3223c,
0x32600, 0x32630,
0x32a00, 0x32abc,
0x32b00, 0x32b70,
0x33000, 0x33048,
0x33060, 0x3309c,
0x330f0, 0x33148,
0x33160, 0x3319c,
0x331f0, 0x332e4,
0x332f8, 0x333e4,
0x333f8, 0x33448,
0x33460, 0x3349c,
0x334f0, 0x33548,
0x33560, 0x3359c,
0x335f0, 0x336e4,
0x336f8, 0x337e4,
0x337f8, 0x337fc,
0x33814, 0x33814,
0x3382c, 0x3382c,
0x33880, 0x3388c,
0x338e8, 0x338ec,
0x33900, 0x33948,
0x33960, 0x3399c,
0x339f0, 0x33ae4,
0x33af8, 0x33b10,
0x33b28, 0x33b28,
0x33b3c, 0x33b50,
0x33bf0, 0x33c10,
0x33c28, 0x33c28,
0x33c3c, 0x33c50,
0x33cf0, 0x33cfc,
0x34000, 0x34030,
0x34100, 0x34144,
0x34190, 0x341d0,
0x34200, 0x34318,
0x34400, 0x3452c,
0x34540, 0x3461c,
0x34800, 0x34834,
0x348c0, 0x34908,
0x34910, 0x349ac,
0x34a00, 0x34a04,
0x34a0c, 0x34a2c,
0x34a44, 0x34a50,
0x34a74, 0x34c24,
0x34d08, 0x34d14,
0x34d1c, 0x34d20,
0x34d3c, 0x34d50,
0x35200, 0x3520c,
0x35220, 0x35220,
0x35240, 0x35240,
0x35600, 0x35600,
0x35608, 0x3560c,
0x35a00, 0x35a1c,
0x35e04, 0x35e20,
0x35e38, 0x35e3c,
0x35e80, 0x35e80,
0x35e88, 0x35ea8,
0x35eb0, 0x35eb4,
0x35ec8, 0x35ed4,
0x35fb8, 0x36004,
0x36208, 0x3623c,
0x36600, 0x36630,
0x36a00, 0x36abc,
0x36b00, 0x36b70,
0x37000, 0x37048,
0x37060, 0x3709c,
0x370f0, 0x37148,
0x37160, 0x3719c,
0x371f0, 0x372e4,
0x372f8, 0x373e4,
0x373f8, 0x37448,
0x37460, 0x3749c,
0x374f0, 0x37548,
0x37560, 0x3759c,
0x375f0, 0x376e4,
0x376f8, 0x377e4,
0x377f8, 0x377fc,
0x37814, 0x37814,
0x3782c, 0x3782c,
0x37880, 0x3788c,
0x378e8, 0x378ec,
0x37900, 0x37948,
0x37960, 0x3799c,
0x379f0, 0x37ae4,
0x37af8, 0x37b10,
0x37b28, 0x37b28,
0x37b3c, 0x37b50,
0x37bf0, 0x37c10,
0x37c28, 0x37c28,
0x37c3c, 0x37c50,
0x37cf0, 0x37cfc,
0x38000, 0x38030,
0x38100, 0x38144,
0x38190, 0x381d0,
0x38200, 0x38318,
0x38400, 0x3852c,
0x38540, 0x3861c,
0x38800, 0x38834,
0x388c0, 0x38908,
0x38910, 0x389ac,
0x38a00, 0x38a04,
0x38a0c, 0x38a2c,
0x38a44, 0x38a50,
0x38a74, 0x38c24,
0x38d08, 0x38d14,
0x38d1c, 0x38d20,
0x38d3c, 0x38d50,
0x39200, 0x3920c,
0x39220, 0x39220,
0x39240, 0x39240,
0x39600, 0x39600,
0x39608, 0x3960c,
0x39a00, 0x39a1c,
0x39e04, 0x39e20,
0x39e38, 0x39e3c,
0x39e80, 0x39e80,
0x39e88, 0x39ea8,
0x39eb0, 0x39eb4,
0x39ec8, 0x39ed4,
0x39fb8, 0x3a004,
0x3a208, 0x3a23c,
0x3a600, 0x3a630,
0x3aa00, 0x3aabc,
0x3ab00, 0x3ab70,
0x3b000, 0x3b048,
0x3b060, 0x3b09c,
0x3b0f0, 0x3b148,
0x3b160, 0x3b19c,
0x3b1f0, 0x3b2e4,
0x3b2f8, 0x3b3e4,
0x3b3f8, 0x3b448,
0x3b460, 0x3b49c,
0x3b4f0, 0x3b548,
0x3b560, 0x3b59c,
0x3b5f0, 0x3b6e4,
0x3b6f8, 0x3b7e4,
0x3b7f8, 0x3b7fc,
0x3b814, 0x3b814,
0x3b82c, 0x3b82c,
0x3b880, 0x3b88c,
0x3b8e8, 0x3b8ec,
0x3b900, 0x3b948,
0x3b960, 0x3b99c,
0x3b9f0, 0x3bae4,
0x3baf8, 0x3bb10,
0x3bb28, 0x3bb28,
0x3bb3c, 0x3bb50,
0x3bbf0, 0x3bc10,
0x3bc28, 0x3bc28,
0x3bc3c, 0x3bc50,
0x3bcf0, 0x3bcfc,
0x3c000, 0x3c030,
0x3c100, 0x3c144,
0x3c190, 0x3c1d0,
0x3c200, 0x3c318,
0x3c400, 0x3c52c,
0x3c540, 0x3c61c,
0x3c800, 0x3c834,
0x3c8c0, 0x3c908,
0x3c910, 0x3c9ac,
0x3ca00, 0x3ca04,
0x3ca0c, 0x3ca2c,
0x3ca44, 0x3ca50,
0x3ca74, 0x3cc24,
0x3cd08, 0x3cd14,
0x3cd1c, 0x3cd20,
0x3cd3c, 0x3cd50,
0x3d200, 0x3d20c,
0x3d220, 0x3d220,
0x3d240, 0x3d240,
0x3d600, 0x3d600,
0x3d608, 0x3d60c,
0x3da00, 0x3da1c,
0x3de04, 0x3de20,
0x3de38, 0x3de3c,
0x3de80, 0x3de80,
0x3de88, 0x3dea8,
0x3deb0, 0x3deb4,
0x3dec8, 0x3ded4,
0x3dfb8, 0x3e004,
0x3e208, 0x3e23c,
0x3e600, 0x3e630,
0x3ea00, 0x3eabc,
0x3eb00, 0x3eb70,
0x3f000, 0x3f048,
0x3f060, 0x3f09c,
0x3f0f0, 0x3f148,
0x3f160, 0x3f19c,
0x3f1f0, 0x3f2e4,
0x3f2f8, 0x3f3e4,
0x3f3f8, 0x3f448,
0x3f460, 0x3f49c,
0x3f4f0, 0x3f548,
0x3f560, 0x3f59c,
0x3f5f0, 0x3f6e4,
0x3f6f8, 0x3f7e4,
0x3f7f8, 0x3f7fc,
0x3f814, 0x3f814,
0x3f82c, 0x3f82c,
0x3f880, 0x3f88c,
0x3f8e8, 0x3f8ec,
0x3f900, 0x3f948,
0x3f960, 0x3f99c,
0x3f9f0, 0x3fae4,
0x3faf8, 0x3fb10,
0x3fb28, 0x3fb28,
0x3fb3c, 0x3fb50,
0x3fbf0, 0x3fc10,
0x3fc28, 0x3fc28,
0x3fc3c, 0x3fc50,
0x3fcf0, 0x3fcfc,
0x40000, 0x4000c,
0x40040, 0x40068,
0x40080, 0x40144,
0x40180, 0x4018c,
0x40200, 0x40298,
0x402ac, 0x4033c,
0x403f8, 0x403fc,
0x41300, 0x413c4,
0x41400, 0x4141c,
0x41480, 0x414d0,
0x44000, 0x44078,
0x440c0, 0x44278,
0x442c0, 0x44478,
0x444c0, 0x44678,
0x446c0, 0x44878,
0x448c0, 0x449fc,
0x45000, 0x45068,
0x45080, 0x45084,
0x450a0, 0x450b0,
0x45200, 0x45268,
0x45280, 0x45284,
0x452a0, 0x452b0,
0x460c0, 0x460e4,
0x47000, 0x4708c,
0x47200, 0x47250,
0x47400, 0x47420,
0x47600, 0x47618,
0x47800, 0x47814,
0x48000, 0x4800c,
0x48040, 0x48068,
0x48080, 0x48144,
0x48180, 0x4818c,
0x48200, 0x48298,
0x482ac, 0x4833c,
0x483f8, 0x483fc,
0x49300, 0x493c4,
0x49400, 0x4941c,
0x49480, 0x494d0,
0x4c000, 0x4c078,
0x4c0c0, 0x4c278,
0x4c2c0, 0x4c478,
0x4c4c0, 0x4c678,
0x4c6c0, 0x4c878,
0x4c8c0, 0x4c9fc,
0x4d000, 0x4d068,
0x4d080, 0x4d084,
0x4d0a0, 0x4d0b0,
0x4d200, 0x4d268,
0x4d280, 0x4d284,
0x4d2a0, 0x4d2b0,
0x4e0c0, 0x4e0e4,
0x4f000, 0x4f08c,
0x4f200, 0x4f250,
0x4f400, 0x4f420,
0x4f600, 0x4f618,
0x4f800, 0x4f814,
0x50000, 0x500cc,
0x50400, 0x50400,
0x50800, 0x508cc,
0x50c00, 0x50c00,
0x51000, 0x5101c,
0x51300, 0x51308,
};
int i;
struct adapter *ap = netdev2adap(dev);
const unsigned int *reg_ranges;
int arr_size = 0, buf_size = 0;
if (is_t4(ap->chip)) {
reg_ranges = &t4_reg_ranges[0];
arr_size = ARRAY_SIZE(t4_reg_ranges);
buf_size = T4_REGMAP_SIZE;
} else {
reg_ranges = &t5_reg_ranges[0];
arr_size = ARRAY_SIZE(t5_reg_ranges);
buf_size = T5_REGMAP_SIZE;
}
regs->version = mk_adap_vers(ap);
memset(buf, 0, buf_size);
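/* Each consecutive pair in reg_ranges is an inclusive [start, end]
 * register range; dump them block by block.
 */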
for (i = 0; i < arr_size; i += 2)
reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}
static int restart_autoneg(struct net_device *dev)
{
struct port_info *p = netdev_priv(dev);
if (!netif_running(dev))
return -EAGAIN;
if (p->link_cfg.autoneg != AUTONEG_ENABLE)
return -EINVAL;
t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
return 0;
}
static int identify_port(struct net_device *dev,
enum ethtool_phys_id_state state)
{
unsigned int val;
struct adapter *adap = netdev2adap(dev);
if (state == ETHTOOL_ID_ACTIVE)
val = 0xffff;
else if (state == ETHTOOL_ID_INACTIVE)
val = 0;
else
return -EINVAL;
return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
}
static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
unsigned int v = 0;
if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
type == FW_PORT_TYPE_BT_XAUI) {
v |= SUPPORTED_TP;
if (caps & FW_PORT_CAP_SPEED_100M)
v |= SUPPORTED_100baseT_Full;
if (caps & FW_PORT_CAP_SPEED_1G)
v |= SUPPORTED_1000baseT_Full;
if (caps & FW_PORT_CAP_SPEED_10G)
v |= SUPPORTED_10000baseT_Full;
} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
v |= SUPPORTED_Backplane;
if (caps & FW_PORT_CAP_SPEED_1G)
v |= SUPPORTED_1000baseKX_Full;
if (caps & FW_PORT_CAP_SPEED_10G)
v |= SUPPORTED_10000baseKX4_Full;
} else if (type == FW_PORT_TYPE_KR)
v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
else if (type == FW_PORT_TYPE_BP_AP)
v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
else if (type == FW_PORT_TYPE_BP4_AP)
v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
SUPPORTED_10000baseKX4_Full;
else if (type == FW_PORT_TYPE_FIBER_XFI ||
type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
v |= SUPPORTED_FIBRE;
if (caps & FW_PORT_CAP_ANEG)
v |= SUPPORTED_Autoneg;
return v;
}
static unsigned int to_fw_linkcaps(unsigned int caps)
{
unsigned int v = 0;
if (caps & ADVERTISED_100baseT_Full)
v |= FW_PORT_CAP_SPEED_100M;
if (caps & ADVERTISED_1000baseT_Full)
v |= FW_PORT_CAP_SPEED_1G;
if (caps & ADVERTISED_10000baseT_Full)
v |= FW_PORT_CAP_SPEED_10G;
return v;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
const struct port_info *p = netdev_priv(dev);
if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
p->port_type == FW_PORT_TYPE_BT_XFI ||
p->port_type == FW_PORT_TYPE_BT_XAUI)
cmd->port = PORT_TP;
else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
p->port_type == FW_PORT_TYPE_FIBER_XAUI)
cmd->port = PORT_FIBRE;
else if (p->port_type == FW_PORT_TYPE_SFP) {
if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
cmd->port = PORT_DA;
else
cmd->port = PORT_FIBRE;
} else
cmd->port = PORT_OTHER;
if (p->mdio_addr >= 0) {
cmd->phy_address = p->mdio_addr;
cmd->transceiver = XCVR_EXTERNAL;
cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
} else {
cmd->phy_address = 0; /* not really, but no better option */
cmd->transceiver = XCVR_INTERNAL;
cmd->mdio_support = 0;
}
cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
cmd->advertising = from_fw_linkcaps(p->port_type,
p->link_cfg.advertising);
ethtool_cmd_speed_set(cmd,
netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
cmd->duplex = DUPLEX_FULL;
cmd->autoneg = p->link_cfg.autoneg;
cmd->maxtxpkt = 0;
cmd->maxrxpkt = 0;
return 0;
}
static unsigned int speed_to_caps(int speed)
{
if (speed == SPEED_100)
return FW_PORT_CAP_SPEED_100M;
if (speed == SPEED_1000)
return FW_PORT_CAP_SPEED_1G;
if (speed == SPEED_10000)
return FW_PORT_CAP_SPEED_10G;
return 0;
}
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
unsigned int cap;
struct port_info *p = netdev_priv(dev);
struct link_config *lc = &p->link_cfg;
u32 speed = ethtool_cmd_speed(cmd);
if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
return -EINVAL;
if (!(lc->supported & FW_PORT_CAP_ANEG)) {
/*
* PHY offers a single speed. See if that's what's
* being requested.
*/
if (cmd->autoneg == AUTONEG_DISABLE &&
(lc->supported & speed_to_caps(speed)))
return 0;
return -EINVAL;
}
if (cmd->autoneg == AUTONEG_DISABLE) {
cap = speed_to_caps(speed);
if (!(lc->supported & cap) || (speed == SPEED_1000) ||
(speed == SPEED_10000))
return -EINVAL;
lc->requested_speed = cap;
lc->advertising = 0;
} else {
cap = to_fw_linkcaps(cmd->advertising);
if (!(lc->supported & cap))
return -EINVAL;
lc->requested_speed = 0;
lc->advertising = cap | FW_PORT_CAP_ANEG;
}
lc->autoneg = cmd->autoneg;
if (netif_running(dev))
return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
lc);
return 0;
}
static void get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
struct port_info *p = netdev_priv(dev);
epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}
static int set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
struct port_info *p = netdev_priv(dev);
struct link_config *lc = &p->link_cfg;
if (epause->autoneg == AUTONEG_DISABLE)
lc->requested_fc = 0;
else if (lc->supported & FW_PORT_CAP_ANEG)
lc->requested_fc = PAUSE_AUTONEG;
else
return -EINVAL;
if (epause->rx_pause)
lc->requested_fc |= PAUSE_RX;
if (epause->tx_pause)
lc->requested_fc |= PAUSE_TX;
if (netif_running(dev))
return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
lc);
return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
const struct port_info *pi = netdev_priv(dev);
const struct sge *s = &pi->adapter->sge;
e->rx_max_pending = MAX_RX_BUFFERS;
e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
e->rx_jumbo_max_pending = 0;
e->tx_max_pending = MAX_TXQ_ENTRIES;
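/* The free-list size is reported net of 8 descriptors, mirroring the
 * +8 applied when the ring is sized in set_sge_param().
 */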
e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
e->rx_jumbo_pending = 0;
e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
int i;
const struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
struct sge *s = &adapter->sge;
if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
e->tx_pending > MAX_TXQ_ENTRIES ||
e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
return -EINVAL;
if (adapter->flags & FULL_INIT_DONE)
return -EBUSY;
for (i = 0; i < pi->nqsets; ++i) {
s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
}
return 0;
}
static int closest_timer(const struct sge *s, int time)
{
int i, delta, match = 0, min_delta = INT_MAX;
for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
delta = time - s->timer_val[i];
if (delta < 0)
delta = -delta;
if (delta < min_delta) {
min_delta = delta;
match = i;
}
}
return match;
}
static int closest_thres(const struct sge *s, int thres)
{
int i, delta, match = 0, min_delta = INT_MAX;
for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
delta = thres - s->counter_val[i];
if (delta < 0)
delta = -delta;
if (delta < min_delta) {
min_delta = delta;
match = i;
}
}
return match;
}
/*
* Return a queue's interrupt hold-off time in us. 0 means no timer.
*/
static unsigned int qtimer_val(const struct adapter *adap,
const struct sge_rspq *q)
{
unsigned int idx = q->intr_params >> 1;
return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}
/**
* set_rxq_intr_params - set a queue's interrupt holdoff parameters
* @adap: the adapter
* @q: the Rx queue
* @us: the hold-off time in us, or 0 to disable timer
* @cnt: the hold-off packet count, or 0 to disable counter
*
* Sets an Rx queue's interrupt hold-off time and packet count. At least
* one of the two needs to be enabled for the queue to generate interrupts.
*/
static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
unsigned int us, unsigned int cnt)
{
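/* If both the timer and the counter were disabled the queue could never
 * generate an interrupt (see the note above), so fall back to a packet
 * count of 1.
 */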
if ((us | cnt) == 0)
cnt = 1;
if (cnt) {
int err;
u32 v, new_idx;
new_idx = closest_thres(&adap->sge, cnt);
if (q->desc && q->pktcnt_idx != new_idx) {
/* the queue has already been created; update it */
v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
FW_PARAMS_PARAM_YZ(q->cntxt_id);
err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
&new_idx);
if (err)
return err;
}
q->pktcnt_idx = new_idx;
}
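/*
 * intr_params packs the timer index above bit 0, with bit 0 acting as
 * the counter-enable flag (matching the >> 1 in qtimer_val()).  A timer
 * index of 6 is evidently out of range of the real hold-off timers and
 * is reported by qtimer_val() as "no timer".
 */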
us = us == 0 ? 6 : closest_timer(&adap->sge, us);
q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
return 0;
}
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
const struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
struct sge_rspq *q;
int i;
int r = 0;
for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
q = &adap->sge.ethrxq[i].rspq;
r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
c->rx_max_coalesced_frames);
if (r) {
dev_err(&dev->dev, "failed to set coalesce %d\n", r);
break;
}
}
return r;
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
const struct port_info *pi = netdev_priv(dev);
const struct adapter *adap = pi->adapter;
const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
c->rx_coalesce_usecs = qtimer_val(adap, rq);
c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
adap->sge.counter_val[rq->pktcnt_idx] : 0;
return 0;
}
/**
* eeprom_ptov - translate a physical EEPROM address to virtual
* @phys_addr: the physical EEPROM address
* @fn: the PCI function number
* @sz: size of function-specific area
*
* Translate a physical EEPROM address to virtual. The first 1K is
* accessed through virtual addresses starting at 31K; the rest is
* accessed through virtual addresses starting at 0.
*
* The mapping is as follows:
* [0..1K) -> [31K..32K)
* [1K..1K+A) -> [31K-A..31K)
* [1K+A..ES) -> [0..ES-A-1K)
*
* where A = @fn * @sz, and ES = EEPROM size.
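*
* Worked example (assuming EEPROMPFSIZE is 1K): with @fn = 2 we get
* A = 2K, so phys 0 -> 31K, phys 1K -> 29K, and phys 3K -> 0.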
*/
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
fn *= sz;
if (phys_addr < 1024)
return phys_addr + (31 << 10);
if (phys_addr < 1024 + fn)
return 31744 - fn + phys_addr - 1024;
if (phys_addr < EEPROMSIZE)
return phys_addr - 1024 - fn;
return -EINVAL;
}
/*
* The next two routines implement eeprom read/write from physical addresses.
*/
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
if (vaddr >= 0)
vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
return vaddr < 0 ? vaddr : 0;
}
static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
if (vaddr >= 0)
vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
return vaddr < 0 ? vaddr : 0;
}
#define EEPROM_MAGIC 0x38E2F10C
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
u8 *data)
{
int i, err = 0;
struct adapter *adapter = netdev2adap(dev);
u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
e->magic = EEPROM_MAGIC;
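/* Read whole aligned 32-bit words covering [offset, offset + len), then
 * copy out just the requested slice below.
 */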
for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
if (!err)
memcpy(data, buf + e->offset, e->len);
kfree(buf);
return err;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u8 *data)
{
u8 *buf;
int err = 0;
u32 aligned_offset, aligned_len, *p;
struct adapter *adapter = netdev2adap(dev);
if (eeprom->magic != EEPROM_MAGIC)
return -EINVAL;
aligned_offset = eeprom->offset & ~3;
aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
if (adapter->fn > 0) {
u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
if (aligned_offset < start ||
aligned_offset + aligned_len > start + EEPROMPFSIZE)
return -EPERM;
}
if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
/*
* RMW possibly needed for first or last words.
*/
buf = kmalloc(aligned_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
if (!err && aligned_len > 4)
err = eeprom_rd_phys(adapter,
aligned_offset + aligned_len - 4,
(u32 *)&buf[aligned_len - 4]);
if (err)
goto out;
memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
} else
buf = data;
err = t4_seeprom_wp(adapter, false);
if (err)
goto out;
for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
err = eeprom_wr_phys(adapter, aligned_offset, *p);
aligned_offset += 4;
}
if (!err)
err = t4_seeprom_wp(adapter, true);
out:
if (buf != data)
kfree(buf);
return err;
}
static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
int ret;
const struct firmware *fw;
struct adapter *adap = netdev2adap(netdev);
ef->data[sizeof(ef->data) - 1] = '\0';
ret = request_firmware(&fw, ef->data, adap->pdev_dev);
if (ret < 0)
return ret;
ret = t4_load_fw(adap, fw->data, fw->size);
release_firmware(fw);
if (!ret)
dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
return ret;
}
#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
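/* Presumably the CRC-32 of the all-ones broadcast MAC address; used as the
 * pattern-match wakeup key below.
 */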
#define BCAST_CRC 0xa0ccc1a6
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
wol->supported = WAKE_BCAST | WAKE_MAGIC;
wol->wolopts = netdev2adap(dev)->wol;
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
int err = 0;
struct port_info *pi = netdev_priv(dev);
if (wol->wolopts & ~WOL_SUPPORTED)
return -EINVAL;
t4_wol_magic_enable(pi->adapter, pi->tx_chan,
(wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
if (wol->wolopts & WAKE_BCAST) {
err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
~0ULL, 0, false);
if (!err)
err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
~6ULL, ~0ULL, BCAST_CRC, true);
} else
t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
return err;
}
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
const struct port_info *pi = netdev_priv(dev);
netdev_features_t changed = dev->features ^ features;
int err;
if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
return 0;
err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
-1, -1, -1,
!!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
if (unlikely(err))
dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
return err;
}
static u32 get_rss_table_size(struct net_device *dev)
{
const struct port_info *pi = netdev_priv(dev);
return pi->rss_size;
}
static int get_rss_table(struct net_device *dev, u32 *p)
{
const struct port_info *pi = netdev_priv(dev);
unsigned int n = pi->rss_size;
while (n--)
p[n] = pi->rss[n];
return 0;
}
static int set_rss_table(struct net_device *dev, const u32 *p)
{
unsigned int i;
struct port_info *pi = netdev_priv(dev);
for (i = 0; i < pi->rss_size; i++)
pi->rss[i] = p[i];
if (pi->adapter->flags & FULL_INIT_DONE)
return write_rss(pi, pi->rss);
return 0;
}
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rules)
{
const struct port_info *pi = netdev_priv(dev);
switch (info->cmd) {
case ETHTOOL_GRXFH: {
unsigned int v = pi->rss_mode;
info->data = 0;
switch (info->flow_type) {
case TCP_V4_FLOW:
if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
info->data = RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
info->data = RXH_IP_SRC | RXH_IP_DST;
break;
case UDP_V4_FLOW:
if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
(v & FW_RSS_VI_CONFIG_CMD_UDPEN))
info->data = RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
info->data = RXH_IP_SRC | RXH_IP_DST;
break;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case IPV4_FLOW:
if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
info->data = RXH_IP_SRC | RXH_IP_DST;
break;
case TCP_V6_FLOW:
if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
info->data = RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
info->data = RXH_IP_SRC | RXH_IP_DST;
break;
case UDP_V6_FLOW:
if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
(v & FW_RSS_VI_CONFIG_CMD_UDPEN))
info->data = RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
info->data = RXH_IP_SRC | RXH_IP_DST;
break;
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case IPV6_FLOW:
if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
info->data = RXH_IP_SRC | RXH_IP_DST;
break;
}
return 0;
}
case ETHTOOL_GRXRINGS:
info->data = pi->nqsets;
return 0;
}
return -EOPNOTSUPP;
}
static const struct ethtool_ops cxgb_ethtool_ops = {
.get_settings = get_settings,
.set_settings = set_settings,
.get_drvinfo = get_drvinfo,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
.get_ringparam = get_sge_param,
.set_ringparam = set_sge_param,
.get_coalesce = get_coalesce,
.set_coalesce = set_coalesce,
.get_eeprom_len = get_eeprom_len,
.get_eeprom = get_eeprom,
.set_eeprom = set_eeprom,
.get_pauseparam = get_pauseparam,
.set_pauseparam = set_pauseparam,
.get_link = ethtool_op_get_link,
.get_strings = get_strings,
.set_phys_id = identify_port,
.nway_reset = restart_autoneg,
.get_sset_count = get_sset_count,
.get_ethtool_stats = get_stats,
.get_regs_len = get_regs_len,
.get_regs = get_regs,
.get_wol = get_wol,
.set_wol = set_wol,
.get_rxnfc = get_rxnfc,
.get_rxfh_indir_size = get_rss_table_size,
.get_rxfh_indir = get_rss_table,
.set_rxfh_indir = set_rss_table,
.flash_device = set_flash,
};
/*
* debugfs support
*/
static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
loff_t pos = *ppos;
loff_t avail = file_inode(file)->i_size;
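/*
 * add_debugfs_mem() stores (adap + idx) in private_data, so the low two
 * bits of the pointer encode the memory target (EDC0/EDC1/MC0/MC1) and
 * the remainder is the adapter pointer.
 */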
unsigned int mem = (uintptr_t)file->private_data & 3;
struct adapter *adap = file->private_data - mem;
if (pos < 0)
return -EINVAL;
if (pos >= avail)
return 0;
if (count > avail - pos)
count = avail - pos;
while (count) {
size_t len;
int ret, ofst;
__be32 data[16];
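/* The read routines evidently return the aligned 64-byte chunk
 * containing pos, so copy out only the requested slice (hence ofst).
 */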
if ((mem == MEM_MC) || (mem == MEM_MC1))
ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
else
ret = t4_edc_read(adap, mem, pos, data, NULL);
if (ret)
return ret;
ofst = pos % sizeof(data);
len = min(count, sizeof(data) - ofst);
if (copy_to_user(buf, (u8 *)data + ofst, len))
return -EFAULT;
buf += len;
pos += len;
count -= len;
}
count = pos - *ppos;
*ppos = pos;
return count;
}
static const struct file_operations mem_debugfs_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = mem_read,
.llseek = default_llseek,
};
static void add_debugfs_mem(struct adapter *adap, const char *name,
unsigned int idx, unsigned int size_mb)
{
struct dentry *de;
de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
(void *)adap + idx, &mem_debugfs_fops);
if (de && de->d_inode)
de->d_inode->i_size = size_mb << 20;
}
static int setup_debugfs(struct adapter *adap)
{
int i;
u32 size;
if (IS_ERR_OR_NULL(adap->debugfs_root))
return -1;
i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
if (i & EDRAM0_ENABLE) {
size = t4_read_reg(adap, MA_EDRAM0_BAR);
add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
}
if (i & EDRAM1_ENABLE) {
size = t4_read_reg(adap, MA_EDRAM1_BAR);
add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
}
if (is_t4(adap->chip)) {
size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
if (i & EXT_MEM_ENABLE)
add_debugfs_mem(adap, "mc", MEM_MC,
EXT_MEM_SIZE_GET(size));
} else {
if (i & EXT_MEM_ENABLE) {
size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
add_debugfs_mem(adap, "mc0", MEM_MC0,
EXT_MEM_SIZE_GET(size));
}
if (i & EXT_MEM1_ENABLE) {
size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
add_debugfs_mem(adap, "mc1", MEM_MC1,
EXT_MEM_SIZE_GET(size));
}
}
if (adap->l2t)
debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
&t4_l2t_fops);
return 0;
}
/*
* upper-layer driver support
*/
/*
* Allocate an active-open TID and set it to the supplied value.
*/
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
int atid = -1;
spin_lock_bh(&t->atid_lock);
if (t->afree) {
union aopen_entry *p = t->afree;
atid = (p - t->atid_tab) + t->atid_base;
t->afree = p->next;
p->data = data;
t->atids_in_use++;
}
spin_unlock_bh(&t->atid_lock);
return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);
/*
* Release an active-open TID.
*/
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
spin_lock_bh(&t->atid_lock);
p->next = t->afree;
t->afree = p;
t->atids_in_use--;
spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);
/*
* Allocate a server TID and set it to the supplied value.
*/
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
int stid;
spin_lock_bh(&t->stid_lock);
if (family == PF_INET) {
stid = find_first_zero_bit(t->stid_bmap, t->nstids);
if (stid < t->nstids)
__set_bit(stid, t->stid_bmap);
else
stid = -1;
} else {
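/* Non-IPv4 (IPv6) servers take an order-2 region, i.e. 4 consecutive
 * stids; normalize any failure to -1 to match the PF_INET path.
 */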
stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
if (stid < 0)
stid = -1;
}
if (stid >= 0) {
t->stid_tab[stid].data = data;
stid += t->stid_base;
t->stids_in_use++;
}
spin_unlock_bh(&t->stid_lock);
return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
/* Allocate a server filter TID and set it to the supplied value.
*/
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
int stid;
spin_lock_bh(&t->stid_lock);
if (family == PF_INET) {
stid = find_next_zero_bit(t->stid_bmap,
t->nstids + t->nsftids, t->nstids);
if (stid < (t->nstids + t->nsftids))
__set_bit(stid, t->stid_bmap);
else
stid = -1;
} else {
stid = -1;
}
if (stid >= 0) {
t->stid_tab[stid].data = data;
stid += t->stid_base;
t->stids_in_use++;
}
spin_unlock_bh(&t->stid_lock);
return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);
/* Release a server TID.
*/
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
stid -= t->stid_base;
spin_lock_bh(&t->stid_lock);
if (family == PF_INET)
__clear_bit(stid, t->stid_bmap);
else
bitmap_release_region(t->stid_bmap, stid, 2);
t->stid_tab[stid].data = NULL;
t->stids_in_use--;
spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
/*
* Populate a TID_RELEASE WR. Caller must properly size the skb.
*/
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
unsigned int tid)
{
struct cpl_tid_release *req;
set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
INIT_TP_WR(req, tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
/*
* Queue a TID release request and, if necessary, schedule a work item to
* process it.
*/
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
unsigned int tid)
{
void **p = &t->tid_tab[tid];
struct adapter *adap = container_of(t, struct adapter, tids);
spin_lock_bh(&adap->tid_release_lock);
*p = adap->tid_release_head;
/* Low 2 bits encode the Tx channel number */
adap->tid_release_head = (void **)((uintptr_t)p | chan);
if (!adap->tid_release_task_busy) {
adap->tid_release_task_busy = true;
queue_work(workq, &adap->tid_release_task);
}
spin_unlock_bh(&adap->tid_release_lock);
}
/*
* Process the list of pending TID release requests.
*/
static void process_tid_release_list(struct work_struct *work)
{
struct sk_buff *skb;
struct adapter *adap;
adap = container_of(work, struct adapter, tid_release_task);
spin_lock_bh(&adap->tid_release_lock);
while (adap->tid_release_head) {
void **p = adap->tid_release_head;
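/* Strip the Tx channel number from the low 2 bits to recover the
 * tid_tab slot pointer (see cxgb4_queue_tid_release()).
 */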
unsigned int chan = (uintptr_t)p & 3;
p = (void *)p - chan;
adap->tid_release_head = *p;
*p = NULL;
spin_unlock_bh(&adap->tid_release_lock);
while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
GFP_KERNEL)))
schedule_timeout_uninterruptible(1);
mk_tid_release(skb, chan, p - adap->tids.tid_tab);
t4_ofld_send(adap, skb);
spin_lock_bh(&adap->tid_release_lock);
}
adap->tid_release_task_busy = false;
spin_unlock_bh(&adap->tid_release_lock);
}
/*
* Release a TID and inform HW. If we are unable to allocate the release
* message, we defer it to a work queue.
*/
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
void *old;
struct sk_buff *skb;
struct adapter *adap = container_of(t, struct adapter, tids);
old = t->tid_tab[tid];
skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
if (likely(skb)) {
t->tid_tab[tid] = NULL;
mk_tid_release(skb, chan, tid);
t4_ofld_send(adap, skb);
} else
cxgb4_queue_tid_release(t, chan, tid);
if (old)
atomic_dec(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
/*
* Allocate and initialize the TID tables. Returns 0 on success.
*/
static int tid_init(struct tid_info *t)
{
size_t size;
unsigned int stid_bmap_size;
unsigned int natids = t->natids;
stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
size = t->ntids * sizeof(*t->tid_tab) +
natids * sizeof(*t->atid_tab) +
t->nstids * sizeof(*t->stid_tab) +
t->nsftids * sizeof(*t->stid_tab) +
stid_bmap_size * sizeof(long) +
t->nftids * sizeof(*t->ftid_tab) +
t->nsftids * sizeof(*t->ftid_tab);
t->tid_tab = t4_alloc_mem(size);
if (!t->tid_tab)
return -ENOMEM;
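/* Carve the single allocation into the individual tables, in the same
 * order used to compute size above.
 */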
t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
spin_lock_init(&t->stid_lock);
spin_lock_init(&t->atid_lock);
t->stids_in_use = 0;
t->afree = NULL;
t->atids_in_use = 0;
atomic_set(&t->tids_in_use, 0);
/* Set up the free list for atid_tab and clear the stid bitmap. */
if (natids) {
while (--natids)
t->atid_tab[natids - 1].next = &t->atid_tab[natids];
t->afree = t->atid_tab;
}
bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
return 0;
}
/**
* cxgb4_create_server - create an IP server
* @dev: the device
* @stid: the server TID
* @sip: local IP address to bind server to
* @sport: the server's TCP port
* @vlan: the VLAN ID (unused by this function)
* @queue: queue to direct messages from this server to
*
* Create an IP server for the given port and address.
* Returns <0 on error and one of the %NET_XMIT_* values on success.
*/
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
__be32 sip, __be16 sport, __be16 vlan,
unsigned int queue)
{
unsigned int chan;
struct sk_buff *skb;
struct adapter *adap;
struct cpl_pass_open_req *req;
skb = alloc_skb(sizeof(*req), GFP_KERNEL);
if (!skb)
return -ENOMEM;
adap = netdev2adap(dev);
req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
req->local_port = sport;
req->peer_port = htons(0);
req->local_ip = sip;
req->peer_ip = htonl(0);
chan = rxq_to_chan(&adap->sge, queue);
req->opt0 = cpu_to_be64(TX_CHAN(chan));
req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
return t4_mgmt_tx(adap, skb);
}
EXPORT_SYMBOL(cxgb4_create_server);
/**
* cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
* @mtus: the HW MTU table
* @mtu: the target MTU
* @idx: index of selected entry in the MTU table
*
* Returns the index and the value in the HW MTU table that is closest to
* but does not exceed @mtu, unless @mtu is smaller than any value in the
* table, in which case that smallest available value is selected.
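*
* For example, with hypothetical table entries ... 1488, 1500, 2002 ...,
* a target MTU of 1600 would select 1500.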
*/
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
unsigned int *idx)
{
unsigned int i = 0;
while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
++i;
if (idx)
*idx = i;
return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
/**
* cxgb4_port_chan - get the HW channel of a port
* @dev: the net device for the port
*
* Return the HW Tx channel of the given port.
*/
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
struct adapter *adap = netdev2adap(dev);
u32 v1, v2, lp_count, hp_count;
v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
if (is_t4(adap->chip)) {
lp_count = G_LP_COUNT(v1);
hp_count = G_HP_COUNT(v1);
} else {
lp_count = G_LP_COUNT_T5(v1);
hp_count = G_HP_COUNT_T5(v2);
}
return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);
/**
* cxgb4_port_viid - get the VI id of a port
* @dev: the net device for the port
*
* Return the VI id of the given port.
*/
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);
/**
* cxgb4_port_idx - get the index of a port
* @dev: the net device for the port
*
* Return the index of the given port.
*/
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6)
{
struct adapter *adap = pci_get_drvdata(pdev);
spin_lock(&adap->stats_lock);
t4_tp_get_tcp_stats(adap, v4, v6);
spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);
void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
const unsigned int *pgsz_order)
{
struct adapter *adap = netdev2adap(dev);
t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
HPZ3(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);
int cxgb4_flush_eq_cache(struct net_device *dev)
{
struct adapter *adap = netdev2adap(dev);
int ret;
ret = t4_fwaddrspace_write(adap, adap->mbox,
0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
return ret;
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
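/*
 * Each egress-queue context in the doorbell-queue table is evidently 24
 * bytes; the second 8-byte word (offset +8) holds the hardware cidx and
 * pidx at bit offsets 25 and 9 respectively.
 */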
u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
__be64 indices;
int ret;
ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
if (!ret) {
*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
}
return ret;
}
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
u16 size)
{
struct adapter *adap = netdev2adap(dev);
u16 hw_pidx, hw_cidx;
int ret;
ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
if (ret)
goto out;
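/* Compute how far software is ahead of hardware, accounting for ring
 * wrap, and ring the doorbell for the missing increments.
 */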
if (pidx != hw_pidx) {
u16 delta;
if (pidx >= hw_pidx)
delta = pidx - hw_pidx;
else
delta = size - hw_pidx + pidx;
wmb();
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
QID(qid) | PIDX(delta));
}
out:
return ret;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
void cxgb4_disable_db_coalescing(struct net_device *dev)
{
struct adapter *adap;
adap = netdev2adap(dev);
t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
F_NOCOALESCE);
}
EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
void cxgb4_enable_db_coalescing(struct net_device *dev)
{
struct adapter *adap;
adap = netdev2adap(dev);
t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
}
EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
static struct pci_driver cxgb4_driver;
static void check_neigh_update(struct neighbour *neigh)
{
const struct device *parent;
const struct net_device *netdev = neigh->dev;
if (netdev->priv_flags & IFF_802_1Q_VLAN)
netdev = vlan_dev_real_dev(netdev);
parent = netdev->dev.parent;
if (parent && parent->driver == &cxgb4_driver.driver)
t4_l2t_update(dev_get_drvdata(parent), neigh);
}
static int netevent_cb(struct notifier_block *nb, unsigned long event,
void *data)
{
switch (event) {
case NETEVENT_NEIGH_UPDATE:
check_neigh_update(data);
break;
case NETEVENT_REDIRECT:
default:
break;
}
return 0;
}
static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
.notifier_call = netevent_cb
};
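/*
 * Poll the SGE doorbell-FIFO occupancy counters, sleeping ~usecs between
 * reads, until both the low- and high-priority FIFOs have drained.
 */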
static void drain_db_fifo(struct adapter *adap, int usecs)
{
u32 v1, v2, lp_count, hp_count;
do {
v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
if (is_t4(adap->chip)) {
lp_count = G_LP_COUNT(v1);
hp_count = G_HP_COUNT(v1);
} else {
lp_count = G_LP_COUNT_T5(v1);
hp_count = G_HP_COUNT_T5(v2);
}
if (lp_count == 0 && hp_count == 0)
break;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(usecs_to_jiffies(usecs));
} while (1);
}
static void disable_txq_db(struct sge_txq *q)
{
spin_lock_irq(&q->db_lock);
q->db_disabled = 1;
spin_unlock_irq(&q->db_lock);
}
static void enable_txq_db(struct sge_txq *q)
{
spin_lock_irq(&q->db_lock);
q->db_disabled = 0;
spin_unlock_irq(&q->db_lock);
}
static void disable_dbs(struct adapter *adap)
{
int i;
for_each_ethrxq(&adap->sge, i)
disable_txq_db(&adap->sge.ethtxq[i].q);
for_each_ofldrxq(&adap->sge, i)
disable_txq_db(&adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
disable_txq_db(&adap->sge.ctrlq[i].q);
}
static void enable_dbs(struct adapter *adap)
{
int i;
for_each_ethrxq(&adap->sge, i)
enable_txq_db(&adap->sge.ethtxq[i].q);
for_each_ofldrxq(&adap->sge, i)
enable_txq_db(&adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
enable_txq_db(&adap->sge.ctrlq[i].q);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
u16 hw_pidx, hw_cidx;
int ret;
spin_lock_bh(&q->db_lock);
ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
if (ret)
goto out;
if (q->db_pidx != hw_pidx) {
u16 delta;
if (q->db_pidx >= hw_pidx)
delta = q->db_pidx - hw_pidx;
else
delta = q->size - hw_pidx + q->db_pidx;
wmb();
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
QID(q->cntxt_id) | PIDX(delta));
}
out:
q->db_disabled = 0;
spin_unlock_bh(&q->db_lock);
if (ret)
CH_WARN(adap, "DB drop recovery failed.\n");
}
static void recover_all_queues(struct adapter *adap)
{
int i;
for_each_ethrxq(&adap->sge, i)
sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
for_each_ofldrxq(&adap->sge, i)
sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
mutex_lock(&uld_mutex);
if (adap->uld_handle[CXGB4_ULD_RDMA])
ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
cmd);
mutex_unlock(&uld_mutex);
}
static void process_db_full(struct work_struct *work)
{
struct adapter *adap;
adap = container_of(work, struct adapter, db_full_task);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
drain_db_fifo(adap, dbfifo_drain_delay);
t4_set_reg_field(adap, SGE_INT_ENABLE3,
DBFIFO_HP_INT | DBFIFO_LP_INT,
DBFIFO_HP_INT | DBFIFO_LP_INT);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
}
static void process_db_drop(struct work_struct *work)
{
struct adapter *adap;
adap = container_of(work, struct adapter, db_drop_task);
if (is_t4(adap->chip)) {
disable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
drain_db_fifo(adap, 1);
recover_all_queues(adap);
enable_dbs(adap);
} else {
u32 dropped_db = t4_read_reg(adap, 0x010ac);
u32 qid = (dropped_db >> 15) & 0x1ffff;	/* 17-bit QID field */
u16 pidx_inc = dropped_db & 0x1fff;
unsigned int s_qpp;
unsigned short udb_density;
unsigned long qpshift;
int page;
u32 udb;
dev_warn(adap->pdev_dev,
"Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
dropped_db, qid,
(dropped_db >> 14) & 1,
(dropped_db >> 13) & 1,
pidx_inc);
drain_db_fifo(adap, 1);
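/*
 * Replay the lost PIDX increment through the queue's BAR2 user doorbell.
 * SGE_EGRESS_QUEUES_PER_PAGE_PF holds a 4-bit log2(queues per page)
 * field per PF (QUEUESPERPAGEPF1 is the per-PF stride); qid << qpshift,
 * masked to a page boundary, selects the doorbell page, and each queue
 * then owns a 128-byte slot within it, with the PIDX register evidently
 * at slot offset 8.
 */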
s_qpp = QUEUESPERPAGEPF1 * adap->fn;
udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
qpshift = PAGE_SHIFT - ilog2(udb_density);
udb = qid << qpshift;
udb &= PAGE_MASK;
page = udb / PAGE_SIZE;
udb += (qid - (page * udb_density)) * 128;
writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
/* Re-enable BAR2 write-combining (WC) */
t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
}
t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
}
void t4_db_full(struct adapter *adap)
{
if (is_t4(adap->chip)) {
t4_set_reg_field(adap, SGE_INT_ENABLE3,
DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
queue_work(workq, &adap->db_full_task);
}
}
void t4_db_dropped(struct adapter *adap)
{
if (is_t4(adap->chip))
queue_work(workq, &adap->db_drop_task);
}
static void uld_attach(struct adapter *adap, unsigned int uld)
{
void *handle;
struct cxgb4_lld_info lli;
unsigned short i;
lli.pdev = adap->pdev;
lli.l2t = adap->l2t;
lli.tids = &adap->tids;
lli.ports = adap->port;
lli.vr = &adap->vres;
lli.mtus = adap->params.mtus;
if (uld == CXGB4_ULD_RDMA) {
lli.rxq_ids = adap->sge.rdma_rxq;
lli.nrxq = adap->sge.rdmaqs;
} else if (uld == CXGB4_ULD_ISCSI) {
lli.rxq_ids = adap->sge.ofld_rxq;
lli.nrxq = adap->sge.ofldqsets;
}
lli.ntxq = adap->sge.ofldqsets;
lli.nchan = adap->params.nports;
lli.nports = adap->params.nports;
lli.wr_cred = adap->params.ofldq_wr_cred;
lli.adapter_type = adap->params.rev;
lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
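/*
 * The per-PF queues-per-page fields are 4 bits wide, hence the
 * (fn * 4) shift before extracting this function's log2 density.
 */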
lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
(adap->fn * 4));
lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
(adap->fn * 4));
lli.filt_mode = adap->filter_mode;
/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
for (i = 0; i < NCHAN; i++)
lli.tx_modq[i] = i;
lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
lli.fw_vers = adap->params.fw_vers;
lli.dbfifo_int_thresh = dbfifo_int_thresh;
lli.sge_pktshift = adap->sge.pktshift;
lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
handle = ulds[uld].add(&lli);
if (IS_ERR(handle)) {
dev_warn(adap->pdev_dev,
"could not attach to the %s driver, error %ld\n",
uld_str[uld], PTR_ERR(handle));
return;
}
adap->uld_handle[uld] = handle;
if (!netevent_registered) {
register_netevent_notifier(&cxgb4_netevent_nb);
netevent_registered = true;
}
if (adap->flags & FULL_INIT_DONE)
ulds[uld].state_change(handle, CXGB4_STATE_UP);
}
static void attach_ulds(struct adapter *adap)
{
unsigned int i;
mutex_lock(&uld_mutex);
list_add_tail(&adap->list_node, &adapter_list);
for (i = 0; i < CXGB4_ULD_MAX; i++)
if (ulds[i].add)
uld_attach(adap, i);
mutex_unlock(&uld_mutex);
}
static void detach_ulds(struct adapter *adap)
{
unsigned int i;
mutex_lock(&uld_mutex);
list_del(&adap->list_node);
for (i = 0; i < CXGB4_ULD_MAX; i++)
if (adap->uld_handle[i]) {
ulds[i].state_change(adap->uld_handle[i],
CXGB4_STATE_DETACH);
adap->uld_handle[i] = NULL;
}
if (netevent_registered && list_empty(&adapter_list)) {
unregister_netevent_notifier(&cxgb4_netevent_nb);
netevent_registered = false;
}
mutex_unlock(&uld_mutex);
}
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
unsigned int i;
mutex_lock(&uld_mutex);
for (i = 0; i < CXGB4_ULD_MAX; i++)
if (adap->uld_handle[i])
ulds[i].state_change(adap->uld_handle[i], new_state);
mutex_unlock(&uld_mutex);
}
/**
* cxgb4_register_uld - register an upper-layer driver
* @type: the ULD type
* @p: the ULD methods
*
* Registers an upper-layer driver with this driver and notifies the ULD
* about any presently available devices that support its type. Returns
* %-EBUSY if a ULD of the same type is already registered.
*/
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
{
int ret = 0;
struct adapter *adap;
if (type >= CXGB4_ULD_MAX)
return -EINVAL;
mutex_lock(&uld_mutex);
if (ulds[type].add) {
ret = -EBUSY;
goto out;
}
ulds[type] = *p;
list_for_each_entry(adap, &adapter_list, list_node)
uld_attach(adap, type);
out: mutex_unlock(&uld_mutex);
return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
/**
* cxgb4_unregister_uld - unregister an upper-layer driver
* @type: the ULD type
*
* Unregisters an existing upper-layer driver.
*/
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
struct adapter *adap;
if (type >= CXGB4_ULD_MAX)
return -EINVAL;
mutex_lock(&uld_mutex);
list_for_each_entry(adap, &adapter_list, list_node)
adap->uld_handle[type] = NULL;
ulds[type].add = NULL;
mutex_unlock(&uld_mutex);
return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
/**
* cxgb_up - enable the adapter
* @adap: adapter being enabled
*
* Called when the first port is enabled, this function performs the
* actions necessary to make an adapter operational, such as completing
* the initialization of HW modules and enabling interrupts.
*
* Must be called with the rtnl lock held.
*/
static int cxgb_up(struct adapter *adap)
{
int err;
err = setup_sge_queues(adap);
if (err)
goto out;
err = setup_rss(adap);
if (err)
goto freeq;
if (adap->flags & USING_MSIX) {
name_msix_vecs(adap);
err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
adap->msix_info[0].desc, adap);
if (err)
goto irq_err;
err = request_msix_queue_irqs(adap);
if (err) {
free_irq(adap->msix_info[0].vec, adap);
goto irq_err;
}
} else {
err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
(adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
adap->port[0]->name, adap);
if (err)
goto irq_err;
}
enable_rx(adap);
t4_sge_start(adap);
t4_intr_enable(adap);
adap->flags |= FULL_INIT_DONE;
notify_ulds(adap, CXGB4_STATE_UP);
out:
return err;
irq_err:
dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
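/* fall through: release the SGE resources allocated above */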
freeq:
t4_free_sge_resources(adap);
goto out;
}
static void cxgb_down(struct adapter *adapter)
{
t4_intr_disable(adapter);
cancel_work_sync(&adapter->tid_release_task);
cancel_work_sync(&adapter->db_full_task);
cancel_work_sync(&adapter->db_drop_task);
adapter->tid_release_task_busy = false;
adapter->tid_release_head = NULL;
if (adapter->flags & USING_MSIX) {
free_msix_queue_irqs(adapter);
free_irq(adapter->msix_info[0].vec, adapter);
} else
free_irq(adapter->pdev->irq, adapter);
quiesce_rx(adapter);
t4_sge_stop(adapter);
t4_free_sge_resources(adapter);
adapter->flags &= ~FULL_INIT_DONE;
}
/*
* net_device operations
*/
static int cxgb_open(struct net_device *dev)
{
int err;
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
netif_carrier_off(dev);
if (!(adapter->flags & FULL_INIT_DONE)) {
err = cxgb_up(adapter);
if (err < 0)
return err;
}
err = link_start(dev);
if (!err)
netif_tx_start_all_queues(dev);
return err;
}
static int cxgb_close(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
}
/* Return an error number if the indicated filter isn't writable ...
*/
static int writable_filter(struct filter_entry *f)
{
if (f->locked)
return -EPERM;
if (f->pending)
return -EBUSY;
return 0;
}
/* Delete the filter at the specified index (if valid), checking for all the
 * common problems with doing this: the filter being locked, currently pending
 * in another operation, etc.
*/
static int delete_filter(struct adapter *adapter, unsigned int fidx)
{
struct filter_entry *f;
int ret;
if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
return -EINVAL;
f = &adapter->tids.ftid_tab[fidx];
ret = writable_filter(f);
if (ret)
return ret;
if (f->valid)
return del_filter_wr(adapter, fidx);
return 0;
}
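/**
 * cxgb4_create_server_filter - create a server filter entry
 * @dev: the device
 * @stid: the server filter TID (in stid space; converted to a filter index)
 * @sip: local IP address to match, or 0 to wildcard the address
 * @sport: local TCP port to match
 * @vlan: the VLAN ID (unused by this function)
 * @queue: ingress queue to steer matching packets to
 * @port: physical ingress port to match (used only when the iport field is
 *	enabled in the filter mode)
 * @mask: ingress port mask
 *
 * Builds a locked, direct-steering filter that matches @sport (and, when
 * nonzero, @sip) and steers matching packets to @queue.
 */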
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
__be32 sip, __be16 sport, __be16 vlan,
unsigned int queue, unsigned char port, unsigned char mask)
{
int ret;
struct filter_entry *f;
struct adapter *adap;
int i;
u8 *val;
adap = netdev2adap(dev);
/* Adjust stid to the correct filter index */
stid -= adap->tids.nstids;
stid += adap->tids.nftids;
/* Check to make sure the filter requested is writable ...
*/
f = &adap->tids.ftid_tab[stid];
ret = writable_filter(f);
if (ret)
return ret;
/* Clear out any old resources being used by the filter before
* we start constructing the new filter.
*/
if (f->valid)
clear_filter(adap, f);
/* Clear out filter specifications */
memset(&f->fs, 0, sizeof(struct ch_filter_specification));
f->fs.val.lport = cpu_to_be16(sport);
f->fs.mask.lport = ~0;
val = (u8 *)&sip;
if ((val[0] | val[1] | val[2] | val[3]) != 0) {
for (i = 0; i < 4; i++) {
f->fs.val.lip[i] = val[i];
f->fs.mask.lip[i] = ~0;
}
if (adap->filter_mode & F_PORT) {
f->fs.val.iport = port;
f->fs.mask.iport = mask;
}
}
f->fs.dirsteer = 1;
f->fs.iq = queue;
/* Mark filter as locked */
f->locked = 1;
f->fs.rpttid = 1;
ret = set_filter_wr(adap, stid);
if (ret) {
clear_filter(adap, f);
return ret;
}
return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
unsigned int queue, bool ipv6)
{
int ret;
struct filter_entry *f;
struct adapter *adap;
adap = netdev2adap(dev);
/* Adjust stid to the correct filter index */
stid -= adap->tids.nstids;
stid += adap->tids.nftids;
f = &adap->tids.ftid_tab[stid];
/* Unlock the filter */
f->locked = 0;
ret = delete_filter(adap, stid);
if (ret)
return ret;
return 0;
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);
static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *ns)
{
struct port_stats stats;
struct port_info *p = netdev_priv(dev);
struct adapter *adapter = p->adapter;
spin_lock(&adapter->stats_lock);
t4_get_port_stats(adapter, p->tx_chan, &stats);
spin_unlock(&adapter->stats_lock);
ns->tx_bytes = stats.tx_octets;
ns->tx_packets = stats.tx_frames;
ns->rx_bytes = stats.rx_octets;
ns->rx_packets = stats.rx_frames;
ns->multicast = stats.rx_mcast_frames;
/* detailed rx_errors */
ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
stats.rx_runt;
ns->rx_over_errors = 0;
ns->rx_crc_errors = stats.rx_fcs_err;
ns->rx_frame_errors = stats.rx_symbol_err;
ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
stats.rx_ovflow2 + stats.rx_ovflow3 +
stats.rx_trunc0 + stats.rx_trunc1 +
stats.rx_trunc2 + stats.rx_trunc3;
ns->rx_missed_errors = 0;
/* detailed tx_errors */
ns->tx_aborted_errors = 0;
ns->tx_carrier_errors = 0;
ns->tx_fifo_errors = 0;
ns->tx_heartbeat_errors = 0;
ns->tx_window_errors = 0;
ns->tx_errors = stats.tx_error_frames;
ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
return ns;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
unsigned int mbox;
int ret = 0, prtad, devad;
struct port_info *pi = netdev_priv(dev);
struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
switch (cmd) {
case SIOCGMIIPHY:
if (pi->mdio_addr < 0)
return -EOPNOTSUPP;
data->phy_id = pi->mdio_addr;
break;
case SIOCGMIIREG:
case SIOCSMIIREG:
if (mdio_phy_id_is_c45(data->phy_id)) {
prtad = mdio_phy_id_prtad(data->phy_id);
devad = mdio_phy_id_devad(data->phy_id);
} else if (data->phy_id < 32) {
prtad = data->phy_id;
devad = 0;
data->reg_num &= 0x1f;
} else
return -EINVAL;
mbox = pi->adapter->fn;
if (cmd == SIOCGMIIREG)
ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
data->reg_num, &data->val_out);
else
ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
data->reg_num, data->val_in);
break;
default:
return -EOPNOTSUPP;
}
return ret;
}
static void cxgb_set_rxmode(struct net_device *dev)
{
/* unfortunately we can't return errors to the stack */
set_rxmode(dev, -1, false);
}
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
int ret;
struct port_info *pi = netdev_priv(dev);
if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
return -EINVAL;
ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
-1, -1, -1, true);
if (!ret)
dev->mtu = new_mtu;
return ret;
}
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
int ret;
struct sockaddr *addr = p;
struct port_info *pi = netdev_priv(dev);
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
pi->xact_addr_filt, addr->sa_data, true, true);
if (ret < 0)
return ret;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
pi->xact_addr_filt = ret;
return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
if (adap->flags & USING_MSIX) {
int i;
struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
for (i = pi->nqsets; i; i--, rx++)
t4_sge_intr_msix(0, &rx->rspq);
} else
t4_intr_handler(adap)(0, adap);
}
#endif
static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_open = cxgb_open,
.ndo_stop = cxgb_close,
.ndo_start_xmit = t4_eth_xmit,
.ndo_get_stats64 = cxgb_get_stats,
.ndo_set_rx_mode = cxgb_set_rxmode,
.ndo_set_mac_address = cxgb_set_mac_addr,
.ndo_set_features = cxgb_set_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = cxgb_ioctl,
.ndo_change_mtu = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cxgb_netpoll,
#endif
};
void t4_fatal_err(struct adapter *adap)
{
t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
t4_intr_disable(adap);
dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
static void setup_memwin(struct adapter *adap)
{
u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
if (is_t4(adap->chip)) {
mem_win0_base = bar0 + MEMWIN0_BASE;
mem_win1_base = bar0 + MEMWIN1_BASE;
mem_win2_base = bar0 + MEMWIN2_BASE;
} else {
/* For T5, only the relative offset inside the PCIe BAR is passed */
mem_win0_base = MEMWIN0_BASE;
mem_win1_base = MEMWIN1_BASE_T5;
mem_win2_base = MEMWIN2_BASE_T5;
}
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
mem_win0_base | BIR(0) |
WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
mem_win1_base | BIR(0) |
WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
mem_win2_base | BIR(0) |
WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
}
static void setup_memwin_rdma(struct adapter *adap)
{
if (adap->vres.ocq.size) {
unsigned int start, sz_kb;
start = pci_resource_start(adap->pdev, 2) +
OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
t4_write_reg(adap,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
start | BIR(1) | WINDOW(ilog2(sz_kb)));
t4_write_reg(adap,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
adap->vres.ocq.start);
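/* Read the offset register back so the writes above are flushed and
* the window is fully programmed before anyone tries to use it.
*/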
t4_read_reg(adap,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
}
}
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
u32 v;
int ret;
/* get device capabilities */
memset(c, 0, sizeof(*c));
c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
if (ret < 0)
return ret;
/* select capabilities we'll be using */
if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
if (!vf_acls)
c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
else
c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
} else if (vf_acls) {
dev_err(adap->pdev_dev, "virtualization ACLs not supported");
return -EINVAL;
}
c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_WRITE);
ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
if (ret < 0)
return ret;
ret = t4_config_glbl_rss(adap, adap->fn,
FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
if (ret < 0)
return ret;
ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
if (ret < 0)
return ret;
t4_sge_init(adap);
/* tweak some settings */
t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
v = t4_read_reg(adap, TP_PIO_DATA);
t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
/* first 4 Tx modulation queues point to consecutive Tx channels */
adap->params.tp.tx_modq_map = 0xE4;
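/* 0xE4 is 11 10 01 00 in binary: four 2-bit fields mapping
* queue 0 -> channel 0, 1 -> 1, 2 -> 2 and 3 -> 3.
*/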
t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
/* associate each Tx modulation queue with consecutive Tx channels */
v = 0x84218421;
t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
&v, 1, A_TP_TX_SCHED_HDR);
t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
&v, 1, A_TP_TX_SCHED_FIFO);
t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
&v, 1, A_TP_TX_SCHED_PCMD);
#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
if (is_offload(adap)) {
t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
}
/* get basic stuff going */
return t4_early_init(adap, adap->fn);
}
/*
* Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
*/
#define MAX_ATIDS 8192U
/*
* Phase 0 of initialization: contact FW, obtain config, perform basic init.
*
* If the firmware we're dealing with has Configuration File support, then
* we use that to perform all configuration.
*/
/*
* Tweak configuration based on module parameters, etc. Most of these have
* defaults assigned to them by Firmware Configuration Files (if we're using
* them) but need to be explicitly set if we're using hard-coded
* initialization. But even in the case of using Firmware Configuration
* Files, we'd like to expose the ability to change these via module
* parameters so these are essentially common tweaks/settings for
* Configuration Files and hard-coded initialization ...
*/
static int adap_init0_tweaks(struct adapter *adapter)
{
/*
* Fix up various Host-Dependent Parameters like Page Size, Cache
* Line Size, etc. The firmware default is for a 4KB Page Size and
* 64B Cache Line Size ...
*/
t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
/*
* Process module parameters which affect early initialization.
*/
if (rx_dma_offset != 2 && rx_dma_offset != 0) {
dev_err(&adapter->pdev->dev,
"Ignoring illegal rx_dma_offset=%d, using 2\n",
rx_dma_offset);
rx_dma_offset = 2;
}
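/* A 2-byte offset plus the 14-byte Ethernet header leaves the IP
* header 16-byte aligned, which is why 2 is the enforced default.
*/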
t4_set_reg_field(adapter, SGE_CONTROL,
PKTSHIFT_MASK,
PKTSHIFT(rx_dma_offset));
/*
* Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
* adds the pseudo header itself.
*/
t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
CSUM_HAS_PSEUDO_HDR, 0);
return 0;
}
/*
* Attempt to initialize the adapter via a Firmware Configuration File.
*/
static int adap_init0_config(struct adapter *adapter, int reset)
{
struct fw_caps_config_cmd caps_cmd;
const struct firmware *cf;
unsigned long mtype = 0, maddr = 0;
u32 finiver, finicsum, cfcsum;
int ret, using_flash;
char *fw_config_file, fw_config_file_path[256];
/*
* Reset device if necessary.
*/
if (reset) {
ret = t4_fw_reset(adapter, adapter->mbox,
PIORSTMODE | PIORST);
if (ret < 0)
goto bye;
}
/*
* If we have a T4 configuration file under /lib/firmware/cxgb4/,
* then use that. Otherwise, use the configuration file stored
* in the adapter flash ...
*/
switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
case CHELSIO_T4:
fw_config_file = FW_CFNAME;
break;
case CHELSIO_T5:
fw_config_file = FW5_CFNAME;
break;
default:
dev_err(adapter->pdev_dev, "Device %d is not supported\n",
adapter->pdev->device);
ret = -EINVAL;
goto bye;
}
ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
if (ret < 0) {
using_flash = 1;
mtype = FW_MEMTYPE_CF_FLASH;
maddr = t4_flash_cfg_addr(adapter);
} else {
u32 params[7], val[7];
using_flash = 0;
if (cf->size >= FLASH_CFG_MAX_SIZE)
ret = -ENOMEM;
else {
params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
ret = t4_query_params(adapter, adapter->mbox,
adapter->fn, 0, 1, params, val);
if (ret == 0) {
/*
* For t4_memory_write() below addresses and
* sizes have to be in terms of multiples of 4
* bytes. So, if the Configuration File isn't
* a multiple of 4 bytes in length we'll have
* to write that out separately since we can't
* guarantee that the bytes following the
* residual byte in the buffer returned by
* request_firmware() are zeroed out ...
*/
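/* For example, a 103-byte file is written as size = 100 bytes plus
* one final word built from bytes 100..102 with its 4th byte zeroed.
*/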
size_t resid = cf->size & 0x3;
size_t size = cf->size & ~0x3;
__be32 *data = (__be32 *)cf->data;
mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
ret = t4_memory_write(adapter, mtype, maddr,
size, data);
if (ret == 0 && resid != 0) {
union {
__be32 word;
char buf[4];
} last;
int i;
last.word = data[size >> 2];
for (i = resid; i < 4; i++)
last.buf[i] = 0;
ret = t4_memory_write(adapter, mtype,
maddr + size,
4, &last.word);
}
}
}
release_firmware(cf);
if (ret)
goto bye;
}
/*
* Issue a Capability Configuration command to the firmware to get it
* to parse the Configuration File. We don't use t4_fw_config_file()
* because we want the ability to modify various features after we've
* processed the configuration file ...
*/
memset(&caps_cmd, 0, sizeof(caps_cmd));
caps_cmd.op_to_write =
htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_READ);
caps_cmd.cfvalid_to_len16 =
htonl(FW_CAPS_CONFIG_CMD_CFVALID |
FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
&caps_cmd);
if (ret < 0)
goto bye;
finiver = ntohl(caps_cmd.finiver);
finicsum = ntohl(caps_cmd.finicsum);
cfcsum = ntohl(caps_cmd.cfcsum);
if (finicsum != cfcsum)
dev_warn(adapter->pdev_dev, "Configuration File checksum "\
"mismatch: [fini] csum=%#x, computed csum=%#x\n",
finicsum, cfcsum);
/*
* And now tell the firmware to use the configuration we just loaded.
*/
caps_cmd.op_to_write =
htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_WRITE);
caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
NULL);
if (ret < 0)
goto bye;
/*
* Tweak configuration based on system architecture, module
* parameters, etc.
*/
ret = adap_init0_tweaks(adapter);
if (ret < 0)
goto bye;
/*
* And finally tell the firmware to initialize itself using the
* parameters from the Configuration File.
*/
ret = t4_fw_initialize(adapter, adapter->mbox);
if (ret < 0)
goto bye;
sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
/*
* Return successfully and note that we're operating with parameters
* not supplied by the driver, rather than from hard-wired
* initialization constants buried in the driver.
*/
adapter->flags |= USING_SOFT_PARAMS;
dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
"Configuration File %s, version %#x, computed checksum %#x\n",
(using_flash
? "in device FLASH"
: fw_config_file_path),
finiver, cfcsum);
return 0;
/*
* Something bad happened. Return the error ... (If the "error"
* is that there's no Configuration File on the adapter we don't
* want to issue a warning since this is fairly common.)
*/
bye:
if (ret != -ENOENT)
dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
-ret);
return ret;
}
/*
* Attempt to initialize the adapter via hard-coded, driver supplied
* parameters ...
*/
static int adap_init0_no_config(struct adapter *adapter, int reset)
{
struct sge *s = &adapter->sge;
struct fw_caps_config_cmd caps_cmd;
u32 v;
int i, ret;
/*
* Reset device if necessary
*/
if (reset) {
ret = t4_fw_reset(adapter, adapter->mbox,
PIORSTMODE | PIORST);
if (ret < 0)
goto bye;
}
/*
* Get device capabilities and select which we'll be using.
*/
memset(&caps_cmd, 0, sizeof(caps_cmd));
caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
&caps_cmd);
if (ret < 0)
goto bye;
if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
if (!vf_acls)
caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
else
caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
} else if (vf_acls) {
dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
ret = -EINVAL;
goto bye;
}
caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_WRITE);
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
NULL);
if (ret < 0)
goto bye;
/*
* Tweak configuration based on system architecture, module
* parameters, etc.
*/
ret = adap_init0_tweaks(adapter);
if (ret < 0)
goto bye;
/*
* Select RSS Global Mode we want to use. We use "Basic Virtual"
* mode which maps each Virtual Interface to its own section of
* the RSS Table and we turn on all map and hash enables ...
*/
adapter->flags |= RSS_TNLALLLOOKUP;
ret = t4_config_glbl_rss(adapter, adapter->mbox,
FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
((adapter->flags & RSS_TNLALLLOOKUP) ?
FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
if (ret < 0)
goto bye;
/*
* Set up our own fundamental resource provisioning ...
*/
ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
PFRES_NEQ, PFRES_NETHCTRL,
PFRES_NIQFLINT, PFRES_NIQ,
PFRES_TC, PFRES_NVI,
FW_PFVF_CMD_CMASK_MASK,
pfvfres_pmask(adapter, adapter->fn, 0),
PFRES_NEXACTF,
PFRES_R_CAPS, PFRES_WX_CAPS);
if (ret < 0)
goto bye;
/*
* Perform low level SGE initialization. We need to do this before we
* send the firmware the INITIALIZE command because that will cause
* any other PF Drivers which are waiting for the Master
* Initialization to proceed forward.
*/
for (i = 0; i < SGE_NTIMERS - 1; i++)
s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
s->counter_val[0] = 1;
for (i = 1; i < SGE_NCOUNTERS; i++)
s->counter_val[i] = min(intr_cnt[i - 1],
THRESHOLD_0_GET(THRESHOLD_0_MASK));
t4_sge_init(adapter);
#ifdef CONFIG_PCI_IOV
/*
* Provision resource limits for Virtual Functions. We currently
* grant them all the same static resource limits except for the Port
* Access Rights Mask which we're assigning based on the PF. All of
* the static provisioning stuff for both the PF and VF really needs
* to be managed in a persistent manner for each device which the
* firmware controls.
*/
{
int pf, vf;
for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
if (num_vf[pf] <= 0)
continue;
/* VF numbering starts at 1! */
for (vf = 1; vf <= num_vf[pf]; vf++) {
ret = t4_cfg_pfvf(adapter, adapter->mbox,
pf, vf,
VFRES_NEQ, VFRES_NETHCTRL,
VFRES_NIQFLINT, VFRES_NIQ,
VFRES_TC, VFRES_NVI,
FW_PFVF_CMD_CMASK_MASK,
pfvfres_pmask(
adapter, pf, vf),
VFRES_NEXACTF,
VFRES_R_CAPS, VFRES_WX_CAPS);
if (ret < 0)
dev_warn(adapter->pdev_dev,
"failed to "\
"provision pf/vf=%d/%d; "
"err=%d\n", pf, vf, ret);
}
}
}
#endif
/*
* Set up the default filter mode. Later we'll want to implement this
* via a firmware command, etc. ... This needs to be done before the
* firmware initialization command ... If the selected set of fields
* isn't equal to the default value, we'll need to make sure that the
* field selections will fit in the 36-bit budget.
*/
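/* For example, PORT (3) + VLAN (17) + PROTOCOL (8) = 28 bits fits,
* while also selecting ETHERTYPE (16) would need 44 bits and fall
* back to the default below.
*/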
if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
int j, bits = 0;
for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
switch (tp_vlan_pri_map & (1 << j)) {
case 0:
/* compressed filter field not enabled */
break;
case FCOE_MASK:
bits += 1;
break;
case PORT_MASK:
bits += 3;
break;
case VNIC_ID_MASK:
bits += 17;
break;
case VLAN_MASK:
bits += 17;
break;
case TOS_MASK:
bits += 8;
break;
case PROTOCOL_MASK:
bits += 8;
break;
case ETHERTYPE_MASK:
bits += 16;
break;
case MACMATCH_MASK:
bits += 9;
break;
case MPSHITTYPE_MASK:
bits += 3;
break;
case FRAGMENTATION_MASK:
bits += 1;
break;
}
if (bits > 36) {
dev_err(adapter->pdev_dev,
"tp_vlan_pri_map=%#x needs %d bits > 36;"\
" using %#x\n", tp_vlan_pri_map, bits,
TP_VLAN_PRI_MAP_DEFAULT);
tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
}
}
v = tp_vlan_pri_map;
t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
&v, 1, TP_VLAN_PRI_MAP);
/*
* We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
* to support any of the compressed filter fields above. Newer
* versions of the firmware do this automatically but it doesn't hurt
* to set it here. Meanwhile, we do _not_ need to set Lookup Every
* Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
* since the firmware automatically turns this on and off when we have
* a non-zero number of filters active (since it does have a
* performance impact).
*/
if (tp_vlan_pri_map)
t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
FIVETUPLELOOKUP_MASK,
FIVETUPLELOOKUP_MASK);
/*
* Tweak some settings.
*/
t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
/*
* Get basic stuff going by issuing the Firmware Initialize command.
* Note that this _must_ be after all PFVF commands ...
*/
ret = t4_fw_initialize(adapter, adapter->mbox);
if (ret < 0)
goto bye;
/*
* Return successfully!
*/
dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
"driver parameters\n");
return 0;
/*
* Something bad happened. Return the error ...
*/
bye:
return ret;
}
/*
* Phase 0 of initialization: contact FW, obtain config, perform basic init.
*/
static int adap_init0(struct adapter *adap)
{
int ret;
u32 v, port_vec;
enum dev_state state;
u32 params[7], val[7];
struct fw_caps_config_cmd caps_cmd;
int reset = 1, j;
/*
* Contact FW, advertising Master capability (and potentially forcing
* ourselves as the Master PF if our module parameter force_init is
* set).
*/
ret = t4_fw_hello(adap, adap->mbox, adap->fn,
force_init ? MASTER_MUST : MASTER_MAY,
&state);
if (ret < 0) {
dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
ret);
return ret;
}
if (ret == adap->mbox)
adap->flags |= MASTER_PF;
if (force_init && state == DEV_STATE_INIT)
state = DEV_STATE_UNINIT;
/*
* If we're the Master PF Driver and the device is uninitialized,
* then let's consider upgrading the firmware ... (We always want
* to check the firmware version number in order to A. get it for
* later reporting and B. to warn if the currently loaded firmware
* is excessively mismatched relative to the driver.)
*/
ret = t4_check_fw_version(adap);
if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
if (ret == -EINVAL || ret > 0) {
if (upgrade_fw(adap) >= 0) {
/*
* Note that the chip was reset as part of the
* firmware upgrade so we don't reset it again
* below and grab the new firmware version.
*/
reset = 0;
ret = t4_check_fw_version(adap);
}
}
if (ret < 0)
return ret;
}
/*
* Grab VPD parameters. This should be done after we establish a
* connection to the firmware since some of the VPD parameters
* (notably the Core Clock frequency) are retrieved via requests to
* the firmware. On the other hand, we need these fairly early on
* so we do this right after getting ahold of the firmware.
*/
ret = get_vpd_params(adap, &adap->params.vpd);
if (ret < 0)
goto bye;
/*
* Find out what ports are available to us. Note that we need to do
* this before calling adap_init0_no_config() since it needs nports
* and portvec ...
*/
v =
FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
if (ret < 0)
goto bye;
adap->params.nports = hweight32(port_vec);
adap->params.portvec = port_vec;
/*
* If the firmware is initialized already (and we're not forcing a
* master initialization), note that we're living with existing
* adapter parameters. Otherwise, it's time to try initializing the
* adapter ...
*/
if (state == DEV_STATE_INIT) {
dev_info(adap->pdev_dev, "Coming up as %s: "\
"Adapter already initialized\n",
adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
adap->flags |= USING_SOFT_PARAMS;
} else {
dev_info(adap->pdev_dev, "Coming up as MASTER: "\
"Initializing adapter\n");
/*
* If the firmware doesn't support Configuration
* Files, warn the user and carry on.
*/
if (ret < 0)
dev_warn(adap->pdev_dev, "Firmware doesn't support "
"configuration file.\n");
if (force_old_init)
ret = adap_init0_no_config(adap, reset);
else {
/*
* Find out whether we're dealing with a version of
* the firmware which has configuration file support.
*/
params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
params, val);
/*
* If the firmware doesn't support Configuration
* Files, use the old Driver-based, hard-wired
* initialization. Otherwise, try using the
* Configuration File support and fall back to the
* Driver-based initialization if there's no
* Configuration File found.
*/
if (ret < 0)
ret = adap_init0_no_config(adap, reset);
else {
/*
* The firmware provides us with a memory
* buffer where we can load a Configuration
* File from the host if we want to override
* the Configuration File in flash.
*/
ret = adap_init0_config(adap, reset);
if (ret == -ENOENT) {
dev_info(adap->pdev_dev,
"No Configuration File present "
"on adapter. Using hard-wired "
"configuration parameters.\n");
ret = adap_init0_no_config(adap, reset);
}
}
}
if (ret < 0) {
dev_err(adap->pdev_dev,
"could not initialize adapter, error %d\n",
-ret);
goto bye;
}
}
/*
* If we're living with non-hard-coded parameters (either from a
* Firmware Configuration File or values programmed by a different PF
* Driver), give the SGE code a chance to pull in anything that it
* needs ... Note that this must be called after we retrieve our VPD
* parameters in order to know how to convert core ticks to seconds.
*/
if (adap->flags & USING_SOFT_PARAMS) {
ret = t4_sge_init(adap);
if (ret < 0)
goto bye;
}
if (is_bypass_device(adap->pdev->device))
adap->params.bypass = 1;
/*
* Grab some of our basic fundamental operating parameters.
*/
#define FW_PARAM_DEV(param) \
(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
FW_PARAMS_PARAM_Y(0) | \
FW_PARAMS_PARAM_Z(0)
params[0] = FW_PARAM_PFVF(EQ_START);
params[1] = FW_PARAM_PFVF(L2T_START);
params[2] = FW_PARAM_PFVF(L2T_END);
params[3] = FW_PARAM_PFVF(FILTER_START);
params[4] = FW_PARAM_PFVF(FILTER_END);
params[5] = FW_PARAM_PFVF(IQFLINT_START);
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
if (ret < 0)
goto bye;
adap->sge.egr_start = val[0];
adap->l2t_start = val[1];
adap->l2t_end = val[2];
adap->tids.ftid_base = val[3];
adap->tids.nftids = val[4] - val[3] + 1;
adap->sge.ingr_start = val[5];
/* query params related to active filter region */
params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
/* If Active filter size is set we enable establishing
* offload connection through firmware work request
*/
if ((val[0] != val[1]) && (ret >= 0)) {
adap->flags |= FW_OFLD_CONN;
adap->tids.aftid_base = val[0];
adap->tids.aftid_end = val[1];
}
/* If we're running on newer firmware, let it know that we're
* prepared to deal with encapsulated CPL messages. Older
* firmware won't understand this and we'll just get
* unencapsulated messages ...
*/
params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
val[0] = 1;
(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
/*
* Get device capabilities so we can determine what resources we need
* to manage.
*/
memset(&caps_cmd, 0, sizeof(caps_cmd));
caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
&caps_cmd);
if (ret < 0)
goto bye;
if (caps_cmd.ofldcaps) {
/* query offload-related parameters */
params[0] = FW_PARAM_DEV(NTID);
params[1] = FW_PARAM_PFVF(SERVER_START);
params[2] = FW_PARAM_PFVF(SERVER_END);
params[3] = FW_PARAM_PFVF(TDDP_START);
params[4] = FW_PARAM_PFVF(TDDP_END);
params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
params, val);
if (ret < 0)
goto bye;
adap->tids.ntids = val[0];
adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
adap->tids.stid_base = val[1];
adap->tids.nstids = val[2] - val[1] + 1;
/*
* Set up the server filter region. Divide the available filter
* region into two parts: regular filters get 1/3rd and server
* filters get 2/3rd. This is only enabled if the workaround
* path is enabled.
* 1. Regular filters.
* 2. Server filters: these are special filters used to redirect
* SYN packets to the offload queue.
*/
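/* E.g. with nftids = 300, regular filters keep the first 100 ids
* and server filters take the remaining 200.
*/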
if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
adap->tids.sftid_base = adap->tids.ftid_base +
DIV_ROUND_UP(adap->tids.nftids, 3);
adap->tids.nsftids = adap->tids.nftids -
DIV_ROUND_UP(adap->tids.nftids, 3);
adap->tids.nftids = adap->tids.sftid_base -
adap->tids.ftid_base;
}
adap->vres.ddp.start = val[3];
adap->vres.ddp.size = val[4] - val[3] + 1;
adap->params.ofldq_wr_cred = val[5];
adap->params.offload = 1;
}
if (caps_cmd.rdmacaps) {
params[0] = FW_PARAM_PFVF(STAG_START);
params[1] = FW_PARAM_PFVF(STAG_END);
params[2] = FW_PARAM_PFVF(RQ_START);
params[3] = FW_PARAM_PFVF(RQ_END);
params[4] = FW_PARAM_PFVF(PBL_START);
params[5] = FW_PARAM_PFVF(PBL_END);
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
params, val);
if (ret < 0)
goto bye;
adap->vres.stag.start = val[0];
adap->vres.stag.size = val[1] - val[0] + 1;
adap->vres.rq.start = val[2];
adap->vres.rq.size = val[3] - val[2] + 1;
adap->vres.pbl.start = val[4];
adap->vres.pbl.size = val[5] - val[4] + 1;
params[0] = FW_PARAM_PFVF(SQRQ_START);
params[1] = FW_PARAM_PFVF(SQRQ_END);
params[2] = FW_PARAM_PFVF(CQ_START);
params[3] = FW_PARAM_PFVF(CQ_END);
params[4] = FW_PARAM_PFVF(OCQ_START);
params[5] = FW_PARAM_PFVF(OCQ_END);
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
if (ret < 0)
goto bye;
adap->vres.qp.start = val[0];
adap->vres.qp.size = val[1] - val[0] + 1;
adap->vres.cq.start = val[2];
adap->vres.cq.size = val[3] - val[2] + 1;
adap->vres.ocq.start = val[4];
adap->vres.ocq.size = val[5] - val[4] + 1;
}
if (caps_cmd.iscsicaps) {
params[0] = FW_PARAM_PFVF(ISCSI_START);
params[1] = FW_PARAM_PFVF(ISCSI_END);
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
params, val);
if (ret < 0)
goto bye;
adap->vres.iscsi.start = val[0];
adap->vres.iscsi.size = val[1] - val[0] + 1;
}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV
/*
* These are finalized by FW initialization, load their values now.
*/
v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
adap->params.tp.tre = TIMERRESOLUTION_GET(v);
adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
adap->params.b_wnd);
/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
for (j = 0; j < NCHAN; j++)
adap->params.tp.tx_modq[j] = j;
t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
&adap->filter_mode, 1,
TP_VLAN_PRI_MAP);
adap->flags |= FW_OK;
return 0;
/*
* Something bad happened. If a command timed out or failed with EIO,
* the FW is not operating within its spec or something catastrophic
* happened to the HW/FW; stop issuing commands.
*/
bye:
if (ret != -ETIMEDOUT && ret != -EIO)
t4_fw_bye(adap, adap->mbox);
return ret;
}
/* EEH callbacks */
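/*
* EEH recovery runs in three phases: eeh_err_detected() quiesces the
* driver, eeh_slot_reset() re-initializes the function once the slot
* has been reset, and eeh_resume() brings the interfaces back up.
*/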
static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
int i;
struct adapter *adap = pci_get_drvdata(pdev);
if (!adap)
goto out;
rtnl_lock();
adap->flags &= ~FW_OK;
notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
netif_device_detach(dev);
netif_carrier_off(dev);
}
if (adap->flags & FULL_INIT_DONE)
cxgb_down(adap);
rtnl_unlock();
pci_disable_device(pdev);
out: return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
int i, ret;
struct fw_caps_config_cmd c;
struct adapter *adap = pci_get_drvdata(pdev);
if (!adap) {
pci_restore_state(pdev);
pci_save_state(pdev);
return PCI_ERS_RESULT_RECOVERED;
}
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
pci_cleanup_aer_uncorrect_error_status(pdev);
if (t4_wait_dev_ready(adap) < 0)
return PCI_ERS_RESULT_DISCONNECT;
if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
return PCI_ERS_RESULT_DISCONNECT;
adap->flags |= FW_OK;
if (adap_init1(adap, &c))
return PCI_ERS_RESULT_DISCONNECT;
for_each_port(adap, i) {
struct port_info *p = adap2pinfo(adap, i);
ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
NULL, NULL);
if (ret < 0)
return PCI_ERS_RESULT_DISCONNECT;
p->viid = ret;
p->xact_addr_filt = -1;
}
t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
adap->params.b_wnd);
setup_memwin(adap);
if (cxgb_up(adap))
return PCI_ERS_RESULT_DISCONNECT;
return PCI_ERS_RESULT_RECOVERED;
}
static void eeh_resume(struct pci_dev *pdev)
{
int i;
struct adapter *adap = pci_get_drvdata(pdev);
if (!adap)
return;
rtnl_lock();
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
if (netif_running(dev)) {
link_start(dev);
cxgb_set_rxmode(dev);
}
netif_device_attach(dev);
}
rtnl_unlock();
}
static const struct pci_error_handlers cxgb4_eeh = {
.error_detected = eeh_err_detected,
.slot_reset = eeh_slot_reset,
.resume = eeh_resume,
};
static inline bool is_10g_port(const struct link_config *lc)
{
return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}
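/*
* Seed a response queue with its interrupt holdoff parameters: a
* holdoff timer index and, when pkt_cnt_idx is a valid counter index,
* a packet-count threshold (QINTR_CNT_EN).
*/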
static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
unsigned int size, unsigned int iqe_size)
{
q->intr_params = QINTR_TIMER_IDX(timer_idx) |
(pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
q->iqe_len = iqe_size;
q->size = size;
}
/*
* Perform default configuration of DMA queues depending on the number and type
* of ports we found and the number of available CPUs. Most settings can be
* modified by the admin prior to actual use.
*/
static void cfg_queues(struct adapter *adap)
{
struct sge *s = &adap->sge;
int i, q10g = 0, n10g = 0, qidx = 0;
for_each_port(adap, i)
n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
/*
* We default to 1 queue per non-10G port and to up to one queue per
* CPU core per 10G port.
*/
if (n10g)
q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
if (q10g > netif_get_num_default_rss_queues())
q10g = netif_get_num_default_rss_queues();
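/* E.g. two 10G plus two 1G ports with a hypothetical 32-entry
* MAX_ETH_QSETS would give q10g = (32 - 2) / 2 = 15, further capped
* by the default RSS queue count above.
*/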
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
pi->first_qset = qidx;
pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
qidx += pi->nqsets;
}
s->ethqsets = qidx;
s->max_ethqsets = qidx; /* MSI-X may lower it later */
if (is_offload(adap)) {
/*
* For offload we use 1 queue/channel if all ports are up to 1G,
* otherwise we divide all available queues amongst the channels
* capped by the number of available cores.
*/
if (n10g) {
i = min_t(int, ARRAY_SIZE(s->ofldrxq),
num_online_cpus());
s->ofldqsets = roundup(i, adap->params.nports);
} else
s->ofldqsets = adap->params.nports;
/* For RDMA one Rx queue per channel suffices */
s->rdmaqs = adap->params.nports;
}
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
struct sge_eth_rxq *r = &s->ethrxq[i];
init_rspq(&r->rspq, 0, 0, 1024, 64);
r->fl.size = 72;
}
for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
s->ethtxq[i].q.size = 1024;
for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
s->ctrlq[i].q.size = 512;
for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
s->ofldtxq[i].q.size = 1024;
for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
struct sge_ofld_rxq *r = &s->ofldrxq[i];
init_rspq(&r->rspq, 0, 0, 1024, 64);
r->rspq.uld = CXGB4_ULD_ISCSI;
r->fl.size = 72;
}
for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
struct sge_ofld_rxq *r = &s->rdmarxq[i];
init_rspq(&r->rspq, 0, 0, 511, 64);
r->rspq.uld = CXGB4_ULD_RDMA;
r->fl.size = 72;
}
init_rspq(&s->fw_evtq, 6, 0, 512, 64);
init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
}
/*
* Reduce the number of Ethernet queues across all ports to at most n.
* n provides at least one queue per port.
*/
static void reduce_ethqs(struct adapter *adap, int n)
{
int i;
struct port_info *pi;
while (n < adap->sge.ethqsets)
for_each_port(adap, i) {
pi = adap2pinfo(adap, i);
if (pi->nqsets > 1) {
pi->nqsets--;
adap->sge.ethqsets--;
if (adap->sge.ethqsets <= n)
break;
}
}
n = 0;
for_each_port(adap, i) {
pi = adap2pinfo(adap, i);
pi->first_qset = n;
n += pi->nqsets;
}
}
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2
static int enable_msix(struct adapter *adap)
{
int ofld_need = 0;
int i, err, want, need;
struct sge *s = &adap->sge;
unsigned int nchan = adap->params.nports;
struct msix_entry entries[MAX_INGQ + 1];
for (i = 0; i < ARRAY_SIZE(entries); ++i)
entries[i].entry = i;
want = s->max_ethqsets + EXTRA_VECS;
if (is_offload(adap)) {
want += s->rdmaqs + s->ofldqsets;
/* need nchan for each possible ULD */
ofld_need = 2 * nchan;
}
need = adap->params.nports + EXTRA_VECS + ofld_need;
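/* On failure pci_enable_msix() returns the number of vectors that
* are actually available, so retry with that smaller count until we
* drop below our minimum requirement.
*/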
while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
want = err;
if (!err) {
/*
* Distribute available vectors to the various queue groups.
* Every group gets its minimum requirement and NIC gets top
* priority for leftovers.
*/
i = want - EXTRA_VECS - ofld_need;
if (i < s->max_ethqsets) {
s->max_ethqsets = i;
if (i < s->ethqsets)
reduce_ethqs(adap, i);
}
if (is_offload(adap)) {
i = want - EXTRA_VECS - s->max_ethqsets;
i -= ofld_need - nchan;
s->ofldqsets = (i / nchan) * nchan; /* round down */
}
for (i = 0; i < want; ++i)
adap->msix_info[i].vec = entries[i].vector;
} else if (err > 0)
dev_info(adap->pdev_dev,
"only %d MSI-X vectors left, not using MSI-X\n", err);
return err;
}
#undef EXTRA_VECS
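/*
* Allocate each port's RSS indirection table and fill it with the
* ethtool default spread (round-robin over the port's queue sets).
*/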
static int init_rss(struct adapter *adap)
{
unsigned int i, j;
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
if (!pi->rss)
return -ENOMEM;
for (j = 0; j < pi->rss_size; j++)
pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
}
return 0;
}
static void print_port_info(const struct net_device *dev)
{
static const char *base[] = {
"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
"KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
};
char buf[80];
char *bufp = buf;
const char *spd = "";
const struct port_info *pi = netdev_priv(dev);
const struct adapter *adap = pi->adapter;
if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
spd = " 2.5 GT/s";
else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
spd = " 5 GT/s";
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
bufp += sprintf(bufp, "100/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
bufp += sprintf(bufp, "1000/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
bufp += sprintf(bufp, "10G/");
if (bufp != buf)
--bufp;
sprintf(bufp, "BASE-%s", base[pi->port_type]);
netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
adap->params.vpd.id,
CHELSIO_CHIP_RELEASE(adap->params.rev), buf,
is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
netdev_info(dev, "S/N: %s, E/C: %s\n",
adap->params.vpd.sn, adap->params.vpd.ec);
}
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
/*
* Free the following resources:
* - memory used for tables
* - MSI/MSI-X
* - net devices
* - resources FW is holding for us
*/
static void free_some_resources(struct adapter *adapter)
{
unsigned int i;
t4_free_mem(adapter->l2t);
t4_free_mem(adapter->tids.tid_tab);
disable_msi(adapter);
for_each_port(adapter, i)
if (adapter->port[i]) {
kfree(adap2pinfo(adapter, i)->rss);
free_netdev(adapter->port[i]);
}
if (adapter->flags & FW_OK)
t4_fw_bye(adapter, adapter->fn);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int func, i, err, s_qpp, qpp, num_seg;
struct port_info *pi;
bool highdma = false;
struct adapter *adapter = NULL;
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
err = pci_request_regions(pdev, KBUILD_MODNAME);
if (err) {
/* Just info, some other driver may have claimed the device. */
dev_info(&pdev->dev, "cannot obtain PCI resources\n");
return err;
}
/* We control everything through one PF */
func = PCI_FUNC(pdev->devfn);
if (func != ent->driver_data) {
pci_save_state(pdev); /* to restore SR-IOV later */
goto sriov;
}
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "cannot enable PCI device\n");
goto out_release_regions;
}
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
highdma = true;
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
"coherent allocations\n");
goto out_disable_device;
}
} else {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
goto out_disable_device;
}
}
pci_enable_pcie_error_reporting(pdev);
enable_pcie_relaxed_ordering(pdev);
pci_set_master(pdev);
pci_save_state(pdev);
adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
if (!adapter) {
err = -ENOMEM;
goto out_disable_device;
}
adapter->regs = pci_ioremap_bar(pdev, 0);
if (!adapter->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
err = -ENOMEM;
goto out_free_adapter;
}
adapter->pdev = pdev;
adapter->pdev_dev = &pdev->dev;
adapter->mbox = func;
adapter->fn = func;
adapter->msg_enable = dflt_msg_enable;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->tid_release_lock);
INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
INIT_WORK(&adapter->db_full_task, process_db_full);
INIT_WORK(&adapter->db_drop_task, process_db_drop);
err = t4_prep_adapter(adapter);
if (err)
goto out_unmap_bar0;
if (!is_t4(adapter->chip)) {
s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
num_seg = PAGE_SIZE / SEGMENT_SIZE;
/* Each segment is 128B in size. Write coalescing is enabled
* only when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value
* for the queue is less than the number of segments that can
* be accommodated in a page.
*/
if (qpp > num_seg) {
dev_err(&pdev->dev,
"Incorrect number of egress queues per page\n");
err = -EINVAL;
goto out_unmap_bar0;
}
adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
pci_resource_len(pdev, 2));
if (!adapter->bar2) {
dev_err(&pdev->dev, "cannot map device bar2 region\n");
err = -ENOMEM;
goto out_unmap_bar0;
}
}
setup_memwin(adapter);
err = adap_init0(adapter);
setup_memwin_rdma(adapter);
if (err)
goto out_unmap_bar;
for_each_port(adapter, i) {
struct net_device *netdev;
netdev = alloc_etherdev_mq(sizeof(struct port_info),
MAX_ETH_QSETS);
if (!netdev) {
err = -ENOMEM;
goto out_free_dev;
}
SET_NETDEV_DEV(netdev, &pdev->dev);
adapter->port[i] = netdev;
pi = netdev_priv(netdev);
pi->adapter = adapter;
pi->xact_addr_filt = -1;
pi->port_id = i;
netdev->irq = pdev->irq;
netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_RXHASH |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (highdma)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
netdev->vlan_features = netdev->features & VLAN_FEAT;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->netdev_ops = &cxgb4_netdev_ops;
SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
}
pci_set_drvdata(pdev, adapter);
if (adapter->flags & FW_OK) {
err = t4_port_init(adapter, func, func, 0);
if (err)
goto out_free_dev;
}
/*
* Configure queues and allocate tables now, they can be needed as
* soon as the first register_netdev completes.
*/
cfg_queues(adapter);
adapter->l2t = t4_init_l2t();
if (!adapter->l2t) {
/* We tolerate a lack of L2T, giving up some functionality */
dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
adapter->params.offload = 0;
}
if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
dev_warn(&pdev->dev, "could not allocate TID table, "
"continuing\n");
adapter->params.offload = 0;
}
/* See what interrupts we'll be using */
if (msi > 1 && enable_msix(adapter) == 0)
adapter->flags |= USING_MSIX;
else if (msi > 0 && pci_enable_msi(pdev) == 0)
adapter->flags |= USING_MSI;
err = init_rss(adapter);
if (err)
goto out_free_dev;
/*
* The card is now ready to go. If any errors occur during device
* registration we do not fail the whole card but rather proceed only
* with the ports we manage to register successfully. However we must
* register at least one net device.
*/
for_each_port(adapter, i) {
pi = adap2pinfo(adapter, i);
netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
err = register_netdev(adapter->port[i]);
if (err)
break;
adapter->chan_map[pi->tx_chan] = i;
print_port_info(adapter->port[i]);
}
if (i == 0) {
dev_err(&pdev->dev, "could not register any net devices\n");
goto out_free_dev;
}
if (err) {
dev_warn(&pdev->dev, "only %d net devices registered\n", i);
err = 0;
}
if (cxgb4_debugfs_root) {
adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
cxgb4_debugfs_root);
setup_debugfs(adapter);
}
/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
pdev->needs_freset = 1;
if (is_offload(adapter))
attach_ulds(adapter);
sriov:
#ifdef CONFIG_PCI_IOV
if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
if (pci_enable_sriov(pdev, num_vf[func]) == 0)
dev_info(&pdev->dev,
"instantiated %u virtual functions\n",
num_vf[func]);
#endif
return 0;
out_free_dev:
free_some_resources(adapter);
out_unmap_bar:
if (!is_t4(adapter->chip))
iounmap(adapter->bar2);
out_unmap_bar0:
iounmap(adapter->regs);
out_free_adapter:
kfree(adapter);
out_disable_device:
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
out_release_regions:
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
return err;
}
static void remove_one(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
#ifdef CONFIG_PCI_IOV
pci_disable_sriov(pdev);
#endif
if (adapter) {
int i;
if (is_offload(adapter))
detach_ulds(adapter);
for_each_port(adapter, i)
if (adapter->port[i]->reg_state == NETREG_REGISTERED)
unregister_netdev(adapter->port[i]);
if (adapter->debugfs_root)
debugfs_remove_recursive(adapter->debugfs_root);
/* If we allocated filters, free up state associated with any
* valid filters ...
*/
if (adapter->tids.ftid_tab) {
struct filter_entry *f = &adapter->tids.ftid_tab[0];
for (i = 0; i < (adapter->tids.nftids +
adapter->tids.nsftids); i++, f++)
if (f->valid)
clear_filter(adapter, f);
}
if (adapter->flags & FULL_INIT_DONE)
cxgb_down(adapter);
free_some_resources(adapter);
iounmap(adapter->regs);
if (!is_t4(adapter->chip))
iounmap(adapter->bar2);
kfree(adapter);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
} else
pci_release_regions(pdev);
}
static struct pci_driver cxgb4_driver = {
.name = KBUILD_MODNAME,
.id_table = cxgb4_pci_tbl,
.probe = init_one,
.remove = remove_one,
.err_handler = &cxgb4_eeh,
};
static int __init cxgb4_init_module(void)
{
int ret;
workq = create_singlethread_workqueue("cxgb4");
if (!workq)
return -ENOMEM;
/* Debugfs support is optional, just warn if this fails */
cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
if (!cxgb4_debugfs_root)
pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4_driver);
if (ret < 0)
debugfs_remove(cxgb4_debugfs_root);
return ret;
}
static void __exit cxgb4_cleanup_module(void)
{
pci_unregister_driver(&cxgb4_driver);
debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
flush_workqueue(workq);
destroy_workqueue(workq);
}
module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);
| gpl-2.0 |
gq213/linux-3.10.72 | drivers/gpu/drm/exynos/exynos_drm_gem.c | 2074 | 18637 | /* exynos_drm_gem.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <drm/drmP.h>
#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
static unsigned int convert_to_vm_err_msg(int msg)
{
unsigned int out_msg;
switch (msg) {
case 0:
case -ERESTARTSYS:
case -EINTR:
out_msg = VM_FAULT_NOPAGE;
break;
case -ENOMEM:
out_msg = VM_FAULT_OOM;
break;
default:
out_msg = VM_FAULT_SIGBUS;
break;
}
return out_msg;
}
static int check_gem_flags(unsigned int flags)
{
if (flags & ~(EXYNOS_BO_MASK)) {
DRM_ERROR("invalid flags.\n");
return -EINVAL;
}
return 0;
}
static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
struct vm_area_struct *vma)
{
DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
/* non-cachable as default. */
if (obj->flags & EXYNOS_BO_CACHABLE)
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
else if (obj->flags & EXYNOS_BO_WC)
vma->vm_page_prot =
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
else
vma->vm_page_prot =
pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
/* TODO */
return roundup(size, PAGE_SIZE);
}
static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
struct vm_area_struct *vma,
unsigned long f_vaddr,
pgoff_t page_offset)
{
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
struct scatterlist *sgl;
unsigned long pfn;
int i;
if (!buf->sgt)
return -EINTR;
if (page_offset >= (buf->size >> PAGE_SHIFT)) {
DRM_ERROR("invalid page offset\n");
return -EINVAL;
}
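/* Walk the scatterlist until page_offset falls inside the segment
* backing the faulting page, then resolve that page to a pfn.
*/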
sgl = buf->sgt->sgl;
for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
if (page_offset < (sgl->length >> PAGE_SHIFT))
break;
page_offset -= (sgl->length >> PAGE_SHIFT);
}
pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
return vm_insert_mixed(vma, f_vaddr, pfn);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
struct drm_file *file_priv,
unsigned int *handle)
{
int ret;
/*
* allocate an id in the idr table where the obj is registered,
* and the handle carries the id the user can see.
*/
ret = drm_gem_handle_create(file_priv, obj, handle);
if (ret)
return ret;
DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
/* drop reference from allocate - handle holds it now. */
drm_gem_object_unreference_unlocked(obj);
return 0;
}
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
struct drm_gem_object *obj;
struct exynos_drm_gem_buf *buf;
DRM_DEBUG_KMS("%s\n", __FILE__);
obj = &exynos_gem_obj->base;
buf = exynos_gem_obj->buffer;
DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
/*
* do not release memory region from exporter.
*
* the region will be released by exporter
* once dmabuf's refcount becomes 0.
*/
if (obj->import_attach)
goto out;
exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
out:
exynos_drm_fini_buf(obj->dev, buf);
exynos_gem_obj->buffer = NULL;
if (obj->map_list.map)
drm_gem_free_mmap_offset(obj);
/* release file pointer to gem object. */
drm_gem_object_release(obj);
kfree(exynos_gem_obj);
exynos_gem_obj = NULL;
}
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *file_priv)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return 0;
}
exynos_gem_obj = to_exynos_gem_obj(obj);
drm_gem_object_unreference_unlocked(obj);
return exynos_gem_obj->buffer->size;
}
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
int ret;
exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
if (!exynos_gem_obj) {
DRM_ERROR("failed to allocate exynos gem object\n");
return NULL;
}
exynos_gem_obj->size = size;
obj = &exynos_gem_obj->base;
ret = drm_gem_object_init(dev, obj, size);
if (ret < 0) {
DRM_ERROR("failed to initialize gem object\n");
kfree(exynos_gem_obj);
return NULL;
}
DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
return exynos_gem_obj;
}
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
unsigned int flags,
unsigned long size)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_gem_buf *buf;
int ret;
if (!size) {
DRM_ERROR("invalid size.\n");
return ERR_PTR(-EINVAL);
}
size = roundup_gem_size(size, flags);
DRM_DEBUG_KMS("%s\n", __FILE__);
ret = check_gem_flags(flags);
if (ret)
return ERR_PTR(ret);
buf = exynos_drm_init_buf(dev, size);
if (!buf)
return ERR_PTR(-ENOMEM);
exynos_gem_obj = exynos_drm_gem_init(dev, size);
if (!exynos_gem_obj) {
ret = -ENOMEM;
goto err_fini_buf;
}
exynos_gem_obj->buffer = buf;
/* set memory type and cache attribute from user side. */
exynos_gem_obj->flags = flags;
ret = exynos_drm_alloc_buf(dev, buf, flags);
if (ret < 0) {
drm_gem_object_release(&exynos_gem_obj->base);
goto err_fini_buf;
}
return exynos_gem_obj;
err_fini_buf:
exynos_drm_fini_buf(dev, buf);
return ERR_PTR(ret);
}
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_exynos_gem_create *args = data;
struct exynos_drm_gem_obj *exynos_gem_obj;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
&args->handle);
if (ret) {
exynos_drm_gem_destroy(exynos_gem_obj);
return ret;
}
return 0;
}
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *filp)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(dev, filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return ERR_PTR(-EINVAL);
}
exynos_gem_obj = to_exynos_gem_obj(obj);
return &exynos_gem_obj->buffer->dma_addr;
}
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *filp)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(dev, filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return;
}
exynos_gem_obj = to_exynos_gem_obj(obj);
drm_gem_object_unreference_unlocked(obj);
/*
* decrease obj->refcount one more time because we have already
* increased it in exynos_drm_gem_get_dma_addr().
*/
drm_gem_object_unreference_unlocked(obj);
}
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_exynos_gem_map_off *args = data;
DRM_DEBUG_KMS("%s\n", __FILE__);
DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
args->handle, (unsigned long)args->offset);
if (!(dev->driver->driver_features & DRIVER_GEM)) {
DRM_ERROR("does not support GEM.\n");
return -ENODEV;
}
return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
&args->offset);
}
static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
struct file *filp)
{
struct drm_file *file_priv;
/* find current process's drm_file from filelist. */
list_for_each_entry(file_priv, &drm_dev->filelist, lhead)
if (file_priv->filp == filp)
return file_priv;
WARN_ON(1);
return ERR_PTR(-EFAULT);
}
static int exynos_drm_gem_mmap_buffer(struct file *filp,
struct vm_area_struct *vma)
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct drm_device *drm_dev = obj->dev;
struct exynos_drm_gem_buf *buffer;
struct drm_file *file_priv;
unsigned long vm_size;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = obj;
vma->vm_ops = drm_dev->driver->gem_vm_ops;
/* restore it to driver's fops. */
filp->f_op = fops_get(drm_dev->driver->fops);
file_priv = exynos_drm_find_drm_file(drm_dev, filp);
if (IS_ERR(file_priv))
return PTR_ERR(file_priv);
/* restore it to drm_file. */
filp->private_data = file_priv;
update_vm_cache_attr(exynos_gem_obj, vma);
vm_size = vma->vm_end - vma->vm_start;
/*
* a buffer holds information about physically contiguous memory
* allocated by user request or at framebuffer creation.
*/
buffer = exynos_gem_obj->buffer;
/* check if user-requested size is valid. */
if (vm_size > buffer->size)
return -EINVAL;
ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
buffer->dma_addr, buffer->size,
&buffer->dma_attrs);
if (ret < 0) {
DRM_ERROR("failed to mmap.\n");
return ret;
}
/*
* take a reference to this mapping of the object; the reference
* is dropped by the corresponding vm_close call.
*/
drm_gem_object_reference(obj);
drm_vm_open_locked(drm_dev, vma);
return 0;
}
static const struct file_operations exynos_drm_gem_fops = {
.mmap = exynos_drm_gem_mmap_buffer,
};
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_exynos_gem_mmap *args = data;
struct drm_gem_object *obj;
unsigned int addr;
DRM_DEBUG_KMS("%s\n", __FILE__);
if (!(dev->driver->driver_features & DRIVER_GEM)) {
DRM_ERROR("does not support GEM.\n");
return -ENODEV;
}
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return -EINVAL;
}
/*
* We have to use gem object and its fops for specific mmaper,
* but vm_mmap() can deliver only filp. So we have to change
* filp->f_op and filp->private_data temporarily, then restore
* again. So it is important to hold the lock until the settings are
* restored, to prevent others from misusing filp->f_op or
* filp->private_data.
*/
mutex_lock(&dev->struct_mutex);
/*
* Set the specific mmaper's fops; it will be restored to
* dev->driver->fops by exynos_drm_gem_mmap_buffer().
* This is used to call the specific mapper temporarily.
*/
file_priv->filp->f_op = &exynos_drm_gem_fops;
/*
* Set gem object to private_data so that specific mmaper
* can get the gem object. And it will be restored by
* exynos_drm_gem_mmap_buffer to drm_file.
*/
file_priv->filp->private_data = obj;
addr = vm_mmap(file_priv->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED, 0);
drm_gem_object_unreference(obj);
if (IS_ERR((void *)addr)) {
/* check filp->f_op, filp->private_data are restored */
if (file_priv->filp->f_op == &exynos_drm_gem_fops) {
file_priv->filp->f_op = fops_get(dev->driver->fops);
file_priv->filp->private_data = file_priv;
}
mutex_unlock(&dev->struct_mutex);
return PTR_ERR((void *)addr);
}
mutex_unlock(&dev->struct_mutex);
args->mapped = addr;
DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
return 0;
}
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_exynos_gem_info *args = data;
struct drm_gem_object *obj;
mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
exynos_gem_obj = to_exynos_gem_obj(obj);
args->flags = exynos_gem_obj->flags;
args->size = exynos_gem_obj->size;
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return 0;
}
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
struct vm_area_struct *vma_copy;
vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
if (!vma_copy)
return NULL;
if (vma->vm_ops && vma->vm_ops->open)
vma->vm_ops->open(vma);
if (vma->vm_file)
get_file(vma->vm_file);
memcpy(vma_copy, vma, sizeof(*vma));
vma_copy->vm_mm = NULL;
vma_copy->vm_next = NULL;
vma_copy->vm_prev = NULL;
return vma_copy;
}
void exynos_gem_put_vma(struct vm_area_struct *vma)
{
if (!vma)
return;
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
if (vma->vm_file)
fput(vma->vm_file);
kfree(vma);
}
int exynos_gem_get_pages_from_userptr(unsigned long start,
unsigned int npages,
struct page **pages,
struct vm_area_struct *vma)
{
int get_npages;
/* the memory region mmaped with VM_PFNMAP. */
if (vma_is_io(vma)) {
unsigned int i;
for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
unsigned long pfn;
int ret = follow_pfn(vma, start, &pfn);
if (ret)
return ret;
pages[i] = pfn_to_page(pfn);
}
if (i != npages) {
DRM_ERROR("failed to get user_pages.\n");
return -EINVAL;
}
return 0;
}
get_npages = get_user_pages(current, current->mm, start,
npages, 1, 1, pages, NULL);
get_npages = max(get_npages, 0);
if (get_npages != npages) {
DRM_ERROR("failed to get user_pages.\n");
while (get_npages)
put_page(pages[--get_npages]);
return -EFAULT;
}
return 0;
}
void exynos_gem_put_pages_to_userptr(struct page **pages,
unsigned int npages,
struct vm_area_struct *vma)
{
if (!vma_is_io(vma)) {
unsigned int i;
for (i = 0; i < npages; i++) {
set_page_dirty_lock(pages[i]);
/*
* undo the reference we took when populating
* the table.
*/
put_page(pages[i]);
}
}
}
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
struct sg_table *sgt,
enum dma_data_direction dir)
{
int nents;
mutex_lock(&drm_dev->struct_mutex);
nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
if (!nents) {
DRM_ERROR("failed to map sgl with dma.\n");
mutex_unlock(&drm_dev->struct_mutex);
return nents;
}
mutex_unlock(&drm_dev->struct_mutex);
return 0;
}
void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
struct sg_table *sgt,
enum dma_data_direction dir)
{
dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
return 0;
}
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_gem_buf *buf;
DRM_DEBUG_KMS("%s\n", __FILE__);
exynos_gem_obj = to_exynos_gem_obj(obj);
buf = exynos_gem_obj->buffer;
if (obj->import_attach)
drm_prime_gem_destroy(obj, buf->sgt);
exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
/*
* allocate memory to be used for framebuffer.
* - this callback would be called by user application
* with DRM_IOCTL_MODE_CREATE_DUMB command.
*/
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
EXYNOS_BO_WC, args->size);
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
&args->handle);
if (ret) {
exynos_drm_gem_destroy(exynos_gem_obj);
return ret;
}
return 0;
}
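/*
 * Worked example for the size computation above (illustrative): a
 * 1920x1080 XRGB8888 surface has bpp = 32, so
 * pitch = 1920 * ((32 + 7) / 8) = 7680 bytes
 * size = 7680 * 1080 = 8294400 bytes (~7.9 MiB)
 * The (bpp + 7) / 8 rounding also keeps sub-byte depths (e.g. bpp = 1)
 * from yielding a zero pitch.
 */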
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset)
{
struct drm_gem_object *obj;
int ret = 0;
DRM_DEBUG_KMS("%s\n", __FILE__);
mutex_lock(&dev->struct_mutex);
/*
* get offset of memory allocated for drm framebuffer.
* - this callback would be called by user application
* with DRM_IOCTL_MODE_MAP_DUMB command.
*/
obj = drm_gem_object_lookup(dev, file_priv, handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
ret = -EINVAL;
goto unlock;
}
if (!obj->map_list.map) {
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
}
*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
out:
drm_gem_object_unreference(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
unsigned int handle)
{
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
/*
* obj->refcount and obj->handle_count are decreased and
* if both of them are 0 then exynos_drm_gem_free_object()
* would be called by callback to release resources.
*/
ret = drm_gem_handle_delete(file_priv, handle);
if (ret < 0) {
DRM_ERROR("failed to delete drm_gem_handle.\n");
return ret;
}
return 0;
}
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_device *dev = obj->dev;
unsigned long f_vaddr;
pgoff_t page_offset;
int ret;
page_offset = ((unsigned long)vmf->virtual_address -
vma->vm_start) >> PAGE_SHIFT;
f_vaddr = (unsigned long)vmf->virtual_address;
mutex_lock(&dev->struct_mutex);
ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
if (ret < 0)
DRM_ERROR("failed to map a buffer with user.\n");
mutex_unlock(&dev->struct_mutex);
return convert_to_vm_err_msg(ret);
}
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
/* set vm_area_struct. */
ret = drm_gem_mmap(filp, vma);
if (ret < 0) {
DRM_ERROR("failed to mmap.\n");
return ret;
}
obj = vma->vm_private_data;
exynos_gem_obj = to_exynos_gem_obj(obj);
ret = check_gem_flags(exynos_gem_obj->flags);
if (ret) {
drm_gem_vm_close(vma);
drm_gem_free_mmap_offset(obj);
return ret;
}
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
update_vm_cache_attr(exynos_gem_obj, vma);
return ret;
}
| gpl-2.0 |
SM-G920P/TeamSPR | net/netfilter/nfnetlink_acct.c | 2586 | 8875 | /*
* (C) 2011 Pablo Neira Ayuso <pablo@netfilter.org>
* (C) 2011 Intra2net AG <http://www.intra2net.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation (or any later at your option).
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/atomic.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_acct.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("nfacct: Extended Netfilter accounting infrastructure");
static LIST_HEAD(nfnl_acct_list);
struct nf_acct {
atomic64_t pkts;
atomic64_t bytes;
struct list_head head;
atomic_t refcnt;
char name[NFACCT_NAME_MAX];
struct rcu_head rcu_head;
};
static int
nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
const struct nlmsghdr *nlh, const struct nlattr * const tb[])
{
struct nf_acct *nfacct, *matching = NULL;
char *acct_name;
if (!tb[NFACCT_NAME])
return -EINVAL;
acct_name = nla_data(tb[NFACCT_NAME]);
if (strlen(acct_name) == 0)
return -EINVAL;
list_for_each_entry(nfacct, &nfnl_acct_list, head) {
if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0)
continue;
if (nlh->nlmsg_flags & NLM_F_EXCL)
return -EEXIST;
matching = nfacct;
break;
}
if (matching) {
if (nlh->nlmsg_flags & NLM_F_REPLACE) {
/* reset counters if you request a replacement. */
atomic64_set(&matching->pkts, 0);
atomic64_set(&matching->bytes, 0);
return 0;
}
return -EBUSY;
}
nfacct = kzalloc(sizeof(struct nf_acct), GFP_KERNEL);
if (nfacct == NULL)
return -ENOMEM;
strncpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX);
if (tb[NFACCT_BYTES]) {
atomic64_set(&nfacct->bytes,
be64_to_cpu(nla_get_be64(tb[NFACCT_BYTES])));
}
if (tb[NFACCT_PKTS]) {
atomic64_set(&nfacct->pkts,
be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS])));
}
atomic_set(&nfacct->refcnt, 1);
list_add_tail_rcu(&nfacct->head, &nfnl_acct_list);
return 0;
}
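/*
 * Illustrative mapping (assumption, based on the libnetfilter_acct
 * userspace tooling): "nfacct add myquota" sends NFNL_MSG_ACCT_NEW with an
 * NFACCT_NAME attribute of "myquota". Re-sending it for an existing name
 * without NLM_F_REPLACE hits the -EBUSY path above, and with NLM_F_EXCL it
 * returns -EEXIST.
 */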
static int
nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
int event, struct nf_acct *acct)
{
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
unsigned int flags = portid ? NLM_F_MULTI : 0;
u64 pkts, bytes;
event |= NFNL_SUBSYS_ACCT << 8;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
if (nlh == NULL)
goto nlmsg_failure;
nfmsg = nlmsg_data(nlh);
nfmsg->nfgen_family = AF_UNSPEC;
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = 0;
if (nla_put_string(skb, NFACCT_NAME, acct->name))
goto nla_put_failure;
if (type == NFNL_MSG_ACCT_GET_CTRZERO) {
pkts = atomic64_xchg(&acct->pkts, 0);
bytes = atomic64_xchg(&acct->bytes, 0);
} else {
pkts = atomic64_read(&acct->pkts);
bytes = atomic64_read(&acct->bytes);
}
if (nla_put_be64(skb, NFACCT_PKTS, cpu_to_be64(pkts)) ||
nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes)) ||
nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt))))
goto nla_put_failure;
nlmsg_end(skb, nlh);
return skb->len;
nlmsg_failure:
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -1;
}
static int
nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nf_acct *cur, *last;
if (cb->args[2])
return 0;
last = (struct nf_acct *)cb->args[1];
if (cb->args[1])
cb->args[1] = 0;
rcu_read_lock();
list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
if (last) {
if (cur != last)
continue;
last = NULL;
}
if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
NFNL_MSG_ACCT_NEW, cur) < 0) {
cb->args[1] = (unsigned long)cur;
break;
}
}
if (!cb->args[1])
cb->args[2] = 1;
rcu_read_unlock();
return skb->len;
}
static int
nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
const struct nlmsghdr *nlh, const struct nlattr * const tb[])
{
int ret = -ENOENT;
struct nf_acct *cur;
char *acct_name;
if (nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = nfnl_acct_dump,
};
return netlink_dump_start(nfnl, skb, nlh, &c);
}
if (!tb[NFACCT_NAME])
return -EINVAL;
acct_name = nla_data(tb[NFACCT_NAME]);
list_for_each_entry(cur, &nfnl_acct_list, head) {
struct sk_buff *skb2;
if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
continue;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (skb2 == NULL) {
ret = -ENOMEM;
break;
}
ret = nfnl_acct_fill_info(skb2, NETLINK_CB(skb).portid,
nlh->nlmsg_seq,
NFNL_MSG_TYPE(nlh->nlmsg_type),
NFNL_MSG_ACCT_NEW, cur);
if (ret <= 0) {
kfree_skb(skb2);
break;
}
ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
MSG_DONTWAIT);
if (ret > 0)
ret = 0;
/* this avoids a loop in nfnetlink. */
return ret == -EAGAIN ? -ENOBUFS : ret;
}
return ret;
}
/* try to delete object, fail if it is still in use. */
static int nfnl_acct_try_del(struct nf_acct *cur)
{
int ret = 0;
/* we want to avoid races with nfnl_acct_find_get. */
if (atomic_dec_and_test(&cur->refcnt)) {
/* We are protected by nfnl mutex. */
list_del_rcu(&cur->head);
kfree_rcu(cur, rcu_head);
} else {
/* still in use, restore reference counter. */
atomic_inc(&cur->refcnt);
ret = -EBUSY;
}
return ret;
}
static int
nfnl_acct_del(struct sock *nfnl, struct sk_buff *skb,
const struct nlmsghdr *nlh, const struct nlattr * const tb[])
{
char *acct_name;
struct nf_acct *cur;
int ret = -ENOENT;
if (!tb[NFACCT_NAME]) {
list_for_each_entry(cur, &nfnl_acct_list, head)
nfnl_acct_try_del(cur);
return 0;
}
acct_name = nla_data(tb[NFACCT_NAME]);
list_for_each_entry(cur, &nfnl_acct_list, head) {
if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
continue;
ret = nfnl_acct_try_del(cur);
if (ret < 0)
return ret;
break;
}
return ret;
}
static const struct nla_policy nfnl_acct_policy[NFACCT_MAX+1] = {
[NFACCT_NAME] = { .type = NLA_NUL_STRING, .len = NFACCT_NAME_MAX-1 },
[NFACCT_BYTES] = { .type = NLA_U64 },
[NFACCT_PKTS] = { .type = NLA_U64 },
};
static const struct nfnl_callback nfnl_acct_cb[NFNL_MSG_ACCT_MAX] = {
[NFNL_MSG_ACCT_NEW] = { .call = nfnl_acct_new,
.attr_count = NFACCT_MAX,
.policy = nfnl_acct_policy },
[NFNL_MSG_ACCT_GET] = { .call = nfnl_acct_get,
.attr_count = NFACCT_MAX,
.policy = nfnl_acct_policy },
[NFNL_MSG_ACCT_GET_CTRZERO] = { .call = nfnl_acct_get,
.attr_count = NFACCT_MAX,
.policy = nfnl_acct_policy },
[NFNL_MSG_ACCT_DEL] = { .call = nfnl_acct_del,
.attr_count = NFACCT_MAX,
.policy = nfnl_acct_policy },
};
static const struct nfnetlink_subsystem nfnl_acct_subsys = {
.name = "acct",
.subsys_id = NFNL_SUBSYS_ACCT,
.cb_count = NFNL_MSG_ACCT_MAX,
.cb = nfnl_acct_cb,
};
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ACCT);
struct nf_acct *nfnl_acct_find_get(const char *acct_name)
{
struct nf_acct *cur, *acct = NULL;
rcu_read_lock();
list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
continue;
if (!try_module_get(THIS_MODULE))
goto err;
if (!atomic_inc_not_zero(&cur->refcnt)) {
module_put(THIS_MODULE);
goto err;
}
acct = cur;
break;
}
err:
rcu_read_unlock();
return acct;
}
EXPORT_SYMBOL_GPL(nfnl_acct_find_get);
void nfnl_acct_put(struct nf_acct *acct)
{
atomic_dec(&acct->refcnt);
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(nfnl_acct_put);
void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct)
{
atomic64_inc(&nfacct->pkts);
atomic64_add(skb->len, &nfacct->bytes);
}
EXPORT_SYMBOL_GPL(nfnl_acct_update);
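/*
 * Illustrative sketch (assumed caller, not part of this file): a netfilter
 * match such as xt_nfacct would use the exported API roughly like this:
 *
 * struct nf_acct *acct = nfnl_acct_find_get("myquota");
 * if (acct == NULL)
 * return -ENOENT;
 * ...
 * nfnl_acct_update(skb, acct); (per packet, from the hook)
 * ...
 * nfnl_acct_put(acct); (when the rule goes away)
 *
 * nfnl_acct_find_get() pins both the object (refcnt) and this module, so a
 * matching nfnl_acct_put() must always follow.
 */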
static int __init nfnl_acct_init(void)
{
int ret;
pr_info("nfnl_acct: registering with nfnetlink.\n");
ret = nfnetlink_subsys_register(&nfnl_acct_subsys);
if (ret < 0) {
pr_err("nfnl_acct_init: cannot register with nfnetlink.\n");
goto err_out;
}
return 0;
err_out:
return ret;
}
static void __exit nfnl_acct_exit(void)
{
struct nf_acct *cur, *tmp;
pr_info("nfnl_acct: unregistering from nfnetlink.\n");
nfnetlink_subsys_unregister(&nfnl_acct_subsys);
list_for_each_entry_safe(cur, tmp, &nfnl_acct_list, head) {
list_del_rcu(&cur->head);
/* We are sure that our objects have no clients at this point,
* it's safe to release them all without checking refcnt. */
kfree_rcu(cur, rcu_head);
}
}
module_init(nfnl_acct_init);
module_exit(nfnl_acct_exit);
| gpl-2.0 |
djmax81/android_kernel_samsung_exynos5433_LL | arch/mips/alchemy/common/platform.c | 2586 | 12047 | /*
* Platform device support for Au1x00 SoCs.
*
* Copyright 2004, Matt Porter <mporter@kernel.crashing.org>
*
* (C) Copyright Embedded Alley Solutions, Inc 2005
* Author: Pantelis Antoniou <pantelis@embeddedalley.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/slab.h>
#include <linux/usb/ehci_pdriver.h>
#include <linux/usb/ohci_pdriver.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1100_mmc.h>
#include <asm/mach-au1x00/au1xxx_eth.h>
#include <prom.h>
static void alchemy_8250_pm(struct uart_port *port, unsigned int state,
unsigned int old_state)
{
#ifdef CONFIG_SERIAL_8250
switch (state) {
case 0:
alchemy_uart_enable(CPHYSADDR(port->membase));
serial8250_do_pm(port, state, old_state);
break;
case 3: /* power off */
serial8250_do_pm(port, state, old_state);
alchemy_uart_disable(CPHYSADDR(port->membase));
break;
default:
serial8250_do_pm(port, state, old_state);
break;
}
#endif
}
#define PORT(_base, _irq) \
{ \
.mapbase = _base, \
.irq = _irq, \
.regshift = 2, \
.iotype = UPIO_AU, \
.flags = UPF_SKIP_TEST | UPF_IOREMAP | \
UPF_FIXED_TYPE, \
.type = PORT_16550A, \
.pm = alchemy_8250_pm, \
}
static struct plat_serial8250_port au1x00_uart_data[][4] __initdata = {
[ALCHEMY_CPU_AU1000] = {
PORT(AU1000_UART0_PHYS_ADDR, AU1000_UART0_INT),
PORT(AU1000_UART1_PHYS_ADDR, AU1000_UART1_INT),
PORT(AU1000_UART2_PHYS_ADDR, AU1000_UART2_INT),
PORT(AU1000_UART3_PHYS_ADDR, AU1000_UART3_INT),
},
[ALCHEMY_CPU_AU1500] = {
PORT(AU1000_UART0_PHYS_ADDR, AU1500_UART0_INT),
PORT(AU1000_UART3_PHYS_ADDR, AU1500_UART3_INT),
},
[ALCHEMY_CPU_AU1100] = {
PORT(AU1000_UART0_PHYS_ADDR, AU1100_UART0_INT),
PORT(AU1000_UART1_PHYS_ADDR, AU1100_UART1_INT),
PORT(AU1000_UART3_PHYS_ADDR, AU1100_UART3_INT),
},
[ALCHEMY_CPU_AU1550] = {
PORT(AU1000_UART0_PHYS_ADDR, AU1550_UART0_INT),
PORT(AU1000_UART1_PHYS_ADDR, AU1550_UART1_INT),
PORT(AU1000_UART3_PHYS_ADDR, AU1550_UART3_INT),
},
[ALCHEMY_CPU_AU1200] = {
PORT(AU1000_UART0_PHYS_ADDR, AU1200_UART0_INT),
PORT(AU1000_UART1_PHYS_ADDR, AU1200_UART1_INT),
},
[ALCHEMY_CPU_AU1300] = {
PORT(AU1300_UART0_PHYS_ADDR, AU1300_UART0_INT),
PORT(AU1300_UART1_PHYS_ADDR, AU1300_UART1_INT),
PORT(AU1300_UART2_PHYS_ADDR, AU1300_UART2_INT),
PORT(AU1300_UART3_PHYS_ADDR, AU1300_UART3_INT),
},
};
static struct platform_device au1xx0_uart_device = {
.name = "serial8250",
.id = PLAT8250_DEV_AU1X00,
};
static void __init alchemy_setup_uarts(int ctype)
{
unsigned int uartclk = get_au1x00_uart_baud_base() * 16;
int s = sizeof(struct plat_serial8250_port);
int c = alchemy_get_uarts(ctype);
struct plat_serial8250_port *ports;
ports = kzalloc(s * (c + 1), GFP_KERNEL);
if (!ports) {
printk(KERN_INFO "Alchemy: no memory for UART data\n");
return;
}
memcpy(ports, au1x00_uart_data[ctype], s * c);
au1xx0_uart_device.dev.platform_data = ports;
/* Fill up uartclk. */
for (s = 0; s < c; s++)
ports[s].uartclk = uartclk;
if (platform_device_register(&au1xx0_uart_device))
printk(KERN_INFO "Alchemy: failed to register UARTs\n");
}
/* The dmamask must be set for OHCI/EHCI to work */
static u64 alchemy_ohci_dmamask = DMA_BIT_MASK(32);
static u64 __maybe_unused alchemy_ehci_dmamask = DMA_BIT_MASK(32);
/* Power on callback for the ehci platform driver */
static int alchemy_ehci_power_on(struct platform_device *pdev)
{
return alchemy_usb_control(ALCHEMY_USB_EHCI0, 1);
}
/* Power off/suspend callback for the ehci platform driver */
static void alchemy_ehci_power_off(struct platform_device *pdev)
{
alchemy_usb_control(ALCHEMY_USB_EHCI0, 0);
}
static struct usb_ehci_pdata alchemy_ehci_pdata = {
.no_io_watchdog = 1,
.power_on = alchemy_ehci_power_on,
.power_off = alchemy_ehci_power_off,
.power_suspend = alchemy_ehci_power_off,
};
/* Power on callback for the ohci platform driver */
static int alchemy_ohci_power_on(struct platform_device *pdev)
{
int unit;
unit = (pdev->id == 1) ?
ALCHEMY_USB_OHCI1 : ALCHEMY_USB_OHCI0;
return alchemy_usb_control(unit, 1);
}
/* Power off/suspend callback for the ohci platform driver */
static void alchemy_ohci_power_off(struct platform_device *pdev)
{
int unit;
unit = (pdev->id == 1) ?
ALCHEMY_USB_OHCI1 : ALCHEMY_USB_OHCI0;
alchemy_usb_control(unit, 0);
}
static struct usb_ohci_pdata alchemy_ohci_pdata = {
.power_on = alchemy_ohci_power_on,
.power_off = alchemy_ohci_power_off,
.power_suspend = alchemy_ohci_power_off,
};
static unsigned long alchemy_ohci_data[][2] __initdata = {
[ALCHEMY_CPU_AU1000] = { AU1000_USB_OHCI_PHYS_ADDR, AU1000_USB_HOST_INT },
[ALCHEMY_CPU_AU1500] = { AU1000_USB_OHCI_PHYS_ADDR, AU1500_USB_HOST_INT },
[ALCHEMY_CPU_AU1100] = { AU1000_USB_OHCI_PHYS_ADDR, AU1100_USB_HOST_INT },
[ALCHEMY_CPU_AU1550] = { AU1550_USB_OHCI_PHYS_ADDR, AU1550_USB_HOST_INT },
[ALCHEMY_CPU_AU1200] = { AU1200_USB_OHCI_PHYS_ADDR, AU1200_USB_INT },
[ALCHEMY_CPU_AU1300] = { AU1300_USB_OHCI0_PHYS_ADDR, AU1300_USB_INT },
};
static unsigned long alchemy_ehci_data[][2] __initdata = {
[ALCHEMY_CPU_AU1200] = { AU1200_USB_EHCI_PHYS_ADDR, AU1200_USB_INT },
[ALCHEMY_CPU_AU1300] = { AU1300_USB_EHCI_PHYS_ADDR, AU1300_USB_INT },
};
static int __init _new_usbres(struct resource **r, struct platform_device **d)
{
*r = kzalloc(sizeof(struct resource) * 2, GFP_KERNEL);
if (!*r)
return -ENOMEM;
*d = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
if (!*d) {
kfree(*r);
return -ENOMEM;
}
(*d)->dev.coherent_dma_mask = DMA_BIT_MASK(32);
(*d)->num_resources = 2;
(*d)->resource = *r;
return 0;
}
static void __init alchemy_setup_usb(int ctype)
{
struct resource *res;
struct platform_device *pdev;
/* setup OHCI0. Every variant has one */
if (_new_usbres(&res, &pdev))
return;
res[0].start = alchemy_ohci_data[ctype][0];
res[0].end = res[0].start + 0x100 - 1;
res[0].flags = IORESOURCE_MEM;
res[1].start = alchemy_ohci_data[ctype][1];
res[1].end = res[1].start;
res[1].flags = IORESOURCE_IRQ;
pdev->name = "ohci-platform";
pdev->id = 0;
pdev->dev.dma_mask = &alchemy_ohci_dmamask;
pdev->dev.platform_data = &alchemy_ohci_pdata;
if (platform_device_register(pdev))
printk(KERN_INFO "Alchemy USB: cannot add OHCI0\n");
/* setup EHCI0: Au1200/Au1300 */
if ((ctype == ALCHEMY_CPU_AU1200) || (ctype == ALCHEMY_CPU_AU1300)) {
if (_new_usbres(&res, &pdev))
return;
res[0].start = alchemy_ehci_data[ctype][0];
res[0].end = res[0].start + 0x100 - 1;
res[0].flags = IORESOURCE_MEM;
res[1].start = alchemy_ehci_data[ctype][1];
res[1].end = res[1].start;
res[1].flags = IORESOURCE_IRQ;
pdev->name = "ehci-platform";
pdev->id = 0;
pdev->dev.dma_mask = &alchemy_ehci_dmamask;
pdev->dev.platform_data = &alchemy_ehci_pdata;
if (platform_device_register(pdev))
printk(KERN_INFO "Alchemy USB: cannot add EHCI0\n");
}
/* Au1300: OHCI1 */
if (ctype == ALCHEMY_CPU_AU1300) {
if (_new_usbres(&res, &pdev))
return;
res[0].start = AU1300_USB_OHCI1_PHYS_ADDR;
res[0].end = res[0].start + 0x100 - 1;
res[0].flags = IORESOURCE_MEM;
res[1].start = AU1300_USB_INT;
res[1].end = res[1].start;
res[1].flags = IORESOURCE_IRQ;
pdev->name = "ohci-platform";
pdev->id = 1;
pdev->dev.dma_mask = &alchemy_ohci_dmamask;
pdev->dev.platform_data = &alchemy_ohci_pdata;
if (platform_device_register(pdev))
printk(KERN_INFO "Alchemy USB: cannot add OHCI1\n");
}
}
/* Macro to help defining the Ethernet MAC resources */
#define MAC_RES_COUNT 4 /* MAC regs, MAC en, MAC INT, MACDMA regs */
#define MAC_RES(_base, _enable, _irq, _macdma) \
{ \
.start = _base, \
.end = _base + 0xffff, \
.flags = IORESOURCE_MEM, \
}, \
{ \
.start = _enable, \
.end = _enable + 0x3, \
.flags = IORESOURCE_MEM, \
}, \
{ \
.start = _irq, \
.end = _irq, \
.flags = IORESOURCE_IRQ \
}, \
{ \
.start = _macdma, \
.end = _macdma + 0x1ff, \
.flags = IORESOURCE_MEM, \
}
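/*
 * Illustrative note (assumption about the consumer): MAC_RES() lays the
 * four resources out in a fixed order, so the au1000-eth driver can fetch
 * them by index, e.g.:
 *
 * struct resource *base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 * struct resource *macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 * int irq = platform_get_irq(pdev, 0);
 * struct resource *macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2);
 */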
static struct resource au1xxx_eth0_resources[][MAC_RES_COUNT] __initdata = {
[ALCHEMY_CPU_AU1000] = {
MAC_RES(AU1000_MAC0_PHYS_ADDR,
AU1000_MACEN_PHYS_ADDR,
AU1000_MAC0_DMA_INT,
AU1000_MACDMA0_PHYS_ADDR)
},
[ALCHEMY_CPU_AU1500] = {
MAC_RES(AU1500_MAC0_PHYS_ADDR,
AU1500_MACEN_PHYS_ADDR,
AU1500_MAC0_DMA_INT,
AU1000_MACDMA0_PHYS_ADDR)
},
[ALCHEMY_CPU_AU1100] = {
MAC_RES(AU1000_MAC0_PHYS_ADDR,
AU1000_MACEN_PHYS_ADDR,
AU1100_MAC0_DMA_INT,
AU1000_MACDMA0_PHYS_ADDR)
},
[ALCHEMY_CPU_AU1550] = {
MAC_RES(AU1000_MAC0_PHYS_ADDR,
AU1000_MACEN_PHYS_ADDR,
AU1550_MAC0_DMA_INT,
AU1000_MACDMA0_PHYS_ADDR)
},
};
static struct au1000_eth_platform_data au1xxx_eth0_platform_data = {
.phy1_search_mac0 = 1,
};
static struct platform_device au1xxx_eth0_device = {
.name = "au1000-eth",
.id = 0,
.num_resources = MAC_RES_COUNT,
.dev.platform_data = &au1xxx_eth0_platform_data,
};
static struct resource au1xxx_eth1_resources[][MAC_RES_COUNT] __initdata = {
[ALCHEMY_CPU_AU1000] = {
MAC_RES(AU1000_MAC1_PHYS_ADDR,
AU1000_MACEN_PHYS_ADDR + 4,
AU1000_MAC1_DMA_INT,
AU1000_MACDMA1_PHYS_ADDR)
},
[ALCHEMY_CPU_AU1500] = {
MAC_RES(AU1500_MAC1_PHYS_ADDR,
AU1500_MACEN_PHYS_ADDR + 4,
AU1500_MAC1_DMA_INT,
AU1000_MACDMA1_PHYS_ADDR)
},
[ALCHEMY_CPU_AU1550] = {
MAC_RES(AU1000_MAC1_PHYS_ADDR,
AU1000_MACEN_PHYS_ADDR + 4,
AU1550_MAC1_DMA_INT,
AU1000_MACDMA1_PHYS_ADDR)
},
};
static struct au1000_eth_platform_data au1xxx_eth1_platform_data = {
.phy1_search_mac0 = 1,
};
static struct platform_device au1xxx_eth1_device = {
.name = "au1000-eth",
.id = 1,
.num_resources = MAC_RES_COUNT,
.dev.platform_data = &au1xxx_eth1_platform_data,
};
void __init au1xxx_override_eth_cfg(unsigned int port,
struct au1000_eth_platform_data *eth_data)
{
if (!eth_data || port > 1)
return;
if (port == 0)
memcpy(&au1xxx_eth0_platform_data, eth_data,
sizeof(struct au1000_eth_platform_data));
else
memcpy(&au1xxx_eth1_platform_data, eth_data,
sizeof(struct au1000_eth_platform_data));
}
static void __init alchemy_setup_macs(int ctype)
{
int ret, i;
unsigned char ethaddr[6];
struct resource *macres;
/* Handle 1st MAC */
if (alchemy_get_macs(ctype) < 1)
return;
macres = kmemdup(au1xxx_eth0_resources[ctype],
sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL);
if (!macres) {
printk(KERN_INFO "Alchemy: no memory for MAC0 resources\n");
return;
}
au1xxx_eth0_device.resource = macres;
i = prom_get_ethernet_addr(ethaddr);
if (!i && !is_valid_ether_addr(au1xxx_eth0_platform_data.mac))
memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6);
ret = platform_device_register(&au1xxx_eth0_device);
if (ret)
printk(KERN_INFO "Alchemy: failed to register MAC0\n");
/* Handle 2nd MAC */
if (alchemy_get_macs(ctype) < 2)
return;
macres = kmemdup(au1xxx_eth1_resources[ctype],
sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL);
if (!macres) {
printk(KERN_INFO "Alchemy: no memory for MAC1 resources\n");
return;
}
au1xxx_eth1_device.resource = macres;
if (!i) {
ethaddr[5] += 1; /* next addr for 2nd MAC */
if (!is_valid_ether_addr(au1xxx_eth1_platform_data.mac))
memcpy(au1xxx_eth1_platform_data.mac, ethaddr, 6);
}
/* Register second MAC if enabled in pinfunc */
if (!(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2)) {
ret = platform_device_register(&au1xxx_eth1_device);
if (ret)
printk(KERN_INFO "Alchemy: failed to register MAC1\n");
}
}
static int __init au1xxx_platform_init(void)
{
int ctype = alchemy_get_cputype();
alchemy_setup_uarts(ctype);
alchemy_setup_macs(ctype);
alchemy_setup_usb(ctype);
return 0;
}
arch_initcall(au1xxx_platform_init);
| gpl-2.0 |
abyssxsy/linux-tk1 | drivers/isdn/hisax/sedlbauer.c | 4634 | 24914 | /* $Id: sedlbauer.c,v 1.34.2.6 2004/01/24 20:47:24 keil Exp $
*
* low level stuff for Sedlbauer cards
* includes support for the Sedlbauer speed star (speed star II),
* support for the Sedlbauer speed fax+,
* support for the Sedlbauer ISDN-Controller PC/104 and
* support for the Sedlbauer speed pci
* derived from the original file asuscom.c from Karsten Keil
*
* Author Marcus Niemann
* Copyright by Marcus Niemann <niemann@www-bib.fh-bielefeld.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* Thanks to Karsten Keil
* Sedlbauer AG for information
* Edgar Toernig
*
*/
/* Supported cards:
* Card: Chip: Configuration: Comment:
* ---------------------------------------------------------------------
* Speed Card ISAC_HSCX DIP-SWITCH
* Speed Win ISAC_HSCX ISAPNP
* Speed Fax+ ISAC_ISAR ISAPNP Full analog support
* Speed Star ISAC_HSCX CARDMGR
* Speed Win2 IPAC ISAPNP
* ISDN PC/104 IPAC DIP-SWITCH
* Speed Star2 IPAC CARDMGR
* Speed PCI IPAC PCI PNP
* Speed Fax+ ISAC_ISAR PCI PNP Full analog support
*
* Important:
* For the sedlbauer speed fax+ to work properly you have to download
* the firmware onto the card.
* For example: hisaxctrl <DriverID> 9 ISAR.BIN
*/
#include <linux/init.h>
#include "hisax.h"
#include "isac.h"
#include "ipac.h"
#include "hscx.h"
#include "isar.h"
#include "isdnl1.h"
#include <linux/pci.h>
#include <linux/isapnp.h>
static const char *Sedlbauer_revision = "$Revision: 1.34.2.6 $";
static const char *Sedlbauer_Types[] =
{"None", "speed card/win", "speed star", "speed fax+",
"speed win II / ISDN PC/104", "speed star II", "speed pci",
"speed fax+ pyramid", "speed fax+ pci", "HST Saphir III"};
#define PCI_SUBVENDOR_SPEEDFAX_PYRAMID 0x51
#define PCI_SUBVENDOR_HST_SAPHIR3 0x52
#define PCI_SUBVENDOR_SEDLBAUER_PCI 0x53
#define PCI_SUBVENDOR_SPEEDFAX_PCI 0x54
#define PCI_SUB_ID_SEDLBAUER 0x01
#define SEDL_SPEED_CARD_WIN 1
#define SEDL_SPEED_STAR 2
#define SEDL_SPEED_FAX 3
#define SEDL_SPEED_WIN2_PC104 4
#define SEDL_SPEED_STAR2 5
#define SEDL_SPEED_PCI 6
#define SEDL_SPEEDFAX_PYRAMID 7
#define SEDL_SPEEDFAX_PCI 8
#define HST_SAPHIR3 9
#define SEDL_CHIP_TEST 0
#define SEDL_CHIP_ISAC_HSCX 1
#define SEDL_CHIP_ISAC_ISAR 2
#define SEDL_CHIP_IPAC 3
#define SEDL_BUS_ISA 1
#define SEDL_BUS_PCI 2
#define SEDL_BUS_PCMCIA 3
#define byteout(addr, val) outb(val, addr)
#define bytein(addr) inb(addr)
#define SEDL_HSCX_ISA_RESET_ON 0
#define SEDL_HSCX_ISA_RESET_OFF 1
#define SEDL_HSCX_ISA_ISAC 2
#define SEDL_HSCX_ISA_HSCX 3
#define SEDL_HSCX_ISA_ADR 4
#define SEDL_HSCX_PCMCIA_RESET 0
#define SEDL_HSCX_PCMCIA_ISAC 1
#define SEDL_HSCX_PCMCIA_HSCX 2
#define SEDL_HSCX_PCMCIA_ADR 4
#define SEDL_ISAR_ISA_ISAC 4
#define SEDL_ISAR_ISA_ISAR 6
#define SEDL_ISAR_ISA_ADR 8
#define SEDL_ISAR_ISA_ISAR_RESET_ON 10
#define SEDL_ISAR_ISA_ISAR_RESET_OFF 12
#define SEDL_IPAC_ANY_ADR 0
#define SEDL_IPAC_ANY_IPAC 2
#define SEDL_IPAC_PCI_BASE 0
#define SEDL_IPAC_PCI_ADR 0xc0
#define SEDL_IPAC_PCI_IPAC 0xc8
#define SEDL_ISAR_PCI_ADR 0xc8
#define SEDL_ISAR_PCI_ISAC 0xd0
#define SEDL_ISAR_PCI_ISAR 0xe0
#define SEDL_ISAR_PCI_ISAR_RESET_ON 0x01
#define SEDL_ISAR_PCI_ISAR_RESET_OFF 0x18
#define SEDL_ISAR_PCI_LED1 0x08
#define SEDL_ISAR_PCI_LED2 0x10
#define SEDL_RESET 0x3 /* same as DOS driver */
static inline u_char
readreg(unsigned int ale, unsigned int adr, u_char off)
{
register u_char ret;
byteout(ale, off);
ret = bytein(adr);
return (ret);
}
static inline void
readfifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
{
byteout(ale, off);
insb(adr, data, size);
}
static inline void
writereg(unsigned int ale, unsigned int adr, u_char off, u_char data)
{
byteout(ale, off);
byteout(adr, data);
}
static inline void
writefifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
{
byteout(ale, off);
outsb(adr, data, size);
}
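/*
 * Illustrative note: the four helpers above implement the indirect (ALE)
 * access scheme of these cards - the register offset is first latched by
 * writing it to the address port, then the data port is read or written.
 * Reading the ISAC interrupt status, for example,
 *
 * val = readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, ISAC_ISTA);
 *
 * expands to byteout(adr, ISAC_ISTA) followed by bytein(isac).
 */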
/* Interface functions */
static u_char
ReadISAC(struct IsdnCardState *cs, u_char offset)
{
return (readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, offset));
}
static void
WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
{
writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, offset, value);
}
static void
ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
{
readfifo(cs->hw.sedl.adr, cs->hw.sedl.isac, 0, data, size);
}
static void
WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
{
writefifo(cs->hw.sedl.adr, cs->hw.sedl.isac, 0, data, size);
}
static u_char
ReadISAC_IPAC(struct IsdnCardState *cs, u_char offset)
{
return (readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, offset | 0x80));
}
static void
WriteISAC_IPAC(struct IsdnCardState *cs, u_char offset, u_char value)
{
writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, offset | 0x80, value);
}
static void
ReadISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size)
{
readfifo(cs->hw.sedl.adr, cs->hw.sedl.isac, 0x80, data, size);
}
static void
WriteISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size)
{
writefifo(cs->hw.sedl.adr, cs->hw.sedl.isac, 0x80, data, size);
}
static u_char
ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
{
return (readreg(cs->hw.sedl.adr,
cs->hw.sedl.hscx, offset + (hscx ? 0x40 : 0)));
}
static void
WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
{
writereg(cs->hw.sedl.adr,
cs->hw.sedl.hscx, offset + (hscx ? 0x40 : 0), value);
}
/* ISAR access routines
* mode = 0 access with IRQ on
* mode = 1 access with IRQ off
* mode = 2 access with IRQ off and using last offset
*/
static u_char
ReadISAR(struct IsdnCardState *cs, int mode, u_char offset)
{
if (mode == 0)
return (readreg(cs->hw.sedl.adr, cs->hw.sedl.hscx, offset));
else if (mode == 1)
byteout(cs->hw.sedl.adr, offset);
return (bytein(cs->hw.sedl.hscx));
}
static void
WriteISAR(struct IsdnCardState *cs, int mode, u_char offset, u_char value)
{
if (mode == 0)
writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx, offset, value);
else {
if (mode == 1)
byteout(cs->hw.sedl.adr, offset);
byteout(cs->hw.sedl.hscx, value);
}
}
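/*
 * Illustrative example of the access modes described above: with IRQs
 * already masked, the offset can be latched once in mode 1 and then reused
 * in mode 2 for repeated accesses (the offset argument is ignored there):
 *
 * val = ReadISAR(cs, 1, ISAR_IRQBIT);
 * val = ReadISAR(cs, 2, 0);
 */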
/*
* fast interrupt HSCX stuff goes here
*/
#define READHSCX(cs, nr, reg) readreg(cs->hw.sedl.adr, \
cs->hw.sedl.hscx, reg + (nr ? 0x40 : 0))
#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.sedl.adr, \
cs->hw.sedl.hscx, reg + (nr ? 0x40 : 0), data)
#define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.sedl.adr, \
cs->hw.sedl.hscx, (nr ? 0x40 : 0), ptr, cnt)
#define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.sedl.adr, \
cs->hw.sedl.hscx, (nr ? 0x40 : 0), ptr, cnt)
#include "hscx_irq.c"
static irqreturn_t
sedlbauer_interrupt(int intno, void *dev_id)
{
struct IsdnCardState *cs = dev_id;
u_char val;
u_long flags;
spin_lock_irqsave(&cs->lock, flags);
if ((cs->hw.sedl.bus == SEDL_BUS_PCMCIA) && (*cs->busy_flag == 1)) {
/* The card tends to generate interrupts while being removed,
causing the kernel to crash. Bail out if the card is gone. */
spin_unlock_irqrestore(&cs->lock, flags);
printk(KERN_WARNING "Sedlbauer: card not available!\n");
return IRQ_NONE;
}
val = readreg(cs->hw.sedl.adr, cs->hw.sedl.hscx, HSCX_ISTA + 0x40);
Start_HSCX:
if (val)
hscx_int_main(cs, val);
val = readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, ISAC_ISTA);
Start_ISAC:
if (val)
isac_interrupt(cs, val);
val = readreg(cs->hw.sedl.adr, cs->hw.sedl.hscx, HSCX_ISTA + 0x40);
if (val) {
if (cs->debug & L1_DEB_HSCX)
debugl1(cs, "HSCX IntStat after IntRoutine");
goto Start_HSCX;
}
val = readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, ISAC_ISTA);
if (val) {
if (cs->debug & L1_DEB_ISAC)
debugl1(cs, "ISAC IntStat after IntRoutine");
goto Start_ISAC;
}
writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx, HSCX_MASK, 0xFF);
writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx, HSCX_MASK + 0x40, 0xFF);
writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, ISAC_MASK, 0xFF);
writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, ISAC_MASK, 0x0);
writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx, HSCX_MASK, 0x0);
writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx, HSCX_MASK + 0x40, 0x0);
spin_unlock_irqrestore(&cs->lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t
sedlbauer_interrupt_ipac(int intno, void *dev_id)
{
struct IsdnCardState *cs = dev_id;
u_char ista, val, icnt = 5;
u_long flags;
spin_lock_irqsave(&cs->lock, flags);
ista = readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_ISTA);
Start_IPAC:
if (cs->debug & L1_DEB_IPAC)
debugl1(cs, "IPAC ISTA %02X", ista);
if (ista & 0x0f) {
val = readreg(cs->hw.sedl.adr, cs->hw.sedl.hscx, HSCX_ISTA + 0x40);
if (ista & 0x01)
val |= 0x01;
if (ista & 0x04)
val |= 0x02;
if (ista & 0x08)
val |= 0x04;
if (val)
hscx_int_main(cs, val);
}
if (ista & 0x20) {
val = 0xfe & readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, ISAC_ISTA | 0x80);
if (val) {
isac_interrupt(cs, val);
}
}
if (ista & 0x10) {
val = 0x01;
isac_interrupt(cs, val);
}
ista = readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_ISTA);
if ((ista & 0x3f) && icnt) {
icnt--;
goto Start_IPAC;
}
if (!icnt)
if (cs->debug & L1_DEB_ISAC)
debugl1(cs, "Sedlbauer IRQ LOOP");
writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_MASK, 0xFF);
writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_MASK, 0xC0);
spin_unlock_irqrestore(&cs->lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t
sedlbauer_interrupt_isar(int intno, void *dev_id)
{
struct IsdnCardState *cs = dev_id;
u_char val;
int cnt = 5;
u_long flags;
spin_lock_irqsave(&cs->lock, flags);
val = readreg(cs->hw.sedl.adr, cs->hw.sedl.hscx, ISAR_IRQBIT);
Start_ISAR:
if (val & ISAR_IRQSTA)
isar_int_main(cs);
val = readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, ISAC_ISTA);
Start_ISAC:
if (val)
isac_interrupt(cs, val);
val = readreg(cs->hw.sedl.adr, cs->hw.sedl.hscx, ISAR_IRQBIT);
if ((val & ISAR_IRQSTA) && --cnt) {
if (cs->debug & L1_DEB_HSCX)
debugl1(cs, "ISAR IntStat after IntRoutine");
goto Start_ISAR;
}
val = readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, ISAC_ISTA);
if (val && --cnt) {
if (cs->debug & L1_DEB_ISAC)
debugl1(cs, "ISAC IntStat after IntRoutine");
goto Start_ISAC;
}
if (!cnt)
if (cs->debug & L1_DEB_ISAC)
debugl1(cs, "Sedlbauer IRQ LOOP");
writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx, ISAR_IRQBIT, 0);
writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, ISAC_MASK, 0xFF);
writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, ISAC_MASK, 0x0);
writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx, ISAR_IRQBIT, ISAR_IRQMSK);
spin_unlock_irqrestore(&cs->lock, flags);
return IRQ_HANDLED;
}
static void
release_io_sedlbauer(struct IsdnCardState *cs)
{
int bytecnt = 8;
if (cs->subtyp == SEDL_SPEED_FAX) {
bytecnt = 16;
} else if (cs->hw.sedl.bus == SEDL_BUS_PCI) {
bytecnt = 256;
}
if (cs->hw.sedl.cfg_reg)
release_region(cs->hw.sedl.cfg_reg, bytecnt);
}
static void
reset_sedlbauer(struct IsdnCardState *cs)
{
printk(KERN_INFO "Sedlbauer: resetting card\n");
if (!((cs->hw.sedl.bus == SEDL_BUS_PCMCIA) &&
(cs->hw.sedl.chip == SEDL_CHIP_ISAC_HSCX))) {
if (cs->hw.sedl.chip == SEDL_CHIP_IPAC) {
writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_POTA2, 0x20);
mdelay(2);
writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_POTA2, 0x0);
mdelay(10);
writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_CONF, 0x0);
writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_ACFG, 0xff);
writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_AOE, 0x0);
writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_MASK, 0xc0);
writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_PCFG, 0x12);
} else if ((cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) &&
(cs->hw.sedl.bus == SEDL_BUS_PCI)) {
byteout(cs->hw.sedl.cfg_reg + 3, cs->hw.sedl.reset_on);
mdelay(2);
byteout(cs->hw.sedl.cfg_reg + 3, cs->hw.sedl.reset_off);
mdelay(10);
} else {
byteout(cs->hw.sedl.reset_on, SEDL_RESET); /* Reset On */
mdelay(2);
byteout(cs->hw.sedl.reset_off, 0); /* Reset Off */
mdelay(10);
}
}
}
static int
Sedl_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
u_long flags;
switch (mt) {
case CARD_RESET:
spin_lock_irqsave(&cs->lock, flags);
reset_sedlbauer(cs);
spin_unlock_irqrestore(&cs->lock, flags);
return (0);
case CARD_RELEASE:
if (cs->hw.sedl.bus == SEDL_BUS_PCI)
/* disable all IRQ */
byteout(cs->hw.sedl.cfg_reg + 5, 0);
if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
spin_lock_irqsave(&cs->lock, flags);
writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx,
ISAR_IRQBIT, 0);
writereg(cs->hw.sedl.adr, cs->hw.sedl.isac,
ISAC_MASK, 0xFF);
reset_sedlbauer(cs);
writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx,
ISAR_IRQBIT, 0);
writereg(cs->hw.sedl.adr, cs->hw.sedl.isac,
ISAC_MASK, 0xFF);
spin_unlock_irqrestore(&cs->lock, flags);
}
release_io_sedlbauer(cs);
return (0);
case CARD_INIT:
spin_lock_irqsave(&cs->lock, flags);
if (cs->hw.sedl.bus == SEDL_BUS_PCI)
/* enable all IRQ */
byteout(cs->hw.sedl.cfg_reg + 5, 0x02);
reset_sedlbauer(cs);
if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
clear_pending_isac_ints(cs);
writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx,
ISAR_IRQBIT, 0);
initisac(cs);
initisar(cs);
/* Reenable all IRQ */
cs->writeisac(cs, ISAC_MASK, 0);
/* RESET Receiver and Transmitter */
cs->writeisac(cs, ISAC_CMDR, 0x41);
} else {
inithscxisac(cs, 3);
}
spin_unlock_irqrestore(&cs->lock, flags);
return (0);
case CARD_TEST:
return (0);
case MDL_INFO_CONN:
if (cs->subtyp != SEDL_SPEEDFAX_PYRAMID)
return (0);
spin_lock_irqsave(&cs->lock, flags);
if ((long) arg)
cs->hw.sedl.reset_off &= ~SEDL_ISAR_PCI_LED2;
else
cs->hw.sedl.reset_off &= ~SEDL_ISAR_PCI_LED1;
byteout(cs->hw.sedl.cfg_reg + 3, cs->hw.sedl.reset_off);
spin_unlock_irqrestore(&cs->lock, flags);
break;
case MDL_INFO_REL:
if (cs->subtyp != SEDL_SPEEDFAX_PYRAMID)
return (0);
spin_lock_irqsave(&cs->lock, flags);
if ((long) arg)
cs->hw.sedl.reset_off |= SEDL_ISAR_PCI_LED2;
else
cs->hw.sedl.reset_off |= SEDL_ISAR_PCI_LED1;
byteout(cs->hw.sedl.cfg_reg + 3, cs->hw.sedl.reset_off);
spin_unlock_irqrestore(&cs->lock, flags);
break;
}
return (0);
}
#ifdef __ISAPNP__
static struct isapnp_device_id sedl_ids[] = {
{ ISAPNP_VENDOR('S', 'A', 'G'), ISAPNP_FUNCTION(0x01),
ISAPNP_VENDOR('S', 'A', 'G'), ISAPNP_FUNCTION(0x01),
(unsigned long) "Speed win" },
{ ISAPNP_VENDOR('S', 'A', 'G'), ISAPNP_FUNCTION(0x02),
ISAPNP_VENDOR('S', 'A', 'G'), ISAPNP_FUNCTION(0x02),
(unsigned long) "Speed Fax+" },
{ 0, }
};
static struct isapnp_device_id *ipid = &sedl_ids[0];
static struct pnp_card *pnp_c = NULL;
static int setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
{
struct IsdnCardState *cs = card->cs;
struct pnp_dev *pnp_d;
if (!isapnp_present())
return -1;
while (ipid->card_vendor) {
if ((pnp_c = pnp_find_card(ipid->card_vendor,
ipid->card_device, pnp_c))) {
pnp_d = NULL;
if ((pnp_d = pnp_find_dev(pnp_c,
ipid->vendor, ipid->function, pnp_d))) {
int err;
printk(KERN_INFO "HiSax: %s detected\n",
(char *)ipid->driver_data);
pnp_disable_dev(pnp_d);
err = pnp_activate_dev(pnp_d);
if (err < 0) {
printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
__func__, err);
return (0);
}
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
if (!card->para[0] || !card->para[1]) {
printk(KERN_ERR "Sedlbauer PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);
return (0);
}
cs->hw.sedl.cfg_reg = card->para[1];
cs->irq = card->para[0];
if (ipid->function == ISAPNP_FUNCTION(0x2)) {
cs->subtyp = SEDL_SPEED_FAX;
cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
*bytecnt = 16;
} else {
cs->subtyp = SEDL_SPEED_CARD_WIN;
cs->hw.sedl.chip = SEDL_CHIP_TEST;
}
return (1);
} else {
printk(KERN_ERR "Sedlbauer PnP: PnP error card found, no device\n");
return (0);
}
}
ipid++;
pnp_c = NULL;
}
printk(KERN_INFO "Sedlbauer PnP: no ISAPnP card found\n");
return -1;
}
#else
static int setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
{
return -1;
}
#endif /* __ISAPNP__ */
#ifdef CONFIG_PCI
static struct pci_dev *dev_sedl = NULL;
static int setup_sedlbauer_pci(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
u16 sub_vendor_id, sub_id;
if ((dev_sedl = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET,
PCI_DEVICE_ID_TIGERJET_100, dev_sedl))) {
if (pci_enable_device(dev_sedl))
return (0);
cs->irq = dev_sedl->irq;
if (!cs->irq) {
printk(KERN_WARNING "Sedlbauer: No IRQ for PCI card found\n");
return (0);
}
cs->hw.sedl.cfg_reg = pci_resource_start(dev_sedl, 0);
} else {
printk(KERN_WARNING "Sedlbauer: No PCI card found\n");
return (0);
}
cs->irq_flags |= IRQF_SHARED;
cs->hw.sedl.bus = SEDL_BUS_PCI;
sub_vendor_id = dev_sedl->subsystem_vendor;
sub_id = dev_sedl->subsystem_device;
printk(KERN_INFO "Sedlbauer: PCI subvendor:%x subid %x\n",
sub_vendor_id, sub_id);
printk(KERN_INFO "Sedlbauer: PCI base adr %#x\n",
cs->hw.sedl.cfg_reg);
if (sub_id != PCI_SUB_ID_SEDLBAUER) {
printk(KERN_ERR "Sedlbauer: unknown sub id %#x\n", sub_id);
return (0);
}
if (sub_vendor_id == PCI_SUBVENDOR_SPEEDFAX_PYRAMID) {
cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
cs->subtyp = SEDL_SPEEDFAX_PYRAMID;
} else if (sub_vendor_id == PCI_SUBVENDOR_SPEEDFAX_PCI) {
cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
cs->subtyp = SEDL_SPEEDFAX_PCI;
} else if (sub_vendor_id == PCI_SUBVENDOR_HST_SAPHIR3) {
cs->hw.sedl.chip = SEDL_CHIP_IPAC;
cs->subtyp = HST_SAPHIR3;
} else if (sub_vendor_id == PCI_SUBVENDOR_SEDLBAUER_PCI) {
cs->hw.sedl.chip = SEDL_CHIP_IPAC;
cs->subtyp = SEDL_SPEED_PCI;
} else {
printk(KERN_ERR "Sedlbauer: unknown sub vendor id %#x\n",
sub_vendor_id);
return (0);
}
cs->hw.sedl.reset_on = SEDL_ISAR_PCI_ISAR_RESET_ON;
cs->hw.sedl.reset_off = SEDL_ISAR_PCI_ISAR_RESET_OFF;
byteout(cs->hw.sedl.cfg_reg, 0xff);
byteout(cs->hw.sedl.cfg_reg, 0x00);
byteout(cs->hw.sedl.cfg_reg + 2, 0xdd);
byteout(cs->hw.sedl.cfg_reg + 5, 0); /* disable all IRQ */
byteout(cs->hw.sedl.cfg_reg + 3, cs->hw.sedl.reset_on);
mdelay(2);
byteout(cs->hw.sedl.cfg_reg + 3, cs->hw.sedl.reset_off);
mdelay(10);
return (1);
}
#else
static int setup_sedlbauer_pci(struct IsdnCard *card)
{
return (1);
}
#endif /* CONFIG_PCI */
int setup_sedlbauer(struct IsdnCard *card)
{
int bytecnt = 8, ver, val, rc;
struct IsdnCardState *cs = card->cs;
char tmp[64];
strcpy(tmp, Sedlbauer_revision);
printk(KERN_INFO "HiSax: Sedlbauer driver Rev. %s\n", HiSax_getrev(tmp));
if (cs->typ == ISDN_CTYPE_SEDLBAUER) {
cs->subtyp = SEDL_SPEED_CARD_WIN;
cs->hw.sedl.bus = SEDL_BUS_ISA;
cs->hw.sedl.chip = SEDL_CHIP_TEST;
} else if (cs->typ == ISDN_CTYPE_SEDLBAUER_PCMCIA) {
cs->subtyp = SEDL_SPEED_STAR;
cs->hw.sedl.bus = SEDL_BUS_PCMCIA;
cs->hw.sedl.chip = SEDL_CHIP_TEST;
} else if (cs->typ == ISDN_CTYPE_SEDLBAUER_FAX) {
cs->subtyp = SEDL_SPEED_FAX;
cs->hw.sedl.bus = SEDL_BUS_ISA;
cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
} else
return (0);
bytecnt = 8;
if (card->para[1]) {
cs->hw.sedl.cfg_reg = card->para[1];
cs->irq = card->para[0];
if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
bytecnt = 16;
}
} else {
rc = setup_sedlbauer_isapnp(card, &bytecnt);
if (!rc)
return (0);
if (rc > 0)
goto ready;
/* Probe for Sedlbauer speed pci */
rc = setup_sedlbauer_pci(card);
if (!rc)
return (0);
bytecnt = 256;
}
ready:
/* In case of the sedlbauer pcmcia card, this region is in use,
* reserved for us by the card manager. So we do not check it
* here, it would fail.
*/
if (cs->hw.sedl.bus != SEDL_BUS_PCMCIA &&
!request_region(cs->hw.sedl.cfg_reg, bytecnt, "sedlbauer isdn")) {
printk(KERN_WARNING
"HiSax: %s config port %x-%x already in use\n",
CardType[card->typ],
cs->hw.sedl.cfg_reg,
cs->hw.sedl.cfg_reg + bytecnt);
return (0);
}
printk(KERN_INFO
"Sedlbauer: defined at 0x%x-0x%x IRQ %d\n",
cs->hw.sedl.cfg_reg,
cs->hw.sedl.cfg_reg + bytecnt,
cs->irq);
cs->BC_Read_Reg = &ReadHSCX;
cs->BC_Write_Reg = &WriteHSCX;
cs->BC_Send_Data = &hscx_fill_fifo;
cs->cardmsg = &Sedl_card_msg;
/*
* testing ISA and PCMCIA Cards for IPAC, default is ISAC
* do not test for PCI card, because ports are different
* and PCI card uses only IPAC (for the moment)
*/
if (cs->hw.sedl.bus != SEDL_BUS_PCI) {
val = readreg(cs->hw.sedl.cfg_reg + SEDL_IPAC_ANY_ADR,
cs->hw.sedl.cfg_reg + SEDL_IPAC_ANY_IPAC, IPAC_ID);
printk(KERN_DEBUG "Sedlbauer: testing IPAC version %x\n", val);
if ((val == 1) || (val == 2)) {
/* IPAC */
cs->subtyp = SEDL_SPEED_WIN2_PC104;
if (cs->hw.sedl.bus == SEDL_BUS_PCMCIA) {
cs->subtyp = SEDL_SPEED_STAR2;
}
cs->hw.sedl.chip = SEDL_CHIP_IPAC;
} else {
/* ISAC_HSCX or ISAC_ISAR */
if (cs->hw.sedl.chip == SEDL_CHIP_TEST) {
cs->hw.sedl.chip = SEDL_CHIP_ISAC_HSCX;
}
}
}
/*
* hw.sedl.chip is now properly set
*/
printk(KERN_INFO "Sedlbauer: %s detected\n",
Sedlbauer_Types[cs->subtyp]);
setup_isac(cs);
if (cs->hw.sedl.chip == SEDL_CHIP_IPAC) {
if (cs->hw.sedl.bus == SEDL_BUS_PCI) {
cs->hw.sedl.adr = cs->hw.sedl.cfg_reg + SEDL_IPAC_PCI_ADR;
cs->hw.sedl.isac = cs->hw.sedl.cfg_reg + SEDL_IPAC_PCI_IPAC;
cs->hw.sedl.hscx = cs->hw.sedl.cfg_reg + SEDL_IPAC_PCI_IPAC;
} else {
cs->hw.sedl.adr = cs->hw.sedl.cfg_reg + SEDL_IPAC_ANY_ADR;
cs->hw.sedl.isac = cs->hw.sedl.cfg_reg + SEDL_IPAC_ANY_IPAC;
cs->hw.sedl.hscx = cs->hw.sedl.cfg_reg + SEDL_IPAC_ANY_IPAC;
}
test_and_set_bit(HW_IPAC, &cs->HW_Flags);
cs->readisac = &ReadISAC_IPAC;
cs->writeisac = &WriteISAC_IPAC;
cs->readisacfifo = &ReadISACfifo_IPAC;
cs->writeisacfifo = &WriteISACfifo_IPAC;
cs->irq_func = &sedlbauer_interrupt_ipac;
val = readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_ID);
printk(KERN_INFO "Sedlbauer: IPAC version %x\n", val);
} else {
/* ISAC_HSCX or ISAC_ISAR */
cs->readisac = &ReadISAC;
cs->writeisac = &WriteISAC;
cs->readisacfifo = &ReadISACfifo;
cs->writeisacfifo = &WriteISACfifo;
if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
if (cs->hw.sedl.bus == SEDL_BUS_PCI) {
cs->hw.sedl.adr = cs->hw.sedl.cfg_reg +
SEDL_ISAR_PCI_ADR;
cs->hw.sedl.isac = cs->hw.sedl.cfg_reg +
SEDL_ISAR_PCI_ISAC;
cs->hw.sedl.hscx = cs->hw.sedl.cfg_reg +
SEDL_ISAR_PCI_ISAR;
} else {
cs->hw.sedl.adr = cs->hw.sedl.cfg_reg +
SEDL_ISAR_ISA_ADR;
cs->hw.sedl.isac = cs->hw.sedl.cfg_reg +
SEDL_ISAR_ISA_ISAC;
cs->hw.sedl.hscx = cs->hw.sedl.cfg_reg +
SEDL_ISAR_ISA_ISAR;
cs->hw.sedl.reset_on = cs->hw.sedl.cfg_reg +
SEDL_ISAR_ISA_ISAR_RESET_ON;
cs->hw.sedl.reset_off = cs->hw.sedl.cfg_reg +
SEDL_ISAR_ISA_ISAR_RESET_OFF;
}
cs->bcs[0].hw.isar.reg = &cs->hw.sedl.isar;
cs->bcs[1].hw.isar.reg = &cs->hw.sedl.isar;
test_and_set_bit(HW_ISAR, &cs->HW_Flags);
cs->irq_func = &sedlbauer_interrupt_isar;
cs->auxcmd = &isar_auxcmd;
ISACVersion(cs, "Sedlbauer:");
cs->BC_Read_Reg = &ReadISAR;
cs->BC_Write_Reg = &WriteISAR;
cs->BC_Send_Data = &isar_fill_fifo;
bytecnt = 3;
while (bytecnt) {
ver = ISARVersion(cs, "Sedlbauer:");
if (ver < 0)
printk(KERN_WARNING
"Sedlbauer: wrong ISAR version (ret = %d)\n", ver);
else
break;
reset_sedlbauer(cs);
bytecnt--;
}
if (!bytecnt) {
release_io_sedlbauer(cs);
return (0);
}
} else {
if (cs->hw.sedl.bus == SEDL_BUS_PCMCIA) {
cs->hw.sedl.adr = cs->hw.sedl.cfg_reg + SEDL_HSCX_PCMCIA_ADR;
cs->hw.sedl.isac = cs->hw.sedl.cfg_reg + SEDL_HSCX_PCMCIA_ISAC;
cs->hw.sedl.hscx = cs->hw.sedl.cfg_reg + SEDL_HSCX_PCMCIA_HSCX;
cs->hw.sedl.reset_on = cs->hw.sedl.cfg_reg + SEDL_HSCX_PCMCIA_RESET;
cs->hw.sedl.reset_off = cs->hw.sedl.cfg_reg + SEDL_HSCX_PCMCIA_RESET;
cs->irq_flags |= IRQF_SHARED;
} else {
cs->hw.sedl.adr = cs->hw.sedl.cfg_reg + SEDL_HSCX_ISA_ADR;
cs->hw.sedl.isac = cs->hw.sedl.cfg_reg + SEDL_HSCX_ISA_ISAC;
cs->hw.sedl.hscx = cs->hw.sedl.cfg_reg + SEDL_HSCX_ISA_HSCX;
cs->hw.sedl.reset_on = cs->hw.sedl.cfg_reg + SEDL_HSCX_ISA_RESET_ON;
cs->hw.sedl.reset_off = cs->hw.sedl.cfg_reg + SEDL_HSCX_ISA_RESET_OFF;
}
cs->irq_func = &sedlbauer_interrupt;
ISACVersion(cs, "Sedlbauer:");
if (HscxVersion(cs, "Sedlbauer:")) {
printk(KERN_WARNING
"Sedlbauer: wrong HSCX versions check IO address\n");
release_io_sedlbauer(cs);
return (0);
}
}
}
return (1);
}
| gpl-2.0 |
Jazz-823/kernel_samsung_i9505 | drivers/media/dvb/ttpci/budget-av.c | 4890 | 45686 | /*
* budget-av.c: driver for the SAA7146 based Budget DVB cards
* with analog video in
*
* Compiled from various sources by Michael Hunold <michael@mihu.de>
*
* CI interface support (c) 2004 Olivier Gournet <ogournet@anevia.com> &
* Andrew de Quincey <adq_dvb@lidskialf.net>
*
* Copyright (C) 2002 Ralph Metzler <rjkm@metzlerbros.de>
*
* Copyright (C) 1999-2002 Ralph Metzler
* & Marcus Metzler for convergence integrated media GmbH
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
* Or, point your browser to http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at http://www.linuxtv.org/
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "budget.h"
#include "stv0299.h"
#include "stb0899_drv.h"
#include "stb0899_reg.h"
#include "stb0899_cfg.h"
#include "tda8261.h"
#include "tda8261_cfg.h"
#include "tda1002x.h"
#include "tda1004x.h"
#include "tua6100.h"
#include "dvb-pll.h"
#include <media/saa7146_vv.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/spinlock.h>
#include "dvb_ca_en50221.h"
#define DEBICICAM 0x02420000
#define SLOTSTATUS_NONE 1
#define SLOTSTATUS_PRESENT 2
#define SLOTSTATUS_RESET 4
#define SLOTSTATUS_READY 8
#define SLOTSTATUS_OCCUPIED (SLOTSTATUS_PRESENT|SLOTSTATUS_RESET|SLOTSTATUS_READY)
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct budget_av {
struct budget budget;
struct video_device *vd;
int cur_input;
int has_saa7113;
struct tasklet_struct ciintf_irq_tasklet;
int slot_status;
struct dvb_ca_en50221 ca;
u8 reinitialise_demod:1;
};
static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot);
/* GPIO Connections:
* 0 - Vcc/Reset (Reset is controlled by capacitor). Resets the frontend *AS WELL*!
* 1 - CI memory select 0=>IO memory, 1=>Attribute Memory
* 2 - CI Card Enable (Active Low)
* 3 - CI Card Detect
*/
/****************************************************************************
* INITIALIZATION
****************************************************************************/
static u8 i2c_readreg(struct i2c_adapter *i2c, u8 id, u8 reg)
{
u8 mm1[] = { 0x00 };
u8 mm2[] = { 0x00 };
struct i2c_msg msgs[2];
msgs[0].flags = 0;
msgs[1].flags = I2C_M_RD;
msgs[0].addr = msgs[1].addr = id / 2;
mm1[0] = reg;
msgs[0].len = 1;
msgs[1].len = 1;
msgs[0].buf = mm1;
msgs[1].buf = mm2;
i2c_transfer(i2c, msgs, 2);
return mm2[0];
}
static int i2c_readregs(struct i2c_adapter *i2c, u8 id, u8 reg, u8 * buf, u8 len)
{
u8 mm1[] = { reg };
struct i2c_msg msgs[2] = {
{.addr = id / 2, .flags = 0, .buf = mm1, .len = 1},
{.addr = id / 2, .flags = I2C_M_RD, .buf = buf, .len = len}
};
if (i2c_transfer(i2c, msgs, 2) != 2)
return -EIO;
return 0;
}
static int i2c_writereg(struct i2c_adapter *i2c, u8 id, u8 reg, u8 val)
{
u8 msg[2] = { reg, val };
struct i2c_msg msgs;
msgs.flags = 0;
msgs.addr = id / 2;
msgs.len = 2;
msgs.buf = msg;
return i2c_transfer(i2c, &msgs, 1);
}
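/*
 * Illustrative note: i2c_readreg()/i2c_writereg() take 'id' as the 8-bit
 * (datasheet) bus address and halve it to the 7-bit address the i2c core
 * expects. The saa7113 below is programmed as
 *
 * i2c_writereg(&budget->i2c_adap, 0x4a, 0x01, 0x08);
 *
 * i.e. 8-bit address 0x4a becomes 7-bit address 0x25 on the wire.
 */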
static int ciintf_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address)
{
struct budget_av *budget_av = (struct budget_av *) ca->data;
int result;
if (slot != 0)
return -EINVAL;
saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTHI);
udelay(1);
result = ttpci_budget_debiread(&budget_av->budget, DEBICICAM, address & 0xfff, 1, 0, 1);
if (result == -ETIMEDOUT) {
ciintf_slot_shutdown(ca, slot);
pr_info("cam ejected 1\n");
}
return result;
}
static int ciintf_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value)
{
struct budget_av *budget_av = (struct budget_av *) ca->data;
int result;
if (slot != 0)
return -EINVAL;
saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTHI);
udelay(1);
result = ttpci_budget_debiwrite(&budget_av->budget, DEBICICAM, address & 0xfff, 1, value, 0, 1);
if (result == -ETIMEDOUT) {
ciintf_slot_shutdown(ca, slot);
pr_info("cam ejected 2\n");
}
return result;
}
static int ciintf_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address)
{
struct budget_av *budget_av = (struct budget_av *) ca->data;
int result;
if (slot != 0)
return -EINVAL;
saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTLO);
udelay(1);
result = ttpci_budget_debiread(&budget_av->budget, DEBICICAM, address & 3, 1, 0, 0);
if (result == -ETIMEDOUT) {
ciintf_slot_shutdown(ca, slot);
pr_info("cam ejected 3\n");
return -ETIMEDOUT;
}
return result;
}
static int ciintf_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value)
{
struct budget_av *budget_av = (struct budget_av *) ca->data;
int result;
if (slot != 0)
return -EINVAL;
saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTLO);
udelay(1);
result = ttpci_budget_debiwrite(&budget_av->budget, DEBICICAM, address & 3, 1, value, 0, 0);
if (result == -ETIMEDOUT) {
ciintf_slot_shutdown(ca, slot);
pr_info("cam ejected 5\n");
}
return result;
}
static int ciintf_slot_reset(struct dvb_ca_en50221 *ca, int slot)
{
struct budget_av *budget_av = (struct budget_av *) ca->data;
struct saa7146_dev *saa = budget_av->budget.dev;
if (slot != 0)
return -EINVAL;
dprintk(1, "ciintf_slot_reset\n");
budget_av->slot_status = SLOTSTATUS_RESET;
saa7146_setgpio(saa, 2, SAA7146_GPIO_OUTHI); /* disable card */
saa7146_setgpio(saa, 0, SAA7146_GPIO_OUTHI); /* Vcc off */
msleep(2);
saa7146_setgpio(saa, 0, SAA7146_GPIO_OUTLO); /* Vcc on */
msleep(20); /* 20 ms Vcc settling time */
saa7146_setgpio(saa, 2, SAA7146_GPIO_OUTLO); /* enable card */
ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTB);
msleep(20);
/* reinitialise the frontend if necessary */
if (budget_av->reinitialise_demod)
dvb_frontend_reinitialise(budget_av->budget.dvb_frontend);
return 0;
}
static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
{
struct budget_av *budget_av = (struct budget_av *) ca->data;
struct saa7146_dev *saa = budget_av->budget.dev;
if (slot != 0)
return -EINVAL;
dprintk(1, "ciintf_slot_shutdown\n");
ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTB);
budget_av->slot_status = SLOTSTATUS_NONE;
return 0;
}
static int ciintf_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
{
struct budget_av *budget_av = (struct budget_av *) ca->data;
struct saa7146_dev *saa = budget_av->budget.dev;
if (slot != 0)
return -EINVAL;
dprintk(1, "ciintf_slot_ts_enable: %d\n", budget_av->slot_status);
ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTA);
return 0;
}
static int ciintf_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
{
struct budget_av *budget_av = (struct budget_av *) ca->data;
struct saa7146_dev *saa = budget_av->budget.dev;
int result;
if (slot != 0)
return -EINVAL;
/* test the card detect line - needs to be done carefully
* since it never goes high for some CAMs on this interface (e.g. topuptv) */
if (budget_av->slot_status == SLOTSTATUS_NONE) {
saa7146_setgpio(saa, 3, SAA7146_GPIO_INPUT);
udelay(1);
if (saa7146_read(saa, PSR) & MASK_06) {
if (budget_av->slot_status == SLOTSTATUS_NONE) {
budget_av->slot_status = SLOTSTATUS_PRESENT;
pr_info("cam inserted A\n");
}
}
saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTLO);
}
/* We also try to read from IO memory to work around the above detection bug. If
* there is no CAM, we will get a timeout. Only done if there is no cam
* present, since this test actually breaks some cams :(
*
* if the CI interface is not open, we also do the above test since we
* don't care if the cam has problems - we'll be resetting it on open() anyway */
if ((budget_av->slot_status == SLOTSTATUS_NONE) || (!open)) {
saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTLO);
result = ttpci_budget_debiread(&budget_av->budget, DEBICICAM, 0, 1, 0, 1);
if ((result >= 0) && (budget_av->slot_status == SLOTSTATUS_NONE)) {
budget_av->slot_status = SLOTSTATUS_PRESENT;
pr_info("cam inserted B\n");
} else if (result < 0) {
if (budget_av->slot_status != SLOTSTATUS_NONE) {
ciintf_slot_shutdown(ca, slot);
pr_info("cam ejected 5\n");
return 0;
}
}
}
/* read from attribute memory in reset/ready state to know when the CAM is ready */
if (budget_av->slot_status == SLOTSTATUS_RESET) {
result = ciintf_read_attribute_mem(ca, slot, 0);
if (result == 0x1d) {
budget_av->slot_status = SLOTSTATUS_READY;
}
}
/* work out correct return code */
if (budget_av->slot_status != SLOTSTATUS_NONE) {
if (budget_av->slot_status & SLOTSTATUS_READY) {
return DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY;
}
return DVB_CA_EN50221_POLL_CAM_PRESENT;
}
return 0;
}
static int ciintf_init(struct budget_av *budget_av)
{
struct saa7146_dev *saa = budget_av->budget.dev;
int result;
memset(&budget_av->ca, 0, sizeof(struct dvb_ca_en50221));
saa7146_setgpio(saa, 0, SAA7146_GPIO_OUTLO);
saa7146_setgpio(saa, 1, SAA7146_GPIO_OUTLO);
saa7146_setgpio(saa, 2, SAA7146_GPIO_OUTLO);
saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTLO);
/* Enable DEBI pins */
saa7146_write(saa, MC1, MASK_27 | MASK_11);
/* register CI interface */
budget_av->ca.owner = THIS_MODULE;
budget_av->ca.read_attribute_mem = ciintf_read_attribute_mem;
budget_av->ca.write_attribute_mem = ciintf_write_attribute_mem;
budget_av->ca.read_cam_control = ciintf_read_cam_control;
budget_av->ca.write_cam_control = ciintf_write_cam_control;
budget_av->ca.slot_reset = ciintf_slot_reset;
budget_av->ca.slot_shutdown = ciintf_slot_shutdown;
budget_av->ca.slot_ts_enable = ciintf_slot_ts_enable;
budget_av->ca.poll_slot_status = ciintf_poll_slot_status;
budget_av->ca.data = budget_av;
budget_av->budget.ci_present = 1;
budget_av->slot_status = SLOTSTATUS_NONE;
if ((result = dvb_ca_en50221_init(&budget_av->budget.dvb_adapter,
&budget_av->ca, 0, 1)) != 0) {
pr_err("ci initialisation failed\n");
goto error;
}
pr_info("ci interface initialised\n");
return 0;
error:
saa7146_write(saa, MC1, MASK_27);
return result;
}
static void ciintf_deinit(struct budget_av *budget_av)
{
struct saa7146_dev *saa = budget_av->budget.dev;
saa7146_setgpio(saa, 0, SAA7146_GPIO_INPUT);
saa7146_setgpio(saa, 1, SAA7146_GPIO_INPUT);
saa7146_setgpio(saa, 2, SAA7146_GPIO_INPUT);
saa7146_setgpio(saa, 3, SAA7146_GPIO_INPUT);
/* release the CA device */
dvb_ca_en50221_release(&budget_av->ca);
/* disable DEBI pins */
saa7146_write(saa, MC1, MASK_27);
}
static const u8 saa7113_tab[] = {
0x01, 0x08,
0x02, 0xc0,
0x03, 0x33,
0x04, 0x00,
0x05, 0x00,
0x06, 0xeb,
0x07, 0xe0,
0x08, 0x28,
0x09, 0x00,
0x0a, 0x80,
0x0b, 0x47,
0x0c, 0x40,
0x0d, 0x00,
0x0e, 0x01,
0x0f, 0x44,
0x10, 0x08,
0x11, 0x0c,
0x12, 0x7b,
0x13, 0x00,
0x15, 0x00, 0x16, 0x00, 0x17, 0x00,
0x57, 0xff,
0x40, 0x82, 0x58, 0x00, 0x59, 0x54, 0x5a, 0x07,
0x5b, 0x83, 0x5e, 0x00,
0xff
};
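/*
 * Editor's note: saa7113_tab above is a flat list of (register, value)
 * byte pairs terminated by a single 0xff marker; saa7113_init() below
 * walks it two bytes at a time.
 */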
static int saa7113_init(struct budget_av *budget_av)
{
struct budget *budget = &budget_av->budget;
struct saa7146_dev *saa = budget->dev;
const u8 *data = saa7113_tab;
saa7146_setgpio(saa, 0, SAA7146_GPIO_OUTHI);
msleep(200);
if (i2c_writereg(&budget->i2c_adap, 0x4a, 0x01, 0x08) != 1) {
dprintk(1, "saa7113 not found on KNC card\n");
return -ENODEV;
}
dprintk(1, "saa7113 detected and initializing\n");
while (*data != 0xff) {
i2c_writereg(&budget->i2c_adap, 0x4a, *data, *(data + 1));
data += 2;
}
dprintk(1, "saa7113 status=%02x\n", i2c_readreg(&budget->i2c_adap, 0x4a, 0x1f));
return 0;
}
static int saa7113_setinput(struct budget_av *budget_av, int input)
{
struct budget *budget = &budget_av->budget;
if (1 != budget_av->has_saa7113)
return -ENODEV;
if (input == 1) {
i2c_writereg(&budget->i2c_adap, 0x4a, 0x02, 0xc7);
i2c_writereg(&budget->i2c_adap, 0x4a, 0x09, 0x80);
} else if (input == 0) {
i2c_writereg(&budget->i2c_adap, 0x4a, 0x02, 0xc0);
i2c_writereg(&budget->i2c_adap, 0x4a, 0x09, 0x00);
} else
return -EINVAL;
budget_av->cur_input = input;
return 0;
}
static int philips_su1278_ty_ci_set_symbol_rate(struct dvb_frontend *fe, u32 srate, u32 ratio)
{
u8 aclk = 0;
u8 bclk = 0;
u8 m1;
aclk = 0xb5;
if (srate < 2000000)
bclk = 0x86;
else if (srate < 5000000)
bclk = 0x89;
else if (srate < 15000000)
bclk = 0x8f;
else if (srate < 45000000)
bclk = 0x95;
m1 = 0x14;
if (srate < 4000000)
m1 = 0x10;
stv0299_writereg(fe, 0x13, aclk);
stv0299_writereg(fe, 0x14, bclk);
stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
stv0299_writereg(fe, 0x21, (ratio) & 0xf0);
stv0299_writereg(fe, 0x0f, 0x80 | m1);
return 0;
}
static int philips_su1278_ty_ci_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u32 div;
u8 buf[4];
struct budget *budget = (struct budget *) fe->dvb->priv;
struct i2c_msg msg = {.addr = 0x61,.flags = 0,.buf = buf,.len = sizeof(buf) };
if ((c->frequency < 950000) || (c->frequency > 2150000))
return -EINVAL;
div = (c->frequency + (125 - 1)) / 125; /* round correctly */
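/*
 * Editor's note, worked example (frequency value assumed): the PLL step
 * is 125 kHz and c->frequency is in kHz, so 1550000 kHz gives exactly
 * (1550000 + 124) / 125 = 12400, while one kHz more already rounds up
 * to 12401.
 */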
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = 0x80 | ((div & 0x18000) >> 10) | 4;
buf[3] = 0x20;
if (c->symbol_rate < 4000000)
buf[3] |= 1;
if (c->frequency < 1250000)
buf[3] |= 0; /* lowest band: band-select bits stay 0, kept for symmetry */
else if (c->frequency < 1550000)
buf[3] |= 0x40;
else if (c->frequency < 2050000)
buf[3] |= 0x80;
else if (c->frequency < 2150000)
buf[3] |= 0xC0;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget->i2c_adap, &msg, 1) != 1)
return -EIO;
return 0;
}
static u8 typhoon_cinergy1200s_inittab[] = {
0x01, 0x15,
0x02, 0x30,
0x03, 0x00,
0x04, 0x7d, /* F22FR = 0x7d, F22 = f_VCO / 128 / 0x7d = 22 kHz */
0x05, 0x35, /* I2CT = 0, SCLT = 1, SDAT = 1 */
0x06, 0x40, /* DAC not used, set to high impedance mode */
0x07, 0x00, /* DAC LSB */
0x08, 0x40, /* DiSEqC off */
0x09, 0x00, /* FIFO */
0x0c, 0x51, /* OP1 ctl = Normal, OP1 val = 1 (LNB Power ON) */
0x0d, 0x82, /* DC offset compensation = ON, beta_agc1 = 2 */
0x0e, 0x23, /* alpha_tmg = 2, beta_tmg = 3 */
0x10, 0x3f, // AGC2 0x3d
0x11, 0x84,
0x12, 0xb9,
0x15, 0xc9, // lock detector threshold
0x16, 0x00,
0x17, 0x00,
0x18, 0x00,
0x19, 0x00,
0x1a, 0x00,
0x1f, 0x50,
0x20, 0x00,
0x21, 0x00,
0x22, 0x00,
0x23, 0x00,
0x28, 0x00, // out imp: normal out type: parallel FEC mode:0
0x29, 0x1e, // 1/2 threshold
0x2a, 0x14, // 2/3 threshold
0x2b, 0x0f, // 3/4 threshold
0x2c, 0x09, // 5/6 threshold
0x2d, 0x05, // 7/8 threshold
0x2e, 0x01,
0x31, 0x1f, // test all FECs
0x32, 0x19, // viterbi and synchro search
0x33, 0xfc, // rs control
0x34, 0x93, // error control
0x0f, 0x92,
0xff, 0xff
};
static struct stv0299_config typhoon_config = {
.demod_address = 0x68,
.inittab = typhoon_cinergy1200s_inittab,
.mclk = 88000000UL,
.invert = 0,
.skip_reinit = 0,
.lock_output = STV0299_LOCKOUTPUT_1,
.volt13_op0_op1 = STV0299_VOLT13_OP0,
.min_delay_ms = 100,
.set_symbol_rate = philips_su1278_ty_ci_set_symbol_rate,
};
static struct stv0299_config cinergy_1200s_config = {
.demod_address = 0x68,
.inittab = typhoon_cinergy1200s_inittab,
.mclk = 88000000UL,
.invert = 0,
.skip_reinit = 0,
.lock_output = STV0299_LOCKOUTPUT_0,
.volt13_op0_op1 = STV0299_VOLT13_OP0,
.min_delay_ms = 100,
.set_symbol_rate = philips_su1278_ty_ci_set_symbol_rate,
};
static struct stv0299_config cinergy_1200s_1894_0010_config = {
.demod_address = 0x68,
.inittab = typhoon_cinergy1200s_inittab,
.mclk = 88000000UL,
.invert = 1,
.skip_reinit = 0,
.lock_output = STV0299_LOCKOUTPUT_1,
.volt13_op0_op1 = STV0299_VOLT13_OP0,
.min_delay_ms = 100,
.set_symbol_rate = philips_su1278_ty_ci_set_symbol_rate,
};
static int philips_cu1216_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct budget *budget = (struct budget *) fe->dvb->priv;
u8 buf[6];
struct i2c_msg msg = {.addr = 0x60,.flags = 0,.buf = buf,.len = sizeof(buf) };
int i;
#define CU1216_IF 36125000
#define TUNER_MUL 62500
u32 div = (c->frequency + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL;
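/*
 * Editor's note, worked example (channel assumed): for a 474 MHz DVB-C
 * channel, div = (474000000 + 36125000 + 31250) / 62500 = 8162, i.e.
 * the PLL is programmed in 62.5 kHz steps at RF + IF, rounded to the
 * nearest step by the TUNER_MUL / 2 term.
 */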
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = 0xce;
buf[3] = (c->frequency < 150000000 ? 0x01 :
c->frequency < 445000000 ? 0x02 : 0x04);
buf[4] = 0xde;
buf[5] = 0x20;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget->i2c_adap, &msg, 1) != 1)
return -EIO;
/* wait for the pll lock */
msg.flags = I2C_M_RD;
msg.len = 1;
for (i = 0; i < 20; i++) {
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget->i2c_adap, &msg, 1) == 1 && (buf[0] & 0x40))
break;
msleep(10);
}
/* switch the charge pump to the lower current */
msg.flags = 0;
msg.len = 2;
msg.buf = &buf[2];
buf[2] &= ~0x40;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget->i2c_adap, &msg, 1) != 1)
return -EIO;
return 0;
}
static struct tda1002x_config philips_cu1216_config = {
.demod_address = 0x0c,
.invert = 1,
};
static struct tda1002x_config philips_cu1216_config_altaddress = {
.demod_address = 0x0d,
.invert = 0,
};
static struct tda10023_config philips_cu1216_tda10023_config = {
.demod_address = 0x0c,
.invert = 1,
};
static int philips_tu1216_tuner_init(struct dvb_frontend *fe)
{
struct budget *budget = (struct budget *) fe->dvb->priv;
static u8 tu1216_init[] = { 0x0b, 0xf5, 0x85, 0xab };
struct i2c_msg tuner_msg = {.addr = 0x60,.flags = 0,.buf = tu1216_init,.len = sizeof(tu1216_init) };
// setup PLL configuration
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget->i2c_adap, &tuner_msg, 1) != 1)
return -EIO;
msleep(1);
return 0;
}
static int philips_tu1216_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct budget *budget = (struct budget *) fe->dvb->priv;
u8 tuner_buf[4];
struct i2c_msg tuner_msg = {.addr = 0x60,.flags = 0,.buf = tuner_buf,.len =
sizeof(tuner_buf) };
int tuner_frequency = 0;
u8 band, cp, filter;
// determine charge pump
tuner_frequency = c->frequency + 36166000;
if (tuner_frequency < 87000000)
return -EINVAL;
else if (tuner_frequency < 130000000)
cp = 3;
else if (tuner_frequency < 160000000)
cp = 5;
else if (tuner_frequency < 200000000)
cp = 6;
else if (tuner_frequency < 290000000)
cp = 3;
else if (tuner_frequency < 420000000)
cp = 5;
else if (tuner_frequency < 480000000)
cp = 6;
else if (tuner_frequency < 620000000)
cp = 3;
else if (tuner_frequency < 830000000)
cp = 5;
else if (tuner_frequency < 895000000)
cp = 7;
else
return -EINVAL;
// determine band
if (c->frequency < 49000000)
return -EINVAL;
else if (c->frequency < 161000000)
band = 1;
else if (c->frequency < 444000000)
band = 2;
else if (c->frequency < 861000000)
band = 4;
else
return -EINVAL;
// setup PLL filter
switch (c->bandwidth_hz) {
case 6000000:
filter = 0;
break;
case 7000000:
filter = 0;
break;
case 8000000:
filter = 1;
break;
default:
return -EINVAL;
}
// calculate divisor
// ((36166000+((1000000/6)/2)) + Finput)/(1000000/6)
tuner_frequency = (((c->frequency / 1000) * 6) + 217496) / 1000;
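/*
 * Editor's note: the constant 217496 folds the IF offset and the
 * half-step rounding into kHz-scaled integer math:
 * (36166000 + 166667/2) * 6 / 1000 = 36249333 * 6 / 1000 ~= 217496,
 * so the line above approximates (Finput + 36249333) / 166667.
 */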
// setup tuner buffer
tuner_buf[0] = (tuner_frequency >> 8) & 0x7f;
tuner_buf[1] = tuner_frequency & 0xff;
tuner_buf[2] = 0xca;
tuner_buf[3] = (cp << 5) | (filter << 3) | band;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget->i2c_adap, &tuner_msg, 1) != 1)
return -EIO;
msleep(1);
return 0;
}
static int philips_tu1216_request_firmware(struct dvb_frontend *fe,
const struct firmware **fw, char *name)
{
struct budget *budget = (struct budget *) fe->dvb->priv;
return request_firmware(fw, name, &budget->dev->pci->dev);
}
static struct tda1004x_config philips_tu1216_config = {
.demod_address = 0x8,
.invert = 1,
.invert_oclk = 1,
.xtal_freq = TDA10046_XTAL_4M,
.agc_config = TDA10046_AGC_DEFAULT,
.if_freq = TDA10046_FREQ_3617,
.request_firmware = philips_tu1216_request_firmware,
};
static u8 philips_sd1878_inittab[] = {
0x01, 0x15,
0x02, 0x30,
0x03, 0x00,
0x04, 0x7d,
0x05, 0x35,
0x06, 0x40,
0x07, 0x00,
0x08, 0x43,
0x09, 0x02,
0x0C, 0x51,
0x0D, 0x82,
0x0E, 0x23,
0x10, 0x3f,
0x11, 0x84,
0x12, 0xb9,
0x15, 0xc9,
0x16, 0x19,
0x17, 0x8c,
0x18, 0x59,
0x19, 0xf8,
0x1a, 0xfe,
0x1c, 0x7f,
0x1d, 0x00,
0x1e, 0x00,
0x1f, 0x50,
0x20, 0x00,
0x21, 0x00,
0x22, 0x00,
0x23, 0x00,
0x28, 0x00,
0x29, 0x28,
0x2a, 0x14,
0x2b, 0x0f,
0x2c, 0x09,
0x2d, 0x09,
0x31, 0x1f,
0x32, 0x19,
0x33, 0xfc,
0x34, 0x93,
0xff, 0xff
};
static int philips_sd1878_ci_set_symbol_rate(struct dvb_frontend *fe,
u32 srate, u32 ratio)
{
u8 aclk = 0;
u8 bclk = 0;
u8 m1;
aclk = 0xb5;
if (srate < 2000000)
bclk = 0x86;
else if (srate < 5000000)
bclk = 0x89;
else if (srate < 15000000)
bclk = 0x8f;
else if (srate < 45000000)
bclk = 0x95;
m1 = 0x14;
if (srate < 4000000)
m1 = 0x10;
stv0299_writereg(fe, 0x0e, 0x23);
stv0299_writereg(fe, 0x0f, 0x94);
stv0299_writereg(fe, 0x10, 0x39);
stv0299_writereg(fe, 0x13, aclk);
stv0299_writereg(fe, 0x14, bclk);
stv0299_writereg(fe, 0x15, 0xc9);
stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
stv0299_writereg(fe, 0x21, (ratio) & 0xf0);
stv0299_writereg(fe, 0x0f, 0x80 | m1);
return 0;
}
static struct stv0299_config philips_sd1878_config = {
.demod_address = 0x68,
.inittab = philips_sd1878_inittab,
.mclk = 88000000UL,
.invert = 0,
.skip_reinit = 0,
.lock_output = STV0299_LOCKOUTPUT_1,
.volt13_op0_op1 = STV0299_VOLT13_OP0,
.min_delay_ms = 100,
.set_symbol_rate = philips_sd1878_ci_set_symbol_rate,
};
/* KNC1 DVB-S (STB0899) Inittab */
static const struct stb0899_s1_reg knc1_stb0899_s1_init_1[] = {
{ STB0899_DEV_ID , 0x81 },
{ STB0899_DISCNTRL1 , 0x32 },
{ STB0899_DISCNTRL2 , 0x80 },
{ STB0899_DISRX_ST0 , 0x04 },
{ STB0899_DISRX_ST1 , 0x00 },
{ STB0899_DISPARITY , 0x00 },
{ STB0899_DISSTATUS , 0x20 },
{ STB0899_DISF22 , 0x8c },
{ STB0899_DISF22RX , 0x9a },
{ STB0899_SYSREG , 0x0b },
{ STB0899_ACRPRESC , 0x11 },
{ STB0899_ACRDIV1 , 0x0a },
{ STB0899_ACRDIV2 , 0x05 },
{ STB0899_DACR1 , 0x00 },
{ STB0899_DACR2 , 0x00 },
{ STB0899_OUTCFG , 0x00 },
{ STB0899_MODECFG , 0x00 },
{ STB0899_IRQSTATUS_3 , 0x30 },
{ STB0899_IRQSTATUS_2 , 0x00 },
{ STB0899_IRQSTATUS_1 , 0x00 },
{ STB0899_IRQSTATUS_0 , 0x00 },
{ STB0899_IRQMSK_3 , 0xf3 },
{ STB0899_IRQMSK_2 , 0xfc },
{ STB0899_IRQMSK_1 , 0xff },
{ STB0899_IRQMSK_0 , 0xff },
{ STB0899_IRQCFG , 0x00 },
{ STB0899_I2CCFG , 0x88 },
{ STB0899_I2CRPT , 0x58 }, /* Repeater=8, Stop=disabled */
{ STB0899_IOPVALUE5 , 0x00 },
{ STB0899_IOPVALUE4 , 0x20 },
{ STB0899_IOPVALUE3 , 0xc9 },
{ STB0899_IOPVALUE2 , 0x90 },
{ STB0899_IOPVALUE1 , 0x40 },
{ STB0899_IOPVALUE0 , 0x00 },
{ STB0899_GPIO00CFG , 0x82 },
{ STB0899_GPIO01CFG , 0x82 },
{ STB0899_GPIO02CFG , 0x82 },
{ STB0899_GPIO03CFG , 0x82 },
{ STB0899_GPIO04CFG , 0x82 },
{ STB0899_GPIO05CFG , 0x82 },
{ STB0899_GPIO06CFG , 0x82 },
{ STB0899_GPIO07CFG , 0x82 },
{ STB0899_GPIO08CFG , 0x82 },
{ STB0899_GPIO09CFG , 0x82 },
{ STB0899_GPIO10CFG , 0x82 },
{ STB0899_GPIO11CFG , 0x82 },
{ STB0899_GPIO12CFG , 0x82 },
{ STB0899_GPIO13CFG , 0x82 },
{ STB0899_GPIO14CFG , 0x82 },
{ STB0899_GPIO15CFG , 0x82 },
{ STB0899_GPIO16CFG , 0x82 },
{ STB0899_GPIO17CFG , 0x82 },
{ STB0899_GPIO18CFG , 0x82 },
{ STB0899_GPIO19CFG , 0x82 },
{ STB0899_GPIO20CFG , 0x82 },
{ STB0899_SDATCFG , 0xb8 },
{ STB0899_SCLTCFG , 0xba },
{ STB0899_AGCRFCFG , 0x08 }, /* 0x1c */
{ STB0899_GPIO22 , 0x82 }, /* AGCBB2CFG */
{ STB0899_GPIO21 , 0x91 }, /* AGCBB1CFG */
{ STB0899_DIRCLKCFG , 0x82 },
{ STB0899_CLKOUT27CFG , 0x7e },
{ STB0899_STDBYCFG , 0x82 },
{ STB0899_CS0CFG , 0x82 },
{ STB0899_CS1CFG , 0x82 },
{ STB0899_DISEQCOCFG , 0x20 },
{ STB0899_GPIO32CFG , 0x82 },
{ STB0899_GPIO33CFG , 0x82 },
{ STB0899_GPIO34CFG , 0x82 },
{ STB0899_GPIO35CFG , 0x82 },
{ STB0899_GPIO36CFG , 0x82 },
{ STB0899_GPIO37CFG , 0x82 },
{ STB0899_GPIO38CFG , 0x82 },
{ STB0899_GPIO39CFG , 0x82 },
{ STB0899_NCOARSE		, 0x15 }, /* 0x15 = 27 MHz clock, F/3 = 198 MHz, F/6 = 99 MHz */
{ STB0899_SYNTCTRL , 0x02 }, /* 0x00 = CLK from CLKI, 0x02 = CLK from XTALI */
{ STB0899_FILTCTRL , 0x00 },
{ STB0899_SYSCTRL , 0x00 },
{ STB0899_STOPCLK1 , 0x20 },
{ STB0899_STOPCLK2 , 0x00 },
{ STB0899_INTBUFSTATUS , 0x00 },
{ STB0899_INTBUFCTRL , 0x0a },
{ 0xffff , 0xff },
};
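/*
 * Editor's note: stb0899_s1_reg tables carry no length field; they are
 * terminated by the { 0xffff, 0xff } sentinel entry seen above.
 */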
static const struct stb0899_s1_reg knc1_stb0899_s1_init_3[] = {
{ STB0899_DEMOD , 0x00 },
{ STB0899_RCOMPC , 0xc9 },
{ STB0899_AGC1CN , 0x41 },
{ STB0899_AGC1REF , 0x08 },
{ STB0899_RTC , 0x7a },
{ STB0899_TMGCFG , 0x4e },
{ STB0899_AGC2REF , 0x33 },
{ STB0899_TLSR , 0x84 },
{ STB0899_CFD , 0xee },
{ STB0899_ACLC , 0x87 },
{ STB0899_BCLC , 0x94 },
{ STB0899_EQON , 0x41 },
{ STB0899_LDT , 0xdd },
{ STB0899_LDT2 , 0xc9 },
{ STB0899_EQUALREF , 0xb4 },
{ STB0899_TMGRAMP , 0x10 },
{ STB0899_TMGTHD , 0x30 },
{ STB0899_IDCCOMP , 0xfb },
{ STB0899_QDCCOMP , 0x03 },
{ STB0899_POWERI , 0x3b },
{ STB0899_POWERQ , 0x3d },
{ STB0899_RCOMP , 0x81 },
{ STB0899_AGCIQIN , 0x80 },
{ STB0899_AGC2I1 , 0x04 },
{ STB0899_AGC2I2 , 0xf5 },
{ STB0899_TLIR , 0x25 },
{ STB0899_RTF , 0x80 },
{ STB0899_DSTATUS , 0x00 },
{ STB0899_LDI , 0xca },
{ STB0899_CFRM , 0xf1 },
{ STB0899_CFRL , 0xf3 },
{ STB0899_NIRM , 0x2a },
{ STB0899_NIRL , 0x05 },
{ STB0899_ISYMB , 0x17 },
{ STB0899_QSYMB , 0xfa },
{ STB0899_SFRH , 0x2f },
{ STB0899_SFRM , 0x68 },
{ STB0899_SFRL , 0x40 },
{ STB0899_SFRUPH , 0x2f },
{ STB0899_SFRUPM , 0x68 },
{ STB0899_SFRUPL , 0x40 },
{ STB0899_EQUAI1 , 0xfd },
{ STB0899_EQUAQ1 , 0x04 },
{ STB0899_EQUAI2 , 0x0f },
{ STB0899_EQUAQ2 , 0xff },
{ STB0899_EQUAI3 , 0xdf },
{ STB0899_EQUAQ3 , 0xfa },
{ STB0899_EQUAI4 , 0x37 },
{ STB0899_EQUAQ4 , 0x0d },
{ STB0899_EQUAI5 , 0xbd },
{ STB0899_EQUAQ5 , 0xf7 },
{ STB0899_DSTATUS2 , 0x00 },
{ STB0899_VSTATUS , 0x00 },
{ STB0899_VERROR , 0xff },
{ STB0899_IQSWAP , 0x2a },
{ STB0899_ECNT1M , 0x00 },
{ STB0899_ECNT1L , 0x00 },
{ STB0899_ECNT2M , 0x00 },
{ STB0899_ECNT2L , 0x00 },
{ STB0899_ECNT3M , 0x00 },
{ STB0899_ECNT3L , 0x00 },
{ STB0899_FECAUTO1 , 0x06 },
{ STB0899_FECM , 0x01 },
{ STB0899_VTH12 , 0xf0 },
{ STB0899_VTH23 , 0xa0 },
{ STB0899_VTH34 , 0x78 },
{ STB0899_VTH56 , 0x4e },
{ STB0899_VTH67 , 0x48 },
{ STB0899_VTH78 , 0x38 },
{ STB0899_PRVIT , 0xff },
{ STB0899_VITSYNC , 0x19 },
{ STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
{ STB0899_TSULC , 0x42 },
{ STB0899_RSLLC , 0x40 },
{ STB0899_TSLPL , 0x12 },
{ STB0899_TSCFGH , 0x0c },
{ STB0899_TSCFGM , 0x00 },
{ STB0899_TSCFGL , 0x0c },
{ STB0899_TSOUT , 0x4d }, /* 0x0d for CAM */
{ STB0899_RSSYNCDEL , 0x00 },
{ STB0899_TSINHDELH , 0x02 },
{ STB0899_TSINHDELM , 0x00 },
{ STB0899_TSINHDELL , 0x00 },
{ STB0899_TSLLSTKM , 0x00 },
{ STB0899_TSLLSTKL , 0x00 },
{ STB0899_TSULSTKM , 0x00 },
{ STB0899_TSULSTKL , 0xab },
{ STB0899_PCKLENUL , 0x00 },
{ STB0899_PCKLENLL , 0xcc },
{ STB0899_RSPCKLEN , 0xcc },
{ STB0899_TSSTATUS , 0x80 },
{ STB0899_ERRCTRL1 , 0xb6 },
{ STB0899_ERRCTRL2 , 0x96 },
{ STB0899_ERRCTRL3 , 0x89 },
{ STB0899_DMONMSK1 , 0x27 },
{ STB0899_DMONMSK0 , 0x03 },
{ STB0899_DEMAPVIT , 0x5c },
{ STB0899_PLPARM , 0x1f },
{ STB0899_PDELCTRL , 0x48 },
{ STB0899_PDELCTRL2 , 0x00 },
{ STB0899_BBHCTRL1 , 0x00 },
{ STB0899_BBHCTRL2 , 0x00 },
{ STB0899_HYSTTHRESH , 0x77 },
{ STB0899_MATCSTM , 0x00 },
{ STB0899_MATCSTL , 0x00 },
{ STB0899_UPLCSTM , 0x00 },
{ STB0899_UPLCSTL , 0x00 },
{ STB0899_DFLCSTM , 0x00 },
{ STB0899_DFLCSTL , 0x00 },
{ STB0899_SYNCCST , 0x00 },
{ STB0899_SYNCDCSTM , 0x00 },
{ STB0899_SYNCDCSTL , 0x00 },
{ STB0899_ISI_ENTRY , 0x00 },
{ STB0899_ISI_BIT_EN , 0x00 },
{ STB0899_MATSTRM , 0x00 },
{ STB0899_MATSTRL , 0x00 },
{ STB0899_UPLSTRM , 0x00 },
{ STB0899_UPLSTRL , 0x00 },
{ STB0899_DFLSTRM , 0x00 },
{ STB0899_DFLSTRL , 0x00 },
{ STB0899_SYNCSTR , 0x00 },
{ STB0899_SYNCDSTRM , 0x00 },
{ STB0899_SYNCDSTRL , 0x00 },
{ STB0899_CFGPDELSTATUS1 , 0x10 },
{ STB0899_CFGPDELSTATUS2 , 0x00 },
{ STB0899_BBFERRORM , 0x00 },
{ STB0899_BBFERRORL , 0x00 },
{ STB0899_UPKTERRORM , 0x00 },
{ STB0899_UPKTERRORL , 0x00 },
{ 0xffff , 0xff },
};
/* STB0899 demodulator config for the KNC1 and clones */
static struct stb0899_config knc1_dvbs2_config = {
.init_dev = knc1_stb0899_s1_init_1,
.init_s2_demod = stb0899_s2_init_2,
.init_s1_demod = knc1_stb0899_s1_init_3,
.init_s2_fec = stb0899_s2_init_4,
.init_tst = stb0899_s1_init_5,
.postproc = NULL,
.demod_address = 0x68,
// .ts_output_mode = STB0899_OUT_PARALLEL, /* types = SERIAL/PARALLEL */
.block_sync_mode = STB0899_SYNC_FORCED, /* DSS, SYNC_FORCED/UNSYNCED */
// .ts_pfbit_toggle = STB0899_MPEG_NORMAL, /* DirecTV, MPEG toggling seq */
.xtal_freq = 27000000,
.inversion = IQ_SWAP_OFF, /* 1 */
.lo_clk = 76500000,
.hi_clk = 90000000,
.esno_ave = STB0899_DVBS2_ESNO_AVE,
.esno_quant = STB0899_DVBS2_ESNO_QUANT,
.avframes_coarse = STB0899_DVBS2_AVFRAMES_COARSE,
.avframes_fine = STB0899_DVBS2_AVFRAMES_FINE,
.miss_threshold = STB0899_DVBS2_MISS_THRESHOLD,
.uwp_threshold_acq = STB0899_DVBS2_UWP_THRESHOLD_ACQ,
.uwp_threshold_track = STB0899_DVBS2_UWP_THRESHOLD_TRACK,
.uwp_threshold_sof = STB0899_DVBS2_UWP_THRESHOLD_SOF,
.sof_search_timeout = STB0899_DVBS2_SOF_SEARCH_TIMEOUT,
.btr_nco_bits = STB0899_DVBS2_BTR_NCO_BITS,
.btr_gain_shift_offset = STB0899_DVBS2_BTR_GAIN_SHIFT_OFFSET,
.crl_nco_bits = STB0899_DVBS2_CRL_NCO_BITS,
.ldpc_max_iter = STB0899_DVBS2_LDPC_MAX_ITER,
.tuner_get_frequency = tda8261_get_frequency,
.tuner_set_frequency = tda8261_set_frequency,
.tuner_set_bandwidth = NULL,
.tuner_get_bandwidth = tda8261_get_bandwidth,
.tuner_set_rfsiggain = NULL
};
/*
* SD1878/SHA tuner config
* 1F, Single I/P, Horizontal mount, High Sensitivity
*/
static const struct tda8261_config sd1878c_config = {
// .name = "SD1878/SHA",
.addr = 0x60,
.step_size = TDA8261_STEP_1000 /* kHz */
};
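/*
 * Editor's note: read_pwm() below appears to fetch the tuner PWM
 * reference from the on-board EEPROM at I2C address 0x50 (cell 0xff)
 * and falls back to 0x48 when the transfer fails or the cell reads
 * back erased (0xff).
 */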
static u8 read_pwm(struct budget_av *budget_av)
{
u8 b = 0xff;
u8 pwm;
struct i2c_msg msg[] = { {.addr = 0x50,.flags = 0,.buf = &b,.len = 1},
{.addr = 0x50,.flags = I2C_M_RD,.buf = &pwm,.len = 1}
};
if ((i2c_transfer(&budget_av->budget.i2c_adap, msg, 2) != 2)
|| (pwm == 0xff))
pwm = 0x48;
return pwm;
}
#define SUBID_DVBS_KNC1 0x0010
#define SUBID_DVBS_KNC1_PLUS 0x0011
#define SUBID_DVBS_TYPHOON 0x4f56
#define SUBID_DVBS_CINERGY1200 0x1154
#define SUBID_DVBS_CYNERGY1200N 0x1155
#define SUBID_DVBS_TV_STAR 0x0014
#define SUBID_DVBS_TV_STAR_PLUS_X4 0x0015
#define SUBID_DVBS_TV_STAR_CI 0x0016
#define SUBID_DVBS2_KNC1 0x0018
#define SUBID_DVBS2_KNC1_OEM 0x0019
#define SUBID_DVBS_EASYWATCH_1 0x001a
#define SUBID_DVBS_EASYWATCH_2 0x001b
#define SUBID_DVBS2_EASYWATCH 0x001d
#define SUBID_DVBS_EASYWATCH 0x001e
#define SUBID_DVBC_EASYWATCH 0x002a
#define SUBID_DVBC_EASYWATCH_MK3 0x002c
#define SUBID_DVBC_KNC1 0x0020
#define SUBID_DVBC_KNC1_PLUS 0x0021
#define SUBID_DVBC_KNC1_MK3 0x0022
#define SUBID_DVBC_KNC1_TDA10024 0x0028
#define SUBID_DVBC_KNC1_PLUS_MK3 0x0023
#define SUBID_DVBC_CINERGY1200 0x1156
#define SUBID_DVBC_CINERGY1200_MK3 0x1176
#define SUBID_DVBT_EASYWATCH 0x003a
#define SUBID_DVBT_KNC1_PLUS 0x0031
#define SUBID_DVBT_KNC1 0x0030
#define SUBID_DVBT_CINERGY1200 0x1157
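/*
 * Editor's note: the SUBID_* values above are PCI subsystem device IDs;
 * frontend_init() below keys both the PLUS-card GPIO power-up quirk and
 * the demodulator/tuner attach sequence off them.
 */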
static void frontend_init(struct budget_av *budget_av)
{
struct saa7146_dev * saa = budget_av->budget.dev;
struct dvb_frontend * fe = NULL;
/* Enable / PowerON Frontend */
saa7146_setgpio(saa, 0, SAA7146_GPIO_OUTLO);
/* Wait for PowerON */
msleep(100);
/* additional setup necessary for the PLUS cards */
switch (saa->pci->subsystem_device) {
case SUBID_DVBS_KNC1_PLUS:
case SUBID_DVBC_KNC1_PLUS:
case SUBID_DVBT_KNC1_PLUS:
case SUBID_DVBC_EASYWATCH:
case SUBID_DVBC_KNC1_PLUS_MK3:
case SUBID_DVBS2_KNC1:
case SUBID_DVBS2_KNC1_OEM:
case SUBID_DVBS2_EASYWATCH:
saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTHI);
break;
}
switch (saa->pci->subsystem_device) {
case SUBID_DVBS_KNC1:
/*
* maybe that setting is needed for other dvb-s cards as well,
* but so far it has only been confirmed for this type
*/
budget_av->reinitialise_demod = 1;
/* fall through */
case SUBID_DVBS_KNC1_PLUS:
case SUBID_DVBS_EASYWATCH_1:
if (saa->pci->subsystem_vendor == 0x1894) {
fe = dvb_attach(stv0299_attach, &cinergy_1200s_1894_0010_config,
&budget_av->budget.i2c_adap);
if (fe) {
dvb_attach(tua6100_attach, fe, 0x60, &budget_av->budget.i2c_adap);
}
} else {
fe = dvb_attach(stv0299_attach, &typhoon_config,
&budget_av->budget.i2c_adap);
if (fe) {
fe->ops.tuner_ops.set_params = philips_su1278_ty_ci_tuner_set_params;
}
}
break;
case SUBID_DVBS_TV_STAR:
case SUBID_DVBS_TV_STAR_PLUS_X4:
case SUBID_DVBS_TV_STAR_CI:
case SUBID_DVBS_CYNERGY1200N:
case SUBID_DVBS_EASYWATCH:
case SUBID_DVBS_EASYWATCH_2:
fe = dvb_attach(stv0299_attach, &philips_sd1878_config,
&budget_av->budget.i2c_adap);
if (fe) {
dvb_attach(dvb_pll_attach, fe, 0x60,
&budget_av->budget.i2c_adap,
DVB_PLL_PHILIPS_SD1878_TDA8261);
}
break;
case SUBID_DVBS_TYPHOON:
fe = dvb_attach(stv0299_attach, &typhoon_config,
&budget_av->budget.i2c_adap);
if (fe) {
fe->ops.tuner_ops.set_params = philips_su1278_ty_ci_tuner_set_params;
}
break;
case SUBID_DVBS2_KNC1:
case SUBID_DVBS2_KNC1_OEM:
case SUBID_DVBS2_EASYWATCH:
budget_av->reinitialise_demod = 1;
if ((fe = dvb_attach(stb0899_attach, &knc1_dvbs2_config, &budget_av->budget.i2c_adap)))
dvb_attach(tda8261_attach, fe, &sd1878c_config, &budget_av->budget.i2c_adap);
break;
case SUBID_DVBS_CINERGY1200:
fe = dvb_attach(stv0299_attach, &cinergy_1200s_config,
&budget_av->budget.i2c_adap);
if (fe) {
fe->ops.tuner_ops.set_params = philips_su1278_ty_ci_tuner_set_params;
}
break;
case SUBID_DVBC_KNC1:
case SUBID_DVBC_KNC1_PLUS:
case SUBID_DVBC_CINERGY1200:
case SUBID_DVBC_EASYWATCH:
budget_av->reinitialise_demod = 1;
budget_av->budget.dev->i2c_bitrate = SAA7146_I2C_BUS_BIT_RATE_240;
fe = dvb_attach(tda10021_attach, &philips_cu1216_config,
&budget_av->budget.i2c_adap,
read_pwm(budget_av));
if (fe == NULL)
fe = dvb_attach(tda10021_attach, &philips_cu1216_config_altaddress,
&budget_av->budget.i2c_adap,
read_pwm(budget_av));
if (fe) {
fe->ops.tuner_ops.set_params = philips_cu1216_tuner_set_params;
}
break;
case SUBID_DVBC_EASYWATCH_MK3:
case SUBID_DVBC_CINERGY1200_MK3:
case SUBID_DVBC_KNC1_MK3:
case SUBID_DVBC_KNC1_TDA10024:
case SUBID_DVBC_KNC1_PLUS_MK3:
budget_av->reinitialise_demod = 1;
budget_av->budget.dev->i2c_bitrate = SAA7146_I2C_BUS_BIT_RATE_240;
fe = dvb_attach(tda10023_attach,
&philips_cu1216_tda10023_config,
&budget_av->budget.i2c_adap,
read_pwm(budget_av));
if (fe) {
fe->ops.tuner_ops.set_params = philips_cu1216_tuner_set_params;
}
break;
case SUBID_DVBT_EASYWATCH:
case SUBID_DVBT_KNC1:
case SUBID_DVBT_KNC1_PLUS:
case SUBID_DVBT_CINERGY1200:
budget_av->reinitialise_demod = 1;
fe = dvb_attach(tda10046_attach, &philips_tu1216_config,
&budget_av->budget.i2c_adap);
if (fe) {
fe->ops.tuner_ops.init = philips_tu1216_tuner_init;
fe->ops.tuner_ops.set_params = philips_tu1216_tuner_set_params;
}
break;
}
if (fe == NULL) {
pr_err("A frontend driver was not found for device [%04x:%04x] subsystem [%04x:%04x]\n",
saa->pci->vendor,
saa->pci->device,
saa->pci->subsystem_vendor,
saa->pci->subsystem_device);
return;
}
budget_av->budget.dvb_frontend = fe;
if (dvb_register_frontend(&budget_av->budget.dvb_adapter,
budget_av->budget.dvb_frontend)) {
pr_err("Frontend registration failed!\n");
dvb_frontend_detach(budget_av->budget.dvb_frontend);
budget_av->budget.dvb_frontend = NULL;
}
}
static void budget_av_irq(struct saa7146_dev *dev, u32 * isr)
{
struct budget_av *budget_av = (struct budget_av *) dev->ext_priv;
dprintk(8, "dev: %p, budget_av: %p\n", dev, budget_av);
if (*isr & MASK_10)
ttpci_budget_irq10_handler(dev, isr);
}
static int budget_av_detach(struct saa7146_dev *dev)
{
struct budget_av *budget_av = (struct budget_av *) dev->ext_priv;
int err;
dprintk(2, "dev: %p\n", dev);
if (1 == budget_av->has_saa7113) {
saa7146_setgpio(dev, 0, SAA7146_GPIO_OUTLO);
msleep(200);
saa7146_unregister_device(&budget_av->vd, dev);
saa7146_vv_release(dev);
}
if (budget_av->budget.ci_present)
ciintf_deinit(budget_av);
if (budget_av->budget.dvb_frontend != NULL) {
dvb_unregister_frontend(budget_av->budget.dvb_frontend);
dvb_frontend_detach(budget_av->budget.dvb_frontend);
}
err = ttpci_budget_deinit(&budget_av->budget);
kfree(budget_av);
return err;
}
#define KNC1_INPUTS 2
static struct v4l2_input knc1_inputs[KNC1_INPUTS] = {
{ 0, "Composite", V4L2_INPUT_TYPE_TUNER, 1, 0,
V4L2_STD_PAL_BG | V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
{ 1, "S-Video", V4L2_INPUT_TYPE_CAMERA, 2, 0,
V4L2_STD_PAL_BG | V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
};
static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
{
dprintk(1, "VIDIOC_ENUMINPUT %d\n", i->index);
if (i->index >= KNC1_INPUTS)
return -EINVAL;
memcpy(i, &knc1_inputs[i->index], sizeof(struct v4l2_input));
return 0;
}
static int vidioc_g_input(struct file *file, void *fh, unsigned int *i)
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct budget_av *budget_av = (struct budget_av *)dev->ext_priv;
*i = budget_av->cur_input;
dprintk(1, "VIDIOC_G_INPUT %d\n", *i);
return 0;
}
static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct budget_av *budget_av = (struct budget_av *)dev->ext_priv;
dprintk(1, "VIDIOC_S_INPUT %d\n", input);
return saa7113_setinput(budget_av, input);
}
static struct saa7146_ext_vv vv_data;
static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info)
{
struct budget_av *budget_av;
u8 *mac;
int err;
dprintk(2, "dev: %p\n", dev);
if (!(budget_av = kzalloc(sizeof(struct budget_av), GFP_KERNEL)))
return -ENOMEM;
budget_av->has_saa7113 = 0;
budget_av->budget.ci_present = 0;
dev->ext_priv = budget_av;
err = ttpci_budget_init(&budget_av->budget, dev, info, THIS_MODULE,
adapter_nr);
if (err) {
kfree(budget_av);
return err;
}
/* knc1 initialization */
saa7146_write(dev, DD1_STREAM_B, 0x04000000);
saa7146_write(dev, DD1_INIT, 0x07000600);
saa7146_write(dev, MC2, MASK_09 | MASK_25 | MASK_10 | MASK_26);
if (saa7113_init(budget_av) == 0) {
budget_av->has_saa7113 = 1;
err = saa7146_vv_init(dev, &vv_data);
if (0 != err) {
/* fixme: proper cleanup here */
ERR("cannot init vv subsystem\n");
return err;
}
vv_data.ops.vidioc_enum_input = vidioc_enum_input;
vv_data.ops.vidioc_g_input = vidioc_g_input;
vv_data.ops.vidioc_s_input = vidioc_s_input;
if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_GRABBER))) {
/* fixme: proper cleanup here */
ERR("cannot register capture v4l2 device\n");
saa7146_vv_release(dev);
return err;
}
/* beware: this modifies dev->vv ... */
saa7146_set_hps_source_and_sync(dev, SAA7146_HPS_SOURCE_PORT_A,
SAA7146_HPS_SYNC_PORT_A);
saa7113_setinput(budget_av, 0);
}
/* fixme: find some sane values here... */
saa7146_write(dev, PCI_BT_V1, 0x1c00101f);
mac = budget_av->budget.dvb_adapter.proposed_mac;
if (i2c_readregs(&budget_av->budget.i2c_adap, 0xa0, 0x30, mac, 6)) {
pr_err("KNC1-%d: Could not read MAC from KNC1 card\n",
budget_av->budget.dvb_adapter.num);
memset(mac, 0, 6);
} else {
pr_info("KNC1-%d: MAC addr = %pM\n",
budget_av->budget.dvb_adapter.num, mac);
}
budget_av->budget.dvb_adapter.priv = budget_av;
frontend_init(budget_av);
ciintf_init(budget_av);
ttpci_budget_init_hooks(&budget_av->budget);
return 0;
}
static struct saa7146_standard standard[] = {
{.name = "PAL",.id = V4L2_STD_PAL,
.v_offset = 0x17,.v_field = 288,
.h_offset = 0x14,.h_pixels = 680,
.v_max_out = 576,.h_max_out = 768 },
{.name = "NTSC",.id = V4L2_STD_NTSC,
.v_offset = 0x16,.v_field = 240,
.h_offset = 0x06,.h_pixels = 708,
.v_max_out = 480,.h_max_out = 640, },
};
static struct saa7146_ext_vv vv_data = {
.inputs = 2,
.capabilities = 0, // perhaps later: V4L2_CAP_VBI_CAPTURE, but that needs tweaking with the saa7113
.flags = 0,
.stds = &standard[0],
.num_stds = ARRAY_SIZE(standard),
};
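/*
 * Editor's note: the vidioc_* input ops are deliberately absent here;
 * budget_av_attach() above patches them into vv_data.ops once
 * saa7146_vv_init() has filled in the framework defaults.
 */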
static struct saa7146_extension budget_extension;
MAKE_BUDGET_INFO(knc1s, "KNC1 DVB-S", BUDGET_KNC1S);
MAKE_BUDGET_INFO(knc1s2, "KNC1 DVB-S2", BUDGET_KNC1S2);
MAKE_BUDGET_INFO(sates2, "Satelco EasyWatch DVB-S2", BUDGET_KNC1S2);
MAKE_BUDGET_INFO(knc1c, "KNC1 DVB-C", BUDGET_KNC1C);
MAKE_BUDGET_INFO(knc1t, "KNC1 DVB-T", BUDGET_KNC1T);
MAKE_BUDGET_INFO(kncxs, "KNC TV STAR DVB-S", BUDGET_TVSTAR);
MAKE_BUDGET_INFO(satewpls, "Satelco EasyWatch DVB-S light", BUDGET_TVSTAR);
MAKE_BUDGET_INFO(satewpls1, "Satelco EasyWatch DVB-S light", BUDGET_KNC1S);
MAKE_BUDGET_INFO(satewps, "Satelco EasyWatch DVB-S", BUDGET_KNC1S);
MAKE_BUDGET_INFO(satewplc, "Satelco EasyWatch DVB-C", BUDGET_KNC1CP);
MAKE_BUDGET_INFO(satewcmk3, "Satelco EasyWatch DVB-C MK3", BUDGET_KNC1C_MK3);
MAKE_BUDGET_INFO(satewt, "Satelco EasyWatch DVB-T", BUDGET_KNC1T);
MAKE_BUDGET_INFO(knc1sp, "KNC1 DVB-S Plus", BUDGET_KNC1SP);
MAKE_BUDGET_INFO(knc1spx4, "KNC1 DVB-S Plus X4", BUDGET_KNC1SP);
MAKE_BUDGET_INFO(knc1cp, "KNC1 DVB-C Plus", BUDGET_KNC1CP);
MAKE_BUDGET_INFO(knc1cmk3, "KNC1 DVB-C MK3", BUDGET_KNC1C_MK3);
MAKE_BUDGET_INFO(knc1ctda10024, "KNC1 DVB-C TDA10024", BUDGET_KNC1C_TDA10024);
MAKE_BUDGET_INFO(knc1cpmk3, "KNC1 DVB-C Plus MK3", BUDGET_KNC1CP_MK3);
MAKE_BUDGET_INFO(knc1tp, "KNC1 DVB-T Plus", BUDGET_KNC1TP);
MAKE_BUDGET_INFO(cin1200s, "TerraTec Cinergy 1200 DVB-S", BUDGET_CIN1200S);
MAKE_BUDGET_INFO(cin1200sn, "TerraTec Cinergy 1200 DVB-S", BUDGET_CIN1200S);
MAKE_BUDGET_INFO(cin1200c, "Terratec Cinergy 1200 DVB-C", BUDGET_CIN1200C);
MAKE_BUDGET_INFO(cin1200cmk3, "Terratec Cinergy 1200 DVB-C MK3", BUDGET_CIN1200C_MK3);
MAKE_BUDGET_INFO(cin1200t, "Terratec Cinergy 1200 DVB-T", BUDGET_CIN1200T);
static struct pci_device_id pci_tbl[] = {
MAKE_EXTENSION_PCI(knc1s, 0x1131, 0x4f56),
MAKE_EXTENSION_PCI(knc1s, 0x1131, 0x0010),
MAKE_EXTENSION_PCI(knc1s, 0x1894, 0x0010),
MAKE_EXTENSION_PCI(knc1sp, 0x1131, 0x0011),
MAKE_EXTENSION_PCI(knc1sp, 0x1894, 0x0011),
MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0014),
MAKE_EXTENSION_PCI(knc1spx4, 0x1894, 0x0015),
MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0016),
MAKE_EXTENSION_PCI(knc1s2, 0x1894, 0x0018),
MAKE_EXTENSION_PCI(knc1s2, 0x1894, 0x0019),
MAKE_EXTENSION_PCI(sates2, 0x1894, 0x001d),
MAKE_EXTENSION_PCI(satewpls, 0x1894, 0x001e),
MAKE_EXTENSION_PCI(satewpls1, 0x1894, 0x001a),
MAKE_EXTENSION_PCI(satewps, 0x1894, 0x001b),
MAKE_EXTENSION_PCI(satewplc, 0x1894, 0x002a),
MAKE_EXTENSION_PCI(satewcmk3, 0x1894, 0x002c),
MAKE_EXTENSION_PCI(satewt, 0x1894, 0x003a),
MAKE_EXTENSION_PCI(knc1c, 0x1894, 0x0020),
MAKE_EXTENSION_PCI(knc1cp, 0x1894, 0x0021),
MAKE_EXTENSION_PCI(knc1cmk3, 0x1894, 0x0022),
MAKE_EXTENSION_PCI(knc1ctda10024, 0x1894, 0x0028),
MAKE_EXTENSION_PCI(knc1cpmk3, 0x1894, 0x0023),
MAKE_EXTENSION_PCI(knc1t, 0x1894, 0x0030),
MAKE_EXTENSION_PCI(knc1tp, 0x1894, 0x0031),
MAKE_EXTENSION_PCI(cin1200s, 0x153b, 0x1154),
MAKE_EXTENSION_PCI(cin1200sn, 0x153b, 0x1155),
MAKE_EXTENSION_PCI(cin1200c, 0x153b, 0x1156),
MAKE_EXTENSION_PCI(cin1200cmk3, 0x153b, 0x1176),
MAKE_EXTENSION_PCI(cin1200t, 0x153b, 0x1157),
{
.vendor = 0,
}
};
MODULE_DEVICE_TABLE(pci, pci_tbl);
static struct saa7146_extension budget_extension = {
.name = "budget_av",
.flags = SAA7146_USE_I2C_IRQ,
.pci_tbl = pci_tbl,
.module = THIS_MODULE,
.attach = budget_av_attach,
.detach = budget_av_detach,
.irq_mask = MASK_10,
.irq_func = budget_av_irq,
};
static int __init budget_av_init(void)
{
return saa7146_register_extension(&budget_extension);
}
static void __exit budget_av_exit(void)
{
saa7146_unregister_extension(&budget_extension);
}
module_init(budget_av_init);
module_exit(budget_av_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, Michael Hunold, others");
MODULE_DESCRIPTION("driver for the SAA7146 based so-called "
"budget PCI DVB w/ analog input and CI-module (e.g. the KNC cards)");
| gpl-2.0 |
JoinTheRealms/TF700-dualboot-stockbased | sound/isa/sb/emu8000_callback.c | 4890 | 13528 | /*
* synth callback routines for the emu8000 (AWE32/64)
*
* Copyright (C) 1999 Steve Ratcliffe
* Copyright (C) 1999-2000 Takashi Iwai <tiwai@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "emu8000_local.h"
#include <sound/asoundef.h>
/*
* prototypes
*/
static struct snd_emux_voice *get_voice(struct snd_emux *emu,
struct snd_emux_port *port);
static int start_voice(struct snd_emux_voice *vp);
static void trigger_voice(struct snd_emux_voice *vp);
static void release_voice(struct snd_emux_voice *vp);
static void update_voice(struct snd_emux_voice *vp, int update);
static void reset_voice(struct snd_emux *emu, int ch);
static void terminate_voice(struct snd_emux_voice *vp);
static void sysex(struct snd_emux *emu, char *buf, int len, int parsed,
struct snd_midi_channel_set *chset);
#ifdef CONFIG_SND_SEQUENCER_OSS
static int oss_ioctl(struct snd_emux *emu, int cmd, int p1, int p2);
#endif
static int load_fx(struct snd_emux *emu, int type, int mode,
const void __user *buf, long len);
static void set_pitch(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void set_volume(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void set_pan(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void set_fmmod(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void set_tremfreq(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void set_fm2frq2(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void set_filterQ(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void snd_emu8000_tweak_voice(struct snd_emu8000 *emu, int ch);
/*
* Ensure a value is between two points
* macro evaluates its args more than once, so changed to upper-case.
*/
#define LIMITVALUE(x, a, b) do { if ((x) < (a)) (x) = (a); else if ((x) > (b)) (x) = (b); } while (0)
#define LIMITMAX(x, a) do {if ((x) > (a)) (x) = (a); } while (0)
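/*
 * Editor's note, usage sketch (values hypothetical):
 *
 *	int pitch = 300;
 *	LIMITVALUE(pitch, -128, 127);	// pitch == 127
 *	LIMITMAX(pitch, 100);		// pitch == 100
 *
 * The do { } while (0) wrapper keeps both macros safe as single
 * statements in unbraced if/else bodies.
 */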
/*
* set up operators
*/
static struct snd_emux_operators emu8000_ops = {
.owner = THIS_MODULE,
.get_voice = get_voice,
.prepare = start_voice,
.trigger = trigger_voice,
.release = release_voice,
.update = update_voice,
.terminate = terminate_voice,
.reset = reset_voice,
.sample_new = snd_emu8000_sample_new,
.sample_free = snd_emu8000_sample_free,
.sample_reset = snd_emu8000_sample_reset,
.load_fx = load_fx,
.sysex = sysex,
#ifdef CONFIG_SND_SEQUENCER_OSS
.oss_ioctl = oss_ioctl,
#endif
};
void
snd_emu8000_ops_setup(struct snd_emu8000 *hw)
{
hw->emu->ops = emu8000_ops;
}
/*
* Release a voice: push its mod/vol envelopes into their release phase
*/
static void
release_voice(struct snd_emux_voice *vp)
{
int dcysusv;
struct snd_emu8000 *hw;
hw = vp->hw;
dcysusv = 0x8000 | (unsigned char)vp->reg.parm.modrelease;
EMU8000_DCYSUS_WRITE(hw, vp->ch, dcysusv);
dcysusv = 0x8000 | (unsigned char)vp->reg.parm.volrelease;
EMU8000_DCYSUSV_WRITE(hw, vp->ch, dcysusv);
}
/*
*/
static void
terminate_voice(struct snd_emux_voice *vp)
{
struct snd_emu8000 *hw;
hw = vp->hw;
EMU8000_DCYSUSV_WRITE(hw, vp->ch, 0x807F);
}
/*
*/
static void
update_voice(struct snd_emux_voice *vp, int update)
{
struct snd_emu8000 *hw;
hw = vp->hw;
if (update & SNDRV_EMUX_UPDATE_VOLUME)
set_volume(hw, vp);
if (update & SNDRV_EMUX_UPDATE_PITCH)
set_pitch(hw, vp);
if ((update & SNDRV_EMUX_UPDATE_PAN) &&
vp->port->ctrls[EMUX_MD_REALTIME_PAN])
set_pan(hw, vp);
if (update & SNDRV_EMUX_UPDATE_FMMOD)
set_fmmod(hw, vp);
if (update & SNDRV_EMUX_UPDATE_TREMFREQ)
set_tremfreq(hw, vp);
if (update & SNDRV_EMUX_UPDATE_FM2FRQ2)
set_fm2frq2(hw, vp);
if (update & SNDRV_EMUX_UPDATE_Q)
set_filterQ(hw, vp);
}
/*
* Find a channel (voice) within the EMU that is not in use or at least
* less in use than other channels. Always returns a valid pointer
* no matter what. If there is a real shortage of voices then one
* will be cut. Such is life.
*
* The channel index (vp->ch) must be initialized in this routine.
* In Emu8k, it is identical with the array index.
*/
static struct snd_emux_voice *
get_voice(struct snd_emux *emu, struct snd_emux_port *port)
{
int i;
struct snd_emux_voice *vp;
struct snd_emu8000 *hw;
/* what we are looking for, in order of preference */
enum {
OFF=0, RELEASED, PLAYING, END
};
/* Keeps track of what we are finding */
struct best {
unsigned int time;
int voice;
} best[END];
struct best *bp;
hw = emu->hw;
for (i = 0; i < END; i++) {
best[i].time = (unsigned int)(-1); /* XXX UINT_MAX really */
best[i].voice = -1;
}
/*
* Go through them all and get a best one to use.
*/
for (i = 0; i < emu->max_voices; i++) {
int state, val;
vp = &emu->voices[i];
state = vp->state;
if (state == SNDRV_EMUX_ST_OFF)
bp = best + OFF;
else if (state == SNDRV_EMUX_ST_RELEASED ||
state == SNDRV_EMUX_ST_PENDING) {
bp = best + RELEASED;
val = (EMU8000_CVCF_READ(hw, vp->ch) >> 16) & 0xffff;
if (! val)
bp = best + OFF;
}
else if (state & SNDRV_EMUX_ST_ON)
bp = best + PLAYING;
else
continue;
/* check if sample is finished playing (non-looping only) */
if (state != SNDRV_EMUX_ST_OFF &&
(vp->reg.sample_mode & SNDRV_SFNT_SAMPLE_SINGLESHOT)) {
val = EMU8000_CCCA_READ(hw, vp->ch) & 0xffffff;
if (val >= vp->reg.loopstart)
bp = best + OFF;
}
if (vp->time < bp->time) {
bp->time = vp->time;
bp->voice = i;
}
}
for (i = 0; i < END; i++) {
if (best[i].voice >= 0) {
vp = &emu->voices[best[i].voice];
vp->ch = best[i].voice;
return vp;
}
}
/* not found */
return NULL;
}
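/*
 * Editor's note: the scan above classifies voices as OFF, RELEASED or
 * PLAYING (in that order of preference), demotes silent released voices
 * and finished single-shot samples to OFF, and within each class keeps
 * the oldest voice by vp->time; the first non-empty class wins.
 */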
/*
*/
static int
start_voice(struct snd_emux_voice *vp)
{
unsigned int temp;
int ch;
int addr;
struct snd_midi_channel *chan;
struct snd_emu8000 *hw;
hw = vp->hw;
ch = vp->ch;
chan = vp->chan;
/* channel to be silent and idle */
EMU8000_DCYSUSV_WRITE(hw, ch, 0x0080);
EMU8000_VTFT_WRITE(hw, ch, 0x0000FFFF);
EMU8000_CVCF_WRITE(hw, ch, 0x0000FFFF);
EMU8000_PTRX_WRITE(hw, ch, 0);
EMU8000_CPF_WRITE(hw, ch, 0);
/* set pitch offset */
set_pitch(hw, vp);
/* set envelope parameters */
EMU8000_ENVVAL_WRITE(hw, ch, vp->reg.parm.moddelay);
EMU8000_ATKHLD_WRITE(hw, ch, vp->reg.parm.modatkhld);
EMU8000_DCYSUS_WRITE(hw, ch, vp->reg.parm.moddcysus);
EMU8000_ENVVOL_WRITE(hw, ch, vp->reg.parm.voldelay);
EMU8000_ATKHLDV_WRITE(hw, ch, vp->reg.parm.volatkhld);
/* decay/sustain parameter for volume envelope is used
for triggering the voice */
/* cutoff and volume */
set_volume(hw, vp);
/* modulation envelope heights */
EMU8000_PEFE_WRITE(hw, ch, vp->reg.parm.pefe);
/* lfo1/2 delay */
EMU8000_LFO1VAL_WRITE(hw, ch, vp->reg.parm.lfo1delay);
EMU8000_LFO2VAL_WRITE(hw, ch, vp->reg.parm.lfo2delay);
/* lfo1 pitch & cutoff shift */
set_fmmod(hw, vp);
/* lfo1 volume & freq */
set_tremfreq(hw, vp);
/* lfo2 pitch & freq */
set_fm2frq2(hw, vp);
/* pan & loop start */
set_pan(hw, vp);
/* chorus & loop end (chorus 8bit, MSB) */
addr = vp->reg.loopend - 1;
temp = vp->reg.parm.chorus;
temp += (int)chan->control[MIDI_CTL_E3_CHORUS_DEPTH] * 9 / 10;
LIMITMAX(temp, 255);
temp = (temp <<24) | (unsigned int)addr;
EMU8000_CSL_WRITE(hw, ch, temp);
/* Q & current address (Q 4bit value, MSB) */
addr = vp->reg.start - 1;
temp = vp->reg.parm.filterQ;
temp = (temp<<28) | (unsigned int)addr;
EMU8000_CCCA_WRITE(hw, ch, temp);
/* clear unknown registers */
EMU8000_00A0_WRITE(hw, ch, 0);
EMU8000_0080_WRITE(hw, ch, 0);
/* reset volume */
temp = vp->vtarget << 16;
EMU8000_VTFT_WRITE(hw, ch, temp | vp->ftarget);
EMU8000_CVCF_WRITE(hw, ch, temp | 0xff00);
return 0;
}
/*
* Start envelope
*/
static void
trigger_voice(struct snd_emux_voice *vp)
{
int ch = vp->ch;
unsigned int temp;
struct snd_emu8000 *hw;
hw = vp->hw;
/* set reverb and pitch target */
temp = vp->reg.parm.reverb;
temp += (int)vp->chan->control[MIDI_CTL_E1_REVERB_DEPTH] * 9 / 10;
LIMITMAX(temp, 255);
temp = (temp << 8) | (vp->ptarget << 16) | vp->aaux;
EMU8000_PTRX_WRITE(hw, ch, temp);
EMU8000_CPF_WRITE(hw, ch, vp->ptarget << 16);
EMU8000_DCYSUSV_WRITE(hw, ch, vp->reg.parm.voldcysus);
}
/*
* reset voice parameters
*/
static void
reset_voice(struct snd_emux *emu, int ch)
{
struct snd_emu8000 *hw;
hw = emu->hw;
EMU8000_DCYSUSV_WRITE(hw, ch, 0x807F);
snd_emu8000_tweak_voice(hw, ch);
}
/*
* Set the pitch of a possibly playing note.
*/
static void
set_pitch(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
EMU8000_IP_WRITE(hw, vp->ch, vp->apitch);
}
/*
* Set the volume of a possibly already playing note
*/
static void
set_volume(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
int ifatn;
ifatn = (unsigned char)vp->acutoff;
ifatn = (ifatn << 8);
ifatn |= (unsigned char)vp->avol;
EMU8000_IFATN_WRITE(hw, vp->ch, ifatn);
}
/*
* Set pan and loop start address.
*/
static void
set_pan(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
unsigned int temp;
temp = ((unsigned int)vp->apan<<24) | ((unsigned int)vp->reg.loopstart - 1);
EMU8000_PSST_WRITE(hw, vp->ch, temp);
}
#define MOD_SENSE 18
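/*
 * Editor's note: MOD_SENSE scales the combined modulation wheel and
 * channel pressure (0..254) into the signed 8-bit LFO pitch byte; e.g.
 * a full wheel (127) adds 18 * 127 / 1200 = 1 in integer math, and
 * wheel plus full pressure (254) adds 3, before LIMITVALUE() clamps
 * the result to [-128, 127].
 */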
static void
set_fmmod(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
unsigned short fmmod;
short pitch;
unsigned char cutoff;
int modulation;
pitch = (char)(vp->reg.parm.fmmod>>8);
cutoff = (vp->reg.parm.fmmod & 0xff);
modulation = vp->chan->gm_modulation + vp->chan->midi_pressure;
pitch += (MOD_SENSE * modulation) / 1200;
LIMITVALUE(pitch, -128, 127);
fmmod = ((unsigned char)pitch<<8) | cutoff;
EMU8000_FMMOD_WRITE(hw, vp->ch, fmmod);
}
/* set tremolo (lfo1) volume & frequency */
static void
set_tremfreq(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
EMU8000_TREMFRQ_WRITE(hw, vp->ch, vp->reg.parm.tremfrq);
}
/* set lfo2 pitch & frequency */
static void
set_fm2frq2(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
unsigned short fm2frq2;
short pitch;
unsigned char freq;
int modulation;
pitch = (char)(vp->reg.parm.fm2frq2>>8);
freq = vp->reg.parm.fm2frq2 & 0xff;
modulation = vp->chan->gm_modulation + vp->chan->midi_pressure;
pitch += (MOD_SENSE * modulation) / 1200;
LIMITVALUE(pitch, -128, 127);
fm2frq2 = ((unsigned char)pitch<<8) | freq;
EMU8000_FM2FRQ2_WRITE(hw, vp->ch, fm2frq2);
}
/* set filterQ */
static void
set_filterQ(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
unsigned int addr;
addr = EMU8000_CCCA_READ(hw, vp->ch) & 0xffffff;
addr |= (vp->reg.parm.filterQ << 28);
EMU8000_CCCA_WRITE(hw, vp->ch, addr);
}
/*
* set the envelope & LFO parameters to the default values
*/
static void
snd_emu8000_tweak_voice(struct snd_emu8000 *emu, int i)
{
/* set all mod/vol envelope shape to minimum */
EMU8000_ENVVOL_WRITE(emu, i, 0x8000);
EMU8000_ENVVAL_WRITE(emu, i, 0x8000);
EMU8000_DCYSUS_WRITE(emu, i, 0x7F7F);
EMU8000_ATKHLDV_WRITE(emu, i, 0x7F7F);
EMU8000_ATKHLD_WRITE(emu, i, 0x7F7F);
EMU8000_PEFE_WRITE(emu, i, 0); /* mod envelope height to zero */
EMU8000_LFO1VAL_WRITE(emu, i, 0x8000); /* no delay for LFO1 */
EMU8000_LFO2VAL_WRITE(emu, i, 0x8000);
EMU8000_IP_WRITE(emu, i, 0xE000); /* no pitch shift */
EMU8000_IFATN_WRITE(emu, i, 0xFF00); /* volume to minimum */
EMU8000_FMMOD_WRITE(emu, i, 0);
EMU8000_TREMFRQ_WRITE(emu, i, 0);
EMU8000_FM2FRQ2_WRITE(emu, i, 0);
}
/*
* sysex callback
*/
static void
sysex(struct snd_emux *emu, char *buf, int len, int parsed, struct snd_midi_channel_set *chset)
{
struct snd_emu8000 *hw;
hw = emu->hw;
switch (parsed) {
case SNDRV_MIDI_SYSEX_GS_CHORUS_MODE:
hw->chorus_mode = chset->gs_chorus_mode;
snd_emu8000_update_chorus_mode(hw);
break;
case SNDRV_MIDI_SYSEX_GS_REVERB_MODE:
hw->reverb_mode = chset->gs_reverb_mode;
snd_emu8000_update_reverb_mode(hw);
break;
}
}
#ifdef CONFIG_SND_SEQUENCER_OSS
/*
* OSS ioctl callback
*/
static int
oss_ioctl(struct snd_emux *emu, int cmd, int p1, int p2)
{
struct snd_emu8000 *hw;
hw = emu->hw;
switch (cmd) {
case _EMUX_OSS_REVERB_MODE:
hw->reverb_mode = p1;
snd_emu8000_update_reverb_mode(hw);
break;
case _EMUX_OSS_CHORUS_MODE:
hw->chorus_mode = p1;
snd_emu8000_update_chorus_mode(hw);
break;
case _EMUX_OSS_INITIALIZE_CHIP:
/* snd_emu8000_init(hw); */ /*ignored*/
break;
case _EMUX_OSS_EQUALIZER:
hw->bass_level = p1;
hw->treble_level = p2;
snd_emu8000_update_equalizer(hw);
break;
}
return 0;
}
#endif
/*
* additional patch keys
*/
#define SNDRV_EMU8000_LOAD_CHORUS_FX 0x10 /* optarg=mode */
#define SNDRV_EMU8000_LOAD_REVERB_FX 0x11 /* optarg=mode */
/*
* callback routine
*/
static int
load_fx(struct snd_emux *emu, int type, int mode, const void __user *buf, long len)
{
struct snd_emu8000 *hw;
hw = emu->hw;
/* skip header */
buf += 16;
len -= 16;
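/*
 * Editor's note: the 16 bytes skipped above appear to be the common
 * patch-record header shared by EMU8000 patch loads; only the FX
 * payload that follows is handed to the chorus/reverb loaders below.
 */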
switch (type) {
case SNDRV_EMU8000_LOAD_CHORUS_FX:
return snd_emu8000_load_chorus_fx(hw, mode, buf, len);
case SNDRV_EMU8000_LOAD_REVERB_FX:
return snd_emu8000_load_reverb_fx(hw, mode, buf, len);
}
return -EINVAL;
}
| gpl-2.0 |
CyanogenMod/samsung-kernel-galaxys | sound/isa/sb/emu8000_callback.c | 4890 | 13528 | /*
* synth callback routines for the emu8000 (AWE32/64)
*
* Copyright (C) 1999 Steve Ratcliffe
* Copyright (C) 1999-2000 Takashi Iwai <tiwai@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "emu8000_local.h"
#include <sound/asoundef.h>
/*
* prototypes
*/
static struct snd_emux_voice *get_voice(struct snd_emux *emu,
struct snd_emux_port *port);
static int start_voice(struct snd_emux_voice *vp);
static void trigger_voice(struct snd_emux_voice *vp);
static void release_voice(struct snd_emux_voice *vp);
static void update_voice(struct snd_emux_voice *vp, int update);
static void reset_voice(struct snd_emux *emu, int ch);
static void terminate_voice(struct snd_emux_voice *vp);
static void sysex(struct snd_emux *emu, char *buf, int len, int parsed,
struct snd_midi_channel_set *chset);
#ifdef CONFIG_SND_SEQUENCER_OSS
static int oss_ioctl(struct snd_emux *emu, int cmd, int p1, int p2);
#endif
static int load_fx(struct snd_emux *emu, int type, int mode,
const void __user *buf, long len);
static void set_pitch(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void set_volume(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void set_pan(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void set_fmmod(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void set_tremfreq(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void set_fm2frq2(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void set_filterQ(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void snd_emu8000_tweak_voice(struct snd_emu8000 *emu, int ch);
/*
* Ensure a value is between two points
* macro evaluates its args more than once, so changed to upper-case.
*/
#define LIMITVALUE(x, a, b) do { if ((x) < (a)) (x) = (a); else if ((x) > (b)) (x) = (b); } while (0)
#define LIMITMAX(x, a) do {if ((x) > (a)) (x) = (a); } while (0)
/*
* set up operators
*/
static struct snd_emux_operators emu8000_ops = {
.owner = THIS_MODULE,
.get_voice = get_voice,
.prepare = start_voice,
.trigger = trigger_voice,
.release = release_voice,
.update = update_voice,
.terminate = terminate_voice,
.reset = reset_voice,
.sample_new = snd_emu8000_sample_new,
.sample_free = snd_emu8000_sample_free,
.sample_reset = snd_emu8000_sample_reset,
.load_fx = load_fx,
.sysex = sysex,
#ifdef CONFIG_SND_SEQUENCER_OSS
.oss_ioctl = oss_ioctl,
#endif
};
void
snd_emu8000_ops_setup(struct snd_emu8000 *hw)
{
hw->emu->ops = emu8000_ops;
}
/*
* Release a voice: push its mod/vol envelopes into their release phase
*/
static void
release_voice(struct snd_emux_voice *vp)
{
int dcysusv;
struct snd_emu8000 *hw;
hw = vp->hw;
dcysusv = 0x8000 | (unsigned char)vp->reg.parm.modrelease;
EMU8000_DCYSUS_WRITE(hw, vp->ch, dcysusv);
dcysusv = 0x8000 | (unsigned char)vp->reg.parm.volrelease;
EMU8000_DCYSUSV_WRITE(hw, vp->ch, dcysusv);
}
/*
*/
static void
terminate_voice(struct snd_emux_voice *vp)
{
struct snd_emu8000 *hw;
hw = vp->hw;
EMU8000_DCYSUSV_WRITE(hw, vp->ch, 0x807F);
}
/*
*/
static void
update_voice(struct snd_emux_voice *vp, int update)
{
struct snd_emu8000 *hw;
hw = vp->hw;
if (update & SNDRV_EMUX_UPDATE_VOLUME)
set_volume(hw, vp);
if (update & SNDRV_EMUX_UPDATE_PITCH)
set_pitch(hw, vp);
if ((update & SNDRV_EMUX_UPDATE_PAN) &&
vp->port->ctrls[EMUX_MD_REALTIME_PAN])
set_pan(hw, vp);
if (update & SNDRV_EMUX_UPDATE_FMMOD)
set_fmmod(hw, vp);
if (update & SNDRV_EMUX_UPDATE_TREMFREQ)
set_tremfreq(hw, vp);
if (update & SNDRV_EMUX_UPDATE_FM2FRQ2)
set_fm2frq2(hw, vp);
if (update & SNDRV_EMUX_UPDATE_Q)
set_filterQ(hw, vp);
}
/*
* Find a channel (voice) within the EMU that is not in use or at least
* less in use than other channels. Always returns a valid pointer
* no matter what. If there is a real shortage of voices then one
* will be cut. Such is life.
*
* The channel index (vp->ch) must be initialized in this routine.
* In Emu8k, it is identical with the array index.
*/
static struct snd_emux_voice *
get_voice(struct snd_emux *emu, struct snd_emux_port *port)
{
int i;
struct snd_emux_voice *vp;
struct snd_emu8000 *hw;
/* what we are looking for, in order of preference */
enum {
OFF=0, RELEASED, PLAYING, END
};
/* Keeps track of what we are finding */
struct best {
unsigned int time;
int voice;
} best[END];
struct best *bp;
hw = emu->hw;
for (i = 0; i < END; i++) {
best[i].time = (unsigned int)(-1); /* XXX UINT_MAX really */
best[i].voice = -1;
}
/*
* Go through them all and get a best one to use.
*/
for (i = 0; i < emu->max_voices; i++) {
int state, val;
vp = &emu->voices[i];
state = vp->state;
if (state == SNDRV_EMUX_ST_OFF)
bp = best + OFF;
else if (state == SNDRV_EMUX_ST_RELEASED ||
state == SNDRV_EMUX_ST_PENDING) {
bp = best + RELEASED;
val = (EMU8000_CVCF_READ(hw, vp->ch) >> 16) & 0xffff;
if (! val)
bp = best + OFF;
}
else if (state & SNDRV_EMUX_ST_ON)
bp = best + PLAYING;
else
continue;
/* check if sample is finished playing (non-looping only) */
if (state != SNDRV_EMUX_ST_OFF &&
(vp->reg.sample_mode & SNDRV_SFNT_SAMPLE_SINGLESHOT)) {
val = EMU8000_CCCA_READ(hw, vp->ch) & 0xffffff;
if (val >= vp->reg.loopstart)
bp = best + OFF;
}
if (vp->time < bp->time) {
bp->time = vp->time;
bp->voice = i;
}
}
for (i = 0; i < END; i++) {
if (best[i].voice >= 0) {
vp = &emu->voices[best[i].voice];
vp->ch = best[i].voice;
return vp;
}
}
/* not found */
return NULL;
}
/*
*/
static int
start_voice(struct snd_emux_voice *vp)
{
unsigned int temp;
int ch;
int addr;
struct snd_midi_channel *chan;
struct snd_emu8000 *hw;
hw = vp->hw;
ch = vp->ch;
chan = vp->chan;
/* channel to be silent and idle */
EMU8000_DCYSUSV_WRITE(hw, ch, 0x0080);
EMU8000_VTFT_WRITE(hw, ch, 0x0000FFFF);
EMU8000_CVCF_WRITE(hw, ch, 0x0000FFFF);
EMU8000_PTRX_WRITE(hw, ch, 0);
EMU8000_CPF_WRITE(hw, ch, 0);
/* set pitch offset */
set_pitch(hw, vp);
/* set envelope parameters */
EMU8000_ENVVAL_WRITE(hw, ch, vp->reg.parm.moddelay);
EMU8000_ATKHLD_WRITE(hw, ch, vp->reg.parm.modatkhld);
EMU8000_DCYSUS_WRITE(hw, ch, vp->reg.parm.moddcysus);
EMU8000_ENVVOL_WRITE(hw, ch, vp->reg.parm.voldelay);
EMU8000_ATKHLDV_WRITE(hw, ch, vp->reg.parm.volatkhld);
/* decay/sustain parameter for volume envelope is used
for triggering the voice */
/* cutoff and volume */
set_volume(hw, vp);
/* modulation envelope heights */
EMU8000_PEFE_WRITE(hw, ch, vp->reg.parm.pefe);
/* lfo1/2 delay */
EMU8000_LFO1VAL_WRITE(hw, ch, vp->reg.parm.lfo1delay);
EMU8000_LFO2VAL_WRITE(hw, ch, vp->reg.parm.lfo2delay);
/* lfo1 pitch & cutoff shift */
set_fmmod(hw, vp);
/* lfo1 volume & freq */
set_tremfreq(hw, vp);
/* lfo2 pitch & freq */
set_fm2frq2(hw, vp);
/* pan & loop start */
set_pan(hw, vp);
/* chorus & loop end (chorus 8bit, MSB) */
addr = vp->reg.loopend - 1;
temp = vp->reg.parm.chorus;
temp += (int)chan->control[MIDI_CTL_E3_CHORUS_DEPTH] * 9 / 10;
LIMITMAX(temp, 255);
temp = (temp <<24) | (unsigned int)addr;
EMU8000_CSL_WRITE(hw, ch, temp);
/* Q & current address (Q 4bit value, MSB) */
addr = vp->reg.start - 1;
temp = vp->reg.parm.filterQ;
temp = (temp<<28) | (unsigned int)addr;
EMU8000_CCCA_WRITE(hw, ch, temp);
/* clear unknown registers */
EMU8000_00A0_WRITE(hw, ch, 0);
EMU8000_0080_WRITE(hw, ch, 0);
/* reset volume */
temp = vp->vtarget << 16;
EMU8000_VTFT_WRITE(hw, ch, temp | vp->ftarget);
EMU8000_CVCF_WRITE(hw, ch, temp | 0xff00);
return 0;
}
/*
* Start envelope
*/
static void
trigger_voice(struct snd_emux_voice *vp)
{
int ch = vp->ch;
unsigned int temp;
struct snd_emu8000 *hw;
hw = vp->hw;
/* set reverb and pitch target */
temp = vp->reg.parm.reverb;
temp += (int)vp->chan->control[MIDI_CTL_E1_REVERB_DEPTH] * 9 / 10;
LIMITMAX(temp, 255);
temp = (temp << 8) | (vp->ptarget << 16) | vp->aaux;
EMU8000_PTRX_WRITE(hw, ch, temp);
EMU8000_CPF_WRITE(hw, ch, vp->ptarget << 16);
EMU8000_DCYSUSV_WRITE(hw, ch, vp->reg.parm.voldcysus);
}
/*
* reset voice parameters
*/
static void
reset_voice(struct snd_emux *emu, int ch)
{
struct snd_emu8000 *hw;
hw = emu->hw;
EMU8000_DCYSUSV_WRITE(hw, ch, 0x807F);
snd_emu8000_tweak_voice(hw, ch);
}
/*
* Set the pitch of a possibly playing note.
*/
static void
set_pitch(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
EMU8000_IP_WRITE(hw, vp->ch, vp->apitch);
}
/*
* Set the volume of a possibly already playing note
*/
static void
set_volume(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
int ifatn;
ifatn = (unsigned char)vp->acutoff;
ifatn = (ifatn << 8);
ifatn |= (unsigned char)vp->avol;
EMU8000_IFATN_WRITE(hw, vp->ch, ifatn);
}
/*
* Set pan and loop start address.
*/
static void
set_pan(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
unsigned int temp;
temp = ((unsigned int)vp->apan<<24) | ((unsigned int)vp->reg.loopstart - 1);
EMU8000_PSST_WRITE(hw, vp->ch, temp);
}
#define MOD_SENSE 18
static void
set_fmmod(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
unsigned short fmmod;
short pitch;
unsigned char cutoff;
int modulation;
pitch = (char)(vp->reg.parm.fmmod>>8);
cutoff = (vp->reg.parm.fmmod & 0xff);
modulation = vp->chan->gm_modulation + vp->chan->midi_pressure;
pitch += (MOD_SENSE * modulation) / 1200;
LIMITVALUE(pitch, -128, 127);
fmmod = ((unsigned char)pitch<<8) | cutoff;
EMU8000_FMMOD_WRITE(hw, vp->ch, fmmod);
}
/* set tremolo (lfo1) volume & frequency */
static void
set_tremfreq(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
EMU8000_TREMFRQ_WRITE(hw, vp->ch, vp->reg.parm.tremfrq);
}
/* set lfo2 pitch & frequency */
static void
set_fm2frq2(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
unsigned short fm2frq2;
short pitch;
unsigned char freq;
int modulation;
pitch = (char)(vp->reg.parm.fm2frq2>>8);
freq = vp->reg.parm.fm2frq2 & 0xff;
modulation = vp->chan->gm_modulation + vp->chan->midi_pressure;
pitch += (MOD_SENSE * modulation) / 1200;
LIMITVALUE(pitch, -128, 127);
fm2frq2 = ((unsigned char)pitch<<8) | freq;
EMU8000_FM2FRQ2_WRITE(hw, vp->ch, fm2frq2);
}
/* set filterQ */
static void
set_filterQ(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
unsigned int addr;
addr = EMU8000_CCCA_READ(hw, vp->ch) & 0xffffff;
addr |= (vp->reg.parm.filterQ << 28);
EMU8000_CCCA_WRITE(hw, vp->ch, addr);
}
/*
* set the envelope & LFO parameters to the default values
*/
static void
snd_emu8000_tweak_voice(struct snd_emu8000 *emu, int i)
{
/* set all mod/vol envelope shape to minimum */
EMU8000_ENVVOL_WRITE(emu, i, 0x8000);
EMU8000_ENVVAL_WRITE(emu, i, 0x8000);
EMU8000_DCYSUS_WRITE(emu, i, 0x7F7F);
EMU8000_ATKHLDV_WRITE(emu, i, 0x7F7F);
EMU8000_ATKHLD_WRITE(emu, i, 0x7F7F);
EMU8000_PEFE_WRITE(emu, i, 0); /* mod envelope height to zero */
EMU8000_LFO1VAL_WRITE(emu, i, 0x8000); /* no delay for LFO1 */
EMU8000_LFO2VAL_WRITE(emu, i, 0x8000);
EMU8000_IP_WRITE(emu, i, 0xE000); /* no pitch shift */
EMU8000_IFATN_WRITE(emu, i, 0xFF00); /* volume to minimum */
EMU8000_FMMOD_WRITE(emu, i, 0);
EMU8000_TREMFRQ_WRITE(emu, i, 0);
EMU8000_FM2FRQ2_WRITE(emu, i, 0);
}
/*
* sysex callback
*/
static void
sysex(struct snd_emux *emu, char *buf, int len, int parsed, struct snd_midi_channel_set *chset)
{
struct snd_emu8000 *hw;
hw = emu->hw;
switch (parsed) {
case SNDRV_MIDI_SYSEX_GS_CHORUS_MODE:
hw->chorus_mode = chset->gs_chorus_mode;
snd_emu8000_update_chorus_mode(hw);
break;
case SNDRV_MIDI_SYSEX_GS_REVERB_MODE:
hw->reverb_mode = chset->gs_reverb_mode;
snd_emu8000_update_reverb_mode(hw);
break;
}
}
#ifdef CONFIG_SND_SEQUENCER_OSS
/*
* OSS ioctl callback
*/
static int
oss_ioctl(struct snd_emux *emu, int cmd, int p1, int p2)
{
struct snd_emu8000 *hw;
hw = emu->hw;
switch (cmd) {
case _EMUX_OSS_REVERB_MODE:
hw->reverb_mode = p1;
snd_emu8000_update_reverb_mode(hw);
break;
case _EMUX_OSS_CHORUS_MODE:
hw->chorus_mode = p1;
snd_emu8000_update_chorus_mode(hw);
break;
case _EMUX_OSS_INITIALIZE_CHIP:
/* snd_emu8000_init(hw); */ /*ignored*/
break;
case _EMUX_OSS_EQUALIZER:
hw->bass_level = p1;
hw->treble_level = p2;
snd_emu8000_update_equalizer(hw);
break;
}
return 0;
}
#endif
/*
* additional patch keys
*/
#define SNDRV_EMU8000_LOAD_CHORUS_FX 0x10 /* optarg=mode */
#define SNDRV_EMU8000_LOAD_REVERB_FX 0x11 /* optarg=mode */
/*
* callback routine
*/
static int
load_fx(struct snd_emux *emu, int type, int mode, const void __user *buf, long len)
{
struct snd_emu8000 *hw;
hw = emu->hw;
/* skip header */
buf += 16;
len -= 16;
switch (type) {
case SNDRV_EMU8000_LOAD_CHORUS_FX:
return snd_emu8000_load_chorus_fx(hw, mode, buf, len);
case SNDRV_EMU8000_LOAD_REVERB_FX:
return snd_emu8000_load_reverb_fx(hw, mode, buf, len);
}
return -EINVAL;
}
| gpl-2.0 |
AngSanley/angsanleykernel-nokia-normandy | drivers/parport/parport_mfc3.c | 4890 | 10988 | /* Low-level parallel port routines for the Multiface 3 card
*
* Author: Joerg Dorchain <joerg@dorchain.net>
*
* (C) The elitist m68k Users(TM)
*
* based on the existing parport_amiga and lp_mfc
*
*
* From the MFC3 documentation:
*
* Miscellaneous PIA Details
* -------------------------
*
* The two open-drain interrupt outputs /IRQA and /IRQB are routed to
* /INT2 of the Z2 bus.
*
* The CPU data bus of the PIA (D0-D7) is connected to D8-D15 on the Z2
* bus. This means that any PIA registers are accessed at even addresses.
*
* Centronics Pin Connections for the PIA
* --------------------------------------
*
* The following table shows the connections between the PIA and the
* Centronics interface connector. These connections implement a single, but
* very complete, Centronics type interface. The Pin column gives the pin
* numbers of the PIA. The Centronics pin numbers can be found in the section
* "Parallel Connectors".
*
*
* Pin | PIA | Dir | Centronics Names
* -------+-----+-----+---------------------------------------------------------
* 19 | CB2 | --> | /STROBE (aka /DRDY)
* 10-17 | PBx | <-> | DATA0 - DATA7
* 18 | CB1 | <-- | /ACK
* 40 | CA1 | <-- | BUSY
* 3 | PA1 | <-- | PAPER-OUT (aka POUT)
* 4 | PA2 | <-- | SELECTED (aka SEL)
* 9 | PA7 | --> | /INIT (aka /RESET or /INPUT-PRIME)
* 6 | PA4 | <-- | /ERROR (aka /FAULT)
* 7 | PA5 | --> | DIR (aka /SELECT-IN)
* 8 | PA6 | --> | /AUTO-FEED-XT
* 39 | CA2 | --> | open
* 5 | PA3 | <-- | /ACK (same as CB1!)
* 2 | PA0 | <-- | BUSY (same as CA1!)
* -------+-----+-----+---------------------------------------------------------
*
* Should be enough to understand some of the driver.
*
* By convention, the port registers are visible during normal use.
* If you need the data direction registers, remember to restore the
* control register afterwards (a helper sketch follows the includes
* below).
*/
#include "multiface.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/mc6821.h>
#include <linux/zorro.h>
#include <linux/interrupt.h>
#include <asm/setup.h>
#include <asm/amigahw.h>
#include <asm/irq.h>
#include <asm/amigaints.h>
/* Maximum Number of Cards supported */
#define MAX_MFC 5
#undef DEBUG
#ifdef DEBUG
#define DPRINTK printk
#else
static inline int DPRINTK(void *nothing, ...) {return 0;}
#endif
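/*
* Helper sketch (not part of the original driver) for the register
* select convention from the header comment: the PIA multiplexes the
* port register and the data direction register behind the PIA_DDR bit
* of the control register (struct pia and PIA_DDR are assumed to come
* from linux/mc6821.h, as included above), so callers toggle the bit
* and restore the default.
*/
static inline void pia_write_ddrb(struct pia *p, unsigned char dir)
{
p->crb &= ~PIA_DDR; /* expose the data direction register */
p->pddrb = dir; /* 1 = pin drives output, 0 = pin is input */
p->crb |= PIA_DDR; /* back to the data register - the default */
}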
static struct parport *this_port[MAX_MFC] = {NULL, };
static volatile int dummy; /* for trigger reads */
#define pia(dev) ((struct pia *)(dev->base))
static struct parport_operations pp_mfc3_ops;
static void mfc3_write_data(struct parport *p, unsigned char data)
{
DPRINTK(KERN_DEBUG "write_data %c\n",data);
dummy = pia(p)->pprb; /* clears irq bit */
/* Triggers also /STROBE.*/
pia(p)->pprb = data;
}
static unsigned char mfc3_read_data(struct parport *p)
{
/* clears interrupt bit. Triggers also /STROBE. */
return pia(p)->pprb;
}
static unsigned char control_pc_to_mfc3(unsigned char control)
{
unsigned char ret = 32|64;
if (control & PARPORT_CONTROL_SELECT) /* XXX: What is SELECP? */
ret &= ~32; /* /SELECT_IN */
if (control & PARPORT_CONTROL_INIT) /* INITP */
ret |= 128;
if (control & PARPORT_CONTROL_AUTOFD) /* AUTOLF */
ret &= ~64;
if (control & PARPORT_CONTROL_STROBE) /* Strobe */
/* Handled directly by hardware */;
return ret;
}
static unsigned char control_mfc3_to_pc(unsigned char control)
{
unsigned char ret = PARPORT_CONTROL_STROBE
| PARPORT_CONTROL_AUTOFD | PARPORT_CONTROL_SELECT;
if (control & 128) /* /INITP */
ret |= PARPORT_CONTROL_INIT;
if (control & 64) /* /AUTOLF */
ret &= ~PARPORT_CONTROL_AUTOFD;
if (control & 32) /* /SELECT_IN */
ret &= ~PARPORT_CONTROL_SELECT;
return ret;
}
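/*
* Worked example (a sketch): the three MFC3 control bits sit at
* 32 (/SELECT_IN), 64 (/AUTO-FEED) and 128 (/INIT), the first two
* active-low. So control_pc_to_mfc3(PARPORT_CONTROL_INIT) = 32|64|128
* = 0xe0, while asserting SELECT and AUTOFD as well clears those bits:
* control_pc_to_mfc3(INIT|SELECT|AUTOFD) = 128. control_mfc3_to_pc()
* is the inverse mapping.
*/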
static void mfc3_write_control(struct parport *p, unsigned char control)
{
DPRINTK(KERN_DEBUG "write_control %02x\n",control);
pia(p)->ppra = (pia(p)->ppra & 0x1f) | control_pc_to_mfc3(control);
}
static unsigned char mfc3_read_control( struct parport *p)
{
DPRINTK(KERN_DEBUG "read_control \n");
return control_mfc3_to_pc(pia(p)->ppra & 0xe0);
}
static unsigned char mfc3_frob_control( struct parport *p, unsigned char mask, unsigned char val)
{
unsigned char old;
DPRINTK(KERN_DEBUG "frob_control mask %02x, value %02x\n",mask,val);
old = mfc3_read_control(p);
mfc3_write_control(p, (old & ~mask) ^ val);
return old;
}
#if 0 /* currently unused */
static unsigned char status_pc_to_mfc3(unsigned char status)
{
unsigned char ret = 1;
if (status & PARPORT_STATUS_BUSY) /* Busy */
ret &= ~1;
if (status & PARPORT_STATUS_ACK) /* Ack */
ret |= 8;
if (status & PARPORT_STATUS_PAPEROUT) /* PaperOut */
ret |= 2;
if (status & PARPORT_STATUS_SELECT) /* select */
ret |= 4;
if (status & PARPORT_STATUS_ERROR) /* error */
ret |= 16;
return ret;
}
#endif
static unsigned char status_mfc3_to_pc(unsigned char status)
{
unsigned char ret = PARPORT_STATUS_BUSY;
if (status & 1) /* Busy */
ret &= ~PARPORT_STATUS_BUSY;
if (status & 2) /* PaperOut */
ret |= PARPORT_STATUS_PAPEROUT;
if (status & 4) /* Selected */
ret |= PARPORT_STATUS_SELECT;
if (status & 8) /* Ack */
ret |= PARPORT_STATUS_ACK;
if (status & 16) /* /ERROR */
ret |= PARPORT_STATUS_ERROR;
return ret;
}
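/*
* Worked example (a sketch): for a raw PIA status of 0x1f (all five
* inputs high), status_mfc3_to_pc() starts from PARPORT_STATUS_BUSY,
* drops it because bit 0 is set, and ORs in PAPEROUT (bit 1), SELECT
* (bit 2), ACK (bit 3) and ERROR (bit 4) - mirroring the PC parport
* convention of an inverted BUSY line.
*/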
#if 0 /* currently unused */
static void mfc3_write_status( struct parport *p, unsigned char status)
{
DPRINTK(KERN_DEBUG "write_status %02x\n",status);
pia(p)->ppra = (pia(p)->ppra & 0xe0) | status_pc_to_mfc3(status);
}
#endif
static unsigned char mfc3_read_status(struct parport *p)
{
unsigned char status;
status = status_mfc3_to_pc(pia(p)->ppra & 0x1f);
DPRINTK(KERN_DEBUG "read_status %02x\n", status);
return status;
}
#if 0 /* currently unused */
static void mfc3_change_mode( struct parport *p, int m)
{
/* XXX: This port only has one mode, and I am
not sure about the corresponding PC-style mode*/
}
#endif
static int use_cnt = 0;
static irqreturn_t mfc3_interrupt(int irq, void *dev_id)
{
int i;
for( i = 0; i < MAX_MFC; i++)
if (this_port[i] != NULL)
if (pia(this_port[i])->crb & 128) { /* Board caused interrupt */
dummy = pia(this_port[i])->pprb; /* clear irq bit */
parport_generic_irq(this_port[i]);
}
return IRQ_HANDLED;
}
static void mfc3_enable_irq(struct parport *p)
{
pia(p)->crb |= PIA_C1_ENABLE_IRQ;
}
static void mfc3_disable_irq(struct parport *p)
{
pia(p)->crb &= ~PIA_C1_ENABLE_IRQ;
}
static void mfc3_data_forward(struct parport *p)
{
DPRINTK(KERN_DEBUG "forward\n");
pia(p)->crb &= ~PIA_DDR; /* make data direction register visible */
pia(p)->pddrb = 255; /* all pins output */
pia(p)->crb |= PIA_DDR; /* make data register visible - default */
}
static void mfc3_data_reverse(struct parport *p)
{
DPRINTK(KERN_DEBUG "reverse\n");
pia(p)->crb &= ~PIA_DDR; /* make data direction register visible */
pia(p)->pddrb = 0; /* all pins input */
pia(p)->crb |= PIA_DDR; /* make data register visible - default */
}
static void mfc3_init_state(struct pardevice *dev, struct parport_state *s)
{
s->u.amiga.data = 0;
s->u.amiga.datadir = 255;
s->u.amiga.status = 0;
s->u.amiga.statusdir = 0xe0;
}
static void mfc3_save_state(struct parport *p, struct parport_state *s)
{
s->u.amiga.data = pia(p)->pprb;
pia(p)->crb &= ~PIA_DDR;
s->u.amiga.datadir = pia(p)->pddrb;
pia(p)->crb |= PIA_DDR;
s->u.amiga.status = pia(p)->ppra;
pia(p)->cra &= ~PIA_DDR;
s->u.amiga.statusdir = pia(p)->pddrb;
pia(p)->cra |= PIA_DDR;
}
static void mfc3_restore_state(struct parport *p, struct parport_state *s)
{
pia(p)->pprb = s->u.amiga.data;
pia(p)->crb &= ~PIA_DDR;
pia(p)->pddrb = s->u.amiga.datadir;
pia(p)->crb |= PIA_DDR;
pia(p)->ppra = s->u.amiga.status;
pia(p)->cra &= ~PIA_DDR;
pia(p)->pddrb = s->u.amiga.statusdir;
pia(p)->cra |= PIA_DDR;
}
static struct parport_operations pp_mfc3_ops = {
.write_data = mfc3_write_data,
.read_data = mfc3_read_data,
.write_control = mfc3_write_control,
.read_control = mfc3_read_control,
.frob_control = mfc3_frob_control,
.read_status = mfc3_read_status,
.enable_irq = mfc3_enable_irq,
.disable_irq = mfc3_disable_irq,
.data_forward = mfc3_data_forward,
.data_reverse = mfc3_data_reverse,
.init_state = mfc3_init_state,
.save_state = mfc3_save_state,
.restore_state = mfc3_restore_state,
.epp_write_data = parport_ieee1284_epp_write_data,
.epp_read_data = parport_ieee1284_epp_read_data,
.epp_write_addr = parport_ieee1284_epp_write_addr,
.epp_read_addr = parport_ieee1284_epp_read_addr,
.ecp_write_data = parport_ieee1284_ecp_write_data,
.ecp_read_data = parport_ieee1284_ecp_read_data,
.ecp_write_addr = parport_ieee1284_ecp_write_addr,
.compat_write_data = parport_ieee1284_write_compat,
.nibble_read_data = parport_ieee1284_read_nibble,
.byte_read_data = parport_ieee1284_read_byte,
.owner = THIS_MODULE,
};
/* ----------- Initialisation code --------------------------------- */
static int __init parport_mfc3_init(void)
{
struct parport *p;
int pias = 0;
struct pia *pp;
struct zorro_dev *z = NULL;
if (!MACH_IS_AMIGA)
return -ENODEV;
while ((z = zorro_find_device(ZORRO_PROD_BSC_MULTIFACE_III, z))) {
unsigned long piabase = z->resource.start+PIABASE;
if (!request_mem_region(piabase, sizeof(struct pia), "PIA"))
continue;
pp = (struct pia *)ZTWO_VADDR(piabase);
pp->crb = 0;
pp->pddrb = 255; /* all data pins output */
pp->crb = PIA_DDR|32|8;
dummy = pp->pddrb; /* reading clears interrupt */
pp->cra = 0;
pp->pddra = 0xe0; /* /RESET, /DIR ,/AUTO-FEED output */
pp->cra = PIA_DDR;
pp->ppra = 0; /* reset printer */
udelay(10);
pp->ppra = 128;
p = parport_register_port((unsigned long)pp, IRQ_AMIGA_PORTS,
PARPORT_DMA_NONE, &pp_mfc3_ops);
if (!p)
goto out_port;
if (p->irq != PARPORT_IRQ_NONE) {
if (use_cnt++ == 0)
if (request_irq(IRQ_AMIGA_PORTS, mfc3_interrupt, IRQF_SHARED, p->name, &pp_mfc3_ops))
goto out_irq;
}
p->dev = &z->dev;
this_port[pias++] = p;
printk(KERN_INFO "%s: Multiface III port using irq\n", p->name);
/* XXX: set operating mode */
p->private_data = (void *)piabase;
parport_announce_port (p);
if (pias >= MAX_MFC)
break;
continue;
out_irq:
parport_put_port(p);
out_port:
release_mem_region(piabase, sizeof(struct pia));
}
return pias ? 0 : -ENODEV;
}
static void __exit parport_mfc3_exit(void)
{
int i;
for (i = 0; i < MAX_MFC; i++) {
if (!this_port[i])
continue;
parport_remove_port(this_port[i]);
if (this_port[i]->irq != PARPORT_IRQ_NONE) {
if (--use_cnt == 0)
free_irq(IRQ_AMIGA_PORTS, &pp_mfc3_ops);
}
release_mem_region(ZTWO_PADDR(this_port[i]->private_data), sizeof(struct pia));
parport_put_port(this_port[i]);
}
}
MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>");
MODULE_DESCRIPTION("Parport driver for the parallel port of Multiface 3 expansion cards");
MODULE_SUPPORTED_DEVICE("Multiface 3 Parallel Port");
MODULE_LICENSE("GPL");
module_init(parport_mfc3_init)
module_exit(parport_mfc3_exit)
| gpl-2.0 |
calixtolinux/calixto-android-jb | net/x25/x25_proc.c | 5402 | 6489 | /*
* X.25 Packet Layer release 002
*
* This is ALPHA test software. This code may break your machine,
* randomly fail to work with new releases, misbehave and/or generally
* screw up. It might even work.
*
* This code REQUIRES 2.4 with seq_file support
*
* This module:
* This module is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* History
* 2002/10/06 Arnaldo Carvalho de Melo seq_file support
*/
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/x25.h>
#ifdef CONFIG_PROC_FS
static void *x25_seq_route_start(struct seq_file *seq, loff_t *pos)
__acquires(x25_route_list_lock)
{
read_lock_bh(&x25_route_list_lock);
return seq_list_start_head(&x25_route_list, *pos);
}
static void *x25_seq_route_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_list_next(v, &x25_route_list, pos);
}
static void x25_seq_route_stop(struct seq_file *seq, void *v)
__releases(x25_route_list_lock)
{
read_unlock_bh(&x25_route_list_lock);
}
static int x25_seq_route_show(struct seq_file *seq, void *v)
{
struct x25_route *rt = list_entry(v, struct x25_route, node);
if (v == &x25_route_list) {
seq_puts(seq, "Address Digits Device\n");
goto out;
}
rt = v;
seq_printf(seq, "%-15s %-6d %-5s\n",
rt->address.x25_addr, rt->sigdigits,
rt->dev ? rt->dev->name : "???");
out:
return 0;
}
static void *x25_seq_socket_start(struct seq_file *seq, loff_t *pos)
__acquires(x25_list_lock)
{
read_lock_bh(&x25_list_lock);
return seq_hlist_start_head(&x25_list, *pos);
}
static void *x25_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_hlist_next(v, &x25_list, pos);
}
static void x25_seq_socket_stop(struct seq_file *seq, void *v)
__releases(x25_list_lock)
{
read_unlock_bh(&x25_list_lock);
}
static int x25_seq_socket_show(struct seq_file *seq, void *v)
{
struct sock *s;
struct x25_sock *x25;
struct net_device *dev;
const char *devname;
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "dest_addr src_addr dev lci st vs vr "
"va t t2 t21 t22 t23 Snd-Q Rcv-Q inode\n");
goto out;
}
s = sk_entry(v);
x25 = x25_sk(s);
if (!x25->neighbour || (dev = x25->neighbour->dev) == NULL)
devname = "???";
else
devname = x25->neighbour->dev->name;
seq_printf(seq, "%-10s %-10s %-5s %3.3X %d %d %d %d %3lu %3lu "
"%3lu %3lu %3lu %5d %5d %ld\n",
!x25->dest_addr.x25_addr[0] ? "*" : x25->dest_addr.x25_addr,
!x25->source_addr.x25_addr[0] ? "*" : x25->source_addr.x25_addr,
devname, x25->lci & 0x0FFF, x25->state, x25->vs, x25->vr,
x25->va, x25_display_timer(s) / HZ, x25->t2 / HZ,
x25->t21 / HZ, x25->t22 / HZ, x25->t23 / HZ,
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
out:
return 0;
}
static void *x25_seq_forward_start(struct seq_file *seq, loff_t *pos)
__acquires(x25_forward_list_lock)
{
read_lock_bh(&x25_forward_list_lock);
return seq_list_start_head(&x25_forward_list, *pos);
}
static void *x25_seq_forward_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_list_next(v, &x25_forward_list, pos);
}
static void x25_seq_forward_stop(struct seq_file *seq, void *v)
__releases(x25_forward_list_lock)
{
read_unlock_bh(&x25_forward_list_lock);
}
static int x25_seq_forward_show(struct seq_file *seq, void *v)
{
struct x25_forward *f = list_entry(v, struct x25_forward, node);
if (v == &x25_forward_list) {
seq_printf(seq, "lci dev1 dev2\n");
goto out;
}
f = v;
seq_printf(seq, "%d %-10s %-10s\n",
f->lci, f->dev1->name, f->dev2->name);
out:
return 0;
}
static const struct seq_operations x25_seq_route_ops = {
.start = x25_seq_route_start,
.next = x25_seq_route_next,
.stop = x25_seq_route_stop,
.show = x25_seq_route_show,
};
static const struct seq_operations x25_seq_socket_ops = {
.start = x25_seq_socket_start,
.next = x25_seq_socket_next,
.stop = x25_seq_socket_stop,
.show = x25_seq_socket_show,
};
static const struct seq_operations x25_seq_forward_ops = {
.start = x25_seq_forward_start,
.next = x25_seq_forward_next,
.stop = x25_seq_forward_stop,
.show = x25_seq_forward_show,
};
static int x25_seq_socket_open(struct inode *inode, struct file *file)
{
return seq_open(file, &x25_seq_socket_ops);
}
static int x25_seq_route_open(struct inode *inode, struct file *file)
{
return seq_open(file, &x25_seq_route_ops);
}
static int x25_seq_forward_open(struct inode *inode, struct file *file)
{
return seq_open(file, &x25_seq_forward_ops);
}
static const struct file_operations x25_seq_socket_fops = {
.owner = THIS_MODULE,
.open = x25_seq_socket_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct file_operations x25_seq_route_fops = {
.owner = THIS_MODULE,
.open = x25_seq_route_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct file_operations x25_seq_forward_fops = {
.owner = THIS_MODULE,
.open = x25_seq_forward_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static struct proc_dir_entry *x25_proc_dir;
int __init x25_proc_init(void)
{
struct proc_dir_entry *p;
int rc = -ENOMEM;
x25_proc_dir = proc_mkdir("x25", init_net.proc_net);
if (!x25_proc_dir)
goto out;
p = proc_create("route", S_IRUGO, x25_proc_dir, &x25_seq_route_fops);
if (!p)
goto out_route;
p = proc_create("socket", S_IRUGO, x25_proc_dir, &x25_seq_socket_fops);
if (!p)
goto out_socket;
p = proc_create("forward", S_IRUGO, x25_proc_dir,
&x25_seq_forward_fops);
if (!p)
goto out_forward;
rc = 0;
out:
return rc;
out_forward:
remove_proc_entry("socket", x25_proc_dir);
out_socket:
remove_proc_entry("route", x25_proc_dir);
out_route:
remove_proc_entry("x25", init_net.proc_net);
goto out;
}
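/*
* Note the unwind order above: each out_* label removes exactly the
* proc entries created before the failing step, in reverse order, and
* then falls through to the common exit via the final goto.
*/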
void __exit x25_proc_exit(void)
{
remove_proc_entry("forward", x25_proc_dir);
remove_proc_entry("route", x25_proc_dir);
remove_proc_entry("socket", x25_proc_dir);
remove_proc_entry("x25", init_net.proc_net);
}
#else /* CONFIG_PROC_FS */
int __init x25_proc_init(void)
{
return 0;
}
void __exit x25_proc_exit(void)
{
}
#endif /* CONFIG_PROC_FS */
| gpl-2.0 |
crpalmer/android_kernel_samsung_mondrianwifi | fs/ocfs2/dlm/dlmlock.c | 7450 | 19940 | /* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* dlmlock.c
*
* underlying calls for lock creation
*
* Copyright (C) 2004 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmconvert.h"
#define MLOG_MASK_PREFIX ML_DLM
#include "cluster/masklog.h"
static struct kmem_cache *dlm_lock_cache = NULL;
static DEFINE_SPINLOCK(dlm_cookie_lock);
static u64 dlm_next_cookie = 1;
static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock, int flags);
static void dlm_init_lock(struct dlm_lock *newlock, int type,
u8 node, u64 cookie);
static void dlm_lock_release(struct kref *kref);
static void dlm_lock_detach_lockres(struct dlm_lock *lock);
int dlm_init_lock_cache(void)
{
dlm_lock_cache = kmem_cache_create("o2dlm_lock",
sizeof(struct dlm_lock),
0, SLAB_HWCACHE_ALIGN, NULL);
if (dlm_lock_cache == NULL)
return -ENOMEM;
return 0;
}
void dlm_destroy_lock_cache(void)
{
if (dlm_lock_cache)
kmem_cache_destroy(dlm_lock_cache);
}
/* Tell us whether we can grant a new lock request.
* locking:
* caller needs: res->spinlock
* taken: none
* held on exit: none
* returns: 1 if the lock can be granted, 0 otherwise.
*/
static int dlm_can_grant_new_lock(struct dlm_lock_resource *res,
struct dlm_lock *lock)
{
struct list_head *iter;
struct dlm_lock *tmplock;
list_for_each(iter, &res->granted) {
tmplock = list_entry(iter, struct dlm_lock, list);
if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
return 0;
}
list_for_each(iter, &res->converting) {
tmplock = list_entry(iter, struct dlm_lock, list);
if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
return 0;
if (!dlm_lock_compatible(tmplock->ml.convert_type,
lock->ml.type))
return 0;
}
return 1;
}
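/*
* For reference: dlm_lock_compatible() implements the classic DLM
* matrix (NL is compatible with everything, PR only with PR/NL, EX
* with nothing but NL) - a summary of the assumed behaviour, not a
* quote of dlmcommon.h. So a new LKM_EXMODE request is only grantable
* here when the granted and converting queues are empty or hold only
* NL locks.
*/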
/* performs lock creation at the lockres master site
* locking:
* caller needs: none
* taken: takes and drops res->spinlock
* held on exit: none
* returns: DLM_NORMAL, DLM_NOTQUEUED
*/
static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock, int flags)
{
int call_ast = 0, kick_thread = 0;
enum dlm_status status = DLM_NORMAL;
mlog(0, "type=%d\n", lock->ml.type);
spin_lock(&res->spinlock);
/* if called from dlm_create_lock_handler, need to
* ensure it will not sleep in dlm_wait_on_lockres */
status = __dlm_lockres_state_to_status(res);
if (status != DLM_NORMAL &&
lock->ml.node != dlm->node_num) {
/* erf. state changed after lock was dropped. */
spin_unlock(&res->spinlock);
dlm_error(status);
return status;
}
__dlm_wait_on_lockres(res);
__dlm_lockres_reserve_ast(res);
if (dlm_can_grant_new_lock(res, lock)) {
mlog(0, "I can grant this lock right away\n");
/* got it right away */
lock->lksb->status = DLM_NORMAL;
status = DLM_NORMAL;
dlm_lock_get(lock);
list_add_tail(&lock->list, &res->granted);
/* for the recovery lock, we can't allow the ast
* to be queued since the dlmthread is already
* frozen. but the recovery lock is always locked
* with LKM_NOQUEUE so we do not need the ast in
* this special case */
if (!dlm_is_recovery_lock(res->lockname.name,
res->lockname.len)) {
kick_thread = 1;
call_ast = 1;
} else {
mlog(0, "%s: returning DLM_NORMAL to "
"node %u for reco lock\n", dlm->name,
lock->ml.node);
}
} else {
/* for NOQUEUE request, unless we get the
* lock right away, return DLM_NOTQUEUED */
if (flags & LKM_NOQUEUE) {
status = DLM_NOTQUEUED;
if (dlm_is_recovery_lock(res->lockname.name,
res->lockname.len)) {
mlog(0, "%s: returning NOTQUEUED to "
"node %u for reco lock\n", dlm->name,
lock->ml.node);
}
} else {
dlm_lock_get(lock);
list_add_tail(&lock->list, &res->blocked);
kick_thread = 1;
}
}
spin_unlock(&res->spinlock);
wake_up(&res->wq);
/* either queue the ast or release it */
if (call_ast)
dlm_queue_ast(dlm, lock);
else
dlm_lockres_release_ast(dlm, res);
dlm_lockres_calc_usage(dlm, res);
if (kick_thread)
dlm_kick_thread(dlm, res);
return status;
}
void dlm_revert_pending_lock(struct dlm_lock_resource *res,
struct dlm_lock *lock)
{
/* remove from local queue if it failed */
list_del_init(&lock->list);
lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
}
/*
* locking:
* caller needs: none
* taken: takes and drops res->spinlock
* held on exit: none
* returns: DLM_DENIED, DLM_RECOVERING, or net status
*/
static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock, int flags)
{
enum dlm_status status = DLM_DENIED;
int lockres_changed = 1;
mlog(0, "type=%d, lockres %.*s, flags = 0x%x\n",
lock->ml.type, res->lockname.len,
res->lockname.name, flags);
/*
* Wait if resource is getting recovered, remastered, etc.
* If the resource was remastered and new owner is self, then exit.
*/
spin_lock(&res->spinlock);
__dlm_wait_on_lockres(res);
if (res->owner == dlm->node_num) {
spin_unlock(&res->spinlock);
return DLM_RECOVERING;
}
res->state |= DLM_LOCK_RES_IN_PROGRESS;
/* add lock to local (secondary) queue */
dlm_lock_get(lock);
list_add_tail(&lock->list, &res->blocked);
lock->lock_pending = 1;
spin_unlock(&res->spinlock);
/* spec seems to say that you will get DLM_NORMAL when the lock
* has been queued, meaning we need to wait for a reply here. */
status = dlm_send_remote_lock_request(dlm, res, lock, flags);
spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
lock->lock_pending = 0;
if (status != DLM_NORMAL) {
if (status == DLM_RECOVERING &&
dlm_is_recovery_lock(res->lockname.name,
res->lockname.len)) {
/* recovery lock was mastered by dead node.
* we need to have calc_usage shoot down this
* lockres and completely remaster it. */
mlog(0, "%s: recovery lock was owned by "
"dead node %u, remaster it now.\n",
dlm->name, res->owner);
} else if (status != DLM_NOTQUEUED) {
/*
* DO NOT call calc_usage, as this would unhash
* the remote lockres before we ever get to use
* it. treat as if we never made any change to
* the lockres.
*/
lockres_changed = 0;
dlm_error(status);
}
dlm_revert_pending_lock(res, lock);
dlm_lock_put(lock);
} else if (dlm_is_recovery_lock(res->lockname.name,
res->lockname.len)) {
/* special case for the $RECOVERY lock.
* there will never be an AST delivered to put
* this lock on the proper secondary queue
* (granted), so do it manually. */
mlog(0, "%s: $RECOVERY lock for this node (%u) is "
"mastered by %u; got lock, manually granting (no ast)\n",
dlm->name, dlm->node_num, res->owner);
list_move_tail(&lock->list, &res->granted);
}
spin_unlock(&res->spinlock);
if (lockres_changed)
dlm_lockres_calc_usage(dlm, res);
wake_up(&res->wq);
return status;
}
/* for remote lock creation.
* locking:
* caller needs: none, but need res->state & DLM_LOCK_RES_IN_PROGRESS
* taken: none
* held on exit: none
* returns: DLM_NOLOCKMGR, or net status
*/
static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
struct dlm_lock *lock, int flags)
{
struct dlm_create_lock create;
int tmpret, status = 0;
enum dlm_status ret;
memset(&create, 0, sizeof(create));
create.node_idx = dlm->node_num;
create.requested_type = lock->ml.type;
create.cookie = lock->ml.cookie;
create.namelen = res->lockname.len;
create.flags = cpu_to_be32(flags);
memcpy(create.name, res->lockname.name, create.namelen);
tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
sizeof(create), res->owner, &status);
if (tmpret >= 0) {
ret = status;
if (ret == DLM_REJECTED) {
mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer "
"owned by node %u. That node is coming back up "
"currently.\n", dlm->name, create.namelen,
create.name, res->owner);
dlm_print_one_lock_resource(res);
BUG();
}
} else {
mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to "
"node %u\n", dlm->name, create.namelen, create.name,
tmpret, res->owner);
if (dlm_is_host_down(tmpret))
ret = DLM_RECOVERING;
else
ret = dlm_err_to_dlm_status(tmpret);
}
return ret;
}
void dlm_lock_get(struct dlm_lock *lock)
{
kref_get(&lock->lock_refs);
}
void dlm_lock_put(struct dlm_lock *lock)
{
kref_put(&lock->lock_refs, dlm_lock_release);
}
static void dlm_lock_release(struct kref *kref)
{
struct dlm_lock *lock;
lock = container_of(kref, struct dlm_lock, lock_refs);
BUG_ON(!list_empty(&lock->list));
BUG_ON(!list_empty(&lock->ast_list));
BUG_ON(!list_empty(&lock->bast_list));
BUG_ON(lock->ast_pending);
BUG_ON(lock->bast_pending);
dlm_lock_detach_lockres(lock);
if (lock->lksb_kernel_allocated) {
mlog(0, "freeing kernel-allocated lksb\n");
kfree(lock->lksb);
}
kmem_cache_free(dlm_lock_cache, lock);
}
/* associate a lock with its lockres, getting a ref on the lockres */
void dlm_lock_attach_lockres(struct dlm_lock *lock,
struct dlm_lock_resource *res)
{
dlm_lockres_get(res);
lock->lockres = res;
}
/* drop ref on lockres, if there is still one associated with lock */
static void dlm_lock_detach_lockres(struct dlm_lock *lock)
{
struct dlm_lock_resource *res;
res = lock->lockres;
if (res) {
lock->lockres = NULL;
mlog(0, "removing lock's lockres reference\n");
dlm_lockres_put(res);
}
}
static void dlm_init_lock(struct dlm_lock *newlock, int type,
u8 node, u64 cookie)
{
INIT_LIST_HEAD(&newlock->list);
INIT_LIST_HEAD(&newlock->ast_list);
INIT_LIST_HEAD(&newlock->bast_list);
spin_lock_init(&newlock->spinlock);
newlock->ml.type = type;
newlock->ml.convert_type = LKM_IVMODE;
newlock->ml.highest_blocked = LKM_IVMODE;
newlock->ml.node = node;
newlock->ml.pad1 = 0;
newlock->ml.list = 0;
newlock->ml.flags = 0;
newlock->ast = NULL;
newlock->bast = NULL;
newlock->astdata = NULL;
newlock->ml.cookie = cpu_to_be64(cookie);
newlock->ast_pending = 0;
newlock->bast_pending = 0;
newlock->convert_pending = 0;
newlock->lock_pending = 0;
newlock->unlock_pending = 0;
newlock->cancel_pending = 0;
newlock->lksb_kernel_allocated = 0;
kref_init(&newlock->lock_refs);
}
struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
struct dlm_lockstatus *lksb)
{
struct dlm_lock *lock;
int kernel_allocated = 0;
lock = kmem_cache_zalloc(dlm_lock_cache, GFP_NOFS);
if (!lock)
return NULL;
if (!lksb) {
/* zero memory only if kernel-allocated */
lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
if (!lksb) {
kmem_cache_free(dlm_lock_cache, lock);
return NULL;
}
kernel_allocated = 1;
}
dlm_init_lock(lock, type, node, cookie);
if (kernel_allocated)
lock->lksb_kernel_allocated = 1;
lock->lksb = lksb;
lksb->lockid = lock;
return lock;
}
/* handler for lock creation net message
* locking:
* caller needs: none
* taken: takes and drops res->spinlock
* held on exit: none
* returns: DLM_NORMAL, DLM_SYSERR, DLM_IVLOCKID, DLM_NOTQUEUED
*/
int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct dlm_ctxt *dlm = data;
struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf;
struct dlm_lock_resource *res = NULL;
struct dlm_lock *newlock = NULL;
struct dlm_lockstatus *lksb = NULL;
enum dlm_status status = DLM_NORMAL;
char *name;
unsigned int namelen;
BUG_ON(!dlm);
if (!dlm_grab(dlm))
return DLM_REJECTED;
name = create->name;
namelen = create->namelen;
status = DLM_REJECTED;
if (!dlm_domain_fully_joined(dlm)) {
mlog(ML_ERROR, "Domain %s not fully joined, but node %u is "
"sending a create_lock message for lock %.*s!\n",
dlm->name, create->node_idx, namelen, name);
dlm_error(status);
goto leave;
}
status = DLM_IVBUFLEN;
if (namelen > DLM_LOCKID_NAME_MAX) {
dlm_error(status);
goto leave;
}
status = DLM_SYSERR;
newlock = dlm_new_lock(create->requested_type,
create->node_idx,
be64_to_cpu(create->cookie), NULL);
if (!newlock) {
dlm_error(status);
goto leave;
}
lksb = newlock->lksb;
if (be32_to_cpu(create->flags) & LKM_GET_LVB) {
lksb->flags |= DLM_LKSB_GET_LVB;
mlog(0, "set DLM_LKSB_GET_LVB flag\n");
}
status = DLM_IVLOCKID;
res = dlm_lookup_lockres(dlm, name, namelen);
if (!res) {
dlm_error(status);
goto leave;
}
spin_lock(&res->spinlock);
status = __dlm_lockres_state_to_status(res);
spin_unlock(&res->spinlock);
if (status != DLM_NORMAL) {
mlog(0, "lockres recovering/migrating/in-progress\n");
goto leave;
}
dlm_lock_attach_lockres(newlock, res);
status = dlmlock_master(dlm, res, newlock, be32_to_cpu(create->flags));
leave:
if (status != DLM_NORMAL)
if (newlock)
dlm_lock_put(newlock);
if (res)
dlm_lockres_put(res);
dlm_put(dlm);
return status;
}
/* fetch next node-local (u8 nodenum + u56 cookie) into u64 */
static inline void dlm_get_next_cookie(u8 node_num, u64 *cookie)
{
u64 tmpnode = node_num;
/* shift single byte of node num into top 8 bits */
tmpnode <<= 56;
spin_lock(&dlm_cookie_lock);
*cookie = (dlm_next_cookie | tmpnode);
if (++dlm_next_cookie & 0xff00000000000000ull) {
mlog(0, "This node's cookie will now wrap!\n");
dlm_next_cookie = 1;
}
spin_unlock(&dlm_cookie_lock);
}
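/*
* Worked example (a sketch): on node 3 with dlm_next_cookie == 0x2a,
* the caller receives *cookie == 0x030000000000002a and the counter
* advances to 0x2b. Once the low 56 bits would carry into the node
* byte (the 0xff00000000000000 test above), the counter wraps back to
* 1 instead of corrupting the node number.
*/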
enum dlm_status dlmlock(struct dlm_ctxt *dlm, int mode,
struct dlm_lockstatus *lksb, int flags,
const char *name, int namelen, dlm_astlockfunc_t *ast,
void *data, dlm_bastlockfunc_t *bast)
{
enum dlm_status status;
struct dlm_lock_resource *res = NULL;
struct dlm_lock *lock = NULL;
int convert = 0, recovery = 0;
/* yes this function is a mess.
* TODO: clean this up. lots of common code in the
* lock and convert paths, especially in the retry blocks */
if (!lksb) {
dlm_error(DLM_BADARGS);
return DLM_BADARGS;
}
status = DLM_BADPARAM;
if (mode != LKM_EXMODE && mode != LKM_PRMODE && mode != LKM_NLMODE) {
dlm_error(status);
goto error;
}
if (flags & ~LKM_VALID_FLAGS) {
dlm_error(status);
goto error;
}
convert = (flags & LKM_CONVERT);
recovery = (flags & LKM_RECOVERY);
if (recovery &&
(!dlm_is_recovery_lock(name, namelen) || convert) ) {
dlm_error(status);
goto error;
}
if (convert && (flags & LKM_LOCAL)) {
mlog(ML_ERROR, "strange LOCAL convert request!\n");
goto error;
}
if (convert) {
/* CONVERT request */
/* if converting, must pass in a valid dlm_lock */
lock = lksb->lockid;
if (!lock) {
mlog(ML_ERROR, "NULL lock pointer in convert "
"request\n");
goto error;
}
res = lock->lockres;
if (!res) {
mlog(ML_ERROR, "NULL lockres pointer in convert "
"request\n");
goto error;
}
dlm_lockres_get(res);
/* XXX: for ocfs2 purposes, the ast/bast/astdata/lksb are
* static after the original lock call. convert requests will
* ensure that everything is the same, or return DLM_BADARGS.
* this means that DLM_DENIED_NOASTS will never be returned.
*/
if (lock->lksb != lksb || lock->ast != ast ||
lock->bast != bast || lock->astdata != data) {
status = DLM_BADARGS;
mlog(ML_ERROR, "new args: lksb=%p, ast=%p, bast=%p, "
"astdata=%p\n", lksb, ast, bast, data);
mlog(ML_ERROR, "orig args: lksb=%p, ast=%p, bast=%p, "
"astdata=%p\n", lock->lksb, lock->ast,
lock->bast, lock->astdata);
goto error;
}
retry_convert:
dlm_wait_for_recovery(dlm);
if (res->owner == dlm->node_num)
status = dlmconvert_master(dlm, res, lock, flags, mode);
else
status = dlmconvert_remote(dlm, res, lock, flags, mode);
if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
status == DLM_FORWARD) {
/* for now, see how this works without sleeping
* and just retry right away. I suspect the reco
* or migration will complete fast enough that
* no waiting will be necessary */
mlog(0, "retrying convert with migration/recovery/"
"in-progress\n");
msleep(100);
goto retry_convert;
}
} else {
u64 tmpcookie;
/* LOCK request */
status = DLM_BADARGS;
if (!name) {
dlm_error(status);
goto error;
}
status = DLM_IVBUFLEN;
if (namelen > DLM_LOCKID_NAME_MAX || namelen < 1) {
dlm_error(status);
goto error;
}
dlm_get_next_cookie(dlm->node_num, &tmpcookie);
lock = dlm_new_lock(mode, dlm->node_num, tmpcookie, lksb);
if (!lock) {
dlm_error(status);
goto error;
}
if (!recovery)
dlm_wait_for_recovery(dlm);
/* find or create the lock resource */
res = dlm_get_lock_resource(dlm, name, namelen, flags);
if (!res) {
status = DLM_IVLOCKID;
dlm_error(status);
goto error;
}
mlog(0, "type=%d, flags = 0x%x\n", mode, flags);
mlog(0, "creating lock: lock=%p res=%p\n", lock, res);
dlm_lock_attach_lockres(lock, res);
lock->ast = ast;
lock->bast = bast;
lock->astdata = data;
retry_lock:
if (flags & LKM_VALBLK) {
mlog(0, "LKM_VALBLK passed by caller\n");
/* LVB requests for non PR, PW or EX locks are
* ignored. */
if (mode < LKM_PRMODE)
flags &= ~LKM_VALBLK;
else {
flags |= LKM_GET_LVB;
lock->lksb->flags |= DLM_LKSB_GET_LVB;
}
}
if (res->owner == dlm->node_num)
status = dlmlock_master(dlm, res, lock, flags);
else
status = dlmlock_remote(dlm, res, lock, flags);
if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
status == DLM_FORWARD) {
msleep(100);
if (recovery) {
if (status != DLM_RECOVERING)
goto retry_lock;
/* wait to see the node go down, then
* drop down and allow the lockres to
* get cleaned up. need to remaster. */
dlm_wait_for_node_death(dlm, res->owner,
DLM_NODE_DEATH_WAIT_MAX);
} else {
dlm_wait_for_recovery(dlm);
goto retry_lock;
}
}
/* Inflight taken in dlm_get_lock_resource() is dropped here */
spin_lock(&res->spinlock);
dlm_lockres_drop_inflight_ref(dlm, res);
spin_unlock(&res->spinlock);
dlm_lockres_calc_usage(dlm, res);
dlm_kick_thread(dlm, res);
if (status != DLM_NORMAL) {
lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
if (status != DLM_NOTQUEUED)
dlm_error(status);
goto error;
}
}
error:
if (status != DLM_NORMAL) {
if (lock && !convert)
dlm_lock_put(lock);
// this is kind of unnecessary
lksb->status = status;
}
/* put lockres ref from the convert path
* or from dlm_get_lock_resource */
if (res)
dlm_lockres_put(res);
return status;
}
EXPORT_SYMBOL_GPL(dlmlock);
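/*
* Usage sketch (the callback typedefs are taken on trust from
* dlmapi.h and the my_* names are hypothetical): a caller typically
* issues
*
* status = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE,
* name, namelen, my_ast, my_data, my_bast);
*
* and learns the outcome through the ast callback, with lksb.status
* holding the final dlm status.
*/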
| gpl-2.0 |
TEAM-Gummy/android_kernel_samsung_hlte | arch/arm/mach-w90x900/mfp.c | 9754 | 4272 | /*
* linux/arch/arm/mach-w90x900/mfp.c
*
* Copyright (c) 2008 Nuvoton technology corporation
*
* Wan ZongShun <mcuos.com@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation;version 2 of the License.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <mach/hardware.h>
#define REG_MFSEL (W90X900_VA_GCR + 0xC)
#define GPSELF (0x01 << 1)
#define GPSELC (0x03 << 2)
#define GPSELD (0x0f << 4)
#define GPSELEI0 (0x01 << 26)
#define GPSELEI1 (0x01 << 27)
#define GPIOG0TO1 (0x03 << 14)
#define GPIOG2TO3 (0x03 << 16)
#define GPIOG22TO23 (0x03 << 22)
#define GPIOG18TO20 (0x07 << 18)
#define ENSPI (0x0a << 14)
#define ENI2C0 (0x01 << 14)
#define ENI2C1 (0x01 << 16)
#define ENAC97 (0x02 << 22)
#define ENSD1 (0x02 << 18)
#define ENSD0 (0x0a << 4)
#define ENKPI (0x02 << 2)
#define ENNAND (0x01 << 2)
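/*
* The GPSELx macros above are field masks within REG_MFSEL and the
* ENxxx macros are function-select values for those same fields. A
* sketch of the read-modify-write idiom the helpers below use, e.g.
* routing the group D pins to SD0:
*
* mfpen = __raw_readl(REG_MFSEL);
* mfpen = (mfpen & ~GPSELD) | ENSD0;
* __raw_writel(mfpen, REG_MFSEL);
*/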
static DEFINE_MUTEX(mfp_mutex);
void mfp_set_groupf(struct device *dev)
{
unsigned long mfpen;
const char *dev_id;
BUG_ON(!dev);
mutex_lock(&mfp_mutex);
dev_id = dev_name(dev);
mfpen = __raw_readl(REG_MFSEL);
if (strcmp(dev_id, "nuc900-emc") == 0)
mfpen |= GPSELF;/*enable mac*/
else
mfpen &= ~GPSELF;/*GPIOF[9:0]*/
__raw_writel(mfpen, REG_MFSEL);
mutex_unlock(&mfp_mutex);
}
EXPORT_SYMBOL(mfp_set_groupf);
void mfp_set_groupc(struct device *dev)
{
unsigned long mfpen;
const char *dev_id;
BUG_ON(!dev);
mutex_lock(&mfp_mutex);
dev_id = dev_name(dev);
mfpen = __raw_readl(REG_MFSEL);
if (strcmp(dev_id, "nuc900-lcd") == 0)
mfpen |= GPSELC;/*enable lcd*/
else if (strcmp(dev_id, "nuc900-kpi") == 0) {
mfpen &= (~GPSELC);/*enable kpi*/
mfpen |= ENKPI;
} else if (strcmp(dev_id, "nuc900-nand") == 0) {
mfpen &= (~GPSELC);/*enable nand*/
mfpen |= ENNAND;
} else
mfpen &= (~GPSELC);/*GPIOC[14:0]*/
__raw_writel(mfpen, REG_MFSEL);
mutex_unlock(&mfp_mutex);
}
EXPORT_SYMBOL(mfp_set_groupc);
void mfp_set_groupi(struct device *dev)
{
unsigned long mfpen;
const char *dev_id;
BUG_ON(!dev);
mutex_lock(&mfp_mutex);
dev_id = dev_name(dev);
mfpen = __raw_readl(REG_MFSEL);
mfpen &= ~GPSELEI1;/*default gpio16*/
if (strcmp(dev_id, "nuc900-wdog") == 0)
mfpen |= GPSELEI1;/*enable wdog*/
else if (strcmp(dev_id, "nuc900-atapi") == 0)
mfpen |= GPSELEI0;/*enable atapi*/
else if (strcmp(dev_id, "nuc900-keypad") == 0)
mfpen &= ~GPSELEI0;/*enable keypad*/
__raw_writel(mfpen, REG_MFSEL);
mutex_unlock(&mfp_mutex);
}
EXPORT_SYMBOL(mfp_set_groupi);
void mfp_set_groupg(struct device *dev, const char *subname)
{
unsigned long mfpen;
const char *dev_id;
BUG_ON((!dev) && (!subname));
mutex_lock(&mfp_mutex);
if (subname != NULL)
dev_id = subname;
else
dev_id = dev_name(dev);
mfpen = __raw_readl(REG_MFSEL);
if (strcmp(dev_id, "nuc900-spi") == 0) {
mfpen &= ~(GPIOG0TO1 | GPIOG2TO3);
mfpen |= ENSPI;/*enable spi*/
} else if (strcmp(dev_id, "nuc900-i2c0") == 0) {
mfpen &= ~(GPIOG0TO1);
mfpen |= ENI2C0;/*enable i2c0*/
} else if (strcmp(dev_id, "nuc900-i2c1") == 0) {
mfpen &= ~(GPIOG2TO3);
mfpen |= ENI2C1;/*enable i2c1*/
} else if (strcmp(dev_id, "nuc900-ac97") == 0) {
mfpen &= ~(GPIOG22TO23);
mfpen |= ENAC97;/*enable AC97*/
} else if (strcmp(dev_id, "nuc900-mmc-port1") == 0) {
mfpen &= ~(GPIOG18TO20);
mfpen |= (ENSD1 | 0x01);/*enable sd1*/
} else {
mfpen &= ~(GPIOG0TO1 | GPIOG2TO3);/*GPIOG[3:0]*/
}
__raw_writel(mfpen, REG_MFSEL);
mutex_unlock(&mfp_mutex);
}
EXPORT_SYMBOL(mfp_set_groupg);
void mfp_set_groupd(struct device *dev, const char *subname)
{
unsigned long mfpen;
const char *dev_id;
BUG_ON((!dev) && (!subname));
mutex_lock(&mfp_mutex);
if (subname != NULL)
dev_id = subname;
else
dev_id = dev_name(dev);
mfpen = __raw_readl(REG_MFSEL);
if (strcmp(dev_id, "nuc900-mmc-port0") == 0) {
mfpen &= ~GPSELD;/*enable sd0*/
mfpen |= ENSD0;
} else
mfpen &= (~GPSELD);
__raw_writel(mfpen, REG_MFSEL);
mutex_unlock(&mfp_mutex);
}
EXPORT_SYMBOL(mfp_set_groupd);
| gpl-2.0 |
linino/kernel_3.3.8 | drivers/net/wireless/wl1251/init.c | 11034 | 9169 | /*
* This file is part of wl1251
*
* Copyright (C) 2009 Nokia Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "init.h"
#include "wl12xx_80211.h"
#include "acx.h"
#include "cmd.h"
#include "reg.h"
int wl1251_hw_init_hwenc_config(struct wl1251 *wl)
{
int ret;
ret = wl1251_acx_feature_cfg(wl);
if (ret < 0) {
wl1251_warning("couldn't set feature config");
return ret;
}
ret = wl1251_acx_default_key(wl, wl->default_key);
if (ret < 0) {
wl1251_warning("couldn't set default key");
return ret;
}
return 0;
}
int wl1251_hw_init_templates_config(struct wl1251 *wl)
{
int ret;
u8 partial_vbm[PARTIAL_VBM_MAX];
/* send empty templates for fw memory reservation */
ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, NULL,
sizeof(struct wl12xx_probe_req_template));
if (ret < 0)
return ret;
ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA, NULL,
sizeof(struct wl12xx_null_data_template));
if (ret < 0)
return ret;
ret = wl1251_cmd_template_set(wl, CMD_PS_POLL, NULL,
sizeof(struct wl12xx_ps_poll_template));
if (ret < 0)
return ret;
ret = wl1251_cmd_template_set(wl, CMD_QOS_NULL_DATA, NULL,
sizeof
(struct wl12xx_qos_null_data_template));
if (ret < 0)
return ret;
ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, NULL,
sizeof
(struct wl12xx_probe_resp_template));
if (ret < 0)
return ret;
ret = wl1251_cmd_template_set(wl, CMD_BEACON, NULL,
sizeof
(struct wl12xx_beacon_template));
if (ret < 0)
return ret;
/* tim templates, first reserve space then allocate an empty one */
memset(partial_vbm, 0, PARTIAL_VBM_MAX);
ret = wl1251_cmd_vbm(wl, TIM_ELE_ID, partial_vbm, PARTIAL_VBM_MAX, 0);
if (ret < 0)
return ret;
ret = wl1251_cmd_vbm(wl, TIM_ELE_ID, partial_vbm, 1, 0);
if (ret < 0)
return ret;
return 0;
}
int wl1251_hw_init_rx_config(struct wl1251 *wl, u32 config, u32 filter)
{
int ret;
ret = wl1251_acx_rx_msdu_life_time(wl, RX_MSDU_LIFETIME_DEF);
if (ret < 0)
return ret;
ret = wl1251_acx_rx_config(wl, config, filter);
if (ret < 0)
return ret;
return 0;
}
int wl1251_hw_init_phy_config(struct wl1251 *wl)
{
int ret;
ret = wl1251_acx_pd_threshold(wl);
if (ret < 0)
return ret;
ret = wl1251_acx_slot(wl, DEFAULT_SLOT_TIME);
if (ret < 0)
return ret;
ret = wl1251_acx_group_address_tbl(wl);
if (ret < 0)
return ret;
ret = wl1251_acx_service_period_timeout(wl);
if (ret < 0)
return ret;
ret = wl1251_acx_rts_threshold(wl, RTS_THRESHOLD_DEF);
if (ret < 0)
return ret;
return 0;
}
int wl1251_hw_init_beacon_filter(struct wl1251 *wl)
{
int ret;
/* disable beacon filtering at this stage */
ret = wl1251_acx_beacon_filter_opt(wl, false);
if (ret < 0)
return ret;
ret = wl1251_acx_beacon_filter_table(wl);
if (ret < 0)
return ret;
return 0;
}
int wl1251_hw_init_pta(struct wl1251 *wl)
{
int ret;
ret = wl1251_acx_sg_enable(wl);
if (ret < 0)
return ret;
ret = wl1251_acx_sg_cfg(wl);
if (ret < 0)
return ret;
return 0;
}
int wl1251_hw_init_energy_detection(struct wl1251 *wl)
{
int ret;
ret = wl1251_acx_cca_threshold(wl);
if (ret < 0)
return ret;
return 0;
}
int wl1251_hw_init_beacon_broadcast(struct wl1251 *wl)
{
int ret;
ret = wl1251_acx_bcn_dtim_options(wl);
if (ret < 0)
return ret;
return 0;
}
int wl1251_hw_init_power_auth(struct wl1251 *wl)
{
return wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM);
}
int wl1251_hw_init_mem_config(struct wl1251 *wl)
{
int ret;
ret = wl1251_acx_mem_cfg(wl);
if (ret < 0)
return ret;
wl->target_mem_map = kzalloc(sizeof(struct wl1251_acx_mem_map),
GFP_KERNEL);
if (!wl->target_mem_map) {
wl1251_error("couldn't allocate target memory map");
return -ENOMEM;
}
/* we now ask for the firmware-built memory map */
ret = wl1251_acx_mem_map(wl, wl->target_mem_map,
sizeof(struct wl1251_acx_mem_map));
if (ret < 0) {
wl1251_error("couldn't retrieve firmware memory map");
kfree(wl->target_mem_map);
wl->target_mem_map = NULL;
return ret;
}
return 0;
}
static int wl1251_hw_init_txq_fill(u8 qid,
struct acx_tx_queue_qos_config *config,
u32 num_blocks)
{
config->qid = qid;
switch (qid) {
case QOS_AC_BE:
config->high_threshold =
(QOS_TX_HIGH_BE_DEF * num_blocks) / 100;
config->low_threshold =
(QOS_TX_LOW_BE_DEF * num_blocks) / 100;
break;
case QOS_AC_BK:
config->high_threshold =
(QOS_TX_HIGH_BK_DEF * num_blocks) / 100;
config->low_threshold =
(QOS_TX_LOW_BK_DEF * num_blocks) / 100;
break;
case QOS_AC_VI:
config->high_threshold =
(QOS_TX_HIGH_VI_DEF * num_blocks) / 100;
config->low_threshold =
(QOS_TX_LOW_VI_DEF * num_blocks) / 100;
break;
case QOS_AC_VO:
config->high_threshold =
(QOS_TX_HIGH_VO_DEF * num_blocks) / 100;
config->low_threshold =
(QOS_TX_LOW_VO_DEF * num_blocks) / 100;
break;
default:
wl1251_error("Invalid TX queue id: %d", qid);
return -EINVAL;
}
return 0;
}
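/*
* Worked example (a sketch - the real QOS_TX_*_DEF values live in the
* acx headers; the figure here is made up for illustration): with
* num_blocks = 40 and a hypothetical QOS_TX_HIGH_BE_DEF of 60, the
* best-effort queue gets high_threshold = (60 * 40) / 100 = 24 blocks,
* i.e. the defaults are percentages of the firmware TX block pool.
*/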
static int wl1251_hw_init_tx_queue_config(struct wl1251 *wl)
{
struct acx_tx_queue_qos_config *config;
struct wl1251_acx_mem_map *wl_mem_map = wl->target_mem_map;
int ret, i;
wl1251_debug(DEBUG_ACX, "acx tx queue config");
config = kzalloc(sizeof(*config), GFP_KERNEL);
if (!config) {
ret = -ENOMEM;
goto out;
}
for (i = 0; i < MAX_NUM_OF_AC; i++) {
ret = wl1251_hw_init_txq_fill(i, config,
wl_mem_map->num_tx_mem_blocks);
if (ret < 0)
goto out;
ret = wl1251_cmd_configure(wl, ACX_TX_QUEUE_CFG,
config, sizeof(*config));
if (ret < 0)
goto out;
}
wl1251_acx_ac_cfg(wl, AC_BE, CWMIN_BE, CWMAX_BE, AIFS_DIFS, TXOP_BE);
wl1251_acx_ac_cfg(wl, AC_BK, CWMIN_BK, CWMAX_BK, AIFS_DIFS, TXOP_BK);
wl1251_acx_ac_cfg(wl, AC_VI, CWMIN_VI, CWMAX_VI, AIFS_DIFS, TXOP_VI);
wl1251_acx_ac_cfg(wl, AC_VO, CWMIN_VO, CWMAX_VO, AIFS_DIFS, TXOP_VO);
out:
kfree(config);
return ret;
}
static int wl1251_hw_init_data_path_config(struct wl1251 *wl)
{
int ret;
/* asking for the data path parameters */
wl->data_path = kzalloc(sizeof(struct acx_data_path_params_resp),
GFP_KERNEL);
if (!wl->data_path) {
wl1251_error("Couldnt allocate data path parameters");
return -ENOMEM;
}
ret = wl1251_acx_data_path_params(wl, wl->data_path);
if (ret < 0) {
kfree(wl->data_path);
wl->data_path = NULL;
return ret;
}
return 0;
}
int wl1251_hw_init(struct wl1251 *wl)
{
struct wl1251_acx_mem_map *wl_mem_map;
int ret;
ret = wl1251_hw_init_hwenc_config(wl);
if (ret < 0)
return ret;
/* Template settings */
ret = wl1251_hw_init_templates_config(wl);
if (ret < 0)
return ret;
/* Default memory configuration */
ret = wl1251_hw_init_mem_config(wl);
if (ret < 0)
return ret;
/* Default data path configuration */
ret = wl1251_hw_init_data_path_config(wl);
if (ret < 0)
goto out_free_memmap;
/* RX config */
ret = wl1251_hw_init_rx_config(wl,
RX_CFG_PROMISCUOUS | RX_CFG_TSF,
RX_FILTER_OPTION_DEF);
/* RX_CONFIG_OPTION_ANY_DST_ANY_BSS,
RX_FILTER_OPTION_FILTER_ALL); */
if (ret < 0)
goto out_free_data_path;
/* TX queues config */
ret = wl1251_hw_init_tx_queue_config(wl);
if (ret < 0)
goto out_free_data_path;
/* PHY layer config */
ret = wl1251_hw_init_phy_config(wl);
if (ret < 0)
goto out_free_data_path;
/* Initialize connection monitoring thresholds */
ret = wl1251_acx_conn_monit_params(wl);
if (ret < 0)
goto out_free_data_path;
/* Beacon filtering */
ret = wl1251_hw_init_beacon_filter(wl);
if (ret < 0)
goto out_free_data_path;
/* Bluetooth WLAN coexistence */
ret = wl1251_hw_init_pta(wl);
if (ret < 0)
goto out_free_data_path;
/* Energy detection */
ret = wl1251_hw_init_energy_detection(wl);
if (ret < 0)
goto out_free_data_path;
/* Beacon and broadcast settings */
ret = wl1251_hw_init_beacon_broadcast(wl);
if (ret < 0)
goto out_free_data_path;
/* Enable data path */
ret = wl1251_cmd_data_path(wl, wl->channel, 1);
if (ret < 0)
goto out_free_data_path;
/* Default power state */
ret = wl1251_hw_init_power_auth(wl);
if (ret < 0)
goto out_free_data_path;
wl_mem_map = wl->target_mem_map;
wl1251_info("%d tx blocks at 0x%x, %d rx blocks at 0x%x",
wl_mem_map->num_tx_mem_blocks,
wl->data_path->tx_control_addr,
wl_mem_map->num_rx_mem_blocks,
wl->data_path->rx_control_addr);
return 0;
out_free_data_path:
kfree(wl->data_path);
out_free_memmap:
kfree(wl->target_mem_map);
return ret;
}
| gpl-2.0 |
MSM8226-Samsung/android_kernel_samsung_ms013g | drivers/infiniband/hw/ipath/ipath_ruc.c | 13850 | 19908 | /*
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/sched.h>
#include <linux/spinlock.h>
#include "ipath_verbs.h"
#include "ipath_kernel.h"
/*
* Convert the AETH RNR timeout code into the number of milliseconds.
*/
const u32 ib_ipath_rnr_table[32] = {
656, /* 0 */
1, /* 1 */
1, /* 2 */
1, /* 3 */
1, /* 4 */
1, /* 5 */
1, /* 6 */
1, /* 7 */
1, /* 8 */
1, /* 9 */
1, /* A */
1, /* B */
1, /* C */
1, /* D */
2, /* E */
2, /* F */
3, /* 10 */
4, /* 11 */
6, /* 12 */
8, /* 13 */
11, /* 14 */
16, /* 15 */
21, /* 16 */
31, /* 17 */
41, /* 18 */
62, /* 19 */
82, /* 1A */
123, /* 1B */
164, /* 1C */
246, /* 1D */
328, /* 1E */
492 /* 1F */
};
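/*
* Usage sketch (the field extraction is an assumption, not quoted from
* the RC code): the 5-bit RNR NAK timer value carried in an AETH
* indexes this table directly, e.g.
*
* qp->s_rnr_timeout = ib_ipath_rnr_table[aeth_timer & 0x1f];
*/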
/**
* ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
* @qp: the QP
*
* Called with the QP s_lock held and interrupts disabled.
* XXX Use a simple list for now. We might need a priority
* queue if we have lots of QPs waiting for RNR timeouts
* but that should be rare.
*/
void ipath_insert_rnr_queue(struct ipath_qp *qp)
{
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
/* We already did a spin_lock_irqsave(), so just use spin_lock */
spin_lock(&dev->pending_lock);
if (list_empty(&dev->rnrwait))
list_add(&qp->timerwait, &dev->rnrwait);
else {
struct list_head *l = &dev->rnrwait;
struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
timerwait);
while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
qp->s_rnr_timeout -= nqp->s_rnr_timeout;
l = l->next;
if (l->next == &dev->rnrwait) {
nqp = NULL;
break;
}
nqp = list_entry(l->next, struct ipath_qp,
timerwait);
}
if (nqp)
nqp->s_rnr_timeout -= qp->s_rnr_timeout;
list_add(&qp->timerwait, l);
}
spin_unlock(&dev->pending_lock);
}
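/*
* The rnrwait list stores each timeout relative to its predecessor, so
* expiring the head entry only decrements a single counter. A sketch:
* absolute timeouts {3, 5} are stored as {3, 2}; inserting a QP with
* timeout 4 walks past the first entry (4 - 3 = 1), stops before the
* second (1 < 2), shrinks it to 1, and links in between - giving
* {3, 1, 1}, i.e. absolute {3, 4, 5}.
*/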
/**
* ipath_init_sge - Validate a RWQE and fill in the SGE state
* @qp: the QP
*
* Return 1 if OK.
*/
int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
u32 *lengthp, struct ipath_sge_state *ss)
{
int i, j, ret;
struct ib_wc wc;
*lengthp = 0;
for (i = j = 0; i < wqe->num_sge; i++) {
if (wqe->sg_list[i].length == 0)
continue;
/* Check LKEY */
if (!ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge,
&wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
goto bad_lkey;
*lengthp += wqe->sg_list[i].length;
j++;
}
ss->num_sge = j;
ret = 1;
goto bail;
bad_lkey:
memset(&wc, 0, sizeof(wc));
wc.wr_id = wqe->wr_id;
wc.status = IB_WC_LOC_PROT_ERR;
wc.opcode = IB_WC_RECV;
wc.qp = &qp->ibqp;
/* Signal solicited completion event. */
ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
ret = 0;
bail:
return ret;
}
/**
* ipath_get_rwqe - copy the next RWQE into the QP's RWQE
* @qp: the QP
* @wr_id_only: update qp->r_wr_id only, not qp->r_sge
*
* Return 0 if no RWQE is available, otherwise return 1.
*
* Can be called from interrupt level.
*/
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
{
unsigned long flags;
struct ipath_rq *rq;
struct ipath_rwq *wq;
struct ipath_srq *srq;
struct ipath_rwqe *wqe;
void (*handler)(struct ib_event *, void *);
u32 tail;
int ret;
if (qp->ibqp.srq) {
srq = to_isrq(qp->ibqp.srq);
handler = srq->ibsrq.event_handler;
rq = &srq->rq;
} else {
srq = NULL;
handler = NULL;
rq = &qp->r_rq;
}
spin_lock_irqsave(&rq->lock, flags);
if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
ret = 0;
goto unlock;
}
wq = rq->wq;
tail = wq->tail;
/* Validate tail before using it since it is user writable. */
if (tail >= rq->size)
tail = 0;
do {
if (unlikely(tail == wq->head)) {
ret = 0;
goto unlock;
}
/* Make sure entry is read after head index is read. */
smp_rmb();
wqe = get_rwqe_ptr(rq, tail);
if (++tail >= rq->size)
tail = 0;
if (wr_id_only)
break;
qp->r_sge.sg_list = qp->r_sg_list;
} while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
qp->r_wr_id = wqe->wr_id;
wq->tail = tail;
ret = 1;
set_bit(IPATH_R_WRID_VALID, &qp->r_aflags);
if (handler) {
u32 n;
/*
* validate head pointer value and compute
* the number of remaining WQEs.
*/
n = wq->head;
if (n >= rq->size)
n = 0;
if (n < tail)
n += rq->size - tail;
else
n -= tail;
if (n < srq->limit) {
struct ib_event ev;
srq->limit = 0;
spin_unlock_irqrestore(&rq->lock, flags);
ev.device = qp->ibqp.device;
ev.element.srq = qp->ibqp.srq;
ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
handler(&ev, srq->ibsrq.srq_context);
goto bail;
}
}
unlock:
spin_unlock_irqrestore(&rq->lock, flags);
bail:
return ret;
}
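/*
* Worked example for the SRQ limit check above (a sketch): with
* rq->size = 8, head = 2 and tail = 6, n = 2 + (8 - 6) = 4 receive
* WQEs remain posted; if srq->limit were 5, the
* IB_EVENT_SRQ_LIMIT_REACHED handler fires and the limit is disarmed
* by zeroing it.
*/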
/**
* ipath_ruc_loopback - handle UC and RC loopback requests
* @sqp: the sending QP
*
* This is called from ipath_do_send() to
* forward a WQE addressed to the same HCA.
* Note that although we are single threaded due to the tasklet, we still
* have to protect against post_send(). We don't have to worry about
* receive interrupts since this is a connected protocol and all packets
* will pass through here.
*/
static void ipath_ruc_loopback(struct ipath_qp *sqp)
{
struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
struct ipath_qp *qp;
struct ipath_swqe *wqe;
struct ipath_sge *sge;
unsigned long flags;
struct ib_wc wc;
u64 sdata;
atomic64_t *maddr;
enum ib_wc_status send_status;
/*
* Note that we check the responder QP state after
* checking the requester's state.
*/
qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
spin_lock_irqsave(&sqp->s_lock, flags);
/* Return if we are already busy processing a work request. */
if ((sqp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
goto unlock;
sqp->s_flags |= IPATH_S_BUSY;
again:
if (sqp->s_last == sqp->s_head)
goto clr_busy;
wqe = get_swqe_ptr(sqp, sqp->s_last);
/* Return if it is not OK to start a new work request. */
if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
if (!(ib_ipath_state_ops[sqp->state] & IPATH_FLUSH_SEND))
goto clr_busy;
/* We are in the error state, flush the work request. */
send_status = IB_WC_WR_FLUSH_ERR;
goto flush_send;
}
/*
* We can rely on the entry not changing without the s_lock
* being held until we update s_last.
* We increment s_cur to indicate s_last is in progress.
*/
if (sqp->s_last == sqp->s_cur) {
if (++sqp->s_cur >= sqp->s_size)
sqp->s_cur = 0;
}
spin_unlock_irqrestore(&sqp->s_lock, flags);
if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
dev->n_pkt_drops++;
/*
* For RC, the requester would time out and retry, so
* shortcut the timeouts and just signal too many retries.
*/
if (sqp->ibqp.qp_type == IB_QPT_RC)
send_status = IB_WC_RETRY_EXC_ERR;
else
send_status = IB_WC_SUCCESS;
goto serr;
}
memset(&wc, 0, sizeof wc);
send_status = IB_WC_SUCCESS;
sqp->s_sge.sge = wqe->sg_list[0];
sqp->s_sge.sg_list = wqe->sg_list + 1;
sqp->s_sge.num_sge = wqe->wr.num_sge;
sqp->s_len = wqe->length;
switch (wqe->wr.opcode) {
case IB_WR_SEND_WITH_IMM:
wc.wc_flags = IB_WC_WITH_IMM;
wc.ex.imm_data = wqe->wr.ex.imm_data;
/* FALLTHROUGH */
case IB_WR_SEND:
if (!ipath_get_rwqe(qp, 0))
goto rnr_nak;
break;
case IB_WR_RDMA_WRITE_WITH_IMM:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
goto inv_err;
wc.wc_flags = IB_WC_WITH_IMM;
wc.ex.imm_data = wqe->wr.ex.imm_data;
if (!ipath_get_rwqe(qp, 1))
goto rnr_nak;
/* FALLTHROUGH */
case IB_WR_RDMA_WRITE:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
goto inv_err;
if (wqe->length == 0)
break;
if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
wqe->wr.wr.rdma.remote_addr,
wqe->wr.wr.rdma.rkey,
IB_ACCESS_REMOTE_WRITE)))
goto acc_err;
break;
case IB_WR_RDMA_READ:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
goto inv_err;
if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
wqe->wr.wr.rdma.remote_addr,
wqe->wr.wr.rdma.rkey,
IB_ACCESS_REMOTE_READ)))
goto acc_err;
qp->r_sge.sge = wqe->sg_list[0];
qp->r_sge.sg_list = wqe->sg_list + 1;
qp->r_sge.num_sge = wqe->wr.num_sge;
break;
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
goto inv_err;
if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
wqe->wr.wr.atomic.remote_addr,
wqe->wr.wr.atomic.rkey,
IB_ACCESS_REMOTE_ATOMIC)))
goto acc_err;
/* Perform atomic OP and save result. */
maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
sdata = wqe->wr.wr.atomic.compare_add;
*(u64 *) sqp->s_sge.sge.vaddr =
(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
(u64) atomic64_add_return(sdata, maddr) - sdata :
(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
sdata, wqe->wr.wr.atomic.swap);
goto send_comp;
default:
send_status = IB_WC_LOC_QP_OP_ERR;
goto serr;
}
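/*
 * Copy the send payload into the receiver's SGE state,
 * advancing through the MR map segments as each SGE drains.
 */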
sge = &sqp->s_sge.sge;
while (sqp->s_len) {
u32 len = sqp->s_len;
if (len > sge->length)
len = sge->length;
if (len > sge->sge_length)
len = sge->sge_length;
BUG_ON(len == 0);
ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
sge->vaddr += len;
sge->length -= len;
sge->sge_length -= len;
if (sge->sge_length == 0) {
if (--sqp->s_sge.num_sge)
*sge = *sqp->s_sge.sg_list++;
} else if (sge->length == 0 && sge->mr != NULL) {
if (++sge->n >= IPATH_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
}
sge->vaddr =
sge->mr->map[sge->m]->segs[sge->n].vaddr;
sge->length =
sge->mr->map[sge->m]->segs[sge->n].length;
}
sqp->s_len -= len;
}
if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
goto send_comp;
if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
else
wc.opcode = IB_WC_RECV;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
wc.byte_len = wqe->length;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
wc.slid = qp->remote_ah_attr.dlid;
wc.sl = qp->remote_ah_attr.sl;
wc.port_num = 1;
/* Signal completion event if the solicited bit is set. */
ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
wqe->wr.send_flags & IB_SEND_SOLICITED);
send_comp:
spin_lock_irqsave(&sqp->s_lock, flags);
flush_send:
sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
ipath_send_complete(sqp, wqe, send_status);
goto again;
rnr_nak:
/* Handle RNR NAK */
if (qp->ibqp.qp_type == IB_QPT_UC)
goto send_comp;
/*
* Note: we don't need the s_lock held since the BUSY flag
* makes this single threaded.
*/
if (sqp->s_rnr_retry == 0) {
send_status = IB_WC_RNR_RETRY_EXC_ERR;
goto serr;
}
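/* An RNR retry count of 7 means "retry forever" (IB spec), so don't decrement it. */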
if (sqp->s_rnr_retry_cnt < 7)
sqp->s_rnr_retry--;
spin_lock_irqsave(&sqp->s_lock, flags);
if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_RECV_OK))
goto clr_busy;
sqp->s_flags |= IPATH_S_WAITING;
dev->n_rnr_naks++;
sqp->s_rnr_timeout = ib_ipath_rnr_table[qp->r_min_rnr_timer];
ipath_insert_rnr_queue(sqp);
goto clr_busy;
inv_err:
send_status = IB_WC_REM_INV_REQ_ERR;
wc.status = IB_WC_LOC_QP_OP_ERR;
goto err;
acc_err:
send_status = IB_WC_REM_ACCESS_ERR;
wc.status = IB_WC_LOC_PROT_ERR;
err:
/* responder goes to error state */
ipath_rc_error(qp, wc.status);
serr:
spin_lock_irqsave(&sqp->s_lock, flags);
ipath_send_complete(sqp, wqe, send_status);
if (sqp->ibqp.qp_type == IB_QPT_RC) {
int lastwqe = ipath_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
sqp->s_flags &= ~IPATH_S_BUSY;
spin_unlock_irqrestore(&sqp->s_lock, flags);
if (lastwqe) {
struct ib_event ev;
ev.device = sqp->ibqp.device;
ev.element.qp = &sqp->ibqp;
ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
}
goto done;
}
clr_busy:
sqp->s_flags &= ~IPATH_S_BUSY;
unlock:
spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
if (qp && atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
}
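/*
 * Enable the "PIO buffer available" interrupt so we hear about the
 * next free send buffer.  Only needed when send DMA is absent, or
 * for SMI QPs, which use PIO.
 */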
static void want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp)
{
if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA) ||
qp->ibqp.qp_type == IB_QPT_SMI) {
unsigned long flags;
spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
dd->ipath_sendctrl);
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
}
}
/**
* ipath_no_bufs_available - tell the layer driver we need buffers
* @qp: the QP that caused the problem
* @dev: the device we ran out of buffers on
*
* Called when we run out of PIO buffers.
* If we are now in the error state, return zero to flush the
* send work request.
*/
static int ipath_no_bufs_available(struct ipath_qp *qp,
struct ipath_ibdev *dev)
{
unsigned long flags;
int ret = 1;
/*
* Note that as soon as want_buffer() is called and
* possibly before it returns, ipath_ib_piobufavail()
* could be called. Therefore, put QP on the piowait list before
* enabling the PIO avail interrupt.
*/
spin_lock_irqsave(&qp->s_lock, flags);
if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
dev->n_piowait++;
qp->s_flags |= IPATH_S_WAITING;
qp->s_flags &= ~IPATH_S_BUSY;
spin_lock(&dev->pending_lock);
if (list_empty(&qp->piowait))
list_add_tail(&qp->piowait, &dev->piowait);
spin_unlock(&dev->pending_lock);
} else
ret = 0;
spin_unlock_irqrestore(&qp->s_lock, flags);
if (ret)
want_buffer(dev->dd, qp);
return ret;
}
/**
* ipath_make_grh - construct a GRH header
* @dev: a pointer to the ipath device
* @hdr: a pointer to the GRH header being constructed
* @grh: the global route address to send to
* @hwords: the number of 32 bit words of header being sent
* @nwords: the number of 32 bit words of data being sent
*
* Return the size of the header in 32 bit words.
*/
u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
struct ib_global_route *grh, u32 hwords, u32 nwords)
{
hdr->version_tclass_flow =
cpu_to_be32((6 << 28) |
(grh->traffic_class << 20) |
grh->flow_label);
hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
/* next_hdr is defined by C8-7 in ch. 8.4.1 */
hdr->next_hdr = 0x1B;
hdr->hop_limit = grh->hop_limit;
/* The SGID is 32-bit aligned. */
hdr->sgid.global.subnet_prefix = dev->gid_prefix;
hdr->sgid.global.interface_id = dev->dd->ipath_guid;
hdr->dgid = grh->dgid;
/* GRH header size in 32-bit words. */
return sizeof(struct ib_grh) / sizeof(u32);
}
void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
struct ipath_other_headers *ohdr,
u32 bth0, u32 bth2)
{
u16 lrh0;
u32 nwords;
u32 extra_bytes;
/* Construct the header. */
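/*
 * "-size & 3" rounds the payload up to a multiple of 4 bytes;
 * extra_bytes is the number of pad bytes that implies.
 */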
extra_bytes = -qp->s_cur_size & 3;
nwords = (qp->s_cur_size + extra_bytes) >> 2;
lrh0 = IPATH_LRH_BTH;
if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
&qp->remote_ah_attr.grh,
qp->s_hdrwords, nwords);
lrh0 = IPATH_LRH_GRH;
}
lrh0 |= qp->remote_ah_attr.sl << 4;
qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid |
qp->remote_ah_attr.src_path_bits);
bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
bth0 |= extra_bytes << 20;
ohdr->bth[0] = cpu_to_be32(bth0 | (1 << 22));
ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
ohdr->bth[2] = cpu_to_be32(bth2);
}
/**
* ipath_do_send - perform a send on a QP
* @data: contains a pointer to the QP
*
* Process entries in the send work queue until credit or queue is
* exhausted. Only allow one CPU to send a packet per QP (tasklet).
* Otherwise, two threads could send packets out of order.
*/
void ipath_do_send(unsigned long data)
{
struct ipath_qp *qp = (struct ipath_qp *)data;
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
int (*make_req)(struct ipath_qp *qp);
unsigned long flags;
if ((qp->ibqp.qp_type == IB_QPT_RC ||
qp->ibqp.qp_type == IB_QPT_UC) &&
qp->remote_ah_attr.dlid == dev->dd->ipath_lid) {
ipath_ruc_loopback(qp);
goto bail;
}
if (qp->ibqp.qp_type == IB_QPT_RC)
make_req = ipath_make_rc_req;
else if (qp->ibqp.qp_type == IB_QPT_UC)
make_req = ipath_make_uc_req;
else
make_req = ipath_make_ud_req;
spin_lock_irqsave(&qp->s_lock, flags);
/* Return if we are already busy processing a work request. */
if ((qp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND)) {
spin_unlock_irqrestore(&qp->s_lock, flags);
goto bail;
}
qp->s_flags |= IPATH_S_BUSY;
spin_unlock_irqrestore(&qp->s_lock, flags);
again:
/* Check for a constructed packet to be sent. */
if (qp->s_hdrwords != 0) {
/*
* If no PIO bufs are available, return. An interrupt will
* call ipath_ib_piobufavail() when one is available.
*/
if (ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
qp->s_cur_sge, qp->s_cur_size)) {
if (ipath_no_bufs_available(qp, dev))
goto bail;
}
dev->n_unicast_xmit++;
/* Record that we sent the packet and s_hdr is empty. */
qp->s_hdrwords = 0;
}
if (make_req(qp))
goto again;
bail:;
}
/*
* This should be called with s_lock held.
*/
void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
enum ib_wc_status status)
{
u32 old_last, last;
if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
return;
/* See ch. 11.2.4.1 and 10.7.3.1 */
if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED) ||
status != IB_WC_SUCCESS) {
struct ib_wc wc;
memset(&wc, 0, sizeof wc);
wc.wr_id = wqe->wr.wr_id;
wc.status = status;
wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
wc.qp = &qp->ibqp;
if (status == IB_WC_SUCCESS)
wc.byte_len = wqe->length;
ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
status != IB_WC_SUCCESS);
}
old_last = last = qp->s_last;
if (++last >= qp->s_size)
last = 0;
qp->s_last = last;
if (qp->s_cur == old_last)
qp->s_cur = last;
if (qp->s_tail == old_last)
qp->s_tail = last;
if (qp->state == IB_QPS_SQD && last == qp->s_cur)
qp->s_draining = 0;
}
| gpl-2.0 |
pichina/linux-bcache | drivers/hid/hid-ntrig.c | 27 | 2098 | /*
* HID driver for some ntrig "special" devices
*
* Copyright (c) 1999 Andreas Gal
* Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
* Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
* Copyright (c) 2006-2007 Jiri Kosina
* Copyright (c) 2007 Paul Walmsley
* Copyright (c) 2008 Jiri Slaby
* Copyright (c) 2008 Rafi Rubin
*
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
#include "hid-ids.h"
#define NTRIG_DUPLICATE_USAGES 0x001
#define nt_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
EV_KEY, (c))
static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
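/* Remap the digitizer page's Confidence usage (0x47) to BTN_TOOL_DOUBLETAP. */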
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_DIGITIZER &&
(usage->hid & 0xff) == 0x47) {
nt_map_key_clear(BTN_TOOL_DOUBLETAP);
return 1;
}
return 0;
}
static int ntrig_input_mapped(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
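/* Filter out duplicated usages (see NTRIG_DUPLICATE_USAGES). */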
if (usage->type == EV_KEY || usage->type == EV_REL
|| usage->type == EV_ABS)
clear_bit(usage->code, *bit);
return 0;
}
static const struct hid_device_id ntrig_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN),
.driver_data = NTRIG_DUPLICATE_USAGES },
{ }
};
MODULE_DEVICE_TABLE(hid, ntrig_devices);
static struct hid_driver ntrig_driver = {
.name = "ntrig",
.id_table = ntrig_devices,
.input_mapping = ntrig_input_mapping,
.input_mapped = ntrig_input_mapped,
};
static int ntrig_init(void)
{
return hid_register_driver(&ntrig_driver);
}
static void ntrig_exit(void)
{
hid_unregister_driver(&ntrig_driver);
}
module_init(ntrig_init);
module_exit(ntrig_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
VanirAOSP/kernel_amlogic_mbx12122012 | fs/nfs/write.c | 27 | 40980 | /*
* linux/fs/nfs/write.c
*
* Write file data over NFS.
*
* Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>
#include <asm/uaccess.h>
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
#define MIN_POOL_WRITE (32)
#define MIN_POOL_COMMIT (4)
/*
* Local function declarations
*/
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
struct inode *inode, int ioflags);
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;
static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;
struct nfs_write_data *nfs_commitdata_alloc(void)
{
struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
if (p) {
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
}
return p;
}
void nfs_commit_free(struct nfs_write_data *p)
{
if (p && (p->pagevec != &p->page_array[0]))
kfree(p->pagevec);
mempool_free(p, nfs_commit_mempool);
}
struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
{
struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
if (p) {
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
p->npages = pagecount;
p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
if (pagecount <= ARRAY_SIZE(p->page_array))
p->pagevec = p->page_array;
else {
p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
if (!p->pagevec) {
mempool_free(p, nfs_wdata_mempool);
p = NULL;
}
}
}
return p;
}
void nfs_writedata_free(struct nfs_write_data *p)
{
if (p && (p->pagevec != &p->page_array[0]))
kfree(p->pagevec);
mempool_free(p, nfs_wdata_mempool);
}
static void nfs_writedata_release(struct nfs_write_data *wdata)
{
put_nfs_open_context(wdata->args.context);
nfs_writedata_free(wdata);
}
static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
ctx->error = error;
smp_wmb();
set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}
static struct nfs_page *nfs_page_find_request_locked(struct page *page)
{
struct nfs_page *req = NULL;
if (PagePrivate(page)) {
req = (struct nfs_page *)page_private(page);
if (req != NULL)
kref_get(&req->wb_kref);
}
return req;
}
static struct nfs_page *nfs_page_find_request(struct page *page)
{
struct inode *inode = page->mapping->host;
struct nfs_page *req = NULL;
spin_lock(&inode->i_lock);
req = nfs_page_find_request_locked(page);
spin_unlock(&inode->i_lock);
return req;
}
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
struct inode *inode = page->mapping->host;
loff_t end, i_size;
pgoff_t end_index;
spin_lock(&inode->i_lock);
i_size = i_size_read(inode);
end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
if (i_size > 0 && page->index < end_index)
goto out;
end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
if (i_size >= end)
goto out;
i_size_write(inode, end);
nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
spin_unlock(&inode->i_lock);
}
/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
SetPageError(page);
nfs_zap_mapping(page->mapping->host, page->mapping);
}
/* We can set the PG_uptodate flag if we see that a write request
* covers the full page.
*/
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
if (PageUptodate(page))
return;
if (base != 0)
return;
if (count != nfs_page_length(page))
return;
SetPageUptodate(page);
}
static int wb_priority(struct writeback_control *wbc)
{
if (wbc->for_reclaim)
return FLUSH_HIGHPRI | FLUSH_STABLE;
if (wbc->for_kupdate || wbc->for_background)
return FLUSH_LOWPRI;
return 0;
}
/*
* NFS congestion control
*/
int nfs_congestion_kb;
#define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH \
(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
static int nfs_set_page_writeback(struct page *page)
{
int ret = test_set_page_writeback(page);
if (!ret) {
struct inode *inode = page->mapping->host;
struct nfs_server *nfss = NFS_SERVER(inode);
page_cache_get(page);
if (atomic_long_inc_return(&nfss->writeback) >
NFS_CONGESTION_ON_THRESH) {
set_bdi_congested(&nfss->backing_dev_info,
BLK_RW_ASYNC);
}
}
return ret;
}
static void nfs_end_page_writeback(struct page *page)
{
struct inode *inode = page->mapping->host;
struct nfs_server *nfss = NFS_SERVER(inode);
end_page_writeback(page);
page_cache_release(page);
if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}
static struct nfs_page *nfs_find_and_lock_request(struct page *page)
{
struct inode *inode = page->mapping->host;
struct nfs_page *req;
int ret;
spin_lock(&inode->i_lock);
for (;;) {
req = nfs_page_find_request_locked(page);
if (req == NULL)
break;
if (nfs_set_page_tag_locked(req))
break;
/* Note: If we hold the page lock, as is the case in nfs_writepage,
* then the call to nfs_set_page_tag_locked() will always
* succeed provided that someone hasn't already marked the
* request as dirty (in which case we don't care).
*/
spin_unlock(&inode->i_lock);
ret = nfs_wait_on_request(req);
nfs_release_request(req);
if (ret != 0)
return ERR_PTR(ret);
spin_lock(&inode->i_lock);
}
spin_unlock(&inode->i_lock);
return req;
}
/*
* Find an associated nfs write request, and prepare to flush it out
* May return an error if the user signalled nfs_wait_on_request().
*/
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
struct page *page)
{
struct nfs_page *req;
int ret = 0;
req = nfs_find_and_lock_request(page);
if (!req)
goto out;
ret = PTR_ERR(req);
if (IS_ERR(req))
goto out;
ret = nfs_set_page_writeback(page);
BUG_ON(ret != 0);
BUG_ON(test_bit(PG_CLEAN, &req->wb_flags));
if (!nfs_pageio_add_request(pgio, req)) {
nfs_redirty_request(req);
ret = pgio->pg_error;
}
out:
return ret;
}
static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
{
struct inode *inode = page->mapping->host;
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
nfs_pageio_cond_complete(pgio, page->index);
return nfs_page_async_flush(pgio, page);
}
/*
* Write an mmapped page to the server.
*/
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
struct nfs_pageio_descriptor pgio;
int err;
nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
err = nfs_do_writepage(page, wbc, &pgio);
nfs_pageio_complete(&pgio);
if (err < 0)
return err;
if (pgio.pg_error < 0)
return pgio.pg_error;
return 0;
}
int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
int ret;
ret = nfs_writepage_locked(page, wbc);
unlock_page(page);
return ret;
}
static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
int ret;
ret = nfs_do_writepage(page, wbc, data);
unlock_page(page);
return ret;
}
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
unsigned long *bitlock = &NFS_I(inode)->flags;
struct nfs_pageio_descriptor pgio;
int err;
/* Stop dirtying of new pages while we sync */
err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
nfs_wait_bit_killable, TASK_KILLABLE);
if (err)
goto out_err;
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
nfs_pageio_complete(&pgio);
clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
smp_mb__after_clear_bit();
wake_up_bit(bitlock, NFS_INO_FLUSHING);
if (err < 0)
goto out_err;
err = pgio.pg_error;
if (err < 0)
goto out_err;
return 0;
out_err:
return err;
}
/*
* Insert a write request into an inode
*/
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
struct nfs_inode *nfsi = NFS_I(inode);
int error;
error = radix_tree_preload(GFP_NOFS);
if (error != 0)
goto out;
/* Lock the request! */
nfs_lock_request_dontget(req);
spin_lock(&inode->i_lock);
error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
BUG_ON(error);
if (!nfsi->npages) {
igrab(inode);
if (nfs_have_delegation(inode, FMODE_WRITE))
nfsi->change_attr++;
}
SetPagePrivate(req->wb_page);
set_page_private(req->wb_page, (unsigned long)req);
nfsi->npages++;
kref_get(&req->wb_kref);
radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
NFS_PAGE_TAG_LOCKED);
spin_unlock(&inode->i_lock);
radix_tree_preload_end();
out:
return error;
}
/*
* Remove a write request from an inode
*/
static void nfs_inode_remove_request(struct nfs_page *req)
{
struct inode *inode = req->wb_context->path.dentry->d_inode;
struct nfs_inode *nfsi = NFS_I(inode);
BUG_ON (!NFS_WBACK_BUSY(req));
spin_lock(&inode->i_lock);
set_page_private(req->wb_page, 0);
ClearPagePrivate(req->wb_page);
radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
nfsi->npages--;
if (!nfsi->npages) {
spin_unlock(&inode->i_lock);
iput(inode);
} else
spin_unlock(&inode->i_lock);
nfs_clear_request(req);
nfs_release_request(req);
}
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
__set_page_dirty_nobuffers(req->wb_page);
__mark_inode_dirty(req->wb_page->mapping->host, I_DIRTY_DATASYNC);
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
* Add a request to the inode's commit list.
*/
static void
nfs_mark_request_commit(struct nfs_page *req)
{
struct inode *inode = req->wb_context->path.dentry->d_inode;
struct nfs_inode *nfsi = NFS_I(inode);
spin_lock(&inode->i_lock);
set_bit(PG_CLEAN, &(req)->wb_flags);
radix_tree_tag_set(&nfsi->nfs_page_tree,
req->wb_index,
NFS_PAGE_TAG_COMMIT);
nfsi->ncommit++;
spin_unlock(&inode->i_lock);
inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}
static int
nfs_clear_request_commit(struct nfs_page *req)
{
struct page *page = req->wb_page;
if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) {
dec_zone_page_state(page, NR_UNSTABLE_NFS);
dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
return 1;
}
return 0;
}
static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
return data->verf.committed != NFS_FILE_SYNC;
}
static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
nfs_mark_request_commit(req);
return 1;
}
if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
nfs_mark_request_dirty(req);
return 1;
}
return 0;
}
#else
static inline void
nfs_mark_request_commit(struct nfs_page *req)
{
}
static inline int
nfs_clear_request_commit(struct nfs_page *req)
{
return 0;
}
static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
return 0;
}
static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
return 0;
}
#endif
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static int
nfs_need_commit(struct nfs_inode *nfsi)
{
return radix_tree_tagged(&nfsi->nfs_page_tree, NFS_PAGE_TAG_COMMIT);
}
/*
* nfs_scan_commit - Scan an inode for commit requests
* @inode: NFS inode to scan
* @dst: destination list
* @idx_start: lower bound of page->index to scan.
* @npages: idx_start + npages sets the upper bound to scan.
*
* Moves requests from the inode's 'commit' request list.
* The requests are *not* checked to ensure that they form a contiguous set.
*/
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
struct nfs_inode *nfsi = NFS_I(inode);
int ret;
if (!nfs_need_commit(nfsi))
return 0;
ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
if (ret > 0)
nfsi->ncommit -= ret;
if (nfs_need_commit(NFS_I(inode)))
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
return ret;
}
#else
static inline int nfs_need_commit(struct nfs_inode *nfsi)
{
return 0;
}
static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
return 0;
}
#endif
/*
* Search for an existing write request, and attempt to update
* it to reflect a new dirty region on a given page.
*
* If the attempt fails, then the existing request is flushed out
* to disk.
*/
static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
struct page *page,
unsigned int offset,
unsigned int bytes)
{
struct nfs_page *req;
unsigned int rqend;
unsigned int end;
int error;
if (!PagePrivate(page))
return NULL;
end = offset + bytes;
spin_lock(&inode->i_lock);
for (;;) {
req = nfs_page_find_request_locked(page);
if (req == NULL)
goto out_unlock;
rqend = req->wb_offset + req->wb_bytes;
/*
* Tell the caller to flush out the request if
* the offsets are non-contiguous.
* Note: nfs_flush_incompatible() will already
* have flushed out requests having wrong owners.
*/
if (offset > rqend
|| end < req->wb_offset)
goto out_flushme;
if (nfs_set_page_tag_locked(req))
break;
/* The request is locked, so wait and then retry */
spin_unlock(&inode->i_lock);
error = nfs_wait_on_request(req);
nfs_release_request(req);
if (error != 0)
goto out_err;
spin_lock(&inode->i_lock);
}
if (nfs_clear_request_commit(req) &&
radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL)
NFS_I(inode)->ncommit--;
/* Okay, the request matches. Update the region */
if (offset < req->wb_offset) {
req->wb_offset = offset;
req->wb_pgbase = offset;
}
if (end > rqend)
req->wb_bytes = end - req->wb_offset;
else
req->wb_bytes = rqend - req->wb_offset;
out_unlock:
spin_unlock(&inode->i_lock);
return req;
out_flushme:
spin_unlock(&inode->i_lock);
nfs_release_request(req);
error = nfs_wb_page(inode, page);
out_err:
return ERR_PTR(error);
}
/*
* Try to update an existing write request, or create one if there is none.
*
* Note: Should always be called with the Page Lock held to prevent races
* if we have to add a new request. Also assumes that the caller has
* already called nfs_flush_incompatible() if necessary.
*/
static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
struct page *page, unsigned int offset, unsigned int bytes)
{
struct inode *inode = page->mapping->host;
struct nfs_page *req;
int error;
req = nfs_try_to_update_request(inode, page, offset, bytes);
if (req != NULL)
goto out;
req = nfs_create_request(ctx, inode, page, offset, bytes);
if (IS_ERR(req))
goto out;
error = nfs_inode_add_request(inode, req);
if (error != 0) {
nfs_release_request(req);
req = ERR_PTR(error);
}
out:
return req;
}
static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
unsigned int offset, unsigned int count)
{
struct nfs_page *req;
req = nfs_setup_write_request(ctx, page, offset, count);
if (IS_ERR(req))
return PTR_ERR(req);
nfs_mark_request_dirty(req);
/* Update file length */
nfs_grow_file(page, offset, count);
nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
nfs_mark_request_dirty(req);
nfs_clear_page_tag_locked(req);
return 0;
}
int nfs_flush_incompatible(struct file *file, struct page *page)
{
struct nfs_open_context *ctx = nfs_file_open_context(file);
struct nfs_page *req;
int do_flush, status;
/*
* Look for a request corresponding to this page. If there
* is one, and it belongs to another file, we flush it out
* before we try to copy anything into the page. Do this
* due to the lack of an ACCESS-type call in NFSv2.
* Also do the same if we find a request from an existing
* dropped page.
*/
do {
req = nfs_page_find_request(page);
if (req == NULL)
return 0;
do_flush = req->wb_page != page || req->wb_context != ctx;
nfs_release_request(req);
if (!do_flush)
return 0;
status = nfs_wb_page(page->mapping->host, page);
} while (status == 0);
return status;
}
/*
* If the page cache is marked as unsafe or invalid, then we can't rely on
* the PageUptodate() flag. In this case, we will need to turn off
* write optimisations that depend on the page contents being correct.
*/
static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
return PageUptodate(page) &&
!(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
}
/*
* Update and possibly write a cached page of an NFS file.
*
* XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
* things with a page scheduled for an RPC call (e.g. invalidate it).
*/
int nfs_updatepage(struct file *file, struct page *page,
unsigned int offset, unsigned int count)
{
struct nfs_open_context *ctx = nfs_file_open_context(file);
struct inode *inode = page->mapping->host;
int status = 0;
nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
dprintk("NFS: nfs_updatepage(%s/%s %d@%lld)\n",
file->f_path.dentry->d_parent->d_name.name,
file->f_path.dentry->d_name.name, count,
(long long)(page_offset(page) + offset));
/* If we're not using byte range locks, and we know the page
* is up to date, it may be more efficient to extend the write
* to cover the entire page in order to avoid fragmentation
* inefficiencies.
*/
if (nfs_write_pageuptodate(page, inode) &&
inode->i_flock == NULL &&
!(file->f_flags & O_DSYNC)) {
count = max(count + offset, nfs_page_length(page));
offset = 0;
}
status = nfs_writepage_setup(ctx, page, offset, count);
if (status < 0)
nfs_set_pageerror(page);
dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
status, (long long)i_size_read(inode));
return status;
}
static void nfs_writepage_release(struct nfs_page *req)
{
struct page *page = req->wb_page;
if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req))
nfs_inode_remove_request(req);
nfs_clear_page_tag_locked(req);
nfs_end_page_writeback(page);
}
static int flush_task_priority(int how)
{
switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
case FLUSH_HIGHPRI:
return RPC_PRIORITY_HIGH;
case FLUSH_LOWPRI:
return RPC_PRIORITY_LOW;
}
return RPC_PRIORITY_NORMAL;
}
/*
* Set up the argument/result storage required for the RPC call.
*/
static int nfs_write_rpcsetup(struct nfs_page *req,
struct nfs_write_data *data,
const struct rpc_call_ops *call_ops,
unsigned int count, unsigned int offset,
int how)
{
struct inode *inode = req->wb_context->path.dentry->d_inode;
int priority = flush_task_priority(how);
struct rpc_task *task;
struct rpc_message msg = {
.rpc_argp = &data->args,
.rpc_resp = &data->res,
.rpc_cred = req->wb_context->cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = NFS_CLIENT(inode),
.task = &data->task,
.rpc_message = &msg,
.callback_ops = call_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
.priority = priority,
};
int ret = 0;
/* Set up the RPC argument and reply structs
* NB: take care not to mess about with data->commit et al. */
data->req = req;
data->inode = inode = req->wb_context->path.dentry->d_inode;
data->cred = msg.rpc_cred;
data->args.fh = NFS_FH(inode);
data->args.offset = req_offset(req) + offset;
data->args.pgbase = req->wb_pgbase + offset;
data->args.pages = data->pagevec;
data->args.count = count;
data->args.context = get_nfs_open_context(req->wb_context);
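/*
 * Default to an UNSTABLE write; a stable flush is sent as
 * DATA_SYNC, or as FILE_SYNC when no separate COMMIT is pending.
 */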
data->args.stable = NFS_UNSTABLE;
if (how & FLUSH_STABLE) {
data->args.stable = NFS_DATA_SYNC;
if (!nfs_need_commit(NFS_I(inode)))
data->args.stable = NFS_FILE_SYNC;
}
data->res.fattr = &data->fattr;
data->res.count = count;
data->res.verf = &data->verf;
nfs_fattr_init(&data->fattr);
/* Set up the initial task struct. */
NFS_PROTO(inode)->write_setup(data, &msg);
dprintk("NFS: %5u initiated write call "
"(req %s/%lld, %u bytes @ offset %llu)\n",
data->task.tk_pid,
inode->i_sb->s_id,
(long long)NFS_FILEID(inode),
count,
(unsigned long long)data->args.offset);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task)) {
ret = PTR_ERR(task);
goto out;
}
if (how & FLUSH_SYNC) {
ret = rpc_wait_for_completion_task(task);
if (ret == 0)
ret = task->tk_status;
}
rpc_put_task(task);
out:
return ret;
}
/* If a nfs_flush_* function fails, it should remove reqs from @head and
* call this on each, which will prepare them to be retried on next
* writeback using standard nfs.
*/
static void nfs_redirty_request(struct nfs_page *req)
{
struct page *page = req->wb_page;
nfs_mark_request_dirty(req);
nfs_clear_page_tag_locked(req);
nfs_end_page_writeback(page);
}
/*
* Generate multiple small requests to write out a single
* contiguous dirty area on one page.
*/
static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
struct nfs_page *req = nfs_list_entry(head->next);
struct page *page = req->wb_page;
struct nfs_write_data *data;
size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
unsigned int offset;
int requests = 0;
int ret = 0;
LIST_HEAD(list);
nfs_list_remove_request(req);
nbytes = count;
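/* First pass: allocate one nfs_write_data per wsize-sized chunk. */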
do {
size_t len = min(nbytes, wsize);
data = nfs_writedata_alloc(1);
if (!data)
goto out_bad;
list_add(&data->pages, &list);
requests++;
nbytes -= len;
} while (nbytes != 0);
atomic_set(&req->wb_complete, requests);
ClearPageError(page);
offset = 0;
nbytes = count;
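/* Second pass: set up and fire off an RPC for each chunk. */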
do {
int ret2;
data = list_entry(list.next, struct nfs_write_data, pages);
list_del_init(&data->pages);
data->pagevec[0] = page;
if (nbytes < wsize)
wsize = nbytes;
ret2 = nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
wsize, offset, how);
if (ret == 0)
ret = ret2;
offset += wsize;
nbytes -= wsize;
} while (nbytes != 0);
return ret;
out_bad:
while (!list_empty(&list)) {
data = list_entry(list.next, struct nfs_write_data, pages);
list_del(&data->pages);
nfs_writedata_release(data);
}
nfs_redirty_request(req);
return -ENOMEM;
}
/*
* Create an RPC task for the given write request and kick it.
* The page must have been locked by the caller.
*
* It may happen that the page we're passed is not marked dirty.
* This is the case if nfs_updatepage detects a conflicting request
* that has been written but not committed.
*/
static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
struct nfs_page *req;
struct page **pages;
struct nfs_write_data *data;
data = nfs_writedata_alloc(npages);
if (!data)
goto out_bad;
pages = data->pagevec;
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_list_add_request(req, &data->pages);
ClearPageError(req->wb_page);
*pages++ = req->wb_page;
}
req = nfs_list_entry(data->pages.next);
/* Set up the argument struct */
return nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);
out_bad:
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_redirty_request(req);
}
return -ENOMEM;
}
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
struct inode *inode, int ioflags)
{
size_t wsize = NFS_SERVER(inode)->wsize;
if (wsize < PAGE_CACHE_SIZE)
nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
else
nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags);
}
/*
* Handle a write reply that flushed part of a page.
*/
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = calldata;
dprintk("NFS: %5u write(%s/%lld %d@%lld)",
task->tk_pid,
data->req->wb_context->path.dentry->d_inode->i_sb->s_id,
(long long)
NFS_FILEID(data->req->wb_context->path.dentry->d_inode),
data->req->wb_bytes, (long long)req_offset(data->req));
nfs_writeback_done(task, data);
}
static void nfs_writeback_release_partial(void *calldata)
{
struct nfs_write_data *data = calldata;
struct nfs_page *req = data->req;
struct page *page = req->wb_page;
int status = data->task.tk_status;
if (status < 0) {
nfs_set_pageerror(page);
nfs_context_set_write_error(req->wb_context, status);
dprintk(", error = %d\n", status);
goto out;
}
if (nfs_write_need_commit(data)) {
struct inode *inode = page->mapping->host;
spin_lock(&inode->i_lock);
if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
/* Do nothing; we need to resend the writes */
} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
dprintk(" defer commit\n");
} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
set_bit(PG_NEED_RESCHED, &req->wb_flags);
clear_bit(PG_NEED_COMMIT, &req->wb_flags);
dprintk(" server reboot detected\n");
}
spin_unlock(&inode->i_lock);
} else
dprintk(" OK\n");
out:
if (atomic_dec_and_test(&req->wb_complete))
nfs_writepage_release(req);
nfs_writedata_release(calldata);
}
#if defined(CONFIG_NFS_V4_1)
void nfs_write_prepare(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = calldata;
struct nfs_client *clp = (NFS_SERVER(data->inode))->nfs_client;
if (nfs4_setup_sequence(clp, &data->args.seq_args,
&data->res.seq_res, 1, task))
return;
rpc_call_start(task);
}
#endif /* CONFIG_NFS_V4_1 */
static const struct rpc_call_ops nfs_write_partial_ops = {
#if defined(CONFIG_NFS_V4_1)
.rpc_call_prepare = nfs_write_prepare,
#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_writeback_done_partial,
.rpc_release = nfs_writeback_release_partial,
};
/*
* Handle a write reply that flushes a whole page.
*
* FIXME: There is an inherent race with invalidate_inode_pages and
* writebacks since the page->count is kept > 1 for as long
* as the page has a write request pending.
*/
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = calldata;
nfs_writeback_done(task, data);
}
static void nfs_writeback_release_full(void *calldata)
{
struct nfs_write_data *data = calldata;
int status = data->task.tk_status;
/* Update attributes as result of writeback. */
while (!list_empty(&data->pages)) {
struct nfs_page *req = nfs_list_entry(data->pages.next);
struct page *page = req->wb_page;
nfs_list_remove_request(req);
dprintk("NFS: %5u write (%s/%lld %d@%lld)",
data->task.tk_pid,
req->wb_context->path.dentry->d_inode->i_sb->s_id,
(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
req->wb_bytes,
(long long)req_offset(req));
if (status < 0) {
nfs_set_pageerror(page);
nfs_context_set_write_error(req->wb_context, status);
dprintk(", error = %d\n", status);
goto remove_request;
}
if (nfs_write_need_commit(data)) {
memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
nfs_mark_request_commit(req);
dprintk(" marked for commit\n");
goto next;
}
dprintk(" OK\n");
remove_request:
nfs_inode_remove_request(req);
next:
nfs_clear_page_tag_locked(req);
nfs_end_page_writeback(page);
}
nfs_writedata_release(calldata);
}
static const struct rpc_call_ops nfs_write_full_ops = {
#if defined(CONFIG_NFS_V4_1)
.rpc_call_prepare = nfs_write_prepare,
#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_writeback_done_full,
.rpc_release = nfs_writeback_release_full,
};
/*
* This function is called when the WRITE call is complete.
*/
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
struct nfs_writeargs *argp = &data->args;
struct nfs_writeres *resp = &data->res;
struct nfs_server *server = NFS_SERVER(data->inode);
int status;
dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
task->tk_pid, task->tk_status);
/*
* ->write_done will attempt to use post-op attributes to detect
* conflicting writes by other clients. A strict interpretation
* of close-to-open would allow us to continue caching even if
* another writer had changed the file, but some applications
* depend on tighter cache coherency when writing.
*/
status = NFS_PROTO(data->inode)->write_done(task, data);
if (status != 0)
return status;
nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
/* We tried a write call, but the server did not
* commit data to stable storage even though we
* requested it.
* Note: There is a known bug in Tru64 < 5.0 in which
* the server reports NFS_DATA_SYNC, but performs
* NFS_FILE_SYNC. We therefore implement this checking
* as a dprintk() in order to avoid filling syslog.
*/
static unsigned long complain;
if (time_before(complain, jiffies)) {
dprintk("NFS: faulty NFS server %s:"
" (committed = %d) != (stable = %d)\n",
server->nfs_client->cl_hostname,
resp->verf->committed, argp->stable);
complain = jiffies + 300 * HZ;
}
}
#endif
/* Is this a short write? */
if (task->tk_status >= 0 && resp->count < argp->count) {
static unsigned long complain;
nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);
/* Has the server at least made some progress? */
if (resp->count != 0) {
/* Was this an NFSv2 write or an NFSv3 stable write? */
if (resp->verf->committed != NFS_UNSTABLE) {
/* Resend from where the server left off */
argp->offset += resp->count;
argp->pgbase += resp->count;
argp->count -= resp->count;
} else {
/* Resend as a stable write in order to avoid
* headaches in the case of a server crash.
*/
argp->stable = NFS_FILE_SYNC;
}
nfs_restart_rpc(task, server->nfs_client);
return -EAGAIN;
}
if (time_before(complain, jiffies)) {
printk(KERN_WARNING
"NFS: Server wrote zero bytes, expected %u.\n",
argp->count);
complain = jiffies + 300 * HZ;
}
/* Can't do anything about it except throw an error. */
task->tk_status = -EIO;
}
return 0;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
{
if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
return 1;
if (may_wait && !out_of_line_wait_on_bit_lock(&nfsi->flags,
NFS_INO_COMMIT, nfs_wait_bit_killable,
TASK_KILLABLE))
return 1;
return 0;
}
static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
{
clear_bit(NFS_INO_COMMIT, &nfsi->flags);
smp_mb__after_clear_bit();
wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
}
static void nfs_commitdata_release(void *data)
{
struct nfs_write_data *wdata = data;
put_nfs_open_context(wdata->args.context);
nfs_commit_free(wdata);
}
/*
* Set up the argument/result storage required for the RPC call.
*/
static int nfs_commit_rpcsetup(struct list_head *head,
struct nfs_write_data *data,
int how)
{
struct nfs_page *first = nfs_list_entry(head->next);
struct inode *inode = first->wb_context->path.dentry->d_inode;
int priority = flush_task_priority(how);
struct rpc_task *task;
struct rpc_message msg = {
.rpc_argp = &data->args,
.rpc_resp = &data->res,
.rpc_cred = first->wb_context->cred,
};
struct rpc_task_setup task_setup_data = {
.task = &data->task,
.rpc_client = NFS_CLIENT(inode),
.rpc_message = &msg,
.callback_ops = &nfs_commit_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
.priority = priority,
};
/* Set up the RPC argument and reply structs
* NB: take care not to mess about with data->commit et al. */
list_splice_init(head, &data->pages);
data->inode = inode;
data->cred = msg.rpc_cred;
data->args.fh = NFS_FH(data->inode);
/* Note: we always request a commit of the entire inode */
data->args.offset = 0;
data->args.count = 0;
data->args.context = get_nfs_open_context(first->wb_context);
data->res.count = 0;
data->res.fattr = &data->fattr;
data->res.verf = &data->verf;
nfs_fattr_init(&data->fattr);
/* Set up the initial task struct. */
NFS_PROTO(inode)->commit_setup(data, &msg);
dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
rpc_put_task(task);
return 0;
}
/*
* Commit dirty pages
*/
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
struct nfs_write_data *data;
struct nfs_page *req;
data = nfs_commitdata_alloc();
if (!data)
goto out_bad;
/* Set up the argument struct */
return nfs_commit_rpcsetup(head, data, how);
out_bad:
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_mark_request_commit(req);
dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
BDI_RECLAIMABLE);
nfs_clear_page_tag_locked(req);
}
nfs_commit_clear_lock(NFS_I(inode));
return -ENOMEM;
}
/*
* COMMIT call returned
*/
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = calldata;
dprintk("NFS: %5u nfs_commit_done (status %d)\n",
task->tk_pid, task->tk_status);
/* Call the NFS version-specific code */
if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
return;
}
static void nfs_commit_release(void *calldata)
{
struct nfs_write_data *data = calldata;
struct nfs_page *req;
int status = data->task.tk_status;
while (!list_empty(&data->pages)) {
req = nfs_list_entry(data->pages.next);
nfs_list_remove_request(req);
nfs_clear_request_commit(req);
dprintk("NFS: commit (%s/%lld %d@%lld)",
req->wb_context->path.dentry->d_inode->i_sb->s_id,
(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
req->wb_bytes,
(long long)req_offset(req));
if (status < 0) {
nfs_context_set_write_error(req->wb_context, status);
nfs_inode_remove_request(req);
dprintk(", error = %d\n", status);
goto next;
}
/* Okay, COMMIT succeeded, apparently. Check the verifier
* returned by the server against all stored verfs. */
if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
/* We have a match */
nfs_inode_remove_request(req);
dprintk(" OK\n");
goto next;
}
/* We have a mismatch. Write the page again */
dprintk(" mismatch\n");
nfs_mark_request_dirty(req);
next:
nfs_clear_page_tag_locked(req);
}
nfs_commit_clear_lock(NFS_I(data->inode));
nfs_commitdata_release(calldata);
}
static const struct rpc_call_ops nfs_commit_ops = {
#if defined(CONFIG_NFS_V4_1)
.rpc_call_prepare = nfs_write_prepare,
#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_commit_done,
.rpc_release = nfs_commit_release,
};
static int nfs_commit_inode(struct inode *inode, int how)
{
LIST_HEAD(head);
int may_wait = how & FLUSH_SYNC;
int res = 0;
if (!nfs_commit_set_lock(NFS_I(inode), may_wait))
goto out;
spin_lock(&inode->i_lock);
res = nfs_scan_commit(inode, &head, 0, 0);
spin_unlock(&inode->i_lock);
if (res) {
int error = nfs_commit_list(inode, &head, how);
if (error < 0)
return error;
if (may_wait)
wait_on_bit(&NFS_I(inode)->flags, NFS_INO_COMMIT,
nfs_wait_bit_killable,
TASK_KILLABLE);
} else
nfs_commit_clear_lock(NFS_I(inode));
out:
return res;
}
static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
{
struct nfs_inode *nfsi = NFS_I(inode);
int flags = FLUSH_SYNC;
int ret = 0;
/* Don't commit yet if this is a non-blocking flush and there are
* lots of outstanding writes for this mapping.
*/
if (wbc->sync_mode == WB_SYNC_NONE &&
nfsi->ncommit <= (nfsi->npages >> 1))
goto out_mark_dirty;
if (wbc->nonblocking || wbc->for_background)
flags = 0;
ret = nfs_commit_inode(inode, flags);
if (ret >= 0) {
if (wbc->sync_mode == WB_SYNC_NONE) {
if (ret < wbc->nr_to_write)
wbc->nr_to_write -= ret;
else
wbc->nr_to_write = 0;
}
return 0;
}
out_mark_dirty:
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
return ret;
}
#else
static int nfs_commit_inode(struct inode *inode, int how)
{
return 0;
}
static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
{
return 0;
}
#endif
int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
return nfs_commit_unstable_pages(inode, wbc);
}
/*
* flush the inode to disk.
*/
int nfs_wb_all(struct inode *inode)
{
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = LONG_MAX,
.range_start = 0,
.range_end = LLONG_MAX,
};
return sync_inode(inode, &wbc);
}
int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
struct nfs_page *req;
int ret = 0;
BUG_ON(!PageLocked(page));
for (;;) {
wait_on_page_writeback(page);
req = nfs_page_find_request(page);
if (req == NULL)
break;
if (nfs_lock_request_dontget(req)) {
nfs_inode_remove_request(req);
/*
* In case nfs_inode_remove_request has marked the
* page as being dirty
*/
cancel_dirty_page(page, PAGE_CACHE_SIZE);
nfs_unlock_request(req);
break;
}
ret = nfs_wait_on_request(req);
nfs_release_request(req);
if (ret < 0)
break;
}
return ret;
}
/*
* Write back all requests on one page - we do this before reading it.
*/
int nfs_wb_page(struct inode *inode, struct page *page)
{
loff_t range_start = page_offset(page);
loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = 0,
.range_start = range_start,
.range_end = range_end,
};
int ret;
while(PagePrivate(page)) {
wait_on_page_writeback(page);
if (clear_page_dirty_for_io(page)) {
ret = nfs_writepage_locked(page, &wbc);
if (ret < 0)
goto out_error;
}
ret = sync_inode(inode, &wbc);
if (ret < 0)
goto out_error;
}
return 0;
out_error:
return ret;
}
#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
struct page *page)
{
struct nfs_page *req;
int ret;
nfs_fscache_release_page(page, GFP_KERNEL);
req = nfs_find_and_lock_request(page);
ret = PTR_ERR(req);
if (IS_ERR(req))
goto out;
ret = migrate_page(mapping, newpage, page);
if (!req)
goto out;
if (ret)
goto out_unlock;
page_cache_get(newpage);
spin_lock(&mapping->host->i_lock);
req->wb_page = newpage;
SetPagePrivate(newpage);
set_page_private(newpage, (unsigned long)req);
ClearPagePrivate(page);
set_page_private(page, 0);
spin_unlock(&mapping->host->i_lock);
page_cache_release(page);
out_unlock:
nfs_clear_page_tag_locked(req);
out:
return ret;
}
#endif
int __init nfs_init_writepagecache(void)
{
nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
sizeof(struct nfs_write_data),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (nfs_wdata_cachep == NULL)
return -ENOMEM;
nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
nfs_wdata_cachep);
if (nfs_wdata_mempool == NULL)
return -ENOMEM;
nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
nfs_wdata_cachep);
if (nfs_commit_mempool == NULL)
return -ENOMEM;
/*
* NFS congestion size, scale with available memory.
*
* 64MB: 8192k
* 128MB: 11585k
* 256MB: 16384k
* 512MB: 23170k
* 1GB: 32768k
* 2GB: 46340k
* 4GB: 65536k
* 8GB: 92681k
* 16GB: 131072k
*
* This allows larger machines to have larger/more transfers.
* Limit the default to 256M
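* (i.e. 16 * sqrt(totalram_pages) pages expressed in kilobytes;
* the table above assumes 4K pages)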
*/
nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
if (nfs_congestion_kb > 256*1024)
nfs_congestion_kb = 256*1024;
return 0;
}
void nfs_destroy_writepagecache(void)
{
mempool_destroy(nfs_commit_mempool);
mempool_destroy(nfs_wdata_mempool);
kmem_cache_destroy(nfs_wdata_cachep);
}
| gpl-2.0 |
h4ck3rm1k3/linux-android-kernel-qemu | drivers/platform/x86/asus-laptop.c | 27 | 42066 | /*
* asus-laptop.c - Asus Laptop Support
*
*
* Copyright (C) 2002-2005 Julien Lerouge, 2003-2006 Karol Kozimor
* Copyright (C) 2006-2007 Corentin Chary
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* The development page for this driver is located at
* http://sourceforge.net/projects/acpi4asus/
*
* Credits:
* Pontus Fuchs - Helper functions, cleanup
* Johann Wiesner - Small compile fixes
* John Belmonte - ACPI code for Toshiba laptop was a good starting point.
* Eric Burghard - LED display support for W1N
* Josh Green - Light Sens support
* Thomas Tuttle - His first patch for led support was very helpful
* Sam Lin - GPS support
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/proc_fs.h>
#include <linux/backlight.h>
#include <linux/fb.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/rfkill.h>
#include <linux/slab.h>
#include <acpi/acpi_drivers.h>
#include <acpi/acpi_bus.h>
#define ASUS_LAPTOP_VERSION "0.42"
#define ASUS_LAPTOP_NAME "Asus Laptop Support"
#define ASUS_LAPTOP_CLASS "hotkey"
#define ASUS_LAPTOP_DEVICE_NAME "Hotkey"
#define ASUS_LAPTOP_FILE KBUILD_MODNAME
#define ASUS_LAPTOP_PREFIX "\\_SB.ATKD."
MODULE_AUTHOR("Julien Lerouge, Karol Kozimor, Corentin Chary");
MODULE_DESCRIPTION(ASUS_LAPTOP_NAME);
MODULE_LICENSE("GPL");
/*
* WAPF defines the behavior of the Fn+Fx wlan key
* The significance of values is yet to be found, but
* most of the time:
* 0x0 will do nothing
* 0x1 will allow you to control the device with the Fn+Fx key.
* 0x4 will send an ACPI event (0x88) while pressing the Fn+Fx key
* 0x5 like 0x1 or 0x4
* So, if something doesn't work as you want, just try other values =)
*/
static uint wapf = 1;
module_param(wapf, uint, 0444);
MODULE_PARM_DESC(wapf, "WAPF value");
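/* Example usage (illustrative value): modprobe asus_laptop wapf=4 */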
static int wlan_status = 1;
static int bluetooth_status = 1;
module_param(wlan_status, int, 0444);
MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot "
"(0 = disabled, 1 = enabled, -1 = don't do anything). "
"default is 1");
module_param(bluetooth_status, int, 0444);
MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot "
"(0 = disabled, 1 = enabled, -1 = don't do anything). "
"default is 1");
/*
* Some events we use, same for all Asus
*/
#define ATKD_BR_UP 0x10 /* (event & ~ATKD_BR_UP) = brightness level */
#define ATKD_BR_DOWN 0x20 /* (event & ~ATKD_BR_DOWN) = brightness level */
#define ATKD_BR_MIN ATKD_BR_UP
#define ATKD_BR_MAX (ATKD_BR_DOWN | 0xF) /* 0x2f */
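/*
 * Worked example: a hardware event of 0x25 lies in [ATKD_BR_MIN, ATKD_BR_MAX]
 * (0x10..0x2f), so it is treated as a brightness notification;
 * 0x25 & ~ATKD_BR_DOWN == 0x05, i.e. "brightness down, level 5".
 */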
#define ATKD_LCD_ON 0x33
#define ATKD_LCD_OFF 0x34
/*
* Known bits returned by \_SB.ATKD.HWRS
*/
#define WL_HWRS 0x80
#define BT_HWRS 0x100
/*
* Flags for hotk status
* WL_RSTS and BT_RSTS are also used by asus_wireless_status()
*/
#define WL_RSTS 0x01 /* internal Wifi */
#define BT_RSTS 0x02 /* internal Bluetooth */
/* LED */
#define METHOD_MLED "MLED"
#define METHOD_TLED "TLED"
#define METHOD_RLED "RLED" /* W1JC */
#define METHOD_PLED "PLED" /* A7J */
#define METHOD_GLED "GLED" /* G1, G2 (probably) */
/* LEDD */
#define METHOD_LEDD "SLCM"
/*
* Bluetooth and WLAN
* WLED and BLED are not handled like the other xLEDs, because in some DSDTs
* they also control the WLAN/Bluetooth device.
*/
#define METHOD_WLAN "WLED"
#define METHOD_BLUETOOTH "BLED"
#define METHOD_WL_STATUS "RSTS"
/* Brightness */
#define METHOD_BRIGHTNESS_SET "SPLV"
#define METHOD_BRIGHTNESS_GET "GPLV"
/* Backlight */
static acpi_handle lcd_switch_handle;
static char *lcd_switch_paths[] = {
"\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */
"\\_SB.PCI0.ISA.EC0._Q10", /* A1x */
"\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */
"\\_SB.PCI0.PX40.EC0.Q10", /* M1A */
"\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */
"\\_SB.PCI0.LPCB.EC0._Q0E", /* P30/P35 */
"\\_SB.PCI0.PX40.Q10", /* S1x */
"\\Q10"}; /* A2x, L2D, L3D, M2E */
/* Display */
#define METHOD_SWITCH_DISPLAY "SDSP"
static acpi_handle display_get_handle;
static char *display_get_paths[] = {
/* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */
"\\_SB.PCI0.P0P1.VGA.GETD",
/* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */
"\\_SB.PCI0.P0P2.VGA.GETD",
/* A6V A6Q */
"\\_SB.PCI0.P0P3.VGA.GETD",
/* A6T, A6M */
"\\_SB.PCI0.P0PA.VGA.GETD",
/* L3C */
"\\_SB.PCI0.PCI1.VGAC.NMAP",
/* Z96F */
"\\_SB.PCI0.VGA.GETD",
/* A2D */
"\\ACTD",
/* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */
"\\ADVG",
/* P30 */
"\\DNXT",
/* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */
"\\INFB",
/* A3F A6F A3N A3L M6N W3N W6A */
"\\SSTE"};
#define METHOD_ALS_CONTROL "ALSC" /* Z71A Z71V */
#define METHOD_ALS_LEVEL "ALSL" /* Z71A Z71V */
/* GPS */
/* R2H use different handle for GPS on/off */
#define METHOD_GPS_ON "SDON"
#define METHOD_GPS_OFF "SDOF"
#define METHOD_GPS_STATUS "GPST"
/* Keyboard light */
#define METHOD_KBD_LIGHT_SET "SLKB"
#define METHOD_KBD_LIGHT_GET "GLKB"
/*
* Define a specific led structure to keep the main structure clean
*/
struct asus_led {
int wk;
struct work_struct work;
struct led_classdev led;
struct asus_laptop *asus;
const char *method;
};
/*
* This is the main structure, we can use it to store anything interesting
* about the hotk device
*/
struct asus_laptop {
char *name; /* laptop name */
struct acpi_table_header *dsdt_info;
struct platform_device *platform_device;
struct acpi_device *device; /* the device we are in */
struct backlight_device *backlight_device;
struct input_dev *inputdev;
struct key_entry *keymap;
struct asus_led mled;
struct asus_led tled;
struct asus_led rled;
struct asus_led pled;
struct asus_led gled;
struct asus_led kled;
struct workqueue_struct *led_workqueue;
int wireless_status;
bool have_rsts;
int lcd_state;
struct rfkill *gps_rfkill;
acpi_handle handle; /* the handle of the hotk device */
u32 ledd_status; /* status of the LED display */
u8 light_level; /* light sensor level */
u8 light_switch; /* light sensor switch value */
u16 event_count[128]; /* count for each event TODO make this better */
u16 *keycode_map;
};
static const struct key_entry asus_keymap[] = {
/* Lenovo SL Specific keycodes */
{KE_KEY, 0x02, { KEY_SCREENLOCK } },
{KE_KEY, 0x05, { KEY_WLAN } },
{KE_KEY, 0x08, { KEY_F13 } },
{KE_KEY, 0x17, { KEY_ZOOM } },
{KE_KEY, 0x1f, { KEY_BATTERY } },
/* End of Lenovo SL Specific keycodes */
{KE_KEY, 0x30, { KEY_VOLUMEUP } },
{KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
{KE_KEY, 0x32, { KEY_MUTE } },
{KE_KEY, 0x33, { KEY_SWITCHVIDEOMODE } },
{KE_KEY, 0x34, { KEY_SWITCHVIDEOMODE } },
{KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
{KE_KEY, 0x41, { KEY_NEXTSONG } },
{KE_KEY, 0x43, { KEY_STOPCD } },
{KE_KEY, 0x45, { KEY_PLAYPAUSE } },
{KE_KEY, 0x4c, { KEY_MEDIA } },
{KE_KEY, 0x50, { KEY_EMAIL } },
{KE_KEY, 0x51, { KEY_WWW } },
{KE_KEY, 0x55, { KEY_CALC } },
{KE_KEY, 0x5C, { KEY_SCREENLOCK } }, /* Screenlock */
{KE_KEY, 0x5D, { KEY_WLAN } },
{KE_KEY, 0x5E, { KEY_WLAN } },
{KE_KEY, 0x5F, { KEY_WLAN } },
{KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } },
{KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } },
{KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } },
{KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } },
{KE_KEY, 0x6B, { KEY_F13 } }, /* Lock Touchpad */
{KE_KEY, 0x7E, { KEY_BLUETOOTH } },
{KE_KEY, 0x7D, { KEY_BLUETOOTH } },
{KE_KEY, 0x82, { KEY_CAMERA } },
{KE_KEY, 0x88, { KEY_WLAN } },
{KE_KEY, 0x8A, { KEY_PROG1 } },
{KE_KEY, 0x95, { KEY_MEDIA } },
{KE_KEY, 0x99, { KEY_PHONE } },
{KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
{KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
{KE_END, 0},
};
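/*
 * At runtime sparse_keymap_report_event() walks this table: for example,
 * ACPI event 0x30 is reported as a KEY_VOLUMEUP press (and auto-released,
 * since asus_input_notify() passes autorelease = true).
 */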
/*
* This function evaluates an ACPI method, given an int as parameter. The
* method is searched within the scope of the handle, which can be NULL. The
* output of the method is written to output, which can also be NULL
*
* returns 0 if write is successful, -1 else.
*/
static int write_acpi_int_ret(acpi_handle handle, const char *method, int val,
struct acpi_buffer *output)
{
struct acpi_object_list params; /* list of input parameters (an int) */
union acpi_object in_obj; /* the only param we use */
acpi_status status;
if (!handle)
return -1;
params.count = 1;
params.pointer = &in_obj;
in_obj.type = ACPI_TYPE_INTEGER;
in_obj.integer.value = val;
status = acpi_evaluate_object(handle, (char *)method, &params, output);
if (status == AE_OK)
return 0;
else
return -1;
}
static int write_acpi_int(acpi_handle handle, const char *method, int val)
{
return write_acpi_int_ret(handle, method, val, NULL);
}
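/*
 * Usage sketch: on a typical DSDT, write_acpi_int(asus->handle, "MLED", 1)
 * evaluates \_SB.ATKD.MLED(1) -- the handle scopes the method lookup --
 * and discards any output object.
 */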
static int acpi_check_handle(acpi_handle handle, const char *method,
acpi_handle *ret)
{
acpi_status status;
if (method == NULL)
return -ENODEV;
if (ret)
status = acpi_get_handle(handle, (char *)method,
ret);
else {
acpi_handle dummy;
status = acpi_get_handle(handle, (char *)method,
&dummy);
}
if (status != AE_OK) {
if (ret)
pr_warning("Error finding %s\n", method);
return -ENODEV;
}
return 0;
}
/* Generic LED function */
static int asus_led_set(struct asus_laptop *asus, const char *method,
int value)
{
if (!strcmp(method, METHOD_MLED))
value = !value;
else if (!strcmp(method, METHOD_GLED))
value = !value + 1;
else
value = !!value;
return write_acpi_int(asus->handle, method, value);
}
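/*
 * The quirks above, tabulated for a requested brightness b:
 *   MLED: b=0 -> writes 1, b=1 -> writes 0 (active low)
 *   GLED: b=0 -> writes 2, b=1 -> writes 1
 *   others: writes !!b
 */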
/*
* LEDs
*/
/* /sys/class/led handlers */
static void asus_led_cdev_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct asus_led *led = container_of(led_cdev, struct asus_led, led);
struct asus_laptop *asus = led->asus;
led->wk = !!value;
queue_work(asus->led_workqueue, &led->work);
}
static void asus_led_cdev_update(struct work_struct *work)
{
struct asus_led *led = container_of(work, struct asus_led, work);
struct asus_laptop *asus = led->asus;
asus_led_set(asus, led->method, led->wk);
}
static enum led_brightness asus_led_cdev_get(struct led_classdev *led_cdev)
{
return led_cdev->brightness;
}
/*
* Keyboard backlight (also a LED)
*/
static int asus_kled_lvl(struct asus_laptop *asus)
{
unsigned long long kblv;
struct acpi_object_list params;
union acpi_object in_obj;
acpi_status rv;
params.count = 1;
params.pointer = &in_obj;
in_obj.type = ACPI_TYPE_INTEGER;
in_obj.integer.value = 2;
rv = acpi_evaluate_integer(asus->handle, METHOD_KBD_LIGHT_GET,
&params, &kblv);
if (ACPI_FAILURE(rv)) {
pr_warning("Error reading kled level\n");
return -ENODEV;
}
return kblv;
}
static int asus_kled_set(struct asus_laptop *asus, int kblv)
{
if (kblv > 0)
kblv = (1 << 7) | (kblv & 0x7F);
else
kblv = 0;
if (write_acpi_int(asus->handle, METHOD_KBD_LIGHT_SET, kblv)) {
pr_warning("Keyboard LED display write failed\n");
return -EINVAL;
}
return 0;
}
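/*
 * Encoding example: a requested level of 3 is sent as 0x83, i.e.
 * (1 << 7) | 3: bit 7 turns the keyboard backlight on, bits 0-6 carry
 * the level; writing 0 turns it off entirely.
 */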
static void asus_kled_cdev_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct asus_led *led = container_of(led_cdev, struct asus_led, led);
struct asus_laptop *asus = led->asus;
led->wk = value;
queue_work(asus->led_workqueue, &led->work);
}
static void asus_kled_cdev_update(struct work_struct *work)
{
struct asus_led *led = container_of(work, struct asus_led, work);
struct asus_laptop *asus = led->asus;
asus_kled_set(asus, led->wk);
}
static enum led_brightness asus_kled_cdev_get(struct led_classdev *led_cdev)
{
struct asus_led *led = container_of(led_cdev, struct asus_led, led);
struct asus_laptop *asus = led->asus;
return asus_kled_lvl(asus);
}
static void asus_led_exit(struct asus_laptop *asus)
{
if (asus->mled.led.dev)
led_classdev_unregister(&asus->mled.led);
if (asus->tled.led.dev)
led_classdev_unregister(&asus->tled.led);
if (asus->pled.led.dev)
led_classdev_unregister(&asus->pled.led);
if (asus->rled.led.dev)
led_classdev_unregister(&asus->rled.led);
if (asus->gled.led.dev)
led_classdev_unregister(&asus->gled.led);
if (asus->kled.led.dev)
led_classdev_unregister(&asus->kled.led);
if (asus->led_workqueue) {
destroy_workqueue(asus->led_workqueue);
asus->led_workqueue = NULL;
}
}
/* Generic LED class-device registration helper */
static int asus_led_register(struct asus_laptop *asus,
struct asus_led *led,
const char *name, const char *method)
{
struct led_classdev *led_cdev = &led->led;
if (!method || acpi_check_handle(asus->handle, method, NULL))
return 0; /* Led not present */
led->asus = asus;
led->method = method;
INIT_WORK(&led->work, asus_led_cdev_update);
led_cdev->name = name;
led_cdev->brightness_set = asus_led_cdev_set;
led_cdev->brightness_get = asus_led_cdev_get;
led_cdev->max_brightness = 1;
return led_classdev_register(&asus->platform_device->dev, led_cdev);
}
static int asus_led_init(struct asus_laptop *asus)
{
int r;
/*
* Functions that actually update the LEDs are called from a
* workqueue. By doing this as separate work rather than when the LED
* subsystem asks, we avoid messing with the Asus ACPI stuff during a
* potentially bad time, such as a timer interrupt.
*/
asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
if (!asus->led_workqueue)
return -ENOMEM;
r = asus_led_register(asus, &asus->mled, "asus::mail", METHOD_MLED);
if (r)
goto error;
r = asus_led_register(asus, &asus->tled, "asus::touchpad", METHOD_TLED);
if (r)
goto error;
r = asus_led_register(asus, &asus->rled, "asus::record", METHOD_RLED);
if (r)
goto error;
r = asus_led_register(asus, &asus->pled, "asus::phone", METHOD_PLED);
if (r)
goto error;
r = asus_led_register(asus, &asus->gled, "asus::gaming", METHOD_GLED);
if (r)
goto error;
if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL) &&
!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_GET, NULL)) {
struct asus_led *led = &asus->kled;
struct led_classdev *cdev = &led->led;
led->asus = asus;
INIT_WORK(&led->work, asus_kled_cdev_update);
cdev->name = "asus::kbd_backlight";
cdev->brightness_set = asus_kled_cdev_set;
cdev->brightness_get = asus_kled_cdev_get;
cdev->max_brightness = 3;
r = led_classdev_register(&asus->platform_device->dev, cdev);
}
error:
if (r)
asus_led_exit(asus);
return r;
}
/*
* Backlight device
*/
static int asus_lcd_status(struct asus_laptop *asus)
{
return asus->lcd_state;
}
static int asus_lcd_set(struct asus_laptop *asus, int value)
{
int lcd = 0;
acpi_status status = 0;
lcd = !!value;
if (lcd == asus_lcd_status(asus))
return 0;
if (!lcd_switch_handle)
return -ENODEV;
status = acpi_evaluate_object(lcd_switch_handle,
NULL, NULL, NULL);
if (ACPI_FAILURE(status)) {
pr_warning("Error switching LCD\n");
return -ENODEV;
}
asus->lcd_state = lcd;
return 0;
}
static void lcd_blank(struct asus_laptop *asus, int blank)
{
struct backlight_device *bd = asus->backlight_device;
asus->lcd_state = (blank == FB_BLANK_UNBLANK);
if (bd) {
bd->props.power = blank;
backlight_update_status(bd);
}
}
static int asus_read_brightness(struct backlight_device *bd)
{
struct asus_laptop *asus = bl_get_data(bd);
unsigned long long value;
acpi_status rv = AE_OK;
rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET,
NULL, &value);
if (ACPI_FAILURE(rv))
pr_warning("Error reading brightness\n");
return value;
}
static int asus_set_brightness(struct backlight_device *bd, int value)
{
struct asus_laptop *asus = bl_get_data(bd);
if (write_acpi_int(asus->handle, METHOD_BRIGHTNESS_SET, value)) {
pr_warning("Error changing brightness\n");
return -EIO;
}
return 0;
}
static int update_bl_status(struct backlight_device *bd)
{
struct asus_laptop *asus = bl_get_data(bd);
int rv;
int value = bd->props.brightness;
rv = asus_set_brightness(bd, value);
if (rv)
return rv;
value = (bd->props.power == FB_BLANK_UNBLANK) ? 1 : 0;
return asus_lcd_set(asus, value);
}
static struct backlight_ops asusbl_ops = {
.get_brightness = asus_read_brightness,
.update_status = update_bl_status,
};
static int asus_backlight_notify(struct asus_laptop *asus)
{
struct backlight_device *bd = asus->backlight_device;
int old = bd->props.brightness;
backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
return old;
}
static int asus_backlight_init(struct asus_laptop *asus)
{
struct backlight_device *bd;
struct device *dev = &asus->platform_device->dev;
struct backlight_properties props;
if (!acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) &&
!acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL) &&
lcd_switch_handle) {
memset(&props, 0, sizeof(struct backlight_properties));
props.max_brightness = 15;
bd = backlight_device_register(ASUS_LAPTOP_FILE, dev,
asus, &asusbl_ops, &props);
if (IS_ERR(bd)) {
pr_err("Could not register asus backlight device\n");
asus->backlight_device = NULL;
return PTR_ERR(bd);
}
asus->backlight_device = bd;
bd->props.power = FB_BLANK_UNBLANK;
bd->props.brightness = asus_read_brightness(bd);
backlight_update_status(bd);
}
return 0;
}
static void asus_backlight_exit(struct asus_laptop *asus)
{
if (asus->backlight_device)
backlight_device_unregister(asus->backlight_device);
asus->backlight_device = NULL;
}
/*
* Platform device handlers
*/
/*
* Format the model and firmware information into the sysfs buffer and
* return the number of bytes written into page.
*/
static ssize_t show_infos(struct device *dev,
struct device_attribute *attr, char *page)
{
struct asus_laptop *asus = dev_get_drvdata(dev);
int len = 0;
unsigned long long temp;
char buf[16]; /* enough for all info */
acpi_status rv = AE_OK;
len += sprintf(page, ASUS_LAPTOP_NAME " " ASUS_LAPTOP_VERSION "\n");
len += sprintf(page + len, "Model reference : %s\n", asus->name);
/*
* The SFUN method probably allows the original driver to get the list
* of features supported by a given model. For now, the 0x0100 or 0x0800
* bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card.
* The significance of others is yet to be found.
*/
rv = acpi_evaluate_integer(asus->handle, "SFUN", NULL, &temp);
if (!ACPI_FAILURE(rv))
len += sprintf(page + len, "SFUN value : %#x\n",
(uint) temp);
/*
* The HWRS method returns information about the hardware.
* The 0x80 bit is for WLAN, 0x100 for Bluetooth.
* The significance of the other bits is yet to be found.
* If we don't find the method, we assume the devices are present.
*/
rv = acpi_evaluate_integer(asus->handle, "HRWS", NULL, &temp);
if (!ACPI_FAILURE(rv))
len += sprintf(page + len, "HRWS value : %#x\n",
(uint) temp);
/*
* Another value for userspace: the ASYM method returns 0x02 for
* battery low and 0x04 for battery critical, its readings tend to be
* more accurate than those provided by _BST.
* Note: since not all the laptops provide this method, errors are
* silently ignored.
*/
rv = acpi_evaluate_integer(asus->handle, "ASYM", NULL, &temp);
if (!ACPI_FAILURE(rv))
len += sprintf(page + len, "ASYM value : %#x\n",
(uint) temp);
if (asus->dsdt_info) {
snprintf(buf, 16, "%d", asus->dsdt_info->length);
len += sprintf(page + len, "DSDT length : %s\n", buf);
snprintf(buf, 16, "%d", asus->dsdt_info->checksum);
len += sprintf(page + len, "DSDT checksum : %s\n", buf);
snprintf(buf, 16, "%d", asus->dsdt_info->revision);
len += sprintf(page + len, "DSDT revision : %s\n", buf);
snprintf(buf, 7, "%s", asus->dsdt_info->oem_id);
len += sprintf(page + len, "OEM id : %s\n", buf);
snprintf(buf, 9, "%s", asus->dsdt_info->oem_table_id);
len += sprintf(page + len, "OEM table id : %s\n", buf);
snprintf(buf, 16, "%x", asus->dsdt_info->oem_revision);
len += sprintf(page + len, "OEM revision : 0x%s\n", buf);
snprintf(buf, 5, "%s", asus->dsdt_info->asl_compiler_id);
len += sprintf(page + len, "ASL comp vendor id : %s\n", buf);
snprintf(buf, 16, "%x", asus->dsdt_info->asl_compiler_revision);
len += sprintf(page + len, "ASL comp revision : 0x%s\n", buf);
}
return len;
}
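/*
 * Sample output (values illustrative only):
 *   Asus Laptop Support 0.42
 *   Model reference : M6N
 *   SFUN value : 0x975
 *   DSDT length : 29792
 *   ...
 */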
static int parse_arg(const char *buf, unsigned long count, int *val)
{
if (!count)
return 0;
if (count > 31)
return -EINVAL;
if (sscanf(buf, "%i", val) != 1)
return -EINVAL;
return count;
}
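/*
 * Example: parse_arg("2\n", 2, &v) stores 2 in v and returns 2 (the byte
 * count), which sysfs store handlers can return directly; "%i" also
 * accepts hex, so "0x10" parses as 16.
 */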
static ssize_t sysfs_acpi_set(struct asus_laptop *asus,
const char *buf, size_t count,
const char *method)
{
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv <= 0)
return rv;
if (write_acpi_int(asus->handle, method, !!value))
return -ENODEV;
return rv;
}
/*
* LEDD display
*/
static ssize_t show_ledd(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_laptop *asus = dev_get_drvdata(dev);
return sprintf(buf, "0x%08x\n", asus->ledd_status);
}
static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv > 0) {
if (write_acpi_int(asus->handle, METHOD_LEDD, value)) {
pr_warning("LED display write failed\n");
return -ENODEV;
}
asus->ledd_status = (u32) value;
}
return rv;
}
/*
* Wireless
*/
static int asus_wireless_status(struct asus_laptop *asus, int mask)
{
unsigned long long status;
acpi_status rv = AE_OK;
if (!asus->have_rsts)
return (asus->wireless_status & mask) ? 1 : 0;
rv = acpi_evaluate_integer(asus->handle, METHOD_WL_STATUS,
NULL, &status);
if (ACPI_FAILURE(rv)) {
pr_warning("Error reading Wireless status\n");
return -EINVAL;
}
return !!(status & mask);
}
/*
* WLAN
*/
static int asus_wlan_set(struct asus_laptop *asus, int status)
{
if (write_acpi_int(asus->handle, METHOD_WLAN, !!status)) {
pr_warning("Error setting wlan status to %d", status);
return -EIO;
}
return 0;
}
static ssize_t show_wlan(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_laptop *asus = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", asus_wireless_status(asus, WL_RSTS));
}
static ssize_t store_wlan(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct asus_laptop *asus = dev_get_drvdata(dev);
return sysfs_acpi_set(asus, buf, count, METHOD_WLAN);
}
/*
* Bluetooth
*/
static int asus_bluetooth_set(struct asus_laptop *asus, int status)
{
if (write_acpi_int(asus->handle, METHOD_BLUETOOTH, !!status)) {
pr_warning("Error setting bluetooth status to %d", status);
return -EIO;
}
return 0;
}
static ssize_t show_bluetooth(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_laptop *asus = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", asus_wireless_status(asus, BT_RSTS));
}
static ssize_t store_bluetooth(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct asus_laptop *asus = dev_get_drvdata(dev);
return sysfs_acpi_set(asus, buf, count, METHOD_BLUETOOTH);
}
/*
* Display
*/
static void asus_set_display(struct asus_laptop *asus, int value)
{
/* no sanity check needed for now */
if (write_acpi_int(asus->handle, METHOD_SWITCH_DISPLAY, value))
pr_warning("Error setting display\n");
return;
}
static int read_display(struct asus_laptop *asus)
{
unsigned long long value = 0;
acpi_status rv = AE_OK;
/*
* In most cases, we know how to set the display, but sometimes
* we can't read it
*/
if (display_get_handle) {
rv = acpi_evaluate_integer(display_get_handle, NULL,
NULL, &value);
if (ACPI_FAILURE(rv))
pr_warning("Error reading display status\n");
}
value &= 0x0F; /* needed for some models, shouldn't hurt others */
return value;
}
/*
* Now, *this* one could be more user-friendly, but so far, no-one has
* complained. The significance of bits is the same as in store_disp()
*/
static ssize_t show_disp(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_laptop *asus = dev_get_drvdata(dev);
if (!display_get_handle)
return -ENODEV;
return sprintf(buf, "%d\n", read_display(asus));
}
/*
* Experimental support for display switching. As of now: 1 should activate
* the LCD output, 2 should do for CRT, 4 for TV-Out and 8 for DVI.
* Any combination (bitwise) of these will suffice. I never actually tested 4
* displays hooked up simultaneously, so be warned. See the acpi4asus README
* for more info.
*/
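/*
 * Worked example: "echo 3 > display" requests LCD (1) + CRT (2) at once,
 * i.e. SDSP is evaluated with the integer 3.
 */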
static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv > 0)
asus_set_display(asus, value);
return rv;
}
/*
* Light Sens
*/
static void asus_als_switch(struct asus_laptop *asus, int value)
{
if (write_acpi_int(asus->handle, METHOD_ALS_CONTROL, value))
pr_warning("Error setting light sensor switch\n");
asus->light_switch = value;
}
static ssize_t show_lssw(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_laptop *asus = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", asus->light_switch);
}
static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv > 0)
asus_als_switch(asus, value ? 1 : 0);
return rv;
}
static void asus_als_level(struct asus_laptop *asus, int value)
{
if (write_acpi_int(asus->handle, METHOD_ALS_LEVEL, value))
pr_warning("Error setting light sensor level\n");
asus->light_level = value;
}
static ssize_t show_lslvl(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_laptop *asus = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", asus->light_level);
}
static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv > 0) {
value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
/* 0 <= value <= 15 */
asus_als_level(asus, value);
}
return rv;
}
/*
* GPS
*/
static int asus_gps_status(struct asus_laptop *asus)
{
unsigned long long status;
acpi_status rv = AE_OK;
rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS,
NULL, &status);
if (ACPI_FAILURE(rv)) {
pr_warning("Error reading GPS status\n");
return -ENODEV;
}
return !!status;
}
static int asus_gps_switch(struct asus_laptop *asus, int status)
{
const char *meth = status ? METHOD_GPS_ON : METHOD_GPS_OFF;
if (write_acpi_int(asus->handle, meth, 0x02))
return -ENODEV;
return 0;
}
static ssize_t show_gps(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_laptop *asus = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", asus_gps_status(asus));
}
static ssize_t store_gps(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
int ret;
rv = parse_arg(buf, count, &value);
if (rv <= 0)
return -EINVAL;
ret = asus_gps_switch(asus, !!value);
if (ret)
return ret;
rfkill_set_sw_state(asus->gps_rfkill, !value);
return rv;
}
/*
* rfkill
*/
static int asus_gps_rfkill_set(void *data, bool blocked)
{
struct asus_laptop *asus = data;
return asus_gps_switch(asus, !blocked);
}
static const struct rfkill_ops asus_gps_rfkill_ops = {
.set_block = asus_gps_rfkill_set,
};
static void asus_rfkill_exit(struct asus_laptop *asus)
{
if (asus->gps_rfkill) {
rfkill_unregister(asus->gps_rfkill);
rfkill_destroy(asus->gps_rfkill);
asus->gps_rfkill = NULL;
}
}
static int asus_rfkill_init(struct asus_laptop *asus)
{
int result;
if (acpi_check_handle(asus->handle, METHOD_GPS_ON, NULL) ||
acpi_check_handle(asus->handle, METHOD_GPS_OFF, NULL) ||
acpi_check_handle(asus->handle, METHOD_GPS_STATUS, NULL))
return 0;
asus->gps_rfkill = rfkill_alloc("asus-gps", &asus->platform_device->dev,
RFKILL_TYPE_GPS,
&asus_gps_rfkill_ops, asus);
if (!asus->gps_rfkill)
return -EINVAL;
result = rfkill_register(asus->gps_rfkill);
if (result) {
rfkill_destroy(asus->gps_rfkill);
asus->gps_rfkill = NULL;
}
return result;
}
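/*
 * Userspace sketch: once registered, "rfkill block gps" lands in
 * asus_gps_rfkill_set(asus, true), which evaluates SDOF (and SDON on
 * unblock), always with the magic argument 0x02.
 */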
/*
* Input device (i.e. hotkeys)
*/
static void asus_input_notify(struct asus_laptop *asus, int event)
{
if (asus->inputdev)
sparse_keymap_report_event(asus->inputdev, event, 1, true);
}
static int asus_input_init(struct asus_laptop *asus)
{
struct input_dev *input;
int error;
input = input_allocate_device();
if (!input) {
pr_info("Unable to allocate input device\n");
return -ENOMEM;
}
input->name = "Asus Laptop extra buttons";
input->phys = ASUS_LAPTOP_FILE "/input0";
input->id.bustype = BUS_HOST;
input->dev.parent = &asus->platform_device->dev;
input_set_drvdata(input, asus);
error = sparse_keymap_setup(input, asus_keymap, NULL);
if (error) {
pr_err("Unable to setup input device keymap\n");
goto err_free_dev;
}
error = input_register_device(input);
if (error) {
pr_info("Unable to register input device\n");
goto err_free_keymap;
}
asus->inputdev = input;
return 0;
err_free_keymap:
sparse_keymap_free(input);
err_free_dev:
input_free_device(input);
return error;
}
static void asus_input_exit(struct asus_laptop *asus)
{
if (asus->inputdev) {
sparse_keymap_free(asus->inputdev);
input_unregister_device(asus->inputdev);
}
}
/*
* ACPI driver
*/
static void asus_acpi_notify(struct acpi_device *device, u32 event)
{
struct asus_laptop *asus = acpi_driver_data(device);
u16 count;
/*
* We need to tell the backlight device when the backlight power is
* switched
*/
if (event == ATKD_LCD_ON)
lcd_blank(asus, FB_BLANK_UNBLANK);
else if (event == ATKD_LCD_OFF)
lcd_blank(asus, FB_BLANK_POWERDOWN);
/* TODO Find a better way to handle events count. */
count = asus->event_count[event % 128]++;
acpi_bus_generate_proc_event(asus->device, event, count);
acpi_bus_generate_netlink_event(asus->device->pnp.device_class,
dev_name(&asus->device->dev), event,
count);
/* Brightness events are special */
if (event >= ATKD_BR_MIN && event <= ATKD_BR_MAX) {
/* Ignore them completely if the acpi video driver is used */
if (asus->backlight_device != NULL) {
/* Update the backlight device. */
asus_backlight_notify(asus);
}
return ;
}
asus_input_notify(asus, event);
}
static DEVICE_ATTR(infos, S_IRUGO, show_infos, NULL);
static DEVICE_ATTR(wlan, S_IRUGO | S_IWUSR, show_wlan, store_wlan);
static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR, show_bluetooth,
store_bluetooth);
static DEVICE_ATTR(display, S_IRUGO | S_IWUSR, show_disp, store_disp);
static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd);
static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl);
static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw);
static DEVICE_ATTR(gps, S_IRUGO | S_IWUSR, show_gps, store_gps);
static void asus_sysfs_exit(struct asus_laptop *asus)
{
struct platform_device *device = asus->platform_device;
device_remove_file(&device->dev, &dev_attr_infos);
device_remove_file(&device->dev, &dev_attr_wlan);
device_remove_file(&device->dev, &dev_attr_bluetooth);
device_remove_file(&device->dev, &dev_attr_display);
device_remove_file(&device->dev, &dev_attr_ledd);
device_remove_file(&device->dev, &dev_attr_ls_switch);
device_remove_file(&device->dev, &dev_attr_ls_level);
device_remove_file(&device->dev, &dev_attr_gps);
}
static int asus_sysfs_init(struct asus_laptop *asus)
{
struct platform_device *device = asus->platform_device;
int err;
err = device_create_file(&device->dev, &dev_attr_infos);
if (err)
return err;
if (!acpi_check_handle(asus->handle, METHOD_WLAN, NULL)) {
err = device_create_file(&device->dev, &dev_attr_wlan);
if (err)
return err;
}
if (!acpi_check_handle(asus->handle, METHOD_BLUETOOTH, NULL)) {
err = device_create_file(&device->dev, &dev_attr_bluetooth);
if (err)
return err;
}
if (!acpi_check_handle(asus->handle, METHOD_SWITCH_DISPLAY, NULL)) {
err = device_create_file(&device->dev, &dev_attr_display);
if (err)
return err;
}
if (!acpi_check_handle(asus->handle, METHOD_LEDD, NULL)) {
err = device_create_file(&device->dev, &dev_attr_ledd);
if (err)
return err;
}
if (!acpi_check_handle(asus->handle, METHOD_ALS_CONTROL, NULL) &&
!acpi_check_handle(asus->handle, METHOD_ALS_LEVEL, NULL)) {
err = device_create_file(&device->dev, &dev_attr_ls_switch);
if (err)
return err;
err = device_create_file(&device->dev, &dev_attr_ls_level);
if (err)
return err;
}
if (!acpi_check_handle(asus->handle, METHOD_GPS_ON, NULL) &&
!acpi_check_handle(asus->handle, METHOD_GPS_OFF, NULL) &&
!acpi_check_handle(asus->handle, METHOD_GPS_STATUS, NULL)) {
err = device_create_file(&device->dev, &dev_attr_gps);
if (err)
return err;
}
return err;
}
static int asus_platform_init(struct asus_laptop *asus)
{
int err;
asus->platform_device = platform_device_alloc(ASUS_LAPTOP_FILE, -1);
if (!asus->platform_device)
return -ENOMEM;
platform_set_drvdata(asus->platform_device, asus);
err = platform_device_add(asus->platform_device);
if (err)
goto fail_platform_device;
err = asus_sysfs_init(asus);
if (err)
goto fail_sysfs;
return 0;
fail_sysfs:
asus_sysfs_exit(asus);
platform_device_del(asus->platform_device);
fail_platform_device:
platform_device_put(asus->platform_device);
return err;
}
static void asus_platform_exit(struct asus_laptop *asus)
{
asus_sysfs_exit(asus);
platform_device_unregister(asus->platform_device);
}
static struct platform_driver platform_driver = {
.driver = {
.name = ASUS_LAPTOP_FILE,
.owner = THIS_MODULE,
}
};
static int asus_handle_init(char *name, acpi_handle *handle,
char **paths, int num_paths)
{
int i;
acpi_status status;
for (i = 0; i < num_paths; i++) {
status = acpi_get_handle(NULL, paths[i], handle);
if (ACPI_SUCCESS(status))
return 0;
}
*handle = NULL;
return -ENODEV;
}
#define ASUS_HANDLE_INIT(object) \
asus_handle_init(#object, &object##_handle, object##_paths, \
ARRAY_SIZE(object##_paths))
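/*
 * Expansion sketch: ASUS_HANDLE_INIT(lcd_switch) becomes
 * asus_handle_init("lcd_switch", &lcd_switch_handle, lcd_switch_paths,
 * ARRAY_SIZE(lcd_switch_paths)); the first path that resolves wins.
 */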
/*
* This function is used to initialize the context with the right values. In this
* method, we can make all the detection we want, and modify the asus_laptop
* struct
*/
static int asus_laptop_get_info(struct asus_laptop *asus)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *model = NULL;
unsigned long long bsts_result, hwrs_result;
char *string = NULL;
acpi_status status;
/*
* Get DSDT headers early enough to allow for differentiating between
* models, but late enough to allow acpi_bus_register_driver() to fail
* before doing anything ACPI-specific. Should we encounter a machine,
* which needs special handling (i.e. its hotkey device has a different
* HID), this bit will be moved.
*/
status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus->dsdt_info);
if (ACPI_FAILURE(status))
pr_warning("Couldn't get the DSDT table header\n");
/* We have to write 0 on init this far for all ASUS models */
if (write_acpi_int_ret(asus->handle, "INIT", 0, &buffer)) {
pr_err("Hotkey initialization failed\n");
return -ENODEV;
}
/* This needs to be called for some laptops to init properly */
status =
acpi_evaluate_integer(asus->handle, "BSTS", NULL, &bsts_result);
if (ACPI_FAILURE(status))
pr_warning("Error calling BSTS\n");
else if (bsts_result)
pr_notice("BSTS called, 0x%02x returned\n",
(uint) bsts_result);
/* This too ... */
if (write_acpi_int(asus->handle, "CWAP", wapf))
pr_err("Error calling CWAP(%d)\n", wapf);
/*
* Try to match the object returned by INIT to the specific model.
* Handle every possible object (or the lack thereof) the DSDT
* writers might throw at us. When in trouble, we pass NULL to
* asus_model_match() and try something completely different.
*/
if (buffer.pointer) {
model = buffer.pointer;
switch (model->type) {
case ACPI_TYPE_STRING:
string = model->string.pointer;
break;
case ACPI_TYPE_BUFFER:
string = model->buffer.pointer;
break;
default:
string = "";
break;
}
}
asus->name = kstrdup(string, GFP_KERNEL);
if (!asus->name) {
kfree(buffer.pointer);
return -ENOMEM;
}
if (*string)
pr_notice(" %s model detected\n", string);
/*
* The HWRS method returns information about the hardware.
* The 0x80 bit is for WLAN, 0x100 for Bluetooth.
* The significance of the other bits is yet to be found.
*/
status =
acpi_evaluate_integer(asus->handle, "HRWS", NULL, &hwrs_result);
if (!ACPI_FAILURE(status))
pr_notice(" HRWS returned %x", (int)hwrs_result);
if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
asus->have_rsts = true;
/* Scheduled for removal */
ASUS_HANDLE_INIT(lcd_switch);
ASUS_HANDLE_INIT(display_get);
kfree(model);
return 0;
}
static bool asus_device_present;
static int __devinit asus_acpi_init(struct asus_laptop *asus)
{
int result = 0;
result = acpi_bus_get_status(asus->device);
if (result)
return result;
if (!asus->device->status.present) {
pr_err("Hotkey device not present, aborting\n");
return -ENODEV;
}
result = asus_laptop_get_info(asus);
if (result)
return result;
/* WLED and BLED are on by default */
if (bluetooth_status >= 0)
asus_bluetooth_set(asus, !!bluetooth_status);
if (wlan_status >= 0)
asus_wlan_set(asus, !!wlan_status);
/* Keyboard Backlight is on by default */
if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL))
asus_kled_set(asus, 1);
/* LED display is off by default */
asus->ledd_status = 0xFFF;
/* Set initial values of light sensor and level */
asus->light_switch = 0; /* Default to light sensor disabled */
asus->light_level = 5; /* level 5 for sensor sensitivity */
if (!acpi_check_handle(asus->handle, METHOD_ALS_CONTROL, NULL) &&
!acpi_check_handle(asus->handle, METHOD_ALS_LEVEL, NULL)) {
asus_als_switch(asus, asus->light_switch);
asus_als_level(asus, asus->light_level);
}
asus->lcd_state = 1; /* LCD should be on when the module loads */
return result;
}
static int __devinit asus_acpi_add(struct acpi_device *device)
{
struct asus_laptop *asus;
int result;
pr_notice("Asus Laptop Support version %s\n",
ASUS_LAPTOP_VERSION);
asus = kzalloc(sizeof(struct asus_laptop), GFP_KERNEL);
if (!asus)
return -ENOMEM;
asus->handle = device->handle;
strcpy(acpi_device_name(device), ASUS_LAPTOP_DEVICE_NAME);
strcpy(acpi_device_class(device), ASUS_LAPTOP_CLASS);
device->driver_data = asus;
asus->device = device;
result = asus_acpi_init(asus);
if (result)
goto fail_platform;
/*
* Register the platform device first. It is used as a parent for the
* sub-devices below.
*/
result = asus_platform_init(asus);
if (result)
goto fail_platform;
if (!acpi_video_backlight_support()) {
result = asus_backlight_init(asus);
if (result)
goto fail_backlight;
} else
pr_info("Backlight controlled by ACPI video driver\n");
result = asus_input_init(asus);
if (result)
goto fail_input;
result = asus_led_init(asus);
if (result)
goto fail_led;
result = asus_rfkill_init(asus);
if (result)
goto fail_rfkill;
asus_device_present = true;
return 0;
fail_rfkill:
asus_led_exit(asus);
fail_led:
asus_input_exit(asus);
fail_input:
asus_backlight_exit(asus);
fail_backlight:
asus_platform_exit(asus);
fail_platform:
kfree(asus->name);
kfree(asus);
return result;
}
static int asus_acpi_remove(struct acpi_device *device, int type)
{
struct asus_laptop *asus = acpi_driver_data(device);
asus_backlight_exit(asus);
asus_rfkill_exit(asus);
asus_led_exit(asus);
asus_input_exit(asus);
asus_platform_exit(asus);
kfree(asus->name);
kfree(asus);
return 0;
}
static const struct acpi_device_id asus_device_ids[] = {
{"ATK0100", 0},
{"ATK0101", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, asus_device_ids);
static struct acpi_driver asus_acpi_driver = {
.name = ASUS_LAPTOP_NAME,
.class = ASUS_LAPTOP_CLASS,
.owner = THIS_MODULE,
.ids = asus_device_ids,
.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = asus_acpi_add,
.remove = asus_acpi_remove,
.notify = asus_acpi_notify,
},
};
static int __init asus_laptop_init(void)
{
int result;
result = platform_driver_register(&platform_driver);
if (result < 0)
return result;
result = acpi_bus_register_driver(&asus_acpi_driver);
if (result < 0)
goto fail_acpi_driver;
if (!asus_device_present) {
result = -ENODEV;
goto fail_no_device;
}
return 0;
fail_no_device:
acpi_bus_unregister_driver(&asus_acpi_driver);
fail_acpi_driver:
platform_driver_unregister(&platform_driver);
return result;
}
static void __exit asus_laptop_exit(void)
{
acpi_bus_unregister_driver(&asus_acpi_driver);
platform_driver_unregister(&platform_driver);
}
module_init(asus_laptop_init);
module_exit(asus_laptop_exit);
| gpl-2.0 |
librae8226/linux-2.6.30.4 | drivers/net/irda/ksdazzle-sir.c | 27 | 23490 | /*****************************************************************************
*
* Filename: ksdazzle.c
* Version: 0.1.2
* Description: Irda KingSun Dazzle USB Dongle
* Status: Experimental
* Author: Alex Villacís Lasso <a_villacis@palosanto.com>
*
* Based on stir4200, mcs7780, kingsun-sir drivers.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*****************************************************************************/
/*
* Following is my most current (2007-07-26) understanding of how the Kingsun
* 07D0:4100 dongle (sometimes known as the MA-660) is supposed to work. This
* information was deduced by examining the USB traffic captured with USBSnoopy
* from the WinXP driver. Feel free to update here as more of the dongle is
* known.
*
* General: This dongle exposes one interface with two interrupt endpoints, one
* IN and one OUT. In this regard, it is similar to what the Kingsun/Donshine
* dongle (07c0:4200) exposes. Traffic is raw and needs to be wrapped and
* unwrapped manually as in stir4200, kingsun-sir, and ks959-sir.
*
* Transmission: To transmit an IrDA frame, it is necessary to wrap it, then
* split it into multiple segments of up to 7 bytes each, and transmit each in
* sequence. It seems that sending a single big block (like kingsun-sir does)
* won't work with this dongle. Each segment needs to be prefixed with a value
* equal to (unsigned char)0xF8 + <number of bytes in segment>, inside a payload
* of exactly 8 bytes. For example, a segment of 1 byte gets prefixed by 0xF9,
* and one of 7 bytes gets prefixed by 0xFF. The bytes at the end of the
* payload, not considered by the prefix, are ignored (set to 0 by this
* implementation).
*
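* Worked example: a 10-byte wrapped frame b0..b9 would go out as two 8-byte
* payloads: { 0xFF, b0..b6 } (prefix 0xF8 + 7) followed by
* { 0xFB, b7, b8, b9, 0, 0, 0, 0 } (prefix 0xF8 + 3, tail padded with zeros).
*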
* Reception: To receive data, the driver must poll the dongle regularly (like
* kingsun-sir.c) with interrupt URBs. If data is available, it will be returned
* in payloads from 0 to 8 bytes long. When concatenated, these payloads form
* a raw IrDA stream that needs to be unwrapped as in stir4200 and kingsun-sir
*
* Speed change: To change the speed of the dongle, the driver prepares a
* control URB with the following as a setup packet:
* bRequestType USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE
* bRequest 0x09
* wValue 0x0200
* wIndex 0x0001
* wLength 0x0008 (length of the payload)
* The payload is a 8-byte record, apparently identical to the one used in
* drivers/usb/serial/cypress_m8.c to change speed:
* __u32 baudSpeed;
* unsigned int dataBits : 2; // 0 = 5 bits ... 3 = 8 bits
* unsigned int : 1;
* unsigned int stopBits : 1;
* unsigned int parityEnable : 1;
* unsigned int parityType : 1;
* unsigned int : 1;
* unsigned int reset : 1;
* unsigned char reserved[3]; // set to 0
*
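* Example (sketch): selecting 9600 bps, 8 data bits, no parity would send
* the 8-byte payload 80 25 00 00 03 00 00 00 -- cpu_to_le32(9600), the
* flags byte KS_DATA_8_BITS (0x03), then the three reserved zero bytes.
*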
* For now only SIR speeds have been observed with this dongle. Therefore,
* nothing is known on what changes (if any) must be done to frame wrapping /
* unwrapping for higher than SIR speeds. This driver assumes no change is
* necessary and announces support for all the way to 115200 bps.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kref.h>
#include <linux/usb.h>
#include <linux/device.h>
#include <linux/crc32.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/crc.h>
#define KSDAZZLE_VENDOR_ID 0x07d0
#define KSDAZZLE_PRODUCT_ID 0x4100
/* These are the currently known USB ids */
static struct usb_device_id dongles[] = {
/* KingSun Co,Ltd IrDA/USB Bridge */
{USB_DEVICE(KSDAZZLE_VENDOR_ID, KSDAZZLE_PRODUCT_ID)},
{}
};
MODULE_DEVICE_TABLE(usb, dongles);
#define KINGSUN_MTT 0x07
#define KINGSUN_REQ_RECV 0x01
#define KINGSUN_REQ_SEND 0x09
#define KINGSUN_SND_FIFO_SIZE 2048 /* Max packet we can send */
#define KINGSUN_RCV_MAX 2048 /* Max transfer we can receive */
struct ksdazzle_speedparams {
__le32 baudrate; /* baud rate, little endian */
__u8 flags;
__u8 reserved[3];
} __attribute__ ((packed));
#define KS_DATA_5_BITS 0x00
#define KS_DATA_6_BITS 0x01
#define KS_DATA_7_BITS 0x02
#define KS_DATA_8_BITS 0x03
#define KS_STOP_BITS_1 0x00
#define KS_STOP_BITS_2 0x08
#define KS_PAR_DISABLE 0x00
#define KS_PAR_EVEN 0x10
#define KS_PAR_ODD 0x30
#define KS_RESET 0x80
#define KINGSUN_EP_IN 0
#define KINGSUN_EP_OUT 1
struct ksdazzle_cb {
struct usb_device *usbdev; /* init: probe_irda */
struct net_device *netdev; /* network layer */
struct irlap_cb *irlap; /* The link layer we are bound to */
struct qos_info qos;
struct urb *tx_urb;
__u8 *tx_buf_clear;
unsigned int tx_buf_clear_used;
unsigned int tx_buf_clear_sent;
__u8 tx_payload[8];
struct urb *rx_urb;
__u8 *rx_buf;
iobuff_t rx_unwrap_buff;
struct usb_ctrlrequest *speed_setuprequest;
struct urb *speed_urb;
struct ksdazzle_speedparams speedparams;
unsigned int new_speed;
__u8 ep_in;
__u8 ep_out;
spinlock_t lock;
int receiving;
};
/* Callback transmission routine */
static void ksdazzle_speed_irq(struct urb *urb)
{
/* unlink, shutdown, unplug, other nasties */
if (urb->status != 0) {
err("ksdazzle_speed_irq: urb asynchronously failed - %d",
urb->status);
}
}
/* Send a control request to change speed of the dongle */
static int ksdazzle_change_speed(struct ksdazzle_cb *kingsun, unsigned speed)
{
static unsigned int supported_speeds[] = { 2400, 9600, 19200, 38400,
57600, 115200, 576000, 1152000, 4000000, 0
};
int err;
unsigned int i;
if (kingsun->speed_setuprequest == NULL || kingsun->speed_urb == NULL)
return -ENOMEM;
/* Check that requested speed is among the supported ones */
for (i = 0; supported_speeds[i] && supported_speeds[i] != speed; i++) ;
if (supported_speeds[i] == 0)
return -EOPNOTSUPP;
memset(&(kingsun->speedparams), 0, sizeof(struct ksdazzle_speedparams));
kingsun->speedparams.baudrate = cpu_to_le32(speed);
kingsun->speedparams.flags = KS_DATA_8_BITS;
/* speed_setuprequest pre-filled in ksdazzle_probe */
usb_fill_control_urb(kingsun->speed_urb, kingsun->usbdev,
usb_sndctrlpipe(kingsun->usbdev, 0),
(unsigned char *)kingsun->speed_setuprequest,
&(kingsun->speedparams),
sizeof(struct ksdazzle_speedparams),
ksdazzle_speed_irq, kingsun);
kingsun->speed_urb->status = 0;
err = usb_submit_urb(kingsun->speed_urb, GFP_ATOMIC);
return err;
}
/* Submit one fragment of an IrDA frame to the dongle */
static void ksdazzle_send_irq(struct urb *urb);
static int ksdazzle_submit_tx_fragment(struct ksdazzle_cb *kingsun)
{
unsigned int wraplen;
int ret;
/* We can send at most 7 bytes of payload at a time */
wraplen = 7;
if (wraplen > kingsun->tx_buf_clear_used)
wraplen = kingsun->tx_buf_clear_used;
/* Prepare payload prefix with used length */
memset(kingsun->tx_payload, 0, 8);
kingsun->tx_payload[0] = (unsigned char)0xf8 + wraplen;
memcpy(kingsun->tx_payload + 1, kingsun->tx_buf_clear, wraplen);
usb_fill_int_urb(kingsun->tx_urb, kingsun->usbdev,
usb_sndintpipe(kingsun->usbdev, kingsun->ep_out),
kingsun->tx_payload, 8, ksdazzle_send_irq, kingsun, 1);
kingsun->tx_urb->status = 0;
ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC);
/* Remember how much data was sent, in order to update at callback */
kingsun->tx_buf_clear_sent = (ret == 0) ? wraplen : 0;
return ret;
}
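/*
 * Fragmentation example: with 10 wrapped bytes pending, the first call
 * sends a 0xFF-prefixed payload carrying bytes 0..6 and sets
 * tx_buf_clear_sent = 7; the completion handler then shifts the remaining
 * 3 bytes down and resubmits, producing a 0xFB-prefixed payload.
 */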
/* Callback transmission routine */
static void ksdazzle_send_irq(struct urb *urb)
{
struct ksdazzle_cb *kingsun = urb->context;
struct net_device *netdev = kingsun->netdev;
int ret = 0;
/* in process of stopping, just drop data */
if (!netif_running(kingsun->netdev)) {
err("ksdazzle_send_irq: Network not running!");
return;
}
/* unlink, shutdown, unplug, other nasties */
if (urb->status != 0) {
err("ksdazzle_send_irq: urb asynchronously failed - %d",
urb->status);
return;
}
if (kingsun->tx_buf_clear_used > 0) {
/* Update data remaining to be sent */
if (kingsun->tx_buf_clear_sent < kingsun->tx_buf_clear_used) {
memmove(kingsun->tx_buf_clear,
kingsun->tx_buf_clear +
kingsun->tx_buf_clear_sent,
kingsun->tx_buf_clear_used -
kingsun->tx_buf_clear_sent);
}
kingsun->tx_buf_clear_used -= kingsun->tx_buf_clear_sent;
kingsun->tx_buf_clear_sent = 0;
if (kingsun->tx_buf_clear_used > 0) {
/* There is more data to be sent */
if ((ret = ksdazzle_submit_tx_fragment(kingsun)) != 0) {
err("ksdazzle_send_irq: failed tx_urb submit: %d", ret);
switch (ret) {
case -ENODEV:
case -EPIPE:
break;
default:
netdev->stats.tx_errors++;
netif_start_queue(netdev);
}
}
} else {
/* All data sent, send next speed && wake network queue */
if (kingsun->new_speed != -1 &&
cpu_to_le32(kingsun->new_speed) !=
kingsun->speedparams.baudrate)
ksdazzle_change_speed(kingsun,
kingsun->new_speed);
netif_wake_queue(netdev);
}
}
}
/*
* Called from net/core when new frame is available.
*/
static int ksdazzle_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ksdazzle_cb *kingsun;
unsigned int wraplen;
int ret = 0;
if (skb == NULL || netdev == NULL)
return -EINVAL;
netif_stop_queue(netdev);
/* the IRDA wrapping routines don't deal with non linear skb */
SKB_LINEAR_ASSERT(skb);
kingsun = netdev_priv(netdev);
spin_lock(&kingsun->lock);
kingsun->new_speed = irda_get_next_speed(skb);
/* Append data to the end of whatever data remains to be transmitted */
wraplen =
async_wrap_skb(skb, kingsun->tx_buf_clear, KINGSUN_SND_FIFO_SIZE);
kingsun->tx_buf_clear_used = wraplen;
if ((ret = ksdazzle_submit_tx_fragment(kingsun)) != 0) {
err("ksdazzle_hard_xmit: failed tx_urb submit: %d", ret);
switch (ret) {
case -ENODEV:
case -EPIPE:
break;
default:
netdev->stats.tx_errors++;
netif_start_queue(netdev);
}
} else {
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += skb->len;
}
dev_kfree_skb(skb);
spin_unlock(&kingsun->lock);
return ret;
}
/* Receive callback function */
static void ksdazzle_rcv_irq(struct urb *urb)
{
struct ksdazzle_cb *kingsun = urb->context;
struct net_device *netdev = kingsun->netdev;
/* in process of stopping, just drop data */
if (!netif_running(netdev)) {
kingsun->receiving = 0;
return;
}
/* unlink, shutdown, unplug, other nasties */
if (urb->status != 0) {
err("ksdazzle_rcv_irq: urb asynchronously failed - %d",
urb->status);
kingsun->receiving = 0;
return;
}
if (urb->actual_length > 0) {
__u8 *bytes = urb->transfer_buffer;
unsigned int i;
for (i = 0; i < urb->actual_length; i++) {
async_unwrap_char(netdev, &netdev->stats,
&kingsun->rx_unwrap_buff, bytes[i]);
}
kingsun->receiving =
(kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0;
}
/* This urb has already been filled in ksdazzle_net_open. It is assumed that
urb keeps the pointer to the payload buffer.
*/
urb->status = 0;
usb_submit_urb(urb, GFP_ATOMIC);
}
/*
* Function ksdazzle_net_open (dev)
*
* Network device is taken up. Usually this is done by "ifconfig irda0 up"
*/
static int ksdazzle_net_open(struct net_device *netdev)
{
struct ksdazzle_cb *kingsun = netdev_priv(netdev);
int err = -ENOMEM;
char hwname[16];
/* At this point, urbs are NULL, and skb is NULL (see ksdazzle_probe) */
kingsun->receiving = 0;
/* Initialize for SIR to copy data directly into skb. */
kingsun->rx_unwrap_buff.in_frame = FALSE;
kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME;
kingsun->rx_unwrap_buff.truesize = IRDA_SKB_MAX_MTU;
kingsun->rx_unwrap_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
if (!kingsun->rx_unwrap_buff.skb)
goto free_mem;
skb_reserve(kingsun->rx_unwrap_buff.skb, 1);
kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data;
kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!kingsun->rx_urb)
goto free_mem;
kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!kingsun->tx_urb)
goto free_mem;
kingsun->speed_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!kingsun->speed_urb)
goto free_mem;
/* Initialize speed for dongle */
kingsun->new_speed = 9600;
err = ksdazzle_change_speed(kingsun, 9600);
if (err < 0)
goto free_mem;
/*
* Now that everything should be initialized properly,
* Open new IrLAP layer instance to take care of us...
*/
sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
if (!kingsun->irlap) {
err("ksdazzle-sir: irlap_open failed");
goto free_mem;
}
/* Start reception. */
usb_fill_int_urb(kingsun->rx_urb, kingsun->usbdev,
usb_rcvintpipe(kingsun->usbdev, kingsun->ep_in),
kingsun->rx_buf, KINGSUN_RCV_MAX, ksdazzle_rcv_irq,
kingsun, 1);
kingsun->rx_urb->status = 0;
err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
if (err) {
err("ksdazzle-sir: first urb-submit failed: %d", err);
goto close_irlap;
}
netif_start_queue(netdev);
/* Situation at this point:
- all work buffers allocated
- urbs allocated and ready to fill
- max rx packet known (KINGSUN_RCV_MAX)
- unwrap state machine initialized, in state outside of any frame
- receive request in progress
- IrLAP layer started, about to hand over packets to send
*/
return 0;
close_irlap:
irlap_close(kingsun->irlap);
free_mem:
usb_free_urb(kingsun->speed_urb);
kingsun->speed_urb = NULL;
usb_free_urb(kingsun->tx_urb);
kingsun->tx_urb = NULL;
usb_free_urb(kingsun->rx_urb);
kingsun->rx_urb = NULL;
if (kingsun->rx_unwrap_buff.skb) {
kfree_skb(kingsun->rx_unwrap_buff.skb);
kingsun->rx_unwrap_buff.skb = NULL;
kingsun->rx_unwrap_buff.head = NULL;
}
return err;
}
/*
* Function ksdazzle_net_close (dev)
*
* Network device is taken down. Usually this is done by
* "ifconfig irda0 down"
*/
static int ksdazzle_net_close(struct net_device *netdev)
{
struct ksdazzle_cb *kingsun = netdev_priv(netdev);
/* Stop transmit processing */
netif_stop_queue(netdev);
/* Mop up receive && transmit urb's */
usb_kill_urb(kingsun->tx_urb);
usb_free_urb(kingsun->tx_urb);
kingsun->tx_urb = NULL;
usb_kill_urb(kingsun->speed_urb);
usb_free_urb(kingsun->speed_urb);
kingsun->speed_urb = NULL;
usb_kill_urb(kingsun->rx_urb);
usb_free_urb(kingsun->rx_urb);
kingsun->rx_urb = NULL;
kfree_skb(kingsun->rx_unwrap_buff.skb);
kingsun->rx_unwrap_buff.skb = NULL;
kingsun->rx_unwrap_buff.head = NULL;
kingsun->rx_unwrap_buff.in_frame = FALSE;
kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME;
kingsun->receiving = 0;
/* Stop and remove instance of IrLAP */
irlap_close(kingsun->irlap);
kingsun->irlap = NULL;
return 0;
}
/*
* IOCTLs : Extra out-of-band network commands...
*/
static int ksdazzle_net_ioctl(struct net_device *netdev, struct ifreq *rq,
int cmd)
{
struct if_irda_req *irq = (struct if_irda_req *)rq;
struct ksdazzle_cb *kingsun = netdev_priv(netdev);
int ret = 0;
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
/* Check if the device is still there */
if (netif_device_present(kingsun->netdev))
return ksdazzle_change_speed(kingsun,
irq->ifr_baudrate);
break;
case SIOCSMEDIABUSY: /* Set media busy */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
/* Check if the IrDA stack is still there */
if (netif_running(kingsun->netdev))
irda_device_set_media_busy(kingsun->netdev, TRUE);
break;
case SIOCGRECEIVING:
/* Only approximately true */
irq->ifr_receiving = kingsun->receiving;
break;
default:
ret = -EOPNOTSUPP;
}
return ret;
}
static const struct net_device_ops ksdazzle_ops = {
.ndo_start_xmit = ksdazzle_hard_xmit,
.ndo_open = ksdazzle_net_open,
.ndo_stop = ksdazzle_net_close,
.ndo_do_ioctl = ksdazzle_net_ioctl,
};
/*
* This routine is called by the USB subsystem for each new device
* in the system. We need to check if the device is ours, and in
* this case start handling it.
*/
static int ksdazzle_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_host_interface *interface;
struct usb_endpoint_descriptor *endpoint;
struct usb_device *dev = interface_to_usbdev(intf);
struct ksdazzle_cb *kingsun = NULL;
struct net_device *net = NULL;
int ret = -ENOMEM;
int pipe, maxp_in, maxp_out;
__u8 ep_in;
__u8 ep_out;
/* Check that there really are two interrupt endpoints. Check based on the
one in drivers/usb/input/usbmouse.c
*/
interface = intf->cur_altsetting;
if (interface->desc.bNumEndpoints != 2) {
err("ksdazzle: expected 2 endpoints, found %d",
interface->desc.bNumEndpoints);
return -ENODEV;
}
endpoint = &interface->endpoint[KINGSUN_EP_IN].desc;
if (!usb_endpoint_is_int_in(endpoint)) {
err("ksdazzle: endpoint 0 is not interrupt IN");
return -ENODEV;
}
ep_in = endpoint->bEndpointAddress;
pipe = usb_rcvintpipe(dev, ep_in);
maxp_in = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
if (maxp_in > 255 || maxp_in <= 1) {
err("ksdazzle: endpoint 0 has max packet size %d not in range [2..255]", maxp_in);
return -ENODEV;
}
endpoint = &interface->endpoint[KINGSUN_EP_OUT].desc;
if (!usb_endpoint_is_int_out(endpoint)) {
err("ksdazzle: endpoint 1 is not interrupt OUT");
return -ENODEV;
}
ep_out = endpoint->bEndpointAddress;
pipe = usb_sndintpipe(dev, ep_out);
maxp_out = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
/* Allocate network device container. */
net = alloc_irdadev(sizeof(*kingsun));
if (!net)
goto err_out1;
SET_NETDEV_DEV(net, &intf->dev);
kingsun = netdev_priv(net);
kingsun->netdev = net;
kingsun->usbdev = dev;
kingsun->ep_in = ep_in;
kingsun->ep_out = ep_out;
kingsun->irlap = NULL;
kingsun->tx_urb = NULL;
kingsun->tx_buf_clear = NULL;
kingsun->tx_buf_clear_used = 0;
kingsun->tx_buf_clear_sent = 0;
kingsun->rx_urb = NULL;
kingsun->rx_buf = NULL;
kingsun->rx_unwrap_buff.in_frame = FALSE;
kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME;
kingsun->rx_unwrap_buff.skb = NULL;
kingsun->receiving = 0;
spin_lock_init(&kingsun->lock);
kingsun->speed_setuprequest = NULL;
kingsun->speed_urb = NULL;
kingsun->speedparams.baudrate = 0;
/* Allocate input buffer */
kingsun->rx_buf = kmalloc(KINGSUN_RCV_MAX, GFP_KERNEL);
if (!kingsun->rx_buf)
goto free_mem;
/* Allocate output buffer */
kingsun->tx_buf_clear = kmalloc(KINGSUN_SND_FIFO_SIZE, GFP_KERNEL);
if (!kingsun->tx_buf_clear)
goto free_mem;
/* Allocate and initialize speed setup packet */
kingsun->speed_setuprequest =
kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
if (!kingsun->speed_setuprequest)
goto free_mem;
kingsun->speed_setuprequest->bRequestType =
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
kingsun->speed_setuprequest->bRequest = KINGSUN_REQ_SEND;
kingsun->speed_setuprequest->wValue = cpu_to_le16(0x0200);
kingsun->speed_setuprequest->wIndex = cpu_to_le16(0x0001);
kingsun->speed_setuprequest->wLength =
cpu_to_le16(sizeof(struct ksdazzle_speedparams));
printk(KERN_INFO "KingSun/Dazzle IRDA/USB found at address %d, "
"Vendor: %x, Product: %x\n",
dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
le16_to_cpu(dev->descriptor.idProduct));
/* Initialize QoS for this device */
irda_init_max_qos_capabilies(&kingsun->qos);
/* Baud rates known to be supported. Please uncomment if devices (other
than a Sony Ericsson K300 phone) can be shown to support higher speeds
with this dongle.
*/
kingsun->qos.baud_rate.bits =
IR_2400 | IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
kingsun->qos.min_turn_time.bits &= KINGSUN_MTT;
irda_qos_bits_to_value(&kingsun->qos);
/* Override the network functions we need to use */
net->netdev_ops = &ksdazzle_ops;
ret = register_netdev(net);
if (ret != 0)
goto free_mem;
dev_info(&net->dev, "IrDA: Registered KingSun/Dazzle device %s\n",
net->name);
usb_set_intfdata(intf, kingsun);
/* Situation at this point:
- all work buffers allocated
- setup requests pre-filled
- urbs not allocated, set to NULL
- max rx packet known (is KINGSUN_RCV_MAX)
- unwrap state machine (partially) initialized, but skb == NULL
*/
return 0;
free_mem:
kfree(kingsun->speed_setuprequest);
kfree(kingsun->tx_buf_clear);
kfree(kingsun->rx_buf);
free_netdev(net);
err_out1:
return ret;
}
/*
* The current device is removed, the USB layer tell us to shut it down...
*/
static void ksdazzle_disconnect(struct usb_interface *intf)
{
struct ksdazzle_cb *kingsun = usb_get_intfdata(intf);
if (!kingsun)
return;
unregister_netdev(kingsun->netdev);
/* Mop up receive and transmit URBs */
usb_kill_urb(kingsun->speed_urb);
usb_free_urb(kingsun->speed_urb);
kingsun->speed_urb = NULL;
usb_kill_urb(kingsun->tx_urb);
usb_free_urb(kingsun->tx_urb);
kingsun->tx_urb = NULL;
usb_kill_urb(kingsun->rx_urb);
usb_free_urb(kingsun->rx_urb);
kingsun->rx_urb = NULL;
kfree(kingsun->speed_setuprequest);
kfree(kingsun->tx_buf_clear);
kfree(kingsun->rx_buf);
free_netdev(kingsun->netdev);
usb_set_intfdata(intf, NULL);
}
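/*
 * Ordering note for the disconnect path above: each usb_kill_urb()
 * must complete before the matching usb_free_urb(), so no completion
 * handler can run against freed memory.  Both helpers also tolerate a
 * NULL urb, which is why the probe-time "urbs not allocated, set to
 * NULL" invariant keeps this teardown safe even after a failed probe.
 */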
#ifdef CONFIG_PM
/* USB suspend, so power off the transmitter/receiver */
static int ksdazzle_suspend(struct usb_interface *intf, pm_message_t message)
{
struct ksdazzle_cb *kingsun = usb_get_intfdata(intf);
netif_device_detach(kingsun->netdev);
if (kingsun->speed_urb != NULL)
usb_kill_urb(kingsun->speed_urb);
if (kingsun->tx_urb != NULL)
usb_kill_urb(kingsun->tx_urb);
if (kingsun->rx_urb != NULL)
usb_kill_urb(kingsun->rx_urb);
return 0;
}
/* Coming out of suspend, so reset hardware */
static int ksdazzle_resume(struct usb_interface *intf)
{
struct ksdazzle_cb *kingsun = usb_get_intfdata(intf);
if (kingsun->rx_urb != NULL) {
/* Setup request already filled in ksdazzle_probe */
usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
}
netif_device_attach(kingsun->netdev);
return 0;
}
#endif
/*
* USB device callbacks
*/
static struct usb_driver irda_driver = {
.name = "ksdazzle-sir",
.probe = ksdazzle_probe,
.disconnect = ksdazzle_disconnect,
.id_table = dongles,
#ifdef CONFIG_PM
.suspend = ksdazzle_suspend,
.resume = ksdazzle_resume,
#endif
};
/*
* Module insertion
*/
static int __init ksdazzle_init(void)
{
return usb_register(&irda_driver);
}
module_init(ksdazzle_init);
/*
* Module removal
*/
static void __exit ksdazzle_cleanup(void)
{
/* Deregister the driver and remove all pending instances */
usb_deregister(&irda_driver);
}
module_exit(ksdazzle_cleanup);
MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>");
MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun Dazzle");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Myself5/android_kernel_sony_msm8974 | drivers/staging/prima/CORE/SVC/src/logging/wlan_logging_sock_svc.c | 27 | 18752 | /*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/******************************************************************************
* wlan_logging_sock_svc.c
*
******************************************************************************/
#ifdef WLAN_LOGGING_SOCK_SVC_ENABLE
#include <vmalloc.h>
#include <wlan_nlink_srv.h>
#include <vos_status.h>
#include <vos_trace.h>
#include <wlan_nlink_common.h>
#include <wlan_logging_sock_svc.h>
#include <vos_types.h>
#include <kthread.h>
#include "vos_memory.h"
#define LOGGING_TRACE(level, args...) \
VOS_TRACE(VOS_MODULE_ID_HDD, level, ## args)
/* Global variables */
#define ANI_NL_MSG_LOG_TYPE 89
#define ANI_NL_MSG_READY_IND_TYPE 90
#define INVALID_PID -1
#define MAX_LOGMSG_LENGTH 4096
#define SECONDS_IN_A_DAY (86400)
struct log_msg {
struct list_head node;
unsigned int radio;
unsigned int index;
/* indicates the current filled log length in logbuf */
unsigned int filled_length;
/*
* Buf to hold the log msg
* tAniHdr + log
*/
char logbuf[MAX_LOGMSG_LENGTH];
};
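/*
 * Layout of logbuf once a node is queued, as inferred from
 * wlan_queue_logmsg_for_app() below (tAniHdr is presumably two u16
 * fields, i.e. 4 bytes):
 *
 *	offset 0: u16 message type (ANI_NL_MSG_LOG_TYPE)
 *	offset 2: u16 filled_length (log bytes that follow)
 *	offset 4: accumulated '\n'-separated, '\0'-terminated log text
 */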
struct wlan_logging {
/* Log Fatal and ERROR to console */
bool log_fe_to_console;
/* Number of buffers to be used for logging */
int num_buf;
/* Lock to synchronize access to shared logging resource */
spinlock_t spin_lock;
/* Holds the free node which can be used for filling logs */
struct list_head free_list;
/* Holds the filled nodes which needs to be indicated to APP */
struct list_head filled_list;
/* Wait queue for Logger thread */
wait_queue_head_t wait_queue;
/* Logger thread */
struct task_struct *thread;
/* Logging thread sets this variable on exit */
struct completion shutdown_comp;
/* Indicates to logger thread to exit */
bool exit;
/* Holds number of dropped logs*/
unsigned int drop_count;
/* current logbuf to which the log will be filled to */
struct log_msg *pcur_node;
};
static struct wlan_logging gwlan_logging;
static struct log_msg *gplog_msg;
/* PID of the app that receives the log messages */
static int gapp_pid = INVALID_PID;
static char wlan_logging_ready[] = "WLAN LOGGING READY";
/*
* Broadcast Logging service ready indication to any Logging application
* Each netlink message will have a message of type tAniMsgHdr inside.
*/
void wlan_logging_srv_nl_ready_indication(void)
{
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
tAniNlHdr *wnl = NULL;
int payload_len;
int err;
static int rate_limit;
payload_len = sizeof(tAniHdr) + sizeof(wlan_logging_ready) +
sizeof(wnl->radio);
skb = dev_alloc_skb(NLMSG_SPACE(payload_len));
if (NULL == skb) {
if (!rate_limit) {
LOGGING_TRACE(VOS_TRACE_LEVEL_ERROR,
"NLINK: skb alloc fail %s", __func__);
}
rate_limit = 1;
return;
}
rate_limit = 0;
nlh = nlmsg_put(skb, 0, 0, ANI_NL_MSG_LOG, payload_len,
NLM_F_REQUEST);
if (NULL == nlh) {
LOGGING_TRACE(VOS_TRACE_LEVEL_ERROR,
"%s: nlmsg_put() failed for msg size[%d]",
__func__, payload_len);
kfree_skb(skb);
return;
}
wnl = (tAniNlHdr *) nlh;
wnl->radio = 0;
wnl->wmsg.type = ANI_NL_MSG_READY_IND_TYPE;
wnl->wmsg.length = sizeof(wlan_logging_ready);
memcpy((char*)&wnl->wmsg + sizeof(tAniHdr),
wlan_logging_ready,
sizeof(wlan_logging_ready));
/* sender is in group 1<<0 */
NETLINK_CB(skb).dst_group = WLAN_NLINK_MCAST_GRP_ID;
/*multicast the message to all listening processes*/
err = nl_srv_bcast(skb);
if (err) {
LOGGING_TRACE(VOS_TRACE_LEVEL_INFO_LOW,
"NLINK: Ready Indication Send Fail %s, err %d",
__func__, err);
}
return;
}
/* Utility function to send a netlink message to an application
* in user space
*/
static int wlan_send_sock_msg_to_app(tAniHdr *wmsg, int radio,
int src_mod, int pid)
{
int err = -1;
int payload_len;
int tot_msg_len;
tAniNlHdr *wnl = NULL;
struct sk_buff *skb;
struct nlmsghdr *nlh;
int wmsg_length = be16_to_cpu(wmsg->length);
static int nlmsg_seq;
if (radio < 0 || radio > ANI_MAX_RADIOS) {
LOGGING_TRACE(VOS_TRACE_LEVEL_ERROR,
"%s: invalid radio id [%d]",
__func__, radio);
return -EINVAL;
}
payload_len = wmsg_length + sizeof(wnl->radio);
tot_msg_len = NLMSG_SPACE(payload_len);
skb = dev_alloc_skb(tot_msg_len);
if (skb == NULL) {
LOGGING_TRACE(VOS_TRACE_LEVEL_ERROR,
"%s: dev_alloc_skb() failed for msg size[%d]",
__func__, tot_msg_len);
return -ENOMEM;
}
nlh = nlmsg_put(skb, pid, nlmsg_seq++, src_mod, payload_len,
NLM_F_REQUEST);
if (NULL == nlh) {
LOGGING_TRACE(VOS_TRACE_LEVEL_ERROR,
"%s: nlmsg_put() failed for msg size[%d]",
__func__, tot_msg_len);
kfree_skb(skb);
return -ENOMEM;
}
wnl = (tAniNlHdr *) nlh;
wnl->radio = radio;
vos_mem_copy(&wnl->wmsg, wmsg, wmsg_length);
LOGGING_TRACE(VOS_TRACE_LEVEL_INFO,
"%s: Sending Msg Type [0x%X] to pid[%d]\n",
__func__, be16_to_cpu(wmsg->type), pid);
err = nl_srv_ucast(skb, pid, MSG_DONTWAIT);
return err;
}
static void set_default_logtoapp_log_level(void)
{
vos_trace_setValue(VOS_MODULE_ID_WDI, VOS_TRACE_LEVEL_ALL, VOS_TRUE);
vos_trace_setValue(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ALL, VOS_TRUE);
vos_trace_setValue(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ALL, VOS_TRUE);
vos_trace_setValue(VOS_MODULE_ID_PE, VOS_TRACE_LEVEL_ALL, VOS_TRUE);
vos_trace_setValue(VOS_MODULE_ID_WDA, VOS_TRACE_LEVEL_ALL, VOS_TRUE);
vos_trace_setValue(VOS_MODULE_ID_HDD_SOFTAP, VOS_TRACE_LEVEL_ALL,
VOS_TRUE);
vos_trace_setValue(VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ALL, VOS_TRUE);
}
static void clear_default_logtoapp_log_level(void)
{
int module;
for (module = 0; module < VOS_MODULE_ID_MAX; module++) {
vos_trace_setValue(module, VOS_TRACE_LEVEL_NONE,
VOS_FALSE);
vos_trace_setValue(module, VOS_TRACE_LEVEL_FATAL,
VOS_TRUE);
vos_trace_setValue(module, VOS_TRACE_LEVEL_ERROR,
VOS_TRUE);
}
vos_trace_setValue(VOS_MODULE_ID_RSV3, VOS_TRACE_LEVEL_NONE,
VOS_FALSE);
vos_trace_setValue(VOS_MODULE_ID_RSV4, VOS_TRACE_LEVEL_NONE,
VOS_FALSE);
}
/* Need to call this with spin_lock acquired */
static int wlan_queue_logmsg_for_app(void)
{
char *ptr;
int ret = 0;
ptr = &gwlan_logging.pcur_node->logbuf[sizeof(tAniHdr)];
ptr[gwlan_logging.pcur_node->filled_length] = '\0';
*(unsigned short *)(gwlan_logging.pcur_node->logbuf) =
ANI_NL_MSG_LOG_TYPE;
*(unsigned short *)(gwlan_logging.pcur_node->logbuf + 2) =
gwlan_logging.pcur_node->filled_length;
list_add_tail(&gwlan_logging.pcur_node->node,
&gwlan_logging.filled_list);
if (!list_empty(&gwlan_logging.free_list)) {
/* Get buffer from free list */
gwlan_logging.pcur_node =
(struct log_msg *)(gwlan_logging.free_list.next);
list_del_init(gwlan_logging.free_list.next);
} else if (!list_empty(&gwlan_logging.filled_list)) {
/* Get buffer from filled list */
/* This condition will drop the packet from being
* indicated to app
*/
gwlan_logging.pcur_node =
(struct log_msg *)(gwlan_logging.filled_list.next);
++gwlan_logging.drop_count;
/* print every 64th drop count */
if (gapp_pid != INVALID_PID && (!(gwlan_logging.drop_count % 0x40))) {
pr_err("%s: drop_count = %u index = %d filled_length = %d\n",
__func__, gwlan_logging.drop_count,
gwlan_logging.pcur_node->index,
gwlan_logging.pcur_node->filled_length);
}
list_del_init(gwlan_logging.filled_list.next);
ret = 1;
}
/* Reset the current node values */
gwlan_logging.pcur_node->filled_length = 0;
return ret;
}
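/*
 * Drop policy: once the free list is exhausted, the oldest entry on
 * the filled list is recycled for new writes, so the pool behaves
 * like a ring buffer that overwrites the least recent unsent logs,
 * counting each overwrite in drop_count.  The return value is 1 when
 * an undelivered buffer had to be recycled this way.
 */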
int wlan_log_to_user(VOS_TRACE_LEVEL log_level, char *to_be_sent, int length)
{
/* Add the current time stamp */
char *ptr;
char tbuf[50];
int tlen;
int total_log_len;
unsigned int *pfilled_length;
bool wake_up_thread = false;
unsigned long flags;
struct timeval tv;
if (gapp_pid == INVALID_PID) {
/*
 * This makes sure the logs are still printed to the kmsg console
 * when no logger app is running.  It also covers the initial
 * messages emitted while the driver is loading, when even a
 * running app cannot yet register with the driver and start
 * receiving messages.
*/
pr_err("%s\n", to_be_sent);
}
/* Format the log time as [seconds-elapsed-in-a-day.microseconds] */
do_gettimeofday(&tv);
tlen = snprintf(tbuf, sizeof(tbuf), "[%s][%5lu.%06lu] ", current->comm,
(unsigned long) (tv.tv_sec%SECONDS_IN_A_DAY),
tv.tv_usec);
/* 1+1 indicate '\n'+'\0' */
total_log_len = length + tlen + 1 + 1;
spin_lock_irqsave(&gwlan_logging.spin_lock, flags);
// wlan logging svc resources are not yet initialized
if (!gwlan_logging.pcur_node) {
spin_unlock_irqrestore(&gwlan_logging.spin_lock, flags);
return -EIO;
}
pfilled_length = &gwlan_logging.pcur_node->filled_length;
/* Check if we can accommodate more log into current node/buffer */
if ((MAX_LOGMSG_LENGTH - (*pfilled_length + sizeof(tAniNlHdr))) <
total_log_len) {
wake_up_thread = true;
wlan_queue_logmsg_for_app();
pfilled_length = &gwlan_logging.pcur_node->filled_length;
}
ptr = &gwlan_logging.pcur_node->logbuf[sizeof(tAniHdr)];
/* The assumption here is that each received log is always smaller than
 * MAX_LOGMSG_LENGTH, so that we can accommodate
 * tAniNlHdr + [context][timestamp] + log.
 * VOS_ASSERT if we cannot accommodate the complete log in
 * the available buffer.
 *
 * Continue and copy logs up to the available length and discard the rest.
*/
if (MAX_LOGMSG_LENGTH < (sizeof(tAniNlHdr) + total_log_len)) {
VOS_ASSERT(0);
total_log_len = MAX_LOGMSG_LENGTH - sizeof(tAniNlHdr) - 2;
}
vos_mem_copy(&ptr[*pfilled_length], tbuf, tlen);
vos_mem_copy(&ptr[*pfilled_length + tlen], to_be_sent,
min(length, (total_log_len - tlen)));
*pfilled_length += tlen + min(length, total_log_len - tlen);
ptr[*pfilled_length] = '\n';
*pfilled_length += 1;
spin_unlock_irqrestore(&gwlan_logging.spin_lock, flags);
/* Wakeup logger thread */
if ((true == wake_up_thread)) {
/* If there is logger app registered wakeup the logging
* thread Else broadcast a Ready Indication message,
* apps which are waiting on this message can
* register for the logs.
*/
if ( (gapp_pid != INVALID_PID)) {
wake_up_interruptible(&gwlan_logging.wait_queue);
}
else {
wlan_logging_srv_nl_ready_indication();
}
}
if ((gapp_pid != INVALID_PID)
&& gwlan_logging.log_fe_to_console
&& ((VOS_TRACE_LEVEL_FATAL == log_level)
|| (VOS_TRACE_LEVEL_ERROR == log_level))) {
pr_err("%s\n", to_be_sent);
}
return 0;
}
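/*
 * With the snprintf format above, a queued entry looks like, e.g.:
 *
 *	[kworker/0:1][ 4523.001234] <driver log text>
 *
 * where 4523 is tv_sec % SECONDS_IN_A_DAY (seconds elapsed in the
 * current day) and 001234 is the microsecond part.  The '\n'
 * appended at the end keeps entries separable when several logs are
 * packed into a single logbuf.
 */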
static int send_filled_buffers_to_user(void)
{
int ret = -1;
struct log_msg *plog_msg;
int payload_len;
int tot_msg_len;
tAniNlHdr *wnl;
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
static int nlmsg_seq;
unsigned long flags;
static int rate_limit;
while (!list_empty(&gwlan_logging.filled_list)
&& !gwlan_logging.exit) {
skb = dev_alloc_skb(MAX_LOGMSG_LENGTH);
if (skb == NULL) {
if (!rate_limit) {
pr_err("%s: dev_alloc_skb() failed for msg size[%d] drop count = %u\n",
__func__, MAX_LOGMSG_LENGTH,
gwlan_logging.drop_count);
}
rate_limit = 1;
ret = -ENOMEM;
break;
}
rate_limit = 0;
spin_lock_irqsave(&gwlan_logging.spin_lock, flags);
plog_msg = (struct log_msg *)
(gwlan_logging.filled_list.next);
list_del_init(gwlan_logging.filled_list.next);
spin_unlock_irqrestore(&gwlan_logging.spin_lock, flags);
/* 4 extra bytes for the radio idx */
payload_len = plog_msg->filled_length +
sizeof(wnl->radio) + sizeof(tAniHdr);
tot_msg_len = NLMSG_SPACE(payload_len);
nlh = nlmsg_put(skb, gapp_pid, nlmsg_seq++,
ANI_NL_MSG_LOG, payload_len,
NLM_F_REQUEST);
if (NULL == nlh) {
spin_lock_irqsave(&gwlan_logging.spin_lock, flags);
list_add_tail(&plog_msg->node,
&gwlan_logging.free_list);
spin_unlock_irqrestore(&gwlan_logging.spin_lock,
flags);
pr_err("%s: drop_count = %u\n", __func__,
++gwlan_logging.drop_count);
pr_err("%s: nlmsg_put() failed for msg size[%d]\n",
__func__, tot_msg_len);
dev_kfree_skb(skb);
skb = NULL;
ret = -EINVAL;
continue;
}
wnl = (tAniNlHdr *) nlh;
wnl->radio = plog_msg->radio;
vos_mem_copy(&wnl->wmsg, plog_msg->logbuf,
plog_msg->filled_length +
sizeof(tAniHdr));
spin_lock_irqsave(&gwlan_logging.spin_lock, flags);
list_add_tail(&plog_msg->node,
&gwlan_logging.free_list);
spin_unlock_irqrestore(&gwlan_logging.spin_lock, flags);
ret = nl_srv_ucast(skb, gapp_pid, 0);
if (ret < 0) {
pr_err("%s: Send Failed %d drop_count = %u\n",
__func__, ret, ++gwlan_logging.drop_count);
skb = NULL;
gapp_pid = INVALID_PID;
clear_default_logtoapp_log_level();
wlan_logging_srv_nl_ready_indication();
} else {
skb = NULL;
ret = 0;
}
}
return ret;
}
/**
* wlan_logging_thread() - The WLAN Logger thread
* @Arg - pointer to the HDD context
*
 * This thread sends the queued log messages to the app registered for the logs.
*/
static int wlan_logging_thread(void *Arg)
{
int ret_wait_status = 0;
int ret = 0;
set_user_nice(current, -2);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
daemonize("wlan_logging_thread");
#endif
while (!gwlan_logging.exit) {
ret_wait_status = wait_event_interruptible(
gwlan_logging.wait_queue,
(!list_empty(&gwlan_logging.filled_list)
|| gwlan_logging.exit));
if (ret_wait_status == -ERESTARTSYS) {
pr_err("%s: wait_event_interruptible returned -ERESTARTSYS",
__func__);
break;
}
if (gwlan_logging.exit) {
pr_err("%s: Exiting the thread\n", __func__);
break;
}
if (INVALID_PID == gapp_pid) {
pr_err("%s: Invalid PID\n", __func__);
continue;
}
ret = send_filled_buffers_to_user();
if (-ENOMEM == ret) {
msleep(200);
}
}
pr_info("%s: Terminating\n", __func__);
complete_and_exit(&gwlan_logging.shutdown_comp, 0);
return 0;
}
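/*
 * Shutdown handshake: wlan_logging_sock_deactivate_svc() sets
 * gwlan_logging.exit, wakes wait_queue, and then blocks on
 * shutdown_comp; the complete_and_exit() above is the matching half,
 * so the deactivate path cannot free the buffer pool while this
 * thread is still running.
 */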
/*
* Process all the Netlink messages from Logger Socket app in user space
*/
static int wlan_logging_proc_sock_rx_msg(struct sk_buff *skb)
{
tAniNlHdr *wnl;
int radio;
int type;
int ret;
wnl = (tAniNlHdr *) skb->data;
radio = wnl->radio;
type = wnl->nlh.nlmsg_type;
if (radio < 0 || radio > ANI_MAX_RADIOS) {
LOGGING_TRACE(VOS_TRACE_LEVEL_ERROR,
"%s: invalid radio id [%d]\n",
__func__, radio);
return -EINVAL;
}
if (gapp_pid != INVALID_PID) {
if (wnl->nlh.nlmsg_pid > gapp_pid) {
gapp_pid = wnl->nlh.nlmsg_pid;
}
spin_lock_bh(&gwlan_logging.spin_lock);
if (gwlan_logging.pcur_node->filled_length) {
wlan_queue_logmsg_for_app();
}
spin_unlock_bh(&gwlan_logging.spin_lock);
wake_up_interruptible(&gwlan_logging.wait_queue);
} else {
/* This is to set the default levels (WLAN logging
 * default values, not the VOS trace defaults) when a
* logger app is registered for the first time.
*/
gapp_pid = wnl->nlh.nlmsg_pid;
set_default_logtoapp_log_level();
}
ret = wlan_send_sock_msg_to_app(&wnl->wmsg, 0,
ANI_NL_MSG_LOG, wnl->nlh.nlmsg_pid);
if (ret < 0) {
LOGGING_TRACE(VOS_TRACE_LEVEL_ERROR,
"wlan_send_sock_msg_to_app: failed");
}
return ret;
}
int wlan_logging_sock_activate_svc(int log_fe_to_console, int num_buf)
{
int i = 0;
unsigned long irq_flag;
pr_info("%s: Initalizing FEConsoleLog = %d NumBuff = %d\n",
__func__, log_fe_to_console, num_buf);
gapp_pid = INVALID_PID;
gplog_msg = (struct log_msg *) vmalloc(
num_buf * sizeof(struct log_msg));
if (!gplog_msg) {
pr_err("%s: Could not allocate memory\n", __func__);
return -ENOMEM;
}
vos_mem_zero(gplog_msg, (num_buf * sizeof(struct log_msg)));
gwlan_logging.log_fe_to_console = !!log_fe_to_console;
gwlan_logging.num_buf = num_buf;
spin_lock_irqsave(&gwlan_logging.spin_lock, irq_flag);
INIT_LIST_HEAD(&gwlan_logging.free_list);
INIT_LIST_HEAD(&gwlan_logging.filled_list);
for (i = 0; i < num_buf; i++) {
list_add(&gplog_msg[i].node, &gwlan_logging.free_list);
gplog_msg[i].index = i;
}
gwlan_logging.pcur_node = (struct log_msg *)
(gwlan_logging.free_list.next);
list_del_init(gwlan_logging.free_list.next);
spin_unlock_irqrestore(&gwlan_logging.spin_lock, irq_flag);
init_waitqueue_head(&gwlan_logging.wait_queue);
gwlan_logging.exit = false;
init_completion(&gwlan_logging.shutdown_comp);
gwlan_logging.thread = kthread_create(wlan_logging_thread, NULL,
"wlan_logging_thread");
if (IS_ERR(gwlan_logging.thread)) {
pr_err("%s: Could not Create LogMsg Thread Controller",
__func__);
spin_lock_irqsave(&gwlan_logging.spin_lock, irq_flag);
vfree(gplog_msg);
gplog_msg = NULL;
gwlan_logging.pcur_node = NULL;
spin_unlock_irqrestore(&gwlan_logging.spin_lock, irq_flag);
return -ENOMEM;
}
wake_up_process(gwlan_logging.thread);
nl_srv_register(ANI_NL_MSG_LOG, wlan_logging_proc_sock_rx_msg);
/* Broadcast SVC ready message to any logging apps running */
wlan_logging_srv_nl_ready_indication();
pr_info("%s: Activated wlan_logging svc\n", __func__);
return 0;
}
int wlan_logging_sock_deactivate_svc(void)
{
unsigned long irq_flag;
if (!gplog_msg)
return 0;
nl_srv_unregister(ANI_NL_MSG_LOG, wlan_logging_proc_sock_rx_msg);
clear_default_logtoapp_log_level();
gapp_pid = INVALID_PID;
INIT_COMPLETION(gwlan_logging.shutdown_comp);
gwlan_logging.exit = true;
wake_up_interruptible(&gwlan_logging.wait_queue);
wait_for_completion(&gwlan_logging.shutdown_comp);
spin_lock_irqsave(&gwlan_logging.spin_lock, irq_flag);
vfree(gplog_msg);
gwlan_logging.pcur_node = NULL;
spin_unlock_irqrestore(&gwlan_logging.spin_lock, irq_flag);
pr_info("%s: Deactivate wlan_logging svc\n", __func__);
return 0;
}
int wlan_logging_sock_init_svc(void)
{
spin_lock_init(&gwlan_logging.spin_lock);
gapp_pid = INVALID_PID;
gwlan_logging.pcur_node = NULL;
return 0;
}
int wlan_logging_sock_deinit_svc(void)
{
gwlan_logging.pcur_node = NULL;
gapp_pid = INVALID_PID;
return 0;
}
#endif /* WLAN_LOGGING_SOCK_SVC_ENABLE */
| gpl-2.0 |
ggsamsa/rate_monotonic | arch/powerpc/mm/pgtable_32.c | 27 | 10677 | /*
* This file contains the routines setting up the linux page tables.
* -- paulus
*
* Derived from arch/ppc/mm/init.c:
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/lmb.h>
#include <linux/slab.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/io.h>
#include "mmu_decl.h"
unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */
#if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
#define HAVE_BATS 1
#endif
#if defined(CONFIG_FSL_BOOKE)
#define HAVE_TLBCAM 1
#endif
extern char etext[], _stext[];
#ifdef HAVE_BATS
extern phys_addr_t v_mapped_by_bats(unsigned long va);
extern unsigned long p_mapped_by_bats(phys_addr_t pa);
void setbat(int index, unsigned long virt, phys_addr_t phys,
unsigned int size, int flags);
#else /* !HAVE_BATS */
#define v_mapped_by_bats(x) (0UL)
#define p_mapped_by_bats(x) (0UL)
#endif /* HAVE_BATS */
#ifdef HAVE_TLBCAM
extern unsigned int tlbcam_index;
extern phys_addr_t v_mapped_by_tlbcam(unsigned long va);
extern unsigned long p_mapped_by_tlbcam(phys_addr_t pa);
#else /* !HAVE_TLBCAM */
#define v_mapped_by_tlbcam(x) (0UL)
#define p_mapped_by_tlbcam(x) (0UL)
#endif /* HAVE_TLBCAM */
#define PGDIR_ORDER (32 + PGD_T_LOG2 - PGDIR_SHIFT)
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret;
/* the pgdir takes one or two pages with 4K pages, and a fraction of a page otherwise */
#ifndef CONFIG_PPC_4K_PAGES
ret = (pgd_t *)kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
#else
ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
PGDIR_ORDER - PAGE_SHIFT);
#endif
return ret;
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifndef CONFIG_PPC_4K_PAGES
kfree((void *)pgd);
#else
free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT);
#endif
}
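/*
 * Worked example for PGDIR_ORDER, assuming the classic 32-bit layout
 * with PGDIR_SHIFT = 22 and a 4-byte pgd_t (PGD_T_LOG2 = 2):
 *
 *	PGDIR_ORDER = 32 + 2 - 22 = 12, i.e. 1 << 12 = 4096 bytes
 *
 * which is exactly one 4K page (PGDIR_ORDER - PAGE_SHIFT = 0), so
 * pgd_alloc() above ends up asking __get_free_pages() for order 0.
 * Platforms with a smaller PGDIR_SHIFT need proportionally more.
 */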
__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte;
extern int mem_init_done;
extern void *early_get_page(void);
if (mem_init_done) {
pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
} else {
pte = (pte_t *)early_get_page();
if (pte)
clear_page(pte);
}
return pte;
}
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *ptepage;
#ifdef CONFIG_HIGHPTE
gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT | __GFP_ZERO;
#else
gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
#endif
ptepage = alloc_pages(flags, 0);
if (!ptepage)
return NULL;
pgtable_page_ctor(ptepage);
return ptepage;
}
void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
return __ioremap_caller(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED,
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);
void __iomem *
ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
{
/* writeable implies dirty for kernel addresses */
if (flags & _PAGE_RW)
flags |= _PAGE_DIRTY | _PAGE_HWWRITE;
/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
flags &= ~(_PAGE_USER | _PAGE_EXEC);
return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_flags);
void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
{
return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
void __iomem *
__ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
void *caller)
{
unsigned long v, i;
phys_addr_t p;
int err;
/* Make sure we have the base flags */
if ((flags & _PAGE_PRESENT) == 0)
flags |= PAGE_KERNEL;
/* Non-cacheable page cannot be coherent */
if (flags & _PAGE_NO_CACHE)
flags &= ~_PAGE_COHERENT;
/*
* Choose an address to map it to.
* Once the vmalloc system is running, we use it.
* Before then, we use space going down from ioremap_base
* (ioremap_bot records where we're up to).
*/
p = addr & PAGE_MASK;
size = PAGE_ALIGN(addr + size) - p;
/*
* If the address lies within the first 16 MB, assume it's in ISA
* memory space
*/
if (p < 16*1024*1024)
p += _ISA_MEM_BASE;
#ifndef CONFIG_CRASH_DUMP
/*
* Don't allow anybody to remap normal RAM that we're using.
* mem_init() sets high_memory so only do the check after that.
*/
if (mem_init_done && (p < virt_to_phys(high_memory)) &&
!(__allow_ioremap_reserved && lmb_is_region_reserved(p, size))) {
printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
(unsigned long long)p, __builtin_return_address(0));
return NULL;
}
#endif
if (size == 0)
return NULL;
/*
* Is it already mapped? Perhaps overlapped by a previous
* BAT mapping. If the whole area is mapped then we're done,
* otherwise remap it since we want to keep the virt addrs for
* each request contiguous.
*
* We make the assumption here that if the bottom and top
* of the range we want are mapped then it's mapped to the
* same virt address (and this is contiguous).
* -- Cort
*/
if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
goto out;
if ((v = p_mapped_by_tlbcam(p)))
goto out;
if (mem_init_done) {
struct vm_struct *area;
area = get_vm_area_caller(size, VM_IOREMAP, caller);
if (area == 0)
return NULL;
v = (unsigned long) area->addr;
} else {
v = (ioremap_bot -= size);
}
/*
* Should check if it is a candidate for a BAT mapping
*/
err = 0;
for (i = 0; i < size && err == 0; i += PAGE_SIZE)
err = map_page(v+i, p+i, flags);
if (err) {
if (mem_init_done)
vunmap((void *)v);
return NULL;
}
out:
return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
EXPORT_SYMBOL(__ioremap);
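/*
 * Sub-page offsets survive the mapping: __ioremap_caller() rounds the
 * physical address down to a page boundary, maps whole pages, and adds
 * the original offset back into the returned cookie.  For example
 * (hypothetical addresses), ioremap(0x10000004, 8) maps the page at
 * 0x10000000 and returns v + 0x4, so accesses land on the requested
 * register.
 */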
void iounmap(volatile void __iomem *addr)
{
/*
* If mapped by BATs then there is nothing to do.
* Calling vfree() generates a benign warning.
*/
if (v_mapped_by_bats((unsigned long)addr)) return;
if (addr > high_memory && (unsigned long) addr < ioremap_bot)
vunmap((void *) (PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(iounmap);
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
pmd_t *pd;
pte_t *pg;
int err = -ENOMEM;
/* Use upper 10 bits of VA to index the first level map */
pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
/* Use middle 10 bits of VA to index the second-level map */
pg = pte_alloc_kernel(pd, va);
if (pg != 0) {
err = 0;
/* The PTE should never be already set nor present in the
* hash table
*/
BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
flags);
set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
__pgprot(flags)));
}
return err;
}
/*
* Map in a chunk of physical memory starting at start.
*/
void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
unsigned long v, s, f;
phys_addr_t p;
int ktext;
s = offset;
v = PAGE_OFFSET + s;
p = memstart_addr + s;
for (; s < top; s += PAGE_SIZE) {
ktext = ((char *) v >= _stext && (char *) v < etext);
f = ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL;
map_page(v, p, f);
#ifdef CONFIG_PPC_STD_MMU_32
if (ktext)
hash_preload(&init_mm, v, 0, 0x300);
#endif
v += PAGE_SIZE;
p += PAGE_SIZE;
}
}
void __init mapin_ram(void)
{
unsigned long s, top;
#ifndef CONFIG_WII
top = total_lowmem;
s = mmu_mapin_ram(top);
__mapin_ram_chunk(s, top);
#else
if (!wii_hole_size) {
s = mmu_mapin_ram(total_lowmem);
__mapin_ram_chunk(s, total_lowmem);
} else {
top = wii_hole_start;
s = mmu_mapin_ram(top);
__mapin_ram_chunk(s, top);
top = lmb_end_of_DRAM();
s = wii_mmu_mapin_mem2(top);
__mapin_ram_chunk(s, top);
}
#endif
}
/* Scan the real Linux page tables and return a PTE pointer for
* a virtual address in a context.
* Returns true (1) if PTE was found, zero otherwise. The pointer to
* the PTE pointer is unmodified if PTE is not found.
*/
int
get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int retval = 0;
pgd = pgd_offset(mm, addr & PAGE_MASK);
if (pgd) {
pud = pud_offset(pgd, addr & PAGE_MASK);
if (pud && pud_present(*pud)) {
pmd = pmd_offset(pud, addr & PAGE_MASK);
if (pmd_present(*pmd)) {
pte = pte_offset_map(pmd, addr & PAGE_MASK);
if (pte) {
retval = 1;
*ptep = pte;
if (pmdp)
*pmdp = pmd;
/* XXX caller needs to do pte_unmap, yuck */
}
}
}
}
return(retval);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
static int __change_page_attr(struct page *page, pgprot_t prot)
{
pte_t *kpte;
pmd_t *kpmd;
unsigned long address;
BUG_ON(PageHighMem(page));
address = (unsigned long)page_address(page);
if (v_mapped_by_bats(address) || v_mapped_by_tlbcam(address))
return 0;
if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
return -EINVAL;
__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
wmb();
#ifdef CONFIG_PPC_STD_MMU
flush_hash_pages(0, address, pmd_val(*kpmd), 1);
#else
flush_tlb_page(NULL, address);
#endif
pte_unmap(kpte);
return 0;
}
/*
 * Change the page attributes of a page in the linear mapping.
*
* THIS CONFLICTS WITH BAT MAPPINGS, DEBUG USE ONLY
*/
static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
int i, err = 0;
unsigned long flags;
local_irq_save(flags);
for (i = 0; i < numpages; i++, page++) {
err = __change_page_attr(page, prot);
if (err)
break;
}
local_irq_restore(flags);
return err;
}
void kernel_map_pages(struct page *page, int numpages, int enable)
{
if (PageHighMem(page))
return;
change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */
static int fixmaps;
void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
unsigned long address = __fix_to_virt(idx);
if (idx >= __end_of_fixed_addresses) {
BUG();
return;
}
map_page(address, phys, pgprot_val(flags));
fixmaps++;
}
void __this_fixmap_does_not_exist(void)
{
WARN_ON(1);
}
| gpl-2.0 |
cameron581/sabermod_rom_toolchain | gcc/c-family/c-semantics.c | 27 | 4260 | /* This file contains subroutines used by the C front-end to construct GENERIC.
Copyright (C) 2000-2013 Free Software Foundation, Inc.
Written by Benjamin Chelf (chelf@codesourcery.com).
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "function.h"
#include "splay-tree.h"
#include "c-common.h"
#include "flags.h"
#include "tree-iterator.h"
/* Create an empty statement tree, push it onto the statement list stack, and return it.  */
tree
push_stmt_list (void)
{
tree t;
t = alloc_stmt_list ();
vec_safe_push (stmt_list_stack, t);
return t;
}
/* Finish the statement tree rooted at T. */
tree
pop_stmt_list (tree t)
{
tree u = NULL_TREE;
/* Pop statement lists until we reach the target level. The extra
nestings will be due to outstanding cleanups. */
while (1)
{
u = stmt_list_stack->pop ();
if (!stmt_list_stack->is_empty ())
{
tree x = stmt_list_stack->last ();
STATEMENT_LIST_HAS_LABEL (x) |= STATEMENT_LIST_HAS_LABEL (u);
}
if (t == u)
break;
}
gcc_assert (u != NULL_TREE);
/* If the statement list is completely empty, just return it.  This is
just as good as build_empty_stmt, with the advantage that
statement lists are merged when they are appended to one another.  So
using the STATEMENT_LIST avoids pathological buildup of EMPTY_STMT_P
statements. */
if (TREE_SIDE_EFFECTS (t))
{
tree_stmt_iterator i = tsi_start (t);
/* If the statement list contained exactly one statement, then
extract it immediately. */
if (tsi_one_before_end_p (i))
{
u = tsi_stmt (i);
tsi_delink (&i);
free_stmt_list (t);
t = u;
}
}
return t;
}
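/* A typical (sketched) pairing of the two routines above: callers
   bracket statement generation with a push/pop pair and append via
   add_stmt, which is assumed here to be the usual c-family helper
   that appends to the list on top of stmt_list_stack:

     tree body = push_stmt_list ();
     add_stmt (build_stmt (loc, DECL_EXPR, decl));
     ...
     body = pop_stmt_list (body);

   If exactly one statement was added, pop_stmt_list collapses the
   list and returns that statement directly.  */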
/* Build a generic statement based on the given type of node and
arguments. Similar to `build_nt', except that we set
EXPR_LOCATION to LOC. */
/* ??? This should be obsolete with the lineno_stmt productions
in the grammar. */
tree
build_stmt (location_t loc, enum tree_code code, ...)
{
tree ret;
int length, i;
va_list p;
bool side_effects;
/* This function cannot be used to construct variably-sized nodes. */
gcc_assert (TREE_CODE_CLASS (code) != tcc_vl_exp);
va_start (p, code);
ret = make_node (code);
TREE_TYPE (ret) = void_type_node;
length = TREE_CODE_LENGTH (code);
SET_EXPR_LOCATION (ret, loc);
/* TREE_SIDE_EFFECTS will already be set for statements with
implicit side effects. Here we make sure it is set for other
expressions by checking whether the parameters have side
effects. */
side_effects = false;
for (i = 0; i < length; i++)
{
tree t = va_arg (p, tree);
if (t && !TYPE_P (t))
side_effects |= TREE_SIDE_EFFECTS (t);
TREE_OPERAND (ret, i) = t;
}
TREE_SIDE_EFFECTS (ret) |= side_effects;
va_end (p);
return ret;
}
/* Build a REALPART_EXPR or IMAGPART_EXPR, according to CODE, from ARG. */
tree
build_real_imag_expr (location_t location, enum tree_code code, tree arg)
{
tree ret;
tree arg_type = TREE_TYPE (arg);
gcc_assert (code == REALPART_EXPR || code == IMAGPART_EXPR);
if (TREE_CODE (arg_type) == COMPLEX_TYPE)
{
ret = build1 (code, TREE_TYPE (TREE_TYPE (arg)), arg);
SET_EXPR_LOCATION (ret, location);
}
else if (INTEGRAL_TYPE_P (arg_type) || SCALAR_FLOAT_TYPE_P (arg_type))
{
ret = (code == REALPART_EXPR
? arg
: omit_one_operand_loc (location, arg_type,
integer_zero_node, arg));
}
else
{
error_at (location, "wrong type argument to %s",
code == REALPART_EXPR ? "__real" : "__imag");
ret = error_mark_node;
}
return ret;
}
| gpl-2.0 |
kalxas/QGIS | src/core/raster/qgscontrastenhancementfunction.cpp | 27 | 3134 | /* **************************************************************************
qgscontrastenhancementfunction.cpp - description
-------------------
begin : Fri Nov 16 2007
copyright : (C) 2007 by Peter J. Ersts
email : ersts@amnh.org
****************************************************************************/
/* **************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
#include "qgscontrastenhancementfunction.h"
#include "qgscontrastenhancement.h"
QgsContrastEnhancementFunction::QgsContrastEnhancementFunction( Qgis::DataType dataType, double minimumValue, double maximumValue )
: mMaximumValue( maximumValue )
, mMinimumValue( minimumValue )
, mMinimumMaximumRange( mMaximumValue - mMinimumValue )
, mQgsRasterDataType( dataType )
, mMaximumValuePossible( QgsContrastEnhancement::maximumValuePossible( mQgsRasterDataType ) )
, mMinimumValuePossible( QgsContrastEnhancement::minimumValuePossible( mQgsRasterDataType ) )
{
}
QgsContrastEnhancementFunction::QgsContrastEnhancementFunction( const QgsContrastEnhancementFunction &f )
: mMaximumValue( f.mMaximumValue )
, mMinimumValue( f.mMinimumValue )
, mMinimumMaximumRange( f.mMinimumMaximumRange )
, mQgsRasterDataType( f.mQgsRasterDataType )
, mMaximumValuePossible( f.mMaximumValuePossible )
, mMinimumValuePossible( f.mMinimumValuePossible )
{
}
int QgsContrastEnhancementFunction::enhance( double value )
{
if ( mQgsRasterDataType == Qgis::DataType::Byte )
{
return static_cast<int>( value );
}
else
{
return static_cast<int>( ( ( ( value - mMinimumValuePossible ) / ( mMaximumValuePossible - mMinimumValuePossible ) ) * 255.0 ) );
}
}
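// Worked example for the non-Byte branch above: for 16-bit unsigned
// data the possible range is [0, 65535], so a raw value of 32768 maps
// to int( ( 32768 - 0 ) / 65535.0 * 255.0 ) = int( 127.5 ) = 127.
// Byte data is returned unchanged since it already fits 0..255.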
bool QgsContrastEnhancementFunction::isValueInDisplayableRange( double value )
{
//A default check is to see if the provided value is with the range for the data type
// Write the test as ( v >= min && v <= max ) so that v = NaN returns false
return value >= mMinimumValuePossible && value <= mMaximumValuePossible;
}
void QgsContrastEnhancementFunction::setMaximumValue( double value )
{
if ( mMaximumValuePossible < value )
{
mMaximumValue = mMaximumValuePossible;
}
else
{
mMaximumValue = value;
}
mMinimumMaximumRange = mMaximumValue - mMinimumValue;
}
void QgsContrastEnhancementFunction::setMinimumValue( double value )
{
if ( mMinimumValuePossible > value )
{
mMinimumValue = mMinimumValuePossible;
}
else
{
mMinimumValue = value;
}
mMinimumMaximumRange = mMaximumValue - mMinimumValue;
}
| gpl-2.0 |
smipi1/elce2015-tiny-linux | net/ipv6/sysctl_net_ipv6.c | 539 | 4470 | /*
* sysctl_net_ipv6.c: sysctl interface to net IPV6 subsystem.
*
* Changes:
* YOSHIFUJI Hideaki @USAGI: added icmp sysctl table.
*/
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>
static int one = 1;
static struct ctl_table ipv6_table_template[] = {
{
.procname = "bindv6only",
.data = &init_net.ipv6.sysctl.bindv6only,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "anycast_src_echo_reply",
.data = &init_net.ipv6.sysctl.anycast_src_echo_reply,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "flowlabel_consistency",
.data = &init_net.ipv6.sysctl.flowlabel_consistency,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "auto_flowlabels",
.data = &init_net.ipv6.sysctl.auto_flowlabels,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "fwmark_reflect",
.data = &init_net.ipv6.sysctl.fwmark_reflect,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{ }
};
static struct ctl_table ipv6_rotable[] = {
{
.procname = "mld_max_msf",
.data = &sysctl_mld_max_msf,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "mld_qrv",
.data = &sysctl_mld_qrv,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &one
},
{ }
};
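/*
 * Both tables surface under /proc/sys, so the entries above appear as
 * net.ipv6.mld_max_msf and net.ipv6.mld_qrv and can be tuned with the
 * usual interface, e.g.:
 *
 *	sysctl -w net.ipv6.mld_qrv=3
 *
 * mld_qrv uses proc_dointvec_minmax with extra1 = &one, which rejects
 * writes below 1.
 */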
static int __net_init ipv6_sysctl_net_init(struct net *net)
{
struct ctl_table *ipv6_table;
struct ctl_table *ipv6_route_table;
struct ctl_table *ipv6_icmp_table;
int err;
err = -ENOMEM;
ipv6_table = kmemdup(ipv6_table_template, sizeof(ipv6_table_template),
GFP_KERNEL);
if (!ipv6_table)
goto out;
ipv6_table[0].data = &net->ipv6.sysctl.bindv6only;
ipv6_table[1].data = &net->ipv6.sysctl.anycast_src_echo_reply;
ipv6_table[2].data = &net->ipv6.sysctl.flowlabel_consistency;
ipv6_table[3].data = &net->ipv6.sysctl.auto_flowlabels;
ipv6_table[4].data = &net->ipv6.sysctl.fwmark_reflect;
ipv6_route_table = ipv6_route_sysctl_init(net);
if (!ipv6_route_table)
goto out_ipv6_table;
ipv6_icmp_table = ipv6_icmp_sysctl_init(net);
if (!ipv6_icmp_table)
goto out_ipv6_route_table;
net->ipv6.sysctl.hdr = register_net_sysctl(net, "net/ipv6", ipv6_table);
if (!net->ipv6.sysctl.hdr)
goto out_ipv6_icmp_table;
net->ipv6.sysctl.route_hdr =
register_net_sysctl(net, "net/ipv6/route", ipv6_route_table);
if (!net->ipv6.sysctl.route_hdr)
goto out_unregister_ipv6_table;
net->ipv6.sysctl.icmp_hdr =
register_net_sysctl(net, "net/ipv6/icmp", ipv6_icmp_table);
if (!net->ipv6.sysctl.icmp_hdr)
goto out_unregister_route_table;
err = 0;
out:
return err;
out_unregister_route_table:
unregister_net_sysctl_table(net->ipv6.sysctl.route_hdr);
out_unregister_ipv6_table:
unregister_net_sysctl_table(net->ipv6.sysctl.hdr);
out_ipv6_icmp_table:
kfree(ipv6_icmp_table);
out_ipv6_route_table:
kfree(ipv6_route_table);
out_ipv6_table:
kfree(ipv6_table);
goto out;
}
static void __net_exit ipv6_sysctl_net_exit(struct net *net)
{
struct ctl_table *ipv6_table;
struct ctl_table *ipv6_route_table;
struct ctl_table *ipv6_icmp_table;
ipv6_table = net->ipv6.sysctl.hdr->ctl_table_arg;
ipv6_route_table = net->ipv6.sysctl.route_hdr->ctl_table_arg;
ipv6_icmp_table = net->ipv6.sysctl.icmp_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->ipv6.sysctl.icmp_hdr);
unregister_net_sysctl_table(net->ipv6.sysctl.route_hdr);
unregister_net_sysctl_table(net->ipv6.sysctl.hdr);
kfree(ipv6_table);
kfree(ipv6_route_table);
kfree(ipv6_icmp_table);
}
static struct pernet_operations ipv6_sysctl_net_ops = {
.init = ipv6_sysctl_net_init,
.exit = ipv6_sysctl_net_exit,
};
static struct ctl_table_header *ip6_header;
int ipv6_sysctl_register(void)
{
int err = -ENOMEM;
ip6_header = register_net_sysctl(&init_net, "net/ipv6", ipv6_rotable);
if (ip6_header == NULL)
goto out;
err = register_pernet_subsys(&ipv6_sysctl_net_ops);
if (err)
goto err_pernet;
out:
return err;
err_pernet:
unregister_net_sysctl_table(ip6_header);
goto out;
}
void ipv6_sysctl_unregister(void)
{
unregister_net_sysctl_table(ip6_header);
unregister_pernet_subsys(&ipv6_sysctl_net_ops);
}
| gpl-2.0 |
sooorajjj/android_kernel_cyanogen_msm8916 | fs/xfs/xfs_vnodeops.c | 2075 | 46658 | /*
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
* Copyright (c) 2012 Red Hat, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_itable.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_rtalloc.h"
#include "xfs_trans_space.h"
#include "xfs_log_priv.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
/*
* This is called by xfs_inactive to free any blocks beyond eof
* when the link count isn't zero and by xfs_dm_punch_hole() when
* punching a hole to EOF.
*/
int
xfs_free_eofblocks(
xfs_mount_t *mp,
xfs_inode_t *ip,
bool need_iolock)
{
xfs_trans_t *tp;
int error;
xfs_fileoff_t end_fsb;
xfs_fileoff_t last_fsb;
xfs_filblks_t map_len;
int nimaps;
xfs_bmbt_irec_t imap;
/*
* Figure out if there are any blocks beyond the end
* of the file. If not, then there is nothing to do.
*/
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
if (last_fsb <= end_fsb)
return 0;
map_len = last_fsb - end_fsb;
nimaps = 1;
xfs_ilock(ip, XFS_ILOCK_SHARED);
error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
if (!error && (nimaps != 0) &&
(imap.br_startblock != HOLESTARTBLOCK ||
ip->i_delayed_blks)) {
/*
* Attach the dquots to the inode up front.
*/
error = xfs_qm_dqattach(ip, 0);
if (error)
return error;
/*
* There are blocks after the end of file.
* Free them up now by truncating the file to
* its current size.
*/
tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
if (need_iolock) {
if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
xfs_trans_cancel(tp, 0);
return EAGAIN;
}
}
error = xfs_trans_reserve(tp, 0,
XFS_ITRUNCATE_LOG_RES(mp),
0, XFS_TRANS_PERM_LOG_RES,
XFS_ITRUNCATE_LOG_COUNT);
if (error) {
ASSERT(XFS_FORCED_SHUTDOWN(mp));
xfs_trans_cancel(tp, 0);
if (need_iolock)
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
return error;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
/*
* Do not update the on-disk file size. If we update the
* on-disk file size and then the system crashes before the
* contents of the file are flushed to disk then the files
* may be full of holes (ie NULL files bug).
*/
error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
XFS_ISIZE(ip));
if (error) {
/*
* If we get an error at this point we simply don't
* bother truncating the file.
*/
xfs_trans_cancel(tp,
(XFS_TRANS_RELEASE_LOG_RES |
XFS_TRANS_ABORT));
} else {
error = xfs_trans_commit(tp,
XFS_TRANS_RELEASE_LOG_RES);
if (!error)
xfs_inode_clear_eofblocks_tag(ip);
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (need_iolock)
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
}
return error;
}
int
xfs_release(
xfs_inode_t *ip)
{
xfs_mount_t *mp = ip->i_mount;
int error;
if (!S_ISREG(ip->i_d.di_mode) || (ip->i_d.di_mode == 0))
return 0;
/* If this is a read-only mount, don't do this (would generate I/O) */
if (mp->m_flags & XFS_MOUNT_RDONLY)
return 0;
if (!XFS_FORCED_SHUTDOWN(mp)) {
int truncated;
/*
* If we are using filestreams, and we have an unlinked
* file that we are processing the last close on, then nothing
* will be able to reopen and write to this file. Purge this
* inode from the filestreams cache so that it doesn't delay
* teardown of the inode.
*/
if ((ip->i_d.di_nlink == 0) && xfs_inode_is_filestream(ip))
xfs_filestream_deassociate(ip);
/*
* If we previously truncated this file and removed old data
* in the process, we want to initiate "early" writeout on
* the last close. This is an attempt to combat the notorious
* NULL files problem which is particularly noticeable from a
* truncate down, buffered (re-)write (delalloc), followed by
* a crash. What we are effectively doing here is
* significantly reducing the time window where we'd otherwise
* be exposed to that problem.
*/
truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
if (truncated) {
xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0) {
error = -filemap_flush(VFS_I(ip)->i_mapping);
if (error)
return error;
}
}
}
if (ip->i_d.di_nlink == 0)
return 0;
if (xfs_can_free_eofblocks(ip, false)) {
/*
* If we can't get the iolock just skip truncating the blocks
* past EOF because we could deadlock with the mmap_sem
* otherwise. We'll get another chance to drop them once the
* last reference to the inode is dropped, so we'll never leak
* blocks permanently.
*
* Further, check if the inode is being opened, written and
* closed frequently and we have delayed allocation blocks
* outstanding (e.g. streaming writes from the NFS server),
* truncating the blocks past EOF will cause fragmentation to
* occur.
*
* In this case don't do the truncation, either, but we have to
* be careful how we detect this case. Blocks beyond EOF show
* up as i_delayed_blks even when the inode is clean, so we
* need to truncate them away first before checking for a dirty
* release. Hence on the first dirty close we will still remove
* the speculative allocation, but after that we will leave it
* in place.
*/
if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
return 0;
error = xfs_free_eofblocks(mp, ip, true);
if (error && error != EAGAIN)
return error;
/* delalloc blocks after truncation means it really is dirty */
if (ip->i_delayed_blks)
xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
}
return 0;
}
/*
* xfs_inactive
*
 * This is called when the reference count for the vnode
* goes to zero. If the file has been unlinked, then it must
* now be truncated. Also, we clear all of the read-ahead state
* kept for the inode here since the file is now closed.
*/
int
xfs_inactive(
xfs_inode_t *ip)
{
xfs_bmap_free_t free_list;
xfs_fsblock_t first_block;
int committed;
xfs_trans_t *tp;
xfs_mount_t *mp;
int error;
int truncate = 0;
/*
* If the inode is already free, then there can be nothing
* to clean up here.
*/
if (ip->i_d.di_mode == 0 || is_bad_inode(VFS_I(ip))) {
ASSERT(ip->i_df.if_real_bytes == 0);
ASSERT(ip->i_df.if_broot_bytes == 0);
return VN_INACTIVE_CACHE;
}
mp = ip->i_mount;
error = 0;
/* If this is a read-only mount, don't do this (would generate I/O) */
if (mp->m_flags & XFS_MOUNT_RDONLY)
goto out;
if (ip->i_d.di_nlink != 0) {
/*
* force is true because we are evicting an inode from the
* cache. Post-eof blocks must be freed, lest we end up with
* broken free space accounting.
*/
if (xfs_can_free_eofblocks(ip, true)) {
error = xfs_free_eofblocks(mp, ip, false);
if (error)
return VN_INACTIVE_CACHE;
}
goto out;
}
if (S_ISREG(ip->i_d.di_mode) &&
(ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
truncate = 1;
error = xfs_qm_dqattach(ip, 0);
if (error)
return VN_INACTIVE_CACHE;
tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
error = xfs_trans_reserve(tp, 0,
(truncate || S_ISLNK(ip->i_d.di_mode)) ?
XFS_ITRUNCATE_LOG_RES(mp) :
XFS_IFREE_LOG_RES(mp),
0,
XFS_TRANS_PERM_LOG_RES,
XFS_ITRUNCATE_LOG_COUNT);
if (error) {
ASSERT(XFS_FORCED_SHUTDOWN(mp));
xfs_trans_cancel(tp, 0);
return VN_INACTIVE_CACHE;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
if (S_ISLNK(ip->i_d.di_mode)) {
/*
* Zero length symlinks _can_ exist.
*/
if (ip->i_d.di_size > XFS_IFORK_DSIZE(ip)) {
error = xfs_inactive_symlink_rmt(ip, &tp);
if (error)
goto out_cancel;
} else if (ip->i_df.if_bytes > 0) {
xfs_idata_realloc(ip, -(ip->i_df.if_bytes),
XFS_DATA_FORK);
ASSERT(ip->i_df.if_bytes == 0);
}
} else if (truncate) {
ip->i_d.di_size = 0;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
if (error)
goto out_cancel;
ASSERT(ip->i_d.di_nextents == 0);
}
/*
* If there are attributes associated with the file then blow them away
* now. The code calls a routine that recursively deconstructs the
* attribute fork. We need to just commit the current transaction
* because we can't use it for xfs_attr_inactive().
*/
if (ip->i_d.di_anextents > 0) {
ASSERT(ip->i_d.di_forkoff != 0);
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
if (error)
goto out_unlock;
xfs_iunlock(ip, XFS_ILOCK_EXCL);
error = xfs_attr_inactive(ip);
if (error)
goto out;
tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
error = xfs_trans_reserve(tp, 0,
XFS_IFREE_LOG_RES(mp),
0, XFS_TRANS_PERM_LOG_RES,
XFS_INACTIVE_LOG_COUNT);
if (error) {
xfs_trans_cancel(tp, 0);
goto out;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
}
if (ip->i_afp)
xfs_idestroy_fork(ip, XFS_ATTR_FORK);
ASSERT(ip->i_d.di_anextents == 0);
/*
* Free the inode.
*/
xfs_bmap_init(&free_list, &first_block);
error = xfs_ifree(tp, ip, &free_list);
if (error) {
/*
* If we fail to free the inode, shut down. The cancel
* might do that, we need to make sure. Otherwise the
* inode might be lost for a long time or forever.
*/
if (!XFS_FORCED_SHUTDOWN(mp)) {
xfs_notice(mp, "%s: xfs_ifree returned error %d",
__func__, error);
xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
}
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
} else {
/*
* Credit the quota account(s). The inode is gone.
*/
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
/*
* Just ignore errors at this point. There is nothing we can
* do except to try to keep going. Make sure it's not a silent
* error.
*/
error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error)
xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
__func__, error);
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
if (error)
xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
__func__, error);
}
/*
* Release the dquots held by inode, if any.
*/
xfs_qm_dqdetach(ip);
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
return VN_INACTIVE_CACHE;
out_cancel:
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
goto out_unlock;
}
/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
* will be set to NULL if an exact match is found.
*/
int
xfs_lookup(
xfs_inode_t *dp,
struct xfs_name *name,
xfs_inode_t **ipp,
struct xfs_name *ci_name)
{
xfs_ino_t inum;
int error;
uint lock_mode;
trace_xfs_lookup(dp, name);
if (XFS_FORCED_SHUTDOWN(dp->i_mount))
return XFS_ERROR(EIO);
lock_mode = xfs_ilock_map_shared(dp);
error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
xfs_iunlock_map_shared(dp, lock_mode);
if (error)
goto out;
error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
if (error)
goto out_free_name;
return 0;
out_free_name:
if (ci_name)
kmem_free(ci_name->name);
out:
*ipp = NULL;
return error;
}
int
xfs_create(
xfs_inode_t *dp,
struct xfs_name *name,
umode_t mode,
xfs_dev_t rdev,
xfs_inode_t **ipp)
{
int is_dir = S_ISDIR(mode);
struct xfs_mount *mp = dp->i_mount;
struct xfs_inode *ip = NULL;
struct xfs_trans *tp = NULL;
int error;
xfs_bmap_free_t free_list;
xfs_fsblock_t first_block;
bool unlock_dp_on_error = false;
uint cancel_flags;
int committed;
prid_t prid;
struct xfs_dquot *udqp = NULL;
struct xfs_dquot *gdqp = NULL;
uint resblks;
uint log_res;
uint log_count;
trace_xfs_create(dp, name);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
prid = xfs_get_projid(dp);
else
prid = XFS_PROJID_DEFAULT;
/*
* Make sure that we have allocated dquot(s) on disk.
*/
error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
if (error)
return error;
if (is_dir) {
rdev = 0;
resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
log_res = XFS_MKDIR_LOG_RES(mp);
log_count = XFS_MKDIR_LOG_COUNT;
tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
} else {
resblks = XFS_CREATE_SPACE_RES(mp, name->len);
log_res = XFS_CREATE_LOG_RES(mp);
log_count = XFS_CREATE_LOG_COUNT;
tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
}
cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
/*
* Initially assume that the file does not exist and
* reserve the resources for that case. If that is not
* the case we'll drop the one we have and get a more
* appropriate transaction later.
*/
error = xfs_trans_reserve(tp, resblks, log_res, 0,
XFS_TRANS_PERM_LOG_RES, log_count);
if (error == ENOSPC) {
/* flush outstanding delalloc blocks and retry */
xfs_flush_inodes(mp);
error = xfs_trans_reserve(tp, resblks, log_res, 0,
XFS_TRANS_PERM_LOG_RES, log_count);
}
if (error == ENOSPC) {
/* No space at all so try a "no-allocation" reservation */
resblks = 0;
error = xfs_trans_reserve(tp, 0, log_res, 0,
XFS_TRANS_PERM_LOG_RES, log_count);
}
if (error) {
cancel_flags = 0;
goto out_trans_cancel;
}
xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
unlock_dp_on_error = true;
xfs_bmap_init(&free_list, &first_block);
/*
* Reserve disk quota and the inode.
*/
error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, resblks, 1, 0);
if (error)
goto out_trans_cancel;
error = xfs_dir_canenter(tp, dp, name, resblks);
if (error)
goto out_trans_cancel;
/*
 * A newly created regular or special file just has one directory
 * entry pointing to it, but a directory also has the "." entry
 * pointing to itself.
*/
error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
prid, resblks > 0, &ip, &committed);
if (error) {
if (error == ENOSPC)
goto out_trans_cancel;
goto out_trans_abort;
}
/*
* Now we join the directory inode to the transaction. We do not do it
* earlier because xfs_dir_ialloc might commit the previous transaction
* (and release all the locks). An error from here on will result in
* the transaction cancel unlocking dp so don't do it explicitly in the
* error path.
*/
xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
unlock_dp_on_error = false;
error = xfs_dir_createname(tp, dp, name, ip->i_ino,
&first_block, &free_list, resblks ?
resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
if (error) {
ASSERT(error != ENOSPC);
goto out_trans_abort;
}
xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
if (is_dir) {
error = xfs_dir_init(tp, ip, dp);
if (error)
goto out_bmap_cancel;
error = xfs_bumplink(tp, dp);
if (error)
goto out_bmap_cancel;
}
/*
* If this is a synchronous mount, make sure that the
* create transaction goes to disk before returning to
* the user.
*/
if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
xfs_trans_set_sync(tp);
/*
* Attach the dquot(s) to the inodes and modify them incore.
* These ids of the inode couldn't have changed since the new
* inode has been locked ever since it was created.
*/
xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);
error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error)
goto out_bmap_cancel;
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
if (error)
goto out_release_inode;
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
*ipp = ip;
return 0;
out_bmap_cancel:
xfs_bmap_cancel(&free_list);
out_trans_abort:
cancel_flags |= XFS_TRANS_ABORT;
out_trans_cancel:
xfs_trans_cancel(tp, cancel_flags);
out_release_inode:
/*
* Wait until after the current transaction is aborted to
* release the inode. This prevents recursive transactions
* and deadlocks from xfs_inactive.
*/
if (ip)
IRELE(ip);
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
if (unlock_dp_on_error)
xfs_iunlock(dp, XFS_ILOCK_EXCL);
return error;
}
#ifdef DEBUG
int xfs_locked_n;
int xfs_small_retries;
int xfs_middle_retries;
int xfs_lots_retries;
int xfs_lock_delays;
#endif
/*
* Bump the subclass so xfs_lock_inodes() acquires each lock with
* a different value
*/
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
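/*
 * Encode the subclass into the bits above the base lock flags so that
 * lockdep sees each inode's lock in the set as a distinct class
 * instead of reporting recursion on a single class.
 */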
if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;
return lock_mode;
}
/*
* The following routine will lock n inodes in exclusive mode.
* We assume the caller calls us with the inodes in i_ino order.
*
* We need to detect deadlock where an inode that we lock
* is in the AIL and we start waiting for another inode that is locked
* by a thread in a long running transaction (such as truncate). This can
* result in deadlock since the long running trans might need to wait
* for the inode we just locked in order to push the tail and free space
* in the log.
*/
void
xfs_lock_inodes(
xfs_inode_t **ips,
int inodes,
uint lock_mode)
{
int attempts = 0, i, j, try_lock;
xfs_log_item_t *lp;
ASSERT(ips && (inodes >= 2)); /* we need at least two */
try_lock = 0;
i = 0;
again:
for (; i < inodes; i++) {
ASSERT(ips[i]);
if (i && (ips[i] == ips[i-1])) /* Already locked */
continue;
/*
* If try_lock is not set yet, make sure all locked inodes
* are not in the AIL.
* If any are, set try_lock to be used later.
*/
if (!try_lock) {
for (j = (i - 1); j >= 0 && !try_lock; j--) {
lp = (xfs_log_item_t *)ips[j]->i_itemp;
if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
try_lock++;
}
}
}
/*
* If any of the previous locks we have locked is in the AIL,
* we must TRY to get the second and subsequent locks. If
* we can't get any, we must release all we have
* and try again.
*/
if (try_lock) {
/* try_lock must be 0 if i is 0. */
/*
* try_lock means we have an inode locked
* that is in the AIL.
*/
ASSERT(i != 0);
if (!xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i))) {
attempts++;
/*
* Unlock all previous guys and try again.
* xfs_iunlock will try to push the tail
* if the inode is in the AIL.
*/
for (j = i - 1; j >= 0; j--) {
/*
* Check to see if we've already
* unlocked this one.
* Not the first one going back,
* and the inode ptr is the same.
*/
if ((j != (i - 1)) && ips[j] == ips[j+1])
continue;
xfs_iunlock(ips[j], lock_mode);
}
if ((attempts % 5) == 0) {
delay(1); /* Don't just spin the CPU */
#ifdef DEBUG
xfs_lock_delays++;
#endif
}
i = 0;
try_lock = 0;
goto again;
}
} else {
xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
}
}
#ifdef DEBUG
if (attempts) {
if (attempts < 5) xfs_small_retries++;
else if (attempts < 100) xfs_middle_retries++;
else xfs_lots_retries++;
} else {
xfs_locked_n++;
}
#endif
}
/*
* xfs_lock_two_inodes() can only be used to lock one type of lock
* at a time - the iolock or the ilock, but not both at once. If
* we lock both at once, lockdep will report false positives saying
* we have violated locking orders.
*/
void
xfs_lock_two_inodes(
xfs_inode_t *ip0,
xfs_inode_t *ip1,
uint lock_mode)
{
xfs_inode_t *temp;
int attempts = 0;
xfs_log_item_t *lp;
if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
ASSERT((lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) == 0);
ASSERT(ip0->i_ino != ip1->i_ino);
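/* Always lock in ascending inode number order to get a global ordering. */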
if (ip0->i_ino > ip1->i_ino) {
temp = ip0;
ip0 = ip1;
ip1 = temp;
}
again:
xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));
/*
* If the first lock we have locked is in the AIL, we must TRY to get
* the second lock. If we can't get it, we must release the first one
* and try again.
*/
lp = (xfs_log_item_t *)ip0->i_itemp;
if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
xfs_iunlock(ip0, lock_mode);
if ((++attempts % 5) == 0)
delay(1); /* Don't just spin the CPU */
goto again;
}
} else {
xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
}
}
int
xfs_remove(
xfs_inode_t *dp,
struct xfs_name *name,
xfs_inode_t *ip)
{
xfs_mount_t *mp = dp->i_mount;
xfs_trans_t *tp = NULL;
int is_dir = S_ISDIR(ip->i_d.di_mode);
int error = 0;
xfs_bmap_free_t free_list;
xfs_fsblock_t first_block;
int cancel_flags;
int committed;
int link_zero;
uint resblks;
uint log_count;
trace_xfs_remove(dp, name);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
error = xfs_qm_dqattach(dp, 0);
if (error)
goto std_return;
error = xfs_qm_dqattach(ip, 0);
if (error)
goto std_return;
if (is_dir) {
tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
log_count = XFS_DEFAULT_LOG_COUNT;
} else {
tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);
log_count = XFS_REMOVE_LOG_COUNT;
}
cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
/*
* We try to get the real space reservation first,
* allowing for directory btree deletion(s) implying
* possible bmap insert(s). If we can't get the space
* reservation then we use 0 instead, and avoid the bmap
* btree insert(s) in the directory code by trimming the
* LAST block from the directory if a bmap insert would
* otherwise be needed.
*/
resblks = XFS_REMOVE_SPACE_RES(mp);
error = xfs_trans_reserve(tp, resblks, XFS_REMOVE_LOG_RES(mp), 0,
XFS_TRANS_PERM_LOG_RES, log_count);
if (error == ENOSPC) {
resblks = 0;
error = xfs_trans_reserve(tp, 0, XFS_REMOVE_LOG_RES(mp), 0,
XFS_TRANS_PERM_LOG_RES, log_count);
}
if (error) {
ASSERT(error != ENOSPC);
cancel_flags = 0;
goto out_trans_cancel;
}
xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
/*
* If we're removing a directory perform some additional validation.
*/
if (is_dir) {
ASSERT(ip->i_d.di_nlink >= 2);
if (ip->i_d.di_nlink != 2) {
error = XFS_ERROR(ENOTEMPTY);
goto out_trans_cancel;
}
if (!xfs_dir_isempty(ip)) {
error = XFS_ERROR(ENOTEMPTY);
goto out_trans_cancel;
}
}
xfs_bmap_init(&free_list, &first_block);
error = xfs_dir_removename(tp, dp, name, ip->i_ino,
&first_block, &free_list, resblks);
if (error) {
ASSERT(error != ENOENT);
goto out_bmap_cancel;
}
xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
if (is_dir) {
/*
* Drop the link from ip's "..".
*/
error = xfs_droplink(tp, dp);
if (error)
goto out_bmap_cancel;
/*
* Drop the "." link from ip to self.
*/
error = xfs_droplink(tp, ip);
if (error)
goto out_bmap_cancel;
} else {
/*
* When removing a non-directory we need to log the parent
* inode here. For a directory this is done implicitly
* by the xfs_droplink call for the ".." entry.
*/
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
}
/*
* Drop the link from dp to ip.
*/
error = xfs_droplink(tp, ip);
if (error)
goto out_bmap_cancel;
/*
* Determine if this is the last link while
* we are in the transaction.
*/
link_zero = (ip->i_d.di_nlink == 0);
/*
* If this is a synchronous mount, make sure that the
* remove transaction goes to disk before returning to
* the user.
*/
if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
xfs_trans_set_sync(tp);
error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error)
goto out_bmap_cancel;
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
if (error)
goto std_return;
/*
* If we are using filestreams, kill the stream association.
* If the file is still open it may get a new one but that
* will get killed on last close in xfs_close() so we don't
* have to worry about that.
*/
if (!is_dir && link_zero && xfs_inode_is_filestream(ip))
xfs_filestream_deassociate(ip);
return 0;
out_bmap_cancel:
xfs_bmap_cancel(&free_list);
cancel_flags |= XFS_TRANS_ABORT;
out_trans_cancel:
xfs_trans_cancel(tp, cancel_flags);
std_return:
return error;
}
int
xfs_link(
xfs_inode_t *tdp,
xfs_inode_t *sip,
struct xfs_name *target_name)
{
xfs_mount_t *mp = tdp->i_mount;
xfs_trans_t *tp;
int error;
xfs_bmap_free_t free_list;
xfs_fsblock_t first_block;
int cancel_flags;
int committed;
int resblks;
trace_xfs_link(tdp, target_name);
ASSERT(!S_ISDIR(sip->i_d.di_mode));
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
error = xfs_qm_dqattach(sip, 0);
if (error)
goto std_return;
error = xfs_qm_dqattach(tdp, 0);
if (error)
goto std_return;
tp = xfs_trans_alloc(mp, XFS_TRANS_LINK);
cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
error = xfs_trans_reserve(tp, resblks, XFS_LINK_LOG_RES(mp), 0,
XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT);
if (error == ENOSPC) {
resblks = 0;
error = xfs_trans_reserve(tp, 0, XFS_LINK_LOG_RES(mp), 0,
XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT);
}
if (error) {
cancel_flags = 0;
goto error_return;
}
xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
/*
* If we are using project inheritance, we only allow hard link
* creation in our tree when the project IDs are the same; else
* the tree quota mechanism could be circumvented.
*/
if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
(xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
error = XFS_ERROR(EXDEV);
goto error_return;
}
error = xfs_dir_canenter(tp, tdp, target_name, resblks);
if (error)
goto error_return;
xfs_bmap_init(&free_list, &first_block);
error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
&first_block, &free_list, resblks);
if (error)
goto abort_return;
xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
error = xfs_bumplink(tp, sip);
if (error)
goto abort_return;
/*
* If this is a synchronous mount, make sure that the
* link transaction goes to disk before returning to
* the user.
*/
if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
xfs_trans_set_sync(tp);
}
error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error) {
xfs_bmap_cancel(&free_list);
goto abort_return;
}
return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
abort_return:
cancel_flags |= XFS_TRANS_ABORT;
error_return:
xfs_trans_cancel(tp, cancel_flags);
std_return:
return error;
}
int
xfs_set_dmattrs(
xfs_inode_t *ip,
u_int evmask,
u_int16_t state)
{
xfs_mount_t *mp = ip->i_mount;
xfs_trans_t *tp;
int error;
if (!capable(CAP_SYS_ADMIN))
return XFS_ERROR(EPERM);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
tp = xfs_trans_alloc(mp, XFS_TRANS_SET_DMATTRS);
error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
if (error) {
xfs_trans_cancel(tp, 0);
return error;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
ip->i_d.di_dmevmask = evmask;
ip->i_d.di_dmstate = state;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
error = xfs_trans_commit(tp, 0);
return error;
}
/*
* xfs_alloc_file_space()
* This routine allocates disk space for the given file.
*
* If alloc_type == 0, this request is for an ALLOCSP type
* request which will change the file size. In this case, no
* DMAPI event will be generated by the call. A TRUNCATE event
* will be generated later by xfs_setattr.
*
* If alloc_type != 0, this request is for a RESVSP type
* request, and a DMAPI DM_EVENT_WRITE will be generated if the
* lower block boundary byte address is less than the file's
* length.
*
* RETURNS:
* 0 on success
* errno on error
*
*/
STATIC int
xfs_alloc_file_space(
xfs_inode_t *ip,
xfs_off_t offset,
xfs_off_t len,
int alloc_type,
int attr_flags)
{
xfs_mount_t *mp = ip->i_mount;
xfs_off_t count;
xfs_filblks_t allocated_fsb;
xfs_filblks_t allocatesize_fsb;
xfs_extlen_t extsz, temp;
xfs_fileoff_t startoffset_fsb;
xfs_fsblock_t firstfsb;
int nimaps;
int quota_flag;
int rt;
xfs_trans_t *tp;
xfs_bmbt_irec_t imaps[1], *imapp;
xfs_bmap_free_t free_list;
uint qblocks, resblks, resrtextents;
int committed;
int error;
trace_xfs_alloc_file_space(ip);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
error = xfs_qm_dqattach(ip, 0);
if (error)
return error;
if (len <= 0)
return XFS_ERROR(EINVAL);
rt = XFS_IS_REALTIME_INODE(ip);
extsz = xfs_get_extsz_hint(ip);
count = len;
imapp = &imaps[0];
nimaps = 1;
startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
allocatesize_fsb = XFS_B_TO_FSB(mp, count);
/*
* Allocate file space until done or until there is an error
*/
while (allocatesize_fsb && !error) {
xfs_fileoff_t s, e;
/*
* Determine space reservations for data/realtime.
*/
if (unlikely(extsz)) {
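/*
 * Round the start down and the end up to multiples of the extent
 * size hint so the reservation covers whole allocation units.
 */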
s = startoffset_fsb;
do_div(s, extsz);
s *= extsz;
e = startoffset_fsb + allocatesize_fsb;
if ((temp = do_mod(startoffset_fsb, extsz)))
e += temp;
if ((temp = do_mod(e, extsz)))
e += extsz - temp;
} else {
s = 0;
e = allocatesize_fsb;
}
/*
* The transaction reservation is limited to a 32-bit block
* count, hence we need to limit the number of blocks we are
* trying to reserve to avoid an overflow. We can't allocate
* more than @nimaps extents, and an extent is limited on disk
* to MAXEXTLEN (21 bits), so use that to enforce the limit.
*/
resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
if (unlikely(rt)) {
resrtextents = qblocks = resblks;
resrtextents /= mp->m_sb.sb_rextsize;
resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
quota_flag = XFS_QMOPT_RES_RTBLKS;
} else {
resrtextents = 0;
resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
quota_flag = XFS_QMOPT_RES_REGBLKS;
}
/*
* Allocate and setup the transaction.
*/
tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
error = xfs_trans_reserve(tp, resblks,
XFS_WRITE_LOG_RES(mp), resrtextents,
XFS_TRANS_PERM_LOG_RES,
XFS_WRITE_LOG_COUNT);
/*
* Check for running out of space
*/
if (error) {
/*
* Free the transaction structure.
*/
ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
xfs_trans_cancel(tp, 0);
break;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
0, quota_flag);
if (error)
goto error1;
xfs_trans_ijoin(tp, ip, 0);
xfs_bmap_init(&free_list, &firstfsb);
error = xfs_bmapi_write(tp, ip, startoffset_fsb,
allocatesize_fsb, alloc_type, &firstfsb,
0, imapp, &nimaps, &free_list);
if (error) {
goto error0;
}
/*
* Complete the transaction
*/
error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error) {
goto error0;
}
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (error) {
break;
}
allocated_fsb = imapp->br_blockcount;
if (nimaps == 0) {
error = XFS_ERROR(ENOSPC);
break;
}
startoffset_fsb += allocated_fsb;
allocatesize_fsb -= allocated_fsb;
}
return error;
error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
xfs_bmap_cancel(&free_list);
xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
error1: /* Just cancel transaction */
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
/*
* Zero file bytes between startoff and endoff inclusive.
* The iolock is held exclusive and no blocks are buffered.
*
* This function is used by xfs_free_file_space() to zero
* partial blocks when the range to free is not block aligned.
* When unreserving space with boundaries that are not block
* aligned we round up the start and round down the end
* boundaries and then use this function to zero the parts of
* the blocks that got dropped during the rounding.
*/
STATIC int
xfs_zero_remaining_bytes(
xfs_inode_t *ip,
xfs_off_t startoff,
xfs_off_t endoff)
{
xfs_bmbt_irec_t imap;
xfs_fileoff_t offset_fsb;
xfs_off_t lastoffset;
xfs_off_t offset;
xfs_buf_t *bp;
xfs_mount_t *mp = ip->i_mount;
int nimap;
int error = 0;
/*
* Avoid doing I/O beyond eof - it's not necessary
* since nothing can read beyond eof. The space will
* be zeroed when the file is extended anyway.
*/
if (startoff >= XFS_ISIZE(ip))
return 0;
if (endoff > XFS_ISIZE(ip))
endoff = XFS_ISIZE(ip);
bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
mp->m_rtdev_targp : mp->m_ddev_targp,
BTOBB(mp->m_sb.sb_blocksize), 0);
if (!bp)
return XFS_ERROR(ENOMEM);
xfs_buf_unlock(bp);
for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
offset_fsb = XFS_B_TO_FSBT(mp, offset);
nimap = 1;
error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
if (error || nimap < 1)
break;
ASSERT(imap.br_blockcount >= 1);
ASSERT(imap.br_startoff == offset_fsb);
lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
if (lastoffset > endoff)
lastoffset = endoff;
if (imap.br_startblock == HOLESTARTBLOCK)
continue;
ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
if (imap.br_state == XFS_EXT_UNWRITTEN)
continue;
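/*
 * Read the block covering this offset, zero the target byte range in
 * the buffer copy, then write the block back out.
 */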
XFS_BUF_UNDONE(bp);
XFS_BUF_UNWRITE(bp);
XFS_BUF_READ(bp);
XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
xfsbdstrat(mp, bp);
error = xfs_buf_iowait(bp);
if (error) {
xfs_buf_ioerror_alert(bp,
"xfs_zero_remaining_bytes(read)");
break;
}
memset(bp->b_addr +
(offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
0, lastoffset - offset + 1);
XFS_BUF_UNDONE(bp);
XFS_BUF_UNREAD(bp);
XFS_BUF_WRITE(bp);
xfsbdstrat(mp, bp);
error = xfs_buf_iowait(bp);
if (error) {
xfs_buf_ioerror_alert(bp,
"xfs_zero_remaining_bytes(write)");
break;
}
}
xfs_buf_free(bp);
return error;
}
/*
* xfs_free_file_space()
* This routine frees disk space for the given file.
*
* This routine is only called by xfs_change_file_space
* for an UNRESVSP type call.
*
* RETURNS:
* 0 on success
* errno on error
*
*/
STATIC int
xfs_free_file_space(
xfs_inode_t *ip,
xfs_off_t offset,
xfs_off_t len,
int attr_flags)
{
int committed;
int done;
xfs_fileoff_t endoffset_fsb;
int error;
xfs_fsblock_t firstfsb;
xfs_bmap_free_t free_list;
xfs_bmbt_irec_t imap;
xfs_off_t ioffset;
xfs_extlen_t mod = 0;
xfs_mount_t *mp;
int nimap;
uint resblks;
xfs_off_t rounding;
int rt;
xfs_fileoff_t startoffset_fsb;
xfs_trans_t *tp;
int need_iolock = 1;
mp = ip->i_mount;
trace_xfs_free_file_space(ip);
error = xfs_qm_dqattach(ip, 0);
if (error)
return error;
error = 0;
if (len <= 0) /* if nothing being freed */
return error;
rt = XFS_IS_REALTIME_INODE(ip);
startoffset_fsb = XFS_B_TO_FSB(mp, offset);
endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
if (attr_flags & XFS_ATTR_NOLOCK)
need_iolock = 0;
if (need_iolock) {
xfs_ilock(ip, XFS_IOLOCK_EXCL);
/* wait for the completion of any pending DIOs */
inode_dio_wait(VFS_I(ip));
}
rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
ioffset = offset & ~(rounding - 1);
error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
ioffset, -1);
if (error)
goto out_unlock_iolock;
truncate_pagecache_range(VFS_I(ip), ioffset, -1);
/*
* Need to zero the stuff we're not freeing, on disk.
* If it's a realtime file & can't use unwritten extents then we
* actually need to zero the extent edges. Otherwise xfs_bunmapi
* will take care of it for us.
*/
if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
nimap = 1;
error = xfs_bmapi_read(ip, startoffset_fsb, 1,
&imap, &nimap, 0);
if (error)
goto out_unlock_iolock;
ASSERT(nimap == 0 || nimap == 1);
if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
xfs_daddr_t block;
ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
block = imap.br_startblock;
mod = do_div(block, mp->m_sb.sb_rextsize);
if (mod)
startoffset_fsb += mp->m_sb.sb_rextsize - mod;
}
nimap = 1;
error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
&imap, &nimap, 0);
if (error)
goto out_unlock_iolock;
ASSERT(nimap == 0 || nimap == 1);
if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
mod++;
if (mod && (mod != mp->m_sb.sb_rextsize))
endoffset_fsb -= mod;
}
}
if ((done = (endoffset_fsb <= startoffset_fsb)))
/*
* One contiguous piece to clear
*/
error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
else {
/*
* Some full blocks, possibly two pieces to clear
*/
if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
error = xfs_zero_remaining_bytes(ip, offset,
XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
if (!error &&
XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
error = xfs_zero_remaining_bytes(ip,
XFS_FSB_TO_B(mp, endoffset_fsb),
offset + len - 1);
}
/*
* free file space until done or until there is an error
*/
resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
while (!error && !done) {
/*
* allocate and setup the transaction. Allow this
* transaction to dip into the reserve blocks to ensure
* the freeing of the space succeeds at ENOSPC.
*/
tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
tp->t_flags |= XFS_TRANS_RESERVE;
error = xfs_trans_reserve(tp,
resblks,
XFS_WRITE_LOG_RES(mp),
0,
XFS_TRANS_PERM_LOG_RES,
XFS_WRITE_LOG_COUNT);
/*
* check for running out of space
*/
if (error) {
/*
* Free the transaction structure.
*/
ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
xfs_trans_cancel(tp, 0);
break;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_trans_reserve_quota(tp, mp,
ip->i_udquot, ip->i_gdquot,
resblks, 0, XFS_QMOPT_RES_REGBLKS);
if (error)
goto error1;
xfs_trans_ijoin(tp, ip, 0);
/*
* issue the bunmapi() call to free the blocks
*/
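/*
 * The literal "2" caps the number of extents unmapped per call so the
 * work stays within this transaction's reservation; the outer loop
 * repeats until bunmapi reports done.
 */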
xfs_bmap_init(&free_list, &firstfsb);
error = xfs_bunmapi(tp, ip, startoffset_fsb,
endoffset_fsb - startoffset_fsb,
0, 2, &firstfsb, &free_list, &done);
if (error) {
goto error0;
}
/*
* complete the transaction
*/
error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error) {
goto error0;
}
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
out_unlock_iolock:
if (need_iolock)
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
return error;
error0:
xfs_bmap_cancel(&free_list);
error1:
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
xfs_iunlock(ip, need_iolock ? (XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL) :
XFS_ILOCK_EXCL);
return error;
}
STATIC int
xfs_zero_file_space(
struct xfs_inode *ip,
xfs_off_t offset,
xfs_off_t len,
int attr_flags)
{
struct xfs_mount *mp = ip->i_mount;
uint granularity;
xfs_off_t start_boundary;
xfs_off_t end_boundary;
int error;
granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
/*
* Round the range of extents we are going to convert inwards. If the
* offset is aligned, then it doesn't get changed so we zero from the
* start of the block the offset points to.
*/
start_boundary = round_up(offset, granularity);
end_boundary = round_down(offset + len, granularity);
ASSERT(start_boundary >= offset);
ASSERT(end_boundary <= offset + len);
if (!(attr_flags & XFS_ATTR_NOLOCK))
xfs_ilock(ip, XFS_IOLOCK_EXCL);
if (start_boundary < end_boundary - 1) {
/* punch out the page cache over the conversion range */
truncate_pagecache_range(VFS_I(ip), start_boundary,
end_boundary - 1);
/* convert the blocks */
error = xfs_alloc_file_space(ip, start_boundary,
end_boundary - start_boundary - 1,
XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT,
attr_flags);
if (error)
goto out_unlock;
/* We've handled the interior of the range, now for the edges */
if (start_boundary != offset)
error = xfs_iozero(ip, offset, start_boundary - offset);
if (error)
goto out_unlock;
if (end_boundary != offset + len)
error = xfs_iozero(ip, end_boundary,
offset + len - end_boundary);
} else {
/*
* It's either a sub-granularity range or the range spanned lies
* partially across two adjacent blocks.
*/
error = xfs_iozero(ip, offset, len);
}
out_unlock:
if (!(attr_flags & XFS_ATTR_NOLOCK))
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
return error;
}
/*
* xfs_change_file_space()
* This routine allocates or frees disk space for the given file.
* The user specified parameters are checked for alignment and size
* limitations.
*
* RETURNS:
* 0 on success
* errno on error
*
*/
int
xfs_change_file_space(
xfs_inode_t *ip,
int cmd,
xfs_flock64_t *bf,
xfs_off_t offset,
int attr_flags)
{
xfs_mount_t *mp = ip->i_mount;
int clrprealloc;
int error;
xfs_fsize_t fsize;
int setprealloc;
xfs_off_t startoffset;
xfs_trans_t *tp;
struct iattr iattr;
if (!S_ISREG(ip->i_d.di_mode))
return XFS_ERROR(EINVAL);
switch (bf->l_whence) {
case 0: /*SEEK_SET*/
break;
case 1: /*SEEK_CUR*/
bf->l_start += offset;
break;
case 2: /*SEEK_END*/
bf->l_start += XFS_ISIZE(ip);
break;
default:
return XFS_ERROR(EINVAL);
}
/*
* length of <= 0 for resv/unresv/zero is invalid. length for
* alloc/free is ignored completely and we have no idea what userspace
* might have set it to, so set it to zero to allow range
* checks to pass.
*/
switch (cmd) {
case XFS_IOC_ZERO_RANGE:
case XFS_IOC_RESVSP:
case XFS_IOC_RESVSP64:
case XFS_IOC_UNRESVSP:
case XFS_IOC_UNRESVSP64:
if (bf->l_len <= 0)
return XFS_ERROR(EINVAL);
break;
default:
bf->l_len = 0;
break;
}
if (bf->l_start < 0 ||
bf->l_start > mp->m_super->s_maxbytes ||
bf->l_start + bf->l_len < 0 ||
bf->l_start + bf->l_len >= mp->m_super->s_maxbytes)
return XFS_ERROR(EINVAL);
bf->l_whence = 0;
startoffset = bf->l_start;
fsize = XFS_ISIZE(ip);
setprealloc = clrprealloc = 0;
switch (cmd) {
case XFS_IOC_ZERO_RANGE:
error = xfs_zero_file_space(ip, startoffset, bf->l_len,
attr_flags);
if (error)
return error;
setprealloc = 1;
break;
case XFS_IOC_RESVSP:
case XFS_IOC_RESVSP64:
error = xfs_alloc_file_space(ip, startoffset, bf->l_len,
XFS_BMAPI_PREALLOC, attr_flags);
if (error)
return error;
setprealloc = 1;
break;
case XFS_IOC_UNRESVSP:
case XFS_IOC_UNRESVSP64:
if ((error = xfs_free_file_space(ip, startoffset, bf->l_len,
attr_flags)))
return error;
break;
case XFS_IOC_ALLOCSP:
case XFS_IOC_ALLOCSP64:
case XFS_IOC_FREESP:
case XFS_IOC_FREESP64:
/*
* These operations actually do IO when extending the file, but
* the allocation is done separately from the zeroing that is
* done. This set of operations needs to be serialised against
* other IO operations, such as truncate and buffered IO. We
* need to take the IOLOCK here to serialise the allocation and
* zeroing IO to prevent other IOLOCK holders (e.g. getbmap,
* truncate, direct IO) from racing against the transient
* allocated but not written state we can have here.
*/
xfs_ilock(ip, XFS_IOLOCK_EXCL);
if (startoffset > fsize) {
error = xfs_alloc_file_space(ip, fsize,
startoffset - fsize, 0,
attr_flags | XFS_ATTR_NOLOCK);
if (error) {
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
break;
}
}
iattr.ia_valid = ATTR_SIZE;
iattr.ia_size = startoffset;
error = xfs_setattr_size(ip, &iattr,
attr_flags | XFS_ATTR_NOLOCK);
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
if (error)
return error;
clrprealloc = 1;
break;
default:
ASSERT(0);
return XFS_ERROR(EINVAL);
}
/*
* update the inode timestamp, mode, and prealloc flag bits
*/
tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
if ((error = xfs_trans_reserve(tp, 0, XFS_WRITEID_LOG_RES(mp),
0, 0, 0))) {
/* ASSERT(0); */
xfs_trans_cancel(tp, 0);
return error;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
if ((attr_flags & XFS_ATTR_DMI) == 0) {
ip->i_d.di_mode &= ~S_ISUID;
/*
* Note that we don't have to worry about mandatory
* file locking being disabled here because we only
* clear the S_ISGID bit if the Group execute bit is
* on, but if it was on then mandatory locking wouldn't
* have been enabled.
*/
if (ip->i_d.di_mode & S_IXGRP)
ip->i_d.di_mode &= ~S_ISGID;
xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
}
if (setprealloc)
ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
else if (clrprealloc)
ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
if (attr_flags & XFS_ATTR_SYNC)
xfs_trans_set_sync(tp);
return xfs_trans_commit(tp, 0);
}
| gpl-2.0 |
zephiK/android_kernel_moto_shamuLP | tools/perf/tests/dso-data.c | 2331 | 3121 | #include "util.h"
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <string.h>
#include "machine.h"
#include "symbol.h"
#include "tests.h"
#define TEST_ASSERT_VAL(text, cond) \
do { \
if (!(cond)) { \
pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
return -1; \
} \
} while (0)
static char *test_file(int size)
{
static char buf_templ[] = "/tmp/test-XXXXXX";
char *templ = buf_templ;
int fd, i;
unsigned char *buf;
fd = mkstemp(templ);
if (fd < 0) {
perror("mkstemp failed");
return NULL;
}
buf = malloc(size);
if (!buf) {
close(fd);
return NULL;
}
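/*
 * Fill the file with a repeating 0..9 byte pattern so the byte at any
 * file offset o can be verified as o % 10.
 */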
for (i = 0; i < size; i++)
buf[i] = (unsigned char) ((int) i % 10);
if (size != write(fd, buf, size))
templ = NULL;
free(buf);
close(fd);
return templ;
}
#define TEST_FILE_SIZE (DSO__DATA_CACHE_SIZE * 20)
struct test_data_offset {
off_t offset;
u8 data[10];
int size;
};
struct test_data_offset offsets[] = {
/* Fill first cache page. */
{
.offset = 10,
.data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
.size = 10,
},
/* Read first cache page. */
{
.offset = 10,
.data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
.size = 10,
},
/* Fill cache boundary pages. */
{
.offset = DSO__DATA_CACHE_SIZE - DSO__DATA_CACHE_SIZE % 10,
.data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
.size = 10,
},
/* Read cache boundary pages. */
{
.offset = DSO__DATA_CACHE_SIZE - DSO__DATA_CACHE_SIZE % 10,
.data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
.size = 10,
},
/* Fill final cache page. */
{
.offset = TEST_FILE_SIZE - 10,
.data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
.size = 10,
},
/* Read final cache page. */
{
.offset = TEST_FILE_SIZE - 10,
.data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
.size = 10,
},
/* Read final cache page. */
{
.offset = TEST_FILE_SIZE - 3,
.data = { 7, 8, 9, 0, 0, 0, 0, 0, 0, 0 },
.size = 3,
},
};
int test__dso_data(void)
{
struct machine machine;
struct dso *dso;
char *file = test_file(TEST_FILE_SIZE);
size_t i;
TEST_ASSERT_VAL("No test file", file);
memset(&machine, 0, sizeof(machine));
dso = dso__new((const char *)file);
/* Basic 10 bytes tests. */
for (i = 0; i < ARRAY_SIZE(offsets); i++) {
struct test_data_offset *data = &offsets[i];
ssize_t size;
u8 buf[10];
memset(buf, 0, 10);
size = dso__data_read_offset(dso, &machine, data->offset,
buf, 10);
TEST_ASSERT_VAL("Wrong size", size == data->size);
TEST_ASSERT_VAL("Wrong data", !memcmp(buf, data->data, 10));
}
/* Read cross multiple cache pages. */
{
ssize_t size;
int c;
u8 *buf;
buf = malloc(TEST_FILE_SIZE);
TEST_ASSERT_VAL("ENOMEM\n", buf);
/* First iteration to fill caches, second one to read them. */
for (c = 0; c < 2; c++) {
memset(buf, 0, TEST_FILE_SIZE);
size = dso__data_read_offset(dso, &machine, 10,
buf, TEST_FILE_SIZE);
TEST_ASSERT_VAL("Wrong size",
size == (TEST_FILE_SIZE - 10));
for (i = 0; i < (size_t)size; i++)
TEST_ASSERT_VAL("Wrong data",
buf[i] == (i % 10));
}
free(buf);
}
dso__delete(dso);
unlink(file);
return 0;
}
| gpl-2.0 |
jollaman999/jolla-kernel_G_Gen3-Stock | drivers/i2c/busses/i2c-s3c2410.c | 4635 | 25965 | /* linux/drivers/i2c/busses/i2c-s3c2410.c
*
* Copyright (C) 2004,2005,2009 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C2410 I2C Controller
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of_i2c.h>
#include <linux/of_gpio.h>
#include <asm/irq.h>
#include <plat/regs-iic.h>
#include <plat/iic.h>
/* i2c controller state */
enum s3c24xx_i2c_state {
STATE_IDLE,
STATE_START,
STATE_READ,
STATE_WRITE,
STATE_STOP
};
enum s3c24xx_i2c_type {
TYPE_S3C2410,
TYPE_S3C2440,
};
struct s3c24xx_i2c {
spinlock_t lock;
wait_queue_head_t wait;
unsigned int suspended:1;
struct i2c_msg *msg;
unsigned int msg_num;
unsigned int msg_idx;
unsigned int msg_ptr;
unsigned int tx_setup;
unsigned int irq;
enum s3c24xx_i2c_state state;
unsigned long clkrate;
void __iomem *regs;
struct clk *clk;
struct device *dev;
struct resource *ioarea;
struct i2c_adapter adap;
struct s3c2410_platform_i2c *pdata;
int gpios[2];
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
#endif
};
/* default platform data removed, dev should always carry data. */
/* s3c24xx_i2c_is2440()
*
* return true if this is an s3c2440
*/
static inline int s3c24xx_i2c_is2440(struct s3c24xx_i2c *i2c)
{
struct platform_device *pdev = to_platform_device(i2c->dev);
enum s3c24xx_i2c_type type;
#ifdef CONFIG_OF
if (i2c->dev->of_node)
return of_device_is_compatible(i2c->dev->of_node,
"samsung,s3c2440-i2c");
#endif
type = platform_get_device_id(pdev)->driver_data;
return type == TYPE_S3C2440;
}
/* s3c24xx_i2c_master_complete
*
* complete the message and wake up the caller, using the given return code,
* or zero to mean ok.
*/
static inline void s3c24xx_i2c_master_complete(struct s3c24xx_i2c *i2c, int ret)
{
dev_dbg(i2c->dev, "master_complete %d\n", ret);
i2c->msg_ptr = 0;
i2c->msg = NULL;
i2c->msg_idx++;
i2c->msg_num = 0;
if (ret)
i2c->msg_idx = ret;
wake_up(&i2c->wait);
}
static inline void s3c24xx_i2c_disable_ack(struct s3c24xx_i2c *i2c)
{
unsigned long tmp;
tmp = readl(i2c->regs + S3C2410_IICCON);
writel(tmp & ~S3C2410_IICCON_ACKEN, i2c->regs + S3C2410_IICCON);
}
static inline void s3c24xx_i2c_enable_ack(struct s3c24xx_i2c *i2c)
{
unsigned long tmp;
tmp = readl(i2c->regs + S3C2410_IICCON);
writel(tmp | S3C2410_IICCON_ACKEN, i2c->regs + S3C2410_IICCON);
}
/* irq enable/disable functions */
static inline void s3c24xx_i2c_disable_irq(struct s3c24xx_i2c *i2c)
{
unsigned long tmp;
tmp = readl(i2c->regs + S3C2410_IICCON);
writel(tmp & ~S3C2410_IICCON_IRQEN, i2c->regs + S3C2410_IICCON);
}
static inline void s3c24xx_i2c_enable_irq(struct s3c24xx_i2c *i2c)
{
unsigned long tmp;
tmp = readl(i2c->regs + S3C2410_IICCON);
writel(tmp | S3C2410_IICCON_IRQEN, i2c->regs + S3C2410_IICCON);
}
/* s3c24xx_i2c_message_start
*
* put the start of a message onto the bus
*/
static void s3c24xx_i2c_message_start(struct s3c24xx_i2c *i2c,
struct i2c_msg *msg)
{
unsigned int addr = (msg->addr & 0x7f) << 1;
unsigned long stat;
unsigned long iiccon;
stat = 0;
stat |= S3C2410_IICSTAT_TXRXEN;
if (msg->flags & I2C_M_RD) {
stat |= S3C2410_IICSTAT_MASTER_RX;
addr |= 1;
} else
stat |= S3C2410_IICSTAT_MASTER_TX;
if (msg->flags & I2C_M_REV_DIR_ADDR)
addr ^= 1;
/* todo - check whether ack is wanted or not */
s3c24xx_i2c_enable_ack(i2c);
iiccon = readl(i2c->regs + S3C2410_IICCON);
writel(stat, i2c->regs + S3C2410_IICSTAT);
dev_dbg(i2c->dev, "START: %08lx to IICSTAT, %02x to DS\n", stat, addr);
writeb(addr, i2c->regs + S3C2410_IICDS);
/* delay here to ensure the data byte has gotten onto the bus
* before the transaction is started */
ndelay(i2c->tx_setup);
dev_dbg(i2c->dev, "iiccon, %08lx\n", iiccon);
writel(iiccon, i2c->regs + S3C2410_IICCON);
stat |= S3C2410_IICSTAT_START;
writel(stat, i2c->regs + S3C2410_IICSTAT);
}
static inline void s3c24xx_i2c_stop(struct s3c24xx_i2c *i2c, int ret)
{
unsigned long iicstat = readl(i2c->regs + S3C2410_IICSTAT);
dev_dbg(i2c->dev, "STOP\n");
/* stop the transfer */
iicstat &= ~S3C2410_IICSTAT_START;
writel(iicstat, i2c->regs + S3C2410_IICSTAT);
i2c->state = STATE_STOP;
s3c24xx_i2c_master_complete(i2c, ret);
s3c24xx_i2c_disable_irq(i2c);
}
/* helper functions to determine the current state in the set of
* messages we are sending */
/* is_lastmsg()
*
* returns TRUE if the current message is the last in the set
*/
static inline int is_lastmsg(struct s3c24xx_i2c *i2c)
{
return i2c->msg_idx >= (i2c->msg_num - 1);
}
/* is_msglast
*
* returns TRUE if this is the last byte in the current message
*/
static inline int is_msglast(struct s3c24xx_i2c *i2c)
{
return i2c->msg_ptr == i2c->msg->len-1;
}
/* is_msgend
*
* returns TRUE if we reached the end of the current message
*/
static inline int is_msgend(struct s3c24xx_i2c *i2c)
{
return i2c->msg_ptr >= i2c->msg->len;
}
/* i2c_s3c_irq_nextbyte
*
* process an interrupt and work out what to do
*/
static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
{
unsigned long tmp;
unsigned char byte;
int ret = 0;
switch (i2c->state) {
case STATE_IDLE:
dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __func__);
goto out;
case STATE_STOP:
dev_err(i2c->dev, "%s: called in STATE_STOP\n", __func__);
s3c24xx_i2c_disable_irq(i2c);
goto out_ack;
case STATE_START:
/* last thing we did was send a start condition on the
* bus, or started a new i2c message
*/
if (iicstat & S3C2410_IICSTAT_LASTBIT &&
!(i2c->msg->flags & I2C_M_IGNORE_NAK)) {
/* ack was not received... */
dev_dbg(i2c->dev, "ack was not received\n");
s3c24xx_i2c_stop(i2c, -ENXIO);
goto out_ack;
}
if (i2c->msg->flags & I2C_M_RD)
i2c->state = STATE_READ;
else
i2c->state = STATE_WRITE;
/* terminate the transfer if there is nothing to do
* as this is used by the i2c probe to find devices. */
if (is_lastmsg(i2c) && i2c->msg->len == 0) {
s3c24xx_i2c_stop(i2c, 0);
goto out_ack;
}
if (i2c->state == STATE_READ)
goto prepare_read;
/* fall through to the write state, as we will need to
* send a byte as well */
case STATE_WRITE:
/* we are writing data to the device... check for the
* end of the message, and if so, work out what to do
*/
if (!(i2c->msg->flags & I2C_M_IGNORE_NAK)) {
if (iicstat & S3C2410_IICSTAT_LASTBIT) {
dev_dbg(i2c->dev, "WRITE: No Ack\n");
s3c24xx_i2c_stop(i2c, -ECONNREFUSED);
goto out_ack;
}
}
retry_write:
if (!is_msgend(i2c)) {
byte = i2c->msg->buf[i2c->msg_ptr++];
writeb(byte, i2c->regs + S3C2410_IICDS);
/* delay after writing the byte to allow the
* data setup time on the bus, as writing the
* data to the register causes the first bit
* to appear on SDA, and SCL will change as
* soon as the interrupt is acknowledged */
ndelay(i2c->tx_setup);
} else if (!is_lastmsg(i2c)) {
/* we need to go to the next i2c message */
dev_dbg(i2c->dev, "WRITE: Next Message\n");
i2c->msg_ptr = 0;
i2c->msg_idx++;
i2c->msg++;
/* check to see if we need to do another message */
if (i2c->msg->flags & I2C_M_NOSTART) {
if (i2c->msg->flags & I2C_M_RD) {
/* cannot do this, the controller
* forces us to send a new START
* when we change direction */
s3c24xx_i2c_stop(i2c, -EINVAL);
}
goto retry_write;
} else {
/* send the new start */
s3c24xx_i2c_message_start(i2c, i2c->msg);
i2c->state = STATE_START;
}
} else {
/* send stop */
s3c24xx_i2c_stop(i2c, 0);
}
break;
case STATE_READ:
/* we have a byte of data in the data register, do
* something with it, and then work out whether we are
* going to do any more read/write
*/
byte = readb(i2c->regs + S3C2410_IICDS);
i2c->msg->buf[i2c->msg_ptr++] = byte;
prepare_read:
if (is_msglast(i2c)) {
/* last byte of buffer */
if (is_lastmsg(i2c))
s3c24xx_i2c_disable_ack(i2c);
} else if (is_msgend(i2c)) {
/* ok, we've read the entire buffer, see if there
* is anything else we need to do */
if (is_lastmsg(i2c)) {
/* last message, send stop and complete */
dev_dbg(i2c->dev, "READ: Send Stop\n");
s3c24xx_i2c_stop(i2c, 0);
} else {
/* go to the next transfer */
dev_dbg(i2c->dev, "READ: Next Transfer\n");
i2c->msg_ptr = 0;
i2c->msg_idx++;
i2c->msg++;
}
}
break;
}
/* acknowledge the IRQ and get back on with the work */
out_ack:
tmp = readl(i2c->regs + S3C2410_IICCON);
tmp &= ~S3C2410_IICCON_IRQPEND;
writel(tmp, i2c->regs + S3C2410_IICCON);
out:
return ret;
}
/* s3c24xx_i2c_irq
*
* top level IRQ servicing routine
*/
static irqreturn_t s3c24xx_i2c_irq(int irqno, void *dev_id)
{
struct s3c24xx_i2c *i2c = dev_id;
unsigned long status;
unsigned long tmp;
status = readl(i2c->regs + S3C2410_IICSTAT);
if (status & S3C2410_IICSTAT_ARBITR) {
/* deal with arbitration loss */
dev_err(i2c->dev, "deal with arbitration loss\n");
}
if (i2c->state == STATE_IDLE) {
dev_dbg(i2c->dev, "IRQ: error i2c->state == IDLE\n");
tmp = readl(i2c->regs + S3C2410_IICCON);
tmp &= ~S3C2410_IICCON_IRQPEND;
writel(tmp, i2c->regs + S3C2410_IICCON);
goto out;
}
/* pretty much this leaves us with the fact that we've
* transmitted or received whatever byte we last sent */
i2c_s3c_irq_nextbyte(i2c, status);
out:
return IRQ_HANDLED;
}
/* s3c24xx_i2c_set_master
*
* get the i2c bus for a master transaction
*/
static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
{
unsigned long iicstat;
int timeout = 400;
while (timeout-- > 0) {
iicstat = readl(i2c->regs + S3C2410_IICSTAT);
if (!(iicstat & S3C2410_IICSTAT_BUSBUSY))
return 0;
msleep(1);
}
return -ETIMEDOUT;
}
/* s3c24xx_i2c_doxfer
*
* this starts an i2c transfer
*/
static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
struct i2c_msg *msgs, int num)
{
unsigned long iicstat, timeout;
int spins = 20;
int ret;
if (i2c->suspended)
return -EIO;
ret = s3c24xx_i2c_set_master(i2c);
if (ret != 0) {
dev_err(i2c->dev, "cannot get bus (error %d)\n", ret);
ret = -EAGAIN;
goto out;
}
spin_lock_irq(&i2c->lock);
i2c->msg = msgs;
i2c->msg_num = num;
i2c->msg_ptr = 0;
i2c->msg_idx = 0;
i2c->state = STATE_START;
s3c24xx_i2c_enable_irq(i2c);
s3c24xx_i2c_message_start(i2c, msgs);
spin_unlock_irq(&i2c->lock);
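/* Wait for the interrupt handler to complete all messages (5s timeout). */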
timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
ret = i2c->msg_idx;
/* having these next two as dev_err() makes life very
* noisy when doing an i2cdetect */
if (timeout == 0)
dev_dbg(i2c->dev, "timeout\n");
else if (ret != num)
dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);
/* ensure the stop has been through the bus */
dev_dbg(i2c->dev, "waiting for bus idle\n");
/* first, try busy waiting briefly */
do {
cpu_relax();
iicstat = readl(i2c->regs + S3C2410_IICSTAT);
} while ((iicstat & S3C2410_IICSTAT_START) && --spins);
/* if that timed out sleep */
if (!spins) {
msleep(1);
iicstat = readl(i2c->regs + S3C2410_IICSTAT);
}
if (iicstat & S3C2410_IICSTAT_START)
dev_warn(i2c->dev, "timeout waiting for bus idle\n");
out:
return ret;
}
/* s3c24xx_i2c_xfer
*
* first port of call from the i2c bus code when a message needs
* transferring across the i2c bus.
*/
static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs, int num)
{
struct s3c24xx_i2c *i2c = (struct s3c24xx_i2c *)adap->algo_data;
int retry;
int ret;
pm_runtime_get_sync(&adap->dev);
clk_enable(i2c->clk);
for (retry = 0; retry < adap->retries; retry++) {
ret = s3c24xx_i2c_doxfer(i2c, msgs, num);
if (ret != -EAGAIN) {
clk_disable(i2c->clk);
pm_runtime_put_sync(&adap->dev);
return ret;
}
dev_dbg(i2c->dev, "Retrying transmission (%d)\n", retry);
udelay(100);
}
clk_disable(i2c->clk);
pm_runtime_put_sync(&adap->dev);
return -EREMOTEIO;
}
/* declare our i2c functionality */
static u32 s3c24xx_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
}
/* i2c bus registration info */
static const struct i2c_algorithm s3c24xx_i2c_algorithm = {
.master_xfer = s3c24xx_i2c_xfer,
.functionality = s3c24xx_i2c_func,
};
/* s3c24xx_i2c_calcdivisor
*
* return the divisor settings for a given frequency
*/
static int s3c24xx_i2c_calcdivisor(unsigned long clkin, unsigned int wanted,
unsigned int *div1, unsigned int *divs)
{
unsigned int calc_divs = clkin / wanted;
unsigned int calc_div1;
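/*
 * The controller divides the input clock first by a fixed prescaler of
 * either 16 or 512 (the IICCON TXDIV setting), then by the IICCON
 * scale value plus one. Fall back to the /512 prescaler only when /16
 * cannot reach the wanted rate.
 */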
if (calc_divs > (16*16))
calc_div1 = 512;
else
calc_div1 = 16;
calc_divs += calc_div1-1;
calc_divs /= calc_div1;
if (calc_divs == 0)
calc_divs = 1;
if (calc_divs > 17)
calc_divs = 17;
*divs = calc_divs;
*div1 = calc_div1;
return clkin / (calc_divs * calc_div1);
}
/* s3c24xx_i2c_clockrate
*
* work out a divisor for the user requested frequency setting,
* either by the requested frequency, or scanning the acceptable
* range of frequencies until something is found
*/
static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
{
struct s3c2410_platform_i2c *pdata = i2c->pdata;
unsigned long clkin = clk_get_rate(i2c->clk);
unsigned int divs, div1;
unsigned long target_frequency;
u32 iiccon;
int freq;
i2c->clkrate = clkin;
clkin /= 1000; /* clkin now in KHz */
dev_dbg(i2c->dev, "pdata desired frequency %lu\n", pdata->frequency);
target_frequency = pdata->frequency ? pdata->frequency : 100000;
target_frequency /= 1000; /* Target frequency now in KHz */
freq = s3c24xx_i2c_calcdivisor(clkin, target_frequency, &div1, &divs);
if (freq > target_frequency) {
dev_err(i2c->dev,
"Unable to achieve desired frequency %luKHz." \
" Lowest achievable %dKHz\n", target_frequency, freq);
return -EINVAL;
}
*got = freq;
iiccon = readl(i2c->regs + S3C2410_IICCON);
iiccon &= ~(S3C2410_IICCON_SCALEMASK | S3C2410_IICCON_TXDIV_512);
iiccon |= (divs-1);
if (div1 == 512)
iiccon |= S3C2410_IICCON_TXDIV_512;
writel(iiccon, i2c->regs + S3C2410_IICCON);
if (s3c24xx_i2c_is2440(i2c)) {
unsigned long sda_delay;
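/*
 * pdata->sda_delay is in ns; convert it to input clock cycles and
 * then to the IICLC field's units (apparently 5 clocks per step),
 * clamping to the largest encodable delay.
 */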
if (pdata->sda_delay) {
sda_delay = clkin * pdata->sda_delay;
sda_delay = DIV_ROUND_UP(sda_delay, 1000000);
sda_delay = DIV_ROUND_UP(sda_delay, 5);
if (sda_delay > 3)
sda_delay = 3;
sda_delay |= S3C2410_IICLC_FILTER_ON;
} else
sda_delay = 0;
dev_dbg(i2c->dev, "IICLC=%08lx\n", sda_delay);
writel(sda_delay, i2c->regs + S3C2440_IICLC);
}
return 0;
}
#ifdef CONFIG_CPU_FREQ
#define freq_to_i2c(_n) container_of(_n, struct s3c24xx_i2c, freq_transition)
static int s3c24xx_i2c_cpufreq_transition(struct notifier_block *nb,
unsigned long val, void *data)
{
struct s3c24xx_i2c *i2c = freq_to_i2c(nb);
unsigned long flags;
unsigned int got;
int delta_f;
int ret;
delta_f = clk_get_rate(i2c->clk) - i2c->clkrate;
/* if we're post-change and the input clock has slowed down
* or at pre-change and the clock is about to speed up, then
* adjust our clock rate. <0 is slow, >0 speedup.
*/
if ((val == CPUFREQ_POSTCHANGE && delta_f < 0) ||
(val == CPUFREQ_PRECHANGE && delta_f > 0)) {
spin_lock_irqsave(&i2c->lock, flags);
ret = s3c24xx_i2c_clockrate(i2c, &got);
spin_unlock_irqrestore(&i2c->lock, flags);
if (ret < 0)
dev_err(i2c->dev, "cannot find frequency\n");
else
dev_info(i2c->dev, "setting freq %d\n", got);
}
return 0;
}
static inline int s3c24xx_i2c_register_cpufreq(struct s3c24xx_i2c *i2c)
{
i2c->freq_transition.notifier_call = s3c24xx_i2c_cpufreq_transition;
return cpufreq_register_notifier(&i2c->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
}
static inline void s3c24xx_i2c_deregister_cpufreq(struct s3c24xx_i2c *i2c)
{
cpufreq_unregister_notifier(&i2c->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
}
#else
static inline int s3c24xx_i2c_register_cpufreq(struct s3c24xx_i2c *i2c)
{
return 0;
}
static inline void s3c24xx_i2c_deregister_cpufreq(struct s3c24xx_i2c *i2c)
{
}
#endif
#ifdef CONFIG_OF
static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
{
int idx, gpio, ret;
for (idx = 0; idx < 2; idx++) {
gpio = of_get_gpio(i2c->dev->of_node, idx);
if (!gpio_is_valid(gpio)) {
dev_err(i2c->dev, "invalid gpio[%d]: %d\n", idx, gpio);
goto free_gpio;
}
ret = gpio_request(gpio, "i2c-bus");
if (ret) {
dev_err(i2c->dev, "gpio [%d] request failed\n", gpio);
goto free_gpio;
}
}
return 0;
free_gpio:
while (--idx >= 0)
gpio_free(i2c->gpios[idx]);
return -EINVAL;
}
static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
{
unsigned int idx;
for (idx = 0; idx < 2; idx++)
gpio_free(i2c->gpios[idx]);
}
#else
static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
{
return 0;
}
static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
{
}
#endif
/* s3c24xx_i2c_init
*
* initialise the controller, set the IO lines and frequency
*/
static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c)
{
unsigned long iicon = S3C2410_IICCON_IRQEN | S3C2410_IICCON_ACKEN;
struct s3c2410_platform_i2c *pdata;
unsigned int freq;
/* get the platform data */
pdata = i2c->pdata;
/* initialise the gpio */
if (pdata->cfg_gpio)
pdata->cfg_gpio(to_platform_device(i2c->dev));
else
if (s3c24xx_i2c_parse_dt_gpio(i2c))
return -EINVAL;
/* write slave address */
writeb(pdata->slave_addr, i2c->regs + S3C2410_IICADD);
dev_info(i2c->dev, "slave address 0x%02x\n", pdata->slave_addr);
writel(iicon, i2c->regs + S3C2410_IICCON);
/* we need to work out the divisors for the clock... */
if (s3c24xx_i2c_clockrate(i2c, &freq) != 0) {
writel(0, i2c->regs + S3C2410_IICCON);
dev_err(i2c->dev, "cannot meet bus frequency required\n");
return -EINVAL;
}
/* todo - check that the i2c lines aren't being dragged anywhere */
dev_info(i2c->dev, "bus frequency set to %d KHz\n", freq);
dev_dbg(i2c->dev, "S3C2410_IICCON=0x%02lx\n", iicon);
return 0;
}
#ifdef CONFIG_OF
/* s3c24xx_i2c_parse_dt
*
* Parse the device tree node and retrieve the platform data.
*/
static void
s3c24xx_i2c_parse_dt(struct device_node *np, struct s3c24xx_i2c *i2c)
{
struct s3c2410_platform_i2c *pdata = i2c->pdata;
if (!np)
return;
pdata->bus_num = -1; /* i2c bus number is dynamically assigned */
of_property_read_u32(np, "samsung,i2c-sda-delay", &pdata->sda_delay);
of_property_read_u32(np, "samsung,i2c-slave-addr", &pdata->slave_addr);
of_property_read_u32(np, "samsung,i2c-max-bus-freq",
(u32 *)&pdata->frequency);
}
#else
static void
s3c24xx_i2c_parse_dt(struct device_node *np, struct s3c24xx_i2c *i2c)
{
return;
}
#endif
/* s3c24xx_i2c_probe
*
* called by the bus driver when a suitable device is found
*/
static int s3c24xx_i2c_probe(struct platform_device *pdev)
{
struct s3c24xx_i2c *i2c;
struct s3c2410_platform_i2c *pdata = NULL;
struct resource *res;
int ret;
if (!pdev->dev.of_node) {
pdata = pdev->dev.platform_data;
if (!pdata) {
dev_err(&pdev->dev, "no platform data\n");
return -EINVAL;
}
}
i2c = devm_kzalloc(&pdev->dev, sizeof(struct s3c24xx_i2c), GFP_KERNEL);
if (!i2c) {
dev_err(&pdev->dev, "no memory for state\n");
return -ENOMEM;
}
i2c->pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!i2c->pdata) {
ret = -ENOMEM;
goto err_noclk;
}
if (pdata)
memcpy(i2c->pdata, pdata, sizeof(*pdata));
else
s3c24xx_i2c_parse_dt(pdev->dev.of_node, i2c);
strlcpy(i2c->adap.name, "s3c2410-i2c", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &s3c24xx_i2c_algorithm;
i2c->adap.retries = 2;
i2c->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
i2c->tx_setup = 50;
spin_lock_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
/* find the clock and enable it */
i2c->dev = &pdev->dev;
i2c->clk = clk_get(&pdev->dev, "i2c");
if (IS_ERR(i2c->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
ret = -ENOENT;
goto err_noclk;
}
dev_dbg(&pdev->dev, "clock source %p\n", i2c->clk);
clk_enable(i2c->clk);
/* map the registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "cannot find IO resource\n");
ret = -ENOENT;
goto err_clk;
}
i2c->ioarea = request_mem_region(res->start, resource_size(res),
pdev->name);
if (i2c->ioarea == NULL) {
dev_err(&pdev->dev, "cannot request IO\n");
ret = -ENXIO;
goto err_clk;
}
i2c->regs = ioremap(res->start, resource_size(res));
if (i2c->regs == NULL) {
dev_err(&pdev->dev, "cannot map IO\n");
ret = -ENXIO;
goto err_ioarea;
}
dev_dbg(&pdev->dev, "registers %p (%p, %p)\n",
i2c->regs, i2c->ioarea, res);
/* setup info block for the i2c core */
i2c->adap.algo_data = i2c;
i2c->adap.dev.parent = &pdev->dev;
/* initialise the i2c controller */
ret = s3c24xx_i2c_init(i2c);
if (ret != 0)
goto err_iomap;
/* find the IRQ for this unit (note, this relies on the init call to
* ensure no current IRQs are pending)
*/
i2c->irq = ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
dev_err(&pdev->dev, "cannot find IRQ\n");
goto err_iomap;
}
ret = request_irq(i2c->irq, s3c24xx_i2c_irq, 0,
dev_name(&pdev->dev), i2c);
if (ret != 0) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
goto err_iomap;
}
ret = s3c24xx_i2c_register_cpufreq(i2c);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register cpufreq notifier\n");
goto err_irq;
}
/* Note, previous versions of the driver used i2c_add_adapter()
* to add the bus at any number. We now pass the bus number via
* the platform data, so if unset it will now default to always
* being bus 0.
*/
i2c->adap.nr = i2c->pdata->bus_num;
i2c->adap.dev.of_node = pdev->dev.of_node;
ret = i2c_add_numbered_adapter(&i2c->adap);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add bus to i2c core\n");
goto err_cpufreq;
}
of_i2c_register_devices(&i2c->adap);
platform_set_drvdata(pdev, i2c);
pm_runtime_enable(&pdev->dev);
pm_runtime_enable(&i2c->adap.dev);
dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev));
clk_disable(i2c->clk);
return 0;
err_cpufreq:
s3c24xx_i2c_deregister_cpufreq(i2c);
err_irq:
free_irq(i2c->irq, i2c);
err_iomap:
iounmap(i2c->regs);
err_ioarea:
release_resource(i2c->ioarea);
kfree(i2c->ioarea);
err_clk:
clk_disable(i2c->clk);
clk_put(i2c->clk);
err_noclk:
return ret;
}
/* s3c24xx_i2c_remove
*
* called when device is removed from the bus
*/
static int s3c24xx_i2c_remove(struct platform_device *pdev)
{
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
pm_runtime_disable(&i2c->adap.dev);
pm_runtime_disable(&pdev->dev);
s3c24xx_i2c_deregister_cpufreq(i2c);
i2c_del_adapter(&i2c->adap);
free_irq(i2c->irq, i2c);
clk_disable(i2c->clk);
clk_put(i2c->clk);
iounmap(i2c->regs);
release_resource(i2c->ioarea);
s3c24xx_i2c_dt_gpio_free(i2c);
kfree(i2c->ioarea);
return 0;
}
#ifdef CONFIG_PM
static int s3c24xx_i2c_suspend_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
i2c->suspended = 1;
return 0;
}
static int s3c24xx_i2c_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
i2c->suspended = 0;
clk_enable(i2c->clk);
s3c24xx_i2c_init(i2c);
clk_disable(i2c->clk);
return 0;
}
static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
.suspend_noirq = s3c24xx_i2c_suspend_noirq,
.resume = s3c24xx_i2c_resume,
};
#define S3C24XX_DEV_PM_OPS (&s3c24xx_i2c_dev_pm_ops)
#else
#define S3C24XX_DEV_PM_OPS NULL
#endif
/* device driver for platform bus bits */
static struct platform_device_id s3c24xx_driver_ids[] = {
{
.name = "s3c2410-i2c",
.driver_data = TYPE_S3C2410,
}, {
.name = "s3c2440-i2c",
.driver_data = TYPE_S3C2440,
}, { },
};
MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
#ifdef CONFIG_OF
static const struct of_device_id s3c24xx_i2c_match[] = {
{ .compatible = "samsung,s3c2410-i2c" },
{ .compatible = "samsung,s3c2440-i2c" },
{},
};
MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
#else
#define s3c24xx_i2c_match NULL
#endif
static struct platform_driver s3c24xx_i2c_driver = {
.probe = s3c24xx_i2c_probe,
.remove = s3c24xx_i2c_remove,
.id_table = s3c24xx_driver_ids,
.driver = {
.owner = THIS_MODULE,
.name = "s3c-i2c",
.pm = S3C24XX_DEV_PM_OPS,
.of_match_table = s3c24xx_i2c_match,
},
};
static int __init i2c_adap_s3c_init(void)
{
return platform_driver_register(&s3c24xx_i2c_driver);
}
subsys_initcall(i2c_adap_s3c_init);
static void __exit i2c_adap_s3c_exit(void)
{
platform_driver_unregister(&s3c24xx_i2c_driver);
}
module_exit(i2c_adap_s3c_exit);
MODULE_DESCRIPTION("S3C24XX I2C Bus driver");
MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
thekraven/kernel_samsung_lt02ltespr | drivers/input/serio/ambakmi.c | 4891 | 4468 | /*
* linux/drivers/input/serio/ambakmi.c
*
* Copyright (C) 2000-2003 Deep Blue Solutions Ltd.
* Copyright (C) 2002 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
#include <linux/amba/kmi.h>
#include <linux/clk.h>
#include <asm/io.h>
#include <asm/irq.h>
#define KMI_BASE (kmi->base)
struct amba_kmi_port {
struct serio *io;
struct clk *clk;
void __iomem *base;
unsigned int irq;
unsigned int divisor;
unsigned int open;
};
static irqreturn_t amba_kmi_int(int irq, void *dev_id)
{
struct amba_kmi_port *kmi = dev_id;
unsigned int status = readb(KMIIR);
int handled = IRQ_NONE;
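/* Drain the receive FIFO, passing each byte up to the serio layer. */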
while (status & KMIIR_RXINTR) {
serio_interrupt(kmi->io, readb(KMIDATA), 0);
status = readb(KMIIR);
handled = IRQ_HANDLED;
}
return handled;
}
static int amba_kmi_write(struct serio *io, unsigned char val)
{
struct amba_kmi_port *kmi = io->port_data;
unsigned int timeleft = 10000; /* 10000 * 10us = ~100ms timeout */
while ((readb(KMISTAT) & KMISTAT_TXEMPTY) == 0 && --timeleft)
udelay(10);
if (timeleft)
writeb(val, KMIDATA);
return timeleft ? 0 : SERIO_TIMEOUT;
}
static int amba_kmi_open(struct serio *io)
{
struct amba_kmi_port *kmi = io->port_data;
unsigned int divisor;
int ret;
ret = clk_enable(kmi->clk);
if (ret)
goto out;
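/* The PL050 wants an 8MHz KMI reference clock; compute the
 * divisor from the actual input clock rate. */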
divisor = clk_get_rate(kmi->clk) / 8000000 - 1;
writeb(divisor, KMICLKDIV);
writeb(KMICR_EN, KMICR);
ret = request_irq(kmi->irq, amba_kmi_int, 0, "kmi-pl050", kmi);
if (ret) {
printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq);
writeb(0, KMICR);
goto clk_disable;
}
writeb(KMICR_EN | KMICR_RXINTREN, KMICR);
return 0;
clk_disable:
clk_disable(kmi->clk);
out:
return ret;
}
static void amba_kmi_close(struct serio *io)
{
struct amba_kmi_port *kmi = io->port_data;
writeb(0, KMICR);
free_irq(kmi->irq, kmi);
clk_disable(kmi->clk);
}
static int __devinit amba_kmi_probe(struct amba_device *dev,
const struct amba_id *id)
{
struct amba_kmi_port *kmi;
struct serio *io;
int ret;
ret = amba_request_regions(dev, NULL);
if (ret)
return ret;
kmi = kzalloc(sizeof(struct amba_kmi_port), GFP_KERNEL);
io = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (!kmi || !io) {
ret = -ENOMEM;
goto out;
}
io->id.type = SERIO_8042;
io->write = amba_kmi_write;
io->open = amba_kmi_open;
io->close = amba_kmi_close;
strlcpy(io->name, dev_name(&dev->dev), sizeof(io->name));
strlcpy(io->phys, dev_name(&dev->dev), sizeof(io->phys));
io->port_data = kmi;
io->dev.parent = &dev->dev;
kmi->io = io;
kmi->base = ioremap(dev->res.start, resource_size(&dev->res));
if (!kmi->base) {
ret = -ENOMEM;
goto out;
}
kmi->clk = clk_get(&dev->dev, "KMIREFCLK");
if (IS_ERR(kmi->clk)) {
ret = PTR_ERR(kmi->clk);
goto unmap;
}
kmi->irq = dev->irq[0];
amba_set_drvdata(dev, kmi);
serio_register_port(kmi->io);
return 0;
unmap:
iounmap(kmi->base);
out:
kfree(kmi);
kfree(io);
amba_release_regions(dev);
return ret;
}
static int __devexit amba_kmi_remove(struct amba_device *dev)
{
struct amba_kmi_port *kmi = amba_get_drvdata(dev);
amba_set_drvdata(dev, NULL);
serio_unregister_port(kmi->io);
clk_put(kmi->clk);
iounmap(kmi->base);
kfree(kmi);
amba_release_regions(dev);
return 0;
}
static int amba_kmi_resume(struct amba_device *dev)
{
struct amba_kmi_port *kmi = amba_get_drvdata(dev);
/* kick the serio layer to rescan this port */
serio_reconnect(kmi->io);
return 0;
}
static struct amba_id amba_kmi_idtable[] = {
{
.id = 0x00041050,
.mask = 0x000fffff,
},
{ 0, 0 }
};
MODULE_DEVICE_TABLE(amba, amba_kmi_idtable);
static struct amba_driver ambakmi_driver = {
.drv = {
.name = "kmi-pl050",
.owner = THIS_MODULE,
},
.id_table = amba_kmi_idtable,
.probe = amba_kmi_probe,
.remove = __devexit_p(amba_kmi_remove),
.resume = amba_kmi_resume,
};
module_amba_driver(ambakmi_driver);
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("AMBA KMI controller driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Trinityhaxxor/Trinity_Kernel_msm8660_XperiaS | drivers/staging/vt6655/IEEE11h.c | 5403 | 9034 | /*
* Copyright (c) 1996, 2005 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: IEEE11h.c
*
* Purpose:
*
* Functions:
*
* Revision History:
*
* Author: Yiching Chen
*
* Date: Mar. 31, 2005
*
*/
#include "ttype.h"
#include "tmacro.h"
#include "tether.h"
#include "IEEE11h.h"
#include "device.h"
#include "wmgr.h"
#include "rxtx.h"
#include "channel.h"
/*--------------------- Static Definitions -------------------------*/
static int msglevel = MSG_LEVEL_INFO;
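/* These structures mirror 802.11 frames as they appear on the air,
 * so they must be byte-packed. */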
#pragma pack(1)
typedef struct _WLAN_FRAME_ACTION {
WLAN_80211HDR_A3 Header;
unsigned char byCategory;
unsigned char byAction;
unsigned char abyVars[1];
} WLAN_FRAME_ACTION, *PWLAN_FRAME_ACTION;
typedef struct _WLAN_FRAME_MSRREQ {
WLAN_80211HDR_A3 Header;
unsigned char byCategory;
unsigned char byAction;
unsigned char byDialogToken;
WLAN_IE_MEASURE_REQ sMSRReqEIDs[1];
} WLAN_FRAME_MSRREQ, *PWLAN_FRAME_MSRREQ;
typedef struct _WLAN_FRAME_MSRREP {
WLAN_80211HDR_A3 Header;
unsigned char byCategory;
unsigned char byAction;
unsigned char byDialogToken;
WLAN_IE_MEASURE_REP sMSRRepEIDs[1];
} WLAN_FRAME_MSRREP, *PWLAN_FRAME_MSRREP;
typedef struct _WLAN_FRAME_TPCREQ {
WLAN_80211HDR_A3 Header;
unsigned char byCategory;
unsigned char byAction;
unsigned char byDialogToken;
WLAN_IE_TPC_REQ sTPCReqEIDs;
} WLAN_FRAME_TPCREQ, *PWLAN_FRAME_TPCREQ;
typedef struct _WLAN_FRAME_TPCREP {
WLAN_80211HDR_A3 Header;
unsigned char byCategory;
unsigned char byAction;
unsigned char byDialogToken;
WLAN_IE_TPC_REP sTPCRepEIDs;
} WLAN_FRAME_TPCREP, *PWLAN_FRAME_TPCREP;
#pragma pack()
/* action field reference ieee 802.11h Table 20e */
#define ACTION_MSRREQ 0
#define ACTION_MSRREP 1
#define ACTION_TPCREQ 2
#define ACTION_TPCREP 3
#define ACTION_CHSW 4
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
/*--------------------- Static Functions --------------------------*/
static bool s_bRxMSRReq(PSMgmtObject pMgmt, PWLAN_FRAME_MSRREQ pMSRReq,
unsigned int uLength)
{
size_t uNumOfEIDs = 0;
bool bResult = true;
if (uLength <= WLAN_A3FR_MAXLEN)
memcpy(pMgmt->abyCurrentMSRReq, pMSRReq, uLength);
uNumOfEIDs = ((uLength - offsetof(WLAN_FRAME_MSRREQ,
sMSRReqEIDs))/
(sizeof(WLAN_IE_MEASURE_REQ)));
pMgmt->pCurrMeasureEIDRep = &(((PWLAN_FRAME_MSRREP)
(pMgmt->abyCurrentMSRRep))->sMSRRepEIDs[0]);
pMgmt->uLengthOfRepEIDs = 0;
bResult = CARDbStartMeasure(pMgmt->pAdapter,
((PWLAN_FRAME_MSRREQ)
(pMgmt->abyCurrentMSRReq))->sMSRReqEIDs,
uNumOfEIDs
);
return bResult;
}
static bool s_bRxTPCReq(PSMgmtObject pMgmt,
PWLAN_FRAME_TPCREQ pTPCReq,
unsigned char byRate,
unsigned char byRSSI)
{
PWLAN_FRAME_TPCREP pFrame;
PSTxMgmtPacket pTxPacket = NULL;
pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN);
pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket +
sizeof(STxMgmtPacket));
pFrame = (PWLAN_FRAME_TPCREP)((unsigned char *)pTxPacket +
sizeof(STxMgmtPacket));
pFrame->Header.wFrameCtl = (WLAN_SET_FC_FTYPE(WLAN_FTYPE_MGMT) |
WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_ACTION)
);
memcpy(pFrame->Header.abyAddr1,
pTPCReq->Header.abyAddr2,
WLAN_ADDR_LEN);
memcpy(pFrame->Header.abyAddr2,
CARDpGetCurrentAddress(pMgmt->pAdapter),
WLAN_ADDR_LEN);
memcpy(pFrame->Header.abyAddr3,
pMgmt->abyCurrBSSID,
WLAN_BSSID_LEN);
pFrame->byCategory = 0; /* spectrum management */
pFrame->byAction = ACTION_TPCREP;
pFrame->byDialogToken = ((PWLAN_FRAME_MSRREQ)
(pMgmt->abyCurrentMSRReq))->byDialogToken;
pFrame->sTPCRepEIDs.byElementID = WLAN_EID_TPC_REP;
pFrame->sTPCRepEIDs.len = 2;
pFrame->sTPCRepEIDs.byTxPower = CARDbyGetTransmitPower(pMgmt->pAdapter);
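/* Link margin is reported as a per-rate constant (presumably the
 * receiver sensitivity bound for that rate) minus the measured RSSI. */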
switch (byRate) {
case RATE_54M:
pFrame->sTPCRepEIDs.byLinkMargin = 65 - byRSSI;
break;
case RATE_48M:
pFrame->sTPCRepEIDs.byLinkMargin = 66 - byRSSI;
break;
case RATE_36M:
pFrame->sTPCRepEIDs.byLinkMargin = 70 - byRSSI;
break;
case RATE_24M:
pFrame->sTPCRepEIDs.byLinkMargin = 74 - byRSSI;
break;
case RATE_18M:
pFrame->sTPCRepEIDs.byLinkMargin = 77 - byRSSI;
break;
case RATE_12M:
pFrame->sTPCRepEIDs.byLinkMargin = 79 - byRSSI;
break;
case RATE_9M:
pFrame->sTPCRepEIDs.byLinkMargin = 81 - byRSSI;
break;
case RATE_6M:
default:
pFrame->sTPCRepEIDs.byLinkMargin = 82 - byRSSI;
break;
}
pTxPacket->cbMPDULen = sizeof(WLAN_FRAME_TPCREP);
pTxPacket->cbPayloadLen = sizeof(WLAN_FRAME_TPCREP) -
WLAN_HDR_ADDR3_LEN;
if (csMgmt_xmit(pMgmt->pAdapter, pTxPacket) != CMD_STATUS_PENDING)
return false;
return true;
/* return (CARDbSendPacket(pMgmt->pAdapter, pFrame, PKT_TYPE_802_11_MNG,
sizeof(WLAN_FRAME_TPCREP))); */
}
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
/*+
*
* Description:
* Handles action management frames.
*
* Parameters:
* In:
* pMgmt - Management Object structure
* pRxPacket - Received packet
* Out:
* none
*
* Return Value: None.
*
-*/
bool
IEEE11hbMgrRxAction(void *pMgmtHandle, void *pRxPacket)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle;
PWLAN_FRAME_ACTION pAction = NULL;
unsigned int uLength = 0;
PWLAN_IE_CH_SW pChannelSwitch = NULL;
/* decode the frame */
uLength = ((PSRxMgmtPacket)pRxPacket)->cbMPDULen;
if (uLength > WLAN_A3FR_MAXLEN)
return false;
pAction = (PWLAN_FRAME_ACTION)
(((PSRxMgmtPacket)pRxPacket)->p80211Header);
if (pAction->byCategory == 0) {
switch (pAction->byAction) {
case ACTION_MSRREQ:
return s_bRxMSRReq(pMgmt,
(PWLAN_FRAME_MSRREQ)
pAction,
uLength);
case ACTION_MSRREP:
break;
case ACTION_TPCREQ:
return s_bRxTPCReq(pMgmt,
(PWLAN_FRAME_TPCREQ) pAction,
((PSRxMgmtPacket)pRxPacket)->byRxRate,
(unsigned char)
((PSRxMgmtPacket)pRxPacket)->uRSSI);
case ACTION_TPCREP:
break;
case ACTION_CHSW:
pChannelSwitch = (PWLAN_IE_CH_SW) (pAction->abyVars);
if ((pChannelSwitch->byElementID == WLAN_EID_CH_SWITCH)
&& (pChannelSwitch->len == 3)) {
/* valid element id */
CARDbChannelSwitch(pMgmt->pAdapter,
pChannelSwitch->byMode,
get_channel_mapping(pMgmt->pAdapter,
pChannelSwitch->byChannel,
pMgmt->eCurrentPHYMode),
pChannelSwitch->byCount);
}
break;
default:
DBG_PRT(MSG_LEVEL_DEBUG,
KERN_INFO"Unknown Action = %d\n",
pAction->byAction);
break;
}
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Unknown Category = %d\n",
pAction->byCategory);
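/* 802.11 wants frames with an unknown category returned to the
 * sender with the MSB of the category field set. */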
pAction->byCategory |= 0x80;
/*return (CARDbSendPacket(pMgmt->pAdapter, pAction, PKT_TYPE_802_11_MNG,
uLength));*/
return true;
}
return true;
}
bool IEEE11hbMSRRepTx(void *pMgmtHandle)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle;
PWLAN_FRAME_MSRREP pMSRRep = (PWLAN_FRAME_MSRREP)
(pMgmt->abyCurrentMSRRep + sizeof(STxMgmtPacket));
size_t uLength = 0;
PSTxMgmtPacket pTxPacket = NULL;
pTxPacket = (PSTxMgmtPacket)pMgmt->abyCurrentMSRRep;
memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN);
pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket +
sizeof(STxMgmtPacket));
pMSRRep->Header.wFrameCtl = (WLAN_SET_FC_FTYPE(WLAN_FTYPE_MGMT) |
WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_ACTION)
);
memcpy(pMSRRep->Header.abyAddr1, ((PWLAN_FRAME_MSRREQ)
(pMgmt->abyCurrentMSRReq))->Header.abyAddr2, WLAN_ADDR_LEN);
memcpy(pMSRRep->Header.abyAddr2,
CARDpGetCurrentAddress(pMgmt->pAdapter), WLAN_ADDR_LEN);
memcpy(pMSRRep->Header.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
pMSRRep->byCategory = 0; /* spectrum management */
pMSRRep->byAction = ACTION_MSRREP;
pMSRRep->byDialogToken = ((PWLAN_FRAME_MSRREQ)
(pMgmt->abyCurrentMSRReq))->byDialogToken;
uLength = pMgmt->uLengthOfRepEIDs + offsetof(WLAN_FRAME_MSRREP,
sMSRRepEIDs);
pTxPacket->cbMPDULen = uLength;
pTxPacket->cbPayloadLen = uLength - WLAN_HDR_ADDR3_LEN;
if (csMgmt_xmit(pMgmt->pAdapter, pTxPacket) != CMD_STATUS_PENDING)
return false;
return true;
/* return (CARDbSendPacket(pMgmt->pAdapter, pMSRRep, PKT_TYPE_802_11_MNG,
uLength)); */
}
| gpl-2.0 |
Snuzzo/B14CKB1RD_kernel_m8 | drivers/isdn/pcbit/drv.c | 7707 | 22246 | /*
* PCBIT-D interface with isdn4linux
*
* Copyright (C) 1996 Universidade de Lisboa
*
* Written by Pedro Roque Marques (roque@di.fc.ul.pt)
*
* This software may be used and distributed according to the terms of
* the GNU General Public License, incorporated herein by reference.
*/
/*
* Fixes:
*
* Nuno Grilo <l38486@alfa.ist.utl.pt>
* fixed msn_list NULL pointer dereference.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/isdnif.h>
#include <asm/string.h>
#include <asm/io.h>
#include <linux/ioport.h>
#include "pcbit.h"
#include "edss1.h"
#include "layer2.h"
#include "capi.h"
extern ushort last_ref_num;
static int pcbit_ioctl(isdn_ctrl *ctl);
static char *pcbit_devname[MAX_PCBIT_CARDS] = {
"pcbit0",
"pcbit1",
"pcbit2",
"pcbit3"
};
/*
* prototypes
*/
static int pcbit_command(isdn_ctrl *ctl);
static int pcbit_stat(u_char __user *buf, int len, int, int);
static int pcbit_xmit(int driver, int chan, int ack, struct sk_buff *skb);
static int pcbit_writecmd(const u_char __user *, int, int, int);
static int set_protocol_running(struct pcbit_dev *dev);
static void pcbit_clear_msn(struct pcbit_dev *dev);
static void pcbit_set_msn(struct pcbit_dev *dev, char *list);
static int pcbit_check_msn(struct pcbit_dev *dev, char *msn);
int pcbit_init_dev(int board, int mem_base, int irq)
{
struct pcbit_dev *dev;
isdn_if *dev_if;
if ((dev = kzalloc(sizeof(struct pcbit_dev), GFP_KERNEL)) == NULL)
{
printk("pcbit_init: couldn't malloc pcbit_dev struct\n");
return -ENOMEM;
}
dev_pcbit[board] = dev;
init_waitqueue_head(&dev->set_running_wq);
spin_lock_init(&dev->lock);
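/* The board's shared memory window must sit in the ISA upper
 * memory area (0xA0000-0xFFFFF). */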
if (mem_base >= 0xA0000 && mem_base <= 0xFFFFF) {
dev->ph_mem = mem_base;
if (!request_mem_region(dev->ph_mem, 4096, "PCBIT mem")) {
printk(KERN_WARNING
"PCBIT: memory region %lx-%lx already in use\n",
dev->ph_mem, dev->ph_mem + 4096);
kfree(dev);
dev_pcbit[board] = NULL;
return -EACCES;
}
dev->sh_mem = ioremap(dev->ph_mem, 4096);
}
else
{
printk("memory address invalid");
kfree(dev);
dev_pcbit[board] = NULL;
return -EACCES;
}
dev->b1 = kzalloc(sizeof(struct pcbit_chan), GFP_KERNEL);
if (!dev->b1) {
printk("pcbit_init: couldn't malloc pcbit_chan struct\n");
iounmap(dev->sh_mem);
release_mem_region(dev->ph_mem, 4096);
kfree(dev);
return -ENOMEM;
}
dev->b2 = kzalloc(sizeof(struct pcbit_chan), GFP_KERNEL);
if (!dev->b2) {
printk("pcbit_init: couldn't malloc pcbit_chan struct\n");
kfree(dev->b1);
iounmap(dev->sh_mem);
release_mem_region(dev->ph_mem, 4096);
kfree(dev);
return -ENOMEM;
}
dev->b2->id = 1;
INIT_WORK(&dev->qdelivery, pcbit_deliver);
/*
* interrupts
*/
if (request_irq(irq, &pcbit_irq_handler, 0, pcbit_devname[board], dev) != 0)
{
kfree(dev->b1);
kfree(dev->b2);
iounmap(dev->sh_mem);
release_mem_region(dev->ph_mem, 4096);
kfree(dev);
dev_pcbit[board] = NULL;
return -EIO;
}
dev->irq = irq;
/* next frame to be received */
dev->rcv_seq = 0;
dev->send_seq = 0;
dev->unack_seq = 0;
dev->hl_hdrlen = 16;
dev_if = kmalloc(sizeof(isdn_if), GFP_KERNEL);
if (!dev_if) {
free_irq(irq, dev);
kfree(dev->b1);
kfree(dev->b2);
iounmap(dev->sh_mem);
release_mem_region(dev->ph_mem, 4096);
kfree(dev);
dev_pcbit[board] = NULL;
return -EIO;
}
dev->dev_if = dev_if;
dev_if->owner = THIS_MODULE;
dev_if->channels = 2;
dev_if->features = (ISDN_FEATURE_P_EURO | ISDN_FEATURE_L3_TRANS |
ISDN_FEATURE_L2_HDLC | ISDN_FEATURE_L2_TRANS);
dev_if->writebuf_skb = pcbit_xmit;
dev_if->hl_hdrlen = 16;
dev_if->maxbufsize = MAXBUFSIZE;
dev_if->command = pcbit_command;
dev_if->writecmd = pcbit_writecmd;
dev_if->readstat = pcbit_stat;
strcpy(dev_if->id, pcbit_devname[board]);
if (!register_isdn(dev_if)) {
free_irq(irq, dev);
kfree(dev->b1);
kfree(dev->b2);
iounmap(dev->sh_mem);
release_mem_region(dev->ph_mem, 4096);
kfree(dev);
dev_pcbit[board] = NULL;
return -EIO;
}
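/* register_isdn() hands the assigned driver id back in the
 * channels field */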
dev->id = dev_if->channels;
dev->l2_state = L2_DOWN;
dev->free = 511;
/*
* set_protocol_running(dev);
*/
return 0;
}
#ifdef MODULE
void pcbit_terminate(int board)
{
struct pcbit_dev *dev;
dev = dev_pcbit[board];
if (dev) {
/* unregister_isdn(dev->dev_if); */
free_irq(dev->irq, dev);
pcbit_clear_msn(dev);
kfree(dev->dev_if);
if (dev->b1->fsm_timer.function)
del_timer(&dev->b1->fsm_timer);
if (dev->b2->fsm_timer.function)
del_timer(&dev->b2->fsm_timer);
kfree(dev->b1);
kfree(dev->b2);
iounmap(dev->sh_mem);
release_mem_region(dev->ph_mem, 4096);
kfree(dev);
}
}
#endif
static int pcbit_command(isdn_ctrl *ctl)
{
struct pcbit_dev *dev;
struct pcbit_chan *chan;
struct callb_data info;
dev = finddev(ctl->driver);
if (!dev)
{
printk("pcbit_command: unknown device\n");
return -1;
}
chan = (ctl->arg & 0x0F) ? dev->b2 : dev->b1;
switch (ctl->command) {
case ISDN_CMD_IOCTL:
return pcbit_ioctl(ctl);
case ISDN_CMD_DIAL:
info.type = EV_USR_SETUP_REQ;
info.data.setup.CalledPN = (char *) &ctl->parm.setup.phone;
pcbit_fsm_event(dev, chan, EV_USR_SETUP_REQ, &info);
break;
case ISDN_CMD_ACCEPTD:
pcbit_fsm_event(dev, chan, EV_USR_SETUP_RESP, NULL);
break;
case ISDN_CMD_ACCEPTB:
printk("ISDN_CMD_ACCEPTB - not really needed\n");
break;
case ISDN_CMD_HANGUP:
pcbit_fsm_event(dev, chan, EV_USR_RELEASE_REQ, NULL);
break;
case ISDN_CMD_SETL2:
chan->proto = (ctl->arg >> 8);
break;
case ISDN_CMD_CLREAZ:
pcbit_clear_msn(dev);
break;
case ISDN_CMD_SETEAZ:
pcbit_set_msn(dev, ctl->parm.num);
break;
case ISDN_CMD_SETL3:
if ((ctl->arg >> 8) != ISDN_PROTO_L3_TRANS)
printk(KERN_DEBUG "L3 protocol unknown\n");
break;
default:
printk(KERN_DEBUG "pcbit_command: unknown command\n");
break;
}
return 0;
}
/*
* Another Hack :-(
* on some conditions the board stops sending TDATA_CONFs
* let's see if we can turn around the problem
*/
#ifdef BLOCK_TIMER
static void pcbit_block_timer(unsigned long data)
{
struct pcbit_chan *chan;
struct pcbit_dev *dev;
isdn_ctrl ictl;
chan = (struct pcbit_chan *)data;
dev = chan2dev(chan);
if (dev == NULL) {
printk(KERN_DEBUG "pcbit: chan2dev failed\n");
return;
}
del_timer(&chan->block_timer);
chan->block_timer.function = NULL;
#ifdef DEBUG
printk(KERN_DEBUG "pcbit_block_timer\n");
#endif
chan->queued = 0;
ictl.driver = dev->id;
ictl.command = ISDN_STAT_BSENT;
ictl.arg = chan->id;
dev->dev_if->statcallb(&ictl);
}
#endif
static int pcbit_xmit(int driver, int chnum, int ack, struct sk_buff *skb)
{
ushort hdrlen;
int refnum, len;
struct pcbit_chan *chan;
struct pcbit_dev *dev;
dev = finddev(driver);
if (dev == NULL)
{
printk("finddev returned NULL");
return -1;
}
chan = chnum ? dev->b2 : dev->b1;
if (chan->fsm_state != ST_ACTIVE)
return -1;
if (chan->queued >= MAX_QUEUED)
{
#ifdef DEBUG_QUEUE
printk(KERN_DEBUG
"pcbit: %d packets already in queue - write fails\n",
chan->queued);
#endif
/*
* packet stays on the head of the device queue
* since dev_start_xmit will fail
* see net/core/dev.c
*/
#ifdef BLOCK_TIMER
if (chan->block_timer.function == NULL) {
init_timer(&chan->block_timer);
chan->block_timer.function = &pcbit_block_timer;
chan->block_timer.data = (long) chan;
chan->block_timer.expires = jiffies + 1 * HZ;
add_timer(&chan->block_timer);
}
#endif
return 0;
}
chan->queued++;
len = skb->len;
hdrlen = capi_tdata_req(chan, skb);
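/* message reference numbers are 15 bits wide and simply wrap */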
refnum = last_ref_num++ & 0x7fffU;
chan->s_refnum = refnum;
pcbit_l2_write(dev, MSG_TDATA_REQ, refnum, skb, hdrlen);
return len;
}
static int pcbit_writecmd(const u_char __user *buf, int len, int driver, int channel)
{
struct pcbit_dev *dev;
int i, j;
const u_char *loadbuf;
u_char *ptr = NULL;
u_char *cbuf;
int errstat;
dev = finddev(driver);
if (!dev)
{
printk("pcbit_writecmd: couldn't find device");
return -ENODEV;
}
switch (dev->l2_state) {
case L2_LWMODE:
/* check (size <= rdp_size); write buf into board */
if (len < 0 || len > BANK4 + 1 || len > 1024)
{
printk("pcbit_writecmd: invalid length %d\n", len);
return -EINVAL;
}
cbuf = memdup_user(buf, len);
if (IS_ERR(cbuf))
return PTR_ERR(cbuf);
memcpy_toio(dev->sh_mem, cbuf, len);
kfree(cbuf);
return len;
case L2_FWMODE:
/* this is the hard part */
/* dumb board */
/* get it into kernel space */
if ((ptr = kmalloc(len, GFP_KERNEL)) == NULL)
return -ENOMEM;
if (copy_from_user(ptr, buf, len)) {
kfree(ptr);
return -EFAULT;
}
loadbuf = ptr;
errstat = 0;
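/* Byte-wise download handshake: wait for the board to consume the
 * previous byte (flag at loadptr cleared), write the next data byte
 * at loadptr + 1, then raise the flag again. */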
for (i = 0; i < len; i++)
{
for (j = 0; j < LOAD_RETRY; j++)
if (!(readb(dev->sh_mem + dev->loadptr)))
break;
if (j == LOAD_RETRY)
{
errstat = -ETIME;
printk("TIMEOUT i=%d\n", i);
break;
}
writeb(loadbuf[i], dev->sh_mem + dev->loadptr + 1);
writeb(0x01, dev->sh_mem + dev->loadptr);
dev->loadptr += 2;
if (dev->loadptr > LOAD_ZONE_END)
dev->loadptr = LOAD_ZONE_START;
}
kfree(ptr);
return errstat ? errstat : len;
default:
return -EBUSY;
}
}
/*
* demultiplexing of messages
*
*/
void pcbit_l3_receive(struct pcbit_dev *dev, ulong msg,
struct sk_buff *skb,
ushort hdr_len, ushort refnum)
{
struct pcbit_chan *chan;
struct sk_buff *skb2;
unsigned short len;
struct callb_data cbdata;
int complete, err;
isdn_ctrl ictl;
switch (msg) {
case MSG_TDATA_IND:
if (!(chan = capi_channel(dev, skb))) {
printk(KERN_WARNING
"CAPI header: unknown channel id\n");
break;
}
chan->r_refnum = skb->data[7];
skb_pull(skb, 8);
dev->dev_if->rcvcallb_skb(dev->id, chan->id, skb);
if (capi_tdata_resp(chan, &skb2) > 0)
pcbit_l2_write(dev, MSG_TDATA_RESP, refnum,
skb2, skb2->len);
return;
case MSG_TDATA_CONF:
if (!(chan = capi_channel(dev, skb))) {
printk(KERN_WARNING
"CAPI header: unknown channel id\n");
break;
}
#ifdef DEBUG
if ((*((ushort *)(skb->data + 2))) != 0) {
printk(KERN_DEBUG "TDATA_CONF error\n");
}
#endif
#ifdef BLOCK_TIMER
if (chan->queued == MAX_QUEUED) {
del_timer(&chan->block_timer);
chan->block_timer.function = NULL;
}
#endif
chan->queued--;
ictl.driver = dev->id;
ictl.command = ISDN_STAT_BSENT;
ictl.arg = chan->id;
dev->dev_if->statcallb(&ictl);
break;
case MSG_CONN_IND:
/*
* channel: 1st not used will do
* if both are used we're in trouble
*/
if (!dev->b1->fsm_state)
chan = dev->b1;
else if (!dev->b2->fsm_state)
chan = dev->b2;
else {
printk(KERN_INFO
"Incoming connection: no channels available");
if ((len = capi_disc_req(*(ushort *)(skb->data), &skb2, CAUSE_NOCHAN)) > 0)
pcbit_l2_write(dev, MSG_DISC_REQ, refnum, skb2, len);
break;
}
cbdata.data.setup.CalledPN = NULL;
cbdata.data.setup.CallingPN = NULL;
capi_decode_conn_ind(chan, skb, &cbdata);
cbdata.type = EV_NET_SETUP;
pcbit_fsm_event(dev, chan, EV_NET_SETUP, NULL);
if (pcbit_check_msn(dev, cbdata.data.setup.CallingPN))
pcbit_fsm_event(dev, chan, EV_USR_PROCED_REQ, &cbdata);
else
pcbit_fsm_event(dev, chan, EV_USR_RELEASE_REQ, NULL);
kfree(cbdata.data.setup.CalledPN);
kfree(cbdata.data.setup.CallingPN);
break;
case MSG_CONN_CONF:
/*
* We should be able to find the channel by the message
* reference number. The current version of the firmware
* doesn't send the ref number correctly.
*/
#ifdef DEBUG
printk(KERN_DEBUG "refnum=%04x b1=%04x b2=%04x\n", refnum,
dev->b1->s_refnum,
dev->b2->s_refnum);
#endif
/* We just try to find a channel in the right state */
if (dev->b1->fsm_state == ST_CALL_INIT)
chan = dev->b1;
else {
if (dev->b2->fsm_state == ST_CALL_INIT)
chan = dev->b2;
else {
chan = NULL;
printk(KERN_WARNING "Connection Confirm - no channel in Call Init state\n");
break;
}
}
if (capi_decode_conn_conf(chan, skb, &complete)) {
printk(KERN_DEBUG "conn_conf indicates error\n");
pcbit_fsm_event(dev, chan, EV_ERROR, NULL);
}
else
if (complete)
pcbit_fsm_event(dev, chan, EV_NET_CALL_PROC, NULL);
else
pcbit_fsm_event(dev, chan, EV_NET_SETUP_ACK, NULL);
break;
case MSG_CONN_ACTV_IND:
if (!(chan = capi_channel(dev, skb))) {
printk(KERN_WARNING
"CAPI header: unknown channel id\n");
break;
}
if (capi_decode_conn_actv_ind(chan, skb)) {
printk("error in capi_decode_conn_actv_ind\n");
/* pcbit_fsm_event(dev, chan, EV_ERROR, NULL); */
break;
}
chan->r_refnum = refnum;
pcbit_fsm_event(dev, chan, EV_NET_CONN, NULL);
break;
case MSG_CONN_ACTV_CONF:
if (!(chan = capi_channel(dev, skb))) {
printk(KERN_WARNING
"CAPI header: unknown channel id\n");
break;
}
if (capi_decode_conn_actv_conf(chan, skb) == 0)
pcbit_fsm_event(dev, chan, EV_NET_CONN_ACK, NULL);
else
printk(KERN_DEBUG "decode_conn_actv_conf failed\n");
break;
case MSG_SELP_CONF:
if (!(chan = capi_channel(dev, skb))) {
printk(KERN_WARNING
"CAPI header: unknown channel id\n");
break;
}
if (!(err = capi_decode_sel_proto_conf(chan, skb)))
pcbit_fsm_event(dev, chan, EV_NET_SELP_RESP, NULL);
else {
/* Error */
printk("error %d - capi_decode_sel_proto_conf\n", err);
}
break;
case MSG_ACT_TRANSP_CONF:
if (!(chan = capi_channel(dev, skb))) {
printk(KERN_WARNING
"CAPI header: unknown channel id\n");
break;
}
if (!capi_decode_actv_trans_conf(chan, skb))
pcbit_fsm_event(dev, chan, EV_NET_ACTV_RESP, NULL);
break;
case MSG_DISC_IND:
if (!(chan = capi_channel(dev, skb))) {
printk(KERN_WARNING
"CAPI header: unknown channel id\n");
break;
}
if (!capi_decode_disc_ind(chan, skb))
pcbit_fsm_event(dev, chan, EV_NET_DISC, NULL);
else
printk(KERN_WARNING "capi_decode_disc_ind - error\n");
break;
case MSG_DISC_CONF:
if (!(chan = capi_channel(dev, skb))) {
printk(KERN_WARNING
"CAPI header: unknown channel id\n");
break;
}
if (!capi_decode_disc_ind(chan, skb))
pcbit_fsm_event(dev, chan, EV_NET_RELEASE, NULL);
else
printk(KERN_WARNING "capi_decode_disc_conf - error\n");
break;
case MSG_INFO_IND:
#ifdef DEBUG
printk(KERN_DEBUG "received Info Indication - discarded\n");
#endif
break;
#ifdef DEBUG
case MSG_DEBUG_188:
capi_decode_debug_188(skb->data, skb->len);
break;
default:
printk(KERN_DEBUG "pcbit_l3_receive: unknown message %08lx\n",
msg);
break;
#endif
}
kfree_skb(skb);
}
/*
* Single statbuf
* should be a statbuf per device
*/
static char statbuf[STATBUF_LEN];
static int stat_st = 0;
static int stat_end = 0;
static int pcbit_stat(u_char __user *buf, int len, int driver, int channel)
{
int stat_count;
stat_count = stat_end - stat_st;
if (stat_count < 0)
stat_count = STATBUF_LEN - stat_st + stat_end;
/* FIXME: should we sleep and wait for more cookies ? */
if (len > stat_count)
len = stat_count;
if (stat_st < stat_end)
{
if (copy_to_user(buf, statbuf + stat_st, len))
return -EFAULT;
stat_st += len;
}
else
{
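/* the readable region wraps past the end of statbuf:
 * copy the tail first, then continue from the start */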
if (len > STATBUF_LEN - stat_st)
{
if (copy_to_user(buf, statbuf + stat_st,
STATBUF_LEN - stat_st))
return -EFAULT;
if (copy_to_user(buf, statbuf,
len - (STATBUF_LEN - stat_st)))
return -EFAULT;
stat_st = len - (STATBUF_LEN - stat_st);
}
else
{
if (copy_to_user(buf, statbuf + stat_st, len))
return -EFAULT;
stat_st += len;
if (stat_st == STATBUF_LEN)
stat_st = 0;
}
}
if (stat_st == stat_end)
stat_st = stat_end = 0;
return len;
}
static void pcbit_logstat(struct pcbit_dev *dev, char *str)
{
int i;
isdn_ctrl ictl;
for (i = 0; i < strlen(str); i++)
{
statbuf[stat_end] = str[i];
stat_end = (stat_end + 1) % STATBUF_LEN;
if (stat_end == stat_st)
stat_st = (stat_st + 1) % STATBUF_LEN;
}
ictl.command = ISDN_STAT_STAVAIL;
ictl.driver = dev->id;
ictl.arg = strlen(str);
dev->dev_if->statcallb(&ictl);
}
void pcbit_state_change(struct pcbit_dev *dev, struct pcbit_chan *chan,
unsigned short i, unsigned short ev, unsigned short f)
{
char buf[256];
sprintf(buf, "change on device: %d channel:%d\n%s -> %s -> %s\n",
dev->id, chan->id,
isdn_state_table[i], strisdnevent(ev), isdn_state_table[f]
);
#ifdef DEBUG
printk("%s", buf);
#endif
pcbit_logstat(dev, buf);
}
static void set_running_timeout(unsigned long ptr)
{
struct pcbit_dev *dev;
#ifdef DEBUG
printk(KERN_DEBUG "set_running_timeout\n");
#endif
dev = (struct pcbit_dev *) ptr;
wake_up_interruptible(&dev->set_running_wq);
}
static int set_protocol_running(struct pcbit_dev *dev)
{
isdn_ctrl ctl;
init_timer(&dev->set_running_timer);
dev->set_running_timer.function = &set_running_timeout;
dev->set_running_timer.data = (ulong) dev;
dev->set_running_timer.expires = jiffies + SET_RUN_TIMEOUT;
/* kick it */
dev->l2_state = L2_STARTING;
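/* handshake byte: bit 7 set, receive sequence in bits 5-3,
 * send sequence in bits 2-0 */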
writeb((0x80U | ((dev->rcv_seq & 0x07) << 3) | (dev->send_seq & 0x07)),
dev->sh_mem + BANK4);
add_timer(&dev->set_running_timer);
interruptible_sleep_on(&dev->set_running_wq);
del_timer(&dev->set_running_timer);
if (dev->l2_state == L2_RUNNING)
{
printk(KERN_DEBUG "pcbit: running\n");
dev->unack_seq = dev->send_seq;
dev->writeptr = dev->sh_mem;
dev->readptr = dev->sh_mem + BANK2;
/* tell the good news to the upper layer */
ctl.driver = dev->id;
ctl.command = ISDN_STAT_RUN;
dev->dev_if->statcallb(&ctl);
}
else
{
printk(KERN_DEBUG "pcbit: initialization failed\n");
printk(KERN_DEBUG "pcbit: firmware not loaded\n");
dev->l2_state = L2_DOWN;
#ifdef DEBUG
printk(KERN_DEBUG "Bank3 = %02x\n",
readb(dev->sh_mem + BANK3));
#endif
writeb(0x40, dev->sh_mem + BANK4);
/* warn the upper layer */
ctl.driver = dev->id;
ctl.command = ISDN_STAT_STOP;
dev->dev_if->statcallb(&ctl);
return -EL2HLT; /* Level 2 halted */
}
return 0;
}
static int pcbit_ioctl(isdn_ctrl *ctl)
{
struct pcbit_dev *dev;
struct pcbit_ioctl *cmd;
dev = finddev(ctl->driver);
if (!dev)
{
printk(KERN_DEBUG "pcbit_ioctl: unknown device\n");
return -ENODEV;
}
cmd = (struct pcbit_ioctl *) ctl->parm.num;
switch (ctl->arg) {
case PCBIT_IOCTL_GETSTAT:
cmd->info.l2_status = dev->l2_state;
break;
case PCBIT_IOCTL_STRLOAD:
if (dev->l2_state == L2_RUNNING)
return -EBUSY;
dev->unack_seq = dev->send_seq = dev->rcv_seq = 0;
dev->writeptr = dev->sh_mem;
dev->readptr = dev->sh_mem + BANK2;
dev->l2_state = L2_LOADING;
break;
case PCBIT_IOCTL_LWMODE:
if (dev->l2_state != L2_LOADING)
return -EINVAL;
dev->l2_state = L2_LWMODE;
break;
case PCBIT_IOCTL_FWMODE:
if (dev->l2_state == L2_RUNNING)
return -EBUSY;
dev->loadptr = LOAD_ZONE_START;
dev->l2_state = L2_FWMODE;
break;
case PCBIT_IOCTL_ENDLOAD:
if (dev->l2_state == L2_RUNNING)
return -EBUSY;
dev->l2_state = L2_DOWN;
break;
case PCBIT_IOCTL_SETBYTE:
if (dev->l2_state == L2_RUNNING)
return -EBUSY;
/* check addr */
if (cmd->info.rdp_byte.addr > BANK4)
return -EFAULT;
writeb(cmd->info.rdp_byte.value, dev->sh_mem + cmd->info.rdp_byte.addr);
break;
case PCBIT_IOCTL_GETBYTE:
if (dev->l2_state == L2_RUNNING)
return -EBUSY;
/* check addr */
if (cmd->info.rdp_byte.addr > BANK4)
{
printk("getbyte: invalid addr %04x\n", cmd->info.rdp_byte.addr);
return -EFAULT;
}
cmd->info.rdp_byte.value = readb(dev->sh_mem + cmd->info.rdp_byte.addr);
break;
case PCBIT_IOCTL_RUNNING:
if (dev->l2_state == L2_RUNNING)
return -EBUSY;
return set_protocol_running(dev);
case PCBIT_IOCTL_WATCH188:
if (dev->l2_state != L2_LOADING)
return -EINVAL;
pcbit_l2_write(dev, MSG_WATCH188, 0x0001, NULL, 0);
break;
case PCBIT_IOCTL_PING188:
if (dev->l2_state != L2_LOADING)
return -EINVAL;
pcbit_l2_write(dev, MSG_PING188_REQ, 0x0001, NULL, 0);
break;
case PCBIT_IOCTL_APION:
if (dev->l2_state != L2_LOADING)
return -EINVAL;
pcbit_l2_write(dev, MSG_API_ON, 0x0001, NULL, 0);
break;
case PCBIT_IOCTL_STOP:
dev->l2_state = L2_DOWN;
writeb(0x40, dev->sh_mem + BANK4);
dev->rcv_seq = 0;
dev->send_seq = 0;
dev->unack_seq = 0;
break;
default:
printk("error: unknown ioctl\n");
break;
}
return 0;
}
/*
* MSN list handling
*
* if null reject all calls
* if first entry has null MSN accept all calls
*/
static void pcbit_clear_msn(struct pcbit_dev *dev)
{
struct msn_entry *ptr, *back;
for (ptr = dev->msn_list; ptr;)
{
back = ptr->next;
kfree(ptr);
ptr = back;
}
dev->msn_list = NULL;
}
static void pcbit_set_msn(struct pcbit_dev *dev, char *list)
{
struct msn_entry *ptr;
struct msn_entry *back = NULL;
char *cp, *sp;
int len;
if (strlen(list) == 0) {
ptr = kmalloc(sizeof(struct msn_entry), GFP_ATOMIC);
if (!ptr) {
printk(KERN_WARNING "kmalloc failed\n");
return;
}
ptr->msn = NULL;
ptr->next = dev->msn_list;
dev->msn_list = ptr;
return;
}
if (dev->msn_list)
for (back = dev->msn_list; back->next; back = back->next);
sp = list;
do {
cp = strchr(sp, ',');
if (cp)
len = cp - sp;
else
len = strlen(sp);
ptr = kmalloc(sizeof(struct msn_entry), GFP_ATOMIC);
if (!ptr) {
printk(KERN_WARNING "kmalloc failed\n");
return;
}
ptr->next = NULL;
ptr->msn = kmalloc(len + 1, GFP_ATOMIC);
if (!ptr->msn) {
printk(KERN_WARNING "kmalloc failed\n");
kfree(ptr);
return;
}
memcpy(ptr->msn, sp, len);
ptr->msn[len] = '\0';
#ifdef DEBUG
printk(KERN_DEBUG "msn: %s\n", ptr->msn);
#endif
if (dev->msn_list == NULL)
dev->msn_list = ptr;
else
back->next = ptr;
back = ptr;
sp += len + 1; /* skip the ',' */
} while (cp);
}
/*
* check if we do signal or reject an incoming call
*/
static int pcbit_check_msn(struct pcbit_dev *dev, char *msn)
{
struct msn_entry *ptr;
for (ptr = dev->msn_list; ptr; ptr = ptr->next) {
if (ptr->msn == NULL)
return 1;
if (strcmp(ptr->msn, msn) == 0)
return 1;
}
return 0;
}
| gpl-2.0 |
icslover/android_kernel_samsung_aries | samples/tracepoints/tracepoint-sample.c | 8475 | 1212 | /* tracepoint-sample.c
*
* Executes a tracepoint when /proc/tracepoint-sample is opened.
*
* (C) Copyright 2007 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
*
* This file is released under the GPLv2.
* See the file COPYING for more details.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include "tp-samples-trace.h"
DEFINE_TRACE(subsys_event);
DEFINE_TRACE(subsys_eventb);
struct proc_dir_entry *pentry_sample;
static int my_open(struct inode *inode, struct file *file)
{
int i;
trace_subsys_event(inode, file);
for (i = 0; i < 10; i++)
trace_subsys_eventb();
return -EPERM;
}
static const struct file_operations mark_ops = {
.open = my_open,
.llseek = noop_llseek,
};
static int __init sample_init(void)
{
printk(KERN_ALERT "sample init\n");
pentry_sample = proc_create("tracepoint-sample", 0444, NULL,
&mark_ops);
if (!pentry_sample)
return -EPERM;
return 0;
}
static void __exit sample_exit(void)
{
printk(KERN_ALERT "sample exit\n");
remove_proc_entry("tracepoint-sample", NULL);
}
module_init(sample_init)
module_exit(sample_exit)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Tracepoint sample");
| gpl-2.0 |
MoKee/android_kernel_lge_sniper | fs/jfs/resize.c | 10267 | 15083 | /*
* Copyright (C) International Business Machines Corp., 2000-2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_dinode.h"
#include "jfs_imap.h"
#include "jfs_dmap.h"
#include "jfs_superblock.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"
#define BITSPERPAGE (PSIZE << 3)
#define L2MEGABYTE 20
#define MEGABYTE (1 << L2MEGABYTE)
#define MEGABYTE32 (MEGABYTE << 5)
/* convert block number to bmap file page number */
#define BLKTODMAPN(b)\
(((b) >> 13) + ((b) >> 23) + ((b) >> 33) + 3 + 1)
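/* roughly: one dmap page per 2^13 blocks, an intermediate control
 * page per 2^23 and per 2^33 blocks, plus the top-level control pages */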
/*
* jfs_extendfs()
*
* function: extend file system;
*
* |-------------------------------|----------|----------|
* file system space fsck inline log
* workspace space
*
* input:
* new LVSize: in LV blocks (required)
* new LogSize: in LV blocks (optional)
* new FSSize: in LV blocks (optional)
*
* new configuration:
* 1. set new LogSize as specified or default from new LVSize;
* 2. compute new FSCKSize from new LVSize;
* 3. set new FSSize as MIN(FSSize, LVSize-(LogSize+FSCKSize)) where
* assert(new FSSize >= old FSSize),
* i.e., file system must not be shrunk;
*/
int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
{
int rc = 0;
struct jfs_sb_info *sbi = JFS_SBI(sb);
struct inode *ipbmap = sbi->ipbmap;
struct inode *ipbmap2;
struct inode *ipimap = sbi->ipimap;
struct jfs_log *log = sbi->log;
struct bmap *bmp = sbi->bmap;
s64 newLogAddress, newFSCKAddress;
int newFSCKSize;
s64 newMapSize = 0, mapSize;
s64 XAddress, XSize, nblocks, xoff, xaddr, t64;
s64 oldLVSize;
s64 newFSSize;
s64 VolumeSize;
int newNpages = 0, nPages, newPage, xlen, t32;
int tid;
int log_formatted = 0;
struct inode *iplist[1];
struct jfs_superblock *j_sb, *j_sb2;
s64 old_agsize;
int agsizechanged = 0;
struct buffer_head *bh, *bh2;
/* If the volume hasn't grown, get out now */
if (sbi->mntflag & JFS_INLINELOG)
oldLVSize = addressPXD(&sbi->logpxd) + lengthPXD(&sbi->logpxd);
else
oldLVSize = addressPXD(&sbi->fsckpxd) +
lengthPXD(&sbi->fsckpxd);
if (oldLVSize >= newLVSize) {
printk(KERN_WARNING
"jfs_extendfs: volume hasn't grown, returning\n");
goto out;
}
VolumeSize = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
if (VolumeSize) {
if (newLVSize > VolumeSize) {
printk(KERN_WARNING "jfs_extendfs: invalid size\n");
rc = -EINVAL;
goto out;
}
} else {
/* check the device */
bh = sb_bread(sb, newLVSize - 1);
if (!bh) {
printk(KERN_WARNING "jfs_extendfs: invalid size\n");
rc = -EINVAL;
goto out;
}
bforget(bh);
}
/* Can't extend write-protected drive */
if (isReadOnly(ipbmap)) {
printk(KERN_WARNING "jfs_extendfs: read-only file system\n");
rc = -EROFS;
goto out;
}
/*
* reconfigure LV spaces
* ---------------------
*
* validate new size, or, if not specified, determine new size
*/
/*
* reconfigure inline log space:
*/
if ((sbi->mntflag & JFS_INLINELOG)) {
if (newLogSize == 0) {
/*
* no size specified: default to 1/256 of aggregate
* size; rounded up to a megabyte boundary;
*/
newLogSize = newLVSize >> 8;
t32 = (1 << (20 - sbi->l2bsize)) - 1;
newLogSize = (newLogSize + t32) & ~t32;
newLogSize =
min(newLogSize, MEGABYTE32 >> sbi->l2bsize);
} else {
/*
* convert the newLogSize to fs blocks.
*
* Since this is given in megabytes, it will always be
* an even number of pages.
*/
newLogSize = (newLogSize * MEGABYTE) >> sbi->l2bsize;
}
} else
newLogSize = 0;
newLogAddress = newLVSize - newLogSize;
/*
* reconfigure fsck work space:
*
* configure it to the end of the logical volume regardless of
* whether file system extends to the end of the aggregate;
* Need enough 4k pages to cover:
* - 1 bit per block in aggregate rounded up to BPERDMAP boundary
* - 1 extra page to handle control page and intermediate level pages
* - 50 extra pages for the chkdsk service log
*/
t64 = ((newLVSize - newLogSize + BPERDMAP - 1) >> L2BPERDMAP)
<< L2BPERDMAP;
t32 = DIV_ROUND_UP(t64, BITSPERPAGE) + 1 + 50;
newFSCKSize = t32 << sbi->l2nbperpage;
newFSCKAddress = newLogAddress - newFSCKSize;
/*
* compute new file system space;
*/
newFSSize = newLVSize - newLogSize - newFSCKSize;
/* file system cannot be shrunk */
if (newFSSize < bmp->db_mapsize) {
rc = -EINVAL;
goto out;
}
/*
* If we're expanding enough that the inline log does not overlap
* the old one, we can format the new log before we quiesce the
* filesystem.
*/
if ((sbi->mntflag & JFS_INLINELOG) && (newLogAddress > oldLVSize)) {
if ((rc = lmLogFormat(log, newLogAddress, newLogSize)))
goto out;
log_formatted = 1;
}
/*
* quiesce file system
*
* (prepare to move the inline log and to prevent map update)
*
* block any new transactions and wait for completion of
* all wip transactions and flush modified pages s.t.
* on-disk file system is in consistent state and
* log is not required for recovery.
*/
txQuiesce(sb);
/* Reset size of direct inode */
sbi->direct_inode->i_size = sb->s_bdev->bd_inode->i_size;
if (sbi->mntflag & JFS_INLINELOG) {
/*
* deactivate old inline log
*/
lmLogShutdown(log);
/*
* mark on-disk super block for fs in transition;
*
* update on-disk superblock for the new space configuration
* of inline log space and fsck work space descriptors:
* N.B. FS descriptor is NOT updated;
*
* crash recovery:
* logredo(): if FM_EXTENDFS, return to fsck() for cleanup;
* fsck(): if FM_EXTENDFS, reformat inline log and fsck
* workspace from superblock inline log descriptor and fsck
* workspace descriptor;
*/
/* read in superblock */
if ((rc = readSuper(sb, &bh)))
goto error_out;
j_sb = (struct jfs_superblock *)bh->b_data;
/* mark extendfs() in progress */
j_sb->s_state |= cpu_to_le32(FM_EXTENDFS);
j_sb->s_xsize = cpu_to_le64(newFSSize);
PXDaddress(&j_sb->s_xfsckpxd, newFSCKAddress);
PXDlength(&j_sb->s_xfsckpxd, newFSCKSize);
PXDaddress(&j_sb->s_xlogpxd, newLogAddress);
PXDlength(&j_sb->s_xlogpxd, newLogSize);
/* synchronously update superblock */
mark_buffer_dirty(bh);
sync_dirty_buffer(bh);
brelse(bh);
/*
* format new inline log synchronously;
*
* crash recovery: if log move in progress,
* reformat log and exit success;
*/
if (!log_formatted)
if ((rc = lmLogFormat(log, newLogAddress, newLogSize)))
goto error_out;
/*
* activate new log
*/
log->base = newLogAddress;
log->size = newLogSize >> (L2LOGPSIZE - sb->s_blocksize_bits);
if ((rc = lmLogInit(log)))
goto error_out;
}
/*
* extend block allocation map
* ---------------------------
*
* extendfs() for new extension, retry after crash recovery;
*
* note: both logredo() and fsck() rebuild map from
* the bitmap and configuration parameter from superblock
* (disregarding all other control information in the map);
*
* superblock:
* s_size: aggregate size in physical blocks;
*/
/*
* compute the new block allocation map configuration
*
* map dinode:
* di_size: map file size in byte;
* di_nblocks: number of blocks allocated for map file;
* di_mapsize: number of blocks in aggregate (covered by map);
* map control page:
* db_mapsize: number of blocks in aggregate (covered by map);
*/
newMapSize = newFSSize;
/* number of data pages of new bmap file:
* roundup new size to full dmap page boundary and
* add 1 extra dmap page for next extendfs()
*/
t64 = (newMapSize - 1) + BPERDMAP;
newNpages = BLKTODMAPN(t64) + 1;
/*
* extend map from current map (WITHOUT growing mapfile)
*
* map new extension with unmapped part of the last partial
* dmap page, if applicable, and extra page(s) allocated
* at end of bmap by mkfs() or previous extendfs();
*/
extendBmap:
/* compute number of blocks requested to extend */
mapSize = bmp->db_mapsize;
XAddress = mapSize; /* eXtension Address */
XSize = newMapSize - mapSize; /* eXtension Size */
old_agsize = bmp->db_agsize; /* We need to know if this changes */
/* compute number of blocks that can be extended by current mapfile */
t64 = dbMapFileSizeToMapSize(ipbmap);
if (mapSize > t64) {
printk(KERN_ERR "jfs_extendfs: mapSize (0x%Lx) > t64 (0x%Lx)\n",
(long long) mapSize, (long long) t64);
rc = -EIO;
goto error_out;
}
nblocks = min(t64 - mapSize, XSize);
/*
* update map pages for new extension:
*
* update/init dmap and bubble up the control hierarchy
* incrementally fold up dmaps into upper levels;
* update bmap control page;
*/
if ((rc = dbExtendFS(ipbmap, XAddress, nblocks)))
goto error_out;
agsizechanged |= (bmp->db_agsize != old_agsize);
/*
* the map now has extended to cover additional nblocks:
* dn_mapsize = oldMapsize + nblocks;
*/
/* ipbmap->i_mapsize += nblocks; */
XSize -= nblocks;
/*
* grow map file to cover remaining extension
* and/or one extra dmap page for next extendfs();
*
* allocate new map pages and its backing blocks, and
* update map file xtree
*/
/* compute number of data pages of current bmap file */
nPages = ipbmap->i_size >> L2PSIZE;
/* need to grow map file ? */
if (nPages == newNpages)
goto finalizeBmap;
/*
* grow bmap file for the new map pages required:
*
* allocate growth at the start of newly extended region;
* bmap file only grows sequentially, i.e., both data pages
* and possibly xtree index pages may grow in append mode,
* s.t. logredo() can reconstruct pre-extension state
* by washing away bmap file of pages outside s_size boundary;
*/
/*
* journal map file growth as if a regular file growth:
* (note: bmap is created with di_mode = IFJOURNAL|IFREG);
*
* journaling of bmap file growth is not required since
* logredo() do/can not use log records of bmap file growth
* but it provides careful write semantics, pmap update, etc.;
*/
/* synchronous write of data pages: bmap data pages are
* cached in meta-data cache, and not written out
* by txCommit();
*/
filemap_fdatawait(ipbmap->i_mapping);
filemap_write_and_wait(ipbmap->i_mapping);
diWriteSpecial(ipbmap, 0);
newPage = nPages; /* first new page number */
xoff = newPage << sbi->l2nbperpage;
xlen = (newNpages - nPages) << sbi->l2nbperpage;
xlen = min(xlen, (int) nblocks) & ~(sbi->nbperpage - 1);
xaddr = XAddress;
tid = txBegin(sb, COMMIT_FORCE);
if ((rc = xtAppend(tid, ipbmap, 0, xoff, nblocks, &xlen, &xaddr, 0))) {
txEnd(tid);
goto error_out;
}
/* update bmap file size */
ipbmap->i_size += xlen << sbi->l2bsize;
inode_add_bytes(ipbmap, xlen << sbi->l2bsize);
iplist[0] = ipbmap;
rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);
txEnd(tid);
if (rc)
goto error_out;
/*
* map file has been grown now to cover extension to further out;
* di_size = new map file size;
*
* if huge extension, the previous extension based on previous
* map file size may not have been sufficient to cover whole extension
* (it could have been used up for new map pages),
* but the newly grown map file now covers lot bigger new free space
* available for further extension of map;
*/
/* any more blocks to extend ? */
if (XSize)
goto extendBmap;
finalizeBmap:
/* finalize bmap */
dbFinalizeBmap(ipbmap);
/*
* update inode allocation map
* ---------------------------
*
* move iag lists from old to new iag;
* agstart field is not updated for logredo() to reconstruct
* iag lists if system crash occurs.
* (computation of ag number from agstart based on agsize
* will correctly identify the new ag);
*/
/* if new AG size the same as old AG size, done! */
if (agsizechanged) {
if ((rc = diExtendFS(ipimap, ipbmap)))
goto error_out;
/* finalize imap */
if ((rc = diSync(ipimap)))
goto error_out;
}
/*
* finalize
* --------
*
* extension is committed when on-disk super block is
* updated with new descriptors: logredo will recover
* crash before it to pre-extension state;
*/
/* sync log to skip log replay of bmap file growth transaction; */
/* lmLogSync(log, 1); */
/*
* synchronous write bmap global control page;
* for crash before completion of write
* logredo() will recover to pre-extendfs state;
* for crash after completion of write,
* logredo() will recover post-extendfs state;
*/
if ((rc = dbSync(ipbmap)))
goto error_out;
/*
* copy primary bmap inode to secondary bmap inode
*/
ipbmap2 = diReadSpecial(sb, BMAP_I, 1);
if (ipbmap2 == NULL) {
printk(KERN_ERR "jfs_extendfs: diReadSpecial(bmap) failed\n");
goto error_out;
}
memcpy(&JFS_IP(ipbmap2)->i_xtroot, &JFS_IP(ipbmap)->i_xtroot, 288);
ipbmap2->i_size = ipbmap->i_size;
ipbmap2->i_blocks = ipbmap->i_blocks;
diWriteSpecial(ipbmap2, 1);
diFreeSpecial(ipbmap2);
/*
* update superblock
*/
if ((rc = readSuper(sb, &bh)))
goto error_out;
j_sb = (struct jfs_superblock *)bh->b_data;
/* mark extendfs() completion */
j_sb->s_state &= cpu_to_le32(~FM_EXTENDFS);
j_sb->s_size = cpu_to_le64(bmp->db_mapsize <<
le16_to_cpu(j_sb->s_l2bfactor));
j_sb->s_agsize = cpu_to_le32(bmp->db_agsize);
/* update inline log space descriptor */
if (sbi->mntflag & JFS_INLINELOG) {
PXDaddress(&(j_sb->s_logpxd), newLogAddress);
PXDlength(&(j_sb->s_logpxd), newLogSize);
}
/* record log's mount serial number */
j_sb->s_logserial = cpu_to_le32(log->serial);
/* update fsck work space descriptor */
PXDaddress(&(j_sb->s_fsckpxd), newFSCKAddress);
PXDlength(&(j_sb->s_fsckpxd), newFSCKSize);
j_sb->s_fscklog = 1;
/* sb->s_fsckloglen remains the same */
/* Update secondary superblock */
bh2 = sb_bread(sb, SUPER2_OFF >> sb->s_blocksize_bits);
if (bh2) {
j_sb2 = (struct jfs_superblock *)bh2->b_data;
memcpy(j_sb2, j_sb, sizeof (struct jfs_superblock));
mark_buffer_dirty(bh2);
sync_dirty_buffer(bh2);
brelse(bh2);
}
/* write primary superblock */
mark_buffer_dirty(bh);
sync_dirty_buffer(bh);
brelse(bh);
goto resume;
error_out:
jfs_error(sb, "jfs_extendfs");
resume:
/*
* resume file system transactions
*/
txResume(sb);
out:
return rc;
}
| gpl-2.0 |
ShieldKteam/shield_osprey | arch/sh/kernel/stacktrace.c | 11803 | 2180 | /*
* arch/sh/kernel/stacktrace.c
*
* Stack trace management functions
*
* Copyright (C) 2006 - 2008 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <asm/unwinder.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
static int save_stack_stack(void *data, char *name)
{
return 0;
}
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
static void save_stack_address(void *data, unsigned long addr, int reliable)
{
struct stack_trace *trace = data;
if (!reliable)
return;
if (trace->skip > 0) {
trace->skip--;
return;
}
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = addr;
}
static const struct stacktrace_ops save_stack_ops = {
.stack = save_stack_stack,
.address = save_stack_address,
};
void save_stack_trace(struct stack_trace *trace)
{
unsigned long *sp = (unsigned long *)current_stack_pointer;
unwind_stack(current, NULL, sp, &save_stack_ops, trace);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
static void
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
struct stack_trace *trace = (struct stack_trace *)data;
if (!reliable)
return;
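/* scheduler internals only show where the task context-switched;
 * leave them out of the trace */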
if (in_sched_functions(addr))
return;
if (trace->skip > 0) {
trace->skip--;
return;
}
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = addr;
}
static const struct stacktrace_ops save_stack_ops_nosched = {
.stack = save_stack_stack,
.address = save_stack_address_nosched,
};
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
unsigned long *sp = (unsigned long *)tsk->thread.sp;
unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
| gpl-2.0 |
royorbs3/Leviathan-V1-Kernel-G925T | fs/omfs/bitmap.c | 12827 | 4107 | #include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <asm/div64.h>
#include "omfs.h"
unsigned long omfs_count_free(struct super_block *sb)
{
unsigned int i;
unsigned long sum = 0;
struct omfs_sb_info *sbi = OMFS_SB(sb);
int nbits = sb->s_blocksize * 8;
for (i = 0; i < sbi->s_imap_size; i++)
sum += nbits - bitmap_weight(sbi->s_imap[i], nbits);
return sum;
}
/*
* Counts the run of zero bits starting at bit up to max.
* It handles the case where a run might spill over a buffer.
* Called with bitmap lock.
*/
static int count_run(unsigned long **addr, int nbits,
int addrlen, int bit, int max)
{
int count = 0;
int x;
for (; addrlen > 0; addrlen--, addr++) {
x = find_next_bit(*addr, nbits, bit);
count += x - bit;
if (x < nbits || count > max)
return min(count, max);
bit = 0;
}
return min(count, max);
}
/*
* Sets or clears the run of count bits starting with bit.
* Called with bitmap lock.
*/
static int set_run(struct super_block *sb, int map,
int nbits, int bit, int count, int set)
{
int i;
int err;
struct buffer_head *bh;
struct omfs_sb_info *sbi = OMFS_SB(sb);
err = -ENOMEM;
bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
if (!bh)
goto out;
for (i = 0; i < count; i++, bit++) {
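/* run crosses into the next bitmap block: flush this buffer
 * and read the one backing the next map page */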
if (bit >= nbits) {
bit = 0;
map++;
mark_buffer_dirty(bh);
brelse(bh);
bh = sb_bread(sb,
clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
if (!bh)
goto out;
}
if (set) {
set_bit(bit, sbi->s_imap[map]);
set_bit(bit, (unsigned long *)bh->b_data);
} else {
clear_bit(bit, sbi->s_imap[map]);
clear_bit(bit, (unsigned long *)bh->b_data);
}
}
mark_buffer_dirty(bh);
brelse(bh);
err = 0;
out:
return err;
}
/*
* Tries to allocate exactly one block. Returns true if successful.
*/
int omfs_allocate_block(struct super_block *sb, u64 block)
{
struct buffer_head *bh;
struct omfs_sb_info *sbi = OMFS_SB(sb);
int bits_per_entry = 8 * sb->s_blocksize;
unsigned int map, bit;
int ret = 0;
u64 tmp;
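/* split the block number into a bitmap page (map) and a bit offset;
 * do_div() returns the remainder and leaves the quotient in tmp */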
tmp = block;
bit = do_div(tmp, bits_per_entry);
map = tmp;
mutex_lock(&sbi->s_bitmap_lock);
if (map >= sbi->s_imap_size || test_and_set_bit(bit, sbi->s_imap[map]))
goto out;
if (sbi->s_bitmap_ino > 0) {
bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
if (!bh)
goto out;
set_bit(bit, (unsigned long *)bh->b_data);
mark_buffer_dirty(bh);
brelse(bh);
}
ret = 1;
out:
mutex_unlock(&sbi->s_bitmap_lock);
return ret;
}
/*
* Tries to allocate a set of blocks. The request size depends on the
* type: for inodes, we must allocate sbi->s_mirrors blocks, and for file
* blocks, we try to allocate sbi->s_clustersize, but can always get away
* with just one block.
*/
int omfs_allocate_range(struct super_block *sb,
int min_request,
int max_request,
u64 *return_block,
int *return_size)
{
struct omfs_sb_info *sbi = OMFS_SB(sb);
int bits_per_entry = 8 * sb->s_blocksize;
int ret = 0;
int i, run, bit;
mutex_lock(&sbi->s_bitmap_lock);
for (i = 0; i < sbi->s_imap_size; i++) {
bit = 0;
while (bit < bits_per_entry) {
bit = find_next_zero_bit(sbi->s_imap[i], bits_per_entry,
bit);
if (bit == bits_per_entry)
break;
run = count_run(&sbi->s_imap[i], bits_per_entry,
sbi->s_imap_size-i, bit, max_request);
if (run >= min_request)
goto found;
bit += run;
}
}
ret = -ENOSPC;
goto out;
found:
*return_block = i * bits_per_entry + bit;
*return_size = run;
ret = set_run(sb, i, bits_per_entry, bit, run, 1);
out:
mutex_unlock(&sbi->s_bitmap_lock);
return ret;
}
/*
* Clears count bits starting at a given block.
*/
int omfs_clear_range(struct super_block *sb, u64 block, int count)
{
struct omfs_sb_info *sbi = OMFS_SB(sb);
int bits_per_entry = 8 * sb->s_blocksize;
u64 tmp;
unsigned int map, bit;
int ret;
tmp = block;
bit = do_div(tmp, bits_per_entry);
map = tmp;
if (map >= sbi->s_imap_size)
return 0;
mutex_lock(&sbi->s_bitmap_lock);
ret = set_run(sb, map, bits_per_entry, bit, count, 0);
mutex_unlock(&sbi->s_bitmap_lock);
return ret;
}
| gpl-2.0 |
scs/uclinux | lib/libgmp/gmp-4.2.4/mpz/set_f.c | 28 | 1543 | /* mpz_set_f (dest_integer, src_float) -- Assign DEST_INTEGER from SRC_FLOAT.
Copyright 1996, 2001 Free Software Foundation, Inc.
This file is part of the GNU MP Library.
The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library. If not, see http://www.gnu.org/licenses/. */
#include "gmp.h"
#include "gmp-impl.h"
void
mpz_set_f (mpz_ptr w, mpf_srcptr u)
{
mp_ptr wp, up;
mp_size_t size;
mp_exp_t exp;
/* abs(u)<1 truncates to zero */
exp = EXP (u);
if (exp <= 0)
{
SIZ(w) = 0;
return;
}
MPZ_REALLOC (w, exp);
wp = PTR(w);
up = PTR(u);
size = SIZ (u);
SIZ(w) = (size >= 0 ? exp : -exp);
size = ABS (size);
if (exp > size)
{
/* pad with low zeros to get a total "exp" many limbs */
mp_size_t zeros = exp - size;
MPN_ZERO (wp, zeros);
wp += zeros;
}
else
{
/* exp<=size, truncate to the high "exp" many limbs */
up += (size - exp);
size = exp;
}
MPN_COPY (wp, up, size);
}
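/*
 * Worked example (a sketch of the two branches above): with exp = 3 and
 * size = 2, the float stores only the two most significant limbs, so one
 * low zero limb is padded before copying; with exp = 1 and size = 3, the
 * two lowest limbs are fraction, so only the single top limb is copied.
 */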
| gpl-2.0 |
dirker/barebox | scripts/mod/modpost.c | 28 | 43309 | /* Postprocess module symbol versions
*
* Copyright 2003 Kai Germaschewski
* Copyright 2002-2004 Rusty Russell, IBM Corporation
* Copyright 2006 Sam Ravnborg
* Based in part on module-init-tools/depmod.c,file2alias
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* Usage: modpost vmlinux module1.o module2.o ...
*/
#include <ctype.h>
#include "modpost.h"
#if 0
/* Once we decide to actually make use of the license it should
* go back to the include file.
*/
#include "../../include/license.h"
#else
static inline int license_is_gpl_compatible(const char *license)
{
return (strcmp(license, "GPL") == 0
|| strcmp(license, "GPL v2") == 0
|| strcmp(license, "GPL and additional rights") == 0
|| strcmp(license, "Dual BSD/GPL") == 0
|| strcmp(license, "Dual MIT/GPL") == 0
|| strcmp(license, "Dual MPL/GPL") == 0);
}
#endif
/* Are we using CONFIG_MODVERSIONS? */
int modversions = 0;
/* Warn about undefined symbols? (do so if we have vmlinux) */
int have_vmlinux = 0;
/* Is CONFIG_MODULE_SRCVERSION_ALL set? */
static int all_versions = 0;
/* If we are modposting external module set to 1 */
static int external_module = 0;
/* Only warn about unresolved symbols */
static int warn_unresolved = 0;
/* How a symbol is exported */
enum export {
export_plain, export_unused, export_gpl,
export_unused_gpl, export_gpl_future, export_unknown
};
void fatal(const char *fmt, ...)
{
va_list arglist;
fprintf(stderr, "FATAL: ");
va_start(arglist, fmt);
vfprintf(stderr, fmt, arglist);
va_end(arglist);
exit(1);
}
void warn(const char *fmt, ...)
{
va_list arglist;
fprintf(stderr, "WARNING: ");
va_start(arglist, fmt);
vfprintf(stderr, fmt, arglist);
va_end(arglist);
}
void merror(const char *fmt, ...)
{
va_list arglist;
fprintf(stderr, "ERROR: ");
va_start(arglist, fmt);
vfprintf(stderr, fmt, arglist);
va_end(arglist);
}
static int is_vmlinux(const char *modname)
{
const char *myname;
if ((myname = strrchr(modname, '/')))
myname++;
else
myname = modname;
return (strcmp(myname, "barebox") == 0) ||
(strcmp(myname, "barebox.o") == 0);
}
void *do_nofail(void *ptr, const char *expr)
{
if (!ptr) {
fatal("modpost: Memory allocation failure: %s.\n", expr);
}
return ptr;
}
/* A list of all modules we processed */
static struct module *modules;
static struct module *find_module(char *modname)
{
struct module *mod;
for (mod = modules; mod; mod = mod->next)
if (strcmp(mod->name, modname) == 0)
break;
return mod;
}
static struct module *new_module(char *modname)
{
struct module *mod;
char *p, *s;
mod = NOFAIL(malloc(sizeof(*mod)));
memset(mod, 0, sizeof(*mod));
p = NOFAIL(strdup(modname));
/* strip trailing .o */
if ((s = strrchr(p, '.')) != NULL)
if (strcmp(s, ".o") == 0)
*s = '\0';
/* add to list */
mod->name = p;
mod->gpl_compatible = -1;
mod->next = modules;
modules = mod;
return mod;
}
/* A hash of all exported symbols;
 * struct symbol is also used for lists of unresolved symbols */
#define SYMBOL_HASH_SIZE 1024
struct symbol {
struct symbol *next;
struct module *module;
unsigned int crc;
int crc_valid;
unsigned int weak:1;
unsigned int vmlinux:1; /* 1 if symbol is defined in vmlinux */
unsigned int kernel:1; /* 1 if symbol is from kernel
* (only for external modules) **/
unsigned int preloaded:1; /* 1 if symbol from Module.symvers */
enum export export; /* Type of export */
char name[0];
};
static struct symbol *symbolhash[SYMBOL_HASH_SIZE];
/* This is based on the hash algorithm from gdbm, via tdb */
static inline unsigned int tdb_hash(const char *name)
{
unsigned value; /* Used to compute the hash value. */
unsigned i; /* Used to cycle through random values. */
/* Set the initial value from the key size. */
for (value = 0x238F13AF * strlen(name), i=0; name[i]; i++)
value = (value + (((unsigned char *)name)[i] << (i*5 % 24)));
return (1103515243 * value + 12345);
}
/**
 * Allocate a new symbol for use in the hash of exported symbols or
 * in a module's list of unresolved symbols
**/
static struct symbol *alloc_symbol(const char *name, unsigned int weak,
struct symbol *next)
{
struct symbol *s = NOFAIL(malloc(sizeof(*s) + strlen(name) + 1));
memset(s, 0, sizeof(*s));
strcpy(s->name, name);
s->weak = weak;
s->next = next;
return s;
}
/* For the hash of exported symbols */
static struct symbol *new_symbol(const char *name, struct module *module,
enum export export)
{
unsigned int hash;
struct symbol *new;
hash = tdb_hash(name) % SYMBOL_HASH_SIZE;
new = symbolhash[hash] = alloc_symbol(name, 0, symbolhash[hash]);
new->module = module;
new->export = export;
return new;
}
static struct symbol *find_symbol(const char *name)
{
struct symbol *s;
/* For our purposes, .foo matches foo. PPC64 needs this. */
if (name[0] == '.')
name++;
for (s = symbolhash[tdb_hash(name) % SYMBOL_HASH_SIZE]; s; s=s->next) {
if (strcmp(s->name, name) == 0)
return s;
}
return NULL;
}
static struct {
const char *str;
enum export export;
} export_list[] = {
{ .str = "EXPORT_SYMBOL", .export = export_plain },
{ .str = "EXPORT_UNUSED_SYMBOL", .export = export_unused },
{ .str = "EXPORT_SYMBOL_GPL", .export = export_gpl },
{ .str = "EXPORT_UNUSED_SYMBOL_GPL", .export = export_unused_gpl },
{ .str = "EXPORT_SYMBOL_GPL_FUTURE", .export = export_gpl_future },
{ .str = "(unknown)", .export = export_unknown },
};
static const char *export_str(enum export ex)
{
return export_list[ex].str;
}
static enum export export_no(const char * s)
{
int i;
if (!s)
return export_unknown;
for (i = 0; export_list[i].export != export_unknown; i++) {
if (strcmp(export_list[i].str, s) == 0)
return export_list[i].export;
}
return export_unknown;
}
static enum export export_from_sec(struct elf_info *elf, Elf_Section sec)
{
if (sec == elf->export_sec)
return export_plain;
else if (sec == elf->export_unused_sec)
return export_unused;
else if (sec == elf->export_gpl_sec)
return export_gpl;
else if (sec == elf->export_unused_gpl_sec)
return export_unused_gpl;
else if (sec == elf->export_gpl_future_sec)
return export_gpl_future;
else
return export_unknown;
}
/**
 * Add an exported symbol - it may have already been added without a
 * CRC; in that case, just update the CRC
**/
static struct symbol *sym_add_exported(const char *name, struct module *mod,
enum export export)
{
struct symbol *s = find_symbol(name);
if (!s) {
s = new_symbol(name, mod, export);
} else {
if (!s->preloaded) {
warn("%s: '%s' exported twice. Previous export "
"was in %s%s\n", mod->name, name,
s->module->name,
is_vmlinux(s->module->name) ?"":".ko");
}
}
s->preloaded = 0;
s->vmlinux = is_vmlinux(mod->name);
s->kernel = 0;
s->export = export;
return s;
}
static void sym_update_crc(const char *name, struct module *mod,
unsigned int crc, enum export export)
{
struct symbol *s = find_symbol(name);
if (!s)
s = new_symbol(name, mod, export);
s->crc = crc;
s->crc_valid = 1;
}
void *grab_file(const char *filename, unsigned long *size)
{
struct stat st;
void *map;
int fd;
fd = open(filename, O_RDONLY);
if (fd < 0 || fstat(fd, &st) != 0)
return NULL;
*size = st.st_size;
map = mmap(NULL, *size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
close(fd);
if (map == MAP_FAILED)
return NULL;
return map;
}
/**
 * Return a copy of the next line in a mmap'ed file.
 * Leading whitespace is trimmed away.
 * Returns a pointer to a static buffer.
**/
char* get_next_line(unsigned long *pos, void *file, unsigned long size)
{
static char line[4096];
int skip = 1;
size_t len = 0;
signed char *p = (signed char *)file + *pos;
char *s = line;
for (; *pos < size ; (*pos)++)
{
if (skip && isspace(*p)) {
p++;
continue;
}
skip = 0;
if (*p != '\n' && (*pos < size)) {
len++;
*s++ = *p++;
if (len > 4095)
break; /* Too long, stop */
} else {
/* End of string */
*s = '\0';
return line;
}
}
/* End of buffer */
return NULL;
}
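/*
 * Typical iteration over a grabbed file (a sketch; it mirrors the use
 * in read_dump() below):
 */
#if 0
unsigned long size, pos = 0;
void *file = grab_file("Module.symvers", &size);
char *line;
if (file) {
while ((line = get_next_line(&pos, file, size))) {
/* process one line here */
}
release_file(file, size);
}
#endif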
void release_file(void *file, unsigned long size)
{
munmap(file, size);
}
static int parse_elf(struct elf_info *info, const char *filename)
{
unsigned int i;
Elf_Ehdr *hdr;
Elf_Shdr *sechdrs;
Elf_Sym *sym;
hdr = grab_file(filename, &info->size);
if (!hdr) {
perror(filename);
exit(1);
}
info->hdr = hdr;
if (info->size < sizeof(*hdr)) {
/* file too small, assume this is an empty .o file */
return 0;
}
/* Is this a valid ELF file? */
if ((hdr->e_ident[EI_MAG0] != ELFMAG0) ||
(hdr->e_ident[EI_MAG1] != ELFMAG1) ||
(hdr->e_ident[EI_MAG2] != ELFMAG2) ||
(hdr->e_ident[EI_MAG3] != ELFMAG3)) {
/* Not an ELF file - silently ignore it */
return 0;
}
/* Fix endianness in ELF header */
hdr->e_shoff = TO_NATIVE(hdr->e_shoff);
hdr->e_shstrndx = TO_NATIVE(hdr->e_shstrndx);
hdr->e_shnum = TO_NATIVE(hdr->e_shnum);
hdr->e_machine = TO_NATIVE(hdr->e_machine);
hdr->e_type = TO_NATIVE(hdr->e_type);
sechdrs = (void *)hdr + hdr->e_shoff;
info->sechdrs = sechdrs;
/* Fix endianness in section headers */
for (i = 0; i < hdr->e_shnum; i++) {
sechdrs[i].sh_type = TO_NATIVE(sechdrs[i].sh_type);
sechdrs[i].sh_offset = TO_NATIVE(sechdrs[i].sh_offset);
sechdrs[i].sh_size = TO_NATIVE(sechdrs[i].sh_size);
sechdrs[i].sh_link = TO_NATIVE(sechdrs[i].sh_link);
sechdrs[i].sh_name = TO_NATIVE(sechdrs[i].sh_name);
sechdrs[i].sh_info = TO_NATIVE(sechdrs[i].sh_info);
sechdrs[i].sh_addr = TO_NATIVE(sechdrs[i].sh_addr);
}
/* Find symbol table. */
for (i = 1; i < hdr->e_shnum; i++) {
const char *secstrings
= (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
const char *secname;
if (sechdrs[i].sh_offset > info->size) {
fatal("%s is truncated. sechdrs[i].sh_offset=%u > sizeof(*hrd)=%ul\n", filename, (unsigned int)sechdrs[i].sh_offset, sizeof(*hdr));
return 0;
}
secname = secstrings + sechdrs[i].sh_name;
if (strcmp(secname, ".modinfo") == 0) {
info->modinfo = (void *)hdr + sechdrs[i].sh_offset;
info->modinfo_len = sechdrs[i].sh_size;
} else if (strcmp(secname, "__usymtab") == 0)
info->export_sec = i;
else if (strcmp(secname, "__usymtab_unused") == 0)
info->export_unused_sec = i;
else if (strcmp(secname, "__usymtab_gpl") == 0)
info->export_gpl_sec = i;
else if (strcmp(secname, "__usymtab_unused_gpl") == 0)
info->export_unused_gpl_sec = i;
else if (strcmp(secname, "__usymtab_gpl_future") == 0)
info->export_gpl_future_sec = i;
if (sechdrs[i].sh_type != SHT_SYMTAB)
continue;
info->symtab_start = (void *)hdr + sechdrs[i].sh_offset;
info->symtab_stop = (void *)hdr + sechdrs[i].sh_offset
+ sechdrs[i].sh_size;
info->strtab = (void *)hdr +
sechdrs[sechdrs[i].sh_link].sh_offset;
}
if (!info->symtab_start) {
fatal("%s has no symtab?\n", filename);
}
/* Fix endianness in symbols */
for (sym = info->symtab_start; sym < info->symtab_stop; sym++) {
sym->st_shndx = TO_NATIVE(sym->st_shndx);
sym->st_name = TO_NATIVE(sym->st_name);
sym->st_value = TO_NATIVE(sym->st_value);
sym->st_size = TO_NATIVE(sym->st_size);
}
return 1;
}
static void parse_elf_finish(struct elf_info *info)
{
release_file(info->hdr, info->size);
}
#define CRC_PFX MODULE_SYMBOL_PREFIX "__crc_"
#define KSYMTAB_PFX MODULE_SYMBOL_PREFIX "__usymtab_"
static void handle_modversions(struct module *mod, struct elf_info *info,
Elf_Sym *sym, const char *symname)
{
unsigned int crc;
enum export export = export_from_sec(info, sym->st_shndx);
switch (sym->st_shndx) {
case SHN_COMMON:
warn("\"%s\" [%s] is COMMON symbol\n", symname, mod->name);
break;
case SHN_ABS:
/* CRC'd symbol */
if (memcmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) {
crc = (unsigned int) sym->st_value;
sym_update_crc(symname + strlen(CRC_PFX), mod, crc,
export);
}
break;
case SHN_UNDEF:
/* undefined symbol */
if (ELF_ST_BIND(sym->st_info) != STB_GLOBAL &&
ELF_ST_BIND(sym->st_info) != STB_WEAK)
break;
/* ignore global offset table */
if (strcmp(symname, "_GLOBAL_OFFSET_TABLE_") == 0)
break;
/* ignore __this_module, it will be resolved shortly */
if (strcmp(symname, MODULE_SYMBOL_PREFIX "__this_module") == 0)
break;
/* cope with newer glibc (2.3.4 or higher) STT_ definition in elf.h */
#if defined(STT_REGISTER) || defined(STT_SPARC_REGISTER)
/* add compatibility with older glibc */
#ifndef STT_SPARC_REGISTER
#define STT_SPARC_REGISTER STT_REGISTER
#endif
if (info->hdr->e_machine == EM_SPARC ||
info->hdr->e_machine == EM_SPARCV9) {
/* Ignore register directives. */
if (ELF_ST_TYPE(sym->st_info) == STT_SPARC_REGISTER)
break;
if (symname[0] == '.') {
char *munged = strdup(symname);
munged[0] = '_';
munged[1] = toupper(munged[1]);
symname = munged;
}
}
#endif
if (memcmp(symname, MODULE_SYMBOL_PREFIX,
strlen(MODULE_SYMBOL_PREFIX)) == 0)
mod->unres = alloc_symbol(symname +
strlen(MODULE_SYMBOL_PREFIX),
ELF_ST_BIND(sym->st_info) == STB_WEAK,
mod->unres);
break;
default:
/* All exported symbols */
if (memcmp(symname, KSYMTAB_PFX, strlen(KSYMTAB_PFX)) == 0) {
sym_add_exported(symname + strlen(KSYMTAB_PFX), mod,
export);
}
if (strcmp(symname, MODULE_SYMBOL_PREFIX "init_module") == 0)
mod->has_init = 1;
if (strcmp(symname, MODULE_SYMBOL_PREFIX "cleanup_module") == 0)
mod->has_cleanup = 1;
break;
}
}
/**
* Parse tag=value strings from .modinfo section
**/
static char *next_string(char *string, unsigned long *secsize)
{
/* Skip non-zero chars */
while (string[0]) {
string++;
if ((*secsize)-- <= 1)
return NULL;
}
/* Skip any zero padding. */
while (!string[0]) {
string++;
if ((*secsize)-- <= 1)
return NULL;
}
return string;
}
static char *get_next_modinfo(void *modinfo, unsigned long modinfo_len,
const char *tag, char *info)
{
char *p;
unsigned int taglen = strlen(tag);
unsigned long size = modinfo_len;
if (info) {
size -= info - (char *)modinfo;
modinfo = next_string(info, &size);
}
for (p = modinfo; p; p = next_string(p, &size)) {
if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
return p + taglen + 1;
}
return NULL;
}
static char *get_modinfo(void *modinfo, unsigned long modinfo_len,
const char *tag)
{
return get_next_modinfo(modinfo, modinfo_len, tag, NULL);
}
/**
 * Test if string s ends with string sub;
 * return 0 on match
**/
static int strrcmp(const char *s, const char *sub)
{
int slen, sublen;
if (!s || !sub)
return 1;
slen = strlen(s);
sublen = strlen(sub);
if ((slen == 0) || (sublen == 0))
return 1;
if (sublen > slen)
return 1;
return memcmp(s + slen - sublen, sub, sublen);
}
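/*
 * For example, strrcmp("devinit.text", ".text") == 0, while
 * strrcmp(".text", "devinit.text") is non-zero because sub is longer
 * than s.
 */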
/**
* Whitelist to allow certain references to pass with no warning.
*
* Pattern 0:
* Do not warn if function/data are marked with __init_refok/__initdata_refok.
* The pattern is identified by:
* fromsec = .text.init.refok | .data.init.refok
*
* Pattern 1:
* If a module parameter is declared __initdata and permissions=0
* then this is legal despite the warning generated.
* We cannot see value of permissions here, so just ignore
* this pattern.
* The pattern is identified by:
* tosec = .init.data
* fromsec = .data*
* atsym =__param*
*
* Pattern 2:
* Many drivers utilise a *driver container with references to
* add, remove, probe functions etc.
* These functions may often be marked __init and we do not want to
* warn here.
* the pattern is identified by:
* tosec = .init.text | .exit.text | .init.data
* fromsec = .data | .data.rel | .data.rel.*
* atsym = *driver, *_template, *_sht, *_ops, *_probe, *probe_one, *_console, *_timer
*
* Pattern 3:
 * Whitelist all references from .text.head to .init.data
 * Whitelist all references from .text.head to .init.text
*
* Pattern 4:
 * Some symbols belong to an init section but it is still OK to reference
 * them from non-init sections: these symbols have no memory allocated
 * for them, and the symbol address and value are the same. So even
 * after the init section is freed, it is OK to reference those symbols,
 * e.g. the symbols marking the init section boundaries.
* This pattern is identified by
* refsymname = __init_begin, _sinittext, _einittext
*
**/
static int secref_whitelist(const char *modname, const char *tosec,
const char *fromsec, const char *atsym,
const char *refsymname)
{
int f1 = 1, f2 = 1;
const char **s;
const char *pat2sym[] = {
"driver",
"_template", /* scsi uses *_template a lot */
"_timer", /* arm uses ops structures named _timer a lot */
"_sht", /* scsi also used *_sht to some extent */
"_ops",
"_probe",
"_probe_one",
"_console",
NULL
};
const char *pat3refsym[] = {
"__init_begin",
"_sinittext",
"_einittext",
NULL
};
/* Check for pattern 0 */
if ((strcmp(fromsec, ".text.init.refok") == 0) ||
(strcmp(fromsec, ".data.init.refok") == 0))
return 1;
/* Check for pattern 1 */
if (strcmp(tosec, ".init.data") != 0)
f1 = 0;
if (strncmp(fromsec, ".data", strlen(".data")) != 0)
f1 = 0;
if (strncmp(atsym, "__param", strlen("__param")) != 0)
f1 = 0;
if (f1)
return f1;
/* Check for pattern 2 */
if ((strcmp(tosec, ".init.text") != 0) &&
(strcmp(tosec, ".exit.text") != 0) &&
(strcmp(tosec, ".init.data") != 0))
f2 = 0;
if ((strcmp(fromsec, ".data") != 0) &&
(strcmp(fromsec, ".data.rel") != 0) &&
(strncmp(fromsec, ".data.rel.", strlen(".data.rel.")) != 0))
f2 = 0;
for (s = pat2sym; *s; s++)
if (strrcmp(atsym, *s) == 0)
f1 = 1;
if (f1 && f2)
return 1;
/* Check for pattern 3 */
if ((strcmp(fromsec, ".text.head") == 0) &&
((strcmp(tosec, ".init.data") == 0) ||
(strcmp(tosec, ".init.text") == 0)))
return 1;
/* Check for pattern 4 */
for (s = pat3refsym; *s; s++)
if (strcmp(refsymname, *s) == 0)
return 1;
return 0;
}
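/*
 * Pattern 2 in practice (hypothetical names): a reference from .data to
 * .init.text is let through only when the referencing symbol ends in
 * one of the pat2sym suffixes, e.g. a driver container named
 * "bar_driver" whose probe callback is marked __init.
 */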
/**
* Find symbol based on relocation record info.
* In some cases the symbol supplied is a valid symbol so
* return refsym. If st_name != 0 we assume this is a valid symbol.
* In other cases the symbol needs to be looked up in the symbol table
* based on section and address.
* **/
static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf_Addr addr,
Elf_Sym *relsym)
{
Elf_Sym *sym;
if (relsym->st_name != 0)
return relsym;
for (sym = elf->symtab_start; sym < elf->symtab_stop; sym++) {
if (sym->st_shndx != relsym->st_shndx)
continue;
if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
continue;
if (sym->st_value == addr)
return sym;
}
return NULL;
}
static inline int is_arm_mapping_symbol(const char *str)
{
return str[0] == '$' && strchr("atd", str[1])
&& (str[2] == '\0' || str[2] == '.');
}
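/* Matches, for example, "$a", "$d" and "$t.1", but not "$x" or "$at". */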
/*
* If there's no name there, ignore it; likewise, ignore it if it's
 * one of the magic symbols emitted by current ARM tools.
*
* Otherwise if find_symbols_between() returns those symbols, they'll
* fail the whitelist tests and cause lots of false alarms ... fixable
* only by merging __exit and __init sections into __text, bloating
* the kernel (which is especially evil on embedded platforms).
*/
static inline int is_valid_name(struct elf_info *elf, Elf_Sym *sym)
{
const char *name = elf->strtab + sym->st_name;
if (!name || !strlen(name))
return 0;
return !is_arm_mapping_symbol(name);
}
/*
* Find symbols before or equal addr and after addr - in the section sec.
* If we find two symbols with equal offset prefer one with a valid name.
* The ELF format may have a better way to detect what type of symbol
* it is, but this works for now.
**/
static void find_symbols_between(struct elf_info *elf, Elf_Addr addr,
const char *sec,
Elf_Sym **before, Elf_Sym **after)
{
Elf_Sym *sym;
Elf_Ehdr *hdr = elf->hdr;
Elf_Addr beforediff = ~0;
Elf_Addr afterdiff = ~0;
const char *secstrings = (void *)hdr +
elf->sechdrs[hdr->e_shstrndx].sh_offset;
*before = NULL;
*after = NULL;
for (sym = elf->symtab_start; sym < elf->symtab_stop; sym++) {
const char *symsec;
if (sym->st_shndx >= SHN_LORESERVE)
continue;
symsec = secstrings + elf->sechdrs[sym->st_shndx].sh_name;
if (strcmp(symsec, sec) != 0)
continue;
if (!is_valid_name(elf, sym))
continue;
if (sym->st_value <= addr) {
if ((addr - sym->st_value) < beforediff) {
beforediff = addr - sym->st_value;
*before = sym;
}
else if ((addr - sym->st_value) == beforediff) {
*before = sym;
}
}
else
{
if ((sym->st_value - addr) < afterdiff) {
afterdiff = sym->st_value - addr;
*after = sym;
}
else if ((sym->st_value - addr) == afterdiff) {
*after = sym;
}
}
}
}
/**
* Print a warning about a section mismatch.
 * Try to find symbols near it so the user can locate it.
* Check whitelist before warning - it may be a false positive.
**/
static void warn_sec_mismatch(const char *modname, const char *fromsec,
struct elf_info *elf, Elf_Sym *sym, Elf_Rela r)
{
const char *refsymname = "";
Elf_Sym *before, *after;
Elf_Sym *refsym;
Elf_Ehdr *hdr = elf->hdr;
Elf_Shdr *sechdrs = elf->sechdrs;
const char *secstrings = (void *)hdr +
sechdrs[hdr->e_shstrndx].sh_offset;
const char *secname = secstrings + sechdrs[sym->st_shndx].sh_name;
find_symbols_between(elf, r.r_offset, fromsec, &before, &after);
refsym = find_elf_symbol(elf, r.r_addend, sym);
if (refsym && strlen(elf->strtab + refsym->st_name))
refsymname = elf->strtab + refsym->st_name;
/* check whitelist - we may ignore it */
if (before &&
secref_whitelist(modname, secname, fromsec,
elf->strtab + before->st_name, refsymname))
return;
if (before && after) {
warn("%s(%s+0x%llx): Section mismatch: reference to %s:%s "
"(between '%s' and '%s')\n",
modname, fromsec, (unsigned long long)r.r_offset,
secname, refsymname,
elf->strtab + before->st_name,
elf->strtab + after->st_name);
} else if (before) {
warn("%s(%s+0x%llx): Section mismatch: reference to %s:%s "
"(after '%s')\n",
modname, fromsec, (unsigned long long)r.r_offset,
secname, refsymname,
elf->strtab + before->st_name);
} else if (after) {
warn("%s(%s+0x%llx): Section mismatch: reference to %s:%s "
"before '%s' (at offset -0x%llx)\n",
modname, fromsec, (unsigned long long)r.r_offset,
secname, refsymname,
elf->strtab + after->st_name);
} else {
warn("%s(%s+0x%llx): Section mismatch: reference to %s:%s\n",
modname, fromsec, (unsigned long long)r.r_offset,
secname, refsymname);
}
}
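/*
 * A typical warning from the first branch above, with hypothetical
 * names, looks like:
 * mymod(.data+0x10): Section mismatch: reference to .init.text:my_probe
 * (between 'my_driver' and 'my_next_sym')
 */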
static unsigned int *reloc_location(struct elf_info *elf,
int rsection, Elf_Rela *r)
{
Elf_Shdr *sechdrs = elf->sechdrs;
int section = sechdrs[rsection].sh_info;
return (void *)elf->hdr + sechdrs[section].sh_offset +
(r->r_offset - sechdrs[section].sh_addr);
}
static int addend_386_rel(struct elf_info *elf, int rsection, Elf_Rela *r)
{
unsigned int r_typ = ELF_R_TYPE(r->r_info);
unsigned int *location = reloc_location(elf, rsection, r);
switch (r_typ) {
case R_386_32:
r->r_addend = TO_NATIVE(*location);
break;
case R_386_PC32:
r->r_addend = TO_NATIVE(*location) + 4;
/* For CONFIG_RELOCATABLE=y */
if (elf->hdr->e_type == ET_EXEC)
r->r_addend += r->r_offset;
break;
}
return 0;
}
static int addend_arm_rel(struct elf_info *elf, int rsection, Elf_Rela *r)
{
unsigned int r_typ = ELF_R_TYPE(r->r_info);
switch (r_typ) {
case R_ARM_ABS32:
/* From ARM ABI: (S + A) | T */
r->r_addend = (int)(long)(elf->symtab_start + ELF_R_SYM(r->r_info));
break;
case R_ARM_PC24:
/* From ARM ABI: ((S + A) | T) - P */
r->r_addend = (int)(long)(elf->hdr + elf->sechdrs[rsection].sh_offset +
(r->r_offset - elf->sechdrs[rsection].sh_addr));
break;
default:
return 1;
}
return 0;
}
static int addend_mips_rel(struct elf_info *elf, int rsection, Elf_Rela *r)
{
unsigned int r_typ = ELF_R_TYPE(r->r_info);
unsigned int *location = reloc_location(elf, rsection, r);
unsigned int inst;
if (r_typ == R_MIPS_HI16)
return 1; /* skip this */
inst = TO_NATIVE(*location);
switch (r_typ) {
case R_MIPS_LO16:
r->r_addend = inst & 0xffff;
break;
case R_MIPS_26:
r->r_addend = (inst & 0x03ffffff) << 2;
break;
case R_MIPS_32:
r->r_addend = inst;
break;
}
return 0;
}
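/*
 * Worked example for R_MIPS_26 above: a jal instruction encodes a
 * 26-bit word index in its low bits, so inst = 0x0c000010 gives
 * (0x0c000010 & 0x03ffffff) << 2 = 0x10 << 2 = 0x40 as the byte addend.
 */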
/**
* A module includes a number of sections that are discarded
* either when loaded or when used as built-in.
 * For loaded modules all functions marked __init and all data
 * marked __initdata will be discarded when the module has been initialized.
 * Likewise for modules used built-in the sections marked __exit
 * are discarded because __exit marked functions are supposed to be called
 * only when a module is unloaded, which never happens for built-in modules.
 * The check_sec_ref() function traverses all relocation records
 * to find all references to a section that will be discarded
 * and warns about them.
**/
static void check_sec_ref(struct module *mod, const char *modname,
struct elf_info *elf,
int section(const char*),
int section_ref_ok(const char *))
{
int i;
Elf_Sym *sym;
Elf_Ehdr *hdr = elf->hdr;
Elf_Shdr *sechdrs = elf->sechdrs;
const char *secstrings = (void *)hdr +
sechdrs[hdr->e_shstrndx].sh_offset;
/* Walk through all sections */
for (i = 0; i < hdr->e_shnum; i++) {
const char *name = secstrings + sechdrs[i].sh_name;
const char *secname;
Elf_Rela r;
unsigned int r_sym;
/* We want to process only relocation sections and not .init */
if (sechdrs[i].sh_type == SHT_RELA) {
Elf_Rela *rela;
Elf_Rela *start = (void *)hdr + sechdrs[i].sh_offset;
Elf_Rela *stop = (void*)start + sechdrs[i].sh_size;
name += strlen(".rela");
if (section_ref_ok(name))
continue;
for (rela = start; rela < stop; rela++) {
r.r_offset = TO_NATIVE(rela->r_offset);
#if KERNEL_ELFCLASS == ELFCLASS64
if (hdr->e_machine == EM_MIPS) {
unsigned int r_typ;
r_sym = ELF64_MIPS_R_SYM(rela->r_info);
r_sym = TO_NATIVE(r_sym);
r_typ = ELF64_MIPS_R_TYPE(rela->r_info);
r.r_info = ELF64_R_INFO(r_sym, r_typ);
} else {
r.r_info = TO_NATIVE(rela->r_info);
r_sym = ELF_R_SYM(r.r_info);
}
#else
r.r_info = TO_NATIVE(rela->r_info);
r_sym = ELF_R_SYM(r.r_info);
#endif
r.r_addend = TO_NATIVE(rela->r_addend);
sym = elf->symtab_start + r_sym;
/* Skip special sections */
if (sym->st_shndx >= SHN_LORESERVE)
continue;
secname = secstrings +
sechdrs[sym->st_shndx].sh_name;
if (section(secname))
warn_sec_mismatch(modname, name,
elf, sym, r);
}
} else if (sechdrs[i].sh_type == SHT_REL) {
Elf_Rel *rel;
Elf_Rel *start = (void *)hdr + sechdrs[i].sh_offset;
Elf_Rel *stop = (void*)start + sechdrs[i].sh_size;
name += strlen(".rel");
if (section_ref_ok(name))
continue;
for (rel = start; rel < stop; rel++) {
r.r_offset = TO_NATIVE(rel->r_offset);
#if KERNEL_ELFCLASS == ELFCLASS64
if (hdr->e_machine == EM_MIPS) {
unsigned int r_typ;
r_sym = ELF64_MIPS_R_SYM(rel->r_info);
r_sym = TO_NATIVE(r_sym);
r_typ = ELF64_MIPS_R_TYPE(rel->r_info);
r.r_info = ELF64_R_INFO(r_sym, r_typ);
} else {
r.r_info = TO_NATIVE(rel->r_info);
r_sym = ELF_R_SYM(r.r_info);
}
#else
r.r_info = TO_NATIVE(rel->r_info);
r_sym = ELF_R_SYM(r.r_info);
#endif
r.r_addend = 0;
switch (hdr->e_machine) {
case EM_386:
if (addend_386_rel(elf, i, &r))
continue;
break;
case EM_ARM:
if(addend_arm_rel(elf, i, &r))
continue;
break;
case EM_MIPS:
if (addend_mips_rel(elf, i, &r))
continue;
break;
}
sym = elf->symtab_start + r_sym;
/* Skip special sections */
if (sym->st_shndx >= SHN_LORESERVE)
continue;
secname = secstrings +
sechdrs[sym->st_shndx].sh_name;
if (section(secname))
warn_sec_mismatch(modname, name,
elf, sym, r);
}
}
}
}
/*
 * Identify sections from which references to either a
 * .init or a .exit section are OK.
*
 * [OPD] Keith Owens <kaos@sgi.com> commented:
* For our future {in}sanity, add a comment that this is the ppc .opd
* section, not the ia64 .opd section.
* ia64 .opd should not point to discarded sections.
* [.rodata] like for .init.text we ignore .rodata references -same reason
*/
static int initexit_section_ref_ok(const char *name)
{
const char **s;
/* Absolute section names */
const char *namelist1[] = {
"__bug_table", /* used by powerpc for BUG() */
"__ex_table",
".altinstructions",
".cranges", /* used by sh64 */
".fixup",
".machvec", /* ia64 + powerpc uses these */
".machine.desc",
".opd", /* See comment [OPD] */
".parainstructions",
".pdr",
".plt", /* seen on ARCH=um build on x86_64. Harmless */
".smp_locks",
".stab",
".m68k_fixup",
NULL
};
/* Start of section names */
const char *namelist2[] = {
".debug",
".eh_frame",
".note", /* ignore ELF notes - may contain anything */
".got", /* powerpc - global offset table */
".toc", /* powerpc - table of contents */
NULL
};
/* part of section name */
const char *namelist3 [] = {
".unwind", /* Sample: IA_64.unwind.exit.text */
NULL
};
for (s = namelist1; *s; s++)
if (strcmp(*s, name) == 0)
return 1;
for (s = namelist2; *s; s++)
if (strncmp(*s, name, strlen(*s)) == 0)
return 1;
for (s = namelist3; *s; s++)
if (strstr(name, *s) != NULL)
return 1;
return 0;
}
/**
 * Functions used only during module init are marked __init and are stored in
 * a .init.text section. Likewise, data is marked __initdata and stored in
 * a .init.data section.
 * If the given section is one of these sections, return 1.
* See include/linux/init.h for the details
**/
static int init_section(const char *name)
{
if (strcmp(name, ".init") == 0)
return 1;
if (strncmp(name, ".init.", strlen(".init.")) == 0)
return 1;
return 0;
}
/*
 * Identify sections from which references to a .init section are OK.
*
 * Unfortunately, references from read-only data to .init
 * sections had to be excluded. Almost all of these are false
 * positives created by gcc. The downside of excluding rodata
* is that there really are some user references from rodata to
* init code, e.g. drivers/video/vgacon.c:
*
* const struct consw vga_con = {
* con_startup: vgacon_startup,
*
* where vgacon_startup is __init. If you want to wade through the false
* positives, take out the check for rodata.
*/
static int init_section_ref_ok(const char *name)
{
const char **s;
/* Absolute section names */
const char *namelist1[] = {
"__dbe_table", /* MIPS generate these */
"__ftr_fixup", /* powerpc cpu feature fixup */
"__fw_ftr_fixup", /* powerpc firmware feature fixup */
"__param",
".data.rel.ro", /* used by parisc64 */
".init",
".text.lock",
NULL
};
/* Start of section names */
const char *namelist2[] = {
".init.",
".pci_fixup",
".rodata",
NULL
};
if (initexit_section_ref_ok(name))
return 1;
for (s = namelist1; *s; s++)
if (strcmp(*s, name) == 0)
return 1;
for (s = namelist2; *s; s++)
if (strncmp(*s, name, strlen(*s)) == 0)
return 1;
/* If section name ends with ".init" we allow references
* as is the case with .initcallN.init, .early_param.init, .taglist.init etc
*/
if (strrcmp(name, ".init") == 0)
return 1;
return 0;
}
/*
 * Functions used only during module exit are marked __exit and are stored in
 * a .exit.text section. Likewise, data is marked __exitdata and stored in
 * a .exit.data section.
 * If the given section is one of these sections, return 1.
* See include/linux/init.h for the details
**/
static int exit_section(const char *name)
{
if (strcmp(name, ".exit.text") == 0)
return 1;
if (strcmp(name, ".exit.data") == 0)
return 1;
return 0;
}
/*
 * Identify sections from which references to a .exit section are OK.
*/
static int exit_section_ref_ok(const char *name)
{
const char **s;
/* Absolute section names */
const char *namelist1[] = {
".exit.data",
".exit.text",
".exitcall.exit",
".rodata",
NULL
};
if (initexit_section_ref_ok(name))
return 1;
for (s = namelist1; *s; s++)
if (strcmp(*s, name) == 0)
return 1;
return 0;
}
static void read_symbols(char *modname)
{
const char *symname;
char *version;
char *license;
struct module *mod;
struct elf_info info = { };
Elf_Sym *sym;
if (!parse_elf(&info, modname))
return;
mod = new_module(modname);
/* When there's no vmlinux, don't print warnings about
* unresolved symbols (since there'll be too many ;) */
if (is_vmlinux(modname)) {
have_vmlinux = 1;
mod->skip = 1;
}
license = get_modinfo(info.modinfo, info.modinfo_len, "license");
while (license) {
if (license_is_gpl_compatible(license))
mod->gpl_compatible = 1;
else {
mod->gpl_compatible = 0;
break;
}
license = get_next_modinfo(info.modinfo, info.modinfo_len,
"license", license);
}
for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
symname = info.strtab + sym->st_name;
handle_modversions(mod, &info, sym, symname);
// handle_moddevtable(mod, &info, sym, symname);
}
check_sec_ref(mod, modname, &info, init_section, init_section_ref_ok);
check_sec_ref(mod, modname, &info, exit_section, exit_section_ref_ok);
version = get_modinfo(info.modinfo, info.modinfo_len, "version");
if (version)
maybe_frob_rcs_version(modname, version, info.modinfo,
version - (char *)info.hdr);
if (version || (all_versions && !is_vmlinux(modname)))
get_src_version(modname, mod->srcversion,
sizeof(mod->srcversion)-1);
parse_elf_finish(&info);
/* Our trick to get versioning for struct_module - it's
* never passed as an argument to an exported function, so
* the automatic versioning doesn't pick it up, but it's really
* important anyhow */
if (modversions)
mod->unres = alloc_symbol("struct_module", 0, mod->unres);
}
#define SZ 500
/* We first write the generated file into memory using the
* following helper, then compare to the file on disk and
 * only update the latter if anything changed */
void __attribute__((format(__printf__, 2, 3))) buf_printf(struct buffer *buf,
const char *fmt, ...)
{
char tmp[SZ];
int len;
va_list ap;
va_start(ap, fmt);
len = vsnprintf(tmp, SZ, fmt, ap);
buf_write(buf, tmp, len);
va_end(ap);
}
void buf_write(struct buffer *buf, const char *s, int len)
{
if (buf->size - buf->pos < len) {
buf->size += len + SZ;
buf->p = NOFAIL(realloc(buf->p, buf->size));
}
strncpy(buf->p + buf->pos, s, len);
buf->pos += len;
}
static void check_for_gpl_usage(enum export exp, const char *m, const char *s)
{
const char *e = is_vmlinux(m) ?"":".ko";
switch (exp) {
case export_gpl:
fatal("modpost: GPL-incompatible module %s%s "
"uses GPL-only symbol '%s'\n", m, e, s);
break;
case export_unused_gpl:
fatal("modpost: GPL-incompatible module %s%s "
"uses GPL-only symbol marked UNUSED '%s'\n", m, e, s);
break;
case export_gpl_future:
warn("modpost: GPL-incompatible module %s%s "
"uses future GPL-only symbol '%s'\n", m, e, s);
break;
case export_plain:
case export_unused:
case export_unknown:
/* ignore */
break;
}
}
static void check_for_unused(enum export exp, const char* m, const char* s)
{
const char *e = is_vmlinux(m) ?"":".ko";
switch (exp) {
case export_unused:
case export_unused_gpl:
warn("modpost: module %s%s "
"uses symbol '%s' marked UNUSED\n", m, e, s);
break;
default:
/* ignore */
break;
}
}
static void check_exports(struct module *mod)
{
struct symbol *s, *exp;
for (s = mod->unres; s; s = s->next) {
const char *basename;
exp = find_symbol(s->name);
if (!exp || exp->module == mod)
continue;
basename = strrchr(mod->name, '/');
if (basename)
basename++;
else
basename = mod->name;
if (!mod->gpl_compatible)
check_for_gpl_usage(exp->export, basename, exp->name);
check_for_unused(exp->export, basename, exp->name);
}
}
/**
* Header for the generated file
**/
static void add_header(struct buffer *b, struct module *mod)
{
buf_printf(b, "#include <module.h>\n");
// buf_printf(b, "#include <linux/vermagic.h>\n");
buf_printf(b, "#include <linux/compiler.h>\n");
buf_printf(b, "\n");
// buf_printf(b, "MODULE_INFO(vermagic, VERMAGIC_STRING);\n");
buf_printf(b, "\n");
buf_printf(b, "struct module __this_module\n");
buf_printf(b, "__attribute__((section(\".gnu.linkonce.this_module\"))) = {\n");
buf_printf(b, " .name = KBUILD_MODNAME,\n");
if (mod->has_init)
buf_printf(b, " .init = init_module,\n");
if (mod->has_cleanup)
buf_printf(b, "#ifdef CONFIG_MODULE_UNLOAD\n"
" .exit = cleanup_module,\n"
"#endif\n");
// buf_printf(b, " .arch = MODULE_ARCH_INIT,\n");
buf_printf(b, "};\n");
}
/**
* Record CRCs for unresolved symbols
**/
static int add_versions(struct buffer *b, struct module *mod)
{
struct symbol *s, *exp;
int err = 0;
for (s = mod->unres; s; s = s->next) {
exp = find_symbol(s->name);
if (!exp || exp->module == mod) {
if (have_vmlinux && !s->weak) {
if (warn_unresolved) {
warn("\"%s\" [%s.ko] undefined!\n",
s->name, mod->name);
} else {
merror("\"%s\" [%s.ko] undefined!\n",
s->name, mod->name);
err = 1;
}
}
continue;
}
s->module = exp->module;
s->crc_valid = exp->crc_valid;
s->crc = exp->crc;
}
if (!modversions)
return err;
buf_printf(b, "\n");
buf_printf(b, "static const struct modversion_info ____versions[]\n");
buf_printf(b, "__used\n");
buf_printf(b, "__attribute__((section(\"__versions\"))) = {\n");
for (s = mod->unres; s; s = s->next) {
if (!s->module) {
continue;
}
if (!s->crc_valid) {
warn("\"%s\" [%s.ko] has no CRC!\n",
s->name, mod->name);
continue;
}
buf_printf(b, "\t{ %#8x, \"%s\" },\n", s->crc, s->name);
}
buf_printf(b, "};\n");
return err;
}
static void add_depends(struct buffer *b, struct module *mod,
struct module *modules)
{
struct symbol *s;
struct module *m;
int first = 1;
for (m = modules; m; m = m->next) {
m->seen = is_vmlinux(m->name);
}
buf_printf(b, "\n");
buf_printf(b, "static const char __module_depends[]\n");
buf_printf(b, "__used\n");
buf_printf(b, "__attribute__((section(\".modinfo\"))) =\n");
buf_printf(b, "\"depends=");
for (s = mod->unres; s; s = s->next) {
const char *p;
if (!s->module)
continue;
if (s->module->seen)
continue;
s->module->seen = 1;
if ((p = strrchr(s->module->name, '/')) != NULL)
p++;
else
p = s->module->name;
buf_printf(b, "%s%s", first ? "" : ",", p);
first = 0;
}
buf_printf(b, "\";\n");
}
static void add_srcversion(struct buffer *b, struct module *mod)
{
if (mod->srcversion[0]) {
buf_printf(b, "\n");
buf_printf(b, "MODULE_INFO(srcversion, \"%s\");\n",
mod->srcversion);
}
}
static void write_if_changed(struct buffer *b, const char *fname)
{
char *tmp;
FILE *file;
struct stat st;
file = fopen(fname, "r");
if (!file)
goto write;
if (fstat(fileno(file), &st) < 0)
goto close_write;
if (st.st_size != b->pos)
goto close_write;
tmp = NOFAIL(malloc(b->pos));
if (fread(tmp, 1, b->pos, file) != b->pos)
goto free_write;
if (memcmp(tmp, b->p, b->pos) != 0)
goto free_write;
free(tmp);
fclose(file);
return;
free_write:
free(tmp);
close_write:
fclose(file);
write:
file = fopen(fname, "w");
if (!file) {
perror(fname);
exit(1);
}
if (fwrite(b->p, 1, b->pos, file) != b->pos) {
perror(fname);
exit(1);
}
fclose(file);
}
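/*
 * Leaving an unchanged file untouched preserves its timestamp, so make
 * does not needlessly rebuild the objects that depend on it.
 */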
/* parse Module.symvers file. line format:
* 0x12345678<tab>symbol<tab>module[[<tab>export]<tab>something]
**/
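/*
 * A concrete line matching the format above (hypothetical CRC):
 * 0x8c8c8f4e<tab>printk<tab>vmlinux<tab>EXPORT_SYMBOL
 */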
static void read_dump(const char *fname, unsigned int kernel)
{
unsigned long size, pos = 0;
void *file = grab_file(fname, &size);
char *line;
if (!file)
/* No symbol versions, silently ignore */
return;
while ((line = get_next_line(&pos, file, size))) {
char *symname, *modname, *d, *export, *end;
unsigned int crc;
struct module *mod;
struct symbol *s;
if (!(symname = strchr(line, '\t')))
goto fail;
*symname++ = '\0';
if (!(modname = strchr(symname, '\t')))
goto fail;
*modname++ = '\0';
if ((export = strchr(modname, '\t')) != NULL)
*export++ = '\0';
if (export && ((end = strchr(export, '\t')) != NULL))
*end = '\0';
crc = strtoul(line, &d, 16);
if (*symname == '\0' || *modname == '\0' || *d != '\0')
goto fail;
if (!(mod = find_module(modname))) {
if (is_vmlinux(modname)) {
have_vmlinux = 1;
}
mod = new_module(NOFAIL(strdup(modname)));
mod->skip = 1;
}
s = sym_add_exported(symname, mod, export_no(export));
s->kernel = kernel;
s->preloaded = 1;
sym_update_crc(symname, mod, crc, export_no(export));
}
return;
fail:
fatal("parse error in symbol dump file\n");
}
/* For normal builds always dump all symbols.
* For external modules only dump symbols
* that are not read from kernel Module.symvers.
**/
static int dump_sym(struct symbol *sym)
{
if (!external_module)
return 1;
if (sym->vmlinux || sym->kernel)
return 0;
return 1;
}
static void write_dump(const char *fname)
{
struct buffer buf = { };
struct symbol *symbol;
int n;
for (n = 0; n < SYMBOL_HASH_SIZE ; n++) {
symbol = symbolhash[n];
while (symbol) {
if (dump_sym(symbol))
buf_printf(&buf, "0x%08x\t%s\t%s\t%s\n",
symbol->crc, symbol->name,
symbol->module->name,
export_str(symbol->export));
symbol = symbol->next;
}
}
write_if_changed(&buf, fname);
}
int main(int argc, char **argv)
{
struct module *mod;
struct buffer buf = { };
char fname[SZ];
char *kernel_read = NULL, *module_read = NULL;
char *dump_write = NULL;
int opt;
int err;
while ((opt = getopt(argc, argv, "i:I:mo:aw")) != -1) {
switch(opt) {
case 'i':
kernel_read = optarg;
break;
case 'I':
module_read = optarg;
external_module = 1;
break;
case 'm':
modversions = 1;
break;
case 'o':
dump_write = optarg;
break;
case 'a':
all_versions = 1;
break;
case 'w':
warn_unresolved = 1;
break;
default:
exit(1);
}
}
if (kernel_read)
read_dump(kernel_read, 1);
if (module_read)
read_dump(module_read, 0);
while (optind < argc) {
read_symbols(argv[optind++]);
}
for (mod = modules; mod; mod = mod->next) {
if (mod->skip)
continue;
check_exports(mod);
}
err = 0;
for (mod = modules; mod; mod = mod->next) {
if (mod->skip)
continue;
buf.pos = 0;
add_header(&buf, mod);
err |= add_versions(&buf, mod);
add_depends(&buf, mod, modules);
// add_moddevtable(&buf, mod);
add_srcversion(&buf, mod);
sprintf(fname, "%s.mod.c", mod->name);
write_if_changed(&buf, fname);
}
if (dump_write)
write_dump(dump_write);
return err;
}
| gpl-2.0 |
RafaelRMachado/Coreboot | src/vendorcode/amd/agesa/f14/Proc/Mem/Tech/mttml.c | 28 | 9350 | /* $NoKeywords:$ */
/**
* @file
*
* mttml.c
*
* Technology Max Latency Training support
*
* @xrefitem bom "File Content Label" "Release Content"
* @e project: AGESA
* @e sub-project: (Mem/Tech)
* @e \$Revision: 38442 $ @e \$Date: 2010-09-24 06:39:57 +0800 (Fri, 24 Sep 2010) $
*
**/
/*
*****************************************************************************
*
* Copyright (c) 2011, Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Advanced Micro Devices, Inc. nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***************************************************************************
*
*/
/*
*----------------------------------------------------------------------------
* MODULES USED
*
*----------------------------------------------------------------------------
*/
#include "AGESA.h"
#include "Ids.h"
#include "mm.h"
#include "mn.h"
#include "mu.h"
#include "mt.h"
#include "merrhdl.h"
#include "GeneralServices.h"
#include "Filecode.h"
CODE_GROUP (G1_PEICC)
RDATA_GROUP (G1_PEICC)
#define FILECODE PROC_MEM_TECH_MTTML_FILECODE
/*----------------------------------------------------------------------------
* DEFINITIONS AND MACROS
*
*----------------------------------------------------------------------------
*/
/*----------------------------------------------------------------------------
* TYPEDEFS AND STRUCTURES
*
*----------------------------------------------------------------------------
*/
/*----------------------------------------------------------------------------
* PROTOTYPES OF LOCAL FUNCTIONS
*
*----------------------------------------------------------------------------
*/
/*----------------------------------------------------------------------------
* EXPORTED FUNCTIONS
*
*----------------------------------------------------------------------------
*/
/* -----------------------------------------------------------------------------*/
/**
*
* This function trains Max latency for all dies
*
* @param[in,out] *TechPtr - Pointer to the MEM_TECH_BLOCK
*
* @return TRUE - No fatal error occurs.
* @return FALSE - Fatal error occurs.
*/
BOOLEAN
MemTTrainMaxLatency (
IN OUT MEM_TECH_BLOCK *TechPtr
)
{
UINT32 TestAddrRJ16;
UINT8 Dct;
UINT8 ChipSel;
UINT8 *PatternBufPtr;
UINT8 *TestBufferPtr;
UINT16 CalcMaxLatDly;
UINT16 MaxLatDly;
UINT16 MaxLatLimit;
UINT16 Margin;
UINT16 CurTest;
UINT16 _CL_;
UINT8 TimesFail;
UINT8 TimesRetrain;
UINT16 i;
MEM_DATA_STRUCT *MemPtr;
DIE_STRUCT *MCTPtr;
MEM_NB_BLOCK *NBPtr;
NBPtr = TechPtr->NBPtr;
MCTPtr = NBPtr->MCTPtr;
MemPtr = NBPtr->MemPtr;
TechPtr->TrainingType = TRN_MAX_READ_LATENCY;
TimesRetrain = DEFAULT_TRAINING_TIMES;
IDS_OPTION_HOOK (IDS_MEM_RETRAIN_TIMES, &TimesRetrain, &MemPtr->StdHeader);
IDS_HDT_CONSOLE (MEM_STATUS, "\nStart MaxRdLat training\n");
// Set environment settings before training
AGESA_TESTPOINT (TpProcMemMaxRdLatencyTraining, &(MemPtr->StdHeader));
MemTBeginTraining (TechPtr);
//
// Initialize the Training Pattern
//
if (AGESA_SUCCESS != NBPtr->TrainingPatternInit (NBPtr)) {
return (BOOLEAN) (MCTPtr->ErrCode < AGESA_FATAL);
}
TechPtr->PatternLength = (MCTPtr->Status[Sb128bitmode]) ? 6 : 3;
//
// Setup hardware training engine (if applicable)
//
NBPtr->FamilySpecificHook[SetupHwTrainingEngine] (NBPtr, &TechPtr->TrainingType);
MaxLatDly = 0;
_CL_ = TechPtr->PatternLength;
PatternBufPtr = TechPtr->PatternBufPtr;
TestBufferPtr = TechPtr->TestBufPtr;
//
// Begin max latency training
//
for (Dct = 0; Dct < NBPtr->DctCount; Dct++) {
if (MCTPtr->Status[Sb128bitmode] && (Dct != 0)) {
break;
}
IDS_HDT_CONSOLE (MEM_STATUS, "\tDct %d\n", Dct);
NBPtr->SwitchDCT (NBPtr, Dct);
if (NBPtr->DCTPtr->Timings.DctMemSize != 0) {
if (TechPtr->FindMaxDlyForMaxRdLat (TechPtr, &ChipSel)) {
if (NBPtr->GetSysAddr (NBPtr, ChipSel, &TestAddrRJ16)) {
IDS_HDT_CONSOLE (MEM_STATUS, "\t\tCS %d\n", ChipSel);
IDS_HDT_CONSOLE (MEM_FLOW, "\t\t\tWrite to address: %04x0000\n", TestAddrRJ16);
// Write the test patterns
AGESA_TESTPOINT (TpProcMemMaxRdLatWritePattern, &(MemPtr->StdHeader));
NBPtr->WritePattern (NBPtr, TestAddrRJ16, PatternBufPtr, _CL_);
// Sweep max latency delays
NBPtr->getMaxLatParams (NBPtr, TechPtr->MaxDlyForMaxRdLat, &CalcMaxLatDly, &MaxLatLimit, &Margin);
AGESA_TESTPOINT (TpProcMemMaxRdLatStartSweep, &(MemPtr->StdHeader));
TimesFail = 0;
ERROR_HANDLE_RETRAIN_BEGIN (TimesFail, TimesRetrain)
{
MaxLatDly = CalcMaxLatDly;
for (i = 0; i < (MaxLatLimit - CalcMaxLatDly); i++) {
NBPtr->SetBitField (NBPtr, BFMaxLatency, MaxLatDly);
IDS_HDT_CONSOLE (MEM_FLOW, "\t\t\tDly %3x", MaxLatDly);
TechPtr->ResetDCTWrPtr (TechPtr, 6);
AGESA_TESTPOINT (TpProcMemMaxRdLatReadPattern, &(MemPtr->StdHeader));
NBPtr->ReadPattern (NBPtr, TestBufferPtr, TestAddrRJ16, _CL_);
AGESA_TESTPOINT (TpProcMemMaxRdLatTestPattern, &(MemPtr->StdHeader));
CurTest = NBPtr->CompareTestPattern (NBPtr, TestBufferPtr, PatternBufPtr, _CL_ * 64);
NBPtr->FlushPattern (NBPtr, TestAddrRJ16, _CL_);
if (NBPtr->IsSupported[ReverseMaxRdLatTrain]) {
// Reverse training decrements MaxLatDly whenever the test passes
// and uses the last passing MaxLatDly as left edge
if (CurTest == 0xFFFF) {
IDS_HDT_CONSOLE (MEM_FLOW, " P");
if (MaxLatDly == 0) {
break;
} else {
MaxLatDly--;
}
}
} else {
// Traditional training increments MaxLatDly until the test passes
// and uses it as left edge
if (CurTest == 0xFFFF) {
IDS_HDT_CONSOLE (MEM_FLOW, " P");
break;
} else {
MaxLatDly++;
}
}
IDS_HDT_CONSOLE (MEM_FLOW, "\n");
}// End of delay sweep
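/*
 * Sweep illustration (hypothetical numbers): with CalcMaxLatDly = 0x20
 * and MaxLatLimit = 0x40, the traditional sweep tries 0x20, 0x21, ...
 * until the compare passes, while the reverse sweep decrements from
 * 0x20 while it keeps passing and later adds the 1 back before Margin
 * is applied.
 */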
ERROR_HANDLE_RETRAIN_END ((MaxLatDly >= MaxLatLimit), TimesFail)
}
AGESA_TESTPOINT (TpProcMemMaxRdLatSetDelay, &(MemPtr->StdHeader));
if (MaxLatDly >= MaxLatLimit) {
PutEventLog (AGESA_ERROR, MEM_ERROR_MAX_LAT_NO_WINDOW, NBPtr->Node, NBPtr->Dct, NBPtr->Channel, 0, &NBPtr->MemPtr->StdHeader);
SetMemError (AGESA_ERROR, MCTPtr);
NBPtr->DCTPtr->Timings.CsTrainFail |= NBPtr->DCTPtr->Timings.CsPresent;
MCTPtr->ChannelTrainFail |= (UINT32)1 << Dct;
if (!NBPtr->MemPtr->ErrorHandling (MCTPtr, NBPtr->Dct, EXCLUDE_ALL_CHIPSEL, &NBPtr->MemPtr->StdHeader)) {
return FALSE;
}
} else {
NBPtr->FamilySpecificHook[AddlMaxRdLatTrain] (NBPtr, &TestAddrRJ16);
MaxLatDly = MaxLatDly + Margin;
if (NBPtr->IsSupported[ReverseMaxRdLatTrain]) {
MaxLatDly++; // Add 1 to get back to the last passing value
}
// Set final delays
NBPtr->SetBitField (NBPtr, BFMaxLatency, MaxLatDly);
IDS_HDT_CONSOLE (MEM_FLOW, "\t\tFinal MaxRdLat: %03x\n", MaxLatDly);
}
}
}
}
}
// Restore environment settings after training
MemTEndTraining (TechPtr);
IDS_HDT_CONSOLE (MEM_FLOW, "End MaxRdLat training\n\n");
//
// Finalize the Pattern
//
NBPtr->TrainingPatternFinalize (NBPtr);
return (BOOLEAN) (MCTPtr->ErrCode < AGESA_FATAL);
}
| gpl-2.0 |
kasi86/linux | sound/firewire/tascam/tascam.c | 284 | 6053 | /*
* tascam.c - a part of driver for TASCAM FireWire series
*
* Copyright (c) 2015 Takashi Sakamoto
*
* Licensed under the terms of the GNU General Public License, version 2.
*/
#include "tascam.h"
MODULE_DESCRIPTION("TASCAM FireWire series Driver");
MODULE_AUTHOR("Takashi Sakamoto <o-takashi@sakamocchi.jp>");
MODULE_LICENSE("GPL v2");
static struct snd_tscm_spec model_specs[] = {
{
.name = "FW-1884",
.has_adat = true,
.has_spdif = true,
.pcm_capture_analog_channels = 8,
.pcm_playback_analog_channels = 8,
.midi_capture_ports = 4,
.midi_playback_ports = 4,
},
{
.name = "FW-1082",
.has_adat = false,
.has_spdif = true,
.pcm_capture_analog_channels = 8,
.pcm_playback_analog_channels = 2,
.midi_capture_ports = 2,
.midi_playback_ports = 2,
},
{
.name = "FW-1804",
.has_adat = true,
.has_spdif = true,
.pcm_capture_analog_channels = 8,
.pcm_playback_analog_channels = 2,
.midi_capture_ports = 2,
.midi_playback_ports = 4,
},
};
static int identify_model(struct snd_tscm *tscm)
{
struct fw_device *fw_dev = fw_parent_device(tscm->unit);
const u32 *config_rom = fw_dev->config_rom;
char model[9];
unsigned int i;
u8 c;
if (fw_dev->config_rom_length < 30) {
dev_err(&tscm->unit->device,
"Configuration ROM is too short.\n");
return -ENODEV;
}
/* Pick up model name from certain addresses. */
for (i = 0; i < 8; i++) {
c = config_rom[28 + i / 4] >> (24 - 8 * (i % 4));
if (c == '\0')
break;
model[i] = c;
}
model[i] = '\0';
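/*
 * In the loop above, each 32-bit quadlet of the config ROM holds four
 * name characters, most significant byte first; i / 4 selects the
 * quadlet and 24 - 8 * (i % 4) shifts the wanted byte down to bits 7:0.
 */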
for (i = 0; i < ARRAY_SIZE(model_specs); i++) {
if (strcmp(model, model_specs[i].name) == 0) {
tscm->spec = &model_specs[i];
break;
}
}
if (tscm->spec == NULL)
return -ENODEV;
strcpy(tscm->card->driver, "FW-TASCAM");
strcpy(tscm->card->shortname, model);
strcpy(tscm->card->mixername, model);
snprintf(tscm->card->longname, sizeof(tscm->card->longname),
"TASCAM %s, GUID %08x%08x at %s, S%d", model,
fw_dev->config_rom[3], fw_dev->config_rom[4],
dev_name(&tscm->unit->device), 100 << fw_dev->max_speed);
return 0;
}
static void tscm_free(struct snd_tscm *tscm)
{
snd_tscm_transaction_unregister(tscm);
snd_tscm_stream_destroy_duplex(tscm);
fw_unit_put(tscm->unit);
mutex_destroy(&tscm->mutex);
}
static void tscm_card_free(struct snd_card *card)
{
tscm_free(card->private_data);
}
static void do_registration(struct work_struct *work)
{
struct snd_tscm *tscm = container_of(work, struct snd_tscm, dwork.work);
int err;
err = snd_card_new(&tscm->unit->device, -1, NULL, THIS_MODULE, 0,
&tscm->card);
if (err < 0)
return;
err = identify_model(tscm);
if (err < 0)
goto error;
err = snd_tscm_transaction_register(tscm);
if (err < 0)
goto error;
err = snd_tscm_stream_init_duplex(tscm);
if (err < 0)
goto error;
snd_tscm_proc_init(tscm);
err = snd_tscm_create_pcm_devices(tscm);
if (err < 0)
goto error;
err = snd_tscm_create_midi_devices(tscm);
if (err < 0)
goto error;
err = snd_tscm_create_hwdep_device(tscm);
if (err < 0)
goto error;
err = snd_card_register(tscm->card);
if (err < 0)
goto error;
/*
 * Once registered, the tscm instance is released together with
 * the sound card instance.
*/
tscm->card->private_free = tscm_card_free;
tscm->card->private_data = tscm;
tscm->registered = true;
return;
error:
snd_tscm_transaction_unregister(tscm);
snd_tscm_stream_destroy_duplex(tscm);
snd_card_free(tscm->card);
dev_info(&tscm->unit->device,
"Sound card registration failed: %d\n", err);
}
static int snd_tscm_probe(struct fw_unit *unit,
const struct ieee1394_device_id *entry)
{
struct snd_tscm *tscm;
/* Allocate this independent of sound card instance. */
tscm = kzalloc(sizeof(struct snd_tscm), GFP_KERNEL);
if (tscm == NULL)
return -ENOMEM;
/* initialize myself */
tscm->unit = fw_unit_get(unit);
dev_set_drvdata(&unit->device, tscm);
mutex_init(&tscm->mutex);
spin_lock_init(&tscm->lock);
init_waitqueue_head(&tscm->hwdep_wait);
/* Allocate and register this sound card later. */
INIT_DEFERRABLE_WORK(&tscm->dwork, do_registration);
snd_fw_schedule_registration(unit, &tscm->dwork);
return 0;
}
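/*
 * Note (an assumption about the design, not stated in this file):
 * registration is deferred to a workqueue so that probe returns quickly
 * and the unit's power-up bus resets can settle before the card is
 * exposed to userspace.
 */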
static void snd_tscm_update(struct fw_unit *unit)
{
struct snd_tscm *tscm = dev_get_drvdata(&unit->device);
	/* Postpone the deferred registration if it has not yet completed. */
if (!tscm->registered)
snd_fw_schedule_registration(unit, &tscm->dwork);
snd_tscm_transaction_reregister(tscm);
/*
 * After registration, userspace can start packet streaming; in that
 * case the running streams need to be updated for the bus reset.
*/
if (tscm->registered) {
mutex_lock(&tscm->mutex);
snd_tscm_stream_update_duplex(tscm);
mutex_unlock(&tscm->mutex);
}
}
static void snd_tscm_remove(struct fw_unit *unit)
{
struct snd_tscm *tscm = dev_get_drvdata(&unit->device);
/*
 * Make sure the registration work has stopped before the sound card
 * is released. The work is not scheduled again because the bus
 * reset handler is no longer called.
*/
cancel_delayed_work_sync(&tscm->dwork);
if (tscm->registered) {
/* No need to wait for releasing card object in this context. */
snd_card_free_when_closed(tscm->card);
} else {
/* Don't forget this case. */
tscm_free(tscm);
}
}
static const struct ieee1394_device_id snd_tscm_id_table[] = {
{
.match_flags = IEEE1394_MATCH_VENDOR_ID |
IEEE1394_MATCH_SPECIFIER_ID,
.vendor_id = 0x00022e,
.specifier_id = 0x00022e,
},
/* FE-08 requires reverse-engineering because it just has faders. */
{}
};
MODULE_DEVICE_TABLE(ieee1394, snd_tscm_id_table);
static struct fw_driver tscm_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "snd-firewire-tascam",
.bus = &fw_bus_type,
},
.probe = snd_tscm_probe,
.update = snd_tscm_update,
.remove = snd_tscm_remove,
.id_table = snd_tscm_id_table,
};
static int __init snd_tscm_init(void)
{
return driver_register(&tscm_driver.driver);
}
static void __exit snd_tscm_exit(void)
{
driver_unregister(&tscm_driver.driver);
}
module_init(snd_tscm_init);
module_exit(snd_tscm_exit);
| gpl-2.0 |
CyanogenMod/htc-kernel-supersonic | drivers/gpu/drm/drm_crtc_helper.c | 284 | 31741 | /*
* Copyright (c) 2006-2008 Intel Corporation
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
*
* DRM core CRTC related functions
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*
* Authors:
* Keith Packard
* Eric Anholt <eric@anholt.net>
* Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
*/
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
static void drm_mode_validate_flag(struct drm_connector *connector,
int flags)
{
struct drm_display_mode *mode, *t;
if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
return;
list_for_each_entry_safe(mode, t, &connector->modes, head) {
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
!(flags & DRM_MODE_FLAG_INTERLACE))
mode->status = MODE_NO_INTERLACE;
if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
!(flags & DRM_MODE_FLAG_DBLSCAN))
mode->status = MODE_NO_DBLESCAN;
}
return;
}
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
* @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* LOCKING:
* Caller must hold mode config lock.
*
* Probe @connector and try to detect its valid modes. Modes will first be
* added to the connector's probed_modes list, then culled (based on validity
* and the @maxX, @maxY parameters) and put into the normal modes list.
*
* Intended to be used either at bootup time or when major configuration
* changes have occurred.
*
* FIXME: take into account monitor limits
*
* RETURNS:
* Number of modes found on @connector.
*/
int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *t;
struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
int count = 0;
int mode_flags = 0;
DRM_DEBUG_KMS("%s\n", drm_get_connector_name(connector));
/* set all modes to the unverified state */
list_for_each_entry_safe(mode, t, &connector->modes, head)
mode->status = MODE_UNVERIFIED;
if (connector->force) {
if (connector->force == DRM_FORCE_ON)
connector->status = connector_status_connected;
else
connector->status = connector_status_disconnected;
if (connector->funcs->force)
connector->funcs->force(connector);
} else
connector->status = connector->funcs->detect(connector);
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("%s is disconnected\n",
drm_get_connector_name(connector));
goto prune;
}
count = (*connector_funcs->get_modes)(connector);
if (!count) {
count = drm_add_modes_noedid(connector, 800, 600);
if (!count)
return 0;
}
drm_mode_connector_list_update(connector);
if (maxX && maxY)
drm_mode_validate_size(dev, &connector->modes, maxX,
maxY, 0);
if (connector->interlace_allowed)
mode_flags |= DRM_MODE_FLAG_INTERLACE;
if (connector->doublescan_allowed)
mode_flags |= DRM_MODE_FLAG_DBLSCAN;
drm_mode_validate_flag(connector, mode_flags);
list_for_each_entry_safe(mode, t, &connector->modes, head) {
if (mode->status == MODE_OK)
mode->status = connector_funcs->mode_valid(connector,
mode);
}
prune:
drm_mode_prune_invalid(dev, &connector->modes, true);
if (list_empty(&connector->modes))
return 0;
drm_mode_sort(&connector->modes);
DRM_DEBUG_KMS("Probed modes for %s\n",
drm_get_connector_name(connector));
list_for_each_entry_safe(mode, t, &connector->modes, head) {
mode->vrefresh = drm_mode_vrefresh(mode);
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
drm_mode_debug_printmodeline(mode);
}
return count;
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
uint32_t maxY)
{
struct drm_connector *connector;
int count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
count += drm_helper_probe_single_connector_modes(connector,
maxX, maxY);
}
return count;
}
EXPORT_SYMBOL(drm_helper_probe_connector_modes);
/**
* drm_helper_encoder_in_use - check if a given encoder is in use
* @encoder: encoder to check
*
* LOCKING:
* Caller must hold mode config lock.
*
* Walk @encoder's DRM device's mode_config and see if the encoder is in use.
*
* RETURNS:
* True if @encoder is part of the mode_config, false otherwise.
*/
bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder)
return true;
return false;
}
EXPORT_SYMBOL(drm_helper_encoder_in_use);
/**
* drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
* @crtc: CRTC to check
*
* LOCKING:
* Caller must hold mode config lock.
*
* Walk @crtc's DRM device's mode_config and see if it's in use.
*
* RETURNS:
* True if @crtc is part of the mode_config, false otherwise.
*/
bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
struct drm_device *dev = crtc->dev;
/* FIXME: Locking around list access? */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
return true;
return false;
}
EXPORT_SYMBOL(drm_helper_crtc_in_use);
/**
* drm_disable_unused_functions - disable unused objects
* @dev: DRM device
*
* LOCKING:
* Caller must hold mode config lock.
*
* If a connector or CRTC isn't part of @dev's mode_config, it can be disabled
* by calling its dpms function, which should power it off.
*/
void drm_helper_disable_unused_functions(struct drm_device *dev)
{
struct drm_encoder *encoder;
struct drm_connector *connector;
struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_crtc *crtc;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder)
continue;
if (connector->status == connector_status_disconnected)
connector->encoder = NULL;
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
encoder_funcs = encoder->helper_private;
if (!drm_helper_encoder_in_use(encoder)) {
if (encoder_funcs->disable)
(*encoder_funcs->disable)(encoder);
else
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
/* disconnect the encoder from any crtc */
encoder->crtc = NULL;
}
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled) {
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
crtc->fb = NULL;
}
}
}
EXPORT_SYMBOL(drm_helper_disable_unused_functions);
static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height)
{
struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (drm_mode_width(mode) > width ||
drm_mode_height(mode) > height)
continue;
if (mode->type & DRM_MODE_TYPE_PREFERRED)
return mode;
}
return NULL;
}
static bool drm_has_cmdline_mode(struct drm_connector *connector)
{
struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
struct drm_fb_helper_cmdline_mode *cmdline_mode;
if (!fb_help_conn)
return false;
cmdline_mode = &fb_help_conn->cmdline_mode;
return cmdline_mode->specified;
}
static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height)
{
struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
struct drm_fb_helper_cmdline_mode *cmdline_mode;
struct drm_display_mode *mode = NULL;
if (!fb_help_conn)
return mode;
cmdline_mode = &fb_help_conn->cmdline_mode;
if (cmdline_mode->specified == false)
return mode;
/* attempt to find a matching mode in the list of modes
* we have gotten so far, if not add a CVT mode that conforms
*/
if (cmdline_mode->rb || cmdline_mode->margins)
goto create_mode;
list_for_each_entry(mode, &connector->modes, head) {
/* check width/height */
if (mode->hdisplay != cmdline_mode->xres ||
mode->vdisplay != cmdline_mode->yres)
continue;
if (cmdline_mode->refresh_specified) {
if (mode->vrefresh != cmdline_mode->refresh)
continue;
}
if (cmdline_mode->interlace) {
if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
continue;
}
return mode;
}
create_mode:
mode = drm_cvt_mode(connector->dev, cmdline_mode->xres,
cmdline_mode->yres,
cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
cmdline_mode->rb, cmdline_mode->interlace,
cmdline_mode->margins);
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
list_add(&mode->head, &connector->modes);
return mode;
}
static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
bool enable;
if (strict) {
enable = connector->status == connector_status_connected;
} else {
enable = connector->status != connector_status_disconnected;
}
return enable;
}
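/*
* Illustrative note (not in the original source): in strict mode only
* connector_status_connected counts as enabled; the relaxed fallback also
* accepts connector_status_unknown, which drm_enable_connectors() below
* falls back to when no connector reports as definitely connected.
*/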
static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
{
bool any_enabled = false;
struct drm_connector *connector;
int i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
enabled[i] = drm_connector_enabled(connector, true);
DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
enabled[i] ? "yes" : "no");
any_enabled |= enabled[i];
i++;
}
if (any_enabled)
return;
i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
enabled[i] = drm_connector_enabled(connector, false);
i++;
}
}
static bool drm_target_preferred(struct drm_device *dev,
struct drm_display_mode **modes,
bool *enabled, int width, int height)
{
struct drm_connector *connector;
int i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (enabled[i] == false) {
i++;
continue;
}
DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
connector->base.id);
/* go for the command line mode first */
modes[i] = drm_pick_cmdline_mode(connector, width, height);
if (!modes[i]) {
DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
connector->base.id);
modes[i] = drm_has_preferred_mode(connector, width, height);
}
/* No preferred modes, pick one off the list */
if (!modes[i] && !list_empty(&connector->modes)) {
list_for_each_entry(modes[i], &connector->modes, head)
break;
}
DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
"none");
i++;
}
return true;
}
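/*
* Illustrative sketch (not part of the original file) of the recursive
* search drm_pick_crtcs() performs below. For connector n it scores every
* CRTC the chosen encoder can drive and recurses over the remaining
* connectors, keeping the assignment with the highest total score:
*
*   score(n) = my_score(connector n)     // +1 each for connected status,
*            + max over usable CRTCs of  //  a cmdline mode and a
*              score(n + 1)              //  preferred mode
*
* This is an exhaustive O(num_crtc ^ num_connector) walk, acceptable only
* because both counts are small on real hardware.
*/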
static int drm_pick_crtcs(struct drm_device *dev,
struct drm_crtc **best_crtcs,
struct drm_display_mode **modes,
int n, int width, int height)
{
int c, o;
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
struct drm_crtc *best_crtc;
int my_score, best_score, score;
struct drm_crtc **crtcs, *crtc;
if (n == dev->mode_config.num_connector)
return 0;
c = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (c == n)
break;
c++;
}
best_crtcs[n] = NULL;
best_crtc = NULL;
best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height);
if (modes[n] == NULL)
return best_score;
crtcs = kmalloc(dev->mode_config.num_connector *
sizeof(struct drm_crtc *), GFP_KERNEL);
if (!crtcs)
return best_score;
my_score = 1;
if (connector->status == connector_status_connected)
my_score++;
if (drm_has_cmdline_mode(connector))
my_score++;
if (drm_has_preferred_mode(connector, width, height))
my_score++;
connector_funcs = connector->helper_private;
encoder = connector_funcs->best_encoder(connector);
if (!encoder)
goto out;
connector->encoder = encoder;
/* select a crtc for this connector and then attempt to configure
remaining connectors */
c = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if ((encoder->possible_crtcs & (1 << c)) == 0) {
c++;
continue;
}
for (o = 0; o < n; o++)
if (best_crtcs[o] == crtc)
break;
if (o < n) {
/* ignore cloning for now */
c++;
continue;
}
crtcs[n] = crtc;
memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *));
score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1,
width, height);
if (score > best_score) {
best_crtc = crtc;
best_score = score;
memcpy(best_crtcs, crtcs,
dev->mode_config.num_connector *
sizeof(struct drm_crtc *));
}
c++;
}
out:
kfree(crtcs);
return best_score;
}
static void drm_setup_crtcs(struct drm_device *dev)
{
struct drm_crtc **crtcs;
struct drm_display_mode **modes;
struct drm_encoder *encoder;
struct drm_connector *connector;
bool *enabled;
int width, height;
int i, ret;
DRM_DEBUG_KMS("\n");
width = dev->mode_config.max_width;
height = dev->mode_config.max_height;
/* clean out all the encoder/crtc combos */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
encoder->crtc = NULL;
}
crtcs = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_crtc *), GFP_KERNEL);
modes = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_display_mode *), GFP_KERNEL);
enabled = kcalloc(dev->mode_config.num_connector,
sizeof(bool), GFP_KERNEL);
drm_enable_connectors(dev, enabled);
ret = drm_target_preferred(dev, modes, enabled, width, height);
if (!ret)
DRM_ERROR("Unable to find initial modes\n");
DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct drm_display_mode *mode = modes[i];
struct drm_crtc *crtc = crtcs[i];
if (connector->encoder == NULL) {
i++;
continue;
}
if (mode && crtc) {
DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
mode->name, crtc->base.id);
crtc->desired_mode = mode;
connector->encoder->crtc = crtc;
} else {
connector->encoder->crtc = NULL;
connector->encoder = NULL;
}
i++;
}
kfree(crtcs);
kfree(modes);
kfree(enabled);
}
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
* @encoder: encoder to test
* @crtc: crtc to test
*
* Return false if @encoder can't be driven by @crtc, true otherwise.
*/
static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
struct drm_crtc *crtc)
{
struct drm_device *dev;
struct drm_crtc *tmp;
int crtc_mask = 1;
WARN(!crtc, "checking null crtc?");
dev = crtc->dev;
list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
if (tmp == crtc)
break;
crtc_mask <<= 1;
}
if (encoder->possible_crtcs & crtc_mask)
return true;
return false;
}
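/*
* Illustrative example (not in the original source): possible_crtcs is a
* bitmask indexed by position on the crtc_list, as computed above. With
* three CRTCs A, B and C, crtc_mask is 0x1, 0x2 and 0x4 respectively, so
* an encoder advertising possible_crtcs == 0x5 can be driven by A or C
* but never by B.
*/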
/*
* Check the CRTC we're going to map each output to vs. its current
* CRTC. If they don't match, we have to disable the output and the CRTC
* since the driver will have to re-route things.
*/
static void
drm_crtc_prepare_encoders(struct drm_device *dev)
{
struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_encoder *encoder;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
encoder_funcs = encoder->helper_private;
/* Disable unused encoders */
if (encoder->crtc == NULL)
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
/* Disable encoders whose CRTC is about to change */
if (encoder_funcs->get_crtc &&
encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
}
}
/**
* drm_crtc_set_mode - set a mode
* @crtc: CRTC to program
* @mode: mode to use
* @x: width of mode
* @y: height of mode
*
* LOCKING:
* Caller must hold mode config lock.
*
* Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
* to fixup or reject the mode prior to trying to set it.
*
* RETURNS:
* True if the mode was set successfully, or false otherwise.
*/
bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct drm_display_mode *adjusted_mode, saved_mode;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_encoder_helper_funcs *encoder_funcs;
int saved_x, saved_y;
struct drm_encoder *encoder;
bool ret = true;
adjusted_mode = drm_mode_duplicate(dev, mode);
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled)
return true;
saved_mode = crtc->mode;
saved_x = crtc->x;
saved_y = crtc->y;
/* Update crtc values up front so the driver can rely on them for mode
* setting.
*/
crtc->mode = *mode;
crtc->x = x;
crtc->y = y;
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;
encoder_funcs = encoder->helper_private;
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
goto done;
}
}
if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
goto done;
}
/* Prepare the encoders and CRTCs before setting the mode. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;
encoder_funcs = encoder->helper_private;
/* Disable the encoders as the first thing we do. */
encoder_funcs->prepare(encoder);
}
drm_crtc_prepare_encoders(dev);
crtc_funcs->prepare(crtc);
/* Set up the DPLL and any encoders state that needs to adjust or depend
* on the DPLL.
*/
ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
if (!ret)
goto done;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;
DRM_INFO("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
mode->name, mode->base.id);
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
crtc_funcs->commit(crtc);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;
encoder_funcs = encoder->helper_private;
encoder_funcs->commit(encoder);
}
/* XXX free adjustedmode */
drm_mode_destroy(dev, adjusted_mode);
/* FIXME: add subpixel order */
done:
if (!ret) {
crtc->mode = saved_mode;
crtc->x = saved_x;
crtc->y = saved_y;
}
return ret;
}
EXPORT_SYMBOL(drm_crtc_helper_set_mode);
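/*
* Illustrative summary (not in the original source) of the helper call
* sequence above for a successful mode set on one CRTC:
*
*   encoder->mode_fixup() -> crtc->mode_fixup()   adjust or reject the mode
*   encoder->prepare()    -> crtc->prepare()      typically DPMS off
*   crtc->mode_set()      -> encoder->mode_set()  program the hardware
*   crtc->commit()        -> encoder->commit()    typically DPMS on
*/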
/**
* drm_crtc_helper_set_config - set a new config from userspace
* @crtc: CRTC to setup
* @crtc_info: user provided configuration
* @new_mode: new mode to set
* @connector_set: set of connectors for the new config
* @fb: new framebuffer
*
* LOCKING:
* Caller must hold mode config lock.
*
* Setup a new configuration, provided by the user in @crtc_info, and enable
* it.
*
* RETURNS:
* Zero. (FIXME)
*/
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
struct drm_crtc *save_crtcs, *new_crtc, *crtc;
struct drm_encoder *save_encoders, *new_encoder, *encoder;
struct drm_framebuffer *old_fb = NULL;
bool mode_changed = false; /* if true do a full mode set */
bool fb_changed = false; /* if true and !mode_changed just do a flip */
struct drm_connector *save_connectors, *connector;
int count = 0, ro, fail = 0;
struct drm_crtc_helper_funcs *crtc_funcs;
int ret = 0;
DRM_DEBUG_KMS("\n");
if (!set)
return -EINVAL;
if (!set->crtc)
return -EINVAL;
if (!set->crtc->helper_private)
return -EINVAL;
crtc_funcs = set->crtc->helper_private;
DRM_DEBUG_KMS("crtc: %p %d fb: %p connectors: %p num_connectors:"
" %d (x, y) (%i, %i)\n",
set->crtc, set->crtc->base.id, set->fb, set->connectors,
(int)set->num_connectors, set->x, set->y);
dev = set->crtc->dev;
/* Allocate space for the backup of all (non-pointer) crtc, encoder and
* connector data. */
save_crtcs = kzalloc(dev->mode_config.num_crtc *
sizeof(struct drm_crtc), GFP_KERNEL);
if (!save_crtcs)
return -ENOMEM;
save_encoders = kzalloc(dev->mode_config.num_encoder *
sizeof(struct drm_encoder), GFP_KERNEL);
if (!save_encoders) {
kfree(save_crtcs);
return -ENOMEM;
}
save_connectors = kzalloc(dev->mode_config.num_connector *
sizeof(struct drm_connector), GFP_KERNEL);
if (!save_connectors) {
kfree(save_crtcs);
kfree(save_encoders);
return -ENOMEM;
}
/* Copy data. Note that driver private data is not affected.
* Should anything bad happen only the expected state is
* restored, not the driver's personal bookkeeping.
*/
count = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
save_crtcs[count++] = *crtc;
}
count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
save_encoders[count++] = *encoder;
}
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
save_connectors[count++] = *connector;
}
/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
if (set->crtc->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->fb == NULL) {
DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
mode_changed = true;
} else if (set->fb == NULL) {
mode_changed = true;
} else if ((set->fb->bits_per_pixel !=
set->crtc->fb->bits_per_pixel) ||
set->fb->depth != set->crtc->fb->depth)
/* Changing the pixel format requires a full mode set. */
mode_changed = true;
else
fb_changed = true;
}
if (set->x != set->crtc->x || set->y != set->crtc->y)
fb_changed = true;
if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
DRM_DEBUG_KMS("modes are different, full mode set\n");
drm_mode_debug_printmodeline(&set->crtc->mode);
drm_mode_debug_printmodeline(set->mode);
mode_changed = true;
}
/* a) traverse passed in connector list and get encoders for them */
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
new_encoder = connector->encoder;
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro] == connector) {
new_encoder = connector_funcs->best_encoder(connector);
/* if we can't get an encoder for a connector
we are setting now, then fail */
if (new_encoder == NULL)
/* don't break so the fail path works correctly */
fail = 1;
break;
}
}
if (new_encoder != connector->encoder) {
DRM_DEBUG_KMS("encoder changed, full mode switch\n");
mode_changed = true;
/* If the encoder is reused for another connector, then
* the appropriate crtc will be set later.
*/
if (connector->encoder)
connector->encoder->crtc = NULL;
connector->encoder = new_encoder;
}
}
if (fail) {
ret = -EINVAL;
goto fail;
}
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder)
continue;
if (connector->encoder->crtc == set->crtc)
new_crtc = NULL;
else
new_crtc = connector->encoder->crtc;
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro] == connector)
new_crtc = set->crtc;
}
/* Make sure the new CRTC will work with the encoder */
if (new_crtc &&
!drm_encoder_crtc_ok(connector->encoder, new_crtc)) {
ret = -EINVAL;
goto fail;
}
if (new_crtc != connector->encoder->crtc) {
DRM_DEBUG_KMS("crtc changed, full mode switch\n");
mode_changed = true;
connector->encoder->crtc = new_crtc;
}
DRM_DEBUG_KMS("setting connector %d crtc to %p\n",
connector->base.id, new_crtc);
}
/* mode_set_base is not a required function */
if (fb_changed && !crtc_funcs->mode_set_base)
mode_changed = true;
if (mode_changed) {
old_fb = set->crtc->fb;
set->crtc->fb = set->fb;
set->crtc->enabled = (set->mode != NULL);
if (set->mode != NULL) {
DRM_DEBUG_KMS("attempting to set mode from"
" userspace\n");
drm_mode_debug_printmodeline(set->mode);
if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
set->x, set->y,
old_fb)) {
DRM_ERROR("failed to set mode on crtc %p\n",
set->crtc);
ret = -EINVAL;
goto fail;
}
/* TODO are these needed? */
set->crtc->desired_x = set->x;
set->crtc->desired_y = set->y;
set->crtc->desired_mode = set->mode;
}
drm_helper_disable_unused_functions(dev);
} else if (fb_changed) {
set->crtc->x = set->x;
set->crtc->y = set->y;
old_fb = set->crtc->fb;
if (set->crtc->fb != set->fb)
set->crtc->fb = set->fb;
ret = crtc_funcs->mode_set_base(set->crtc,
set->x, set->y, old_fb);
if (ret != 0)
goto fail;
}
kfree(save_connectors);
kfree(save_encoders);
kfree(save_crtcs);
return 0;
fail:
/* Restore all previous data. */
count = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
*crtc = save_crtcs[count++];
}
count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
*encoder = save_encoders[count++];
}
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
*connector = save_connectors[count++];
}
kfree(save_connectors);
kfree(save_encoders);
kfree(save_crtcs);
return ret;
}
EXPORT_SYMBOL(drm_crtc_helper_set_config);
bool drm_helper_plugged_event(struct drm_device *dev)
{
DRM_DEBUG_KMS("\n");
drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
dev->mode_config.max_height);
drm_setup_crtcs(dev);
/* alert the driver fb layer */
dev->mode_config.funcs->fb_changed(dev);
/* FIXME: send hotplug event */
return true;
}
/**
* drm_helper_initial_config - setup a sane initial connector configuration
* @dev: DRM device
*
* LOCKING:
* Called at init time, must take mode config lock.
*
* Scan the CRTCs and connectors and try to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
* a new framebuffer object as the backing store.
*
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
bool drm_helper_initial_config(struct drm_device *dev)
{
int count = 0;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
drm_fb_helper_parse_command_line(dev);
count = drm_helper_probe_connector_modes(dev,
dev->mode_config.max_width,
dev->mode_config.max_height);
/*
* we shouldn't end up with no modes here.
*/
WARN(!count, "No connectors reported connected with modes\n");
drm_setup_crtcs(dev);
/* alert the driver fb layer */
dev->mode_config.funcs->fb_changed(dev);
return 0;
}
EXPORT_SYMBOL(drm_helper_initial_config);
static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
{
int dpms = DRM_MODE_DPMS_OFF;
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder)
if (connector->dpms < dpms)
dpms = connector->dpms;
return dpms;
}
static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
{
int dpms = DRM_MODE_DPMS_OFF;
struct drm_connector *connector;
struct drm_device *dev = crtc->dev;
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder && connector->encoder->crtc == crtc)
if (connector->dpms < dpms)
dpms = connector->dpms;
return dpms;
}
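/*
* Illustrative note (not in the original source): DPMS levels are ordered
* DRM_MODE_DPMS_ON (0) < STANDBY < SUSPEND < OFF (3), so the
* "connector->dpms < dpms" tests above pick the most-awake state among all
* connectors sharing the encoder or CRTC; one active connector is enough
* to keep the shared resource powered.
*/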
/**
* drm_helper_connector_dpms
* @connector: affected connector
* @mode: DPMS mode
*
* Calls the low-level connector DPMS function, then
* calls appropriate encoder and crtc DPMS functions as well
*/
void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
{
struct drm_encoder *encoder = connector->encoder;
struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
int old_dpms;
if (mode == connector->dpms)
return;
old_dpms = connector->dpms;
connector->dpms = mode;
/* from off to on, do crtc then encoder */
if (mode < old_dpms) {
if (crtc) {
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
if (crtc_funcs->dpms)
(*crtc_funcs->dpms) (crtc,
drm_helper_choose_crtc_dpms(crtc));
}
if (encoder) {
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
if (encoder_funcs->dpms)
(*encoder_funcs->dpms) (encoder,
drm_helper_choose_encoder_dpms(encoder));
}
}
/* from on to off, do encoder then crtc */
if (mode > old_dpms) {
if (encoder) {
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
if (encoder_funcs->dpms)
(*encoder_funcs->dpms) (encoder,
drm_helper_choose_encoder_dpms(encoder));
}
if (crtc) {
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
if (crtc_funcs->dpms)
(*crtc_funcs->dpms) (crtc,
drm_helper_choose_crtc_dpms(crtc));
}
}
return;
}
EXPORT_SYMBOL(drm_helper_connector_dpms);
/**
* drm_helper_hotplug_stage_two
* @dev: DRM device
*
* LOCKING:
* Caller must hold mode config lock, function might grab struct lock.
*
* Stage two of a hotplug.
*
* RETURNS:
* Zero on success, errno on failure.
*/
int drm_helper_hotplug_stage_two(struct drm_device *dev)
{
drm_helper_plugged_event(dev);
return 0;
}
EXPORT_SYMBOL(drm_helper_hotplug_stage_two);
int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd *mode_cmd)
{
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
fb->pitch = mode_cmd->pitch;
fb->bits_per_pixel = mode_cmd->bpp;
fb->depth = mode_cmd->depth;
return 0;
}
EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
int drm_helper_resume_force_mode(struct drm_device *dev)
{
struct drm_crtc *crtc;
int ret;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->enabled)
continue;
ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
if (ret == false)
DRM_ERROR("failed to set mode on crtc %p\n", crtc);
}
/* disable the unused connectors while restoring the modesetting */
drm_helper_disable_unused_functions(dev);
return 0;
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
| gpl-2.0 |
chinaopenx/linux | drivers/mtd/nand/xway_nand.c | 540 | 5202 | /*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* Copyright © 2012 John Crispin <blogic@openwrt.org>
*/
#include <linux/mtd/nand.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <lantiq_soc.h>
/* nand registers */
#define EBU_ADDSEL1 0x24
#define EBU_NAND_CON 0xB0
#define EBU_NAND_WAIT 0xB4
#define EBU_NAND_ECC0 0xB8
#define EBU_NAND_ECC_AC 0xBC
/* nand commands */
#define NAND_CMD_ALE (1 << 2)
#define NAND_CMD_CLE (1 << 3)
#define NAND_CMD_CS (1 << 4)
#define NAND_WRITE_CMD_RESET 0xff
#define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE)
#define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE)
#define NAND_WRITE_DATA (NAND_CMD_CS)
#define NAND_READ_DATA (NAND_CMD_CS)
#define NAND_WAIT_WR_C (1 << 3)
#define NAND_WAIT_RD (0x1)
/* we need to tell the EBU which addr we mapped the nand to */
#define ADDSEL1_MASK(x) (x << 4)
#define ADDSEL1_REGEN 1
/* we need to tell the EBU that we have nand attached and set it up properly */
#define BUSCON1_SETUP (1 << 22)
#define BUSCON1_BCGEN_RES (0x3 << 12)
#define BUSCON1_WAITWRC2 (2 << 8)
#define BUSCON1_WAITRDC2 (2 << 6)
#define BUSCON1_HOLDC1 (1 << 4)
#define BUSCON1_RECOVC1 (1 << 2)
#define BUSCON1_CMULT4 1
#define NAND_CON_CE (1 << 20)
#define NAND_CON_OUT_CS1 (1 << 10)
#define NAND_CON_IN_CS1 (1 << 8)
#define NAND_CON_PRE_P (1 << 7)
#define NAND_CON_WP_P (1 << 6)
#define NAND_CON_SE_P (1 << 5)
#define NAND_CON_CS_P (1 << 4)
#define NAND_CON_CSMUX (1 << 1)
#define NAND_CON_NANDM 1
static void xway_reset_chip(struct nand_chip *chip)
{
unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W;
unsigned long flags;
nandaddr &= ~NAND_WRITE_ADDR;
nandaddr |= NAND_WRITE_CMD;
/* finish with a reset */
spin_lock_irqsave(&ebu_lock, flags);
writeb(NAND_WRITE_CMD_RESET, (void __iomem *) nandaddr);
while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
;
spin_unlock_irqrestore(&ebu_lock, flags);
}
static void xway_select_chip(struct mtd_info *mtd, int chip)
{
switch (chip) {
case -1:
ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
break;
case 0:
ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
break;
default:
BUG();
}
}
static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
struct nand_chip *this = mtd->priv;
unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
unsigned long flags;
if (ctrl & NAND_CTRL_CHANGE) {
nandaddr &= ~(NAND_WRITE_CMD | NAND_WRITE_ADDR);
if (ctrl & NAND_CLE)
nandaddr |= NAND_WRITE_CMD;
else
nandaddr |= NAND_WRITE_ADDR;
this->IO_ADDR_W = (void __iomem *) nandaddr;
}
if (cmd != NAND_CMD_NONE) {
spin_lock_irqsave(&ebu_lock, flags);
writeb(cmd, this->IO_ADDR_W);
while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
;
spin_unlock_irqrestore(&ebu_lock, flags);
}
}
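/*
* Illustrative note (not in the original source): on this controller the
* NAND ALE and CLE pins are wired to EBU address lines, so the kind of
* write cycle is selected purely by which address inside the mapped window
* is written, as xway_cmd_ctrl() does above. For example, a hypothetical
* read setup would look like:
*
*   writeb(NAND_CMD_READ0, (void __iomem *)(base | NAND_WRITE_CMD));
*   writeb(column, (void __iomem *)(base | NAND_WRITE_ADDR));
*/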
static int xway_dev_ready(struct mtd_info *mtd)
{
return ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD;
}
static unsigned char xway_read_byte(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
unsigned long nandaddr = (unsigned long) this->IO_ADDR_R;
unsigned long flags;
int ret;
spin_lock_irqsave(&ebu_lock, flags);
ret = ltq_r8((void __iomem *)(nandaddr + NAND_READ_DATA));
spin_unlock_irqrestore(&ebu_lock, flags);
return ret;
}
static int xway_nand_probe(struct platform_device *pdev)
{
struct nand_chip *this = platform_get_drvdata(pdev);
unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
const __be32 *cs = of_get_property(pdev->dev.of_node,
"lantiq,cs", NULL);
u32 cs_flag = 0;
/* load our CS from the DT. Either we find a valid 1 or default to 0 */
if (cs && (*cs == 1))
cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
/* setup the EBU to run in NAND mode on our base addr */
ltq_ebu_w32(CPHYSADDR(nandaddr)
| ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
| BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
| BUSCON1_CMULT4, LTQ_EBU_BUSCON1);
ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
| NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
| cs_flag, EBU_NAND_CON);
/* finish with a reset */
xway_reset_chip(this);
return 0;
}
static struct platform_nand_data xway_nand_data = {
.chip = {
.nr_chips = 1,
.chip_delay = 30,
},
.ctrl = {
.probe = xway_nand_probe,
.cmd_ctrl = xway_cmd_ctrl,
.dev_ready = xway_dev_ready,
.select_chip = xway_select_chip,
.read_byte = xway_read_byte,
}
};
/*
* Try to find the node inside the DT. If it is available, attach our
* platform_nand_data.
*/
static int __init xway_register_nand(void)
{
struct device_node *node;
struct platform_device *pdev;
node = of_find_compatible_node(NULL, NULL, "lantiq,nand-xway");
if (!node)
return -ENOENT;
pdev = of_find_device_by_node(node);
if (!pdev)
return -EINVAL;
pdev->dev.platform_data = &xway_nand_data;
of_node_put(node);
return 0;
}
subsys_initcall(xway_register_nand);
| gpl-2.0 |
Celtus/custom-core | dep/ACE_wrappers/ace/OS_NS_strings.cpp | 540 | 1992 | // $Id: OS_NS_strings.cpp 91286 2010-08-05 09:04:31Z johnnyw $
#include "ace/OS_NS_strings.h"
#if !defined (ACE_HAS_INLINED_OSCALLS)
# include "ace/OS_NS_strings.inl"
#endif /* ACE_HAS_INLINED_OSCALLS */
#if defined (ACE_LACKS_STRCASECMP)
# include "ace/OS_NS_ctype.h"
#endif /* ACE_LACKS_STRCASECMP */
ACE_BEGIN_VERSIONED_NAMESPACE_DECL
#if defined (ACE_LACKS_STRCASECMP)
int
ACE_OS::strcasecmp_emulation (const char *s, const char *t)
{
const char *scan1 = s;
const char *scan2 = t;
while (*scan1 != 0
&& ACE_OS::ace_tolower (*scan1)
== ACE_OS::ace_tolower (*scan2))
{
++scan1;
++scan2;
}
// The following case analysis is necessary so that characters which
// look negative collate low against normal characters but high
// against the end-of-string NUL.
if (*scan1 == '\0' && *scan2 == '\0')
return 0;
else if (*scan1 == '\0')
return -1;
else if (*scan2 == '\0')
return 1;
else
return ACE_OS::ace_tolower (*scan1) - ACE_OS::ace_tolower (*scan2);
}
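// Illustrative example (not in the original source): with 8-bit characters
// that look negative, e.g. s = "\xe9" and t = "", the explicit end-of-string
// checks above return 1 (s collates high against the terminating NUL),
// whereas the bare tolower subtraction could return a negative value on
// platforms where plain char is signed.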
#endif /* ACE_LACKS_STRCASECMP */
#if defined (ACE_LACKS_STRCASECMP)
int
ACE_OS::strncasecmp_emulation (const char *s,
const char *t,
size_t len)
{
const char *scan1 = s;
const char *scan2 = t;
size_t count = 0;
while (count++ < len
&& *scan1 != 0
&& ACE_OS::ace_tolower (*scan1)
== ACE_OS::ace_tolower (*scan2))
{
++scan1;
++scan2;
}
if (count > len)
return 0;
// The following case analysis is necessary so that characters which
// look negative collate low against normal characters but high
// against the end-of-string NUL.
if (*scan1 == '\0' && *scan2 == '\0')
return 0;
else if (*scan1 == '\0')
return -1;
else if (*scan2 == '\0')
return 1;
else
return ACE_OS::ace_tolower (*scan1) - ACE_OS::ace_tolower (*scan2);
}
#endif /* ACE_LACKS_STRCASECMP */
ACE_END_VERSIONED_NAMESPACE_DECL
| gpl-2.0 |
tiny4579/gingertiny-v2 | sound/soc/codecs/wm8900.c | 540 | 40800 | /*
* wm8900.c -- WM8900 ALSA Soc Audio driver
*
* Copyright 2007, 2008 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* TODO:
* - Tristating.
* - TDM.
* - Jack detect.
* - FLL source configuration, currently only MCLK is supported.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include "wm8900.h"
/* WM8900 register space */
#define WM8900_REG_RESET 0x0
#define WM8900_REG_ID 0x0
#define WM8900_REG_POWER1 0x1
#define WM8900_REG_POWER2 0x2
#define WM8900_REG_POWER3 0x3
#define WM8900_REG_AUDIO1 0x4
#define WM8900_REG_AUDIO2 0x5
#define WM8900_REG_CLOCKING1 0x6
#define WM8900_REG_CLOCKING2 0x7
#define WM8900_REG_AUDIO3 0x8
#define WM8900_REG_AUDIO4 0x9
#define WM8900_REG_DACCTRL 0xa
#define WM8900_REG_LDAC_DV 0xb
#define WM8900_REG_RDAC_DV 0xc
#define WM8900_REG_SIDETONE 0xd
#define WM8900_REG_ADCCTRL 0xe
#define WM8900_REG_LADC_DV 0xf
#define WM8900_REG_RADC_DV 0x10
#define WM8900_REG_GPIO 0x12
#define WM8900_REG_INCTL 0x15
#define WM8900_REG_LINVOL 0x16
#define WM8900_REG_RINVOL 0x17
#define WM8900_REG_INBOOSTMIX1 0x18
#define WM8900_REG_INBOOSTMIX2 0x19
#define WM8900_REG_ADCPATH 0x1a
#define WM8900_REG_AUXBOOST 0x1b
#define WM8900_REG_ADDCTL 0x1e
#define WM8900_REG_FLLCTL1 0x24
#define WM8900_REG_FLLCTL2 0x25
#define WM8900_REG_FLLCTL3 0x26
#define WM8900_REG_FLLCTL4 0x27
#define WM8900_REG_FLLCTL5 0x28
#define WM8900_REG_FLLCTL6 0x29
#define WM8900_REG_LOUTMIXCTL1 0x2c
#define WM8900_REG_ROUTMIXCTL1 0x2d
#define WM8900_REG_BYPASS1 0x2e
#define WM8900_REG_BYPASS2 0x2f
#define WM8900_REG_AUXOUT_CTL 0x30
#define WM8900_REG_LOUT1CTL 0x33
#define WM8900_REG_ROUT1CTL 0x34
#define WM8900_REG_LOUT2CTL 0x35
#define WM8900_REG_ROUT2CTL 0x36
#define WM8900_REG_HPCTL1 0x3a
#define WM8900_REG_OUTBIASCTL 0x73
#define WM8900_MAXREG 0x80
#define WM8900_REG_ADDCTL_OUT1_DIS 0x80
#define WM8900_REG_ADDCTL_OUT2_DIS 0x40
#define WM8900_REG_ADDCTL_VMID_DIS 0x20
#define WM8900_REG_ADDCTL_BIAS_SRC 0x10
#define WM8900_REG_ADDCTL_VMID_SOFTST 0x04
#define WM8900_REG_ADDCTL_TEMP_SD 0x02
#define WM8900_REG_GPIO_TEMP_ENA 0x2
#define WM8900_REG_POWER1_STARTUP_BIAS_ENA 0x0100
#define WM8900_REG_POWER1_BIAS_ENA 0x0008
#define WM8900_REG_POWER1_VMID_BUF_ENA 0x0004
#define WM8900_REG_POWER1_FLL_ENA 0x0040
#define WM8900_REG_POWER2_SYSCLK_ENA 0x8000
#define WM8900_REG_POWER2_ADCL_ENA 0x0002
#define WM8900_REG_POWER2_ADCR_ENA 0x0001
#define WM8900_REG_POWER3_DACL_ENA 0x0002
#define WM8900_REG_POWER3_DACR_ENA 0x0001
#define WM8900_REG_AUDIO1_AIF_FMT_MASK 0x0018
#define WM8900_REG_AUDIO1_LRCLK_INV 0x0080
#define WM8900_REG_AUDIO1_BCLK_INV 0x0100
#define WM8900_REG_CLOCKING1_BCLK_DIR 0x1
#define WM8900_REG_CLOCKING1_MCLK_SRC 0x100
#define WM8900_REG_CLOCKING1_BCLK_MASK (~0x01e)
#define WM8900_REG_CLOCKING1_OPCLK_MASK (~0x7000)
#define WM8900_REG_CLOCKING2_ADC_CLKDIV 0xe0
#define WM8900_REG_CLOCKING2_DAC_CLKDIV 0x1c
#define WM8900_REG_DACCTRL_MUTE 0x004
#define WM8900_REG_DACCTRL_DAC_SB_FILT 0x100
#define WM8900_REG_DACCTRL_AIF_LRCLKRATE 0x400
#define WM8900_REG_AUDIO3_ADCLRC_DIR 0x0800
#define WM8900_REG_AUDIO4_DACLRC_DIR 0x0800
#define WM8900_REG_FLLCTL1_OSC_ENA 0x100
#define WM8900_REG_FLLCTL6_FLL_SLOW_LOCK_REF 0x100
#define WM8900_REG_HPCTL1_HP_IPSTAGE_ENA 0x80
#define WM8900_REG_HPCTL1_HP_OPSTAGE_ENA 0x40
#define WM8900_REG_HPCTL1_HP_CLAMP_IP 0x20
#define WM8900_REG_HPCTL1_HP_CLAMP_OP 0x10
#define WM8900_REG_HPCTL1_HP_SHORT 0x08
#define WM8900_REG_HPCTL1_HP_SHORT2 0x04
#define WM8900_LRC_MASK 0xfc00
struct snd_soc_codec_device soc_codec_dev_wm8900;
struct wm8900_priv {
struct snd_soc_codec codec;
u16 reg_cache[WM8900_MAXREG];
u32 fll_in; /* FLL input frequency */
u32 fll_out; /* FLL output frequency */
};
/*
* wm8900 register cache. We can't read the entire register space and we
* have slow control buses so we cache the registers.
*/
static const u16 wm8900_reg_defaults[WM8900_MAXREG] = {
0x8900, 0x0000,
0xc000, 0x0000,
0x4050, 0x4000,
0x0008, 0x0000,
0x0040, 0x0040,
0x1004, 0x00c0,
0x00c0, 0x0000,
0x0100, 0x00c0,
0x00c0, 0x0000,
0xb001, 0x0000,
0x0000, 0x0044,
0x004c, 0x004c,
0x0044, 0x0044,
0x0000, 0x0044,
0x0000, 0x0000,
0x0002, 0x0000,
0x0000, 0x0000,
0x0000, 0x0000,
0x0008, 0x0000,
0x0000, 0x0008,
0x0097, 0x0100,
0x0000, 0x0000,
0x0050, 0x0050,
0x0055, 0x0055,
0x0055, 0x0000,
0x0000, 0x0079,
0x0079, 0x0079,
0x0079, 0x0000,
/* Remaining registers all zero */
};
static int wm8900_volatile_register(unsigned int reg)
{
switch (reg) {
case WM8900_REG_ID:
case WM8900_REG_POWER1:
return 1;
default:
return 0;
}
}
static void wm8900_reset(struct snd_soc_codec *codec)
{
snd_soc_write(codec, WM8900_REG_RESET, 0);
memcpy(codec->reg_cache, wm8900_reg_defaults,
sizeof(wm8900_reg_defaults));
}
static int wm8900_hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
u16 hpctl1 = snd_soc_read(codec, WM8900_REG_HPCTL1);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
/* Clamp headphone outputs */
hpctl1 = WM8900_REG_HPCTL1_HP_CLAMP_IP |
WM8900_REG_HPCTL1_HP_CLAMP_OP;
snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
break;
case SND_SOC_DAPM_POST_PMU:
/* Enable the input stage */
hpctl1 &= ~WM8900_REG_HPCTL1_HP_CLAMP_IP;
hpctl1 |= WM8900_REG_HPCTL1_HP_SHORT |
WM8900_REG_HPCTL1_HP_SHORT2 |
WM8900_REG_HPCTL1_HP_IPSTAGE_ENA;
snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
msleep(400);
/* Enable the output stage */
hpctl1 &= ~WM8900_REG_HPCTL1_HP_CLAMP_OP;
hpctl1 |= WM8900_REG_HPCTL1_HP_OPSTAGE_ENA;
snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
/* Remove the shorts */
hpctl1 &= ~WM8900_REG_HPCTL1_HP_SHORT2;
snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
hpctl1 &= ~WM8900_REG_HPCTL1_HP_SHORT;
snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
break;
case SND_SOC_DAPM_PRE_PMD:
/* Short the output */
hpctl1 |= WM8900_REG_HPCTL1_HP_SHORT;
snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
/* Disable the output stage */
hpctl1 &= ~WM8900_REG_HPCTL1_HP_OPSTAGE_ENA;
snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
/* Clamp the outputs and power down input */
hpctl1 |= WM8900_REG_HPCTL1_HP_CLAMP_IP |
WM8900_REG_HPCTL1_HP_CLAMP_OP;
hpctl1 &= ~WM8900_REG_HPCTL1_HP_IPSTAGE_ENA;
snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
break;
case SND_SOC_DAPM_POST_PMD:
/* Disable everything */
snd_soc_write(codec, WM8900_REG_HPCTL1, 0);
break;
default:
BUG();
}
return 0;
}
static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -5700, 100, 0);
static const DECLARE_TLV_DB_SCALE(out_mix_tlv, -1500, 300, 0);
static const DECLARE_TLV_DB_SCALE(in_boost_tlv, -1200, 600, 0);
static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1200, 100, 0);
static const DECLARE_TLV_DB_SCALE(dac_boost_tlv, 0, 600, 0);
static const DECLARE_TLV_DB_SCALE(dac_tlv, -7200, 75, 1);
static const DECLARE_TLV_DB_SCALE(adc_svol_tlv, -3600, 300, 0);
static const DECLARE_TLV_DB_SCALE(adc_tlv, -7200, 75, 1);
static const char *mic_bias_level_txt[] = { "0.9*AVDD", "0.65*AVDD" };
static const struct soc_enum mic_bias_level =
SOC_ENUM_SINGLE(WM8900_REG_INCTL, 8, 2, mic_bias_level_txt);
static const char *dac_mute_rate_txt[] = { "Fast", "Slow" };
static const struct soc_enum dac_mute_rate =
SOC_ENUM_SINGLE(WM8900_REG_DACCTRL, 7, 2, dac_mute_rate_txt);
static const char *dac_deemphasis_txt[] = {
"Disabled", "32kHz", "44.1kHz", "48kHz"
};
static const struct soc_enum dac_deemphasis =
SOC_ENUM_SINGLE(WM8900_REG_DACCTRL, 4, 4, dac_deemphasis_txt);
static const char *adc_hpf_cut_txt[] = {
"Hi-fi mode", "Voice mode 1", "Voice mode 2", "Voice mode 3"
};
static const struct soc_enum adc_hpf_cut =
SOC_ENUM_SINGLE(WM8900_REG_ADCCTRL, 5, 4, adc_hpf_cut_txt);
static const char *lr_txt[] = {
"Left", "Right"
};
static const struct soc_enum aifl_src =
SOC_ENUM_SINGLE(WM8900_REG_AUDIO1, 15, 2, lr_txt);
static const struct soc_enum aifr_src =
SOC_ENUM_SINGLE(WM8900_REG_AUDIO1, 14, 2, lr_txt);
static const struct soc_enum dacl_src =
SOC_ENUM_SINGLE(WM8900_REG_AUDIO2, 15, 2, lr_txt);
static const struct soc_enum dacr_src =
SOC_ENUM_SINGLE(WM8900_REG_AUDIO2, 14, 2, lr_txt);
static const char *sidetone_txt[] = {
"Disabled", "Left ADC", "Right ADC"
};
static const struct soc_enum dacl_sidetone =
SOC_ENUM_SINGLE(WM8900_REG_SIDETONE, 2, 3, sidetone_txt);
static const struct soc_enum dacr_sidetone =
SOC_ENUM_SINGLE(WM8900_REG_SIDETONE, 0, 3, sidetone_txt);
static const struct snd_kcontrol_new wm8900_snd_controls[] = {
SOC_ENUM("Mic Bias Level", mic_bias_level),
SOC_SINGLE_TLV("Left Input PGA Volume", WM8900_REG_LINVOL, 0, 31, 0,
in_pga_tlv),
SOC_SINGLE("Left Input PGA Switch", WM8900_REG_LINVOL, 6, 1, 1),
SOC_SINGLE("Left Input PGA ZC Switch", WM8900_REG_LINVOL, 7, 1, 0),
SOC_SINGLE_TLV("Right Input PGA Volume", WM8900_REG_RINVOL, 0, 31, 0,
in_pga_tlv),
SOC_SINGLE("Right Input PGA Switch", WM8900_REG_RINVOL, 6, 1, 1),
SOC_SINGLE("Right Input PGA ZC Switch", WM8900_REG_RINVOL, 7, 1, 0),
SOC_SINGLE("DAC Soft Mute Switch", WM8900_REG_DACCTRL, 6, 1, 1),
SOC_ENUM("DAC Mute Rate", dac_mute_rate),
SOC_SINGLE("DAC Mono Switch", WM8900_REG_DACCTRL, 9, 1, 0),
SOC_ENUM("DAC Deemphasis", dac_deemphasis),
SOC_SINGLE("DAC Sigma-Delta Modulator Clock Switch", WM8900_REG_DACCTRL,
12, 1, 0),
SOC_SINGLE("ADC HPF Switch", WM8900_REG_ADCCTRL, 8, 1, 0),
SOC_ENUM("ADC HPF Cut-Off", adc_hpf_cut),
SOC_DOUBLE("ADC Invert Switch", WM8900_REG_ADCCTRL, 1, 0, 1, 0),
SOC_SINGLE_TLV("Left ADC Sidetone Volume", WM8900_REG_SIDETONE, 9, 12, 0,
adc_svol_tlv),
SOC_SINGLE_TLV("Right ADC Sidetone Volume", WM8900_REG_SIDETONE, 5, 12, 0,
adc_svol_tlv),
SOC_ENUM("Left Digital Audio Source", aifl_src),
SOC_ENUM("Right Digital Audio Source", aifr_src),
SOC_SINGLE_TLV("DAC Input Boost Volume", WM8900_REG_AUDIO2, 10, 4, 0,
dac_boost_tlv),
SOC_ENUM("Left DAC Source", dacl_src),
SOC_ENUM("Right DAC Source", dacr_src),
SOC_ENUM("Left DAC Sidetone", dacl_sidetone),
SOC_ENUM("Right DAC Sidetone", dacr_sidetone),
SOC_DOUBLE("DAC Invert Switch", WM8900_REG_DACCTRL, 1, 0, 1, 0),
SOC_DOUBLE_R_TLV("Digital Playback Volume",
WM8900_REG_LDAC_DV, WM8900_REG_RDAC_DV,
1, 96, 0, dac_tlv),
SOC_DOUBLE_R_TLV("Digital Capture Volume",
WM8900_REG_LADC_DV, WM8900_REG_RADC_DV, 1, 119, 0, adc_tlv),
SOC_SINGLE_TLV("LINPUT3 Bypass Volume", WM8900_REG_LOUTMIXCTL1, 4, 7, 0,
out_mix_tlv),
SOC_SINGLE_TLV("RINPUT3 Bypass Volume", WM8900_REG_ROUTMIXCTL1, 4, 7, 0,
out_mix_tlv),
SOC_SINGLE_TLV("Left AUX Bypass Volume", WM8900_REG_AUXOUT_CTL, 4, 7, 0,
out_mix_tlv),
SOC_SINGLE_TLV("Right AUX Bypass Volume", WM8900_REG_AUXOUT_CTL, 0, 7, 0,
out_mix_tlv),
SOC_SINGLE_TLV("LeftIn to RightOut Mixer Volume", WM8900_REG_BYPASS1, 0, 7, 0,
out_mix_tlv),
SOC_SINGLE_TLV("LeftIn to LeftOut Mixer Volume", WM8900_REG_BYPASS1, 4, 7, 0,
out_mix_tlv),
SOC_SINGLE_TLV("RightIn to LeftOut Mixer Volume", WM8900_REG_BYPASS2, 0, 7, 0,
out_mix_tlv),
SOC_SINGLE_TLV("RightIn to RightOut Mixer Volume", WM8900_REG_BYPASS2, 4, 7, 0,
out_mix_tlv),
SOC_SINGLE_TLV("IN2L Boost Volume", WM8900_REG_INBOOSTMIX1, 0, 3, 0,
in_boost_tlv),
SOC_SINGLE_TLV("IN3L Boost Volume", WM8900_REG_INBOOSTMIX1, 4, 3, 0,
in_boost_tlv),
SOC_SINGLE_TLV("IN2R Boost Volume", WM8900_REG_INBOOSTMIX2, 0, 3, 0,
in_boost_tlv),
SOC_SINGLE_TLV("IN3R Boost Volume", WM8900_REG_INBOOSTMIX2, 4, 3, 0,
in_boost_tlv),
SOC_SINGLE_TLV("Left AUX Boost Volume", WM8900_REG_AUXBOOST, 4, 3, 0,
in_boost_tlv),
SOC_SINGLE_TLV("Right AUX Boost Volume", WM8900_REG_AUXBOOST, 0, 3, 0,
in_boost_tlv),
SOC_DOUBLE_R_TLV("LINEOUT1 Volume", WM8900_REG_LOUT1CTL, WM8900_REG_ROUT1CTL,
0, 63, 0, out_pga_tlv),
SOC_DOUBLE_R("LINEOUT1 Switch", WM8900_REG_LOUT1CTL, WM8900_REG_ROUT1CTL,
6, 1, 1),
SOC_DOUBLE_R("LINEOUT1 ZC Switch", WM8900_REG_LOUT1CTL, WM8900_REG_ROUT1CTL,
7, 1, 0),
SOC_DOUBLE_R_TLV("LINEOUT2 Volume",
WM8900_REG_LOUT2CTL, WM8900_REG_ROUT2CTL,
0, 63, 0, out_pga_tlv),
SOC_DOUBLE_R("LINEOUT2 Switch",
WM8900_REG_LOUT2CTL, WM8900_REG_ROUT2CTL, 6, 1, 1),
SOC_DOUBLE_R("LINEOUT2 ZC Switch",
WM8900_REG_LOUT2CTL, WM8900_REG_ROUT2CTL, 7, 1, 0),
SOC_SINGLE("LINEOUT2 LP -12dB", WM8900_REG_LOUTMIXCTL1,
0, 1, 1),
};
static const struct snd_kcontrol_new wm8900_dapm_loutput2_control =
SOC_DAPM_SINGLE("LINEOUT2L Switch", WM8900_REG_POWER3, 6, 1, 0);
static const struct snd_kcontrol_new wm8900_dapm_routput2_control =
SOC_DAPM_SINGLE("LINEOUT2R Switch", WM8900_REG_POWER3, 5, 1, 0);
static const struct snd_kcontrol_new wm8900_loutmix_controls[] = {
SOC_DAPM_SINGLE("LINPUT3 Bypass Switch", WM8900_REG_LOUTMIXCTL1, 7, 1, 0),
SOC_DAPM_SINGLE("AUX Bypass Switch", WM8900_REG_AUXOUT_CTL, 7, 1, 0),
SOC_DAPM_SINGLE("Left Input Mixer Switch", WM8900_REG_BYPASS1, 7, 1, 0),
SOC_DAPM_SINGLE("Right Input Mixer Switch", WM8900_REG_BYPASS2, 3, 1, 0),
SOC_DAPM_SINGLE("DACL Switch", WM8900_REG_LOUTMIXCTL1, 8, 1, 0),
};
static const struct snd_kcontrol_new wm8900_routmix_controls[] = {
SOC_DAPM_SINGLE("RINPUT3 Bypass Switch", WM8900_REG_ROUTMIXCTL1, 7, 1, 0),
SOC_DAPM_SINGLE("AUX Bypass Switch", WM8900_REG_AUXOUT_CTL, 3, 1, 0),
SOC_DAPM_SINGLE("Left Input Mixer Switch", WM8900_REG_BYPASS1, 3, 1, 0),
SOC_DAPM_SINGLE("Right Input Mixer Switch", WM8900_REG_BYPASS2, 7, 1, 0),
SOC_DAPM_SINGLE("DACR Switch", WM8900_REG_ROUTMIXCTL1, 8, 1, 0),
};
static const struct snd_kcontrol_new wm8900_linmix_controls[] = {
SOC_DAPM_SINGLE("LINPUT2 Switch", WM8900_REG_INBOOSTMIX1, 2, 1, 1),
SOC_DAPM_SINGLE("LINPUT3 Switch", WM8900_REG_INBOOSTMIX1, 6, 1, 1),
SOC_DAPM_SINGLE("AUX Switch", WM8900_REG_AUXBOOST, 6, 1, 1),
SOC_DAPM_SINGLE("Input PGA Switch", WM8900_REG_ADCPATH, 6, 1, 0),
};
static const struct snd_kcontrol_new wm8900_rinmix_controls[] = {
SOC_DAPM_SINGLE("RINPUT2 Switch", WM8900_REG_INBOOSTMIX2, 2, 1, 1),
SOC_DAPM_SINGLE("RINPUT3 Switch", WM8900_REG_INBOOSTMIX2, 6, 1, 1),
SOC_DAPM_SINGLE("AUX Switch", WM8900_REG_AUXBOOST, 2, 1, 1),
SOC_DAPM_SINGLE("Input PGA Switch", WM8900_REG_ADCPATH, 2, 1, 0),
};
static const struct snd_kcontrol_new wm8900_linpga_controls[] = {
SOC_DAPM_SINGLE("LINPUT1 Switch", WM8900_REG_INCTL, 6, 1, 0),
SOC_DAPM_SINGLE("LINPUT2 Switch", WM8900_REG_INCTL, 5, 1, 0),
SOC_DAPM_SINGLE("LINPUT3 Switch", WM8900_REG_INCTL, 4, 1, 0),
};
static const struct snd_kcontrol_new wm8900_rinpga_controls[] = {
SOC_DAPM_SINGLE("RINPUT1 Switch", WM8900_REG_INCTL, 2, 1, 0),
SOC_DAPM_SINGLE("RINPUT2 Switch", WM8900_REG_INCTL, 1, 1, 0),
SOC_DAPM_SINGLE("RINPUT3 Switch", WM8900_REG_INCTL, 0, 1, 0),
};
static const char *wm8900_lp_mux[] = { "Disabled", "Enabled" };
static const struct soc_enum wm8900_lineout2_lp_mux =
SOC_ENUM_SINGLE(WM8900_REG_LOUTMIXCTL1, 1, 2, wm8900_lp_mux);
static const struct snd_kcontrol_new wm8900_lineout2_lp =
SOC_DAPM_ENUM("Route", wm8900_lineout2_lp_mux);
static const struct snd_soc_dapm_widget wm8900_dapm_widgets[] = {
/* Externally visible pins */
SND_SOC_DAPM_OUTPUT("LINEOUT1L"),
SND_SOC_DAPM_OUTPUT("LINEOUT1R"),
SND_SOC_DAPM_OUTPUT("LINEOUT2L"),
SND_SOC_DAPM_OUTPUT("LINEOUT2R"),
SND_SOC_DAPM_OUTPUT("HP_L"),
SND_SOC_DAPM_OUTPUT("HP_R"),
SND_SOC_DAPM_INPUT("RINPUT1"),
SND_SOC_DAPM_INPUT("LINPUT1"),
SND_SOC_DAPM_INPUT("RINPUT2"),
SND_SOC_DAPM_INPUT("LINPUT2"),
SND_SOC_DAPM_INPUT("RINPUT3"),
SND_SOC_DAPM_INPUT("LINPUT3"),
SND_SOC_DAPM_INPUT("AUX"),
SND_SOC_DAPM_VMID("VMID"),
/* Input */
SND_SOC_DAPM_MIXER("Left Input PGA", WM8900_REG_POWER2, 3, 0,
wm8900_linpga_controls,
ARRAY_SIZE(wm8900_linpga_controls)),
SND_SOC_DAPM_MIXER("Right Input PGA", WM8900_REG_POWER2, 2, 0,
wm8900_rinpga_controls,
ARRAY_SIZE(wm8900_rinpga_controls)),
SND_SOC_DAPM_MIXER("Left Input Mixer", WM8900_REG_POWER2, 5, 0,
wm8900_linmix_controls,
ARRAY_SIZE(wm8900_linmix_controls)),
SND_SOC_DAPM_MIXER("Right Input Mixer", WM8900_REG_POWER2, 4, 0,
wm8900_rinmix_controls,
ARRAY_SIZE(wm8900_rinmix_controls)),
SND_SOC_DAPM_MICBIAS("Mic Bias", WM8900_REG_POWER1, 4, 0),
SND_SOC_DAPM_ADC("ADCL", "Left HiFi Capture", WM8900_REG_POWER2, 1, 0),
SND_SOC_DAPM_ADC("ADCR", "Right HiFi Capture", WM8900_REG_POWER2, 0, 0),
/* Output */
SND_SOC_DAPM_DAC("DACL", "Left HiFi Playback", WM8900_REG_POWER3, 1, 0),
SND_SOC_DAPM_DAC("DACR", "Right HiFi Playback", WM8900_REG_POWER3, 0, 0),
SND_SOC_DAPM_PGA_E("Headphone Amplifier", WM8900_REG_POWER3, 7, 0, NULL, 0,
wm8900_hp_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_PGA("LINEOUT1L PGA", WM8900_REG_POWER2, 8, 0, NULL, 0),
SND_SOC_DAPM_PGA("LINEOUT1R PGA", WM8900_REG_POWER2, 7, 0, NULL, 0),
SND_SOC_DAPM_MUX("LINEOUT2 LP", SND_SOC_NOPM, 0, 0, &wm8900_lineout2_lp),
SND_SOC_DAPM_PGA("LINEOUT2L PGA", WM8900_REG_POWER3, 6, 0, NULL, 0),
SND_SOC_DAPM_PGA("LINEOUT2R PGA", WM8900_REG_POWER3, 5, 0, NULL, 0),
SND_SOC_DAPM_MIXER("Left Output Mixer", WM8900_REG_POWER3, 3, 0,
wm8900_loutmix_controls,
ARRAY_SIZE(wm8900_loutmix_controls)),
SND_SOC_DAPM_MIXER("Right Output Mixer", WM8900_REG_POWER3, 2, 0,
wm8900_routmix_controls,
ARRAY_SIZE(wm8900_routmix_controls)),
};
/* Target, Path, Source */
static const struct snd_soc_dapm_route audio_map[] = {
/* Inputs */
{"Left Input PGA", "LINPUT1 Switch", "LINPUT1"},
{"Left Input PGA", "LINPUT2 Switch", "LINPUT2"},
{"Left Input PGA", "LINPUT3 Switch", "LINPUT3"},
{"Right Input PGA", "RINPUT1 Switch", "RINPUT1"},
{"Right Input PGA", "RINPUT2 Switch", "RINPUT2"},
{"Right Input PGA", "RINPUT3 Switch", "RINPUT3"},
{"Left Input Mixer", "LINPUT2 Switch", "LINPUT2"},
{"Left Input Mixer", "LINPUT3 Switch", "LINPUT3"},
{"Left Input Mixer", "AUX Switch", "AUX"},
{"Left Input Mixer", "Input PGA Switch", "Left Input PGA"},
{"Right Input Mixer", "RINPUT2 Switch", "RINPUT2"},
{"Right Input Mixer", "RINPUT3 Switch", "RINPUT3"},
{"Right Input Mixer", "AUX Switch", "AUX"},
{"Right Input Mixer", "Input PGA Switch", "Right Input PGA"},
{"ADCL", NULL, "Left Input Mixer"},
{"ADCR", NULL, "Right Input Mixer"},
/* Outputs */
{"LINEOUT1L", NULL, "LINEOUT1L PGA"},
{"LINEOUT1L PGA", NULL, "Left Output Mixer"},
{"LINEOUT1R", NULL, "LINEOUT1R PGA"},
{"LINEOUT1R PGA", NULL, "Right Output Mixer"},
{"LINEOUT2L PGA", NULL, "Left Output Mixer"},
{"LINEOUT2 LP", "Disabled", "LINEOUT2L PGA"},
{"LINEOUT2 LP", "Enabled", "Left Output Mixer"},
{"LINEOUT2L", NULL, "LINEOUT2 LP"},
{"LINEOUT2R PGA", NULL, "Right Output Mixer"},
{"LINEOUT2 LP", "Disabled", "LINEOUT2R PGA"},
{"LINEOUT2 LP", "Enabled", "Right Output Mixer"},
{"LINEOUT2R", NULL, "LINEOUT2 LP"},
{"Left Output Mixer", "LINPUT3 Bypass Switch", "LINPUT3"},
{"Left Output Mixer", "AUX Bypass Switch", "AUX"},
{"Left Output Mixer", "Left Input Mixer Switch", "Left Input Mixer"},
{"Left Output Mixer", "Right Input Mixer Switch", "Right Input Mixer"},
{"Left Output Mixer", "DACL Switch", "DACL"},
{"Right Output Mixer", "RINPUT3 Bypass Switch", "RINPUT3"},
{"Right Output Mixer", "AUX Bypass Switch", "AUX"},
{"Right Output Mixer", "Left Input Mixer Switch", "Left Input Mixer"},
{"Right Output Mixer", "Right Input Mixer Switch", "Right Input Mixer"},
{"Right Output Mixer", "DACR Switch", "DACR"},
/* Note that the headphone output stage needs to be connected
* externally to LINEOUT2 via DC blocking capacitors. Other
* configurations are not supported.
*
* Note also that left and right headphone paths are treated as a
* mono path.
*/
{"Headphone Amplifier", NULL, "LINEOUT2 LP"},
{"Headphone Amplifier", NULL, "LINEOUT2 LP"},
{"HP_L", NULL, "Headphone Amplifier"},
{"HP_R", NULL, "Headphone Amplifier"},
};
static int wm8900_add_widgets(struct snd_soc_codec *codec)
{
snd_soc_dapm_new_controls(codec, wm8900_dapm_widgets,
ARRAY_SIZE(wm8900_dapm_widgets));
snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
return 0;
}
static int wm8900_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
u16 reg;
reg = snd_soc_read(codec, WM8900_REG_AUDIO1) & ~0x60;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
break;
case SNDRV_PCM_FORMAT_S20_3LE:
reg |= 0x20;
break;
case SNDRV_PCM_FORMAT_S24_LE:
reg |= 0x40;
break;
case SNDRV_PCM_FORMAT_S32_LE:
reg |= 0x60;
break;
default:
return -EINVAL;
}
snd_soc_write(codec, WM8900_REG_AUDIO1, reg);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
reg = snd_soc_read(codec, WM8900_REG_DACCTRL);
if (params_rate(params) <= 24000)
reg |= WM8900_REG_DACCTRL_DAC_SB_FILT;
else
reg &= ~WM8900_REG_DACCTRL_DAC_SB_FILT;
snd_soc_write(codec, WM8900_REG_DACCTRL, reg);
}
return 0;
}
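/*
* Illustrative note (not in the original source): bits [6:5] of
* WM8900_REG_AUDIO1 (the 0x60 mask cleared above) select the AIF word
* length - 0x00 = 16 bit, 0x20 = 20 bit, 0x40 = 24 bit, 0x60 = 32 bit -
* which is exactly what the switch on params_format() programs.
*/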
/* FLL divisors */
struct _fll_div {
u16 fll_ratio;
u16 fllclk_div;
u16 fll_slow_lock_ref;
u16 n;
u16 k;
};
/* The size in bits of the FLL divide multiplied by 10
* to allow rounding later */
#define FIXED_FLL_SIZE ((1 << 16) * 10)
static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,
unsigned int Fout)
{
u64 Kpart;
unsigned int K, Ndiv, Nmod, target;
unsigned int div;
BUG_ON(!Fout);
/* The FLL must run at 90-100MHz which is then scaled down to
* the output value by FLLCLK_DIV. */
target = Fout;
div = 1;
while (target < 90000000) {
div *= 2;
target *= 2;
}
if (target > 100000000)
printk(KERN_WARNING "wm8900: FLL rate %u out of range, Fref=%u"
" Fout=%u\n", target, Fref, Fout);
if (div > 32) {
printk(KERN_ERR "wm8900: Invalid FLL division rate %u, "
"Fref=%u, Fout=%u, target=%u\n",
div, Fref, Fout, target);
return -EINVAL;
}
fll_div->fllclk_div = div >> 2;
if (Fref < 48000)
fll_div->fll_slow_lock_ref = 1;
else
fll_div->fll_slow_lock_ref = 0;
Ndiv = target / Fref;
if (Fref < 1000000)
fll_div->fll_ratio = 8;
else
fll_div->fll_ratio = 1;
fll_div->n = Ndiv / fll_div->fll_ratio;
Nmod = (target / fll_div->fll_ratio) % Fref;
/* Calculate fractional part - scale up so we can round. */
Kpart = FIXED_FLL_SIZE * (long long)Nmod;
do_div(Kpart, Fref);
K = Kpart & 0xFFFFFFFF;
if ((K % 10) >= 5)
K += 5;
/* Move down to proper range now rounding is done */
fll_div->k = K / 10;
BUG_ON(target != Fout * (fll_div->fllclk_div << 2));
BUG_ON(!K && target != Fref * fll_div->fll_ratio * fll_div->n);
return 0;
}
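/*
 * Worked example of the factor calculation above (the clock rates are
 * illustrative, not mandated by the driver): for Fref = 12MHz (MCLK)
 * and Fout = 12.288MHz (256fs at 48kHz) the loop doubles target up to
 * 98.304MHz with div = 8, giving fllclk_div = 2. Ndiv = 98304000 /
 * 12000000 = 8, so n = 8 with fll_ratio = 1. Nmod = 2304000, and
 * Kpart = 655360 * 2304000 / 12000000 = 125829; after the rounding
 * step k = 12583, which is 0.192 * 2^16, the fractional part of the
 * ideal divide ratio 8.192.
 */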
static int wm8900_set_fll(struct snd_soc_codec *codec,
int fll_id, unsigned int freq_in, unsigned int freq_out)
{
struct wm8900_priv *wm8900 = snd_soc_codec_get_drvdata(codec);
struct _fll_div fll_div;
unsigned int reg;
if (wm8900->fll_in == freq_in && wm8900->fll_out == freq_out)
return 0;
/* The digital side should be disabled during any change. */
reg = snd_soc_read(codec, WM8900_REG_POWER1);
snd_soc_write(codec, WM8900_REG_POWER1,
reg & (~WM8900_REG_POWER1_FLL_ENA));
/* Disable the FLL? */
if (!freq_in || !freq_out) {
reg = snd_soc_read(codec, WM8900_REG_CLOCKING1);
snd_soc_write(codec, WM8900_REG_CLOCKING1,
reg & (~WM8900_REG_CLOCKING1_MCLK_SRC));
reg = snd_soc_read(codec, WM8900_REG_FLLCTL1);
snd_soc_write(codec, WM8900_REG_FLLCTL1,
reg & (~WM8900_REG_FLLCTL1_OSC_ENA));
wm8900->fll_in = freq_in;
wm8900->fll_out = freq_out;
return 0;
}
if (fll_factors(&fll_div, freq_in, freq_out) != 0)
goto reenable;
wm8900->fll_in = freq_in;
wm8900->fll_out = freq_out;
/* The oscillator *MUST* be enabled before we enable the
* digital circuit. */
snd_soc_write(codec, WM8900_REG_FLLCTL1,
fll_div.fll_ratio | WM8900_REG_FLLCTL1_OSC_ENA);
snd_soc_write(codec, WM8900_REG_FLLCTL4, fll_div.n >> 5);
snd_soc_write(codec, WM8900_REG_FLLCTL5,
(fll_div.fllclk_div << 6) | (fll_div.n & 0x1f));
if (fll_div.k) {
snd_soc_write(codec, WM8900_REG_FLLCTL2,
(fll_div.k >> 8) | 0x100);
snd_soc_write(codec, WM8900_REG_FLLCTL3, fll_div.k & 0xff);
} else
snd_soc_write(codec, WM8900_REG_FLLCTL2, 0);
if (fll_div.fll_slow_lock_ref)
snd_soc_write(codec, WM8900_REG_FLLCTL6,
WM8900_REG_FLLCTL6_FLL_SLOW_LOCK_REF);
else
snd_soc_write(codec, WM8900_REG_FLLCTL6, 0);
reg = snd_soc_read(codec, WM8900_REG_POWER1);
snd_soc_write(codec, WM8900_REG_POWER1,
reg | WM8900_REG_POWER1_FLL_ENA);
reenable:
reg = snd_soc_read(codec, WM8900_REG_CLOCKING1);
snd_soc_write(codec, WM8900_REG_CLOCKING1,
reg | WM8900_REG_CLOCKING1_MCLK_SRC);
return 0;
}
static int wm8900_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
int source, unsigned int freq_in, unsigned int freq_out)
{
return wm8900_set_fll(codec_dai->codec, pll_id, freq_in, freq_out);
}
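/*
 * Usage sketch (machine-driver side, not part of this file; the clock
 * rates are hypothetical): a machine driver would normally reach
 * wm8900_set_dai_pll() through the generic DAI API, e.g.
 *
 *	ret = snd_soc_dai_set_pll(codec_dai, 0, 0,
 *				  12000000, 12288000);
 *	if (ret < 0)
 *		return ret;
 *
 * Passing freq_in == 0 or freq_out == 0 disables the FLL again, as
 * handled in wm8900_set_fll() above.
 */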
static int wm8900_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
int div_id, int div)
{
struct snd_soc_codec *codec = codec_dai->codec;
unsigned int reg;
switch (div_id) {
case WM8900_BCLK_DIV:
reg = snd_soc_read(codec, WM8900_REG_CLOCKING1);
snd_soc_write(codec, WM8900_REG_CLOCKING1,
div | (reg & WM8900_REG_CLOCKING1_BCLK_MASK));
break;
case WM8900_OPCLK_DIV:
reg = snd_soc_read(codec, WM8900_REG_CLOCKING1);
snd_soc_write(codec, WM8900_REG_CLOCKING1,
div | (reg & WM8900_REG_CLOCKING1_OPCLK_MASK));
break;
case WM8900_DAC_LRCLK:
reg = snd_soc_read(codec, WM8900_REG_AUDIO4);
snd_soc_write(codec, WM8900_REG_AUDIO4,
div | (reg & WM8900_LRC_MASK));
break;
case WM8900_ADC_LRCLK:
reg = snd_soc_read(codec, WM8900_REG_AUDIO3);
snd_soc_write(codec, WM8900_REG_AUDIO3,
div | (reg & WM8900_LRC_MASK));
break;
case WM8900_DAC_CLKDIV:
reg = snd_soc_read(codec, WM8900_REG_CLOCKING2);
snd_soc_write(codec, WM8900_REG_CLOCKING2,
div | (reg & WM8900_REG_CLOCKING2_DAC_CLKDIV));
break;
case WM8900_ADC_CLKDIV:
reg = snd_soc_read(codec, WM8900_REG_CLOCKING2);
snd_soc_write(codec, WM8900_REG_CLOCKING2,
div | (reg & WM8900_REG_CLOCKING2_ADC_CLKDIV));
break;
case WM8900_LRCLK_MODE:
reg = snd_soc_read(codec, WM8900_REG_DACCTRL);
snd_soc_write(codec, WM8900_REG_DACCTRL,
div | (reg & WM8900_REG_DACCTRL_AIF_LRCLKRATE));
break;
default:
return -EINVAL;
}
return 0;
}
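/*
 * Usage sketch (machine-driver side; bclk_div_reg_val is a
 * hypothetical, board-specific register encoding - the div argument
 * is OR'd directly into the register field selected by div_id):
 *
 *	ret = snd_soc_dai_set_clkdiv(codec_dai, WM8900_BCLK_DIV,
 *				     bclk_div_reg_val);
 */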
static int wm8900_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
unsigned int clocking1, aif1, aif3, aif4;
clocking1 = snd_soc_read(codec, WM8900_REG_CLOCKING1);
aif1 = snd_soc_read(codec, WM8900_REG_AUDIO1);
aif3 = snd_soc_read(codec, WM8900_REG_AUDIO3);
aif4 = snd_soc_read(codec, WM8900_REG_AUDIO4);
/* set master/slave audio interface */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
clocking1 &= ~WM8900_REG_CLOCKING1_BCLK_DIR;
aif3 &= ~WM8900_REG_AUDIO3_ADCLRC_DIR;
aif4 &= ~WM8900_REG_AUDIO4_DACLRC_DIR;
break;
case SND_SOC_DAIFMT_CBS_CFM:
clocking1 &= ~WM8900_REG_CLOCKING1_BCLK_DIR;
aif3 |= WM8900_REG_AUDIO3_ADCLRC_DIR;
aif4 |= WM8900_REG_AUDIO4_DACLRC_DIR;
break;
case SND_SOC_DAIFMT_CBM_CFM:
clocking1 |= WM8900_REG_CLOCKING1_BCLK_DIR;
aif3 |= WM8900_REG_AUDIO3_ADCLRC_DIR;
aif4 |= WM8900_REG_AUDIO4_DACLRC_DIR;
break;
case SND_SOC_DAIFMT_CBM_CFS:
clocking1 |= WM8900_REG_CLOCKING1_BCLK_DIR;
aif3 &= ~WM8900_REG_AUDIO3_ADCLRC_DIR;
aif4 &= ~WM8900_REG_AUDIO4_DACLRC_DIR;
break;
default:
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_A:
aif1 |= WM8900_REG_AUDIO1_AIF_FMT_MASK;
aif1 &= ~WM8900_REG_AUDIO1_LRCLK_INV;
break;
case SND_SOC_DAIFMT_DSP_B:
aif1 |= WM8900_REG_AUDIO1_AIF_FMT_MASK;
aif1 |= WM8900_REG_AUDIO1_LRCLK_INV;
break;
case SND_SOC_DAIFMT_I2S:
aif1 &= ~WM8900_REG_AUDIO1_AIF_FMT_MASK;
aif1 |= 0x10;
break;
case SND_SOC_DAIFMT_RIGHT_J:
aif1 &= ~WM8900_REG_AUDIO1_AIF_FMT_MASK;
break;
case SND_SOC_DAIFMT_LEFT_J:
aif1 &= ~WM8900_REG_AUDIO1_AIF_FMT_MASK;
aif1 |= 0x8;
break;
default:
return -EINVAL;
}
/* Clock inversion */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_A:
case SND_SOC_DAIFMT_DSP_B:
/* frame inversion not valid for DSP modes */
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
aif1 &= ~WM8900_REG_AUDIO1_BCLK_INV;
break;
case SND_SOC_DAIFMT_IB_NF:
aif1 |= WM8900_REG_AUDIO1_BCLK_INV;
break;
default:
return -EINVAL;
}
break;
case SND_SOC_DAIFMT_I2S:
case SND_SOC_DAIFMT_RIGHT_J:
case SND_SOC_DAIFMT_LEFT_J:
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
aif1 &= ~WM8900_REG_AUDIO1_BCLK_INV;
aif1 &= ~WM8900_REG_AUDIO1_LRCLK_INV;
break;
case SND_SOC_DAIFMT_IB_IF:
aif1 |= WM8900_REG_AUDIO1_BCLK_INV;
aif1 |= WM8900_REG_AUDIO1_LRCLK_INV;
break;
case SND_SOC_DAIFMT_IB_NF:
aif1 |= WM8900_REG_AUDIO1_BCLK_INV;
aif1 &= ~WM8900_REG_AUDIO1_LRCLK_INV;
break;
case SND_SOC_DAIFMT_NB_IF:
aif1 &= ~WM8900_REG_AUDIO1_BCLK_INV;
aif1 |= WM8900_REG_AUDIO1_LRCLK_INV;
break;
default:
return -EINVAL;
}
break;
default:
return -EINVAL;
}
snd_soc_write(codec, WM8900_REG_CLOCKING1, clocking1);
snd_soc_write(codec, WM8900_REG_AUDIO1, aif1);
snd_soc_write(codec, WM8900_REG_AUDIO3, aif3);
snd_soc_write(codec, WM8900_REG_AUDIO4, aif4);
return 0;
}
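/*
 * Usage sketch (machine-driver side, not part of this file): selecting
 * I2S data, normal bit/frame clock polarity, with the codec as clock
 * and frame slave:
 *
 *	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
 *				  SND_SOC_DAIFMT_NB_NF |
 *				  SND_SOC_DAIFMT_CBS_CFS);
 */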
static int wm8900_digital_mute(struct snd_soc_dai *codec_dai, int mute)
{
struct snd_soc_codec *codec = codec_dai->codec;
u16 reg;
reg = snd_soc_read(codec, WM8900_REG_DACCTRL);
if (mute)
reg |= WM8900_REG_DACCTRL_MUTE;
else
reg &= ~WM8900_REG_DACCTRL_MUTE;
snd_soc_write(codec, WM8900_REG_DACCTRL, reg);
return 0;
}
#define WM8900_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\
SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |\
SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
#define WM8900_PCM_FORMATS \
(SNDRV_PCM_FORMAT_S16_LE | SNDRV_PCM_FORMAT_S20_3LE | \
SNDRV_PCM_FORMAT_S24_LE)
static struct snd_soc_dai_ops wm8900_dai_ops = {
.hw_params = wm8900_hw_params,
.set_clkdiv = wm8900_set_dai_clkdiv,
.set_pll = wm8900_set_dai_pll,
.set_fmt = wm8900_set_dai_fmt,
.digital_mute = wm8900_digital_mute,
};
struct snd_soc_dai wm8900_dai = {
.name = "WM8900 HiFi",
.playback = {
.stream_name = "HiFi Playback",
.channels_min = 1,
.channels_max = 2,
.rates = WM8900_RATES,
.formats = WM8900_PCM_FORMATS,
},
.capture = {
.stream_name = "HiFi Capture",
.channels_min = 1,
.channels_max = 2,
.rates = WM8900_RATES,
.formats = WM8900_PCM_FORMATS,
},
.ops = &wm8900_dai_ops,
};
EXPORT_SYMBOL_GPL(wm8900_dai);
static int wm8900_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
u16 reg;
switch (level) {
case SND_SOC_BIAS_ON:
/* Enable thermal shutdown */
reg = snd_soc_read(codec, WM8900_REG_GPIO);
snd_soc_write(codec, WM8900_REG_GPIO,
reg | WM8900_REG_GPIO_TEMP_ENA);
reg = snd_soc_read(codec, WM8900_REG_ADDCTL);
snd_soc_write(codec, WM8900_REG_ADDCTL,
reg | WM8900_REG_ADDCTL_TEMP_SD);
break;
case SND_SOC_BIAS_PREPARE:
break;
case SND_SOC_BIAS_STANDBY:
/* Charge capacitors if initial power up */
if (codec->bias_level == SND_SOC_BIAS_OFF) {
/* STARTUP_BIAS_ENA on */
snd_soc_write(codec, WM8900_REG_POWER1,
WM8900_REG_POWER1_STARTUP_BIAS_ENA);
/* Startup bias mode */
snd_soc_write(codec, WM8900_REG_ADDCTL,
WM8900_REG_ADDCTL_BIAS_SRC |
WM8900_REG_ADDCTL_VMID_SOFTST);
/* VMID 2x50k */
snd_soc_write(codec, WM8900_REG_POWER1,
WM8900_REG_POWER1_STARTUP_BIAS_ENA | 0x1);
/* Allow capacitors to charge */
schedule_timeout_interruptible(msecs_to_jiffies(400));
/* Enable bias */
snd_soc_write(codec, WM8900_REG_POWER1,
WM8900_REG_POWER1_STARTUP_BIAS_ENA |
WM8900_REG_POWER1_BIAS_ENA | 0x1);
snd_soc_write(codec, WM8900_REG_ADDCTL, 0);
snd_soc_write(codec, WM8900_REG_POWER1,
WM8900_REG_POWER1_BIAS_ENA | 0x1);
}
reg = snd_soc_read(codec, WM8900_REG_POWER1);
snd_soc_write(codec, WM8900_REG_POWER1,
(reg & WM8900_REG_POWER1_FLL_ENA) |
WM8900_REG_POWER1_BIAS_ENA | 0x1);
snd_soc_write(codec, WM8900_REG_POWER2,
WM8900_REG_POWER2_SYSCLK_ENA);
snd_soc_write(codec, WM8900_REG_POWER3, 0);
break;
case SND_SOC_BIAS_OFF:
/* Startup bias enable */
reg = snd_soc_read(codec, WM8900_REG_POWER1);
snd_soc_write(codec, WM8900_REG_POWER1,
reg & WM8900_REG_POWER1_STARTUP_BIAS_ENA);
snd_soc_write(codec, WM8900_REG_ADDCTL,
WM8900_REG_ADDCTL_BIAS_SRC |
WM8900_REG_ADDCTL_VMID_SOFTST);
/* Discharge caps */
snd_soc_write(codec, WM8900_REG_POWER1,
WM8900_REG_POWER1_STARTUP_BIAS_ENA);
schedule_timeout_interruptible(msecs_to_jiffies(500));
/* Remove clamp */
snd_soc_write(codec, WM8900_REG_HPCTL1, 0);
/* Power down */
snd_soc_write(codec, WM8900_REG_ADDCTL, 0);
snd_soc_write(codec, WM8900_REG_POWER1, 0);
snd_soc_write(codec, WM8900_REG_POWER2, 0);
snd_soc_write(codec, WM8900_REG_POWER3, 0);
/* Need to let things settle before stopping the clock
* to ensure that restart works, see "Stopping the
* master clock" in the datasheet. */
schedule_timeout_interruptible(msecs_to_jiffies(1));
snd_soc_write(codec, WM8900_REG_POWER2,
WM8900_REG_POWER2_SYSCLK_ENA);
break;
}
codec->bias_level = level;
return 0;
}
static int wm8900_suspend(struct platform_device *pdev, pm_message_t state)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
struct wm8900_priv *wm8900 = snd_soc_codec_get_drvdata(codec);
int fll_out = wm8900->fll_out;
int fll_in = wm8900->fll_in;
int ret;
/* Stop the FLL in an orderly fashion */
ret = wm8900_set_fll(codec, 0, 0, 0);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to stop FLL\n");
return ret;
}
wm8900->fll_out = fll_out;
wm8900->fll_in = fll_in;
wm8900_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
static int wm8900_resume(struct platform_device *pdev)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
struct wm8900_priv *wm8900 = snd_soc_codec_get_drvdata(codec);
u16 *cache;
int i, ret;
cache = kmemdup(codec->reg_cache, sizeof(wm8900_reg_defaults),
GFP_KERNEL);
wm8900_reset(codec);
wm8900_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* Restart the FLL? */
if (wm8900->fll_out) {
int fll_out = wm8900->fll_out;
int fll_in = wm8900->fll_in;
wm8900->fll_in = 0;
wm8900->fll_out = 0;
ret = wm8900_set_fll(codec, 0, fll_in, fll_out);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to restart FLL\n");
return ret;
}
}
if (cache) {
for (i = 0; i < WM8900_MAXREG; i++)
snd_soc_write(codec, i, cache[i]);
kfree(cache);
} else
dev_err(&pdev->dev, "Unable to allocate register cache\n");
return 0;
}
static struct snd_soc_codec *wm8900_codec;
static __devinit int wm8900_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct wm8900_priv *wm8900;
struct snd_soc_codec *codec;
unsigned int reg;
int ret;
wm8900 = kzalloc(sizeof(struct wm8900_priv), GFP_KERNEL);
if (wm8900 == NULL)
return -ENOMEM;
codec = &wm8900->codec;
snd_soc_codec_set_drvdata(codec, wm8900);
codec->reg_cache = &wm8900->reg_cache[0];
codec->reg_cache_size = WM8900_MAXREG;
mutex_init(&codec->mutex);
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
codec->name = "WM8900";
codec->owner = THIS_MODULE;
codec->dai = &wm8900_dai;
codec->num_dai = 1;
codec->control_data = i2c;
codec->set_bias_level = wm8900_set_bias_level;
codec->volatile_register = wm8900_volatile_register;
codec->dev = &i2c->dev;
ret = snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_I2C);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to set cache I/O: %d\n", ret);
goto err;
}
reg = snd_soc_read(codec, WM8900_REG_ID);
if (reg != 0x8900) {
dev_err(&i2c->dev, "Device is not a WM8900 - ID %x\n", reg);
ret = -ENODEV;
goto err;
}
/* Read back from the chip */
reg = snd_soc_read(codec, WM8900_REG_POWER1);
reg = (reg >> 12) & 0xf;
dev_info(&i2c->dev, "WM8900 revision %d\n", reg);
wm8900_reset(codec);
/* Turn the chip on */
wm8900_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* Latch the volume update bits */
snd_soc_write(codec, WM8900_REG_LINVOL,
snd_soc_read(codec, WM8900_REG_LINVOL) | 0x100);
snd_soc_write(codec, WM8900_REG_RINVOL,
snd_soc_read(codec, WM8900_REG_RINVOL) | 0x100);
snd_soc_write(codec, WM8900_REG_LOUT1CTL,
snd_soc_read(codec, WM8900_REG_LOUT1CTL) | 0x100);
snd_soc_write(codec, WM8900_REG_ROUT1CTL,
snd_soc_read(codec, WM8900_REG_ROUT1CTL) | 0x100);
snd_soc_write(codec, WM8900_REG_LOUT2CTL,
snd_soc_read(codec, WM8900_REG_LOUT2CTL) | 0x100);
snd_soc_write(codec, WM8900_REG_ROUT2CTL,
snd_soc_read(codec, WM8900_REG_ROUT2CTL) | 0x100);
snd_soc_write(codec, WM8900_REG_LDAC_DV,
snd_soc_read(codec, WM8900_REG_LDAC_DV) | 0x100);
snd_soc_write(codec, WM8900_REG_RDAC_DV,
snd_soc_read(codec, WM8900_REG_RDAC_DV) | 0x100);
snd_soc_write(codec, WM8900_REG_LADC_DV,
snd_soc_read(codec, WM8900_REG_LADC_DV) | 0x100);
snd_soc_write(codec, WM8900_REG_RADC_DV,
snd_soc_read(codec, WM8900_REG_RADC_DV) | 0x100);
/* Set the DAC and mixer output bias */
snd_soc_write(codec, WM8900_REG_OUTBIASCTL, 0x81);
wm8900_dai.dev = &i2c->dev;
wm8900_codec = codec;
ret = snd_soc_register_codec(codec);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to register codec: %d\n", ret);
goto err;
}
ret = snd_soc_register_dai(&wm8900_dai);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to register DAI: %d\n", ret);
goto err_codec;
}
return ret;
err_codec:
snd_soc_unregister_codec(codec);
err:
kfree(wm8900);
wm8900_codec = NULL;
return ret;
}
static __devexit int wm8900_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_dai(&wm8900_dai);
snd_soc_unregister_codec(wm8900_codec);
wm8900_set_bias_level(wm8900_codec, SND_SOC_BIAS_OFF);
wm8900_dai.dev = NULL;
kfree(snd_soc_codec_get_drvdata(wm8900_codec));
wm8900_codec = NULL;
return 0;
}
static const struct i2c_device_id wm8900_i2c_id[] = {
{ "wm8900", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8900_i2c_id);
static struct i2c_driver wm8900_i2c_driver = {
.driver = {
.name = "WM8900",
.owner = THIS_MODULE,
},
.probe = wm8900_i2c_probe,
.remove = __devexit_p(wm8900_i2c_remove),
.id_table = wm8900_i2c_id,
};
static int wm8900_probe(struct platform_device *pdev)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec;
int ret = 0;
if (!wm8900_codec) {
dev_err(&pdev->dev, "I2C client not yet instantiated\n");
return -ENODEV;
}
codec = wm8900_codec;
socdev->card->codec = codec;
/* Register pcms */
ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register new PCMs\n");
goto pcm_err;
}
snd_soc_add_controls(codec, wm8900_snd_controls,
ARRAY_SIZE(wm8900_snd_controls));
wm8900_add_widgets(codec);
pcm_err:
return ret;
}
/* power down chip */
static int wm8900_remove(struct platform_device *pdev)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
snd_soc_free_pcms(socdev);
snd_soc_dapm_free(socdev);
return 0;
}
struct snd_soc_codec_device soc_codec_dev_wm8900 = {
.probe = wm8900_probe,
.remove = wm8900_remove,
.suspend = wm8900_suspend,
.resume = wm8900_resume,
};
EXPORT_SYMBOL_GPL(soc_codec_dev_wm8900);
static int __init wm8900_modinit(void)
{
return i2c_add_driver(&wm8900_i2c_driver);
}
module_init(wm8900_modinit);
static void __exit wm8900_exit(void)
{
i2c_del_driver(&wm8900_i2c_driver);
}
module_exit(wm8900_exit);
MODULE_DESCRIPTION("ASoC WM8900 driver");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfonmicro.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
thepasto/kernel_acer_salsa | fs/jfs/jfs_dmap.c | 1052 | 111577 | /*
* Copyright (C) International Business Machines Corp., 2000-2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_lock.h"
#include "jfs_metapage.h"
#include "jfs_debug.h"
/*
* SERIALIZATION of the Block Allocation Map.
*
* the working state of the block allocation map is accessed in
* two directions:
*
* 1) allocation and free requests that start at the dmap
* level and move up through the dmap control pages (i.e.
* the vast majority of requests).
*
* 2) allocation requests that start at dmap control page
* level and work down towards the dmaps.
*
* the serialization scheme used here is as follows.
*
* requests which start at the bottom are serialized against each
* other through buffers and each request holds onto its buffers
* as it works its way up from a single dmap to the required level
* of dmap control page.
* requests that start at the top are serialized against each other
* and against requests that start from the bottom by the multiple
* read/single write inode lock of the bmap inode. requests starting
* at the top take this lock in write mode while requests starting at
* the bottom take the lock in read mode. a single top-down request
* may proceed exclusively while multiple bottom-up requests may
* proceed simultaneously (under the protection of busy buffers).
*
* in addition to information found in dmaps and dmap control pages,
* the working state of the block allocation map also includes read/
* write information maintained in the bmap descriptor (i.e. total
* free block count, allocation group level free block counts).
* a single exclusive lock (BMAP_LOCK) is used to guard this information
* in the face of multiple-bottoms up requests.
* (lock ordering: IREAD_LOCK, BMAP_LOCK);
*
* accesses to the persistent state of the block allocation map (limited
* to the persistent bitmaps in dmaps) is guarded by (busy) buffers.
*/
#define BMAP_LOCK_INIT(bmp) mutex_init(&bmp->db_bmaplock)
#define BMAP_LOCK(bmp) mutex_lock(&bmp->db_bmaplock)
#define BMAP_UNLOCK(bmp) mutex_unlock(&bmp->db_bmaplock)
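/*
 * Illustrative sketch (not driver code) of the documented lock
 * ordering for a bottom-up request: the dmap itself is serialized by
 * its (busy) buffer, while the summary state is guarded by BMAP_LOCK
 * taken inside the read-held inode lock:
 *
 *	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
 *	mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
 *	... update the dmap bitmap through the buffer ...
 *	BMAP_LOCK(bmp);
 *	bmp->db_nfree += nblocks;
 *	BMAP_UNLOCK(bmp);
 *	write_metapage(mp);
 *	IREAD_UNLOCK(ipbmap);
 */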
/*
* forward references
*/
static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval);
static int dbBackSplit(dmtree_t * tp, int leafno);
static int dbJoin(dmtree_t * tp, int leafno, int newval);
static void dbAdjTree(dmtree_t * tp, int leafno, int newval);
static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc,
int level);
static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results);
static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static int dbAllocNear(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks,
int l2nb, s64 * results);
static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static int dbAllocDmapLev(struct bmap * bmp, struct dmap * dp, int nblocks,
int l2nb,
s64 * results);
static int dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb,
s64 * results);
static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
s64 * results);
static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
static int dbFindBits(u32 word, int l2nb);
static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx);
static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static int dbMaxBud(u8 * cp);
s64 dbMapFileSizeToMapSize(struct inode *ipbmap);
static int blkstol2(s64 nb);
static int cntlz(u32 value);
static int cnttz(u32 word);
static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static int dbInitDmap(struct dmap * dp, s64 blkno, int nblocks);
static int dbInitDmapTree(struct dmap * dp);
static int dbInitTree(struct dmaptree * dtp);
static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i);
static int dbGetL2AGSize(s64 nblocks);
/*
* buddy table
*
* table used for determining buddy sizes within characters of
* dmap bitmap words. the characters themselves serve as indexes
* into the table, with the table elements yielding the maximum
* binary buddy of free bits within the character.
*/
static const s8 budtab[256] = {
3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, -1
};
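/*
 * Worked examples of the table: budtab[0x00] = 3, since a fully free
 * character (eight 0 bits) forms a buddy of 2^3 bits; budtab[0x0f] = 2,
 * since only the upper nibble is free, giving a maximum aligned buddy
 * of 2^2 bits; budtab[0xff] = -1 (NOFREE), since no bits are free.
 */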
/*
* NAME: dbMount()
*
* FUNCTION: initialize the block allocation map.
*
* memory is allocated for the in-core bmap descriptor and
* the in-core descriptor is initialized from disk.
*
* PARAMETERS:
* ipbmap - pointer to in-core inode for the block map.
*
* RETURN VALUES:
* 0 - success
* -ENOMEM - insufficient memory
* -EIO - i/o error
*/
int dbMount(struct inode *ipbmap)
{
struct bmap *bmp;
struct dbmap_disk *dbmp_le;
struct metapage *mp;
int i;
/*
* allocate/initialize the in-memory bmap descriptor
*/
/* allocate memory for the in-memory bmap descriptor */
bmp = kmalloc(sizeof(struct bmap), GFP_KERNEL);
if (bmp == NULL)
return -ENOMEM;
/* read the on-disk bmap descriptor. */
mp = read_metapage(ipbmap,
BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
PSIZE, 0);
if (mp == NULL) {
kfree(bmp);
return -EIO;
}
/* copy the on-disk bmap descriptor to its in-memory version. */
dbmp_le = (struct dbmap_disk *) mp->data;
bmp->db_mapsize = le64_to_cpu(dbmp_le->dn_mapsize);
bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
for (i = 0; i < MAXAG; i++)
bmp->db_agfree[i] = le64_to_cpu(dbmp_le->dn_agfree[i]);
bmp->db_agsize = le64_to_cpu(dbmp_le->dn_agsize);
bmp->db_maxfreebud = dbmp_le->dn_maxfreebud;
/* release the buffer. */
release_metapage(mp);
/* bind the bmap inode and the bmap descriptor to each other. */
bmp->db_ipbmap = ipbmap;
JFS_SBI(ipbmap->i_sb)->bmap = bmp;
memset(bmp->db_active, 0, sizeof(bmp->db_active));
/*
* allocate/initialize the bmap lock
*/
BMAP_LOCK_INIT(bmp);
return (0);
}
/*
* NAME: dbUnmount()
*
* FUNCTION: terminate the block allocation map in preparation for
* file system unmount.
*
* the in-core bmap descriptor is written to disk and
* the memory for this descriptor is freed.
*
* PARAMETERS:
* ipbmap - pointer to in-core inode for the block map.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*/
int dbUnmount(struct inode *ipbmap, int mounterror)
{
struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
if (!(mounterror || isReadOnly(ipbmap)))
dbSync(ipbmap);
/*
* Invalidate the page cache buffers
*/
truncate_inode_pages(ipbmap->i_mapping, 0);
/* free the memory for the in-memory bmap. */
kfree(bmp);
return (0);
}
/*
* dbSync()
*/
int dbSync(struct inode *ipbmap)
{
struct dbmap_disk *dbmp_le;
struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
struct metapage *mp;
int i;
/*
* write bmap global control page
*/
/* get the buffer for the on-disk bmap descriptor. */
mp = read_metapage(ipbmap,
BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
PSIZE, 0);
if (mp == NULL) {
jfs_err("dbSync: read_metapage failed!");
return -EIO;
}
/* copy the in-memory version of the bmap to the on-disk version */
dbmp_le = (struct dbmap_disk *) mp->data;
dbmp_le->dn_mapsize = cpu_to_le64(bmp->db_mapsize);
dbmp_le->dn_nfree = cpu_to_le64(bmp->db_nfree);
dbmp_le->dn_l2nbperpage = cpu_to_le32(bmp->db_l2nbperpage);
dbmp_le->dn_numag = cpu_to_le32(bmp->db_numag);
dbmp_le->dn_maxlevel = cpu_to_le32(bmp->db_maxlevel);
dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag);
dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref);
dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel);
dbmp_le->dn_agheight = cpu_to_le32(bmp->db_agheight);
dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth);
dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart);
dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size);
for (i = 0; i < MAXAG; i++)
dbmp_le->dn_agfree[i] = cpu_to_le64(bmp->db_agfree[i]);
dbmp_le->dn_agsize = cpu_to_le64(bmp->db_agsize);
dbmp_le->dn_maxfreebud = bmp->db_maxfreebud;
/* write the buffer */
write_metapage(mp);
/*
* write out dirty pages of bmap
*/
filemap_write_and_wait(ipbmap->i_mapping);
diWriteSpecial(ipbmap, 0);
return (0);
}
/*
* NAME: dbFree()
*
* FUNCTION: free the specified block range from the working block
* allocation map.
*
* the blocks will be free from the working map one dmap
* at a time.
*
* PARAMETERS:
* ip - pointer to in-core inode;
* blkno - starting block number to be freed.
* nblocks - number of blocks to be freed.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*/
int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
{
struct metapage *mp;
struct dmap *dp;
int nb, rc;
s64 lblkno, rem;
struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
/* block to be freed better be within the mapsize. */
if (unlikely((blkno == 0) || (blkno + nblocks > bmp->db_mapsize))) {
IREAD_UNLOCK(ipbmap);
printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
(unsigned long long) blkno,
(unsigned long long) nblocks);
jfs_error(ip->i_sb,
"dbFree: block to be freed is outside the map");
return -EIO;
}
/*
* free the blocks a dmap at a time.
*/
mp = NULL;
for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) {
/* release previous dmap if any */
if (mp) {
write_metapage(mp);
}
/* get the buffer for the current dmap. */
lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
if (mp == NULL) {
IREAD_UNLOCK(ipbmap);
return -EIO;
}
dp = (struct dmap *) mp->data;
/* determine the number of blocks to be freed from
* this dmap.
*/
nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));
/* free the blocks. */
if ((rc = dbFreeDmap(bmp, dp, blkno, nb))) {
jfs_error(ip->i_sb, "dbFree: error in block map\n");
release_metapage(mp);
IREAD_UNLOCK(ipbmap);
return (rc);
}
}
/* write the last buffer. */
write_metapage(mp);
IREAD_UNLOCK(ipbmap);
return (0);
}
/*
* NAME: dbUpdatePMap()
*
* FUNCTION: update the allocation state (free or allocate) of the
* specified block range in the persistent block allocation map.
*
* the blocks will be updated in the persistent map one
* dmap at a time.
*
* PARAMETERS:
* ipbmap - pointer to in-core inode for the block map.
* free - 'true' if block range is to be freed from the persistent
* map; 'false' if it is to be allocated.
* blkno - starting block number of the range.
* nblocks - number of contiguous blocks in the range.
* tblk - transaction block;
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*/
int
dbUpdatePMap(struct inode *ipbmap,
int free, s64 blkno, s64 nblocks, struct tblock * tblk)
{
int nblks, dbitno, wbitno, rbits;
int word, nbits, nwords;
struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
s64 lblkno, rem, lastlblkno;
u32 mask;
struct dmap *dp;
struct metapage *mp;
struct jfs_log *log;
int lsn, difft, diffp;
unsigned long flags;
/* the blocks better be within the mapsize. */
if (blkno + nblocks > bmp->db_mapsize) {
printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
(unsigned long long) blkno,
(unsigned long long) nblocks);
jfs_error(ipbmap->i_sb,
"dbUpdatePMap: blocks are outside the map");
return -EIO;
}
/* compute delta of transaction lsn from log syncpt */
lsn = tblk->lsn;
log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
logdiff(difft, lsn, log);
/*
* update the block state a dmap at a time.
*/
mp = NULL;
lastlblkno = 0;
for (rem = nblocks; rem > 0; rem -= nblks, blkno += nblks) {
/* get the buffer for the current dmap. */
lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
if (lblkno != lastlblkno) {
if (mp) {
write_metapage(mp);
}
mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE,
0);
if (mp == NULL)
return -EIO;
metapage_wait_for_io(mp);
}
dp = (struct dmap *) mp->data;
/* determine the bit number and word within the dmap of
* the starting block. also determine how many blocks
* are to be updated within this dmap.
*/
dbitno = blkno & (BPERDMAP - 1);
word = dbitno >> L2DBWORD;
nblks = min(rem, (s64)BPERDMAP - dbitno);
/* update the bits of the dmap words. the first and last
* words may only have a subset of their bits updated. if
* this is the case, we'll work against that word (i.e.
* partial first and/or last) only in a single pass. a
* single pass will also be used to update all words that
* are to have all their bits updated.
*/
for (rbits = nblks; rbits > 0;
rbits -= nbits, dbitno += nbits) {
/* determine the bit number within the word and
* the number of bits within the word.
*/
wbitno = dbitno & (DBWORD - 1);
nbits = min(rbits, DBWORD - wbitno);
/* check if only part of the word is to be updated. */
if (nbits < DBWORD) {
/* update (free or allocate) the bits
* in this word.
*/
mask =
(ONES << (DBWORD - nbits) >> wbitno);
if (free)
dp->pmap[word] &=
cpu_to_le32(~mask);
else
dp->pmap[word] |=
cpu_to_le32(mask);
word += 1;
} else {
/* one or more words are to have all
* their bits updated. determine how
* many words and how many bits.
*/
nwords = rbits >> L2DBWORD;
nbits = nwords << L2DBWORD;
/* update (free or allocate) the bits
* in these words.
*/
if (free)
memset(&dp->pmap[word], 0,
nwords * 4);
else
memset(&dp->pmap[word], (int) ONES,
nwords * 4);
word += nwords;
}
}
/*
* update dmap lsn
*/
if (lblkno == lastlblkno)
continue;
lastlblkno = lblkno;
LOGSYNC_LOCK(log, flags);
if (mp->lsn != 0) {
/* inherit older/smaller lsn */
logdiff(diffp, mp->lsn, log);
if (difft < diffp) {
mp->lsn = lsn;
/* move bp after tblock in logsync list */
list_move(&mp->synclist, &tblk->synclist);
}
/* inherit younger/larger clsn */
logdiff(difft, tblk->clsn, log);
logdiff(diffp, mp->clsn, log);
if (difft > diffp)
mp->clsn = tblk->clsn;
} else {
mp->log = log;
mp->lsn = lsn;
/* insert bp after tblock in logsync list */
log->count++;
list_add(&mp->synclist, &tblk->synclist);
mp->clsn = tblk->clsn;
}
LOGSYNC_UNLOCK(log, flags);
}
/* write the last buffer. */
if (mp) {
write_metapage(mp);
}
return (0);
}
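/*
 * Worked example of the partial-word mask above (values hypothetical):
 * with wbitno = 3 and nbits = 5, mask = (ONES << 27) >> 3 =
 * 0x1f000000, selecting the five bits starting at bit 3 counting from
 * the most significant end, which matches the big-endian bit
 * numbering of the dmap words.
 */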
/*
* NAME: dbNextAG()
*
* FUNCTION: find the preferred allocation group for new allocations.
*
* Within the allocation groups, we maintain a preferred
* allocation group which consists of a group with at least
* average free space. It is the preferred group that we target
* new inode allocation towards. The tie-in between inode
* allocation and block allocation occurs as we allocate the
* first (data) block of an inode and specify the inode (block)
* as the allocation hint for this block.
*
* We try to avoid having more than one open file growing in
* an allocation group, as this will lead to fragmentation.
* This differs from the old OS/2 method of trying to keep
* empty ags around for large allocations.
*
* PARAMETERS:
* ipbmap - pointer to in-core inode for the block map.
*
* RETURN VALUES:
* the preferred allocation group number.
*/
int dbNextAG(struct inode *ipbmap)
{
s64 avgfree;
int agpref;
s64 hwm = 0;
int i;
int next_best = -1;
struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
BMAP_LOCK(bmp);
/* determine the average number of free blocks within the ags. */
avgfree = (u32)bmp->db_nfree / bmp->db_numag;
/*
* if the current preferred ag does not have an active allocator
* and has at least average freespace, return it
*/
agpref = bmp->db_agpref;
if ((atomic_read(&bmp->db_active[agpref]) == 0) &&
(bmp->db_agfree[agpref] >= avgfree))
goto unlock;
/* From the last preferred ag, find the next one with at least
* average free space.
*/
for (i = 0 ; i < bmp->db_numag; i++, agpref++) {
if (agpref == bmp->db_numag)
agpref = 0;
if (atomic_read(&bmp->db_active[agpref]))
/* open file is currently growing in this ag */
continue;
if (bmp->db_agfree[agpref] >= avgfree) {
/* Return this one */
bmp->db_agpref = agpref;
goto unlock;
} else if (bmp->db_agfree[agpref] > hwm) {
/* Less than avg. freespace, but best so far */
hwm = bmp->db_agfree[agpref];
next_best = agpref;
}
}
/*
* If no inactive ag was found with average freespace, use the
* next best
*/
if (next_best != -1)
bmp->db_agpref = next_best;
/* else leave db_agpref unchanged */
unlock:
BMAP_UNLOCK(bmp);
/* return the preferred group.
*/
return (bmp->db_agpref);
}
/*
* NAME: dbAlloc()
*
* FUNCTION: attempt to allocate a specified number of contiguous free
* blocks from the working allocation block map.
*
* the block allocation policy uses hints and a multi-step
* approach.
*
* for allocation requests smaller than the number of blocks
* per dmap, we first try to allocate the new blocks
* immediately following the hint. if these blocks are not
* available, we try to allocate blocks near the hint. if
* no blocks near the hint are available, we next try to
* allocate within the same dmap as contains the hint.
*
* if no blocks are available in the dmap or the allocation
* request is larger than the dmap size, we try to allocate
* within the same allocation group as contains the hint. if
* this does not succeed, we finally try to allocate anywhere
* within the aggregate.
*
* we also try to allocate anywhere within the aggregate for
* allocation requests larger than the allocation group
* size or requests that specify no hint value.
*
* PARAMETERS:
* ip - pointer to in-core inode;
* hint - allocation hint.
* nblocks - number of contiguous blocks in the range.
* results - on successful return, set to the starting block number
* of the newly allocated contiguous range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*/
int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
{
int rc, agno;
struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
struct bmap *bmp;
struct metapage *mp;
s64 lblkno, blkno;
struct dmap *dp;
int l2nb;
s64 mapSize;
int writers;
/* assert that nblocks is valid */
assert(nblocks > 0);
/* get the log2 number of blocks to be allocated.
* if the number of blocks is not a log2 multiple,
* it will be rounded up to the next log2 multiple.
*/
l2nb = BLKSTOL2(nblocks);
bmp = JFS_SBI(ip->i_sb)->bmap;
mapSize = bmp->db_mapsize;
/* the hint should be within the map */
if (hint >= mapSize) {
jfs_error(ip->i_sb, "dbAlloc: the hint is outside the map");
return -EIO;
}
/* if the number of blocks to be allocated is greater than the
* allocation group size, try to allocate anywhere.
*/
if (l2nb > bmp->db_agl2size) {
IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
rc = dbAllocAny(bmp, nblocks, l2nb, results);
goto write_unlock;
}
/*
* If no hint, let dbNextAG recommend an allocation group
*/
if (hint == 0)
goto pref_ag;
/* we would like to allocate close to the hint. adjust the
* hint to the block following the hint since the allocators
* will start looking for free space starting at this point.
*/
blkno = hint + 1;
if (blkno >= bmp->db_mapsize)
goto pref_ag;
agno = blkno >> bmp->db_agl2size;
/* check if blkno crosses over into a new allocation group.
* if so, check if we should allow allocations within this
* allocation group.
*/
if ((blkno & (bmp->db_agsize - 1)) == 0)
/* check if the AG is currently being written to.
* if so, call dbNextAG() to find a non-busy
* AG with sufficient free space.
*/
if (atomic_read(&bmp->db_active[agno]))
goto pref_ag;
/* check if the allocation request size can be satisfied from a
* single dmap. if so, try to allocate from the dmap containing
* the hint using a tiered strategy.
*/
if (nblocks <= BPERDMAP) {
IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
/* get the buffer for the dmap containing the hint.
*/
rc = -EIO;
lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
if (mp == NULL)
goto read_unlock;
dp = (struct dmap *) mp->data;
/* first, try to satisfy the allocation request with the
* blocks beginning at the hint.
*/
if ((rc = dbAllocNext(bmp, dp, blkno, (int) nblocks))
!= -ENOSPC) {
if (rc == 0) {
*results = blkno;
mark_metapage_dirty(mp);
}
release_metapage(mp);
goto read_unlock;
}
writers = atomic_read(&bmp->db_active[agno]);
if ((writers > 1) ||
((writers == 1) && (JFS_IP(ip)->active_ag != agno))) {
/*
* Someone else is writing in this allocation
* group. To avoid fragmenting, try another ag
*/
release_metapage(mp);
IREAD_UNLOCK(ipbmap);
goto pref_ag;
}
/* next, try to satisfy the allocation request with blocks
* near the hint.
*/
if ((rc =
dbAllocNear(bmp, dp, blkno, (int) nblocks, l2nb, results))
!= -ENOSPC) {
if (rc == 0)
mark_metapage_dirty(mp);
release_metapage(mp);
goto read_unlock;
}
/* try to satisfy the allocation request with blocks within
* the same dmap as the hint.
*/
if ((rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results))
!= -ENOSPC) {
if (rc == 0)
mark_metapage_dirty(mp);
release_metapage(mp);
goto read_unlock;
}
release_metapage(mp);
IREAD_UNLOCK(ipbmap);
}
/* try to satisfy the allocation request with blocks within
* the same allocation group as the hint.
*/
IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) != -ENOSPC)
goto write_unlock;
IWRITE_UNLOCK(ipbmap);
pref_ag:
/*
* Let dbNextAG recommend a preferred allocation group
*/
agno = dbNextAG(ipbmap);
IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
/* Try to allocate within this allocation group. if that fails, try to
* allocate anywhere in the map.
*/
if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) == -ENOSPC)
rc = dbAllocAny(bmp, nblocks, l2nb, results);
write_unlock:
IWRITE_UNLOCK(ipbmap);
return (rc);
read_unlock:
IREAD_UNLOCK(ipbmap);
return (rc);
}
#ifdef _NOTYET
/*
* NAME: dbAllocExact()
*
* FUNCTION: try to allocate the requested extent;
*
* PARAMETERS:
* ip - pointer to in-core inode;
* blkno - extent address;
* nblocks - extent length;
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*/
int dbAllocExact(struct inode *ip, s64 blkno, int nblocks)
{
int rc;
struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
struct dmap *dp;
s64 lblkno;
struct metapage *mp;
IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
/*
* validate extent request:
*
* note: defragfs policy:
* max 64 blocks will be moved.
* allocation request size must be satisfied from a single dmap.
*/
if (nblocks <= 0 || nblocks > BPERDMAP || blkno >= bmp->db_mapsize) {
IREAD_UNLOCK(ipbmap);
return -EINVAL;
}
if (nblocks > ((s64) 1 << bmp->db_maxfreebud)) {
/* the free space is no longer available */
IREAD_UNLOCK(ipbmap);
return -ENOSPC;
}
/* read in the dmap covering the extent */
lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
if (mp == NULL) {
IREAD_UNLOCK(ipbmap);
return -EIO;
}
dp = (struct dmap *) mp->data;
/* try to allocate the requested extent */
rc = dbAllocNext(bmp, dp, blkno, nblocks);
IREAD_UNLOCK(ipbmap);
if (rc == 0)
mark_metapage_dirty(mp);
release_metapage(mp);
return (rc);
}
#endif /* _NOTYET */
/*
* NAME: dbReAlloc()
*
* FUNCTION: attempt to extend a current allocation by a specified
* number of blocks.
*
* this routine attempts to satisfy the allocation request
* by first trying to extend the existing allocation in
* place by allocating the additional blocks as the blocks
* immediately following the current allocation. if these
* blocks are not available, this routine will attempt to
* allocate a new set of contiguous blocks large enough
* to cover the existing allocation plus the additional
* number of blocks required.
*
* PARAMETERS:
* ip - pointer to in-core inode requiring allocation.
* blkno - starting block of the current allocation.
* nblocks - number of contiguous blocks within the current
* allocation.
* addnblocks - number of blocks to add to the allocation.
* results - on successful return, set to the starting block number
* of the existing allocation if the existing allocation
* was extended in place or to a newly allocated contiguous
* range if the existing allocation could not be extended
* in place.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*/
int
dbReAlloc(struct inode *ip,
s64 blkno, s64 nblocks, s64 addnblocks, s64 * results)
{
int rc;
/* try to extend the allocation in place.
*/
if ((rc = dbExtend(ip, blkno, nblocks, addnblocks)) == 0) {
*results = blkno;
return (0);
} else {
if (rc != -ENOSPC)
return (rc);
}
/* could not extend the allocation in place, so allocate a
* new set of blocks for the entire request (i.e. try to get
* a range of contiguous blocks large enough to cover the
* existing allocation plus the additional blocks.)
*/
return (dbAlloc
(ip, blkno + nblocks - 1, addnblocks + nblocks, results));
}
/*
* NAME: dbExtend()
*
* FUNCTION: attempt to extend a current allocation by a specified
* number of blocks.
*
* this routine attempts to satisfy the allocation request
* by first trying to extend the existing allocation in
* place by allocating the additional blocks as the blocks
* immediately following the current allocation.
*
* PARAMETERS:
* ip - pointer to in-core inode requiring allocation.
* blkno - starting block of the current allocation.
* nblocks - number of contiguous blocks within the current
* allocation.
* addnblocks - number of blocks to add to the allocation.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*/
static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
{
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
s64 lblkno, lastblkno, extblkno;
uint rel_block;
struct metapage *mp;
struct dmap *dp;
int rc;
struct inode *ipbmap = sbi->ipbmap;
struct bmap *bmp;
/*
* We don't want a non-aligned extent to cross a page boundary
*/
if (((rel_block = blkno & (sbi->nbperpage - 1))) &&
(rel_block + nblocks + addnblocks > sbi->nbperpage))
return -ENOSPC;
/* get the last block of the current allocation */
lastblkno = blkno + nblocks - 1;
/* determine the block number of the block following
* the existing allocation.
*/
extblkno = lastblkno + 1;
IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
/* better be within the file system */
bmp = sbi->bmap;
if (lastblkno < 0 || lastblkno >= bmp->db_mapsize) {
IREAD_UNLOCK(ipbmap);
jfs_error(ip->i_sb,
"dbExtend: the block is outside the filesystem");
return -EIO;
}
/* we'll attempt to extend the current allocation in place by
* allocating the additional blocks as the blocks immediately
* following the current allocation. we only try to extend the
* current allocation in place if the number of additional blocks
* can fit into a dmap, the last block of the current allocation
* is not the last block of the file system, and the start of the
* inplace extension is not on an allocation group boundary.
*/
if (addnblocks > BPERDMAP || extblkno >= bmp->db_mapsize ||
(extblkno & (bmp->db_agsize - 1)) == 0) {
IREAD_UNLOCK(ipbmap);
return -ENOSPC;
}
/* get the buffer for the dmap containing the first block
* of the extension.
*/
lblkno = BLKTODMAP(extblkno, bmp->db_l2nbperpage);
mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
if (mp == NULL) {
IREAD_UNLOCK(ipbmap);
return -EIO;
}
dp = (struct dmap *) mp->data;
/* try to allocate the blocks immediately following the
* current allocation.
*/
rc = dbAllocNext(bmp, dp, extblkno, (int) addnblocks);
IREAD_UNLOCK(ipbmap);
/* were we successful ? */
if (rc == 0)
write_metapage(mp);
else
/* we were not successful */
release_metapage(mp);
return (rc);
}
/*
* NAME: dbAllocNext()
*
* FUNCTION: attempt to allocate the blocks of the specified block
* range within a dmap.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap.
* blkno - starting block number of the range.
* nblocks - number of contiguous free blocks of the range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap) held on entry/exit;
*/
static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
int dbitno, word, rembits, nb, nwords, wbitno, nw;
int l2size;
s8 *leaf;
u32 mask;
if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) {
jfs_error(bmp->db_ipbmap->i_sb,
"dbAllocNext: Corrupt dmap page");
return -EIO;
}
/* pick up a pointer to the leaves of the dmap tree.
*/
leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);
/* determine the bit number and word within the dmap of the
* starting block.
*/
dbitno = blkno & (BPERDMAP - 1);
word = dbitno >> L2DBWORD;
/* check if the specified block range is contained within
* this dmap.
*/
if (dbitno + nblocks > BPERDMAP)
return -ENOSPC;
/* check if the starting leaf indicates that anything
* is free.
*/
if (leaf[word] == NOFREE)
return -ENOSPC;
/* check the dmaps words corresponding to block range to see
* if the block range is free. not all bits of the first and
* last words may be contained within the block range. if this
* is the case, we'll work against those words (i.e. partial first
* and/or last) on an individual basis (a single pass) and examine
* the actual bits to determine if they are free. a single pass
* will be used for all dmap words fully contained within the
* specified range. within this pass, the leaves of the dmap
* tree will be examined to determine if the blocks are free. a
* single leaf may describe the free space of multiple dmap
* words, so we may visit only a subset of the actual leaves
* corresponding to the dmap words of the block range.
*/
for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
/* determine the bit number within the word and
* the number of bits within the word.
*/
wbitno = dbitno & (DBWORD - 1);
nb = min(rembits, DBWORD - wbitno);
/* check if only part of the word is to be examined.
*/
if (nb < DBWORD) {
/* check if the bits are free.
*/
mask = (ONES << (DBWORD - nb) >> wbitno);
if ((mask & ~le32_to_cpu(dp->wmap[word])) != mask)
return -ENOSPC;
word += 1;
} else {
/* one or more dmap words are fully contained
* within the block range. determine how many
* words and how many bits.
*/
nwords = rembits >> L2DBWORD;
nb = nwords << L2DBWORD;
/* now examine the appropriate leaves to determine
* if the blocks are free.
*/
while (nwords > 0) {
/* does the leaf describe any free space ?
*/
if (leaf[word] < BUDMIN)
return -ENOSPC;
/* determine the l2 number of bits provided
* by this leaf.
*/
l2size =
min((int)leaf[word], NLSTOL2BSZ(nwords));
/* determine how many words were handled.
*/
nw = BUDSIZE(l2size, BUDMIN);
nwords -= nw;
word += nw;
}
}
}
/* allocate the blocks.
*/
return (dbAllocDmap(bmp, dp, blkno, nblocks));
}
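/*
 * Worked example of the leaf arithmetic above (values illustrative):
 * a leaf value of 7 describes 2^7 = 128 contiguous free bits. With
 * BUDMIN = L2DBWORD = 5, a single pass handles
 * nw = BUDSIZE(7, BUDMIN) = 1 << (7 - 5) = 4 dmap words at once
 * rather than examining each 32-bit word individually.
 */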
/*
* NAME: dbAllocNear()
*
* FUNCTION: attempt to allocate a number of contiguous free blocks near
* a specified block (hint) within a dmap.
*
* starting with the dmap leaf that covers the hint, we'll
* check the next four contiguous leaves for sufficient free
* space. if sufficient free space is found, we'll allocate
* the desired free space.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap.
* blkno - block number to allocate near.
* nblocks - actual number of contiguous free blocks desired.
* l2nb - log2 number of contiguous free blocks desired.
* results - on successful return, set to the starting block number
* of the newly allocated range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap) held on entry/exit;
*/
static int
dbAllocNear(struct bmap * bmp,
struct dmap * dp, s64 blkno, int nblocks, int l2nb, s64 * results)
{
int word, lword, rc;
s8 *leaf;
if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) {
jfs_error(bmp->db_ipbmap->i_sb,
"dbAllocNear: Corrupt dmap page");
return -EIO;
}
leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);
/* determine the word within the dmap that holds the hint
* (i.e. blkno). also, determine the last word in the dmap
* that we'll include in our examination.
*/
word = (blkno & (BPERDMAP - 1)) >> L2DBWORD;
lword = min(word + 4, LPERDMAP);
/* examine the leaves for sufficient free space.
*/
for (; word < lword; word++) {
/* does the leaf describe sufficient free space ?
*/
if (leaf[word] < l2nb)
continue;
/* determine the block number within the file system
* of the first block described by this dmap word.
*/
blkno = le64_to_cpu(dp->start) + (word << L2DBWORD);
/* if not all bits of the dmap word are free, get the
* starting bit number within the dmap word of the required
* string of free bits and adjust the block number with the
* value.
*/
if (leaf[word] < BUDMIN)
blkno +=
dbFindBits(le32_to_cpu(dp->wmap[word]), l2nb);
/* allocate the blocks.
*/
if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0)
*results = blkno;
return (rc);
}
return -ENOSPC;
}
/*
* NAME: dbAllocAG()
*
* FUNCTION: attempt to allocate the specified number of contiguous
* free blocks within the specified allocation group.
*
* unless the allocation group size is equal to the number
* of blocks per dmap, the dmap control pages will be used to
* find the required free space, if available. we start the
* search at the highest dmap control page level which
* distinctly describes the allocation group's free space
* (i.e. the highest level at which the allocation group's
* free space is not mixed in with that of any other group).
* in addition, we start the search within this level at a
* height of the dmapctl dmtree at which the nodes distinctly
* describe the allocation group's free space. at this height,
* the allocation group's free space may be represented by 1
* or two sub-trees, depending on the allocation group size.
* we search the top nodes of these subtrees left to right for
* sufficient free space. if sufficient free space is found,
* the subtree is searched to find the leftmost leaf that
* has free space. once we have made it to the leaf, we
* move the search to the next lower level dmap control page
* corresponding to this leaf. we continue down the dmap control
* pages until we find the dmap that contains or starts the
* sufficient free space and we allocate at this dmap.
*
* if the allocation group size is equal to the dmap size,
* we'll start at the dmap corresponding to the allocation
* group and attempt the allocation at this level.
*
* the dmap control page search is also not performed if the
* allocation group is completely free and we go to the first
* dmap of the allocation group to do the allocation. this is
* done because the allocation group may be part (not the first
* part) of a larger binary buddy system, causing the dmap
* control pages to indicate no free space (NOFREE) within
* the allocation group.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* agno - allocation group number.
* nblocks - actual number of contiguous free blocks desired.
* l2nb - log2 number of contiguous free blocks desired.
* results - on successful return, set to the starting block number
* of the newly allocated range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* note: IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int
dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
{
struct metapage *mp;
struct dmapctl *dcp;
int rc, ti, i, k, m, n, agperlev;
s64 blkno, lblkno;
int budmin;
/* allocation request should not be for more than the
* allocation group size.
*/
if (l2nb > bmp->db_agl2size) {
jfs_error(bmp->db_ipbmap->i_sb,
"dbAllocAG: allocation request is larger than the "
"allocation group size");
return -EIO;
}
/* determine the starting block number of the allocation
* group.
*/
blkno = (s64) agno << bmp->db_agl2size;
/* check if the allocation group size is the minimum allocation
* group size or if the allocation group is completely free. if
* the allocation group size is the minimum size of BPERDMAP (i.e.
* 1 dmap), there is no need to search the dmap control page (below)
* that fully describes the allocation group since the allocation
* group is already fully described by a dmap. in this case, we
* just call dbAllocCtl() to search the dmap tree and allocate the
* required space if available.
*
* if the allocation group is completely free, dbAllocCtl() is
* also called to allocate the required space. this is done for
* two reasons. first, it makes no sense searching the dmap control
* pages for free space when we know that free space exists. second,
* the dmap control pages may indicate that the allocation group
* has no free space if the allocation group is part (not the first
* part) of a larger binary buddy system.
*/
if (bmp->db_agsize == BPERDMAP
|| bmp->db_agfree[agno] == bmp->db_agsize) {
rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
if ((rc == -ENOSPC) &&
(bmp->db_agfree[agno] == bmp->db_agsize)) {
printk(KERN_ERR "blkno = %Lx, blocks = %Lx\n",
(unsigned long long) blkno,
(unsigned long long) nblocks);
jfs_error(bmp->db_ipbmap->i_sb,
"dbAllocAG: dbAllocCtl failed in free AG");
}
return (rc);
}
/* the buffer for the dmap control page that fully describes the
* allocation group.
*/
lblkno = BLKTOCTL(blkno, bmp->db_l2nbperpage, bmp->db_aglevel);
mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
if (mp == NULL)
return -EIO;
dcp = (struct dmapctl *) mp->data;
budmin = dcp->budmin;
if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
jfs_error(bmp->db_ipbmap->i_sb,
"dbAllocAG: Corrupt dmapctl page");
release_metapage(mp);
return -EIO;
}
/* search the subtree(s) of the dmap control page that describes
* the allocation group, looking for sufficient free space. to begin,
* determine how many allocation groups are represented in a dmap
* control page at the control page level (i.e. L0, L1, L2) that
* fully describes an allocation group. next, determine the starting
* tree index of this allocation group within the control page.
*/
agperlev =
(1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth;
ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));
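/* for illustration, using the usual jfs_dmap.h geometry
* (L2LPERCTL == 10) with an agheight of 2 and an agwidth of 1
* (as computed by dbFinalizeBmap()): agperlev = (1 << (10 - 4)) / 1
* = 64 ags per control page, so ag 70 starts at subtree index
* ti = db_agstart + 1 * (70 & 63) = db_agstart + 6.
*/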
/* dmap control page trees fan-out by 4 and a single allocation
* group may be described by 1 or 2 subtrees within the ag level
* dmap control page, depending upon the ag size. examine the ag's
* subtrees for sufficient free space, starting with the leftmost
* subtree.
*/
for (i = 0; i < bmp->db_agwidth; i++, ti++) {
/* is there sufficient free space ?
*/
if (l2nb > dcp->stree[ti])
continue;
/* sufficient free space found in a subtree. now search down
* the subtree to find the leftmost leaf that describes this
* free space.
*/
for (k = bmp->db_agheight; k > 0; k--) {
for (n = 0, m = (ti << 2) + 1; n < 4; n++) {
if (l2nb <= dcp->stree[m + n]) {
ti = m + n;
break;
}
}
if (n == 4) {
jfs_error(bmp->db_ipbmap->i_sb,
"dbAllocAG: failed descending stree");
release_metapage(mp);
return -EIO;
}
}
/* determine the block number within the file system
* that corresponds to this leaf.
*/
if (bmp->db_aglevel == 2)
blkno = 0;
else if (bmp->db_aglevel == 1)
blkno &= ~(MAXL1SIZE - 1);
else /* bmp->db_aglevel == 0 */
blkno &= ~(MAXL0SIZE - 1);
blkno +=
((s64) (ti - le32_to_cpu(dcp->leafidx))) << budmin;
/* release the buffer in preparation for going down
* the next level of dmap control pages.
*/
release_metapage(mp);
/* check if we need to continue to search down the lower
* level dmap control pages. we need to if the number of
* blocks required is less than maximum number of blocks
* described at the next lower level.
*/
if (l2nb < budmin) {
/* search the lower level dmap control pages to get
* the starting block number of the dmap that
* contains or starts off the free space.
*/
if ((rc =
dbFindCtl(bmp, l2nb, bmp->db_aglevel - 1,
&blkno))) {
if (rc == -ENOSPC) {
jfs_error(bmp->db_ipbmap->i_sb,
"dbAllocAG: control page "
"inconsistent");
return -EIO;
}
return (rc);
}
}
/* allocate the blocks.
*/
rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
if (rc == -ENOSPC) {
jfs_error(bmp->db_ipbmap->i_sb,
"dbAllocAG: unable to allocate blocks");
rc = -EIO;
}
return (rc);
}
/* no space in the allocation group. release the buffer and
* return -ENOSPC.
*/
release_metapage(mp);
return -ENOSPC;
}
/*
* NAME: dbAllocAny()
*
* FUNCTION: attempt to allocate the specified number of contiguous
* free blocks anywhere in the file system.
*
* dbAllocAny() attempts to find sufficient free space by
* searching down the dmap control pages, starting with the
* highest level (i.e. L0, L1, L2) control page. if free space
* large enough to satisfy the request is found, the
* desired free space is allocated.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* nblocks - actual number of contiguous free blocks desired.
* l2nb - log2 number of contiguous free blocks desired.
* results - on successful return, set to the starting block number
* of the newly allocated range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results)
{
int rc;
s64 blkno = 0;
/* starting with the top level dmap control page, search
* down the dmap control levels for sufficient free space.
* if free space is found, dbFindCtl() returns the starting
* block number of the dmap that contains or starts off the
* range of free space.
*/
if ((rc = dbFindCtl(bmp, l2nb, bmp->db_maxlevel, &blkno)))
return (rc);
/* allocate the blocks.
*/
rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
if (rc == -ENOSPC) {
jfs_error(bmp->db_ipbmap->i_sb,
"dbAllocAny: unable to allocate blocks");
return -EIO;
}
return (rc);
}
/*
* NAME: dbFindCtl()
*
* FUNCTION: starting at a specified dmap control page level and block
* number, search down the dmap control levels for a range of
* contiguous free blocks large enough to satisfy an allocation
* request for the specified number of free blocks.
*
* if sufficient contiguous free blocks are found, this routine
* returns the starting block number within a dmap page that
* contains or starts a range of contiguous free blocks that
* is sufficient in size.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* l2nb - log2 number of contiguous free blocks desired.
* level - starting dmap control page level.
* *blkno - on entry, starting block number for conducting the search.
* on successful return, the first block within a dmap page
* that contains or starts a range of contiguous free blocks.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
*/
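/* for illustration, assuming dbInitDmapCtl() sets budmin to
* L2BPERDMAP + L2LPERCTL * level: a request of l2nb == L2BPERDMAP
* (one full dmap) satisfies l2nb >= budmin at L0, so the descent
* below stops there and *blkno is left at the first block of the
* dmap that contains or starts the free space.
*/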
static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
{
int rc, leafidx, lev;
s64 b, lblkno;
struct dmapctl *dcp;
int budmin;
struct metapage *mp;
/* starting at the specified dmap control page level and block
* number, search down the dmap control levels for the starting
* block number of a dmap page that contains or starts off
* sufficient free blocks.
*/
for (lev = level, b = *blkno; lev >= 0; lev--) {
/* get the buffer of the dmap control page for the block
* number and level (i.e. L0, L1, L2).
*/
lblkno = BLKTOCTL(b, bmp->db_l2nbperpage, lev);
mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
if (mp == NULL)
return -EIO;
dcp = (struct dmapctl *) mp->data;
budmin = dcp->budmin;
if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
jfs_error(bmp->db_ipbmap->i_sb,
"dbFindCtl: Corrupt dmapctl page");
release_metapage(mp);
return -EIO;
}
/* search the tree within the dmap control page for
* sufficient free space. if sufficient free space is found,
* dbFindLeaf() returns the index of the leaf at which
* free space was found.
*/
rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx);
/* release the buffer.
*/
release_metapage(mp);
/* space found ?
*/
if (rc) {
if (lev != level) {
jfs_error(bmp->db_ipbmap->i_sb,
"dbFindCtl: dmap inconsistent");
return -EIO;
}
return -ENOSPC;
}
/* adjust the block number to reflect the location within
* the dmap control page (i.e. the leaf) at which free
* space was found.
*/
b += (((s64) leafidx) << budmin);
/* we stop the search at this dmap control page level if
* the number of blocks required is greater than or equal
* to the maximum number of blocks described at the next
* (lower) level.
*/
if (l2nb >= budmin)
break;
}
*blkno = b;
return (0);
}
/*
* NAME: dbAllocCtl()
*
* FUNCTION: attempt to allocate a specified number of contiguous
* blocks starting within a specific dmap.
*
* this routine is called by higher level routines that search
* the dmap control pages above the actual dmaps for contiguous
* free space. the result of successful searches by these
* routines are the starting block numbers within dmaps, with
* the dmaps themselves containing the desired contiguous free
* space or starting a contiguous free space of desired size
* that is made up of the blocks of one or more dmaps. these
* calls should not fail due to insufficient resources.
*
* this routine is called in some cases where it is not known
* whether it will fail due to insufficient resources. more
* specifically, this occurs when allocating from an allocation
* group whose size is equal to the number of blocks per dmap.
* in this case, the dmap control pages are not examined prior
* to calling this routine (to save pathlength) and the call
* might fail.
*
* for a request size that fits within a dmap, this routine relies
* upon the dmap's dmtree to find the requested contiguous free
* space. for request sizes that are larger than a dmap, the
* requested free space will start at the first block of the
* first dmap (i.e. blkno).
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* nblocks - actual number of contiguous free blocks to allocate.
* l2nb - log2 number of contiguous free blocks to allocate.
* blkno - starting block number of the dmap to start the allocation
* from.
* results - on successful return, set to the starting block number
* of the newly allocated range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
*/
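/* for illustration, with L2BPERDMAP == 13: a request of l2nb == 14
* (nblocks == 2 * BPERDMAP) takes the multiple-dmap path below,
* claiming BPERDMAP blocks from each of two all-free dmaps; a failure
* on the second dmap sends us to the backout path to free what the
* first dmap already supplied.
*/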
static int
dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
{
int rc, nb;
s64 b, lblkno, n;
struct metapage *mp;
struct dmap *dp;
/* check if the allocation request is confined to a single dmap.
*/
if (l2nb <= L2BPERDMAP) {
/* get the buffer for the dmap.
*/
lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
if (mp == NULL)
return -EIO;
dp = (struct dmap *) mp->data;
/* try to allocate the blocks.
*/
rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results);
if (rc == 0)
mark_metapage_dirty(mp);
release_metapage(mp);
return (rc);
}
/* allocation request involving multiple dmaps. it must start on
* a dmap boundary.
*/
assert((blkno & (BPERDMAP - 1)) == 0);
/* allocate the blocks dmap by dmap.
*/
for (n = nblocks, b = blkno; n > 0; n -= nb, b += nb) {
/* get the buffer for the dmap.
*/
lblkno = BLKTODMAP(b, bmp->db_l2nbperpage);
mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
if (mp == NULL) {
rc = -EIO;
goto backout;
}
dp = (struct dmap *) mp->data;
/* the dmap better be all free.
*/
if (dp->tree.stree[ROOT] != L2BPERDMAP) {
release_metapage(mp);
jfs_error(bmp->db_ipbmap->i_sb,
"dbAllocCtl: the dmap is not all free");
rc = -EIO;
goto backout;
}
/* determine how many blocks to allocate from this dmap.
*/
nb = min(n, (s64)BPERDMAP);
/* allocate the blocks from the dmap.
*/
if ((rc = dbAllocDmap(bmp, dp, b, nb))) {
release_metapage(mp);
goto backout;
}
/* write the buffer.
*/
write_metapage(mp);
}
/* set the results (starting block number) and return.
*/
*results = blkno;
return (0);
/* something failed in handling an allocation request involving
* multiple dmaps. we'll try to clean up by backing out any
* allocation that has already happened for this request. if
* we fail in backing out the allocation, we'll mark the file
* system to indicate that blocks have been leaked.
*/
backout:
/* try to backout the allocations dmap by dmap.
*/
for (n = nblocks - n, b = blkno; n > 0;
n -= BPERDMAP, b += BPERDMAP) {
/* get the buffer for this dmap.
*/
lblkno = BLKTODMAP(b, bmp->db_l2nbperpage);
mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
if (mp == NULL) {
/* could not back out. mark the file system
* to indicate that we have leaked blocks.
*/
jfs_error(bmp->db_ipbmap->i_sb,
"dbAllocCtl: I/O Error: Block Leakage.");
continue;
}
dp = (struct dmap *) mp->data;
/* free the blocks in this dmap.
*/
if (dbFreeDmap(bmp, dp, b, BPERDMAP)) {
/* could not back out. mark the file system
* to indicate that we have leaked blocks.
*/
release_metapage(mp);
jfs_error(bmp->db_ipbmap->i_sb,
"dbAllocCtl: Block Leakage.");
continue;
}
/* write the buffer.
*/
write_metapage(mp);
}
return (rc);
}
/*
* NAME: dbAllocDmapLev()
*
* FUNCTION: attempt to allocate a specified number of contiguous blocks
* from a specified dmap.
*
* this routine checks if the contiguous blocks are available.
* if so, nblocks of blocks are allocated; otherwise, -ENOSPC is
* returned.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap to attempt to allocate blocks from.
* nblocks - actual number of contiguous blocks desired.
* l2nb - log2 number of contiguous blocks desired.
* results - on successful return, set to the starting block number
* of the newly allocated range.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient disk resources
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap), e.g., from dbAlloc(), or
* IWRITE_LOCK(ipbmap), e.g., dbAllocCtl(), held on entry/exit;
*/
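/* for illustration, with L2DBWORD == 5: a leaf index of 10 maps to
* block le64_to_cpu(dp->start) + (10 << 5), i.e. 320 blocks into the
* dmap; if that leaf describes less than a full word (value < BUDMIN),
* dbFindBits() locates the free run within wmap[10].
*/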
static int
dbAllocDmapLev(struct bmap * bmp,
struct dmap * dp, int nblocks, int l2nb, s64 * results)
{
s64 blkno;
int leafidx, rc;
/* can't be more than a dmaps worth of blocks */
assert(l2nb <= L2BPERDMAP);
/* search the tree within the dmap page for sufficient
* free space. if sufficient free space is found, dbFindLeaf()
* returns the index of the leaf at which free space was found.
*/
if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx))
return -ENOSPC;
/* determine the block number within the file system corresponding
* to the leaf at which free space was found.
*/
blkno = le64_to_cpu(dp->start) + (leafidx << L2DBWORD);
/* if not all bits of the dmap word are free, get the starting
* bit number within the dmap word of the required string of free
* bits and adjust the block number with this value.
*/
if (dp->tree.stree[leafidx + LEAFIND] < BUDMIN)
blkno += dbFindBits(le32_to_cpu(dp->wmap[leafidx]), l2nb);
/* allocate the blocks */
if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0)
*results = blkno;
return (rc);
}
/*
* NAME: dbAllocDmap()
*
* FUNCTION: adjust the disk allocation map to reflect the allocation
* of a specified block range within a dmap.
*
* this routine allocates the specified blocks from the dmap
* through a call to dbAllocBits(). if the allocation of the
* block range causes the maximum string of free blocks within
* the dmap to change (i.e. the value of the root of the dmap's
* dmtree), this routine will cause this change to be reflected
* up through the appropriate levels of the dmap control pages
* by a call to dbAdjCtl() for the L0 dmap control page that
* covers this dmap.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap to allocate the block range from.
* blkno - starting block number of the block to be allocated.
* nblocks - number of blocks to be allocated.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
s8 oldroot;
int rc;
/* save the current value of the root (i.e. maximum free string)
* of the dmap tree.
*/
oldroot = dp->tree.stree[ROOT];
/* allocate the specified (blocks) bits */
dbAllocBits(bmp, dp, blkno, nblocks);
/* if the root has not changed, done. */
if (dp->tree.stree[ROOT] == oldroot)
return (0);
/* root changed. bubble the change up to the dmap control pages.
* if the adjustment of the upper level control pages fails,
* backout the bit allocation (thus making everything consistent).
*/
if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 1, 0)))
dbFreeBits(bmp, dp, blkno, nblocks);
return (rc);
}
/*
* NAME: dbFreeDmap()
*
* FUNCTION: adjust the disk allocation map to reflect the freeing
* of a specified block range within a dmap.
*
* this routine frees the specified blocks from the dmap through
* a call to dbFreeBits(). if the deallocation of the block range
* causes the maximum string of free blocks within the dmap to
* change (i.e. the value of the root of the dmap's dmtree), this
* routine will cause this change to be reflected up through the
* appropriate levels of the dmap control pages by a call to
* dbAdjCtl() for the L0 dmap control page that covers this dmap.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap to free the block range from.
* blkno - starting block number of the block to be freed.
* nblocks - number of blocks to be freed.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
s8 oldroot;
int rc = 0, word;
/* save the current value of the root (i.e. maximum free string)
* of the dmap tree.
*/
oldroot = dp->tree.stree[ROOT];
/* free the specified (blocks) bits */
rc = dbFreeBits(bmp, dp, blkno, nblocks);
/* if error or the root has not changed, done. */
if (rc || (dp->tree.stree[ROOT] == oldroot))
return (rc);
/* root changed. bubble the change up to the dmap control pages.
* if the adjustment of the upper level control pages fails,
* backout the deallocation.
*/
if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 0, 0))) {
word = (blkno & (BPERDMAP - 1)) >> L2DBWORD;
/* as part of backing out the deallocation, we will have
* to back split the dmap tree if the deallocation caused
* the freed blocks to become part of a larger binary buddy
* system.
*/
if (dp->tree.stree[word] == NOFREE)
dbBackSplit((dmtree_t *) & dp->tree, word);
dbAllocBits(bmp, dp, blkno, nblocks);
}
return (rc);
}
/*
* NAME: dbAllocBits()
*
* FUNCTION: allocate a specified block range from a dmap.
*
* this routine updates the dmap to reflect the working
* state allocation of the specified block range. it directly
* updates the bits of the working map and causes the adjustment
* of the binary buddy system described by the dmap's dmtree
* leaves to reflect the bits allocated. it also causes the
* dmap's dmtree, as a whole, to reflect the allocated range.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap to allocate bits from.
* blkno - starting block number of the bits to be allocated.
* nblocks - number of bits to be allocated.
*
* RETURN VALUES: none
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
dmtree_t *tp = (dmtree_t *) & dp->tree;
int size;
s8 *leaf;
/* pick up a pointer to the leaves of the dmap tree */
leaf = dp->tree.stree + LEAFIND;
/* determine the bit number and word within the dmap of the
* starting block.
*/
dbitno = blkno & (BPERDMAP - 1);
word = dbitno >> L2DBWORD;
/* block range better be within the dmap */
assert(dbitno + nblocks <= BPERDMAP);
/* allocate the bits of the dmap's words corresponding to the block
* range. not all bits of the first and last words may be contained
* within the block range. if this is the case, we'll work against
* those words (i.e. partial first and/or last) on an individual basis
* (a single pass), allocating the bits of interest by hand and
* updating the leaf corresponding to the dmap word. a single pass
* will be used for all dmap words fully contained within the
* specified range. within this pass, the bits of all fully contained
* dmap words will be marked as allocated in a single shot and the leaves
* will be updated. a single leaf may describe the free space of
* multiple dmap words, so we may update only a subset of the actual
* leaves corresponding to the dmap words of the block range.
*/
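/* for illustration, with DBWORD == 32: for a partial word with
* wbitno == 3 and nb == 5, (ONES << (32 - 5)) >> 3 yields 0x1f000000,
* i.e. bits 3..7 counting from the most significant bit, which are
* then or'ed into the dmap word below.
*/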
for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
/* determine the bit number within the word and
* the number of bits within the word.
*/
wbitno = dbitno & (DBWORD - 1);
nb = min(rembits, DBWORD - wbitno);
/* check if only part of a word is to be allocated.
*/
if (nb < DBWORD) {
/* allocate (set to 1) the appropriate bits within
* this dmap word.
*/
dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb)
>> wbitno);
/* update the leaf for this dmap word. in addition
* to setting the leaf value to the binary buddy max
* of the updated dmap word, dbSplit() will split
* the binary system of the leaves if need be.
*/
dbSplit(tp, word, BUDMIN,
dbMaxBud((u8 *) & dp->wmap[word]));
word += 1;
} else {
/* one or more dmap words are fully contained
* within the block range. determine how many
* words and allocate (set to 1) the bits of these
* words.
*/
nwords = rembits >> L2DBWORD;
memset(&dp->wmap[word], (int) ONES, nwords * 4);
/* determine how many bits.
*/
nb = nwords << L2DBWORD;
/* now update the appropriate leaves to reflect
* the allocated words.
*/
for (; nwords > 0; nwords -= nw) {
if (leaf[word] < BUDMIN) {
jfs_error(bmp->db_ipbmap->i_sb,
"dbAllocBits: leaf page "
"corrupt");
break;
}
/* determine what the leaf value should be
* updated to as the minimum of the l2 number
* of bits being allocated and the l2 number
* of bits currently described by this leaf.
*/
size = min((int)leaf[word], NLSTOL2BSZ(nwords));
/* update the leaf to reflect the allocation.
* in addition to setting the leaf value to
* NOFREE, dbSplit() will split the binary
* system of the leaves to reflect the current
* allocation (size).
*/
dbSplit(tp, word, size, NOFREE);
/* get the number of dmap words handled */
nw = BUDSIZE(size, BUDMIN);
word += nw;
}
}
}
/* update the free count for this dmap */
le32_add_cpu(&dp->nfree, -nblocks);
BMAP_LOCK(bmp);
/* update the maximum allocation group number if this
* allocation group is now the rightmost group with
* allocated blocks.
*/
agno = blkno >> bmp->db_agl2size;
if (agno > bmp->db_maxag)
bmp->db_maxag = agno;
/* update the free count for the allocation group and map */
bmp->db_agfree[agno] -= nblocks;
bmp->db_nfree -= nblocks;
BMAP_UNLOCK(bmp);
}
/*
* NAME: dbFreeBits()
*
* FUNCTION: free a specified block range from a dmap.
*
* this routine updates the dmap to reflect the working
* state allocation of the specified block range. it directly
* updates the bits of the working map and causes the adjustment
* of the binary buddy system described by the dmap's dmtree
* leaves to reflect the bits freed. it also causes the dmap's
* dmtree, as a whole, to reflect the deallocated range.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* dp - pointer to dmap to free bits from.
* blkno - starting block number of the bits to be freed.
* nblocks - number of bits to be freed.
*
* RETURN VALUES: 0 for success
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
dmtree_t *tp = (dmtree_t *) & dp->tree;
int rc = 0;
int size;
/* determine the bit number and word within the dmap of the
* starting block.
*/
dbitno = blkno & (BPERDMAP - 1);
word = dbitno >> L2DBWORD;
/* block range better be within the dmap.
*/
assert(dbitno + nblocks <= BPERDMAP);
/* free the bits of the dmaps words corresponding to the block range.
* not all bits of the first and last words may be contained within
* the block range. if this is the case, we'll work against those
* words (i.e. partial first and/or last) on an individual basis
* (a single pass), freeing the bits of interest by hand and updating
* the leaf corresponding to the dmap word. a single pass will be used
* for all dmap words fully contained within the specified range.
* within this pass, the bits of all fully contained dmap words will
* be marked as free in a single shot and the leaves will be updated. a
* single leaf may describe the free space of multiple dmap words,
* so we may update only a subset of the actual leaves corresponding
* to the dmap words of the block range.
*
* dbJoin() is used to update leaf values and will join the binary
* buddy system of the leaves if the new leaf values indicate this
* should be done.
*/
for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
/* determine the bit number within the word and
* the number of bits within the word.
*/
wbitno = dbitno & (DBWORD - 1);
nb = min(rembits, DBWORD - wbitno);
/* check if only part of a word is to be freed.
*/
if (nb < DBWORD) {
/* free (zero) the appropriate bits within this
* dmap word.
*/
dp->wmap[word] &=
cpu_to_le32(~(ONES << (DBWORD - nb)
>> wbitno));
/* update the leaf for this dmap word.
*/
rc = dbJoin(tp, word,
dbMaxBud((u8 *) & dp->wmap[word]));
if (rc)
return rc;
word += 1;
} else {
/* one or more dmap words are fully contained
* within the block range. determine how many
* words and free (zero) the bits of these words.
*/
nwords = rembits >> L2DBWORD;
memset(&dp->wmap[word], 0, nwords * 4);
/* determine how many bits.
*/
nb = nwords << L2DBWORD;
/* now update the appropriate leaves to reflect
* the freed words.
*/
for (; nwords > 0; nwords -= nw) {
/* determine what the leaf value should be
* updated to as the minimum of the l2 number
* of bits being freed and the l2 (max) number
* of bits that can be described by this leaf.
*/
size =
min(LITOL2BSZ
(word, L2LPERDMAP, BUDMIN),
NLSTOL2BSZ(nwords));
/* update the leaf.
*/
rc = dbJoin(tp, word, size);
if (rc)
return rc;
/* get the number of dmap words handled.
*/
nw = BUDSIZE(size, BUDMIN);
word += nw;
}
}
}
/* update the free count for this dmap.
*/
le32_add_cpu(&dp->nfree, nblocks);
BMAP_LOCK(bmp);
/* update the free count for the allocation group and
* map.
*/
agno = blkno >> bmp->db_agl2size;
bmp->db_nfree += nblocks;
bmp->db_agfree[agno] += nblocks;
/* check if this allocation group is now completely free and
* if it is currently the maximum (rightmost) allocation group.
* if so, establish the new maximum allocation group number by
* searching left for the first allocation group with allocation.
*/
if ((bmp->db_agfree[agno] == bmp->db_agsize && agno == bmp->db_maxag) ||
(agno == bmp->db_numag - 1 &&
bmp->db_agfree[agno] == (bmp-> db_mapsize & (BPERDMAP - 1)))) {
while (bmp->db_maxag > 0) {
bmp->db_maxag -= 1;
if (bmp->db_agfree[bmp->db_maxag] !=
bmp->db_agsize)
break;
}
/* re-establish the allocation group preference if the
* current preference is right of the maximum allocation
* group.
*/
if (bmp->db_agpref > bmp->db_maxag)
bmp->db_agpref = bmp->db_maxag;
}
BMAP_UNLOCK(bmp);
return 0;
}
/*
* NAME: dbAdjCtl()
*
* FUNCTION: adjust a dmap control page at a specified level to reflect
* the change in a lower level dmap or dmap control page's
* maximum string of free blocks (i.e. a change in the root
* of the lower level object's dmtree) due to the allocation
* or deallocation of a range of blocks within a single dmap.
*
* on entry, this routine is provided with the new value of
* the lower level dmap or dmap control page root and the
* starting block number of the block range whose allocation
* or deallocation resulted in the root change. this range
* is represented by a single leaf of the current dmapctl
* and the leaf will be updated with this value, possibly
* causing a binary buddy system within the leaves to be
* split or joined. the update may also cause the dmapctl's
* dmtree to be updated.
*
* if the adjustment of the dmap control page, itself, causes its
* root to change, this change will be bubbled up to the next dmap
* control level by a recursive call to this routine, specifying
* the new root value and the next dmap control page level to
* be adjusted.
*
* PARAMETERS:
* bmp - pointer to bmap descriptor
* blkno - the first block of a block range within a dmap. it is
* the allocation or deallocation of this block range that
* requires the dmap control page to be adjusted.
* newval - the new value of the lower level dmap or dmap control
* page root.
* alloc - 'true' if adjustment is due to an allocation.
* level - current level of dmap control page (i.e. L0, L1, L2) to
* be adjusted.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
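/* for illustration: an allocation within a dmap might drop that
* dmap's root from, say, 13 to 9; dbAdjCtl() is then called with
* level 0, updates the single L0 leaf covering the dmap (dbSplit()
* for an allocation), and, if the L0 page's own root changes,
* recurses with level 1, and so on up to db_maxlevel.
*/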
static int
dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
{
struct metapage *mp;
s8 oldroot;
int oldval;
s64 lblkno;
struct dmapctl *dcp;
int rc, leafno, ti;
/* get the buffer for the dmap control page for the specified
* block number and control page level.
*/
lblkno = BLKTOCTL(blkno, bmp->db_l2nbperpage, level);
mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
if (mp == NULL)
return -EIO;
dcp = (struct dmapctl *) mp->data;
if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
jfs_error(bmp->db_ipbmap->i_sb,
"dbAdjCtl: Corrupt dmapctl page");
release_metapage(mp);
return -EIO;
}
/* determine the leaf number corresponding to the block and
* the index within the dmap control tree.
*/
leafno = BLKTOCTLLEAF(blkno, dcp->budmin);
ti = leafno + le32_to_cpu(dcp->leafidx);
/* save the current leaf value and the current root level (i.e.
* maximum l2 free string described by this dmapctl).
*/
oldval = dcp->stree[ti];
oldroot = dcp->stree[ROOT];
/* check if this is a control page update for an allocation.
* if so, update the leaf to reflect the new leaf value using
* dbSplit(); otherwise (deallocation), use dbJoin() to update
* the leaf with the new value. in addition to updating the
* leaf, dbSplit() will also split the binary buddy system of
* the leaves, if required, and bubble new values within the
* dmapctl tree, if required. similarly, dbJoin() will join
* the binary buddy system of leaves and bubble new values up
* the dmapctl tree as required by the new leaf value.
*/
if (alloc) {
/* check if we are in the middle of a binary buddy
* system. this happens when we are performing the
* first allocation out of an allocation group that
* is part (not the first part) of a larger binary
* buddy system. if we are in the middle, back split
* the system prior to calling dbSplit() which assumes
* that it is at the front of a binary buddy system.
*/
if (oldval == NOFREE) {
rc = dbBackSplit((dmtree_t *) dcp, leafno);
if (rc) {
release_metapage(mp);
return rc;
}
oldval = dcp->stree[ti];
}
dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval);
} else {
rc = dbJoin((dmtree_t *) dcp, leafno, newval);
if (rc) {
release_metapage(mp);
return rc;
}
}
/* check if the root of the current dmap control page changed due
* to the update and if the current dmap control page is not at
* the current top level (i.e. L0, L1, L2) of the map. if so (i.e.
* root changed and this is not the top level), call this routine
* again (recursion) for the next higher level of the mapping to
* reflect the change in root for the current dmap control page.
*/
if (dcp->stree[ROOT] != oldroot) {
/* are we below the top level of the map. if so,
* bubble the root up to the next higher level.
*/
if (level < bmp->db_maxlevel) {
/* bubble up the new root of this dmap control page to
* the next level.
*/
if ((rc =
dbAdjCtl(bmp, blkno, dcp->stree[ROOT], alloc,
level + 1))) {
/* something went wrong in bubbling up the new
* root value, so backout the changes to the
* current dmap control page.
*/
if (alloc) {
dbJoin((dmtree_t *) dcp, leafno,
oldval);
} else {
/* the dbJoin() above might have
* caused a larger binary buddy system
* to form and we may now be in the
* middle of it. if this is the case,
* back split the buddies.
*/
if (dcp->stree[ti] == NOFREE)
dbBackSplit((dmtree_t *)
dcp, leafno);
dbSplit((dmtree_t *) dcp, leafno,
dcp->budmin, oldval);
}
/* release the buffer and return the error.
*/
release_metapage(mp);
return (rc);
}
} else {
/* we're at the top level of the map. update
* the bmap control page to reflect the size
* of the maximum free buddy system.
*/
assert(level == bmp->db_maxlevel);
if (bmp->db_maxfreebud != oldroot) {
jfs_error(bmp->db_ipbmap->i_sb,
"dbAdjCtl: the maximum free buddy is "
"not the old root");
}
bmp->db_maxfreebud = dcp->stree[ROOT];
}
}
/* write the buffer.
*/
write_metapage(mp);
return (0);
}
/*
* NAME: dbSplit()
*
* FUNCTION: update the leaf of a dmtree with a new value, splitting
* the leaf from the binary buddy system of the dmtree's
* leaves, as required.
*
* PARAMETERS:
* tp - pointer to the tree containing the leaf.
* leafno - the number of the leaf to be updated.
* splitsz - the size the binary buddy system starting at the leaf
* must be split to, specified as the log2 number of blocks.
* newval - the new value for the leaf.
*
* RETURN VALUES: none
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
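/* for illustration, with BUDMIN == L2DBWORD == 5: splitting a leaf
* of value 7 (a free buddy of 2^7 blocks) down to splitsz == 5 walks
* cursz = 6, 5 with budsz = 2, 1, setting leaf (leafno ^ 2) to 6 and
* leaf (leafno ^ 1) to 5, so that leafno heads a 2^5 system before
* it finally receives newval.
*/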
static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
{
int budsz;
int cursz;
s8 *leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);
/* check if the leaf needs to be split.
*/
if (leaf[leafno] > tp->dmt_budmin) {
/* the split occurs by cutting the buddy system in half
* at the specified leaf until we reach the specified
* size. pick up the starting split size (current size
* - 1 in l2) and the corresponding buddy size.
*/
cursz = leaf[leafno] - 1;
budsz = BUDSIZE(cursz, tp->dmt_budmin);
/* split until we reach the specified size.
*/
while (cursz >= splitsz) {
/* update the buddy's leaf with its new value.
*/
dbAdjTree(tp, leafno ^ budsz, cursz);
/* on to the next size and buddy.
*/
cursz -= 1;
budsz >>= 1;
}
}
/* adjust the dmap tree to reflect the specified leaf's new
* value.
*/
dbAdjTree(tp, leafno, newval);
}
/*
* NAME: dbBackSplit()
*
* FUNCTION: back split the binary buddy system of dmtree leaves
* that hold a specified leaf until the specified leaf
* starts its own binary buddy system.
*
* the allocators typically perform allocations at the start
* of binary buddy systems and dbSplit() is used to accomplish
* any required splits. in some cases, however, allocation
* may occur in the middle of a binary system and requires a
* back split, with the split proceeding out from the middle of
* the system (less efficient) rather than the start of the
* system (more efficient). the cases in which a back split
* is required are rare and are limited to the first allocation
* within an allocation group which is a part (not first part)
* of a larger binary buddy system and a few exception cases
* in which a previous join operation must be backed out.
*
* PARAMETERS:
* tp - pointer to the tree containing the leaf.
* leafno - the number of the leaf to be updated.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
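/* for illustration, with BUDMIN == 5: if leafno lies inside a 2^7
* system headed by leaf (leafno & ~3) with value 7, the loop below
* finds that head, splits it in two (dbSplit() with cursz == 6), and
* repeats on the smaller system still containing leafno until
* leaf[leafno] itself heads a buddy system (leaf[leafno] != NOFREE).
*/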
static int dbBackSplit(dmtree_t * tp, int leafno)
{
int budsz, bud, w, bsz, size;
int cursz;
s8 *leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);
/* leaf should be part (not first part) of a binary
* buddy system.
*/
assert(leaf[leafno] == NOFREE);
/* the back split is accomplished by iteratively finding the leaf
* that starts the buddy system that contains the specified leaf and
* splitting that system in two. this iteration continues until
* the specified leaf becomes the start of a buddy system.
*
* determine maximum possible l2 size for the specified leaf.
*/
size =
LITOL2BSZ(leafno, le32_to_cpu(tp->dmt_l2nleafs),
tp->dmt_budmin);
/* determine the number of leaves covered by this size. this
* is the buddy size that we will start with as we search for
* the buddy system that contains the specified leaf.
*/
budsz = BUDSIZE(size, tp->dmt_budmin);
/* back split.
*/
while (leaf[leafno] == NOFREE) {
/* find the leftmost buddy leaf.
*/
for (w = leafno, bsz = budsz;; bsz <<= 1,
w = (w < bud) ? w : bud) {
if (bsz >= le32_to_cpu(tp->dmt_nleafs)) {
jfs_err("JFS: block map error in dbBackSplit");
return -EIO;
}
/* determine the buddy.
*/
bud = w ^ bsz;
/* check if this buddy is the start of the system.
*/
if (leaf[bud] != NOFREE) {
/* split the leaf at the start of the
* system in two.
*/
cursz = leaf[bud] - 1;
dbSplit(tp, bud, cursz, cursz);
break;
}
}
}
if (leaf[leafno] != size) {
jfs_err("JFS: wrong leaf value in dbBackSplit");
return -EIO;
}
return 0;
}
/*
* NAME: dbJoin()
*
* FUNCTION: update the leaf of a dmtree with a new value, joining
* the leaf with other leaves of the dmtree into a multi-leaf
* binary buddy system, as required.
*
* PARAMETERS:
* tp - pointer to the tree containing the leaf.
* leafno - the number of the leaf to be updated.
* newval - the new value for the leaf.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*/
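/* for illustration, with BUDMIN == 5: freeing makes leaf 4 fully
* free (newval == 5); if buddy leaf 5 is also 5, leaf 5 becomes
* NOFREE and leaf 4 heads a 2^6 system; if leaves 6-7 form a matching
* 2^6 system, they join as well, and so on, the leftmost leaf always
* carrying the combined value.
*/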
static int dbJoin(dmtree_t * tp, int leafno, int newval)
{
int budsz, buddy;
s8 *leaf;
/* can the new leaf value require a join with other leaves ?
*/
if (newval >= tp->dmt_budmin) {
/* pickup a pointer to the leaves of the tree.
*/
leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);
/* try to join the specified leaf into a large binary
* buddy system. the join proceeds by attempting to join
* the specified leafno with its buddy (leaf) at new value.
* if the join occurs, we attempt to join the left leaf
* of the joined buddies with its buddy at new value + 1.
* we continue to join until we find a buddy that cannot be
* joined (does not have a value equal to the size of the
* last join) or until all leaves have been joined into a
* single system.
*
* get the buddy size (number of words covered) of
* the new value.
*/
budsz = BUDSIZE(newval, tp->dmt_budmin);
/* try to join.
*/
while (budsz < le32_to_cpu(tp->dmt_nleafs)) {
/* get the buddy leaf.
*/
buddy = leafno ^ budsz;
/* if the leaf's new value is greater than its
* buddy's value, we join no more.
*/
if (newval > leaf[buddy])
break;
/* It shouldn't be less */
if (newval < leaf[buddy])
return -EIO;
/* check which (leafno or buddy) is the left buddy.
* the left buddy gets to claim the blocks resulting
* from the join while the right gets to claim none.
* the left buddy is also eligible to participate in
* a join at the next higher level while the right
* is not.
*
*/
if (leafno < buddy) {
/* leafno is the left buddy.
*/
dbAdjTree(tp, buddy, NOFREE);
} else {
/* buddy is the left buddy and becomes
* leafno.
*/
dbAdjTree(tp, leafno, NOFREE);
leafno = buddy;
}
/* on to try the next join.
*/
newval += 1;
budsz <<= 1;
}
}
/* update the leaf value.
*/
dbAdjTree(tp, leafno, newval);
return 0;
}
/*
* NAME: dbAdjTree()
*
* FUNCTION: update a leaf of a dmtree with a new value, adjusting
* the dmtree, as required, to reflect the new leaf value.
* the combination of any buddies must already be done before
* this is called.
*
* PARAMETERS:
* tp - pointer to the tree to be adjusted.
* leafno - the number of the leaf to be updated.
* newval - the new value for the leaf.
*
* RETURN VALUES: none
*/
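/* for illustration: in the 4-ary stree[] array a node at index lp
* belongs to the group starting at ((lp - 1) & ~0x03) + 1, whose
* parent sits at (lp - 1) >> 2; e.g. index 11 is in group 9..12 with
* parent at index 2, whose own parent is the root at index 0.
*/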
static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
{
int lp, pp, k;
int max;
/* pick up the index of the leaf for this leafno.
*/
lp = leafno + le32_to_cpu(tp->dmt_leafidx);
/* is the current value the same as the old value ? if so,
* there is nothing to do.
*/
if (tp->dmt_stree[lp] == newval)
return;
/* set the new value.
*/
tp->dmt_stree[lp] = newval;
/* bubble the new value up the tree as required.
*/
for (k = 0; k < le32_to_cpu(tp->dmt_height); k++) {
/* get the index of the first leaf of the 4 leaf
* group containing the specified leaf (leafno).
*/
lp = ((lp - 1) & ~0x03) + 1;
/* get the index of the parent of this 4 leaf group.
*/
pp = (lp - 1) >> 2;
/* determine the maximum of the 4 leaves.
*/
max = TREEMAX(&tp->dmt_stree[lp]);
/* if the maximum of the 4 is the same as the
* parent's value, we're done.
*/
if (tp->dmt_stree[pp] == max)
break;
/* parent gets new value.
*/
tp->dmt_stree[pp] = max;
/* parent becomes leaf for next go-round.
*/
lp = pp;
}
}
/*
* NAME: dbFindLeaf()
*
* FUNCTION: search a dmtree_t for sufficient free blocks, returning
* the index of a leaf describing the free blocks if
* sufficient free blocks are found.
*
* the search starts at the top of the dmtree_t tree and
* proceeds down the tree to the leftmost leaf with sufficient
* free space.
*
* PARAMETERS:
* tp - pointer to the tree to be searched.
* l2nb - log2 number of free blocks to search for.
* leafidx - return pointer to be set to the index of the leaf
* describing at least l2nb free blocks if sufficient
* free blocks are found.
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient free blocks.
*/
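/* for illustration, for a dmap tree (height 4): the root is
* stree[0] and its children occupy indices 1..4; if the request
* first fits at node 3 (n == 2), the next level examined starts at
* ((1 + 2) << 2) + 1 == 13, and the descent repeats until the
* leftmost fitting leaf is reached.
*/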
static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
{
int ti, n = 0, k, x = 0;
/* first check the root of the tree to see if there is
* sufficient free space.
*/
if (l2nb > tp->dmt_stree[ROOT])
return -ENOSPC;
/* sufficient free space available. now search down the tree
* starting at the next level for the leftmost leaf that
* describes sufficient free space.
*/
for (k = le32_to_cpu(tp->dmt_height), ti = 1;
k > 0; k--, ti = ((ti + n) << 2) + 1) {
/* search the four nodes at this level, starting from
* the left.
*/
for (x = ti, n = 0; n < 4; n++) {
/* sufficient free space found. move to the next
* level (or quit if this is the last level).
*/
if (l2nb <= tp->dmt_stree[x + n])
break;
}
/* better have found something since the higher
* levels of the tree said it was here.
*/
assert(n < 4);
}
/* set the return to the leftmost leaf describing sufficient
* free space.
*/
*leafidx = x + n - le32_to_cpu(tp->dmt_leafidx);
return (0);
}
/*
* NAME: dbFindBits()
*
* FUNCTION: find a specified number of binary buddy free bits within a
* dmap bitmap word value.
*
* this routine searches the bitmap value for (1 << l2nb) free
* bits at (1 << l2nb) alignments within the value.
*
* PARAMETERS:
* word - dmap bitmap word value.
* l2nb - number of free bits specified as a log2 number.
*
* RETURN VALUES:
* starting bit number of free bits.
*/
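/* for illustration: for l2nb == 2 (nb == 4) and word == 0xff0fffff,
* the complement is 0x00f00000 and the nibble mask 0xf0000000 is
* shifted right 4 bits at a time; it first matches at bitno == 8,
* the aligned run of 4 free bits.
*/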
static int dbFindBits(u32 word, int l2nb)
{
int bitno, nb;
u32 mask;
/* get the number of bits.
*/
nb = 1 << l2nb;
assert(nb <= DBWORD);
/* complement the word so we can use a mask (i.e. 0s represent
* free bits) and compute the mask.
*/
word = ~word;
mask = ONES << (DBWORD - nb);
/* scan the word for nb free bits at nb alignments.
*/
for (bitno = 0; mask != 0; bitno += nb, mask >>= nb) {
if ((mask & word) == mask)
break;
}
ASSERT(bitno < 32);
/* return the bit number.
*/
return (bitno);
}
/*
* NAME: dbMaxBud(u8 *cp)
*
* FUNCTION: determine the largest binary buddy string of free
* bits within 32-bits of the map.
*
* PARAMETERS:
* cp - pointer to the 32-bit value.
*
* RETURN VALUES:
* largest binary buddy of free bits within a dmap word.
*/
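/* for illustration: a wmap word of 0 (all 32 bits free) yields
* BUDMIN; a word with one free (zero) halfword yields BUDMIN - 1;
* anything else falls through to the per-byte budtab[] lookup,
* where e.g. a fully free byte yields 3 (2^3 free bits).
*/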
static int dbMaxBud(u8 * cp)
{
signed char tmp1, tmp2;
/* check if the wmap word is all free. if so, the
* free buddy size is BUDMIN.
*/
if (*((uint *) cp) == 0)
return (BUDMIN);
/* check if the wmap word is half free. if so, the
* free buddy size is BUDMIN-1.
*/
if (*((u16 *) cp) == 0 || *((u16 *) cp + 1) == 0)
return (BUDMIN - 1);
/* not all free or half free. determine the free buddy
* size thru table lookup using quarters of the wmap word.
*/
tmp1 = max(budtab[cp[2]], budtab[cp[3]]);
tmp2 = max(budtab[cp[0]], budtab[cp[1]]);
return (max(tmp1, tmp2));
}
/*
* NAME: cnttz(uint word)
*
* FUNCTION: determine the number of trailing zeros within a 32-bit
* value.
*
* PARAMETERS:
* word - 32-bit value to be examined.
*
* RETURN VALUES:
* count of trailing zeros
*/
static int cnttz(u32 word)
{
int n;
for (n = 0; n < 32; n++, word >>= 1) {
if (word & 0x01)
break;
}
return (n);
}
/*
* NAME: cntlz(u32 value)
*
* FUNCTION: determine the number of leading zeros within a 32-bit
* value.
*
* PARAMETERS:
* value - 32-bit value to be examined.
*
* RETURN VALUES:
* count of leading zeros
*/
static int cntlz(u32 value)
{
int n;
for (n = 0; n < 32; n++, value <<= 1) {
if (value & HIGHORDER)
break;
}
return (n);
}
/*
* NAME: blkstol2(s64 nb)
*
* FUNCTION: convert a block count to its log2 value. if the block
* count is not a l2 multiple, it is rounded up to the next
* larger l2 multiple.
*
* PARAMETERS:
* nb - number of blocks
*
* RETURN VALUES:
* log2 number of blocks
*/
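/* for illustration: blkstol2(8) finds the leading bit at 2^3 with
* no lower bits set and returns 3, while blkstol2(6) finds the
* leading bit at 2^2, sees a lower bit set, and rounds up to 3.
*/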
static int blkstol2(s64 nb)
{
int l2nb;
s64 mask; /* meant to be signed */
mask = (s64) 1 << (64 - 1);
/* count the leading bits.
*/
for (l2nb = 0; l2nb < 64; l2nb++, mask >>= 1) {
/* leading bit found.
*/
if (nb & mask) {
/* determine the l2 value.
*/
l2nb = (64 - 1) - l2nb;
/* check if we need to round up.
*/
if (~mask & nb)
l2nb++;
return (l2nb);
}
}
assert(0);
return 0; /* fix compiler warning */
}
/*
* NAME: dbAllocBottomUp()
*
* FUNCTION: allocate the specified block range from the working block
* allocation map.
*
* the blocks will be allocated from the working map one dmap
* at a time.
*
* PARAMETERS:
* ip - pointer to in-core inode;
* blkno - starting block number to be allocated.
* nblocks - number of blocks to be allocated.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error
*/
int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks)
{
struct metapage *mp;
struct dmap *dp;
int nb, rc;
s64 lblkno, rem;
struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
/* block to be allocated better be within the mapsize. */
ASSERT(nblocks <= bmp->db_mapsize - blkno);
/*
* allocate the blocks a dmap at a time.
*/
mp = NULL;
for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) {
/* release previous dmap if any */
if (mp) {
write_metapage(mp);
}
/* get the buffer for the current dmap. */
lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
if (mp == NULL) {
IREAD_UNLOCK(ipbmap);
return -EIO;
}
dp = (struct dmap *) mp->data;
/* determine the number of blocks to be allocated from
* this dmap.
*/
nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));
/* allocate the blocks. */
if ((rc = dbAllocDmapBU(bmp, dp, blkno, nb))) {
release_metapage(mp);
IREAD_UNLOCK(ipbmap);
return (rc);
}
}
/* write the last buffer. */
write_metapage(mp);
IREAD_UNLOCK(ipbmap);
return (0);
}
static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
int rc;
int dbitno, word, rembits, nb, nwords, wbitno, agno;
s8 oldroot, *leaf;
struct dmaptree *tp = (struct dmaptree *) & dp->tree;
/* save the current value of the root (i.e. maximum free string)
* of the dmap tree.
*/
oldroot = tp->stree[ROOT];
/* pick up a pointer to the leaves of the dmap tree */
leaf = tp->stree + LEAFIND;
/* determine the bit number and word within the dmap of the
* starting block.
*/
dbitno = blkno & (BPERDMAP - 1);
word = dbitno >> L2DBWORD;
/* block range better be within the dmap */
assert(dbitno + nblocks <= BPERDMAP);
/* allocate the bits of the dmap's words corresponding to the block
* range. not all bits of the first and last words may be contained
* within the block range. if this is the case, we'll work against
* those words (i.e. partial first and/or last) on an individual basis
* (a single pass), allocating the bits of interest by hand and
* updating the leaf corresponding to the dmap word. a single pass
* will be used for all dmap words fully contained within the
* specified range. within this pass, the bits of all fully contained
* dmap words will be marked as allocated in a single shot and the leaves
* will be updated. a single leaf may describe the free space of
* multiple dmap words, so we may update only a subset of the actual
* leaves corresponding to the dmap words of the block range.
*/
for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
/* determine the bit number within the word and
* the number of bits within the word.
*/
wbitno = dbitno & (DBWORD - 1);
nb = min(rembits, DBWORD - wbitno);
/* check if only part of a word is to be allocated.
*/
if (nb < DBWORD) {
/* allocate (set to 1) the appropriate bits within
* this dmap word.
*/
dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb)
>> wbitno);
word++;
} else {
/* one or more dmap words are fully contained
* within the block range. determine how many
* words and allocate (set to 1) the bits of these
* words.
*/
nwords = rembits >> L2DBWORD;
memset(&dp->wmap[word], (int) ONES, nwords * 4);
/* determine how many bits */
nb = nwords << L2DBWORD;
word += nwords;
}
}
/* update the free count for this dmap */
le32_add_cpu(&dp->nfree, -nblocks);
/* reconstruct summary tree */
dbInitDmapTree(dp);
BMAP_LOCK(bmp);
/* update the highest active allocation group number
* if this allocation group is now the rightmost group
* with allocated blocks.
*/
agno = blkno >> bmp->db_agl2size;
if (agno > bmp->db_maxag)
bmp->db_maxag = agno;
/* update the free count for the allocation group and map */
bmp->db_agfree[agno] -= nblocks;
bmp->db_nfree -= nblocks;
BMAP_UNLOCK(bmp);
/* if the root has not changed, done. */
if (tp->stree[ROOT] == oldroot)
return (0);
/* root changed. bubble the change up to the dmap control pages.
* if the adjustment of the upper level control pages fails,
* backout the bit allocation (thus making everything consistent).
*/
if ((rc = dbAdjCtl(bmp, blkno, tp->stree[ROOT], 1, 0)))
dbFreeBits(bmp, dp, blkno, nblocks);
return (rc);
}
/*
* NAME: dbExtendFS()
*
* FUNCTION: extend bmap from blkno for nblocks;
* dbExtendFS() updates bmap ready for dbAllocBottomUp();
*
* L2
* |
* L1---------------------------------L1
* | |
* L0---------L0---------L0 L0---------L0---------L0
* | | | | | |
* d0,...,dn d0,...,dn d0,...,dn d0,...,dn d0,...,dn d0,.,dm;
* L2L1L0d0,...,dnL0d0,...,dnL0d0,...,dnL1L0d0,...,dnL0d0,...,dnL0d0,..dm
*
* <---old---><----------------------------extend----------------------->
*/
int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
{
struct jfs_sb_info *sbi = JFS_SBI(ipbmap->i_sb);
int nbperpage = sbi->nbperpage;
int i, i0 = true, j, j0 = true, k, n;
s64 newsize;
s64 p;
struct metapage *mp, *l2mp, *l1mp = NULL, *l0mp = NULL;
struct dmapctl *l2dcp, *l1dcp, *l0dcp;
struct dmap *dp;
s8 *l0leaf, *l1leaf, *l2leaf;
struct bmap *bmp = sbi->bmap;
int agno, l2agsize, oldl2agsize;
s64 ag_rem;
newsize = blkno + nblocks;
jfs_info("dbExtendFS: blkno:%Ld nblocks:%Ld newsize:%Ld",
(long long) blkno, (long long) nblocks, (long long) newsize);
/*
* initialize bmap control page.
*
* all the data in bmap control page should exclude
* the mkfs hidden dmap page.
*/
/* update mapsize */
bmp->db_mapsize = newsize;
bmp->db_maxlevel = BMAPSZTOLEV(bmp->db_mapsize);
/* compute new AG size */
l2agsize = dbGetL2AGSize(newsize);
oldl2agsize = bmp->db_agl2size;
bmp->db_agl2size = l2agsize;
bmp->db_agsize = 1 << l2agsize;
/* compute new number of AG */
agno = bmp->db_numag;
bmp->db_numag = newsize >> l2agsize;
bmp->db_numag += ((u32) newsize % (u32) bmp->db_agsize) ? 1 : 0;
/*
* reconfigure db_agfree[]
* from old AG configuration to new AG configuration;
*
* coalesce contiguous k (newAGSize/oldAGSize) AGs;
* i.e., (AGi, ..., AGj) where i = k*n and j = k*(n+1) - 1 to AGn;
* note: new AG size = old AG size * (2**x).
*/
if (l2agsize == oldl2agsize)
goto extend;
k = 1 << (l2agsize - oldl2agsize);
ag_rem = bmp->db_agfree[0]; /* save agfree[0] */
for (i = 0, n = 0; i < agno; n++) {
bmp->db_agfree[n] = 0; /* init collection point */
/* coalesce contiguous k AGs; */
for (j = 0; j < k && i < agno; j++, i++) {
/* merge AGi to AGn */
bmp->db_agfree[n] += bmp->db_agfree[i];
}
}
bmp->db_agfree[0] += ag_rem; /* restore agfree[0] */
for (; n < MAXAG; n++)
bmp->db_agfree[n] = 0;
/*
* update highest active ag number
*/
bmp->db_maxag = bmp->db_maxag / k;
/*
* extend bmap
*
* update bit maps and corresponding level control pages;
* global control page db_nfree, db_agfree[agno], db_maxfreebud;
*/
extend:
/* get L2 page */
p = BMAPBLKNO + nbperpage; /* L2 page */
l2mp = read_metapage(ipbmap, p, PSIZE, 0);
if (!l2mp) {
jfs_error(ipbmap->i_sb, "dbExtendFS: L2 page could not be read");
return -EIO;
}
l2dcp = (struct dmapctl *) l2mp->data;
/* compute start L1 */
k = blkno >> L2MAXL1SIZE;
l2leaf = l2dcp->stree + CTLLEAFIND + k;
p = BLKTOL1(blkno, sbi->l2nbperpage); /* L1 page */
/*
* extend each L1 in L2
*/
for (; k < LPERCTL; k++, p += nbperpage) {
/* get L1 page */
if (j0) {
/* read in L1 page: (blkno & (MAXL1SIZE - 1)) */
l1mp = read_metapage(ipbmap, p, PSIZE, 0);
if (l1mp == NULL)
goto errout;
l1dcp = (struct dmapctl *) l1mp->data;
/* compute start L0 */
j = (blkno & (MAXL1SIZE - 1)) >> L2MAXL0SIZE;
l1leaf = l1dcp->stree + CTLLEAFIND + j;
p = BLKTOL0(blkno, sbi->l2nbperpage);
j0 = false;
} else {
/* assign/init L1 page */
l1mp = get_metapage(ipbmap, p, PSIZE, 0);
if (l1mp == NULL)
goto errout;
l1dcp = (struct dmapctl *) l1mp->data;
/* compute start L0 */
j = 0;
l1leaf = l1dcp->stree + CTLLEAFIND;
p += nbperpage; /* 1st L0 of L1.k */
}
/*
* extend each L0 in L1
*/
for (; j < LPERCTL; j++) {
/* get L0 page */
if (i0) {
/* read in L0 page: (blkno & (MAXL0SIZE - 1)) */
l0mp = read_metapage(ipbmap, p, PSIZE, 0);
if (l0mp == NULL)
goto errout;
l0dcp = (struct dmapctl *) l0mp->data;
/* compute start dmap */
i = (blkno & (MAXL0SIZE - 1)) >>
L2BPERDMAP;
l0leaf = l0dcp->stree + CTLLEAFIND + i;
p = BLKTODMAP(blkno,
sbi->l2nbperpage);
i0 = false;
} else {
/* assign/init L0 page */
l0mp = get_metapage(ipbmap, p, PSIZE, 0);
if (l0mp == NULL)
goto errout;
l0dcp = (struct dmapctl *) l0mp->data;
/* compute start dmap */
i = 0;
l0leaf = l0dcp->stree + CTLLEAFIND;
p += nbperpage; /* 1st dmap of L0.j */
}
/*
* extend each dmap in L0
*/
for (; i < LPERCTL; i++) {
/*
* reconstruct the dmap page, and
* initialize corresponding parent L0 leaf
*/
if ((n = blkno & (BPERDMAP - 1))) {
/* read in dmap page: */
mp = read_metapage(ipbmap, p,
PSIZE, 0);
if (mp == NULL)
goto errout;
n = min(nblocks, (s64)BPERDMAP - n);
} else {
/* assign/init dmap page */
mp = read_metapage(ipbmap, p,
PSIZE, 0);
if (mp == NULL)
goto errout;
n = min(nblocks, (s64)BPERDMAP);
}
dp = (struct dmap *) mp->data;
*l0leaf = dbInitDmap(dp, blkno, n);
bmp->db_nfree += n;
agno = le64_to_cpu(dp->start) >> l2agsize;
bmp->db_agfree[agno] += n;
write_metapage(mp);
l0leaf++;
p += nbperpage;
blkno += n;
nblocks -= n;
if (nblocks == 0)
break;
} /* for each dmap in a L0 */
/*
* build current L0 page from its leaves, and
* initialize corresponding parent L1 leaf
*/
*l1leaf = dbInitDmapCtl(l0dcp, 0, ++i);
write_metapage(l0mp);
l0mp = NULL;
if (nblocks)
l1leaf++; /* continue for next L0 */
else {
/* more than 1 L0 ? */
if (j > 0)
break; /* build L1 page */
else {
/* summarize in global bmap page */
bmp->db_maxfreebud = *l1leaf;
release_metapage(l1mp);
release_metapage(l2mp);
goto finalize;
}
}
} /* for each L0 in a L1 */
/*
* build current L1 page from its leaves, and
* initialize corresponding parent L2 leaf
*/
*l2leaf = dbInitDmapCtl(l1dcp, 1, ++j);
write_metapage(l1mp);
l1mp = NULL;
if (nblocks)
l2leaf++; /* continue for next L1 */
else {
/* more than 1 L1 ? */
if (k > 0)
break; /* build L2 page */
else {
/* summarize in global bmap page */
bmp->db_maxfreebud = *l2leaf;
release_metapage(l2mp);
goto finalize;
}
}
} /* for each L1 in a L2 */
jfs_error(ipbmap->i_sb,
"dbExtendFS: function has not returned as expected");
errout:
if (l0mp)
release_metapage(l0mp);
if (l1mp)
release_metapage(l1mp);
release_metapage(l2mp);
return -EIO;
/*
* finalize bmap control page
*/
finalize:
return 0;
}
/*
* dbFinalizeBmap()
*/
void dbFinalizeBmap(struct inode *ipbmap)
{
struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
int actags, inactags, l2nl;
s64 ag_rem, actfree, inactfree, avgfree;
int i, n;
/*
* finalize bmap control page
*/
//finalize:
/*
* compute db_agpref: preferred ag to allocate from
* (the leftmost ag with average free space in it);
*/
//agpref:
/* get the number of active ags and inactive ags */
actags = bmp->db_maxag + 1;
inactags = bmp->db_numag - actags;
ag_rem = bmp->db_mapsize & (bmp->db_agsize - 1); /* ??? */
/* determine how many blocks are in the inactive allocation
* groups. in doing this, we must account for the fact that
* the rightmost group might be a partial group (i.e. file
* system size is not a multiple of the group size).
*/
inactfree = (inactags && ag_rem) ?
((inactags - 1) << bmp->db_agl2size) + ag_rem
: inactags << bmp->db_agl2size;
/* determine how many free blocks are in the active
* allocation groups plus the average number of free blocks
* within the active ags.
*/
actfree = bmp->db_nfree - inactfree;
avgfree = (u32) actfree / (u32) actags;
/* if the preferred allocation group does not have average free
* space, re-establish the preferred group as the leftmost
* group with average free space.
*/
if (bmp->db_agfree[bmp->db_agpref] < avgfree) {
for (bmp->db_agpref = 0; bmp->db_agpref < actags;
bmp->db_agpref++) {
if (bmp->db_agfree[bmp->db_agpref] >= avgfree)
break;
}
if (bmp->db_agpref >= bmp->db_numag) {
jfs_error(ipbmap->i_sb,
"cannot find ag with average freespace");
}
}
/*
* compute db_aglevel, db_agheight, db_agwidth, db_agstart:
* an ag is covered in aglevel dmapctl summary tree,
* at agheight level height (from leaf) with agwidth number of nodes
* each, which starts at agstart index node of the summary tree node
* array;
*/
bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize);
l2nl =
bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL);
bmp->db_agheight = l2nl >> 1;
bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheight << 1));
for (i = 5 - bmp->db_agheight, bmp->db_agstart = 0, n = 1; i > 0;
i--) {
bmp->db_agstart += n;
n <<= 2;
}
}
/*
* NAME: dbInitDmap()/ujfs_idmap_page()
*
* FUNCTION: initialize working/persistent bitmap of the dmap page
* for the specified number of blocks:
*
* at entry, the bitmaps have been initialized as free (ZEROS);
* The number of blocks will only account for the actually
* existing blocks. Blocks which don't actually exist in
* the aggregate will be marked as allocated (ONES);
*
* PARAMETERS:
* dp - pointer to page of map
* nblocks - number of blocks this page
*
* RETURNS: max free string at the root of the dmap tree
*/
static int dbInitDmap(struct dmap * dp, s64 Blkno, int nblocks)
{
int blkno, w, b, r, nw, nb, i;
/* starting block number within the dmap */
blkno = Blkno & (BPERDMAP - 1);
if (blkno == 0) {
dp->nblocks = dp->nfree = cpu_to_le32(nblocks);
dp->start = cpu_to_le64(Blkno);
if (nblocks == BPERDMAP) {
memset(&dp->wmap[0], 0, LPERDMAP * 4);
memset(&dp->pmap[0], 0, LPERDMAP * 4);
goto initTree;
}
} else {
le32_add_cpu(&dp->nblocks, nblocks);
le32_add_cpu(&dp->nfree, nblocks);
}
/* word number containing start block number */
w = blkno >> L2DBWORD;
/*
* free the bits corresponding to the block range (ZEROS):
* note: not all bits of the first and last words may be contained
* within the block range.
*/
for (r = nblocks; r > 0; r -= nb, blkno += nb) {
/* number of bits preceding range to be freed in the word */
b = blkno & (DBWORD - 1);
/* number of bits to free in the word */
nb = min(r, DBWORD - b);
/* is partial word to be freed ? */
if (nb < DBWORD) {
/* free (set to 0) from the bitmap word */
dp->wmap[w] &= cpu_to_le32(~(ONES << (DBWORD - nb)
>> b));
dp->pmap[w] &= cpu_to_le32(~(ONES << (DBWORD - nb)
>> b));
/* skip the word freed */
w++;
} else {
/* free (set to 0) contiguous bitmap words */
nw = r >> L2DBWORD;
memset(&dp->wmap[w], 0, nw * 4);
memset(&dp->pmap[w], 0, nw * 4);
/* skip the words freed */
nb = nw << L2DBWORD;
w += nw;
}
}
/*
* mark bits following the range to be freed (non-existing
* blocks) as allocated (ONES)
*/
if (blkno == BPERDMAP)
goto initTree;
/* the first word beyond the end of existing blocks */
w = blkno >> L2DBWORD;
/* does nblocks fall on a 32-bit boundary ? */
b = blkno & (DBWORD - 1);
if (b) {
/* mark a partial word allocated */
dp->wmap[w] = dp->pmap[w] = cpu_to_le32(ONES >> b);
w++;
}
/* set the rest of the words in the page to allocated (ONES) */
for (i = w; i < LPERDMAP; i++)
dp->pmap[i] = dp->wmap[i] = cpu_to_le32(ONES);
/*
* init tree
*/
initTree:
return (dbInitDmapTree(dp));
}
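/*
* Worked example of the partial-word mask above (illustrative): with
* DBWORD = 32, b = 5 bits preceding the range and nb = 10 bits to
* free, ONES << (DBWORD - nb) = 0xffc00000 (top ten bits set), shifted
* right by b gives 0x07fe0000, and the complement 0xf801ffff therefore
* clears exactly bits 5..14 (counting from the most significant bit)
* while leaving the rest of the word untouched:
*
* dp->wmap[w] &= cpu_to_le32(0xf801ffff);
*/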
/*
* NAME: dbInitDmapTree()/ujfs_complete_dmap()
*
* FUNCTION: initialize summary tree of the specified dmap:
*
* at entry, bitmap of the dmap has been initialized;
*
* PARAMETERS:
* dp - dmap to complete
*
* RETURNS: max free string at the root of the tree
*/
static int dbInitDmapTree(struct dmap * dp)
{
struct dmaptree *tp;
s8 *cp;
int i;
/* init fixed info of tree */
tp = &dp->tree;
tp->nleafs = cpu_to_le32(LPERDMAP);
tp->l2nleafs = cpu_to_le32(L2LPERDMAP);
tp->leafidx = cpu_to_le32(LEAFIND);
tp->height = cpu_to_le32(4);
tp->budmin = BUDMIN;
/* init each leaf from corresponding wmap word:
* note: leaf is set to NOFREE(-1) if all blocks of corresponding
* bitmap word are allocated.
*/
cp = tp->stree + le32_to_cpu(tp->leafidx);
for (i = 0; i < LPERDMAP; i++)
*cp++ = dbMaxBud((u8 *) & dp->wmap[i]);
/* build the dmap's binary buddy summary tree */
return (dbInitTree(tp));
}
/*
* NAME: dbInitTree()/ujfs_adjtree()
*
* FUNCTION: initialize binary buddy summary tree of a dmap or dmapctl.
*
* at entry, the leaves of the tree have been initialized
* from the corresponding bitmap word or the root of the summary
* tree of the child control page;
* configure the binary buddy system at the leaf level, then
* bubble the values of the leaf nodes up the tree.
*
* PARAMETERS:
* dtp - pointer to the dmap or dmapctl summary tree
*
* RETURNS: max free string at the root of the tree
*/
static int dbInitTree(struct dmaptree * dtp)
{
int l2max, l2free, bsize, nextb, i;
int child, parent, nparent;
s8 *tp, *cp, *cp1;
tp = dtp->stree;
/* Determine the maximum free string possible for the leaves */
l2max = le32_to_cpu(dtp->l2nleafs) + dtp->budmin;
/*
* configure the leaf level into the binary buddy system
*
* Try to combine buddies starting with a buddy size of 1
* (i.e. two leaves). At a buddy size of 1 two buddy leaves
* can be combined if both buddies have a maximum free of l2min;
* the combination will result in the left-most buddy leaf having
* a maximum free of l2min+1.
* After processing all buddies for a given size, process buddies
* at the next higher buddy size (i.e. current size * 2) and
* the next maximum free (current free + 1).
* This continues until the maximum possible buddy combination
* yields maximum free.
*/
for (l2free = dtp->budmin, bsize = 1; l2free < l2max;
l2free++, bsize = nextb) {
/* get next buddy size == current buddy pair size */
nextb = bsize << 1;
/* scan each adjacent buddy pair at current buddy size */
for (i = 0, cp = tp + le32_to_cpu(dtp->leafidx);
i < le32_to_cpu(dtp->nleafs);
i += nextb, cp += nextb) {
/* coalesce if both adjacent buddies are max free */
if (*cp == l2free && *(cp + bsize) == l2free) {
*cp = l2free + 1; /* left take right */
*(cp + bsize) = -1; /* right give left */
}
}
}
/*
* bubble summary information of leaves up the tree.
*
* Starting at the leaf node level, the four nodes described by
* the higher level parent node are compared for a maximum free and
* this maximum becomes the value of the parent node.
* when all lower level nodes are processed in this fashion then
* move up to the next level (parent becomes a lower level node) and
* continue the process for that level.
*/
for (child = le32_to_cpu(dtp->leafidx),
nparent = le32_to_cpu(dtp->nleafs) >> 2;
nparent > 0; nparent >>= 2, child = parent) {
/* get index of 1st node of parent level */
parent = (child - 1) >> 2;
/* set the value of the parent node as the maximum
* of the four nodes of the current level.
*/
for (i = 0, cp = tp + child, cp1 = tp + parent;
i < nparent; i++, cp += 4, cp1++)
*cp1 = TREEMAX(cp);
}
return (*tp);
}
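/*
* Minimal trace of the two phases above (illustrative): for a dmap
* tree budmin = BUDMIN = 5, so four fully free leaves start as
* [5, 5, 5, 5]. The first buddy pass (l2free = 5, bsize = 1) folds
* each pair into its left leaf: [6, -1, 6, -1]. The next pass
* (l2free = 6, bsize = 2) folds again: [7, -1, -1, -1], i.e. one free
* string of 2^7 = 128 blocks. The bubble phase then sets the parent
* of the four leaves to TREEMAX of its children, which is 7.
*/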
/*
* dbInitDmapCtl()
*
* function: initialize dmapctl page
*
* parameters:
* dcp - pointer to the dmapctl page
* level - dmapctl level
* i - start leaf index not covered by the input block range
*/
static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i)
{
s8 *cp;
dcp->nleafs = cpu_to_le32(LPERCTL);
dcp->l2nleafs = cpu_to_le32(L2LPERCTL);
dcp->leafidx = cpu_to_le32(CTLLEAFIND);
dcp->height = cpu_to_le32(5);
dcp->budmin = L2BPERDMAP + L2LPERCTL * level;
/*
* initialize the leaves of current level that were not covered
* by the specified input block range (i.e. the leaves have no
* low level dmapctl or dmap).
*/
cp = &dcp->stree[CTLLEAFIND + i];
for (; i < LPERCTL; i++)
*cp++ = NOFREE;
/* build the dmap's binary buddy summary tree */
return (dbInitTree((struct dmaptree *) dcp));
}
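/*
* Illustrative budmin values for dbInitDmapCtl() (derived from the
* constants, not from separate documentation): at level 0 each dmapctl
* leaf summarizes one whole dmap, so budmin = L2BPERDMAP = 13 and a
* fully free child reports a free string of 2^13 blocks; at level 1
* each leaf summarizes a level-0 dmapctl, so budmin = 13 + 10 = 23.
*/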
/*
* NAME: dbGetL2AGSize()/ujfs_getagl2size()
*
* FUNCTION: Determine log2(allocation group size) from aggregate size
*
* PARAMETERS:
* nblocks - Number of blocks in aggregate
*
* RETURNS: log2(allocation group size) in aggregate blocks
*/
static int dbGetL2AGSize(s64 nblocks)
{
s64 sz;
s64 m;
int l2sz;
if (nblocks < BPERDMAP * MAXAG)
return (L2BPERDMAP);
/* round up aggregate size to power of 2 */
m = ((u64) 1 << (64 - 1));
for (l2sz = 64; l2sz >= 0; l2sz--, m >>= 1) {
if (m & nblocks)
break;
}
sz = (s64) 1 << l2sz;
if (sz < nblocks)
l2sz += 1;
/* agsize = roundupSize/max_number_of_ag */
return (l2sz - L2MAXAG);
}
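/*
* Worked example (illustrative): for nblocks = 3,000,000 the loop
* pairs m = 2^63 with l2sz = 64 and breaks when m reaches the highest
* set bit, 2^21, leaving l2sz = 22. sz = 2^22 = 4,194,304 is not
* smaller than nblocks, so l2sz stays 22 and the function returns
* 22 - L2MAXAG = 22 - 7 = 15: AGs of 2^15 = 32,768 blocks, which
* splits the rounded-up aggregate into exactly MAXAG = 128 groups.
*/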
/*
* NAME: dbMapFileSizeToMapSize()
*
* FUNCTION: compute number of blocks the block allocation map file
* can cover from the map file size;
*
* RETURNS: Number of blocks which can be covered by this block map file;
*/
/*
* maximum number of map pages at each level including control pages
*/
#define MAXL0PAGES (1 + LPERCTL)
#define MAXL1PAGES (1 + LPERCTL * MAXL0PAGES)
#define MAXL2PAGES (1 + LPERCTL * MAXL1PAGES)
/*
* convert number of map pages to the zero origin top dmapctl level
*/
#define BMAPPGTOLEV(npages) \
(((npages) <= 3 + MAXL0PAGES) ? 0 : \
((npages) <= 2 + MAXL1PAGES) ? 1 : 2)
s64 dbMapFileSizeToMapSize(struct inode * ipbmap)
{
struct super_block *sb = ipbmap->i_sb;
s64 nblocks;
s64 npages, ndmaps;
int level, i;
int complete, factor;
nblocks = ipbmap->i_size >> JFS_SBI(sb)->l2bsize;
npages = nblocks >> JFS_SBI(sb)->l2nbperpage;
level = BMAPPGTOLEV(npages);
/* At each level, accumulate the number of dmap pages covered by
* the number of full child levels below it;
* repeat for the last incomplete child level.
*/
ndmaps = 0;
npages--; /* skip the first global control page */
/* skip higher level control pages above top level covered by map */
npages -= (2 - level);
npages--; /* skip top level's control page */
for (i = level; i >= 0; i--) {
factor =
(i == 2) ? MAXL1PAGES : ((i == 1) ? MAXL0PAGES : 1);
complete = (u32) npages / factor;
ndmaps += complete * ((i == 2) ? LPERCTL * LPERCTL :
((i == 1) ? LPERCTL : 1));
/* pages in last/incomplete child */
npages = (u32) npages % factor;
/* skip incomplete child's level control page */
npages--;
}
/* convert the number of dmaps into the number of blocks
* which can be covered by the dmaps;
*/
nblocks = ndmaps << L2BPERDMAP;
return (nblocks);
}
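/*
* Worked example (illustrative): a map file of npages = 50 pages sits
* at level = 0 (50 <= 3 + MAXL0PAGES). The accounting then skips the
* global control page (49 left), the two higher-level control pages
* (47) and the level-0 dmapctl page (46), so ndmaps = 46 and the map
* covers 46 << L2BPERDMAP = 376,832 aggregate blocks.
*/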
| gpl-2.0 |
TomGiordano/kernel_zte_blade | fs/ntfs/compress.c | 1052 | 29794 | /**
* compress.c - NTFS kernel compressed attributes handling.
* Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2004 Anton Altaparmakov
* Copyright (c) 2002 Richard Russon
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program/include file is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program (in the main directory of the Linux-NTFS
* distribution in the file COPYING); if not, write to the Free Software
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include "attrib.h"
#include "inode.h"
#include "debug.h"
#include "ntfs.h"
/**
* ntfs_compression_constants - enum of constants used in the compression code
*/
typedef enum {
/* Token types and access mask. */
NTFS_SYMBOL_TOKEN = 0,
NTFS_PHRASE_TOKEN = 1,
NTFS_TOKEN_MASK = 1,
/* Compression sub-block constants. */
NTFS_SB_SIZE_MASK = 0x0fff,
NTFS_SB_SIZE = 0x1000,
NTFS_SB_IS_COMPRESSED = 0x8000,
/*
* The maximum compression block size is by definition 16 * the cluster
* size, with the maximum supported cluster size being 4kiB. Thus the
* maximum compression buffer size is 64kiB, so we use this when
* initializing the compression buffer.
*/
NTFS_MAX_CB_SIZE = 64 * 1024,
} ntfs_compression_constants;
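/*
* Illustrative reading of a sub-block header with these constants (the
* values below are made up for the example): each sub-block starts
* with a little-endian u16 whose low 12 bits (NTFS_SB_SIZE_MASK) plus
* 3 give the total span of the sub-block including the header, and
* whose NTFS_SB_IS_COMPRESSED bit says whether it needs decompressing.
* A header of 0x8ffd thus describes a compressed sub-block occupying
* 0xffd + 3 = 4096 bytes, while an uncompressed sub-block must carry
* 0x0fff in the low bits so that the 2 header bytes plus NTFS_SB_SIZE
* (4096) data bytes match the span checked in ntfs_decompress().
*/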
/**
* ntfs_compression_buffer - one buffer for the decompression engine
*/
static u8 *ntfs_compression_buffer = NULL;
/**
* ntfs_cb_lock - spinlock which protects ntfs_compression_buffer
*/
static DEFINE_SPINLOCK(ntfs_cb_lock);
/**
* allocate_compression_buffers - allocate the decompression buffers
*
* Caller has to hold the ntfs_lock mutex.
*
* Return 0 on success or -ENOMEM if the allocations failed.
*/
int allocate_compression_buffers(void)
{
BUG_ON(ntfs_compression_buffer);
ntfs_compression_buffer = vmalloc(NTFS_MAX_CB_SIZE);
if (!ntfs_compression_buffer)
return -ENOMEM;
return 0;
}
/**
* free_compression_buffers - free the decompression buffers
*
* Caller has to hold the ntfs_lock mutex.
*/
void free_compression_buffers(void)
{
BUG_ON(!ntfs_compression_buffer);
vfree(ntfs_compression_buffer);
ntfs_compression_buffer = NULL;
}
/**
* zero_partial_compressed_page - zero out of bounds compressed page region
*/
static void zero_partial_compressed_page(struct page *page,
const s64 initialized_size)
{
u8 *kp = page_address(page);
unsigned int kp_ofs;
ntfs_debug("Zeroing page region outside initialized size.");
if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) {
/*
* FIXME: Using clear_page() will become wrong when we get
* PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
*/
clear_page(kp);
return;
}
kp_ofs = initialized_size & ~PAGE_CACHE_MASK;
memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
return;
}
/**
* handle_bounds_compressed_page - test for & handle out of bounds compressed page
*/
static inline void handle_bounds_compressed_page(struct page *page,
const loff_t i_size, const s64 initialized_size)
{
if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) &&
(initialized_size < i_size))
zero_partial_compressed_page(page, initialized_size);
return;
}
/**
* ntfs_decompress - decompress a compression block into an array of pages
* @dest_pages: destination array of pages
* @dest_index: current index into @dest_pages (IN/OUT)
* @dest_ofs: current offset within @dest_pages[@dest_index] (IN/OUT)
* @dest_max_index: maximum index into @dest_pages (IN)
* @dest_max_ofs: maximum offset within @dest_pages[@dest_max_index] (IN)
* @xpage: the target page (-1 if none) (IN)
* @xpage_done: set to 1 if xpage was completed successfully (IN/OUT)
* @cb_start: compression block to decompress (IN)
* @cb_size: size of compression block @cb_start in bytes (IN)
* @i_size: file size when we started the read (IN)
* @initialized_size: initialized file size when we started the read (IN)
*
* The caller must have disabled preemption. ntfs_decompress() reenables it when
* the critical section is finished.
*
* This decompresses the compression block @cb_start into the array of
* destination pages @dest_pages starting at index @dest_index into @dest_pages
* and at offset @dest_ofs into the page @dest_pages[@dest_index].
*
* When the page @dest_pages[@xpage] is completed, @xpage_done is set to 1.
* If xpage is -1 or @xpage has not been completed, @xpage_done is not modified.
*
* @cb_start is a pointer to the compression block which needs decompressing
* and @cb_size is the size of @cb_start in bytes (8-64kiB).
*
* Return 0 if success or -EOVERFLOW on error in the compressed stream.
* @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
* completed during the decompression of the compression block (@cb_start).
*
* Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up
* unpredictably! You have been warned!
*
* Note to hackers: This function may not sleep until it has finished accessing
* the compression block @cb_start as it is a per-CPU buffer.
*/
static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
int *dest_ofs, const int dest_max_index, const int dest_max_ofs,
const int xpage, char *xpage_done, u8 *const cb_start,
const u32 cb_size, const loff_t i_size,
const s64 initialized_size)
{
/*
* Pointers into the compressed data, i.e. the compression block (cb),
* and the therein contained sub-blocks (sb).
*/
u8 *cb_end = cb_start + cb_size; /* End of cb. */
u8 *cb = cb_start; /* Current position in cb. */
u8 *cb_sb_start = cb; /* Beginning of the current sb in the cb. */
u8 *cb_sb_end; /* End of current sb / beginning of next sb. */
/* Variables for uncompressed data / destination. */
struct page *dp; /* Current destination page being worked on. */
u8 *dp_addr; /* Current pointer into dp. */
u8 *dp_sb_start; /* Start of current sub-block in dp. */
u8 *dp_sb_end; /* End of current sb in dp (dp_sb_start +
NTFS_SB_SIZE). */
u16 do_sb_start; /* @dest_ofs when starting this sub-block. */
u16 do_sb_end; /* @dest_ofs of end of this sb (do_sb_start +
NTFS_SB_SIZE). */
/* Variables for tag and token parsing. */
u8 tag; /* Current tag. */
int token; /* Loop counter for the eight tokens in tag. */
/* Need this because we can't sleep, so need two stages. */
int completed_pages[dest_max_index - *dest_index + 1];
int nr_completed_pages = 0;
/* Default error code. */
int err = -EOVERFLOW;
ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
do_next_sb:
ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
cb - cb_start);
/*
* Have we reached the end of the compression block or the end of the
* decompressed data? The latter can happen for example if the current
* position in the compression block is one byte before its end so the
* first two checks do not detect it.
*/
if (cb == cb_end || !le16_to_cpup((le16*)cb) ||
(*dest_index == dest_max_index &&
*dest_ofs == dest_max_ofs)) {
int i;
ntfs_debug("Completed. Returning success (0).");
err = 0;
return_error:
/* We can sleep from now on, so we drop lock. */
spin_unlock(&ntfs_cb_lock);
/* Second stage: finalize completed pages. */
if (nr_completed_pages > 0) {
for (i = 0; i < nr_completed_pages; i++) {
int di = completed_pages[i];
dp = dest_pages[di];
/*
* If we are outside the initialized size, zero
* the out of bounds page range.
*/
handle_bounds_compressed_page(dp, i_size,
initialized_size);
flush_dcache_page(dp);
kunmap(dp);
SetPageUptodate(dp);
unlock_page(dp);
if (di == xpage)
*xpage_done = 1;
else
page_cache_release(dp);
dest_pages[di] = NULL;
}
}
return err;
}
/* Setup offsets for the current sub-block destination. */
do_sb_start = *dest_ofs;
do_sb_end = do_sb_start + NTFS_SB_SIZE;
/* Check that we are still within allowed boundaries. */
if (*dest_index == dest_max_index && do_sb_end > dest_max_ofs)
goto return_overflow;
/* Does the minimum size of a compressed sb overflow valid range? */
if (cb + 6 > cb_end)
goto return_overflow;
/* Setup the current sub-block source pointers and validate range. */
cb_sb_start = cb;
cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK)
+ 3;
if (cb_sb_end > cb_end)
goto return_overflow;
/* Get the current destination page. */
dp = dest_pages[*dest_index];
if (!dp) {
/* No page present. Skip decompression of this sub-block. */
cb = cb_sb_end;
/* Advance destination position to next sub-block. */
*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_CACHE_MASK;
if (!*dest_ofs && (++*dest_index > dest_max_index))
goto return_overflow;
goto do_next_sb;
}
/* We have a valid destination page. Setup the destination pointers. */
dp_addr = (u8*)page_address(dp) + do_sb_start;
/* Now, we are ready to process the current sub-block (sb). */
if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
ntfs_debug("Found uncompressed sub-block.");
/* This sb is not compressed, just copy it into destination. */
/* Advance source position to first data byte. */
cb += 2;
/* An uncompressed sb must be full size. */
if (cb_sb_end - cb != NTFS_SB_SIZE)
goto return_overflow;
/* Copy the block and advance the source position. */
memcpy(dp_addr, cb, NTFS_SB_SIZE);
cb += NTFS_SB_SIZE;
/* Advance destination position to next sub-block. */
*dest_ofs += NTFS_SB_SIZE;
if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) {
finalize_page:
/*
* First stage: add current page index to array of
* completed pages.
*/
completed_pages[nr_completed_pages++] = *dest_index;
if (++*dest_index > dest_max_index)
goto return_overflow;
}
goto do_next_sb;
}
ntfs_debug("Found compressed sub-block.");
/* This sb is compressed, decompress it into destination. */
/* Setup destination pointers. */
dp_sb_start = dp_addr;
dp_sb_end = dp_sb_start + NTFS_SB_SIZE;
/* Forward to the first tag in the sub-block. */
cb += 2;
do_next_tag:
if (cb == cb_sb_end) {
/* Check if the decompressed sub-block was not full-length. */
if (dp_addr < dp_sb_end) {
int nr_bytes = do_sb_end - *dest_ofs;
ntfs_debug("Filling incomplete sub-block with "
"zeroes.");
/* Zero remainder and update destination position. */
memset(dp_addr, 0, nr_bytes);
*dest_ofs += nr_bytes;
}
/* We have finished the current sub-block. */
if (!(*dest_ofs &= ~PAGE_CACHE_MASK))
goto finalize_page;
goto do_next_sb;
}
/* Check we are still in range. */
if (cb > cb_sb_end || dp_addr > dp_sb_end)
goto return_overflow;
/* Get the next tag and advance to first token. */
tag = *cb++;
/* Parse the eight tokens described by the tag. */
for (token = 0; token < 8; token++, tag >>= 1) {
u16 lg, pt, length, max_non_overlap;
register u16 i;
u8 *dp_back_addr;
/* Check if we are done / still in range. */
if (cb >= cb_sb_end || dp_addr > dp_sb_end)
break;
/* Determine token type and parse appropriately.*/
if ((tag & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) {
/*
* We have a symbol token, copy the symbol across, and
* advance the source and destination positions.
*/
*dp_addr++ = *cb++;
++*dest_ofs;
/* Continue with the next token. */
continue;
}
/*
* We have a phrase token. Make sure it is not the first tag in
* the sb as this is illegal and would confuse the code below.
*/
if (dp_addr == dp_sb_start)
goto return_overflow;
/*
* Determine the number of bytes to go back (p) and the number
* of bytes to copy (l). We use an optimized algorithm in which
* we first calculate log2(current destination position in sb),
* which allows determination of l and p in O(1) rather than
* O(n). We just need an arch-optimized log2() function now.
*/
lg = 0;
for (i = *dest_ofs - do_sb_start - 1; i >= 0x10; i >>= 1)
lg++;
/* Get the phrase token into i. */
pt = le16_to_cpup((le16*)cb);
/*
* Calculate starting position of the byte sequence in
* the destination using the fact that p = (pt >> (12 - lg)) + 1
* and make sure we don't go too far back.
*/
dp_back_addr = dp_addr - (pt >> (12 - lg)) - 1;
if (dp_back_addr < dp_sb_start)
goto return_overflow;
/* Now calculate the length of the byte sequence. */
length = (pt & (0xfff >> lg)) + 3;
/* Advance destination position and verify it is in range. */
*dest_ofs += length;
if (*dest_ofs > do_sb_end)
goto return_overflow;
/* The number of non-overlapping bytes. */
max_non_overlap = dp_addr - dp_back_addr;
if (length <= max_non_overlap) {
/* The byte sequence doesn't overlap, just copy it. */
memcpy(dp_addr, dp_back_addr, length);
/* Advance destination pointer. */
dp_addr += length;
} else {
/*
* The byte sequence does overlap, copy non-overlapping
* part and then do a slow byte by byte copy for the
* overlapping part. Also, advance the destination
* pointer.
*/
memcpy(dp_addr, dp_back_addr, max_non_overlap);
dp_addr += max_non_overlap;
dp_back_addr += max_non_overlap;
length -= max_non_overlap;
while (length--)
*dp_addr++ = *dp_back_addr++;
}
/* Advance source position and continue with the next token. */
cb += 2;
}
/* No tokens left in the current tag. Continue with the next tag. */
goto do_next_tag;
return_overflow:
ntfs_error(NULL, "Failed. Returning -EOVERFLOW.");
goto return_error;
}
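/*
* Worked phrase-token example (illustrative values): suppose 0x100
* bytes of this sub-block have already been produced, so i starts at
* 0xff and lg ends up as 4, and suppose the on-disk token is
* pt = 0x0512. Then the back pointer is p = (pt >> (12 - 4)) + 1 =
* 0x05 + 1 = 6 and the length is (pt & (0xfff >> 4)) + 3 =
* 0x12 + 3 = 21. Since 21 > 6 the sequence overlaps itself, so after
* the 6-byte memcpy() the remaining 15 bytes are copied one at a
* time, replicating the 6-byte period.
*/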
/**
* ntfs_read_compressed_block - read a compressed block into the page cache
* @page: locked page in the compression block(s) we need to read
*
* When we are called the page has already been verified to be locked and the
* attribute is known to be non-resident, not encrypted, but compressed.
*
* 1. Determine which compression block(s) @page is in.
* 2. Get hold of all pages corresponding to this/these compression block(s).
* 3. Read the (first) compression block.
* 4. Decompress it into the corresponding pages.
* 5. Throw the compressed data away and proceed to 3. for the next compression
* block or return success if no more compression blocks left.
*
* Warning: We have to be careful what we do about existing pages. They might
* have been written to so that we would lose data if we were to just overwrite
* them with the out-of-date uncompressed data.
*
* FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at
* the end of the file I think. We need to detect this case and zero the out
* of bounds remainder of the page in question and mark it as handled. At the
* moment we would just return -EIO on such a page. This bug will only become
* apparent if pages are above 8kiB and the NTFS volume only uses 512 byte
* clusters so is probably not going to be seen by anyone. Still this should
* be fixed. (AIA)
*
* FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in
* handling sparse and compressed cbs. (AIA)
*
* FIXME: At the moment we don't do any zeroing out in the case that
* initialized_size is less than data_size. This should be safe because of the
* nature of the compression algorithm used. Just in case we check and output
* an error message in read inode if the two sizes are not equal for a
* compressed file. (AIA)
*/
int ntfs_read_compressed_block(struct page *page)
{
loff_t i_size;
s64 initialized_size;
struct address_space *mapping = page->mapping;
ntfs_inode *ni = NTFS_I(mapping->host);
ntfs_volume *vol = ni->vol;
struct super_block *sb = vol->sb;
runlist_element *rl;
unsigned long flags, block_size = sb->s_blocksize;
unsigned char block_size_bits = sb->s_blocksize_bits;
u8 *cb, *cb_pos, *cb_end;
struct buffer_head **bhs;
unsigned long offset, index = page->index;
u32 cb_size = ni->itype.compressed.block_size;
u64 cb_size_mask = cb_size - 1UL;
VCN vcn;
LCN lcn;
/* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */
VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >>
vol->cluster_size_bits;
/*
* The first vcn after the last wanted vcn (minimum alignment is again
* PAGE_CACHE_SIZE).
*/
VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1)
& ~cb_size_mask) >> vol->cluster_size_bits;
/* Number of compression blocks (cbs) in the wanted vcn range. */
unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
>> ni->itype.compressed.block_size_bits;
/*
* Number of pages required to store the uncompressed data from all
* compression blocks (cbs) overlapping @page. Due to alignment
* guarantees of start_vcn and end_vcn, no need to round up here.
*/
unsigned int nr_pages = (end_vcn - start_vcn) <<
vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
unsigned int xpage, max_page, cur_page, cur_ofs, i;
unsigned int cb_clusters, cb_max_ofs;
int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
struct page **pages;
unsigned char xpage_done = 0;
ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = "
"%i.", index, cb_size, nr_pages);
/*
* Bad things happen if we get here for anything that is not an
* unnamed $DATA attribute.
*/
BUG_ON(ni->type != AT_DATA);
BUG_ON(ni->name_len);
pages = kmalloc(nr_pages * sizeof(struct page *), GFP_NOFS);
/* Allocate memory to store the buffer heads we need. */
bhs_size = cb_size / block_size * sizeof(struct buffer_head *);
bhs = kmalloc(bhs_size, GFP_NOFS);
if (unlikely(!pages || !bhs)) {
kfree(bhs);
kfree(pages);
unlock_page(page);
ntfs_error(vol->sb, "Failed to allocate internal buffers.");
return -ENOMEM;
}
/*
* We have already been given one page, this is the one we must do.
* Once again, the alignment guarantees keep it simple.
*/
offset = start_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
xpage = index - offset;
pages[xpage] = page;
/*
* The remaining pages need to be allocated and inserted into the page
* cache, alignment guarantees keep all the below much simpler. (-8
*/
read_lock_irqsave(&ni->size_lock, flags);
i_size = i_size_read(VFS_I(ni));
initialized_size = ni->initialized_size;
read_unlock_irqrestore(&ni->size_lock, flags);
max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
offset;
/* Is the page fully outside i_size? (truncate in progress) */
if (xpage >= max_page) {
kfree(bhs);
kfree(pages);
zero_user(page, 0, PAGE_CACHE_SIZE);
ntfs_debug("Compressed read outside i_size - truncated?");
SetPageUptodate(page);
unlock_page(page);
return 0;
}
if (nr_pages < max_page)
max_page = nr_pages;
for (i = 0; i < max_page; i++, offset++) {
if (i != xpage)
pages[i] = grab_cache_page_nowait(mapping, offset);
page = pages[i];
if (page) {
/*
* We only (re)read the page if it isn't already read
* in and/or dirty or we would be losing data or at
* least wasting our time.
*/
if (!PageDirty(page) && (!PageUptodate(page) ||
PageError(page))) {
ClearPageError(page);
kmap(page);
continue;
}
unlock_page(page);
page_cache_release(page);
pages[i] = NULL;
}
}
/*
* We have the runlist, and all the destination pages we need to fill.
* Now read the first compression block.
*/
cur_page = 0;
cur_ofs = 0;
cb_clusters = ni->itype.compressed.block_clusters;
do_next_cb:
nr_cbs--;
nr_bhs = 0;
/* Read all cb buffer heads one cluster at a time. */
rl = NULL;
for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
vcn++) {
bool is_retry = false;
if (!rl) {
lock_retry_remap:
down_read(&ni->runlist.lock);
rl = ni->runlist.rl;
}
if (likely(rl != NULL)) {
/* Seek to element containing target vcn. */
while (rl->length && rl[1].vcn <= vcn)
rl++;
lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
} else
lcn = LCN_RL_NOT_MAPPED;
ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
(unsigned long long)vcn,
(unsigned long long)lcn);
if (lcn < 0) {
/*
* When we reach the first sparse cluster we have
* finished with the cb.
*/
if (lcn == LCN_HOLE)
break;
if (is_retry || lcn != LCN_RL_NOT_MAPPED)
goto rl_err;
is_retry = true;
/*
* Attempt to map runlist, dropping lock for the
* duration.
*/
up_read(&ni->runlist.lock);
if (!ntfs_map_runlist(ni, vcn))
goto lock_retry_remap;
goto map_rl_err;
}
block = lcn << vol->cluster_size_bits >> block_size_bits;
/* Read the lcn from device in chunks of block_size bytes. */
max_block = block + (vol->cluster_size >> block_size_bits);
do {
ntfs_debug("block = 0x%x.", block);
if (unlikely(!(bhs[nr_bhs] = sb_getblk(sb, block))))
goto getblk_err;
nr_bhs++;
} while (++block < max_block);
}
/* Release the lock if we took it. */
if (rl)
up_read(&ni->runlist.lock);
/* Setup and initiate io on all buffer heads. */
for (i = 0; i < nr_bhs; i++) {
struct buffer_head *tbh = bhs[i];
if (!trylock_buffer(tbh))
continue;
if (unlikely(buffer_uptodate(tbh))) {
unlock_buffer(tbh);
continue;
}
get_bh(tbh);
tbh->b_end_io = end_buffer_read_sync;
submit_bh(READ, tbh);
}
/* Wait for io completion on all buffer heads. */
for (i = 0; i < nr_bhs; i++) {
struct buffer_head *tbh = bhs[i];
if (buffer_uptodate(tbh))
continue;
wait_on_buffer(tbh);
/*
* We need an optimization barrier here, otherwise we start
* hitting the below fixup code when accessing a loopback
* mounted ntfs partition. This indicates either there is a
* race condition in the loop driver or, more likely, gcc
* overoptimises the code without the barrier and it doesn't
* do the Right Thing(TM).
*/
barrier();
if (unlikely(!buffer_uptodate(tbh))) {
ntfs_warning(vol->sb, "Buffer is unlocked but not "
"uptodate! Unplugging the disk queue "
"and rescheduling.");
get_bh(tbh);
blk_run_address_space(mapping);
schedule();
put_bh(tbh);
if (unlikely(!buffer_uptodate(tbh)))
goto read_err;
ntfs_warning(vol->sb, "Buffer is now uptodate. Good.");
}
}
/*
* Get the compression buffer. We must not sleep any more
* until we are finished with it.
*/
spin_lock(&ntfs_cb_lock);
cb = ntfs_compression_buffer;
BUG_ON(!cb);
cb_pos = cb;
cb_end = cb + cb_size;
/* Copy the buffer heads into the contiguous buffer. */
for (i = 0; i < nr_bhs; i++) {
memcpy(cb_pos, bhs[i]->b_data, block_size);
cb_pos += block_size;
}
/* Just a precaution. */
if (cb_pos + 2 <= cb + cb_size)
*(u16*)cb_pos = 0;
/* Reset cb_pos back to the beginning. */
cb_pos = cb;
/* We now have both source (if present) and destination. */
ntfs_debug("Successfully read the compression block.");
/* The last page and maximum offset within it for the current cb. */
cb_max_page = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size;
cb_max_ofs = cb_max_page & ~PAGE_CACHE_MASK;
cb_max_page >>= PAGE_CACHE_SHIFT;
/* Catch end of file inside a compression block. */
if (cb_max_page > max_page)
cb_max_page = max_page;
if (vcn == start_vcn - cb_clusters) {
/* Sparse cb, zero out page range overlapping the cb. */
ntfs_debug("Found sparse compression block.");
/* We can sleep from now on, so we drop lock. */
spin_unlock(&ntfs_cb_lock);
if (cb_max_ofs)
cb_max_page--;
for (; cur_page < cb_max_page; cur_page++) {
page = pages[cur_page];
if (page) {
/*
* FIXME: Using clear_page() will become wrong
* when we get PAGE_CACHE_SIZE != PAGE_SIZE but
* for now there is no problem.
*/
if (likely(!cur_ofs))
clear_page(page_address(page));
else
memset(page_address(page) + cur_ofs, 0,
PAGE_CACHE_SIZE -
cur_ofs);
flush_dcache_page(page);
kunmap(page);
SetPageUptodate(page);
unlock_page(page);
if (cur_page == xpage)
xpage_done = 1;
else
page_cache_release(page);
pages[cur_page] = NULL;
}
cb_pos += PAGE_CACHE_SIZE - cur_ofs;
cur_ofs = 0;
if (cb_pos >= cb_end)
break;
}
/* If we have a partial final page, deal with it now. */
if (cb_max_ofs && cb_pos < cb_end) {
page = pages[cur_page];
if (page)
memset(page_address(page) + cur_ofs, 0,
cb_max_ofs - cur_ofs);
/*
* No need to update cb_pos at this stage:
* cb_pos += cb_max_ofs - cur_ofs;
*/
cur_ofs = cb_max_ofs;
}
} else if (vcn == start_vcn) {
/* We can't sleep so we need two stages. */
unsigned int cur2_page = cur_page;
unsigned int cur_ofs2 = cur_ofs;
u8 *cb_pos2 = cb_pos;
ntfs_debug("Found uncompressed compression block.");
/* Uncompressed cb, copy it to the destination pages. */
/*
* TODO: As a big optimization, we could detect this case
* before we read all the pages and use block_read_full_page()
* on all full pages instead (we still have to treat partial
* pages especially but at least we are getting rid of the
* synchronous io for the majority of pages).
* Or if we choose not to do the read-ahead/-behind stuff, we
* could just return block_read_full_page(pages[xpage]) as long
* as PAGE_CACHE_SIZE <= cb_size.
*/
if (cb_max_ofs)
cb_max_page--;
/* First stage: copy data into destination pages. */
for (; cur_page < cb_max_page; cur_page++) {
page = pages[cur_page];
if (page)
memcpy(page_address(page) + cur_ofs, cb_pos,
PAGE_CACHE_SIZE - cur_ofs);
cb_pos += PAGE_CACHE_SIZE - cur_ofs;
cur_ofs = 0;
if (cb_pos >= cb_end)
break;
}
/* If we have a partial final page, deal with it now. */
if (cb_max_ofs && cb_pos < cb_end) {
page = pages[cur_page];
if (page)
memcpy(page_address(page) + cur_ofs, cb_pos,
cb_max_ofs - cur_ofs);
cb_pos += cb_max_ofs - cur_ofs;
cur_ofs = cb_max_ofs;
}
/* We can sleep from now on, so drop lock. */
spin_unlock(&ntfs_cb_lock);
/* Second stage: finalize pages. */
for (; cur2_page < cb_max_page; cur2_page++) {
page = pages[cur2_page];
if (page) {
/*
* If we are outside the initialized size, zero
* the out of bounds page range.
*/
handle_bounds_compressed_page(page, i_size,
initialized_size);
flush_dcache_page(page);
kunmap(page);
SetPageUptodate(page);
unlock_page(page);
if (cur2_page == xpage)
xpage_done = 1;
else
page_cache_release(page);
pages[cur2_page] = NULL;
}
cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2;
cur_ofs2 = 0;
if (cb_pos2 >= cb_end)
break;
}
} else {
/* Compressed cb, decompress it into the destination page(s). */
unsigned int prev_cur_page = cur_page;
ntfs_debug("Found compressed compression block.");
err = ntfs_decompress(pages, &cur_page, &cur_ofs,
cb_max_page, cb_max_ofs, xpage, &xpage_done,
cb_pos, cb_size - (cb_pos - cb), i_size,
initialized_size);
/*
* We can sleep from now on, lock already dropped by
* ntfs_decompress().
*/
if (err) {
ntfs_error(vol->sb, "ntfs_decompress() failed in inode "
"0x%lx with error code %i. Skipping "
"this compression block.",
ni->mft_no, -err);
/* Release the unfinished pages. */
for (; prev_cur_page < cur_page; prev_cur_page++) {
page = pages[prev_cur_page];
if (page) {
flush_dcache_page(page);
kunmap(page);
unlock_page(page);
if (prev_cur_page != xpage)
page_cache_release(page);
pages[prev_cur_page] = NULL;
}
}
}
}
/* Release the buffer heads. */
for (i = 0; i < nr_bhs; i++)
brelse(bhs[i]);
/* Do we have more work to do? */
if (nr_cbs)
goto do_next_cb;
/* We no longer need the list of buffer heads. */
kfree(bhs);
/* Clean up if we have any pages left. Should never happen. */
for (cur_page = 0; cur_page < max_page; cur_page++) {
page = pages[cur_page];
if (page) {
ntfs_error(vol->sb, "Still have pages left! "
"Terminating them with extreme "
"prejudice. Inode 0x%lx, page index "
"0x%lx.", ni->mft_no, page->index);
flush_dcache_page(page);
kunmap(page);
unlock_page(page);
if (cur_page != xpage)
page_cache_release(page);
pages[cur_page] = NULL;
}
}
/* We no longer need the list of pages. */
kfree(pages);
/* If we have completed the requested page, we return success. */
if (likely(xpage_done))
return 0;
ntfs_debug("Failed. Returning error code %s.", err == -EOVERFLOW ?
"EOVERFLOW" : (!err ? "EIO" : "unknown error"));
return err < 0 ? err : -EIO;
read_err:
ntfs_error(vol->sb, "IO error while reading compressed data.");
/* Release the buffer heads. */
for (i = 0; i < nr_bhs; i++)
brelse(bhs[i]);
goto err_out;
map_rl_err:
ntfs_error(vol->sb, "ntfs_map_runlist() failed. Cannot read "
"compression block.");
goto err_out;
rl_err:
up_read(&ni->runlist.lock);
ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn() failed. Cannot read "
"compression block.");
goto err_out;
getblk_err:
up_read(&ni->runlist.lock);
ntfs_error(vol->sb, "getblk() failed. Cannot read compression block.");
err_out:
kfree(bhs);
for (i = cur_page; i < max_page; i++) {
page = pages[i];
if (page) {
flush_dcache_page(page);
kunmap(page);
unlock_page(page);
if (i != xpage)
page_cache_release(page);
}
}
kfree(pages);
return -EIO;
}
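/*
* Worked geometry example for the setup above (illustrative): with
* 4kiB clusters and pages and a 16-cluster (64kiB) compression block,
* cb_size_mask = 0xffff. For page->index = 0x25 the first wanted byte
* is 0x25000, so start_vcn = (0x25000 & ~0xffff) >> 12 = 0x20 and
* end_vcn = ((0x26000 + 0xffff) & ~0xffff) >> 12 = 0x30, giving
* nr_cbs = 1 and nr_pages = 16; offset = 0x20 and xpage = 5, i.e. the
* caller's page is the sixth of the sixteen pages backing this cb.
*/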
| gpl-2.0 |
shaumux/semc-kernel-qsd8k-jb | fs/ntfs/compress.c | 1052 | 29794 | /**
* compress.c - NTFS kernel compressed attributes handling.
* Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2004 Anton Altaparmakov
* Copyright (c) 2002 Richard Russon
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program/include file is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program (in the main directory of the Linux-NTFS
* distribution in the file COPYING); if not, write to the Free Software
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include "attrib.h"
#include "inode.h"
#include "debug.h"
#include "ntfs.h"
/**
* ntfs_compression_constants - enum of constants used in the compression code
*/
typedef enum {
/* Token types and access mask. */
NTFS_SYMBOL_TOKEN = 0,
NTFS_PHRASE_TOKEN = 1,
NTFS_TOKEN_MASK = 1,
/* Compression sub-block constants. */
NTFS_SB_SIZE_MASK = 0x0fff,
NTFS_SB_SIZE = 0x1000,
NTFS_SB_IS_COMPRESSED = 0x8000,
/*
* The maximum compression block size is by definition 16 * the cluster
* size, with the maximum supported cluster size being 4kiB. Thus the
* maximum compression buffer size is 64kiB, so we use this when
* initializing the compression buffer.
*/
NTFS_MAX_CB_SIZE = 64 * 1024,
} ntfs_compression_constants;
/**
* ntfs_compression_buffer - one buffer for the decompression engine
*/
static u8 *ntfs_compression_buffer = NULL;
/**
* ntfs_cb_lock - spinlock which protects ntfs_compression_buffer
*/
static DEFINE_SPINLOCK(ntfs_cb_lock);
/**
* allocate_compression_buffers - allocate the decompression buffers
*
* Caller has to hold the ntfs_lock mutex.
*
* Return 0 on success or -ENOMEM if the allocations failed.
*/
int allocate_compression_buffers(void)
{
BUG_ON(ntfs_compression_buffer);
ntfs_compression_buffer = vmalloc(NTFS_MAX_CB_SIZE);
if (!ntfs_compression_buffer)
return -ENOMEM;
return 0;
}
/**
* free_compression_buffers - free the decompression buffers
*
* Caller has to hold the ntfs_lock mutex.
*/
void free_compression_buffers(void)
{
BUG_ON(!ntfs_compression_buffer);
vfree(ntfs_compression_buffer);
ntfs_compression_buffer = NULL;
}
/**
* zero_partial_compressed_page - zero out of bounds compressed page region
*/
static void zero_partial_compressed_page(struct page *page,
const s64 initialized_size)
{
u8 *kp = page_address(page);
unsigned int kp_ofs;
ntfs_debug("Zeroing page region outside initialized size.");
if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) {
/*
* FIXME: Using clear_page() will become wrong when we get
* PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
*/
clear_page(kp);
return;
}
kp_ofs = initialized_size & ~PAGE_CACHE_MASK;
memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
return;
}
/**
* handle_bounds_compressed_page - test for&handle out of bounds compressed page
*/
static inline void handle_bounds_compressed_page(struct page *page,
const loff_t i_size, const s64 initialized_size)
{
if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) &&
(initialized_size < i_size))
zero_partial_compressed_page(page, initialized_size);
return;
}
/**
* ntfs_decompress - decompress a compression block into an array of pages
* @dest_pages: destination array of pages
* @dest_index: current index into @dest_pages (IN/OUT)
* @dest_ofs: current offset within @dest_pages[@dest_index] (IN/OUT)
* @dest_max_index: maximum index into @dest_pages (IN)
* @dest_max_ofs: maximum offset within @dest_pages[@dest_max_index] (IN)
* @xpage: the target page (-1 if none) (IN)
* @xpage_done: set to 1 if xpage was completed successfully (IN/OUT)
* @cb_start: compression block to decompress (IN)
* @cb_size: size of compression block @cb_start in bytes (IN)
* @i_size: file size when we started the read (IN)
* @initialized_size: initialized file size when we started the read (IN)
*
* The caller must have disabled preemption. ntfs_decompress() reenables it when
* the critical section is finished.
*
* This decompresses the compression block @cb_start into the array of
* destination pages @dest_pages starting at index @dest_index into @dest_pages
* and at offset @dest_pos into the page @dest_pages[@dest_index].
*
* When the page @dest_pages[@xpage] is completed, @xpage_done is set to 1.
* If xpage is -1 or @xpage has not been completed, @xpage_done is not modified.
*
* @cb_start is a pointer to the compression block which needs decompressing
* and @cb_size is the size of @cb_start in bytes (8-64kiB).
*
* Return 0 if success or -EOVERFLOW on error in the compressed stream.
* @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
* completed during the decompression of the compression block (@cb_start).
*
* Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up
* unpredicatbly! You have been warned!
*
* Note to hackers: This function may not sleep until it has finished accessing
* the compression block @cb_start as it is a per-CPU buffer.
*/
static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
int *dest_ofs, const int dest_max_index, const int dest_max_ofs,
const int xpage, char *xpage_done, u8 *const cb_start,
const u32 cb_size, const loff_t i_size,
const s64 initialized_size)
{
/*
* Pointers into the compressed data, i.e. the compression block (cb),
* and the therein contained sub-blocks (sb).
*/
u8 *cb_end = cb_start + cb_size; /* End of cb. */
u8 *cb = cb_start; /* Current position in cb. */
u8 *cb_sb_start = cb; /* Beginning of the current sb in the cb. */
u8 *cb_sb_end; /* End of current sb / beginning of next sb. */
/* Variables for uncompressed data / destination. */
struct page *dp; /* Current destination page being worked on. */
u8 *dp_addr; /* Current pointer into dp. */
u8 *dp_sb_start; /* Start of current sub-block in dp. */
u8 *dp_sb_end; /* End of current sb in dp (dp_sb_start +
NTFS_SB_SIZE). */
u16 do_sb_start; /* @dest_ofs when starting this sub-block. */
u16 do_sb_end; /* @dest_ofs of end of this sb (do_sb_start +
NTFS_SB_SIZE). */
/* Variables for tag and token parsing. */
u8 tag; /* Current tag. */
int token; /* Loop counter for the eight tokens in tag. */
/* Need this because we can't sleep, so need two stages. */
int completed_pages[dest_max_index - *dest_index + 1];
int nr_completed_pages = 0;
/* Default error code. */
int err = -EOVERFLOW;
ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
do_next_sb:
ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
cb - cb_start);
/*
* Have we reached the end of the compression block or the end of the
* decompressed data? The latter can happen for example if the current
* position in the compression block is one byte before its end so the
* first two checks do not detect it.
*/
if (cb == cb_end || !le16_to_cpup((le16*)cb) ||
(*dest_index == dest_max_index &&
*dest_ofs == dest_max_ofs)) {
int i;
ntfs_debug("Completed. Returning success (0).");
err = 0;
return_error:
/* We can sleep from now on, so we drop lock. */
spin_unlock(&ntfs_cb_lock);
/* Second stage: finalize completed pages. */
if (nr_completed_pages > 0) {
for (i = 0; i < nr_completed_pages; i++) {
int di = completed_pages[i];
dp = dest_pages[di];
/*
* If we are outside the initialized size, zero
* the out of bounds page range.
*/
handle_bounds_compressed_page(dp, i_size,
initialized_size);
flush_dcache_page(dp);
kunmap(dp);
SetPageUptodate(dp);
unlock_page(dp);
if (di == xpage)
*xpage_done = 1;
else
page_cache_release(dp);
dest_pages[di] = NULL;
}
}
return err;
}
/* Setup offsets for the current sub-block destination. */
do_sb_start = *dest_ofs;
do_sb_end = do_sb_start + NTFS_SB_SIZE;
/* Check that we are still within allowed boundaries. */
if (*dest_index == dest_max_index && do_sb_end > dest_max_ofs)
goto return_overflow;
/* Does the minimum size of a compressed sb overflow valid range? */
if (cb + 6 > cb_end)
goto return_overflow;
/* Setup the current sub-block source pointers and validate range. */
cb_sb_start = cb;
cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK)
+ 3;
if (cb_sb_end > cb_end)
goto return_overflow;
/* Get the current destination page. */
dp = dest_pages[*dest_index];
if (!dp) {
/* No page present. Skip decompression of this sub-block. */
cb = cb_sb_end;
/* Advance destination position to next sub-block. */
*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_CACHE_MASK;
if (!*dest_ofs && (++*dest_index > dest_max_index))
goto return_overflow;
goto do_next_sb;
}
/* We have a valid destination page. Setup the destination pointers. */
dp_addr = (u8*)page_address(dp) + do_sb_start;
/* Now, we are ready to process the current sub-block (sb). */
if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
ntfs_debug("Found uncompressed sub-block.");
/* This sb is not compressed, just copy it into destination. */
/* Advance source position to first data byte. */
cb += 2;
/* An uncompressed sb must be full size. */
if (cb_sb_end - cb != NTFS_SB_SIZE)
goto return_overflow;
/* Copy the block and advance the source position. */
memcpy(dp_addr, cb, NTFS_SB_SIZE);
cb += NTFS_SB_SIZE;
/* Advance destination position to next sub-block. */
*dest_ofs += NTFS_SB_SIZE;
if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) {
finalize_page:
/*
* First stage: add current page index to array of
* completed pages.
*/
completed_pages[nr_completed_pages++] = *dest_index;
if (++*dest_index > dest_max_index)
goto return_overflow;
}
goto do_next_sb;
}
ntfs_debug("Found compressed sub-block.");
/* This sb is compressed, decompress it into destination. */
/* Setup destination pointers. */
dp_sb_start = dp_addr;
dp_sb_end = dp_sb_start + NTFS_SB_SIZE;
/* Forward to the first tag in the sub-block. */
cb += 2;
do_next_tag:
if (cb == cb_sb_end) {
/* Check if the decompressed sub-block was not full-length. */
if (dp_addr < dp_sb_end) {
int nr_bytes = do_sb_end - *dest_ofs;
ntfs_debug("Filling incomplete sub-block with "
"zeroes.");
/* Zero remainder and update destination position. */
memset(dp_addr, 0, nr_bytes);
*dest_ofs += nr_bytes;
}
/* We have finished the current sub-block. */
if (!(*dest_ofs &= ~PAGE_CACHE_MASK))
goto finalize_page;
goto do_next_sb;
}
/* Check we are still in range. */
if (cb > cb_sb_end || dp_addr > dp_sb_end)
goto return_overflow;
/* Get the next tag and advance to first token. */
tag = *cb++;
/* Parse the eight tokens described by the tag. */
for (token = 0; token < 8; token++, tag >>= 1) {
u16 lg, pt, length, max_non_overlap;
register u16 i;
u8 *dp_back_addr;
/* Check if we are done / still in range. */
if (cb >= cb_sb_end || dp_addr > dp_sb_end)
break;
/* Determine token type and parse appropriately.*/
if ((tag & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) {
/*
* We have a symbol token, copy the symbol across, and
* advance the source and destination positions.
*/
*dp_addr++ = *cb++;
++*dest_ofs;
/* Continue with the next token. */
continue;
}
/*
* We have a phrase token. Make sure it is not the first tag in
* the sb as this is illegal and would confuse the code below.
*/
if (dp_addr == dp_sb_start)
goto return_overflow;
/*
* Determine the number of bytes to go back (p) and the number
* of bytes to copy (l). We use an optimized algorithm in which
* we first calculate log2(current destination position in sb),
* which allows determination of l and p in O(1) rather than
* O(n). We just need an arch-optimized log2() function now.
*/
lg = 0;
for (i = *dest_ofs - do_sb_start - 1; i >= 0x10; i >>= 1)
lg++;
/* Get the phrase token into i. */
pt = le16_to_cpup((le16*)cb);
/*
* Calculate starting position of the byte sequence in
* the destination using the fact that p = (pt >> (12 - lg)) + 1
* and make sure we don't go too far back.
*/
dp_back_addr = dp_addr - (pt >> (12 - lg)) - 1;
if (dp_back_addr < dp_sb_start)
goto return_overflow;
/* Now calculate the length of the byte sequence. */
length = (pt & (0xfff >> lg)) + 3;
/* Advance destination position and verify it is in range. */
*dest_ofs += length;
if (*dest_ofs > do_sb_end)
goto return_overflow;
/* The number of non-overlapping bytes. */
max_non_overlap = dp_addr - dp_back_addr;
if (length <= max_non_overlap) {
/* The byte sequence doesn't overlap, just copy it. */
memcpy(dp_addr, dp_back_addr, length);
/* Advance destination pointer. */
dp_addr += length;
} else {
/*
* The byte sequence does overlap, copy non-overlapping
* part and then do a slow byte by byte copy for the
* overlapping part. Also, advance the destination
* pointer.
*/
memcpy(dp_addr, dp_back_addr, max_non_overlap);
dp_addr += max_non_overlap;
dp_back_addr += max_non_overlap;
length -= max_non_overlap;
while (length--)
*dp_addr++ = *dp_back_addr++;
}
/* Advance source position and continue with the next token. */
cb += 2;
}
/* No tokens left in the current tag. Continue with the next tag. */
goto do_next_tag;
return_overflow:
ntfs_error(NULL, "Failed. Returning -EOVERFLOW.");
goto return_error;
}
/**
* ntfs_read_compressed_block - read a compressed block into the page cache
* @page: locked page in the compression block(s) we need to read
*
* When we are called the page has already been verified to be locked and the
* attribute is known to be non-resident, not encrypted, but compressed.
*
* 1. Determine which compression block(s) @page is in.
* 2. Get hold of all pages corresponding to this/these compression block(s).
* 3. Read the (first) compression block.
* 4. Decompress it into the corresponding pages.
* 5. Throw the compressed data away and proceed to 3. for the next compression
* block or return success if no more compression blocks left.
*
* Warning: We have to be careful what we do about existing pages. They might
* have been written to so that we would lose data if we were to just overwrite
* them with the out-of-date uncompressed data.
*
* FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at
* the end of the file I think. We need to detect this case and zero the out
* of bounds remainder of the page in question and mark it as handled. At the
* moment we would just return -EIO on such a page. This bug will only become
* apparent if pages are above 8kiB and the NTFS volume only uses 512 byte
* clusters so is probably not going to be seen by anyone. Still this should
* be fixed. (AIA)
*
* FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in
* handling sparse and compressed cbs. (AIA)
*
* FIXME: At the moment we don't do any zeroing out in the case that
* initialized_size is less than data_size. This should be safe because of the
* nature of the compression algorithm used. Just in case we check and output
* an error message in read inode if the two sizes are not equal for a
* compressed file. (AIA)
*/
int ntfs_read_compressed_block(struct page *page)
{
loff_t i_size;
s64 initialized_size;
struct address_space *mapping = page->mapping;
ntfs_inode *ni = NTFS_I(mapping->host);
ntfs_volume *vol = ni->vol;
struct super_block *sb = vol->sb;
runlist_element *rl;
unsigned long flags, block_size = sb->s_blocksize;
unsigned char block_size_bits = sb->s_blocksize_bits;
u8 *cb, *cb_pos, *cb_end;
struct buffer_head **bhs;
unsigned long offset, index = page->index;
u32 cb_size = ni->itype.compressed.block_size;
u64 cb_size_mask = cb_size - 1UL;
VCN vcn;
LCN lcn;
/* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */
VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >>
vol->cluster_size_bits;
/*
* The first vcn after the last wanted vcn (minumum alignment is again
* PAGE_CACHE_SIZE.
*/
VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1)
& ~cb_size_mask) >> vol->cluster_size_bits;
/* Number of compression blocks (cbs) in the wanted vcn range. */
unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
>> ni->itype.compressed.block_size_bits;
/*
* Number of pages required to store the uncompressed data from all
* compression blocks (cbs) overlapping @page. Due to alignment
* guarantees of start_vcn and end_vcn, no need to round up here.
*/
unsigned int nr_pages = (end_vcn - start_vcn) <<
vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
unsigned int xpage, max_page, cur_page, cur_ofs, i;
unsigned int cb_clusters, cb_max_ofs;
int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
struct page **pages;
unsigned char xpage_done = 0;
ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = "
"%i.", index, cb_size, nr_pages);
/*
* Bad things happen if we get here for anything that is not an
* unnamed $DATA attribute.
*/
BUG_ON(ni->type != AT_DATA);
BUG_ON(ni->name_len);
pages = kmalloc(nr_pages * sizeof(struct page *), GFP_NOFS);
/* Allocate memory to store the buffer heads we need. */
bhs_size = cb_size / block_size * sizeof(struct buffer_head *);
bhs = kmalloc(bhs_size, GFP_NOFS);
if (unlikely(!pages || !bhs)) {
kfree(bhs);
kfree(pages);
unlock_page(page);
ntfs_error(vol->sb, "Failed to allocate internal buffers.");
return -ENOMEM;
}
/*
* We have already been given one page, this is the one we must do.
* Once again, the alignment guarantees keep it simple.
*/
offset = start_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
xpage = index - offset;
pages[xpage] = page;
/*
* The remaining pages need to be allocated and inserted into the page
* cache, alignment guarantees keep all the below much simpler. (-8
*/
read_lock_irqsave(&ni->size_lock, flags);
i_size = i_size_read(VFS_I(ni));
initialized_size = ni->initialized_size;
read_unlock_irqrestore(&ni->size_lock, flags);
max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
offset;
/* Is the page fully outside i_size? (truncate in progress) */
if (xpage >= max_page) {
kfree(bhs);
kfree(pages);
zero_user(page, 0, PAGE_CACHE_SIZE);
ntfs_debug("Compressed read outside i_size - truncated?");
SetPageUptodate(page);
unlock_page(page);
return 0;
}
if (nr_pages < max_page)
max_page = nr_pages;
for (i = 0; i < max_page; i++, offset++) {
if (i != xpage)
pages[i] = grab_cache_page_nowait(mapping, offset);
page = pages[i];
if (page) {
/*
* We only (re)read the page if it isn't already read
* in and/or dirty or we would be losing data or at
* least wasting our time.
*/
if (!PageDirty(page) && (!PageUptodate(page) ||
PageError(page))) {
ClearPageError(page);
kmap(page);
continue;
}
unlock_page(page);
page_cache_release(page);
pages[i] = NULL;
}
}
/*
* We have the runlist, and all the destination pages we need to fill.
* Now read the first compression block.
*/
cur_page = 0;
cur_ofs = 0;
cb_clusters = ni->itype.compressed.block_clusters;
do_next_cb:
nr_cbs--;
nr_bhs = 0;
/* Read all cb buffer heads one cluster at a time. */
rl = NULL;
for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
vcn++) {
bool is_retry = false;
if (!rl) {
lock_retry_remap:
down_read(&ni->runlist.lock);
rl = ni->runlist.rl;
}
if (likely(rl != NULL)) {
/* Seek to element containing target vcn. */
while (rl->length && rl[1].vcn <= vcn)
rl++;
lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
} else
lcn = LCN_RL_NOT_MAPPED;
ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
(unsigned long long)vcn,
(unsigned long long)lcn);
if (lcn < 0) {
/*
* When we reach the first sparse cluster we have
* finished with the cb.
*/
if (lcn == LCN_HOLE)
break;
if (is_retry || lcn != LCN_RL_NOT_MAPPED)
goto rl_err;
is_retry = true;
/*
* Attempt to map runlist, dropping lock for the
* duration.
*/
up_read(&ni->runlist.lock);
if (!ntfs_map_runlist(ni, vcn))
goto lock_retry_remap;
goto map_rl_err;
}
block = lcn << vol->cluster_size_bits >> block_size_bits;
/* Read the lcn from device in chunks of block_size bytes. */
max_block = block + (vol->cluster_size >> block_size_bits);
do {
ntfs_debug("block = 0x%x.", block);
if (unlikely(!(bhs[nr_bhs] = sb_getblk(sb, block))))
goto getblk_err;
nr_bhs++;
} while (++block < max_block);
}
/* Release the lock if we took it. */
if (rl)
up_read(&ni->runlist.lock);
/* Setup and initiate io on all buffer heads. */
for (i = 0; i < nr_bhs; i++) {
struct buffer_head *tbh = bhs[i];
if (!trylock_buffer(tbh))
continue;
if (unlikely(buffer_uptodate(tbh))) {
unlock_buffer(tbh);
continue;
}
get_bh(tbh);
tbh->b_end_io = end_buffer_read_sync;
submit_bh(READ, tbh);
}
/* Wait for io completion on all buffer heads. */
for (i = 0; i < nr_bhs; i++) {
struct buffer_head *tbh = bhs[i];
if (buffer_uptodate(tbh))
continue;
wait_on_buffer(tbh);
/*
* We need an optimization barrier here, otherwise we start
* hitting the below fixup code when accessing a loopback
* mounted ntfs partition. This indicates either there is a
* race condition in the loop driver or, more likely, gcc
* overoptimises the code without the barrier and it doesn't
* do the Right Thing(TM).
*/
barrier();
if (unlikely(!buffer_uptodate(tbh))) {
ntfs_warning(vol->sb, "Buffer is unlocked but not "
"uptodate! Unplugging the disk queue "
"and rescheduling.");
get_bh(tbh);
blk_run_address_space(mapping);
schedule();
put_bh(tbh);
if (unlikely(!buffer_uptodate(tbh)))
goto read_err;
ntfs_warning(vol->sb, "Buffer is now uptodate. Good.");
}
}
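/*
* Editor's note: the canonical lock/submit/wait pattern that the two
* loops above perform in bulk, shown standalone for a single block.
* A hedged, illustrative sketch only, not part of the original file;
* sb_bread() provides much the same service.
*/
#if 0
static struct buffer_head *read_one_block(struct super_block *sb,
sector_t block)
{
struct buffer_head *bh = sb_getblk(sb, block);
if (!bh)
return NULL;
if (buffer_uptodate(bh))
return bh;
lock_buffer(bh);
if (buffer_uptodate(bh)) {
unlock_buffer(bh);
return bh;
}
/* end_buffer_read_sync() unlocks the buffer and drops this ref. */
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
submit_bh(READ, bh);
wait_on_buffer(bh);
if (unlikely(!buffer_uptodate(bh))) {
brelse(bh);
return NULL;
}
return bh;
}
#endif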
/*
* Get the compression buffer. We must not sleep any more
* until we are finished with it.
*/
spin_lock(&ntfs_cb_lock);
cb = ntfs_compression_buffer;
BUG_ON(!cb);
cb_pos = cb;
cb_end = cb + cb_size;
/* Copy the buffer heads into the contiguous buffer. */
for (i = 0; i < nr_bhs; i++) {
memcpy(cb_pos, bhs[i]->b_data, block_size);
cb_pos += block_size;
}
/* Just a precaution. */
if (cb_pos + 2 <= cb + cb_size)
*(u16*)cb_pos = 0;
/* Reset cb_pos back to the beginning. */
cb_pos = cb;
/* We now have both source (if present) and destination. */
ntfs_debug("Successfully read the compression block.");
/* The last page and maximum offset within it for the current cb. */
cb_max_page = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size;
cb_max_ofs = cb_max_page & ~PAGE_CACHE_MASK;
cb_max_page >>= PAGE_CACHE_SHIFT;
/* Catch end of file inside a compression block. */
if (cb_max_page > max_page)
cb_max_page = max_page;
if (vcn == start_vcn - cb_clusters) {
/* Sparse cb, zero out page range overlapping the cb. */
ntfs_debug("Found sparse compression block.");
/* We can sleep from now on, so we drop lock. */
spin_unlock(&ntfs_cb_lock);
if (cb_max_ofs)
cb_max_page--;
for (; cur_page < cb_max_page; cur_page++) {
page = pages[cur_page];
if (page) {
/*
* FIXME: Using clear_page() will become wrong
* when we get PAGE_CACHE_SIZE != PAGE_SIZE but
* for now there is no problem.
*/
if (likely(!cur_ofs))
clear_page(page_address(page));
else
memset(page_address(page) + cur_ofs, 0,
PAGE_CACHE_SIZE -
cur_ofs);
flush_dcache_page(page);
kunmap(page);
SetPageUptodate(page);
unlock_page(page);
if (cur_page == xpage)
xpage_done = 1;
else
page_cache_release(page);
pages[cur_page] = NULL;
}
cb_pos += PAGE_CACHE_SIZE - cur_ofs;
cur_ofs = 0;
if (cb_pos >= cb_end)
break;
}
/* If we have a partial final page, deal with it now. */
if (cb_max_ofs && cb_pos < cb_end) {
page = pages[cur_page];
if (page)
memset(page_address(page) + cur_ofs, 0,
cb_max_ofs - cur_ofs);
/*
* No need to update cb_pos at this stage:
* cb_pos += cb_max_ofs - cur_ofs;
*/
cur_ofs = cb_max_ofs;
}
} else if (vcn == start_vcn) {
/* We can't sleep so we need two stages. */
unsigned int cur2_page = cur_page;
unsigned int cur_ofs2 = cur_ofs;
u8 *cb_pos2 = cb_pos;
ntfs_debug("Found uncompressed compression block.");
/* Uncompressed cb, copy it to the destination pages. */
/*
* TODO: As a big optimization, we could detect this case
* before we read all the pages and use block_read_full_page()
* on all full pages instead (we still have to treat partial
* pages especially but at least we are getting rid of the
* synchronous io for the majority of pages).
* Or if we choose not to do the read-ahead/-behind stuff, we
* could just return block_read_full_page(pages[xpage]) as long
* as PAGE_CACHE_SIZE <= cb_size.
*/
if (cb_max_ofs)
cb_max_page--;
/* First stage: copy data into destination pages. */
for (; cur_page < cb_max_page; cur_page++) {
page = pages[cur_page];
if (page)
memcpy(page_address(page) + cur_ofs, cb_pos,
PAGE_CACHE_SIZE - cur_ofs);
cb_pos += PAGE_CACHE_SIZE - cur_ofs;
cur_ofs = 0;
if (cb_pos >= cb_end)
break;
}
/* If we have a partial final page, deal with it now. */
if (cb_max_ofs && cb_pos < cb_end) {
page = pages[cur_page];
if (page)
memcpy(page_address(page) + cur_ofs, cb_pos,
cb_max_ofs - cur_ofs);
cb_pos += cb_max_ofs - cur_ofs;
cur_ofs = cb_max_ofs;
}
/* We can sleep from now on, so drop lock. */
spin_unlock(&ntfs_cb_lock);
/* Second stage: finalize pages. */
for (; cur2_page < cb_max_page; cur2_page++) {
page = pages[cur2_page];
if (page) {
/*
* If we are outside the initialized size, zero
* the out of bounds page range.
*/
handle_bounds_compressed_page(page, i_size,
initialized_size);
flush_dcache_page(page);
kunmap(page);
SetPageUptodate(page);
unlock_page(page);
if (cur2_page == xpage)
xpage_done = 1;
else
page_cache_release(page);
pages[cur2_page] = NULL;
}
cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2;
cur_ofs2 = 0;
if (cb_pos2 >= cb_end)
break;
}
} else {
/* Compressed cb, decompress it into the destination page(s). */
unsigned int prev_cur_page = cur_page;
ntfs_debug("Found compressed compression block.");
err = ntfs_decompress(pages, &cur_page, &cur_ofs,
cb_max_page, cb_max_ofs, xpage, &xpage_done,
cb_pos, cb_size - (cb_pos - cb), i_size,
initialized_size);
/*
* We can sleep from now on, lock already dropped by
* ntfs_decompress().
*/
if (err) {
ntfs_error(vol->sb, "ntfs_decompress() failed in inode "
"0x%lx with error code %i. Skipping "
"this compression block.",
ni->mft_no, -err);
/* Release the unfinished pages. */
for (; prev_cur_page < cur_page; prev_cur_page++) {
page = pages[prev_cur_page];
if (page) {
flush_dcache_page(page);
kunmap(page);
unlock_page(page);
if (prev_cur_page != xpage)
page_cache_release(page);
pages[prev_cur_page] = NULL;
}
}
}
}
/* Release the buffer heads. */
for (i = 0; i < nr_bhs; i++)
brelse(bhs[i]);
/* Do we have more work to do? */
if (nr_cbs)
goto do_next_cb;
/* We no longer need the list of buffer heads. */
kfree(bhs);
/* Clean up if we have any pages left. Should never happen. */
for (cur_page = 0; cur_page < max_page; cur_page++) {
page = pages[cur_page];
if (page) {
ntfs_error(vol->sb, "Still have pages left! "
"Terminating them with extreme "
"prejudice. Inode 0x%lx, page index "
"0x%lx.", ni->mft_no, page->index);
flush_dcache_page(page);
kunmap(page);
unlock_page(page);
if (cur_page != xpage)
page_cache_release(page);
pages[cur_page] = NULL;
}
}
/* We no longer need the list of pages. */
kfree(pages);
/* If we have completed the requested page, we return success. */
if (likely(xpage_done))
return 0;
ntfs_debug("Failed. Returning error code %s.", err == -EOVERFLOW ?
"EOVERFLOW" : (!err ? "EIO" : "unknown error"));
return err < 0 ? err : -EIO;
read_err:
ntfs_error(vol->sb, "IO error while reading compressed data.");
/* Release the buffer heads. */
for (i = 0; i < nr_bhs; i++)
brelse(bhs[i]);
goto err_out;
map_rl_err:
ntfs_error(vol->sb, "ntfs_map_runlist() failed. Cannot read "
"compression block.");
goto err_out;
rl_err:
up_read(&ni->runlist.lock);
ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn() failed. Cannot read "
"compression block.");
goto err_out;
getblk_err:
up_read(&ni->runlist.lock);
ntfs_error(vol->sb, "getblk() failed. Cannot read compression block.");
err_out:
kfree(bhs);
for (i = cur_page; i < max_page; i++) {
page = pages[i];
if (page) {
flush_dcache_page(page);
kunmap(page);
unlock_page(page);
if (i != xpage)
page_cache_release(page);
}
}
kfree(pages);
return -EIO;
}
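/*
* Editor's note: a worked example of the cb_max_page / cb_max_ofs
* geometry computed in the function above. Hedged and illustrative
* only; it assumes 4 KiB pages (PAGE_CACHE_SHIFT == 12) and the usual
* 64 KiB NTFS compression block, neither of which the code requires.
*/
#if 0
static void ntfs_cb_geometry_example(void)
{
unsigned long cur_page = 3, cur_ofs = 0x200, cb_size = 0x10000;
unsigned long end, cb_max_page, cb_max_ofs;
/* Absolute byte offset of the first byte past the current cb. */
end = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size;
/* Split it back into a page index and an intra-page offset. */
cb_max_ofs = end & ~PAGE_CACHE_MASK;
cb_max_page = end >> PAGE_CACHE_SHIFT;
/* Here end = 0x13200, so cb_max_page = 0x13 and cb_max_ofs = 0x200. */
}
#endif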
| gpl-2.0 |
KylinUI/android_kernel_samsung_hlte | arch/arm/mach-msm/scm-pas.c | 1564 | 6249 | /* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "scm-pas: " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <mach/scm.h>
#include <mach/socinfo.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#include "scm-pas.h"
#define PAS_INIT_IMAGE_CMD 1
#define PAS_MEM_SETUP_CMD 2
#define PAS_AUTH_AND_RESET_CMD 5
#define PAS_SHUTDOWN_CMD 6
#define PAS_IS_SUPPORTED_CMD 7
enum scm_clock_ids {
BUS_CLK = 0,
CORE_CLK,
IFACE_CLK,
CORE_CLK_SRC,
NUM_CLKS
};
static const char * const scm_clock_names[NUM_CLKS] = {
[BUS_CLK] = "bus_clk",
[CORE_CLK] = "core_clk",
[IFACE_CLK] = "iface_clk",
[CORE_CLK_SRC] = "core_clk_src",
};
static struct clk *scm_clocks[NUM_CLKS];
static struct msm_bus_paths scm_pas_bw_tbl[] = {
{
.vectors = (struct msm_bus_vectors[]){
{
.src = MSM_BUS_MASTER_SPS,
.dst = MSM_BUS_SLAVE_EBI_CH0,
},
},
.num_paths = 1,
},
{
.vectors = (struct msm_bus_vectors[]){
{
.src = MSM_BUS_MASTER_SPS,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ib = 492 * 8 * 1000000UL,
.ab = 492 * 8 * 100000UL,
},
},
.num_paths = 1,
},
};
static struct msm_bus_scale_pdata scm_pas_bus_pdata = {
.usecase = scm_pas_bw_tbl,
.num_usecases = ARRAY_SIZE(scm_pas_bw_tbl),
.name = "scm_pas",
};
static uint32_t scm_perf_client;
static DEFINE_MUTEX(scm_pas_bw_mutex);
static int scm_pas_bw_count;
static int scm_pas_enable_bw(void)
{
int ret = 0, i;
if (!scm_perf_client)
return -EINVAL;
mutex_lock(&scm_pas_bw_mutex);
if (!scm_pas_bw_count) {
ret = msm_bus_scale_client_update_request(scm_perf_client, 1);
if (ret)
goto err_bus;
scm_pas_bw_count++;
}
for (i = 0; i < NUM_CLKS; i++)
if (clk_prepare_enable(scm_clocks[i])) {
/* Report clock failure as an error, not success. */
ret = -EIO;
goto err_clk;
}
mutex_unlock(&scm_pas_bw_mutex);
return ret;
err_clk:
pr_err("clk prepare_enable failed (%s)\n", scm_clock_names[i]);
for (i--; i >= 0; i--)
clk_disable_unprepare(scm_clocks[i]);
err_bus:
pr_err("bandwidth request failed (%d)\n", ret);
msm_bus_scale_client_update_request(scm_perf_client, 0);
mutex_unlock(&scm_pas_bw_mutex);
return ret;
}
static void scm_pas_disable_bw(void)
{
int i;
mutex_lock(&scm_pas_bw_mutex);
if (scm_pas_bw_count-- == 1) {
msm_bus_scale_client_update_request(scm_perf_client, 0);
}
for (i = NUM_CLKS - 1; i >= 0; i--)
clk_disable_unprepare(scm_clocks[i]);
mutex_unlock(&scm_pas_bw_mutex);
}
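/*
* Editor's note: scm_pas_enable_bw()/scm_pas_disable_bw() are meant to
* bracket every secure call that moves image data, exactly as
* pas_init_image() and pas_auth_and_reset() below do. A hedged usage
* sketch; do_secure_work() is a hypothetical placeholder.
*/
#if 0
static int scm_pas_guarded_call_example(void)
{
int ret = scm_pas_enable_bw();
if (ret)
return ret;
ret = do_secure_work(); /* hypothetical secure-world call */
scm_pas_disable_bw(); /* always undo the bus/clock vote */
return ret;
}
#endif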
int pas_init_image(enum pas_id id, const u8 *metadata, size_t size)
{
int ret;
struct pas_init_image_req {
u32 proc;
u32 image_addr;
} request;
u32 scm_ret = 0;
void *mdata_buf;
dma_addr_t mdata_phys;
DEFINE_DMA_ATTRS(attrs);
ret = scm_pas_enable_bw();
if (ret)
return ret;
dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
mdata_buf = dma_alloc_attrs(NULL, size, &mdata_phys, GFP_KERNEL,
&attrs);
if (!mdata_buf) {
pr_err("Allocation for metadata failed.\n");
/* Undo the bandwidth/clock vote taken above before bailing out. */
scm_pas_disable_bw();
return -ENOMEM;
}
memcpy(mdata_buf, metadata, size);
request.proc = id;
request.image_addr = mdata_phys;
ret = scm_call(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD, &request,
sizeof(request), &scm_ret, sizeof(scm_ret));
dma_free_attrs(NULL, size, mdata_buf, mdata_phys, &attrs);
scm_pas_disable_bw();
if (ret)
return ret;
return scm_ret;
}
EXPORT_SYMBOL(pas_init_image);
int pas_mem_setup(enum pas_id id, u32 start_addr, u32 len)
{
int ret;
struct pas_init_image_req {
u32 proc;
u32 start_addr;
u32 len;
} request;
u32 scm_ret = 0;
request.proc = id;
request.start_addr = start_addr;
request.len = len;
ret = scm_call(SCM_SVC_PIL, PAS_MEM_SETUP_CMD, &request,
sizeof(request), &scm_ret, sizeof(scm_ret));
if (ret)
return ret;
return scm_ret;
}
EXPORT_SYMBOL(pas_mem_setup);
int pas_auth_and_reset(enum pas_id id)
{
int ret;
u32 proc = id, scm_ret = 0;
ret = scm_pas_enable_bw();
if (ret)
return ret;
ret = scm_call(SCM_SVC_PIL, PAS_AUTH_AND_RESET_CMD, &proc,
sizeof(proc), &scm_ret, sizeof(scm_ret));
if (ret)
scm_ret = ret;
scm_pas_disable_bw();
return scm_ret;
}
EXPORT_SYMBOL(pas_auth_and_reset);
int pas_shutdown(enum pas_id id)
{
int ret;
u32 proc = id, scm_ret = 0;
ret = scm_call(SCM_SVC_PIL, PAS_SHUTDOWN_CMD, &proc, sizeof(proc),
&scm_ret, sizeof(scm_ret));
if (ret)
return ret;
return scm_ret;
}
EXPORT_SYMBOL(pas_shutdown);
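/*
* Editor's note: a hedged sketch of how a peripheral image loader
* might chain the exported helpers above when booting a subsystem.
* Illustrative only; PAS_Q6 is one plausible pas_id, and fw_start /
* fw_len are hypothetical placeholders for the firmware region.
*/
#if 0
static int pil_boot_example(const u8 *metadata, size_t mdt_size,
u32 fw_start, u32 fw_len)
{
int ret;
if (pas_supported(PAS_Q6) <= 0)
return -EOPNOTSUPP;
ret = pas_init_image(PAS_Q6, metadata, mdt_size);
if (ret)
return ret;
ret = pas_mem_setup(PAS_Q6, fw_start, fw_len);
if (ret)
return ret;
/* Firmware segments would be copied into the region here. */
return pas_auth_and_reset(PAS_Q6);
}
#endif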
static bool secure_pil = true;
module_param(secure_pil, bool, S_IRUGO);
MODULE_PARM_DESC(secure_pil, "Use secure PIL");
int pas_supported(enum pas_id id)
{
int ret;
u32 periph = id, ret_val = 0;
if (!secure_pil)
return 0;
/*
* 8660 SCM doesn't support querying secure PIL support so just return
* true if not overridden on the command line.
*/
if (cpu_is_msm8x60())
return 1;
if (scm_is_call_available(SCM_SVC_PIL, PAS_IS_SUPPORTED_CMD) <= 0)
return 0;
ret = scm_call(SCM_SVC_PIL, PAS_IS_SUPPORTED_CMD, &periph,
sizeof(periph), &ret_val, sizeof(ret_val));
if (ret)
return ret;
return ret_val;
}
EXPORT_SYMBOL(pas_supported);
void scm_pas_init(enum msm_bus_fabric_master_type id)
{
int i, rate;
static int is_inited;
if (is_inited)
return;
for (i = 0; i < NUM_CLKS; i++) {
scm_clocks[i] = clk_get_sys("scm", scm_clock_names[i]);
if (IS_ERR(scm_clocks[i]))
scm_clocks[i] = NULL;
}
/* Fail silently if this clock is not supported */
rate = clk_round_rate(scm_clocks[CORE_CLK_SRC], 1);
clk_set_rate(scm_clocks[CORE_CLK_SRC], rate);
scm_pas_bw_tbl[0].vectors[0].src = id;
scm_pas_bw_tbl[1].vectors[0].src = id;
clk_set_rate(scm_clocks[BUS_CLK], 64000000);
scm_perf_client = msm_bus_scale_register_client(&scm_pas_bus_pdata);
if (!scm_perf_client)
pr_warn("unable to register bus client\n");
is_inited = 1;
}
| gpl-2.0 |
TipsyOs-Devices/android_kernel_samsung_trlte | drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 2076 | 43974 | /*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
#include "qlcnic_sriov.h"
#include "qlcnic.h"
#include <linux/types.h>
#define QLCNIC_SRIOV_VF_MAX_MAC 1
#define QLC_VF_MIN_TX_RATE 100
#define QLC_VF_MAX_TX_RATE 9999
static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
struct qlcnic_sriov_cmd_handler {
int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *);
};
struct qlcnic_sriov_fw_cmd_handler {
u32 cmd;
int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *);
};
static int qlcnic_sriov_pf_set_vport_info(struct qlcnic_adapter *adapter,
struct qlcnic_info *npar_info,
u16 vport_id)
{
struct qlcnic_cmd_args cmd;
int err;
if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO))
return -ENOMEM;
cmd.req.arg[1] = (vport_id << 16) | 0x1;
cmd.req.arg[2] = npar_info->bit_offsets;
cmd.req.arg[2] |= npar_info->min_tx_bw << 16;
cmd.req.arg[3] = npar_info->max_tx_bw | (npar_info->max_tx_ques << 16);
cmd.req.arg[4] = npar_info->max_tx_mac_filters;
cmd.req.arg[4] |= npar_info->max_rx_mcast_mac_filters << 16;
cmd.req.arg[5] = npar_info->max_rx_ucast_mac_filters |
(npar_info->max_rx_ip_addr << 16);
cmd.req.arg[6] = npar_info->max_rx_lro_flow |
(npar_info->max_rx_status_rings << 16);
cmd.req.arg[7] = npar_info->max_rx_buf_rings |
(npar_info->max_rx_ques << 16);
cmd.req.arg[8] = npar_info->max_tx_vlan_keys;
cmd.req.arg[8] |= npar_info->max_local_ipv6_addrs << 16;
cmd.req.arg[9] = npar_info->max_remote_ipv6_addrs;
err = qlcnic_issue_cmd(adapter, &cmd);
if (err)
dev_err(&adapter->pdev->dev,
"Failed to set vport info, err=%d\n", err);
qlcnic_free_mbx_args(&cmd);
return err;
}
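/*
* Editor's note: the mailbox words above pack two 16-bit fields per
* 32-bit argument. A minimal sketch of that convention (illustrative
* only; the helper name is hypothetical):
*/
#if 0
static u32 qlc_pack16(u16 lo, u16 hi)
{
/* Low half in bits 0-15, high half in bits 16-31. */
return (u32)lo | ((u32)hi << 16);
}
/* e.g. cmd.req.arg[3] = qlc_pack16(max_tx_bw, max_tx_ques); */
#endif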
static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
struct qlcnic_info *info, u16 func)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_resources *res = &sriov->ff_max;
u32 temp, num_vf_macs, num_vfs, max;
int ret = -EIO, vpid, id;
struct qlcnic_vport *vp;
vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
if (vpid < 0)
return -EINVAL;
num_vfs = sriov->num_vfs;
max = num_vfs + 1;
info->bit_offsets = 0xffff;
info->max_tx_ques = res->num_tx_queues / max;
info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
if (adapter->ahw->pci_func == func) {
temp = res->num_rx_mcast_mac_filters - (num_vfs * num_vf_macs);
info->max_rx_ucast_mac_filters = temp;
temp = res->num_tx_mac_filters - (num_vfs * num_vf_macs);
info->max_tx_mac_filters = temp;
info->min_tx_bw = 0;
info->max_tx_bw = MAX_BW;
} else {
id = qlcnic_sriov_func_to_index(adapter, func);
if (id < 0)
return id;
vp = sriov->vf_info[id].vp;
info->min_tx_bw = vp->min_tx_bw;
info->max_tx_bw = vp->max_tx_bw;
info->max_rx_ucast_mac_filters = num_vf_macs;
info->max_tx_mac_filters = num_vf_macs;
}
info->max_rx_ip_addr = res->num_destip / max;
info->max_rx_status_rings = res->num_rx_status_rings / max;
info->max_rx_buf_rings = res->num_rx_buf_rings / max;
info->max_rx_ques = res->num_rx_queues / max;
info->max_rx_lro_flow = res->num_lro_flows_supported / max;
info->max_tx_vlan_keys = res->num_txvlan_keys;
info->max_local_ipv6_addrs = res->max_local_ipv6_addrs;
info->max_remote_ipv6_addrs = res->max_remote_ipv6_addrs;
ret = qlcnic_sriov_pf_set_vport_info(adapter, info, vpid);
if (ret)
return ret;
return 0;
}
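/*
* Editor's note: a worked example of the divide-by-(num_vfs + 1)
* sharing rule above, under assumed firmware maxima. The numbers are
* illustrative only.
*/
#if 0
static void qlc_res_split_example(void)
{
u32 num_vfs = 7, max = num_vfs + 1; /* PF plus 7 VFs */
u32 fw_rx_queues = 32, fw_tx_mac_filters = 64;
/* Every function, PF included, gets an equal queue share: 4. */
u32 per_func_rx_queues = fw_rx_queues / max;
/* Each VF gets QLCNIC_SRIOV_VF_MAX_MAC (1) MAC filter and the PF
* keeps the remainder: 64 - 7 * 1 = 57. */
u32 pf_tx_mac_filters =
fw_tx_mac_filters - num_vfs * QLCNIC_SRIOV_VF_MAX_MAC;
(void)per_func_rx_queues;
(void)pf_tx_mac_filters;
}
#endif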
static void qlcnic_sriov_pf_set_ff_max_res(struct qlcnic_adapter *adapter,
struct qlcnic_info *info)
{
struct qlcnic_resources *ff_max = &adapter->ahw->sriov->ff_max;
ff_max->num_tx_mac_filters = info->max_tx_mac_filters;
ff_max->num_rx_ucast_mac_filters = info->max_rx_ucast_mac_filters;
ff_max->num_rx_mcast_mac_filters = info->max_rx_mcast_mac_filters;
ff_max->num_txvlan_keys = info->max_tx_vlan_keys;
ff_max->num_rx_queues = info->max_rx_ques;
ff_max->num_tx_queues = info->max_tx_ques;
ff_max->num_lro_flows_supported = info->max_rx_lro_flow;
ff_max->num_destip = info->max_rx_ip_addr;
ff_max->num_rx_buf_rings = info->max_rx_buf_rings;
ff_max->num_rx_status_rings = info->max_rx_status_rings;
ff_max->max_remote_ipv6_addrs = info->max_remote_ipv6_addrs;
ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs;
}
static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
struct qlcnic_info *npar_info)
{
int err;
struct qlcnic_cmd_args cmd;
if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO))
return -ENOMEM;
cmd.req.arg[1] = 0x2;
err = qlcnic_issue_cmd(adapter, &cmd);
if (err) {
dev_err(&adapter->pdev->dev,
"Failed to get PF info, err=%d\n", err);
goto out;
}
npar_info->total_pf = cmd.rsp.arg[2] & 0xff;
npar_info->total_rss_engines = (cmd.rsp.arg[2] >> 8) & 0xff;
npar_info->max_vports = MSW(cmd.rsp.arg[2]);
npar_info->max_tx_ques = LSW(cmd.rsp.arg[3]);
npar_info->max_tx_mac_filters = MSW(cmd.rsp.arg[3]);
npar_info->max_rx_mcast_mac_filters = LSW(cmd.rsp.arg[4]);
npar_info->max_rx_ucast_mac_filters = MSW(cmd.rsp.arg[4]);
npar_info->max_rx_ip_addr = LSW(cmd.rsp.arg[5]);
npar_info->max_rx_lro_flow = MSW(cmd.rsp.arg[5]);
npar_info->max_rx_status_rings = LSW(cmd.rsp.arg[6]);
npar_info->max_rx_buf_rings = MSW(cmd.rsp.arg[6]);
npar_info->max_rx_ques = LSW(cmd.rsp.arg[7]);
npar_info->max_tx_vlan_keys = MSW(cmd.rsp.arg[7]);
npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]);
npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]);
qlcnic_sriov_pf_set_ff_max_res(adapter, npar_info);
dev_info(&adapter->pdev->dev,
"\n\ttotal_pf: %d,\n"
"\n\ttotal_rss_engines: %d max_vports: %d max_tx_ques %d,\n"
"\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
"\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
"\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
"\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
"\tmax_local_ipv6_addrs: %d, max_remote_ipv6_addrs: %d\n",
npar_info->total_pf, npar_info->total_rss_engines,
npar_info->max_vports, npar_info->max_tx_ques,
npar_info->max_tx_mac_filters,
npar_info->max_rx_mcast_mac_filters,
npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
npar_info->max_remote_ipv6_addrs);
out:
qlcnic_free_mbx_args(&cmd);
return err;
}
static void qlcnic_sriov_pf_reset_vport_handle(struct qlcnic_adapter *adapter,
u8 func)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_vport *vp;
int index;
if (adapter->ahw->pci_func == func) {
sriov->vp_handle = 0;
} else {
index = qlcnic_sriov_func_to_index(adapter, func);
if (index < 0)
return;
vp = sriov->vf_info[index].vp;
vp->handle = 0;
}
}
static void qlcnic_sriov_pf_set_vport_handle(struct qlcnic_adapter *adapter,
u16 vport_handle, u8 func)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_vport *vp;
int index;
if (adapter->ahw->pci_func == func) {
sriov->vp_handle = vport_handle;
} else {
index = qlcnic_sriov_func_to_index(adapter, func);
if (index < 0)
return;
vp = sriov->vf_info[index].vp;
vp->handle = vport_handle;
}
}
static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *adapter,
u8 func)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_vf_info *vf_info;
int index;
if (adapter->ahw->pci_func == func) {
return sriov->vp_handle;
} else {
index = qlcnic_sriov_func_to_index(adapter, func);
if (index >= 0) {
vf_info = &sriov->vf_info[index];
return vf_info->vp->handle;
}
}
return -EINVAL;
}
static int qlcnic_sriov_pf_config_vport(struct qlcnic_adapter *adapter,
u8 flag, u16 func)
{
struct qlcnic_cmd_args cmd;
int ret;
int vpid;
if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_VPORT))
return -ENOMEM;
if (flag) {
cmd.req.arg[3] = func << 8;
} else {
vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
if (vpid < 0) {
ret = -EINVAL;
goto out;
}
cmd.req.arg[3] = ((vpid & 0xffff) << 8) | 1;
}
ret = qlcnic_issue_cmd(adapter, &cmd);
if (ret) {
dev_err(&adapter->pdev->dev,
"Failed %s vport, err %d for func 0x%x\n",
(flag ? "enable" : "disable"), ret, func);
goto out;
}
if (flag) {
vpid = cmd.rsp.arg[2] & 0xffff;
qlcnic_sriov_pf_set_vport_handle(adapter, vpid, func);
} else {
qlcnic_sriov_pf_reset_vport_handle(adapter, func);
}
out:
qlcnic_free_mbx_args(&cmd);
return ret;
}
static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
u8 enable)
{
struct qlcnic_cmd_args cmd;
int err;
err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
if (err)
return err;
cmd.req.arg[1] = 0x4;
if (enable)
cmd.req.arg[1] |= BIT_16;
err = qlcnic_issue_cmd(adapter, &cmd);
if (err)
dev_err(&adapter->pdev->dev,
"Failed to configure VLAN filtering, err=%d\n", err);
qlcnic_free_mbx_args(&cmd);
return err;
}
static int qlcnic_sriov_pf_cfg_eswitch(struct qlcnic_adapter *adapter,
u8 func, u8 enable)
{
struct qlcnic_cmd_args cmd;
int err = -EIO;
if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH))
return -ENOMEM;
cmd.req.arg[0] |= (3 << 29);
cmd.req.arg[1] = ((func & 0xf) << 2) | BIT_6 | BIT_1;
if (enable)
cmd.req.arg[1] |= BIT_0;
err = qlcnic_issue_cmd(adapter, &cmd);
if (err != QLCNIC_RCODE_SUCCESS) {
dev_err(&adapter->pdev->dev,
"Failed to enable sriov eswitch%d\n", err);
err = -EIO;
}
qlcnic_free_mbx_args(&cmd);
return err;
}
static void qlcnic_sriov_pf_del_flr_queue(struct qlcnic_adapter *adapter)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_back_channel *bc = &sriov->bc;
int i;
for (i = 0; i < sriov->num_vfs; i++)
cancel_work_sync(&sriov->vf_info[i].flr_work);
destroy_workqueue(bc->bc_flr_wq);
}
static int qlcnic_sriov_pf_create_flr_queue(struct qlcnic_adapter *adapter)
{
struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
struct workqueue_struct *wq;
wq = create_singlethread_workqueue("qlcnic-flr");
if (wq == NULL) {
dev_err(&adapter->pdev->dev, "Cannot create FLR workqueue\n");
return -ENOMEM;
}
bc->bc_flr_wq = wq;
return 0;
}
void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter)
{
u8 func = adapter->ahw->pci_func;
if (!qlcnic_sriov_enable_check(adapter))
return;
qlcnic_sriov_pf_del_flr_queue(adapter);
qlcnic_sriov_cfg_bc_intr(adapter, 0);
qlcnic_sriov_pf_config_vport(adapter, 0, func);
qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0);
qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0);
__qlcnic_sriov_cleanup(adapter);
adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
}
void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter)
{
if (!qlcnic_sriov_pf_check(adapter))
return;
if (!qlcnic_sriov_enable_check(adapter))
return;
pci_disable_sriov(adapter->pdev);
netdev_info(adapter->netdev,
"SR-IOV is disabled successfully on port %d\n",
adapter->portnum);
}
static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
qlcnic_sriov_pf_disable(adapter);
qlcnic_sriov_pf_cleanup(adapter);
/*
* After disabling SR-IOV, re-initialize the driver in default mode and
* configure the opmode based on the function's op_mode.
*/
if (qlcnic_83xx_configure_opmode(adapter))
return -EIO;
if (netif_running(netdev))
__qlcnic_up(adapter, netdev);
return 0;
}
static int qlcnic_sriov_pf_init(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_info nic_info, pf_info, vp_info;
int err;
u8 func = ahw->pci_func;
if (!qlcnic_sriov_enable_check(adapter))
return 0;
err = qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 1);
if (err)
return err;
err = qlcnic_sriov_pf_cfg_eswitch(adapter, func, 1);
if (err)
goto disable_vlan_filtering;
err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
if (err)
goto disable_eswitch;
err = qlcnic_sriov_get_pf_info(adapter, &pf_info);
if (err)
goto delete_vport;
err = qlcnic_get_nic_info(adapter, &nic_info, func);
if (err)
goto delete_vport;
err = qlcnic_sriov_pf_cal_res_limit(adapter, &vp_info, func);
if (err)
goto delete_vport;
err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
if (err)
goto delete_vport;
ahw->physical_port = (u8) nic_info.phys_port;
ahw->switch_mode = nic_info.switch_mode;
ahw->max_mtu = nic_info.max_mtu;
ahw->capabilities = nic_info.capabilities;
ahw->nic_mode = QLC_83XX_SRIOV_MODE;
return err;
delete_vport:
qlcnic_sriov_pf_config_vport(adapter, 0, func);
disable_eswitch:
qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0);
disable_vlan_filtering:
qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0);
return err;
}
static int qlcnic_sriov_pf_enable(struct qlcnic_adapter *adapter, int num_vfs)
{
int err;
if (!qlcnic_sriov_enable_check(adapter))
return 0;
err = pci_enable_sriov(adapter->pdev, num_vfs);
if (err)
qlcnic_sriov_pf_cleanup(adapter);
return err;
}
static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
int num_vfs)
{
int err = 0;
set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
adapter->ahw->op_mode = QLCNIC_SRIOV_PF_FUNC;
err = qlcnic_sriov_init(adapter, num_vfs);
if (err)
goto clear_op_mode;
err = qlcnic_sriov_pf_create_flr_queue(adapter);
if (err)
goto sriov_cleanup;
err = qlcnic_sriov_pf_init(adapter);
if (err)
goto del_flr_queue;
err = qlcnic_sriov_pf_enable(adapter, num_vfs);
return err;
del_flr_queue:
qlcnic_sriov_pf_del_flr_queue(adapter);
sriov_cleanup:
__qlcnic_sriov_cleanup(adapter);
clear_op_mode:
clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
return err;
}
static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
{
struct net_device *netdev = adapter->netdev;
int err;
if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
netdev_err(netdev,
"SR-IOV cannot be enabled, when legacy interrupts are enabled\n");
return -EIO;
}
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
err = __qlcnic_pci_sriov_enable(adapter, num_vfs);
if (err) {
netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
adapter->portnum);
err = -EIO;
if (qlcnic_83xx_configure_opmode(adapter))
goto error;
} else {
netdev_info(netdev,
"SR-IOV is enabled successfully on port %d\n",
adapter->portnum);
/* Return number of vfs enabled */
err = num_vfs;
}
if (netif_running(netdev))
__qlcnic_up(adapter, netdev);
error:
return err;
}
int qlcnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(dev);
int err;
if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
return -EBUSY;
if (num_vfs == 0)
err = qlcnic_pci_sriov_disable(adapter);
else
err = qlcnic_pci_sriov_enable(adapter, num_vfs);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return err;
}
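/*
* Editor's note: qlcnic_pci_sriov_configure() is wired up as the
* driver's .sriov_configure hook, which the PCI core calls when
* userspace writes a VF count, typically via something like
* "echo 4 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs" (path
* illustrative). Writing 0 tears the VFs back down through
* qlcnic_pci_sriov_disable().
*/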
static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func)
{
struct qlcnic_cmd_args cmd;
struct qlcnic_vport *vp;
int err, id;
id = qlcnic_sriov_func_to_index(adapter, func);
if (id < 0)
return id;
vp = adapter->ahw->sriov->vf_info[id].vp;
err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
if (err)
return err;
cmd.req.arg[1] = 0x3 | func << 16;
if (vp->vlan_mode == QLC_PVID_MODE) {
cmd.req.arg[2] |= BIT_6;
cmd.req.arg[3] |= vp->vlan << 8;
}
err = qlcnic_issue_cmd(adapter, &cmd);
if (err)
dev_err(&adapter->pdev->dev, "Failed to set ACL, err=%d\n",
err);
qlcnic_free_mbx_args(&cmd);
return err;
}
static int qlcnic_sriov_set_vf_vport_info(struct qlcnic_adapter *adapter,
u16 func)
{
struct qlcnic_info defvp_info;
int err;
err = qlcnic_sriov_pf_cal_res_limit(adapter, &defvp_info, func);
if (err)
return -EIO;
err = qlcnic_sriov_set_vf_acl(adapter, func);
if (err)
return err;
return 0;
}
static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
int err;
u16 func = vf->pci_func;
cmd->rsp.arg[0] = trans->req_hdr->cmd_op;
cmd->rsp.arg[0] |= (1 << 16);
if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
if (!err) {
err = qlcnic_sriov_set_vf_vport_info(adapter, func);
if (err)
qlcnic_sriov_pf_config_vport(adapter, 0, func);
}
} else {
err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
}
if (err)
goto err_out;
cmd->rsp.arg[0] |= (1 << 25);
if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
set_bit(QLC_BC_VF_STATE, &vf->state);
else
clear_bit(QLC_BC_VF_STATE, &vf->state);
return err;
err_out:
cmd->rsp.arg[0] |= (2 << 25);
return err;
}
static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
struct qlcnic_vport *vp,
u16 func, u16 vlan, u8 op)
{
struct qlcnic_cmd_args cmd;
struct qlcnic_macvlan_mbx mv;
u8 *addr;
int err;
u32 *buf;
int vpid;
if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN))
return -ENOMEM;
vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
if (vpid < 0) {
err = -EINVAL;
goto out;
}
if (vlan)
op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL);
cmd.req.arg[1] = op | (1 << 8) | (3 << 6);
cmd.req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
addr = vp->mac;
mv.vlan = vlan;
mv.mac_addr0 = addr[0];
mv.mac_addr1 = addr[1];
mv.mac_addr2 = addr[2];
mv.mac_addr3 = addr[3];
mv.mac_addr4 = addr[4];
mv.mac_addr5 = addr[5];
buf = &cmd.req.arg[2];
memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
err = qlcnic_issue_cmd(adapter, &cmd);
if (err)
dev_err(&adapter->pdev->dev,
"MAC-VLAN %s to CAM failed, err=%d.\n",
((op == 1) ? "add " : "delete "), err);
out:
qlcnic_free_mbx_args(&cmd);
return err;
}
static int qlcnic_sriov_validate_create_rx_ctx(struct qlcnic_cmd_args *cmd)
{
if ((cmd->req.arg[0] >> 29) != 0x3)
return -EINVAL;
return 0;
}
static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = tran->vf;
struct qlcnic_adapter *adapter = vf->adapter;
struct qlcnic_rcv_mbx_out *mbx_out;
int err;
u16 vlan;
err = qlcnic_sriov_validate_create_rx_ctx(cmd);
if (err) {
cmd->rsp.arg[0] |= (0x6 << 25);
return err;
}
cmd->req.arg[6] = vf->vp->handle;
err = qlcnic_issue_cmd(adapter, cmd);
vlan = vf->vp->vlan;
if (!err) {
mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1];
vf->rx_ctx_id = mbx_out->ctx_id;
qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
vlan, QLCNIC_MAC_ADD);
} else {
vf->rx_ctx_id = 0;
}
return err;
}
static int qlcnic_sriov_pf_mac_address_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = trans->vf;
u8 type, *mac;
type = cmd->req.arg[1];
switch (type) {
case QLCNIC_SET_STATION_MAC:
case QLCNIC_SET_FAC_DEF_MAC:
cmd->rsp.arg[0] = (2 << 25);
break;
case QLCNIC_GET_CURRENT_MAC:
cmd->rsp.arg[0] = (1 << 25);
mac = vf->vp->mac;
cmd->rsp.arg[2] = mac[1] | ((mac[0] << 8) & 0xff00);
cmd->rsp.arg[1] = mac[5] | ((mac[4] << 8) & 0xff00) |
((mac[3]) << 16 & 0xff0000) |
((mac[2]) << 24 & 0xff000000);
}
return 0;
}
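/*
* Editor's note: a sketch of the response packing used above, which
* spreads the 6-byte MAC across two mailbox words. Illustrative only;
* the helper name is hypothetical.
*/
#if 0
static void qlc_mac_pack_example(const u8 mac[6], u32 *arg1, u32 *arg2)
{
/* arg[2] carries mac[0..1], arg[1] carries mac[2..5]. */
*arg2 = mac[1] | ((u32)mac[0] << 8);
*arg1 = mac[5] | ((u32)mac[4] << 8) |
((u32)mac[3] << 16) | ((u32)mac[2] << 24);
}
#endif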
static int qlcnic_sriov_validate_create_tx_ctx(struct qlcnic_cmd_args *cmd)
{
if ((cmd->req.arg[0] >> 29) != 0x3)
return -EINVAL;
return 0;
}
static int qlcnic_sriov_pf_create_tx_ctx_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
struct qlcnic_tx_mbx_out *mbx_out;
int err;
err = qlcnic_sriov_validate_create_tx_ctx(cmd);
if (err) {
cmd->rsp.arg[0] |= (0x6 << 25);
return err;
}
cmd->req.arg[5] |= vf->vp->handle << 16;
err = qlcnic_issue_cmd(adapter, cmd);
if (!err) {
mbx_out = (struct qlcnic_tx_mbx_out *)&cmd->rsp.arg[2];
vf->tx_ctx_id = mbx_out->ctx_id;
} else {
vf->tx_ctx_id = 0;
}
return err;
}
static int qlcnic_sriov_validate_del_rx_ctx(struct qlcnic_vf_info *vf,
struct qlcnic_cmd_args *cmd)
{
if ((cmd->req.arg[0] >> 29) != 0x3)
return -EINVAL;
if ((cmd->req.arg[1] & 0xffff) != vf->rx_ctx_id)
return -EINVAL;
return 0;
}
static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
int err;
u16 vlan;
err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd);
if (err) {
cmd->rsp.arg[0] |= (0x6 << 25);
return err;
}
vlan = vf->vp->vlan;
qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
vlan, QLCNIC_MAC_DEL);
cmd->req.arg[1] |= vf->vp->handle << 16;
err = qlcnic_issue_cmd(adapter, cmd);
if (!err)
vf->rx_ctx_id = 0;
return err;
}
static int qlcnic_sriov_validate_del_tx_ctx(struct qlcnic_vf_info *vf,
struct qlcnic_cmd_args *cmd)
{
if ((cmd->req.arg[0] >> 29) != 0x3)
return -EINVAL;
if ((cmd->req.arg[1] & 0xffff) != vf->tx_ctx_id)
return -EINVAL;
return 0;
}
static int qlcnic_sriov_pf_del_tx_ctx_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
int err;
err = qlcnic_sriov_validate_del_tx_ctx(vf, cmd);
if (err) {
cmd->rsp.arg[0] |= (0x6 << 25);
return err;
}
cmd->req.arg[1] |= vf->vp->handle << 16;
err = qlcnic_issue_cmd(adapter, cmd);
if (!err)
vf->tx_ctx_id = 0;
return err;
}
static int qlcnic_sriov_validate_cfg_lro(struct qlcnic_vf_info *vf,
struct qlcnic_cmd_args *cmd)
{
if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
return -EINVAL;
return 0;
}
static int qlcnic_sriov_pf_cfg_lro_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
int err;
err = qlcnic_sriov_validate_cfg_lro(vf, cmd);
if (err) {
cmd->rsp.arg[0] |= (0x6 << 25);
return err;
}
err = qlcnic_issue_cmd(adapter, cmd);
return err;
}
static int qlcnic_sriov_pf_cfg_ip_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
int err = -EIO;
cmd->req.arg[1] |= vf->vp->handle << 16;
cmd->req.arg[1] |= BIT_31;
err = qlcnic_issue_cmd(adapter, cmd);
return err;
}
static int qlcnic_sriov_validate_cfg_intrpt(struct qlcnic_vf_info *vf,
struct qlcnic_cmd_args *cmd)
{
if (((cmd->req.arg[1] >> 8) & 0xff) != vf->pci_func)
return -EINVAL;
if (!(cmd->req.arg[1] & BIT_16))
return -EINVAL;
if ((cmd->req.arg[1] & 0xff) != 0x1)
return -EINVAL;
return 0;
}
static int qlcnic_sriov_pf_cfg_intrpt_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
int err;
err = qlcnic_sriov_validate_cfg_intrpt(vf, cmd);
if (err)
cmd->rsp.arg[0] |= (0x6 << 25);
else
err = qlcnic_issue_cmd(adapter, cmd);
return err;
}
static int qlcnic_sriov_validate_mtu(struct qlcnic_adapter *adapter,
struct qlcnic_vf_info *vf,
struct qlcnic_cmd_args *cmd)
{
if (cmd->req.arg[1] != vf->rx_ctx_id)
return -EINVAL;
if (cmd->req.arg[2] > adapter->ahw->max_mtu)
return -EINVAL;
return 0;
}
static int qlcnic_sriov_pf_set_mtu_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
int err;
err = qlcnic_sriov_validate_mtu(adapter, vf, cmd);
if (err)
cmd->rsp.arg[0] |= (0x6 << 25);
else
err = qlcnic_issue_cmd(adapter, cmd);
return err;
}
static int qlcnic_sriov_validate_get_nic_info(struct qlcnic_vf_info *vf,
struct qlcnic_cmd_args *cmd)
{
if (cmd->req.arg[1] & BIT_31) {
if (((cmd->req.arg[1] >> 16) & 0x7fff) != vf->pci_func)
return -EINVAL;
} else {
cmd->req.arg[1] |= vf->vp->handle << 16;
}
return 0;
}
static int qlcnic_sriov_pf_get_nic_info_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
int err;
err = qlcnic_sriov_validate_get_nic_info(vf, cmd);
if (err) {
cmd->rsp.arg[0] |= (0x6 << 25);
return err;
}
err = qlcnic_issue_cmd(adapter, cmd);
return err;
}
static int qlcnic_sriov_validate_cfg_rss(struct qlcnic_vf_info *vf,
struct qlcnic_cmd_args *cmd)
{
if (cmd->req.arg[1] != vf->rx_ctx_id)
return -EINVAL;
return 0;
}
static int qlcnic_sriov_pf_cfg_rss_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
int err;
err = qlcnic_sriov_validate_cfg_rss(vf, cmd);
if (err)
cmd->rsp.arg[0] |= (0x6 << 25);
else
err = qlcnic_issue_cmd(adapter, cmd);
return err;
}
static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter,
struct qlcnic_vf_info *vf,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
u16 ctx_id, pkts, time;
ctx_id = cmd->req.arg[1] >> 16;
pkts = cmd->req.arg[2] & 0xffff;
time = cmd->req.arg[2] >> 16;
if (ctx_id != vf->rx_ctx_id)
return -EINVAL;
if (pkts > coal->rx_packets)
return -EINVAL;
if (time < coal->rx_time_us)
return -EINVAL;
return 0;
}
static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = tran->vf;
struct qlcnic_adapter *adapter = vf->adapter;
int err;
err = qlcnic_sriov_validate_cfg_intrcoal(adapter, vf, cmd);
if (err) {
cmd->rsp.arg[0] |= (0x6 << 25);
return err;
}
err = qlcnic_issue_cmd(adapter, cmd);
return err;
}
static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
struct qlcnic_vf_info *vf,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_macvlan_mbx *macvlan;
struct qlcnic_vport *vp = vf->vp;
u8 op, new_op;
if (!(cmd->req.arg[1] & BIT_8))
return -EINVAL;
cmd->req.arg[1] |= (vf->vp->handle << 16);
cmd->req.arg[1] |= BIT_31;
macvlan = (struct qlcnic_macvlan_mbx *)&cmd->req.arg[2];
if (!(macvlan->mac_addr0 & BIT_0)) {
dev_err(&adapter->pdev->dev,
"MAC address change is not allowed from VF %d",
vf->pci_func);
return -EINVAL;
}
if (vp->vlan_mode == QLC_PVID_MODE) {
op = cmd->req.arg[1] & 0x7;
cmd->req.arg[1] &= ~0x7;
new_op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
cmd->req.arg[3] |= vp->vlan << 16;
cmd->req.arg[1] |= new_op;
}
return 0;
}
static int qlcnic_sriov_pf_cfg_macvlan_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
int err;
err = qlcnic_sriov_validate_cfg_macvlan(adapter, vf, cmd);
if (err) {
cmd->rsp.arg[0] |= (0x6 << 25);
return err;
}
err = qlcnic_issue_cmd(adapter, cmd);
return err;
}
static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf,
struct qlcnic_cmd_args *cmd)
{
if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
return -EINVAL;
return 0;
}
static int qlcnic_sriov_pf_linkevent_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
int err;
err = qlcnic_sriov_validate_linkevent(vf, cmd);
if (err) {
cmd->rsp.arg[0] |= (0x6 << 25);
return err;
}
err = qlcnic_issue_cmd(adapter, cmd);
return err;
}
static int qlcnic_sriov_pf_cfg_promisc_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
int err;
cmd->req.arg[1] |= vf->vp->handle << 16;
cmd->req.arg[1] |= BIT_31;
err = qlcnic_issue_cmd(adapter, cmd);
return err;
}
static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_vport *vp = vf->vp;
u8 cmd_op, mode = vp->vlan_mode;
cmd_op = trans->req_hdr->cmd_op;
cmd->rsp.arg[0] = (cmd_op & 0xffff) | 14 << 16 | 1 << 25;
switch (mode) {
case QLC_GUEST_VLAN_MODE:
cmd->rsp.arg[1] = mode | 1 << 8;
cmd->rsp.arg[2] = 1 << 16;
break;
case QLC_PVID_MODE:
cmd->rsp.arg[1] = mode | 1 << 8 | vp->vlan << 16;
break;
}
return 0;
}
static int qlcnic_sriov_pf_del_guest_vlan(struct qlcnic_adapter *adapter,
struct qlcnic_vf_info *vf)
{
struct qlcnic_vport *vp = vf->vp;
if (!vp->vlan)
return -EINVAL;
if (!vf->rx_ctx_id) {
vp->vlan = 0;
return 0;
}
qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
vp->vlan, QLCNIC_MAC_DEL);
vp->vlan = 0;
qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
0, QLCNIC_MAC_ADD);
return 0;
}
static int qlcnic_sriov_pf_add_guest_vlan(struct qlcnic_adapter *adapter,
struct qlcnic_vf_info *vf,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vport *vp = vf->vp;
int err = -EIO;
if (vp->vlan)
return err;
if (!vf->rx_ctx_id) {
vp->vlan = cmd->req.arg[1] >> 16;
return 0;
}
err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
0, QLCNIC_MAC_DEL);
if (err)
return err;
vp->vlan = cmd->req.arg[1] >> 16;
err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
vp->vlan, QLCNIC_MAC_ADD);
if (err) {
qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
0, QLCNIC_MAC_ADD);
vp->vlan = 0;
}
return err;
}
static int qlcnic_sriov_pf_cfg_guest_vlan_cmd(struct qlcnic_bc_trans *tran,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = tran->vf;
struct qlcnic_adapter *adapter = vf->adapter;
struct qlcnic_vport *vp = vf->vp;
int err = -EIO;
u8 op;
if (vp->vlan_mode != QLC_GUEST_VLAN_MODE) {
cmd->rsp.arg[0] |= 2 << 25;
return err;
}
op = cmd->req.arg[1] & 0xf;
if (op)
err = qlcnic_sriov_pf_add_guest_vlan(adapter, vf, cmd);
else
err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf);
cmd->rsp.arg[0] |= err ? 2 << 25 : 1 << 25;
return err;
}
static const int qlcnic_pf_passthru_supp_cmds[] = {
QLCNIC_CMD_GET_STATISTICS,
QLCNIC_CMD_GET_PORT_CONFIG,
QLCNIC_CMD_GET_LINK_STATUS,
};
static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = {
[QLCNIC_BC_CMD_CHANNEL_INIT] = {&qlcnic_sriov_pf_channel_cfg_cmd},
[QLCNIC_BC_CMD_CHANNEL_TERM] = {&qlcnic_sriov_pf_channel_cfg_cmd},
[QLCNIC_BC_CMD_GET_ACL] = {&qlcnic_sriov_pf_get_acl_cmd},
[QLCNIC_BC_CMD_CFG_GUEST_VLAN] = {&qlcnic_sriov_pf_cfg_guest_vlan_cmd},
};
static const struct qlcnic_sriov_fw_cmd_handler qlcnic_pf_fw_cmd_hdlr[] = {
{QLCNIC_CMD_CREATE_RX_CTX, qlcnic_sriov_pf_create_rx_ctx_cmd},
{QLCNIC_CMD_CREATE_TX_CTX, qlcnic_sriov_pf_create_tx_ctx_cmd},
{QLCNIC_CMD_MAC_ADDRESS, qlcnic_sriov_pf_mac_address_cmd},
{QLCNIC_CMD_DESTROY_RX_CTX, qlcnic_sriov_pf_del_rx_ctx_cmd},
{QLCNIC_CMD_DESTROY_TX_CTX, qlcnic_sriov_pf_del_tx_ctx_cmd},
{QLCNIC_CMD_CONFIGURE_HW_LRO, qlcnic_sriov_pf_cfg_lro_cmd},
{QLCNIC_CMD_CONFIGURE_IP_ADDR, qlcnic_sriov_pf_cfg_ip_cmd},
{QLCNIC_CMD_CONFIG_INTRPT, qlcnic_sriov_pf_cfg_intrpt_cmd},
{QLCNIC_CMD_SET_MTU, qlcnic_sriov_pf_set_mtu_cmd},
{QLCNIC_CMD_GET_NIC_INFO, qlcnic_sriov_pf_get_nic_info_cmd},
{QLCNIC_CMD_CONFIGURE_RSS, qlcnic_sriov_pf_cfg_rss_cmd},
{QLCNIC_CMD_CONFIG_INTR_COAL, qlcnic_sriov_pf_cfg_intrcoal_cmd},
{QLCNIC_CMD_CONFIG_MAC_VLAN, qlcnic_sriov_pf_cfg_macvlan_cmd},
{QLCNIC_CMD_GET_LINK_EVENT, qlcnic_sriov_pf_linkevent_cmd},
{QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, qlcnic_sriov_pf_cfg_promisc_cmd},
};
void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_bc_trans *trans,
struct qlcnic_cmd_args *cmd)
{
u8 size, cmd_op;
cmd_op = trans->req_hdr->cmd_op;
if (trans->req_hdr->op_type == QLC_BC_CMD) {
size = ARRAY_SIZE(qlcnic_pf_bc_cmd_hdlr);
if (cmd_op < size) {
qlcnic_pf_bc_cmd_hdlr[cmd_op].fn(trans, cmd);
return;
}
} else {
int i;
size = ARRAY_SIZE(qlcnic_pf_fw_cmd_hdlr);
for (i = 0; i < size; i++) {
if (cmd_op == qlcnic_pf_fw_cmd_hdlr[i].cmd) {
qlcnic_pf_fw_cmd_hdlr[i].fn(trans, cmd);
return;
}
}
size = ARRAY_SIZE(qlcnic_pf_passthru_supp_cmds);
for (i = 0; i < size; i++) {
if (cmd_op == qlcnic_pf_passthru_supp_cmds[i]) {
qlcnic_issue_cmd(adapter, cmd);
return;
}
}
}
cmd->rsp.arg[0] |= (0x9 << 25);
}
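/*
* Editor's note: the handlers dispatched above report status in bits
* 25 and up of rsp.arg[0]: 1 = success, 2 = failure, 0x6 = invalid
* request and 0x9 = unsupported command, as the open-coded shifts
* show. A hedged sketch with hypothetical macro names (the driver
* itself open-codes these values):
*/
#if 0
#define QLC_BC_RSP_OK (0x1 << 25)
#define QLC_BC_RSP_FAIL (0x2 << 25)
#define QLC_BC_RSP_INVALID (0x6 << 25)
#define QLC_BC_RSP_UNKNOWN_CMD (0x9 << 25)
#endif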
void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter,
u32 *int_id)
{
u16 vpid;
vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
adapter->ahw->pci_func);
*int_id |= vpid;
}
void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter,
u32 *int_id)
{
u16 vpid;
vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
adapter->ahw->pci_func);
*int_id |= vpid << 16;
}
void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter,
u32 *int_id)
{
int vpid;
vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
adapter->ahw->pci_func);
*int_id |= vpid << 16;
}
void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter,
u32 *int_id)
{
u16 vpid;
vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
adapter->ahw->pci_func);
*int_id |= vpid << 16;
}
void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter,
u32 *int_id)
{
u16 vpid;
vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
adapter->ahw->pci_func);
*int_id |= (vpid << 16) | BIT_31;
}
void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter,
u32 *int_id)
{
u16 vpid;
vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
adapter->ahw->pci_func);
*int_id |= (vpid << 16) | BIT_31;
}
void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
u32 *int_id)
{
u16 vpid;
vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
adapter->ahw->pci_func);
*int_id |= (vpid << 16) | BIT_31;
}
static void qlcnic_sriov_del_rx_ctx(struct qlcnic_adapter *adapter,
struct qlcnic_vf_info *vf)
{
struct qlcnic_cmd_args cmd;
int vpid;
if (!vf->rx_ctx_id)
return;
if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX))
return;
vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
if (vpid >= 0) {
cmd.req.arg[1] = vf->rx_ctx_id | (vpid & 0xffff) << 16;
if (qlcnic_issue_cmd(adapter, &cmd))
dev_err(&adapter->pdev->dev,
"Failed to delete Tx ctx in firmware for func 0x%x\n",
vf->pci_func);
else
vf->rx_ctx_id = 0;
}
qlcnic_free_mbx_args(&cmd);
}
static void qlcnic_sriov_del_tx_ctx(struct qlcnic_adapter *adapter,
struct qlcnic_vf_info *vf)
{
struct qlcnic_cmd_args cmd;
int vpid;
if (!vf->tx_ctx_id)
return;
if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX))
return;
vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
if (vpid >= 0) {
cmd.req.arg[1] |= vf->tx_ctx_id | (vpid & 0xffff) << 16;
if (qlcnic_issue_cmd(adapter, &cmd))
dev_err(&adapter->pdev->dev,
"Failed to delete Tx ctx in firmware for func 0x%x\n",
vf->pci_func);
else
vf->tx_ctx_id = 0;
}
qlcnic_free_mbx_args(&cmd);
}
static int qlcnic_sriov_add_act_list_irqsave(struct qlcnic_sriov *sriov,
struct qlcnic_vf_info *vf,
struct qlcnic_bc_trans *trans)
{
struct qlcnic_trans_list *t_list = &vf->rcv_act;
unsigned long flag;
spin_lock_irqsave(&t_list->lock, flag);
__qlcnic_sriov_add_act_list(sriov, vf, trans);
spin_unlock_irqrestore(&t_list->lock, flag);
return 0;
}
static void __qlcnic_sriov_process_flr(struct qlcnic_vf_info *vf)
{
struct qlcnic_adapter *adapter = vf->adapter;
qlcnic_sriov_cleanup_list(&vf->rcv_pend);
cancel_work_sync(&vf->trans_work);
qlcnic_sriov_cleanup_list(&vf->rcv_act);
if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) {
qlcnic_sriov_del_tx_ctx(adapter, vf);
qlcnic_sriov_del_rx_ctx(adapter, vf);
}
qlcnic_sriov_pf_config_vport(adapter, 0, vf->pci_func);
clear_bit(QLC_BC_VF_FLR, &vf->state);
if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) {
qlcnic_sriov_add_act_list_irqsave(adapter->ahw->sriov, vf,
vf->flr_trans);
clear_bit(QLC_BC_VF_SOFT_FLR, &vf->state);
vf->flr_trans = NULL;
}
}
static void qlcnic_sriov_pf_process_flr(struct work_struct *work)
{
struct qlcnic_vf_info *vf;
vf = container_of(work, struct qlcnic_vf_info, flr_work);
__qlcnic_sriov_process_flr(vf);
}
static void qlcnic_sriov_schedule_flr(struct qlcnic_sriov *sriov,
struct qlcnic_vf_info *vf,
work_func_t func)
{
if (test_bit(__QLCNIC_RESETTING, &vf->adapter->state))
return;
INIT_WORK(&vf->flr_work, func);
queue_work(sriov->bc.bc_flr_wq, &vf->flr_work);
}
static void qlcnic_sriov_handle_soft_flr(struct qlcnic_adapter *adapter,
struct qlcnic_bc_trans *trans,
struct qlcnic_vf_info *vf)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
set_bit(QLC_BC_VF_FLR, &vf->state);
clear_bit(QLC_BC_VF_STATE, &vf->state);
set_bit(QLC_BC_VF_SOFT_FLR, &vf->state);
vf->flr_trans = trans;
qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
netdev_info(adapter->netdev, "Software FLR for PCI func %d\n",
vf->pci_func);
}
bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *adapter,
struct qlcnic_bc_trans *trans,
struct qlcnic_vf_info *vf)
{
struct qlcnic_bc_hdr *hdr = trans->req_hdr;
if ((hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
(hdr->op_type == QLC_BC_CMD) &&
test_bit(QLC_BC_VF_STATE, &vf->state)) {
qlcnic_sriov_handle_soft_flr(adapter, trans, vf);
return true;
}
return false;
}
void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
struct qlcnic_vf_info *vf)
{
struct net_device *dev = vf->adapter->netdev;
if (!test_and_clear_bit(QLC_BC_VF_STATE, &vf->state)) {
clear_bit(QLC_BC_VF_FLR, &vf->state);
return;
}
if (test_and_set_bit(QLC_BC_VF_FLR, &vf->state)) {
netdev_info(dev, "FLR for PCI func %d in progress\n",
vf->pci_func);
return;
}
qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func);
}
void qlcnic_sriov_pf_reset(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_sriov *sriov = ahw->sriov;
struct qlcnic_vf_info *vf;
u16 num_vfs = sriov->num_vfs;
int i;
for (i = 0; i < num_vfs; i++) {
vf = &sriov->vf_info[i];
vf->rx_ctx_id = 0;
vf->tx_ctx_id = 0;
cancel_work_sync(&vf->flr_work);
__qlcnic_sriov_process_flr(vf);
clear_bit(QLC_BC_VF_STATE, &vf->state);
}
qlcnic_sriov_pf_reset_vport_handle(adapter, ahw->pci_func);
QLCWRX(ahw, QLCNIC_MBX_INTR_ENBL, (ahw->num_msix - 1) << 8);
}
int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
int err;
if (!qlcnic_sriov_enable_check(adapter))
return 0;
ahw->op_mode = QLCNIC_SRIOV_PF_FUNC;
err = qlcnic_sriov_pf_init(adapter);
if (err)
return err;
dev_info(&adapter->pdev->dev, "%s: op_mode %d\n",
__func__, ahw->op_mode);
return err;
}
int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
int i, num_vfs = sriov->num_vfs;
struct qlcnic_vf_info *vf_info;
u8 *curr_mac;
if (!qlcnic_sriov_pf_check(adapter))
return -EOPNOTSUPP;
if (!is_valid_ether_addr(mac) || vf >= num_vfs)
return -EINVAL;
if (!compare_ether_addr(adapter->mac_addr, mac)) {
netdev_err(netdev, "MAC address is already in use by the PF\n");
return -EINVAL;
}
for (i = 0; i < num_vfs; i++) {
vf_info = &sriov->vf_info[i];
if (!compare_ether_addr(vf_info->vp->mac, mac)) {
netdev_err(netdev,
"MAC address is already in use by VF %d\n",
i);
return -EINVAL;
}
}
vf_info = &sriov->vf_info[vf];
curr_mac = vf_info->vp->mac;
if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
netdev_err(netdev,
"MAC address change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
vf);
return -EOPNOTSUPP;
}
memcpy(curr_mac, mac, netdev->addr_len);
netdev_info(netdev, "MAC Address %pM is configured for VF %d\n",
mac, vf);
return 0;
}
int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_vf_info *vf_info;
struct qlcnic_info nic_info;
struct qlcnic_vport *vp;
u16 vpid;
if (!qlcnic_sriov_pf_check(adapter))
return -EOPNOTSUPP;
if (vf >= sriov->num_vfs)
return -EINVAL;
if (tx_rate > QLC_VF_MAX_TX_RATE ||
(tx_rate && tx_rate < QLC_VF_MIN_TX_RATE)) {
netdev_err(netdev,
"Invalid Tx rate, allowed range is [%d - %d]\n",
QLC_VF_MIN_TX_RATE, QLC_VF_MAX_TX_RATE);
return -EINVAL;
}
/* A tx_rate of 0 means no limit; map it to the 10 Gbps maximum. */
if (tx_rate == 0)
tx_rate = 10000;
vf_info = &sriov->vf_info[vf];
vp = vf_info->vp;
vpid = vp->handle;
if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid))
return -EIO;
nic_info.max_tx_bw = tx_rate / 100;
nic_info.bit_offsets = BIT_0;
if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid))
return -EIO;
}
vp->max_tx_bw = tx_rate / 100;
netdev_info(netdev,
"Setting Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
tx_rate, vp->max_tx_bw, vf);
return 0;
}
int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
u16 vlan, u8 qos)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_vf_info *vf_info;
struct qlcnic_vport *vp;
if (!qlcnic_sriov_pf_check(adapter))
return -EOPNOTSUPP;
if (vf >= sriov->num_vfs || qos > 7)
return -EINVAL;
if (vlan > MAX_VLAN_ID) {
netdev_err(netdev,
"Invalid VLAN ID, allowed range is [0 - %d]\n",
MAX_VLAN_ID);
return -EINVAL;
}
vf_info = &sriov->vf_info[vf];
vp = vf_info->vp;
if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
netdev_err(netdev,
"VLAN change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
vf);
return -EOPNOTSUPP;
}
switch (vlan) {
case 4095:
vp->vlan_mode = QLC_GUEST_VLAN_MODE;
break;
case 0:
vp->vlan_mode = QLC_NO_VLAN_MODE;
vp->vlan = 0;
vp->qos = 0;
break;
default:
vp->vlan_mode = QLC_PVID_MODE;
vp->vlan = vlan;
vp->qos = qos;
}
netdev_info(netdev, "Setting VLAN %d, QoS %d, for VF %d\n",
vlan, qos, vf);
return 0;
}
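/*
* Editor's note: the switch above implements the usual "ip link set
* ... vf N vlan V" semantics: 4095 selects guest VLAN mode, 0 clears
* any VLAN, and 1-4094 pins a port VLAN (PVID) with the given QoS.
* A userspace invocation would look like (illustrative):
* ip link set eth0 vf 2 vlan 100 qos 3
*/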
int qlcnic_sriov_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_vport *vp;
if (!qlcnic_sriov_pf_check(adapter))
return -EOPNOTSUPP;
if (vf >= sriov->num_vfs)
return -EINVAL;
vp = sriov->vf_info[vf].vp;
memcpy(&ivi->mac, vp->mac, ETH_ALEN);
ivi->vlan = vp->vlan;
ivi->qos = vp->qos;
if (vp->max_tx_bw == MAX_BW)
ivi->tx_rate = 0;
else
ivi->tx_rate = vp->max_tx_bw * 100;
ivi->vf = vf;
return 0;
}
| gpl-2.0 |
Split-Screen/android_kernel_nvidia_shieldtablet | drivers/scsi/lpfc/lpfc_attr.c | 2076 | 162084 | /*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*******************************************************************/
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/aer.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_version.h"
#include "lpfc_compat.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#define LPFC_DEF_DEVLOSS_TMO 30
#define LPFC_MIN_DEVLOSS_TMO 1
#define LPFC_MAX_DEVLOSS_TMO 255
/*
* Write key size should be multiple of 4. If write key is changed
* make sure that library write key is also changed.
*/
#define LPFC_REG_WRITE_KEY_SIZE 4
#define LPFC_REG_WRITE_KEY "EMLX"
/**
* lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules
* @incr: integer to convert.
* @hdw: ascii string holding converted integer plus a string terminator.
*
* Description:
* JEDEC Joint Electron Device Engineering Council.
* Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii
* character string. The string is then terminated with a NULL in byte 9.
* Hex 0-9 becomes ascii '0' to '9'.
* Hex a-f becomes ascii 'a' to 'f' (lowercase, 0x61 + nibble - 10).
*
* Notes:
* Coded for 32 bit integers only.
**/
static void
lpfc_jedec_to_ascii(int incr, char hdw[])
{
int i, j;
for (i = 0; i < 8; i++) {
j = (incr & 0xf);
if (j <= 9)
hdw[7 - i] = 0x30 + j;
else
hdw[7 - i] = 0x61 + j - 10;
incr = (incr >> 4);
}
hdw[8] = 0;
return;
}
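/*
 * Worked example (illustrative): lpfc_jedec_to_ascii(0x12345678, hdw)
 * fills hdw with "12345678"; nibbles 0xa-0xf map to 'a'-'f', so
 * lpfc_jedec_to_ascii(0x1a2b0000, hdw) yields "1a2b0000".
 */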
/**
* lpfc_drvr_version_show - Return the Emulex driver string with version number
* @dev: class unused variable.
* @attr: device attribute, not used.
* @buf: on return contains the module description text.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
}
/**
* lpfc_enable_fip_show - Return the fip mode of the HBA
* @dev: class unused variable.
* @attr: device attribute, not used.
* @buf: on return contains the module description text.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
if (phba->hba_flag & HBA_FIP_SUPPORT)
return snprintf(buf, PAGE_SIZE, "1\n");
else
return snprintf(buf, PAGE_SIZE, "0\n");
}
static ssize_t
lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
if (phba->cfg_enable_bg)
if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
return snprintf(buf, PAGE_SIZE, "BlockGuard Enabled\n");
else
return snprintf(buf, PAGE_SIZE,
"BlockGuard Not Supported\n");
else
return snprintf(buf, PAGE_SIZE,
"BlockGuard Disabled\n");
}
static ssize_t
lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)phba->bg_guard_err_cnt);
}
static ssize_t
lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)phba->bg_apptag_err_cnt);
}
static ssize_t
lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)phba->bg_reftag_err_cnt);
}
/**
* lpfc_info_show - Return some pci info about the host in ascii
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains the formatted text from lpfc_info().
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_info_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
}
/**
* lpfc_serialnum_show - Return the hba serial number in ascii
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains the formatted text serial number.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
}
/**
* lpfc_temp_sensor_show - Return the temperature sensor level
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains the formatted support level.
*
* Description:
* Returns a number indicating the temperature sensor level currently
* supported, zero or one in ascii.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support);
}
/**
* lpfc_modeldesc_show - Return the model description of the hba
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains the scsi vpd model description.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc);
}
/**
* lpfc_modelname_show - Return the model name of the hba
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains the scsi vpd model name.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName);
}
/**
* lpfc_programtype_show - Return the program type of the hba
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains the scsi vpd program type.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType);
}
/**
* lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains the Menlo Maintenance sli flag.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%d\n",
(phba->sli.sli_flag & LPFC_MENLO_MAINT));
}
/**
* lpfc_vportnum_show - Return the port number in ascii of the hba
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains scsi vpd program type.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port);
}
/**
* lpfc_fwrev_show - Return the firmware rev running in the hba
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains the scsi vpd program type.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
uint32_t if_type;
uint8_t sli_family;
char fwrev[FW_REV_STR_SIZE];
int len;
lpfc_decode_firmware_rev(phba, fwrev, 1);
if_type = phba->sli4_hba.pc_sli4_params.if_type;
sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
if (phba->sli_rev < LPFC_SLI_REV4)
len = snprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
fwrev, phba->sli_rev);
else
len = snprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
fwrev, phba->sli_rev, if_type, sli_family);
return len;
}
/**
* lpfc_hdw_show - Return the jedec information about the hba
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains the scsi vpd program type.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
{
char hdw[9];
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
lpfc_vpd_t *vp = &phba->vpd;
lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
}
/**
* lpfc_option_rom_version_show - Return the adapter ROM FCode version
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains the ROM and FCode ascii strings.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
}
/**
* lpfc_link_state_show - Return the link state of the port
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains text describing the state of the link.
*
* Notes:
* The switch statement has no default so zero will be returned.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
int len = 0;
switch (phba->link_state) {
case LPFC_LINK_UNKNOWN:
case LPFC_WARM_START:
case LPFC_INIT_START:
case LPFC_INIT_MBX_CMDS:
case LPFC_LINK_DOWN:
case LPFC_HBA_ERROR:
if (phba->hba_flag & LINK_DISABLED)
len += snprintf(buf + len, PAGE_SIZE-len,
"Link Down - User disabled\n");
else
len += snprintf(buf + len, PAGE_SIZE-len,
"Link Down\n");
break;
case LPFC_LINK_UP:
case LPFC_CLEAR_LA:
case LPFC_HBA_READY:
len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
switch (vport->port_state) {
case LPFC_LOCAL_CFG_LINK:
len += snprintf(buf + len, PAGE_SIZE-len,
"Configuring Link\n");
break;
case LPFC_FDISC:
case LPFC_FLOGI:
case LPFC_FABRIC_CFG_LINK:
case LPFC_NS_REG:
case LPFC_NS_QRY:
case LPFC_BUILD_DISC_LIST:
case LPFC_DISC_AUTH:
len += snprintf(buf + len, PAGE_SIZE - len,
"Discovery\n");
break;
case LPFC_VPORT_READY:
len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n");
break;
case LPFC_VPORT_FAILED:
len += snprintf(buf + len, PAGE_SIZE - len, "Failed\n");
break;
case LPFC_VPORT_UNKNOWN:
len += snprintf(buf + len, PAGE_SIZE - len,
"Unknown\n");
break;
}
if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
len += snprintf(buf + len, PAGE_SIZE-len,
" Menlo Maint Mode\n");
else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
if (vport->fc_flag & FC_PUBLIC_LOOP)
len += snprintf(buf + len, PAGE_SIZE-len,
" Public Loop\n");
else
len += snprintf(buf + len, PAGE_SIZE-len,
" Private Loop\n");
} else {
if (vport->fc_flag & FC_FABRIC)
len += snprintf(buf + len, PAGE_SIZE-len,
" Fabric\n");
else
len += snprintf(buf + len, PAGE_SIZE-len,
" Point-2-Point\n");
}
}
return len;
}
/**
* lpfc_sli4_protocol_show - Return the fip mode of the HBA
* @dev: class unused variable.
* @attr: device attribute, not used.
* @buf: on return contains the module description text.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
if (phba->sli_rev < LPFC_SLI_REV4)
return snprintf(buf, PAGE_SIZE, "fc\n");
if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
return snprintf(buf, PAGE_SIZE, "fcoe\n");
if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
return snprintf(buf, PAGE_SIZE, "fc\n");
}
return snprintf(buf, PAGE_SIZE, "unknown\n");
}
/**
* lpfc_link_state_store - Transition the link_state on an HBA port
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: containing the string "up" or "down".
* @count: not used.
*
* Returns:
* -EINVAL if the buffer is not "up" or "down"
* return from link state change function if non-zero
* length of the buf on success
**/
static ssize_t
lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
int status = -EINVAL;
if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
(phba->link_state == LPFC_LINK_DOWN))
status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
(phba->link_state >= LPFC_LINK_UP))
status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT);
if (status == 0)
return strlen(buf);
else
return status;
}
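/*
 * Illustrative sysfs usage, assuming the attribute is exposed under the
 * usual scsi_host class path (hostN is a placeholder):
 *
 *   cat /sys/class/scsi_host/hostN/link_state
 *   echo up   > /sys/class/scsi_host/hostN/link_state   # init a down link
 *   echo down > /sys/class/scsi_host/hostN/link_state   # bring the link down
 */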
/**
* lpfc_num_discovered_ports_show - Return sum of mapped and unmapped ports
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: on return contains the sum of fc mapped and unmapped.
*
* Description:
* Returns the ascii text number of the sum of the fc mapped and unmapped
* remote port counts.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_num_discovered_ports_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
return snprintf(buf, PAGE_SIZE, "%d\n",
vport->fc_map_cnt + vport->fc_unmap_cnt);
}
/**
* lpfc_issue_lip - Misnomer, name carried over from long ago
* @shost: Scsi_Host pointer.
*
* Description:
* Bring the link down gracefully then re-init the link. The firmware will
* re-init the Fibre Channel interface as required. Does not issue a LIP.
*
* Returns:
* -EPERM port offline or management commands are being blocked
* -ENOMEM cannot allocate memory for the mailbox command
* -EIO error sending the mailbox command
* zero for success
**/
static int
lpfc_issue_lip(struct Scsi_Host *shost)
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *pmboxq;
int mbxstatus = MBXERR_ERROR;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
return -EPERM;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq)
return -ENOMEM;
memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
pmboxq->u.mb.mbxOwner = OWN_HOST;
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
if ((mbxstatus == MBX_SUCCESS) &&
(pmboxq->u.mb.mbxStatus == 0 ||
pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
lpfc_init_link(phba, pmboxq, phba->cfg_topology,
phba->cfg_link_speed);
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
phba->fc_ratov * 2);
if ((mbxstatus == MBX_SUCCESS) &&
(pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"2859 SLI authentication is required "
"for INIT_LINK but has not done yet\n");
}
lpfc_set_loopback_flag(phba);
if (mbxstatus != MBX_TIMEOUT)
mempool_free(pmboxq, phba->mbox_mem_pool);
if (mbxstatus == MBXERR_ERROR)
return -EIO;
return 0;
}
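/*
 * Illustrative trigger (assumes this handler is wired to the standard
 * fc_host transport attribute):
 *
 *   echo 1 > /sys/class/fc_host/hostN/issue_lip
 */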
/**
* lpfc_do_offline - Issues a mailbox command to bring the link down
* @phba: lpfc_hba pointer.
* @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL.
*
* Notes:
* Assumes any error from lpfc_do_offline() will be negative.
* Can wait up to 5 seconds for the port ring buffers count
* to reach zero, prints a warning if it is not zero and continues.
* lpfc_workq_post_event() returns zero when it fails to post the event.
*
* Returns:
* -ENOMEM failed to post the event
* -EIO the offline handler reported an error
* zero for success
**/
static int
lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
{
struct completion online_compl;
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
int status = 0;
int cnt = 0;
int i;
int rc;
if (phba->pport->fc_flag & FC_OFFLINE_MODE)
return 0;
init_completion(&online_compl);
rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_OFFLINE_PREP);
if (rc == 0)
return -ENOMEM;
wait_for_completion(&online_compl);
if (status != 0)
return -EIO;
psli = &phba->sli;
/* Wait a little for things to settle down, but not
* long enough for dev loss timeout to expire.
*/
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
while (!list_empty(&pring->txcmplq)) {
msleep(10);
if (cnt++ > 500) { /* 5 secs */
lpfc_printf_log(phba,
KERN_WARNING, LOG_INIT,
"0466 Outstanding IO when "
"bringing Adapter offline\n");
break;
}
}
}
init_completion(&online_compl);
rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
if (rc == 0)
return -ENOMEM;
wait_for_completion(&online_compl);
if (status != 0)
return -EIO;
return 0;
}
/**
* lpfc_selective_reset - Offline then onlines the port
* @phba: lpfc_hba pointer.
*
* Description:
* If the port is configured to allow a reset then the hba is brought
* offline then online.
*
* Notes:
* Assumes any error from lpfc_do_offline() will be negative.
* Do not make this function static.
*
* Returns:
* lpfc_do_offline() return code if not zero
* -EIO reset not configured or error posting the event
* zero for success
**/
int
lpfc_selective_reset(struct lpfc_hba *phba)
{
struct completion online_compl;
int status = 0;
int rc;
if ((!phba->cfg_enable_hba_reset) ||
(phba->pport->fc_flag & FC_OFFLINE_MODE))
return -EACCES;
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
if (status != 0)
return status;
init_completion(&online_compl);
rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_ONLINE);
if (rc == 0)
return -ENOMEM;
wait_for_completion(&online_compl);
if (status != 0)
return -EIO;
return 0;
}
/**
* lpfc_issue_reset - Selectively resets an adapter
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: containing the string "selective".
* @count: unused variable.
*
* Description:
* If the buf contains the string "selective" then lpfc_selective_reset()
* is called to perform the reset.
*
* Notes:
* Assumes any error from lpfc_selective_reset() will be negative.
* If lpfc_selective_reset() returns zero then the length of the buffer
* is returned, which indicates success.
*
* Returns:
* -EINVAL if the buffer does not contain the string "selective"
* length of buf if lpfc_selective_reset() succeeds
* return value of lpfc_selective_reset() if the call fails
**/
static ssize_t
lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
int status = -EINVAL;
if (!phba->cfg_enable_hba_reset)
return -EACCES;
if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
status = phba->lpfc_selective_reset(phba);
if (status == 0)
return strlen(buf);
else
return status;
}
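/*
 * Illustrative usage (hostN is a placeholder); only the string
 * "selective" is accepted:
 *
 *   echo selective > /sys/class/scsi_host/hostN/issue_reset
 */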
/**
* lpfc_sli4_pdev_status_reg_wait - Wait on the pdev status register for readiness
* @phba: lpfc_hba pointer.
*
* Description:
* Waits on the SLI4 interface type-2 sliport status register for
* readiness after performing a firmware reset.
*
* Returns:
* zero for success, -EPERM when the port does not have the privilege to
* perform the reset, -EIO when the port times out recovering from the reset.
*
* Note:
* As the caller will interpret the return code by value, be careful in making
* change or addition to return codes.
**/
int
lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
{
struct lpfc_register portstat_reg = {0};
int i;
msleep(100);
lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
&portstat_reg.word0);
/* verify if privileged for the requested operation */
if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
!bf_get(lpfc_sliport_status_err, &portstat_reg))
return -EPERM;
/* wait for the SLI port firmware to become ready after the firmware reset */
for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
msleep(10);
lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
&portstat_reg.word0);
if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
continue;
if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
continue;
if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
continue;
break;
}
if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
return 0;
else
return -EIO;
}
/**
* lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
* @phba: lpfc_hba pointer.
* @opcode: register access operation (LPFC_FW_DUMP, LPFC_FW_RESET or
* LPFC_DV_RESET).
*
* Description:
* Request SLI4 interface type-2 device to perform a physical register set
* access.
*
* Returns:
* zero for success, a negative error code (-EACCES, -EPERM, -ENOMEM or
* -EIO) otherwise
**/
static ssize_t
lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
{
struct completion online_compl;
struct pci_dev *pdev = phba->pcidev;
uint32_t before_fc_flag;
uint32_t sriov_nr_virtfn;
uint32_t reg_val;
int status = 0, rc = 0;
int job_posted = 1, sriov_err;
if (!phba->cfg_enable_hba_reset)
return -EACCES;
if ((phba->sli_rev < LPFC_SLI_REV4) ||
(bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_2))
return -EPERM;
/* Keep state if we need to restore back */
before_fc_flag = phba->pport->fc_flag;
sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
/* Disable SR-IOV virtual functions if enabled */
if (phba->cfg_sriov_nr_virtfn) {
pci_disable_sriov(pdev);
phba->cfg_sriov_nr_virtfn = 0;
}
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
if (status != 0)
return status;
/* wait for the device to be quiesced before firmware reset */
msleep(100);
reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PDEV_CTL_OFFSET);
if (opcode == LPFC_FW_DUMP)
reg_val |= LPFC_FW_DUMP_REQUEST;
else if (opcode == LPFC_FW_RESET)
reg_val |= LPFC_CTL_PDEV_CTL_FRST;
else if (opcode == LPFC_DV_RESET)
reg_val |= LPFC_CTL_PDEV_CTL_DRST;
writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PDEV_CTL_OFFSET);
/* flush */
readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
/* delay driver action following IF_TYPE_2 reset */
rc = lpfc_sli4_pdev_status_reg_wait(phba);
if (rc == -EPERM) {
/* no privilege for reset */
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3150 No privilege to perform the requested "
"access: x%x\n", reg_val);
} else if (rc == -EIO) {
/* reset failed, there is nothing more we can do */
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3153 Fail to perform the requested "
"access: x%x\n", reg_val);
return rc;
}
/* keep the original port state */
if (before_fc_flag & FC_OFFLINE_MODE)
goto out;
init_completion(&online_compl);
job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_ONLINE);
if (!job_posted)
goto out;
wait_for_completion(&online_compl);
out:
/* in any case, restore the virtual functions enabled as before */
if (sriov_nr_virtfn) {
sriov_err =
lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
if (!sriov_err)
phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
}
/* return proper error code */
if (!rc) {
if (!job_posted)
rc = -ENOMEM;
else if (status)
rc = -EIO;
}
return rc;
}
/**
* lpfc_nport_evt_cnt_show - Return the number of nport events
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: on return contains the ascii number of nport events.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
}
/**
* lpfc_board_mode_show - Return the state of the board
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: on return contains the state of the adapter.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
char *state;
if (phba->link_state == LPFC_HBA_ERROR)
state = "error";
else if (phba->link_state == LPFC_WARM_START)
state = "warm start";
else if (phba->link_state == LPFC_INIT_START)
state = "offline";
else
state = "online";
return snprintf(buf, PAGE_SIZE, "%s\n", state);
}
/**
* lpfc_board_mode_store - Puts the hba in online, offline, warm or error state
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: containing one of the strings "online", "offline", "warm" or "error".
* @count: unused variable.
*
* Returns:
* -EACCES if enable hba reset not enabled
* -EINVAL if the buffer does not contain a valid string (see above)
* -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails
* buf length greater than zero indicates success
**/
static ssize_t
lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct completion online_compl;
char *board_mode_str = NULL;
int status = 0;
int rc;
if (!phba->cfg_enable_hba_reset) {
status = -EACCES;
goto board_mode_out;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"3050 lpfc_board_mode set to %s\n", buf);
init_completion(&online_compl);
if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_ONLINE);
if (rc == 0) {
status = -ENOMEM;
goto board_mode_out;
}
wait_for_completion(&online_compl);
} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
if (phba->sli_rev == LPFC_SLI_REV4)
status = -EINVAL;
else
status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
if (phba->sli_rev == LPFC_SLI_REV4)
status = -EINVAL;
else
status = lpfc_do_offline(phba, LPFC_EVT_KILL);
else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
else
status = -EINVAL;
board_mode_out:
if (!status)
return strlen(buf);
else {
board_mode_str = strchr(buf, '\n');
if (board_mode_str)
*board_mode_str = '\0';
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"3097 Failed \"%s\", status(%d), "
"fc_flag(x%x)\n",
buf, status, phba->pport->fc_flag);
return status;
}
}
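/*
 * Illustrative usage (hostN is a placeholder); the accepted strings are
 * exactly those parsed above:
 *
 *   cat /sys/class/scsi_host/hostN/board_mode
 *   echo offline  > /sys/class/scsi_host/hostN/board_mode
 *   echo online   > /sys/class/scsi_host/hostN/board_mode
 *   echo fw_reset > /sys/class/scsi_host/hostN/board_mode  # SLI4 IF_TYPE_2 only
 */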
/**
* lpfc_get_hba_info - Return various bits of information about the adapter
* @phba: pointer to the adapter structure.
* @mxri: max xri count.
* @axri: available xri count.
* @mrpi: max rpi count.
* @arpi: available rpi count.
* @mvpi: max vpi count.
* @avpi: available vpi count.
*
* Description:
* If an integer pointer for a count is not NULL then the value for that
* count is returned.
*
* Returns:
* zero on error
* one for success
**/
static int
lpfc_get_hba_info(struct lpfc_hba *phba,
uint32_t *mxri, uint32_t *axri,
uint32_t *mrpi, uint32_t *arpi,
uint32_t *mvpi, uint32_t *avpi)
{
struct lpfc_mbx_read_config *rd_config;
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
int rc = 0;
uint32_t max_vpi;
/*
* prevent udev from issuing mailbox commands until the port is
* configured.
*/
if (phba->link_state < LPFC_LINK_DOWN ||
!phba->mbox_mem_pool ||
(phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
return 0;
if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
return 0;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq)
return 0;
memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
pmb = &pmboxq->u.mb;
pmb->mbxCommand = MBX_READ_CONFIG;
pmb->mbxOwner = OWN_HOST;
pmboxq->context1 = NULL;
if (phba->pport->fc_flag & FC_OFFLINE_MODE)
rc = MBX_NOT_FINISHED;
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
if (rc != MBX_TIMEOUT)
mempool_free(pmboxq, phba->mbox_mem_pool);
return 0;
}
if (phba->sli_rev == LPFC_SLI_REV4) {
rd_config = &pmboxq->u.mqe.un.rd_config;
if (mrpi)
*mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
if (arpi)
*arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
phba->sli4_hba.max_cfg_param.rpi_used;
if (mxri)
*mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
if (axri)
*axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
phba->sli4_hba.max_cfg_param.xri_used;
/* Account for differences with SLI-3. Get vpi count from
* mailbox data and subtract one for max vpi value.
*/
max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
(bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
if (mvpi)
*mvpi = max_vpi;
if (avpi)
*avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
} else {
if (mrpi)
*mrpi = pmb->un.varRdConfig.max_rpi;
if (arpi)
*arpi = pmb->un.varRdConfig.avail_rpi;
if (mxri)
*mxri = pmb->un.varRdConfig.max_xri;
if (axri)
*axri = pmb->un.varRdConfig.avail_xri;
if (mvpi)
*mvpi = pmb->un.varRdConfig.max_vpi;
if (avpi)
*avpi = pmb->un.varRdConfig.avail_vpi;
}
mempool_free(pmboxq, phba->mbox_mem_pool);
return 1;
}
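/*
 * Call pattern (illustrative): callers pass NULL for counts they do not
 * need, e.g. fetching only the max and available RPI counts:
 *
 *   uint32_t mrpi, arpi;
 *   if (lpfc_get_hba_info(phba, NULL, NULL, &mrpi, &arpi, NULL, NULL))
 *           used_rpi = mrpi - arpi;
 */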
/**
* lpfc_max_rpi_show - Return maximum rpi
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: on return contains the maximum rpi count in decimal or "Unknown".
*
* Description:
* Calls lpfc_get_hba_info() asking for just the mrpi count.
* If lpfc_get_hba_info() returns zero (failure) the buffer text is set
* to "Unknown" and the buffer length is returned, therefore the caller
* must check for "Unknown" in the buffer to detect a failure.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
uint32_t cnt;
if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
/**
* lpfc_used_rpi_show - Return maximum rpi minus available rpi
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: containing the used rpi count in decimal or "Unknown".
*
* Description:
* Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts.
* If lpfc_get_hba_info() returns zero (failure) the buffer text is set
* to "Unknown" and the buffer length is returned, therefore the caller
* must check for "Unknown" in the buffer to detect a failure.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
uint32_t cnt, acnt;
if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
/**
* lpfc_max_xri_show - Return maximum xri
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: on return contains the maximum xri count in decimal or "Unknown".
*
* Description:
* Calls lpfc_get_hba_info() asking for just the mxri count.
* If lpfc_get_hba_info() returns zero (failure) the buffer text is set
* to "Unknown" and the buffer length is returned, therefore the caller
* must check for "Unknown" in the buffer to detect a failure.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
uint32_t cnt;
if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
/**
* lpfc_used_xri_show - Return maximum xri minus the available xri
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: on return contains the used xri count in decimal or "Unknown".
*
* Description:
* Calls lpfc_get_hba_info() asking for just the mxri and axri counts.
* If lpfc_get_hba_info() returns zero (failure) the buffer text is set
* to "Unknown" and the buffer length is returned, therefore the caller
* must check for "Unknown" in the buffer to detect a failure.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
uint32_t cnt, acnt;
if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
/**
* lpfc_max_vpi_show - Return maximum vpi
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: on return contains the maximum vpi count in decimal or "Unknown".
*
* Description:
* Calls lpfc_get_hba_info() asking for just the mvpi count.
* If lpfc_get_hba_info() returns zero (failure) the buffer text is set
* to "Unknown" and the buffer length is returned, therefore the caller
* must check for "Unknown" in the buffer to detect a failure.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
uint32_t cnt;
if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
/**
* lpfc_used_vpi_show - Return maximum vpi minus the available vpi
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: on return contains the used vpi count in decimal or "Unknown".
*
* Description:
* Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts.
* If lpfc_get_hba_info() returns zero (failure) the buffer text is set
* to "Unknown" and the buffer length is returned, therefore the caller
* must check for "Unknown" in the buffer to detect a failure.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
uint32_t cnt, acnt;
if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
/**
* lpfc_npiv_info_show - Return text about NPIV support for the adapter
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: text that must be interpreted to determine if npiv is supported.
*
* Description:
* Buffer will contain text indicating NPIV is not supported on the port,
* the port is an NPIV physical port, or it is an NPIV virtual port with
* the id of the vport.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
if (!(phba->max_vpi))
return snprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
if (vport->port_type == LPFC_PHYSICAL_PORT)
return snprintf(buf, PAGE_SIZE, "NPIV Physical\n");
return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
}
/**
* lpfc_poll_show - Return text about poll support for the adapter
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: on return contains the cfg_poll in hex.
*
* Notes:
* cfg_poll should be a lpfc_polling_flags type.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_poll_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
}
/**
* lpfc_poll_store - Set the value of cfg_poll for the adapter
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: one or more lpfc_polling_flags values.
* @count: not used.
*
* Notes:
* buf contents converted to integer and checked for a valid value.
*
* Returns:
* -EINVAL if the buffer cannot be converted or is out of range
* length of the buf on success
**/
static ssize_t
lpfc_poll_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
uint32_t creg_val;
uint32_t old_val;
int val = 0;
if (!isdigit(buf[0]))
return -EINVAL;
if (sscanf(buf, "%i", &val) != 1)
return -EINVAL;
if ((val & 0x3) != val)
return -EINVAL;
if (phba->sli_rev == LPFC_SLI_REV4)
val = 0;
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"3051 lpfc_poll changed from %d to %d\n",
phba->cfg_poll, val);
spin_lock_irq(&phba->hbalock);
old_val = phba->cfg_poll;
if (val & ENABLE_FCP_RING_POLLING) {
if ((val & DISABLE_FCP_RING_INT) &&
!(old_val & DISABLE_FCP_RING_INT)) {
if (lpfc_readl(phba->HCregaddr, &creg_val)) {
spin_unlock_irq(&phba->hbalock);
return -EINVAL;
}
creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
lpfc_poll_start_timer(phba);
}
} else if (val != 0x0) {
spin_unlock_irq(&phba->hbalock);
return -EINVAL;
}
if (!(val & DISABLE_FCP_RING_INT) &&
(old_val & DISABLE_FCP_RING_INT))
{
spin_unlock_irq(&phba->hbalock);
del_timer(&phba->fcp_poll_timer);
spin_lock_irq(&phba->hbalock);
if (lpfc_readl(phba->HCregaddr, &creg_val)) {
spin_unlock_irq(&phba->hbalock);
return -EINVAL;
}
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
}
phba->cfg_poll = val;
spin_unlock_irq(&phba->hbalock);
return strlen(buf);
}
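/*
 * Illustrative usage, assuming ENABLE_FCP_RING_POLLING is bit 0x1 and
 * DISABLE_FCP_RING_INT is bit 0x2 as defined in lpfc.h (hostN is a
 * placeholder):
 *
 *   echo 0x1 > /sys/class/scsi_host/hostN/lpfc_poll  # poll, interrupts on
 *   echo 0x3 > /sys/class/scsi_host/hostN/lpfc_poll  # poll, ring interrupt off
 *   echo 0x0 > /sys/class/scsi_host/hostN/lpfc_poll  # interrupts only
 */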
/**
* lpfc_fips_level_show - Return the current FIPS level for the HBA
* @dev: class unused variable.
* @attr: device attribute, not used.
* @buf: on return contains the module description text.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_fips_level_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
}
/**
* lpfc_fips_rev_show - Return the FIPS Spec revision for the HBA
* @dev: class unused variable.
* @attr: device attribute, not used.
* @buf: on return contains the module description text.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
}
/**
* lpfc_dss_show - Return the current state of dss and the configured state
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains the formatted text.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_dss_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
(phba->cfg_enable_dss) ? "Enabled" : "Disabled",
(phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
"" : "Not ");
}
/**
* lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains the formatted support level.
*
* Description:
* Returns the maximum number of virtual functions a physical function can
* support; 0 is returned when called on a virtual function.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_sriov_hw_max_virtfn_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
uint16_t max_nr_virtfn;
max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
}
/**
* lpfc_param_show - Return a cfg attribute value in decimal
*
* Description:
* Macro that given an attr e.g. hba_queue_depth expands
* into a function with the name lpfc_hba_queue_depth_show.
*
* lpfc_##attr##_show: Return the decimal value of an adapter's cfg_xxx field.
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: on return contains the attribute value in decimal.
*
* Returns: size of formatted string.
**/
#define lpfc_param_show(attr) \
static ssize_t \
lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
struct lpfc_hba *phba = vport->phba;\
return snprintf(buf, PAGE_SIZE, "%d\n",\
phba->cfg_##attr);\
}
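/*
 * Expansion sketch (illustrative): lpfc_param_show(hba_queue_depth)
 * generates
 *
 *   static ssize_t
 *   lpfc_hba_queue_depth_show(struct device *dev,
 *                             struct device_attribute *attr, char *buf)
 *   {
 *           ...
 *           return snprintf(buf, PAGE_SIZE, "%d\n",
 *                           phba->cfg_hba_queue_depth);
 *   }
 */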
/**
* lpfc_param_hex_show - Return a cfg attribute value in hex
*
* Description:
* Macro that given an attr e.g. hba_queue_depth expands
* into a function with the name lpfc_hba_queue_depth_show
*
* lpfc_##attr##_show: Return the hex value of an adapter's cfg_xxx field.
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: on return contains the attribute value in hexadecimal.
*
* Returns: size of formatted string.
**/
#define lpfc_param_hex_show(attr) \
static ssize_t \
lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
struct lpfc_hba *phba = vport->phba;\
return snprintf(buf, PAGE_SIZE, "%#x\n",\
phba->cfg_##attr);\
}
/**
* lpfc_param_init - Initializes a cfg attribute
*
* Description:
* Macro that given an attr e.g. hba_queue_depth expands
* into a function with the name lpfc_hba_queue_depth_init. The macro also
* takes a default argument, a minimum and maximum argument.
*
* lpfc_##attr##_init: Initializes an attribute.
* @phba: pointer to the adapter structure.
* @val: integer attribute value.
*
* Validates the min and max values then sets the adapter config field
* accordingly, or uses the default if out of range and prints an error message.
*
* Returns:
* zero on success
* -EINVAL if default used
**/
#define lpfc_param_init(attr, default, minval, maxval) \
static int \
lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
{ \
if (val >= minval && val <= maxval) {\
phba->cfg_##attr = val;\
return 0;\
}\
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
"0449 lpfc_"#attr" attribute cannot be set to %d, "\
"allowed range is ["#minval", "#maxval"]\n", val); \
phba->cfg_##attr = default;\
return -EINVAL;\
}
/**
* lpfc_param_set - Set a cfg attribute value
*
* Description:
* Macro that given an attr e.g. hba_queue_depth expands
* into a function with the name lpfc_hba_queue_depth_set
*
* lpfc_##attr##_set: Sets an attribute value.
* @phba: pointer to the adapter structure.
* @val: integer attribute value.
*
* Description:
* Validates the min and max values then sets the
* adapter config field if in the valid range. Prints an error message
* and does not set the parameter if invalid.
*
* Returns:
* zero on success
* -EINVAL if val is invalid
**/
#define lpfc_param_set(attr, default, minval, maxval) \
static int \
lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
{ \
if (val >= minval && val <= maxval) {\
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
"3052 lpfc_" #attr " changed from %d to %d\n", \
phba->cfg_##attr, val); \
phba->cfg_##attr = val;\
return 0;\
}\
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
"0450 lpfc_"#attr" attribute cannot be set to %d, "\
"allowed range is ["#minval", "#maxval"]\n", val); \
return -EINVAL;\
}
/**
* lpfc_param_store - Set a vport attribute value
*
* Description:
* Macro that given an attr e.g. hba_queue_depth expands
* into a function with the name lpfc_hba_queue_depth_store.
*
* lpfc_##attr##_store: Set an attribute value.
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: contains the attribute value in ascii.
* @count: not used.
*
* Description:
* Convert the ascii text number to an integer, then
* use the lpfc_##attr##_set function to set the value.
*
* Returns:
* -EINVAL if val is invalid or lpfc_##attr##_set() fails
* length of buffer upon success.
**/
#define lpfc_param_store(attr) \
static ssize_t \
lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
struct lpfc_hba *phba = vport->phba;\
uint val = 0;\
if (!isdigit(buf[0]))\
return -EINVAL;\
if (sscanf(buf, "%i", &val) != 1)\
return -EINVAL;\
if (lpfc_##attr##_set(phba, val) == 0) \
return strlen(buf);\
else \
return -EINVAL;\
}
/**
* lpfc_vport_param_show - Return decimal formatted cfg attribute value
*
* Description:
* Macro that given an attr e.g. hba_queue_depth expands
* into a function with the name lpfc_hba_queue_depth_show
*
* lpfc_##attr##_show: prints the attribute value in decimal.
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: on return contains the attribute value in decimal.
*
* Returns: length of formatted string.
**/
#define lpfc_vport_param_show(attr) \
static ssize_t \
lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
}
/**
* lpfc_vport_param_hex_show - Return hex formatted attribute value
*
* Description:
* Macro that given an attr e.g.
* hba_queue_depth expands into a function with the name
* lpfc_hba_queue_depth_show
*
* lpfc_##attr##_show: prints the attribute value in hexadecimal.
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: on return contains the attribute value in hexadecimal.
*
* Returns: length of formatted string.
**/
#define lpfc_vport_param_hex_show(attr) \
static ssize_t \
lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
}
/**
* lpfc_vport_param_init - Initialize a vport cfg attribute
*
* Description:
* Macro that given an attr e.g. hba_queue_depth expands
* into a function with the name lpfc_hba_queue_depth_init. The macro also
* takes a default argument, a minimum and maximum argument.
*
* lpfc_##attr##_init: validates the min and max values then sets the
* adapter config field accordingly, or uses the default if out of range
* and prints an error message.
* @phba: pointer to the adapter structure.
* @val: integer attribute value.
*
* Returns:
* zero on success
* -EINVAL if default used
**/
#define lpfc_vport_param_init(attr, default, minval, maxval) \
static int \
lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
{ \
if (val >= minval && val <= maxval) {\
vport->cfg_##attr = val;\
return 0;\
}\
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
"0423 lpfc_"#attr" attribute cannot be set to %d, "\
"allowed range is ["#minval", "#maxval"]\n", val); \
vport->cfg_##attr = default;\
return -EINVAL;\
}
/**
* lpfc_vport_param_set - Set a vport cfg attribute
*
* Description:
* Macro that given an attr e.g. hba_queue_depth expands
* into a function with the name lpfc_hba_queue_depth_set
*
* lpfc_##attr##_set: validates the min and max values then sets the
* adapter config field if in the valid range. Prints an error message
* and does not set the parameter if invalid.
* @phba: pointer to the adapter structure.
* @val: integer attribute value.
*
* Returns:
* zero on success
* -EINVAL if val is invalid
**/
#define lpfc_vport_param_set(attr, default, minval, maxval) \
static int \
lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
{ \
if (val >= minval && val <= maxval) {\
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
"3053 lpfc_" #attr " changed from %d to %d\n", \
vport->cfg_##attr, val); \
vport->cfg_##attr = val;\
return 0;\
}\
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
"0424 lpfc_"#attr" attribute cannot be set to %d, "\
"allowed range is ["#minval", "#maxval"]\n", val); \
return -EINVAL;\
}
/**
* lpfc_vport_param_store - Set a vport attribute
*
* Description:
* Macro that given an attr e.g. hba_queue_depth
* expands into a function with the name lpfc_hba_queue_depth_store
*
* lpfc_##attr##_store: convert the ascii text number to an integer, then
* use the lpfc_##attr##_set function to set the value.
* @dev: class device that is converted into a Scsi_host.
* @buf: contains the attribute value in decimal.
* @count: not used.
*
* Returns:
* -EINVAL if val is invalid or lpfc_##attr##_set() fails
* length of buffer upon success.
**/
#define lpfc_vport_param_store(attr) \
static ssize_t \
lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
uint val = 0;\
if (!isdigit(buf[0]))\
return -EINVAL;\
if (sscanf(buf, "%i", &val) != 1)\
return -EINVAL;\
if (lpfc_##attr##_set(vport, val) == 0) \
return strlen(buf);\
else \
return -EINVAL;\
}
#define LPFC_ATTR(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_init(name, defval, minval, maxval)
#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
lpfc_param_set(name, defval, minval, maxval)\
lpfc_param_store(name)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
lpfc_##name##_show, lpfc_##name##_store)
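/*
 * Example (illustrative, hypothetical attribute): a declaration such as
 *
 *   LPFC_ATTR_RW(foo, 1, 0, 2, "Example parameter");
 *
 * creates the module parameter lpfc_foo plus lpfc_foo_show, lpfc_foo_init,
 * lpfc_foo_set and lpfc_foo_store, and registers a read-write sysfs
 * attribute named lpfc_foo wired to that show/store pair.
 */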
#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_hex_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_hex_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
lpfc_param_set(name, defval, minval, maxval)\
lpfc_param_store(name)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
lpfc_##name##_show, lpfc_##name##_store)
#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_init(name, defval, minval, maxval)
#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
lpfc_vport_param_set(name, defval, minval, maxval)\
lpfc_vport_param_store(name)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
lpfc_##name##_show, lpfc_##name##_store)
#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_hex_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_hex_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
lpfc_vport_param_set(name, defval, minval, maxval)\
lpfc_vport_param_store(name)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
lpfc_##name##_show, lpfc_##name##_store)
static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL);
static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
lpfc_link_state_store);
static DEVICE_ATTR(option_rom_version, S_IRUGO,
lpfc_option_rom_version_show, NULL);
static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
lpfc_num_discovered_ports_show, NULL);
static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL);
static DEVICE_ATTR(lpfc_enable_fip, S_IRUGO, lpfc_enable_fip_show, NULL);
static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
lpfc_board_mode_show, lpfc_board_mode_store);
static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
lpfc_sriov_hw_max_virtfn_show, NULL);
static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
/**
* lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: containing the string lpfc_soft_wwn_key.
* @count: must be size of lpfc_soft_wwn_key.
*
* Returns:
* -EINVAL if the buffer does not contain lpfc_soft_wwn_key
* length of buf indicates success
**/
static ssize_t
lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
unsigned int cnt = count;
/*
* We're doing a simple sanity check for soft_wwpn setting.
* We require that the user write a specific key to enable
* the soft_wwpn attribute to be settable. Once the attribute
* is written, the enable key resets. If further updates are
* desired, the key must be written again to re-enable the
* attribute.
*
* The "key" is not secret - it is a hardcoded string shown
* here. The intent is to protect against the random user or
* application that is just writing attributes.
*/
/* count may include a LF at end of string */
if (buf[cnt-1] == '\n')
cnt--;
if ((cnt != strlen(lpfc_soft_wwn_key)) ||
(strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
return -EINVAL;
phba->soft_wwn_enable = 1;
return count;
}
static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
lpfc_soft_wwn_enable_store);
/**
* lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: on return contains the wwpn in hexadecimal.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
(unsigned long long)phba->cfg_soft_wwpn);
}
/**
* lpfc_soft_wwpn_store - Set the ww port name of the adapter
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: contains the wwpn in hexadecimal.
* @count: number of wwpn bytes in buf
*
* Returns:
* -EACCES hba reset not enabled, adapter over temp
* -EINVAL soft wwn not enabled, count is invalid, or an invalid wwpn byte was given
* -EIO error taking adapter offline or online
* value of count on success
**/
static ssize_t
lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct completion online_compl;
int stat1 = 0, stat2 = 0;
unsigned int i, j, cnt = count;
u8 wwpn[8];
int rc;
if (!phba->cfg_enable_hba_reset)
return -EACCES;
spin_lock_irq(&phba->hbalock);
if (phba->over_temp_state == HBA_OVER_TEMP) {
spin_unlock_irq(&phba->hbalock);
return -EACCES;
}
spin_unlock_irq(&phba->hbalock);
/* count may include a LF at end of string */
if (buf[cnt-1] == '\n')
cnt--;
if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
((cnt == 17) && (*buf++ != 'x')) ||
((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
return -EINVAL;
phba->soft_wwn_enable = 0;
memset(wwpn, 0, sizeof(wwpn));
/* Validate and store the new name */
for (i = 0, j = 0; i < 16; i++) {
int value;
value = hex_to_bin(*buf++);
if (value >= 0)
j = (j << 4) | value;
else
return -EINVAL;
if (i % 2) {
wwpn[i/2] = j & 0xff;
j = 0;
}
}
phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
fc_host_port_name(shost) = phba->cfg_soft_wwpn;
if (phba->cfg_soft_wwnn)
fc_host_node_name(shost) = phba->cfg_soft_wwnn;
dev_printk(KERN_NOTICE, &phba->pcidev->dev,
"lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
if (stat1)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0463 lpfc_soft_wwpn attribute set failed to "
"reinit adapter - %d\n", stat1);
init_completion(&online_compl);
rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
LPFC_EVT_ONLINE);
if (rc == 0)
return -ENOMEM;
wait_for_completion(&online_compl);
if (stat2)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0464 lpfc_soft_wwpn attribute set failed to "
"reinit adapter - %d\n", stat2);
return (stat1 || stat2) ? -EIO : count;
}
static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
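/*
 * Standalone sketch of the WWN parse shared by the two store routines in
 * this area: sixteen hex digits are folded two at a time into eight bytes.
 * A local hex-digit helper stands in for the kernel's hex_to_bin(); the
 * loop otherwise mirrors the one above.
 */
#if 0
static int hex_digit(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return -1;	/* not a hex digit */
}

/* Fill wwn[8] from 16 hex characters; returns 0 on success, -1 on error. */
static int parse_wwn(const char *buf, unsigned char *wwn)
{
	int i, j = 0;

	for (i = 0; i < 16; i++) {
		int value = hex_digit(*buf++);

		if (value < 0)
			return -1;
		j = (j << 4) | value;
		if (i % 2) {	/* every second digit completes one byte */
			wwn[i / 2] = (unsigned char)(j & 0xff);
			j = 0;
		}
	}
	return 0;
}
#endif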
/**
* lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: on return contains the wwnn in hexadecimal.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
(unsigned long long)phba->cfg_soft_wwnn);
}
/**
* lpfc_soft_wwnn_store - sets the ww node name of the adapter
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: contains the ww node name in hexadecimal.
* @count: number of wwnn bytes in buf.
*
* Returns:
* -EINVAL soft wwn not enabled, count is invalid, or an invalid wwnn byte was given
* value of count on success
**/
static ssize_t
lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
unsigned int i, j, cnt = count;
u8 wwnn[8];
/* count may include a LF at end of string */
if (buf[cnt-1] == '\n')
cnt--;
if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
((cnt == 17) && (*buf++ != 'x')) ||
((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
return -EINVAL;
/*
* Allow wwnn to be set many times, as long as the enable is set.
* However, once the wwpn is set, everything locks.
*/
memset(wwnn, 0, sizeof(wwnn));
/* Validate and store the new name */
for (i = 0, j = 0; i < 16; i++) {
int value;
value = hex_to_bin(*buf++);
if (value >= 0)
j = (j << 4) | value;
else
return -EINVAL;
if (i % 2) {
wwnn[i/2] = j & 0xff;
j = 0;
}
}
phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
dev_printk(KERN_NOTICE, &phba->pcidev->dev,
"lpfc%d: soft_wwnn set. Value will take effect upon "
"setting of the soft_wwpn\n", phba->brd_no);
return count;
}
static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
static int lpfc_poll = 0;
module_param(lpfc_poll, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
" 0 - none,"
" 1 - poll with interrupts enabled"
" 3 - poll and disable FCP ring interrupts");
static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
lpfc_poll_show, lpfc_poll_store);
int lpfc_sli_mode = 0;
module_param(lpfc_sli_mode, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
" 0 - auto (SLI-3 if supported),"
" 2 - select SLI-2 even on SLI-3 capable HBAs,"
" 3 - select SLI-3");
int lpfc_enable_npiv = 1;
module_param(lpfc_enable_npiv, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality");
lpfc_param_show(enable_npiv);
lpfc_param_init(enable_npiv, 1, 0, 1);
static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
"FCF Fast failover=1 Priority failover=2");
int lpfc_enable_rrq = 2;
module_param(lpfc_enable_rrq, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
lpfc_param_show(enable_rrq);
/*
# lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
# 0x0 = disabled, XRI/OXID use not tracked.
# 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
# 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
*/
lpfc_param_init(enable_rrq, 2, 0, 2);
static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
/*
# lpfc_suppress_link_up: Bring link up at initialization
# 0x0 = bring link up (issue MBX_INIT_LINK)
# 0x1 = do NOT bring link up at initialization(MBX_INIT_LINK)
# 0x2 = never bring up link
# Default value is 0.
*/
LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
LPFC_DELAY_INIT_LINK_INDEFINITELY,
"Suppress Link Up at initialization");
/*
# lpfc_iocb_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
# 1 - (1024)
# 2 - (2048)
# 3 - (3072)
# 4 - (4096)
# 5 - (5120)
*/
static ssize_t
lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
return snprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
}
static DEVICE_ATTR(iocb_hw, S_IRUGO,
lpfc_iocb_hw_show, NULL);
static ssize_t
lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
return snprintf(buf, PAGE_SIZE, "%d\n",
phba->sli.ring[LPFC_ELS_RING].txq_max);
}
static DEVICE_ATTR(txq_hw, S_IRUGO,
lpfc_txq_hw_show, NULL);
static ssize_t
lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
return snprintf(buf, PAGE_SIZE, "%d\n",
phba->sli.ring[LPFC_ELS_RING].txcmplq_max);
}
static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
lpfc_txcmplq_hw_show, NULL);
int lpfc_iocb_cnt = 2;
module_param(lpfc_iocb_cnt, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_iocb_cnt,
"Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
lpfc_param_show(iocb_cnt);
lpfc_param_init(iocb_cnt, 2, 1, 5);
static DEVICE_ATTR(lpfc_iocb_cnt, S_IRUGO,
lpfc_iocb_cnt_show, NULL);
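/*
 * Worked example of the 1..5 table above: each unit of lpfc_iocb_cnt
 * corresponds to 1024 IOCBs (1 -> 1024, ..., 5 -> 5120). The helper name
 * is hypothetical; it only restates the table as arithmetic.
 */
#if 0
static unsigned int iocb_cnt_to_iocbs(unsigned int iocb_cnt)
{
	return iocb_cnt * 1024;	/* e.g. lpfc_iocb_cnt=2 -> 2048 IOCBs */
}
#endif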
/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
# until the timer expires. Value range is [0,255]. Default value is 30.
*/
static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO;
module_param(lpfc_nodev_tmo, int, 0);
MODULE_PARM_DESC(lpfc_nodev_tmo,
"Seconds driver will hold I/O waiting "
"for a device to come back");
/**
* lpfc_nodev_tmo_show - Return the hba dev loss timeout value
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains the dev loss timeout in decimal.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
}
/**
* lpfc_nodev_tmo_init - Set the hba nodev timeout value
* @vport: lpfc vport structure pointer.
* @val: contains the nodev timeout value.
*
* Description:
* If the devloss tmo is already set then nodev tmo is set to devloss tmo,
* a kernel error message is printed and zero is returned.
* Else if val is in range then nodev tmo and devloss tmo are set to val.
* Otherwise nodev tmo is set to the default value.
*
* Returns:
* zero if already set or if val is in range
* -EINVAL val out of range
**/
static int
lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
{
if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
if (val != LPFC_DEF_DEVLOSS_TMO)
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0407 Ignoring nodev_tmo module "
"parameter because devloss_tmo is "
"set.\n");
return 0;
}
if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
vport->cfg_nodev_tmo = val;
vport->cfg_devloss_tmo = val;
return 0;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0400 lpfc_nodev_tmo attribute cannot be set to"
" %d, allowed range is [%d, %d]\n",
val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
return -EINVAL;
}
/**
* lpfc_update_rport_devloss_tmo - Update dev loss tmo value
* @vport: lpfc vport structure pointer.
*
* Description:
* Update all the ndlp's dev loss tmo with the vport devloss tmo value.
**/
static void
lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
{
struct Scsi_Host *shost;
struct lpfc_nodelist *ndlp;
shost = lpfc_shost_from_vport(vport);
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport)
ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
spin_unlock_irq(shost->host_lock);
}
/**
* lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values
* @vport: lpfc vport structure pointer.
* @val: contains the tmo value.
*
* Description:
* If the devloss tmo is already set or the vport dev loss tmo has changed
* then a kernel error message is printed and zero is returned.
* Else if val is in range then nodev tmo and devloss tmo are set to val.
* Otherwise nodev tmo is set to the default value.
*
* Returns:
* zero if already set or if val is in range
* -EINVAL val out of range
**/
static int
lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
{
if (vport->dev_loss_tmo_changed ||
(lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0401 Ignoring change to nodev_tmo "
"because devloss_tmo is set.\n");
return 0;
}
if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
vport->cfg_nodev_tmo = val;
vport->cfg_devloss_tmo = val;
/*
* For compat: set the fc_host dev loss so new rports
* will get the value.
*/
fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
lpfc_update_rport_devloss_tmo(vport);
return 0;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0403 lpfc_nodev_tmo attribute cannot be set to"
"%d, allowed range is [%d, %d]\n",
val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
return -EINVAL;
}
lpfc_vport_param_store(nodev_tmo)
static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
lpfc_nodev_tmo_show, lpfc_nodev_tmo_store);
/*
# lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
# disappear until the timer expires. Value range is [0,255]. Default
# value is 30.
*/
module_param(lpfc_devloss_tmo, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_devloss_tmo,
"Seconds driver will hold I/O waiting "
"for a device to come back");
lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
lpfc_vport_param_show(devloss_tmo)
/**
* lpfc_devloss_tmo_set - Sets vport nodev tmo, devloss tmo values, changed bit
* @vport: lpfc vport structure pointer.
* @val: contains the tmo value.
*
* Description:
* If val is in a valid range then set the vport nodev tmo,
* devloss tmo, also set the vport dev loss tmo changed flag.
* Else a kernel error message is printed.
*
* Returns:
* zero if val is in range
* -EINVAL val out of range
**/
static int
lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
{
if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
vport->cfg_nodev_tmo = val;
vport->cfg_devloss_tmo = val;
vport->dev_loss_tmo_changed = 1;
fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
lpfc_update_rport_devloss_tmo(vport);
return 0;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0404 lpfc_devloss_tmo attribute cannot be set to"
" %d, allowed range is [%d, %d]\n",
val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
return -EINVAL;
}
lpfc_vport_param_store(devloss_tmo)
static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
lpfc_devloss_tmo_show, lpfc_devloss_tmo_store);
/*
# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
# deluged with LOTS of information.
# You can set a bit mask to record specific types of verbose messages:
# See lpfc_logmsh.h for definitions.
*/
LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
"Verbose logging bit-mask");
/*
# lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
# objects that have been registered with the nameserver after login.
*/
LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
"Deregister nameserver objects before LOGO");
/*
# lun_queue_depth: This parameter is used to limit the number of outstanding
# commands per FCP LUN. Value range is [1,128]. Default value is 30.
*/
LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128,
"Max number of FCP commands we can queue to a specific LUN");
/*
# tgt_queue_depth: This parameter is used to limit the number of outstanding
# commands per target port. Value range is [10,65535]. Default value is 65535.
*/
LPFC_VPORT_ATTR_R(tgt_queue_depth, 65535, 10, 65535,
"Max number of FCP commands we can queue to a specific target port");
/*
# hba_queue_depth: This parameter is used to limit the number of outstanding
# commands per lpfc HBA. Value range is [32,8192]. If this parameter
# value is greater than the maximum number of exchanges supported by the HBA,
# then maximum number of exchanges supported by the HBA is used to determine
# the hba_queue_depth.
*/
LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
"Max number of FCP commands we can queue to a lpfc HBA");
/*
# peer_port_login: This parameter allows/prevents logins
# between peer ports hosted on the same physical port.
# When this parameter is set 0 peer ports of same physical port
# are not allowed to login to each other.
# When this parameter is set 1 peer ports of same physical port
# are allowed to login to each other.
# Default value of this parameter is 0.
*/
LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
"Allow peer ports on the same physical port to login to each "
"other.");
/*
# restrict_login: This parameter allows/prevents logins
# between Virtual Ports and remote initiators.
# When this parameter is not set (0) Virtual Ports will accept PLOGIs from
# other initiators and will attempt to PLOGI all remote ports.
# When this parameter is set (1) Virtual Ports will reject PLOGIs from
# remote ports and will not attempt to PLOGI to other initiators.
# This parameter does not apply to the physical port.
# This parameter does not restrict logins to Fabric resident remote ports.
# Default value of this parameter is 1.
*/
static int lpfc_restrict_login = 1;
module_param(lpfc_restrict_login, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_restrict_login,
"Restrict virtual ports login to remote initiators.");
lpfc_vport_param_show(restrict_login);
/**
* lpfc_restrict_login_init - Set the vport restrict login flag
* @vport: lpfc vport structure pointer.
* @val: contains the restrict login value.
*
* Description:
* If val is not in a valid range then log a kernel error message and set
* the vport restrict login to one.
* If the port type is physical clear the restrict login flag and return.
* Else set the restrict login flag to val.
*
* Returns:
* zero if val is in range
* -EINVAL val out of range
**/
static int
lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
{
if (val < 0 || val > 1) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0422 lpfc_restrict_login attribute cannot "
"be set to %d, allowed range is [0, 1]\n",
val);
vport->cfg_restrict_login = 1;
return -EINVAL;
}
if (vport->port_type == LPFC_PHYSICAL_PORT) {
vport->cfg_restrict_login = 0;
return 0;
}
vport->cfg_restrict_login = val;
return 0;
}
/**
* lpfc_restrict_login_set - Set the vport restrict login flag
* @vport: lpfc vport structure pointer.
* @val: contains the restrict login value.
*
* Description:
* If val is not in a valid range then log a kernel error message and set
* the vport restrict login to one.
* If the port type is physical and the val is not zero log a kernel
* error message, clear the restrict login flag and return zero.
* Else set the restrict login flag to val.
*
* Returns:
* zero if val is in range
* -EINVAL val out of range
**/
static int
lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
{
if (val < 0 || val > 1) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0425 lpfc_restrict_login attribute cannot "
"be set to %d, allowed range is [0, 1]\n",
val);
vport->cfg_restrict_login = 1;
return -EINVAL;
}
if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0468 lpfc_restrict_login must be 0 for "
"Physical ports.\n");
vport->cfg_restrict_login = 0;
return 0;
}
vport->cfg_restrict_login = val;
return 0;
}
lpfc_vport_param_store(restrict_login);
static DEVICE_ATTR(lpfc_restrict_login, S_IRUGO | S_IWUSR,
lpfc_restrict_login_show, lpfc_restrict_login_store);
/*
# Some disk devices have a "select ID" or "select Target" capability.
# From a protocol standpoint "select ID" usually means select the
# Fibre channel "ALPA". In the FC-AL Profile there is an "informative
# annex" which contains a table that maps a "select ID" (a number
# between 0 and 7F) to an ALPA. By default, for compatibility with
# older drivers, the lpfc driver scans this table from low ALPA to high
# ALPA.
#
# Turning on the scan-down variable (on = 1, off = 0) will
# cause the lpfc driver to use an inverted table, effectively
# scanning ALPAs from high to low. Value range is [0,1]. Default value is 1.
#
# (Note: This "select ID" functionality is a LOOP ONLY characteristic
# and will not work across a fabric. Also this parameter will take
# effect only in the case when ALPA map is not available.)
*/
LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
"Start scanning for devices from highest ALPA to lowest");
/*
# lpfc_topology: link topology for init link
# 0x0 = attempt loop mode then point-to-point
# 0x01 = internal loopback mode
# 0x02 = attempt point-to-point mode only
# 0x04 = attempt loop mode only
# 0x06 = attempt point-to-point mode then loop
# Set point-to-point mode if you want to run as an N_Port.
# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
# Default value is 0.
*/
/**
* lpfc_topology_store - Set the adapter's topology field
* @phba: lpfc_hba pointer.
* @val: topology value.
*
* Description:
* If val is in a valid range then set the adapter's topology field and
* issue a lip; if the lip fails reset the topology to the old value.
*
* If the value is not in range log a kernel error message and return an error.
*
* Returns:
* zero if val is in range and lip okay
* non-zero return value from lpfc_issue_lip()
* -EINVAL val out of range
**/
static ssize_t
lpfc_topology_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
int val = 0;
int nolip = 0;
const char *val_buf = buf;
int err;
uint32_t prev_val;
if (!strncmp(buf, "nolip ", strlen("nolip "))) {
nolip = 1;
val_buf = &buf[strlen("nolip ")];
}
if (!isdigit(val_buf[0]))
return -EINVAL;
if (sscanf(val_buf, "%i", &val) != 1)
return -EINVAL;
if (val >= 0 && val <= 6) {
prev_val = phba->cfg_topology;
phba->cfg_topology = val;
if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
val == 4) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"3113 Loop mode not supported at speed %d\n",
phba->cfg_link_speed);
phba->cfg_topology = prev_val;
return -EINVAL;
}
if (nolip)
return strlen(buf);
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"3054 lpfc_topology changed from %d to %d\n",
prev_val, val);
if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
phba->fc_topology_changed = 1;
err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
if (err) {
phba->cfg_topology = prev_val;
return -EINVAL;
} else
return strlen(buf);
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0467 lpfc_topology attribute cannot be set to %d, "
"allowed range is [0, 6]\n",
phba->brd_no, val);
return -EINVAL;
}
static int lpfc_topology = 0;
module_param(lpfc_topology, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
lpfc_param_show(topology)
lpfc_param_init(topology, 0, 0, 6)
static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
lpfc_topology_show, lpfc_topology_store);
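/*
 * Illustrative sketch (standalone userspace) of the "nolip " convention
 * parsed by lpfc_topology_store above: a bare value applies the topology
 * and issues a LIP, while the "nolip " prefix only records the value for
 * the next link bring-up. The host number is an assumption.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/class/scsi_host/host0/lpfc_topology", O_WRONLY);

	if (fd < 0)
		return 1;
	/* "2" alone would also issue a LIP; "nolip 2" defers the change. */
	(void)write(fd, "nolip 2", strlen("nolip 2"));
	close(fd);
	return 0;
}
#endif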
/**
* lpfc_static_vport_show - Read callback function for
* lpfc_static_vport sysfs file.
* @dev: Pointer to class device object.
* @attr: device attribute structure.
* @buf: Data buffer.
*
* This function is the read call back function for the
* lpfc_static_vport sysfs file. The lpfc_static_vport sysfs
* file reports 1 when the vport is a static vport and 0 otherwise.
**/
static ssize_t
lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
if (vport->vport_flag & STATIC_VPORT)
sprintf(buf, "1\n");
else
sprintf(buf, "0\n");
return strlen(buf);
}
/*
* Sysfs attribute to report whether the vport is a static vport.
*/
static DEVICE_ATTR(lpfc_static_vport, S_IRUGO,
lpfc_static_vport_show, NULL);
/**
* lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
* @dev: Pointer to class device.
* @attr: device attribute, not used.
* @buf: Data buffer.
* @count: Size of the data buffer.
*
* This function gets called when a user writes to the lpfc_stat_data_ctrl
* sysfs file. It parses the command written to the sysfs file and takes
* the appropriate action. These commands are used for controlling
* driver statistical data collection.
* The commands this function handles are:
*
* setbucket <bucket_type> <base> <step>
* = Set the latency buckets.
* destroybucket = destroy all the buckets.
* start = start data collection
* stop = stop data collection
* reset = reset the collected data
**/
static ssize_t
lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
#define LPFC_MAX_DATA_CTRL_LEN 1024
static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
unsigned long i;
char *str_ptr, *token;
struct lpfc_vport **vports;
struct Scsi_Host *v_shost;
char *bucket_type_str, *base_str, *step_str;
unsigned long base, step, bucket_type;
if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
return -EINVAL;
strcpy(bucket_data, buf);
str_ptr = &bucket_data[0];
/* Ignore this token - it is the command token */
token = strsep(&str_ptr, "\t ");
if (!token)
return -EINVAL;
bucket_type_str = strsep(&str_ptr, "\t ");
if (!bucket_type_str)
return -EINVAL;
if (!strncmp(bucket_type_str, "linear", strlen("linear")))
bucket_type = LPFC_LINEAR_BUCKET;
else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
bucket_type = LPFC_POWER2_BUCKET;
else
return -EINVAL;
base_str = strsep(&str_ptr, "\t ");
if (!base_str)
return -EINVAL;
base = simple_strtoul(base_str, NULL, 0);
step_str = strsep(&str_ptr, "\t ");
if (!step_str)
return -EINVAL;
step = simple_strtoul(step_str, NULL, 0);
if (!step)
return -EINVAL;
/* Block the data collection for every vport */
vports = lpfc_create_vport_work_array(phba);
if (vports == NULL)
return -ENOMEM;
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
v_shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(v_shost->host_lock);
/* Block and reset data collection */
vports[i]->stat_data_blocked = 1;
if (vports[i]->stat_data_enabled)
lpfc_vport_reset_stat_data(vports[i]);
spin_unlock_irq(v_shost->host_lock);
}
/* Set the bucket attributes */
phba->bucket_type = bucket_type;
phba->bucket_base = base;
phba->bucket_step = step;
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
v_shost = lpfc_shost_from_vport(vports[i]);
/* Unblock data collection */
spin_lock_irq(v_shost->host_lock);
vports[i]->stat_data_blocked = 0;
spin_unlock_irq(v_shost->host_lock);
}
lpfc_destroy_vport_work_array(phba, vports);
return strlen(buf);
}
if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
vports = lpfc_create_vport_work_array(phba);
if (vports == NULL)
return -ENOMEM;
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
v_shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(v_shost->host_lock);
vports[i]->stat_data_blocked = 1;
lpfc_free_bucket(vports[i]);
vports[i]->stat_data_enabled = 0;
vports[i]->stat_data_blocked = 0;
spin_unlock_irq(v_shost->host_lock);
}
lpfc_destroy_vport_work_array(phba, vports);
phba->bucket_type = LPFC_NO_BUCKET;
phba->bucket_base = 0;
phba->bucket_step = 0;
return strlen(buf);
}
if (!strncmp(buf, "start", strlen("start"))) {
/* If no buckets configured return error */
if (phba->bucket_type == LPFC_NO_BUCKET)
return -EINVAL;
spin_lock_irq(shost->host_lock);
if (vport->stat_data_enabled) {
spin_unlock_irq(shost->host_lock);
return strlen(buf);
}
lpfc_alloc_bucket(vport);
vport->stat_data_enabled = 1;
spin_unlock_irq(shost->host_lock);
return strlen(buf);
}
if (!strncmp(buf, "stop", strlen("stop"))) {
spin_lock_irq(shost->host_lock);
if (vport->stat_data_enabled == 0) {
spin_unlock_irq(shost->host_lock);
return strlen(buf);
}
lpfc_free_bucket(vport);
vport->stat_data_enabled = 0;
spin_unlock_irq(shost->host_lock);
return strlen(buf);
}
if (!strncmp(buf, "reset", strlen("reset"))) {
if ((phba->bucket_type == LPFC_NO_BUCKET)
|| !vport->stat_data_enabled)
return strlen(buf);
spin_lock_irq(shost->host_lock);
vport->stat_data_blocked = 1;
lpfc_vport_reset_stat_data(vport);
vport->stat_data_blocked = 0;
spin_unlock_irq(shost->host_lock);
return strlen(buf);
}
return -EINVAL;
}
/**
* lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file
* @dev: Pointer to class device object.
* @attr: device attribute, not used.
* @buf: Data buffer.
*
* This function is the read call back function for the
* lpfc_stat_data_ctrl sysfs file. It reports the
* current statistical data collection state.
**/
static ssize_t
lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
int index = 0;
int i;
char *bucket_type;
unsigned long bucket_value;
switch (phba->bucket_type) {
case LPFC_LINEAR_BUCKET:
bucket_type = "linear";
break;
case LPFC_POWER2_BUCKET:
bucket_type = "power2";
break;
default:
bucket_type = "No Bucket";
break;
}
sprintf(&buf[index], "Statistical Data enabled :%d, "
"blocked :%d, Bucket type :%s, Bucket base :%d,"
" Bucket step :%d\nLatency Ranges :",
vport->stat_data_enabled, vport->stat_data_blocked,
bucket_type, phba->bucket_base, phba->bucket_step);
index = strlen(buf);
if (phba->bucket_type != LPFC_NO_BUCKET) {
for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
if (phba->bucket_type == LPFC_LINEAR_BUCKET)
bucket_value = phba->bucket_base +
phba->bucket_step * i;
else
bucket_value = phba->bucket_base +
(1 << i) * phba->bucket_step;
if (index + 10 > PAGE_SIZE)
break;
sprintf(&buf[index], "%08ld ", bucket_value);
index = strlen(buf);
}
}
sprintf(&buf[index], "\n");
return strlen(buf);
}
/*
* Sysfs attribute to control the statistical data collection.
*/
static DEVICE_ATTR(lpfc_stat_data_ctrl, S_IRUGO | S_IWUSR,
lpfc_stat_data_ctrl_show, lpfc_stat_data_ctrl_store);
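/*
 * Illustrative sketch (standalone userspace) of the command grammar
 * handled by lpfc_stat_data_ctrl_store above: configure linear latency
 * buckets and then start collection. The host number, base and step
 * values are assumptions.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int ctrl_cmd(const char *cmd)
{
	int fd = open("/sys/class/scsi_host/host0/lpfc_stat_data_ctrl",
		      O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, cmd, strlen(cmd));
	close(fd);
	return (n < 0) ? -1 : 0;
}

int main(void)
{
	/* setbucket <bucket_type> <base> <step> */
	if (ctrl_cmd("setbucket linear 100 50"))
		return 1;
	return ctrl_cmd("start") ? 1 : 0;
}
#endif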
/*
* lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
*/
/*
* Each bucket takes 11 characters, and each target additionally takes
* 17 bytes of WWN plus 1 newline character.
*/
#define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
#define MAX_STAT_DATA_SIZE_PER_TARGET \
STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
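/*
 * Sketch of the two bucket laws used by lpfc_stat_data_ctrl_show above:
 * linear buckets grow arithmetically, power2 buckets geometrically. The
 * helper names are local to this sketch.
 */
#if 0
/* Boundary of bucket i for a linear histogram: base + step * i. */
static unsigned long linear_bucket(unsigned long base, unsigned long step,
				   int i)
{
	return base + step * i;
}

/* Boundary of bucket i for a power-of-two histogram: base + step * 2^i. */
static unsigned long power2_bucket(unsigned long base, unsigned long step,
				   int i)
{
	return base + ((unsigned long)1 << i) * step;
}
#endif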
/**
* sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
* @filp: sysfs file
* @kobj: Pointer to the kernel object
* @bin_attr: Attribute object
* @buff: Buffer pointer
* @off: File offset
* @count: Buffer size
*
* This function is the read call back function for lpfc_drvr_stat_data
* sysfs file. It exports the collected statistical data to user
* applications.
**/
static ssize_t
sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device,
kobj);
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
int i = 0, index = 0;
unsigned long nport_index;
struct lpfc_nodelist *ndlp = NULL;
nport_index = (unsigned long)off /
MAX_STAT_DATA_SIZE_PER_TARGET;
if (!vport->stat_data_enabled || vport->stat_data_blocked
|| (phba->bucket_type == LPFC_NO_BUCKET))
return 0;
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (!NLP_CHK_NODE_ACT(ndlp) || !ndlp->lat_data)
continue;
if (nport_index > 0) {
nport_index--;
continue;
}
if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
> count)
break;
if (!ndlp->lat_data)
continue;
/* Print the WWN */
sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
ndlp->nlp_portname.u.wwn[0],
ndlp->nlp_portname.u.wwn[1],
ndlp->nlp_portname.u.wwn[2],
ndlp->nlp_portname.u.wwn[3],
ndlp->nlp_portname.u.wwn[4],
ndlp->nlp_portname.u.wwn[5],
ndlp->nlp_portname.u.wwn[6],
ndlp->nlp_portname.u.wwn[7]);
index = strlen(buf);
for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
sprintf(&buf[index], "%010u,",
ndlp->lat_data[i].cmd_count);
index = strlen(buf);
}
sprintf(&buf[index], "\n");
index = strlen(buf);
}
spin_unlock_irq(shost->host_lock);
return index;
}
static struct bin_attribute sysfs_drvr_stat_data_attr = {
.attr = {
.name = "lpfc_drvr_stat_data",
.mode = S_IRUSR,
},
.size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
.read = sysfs_drvr_stat_data_read,
.write = NULL,
};
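/*
 * Standalone sketch of consuming one record emitted by
 * sysfs_drvr_stat_data_read above: a 16-hex-digit WWN, a colon, then one
 * fixed-width "%010u," counter per bucket, then a newline. The bucket
 * count of 64 is an assumption for the sketch, not the driver's actual
 * LPFC_MAX_BUCKET_COUNT.
 */
#if 0
#include <stdio.h>
#include <string.h>

#define EXAMPLE_BUCKET_COUNT 64	/* assumed value for illustration */

static int parse_record(const char *rec)
{
	unsigned int count;
	int i;

	/* 17-byte prefix: 16 hex digits plus the ':' separator. */
	if (strlen(rec) < 17 + 11 * EXAMPLE_BUCKET_COUNT || rec[16] != ':')
		return -1;
	printf("target %.16s:", rec);
	rec += 17;
	/* Each counter field is exactly 11 bytes: 10 digits and a comma. */
	for (i = 0; i < EXAMPLE_BUCKET_COUNT; i++, rec += 11) {
		if (sscanf(rec, "%10u", &count) != 1)
			return -1;
		printf(" %u", count);
	}
	printf("\n");
	return 0;
}
#endif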
/*
# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
# connection.
# Value range is [0,16]. Default value is 0.
*/
/**
* lpfc_link_speed_store - Set the adapter's link speed
* @phba: lpfc_hba pointer.
* @val: link speed value.
*
* Description:
* If val is in a valid range then set the adapter's link speed field and
* issue a lip; if the lip fails reset the link speed to the old value.
*
* Notes:
* If the value is not in range log a kernel error message and return an error.
*
* Returns:
* zero if val is in range and lip okay.
* non-zero return value from lpfc_issue_lip()
* -EINVAL val out of range
**/
static ssize_t
lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
int val = LPFC_USER_LINK_SPEED_AUTO;
int nolip = 0;
const char *val_buf = buf;
int err;
uint32_t prev_val;
if (!strncmp(buf, "nolip ", strlen("nolip "))) {
nolip = 1;
val_buf = &buf[strlen("nolip ")];
}
if (!isdigit(val_buf[0]))
return -EINVAL;
if (sscanf(val_buf, "%i", &val) != 1)
return -EINVAL;
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"3055 lpfc_link_speed changed from %d to %d %s\n",
phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb))) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2879 lpfc_link_speed attribute cannot be set "
"to %d. Speed is not supported by this port.\n",
val);
return -EINVAL;
}
if (val == LPFC_USER_LINK_SPEED_16G &&
phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3112 lpfc_link_speed attribute cannot be set "
"to %d. Speed is not supported in loop mode.\n",
val);
return -EINVAL;
}
if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
(LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
prev_val = phba->cfg_link_speed;
phba->cfg_link_speed = val;
if (nolip)
return strlen(buf);
err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
if (err) {
phba->cfg_link_speed = prev_val;
return -EINVAL;
} else
return strlen(buf);
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0469 lpfc_link_speed attribute cannot be set to %d, "
"allowed values are ["LPFC_LINK_SPEED_STRING"]\n", val);
return -EINVAL;
}
static int lpfc_link_speed = 0;
module_param(lpfc_link_speed, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
lpfc_param_show(link_speed)
/**
* lpfc_link_speed_init - Set the adapter's link speed
* @phba: lpfc_hba pointer.
* @val: link speed value.
*
* Description:
* If val is in a valid range then set the adapter's link speed field.
*
* Notes:
* If the value is not in range log a kernel error message, clear the link
* speed and return an error.
*
* Returns:
* zero if val saved.
* -EINVAL val out of range
**/
static int
lpfc_link_speed_init(struct lpfc_hba *phba, int val)
{
if (val == LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3111 lpfc_link_speed of %d cannot "
"support loop mode, setting topology to default.\n",
val);
phba->cfg_topology = 0;
}
if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
(LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
phba->cfg_link_speed = val;
return 0;
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0405 lpfc_link_speed attribute cannot "
"be set to %d, allowed values are "
"["LPFC_LINK_SPEED_STRING"]\n", val);
phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
return -EINVAL;
}
static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
lpfc_link_speed_show, lpfc_link_speed_store);
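/*
 * Sketch of the bitmap validity test used by the two routines above: a
 * speed value is legal only when its bit is set in the allowed-speed
 * mask, so the valid set can be sparse. The mask below is an illustrative
 * stand-in for LPFC_USER_LINK_SPEED_BITMAP, not the driver's definition.
 */
#if 0
#define EXAMPLE_SPEED_BITMAP \
	((1 << 0) | (1 << 1) | (1 << 2) | (1 << 4) | \
	 (1 << 8) | (1 << 10) | (1 << 16))	/* auto,1,2,4,8,10,16 */

static int speed_is_valid(int val)
{
	return val >= 0 && val <= 16 && (EXAMPLE_SPEED_BITMAP & (1 << val));
}
#endif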
/*
# lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
# 0 = aer disabled or not supported
# 1 = aer supported and enabled (default)
# Value range is [0,1]. Default value is 1.
*/
/**
* lpfc_aer_support_store - Set the adapter for aer support
*
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: containing enable or disable aer flag.
* @count: unused variable.
*
* Description:
* If the val is 1 and the device's AER capability is not currently
* enabled, invoke the kernel's enable AER helper routine to try to
* enable the device's AER capability. If the helper routine succeeds,
* update the device's cfg_aer_support flag to indicate AER is
* supported by the device; if the device's AER capability is already
* enabled, only the cfg_aer_support flag is updated.
*
* If the val is 0 and the device's AER support is currently enabled,
* invoke the kernel's disable AER helper routine and then update the
* device's cfg_aer_support flag to indicate AER is not supported by
* the device; if the device's AER capability is already disabled,
* only the cfg_aer_support flag is updated.
*
* Returns:
* length of the buf on success if val is in range and the intended mode
* is supported.
* -EINVAL if val out of range or intended mode is not supported.
* -EPERM if the kernel AER helper routine failed to apply the change.
**/
static ssize_t
lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
int val = 0, rc = -EINVAL;
if (!isdigit(buf[0]))
return -EINVAL;
if (sscanf(buf, "%i", &val) != 1)
return -EINVAL;
switch (val) {
case 0:
if (phba->hba_flag & HBA_AER_ENABLED) {
rc = pci_disable_pcie_error_reporting(phba->pcidev);
if (!rc) {
spin_lock_irq(&phba->hbalock);
phba->hba_flag &= ~HBA_AER_ENABLED;
spin_unlock_irq(&phba->hbalock);
phba->cfg_aer_support = 0;
rc = strlen(buf);
} else
rc = -EPERM;
} else {
phba->cfg_aer_support = 0;
rc = strlen(buf);
}
break;
case 1:
if (!(phba->hba_flag & HBA_AER_ENABLED)) {
rc = pci_enable_pcie_error_reporting(phba->pcidev);
if (!rc) {
spin_lock_irq(&phba->hbalock);
phba->hba_flag |= HBA_AER_ENABLED;
spin_unlock_irq(&phba->hbalock);
phba->cfg_aer_support = 1;
rc = strlen(buf);
} else
rc = -EPERM;
} else {
phba->cfg_aer_support = 1;
rc = strlen(buf);
}
break;
default:
rc = -EINVAL;
break;
}
return rc;
}
static int lpfc_aer_support = 1;
module_param(lpfc_aer_support, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
lpfc_param_show(aer_support)
/**
* lpfc_aer_support_init - Set the initial adapters aer support flag
* @phba: lpfc_hba pointer.
* @val: enable aer or disable aer flag.
*
* Description:
* If val is in a valid range [0,1], then set the adapter's initial
* cfg_aer_support field. It will be up to the driver's probe_one
* routine to determine whether the device's AER support can be set
* or not.
*
* Notes:
* If the value is not in range log a kernel error message, and
* choose the default value of setting AER support and return.
*
* Returns:
* zero if val saved.
* -EINVAL val out of range
**/
static int
lpfc_aer_support_init(struct lpfc_hba *phba, int val)
{
if (val == 0 || val == 1) {
phba->cfg_aer_support = val;
return 0;
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2712 lpfc_aer_support attribute value %d out "
"of range, allowed values are 0|1, setting it "
"to default value of 1\n", val);
/* By default, try to enable AER on a device */
phba->cfg_aer_support = 1;
return -EINVAL;
}
static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
lpfc_aer_support_show, lpfc_aer_support_store);
/**
* lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: containing flag 1 for aer cleanup state.
* @count: unused variable.
*
* Description:
* If the @buf contains 1 and the device currently has the AER support
* enabled, then invokes the kernel AER helper routine
* pci_cleanup_aer_uncorrect_error_status to clean up the uncorrectable
* error status register.
*
* Notes:
*
* Returns:
* -EINVAL if the buf does not contain the 1 or the device is not currently
* enabled with the AER support.
**/
static ssize_t
lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
int val, rc = -1;
if (!isdigit(buf[0]))
return -EINVAL;
if (sscanf(buf, "%i", &val) != 1)
return -EINVAL;
if (val != 1)
return -EINVAL;
if (phba->hba_flag & HBA_AER_ENABLED)
rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev);
if (rc == 0)
return strlen(buf);
else
return -EPERM;
}
static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
lpfc_aer_cleanup_state);
/**
* lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
*
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: containing the string the number of vfs to be enabled.
* @count: unused variable.
*
* Description:
* When this api is called through user sysfs, the driver shall
* try to enable or disable SR-IOV virtual functions according to the
* following:
*
* If zero virtual function has been enabled to the physical function,
* the driver shall invoke the pci enable virtual function api trying
* to enable the virtual functions. If the nr_vfn provided is greater
* than the maximum supported, -EINVAL will be returned without invoking
* the api; otherwise, the nr_vfn provided shall be used for invoking
* the api. If the api call returned success, the
* actual number of virtual functions enabled will be set to the driver
* cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver
* cfg_sriov_nr_virtfn remains zero.
*
* If non-zero virtual functions have already been enabled to the
* physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
* -EEXIST will be returned and the driver does nothing;
*
* If the nr_vfn provided is zero and non-zero virtual functions have
* been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
* disabling virtual function api shall be invoked to disable all the
* virtual functions and driver's cfg_sriov_nr_virtfn shall be set to
* zero. Otherwise, if zero virtual function has been enabled, do
* nothing.
*
* Returns:
* length of the buf on success if val is in range and the intended mode
* is supported.
* -EINVAL if val out of range or intended mode is not supported.
* -EEXIST if virtual functions are already enabled.
* -EPERM if enabling the virtual functions failed.
**/
static ssize_t
lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct pci_dev *pdev = phba->pcidev;
int val = 0, rc = -EINVAL;
/* Sanity check on user data */
if (!isdigit(buf[0]))
return -EINVAL;
if (sscanf(buf, "%i", &val) != 1)
return -EINVAL;
if (val < 0)
return -EINVAL;
/* Request disabling virtual functions */
if (val == 0) {
if (phba->cfg_sriov_nr_virtfn > 0) {
pci_disable_sriov(pdev);
phba->cfg_sriov_nr_virtfn = 0;
}
return strlen(buf);
}
/* Request enabling virtual functions */
if (phba->cfg_sriov_nr_virtfn > 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3018 There are %d virtual functions "
"enabled on physical function.\n",
phba->cfg_sriov_nr_virtfn);
return -EEXIST;
}
if (val <= LPFC_MAX_VFN_PER_PFN)
phba->cfg_sriov_nr_virtfn = val;
else {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3019 Enabling %d virtual functions is not "
"allowed.\n", val);
return -EINVAL;
}
rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
if (rc) {
phba->cfg_sriov_nr_virtfn = 0;
rc = -EPERM;
} else
rc = strlen(buf);
return rc;
}
static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
lpfc_param_show(sriov_nr_virtfn)
/**
* lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
* @phba: lpfc_hba pointer.
* @val: number of virtual functions to enable.
*
* Description:
* If val is in a valid range [0,255], then set the adapter's initial
* cfg_sriov_nr_virtfn field. If it's greater than the maximum, log an
* error and return -EINVAL. It will be up to the driver's probe_one
* routine to determine whether the device's SR-IOV is supported or not.
*
* Returns:
* zero if val saved.
* -EINVAL val out of range
**/
static int
lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
{
if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
phba->cfg_sriov_nr_virtfn = val;
return 0;
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3017 Enabling %d virtual functions is not "
"allowed.\n", val);
return -EINVAL;
}
static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
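/*
 * Illustrative sketch (standalone userspace) of the enable/disable
 * protocol described above: writing 0 tears down any enabled VFs, and a
 * non-zero count only succeeds while no VFs are enabled (else -EEXIST).
 * Host number and VF count are assumptions.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/class/scsi_host/host0/lpfc_sriov_nr_virtfn",
		      O_WRONLY);

	if (fd < 0)
		return 1;
	(void)write(fd, "0", 1);	/* disable any currently enabled VFs */
	(void)write(fd, "4", 1);	/* then enable four virtual functions */
	close(fd);
	return 0;
}
#endif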
/**
* lpfc_request_firmware_upgrade_store - Request for Linux generic firmware upgrade
*
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: containing the string "1" to request the firmware upgrade.
* @count: unused variable.
*
* Description:
* When buf contains 1, request a firmware upgrade through the SLI4
* firmware update interface (RUN_FW_UPGRADE).
*
* Returns:
* length of the buf on success if the upgrade request was accepted.
* -EINVAL if val is out of range.
* -EPERM if the firmware update request failed.
**/
static ssize_t
lpfc_request_firmware_upgrade_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
int val = 0, rc = -EINVAL;
/* Sanity check on user data */
if (!isdigit(buf[0]))
return -EINVAL;
if (sscanf(buf, "%i", &val) != 1)
return -EINVAL;
if (val != 1)
return -EINVAL;
rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
if (rc)
rc = -EPERM;
else
rc = strlen(buf);
return rc;
}
static int lpfc_req_fw_upgrade;
module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade");
lpfc_param_show(request_firmware_upgrade)
/**
* lpfc_request_firmware_upgrade_init - Enable initial linux generic fw upgrade
* @phba: lpfc_hba pointer.
* @val: 0 or 1.
*
* Description:
* Set the initial Linux generic firmware upgrade enable or disable flag.
*
* Returns:
* zero if val saved.
* -EINVAL val out of range
**/
static int
lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val)
{
if (val >= 0 && val <= 1) {
phba->cfg_request_firmware_upgrade = val;
return 0;
}
return -EINVAL;
}
static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR,
lpfc_request_firmware_upgrade_show,
lpfc_request_firmware_upgrade_store);
/**
* lpfc_fcp_imax_store - Set the maximum FCP interrupt rate for the adapter
*
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: string with the number of fast-path FCP interrupts per second.
* @count: unused variable.
*
* Description:
* If val is in a valid range [5000,5000000], then set the adapter's
* maximum number of fast-path FCP interrupts per second.
*
* Returns:
* length of the buf on success if val is in range and the intended mode
* is supported.
* -EINVAL if val out of range or intended mode is not supported.
**/
static ssize_t
lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
int val = 0, i;
/* fcp_imax is only valid for SLI4 */
if (phba->sli_rev != LPFC_SLI_REV4)
return -EINVAL;
/* Sanity check on user data */
if (!isdigit(buf[0]))
return -EINVAL;
if (sscanf(buf, "%i", &val) != 1)
return -EINVAL;
/*
* Value range for the HBA is [5000,5000000]
* The value for each EQ depends on how many EQs are configured.
*/
if (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX)
return -EINVAL;
phba->cfg_fcp_imax = (uint32_t)val;
for (i = 0; i < phba->cfg_fcp_io_channel; i += LPFC_MAX_EQ_DELAY)
lpfc_modify_fcp_eq_delay(phba, i);
return strlen(buf);
}
/*
# lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
# for the HBA.
#
# Value range is [5,000 to 5,000,000]. Default value is 50,000.
*/
static int lpfc_fcp_imax = LPFC_DEF_IMAX;
module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(lpfc_fcp_imax,
"Set the maximum number of FCP interrupts per second per HBA");
lpfc_param_show(fcp_imax)
/**
* lpfc_fcp_imax_init - Set the initial FCP interrupt rate limit
* @phba: lpfc_hba pointer.
* @val: maximum number of fast-path FCP interrupts per second.
*
* Description:
* If val is in a valid range [5000,5000000], then initialize the adapter's
* maximum number of fast-path FCP interrupts per second.
*
* Returns:
* zero if val saved.
* -EINVAL val out of range
**/
static int
lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
{
if (phba->sli_rev != LPFC_SLI_REV4) {
phba->cfg_fcp_imax = 0;
return 0;
}
if (val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) {
phba->cfg_fcp_imax = val;
return 0;
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3016 fcp_imax: %d out of range, using default\n", val);
phba->cfg_fcp_imax = LPFC_DEF_IMAX;
return 0;
}
static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
lpfc_fcp_imax_show, lpfc_fcp_imax_store);
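/*
 * Worked example of the note above that the per-EQ interrupt budget
 * depends on the EQ count: a 50,000/sec HBA-wide cap spread over 4 FCP
 * EQs leaves 12,500/sec per EQ. The even split is an assumption for
 * illustration; the driver's actual distribution is done by
 * lpfc_modify_fcp_eq_delay().
 */
#if 0
static unsigned int per_eq_interrupt_budget(unsigned int hba_imax,
					    unsigned int num_eqs)
{
	/* e.g. 50000 / 4 == 12500 interrupts per second per EQ */
	return num_eqs ? hba_imax / num_eqs : hba_imax;
}
#endif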
/**
* lpfc_fcp_cpu_map_show - Display current driver CPU affinity
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains text describing the CPU to IRQ vector mapping.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_vector_map_info *cpup;
int idx, len = 0;
if ((phba->sli_rev != LPFC_SLI_REV4) ||
(phba->intr_type != MSIX))
return len;
switch (phba->cfg_fcp_cpu_map) {
case 0:
len += snprintf(buf + len, PAGE_SIZE-len,
"fcp_cpu_map: No mapping (%d)\n",
phba->cfg_fcp_cpu_map);
return len;
case 1:
len += snprintf(buf + len, PAGE_SIZE-len,
"fcp_cpu_map: HBA centric mapping (%d): "
"%d online CPUs\n",
phba->cfg_fcp_cpu_map,
phba->sli4_hba.num_online_cpu);
break;
case 2:
len += snprintf(buf + len, PAGE_SIZE-len,
"fcp_cpu_map: Driver centric mapping (%d): "
"%d online CPUs\n",
phba->cfg_fcp_cpu_map,
phba->sli4_hba.num_online_cpu);
break;
}
cpup = phba->sli4_hba.cpu_map;
for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
len += snprintf(buf + len, PAGE_SIZE-len,
"CPU %02d io_chan %02d "
"physid %d coreid %d\n",
idx, cpup->channel_id, cpup->phys_id,
cpup->core_id);
else
len += snprintf(buf + len, PAGE_SIZE-len,
"CPU %02d io_chan %02d "
"physid %d coreid %d IRQ %d\n",
idx, cpup->channel_id, cpup->phys_id,
cpup->core_id, cpup->irq);
cpup++;
}
return len;
}
/**
* lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: one or more lpfc_polling_flags values.
* @count: not used.
*
* Returns:
* -EINVAL - Not implemented yet.
**/
static ssize_t
lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int status = -EINVAL;
return status;
}
/*
# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
# for the HBA.
#
# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2).
# 0 - Do not affinitize IRQ vectors
# 1 - Affinitize HBA vectors with respect to each HBA
# (start with CPU0 for each HBA)
# 2 - Affinitize HBA vectors with respect to the entire driver
# (round robin through all CPUs across all HBAs)
*/
static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(lpfc_fcp_cpu_map,
"Defines how to map CPUs to IRQ vectors per HBA");
/**
* lpfc_fcp_cpu_map_init - Set the initial CPU to IRQ vector mapping mode
* @phba: lpfc_hba pointer.
* @val: CPU mapping mode value.
*
* Description:
* If val is in a valid range [0-2], then affinitize the adapter's
* MSIX vectors.
*
* Returns:
* zero if val saved.
* -EINVAL val out of range
**/
static int
lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
{
if (phba->sli_rev != LPFC_SLI_REV4) {
phba->cfg_fcp_cpu_map = 0;
return 0;
}
if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
phba->cfg_fcp_cpu_map = val;
return 0;
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3326 fcp_cpu_map: %d out of range, using default\n",
val);
phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
return 0;
}
static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR,
lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store);
/*
# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
# Value range is [2,3]. Default value is 3.
*/
LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
"Select Fibre Channel class of service for FCP sequences");
/*
# lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
# is [0,1]. Default value is 0.
*/
LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
"Use ADISC on rediscovery to authenticate FCP devices");
/*
# lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
# depth. Default value is 0. When the value of this parameter is zero the
# SCSI command completion time is not used for controlling I/O queue depth. When
# the parameter is set to a non-zero value, the I/O queue depth is controlled
# to limit the I/O completion time to the parameter value.
# The value is set in milliseconds.
*/
static int lpfc_max_scsicmpl_time;
module_param(lpfc_max_scsicmpl_time, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_max_scsicmpl_time,
"Use command completion time to control queue depth");
lpfc_vport_param_show(max_scsicmpl_time);
lpfc_vport_param_init(max_scsicmpl_time, 0, 0, 60000);
static int
lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp, *next_ndlp;
if (val == vport->cfg_max_scsicmpl_time)
return 0;
if ((val < 0) || (val > 60000))
return -EINVAL;
vport->cfg_max_scsicmpl_time = val;
spin_lock_irq(shost->host_lock);
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (!NLP_CHK_NODE_ACT(ndlp))
continue;
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
}
spin_unlock_irq(shost->host_lock);
return 0;
}
lpfc_vport_param_store(max_scsicmpl_time);
static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR,
lpfc_max_scsicmpl_time_show,
lpfc_max_scsicmpl_time_store);
/*
# lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
# range is [0,1]. Default value is 0.
*/
LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
/*
# lpfc_fcp_io_sched: Determine scheduling algorithm for issuing FCP cmds
# range is [0,1]. Default value is 0.
# For [0], FCP commands are issued to Work Queues in a round robin fashion.
# For [1], FCP commands are issued to a Work Queue associated with the
# current CPU.
*/
LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for "
"issuing commands [0] - Round Robin, [1] - Current CPU");
/*
# lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
# range is [0,1]. Default value is 0.
# For [0], bus reset issues target reset to ALL devices
# For [1], bus reset issues target reset to non-FCP2 devices
*/
LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for "
"FCP2 devices [0] - issue tgt reset, [1] - no tgt reset");
/*
# lpfc_cr_delay & lpfc_cr_count: Default values for I/O colaesing
# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
# value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
# is 0. Default value of cr_count is 1. The cr_count feature is disabled if
# cr_delay is set to 0.
*/
LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an "
"interrupt response is generated");
LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
"interrupt response is generated");
/*
# lpfc_multi_ring_support: Determines how many rings to spread available
# cmd/rsp IOCB entries across.
# Value range is [1,2]. Default value is 1.
*/
LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
"SLI rings to spread IOCB entries across");
/*
# lpfc_multi_ring_rctl: If lpfc_multi_ring_support is enabled, this
# identifies what rctl value to configure the additional ring for.
# Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
*/
LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
255, "Identifies RCTL for additional ring configuration");
/*
# lpfc_multi_ring_type: If lpfc_multi_ring_support is enabled, this
# identifies what type value to configure the additional ring for.
# Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
*/
LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
255, "Identifies TYPE for additional ring configuration");
/*
# lpfc_fdmi_on: controls FDMI support.
# 0 = no FDMI support
# 1 = support FDMI without attribute of hostname
# 2 = support FDMI with attribute of hostname
# Value range [0,2]. Default value is 0.
*/
LPFC_VPORT_ATTR_RW(fdmi_on, 0, 0, 2, "Enable FDMI support");
/*
# Specifies the maximum number of ELS cmds we can have outstanding (for
# discovery). Value range is [1,64]. Default value = 32.
*/
LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
"during discovery");
/*
# lpfc_max_luns: maximum allowed LUN.
# Value range is [0,65535]. Default value is 255.
# NOTE: The SCSI layer might probe all allowed LUNs on some old targets.
*/
LPFC_VPORT_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN");
/*
# lpfc_poll_tmo: Milliseconds driver will wait between polling FCP ring.
# Value range is [1,255], default value is 10.
*/
LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
"Milliseconds driver will wait between polling FCP ring");
/*
# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
# support this feature
# 0 = MSI disabled
# 1 = MSI enabled
# 2 = MSI-X enabled (default)
# Value range is [0,2]. Default value is 2.
*/
LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
"MSI-X (2), if possible");
/*
# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
# This parameter is ignored and will eventually be deprecated
#
# Value range is [1,7]. Default value is 4.
*/
LPFC_ATTR_R(fcp_wq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
LPFC_FCP_IO_CHAN_MAX,
"Set the number of fast-path FCP work queues, if possible");
/*
# lpfc_fcp_eq_count: Set the number of FCP EQ/CQ/WQ IO channels
#
# Value range is [1,7]. Default value is 4.
*/
LPFC_ATTR_R(fcp_eq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
LPFC_FCP_IO_CHAN_MAX,
"Set the number of fast-path FCP event queues, if possible");
/*
# lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels
#
# Value range is [1,7]. Default value is 4.
*/
LPFC_ATTR_R(fcp_io_channel, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
LPFC_FCP_IO_CHAN_MAX,
"Set the number of FCP I/O channels");
/*
# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
# 0 = HBA resets disabled
# 1 = HBA resets enabled (default)
# Value range is [0,1]. Default value is 1.
*/
LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
/*
# lpfc_enable_hba_heartbeat: Enable or disable the HBA heartbeat timer.
# 0 = HBA Heartbeat disabled (default)
# 1 = HBA Heartbeat enabled
# Value range is [0,1]. Default value is 0.
*/
LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
/*
# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
# 0 = BlockGuard disabled (default)
# 1 = BlockGuard enabled
# Value range is [0,1]. Default value is 0.
*/
LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
/*
# lpfc_fcp_look_ahead: Look ahead for completions in FCP start routine
# 0 = disabled (default)
# 1 = enabled
# Value range is [0,1]. Default value is 0.
#
# This feature is under investigation and may be supported in the future.
*/
unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
/*
# lpfc_prot_mask:
# - Bit mask of host protection capabilities used to register with the
# SCSI mid-layer
# - Only meaningful if BG is turned on (lpfc_enable_bg=1).
# - Allows you to ultimately specify which profiles to use
# - Default will result in registering capabilities for all profiles.
# - SHOST_DIF_TYPE1_PROTECTION 1
# HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection
# - SHOST_DIX_TYPE0_PROTECTION 8
# HBA supports DIX Type 0: Host to HBA protection only
# - SHOST_DIX_TYPE1_PROTECTION 16
# HBA supports DIX Type 1: Host to HBA Type 1 protection
#
*/
unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
SHOST_DIX_TYPE0_PROTECTION |
SHOST_DIX_TYPE1_PROTECTION;
module_param(lpfc_prot_mask, uint, S_IRUGO);
MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
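/*
 * Worked example: the default mask above is
 * SHOST_DIF_TYPE1_PROTECTION (1) | SHOST_DIX_TYPE0_PROTECTION (8) |
 * SHOST_DIX_TYPE1_PROTECTION (16) = 25, so the same capabilities can be
 * requested explicitly with:
 *
 *   # modprobe lpfc lpfc_enable_bg=1 lpfc_prot_mask=25
 */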
/*
# lpfc_prot_guard:
# - Bit mask of protection guard types to register with the SCSI mid-layer
# - Guard types are currently either 1) T10-DIF CRC 2) IP checksum
# - Allows you to ultimately specify which profiles to use
# - Default will result in registering capabilities for all guard types
#
*/
unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
module_param(lpfc_prot_guard, byte, S_IRUGO);
MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
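/*
 * Usage sketch: guard values follow the SCSI mid-layer SHOST_DIX_GUARD_*
 * flags (assumed here: CRC = 1, IP checksum = 2), so selecting T10-DIF
 * CRC guard instead of the IP-checksum default would be:
 *
 *   # modprobe lpfc lpfc_prot_guard=1
 */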
/*
* Delay initial NPort discovery when Clean Address bit is cleared in
* FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed.
* This parameter can have value 0 or 1.
* When this parameter is set to 0, no delay is added to the initial
* discovery.
* When this parameter is set to non-zero value, initial Nport discovery is
* delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC
* accept and FCID/Fabric name/Fabric portname is changed.
* The driver always delays NPort discovery for subsequent FLOGI/FDISC
* completions when the Clean Address bit is cleared in the FLOGI/FDISC
* accept and the FCID/Fabric name/Fabric portname is changed.
* Default value is 0.
*/
int lpfc_delay_discovery;
module_param(lpfc_delay_discovery, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_delay_discovery,
"Delay NPort discovery when Clean Address bit is cleared. "
"Allowed values: 0,1.");
/*
* lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
* This value can be set to values between 64 and 4096. The default value is
* 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
* will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
* Because of the additional overhead involved in setting up T10-DIF,
* this parameter will be limited to 128 if BlockGuard is enabled under SLI4
* and will be limited to 512 if BlockGuard is enabled under SLI3.
*/
LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
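/*
 * Worked example (assuming 4 KiB scatter/gather segments): the default of
 * 64 segments allows I/Os up to 64 * 4 KiB = 256 KiB; raising the count
 * to 256 would raise that ceiling to 1 MiB:
 *
 *   # modprobe lpfc lpfc_sg_seg_cnt=256
 */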
/*
* This parameter will be deprecated; the driver cannot limit the
* protection data s/g list.
*/
LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT,
LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT,
"Max Protection Scatter Gather Segment Count");
struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_bg_info,
&dev_attr_bg_guard_err,
&dev_attr_bg_apptag_err,
&dev_attr_bg_reftag_err,
&dev_attr_info,
&dev_attr_serialnum,
&dev_attr_modeldesc,
&dev_attr_modelname,
&dev_attr_programtype,
&dev_attr_portnum,
&dev_attr_fwrev,
&dev_attr_hdw,
&dev_attr_option_rom_version,
&dev_attr_link_state,
&dev_attr_num_discovered_ports,
&dev_attr_menlo_mgmt_mode,
&dev_attr_lpfc_drvr_version,
&dev_attr_lpfc_enable_fip,
&dev_attr_lpfc_temp_sensor,
&dev_attr_lpfc_log_verbose,
&dev_attr_lpfc_lun_queue_depth,
&dev_attr_lpfc_tgt_queue_depth,
&dev_attr_lpfc_hba_queue_depth,
&dev_attr_lpfc_peer_port_login,
&dev_attr_lpfc_nodev_tmo,
&dev_attr_lpfc_devloss_tmo,
&dev_attr_lpfc_fcp_class,
&dev_attr_lpfc_use_adisc,
&dev_attr_lpfc_ack0,
&dev_attr_lpfc_topology,
&dev_attr_lpfc_scan_down,
&dev_attr_lpfc_link_speed,
&dev_attr_lpfc_fcp_io_sched,
&dev_attr_lpfc_fcp2_no_tgt_reset,
&dev_attr_lpfc_cr_delay,
&dev_attr_lpfc_cr_count,
&dev_attr_lpfc_multi_ring_support,
&dev_attr_lpfc_multi_ring_rctl,
&dev_attr_lpfc_multi_ring_type,
&dev_attr_lpfc_fdmi_on,
&dev_attr_lpfc_max_luns,
&dev_attr_lpfc_enable_npiv,
&dev_attr_lpfc_fcf_failover_policy,
&dev_attr_lpfc_enable_rrq,
&dev_attr_nport_evt_cnt,
&dev_attr_board_mode,
&dev_attr_max_vpi,
&dev_attr_used_vpi,
&dev_attr_max_rpi,
&dev_attr_used_rpi,
&dev_attr_max_xri,
&dev_attr_used_xri,
&dev_attr_npiv_info,
&dev_attr_issue_reset,
&dev_attr_lpfc_poll,
&dev_attr_lpfc_poll_tmo,
&dev_attr_lpfc_use_msi,
&dev_attr_lpfc_fcp_imax,
&dev_attr_lpfc_fcp_cpu_map,
&dev_attr_lpfc_fcp_wq_count,
&dev_attr_lpfc_fcp_eq_count,
&dev_attr_lpfc_fcp_io_channel,
&dev_attr_lpfc_enable_bg,
&dev_attr_lpfc_soft_wwnn,
&dev_attr_lpfc_soft_wwpn,
&dev_attr_lpfc_soft_wwn_enable,
&dev_attr_lpfc_enable_hba_reset,
&dev_attr_lpfc_enable_hba_heartbeat,
&dev_attr_lpfc_sg_seg_cnt,
&dev_attr_lpfc_max_scsicmpl_time,
&dev_attr_lpfc_stat_data_ctrl,
&dev_attr_lpfc_prot_sg_seg_cnt,
&dev_attr_lpfc_aer_support,
&dev_attr_lpfc_aer_state_cleanup,
&dev_attr_lpfc_sriov_nr_virtfn,
&dev_attr_lpfc_req_fw_upgrade,
&dev_attr_lpfc_suppress_link_up,
&dev_attr_lpfc_iocb_cnt,
&dev_attr_iocb_hw,
&dev_attr_txq_hw,
&dev_attr_txcmplq_hw,
&dev_attr_lpfc_fips_level,
&dev_attr_lpfc_fips_rev,
&dev_attr_lpfc_dss,
&dev_attr_lpfc_sriov_hw_max_virtfn,
&dev_attr_protocol,
NULL,
};
struct device_attribute *lpfc_vport_attrs[] = {
&dev_attr_info,
&dev_attr_link_state,
&dev_attr_num_discovered_ports,
&dev_attr_lpfc_drvr_version,
&dev_attr_lpfc_log_verbose,
&dev_attr_lpfc_lun_queue_depth,
&dev_attr_lpfc_tgt_queue_depth,
&dev_attr_lpfc_nodev_tmo,
&dev_attr_lpfc_devloss_tmo,
&dev_attr_lpfc_hba_queue_depth,
&dev_attr_lpfc_peer_port_login,
&dev_attr_lpfc_restrict_login,
&dev_attr_lpfc_fcp_class,
&dev_attr_lpfc_use_adisc,
&dev_attr_lpfc_fdmi_on,
&dev_attr_lpfc_max_luns,
&dev_attr_nport_evt_cnt,
&dev_attr_npiv_info,
&dev_attr_lpfc_enable_da_id,
&dev_attr_lpfc_max_scsicmpl_time,
&dev_attr_lpfc_stat_data_ctrl,
&dev_attr_lpfc_static_vport,
&dev_attr_lpfc_fips_level,
&dev_attr_lpfc_fips_rev,
NULL,
};
/**
* sysfs_ctlreg_write - Write method for writing to ctlreg
* @filp: open sysfs file
* @kobj: kernel kobject that contains the kernel class device.
* @bin_attr: kernel attributes passed to us.
* @buf: contains the data to be written to the adapter IOREG space.
* @off: offset into buffer to beginning of data.
* @count: bytes to transfer.
*
* Description:
* Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
* Uses the adapter io control registers to send buf contents to the adapter.
*
* Returns:
* -ERANGE off and count combo out of range
* -EINVAL off, count or buff address invalid
* -EPERM adapter is offline
* value of count, buf contents written
**/
static ssize_t
sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
size_t buf_off;
struct device *dev = container_of(kobj, struct device, kobj);
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
if (phba->sli_rev >= LPFC_SLI_REV4)
return -EPERM;
if ((off + count) > FF_REG_AREA_SIZE)
return -ERANGE;
if (count <= LPFC_REG_WRITE_KEY_SIZE)
return 0;
if (off % 4 || count % 4 || (unsigned long)buf % 4)
return -EINVAL;
/* This is to protect HBA registers from accidental writes. */
if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
return -EINVAL;
if (!(vport->fc_flag & FC_OFFLINE_MODE))
return -EPERM;
spin_lock_irq(&phba->hbalock);
for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE;
buf_off += sizeof(uint32_t))
writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)),
phba->ctrl_regs_memmap_p + off + buf_off);
spin_unlock_irq(&phba->hbalock);
return count;
}
/**
* sysfs_ctlreg_read - Read method for reading from ctlreg
* @filp: open sysfs file
* @kobj: kernel kobject that contains the kernel class device.
* @bin_attr: kernel attributes passed to us.
* @buf: if successful contains the data from the adapter IOREG space.
* @off: offset into buffer to beginning of data.
* @count: bytes to transfer.
*
* Description:
* Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
* Uses the adapter io control registers to read data into buf.
*
* Returns:
* -ERANGE off and count combo out of range
* -EINVAL off, count or buff address invalid
* value of count, buf contents read
**/
static ssize_t
sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
size_t buf_off;
uint32_t *tmp_ptr;
struct device *dev = container_of(kobj, struct device, kobj);
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
if (phba->sli_rev >= LPFC_SLI_REV4)
return -EPERM;
if (off > FF_REG_AREA_SIZE)
return -ERANGE;
if ((off + count) > FF_REG_AREA_SIZE)
count = FF_REG_AREA_SIZE - off;
if (count == 0)
return 0;
if (off % 4 || count % 4 || (unsigned long)buf % 4)
return -EINVAL;
spin_lock_irq(&phba->hbalock);
for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
tmp_ptr = (uint32_t *)(buf + buf_off);
*tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
}
spin_unlock_irq(&phba->hbalock);
return count;
}
static struct bin_attribute sysfs_ctlreg_attr = {
.attr = {
.name = "ctlreg",
.mode = S_IRUSR | S_IWUSR,
},
.size = 256,
.read = sysfs_ctlreg_read,
.write = sysfs_ctlreg_write,
};
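/*
 * Usage sketch (host number hypothetical): reads of the binary attribute
 * above must use 4-byte aligned offsets and lengths, e.g. dumping the
 * first 16 bytes of register space:
 *
 *   # dd if=/sys/class/scsi_host/host6/ctlreg bs=4 count=4 2>/dev/null | od -t x4
 *
 * Writes must be 4-byte aligned too, prefixed with the LPFC_REG_WRITE_KEY
 * bytes checked in sysfs_ctlreg_write(), and the port must be offline
 * (FC_OFFLINE_MODE) or the write fails with -EPERM.
 */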
/**
* sysfs_mbox_write - Write method for writing information via mbox
* @filp: open sysfs file
* @kobj: kernel kobject that contains the kernel class device.
* @bin_attr: kernel attributes passed to us.
* @buf: contains the data to be written to sysfs mbox.
* @off: offset into buffer to beginning of data.
* @count: bytes to transfer.
*
* Description:
* Deprecated function. All mailbox access from user space is performed via the
* bsg interface.
*
* Returns:
* -EPERM operation not permitted
**/
static ssize_t
sysfs_mbox_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
return -EPERM;
}
/**
* sysfs_mbox_read - Read method for reading information via mbox
* @filp: open sysfs file
* @kobj: kernel kobject that contains the kernel class device.
* @bin_attr: kernel attributes passed to us.
* @buf: contains the data to be read from sysfs mbox.
* @off: offset into buffer to beginning of data.
* @count: bytes to transfer.
*
* Description:
* Deprecated function. All mailbox access from user space is performed via the
* bsg interface.
*
* Returns:
* -EPERM operation not permitted
**/
static ssize_t
sysfs_mbox_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
return -EPERM;
}
static struct bin_attribute sysfs_mbox_attr = {
.attr = {
.name = "mbox",
.mode = S_IRUSR | S_IWUSR,
},
.size = MAILBOX_SYSFS_MAX,
.read = sysfs_mbox_read,
.write = sysfs_mbox_write,
};
/**
* lpfc_alloc_sysfs_attr - Creates the ctlreg and mbox entries
* @vport: address of lpfc vport structure.
*
* Return codes:
* zero on success
* error return code from sysfs_create_bin_file()
**/
int
lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
int error;
error = sysfs_create_bin_file(&shost->shost_dev.kobj,
&sysfs_drvr_stat_data_attr);
/* Virtual ports do not need ctrl_reg and mbox */
if (error || vport->port_type == LPFC_NPIV_PORT)
goto out;
error = sysfs_create_bin_file(&shost->shost_dev.kobj,
&sysfs_ctlreg_attr);
if (error)
goto out_remove_stat_attr;
error = sysfs_create_bin_file(&shost->shost_dev.kobj,
&sysfs_mbox_attr);
if (error)
goto out_remove_ctlreg_attr;
return 0;
out_remove_ctlreg_attr:
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
out_remove_stat_attr:
sysfs_remove_bin_file(&shost->shost_dev.kobj,
&sysfs_drvr_stat_data_attr);
out:
return error;
}
/**
* lpfc_free_sysfs_attr - Removes the ctlreg and mbox entries
* @vport: address of lpfc vport structure.
**/
void
lpfc_free_sysfs_attr(struct lpfc_vport *vport)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
sysfs_remove_bin_file(&shost->shost_dev.kobj,
&sysfs_drvr_stat_data_attr);
/* Virtual ports do not need ctrl_reg and mbox */
if (vport->port_type == LPFC_NPIV_PORT)
return;
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
}
/*
* Dynamic FC Host Attributes Support
*/
/**
* lpfc_get_host_port_id - Copy the vport DID into the scsi host port id
* @shost: kernel scsi host pointer.
**/
static void
lpfc_get_host_port_id(struct Scsi_Host *shost)
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
/* note: fc_myDID already in cpu endianness */
fc_host_port_id(shost) = vport->fc_myDID;
}
/**
* lpfc_get_host_port_type - Set the value of the scsi host port type
* @shost: kernel scsi host pointer.
**/
static void
lpfc_get_host_port_type(struct Scsi_Host *shost)
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
spin_lock_irq(shost->host_lock);
if (vport->port_type == LPFC_NPIV_PORT) {
fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
} else if (lpfc_is_link_up(phba)) {
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
if (vport->fc_flag & FC_PUBLIC_LOOP)
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
else
fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
} else {
if (vport->fc_flag & FC_FABRIC)
fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
else
fc_host_port_type(shost) = FC_PORTTYPE_PTP;
}
} else
fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
spin_unlock_irq(shost->host_lock);
}
/**
* lpfc_get_host_port_state - Set the value of the scsi host port state
* @shost: kernel scsi host pointer.
**/
static void
lpfc_get_host_port_state(struct Scsi_Host *shost)
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
spin_lock_irq(shost->host_lock);
if (vport->fc_flag & FC_OFFLINE_MODE)
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
else {
switch (phba->link_state) {
case LPFC_LINK_UNKNOWN:
case LPFC_LINK_DOWN:
fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
break;
case LPFC_LINK_UP:
case LPFC_CLEAR_LA:
case LPFC_HBA_READY:
/* Link is up; report port state accordingly */
if (vport->port_state < LPFC_VPORT_READY)
fc_host_port_state(shost) =
FC_PORTSTATE_BYPASSED;
else
fc_host_port_state(shost) =
FC_PORTSTATE_ONLINE;
break;
case LPFC_HBA_ERROR:
fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
break;
default:
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
break;
}
}
spin_unlock_irq(shost->host_lock);
}
/**
* lpfc_get_host_speed - Set the value of the scsi host speed
* @shost: kernel scsi host pointer.
**/
static void
lpfc_get_host_speed(struct Scsi_Host *shost)
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
spin_lock_irq(shost->host_lock);
if (lpfc_is_link_up(phba)) {
switch (phba->fc_linkspeed) {
case LPFC_LINK_SPEED_1GHZ:
fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
break;
case LPFC_LINK_SPEED_2GHZ:
fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
break;
case LPFC_LINK_SPEED_4GHZ:
fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
break;
case LPFC_LINK_SPEED_8GHZ:
fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
break;
case LPFC_LINK_SPEED_10GHZ:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
case LPFC_LINK_SPEED_16GHZ:
fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
break;
default:
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
} else
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
spin_unlock_irq(shost->host_lock);
}
/**
* lpfc_get_host_fabric_name - Set the value of the scsi host fabric name
* @shost: kernel scsi host pointer.
**/
static void
lpfc_get_host_fabric_name (struct Scsi_Host *shost)
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
u64 node_name;
spin_lock_irq(shost->host_lock);
if ((vport->port_state > LPFC_FLOGI) &&
((vport->fc_flag & FC_FABRIC) ||
((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
(vport->fc_flag & FC_PUBLIC_LOOP))))
node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
else
/* fabric is local port if there is no F/FL_Port */
node_name = 0;
spin_unlock_irq(shost->host_lock);
fc_host_fabric_name(shost) = node_name;
}
/**
* lpfc_get_stats - Return statistical information about the adapter
* @shost: kernel scsi host pointer.
*
* Notes:
* NULL on error for link down, no mbox pool, sli2 active,
* management not allowed, memory allocation error, or mbox error.
*
* Returns:
* NULL for error
* address of the adapter host statistics
**/
static struct fc_host_statistics *
lpfc_get_stats(struct Scsi_Host *shost)
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli *psli = &phba->sli;
struct fc_host_statistics *hs = &phba->link_stats;
struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
unsigned long seconds;
int rc = 0;
/*
* prevent udev from issuing mailbox commands until the port is
* configured.
*/
if (phba->link_state < LPFC_LINK_DOWN ||
!phba->mbox_mem_pool ||
(phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
return NULL;
if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
return NULL;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq)
return NULL;
memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
pmb = &pmboxq->u.mb;
pmb->mbxCommand = MBX_READ_STATUS;
pmb->mbxOwner = OWN_HOST;
pmboxq->context1 = NULL;
pmboxq->vport = vport;
if (vport->fc_flag & FC_OFFLINE_MODE)
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
if (rc != MBX_TIMEOUT)
mempool_free(pmboxq, phba->mbox_mem_pool);
return NULL;
}
memset(hs, 0, sizeof (struct fc_host_statistics));
hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
/*
* The MBX_READ_STATUS returns tx_k_bytes which has to be
* converted to words (1 KB = 256 four-byte words)
*/
hs->tx_words = (uint64_t)
((uint64_t)pmb->un.varRdStatus.xmitByteCnt
* (uint64_t)256);
hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
hs->rx_words = (uint64_t)
((uint64_t)pmb->un.varRdStatus.rcvByteCnt
* (uint64_t)256);
memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
pmb->mbxCommand = MBX_READ_LNK_STAT;
pmb->mbxOwner = OWN_HOST;
pmboxq->context1 = NULL;
pmboxq->vport = vport;
if (vport->fc_flag & FC_OFFLINE_MODE)
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
if (rc != MBX_TIMEOUT)
mempool_free(pmboxq, phba->mbox_mem_pool);
return NULL;
}
hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
hs->error_frames = pmb->un.varRdLnk.crcCnt;
hs->link_failure_count -= lso->link_failure_count;
hs->loss_of_sync_count -= lso->loss_of_sync_count;
hs->loss_of_signal_count -= lso->loss_of_signal_count;
hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
hs->invalid_crc_count -= lso->invalid_crc_count;
hs->error_frames -= lso->error_frames;
if (phba->hba_flag & HBA_FCOE_MODE) {
hs->lip_count = -1;
hs->nos_count = (phba->link_events >> 1);
hs->nos_count -= lso->link_events;
} else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
hs->lip_count = (phba->fc_eventTag >> 1);
hs->lip_count -= lso->link_events;
hs->nos_count = -1;
} else {
hs->lip_count = -1;
hs->nos_count = (phba->fc_eventTag >> 1);
hs->nos_count -= lso->link_events;
}
hs->dumped_frames = -1;
seconds = get_seconds();
if (seconds < psli->stats_start)
hs->seconds_since_last_reset = seconds +
((unsigned long)-1 - psli->stats_start);
else
hs->seconds_since_last_reset = seconds - psli->stats_start;
mempool_free(pmboxq, phba->mbox_mem_pool);
return hs;
}
/**
* lpfc_reset_stats - Copy the adapter link stats information
* @shost: kernel scsi host pointer.
**/
static void
lpfc_reset_stats(struct Scsi_Host *shost)
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
int rc = 0;
if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
return;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq)
return;
memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
pmb = &pmboxq->u.mb;
pmb->mbxCommand = MBX_READ_STATUS;
pmb->mbxOwner = OWN_HOST;
pmb->un.varWords[0] = 0x1; /* reset request */
pmboxq->context1 = NULL;
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
if (rc != MBX_TIMEOUT)
mempool_free(pmboxq, phba->mbox_mem_pool);
return;
}
memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
pmb->mbxCommand = MBX_READ_LNK_STAT;
pmb->mbxOwner = OWN_HOST;
pmboxq->context1 = NULL;
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
if (rc != MBX_TIMEOUT)
mempool_free(pmboxq, phba->mbox_mem_pool);
return;
}
lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
lso->error_frames = pmb->un.varRdLnk.crcCnt;
if (phba->hba_flag & HBA_FCOE_MODE)
lso->link_events = (phba->link_events >> 1);
else
lso->link_events = (phba->fc_eventTag >> 1);
psli->stats_start = get_seconds();
mempool_free(pmboxq, phba->mbox_mem_pool);
return;
}
/*
* The LPFC driver treats linkdown handling as target loss events so there
* are no sysfs handlers for link_down_tmo.
*/
/**
* lpfc_get_node_by_target - Return the nodelist for a target
* @starget: kernel scsi target pointer.
*
* Returns:
* address of the node list if found
* NULL target not found
**/
static struct lpfc_nodelist *
lpfc_get_node_by_target(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_nodelist *ndlp;
spin_lock_irq(shost->host_lock);
/* Search for this mapped target ID */
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (NLP_CHK_NODE_ACT(ndlp) &&
ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
starget->id == ndlp->nlp_sid) {
spin_unlock_irq(shost->host_lock);
return ndlp;
}
}
spin_unlock_irq(shost->host_lock);
return NULL;
}
/**
* lpfc_get_starget_port_id - Set the target port id to the ndlp DID or -1
* @starget: kernel scsi target pointer.
**/
static void
lpfc_get_starget_port_id(struct scsi_target *starget)
{
struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1;
}
/**
* lpfc_get_starget_node_name - Set the target node name
* @starget: kernel scsi target pointer.
*
* Description: Set the target node name to the ndlp node name wwn or zero.
**/
static void
lpfc_get_starget_node_name(struct scsi_target *starget)
{
struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
fc_starget_node_name(starget) =
ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0;
}
/**
* lpfc_get_starget_port_name - Set the target port name
* @starget: kernel scsi target pointer.
*
* Description: set the target port name to the ndlp port name wwn or zero.
**/
static void
lpfc_get_starget_port_name(struct scsi_target *starget)
{
struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
fc_starget_port_name(starget) =
ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0;
}
/**
* lpfc_set_rport_loss_tmo - Set the rport dev loss tmo
* @rport: fc rport address.
* @timeout: new value for dev loss tmo.
*
* Description:
* If timeout is non zero set the dev_loss_tmo to timeout, else set
* dev_loss_tmo to one.
**/
static void
lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
if (timeout)
rport->dev_loss_tmo = timeout;
else
rport->dev_loss_tmo = 1;
}
/**
* lpfc_rport_show_function - Return rport target information
*
* Description:
* Macro that uses @field to generate a show function named
* lpfc_show_rport_##field, which returns the bytes formatted in buf.
* @cdev: class device converted to an fc_rport.
* @buf: on return contains the target field or zero.
*
* Returns: size of formatted string.
**/
#define lpfc_rport_show_function(field, format_string, sz, cast) \
static ssize_t \
lpfc_show_rport_##field (struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct fc_rport *rport = transport_class_to_rport(dev); \
struct lpfc_rport_data *rdata = rport->hostdata; \
return snprintf(buf, sz, format_string, \
(rdata->target) ? cast rdata->target->field : 0); \
}
#define lpfc_rport_rd_attr(field, format_string, sz) \
lpfc_rport_show_function(field, format_string, sz, ) \
static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
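/*
 * Expansion sketch: a hypothetical instantiation such as
 *
 *   lpfc_rport_rd_attr(nlp_sid, "%d\n", 20);
 *
 * would emit lpfc_show_rport_nlp_sid(), formatting
 * rdata->target->nlp_sid into buf (or 0 when no target is bound), and a
 * read-only FC_RPORT_ATTR wiring that show routine into the transport.
 */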
/**
* lpfc_set_vport_symbolic_name - Set the vport's symbolic name
* @fc_vport: The fc_vport whose symbolic name has been changed.
*
* Description:
* This function is called by the transport after the @fc_vport's symbolic name
* has been changed. This function re-registers the symbolic name with the
* switch to propagate the change into the fabric if the vport is active.
**/
static void
lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
{
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
if (vport->port_state == LPFC_VPORT_READY)
lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
}
/**
* lpfc_hba_log_verbose_init - Set hba's log verbose level
* @phba: Pointer to lpfc_hba struct.
* @verbose: verbose logging mask.
*
* This function is called by the lpfc_get_cfgparam() routine to copy the
* module parameter lpfc_log_verbose into the @phba cfg_log_verbose field,
* so that log messages are filtered by the module's lpfc_log_verbose
* setting before any hba port or vport is created.
**/
static void
lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
{
phba->cfg_log_verbose = verbose;
}
struct fc_function_template lpfc_transport_functions = {
/* fixed attributes the driver supports */
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
.show_host_supported_fc4s = 1,
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
.show_host_symbolic_name = 1,
/* dynamic attributes the driver supports */
.get_host_port_id = lpfc_get_host_port_id,
.show_host_port_id = 1,
.get_host_port_type = lpfc_get_host_port_type,
.show_host_port_type = 1,
.get_host_port_state = lpfc_get_host_port_state,
.show_host_port_state = 1,
/* active_fc4s is shown but doesn't change (thus no get function) */
.show_host_active_fc4s = 1,
.get_host_speed = lpfc_get_host_speed,
.show_host_speed = 1,
.get_host_fabric_name = lpfc_get_host_fabric_name,
.show_host_fabric_name = 1,
/*
* The LPFC driver treats linkdown handling as target loss events
* so there are no sysfs handlers for link_down_tmo.
*/
.get_fc_host_stats = lpfc_get_stats,
.reset_fc_host_stats = lpfc_reset_stats,
.dd_fcrport_size = sizeof(struct lpfc_rport_data),
.show_rport_maxframe_size = 1,
.show_rport_supported_classes = 1,
.set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
.show_rport_dev_loss_tmo = 1,
.get_starget_port_id = lpfc_get_starget_port_id,
.show_starget_port_id = 1,
.get_starget_node_name = lpfc_get_starget_node_name,
.show_starget_node_name = 1,
.get_starget_port_name = lpfc_get_starget_port_name,
.show_starget_port_name = 1,
.issue_fc_host_lip = lpfc_issue_lip,
.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
.terminate_rport_io = lpfc_terminate_rport_io,
.dd_fcvport_size = sizeof(struct lpfc_vport *),
.vport_disable = lpfc_vport_disable,
.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
.bsg_request = lpfc_bsg_request,
.bsg_timeout = lpfc_bsg_timeout,
};
struct fc_function_template lpfc_vport_transport_functions = {
/* fixed attributes the driver supports */
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
.show_host_supported_fc4s = 1,
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
.show_host_symbolic_name = 1,
/* dynamic attributes the driver supports */
.get_host_port_id = lpfc_get_host_port_id,
.show_host_port_id = 1,
.get_host_port_type = lpfc_get_host_port_type,
.show_host_port_type = 1,
.get_host_port_state = lpfc_get_host_port_state,
.show_host_port_state = 1,
/* active_fc4s is shown but doesn't change (thus no get function) */
.show_host_active_fc4s = 1,
.get_host_speed = lpfc_get_host_speed,
.show_host_speed = 1,
.get_host_fabric_name = lpfc_get_host_fabric_name,
.show_host_fabric_name = 1,
/*
* The LPFC driver treats linkdown handling as target loss events
* so there are no sysfs handlers for link_down_tmo.
*/
.get_fc_host_stats = lpfc_get_stats,
.reset_fc_host_stats = lpfc_reset_stats,
.dd_fcrport_size = sizeof(struct lpfc_rport_data),
.show_rport_maxframe_size = 1,
.show_rport_supported_classes = 1,
.set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
.show_rport_dev_loss_tmo = 1,
.get_starget_port_id = lpfc_get_starget_port_id,
.show_starget_port_id = 1,
.get_starget_node_name = lpfc_get_starget_node_name,
.show_starget_node_name = 1,
.get_starget_port_name = lpfc_get_starget_port_name,
.show_starget_port_name = 1,
.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
.terminate_rport_io = lpfc_terminate_rport_io,
.vport_disable = lpfc_vport_disable,
.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
};
/**
* lpfc_get_cfgparam - Used during probe_one to init the adapter structure
* @phba: lpfc_hba pointer.
**/
void
lpfc_get_cfgparam(struct lpfc_hba *phba)
{
lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
lpfc_cr_delay_init(phba, lpfc_cr_delay);
lpfc_cr_count_init(phba, lpfc_cr_count);
lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
lpfc_ack0_init(phba, lpfc_ack0);
lpfc_topology_init(phba, lpfc_topology);
lpfc_link_speed_init(phba, lpfc_link_speed);
lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
lpfc_enable_bg_init(phba, lpfc_enable_bg);
if (phba->sli_rev == LPFC_SLI_REV4)
phba->cfg_poll = 0;
else
phba->cfg_poll = lpfc_poll;
phba->cfg_soft_wwnn = 0L;
phba->cfg_soft_wwpn = 0L;
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_aer_support_init(phba, lpfc_aer_support);
lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
phba->cfg_enable_dss = 1;
return;
}
/**
* lpfc_get_vport_cfgparam - Used during port create, init the vport structure
* @vport: lpfc_vport pointer.
**/
void
lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
{
lpfc_log_verbose_init(vport, lpfc_log_verbose);
lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth);
lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
lpfc_restrict_login_init(vport, lpfc_restrict_login);
lpfc_fcp_class_init(vport, lpfc_fcp_class);
lpfc_use_adisc_init(vport, lpfc_use_adisc);
lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
lpfc_fdmi_on_init(vport, lpfc_fdmi_on);
lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
lpfc_max_luns_init(vport, lpfc_max_luns);
lpfc_scan_down_init(vport, lpfc_scan_down);
lpfc_enable_da_id_init(vport, lpfc_enable_da_id);
return;
}
| gpl-2.0 |
alexax66/LP-Kernel-a3ltexx | drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 2076 | 45739 | /*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include "qlcnic.h"
struct qlcnic_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
int stat_offset;
};
#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
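/*
 * Sketch of how the {size, offset} pairs built with QLC_SIZEOF()/QLC_OFF()
 * are typically consumed when filling ethtool statistics. This helper is
 * hypothetical, not part of the driver:
 */
static inline u64 qlcnic_read_gstring_stat(struct qlcnic_adapter *adapter,
const struct qlcnic_stats *s)
{
/* point at the counter inside the adapter struct */
char *p = (char *)adapter + s->stat_offset;
/* counters are either u64 or u32; widen u32 reads to u64 */
return (s->sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}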
static const u32 qlcnic_fw_dump_level[] = {
0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff
};
static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
{"xmit_called", QLC_SIZEOF(stats.xmitcalled),
QLC_OFF(stats.xmitcalled)},
{"xmit_finished", QLC_SIZEOF(stats.xmitfinished),
QLC_OFF(stats.xmitfinished)},
{"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
{"tx_dropped", QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
{"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
{"rx_pkts", QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
{"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
{"rx_bytes", QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
{"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
{"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
{"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
{"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
{"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
QLC_OFF(stats.skb_alloc_failure)},
{"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
{"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
QLC_OFF(stats.rx_dma_map_error)},
{"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
QLC_OFF(stats.tx_dma_map_error)},
{"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun),
QLC_OFF(stats.mac_filter_limit_overrun)},
{"spurious intr", QLC_SIZEOF(stats.spurious_intr),
QLC_OFF(stats.spurious_intr)},
};
static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
"rx unicast frames",
"rx multicast frames",
"rx broadcast frames",
"rx dropped frames",
"rx errors",
"rx local frames",
"rx numbytes",
"tx unicast frames",
"tx multicast frames",
"tx broadcast frames",
"tx dropped frames",
"tx errors",
"tx local frames",
"tx numbytes",
};
static const char qlcnic_83xx_tx_stats_strings[][ETH_GSTRING_LEN] = {
"ctx_tx_bytes",
"ctx_tx_pkts",
"ctx_tx_errors",
"ctx_tx_dropped_pkts",
"ctx_tx_num_buffers",
};
static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = {
"mac_tx_frames",
"mac_tx_bytes",
"mac_tx_mcast_pkts",
"mac_tx_bcast_pkts",
"mac_tx_pause_cnt",
"mac_tx_ctrl_pkt",
"mac_tx_lt_64b_pkts",
"mac_tx_lt_127b_pkts",
"mac_tx_lt_255b_pkts",
"mac_tx_lt_511b_pkts",
"mac_tx_lt_1023b_pkts",
"mac_tx_lt_1518b_pkts",
"mac_tx_gt_1518b_pkts",
"mac_rx_frames",
"mac_rx_bytes",
"mac_rx_mcast_pkts",
"mac_rx_bcast_pkts",
"mac_rx_pause_cnt",
"mac_rx_ctrl_pkt",
"mac_rx_lt_64b_pkts",
"mac_rx_lt_127b_pkts",
"mac_rx_lt_255b_pkts",
"mac_rx_lt_511b_pkts",
"mac_rx_lt_1023b_pkts",
"mac_rx_lt_1518b_pkts",
"mac_rx_gt_1518b_pkts",
"mac_rx_length_error",
"mac_rx_length_small",
"mac_rx_length_large",
"mac_rx_jabber",
"mac_rx_dropped",
"mac_crc_error",
"mac_align_error",
"eswitch_frames",
"eswitch_bytes",
"eswitch_multicast_frames",
"eswitch_broadcast_frames",
"eswitch_unicast_frames",
"eswitch_error_free_frames",
"eswitch_error_free_bytes",
};
#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
"ctx_rx_bytes",
"ctx_rx_pkts",
"ctx_lro_pkt_cnt",
"ctx_ip_csum_error",
"ctx_rx_pkts_wo_ctx",
"ctx_rx_pkts_drop_wo_sds_on_card",
"ctx_rx_pkts_drop_wo_sds_on_host",
"ctx_rx_osized_pkts",
"ctx_rx_pkts_dropped_wo_rds",
"ctx_rx_unexpected_mcast_pkts",
"ctx_invalid_mac_address",
"ctx_rx_rds_ring_prim_attempted",
"ctx_rx_rds_ring_prim_success",
"ctx_num_lro_flows_added",
"ctx_num_lro_flows_removed",
"ctx_num_lro_flows_active",
"ctx_pkts_dropped_unknown",
};
static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
"Register_Test_on_offline",
"Link_Test_on_offline",
"Interrupt_Test_offline",
"Internal_Loopback_offline",
"EEPROM_Test_offline"
};
#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
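/*
 * Usage sketch (interface name hypothetical): these strings label the
 * results reported by the ethtool self-test; the offline-only entries
 * run when the test is invoked as:
 *
 *   # ethtool -t eth0 offline
 */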
static inline int qlcnic_82xx_statistics(void)
{
return ARRAY_SIZE(qlcnic_device_gstrings_stats) +
ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
}
static inline int qlcnic_83xx_statistics(void)
{
return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
}
static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
{
if (qlcnic_82xx_check(adapter))
return qlcnic_82xx_statistics();
else if (qlcnic_83xx_check(adapter))
return qlcnic_83xx_statistics();
else
return -1;
}
#define QLCNIC_RING_REGS_COUNT 20
#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32))
#define QLCNIC_MAX_EEPROM_LEN 1024
static const u32 diag_registers[] = {
QLCNIC_CMDPEG_STATE,
QLCNIC_RCVPEG_STATE,
QLCNIC_FW_CAPABILITIES,
QLCNIC_CRB_DRV_ACTIVE,
QLCNIC_CRB_DEV_STATE,
QLCNIC_CRB_DRV_STATE,
QLCNIC_CRB_DRV_SCRATCH,
QLCNIC_CRB_DEV_PARTITION_INFO,
QLCNIC_CRB_DRV_IDC_VER,
QLCNIC_PEG_ALIVE_COUNTER,
QLCNIC_PEG_HALT_STATUS1,
QLCNIC_PEG_HALT_STATUS2,
-1
};
static const u32 ext_diag_registers[] = {
CRB_XG_STATE_P3P,
ISR_INT_STATE_REG,
QLCNIC_CRB_PEG_NET_0+0x3c,
QLCNIC_CRB_PEG_NET_1+0x3c,
QLCNIC_CRB_PEG_NET_2+0x3c,
QLCNIC_CRB_PEG_NET_4+0x3c,
-1
};
#define QLCNIC_MGMT_API_VERSION 2
#define QLCNIC_ETHTOOL_REGS_VER 3
static int qlcnic_get_regs_len(struct net_device *dev)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 len;
if (qlcnic_83xx_check(adapter))
len = qlcnic_83xx_get_regs_len(adapter);
else
len = sizeof(ext_diag_registers) + sizeof(diag_registers);
return QLCNIC_RING_REGS_LEN + len + QLCNIC_DEV_INFO_SIZE + 1;
}
static int qlcnic_get_eeprom_len(struct net_device *dev)
{
return QLCNIC_FLASH_TOTAL_SIZE;
}
static void
qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 fw_major, fw_minor, fw_build;
fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d", fw_major, fw_minor, fw_build);
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
strlcpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID,
sizeof(drvinfo->version));
}
static int
qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
if (qlcnic_82xx_check(adapter))
return qlcnic_82xx_get_settings(adapter, ecmd);
else if (qlcnic_83xx_check(adapter))
return qlcnic_83xx_get_settings(adapter, ecmd);
return -EIO;
}
int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
struct ethtool_cmd *ecmd)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 speed, reg;
int check_sfp_module = 0;
u16 pcifn = ahw->pci_func;
/* read which mode */
if (adapter->ahw->port_type == QLCNIC_GBE) {
ecmd->supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full);
ecmd->advertising = (ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full |
ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full);
ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
ecmd->duplex = adapter->ahw->link_duplex;
ecmd->autoneg = adapter->ahw->link_autoneg;
} else if (adapter->ahw->port_type == QLCNIC_XGBE) {
u32 val = 0;
val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
if (val == QLCNIC_PORT_MODE_802_3_AP) {
ecmd->supported = SUPPORTED_1000baseT_Full;
ecmd->advertising = ADVERTISED_1000baseT_Full;
} else {
ecmd->supported = SUPPORTED_10000baseT_Full;
ecmd->advertising = ADVERTISED_10000baseT_Full;
}
if (netif_running(adapter->netdev) && ahw->has_link_events) {
reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
speed = P3P_LINK_SPEED_VAL(pcifn, reg);
ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
ethtool_cmd_speed_set(ecmd, ahw->link_speed);
ecmd->autoneg = ahw->link_autoneg;
ecmd->duplex = ahw->link_duplex;
goto skip;
}
ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
ecmd->duplex = DUPLEX_UNKNOWN;
ecmd->autoneg = AUTONEG_DISABLE;
} else
return -EIO;
skip:
ecmd->phy_address = adapter->ahw->physical_port;
ecmd->transceiver = XCVR_EXTERNAL;
switch (adapter->ahw->board_type) {
case QLCNIC_BRDTYPE_P3P_REF_QG:
case QLCNIC_BRDTYPE_P3P_4_GB:
case QLCNIC_BRDTYPE_P3P_4_GB_MM:
ecmd->supported |= SUPPORTED_Autoneg;
ecmd->advertising |= ADVERTISED_Autoneg;
case QLCNIC_BRDTYPE_P3P_10G_CX4:
case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
ecmd->supported |= SUPPORTED_TP;
ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
ecmd->autoneg = adapter->ahw->link_autoneg;
break;
case QLCNIC_BRDTYPE_P3P_IMEZ:
case QLCNIC_BRDTYPE_P3P_XG_LOM:
case QLCNIC_BRDTYPE_P3P_HMEZ:
ecmd->supported |= SUPPORTED_MII;
ecmd->advertising |= ADVERTISED_MII;
ecmd->port = PORT_MII;
ecmd->autoneg = AUTONEG_DISABLE;
break;
case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
ecmd->advertising |= ADVERTISED_TP;
ecmd->supported |= SUPPORTED_TP;
check_sfp_module = netif_running(adapter->netdev) &&
ahw->has_link_events;
case QLCNIC_BRDTYPE_P3P_10G_XFP:
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
ecmd->autoneg = AUTONEG_DISABLE;
break;
case QLCNIC_BRDTYPE_P3P_10G_TP:
if (adapter->ahw->port_type == QLCNIC_XGBE) {
ecmd->autoneg = AUTONEG_DISABLE;
ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
ecmd->advertising |=
(ADVERTISED_FIBRE | ADVERTISED_TP);
ecmd->port = PORT_FIBRE;
check_sfp_module = netif_running(adapter->netdev) &&
ahw->has_link_events;
} else {
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
ecmd->advertising |=
(ADVERTISED_TP | ADVERTISED_Autoneg);
ecmd->port = PORT_TP;
}
break;
default:
dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
adapter->ahw->board_type);
return -EIO;
}
if (check_sfp_module) {
switch (adapter->ahw->module_type) {
case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
case LINKEVENT_MODULE_OPTICAL_SRLR:
case LINKEVENT_MODULE_OPTICAL_LRM:
case LINKEVENT_MODULE_OPTICAL_SFP_1G:
ecmd->port = PORT_FIBRE;
break;
case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
case LINKEVENT_MODULE_TWINAX:
ecmd->port = PORT_TP;
break;
default:
ecmd->port = PORT_OTHER;
}
}
return 0;
}
static int qlcnic_set_port_config(struct qlcnic_adapter *adapter,
struct ethtool_cmd *ecmd)
{
u32 ret = 0, config = 0;
/* read which mode */
if (ecmd->duplex)
config |= 0x1;
if (ecmd->autoneg)
config |= 0x2;
switch (ethtool_cmd_speed(ecmd)) {
case SPEED_10:
config |= (0 << 8);
break;
case SPEED_100:
config |= (1 << 8);
break;
case SPEED_1000:
config |= (10 << 8);
break;
default:
return -EIO;
}
ret = qlcnic_fw_cmd_set_port(adapter, config);
if (ret == QLCNIC_RCODE_NOT_SUPPORTED)
return -EOPNOTSUPP;
else if (ret)
return -EIO;
return ret;
}
static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
u32 ret = 0;
struct qlcnic_adapter *adapter = netdev_priv(dev);
if (adapter->ahw->port_type != QLCNIC_GBE)
return -EOPNOTSUPP;
if (qlcnic_83xx_check(adapter))
ret = qlcnic_83xx_set_settings(adapter, ecmd);
else
ret = qlcnic_set_port_config(adapter, ecmd);
if (!ret)
return ret;
adapter->ahw->link_speed = ethtool_cmd_speed(ecmd);
adapter->ahw->link_duplex = ecmd->duplex;
adapter->ahw->link_autoneg = ecmd->autoneg;
if (!netif_running(dev))
return 0;
dev->netdev_ops->ndo_stop(dev);
return dev->netdev_ops->ndo_open(dev);
}
static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter,
u32 *regs_buff)
{
int i, j = 0;
for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]);
j = 0;
while (ext_diag_registers[j] != -1)
regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++]);
return i;
}
static void
qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_sds_ring *sds_ring;
u32 *regs_buff = p;
int ring, i = 0;
memset(p, 0, qlcnic_get_regs_len(dev));
regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) |
(adapter->ahw->revision_id << 16) | (adapter->pdev)->device;
regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
regs_buff[1] = QLCNIC_MGMT_API_VERSION;
if (qlcnic_82xx_check(adapter))
i = qlcnic_82xx_get_registers(adapter, regs_buff);
else
i = qlcnic_83xx_get_registers(adapter, regs_buff);
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
return;
regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/
regs_buff[i++] = 1; /* No. of tx ring */
regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);
regs_buff[i++] = 2; /* No. of rx ring */
regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);
regs_buff[i++] = adapter->max_sds_rings;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &(recv_ctx->sds_rings[ring]);
regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
}
}
static u32 qlcnic_test_link(struct net_device *dev)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 val;
if (qlcnic_83xx_check(adapter)) {
val = qlcnic_83xx_test_link(adapter);
return (val & 1) ? 0 : 1;
}
val = QLCRD32(adapter, CRB_XG_STATE_P3P);
val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
return (val == XG_LINK_UP_P3P) ? 0 : 1;
}
static int
qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u8 *bytes)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int offset;
int ret = -1;
if (qlcnic_83xx_check(adapter))
return 0;
if (eeprom->len == 0)
return -EINVAL;
eeprom->magic = (adapter->pdev)->vendor |
((adapter->pdev)->device << 16);
offset = eeprom->offset;
if (qlcnic_82xx_check(adapter))
ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
eeprom->len);
if (ret < 0)
return ret;
return 0;
}
static void
qlcnic_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
ring->rx_pending = adapter->num_rxd;
ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
ring->tx_pending = adapter->num_txd;
ring->rx_max_pending = adapter->max_rxd;
ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd;
ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
}
static u32
qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
{
u32 num_desc;
num_desc = max(val, min);
num_desc = min(num_desc, max);
num_desc = roundup_pow_of_two(num_desc);
if (val != num_desc) {
printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
qlcnic_driver_name, r_name, num_desc, val);
}
return num_desc;
}
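/*
 * Worked example: a request for 300 rx descriptors with min 64 and max
 * 1024 (values hypothetical) is clamped to [64, 1024] and then rounded
 * up to the next power of two, so qlcnic_validate_ringparam(300, 64,
 * 1024, "rx") returns 512 and logs the substitution.
 */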
static int
qlcnic_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u16 num_rxd, num_jumbo_rxd, num_txd;
if (ring->rx_mini_pending)
return -EOPNOTSUPP;
num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx");
num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd,
"rx jumbo");
num_txd = qlcnic_validate_ringparam(ring->tx_pending,
MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
num_jumbo_rxd == adapter->num_jumbo_rxd)
return 0;
adapter->num_rxd = num_rxd;
adapter->num_jumbo_rxd = num_jumbo_rxd;
adapter->num_txd = num_txd;
return qlcnic_reset_context(adapter);
}
static void qlcnic_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
int min;
struct qlcnic_adapter *adapter = netdev_priv(dev);
min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus());
channel->max_rx = rounddown_pow_of_two(min);
channel->max_tx = adapter->ahw->max_tx_ques;
channel->rx_count = adapter->max_sds_rings;
channel->tx_count = adapter->ahw->max_tx_ques;
}
static int qlcnic_set_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int err;
if (channel->other_count || channel->combined_count ||
channel->tx_count != channel->max_tx)
return -EINVAL;
err = qlcnic_validate_max_rss(adapter, channel->rx_count);
if (err)
return err;
err = qlcnic_set_max_rss(adapter, channel->rx_count, 0);
netdev_info(dev, "allocated 0x%x sds rings\n",
adapter->max_sds_rings);
return err;
}
static void
qlcnic_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int port = adapter->ahw->physical_port;
__u32 val;
if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_get_pauseparam(adapter, pause);
return;
}
if (adapter->ahw->port_type == QLCNIC_GBE) {
if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
return;
/* get flow control settings */
val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
switch (port) {
case 0:
pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
break;
case 1:
pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val));
break;
case 2:
pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val));
break;
case 3:
default:
pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val));
break;
}
} else if (adapter->ahw->port_type == QLCNIC_XGBE) {
if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
return;
pause->rx_pause = 1;
val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
if (port == 0)
pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
else
pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val));
} else {
dev_err(&netdev->dev, "Unknown board type: %x\n",
adapter->ahw->port_type);
}
}
static int
qlcnic_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int port = adapter->ahw->physical_port;
__u32 val;
if (qlcnic_83xx_check(adapter))
return qlcnic_83xx_set_pauseparam(adapter, pause);
/* read mode */
if (adapter->ahw->port_type == QLCNIC_GBE) {
if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
return -EIO;
/* set flow control */
val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
if (pause->rx_pause)
qlcnic_gb_rx_flowctl(val);
else
qlcnic_gb_unset_rx_flowctl(val);
QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val);
/* set autoneg */
val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
switch (port) {
case 0:
if (pause->tx_pause)
qlcnic_gb_unset_gb0_mask(val);
else
qlcnic_gb_set_gb0_mask(val);
break;
case 1:
if (pause->tx_pause)
qlcnic_gb_unset_gb1_mask(val);
else
qlcnic_gb_set_gb1_mask(val);
break;
case 2:
if (pause->tx_pause)
qlcnic_gb_unset_gb2_mask(val);
else
qlcnic_gb_set_gb2_mask(val);
break;
case 3:
default:
if (pause->tx_pause)
qlcnic_gb_unset_gb3_mask(val);
else
qlcnic_gb_set_gb3_mask(val);
break;
}
QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
} else if (adapter->ahw->port_type == QLCNIC_XGBE) {
if (!pause->rx_pause || pause->autoneg)
return -EOPNOTSUPP;
if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
return -EIO;
val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
if (port == 0) {
if (pause->tx_pause)
qlcnic_xg_unset_xg0_mask(val);
else
qlcnic_xg_set_xg0_mask(val);
} else {
if (pause->tx_pause)
qlcnic_xg_unset_xg1_mask(val);
else
qlcnic_xg_set_xg1_mask(val);
}
QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val);
} else {
dev_err(&netdev->dev, "Unknown board type: %x\n",
adapter->ahw->port_type);
}
return 0;
}
static int qlcnic_reg_test(struct net_device *dev)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 data_read;
if (qlcnic_83xx_check(adapter))
return qlcnic_83xx_reg_test(adapter);
data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
if ((data_read & 0xffff) != adapter->pdev->vendor)
return 1;
return 0;
}
static int qlcnic_eeprom_test(struct net_device *dev)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
if (qlcnic_82xx_check(adapter))
return 0;
return qlcnic_83xx_flash_test(adapter);
}
static int qlcnic_get_sset_count(struct net_device *dev, int sset)
{
int len;
struct qlcnic_adapter *adapter = netdev_priv(dev);
switch (sset) {
case ETH_SS_TEST:
return QLCNIC_TEST_LEN;
case ETH_SS_STATS:
len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN;
if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
qlcnic_83xx_check(adapter))
return len;
return qlcnic_82xx_statistics();
default:
return -EOPNOTSUPP;
}
}
static int qlcnic_irq_test(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
int ret, max_sds_rings = adapter->max_sds_rings;
if (qlcnic_83xx_check(adapter))
return qlcnic_83xx_interrupt_test(netdev);
if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
return -EIO;
ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
if (ret)
goto clear_diag_irq;
ahw->diag_cnt = 0;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
cmd.req.arg[1] = ahw->pci_func;
ret = qlcnic_issue_cmd(adapter, &cmd);
if (ret)
goto done;
usleep_range(1000, 12000);
ret = !ahw->diag_cnt;
done:
qlcnic_free_mbx_args(&cmd);
qlcnic_diag_free_res(netdev, max_sds_rings);
clear_diag_irq:
adapter->max_sds_rings = max_sds_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
}
#define QLCNIC_ILB_PKT_SIZE 64
#define QLCNIC_NUM_ILB_PKT 16
#define QLCNIC_ILB_MAX_RCV_LOOP 10
#define QLCNIC_LB_PKT_POLL_DELAY_MSEC 1
#define QLCNIC_LB_PKT_POLL_COUNT 20
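/*
 * Layout of the 64-byte loopback frame built below (derived from
 * qlcnic_create_loopback_buff): bytes 0-5 and 6-11 both carry the
 * adapter MAC (destination == source), bytes 12-15 hold the marker
 * a8 06 45 00, and the remaining bytes keep the 0x4e memset fill.
 * qlcnic_check_loopback_buff() simply rebuilds this frame and
 * memcmp()s it against the received data.
 */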
static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[])
{
unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
memset(data, 0x4e, QLCNIC_ILB_PKT_SIZE);
memcpy(data, mac, ETH_ALEN);
memcpy(data + ETH_ALEN, mac, ETH_ALEN);
memcpy(data + 2 * ETH_ALEN, random_data, sizeof(random_data));
}
int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[])
{
unsigned char buff[QLCNIC_ILB_PKT_SIZE];
qlcnic_create_loopback_buff(buff, mac);
return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE);
}
int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
struct sk_buff *skb;
int i, loop, cnt = 0;
for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
if (!skb)
break;
qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
skb_put(skb, QLCNIC_ILB_PKT_SIZE);
adapter->ahw->diag_cnt = 0;
qlcnic_xmit_frame(skb, adapter->netdev);
loop = 0;
do {
msleep(QLCNIC_LB_PKT_POLL_DELAY_MSEC);
qlcnic_process_rcv_ring_diag(sds_ring);
if (loop++ > QLCNIC_LB_PKT_POLL_COUNT)
break;
} while (!adapter->ahw->diag_cnt);
dev_kfree_skb_any(skb);
if (!adapter->ahw->diag_cnt)
dev_warn(&adapter->pdev->dev,
"LB Test: packet #%d was not received\n",
i + 1);
else
cnt++;
}
if (cnt != i) {
dev_err(&adapter->pdev->dev,
"LB Test: failed, TX[%d], RX[%d]\n", i, cnt);
if (mode != QLCNIC_ILB_MODE)
dev_warn(&adapter->pdev->dev,
"WARNING: Please check loopback cable\n");
return -1;
}
return 0;
}
int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int max_sds_rings = adapter->max_sds_rings;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_hardware_context *ahw = adapter->ahw;
int loop = 0;
int ret;
if (qlcnic_83xx_check(adapter))
return qlcnic_83xx_loopback_test(netdev, mode);
if (!(ahw->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
dev_info(&adapter->pdev->dev,
"Firmware do not support loopback test\n");
return -EOPNOTSUPP;
}
dev_warn(&adapter->pdev->dev, "%s loopback test in progress\n",
mode == QLCNIC_ILB_MODE ? "internal" : "external");
if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
dev_warn(&adapter->pdev->dev,
"Loopback test not supported in nonprivileged mode\n");
return 0;
}
if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
return -EBUSY;
ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
if (ret)
goto clear_it;
sds_ring = &adapter->recv_ctx->sds_rings[0];
ret = qlcnic_set_lb_mode(adapter, mode);
if (ret)
goto free_res;
ahw->diag_cnt = 0;
do {
msleep(500);
qlcnic_process_rcv_ring_diag(sds_ring);
if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
netdev_info(netdev, "firmware didnt respond to loopback"
" configure request\n");
ret = -QLCNIC_FW_NOT_RESPOND;
goto free_res;
} else if (adapter->ahw->diag_cnt) {
ret = adapter->ahw->diag_cnt;
goto free_res;
}
} while (!QLCNIC_IS_LB_CONFIGURED(ahw->loopback_state));
ret = qlcnic_do_lb_test(adapter, mode);
qlcnic_clear_lb_mode(adapter, mode);
free_res:
qlcnic_diag_free_res(netdev, max_sds_rings);
clear_it:
adapter->max_sds_rings = max_sds_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
}
static void
qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
u64 *data)
{
memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
data[0] = qlcnic_reg_test(dev);
if (data[0])
eth_test->flags |= ETH_TEST_FL_FAILED;
data[1] = (u64) qlcnic_test_link(dev);
if (data[1])
eth_test->flags |= ETH_TEST_FL_FAILED;
if (eth_test->flags & ETH_TEST_FL_OFFLINE) {
data[2] = qlcnic_irq_test(dev);
if (data[2])
eth_test->flags |= ETH_TEST_FL_FAILED;
data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE);
if (data[3])
eth_test->flags |= ETH_TEST_FL_FAILED;
data[4] = qlcnic_eeprom_test(dev);
if (data[4])
eth_test->flags |= ETH_TEST_FL_FAILED;
}
}
static void
qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int index, i, num_stats;
switch (stringset) {
case ETH_SS_TEST:
memcpy(data, *qlcnic_gstrings_test,
QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
for (index = 0; index < QLCNIC_STATS_LEN; index++) {
memcpy(data + index * ETH_GSTRING_LEN,
qlcnic_gstrings_stats[index].stat_string,
ETH_GSTRING_LEN);
}
if (qlcnic_83xx_check(adapter)) {
num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings);
for (i = 0; i < num_stats; i++, index++)
memcpy(data + index * ETH_GSTRING_LEN,
qlcnic_83xx_tx_stats_strings[i],
ETH_GSTRING_LEN);
num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
for (i = 0; i < num_stats; i++, index++)
memcpy(data + index * ETH_GSTRING_LEN,
qlcnic_83xx_mac_stats_strings[i],
ETH_GSTRING_LEN);
num_stats = ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
for (i = 0; i < num_stats; i++, index++)
memcpy(data + index * ETH_GSTRING_LEN,
qlcnic_83xx_rx_stats_strings[i],
ETH_GSTRING_LEN);
return;
} else {
num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
for (i = 0; i < num_stats; i++, index++)
memcpy(data + index * ETH_GSTRING_LEN,
qlcnic_83xx_mac_stats_strings[i],
ETH_GSTRING_LEN);
}
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return;
num_stats = ARRAY_SIZE(qlcnic_device_gstrings_stats);
for (i = 0; i < num_stats; index++, i++) {
memcpy(data + index * ETH_GSTRING_LEN,
qlcnic_device_gstrings_stats[i],
ETH_GSTRING_LEN);
}
}
}
static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
{
if (type == QLCNIC_MAC_STATS) {
struct qlcnic_mac_statistics *mac_stats =
(struct qlcnic_mac_statistics *)stats;
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error);
*data++ = QLCNIC_FILL_STATS(mac_stats->mac_align_error);
} else if (type == QLCNIC_ESW_STATS) {
struct __qlcnic_esw_statistics *esw_stats =
(struct __qlcnic_esw_statistics *)stats;
*data++ = QLCNIC_FILL_STATS(esw_stats->unicast_frames);
*data++ = QLCNIC_FILL_STATS(esw_stats->multicast_frames);
*data++ = QLCNIC_FILL_STATS(esw_stats->broadcast_frames);
*data++ = QLCNIC_FILL_STATS(esw_stats->dropped_frames);
*data++ = QLCNIC_FILL_STATS(esw_stats->errors);
*data++ = QLCNIC_FILL_STATS(esw_stats->local_frames);
*data++ = QLCNIC_FILL_STATS(esw_stats->numbytes);
}
return data;
}
static void qlcnic_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
struct qlcnic_esw_statistics port_stats;
struct qlcnic_mac_statistics mac_stats;
int index, ret, length, size;
char *p;
memset(data, 0, stats->n_stats * sizeof(u64));
length = QLCNIC_STATS_LEN;
for (index = 0; index < length; index++) {
p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset;
size = qlcnic_gstrings_stats[index].sizeof_stat;
*data++ = (size == sizeof(u64)) ? (*(u64 *)p) : ((*(u32 *)p));
}
if (qlcnic_83xx_check(adapter)) {
if (adapter->ahw->linkup)
qlcnic_83xx_get_stats(adapter, data);
return;
} else {
/* Retrieve MAC statistics from firmware */
memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
qlcnic_get_mac_stats(adapter, &mac_stats);
data = qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS);
}
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return;
memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics));
ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
QLCNIC_QUERY_RX_COUNTER, &port_stats.rx);
if (ret)
return;
data = qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS);
ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
if (ret)
return;
qlcnic_fill_stats(data, &port_stats.tx, QLCNIC_ESW_STATS);
}
static int qlcnic_set_led(struct net_device *dev,
enum ethtool_phys_id_state state)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int max_sds_rings = adapter->max_sds_rings;
int err = -EIO, active = 1;
if (qlcnic_83xx_check(adapter))
return qlcnic_83xx_set_led(dev, state);
if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
netdev_warn(dev, "LED test not supported for non "
"privilege function\n");
return -EOPNOTSUPP;
}
switch (state) {
case ETHTOOL_ID_ACTIVE:
if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
return -EBUSY;
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
break;
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST))
break;
set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
}
if (adapter->nic_ops->config_led(adapter, 1, 0xf) == 0) {
err = 0;
break;
}
dev_err(&adapter->pdev->dev,
"Failed to set LED blink state.\n");
break;
case ETHTOOL_ID_INACTIVE:
active = 0;
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
break;
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST))
break;
set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
}
if (adapter->nic_ops->config_led(adapter, 0, 0xf))
dev_err(&adapter->pdev->dev,
"Failed to reset LED blink state.\n");
break;
default:
return -EINVAL;
}
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
qlcnic_diag_free_res(dev, max_sds_rings);
if (!active || err)
clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
return err;
}
static void
qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 wol_cfg;
if (qlcnic_83xx_check(adapter))
return;
wol->supported = 0;
wol->wolopts = 0;
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
if (wol_cfg & (1UL << adapter->portnum))
wol->supported |= WAKE_MAGIC;
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
if (wol_cfg & (1UL << adapter->portnum))
wol->wolopts |= WAKE_MAGIC;
}
static int
qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 wol_cfg;
if (qlcnic_83xx_check(adapter))
return -EOPNOTSUPP;
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
if (!(wol_cfg & (1 << adapter->portnum)))
return -EOPNOTSUPP;
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
if (wol->wolopts & WAKE_MAGIC)
wol_cfg |= 1UL << adapter->portnum;
else
wol_cfg &= ~(1UL << adapter->portnum);
QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg);
return 0;
}
/*
* Set the interrupt coalescing parameters. Currently only the normal
* (non-adaptive) mode is supported. If rx_coalesce_usecs == 0 or
* rx_max_coalesced_frames == 0, reset the firmware coalescing to its
* defaults.
*/
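/*
 * Illustrative mapping (not part of the original comment): a request
 * such as "ethtool -C ethX rx-usecs 3 rx-frames 64" arrives here as
 * ethcoal->rx_coalesce_usecs = 3 and
 * ethcoal->rx_max_coalesced_frames = 64; either value being 0
 * restores the QLCNIC_DEF_INTR_COALESCE_* defaults.
 */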
static int qlcnic_set_intr_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ethcoal)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_nic_intr_coalesce *coal;
u32 rx_coalesce_usecs, rx_max_frames;
u32 tx_coalesce_usecs, tx_max_frames;
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
return -EINVAL;
/*
* Return Error if unsupported values or
* unsupported parameters are set.
*/
if (ethcoal->rx_coalesce_usecs > 0xffff ||
ethcoal->rx_max_coalesced_frames > 0xffff ||
ethcoal->tx_coalesce_usecs > 0xffff ||
ethcoal->tx_max_coalesced_frames > 0xffff ||
ethcoal->rx_coalesce_usecs_irq ||
ethcoal->rx_max_coalesced_frames_irq ||
ethcoal->tx_coalesce_usecs_irq ||
ethcoal->tx_max_coalesced_frames_irq ||
ethcoal->stats_block_coalesce_usecs ||
ethcoal->use_adaptive_rx_coalesce ||
ethcoal->use_adaptive_tx_coalesce ||
ethcoal->pkt_rate_low ||
ethcoal->rx_coalesce_usecs_low ||
ethcoal->rx_max_coalesced_frames_low ||
ethcoal->tx_coalesce_usecs_low ||
ethcoal->tx_max_coalesced_frames_low ||
ethcoal->pkt_rate_high ||
ethcoal->rx_coalesce_usecs_high ||
ethcoal->rx_max_coalesced_frames_high ||
ethcoal->tx_coalesce_usecs_high ||
ethcoal->tx_max_coalesced_frames_high)
return -EINVAL;
coal = &adapter->ahw->coal;
if (qlcnic_83xx_check(adapter)) {
if (!ethcoal->tx_coalesce_usecs ||
!ethcoal->tx_max_coalesced_frames ||
!ethcoal->rx_coalesce_usecs ||
!ethcoal->rx_max_coalesced_frames) {
coal->flag = QLCNIC_INTR_DEFAULT;
coal->type = QLCNIC_INTR_COAL_TYPE_RX;
coal->rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
coal->rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
coal->tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
coal->tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
} else {
tx_coalesce_usecs = ethcoal->tx_coalesce_usecs;
tx_max_frames = ethcoal->tx_max_coalesced_frames;
rx_coalesce_usecs = ethcoal->rx_coalesce_usecs;
rx_max_frames = ethcoal->rx_max_coalesced_frames;
coal->flag = 0;
if ((coal->rx_time_us == rx_coalesce_usecs) &&
(coal->rx_packets == rx_max_frames)) {
coal->type = QLCNIC_INTR_COAL_TYPE_TX;
coal->tx_time_us = tx_coalesce_usecs;
coal->tx_packets = tx_max_frames;
} else if ((coal->tx_time_us == tx_coalesce_usecs) &&
(coal->tx_packets == tx_max_frames)) {
coal->type = QLCNIC_INTR_COAL_TYPE_RX;
coal->rx_time_us = rx_coalesce_usecs;
coal->rx_packets = rx_max_frames;
} else {
coal->type = QLCNIC_INTR_COAL_TYPE_RX;
coal->rx_time_us = rx_coalesce_usecs;
coal->rx_packets = rx_max_frames;
coal->tx_time_us = tx_coalesce_usecs;
coal->tx_packets = tx_max_frames;
}
}
} else {
if (!ethcoal->rx_coalesce_usecs ||
!ethcoal->rx_max_coalesced_frames) {
coal->flag = QLCNIC_INTR_DEFAULT;
coal->rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
coal->rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
} else {
coal->flag = 0;
coal->rx_time_us = ethcoal->rx_coalesce_usecs;
coal->rx_packets = ethcoal->rx_max_coalesced_frames;
}
}
qlcnic_config_intr_coalesce(adapter);
return 0;
}
static int qlcnic_get_intr_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ethcoal)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return -EINVAL;
ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us;
ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets;
ethcoal->tx_coalesce_usecs = adapter->ahw->coal.tx_time_us;
ethcoal->tx_max_coalesced_frames = adapter->ahw->coal.tx_packets;
return 0;
}
static u32 qlcnic_get_msglevel(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
return adapter->ahw->msg_enable;
}
static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
adapter->ahw->msg_enable = msglvl;
}
static int
qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
if (!fw_dump->tmpl_hdr) {
netdev_err(adapter->netdev, "FW Dump not supported\n");
return -ENOTSUPP;
}
if (fw_dump->clr)
dump->len = fw_dump->tmpl_hdr->size + fw_dump->size;
else
dump->len = 0;
if (!fw_dump->enable)
dump->flag = ETH_FW_DUMP_DISABLE;
else
dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
dump->version = adapter->fw_version;
return 0;
}
static int
qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
void *buffer)
{
int i, copy_sz;
u32 *hdr_ptr;
__le32 *data;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
if (!fw_dump->tmpl_hdr) {
netdev_err(netdev, "FW Dump not supported\n");
return -ENOTSUPP;
}
if (!fw_dump->clr) {
netdev_info(netdev, "Dump not available\n");
return -EINVAL;
}
/* Copy template header first */
copy_sz = fw_dump->tmpl_hdr->size;
hdr_ptr = (u32 *) fw_dump->tmpl_hdr;
data = buffer;
for (i = 0; i < copy_sz/sizeof(u32); i++)
*data++ = cpu_to_le32(*hdr_ptr++);
/* Copy captured dump data */
memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size);
dump->len = copy_sz + fw_dump->size;
dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
/* Free dump area once data has been captured */
vfree(fw_dump->data);
fw_dump->data = NULL;
fw_dump->clr = 0;
netdev_info(netdev, "extracted the FW dump Successfully\n");
return 0;
}
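/*
 * Resulting dump buffer layout (summary of the copy loop above):
 *
 *   +-------------------------------+ offset 0
 *   | template header, le32-swapped | tmpl_hdr->size bytes
 *   +-------------------------------+
 *   | raw captured dump data        | fw_dump->size bytes
 *   +-------------------------------+
 */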
static int
qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
int i;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
u32 state;
switch (val->flag) {
case QLCNIC_FORCE_FW_DUMP_KEY:
if (!fw_dump->tmpl_hdr) {
netdev_err(netdev, "FW dump not supported\n");
return -ENOTSUPP;
}
if (!fw_dump->enable) {
netdev_info(netdev, "FW dump not enabled\n");
return 0;
}
if (fw_dump->clr) {
netdev_info(netdev,
"Previous dump not cleared, not forcing dump\n");
return 0;
}
netdev_info(netdev, "Forcing a FW dump\n");
qlcnic_dev_request_reset(adapter, val->flag);
break;
case QLCNIC_DISABLE_FW_DUMP:
if (fw_dump->enable && fw_dump->tmpl_hdr) {
netdev_info(netdev, "Disabling FW dump\n");
fw_dump->enable = 0;
}
return 0;
case QLCNIC_ENABLE_FW_DUMP:
if (!fw_dump->tmpl_hdr) {
netdev_err(netdev, "FW dump not supported\n");
return -ENOTSUPP;
}
if (!fw_dump->enable) {
netdev_info(netdev, "Enabling FW dump\n");
fw_dump->enable = 1;
}
return 0;
case QLCNIC_FORCE_FW_RESET:
netdev_info(netdev, "Forcing a FW reset\n");
qlcnic_dev_request_reset(adapter, val->flag);
adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
return 0;
case QLCNIC_SET_QUIESCENT:
case QLCNIC_RESET_QUIESCENT:
state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
netdev_info(netdev, "Device in FAILED state\n");
return 0;
default:
if (!fw_dump->tmpl_hdr) {
netdev_err(netdev, "FW dump not supported\n");
return -ENOTSUPP;
}
for (i = 0; i < ARRAY_SIZE(qlcnic_fw_dump_level); i++) {
if (val->flag == qlcnic_fw_dump_level[i]) {
fw_dump->tmpl_hdr->drv_cap_mask = val->flag;
netdev_info(netdev, "Driver mask changed to: 0x%x\n",
fw_dump->tmpl_hdr->drv_cap_mask);
return 0;
}
}
netdev_info(netdev, "Invalid dump level: 0x%x\n", val->flag);
return -EINVAL;
}
return 0;
}
const struct ethtool_ops qlcnic_ethtool_ops = {
.get_settings = qlcnic_get_settings,
.set_settings = qlcnic_set_settings,
.get_drvinfo = qlcnic_get_drvinfo,
.get_regs_len = qlcnic_get_regs_len,
.get_regs = qlcnic_get_regs,
.get_link = ethtool_op_get_link,
.get_eeprom_len = qlcnic_get_eeprom_len,
.get_eeprom = qlcnic_get_eeprom,
.get_ringparam = qlcnic_get_ringparam,
.set_ringparam = qlcnic_set_ringparam,
.get_channels = qlcnic_get_channels,
.set_channels = qlcnic_set_channels,
.get_pauseparam = qlcnic_get_pauseparam,
.set_pauseparam = qlcnic_set_pauseparam,
.get_wol = qlcnic_get_wol,
.set_wol = qlcnic_set_wol,
.self_test = qlcnic_diag_test,
.get_strings = qlcnic_get_strings,
.get_ethtool_stats = qlcnic_get_ethtool_stats,
.get_sset_count = qlcnic_get_sset_count,
.get_coalesce = qlcnic_get_intr_coalesce,
.set_coalesce = qlcnic_set_intr_coalesce,
.set_phys_id = qlcnic_set_led,
.set_msglevel = qlcnic_set_msglevel,
.get_msglevel = qlcnic_get_msglevel,
.get_dump_flag = qlcnic_get_dump_flag,
.get_dump_data = qlcnic_get_dump_data,
.set_dump = qlcnic_set_dump,
};
const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
.get_settings = qlcnic_get_settings,
.get_drvinfo = qlcnic_get_drvinfo,
.get_regs_len = qlcnic_get_regs_len,
.get_regs = qlcnic_get_regs,
.get_link = ethtool_op_get_link,
.get_eeprom_len = qlcnic_get_eeprom_len,
.get_eeprom = qlcnic_get_eeprom,
.get_ringparam = qlcnic_get_ringparam,
.set_ringparam = qlcnic_set_ringparam,
.get_channels = qlcnic_get_channels,
.get_pauseparam = qlcnic_get_pauseparam,
.get_wol = qlcnic_get_wol,
.get_strings = qlcnic_get_strings,
.get_ethtool_stats = qlcnic_get_ethtool_stats,
.get_sset_count = qlcnic_get_sset_count,
.get_coalesce = qlcnic_get_intr_coalesce,
.set_coalesce = qlcnic_set_intr_coalesce,
.set_msglevel = qlcnic_set_msglevel,
.get_msglevel = qlcnic_get_msglevel,
};
| gpl-2.0 |
mathkid95/linux_samsung_jb | arch/s390/mm/init.c | 2588 | 6046 | /*
* arch/s390/mm/init.c
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Hartmut Penner (hp@de.ibm.com)
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1995 Linus Torvalds
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
static unsigned long setup_zero_pages(void)
{
struct cpuid cpu_id;
unsigned int order;
unsigned long size;
struct page *page;
int i;
get_cpu_id(&cpu_id);
switch (cpu_id.machine) {
case 0x9672: /* g5 */
case 0x2064: /* z900 */
case 0x2066: /* z900 */
case 0x2084: /* z990 */
case 0x2086: /* z990 */
case 0x2094: /* z9-109 */
case 0x2096: /* z9-109 */
order = 0;
break;
case 0x2097: /* z10 */
case 0x2098: /* z10 */
default:
order = 2;
break;
}
empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!empty_zero_page)
panic("Out of memory in setup_zero_pages");
page = virt_to_page((void *) empty_zero_page);
split_page(page, order);
for (i = 1 << order; i > 0; i--) {
SetPageReserved(page);
page++;
}
size = PAGE_SIZE << order;
zero_page_mask = (size - 1) & PAGE_MASK;
return 1UL << order;
}
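/*
 * Example (illustrative, not from the original source): on a z10,
 * order = 2 gives four consecutive zero pages, size = 16KB and
 * zero_page_mask = 0x3000 (with 4KB pages), presumably letting
 * ZERO_PAGE() pick one of the four pages from the faulting address
 * to spread cache coloring.
 */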
/*
* paging_init() sets up the page tables
*/
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
unsigned long pgd_type;
init_mm.pgd = swapper_pg_dir;
S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
/* A three level page table (4TB) is enough for the kernel space. */
S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
pgd_type = _REGION3_ENTRY_EMPTY;
#else
S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
clear_table((unsigned long *) init_mm.pgd, pgd_type,
sizeof(unsigned long)*2048);
vmem_map_init();
/* enable virtual mapping in kernel mode */
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
__ctl_load(S390_lowcore.kernel_asce, 7, 7);
__ctl_load(S390_lowcore.kernel_asce, 13, 13);
arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));
atomic_set(&init_mm.context.attach_count, 1);
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init_nodes(max_zone_pfns);
fault_init();
}
void __init mem_init(void)
{
unsigned long codesize, reservedpages, datasize, initsize;
max_mapnr = num_physpages = max_low_pfn;
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
/* Setup guest page hinting */
cmma_init();
/* this will put all low memory onto the freelists */
totalram_pages += free_all_bootmem();
totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */
reservedpages = 0;
codesize = (unsigned long) &_etext - (unsigned long) &_text;
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
nr_free_pages() << (PAGE_SHIFT-10),
max_mapnr << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
initsize >> 10);
printk("Write protected kernel read-only data: %#lx - %#lx\n",
(unsigned long)&_stext,
PFN_ALIGN((unsigned long)&_eshared) - 1);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
unsigned long address;
int i;
for (i = 0; i < numpages; i++) {
address = page_to_phys(page + i);
pgd = pgd_offset_k(address);
pud = pud_offset(pgd, address);
pmd = pmd_offset(pud, address);
pte = pte_offset_kernel(pmd, address);
if (!enable) {
__ptep_ipte(address, pte);
pte_val(*pte) = _PAGE_TYPE_EMPTY;
continue;
}
*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
/* Flush cpu write queue. */
mb();
}
}
#endif
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
unsigned long addr = begin;
if (begin >= end)
return;
for (; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
PAGE_SIZE);
free_page(addr);
totalram_pages++;
}
printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}
void free_initmem(void)
{
free_init_pages("unused kernel memory",
(unsigned long)&__init_begin,
(unsigned long)&__init_end);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
free_init_pages("initrd memory", start, end);
}
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
struct pglist_data *pgdat;
struct zone *zone;
int rc;
pgdat = NODE_DATA(nid);
zone = pgdat->node_zones + ZONE_MOVABLE;
rc = vmem_add_mapping(start, size);
if (rc)
return rc;
rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size));
if (rc)
vmem_remove_mapping(start, size);
return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
| gpl-2.0 |
elixirflash/dm-keepfast | arch/sparc/kernel/sbus.c | 2588 | 20478 | /*
* sbus.c: UltraSparc SBUS controller support.
*
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/upa.h>
#include <asm/cache.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/oplib.h>
#include <asm/starfire.h>
#include "iommu_common.h"
#define MAP_BASE ((u32)0xc0000000)
/* Offsets from iommu_regs */
#define SYSIO_IOMMUREG_BASE 0x2400UL
#define IOMMU_CONTROL (0x2400UL - 0x2400UL) /* IOMMU control register */
#define IOMMU_TSBBASE (0x2408UL - 0x2400UL) /* TSB base address register */
#define IOMMU_FLUSH (0x2410UL - 0x2400UL) /* IOMMU flush register */
#define IOMMU_VADIAG (0x4400UL - 0x2400UL) /* SBUS virtual address diagnostic */
#define IOMMU_TAGCMP (0x4408UL - 0x2400UL) /* TLB tag compare diagnostics */
#define IOMMU_LRUDIAG (0x4500UL - 0x2400UL) /* IOMMU LRU queue diagnostics */
#define IOMMU_TAGDIAG (0x4580UL - 0x2400UL) /* TLB tag diagnostics */
#define IOMMU_DRAMDIAG (0x4600UL - 0x2400UL) /* TLB data RAM diagnostics */
#define IOMMU_DRAM_VALID (1UL << 30UL)
/* Offsets from strbuf_regs */
#define SYSIO_STRBUFREG_BASE 0x2800UL
#define STRBUF_CONTROL (0x2800UL - 0x2800UL) /* Control */
#define STRBUF_PFLUSH (0x2808UL - 0x2800UL) /* Page flush/invalidate */
#define STRBUF_FSYNC (0x2810UL - 0x2800UL) /* Flush synchronization */
#define STRBUF_DRAMDIAG (0x5000UL - 0x2800UL) /* data RAM diagnostic */
#define STRBUF_ERRDIAG (0x5400UL - 0x2800UL) /* error status diagnostics */
#define STRBUF_PTAGDIAG (0x5800UL - 0x2800UL) /* Page tag diagnostics */
#define STRBUF_LTAGDIAG (0x5900UL - 0x2800UL) /* Line tag diagnostics */
#define STRBUF_TAG_VALID 0x02UL
/* Enable 64-bit DVMA mode for the given device. */
void sbus_set_sbus64(struct device *dev, int bursts)
{
struct iommu *iommu = dev->archdata.iommu;
struct platform_device *op = to_platform_device(dev);
const struct linux_prom_registers *regs;
unsigned long cfg_reg;
int slot;
u64 val;
regs = of_get_property(op->dev.of_node, "reg", NULL);
if (!regs) {
printk(KERN_ERR "sbus_set_sbus64: Cannot find regs for %s\n",
op->dev.of_node->full_name);
return;
}
slot = regs->which_io;
cfg_reg = iommu->write_complete_reg;
switch (slot) {
case 0:
cfg_reg += 0x20UL;
break;
case 1:
cfg_reg += 0x28UL;
break;
case 2:
cfg_reg += 0x30UL;
break;
case 3:
cfg_reg += 0x38UL;
break;
case 13:
cfg_reg += 0x40UL;
break;
case 14:
cfg_reg += 0x48UL;
break;
case 15:
cfg_reg += 0x50UL;
break;
default:
return;
}
val = upa_readq(cfg_reg);
if (val & (1UL << 14UL)) {
/* Extended transfer mode already enabled. */
return;
}
val |= (1UL << 14UL);
if (bursts & DMA_BURST8)
val |= (1UL << 1UL);
if (bursts & DMA_BURST16)
val |= (1UL << 2UL);
if (bursts & DMA_BURST32)
val |= (1UL << 3UL);
if (bursts & DMA_BURST64)
val |= (1UL << 4UL);
upa_writeq(val, cfg_reg);
}
EXPORT_SYMBOL(sbus_set_sbus64);
/* INO number to IMAP register offset for SYSIO external IRQ's.
* This should conform to both Sunfire/Wildfire server and Fusion
* desktop designs.
*/
#define SYSIO_IMAP_SLOT0 0x2c00UL
#define SYSIO_IMAP_SLOT1 0x2c08UL
#define SYSIO_IMAP_SLOT2 0x2c10UL
#define SYSIO_IMAP_SLOT3 0x2c18UL
#define SYSIO_IMAP_SCSI 0x3000UL
#define SYSIO_IMAP_ETH 0x3008UL
#define SYSIO_IMAP_BPP 0x3010UL
#define SYSIO_IMAP_AUDIO 0x3018UL
#define SYSIO_IMAP_PFAIL 0x3020UL
#define SYSIO_IMAP_KMS 0x3028UL
#define SYSIO_IMAP_FLPY 0x3030UL
#define SYSIO_IMAP_SHW 0x3038UL
#define SYSIO_IMAP_KBD 0x3040UL
#define SYSIO_IMAP_MS 0x3048UL
#define SYSIO_IMAP_SER 0x3050UL
#define SYSIO_IMAP_TIM0 0x3060UL
#define SYSIO_IMAP_TIM1 0x3068UL
#define SYSIO_IMAP_UE 0x3070UL
#define SYSIO_IMAP_CE 0x3078UL
#define SYSIO_IMAP_SBERR 0x3080UL
#define SYSIO_IMAP_PMGMT 0x3088UL
#define SYSIO_IMAP_GFX 0x3090UL
#define SYSIO_IMAP_EUPA 0x3098UL
#define bogon ((unsigned long) -1)
static unsigned long sysio_irq_offsets[] = {
/* SBUS Slot 0 --> 3, level 1 --> 7 */
SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
/* Onboard devices (not relevant/used on SunFire). */
SYSIO_IMAP_SCSI,
SYSIO_IMAP_ETH,
SYSIO_IMAP_BPP,
bogon,
SYSIO_IMAP_AUDIO,
SYSIO_IMAP_PFAIL,
bogon,
bogon,
SYSIO_IMAP_KMS,
SYSIO_IMAP_FLPY,
SYSIO_IMAP_SHW,
SYSIO_IMAP_KBD,
SYSIO_IMAP_MS,
SYSIO_IMAP_SER,
bogon,
bogon,
SYSIO_IMAP_TIM0,
SYSIO_IMAP_TIM1,
bogon,
bogon,
SYSIO_IMAP_UE,
SYSIO_IMAP_CE,
SYSIO_IMAP_SBERR,
SYSIO_IMAP_PMGMT,
};
#undef bogon
#define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets)
/* Convert Interrupt Mapping register pointer to associated
* Interrupt Clear register pointer, SYSIO specific version.
*/
#define SYSIO_ICLR_UNUSED0 0x3400UL
#define SYSIO_ICLR_SLOT0 0x3408UL
#define SYSIO_ICLR_SLOT1 0x3448UL
#define SYSIO_ICLR_SLOT2 0x3488UL
#define SYSIO_ICLR_SLOT3 0x34c8UL
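/*
 * The IMAP -> ICLR translation below relies on the fixed register
 * spacing: diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0
 *               = 0x3400 - 0x2c00 = 0x800,
 * so e.g. SYSIO_IMAP_SCSI (0x3000) maps to ICLR offset 0x3800.
 */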
static unsigned long sysio_imap_to_iclr(unsigned long imap)
{
unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
return imap + diff;
}
static unsigned int sbus_build_irq(struct platform_device *op, unsigned int ino)
{
struct iommu *iommu = op->dev.archdata.iommu;
unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned long imap, iclr;
int sbus_level = 0;
imap = sysio_irq_offsets[ino];
if (imap == ((unsigned long)-1)) {
prom_printf("get_irq_translations: Bad SYSIO INO[%x]\n",
ino);
prom_halt();
}
imap += reg_base;
/* SYSIO inconsistency. For external SLOTS, we have to select
* the right ICLR register based upon the lower SBUS irq level
* bits.
*/
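/* Example decode (illustrative): ino = 0x0b -> sbus_slot =
 * (0x0b & 0x18) >> 3 = 1, sbus_level = 0x0b & 0x7 = 3, so the
 * SLOT1 ICLR bank is used at index (3 - 1) * 8.
 */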
if (ino >= 0x20) {
iclr = sysio_imap_to_iclr(imap);
} else {
int sbus_slot = (ino & 0x18)>>3;
sbus_level = ino & 0x7;
switch(sbus_slot) {
case 0:
iclr = reg_base + SYSIO_ICLR_SLOT0;
break;
case 1:
iclr = reg_base + SYSIO_ICLR_SLOT1;
break;
case 2:
iclr = reg_base + SYSIO_ICLR_SLOT2;
break;
default:
case 3:
iclr = reg_base + SYSIO_ICLR_SLOT3;
break;
}
iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
}
return build_irq(sbus_level, iclr, imap);
}
/* Error interrupt handling. */
#define SYSIO_UE_AFSR 0x0030UL
#define SYSIO_UE_AFAR 0x0038UL
#define SYSIO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
#define SYSIO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
#define SYSIO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
#define SYSIO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */
#define SYSIO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
#define SYSIO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/
#define SYSIO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */
#define SYSIO_UEAFSR_DOFF 0x0000e00000000000UL /* Doubleword Offset */
#define SYSIO_UEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
#define SYSIO_UEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
#define SYSIO_UEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
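/*
 * Field extraction example (illustrative): the handler below shifts
 * DOFF by 45, SIZE by 42 and MID by 37, matching the bit positions of
 * the masks above; a SIZE field value of 3 means a bad transfer of
 * 2^3 = 8 bytes.
 */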
static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
{
struct platform_device *op = dev_id;
struct iommu *iommu = op->dev.archdata.iommu;
unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned long afsr_reg, afar_reg;
unsigned long afsr, afar, error_bits;
int reported, portid;
afsr_reg = reg_base + SYSIO_UE_AFSR;
afar_reg = reg_base + SYSIO_UE_AFAR;
/* Latch error status. */
afsr = upa_readq(afsr_reg);
afar = upa_readq(afar_reg);
/* Clear primary/secondary error status bits. */
error_bits = afsr &
(SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
upa_writeq(error_bits, afsr_reg);
portid = of_getintprop_default(op->dev.of_node, "portid", -1);
/* Log the error. */
printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
portid,
(((error_bits & SYSIO_UEAFSR_PPIO) ?
"PIO" :
((error_bits & SYSIO_UEAFSR_PDRD) ?
"DVMA Read" :
((error_bits & SYSIO_UEAFSR_PDWR) ?
"DVMA Write" : "???")))));
printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
portid,
(afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
(afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
(afsr & SYSIO_UEAFSR_MID) >> 37UL);
printk("SYSIO[%x]: AFAR[%016lx]\n", portid, afar);
printk("SYSIO[%x]: Secondary UE errors [", portid);
reported = 0;
if (afsr & SYSIO_UEAFSR_SPIO) {
reported++;
printk("(PIO)");
}
if (afsr & SYSIO_UEAFSR_SDRD) {
reported++;
printk("(DVMA Read)");
}
if (afsr & SYSIO_UEAFSR_SDWR) {
reported++;
printk("(DVMA Write)");
}
if (!reported)
printk("(none)");
printk("]\n");
return IRQ_HANDLED;
}
#define SYSIO_CE_AFSR 0x0040UL
#define SYSIO_CE_AFAR 0x0048UL
#define SYSIO_CEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
#define SYSIO_CEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
#define SYSIO_CEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
#define SYSIO_CEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO cause */
#define SYSIO_CEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
#define SYSIO_CEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/
#define SYSIO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved */
#define SYSIO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits */
#define SYSIO_CEAFSR_DOFF 0x0000e00000000000UL /* Double Offset */
#define SYSIO_CEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
#define SYSIO_CEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
#define SYSIO_CEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
{
struct platform_device *op = dev_id;
struct iommu *iommu = op->dev.archdata.iommu;
unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned long afsr_reg, afar_reg;
unsigned long afsr, afar, error_bits;
int reported, portid;
afsr_reg = reg_base + SYSIO_CE_AFSR;
afar_reg = reg_base + SYSIO_CE_AFAR;
/* Latch error status. */
afsr = upa_readq(afsr_reg);
afar = upa_readq(afar_reg);
/* Clear primary/secondary error status bits. */
error_bits = afsr &
(SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
upa_writeq(error_bits, afsr_reg);
portid = of_getintprop_default(op->dev.of_node, "portid", -1);
printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
portid,
(((error_bits & SYSIO_CEAFSR_PPIO) ?
"PIO" :
((error_bits & SYSIO_CEAFSR_PDRD) ?
"DVMA Read" :
((error_bits & SYSIO_CEAFSR_PDWR) ?
"DVMA Write" : "???")))));
/* XXX Use syndrome and afar to print out module string just like
* XXX UDB CE trap handler does... -DaveM
*/
printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
portid,
(afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
(afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
(afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
(afsr & SYSIO_CEAFSR_MID) >> 37UL);
printk("SYSIO[%x]: AFAR[%016lx]\n", portid, afar);
printk("SYSIO[%x]: Secondary CE errors [", portid);
reported = 0;
if (afsr & SYSIO_CEAFSR_SPIO) {
reported++;
printk("(PIO)");
}
if (afsr & SYSIO_CEAFSR_SDRD) {
reported++;
printk("(DVMA Read)");
}
if (afsr & SYSIO_CEAFSR_SDWR) {
reported++;
printk("(DVMA Write)");
}
if (!reported)
printk("(none)");
printk("]\n");
return IRQ_HANDLED;
}
#define SYSIO_SBUS_AFSR 0x2010UL
#define SYSIO_SBUS_AFAR 0x2018UL
#define SYSIO_SBAFSR_PLE 0x8000000000000000UL /* Primary Late PIO Error */
#define SYSIO_SBAFSR_PTO 0x4000000000000000UL /* Primary SBUS Timeout */
#define SYSIO_SBAFSR_PBERR 0x2000000000000000UL /* Primary SBUS Error ACK */
#define SYSIO_SBAFSR_SLE 0x1000000000000000UL /* Secondary Late PIO Error */
#define SYSIO_SBAFSR_STO 0x0800000000000000UL /* Secondary SBUS Timeout */
#define SYSIO_SBAFSR_SBERR 0x0400000000000000UL /* Secondary SBUS Error ACK */
#define SYSIO_SBAFSR_RESV1 0x03ff000000000000UL /* Reserved */
#define SYSIO_SBAFSR_RD 0x0000800000000000UL /* Primary was late PIO read */
#define SYSIO_SBAFSR_RESV2 0x0000600000000000UL /* Reserved */
#define SYSIO_SBAFSR_SIZE 0x00001c0000000000UL /* Size of transfer */
#define SYSIO_SBAFSR_MID 0x000003e000000000UL /* MID causing the error */
#define SYSIO_SBAFSR_RESV3 0x0000001fffffffffUL /* Reserved */
static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
{
struct platform_device *op = dev_id;
struct iommu *iommu = op->dev.archdata.iommu;
unsigned long afsr_reg, afar_reg, reg_base;
unsigned long afsr, afar, error_bits;
int reported, portid;
reg_base = iommu->write_complete_reg - 0x2000UL;
afsr_reg = reg_base + SYSIO_SBUS_AFSR;
afar_reg = reg_base + SYSIO_SBUS_AFAR;
afsr = upa_readq(afsr_reg);
afar = upa_readq(afar_reg);
/* Clear primary/secondary error status bits. */
error_bits = afsr &
(SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
upa_writeq(error_bits, afsr_reg);
portid = of_getintprop_default(op->dev.of_node, "portid", -1);
/* Log the error. */
printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
portid,
(((error_bits & SYSIO_SBAFSR_PLE) ?
"Late PIO Error" :
((error_bits & SYSIO_SBAFSR_PTO) ?
"Time Out" :
((error_bits & SYSIO_SBAFSR_PBERR) ?
"Error Ack" : "???")))),
(afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
portid,
(afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
(afsr & SYSIO_SBAFSR_MID) >> 37UL);
printk("SYSIO[%x]: AFAR[%016lx]\n", portid, afar);
printk("SYSIO[%x]: Secondary SBUS errors [", portid);
reported = 0;
if (afsr & SYSIO_SBAFSR_SLE) {
reported++;
printk("(Late PIO Error)");
}
if (afsr & SYSIO_SBAFSR_STO) {
reported++;
printk("(Time Out)");
}
if (afsr & SYSIO_SBAFSR_SBERR) {
reported++;
printk("(Error Ack)");
}
if (!reported)
printk("(none)");
printk("]\n");
/* XXX check iommu/strbuf for further error status XXX */
return IRQ_HANDLED;
}
#define ECC_CONTROL 0x0020UL
#define SYSIO_ECNTRL_ECCEN 0x8000000000000000UL /* Enable ECC Checking */
#define SYSIO_ECNTRL_UEEN 0x4000000000000000UL /* Enable UE Interrupts */
#define SYSIO_ECNTRL_CEEN 0x2000000000000000UL /* Enable CE Interrupts */
#define SYSIO_UE_INO 0x34
#define SYSIO_CE_INO 0x35
#define SYSIO_SBUSERR_INO 0x36
static void __init sysio_register_error_handlers(struct platform_device *op)
{
struct iommu *iommu = op->dev.archdata.iommu;
unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned int irq;
u64 control;
int portid;
portid = of_getintprop_default(op->dev.of_node, "portid", -1);
irq = sbus_build_irq(op, SYSIO_UE_INO);
if (request_irq(irq, sysio_ue_handler, 0,
"SYSIO_UE", op) < 0) {
prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
portid);
prom_halt();
}
irq = sbus_build_irq(op, SYSIO_CE_INO);
if (request_irq(irq, sysio_ce_handler, 0,
"SYSIO_CE", op) < 0) {
prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
portid);
prom_halt();
}
irq = sbus_build_irq(op, SYSIO_SBUSERR_INO);
if (request_irq(irq, sysio_sbus_error_handler, 0,
"SYSIO_SBERR", op) < 0) {
prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
portid);
prom_halt();
}
/* Now turn the error interrupts on and also enable ECC checking. */
upa_writeq((SYSIO_ECNTRL_ECCEN |
SYSIO_ECNTRL_UEEN |
SYSIO_ECNTRL_CEEN),
reg_base + ECC_CONTROL);
control = upa_readq(iommu->write_complete_reg);
control |= 0x100UL; /* SBUS Error Interrupt Enable */
upa_writeq(control, iommu->write_complete_reg);
}
/* Boot time initialization. */
static void __init sbus_iommu_init(struct platform_device *op)
{
const struct linux_prom64_registers *pr;
struct device_node *dp = op->dev.of_node;
struct iommu *iommu;
struct strbuf *strbuf;
unsigned long regs, reg_base;
int i, portid;
u64 control;
pr = of_get_property(dp, "reg", NULL);
if (!pr) {
prom_printf("sbus_iommu_init: Cannot map SYSIO "
"control registers.\n");
prom_halt();
}
regs = pr->phys_addr;
iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC);
if (!iommu)
goto fatal_memory_error;
strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC);
if (!strbuf)
goto fatal_memory_error;
op->dev.archdata.iommu = iommu;
op->dev.archdata.stc = strbuf;
op->dev.archdata.numa_node = -1;
reg_base = regs + SYSIO_IOMMUREG_BASE;
iommu->iommu_control = reg_base + IOMMU_CONTROL;
iommu->iommu_tsbbase = reg_base + IOMMU_TSBBASE;
iommu->iommu_flush = reg_base + IOMMU_FLUSH;
iommu->iommu_tags = iommu->iommu_control +
(IOMMU_TAGDIAG - IOMMU_CONTROL);
reg_base = regs + SYSIO_STRBUFREG_BASE;
strbuf->strbuf_control = reg_base + STRBUF_CONTROL;
strbuf->strbuf_pflush = reg_base + STRBUF_PFLUSH;
strbuf->strbuf_fsync = reg_base + STRBUF_FSYNC;
strbuf->strbuf_enabled = 1;
strbuf->strbuf_flushflag = (volatile unsigned long *)
((((unsigned long)&strbuf->__flushflag_buf[0])
+ 63UL)
& ~63UL);
strbuf->strbuf_flushflag_pa = (unsigned long)
__pa(strbuf->strbuf_flushflag);
/* The SYSIO SBUS control register is used for dummy reads
* in order to ensure write completion.
*/
iommu->write_complete_reg = regs + 0x2000UL;
portid = of_getintprop_default(op->dev.of_node, "portid", -1);
printk(KERN_INFO "SYSIO: UPA portID %x, at %016lx\n",
portid, regs);
/* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
if (iommu_table_init(iommu, IO_TSB_SIZE, MAP_BASE, 0xffffffff, -1))
goto fatal_memory_error;
control = upa_readq(iommu->iommu_control);
control = ((7UL << 16UL) |
(0UL << 2UL) |
(1UL << 1UL) |
(1UL << 0UL));
upa_writeq(control, iommu->iommu_control);
/* Clean out any cruft in the IOMMU using
* diagnostic accesses.
*/
for (i = 0; i < 16; i++) {
unsigned long dram, tag;
dram = iommu->iommu_control + (IOMMU_DRAMDIAG - IOMMU_CONTROL);
tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL);
dram += (unsigned long)i * 8UL;
tag += (unsigned long)i * 8UL;
upa_writeq(0, dram);
upa_writeq(0, tag);
}
upa_readq(iommu->write_complete_reg);
/* Give the TSB to SYSIO. */
upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase);
/* Setup streaming buffer, DE=1 SB_EN=1 */
control = (1UL << 1UL) | (1UL << 0UL);
upa_writeq(control, strbuf->strbuf_control);
/* Clear out the tags using diagnostics. */
for (i = 0; i < 16; i++) {
unsigned long ptag, ltag;
ptag = strbuf->strbuf_control +
(STRBUF_PTAGDIAG - STRBUF_CONTROL);
ltag = strbuf->strbuf_control +
(STRBUF_LTAGDIAG - STRBUF_CONTROL);
ptag += (unsigned long)i * 8UL;
ltag += (unsigned long)i * 8UL;
upa_writeq(0UL, ptag);
upa_writeq(0UL, ltag);
}
/* Enable DVMA arbitration for all devices/slots. */
control = upa_readq(iommu->write_complete_reg);
control |= 0x3fUL;
upa_writeq(control, iommu->write_complete_reg);
/* Now some Xfire specific grot... */
if (this_is_starfire)
starfire_hookup(portid);
sysio_register_error_handlers(op);
return;
fatal_memory_error:
prom_printf("sbus_iommu_init: Fatal memory allocation error.\n");
}
static int __init sbus_init(void)
{
struct device_node *dp;
for_each_node_by_name(dp, "sbus") {
struct platform_device *op = of_find_device_by_node(dp);
sbus_iommu_init(op);
of_propagate_archdata(op);
}
return 0;
}
subsys_initcall(sbus_init);
| gpl-2.0 |
teamacid/niltmt_kernel | drivers/net/hamradio/dmascc.c | 3612 | 37759 | /*
* Driver for high-speed SCC boards (those with DMA support)
* Copyright (C) 1997-2000 Klaus Kudielka
*
* S5SCC/DMA support by Janko Koleznik S52HI
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <net/ax25.h>
#include "z8530.h"
/* Number of buffers per channel */
#define NUM_TX_BUF 2 /* NUM_TX_BUF >= 1 (min. 2 recommended) */
#define NUM_RX_BUF 6 /* NUM_RX_BUF >= 1 (min. 2 recommended) */
#define BUF_SIZE 1576 /* BUF_SIZE >= mtu + hard_header_len */
/* Cards supported */
#define HW_PI { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
0, 8, 1843200, 3686400 }
#define HW_PI2 { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
0, 8, 3686400, 7372800 }
#define HW_TWIN { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
0, 4, 6144000, 6144000 }
#define HW_S5 { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
0, 8, 4915200, 9830400 }
#define HARDWARE { HW_PI, HW_PI2, HW_TWIN, HW_S5 }
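/* Initializer order matches struct scc_hardware below: name,
io_region, io_delta, io_size, num_devs, scc_offset, tmr_offset,
tmr_hz, pclk_hz. */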
#define TMR_0_HZ 25600 /* Frequency of timer 0 */
#define TYPE_PI 0
#define TYPE_PI2 1
#define TYPE_TWIN 2
#define TYPE_S5 3
#define NUM_TYPES 4
#define MAX_NUM_DEVS 32
/* SCC chips supported */
#define Z8530 0
#define Z85C30 1
#define Z85230 2
#define CHIPNAMES { "Z8530", "Z85C30", "Z85230" }
/* I/O registers */
/* 8530 registers relative to card base */
#define SCCB_CMD 0x00
#define SCCB_DATA 0x01
#define SCCA_CMD 0x02
#define SCCA_DATA 0x03
/* 8253/8254 registers relative to card base */
#define TMR_CNT0 0x00
#define TMR_CNT1 0x01
#define TMR_CNT2 0x02
#define TMR_CTRL 0x03
/* Additional PI/PI2 registers relative to card base */
#define PI_DREQ_MASK 0x04
/* Additional PackeTwin registers relative to card base */
#define TWIN_INT_REG 0x08
#define TWIN_CLR_TMR1 0x09
#define TWIN_CLR_TMR2 0x0a
#define TWIN_SPARE_1 0x0b
#define TWIN_DMA_CFG 0x08
#define TWIN_SERIAL_CFG 0x09
#define TWIN_DMA_CLR_FF 0x0a
#define TWIN_SPARE_2 0x0b
/* PackeTwin I/O register values */
/* INT_REG */
#define TWIN_SCC_MSK 0x01
#define TWIN_TMR1_MSK 0x02
#define TWIN_TMR2_MSK 0x04
#define TWIN_INT_MSK 0x07
/* SERIAL_CFG */
#define TWIN_DTRA_ON 0x01
#define TWIN_DTRB_ON 0x02
#define TWIN_EXTCLKA 0x04
#define TWIN_EXTCLKB 0x08
#define TWIN_LOOPA_ON 0x10
#define TWIN_LOOPB_ON 0x20
#define TWIN_EI 0x80
/* DMA_CFG */
#define TWIN_DMA_HDX_T1 0x08
#define TWIN_DMA_HDX_R1 0x0a
#define TWIN_DMA_HDX_T3 0x14
#define TWIN_DMA_HDX_R3 0x16
#define TWIN_DMA_FDX_T3R1 0x1b
#define TWIN_DMA_FDX_T1R3 0x1d
/* Status values */
#define IDLE 0
#define TX_HEAD 1
#define TX_DATA 2
#define TX_PAUSE 3
#define TX_TAIL 4
#define RTS_OFF 5
#define WAIT 6
#define DCD_ON 7
#define RX_ON 8
#define DCD_OFF 9
/* Ioctls */
#define SIOCGSCCPARAM SIOCDEVPRIVATE
#define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)
/* Data types */
struct scc_param {
int pclk_hz; /* frequency of BRG input (don't change) */
int brg_tc; /* BRG terminal count; BRG disabled if < 0 */
int nrzi; /* 0 (nrz), 1 (nrzi) */
int clocks; /* see dmascc_cfg documentation */
int txdelay; /* [1/TMR_0_HZ] */
int txtimeout; /* [1/HZ] */
int txtail; /* [1/TMR_0_HZ] */
int waittime; /* [1/TMR_0_HZ] */
int slottime; /* [1/TMR_0_HZ] */
int persist; /* 1 ... 256 */
int dma; /* -1 (disable), 0, 1, 3 */
int txpause; /* [1/TMR_0_HZ] */
int rtsoff; /* [1/TMR_0_HZ] */
int dcdon; /* [1/TMR_0_HZ] */
int dcdoff; /* [1/TMR_0_HZ] */
};
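/*
* Illustrative userspace sketch (not part of this driver): a
* configuration tool might read and update these parameters via
* the private ioctls above, roughly as follows. The socket fd and
* interface name are assumptions for the example.
*
*	struct ifreq ifr;
*	struct scc_param param;
*
*	strncpy(ifr.ifr_name, "dmascc0", IFNAMSIZ);
*	ifr.ifr_data = (void *) &param;
*	ioctl(fd, SIOCGSCCPARAM, &ifr);		current parameters
*	param.txdelay = 10;			units of 1/TMR_0_HZ
*	ioctl(fd, SIOCSSCCPARAM, &ifr);		CAP_NET_ADMIN, iface down
*/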
struct scc_hardware {
char *name;
int io_region;
int io_delta;
int io_size;
int num_devs;
int scc_offset;
int tmr_offset;
int tmr_hz;
int pclk_hz;
};
struct scc_priv {
int type;
int chip;
struct net_device *dev;
struct scc_info *info;
int channel;
int card_base, scc_cmd, scc_data;
int tmr_cnt, tmr_ctrl, tmr_mode;
struct scc_param param;
char rx_buf[NUM_RX_BUF][BUF_SIZE];
int rx_len[NUM_RX_BUF];
int rx_ptr;
struct work_struct rx_work;
int rx_head, rx_tail, rx_count;
int rx_over;
char tx_buf[NUM_TX_BUF][BUF_SIZE];
int tx_len[NUM_TX_BUF];
int tx_ptr;
int tx_head, tx_tail, tx_count;
int state;
unsigned long tx_start;
int rr0;
spinlock_t *register_lock; /* Per scc_info */
spinlock_t ring_lock;
};
struct scc_info {
int irq_used;
int twin_serial_cfg;
struct net_device *dev[2];
struct scc_priv priv[2];
struct scc_info *next;
spinlock_t register_lock; /* Per device register lock */
};
/* Function declarations */
static int setup_adapter(int card_base, int type, int n) __init;
static void write_scc(struct scc_priv *priv, int reg, int val);
static void write_scc_data(struct scc_priv *priv, int val, int fast);
static int read_scc(struct scc_priv *priv, int reg);
static int read_scc_data(struct scc_priv *priv);
static int scc_open(struct net_device *dev);
static int scc_close(struct net_device *dev);
static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
static int scc_set_mac_address(struct net_device *dev, void *sa);
static inline void tx_on(struct scc_priv *priv);
static inline void rx_on(struct scc_priv *priv);
static inline void rx_off(struct scc_priv *priv);
static void start_timer(struct scc_priv *priv, int t, int r15);
static inline unsigned char random(void);
static inline void z8530_isr(struct scc_info *info);
static irqreturn_t scc_isr(int irq, void *dev_id);
static void rx_isr(struct scc_priv *priv);
static void special_condition(struct scc_priv *priv, int rc);
static void rx_bh(struct work_struct *);
static void tx_isr(struct scc_priv *priv);
static void es_isr(struct scc_priv *priv);
static void tm_isr(struct scc_priv *priv);
/* Initialization variables */
static int io[MAX_NUM_DEVS] __initdata = { 0, };
/* Beware! hw[] is also used in dmascc_exit(). */
static struct scc_hardware hw[NUM_TYPES] = HARDWARE;
/* Global variables */
static struct scc_info *first;
static unsigned long rand;
MODULE_AUTHOR("Klaus Kudielka");
MODULE_DESCRIPTION("Driver for high-speed SCC boards");
module_param_array(io, int, NULL, 0);
MODULE_LICENSE("GPL");
static void __exit dmascc_exit(void)
{
int i;
struct scc_info *info;
while (first) {
info = first;
/* Unregister devices */
for (i = 0; i < 2; i++)
unregister_netdev(info->dev[i]);
/* Reset board */
if (info->priv[0].type == TYPE_TWIN)
outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
write_scc(&info->priv[0], R9, FHWRES);
release_region(info->dev[0]->base_addr,
hw[info->priv[0].type].io_size);
for (i = 0; i < 2; i++)
free_netdev(info->dev[i]);
/* Free memory */
first = info->next;
kfree(info);
}
}
static int __init dmascc_init(void)
{
int h, i, j, n;
int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
t1[MAX_NUM_DEVS];
unsigned t_val;
unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
counting[MAX_NUM_DEVS];
/* Initialize random number generator */
rand = jiffies;
/* Cards found = 0 */
n = 0;
/* Warning message */
if (!io[0])
printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");
/* Run autodetection for each card type */
for (h = 0; h < NUM_TYPES; h++) {
if (io[0]) {
/* User-specified I/O address regions */
for (i = 0; i < hw[h].num_devs; i++)
base[i] = 0;
for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
j = (io[i] -
hw[h].io_region) / hw[h].io_delta;
if (j >= 0 && j < hw[h].num_devs &&
hw[h].io_region +
j * hw[h].io_delta == io[i]) {
base[j] = io[i];
}
}
} else {
/* Default I/O address regions */
for (i = 0; i < hw[h].num_devs; i++) {
base[i] =
hw[h].io_region + i * hw[h].io_delta;
}
}
/* Check valid I/O address regions */
for (i = 0; i < hw[h].num_devs; i++)
if (base[i]) {
if (!request_region
(base[i], hw[h].io_size, "dmascc"))
base[i] = 0;
else {
tcmd[i] =
base[i] + hw[h].tmr_offset +
TMR_CTRL;
t0[i] =
base[i] + hw[h].tmr_offset +
TMR_CNT0;
t1[i] =
base[i] + hw[h].tmr_offset +
TMR_CNT1;
}
}
/* Start timers */
for (i = 0; i < hw[h].num_devs; i++)
if (base[i]) {
/* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
outb(0x36, tcmd[i]);
outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
t0[i]);
outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
t0[i]);
/* Timer 1: LSB+MSB, Mode 0, HZ/10 */
outb(0x70, tcmd[i]);
outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
start[i] = jiffies;
delay[i] = 0;
counting[i] = 1;
/* Timer 2: LSB+MSB, Mode 0 */
outb(0xb0, tcmd[i]);
}
time = jiffies;
/* Wait until counter registers are loaded */
udelay(2000000 / TMR_0_HZ);
/* Timing loop */
while (jiffies - time < 13) {
for (i = 0; i < hw[h].num_devs; i++)
if (base[i] && counting[i]) {
/* Read back Timer 1: latch; read LSB; read MSB */
outb(0x40, tcmd[i]);
t_val =
inb(t1[i]) + (inb(t1[i]) << 8);
/* Also check whether counter did wrap */
if (t_val == 0 ||
t_val > TMR_0_HZ / HZ * 10)
counting[i] = 0;
delay[i] = jiffies - start[i];
}
}
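/* Timer 1 was loaded with TMR_0_HZ / HZ * 10 counts and (on these
cards) is clocked at TMR_0_HZ by timer 0's output, so on a real
board it expires after 10/HZ seconds; delay[i] therefore settles
at about 10 jiffies, which is why the window below is 9..11. */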
/* Evaluate measurements */
for (i = 0; i < hw[h].num_devs; i++)
if (base[i]) {
if ((delay[i] >= 9 && delay[i] <= 11) &&
/* Ok, we have found an adapter */
(setup_adapter(base[i], h, n) == 0))
n++;
else
release_region(base[i],
hw[h].io_size);
}
} /* NUM_TYPES */
/* If any adapter was successfully initialized, return ok */
if (n)
return 0;
/* If no adapter found, return error */
printk(KERN_INFO "dmascc: no adapters found\n");
return -EIO;
}
module_init(dmascc_init);
module_exit(dmascc_exit);
static void __init dev_setup(struct net_device *dev)
{
dev->type = ARPHRD_AX25;
dev->hard_header_len = AX25_MAX_HEADER_LEN;
dev->mtu = 1500;
dev->addr_len = AX25_ADDR_LEN;
dev->tx_queue_len = 64;
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
}
static const struct net_device_ops scc_netdev_ops = {
.ndo_open = scc_open,
.ndo_stop = scc_close,
.ndo_start_xmit = scc_send_packet,
.ndo_do_ioctl = scc_ioctl,
.ndo_set_mac_address = scc_set_mac_address,
};
static int __init setup_adapter(int card_base, int type, int n)
{
int i, irq, chip;
struct scc_info *info;
struct net_device *dev;
struct scc_priv *priv;
unsigned long time;
unsigned int irqs;
int tmr_base = card_base + hw[type].tmr_offset;
int scc_base = card_base + hw[type].scc_offset;
char *chipnames[] = CHIPNAMES;
/* Initialize what is necessary for write_scc and write_scc_data */
info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
if (!info) {
printk(KERN_ERR "dmascc: "
"could not allocate memory for %s at %#3x\n",
hw[type].name, card_base);
goto out;
}
info->dev[0] = alloc_netdev(0, "", dev_setup);
if (!info->dev[0]) {
printk(KERN_ERR "dmascc: "
"could not allocate memory for %s at %#3x\n",
hw[type].name, card_base);
goto out1;
}
info->dev[1] = alloc_netdev(0, "", dev_setup);
if (!info->dev[1]) {
printk(KERN_ERR "dmascc: "
"could not allocate memory for %s at %#3x\n",
hw[type].name, card_base);
goto out2;
}
spin_lock_init(&info->register_lock);
priv = &info->priv[0];
priv->type = type;
priv->card_base = card_base;
priv->scc_cmd = scc_base + SCCA_CMD;
priv->scc_data = scc_base + SCCA_DATA;
priv->register_lock = &info->register_lock;
/* Reset SCC */
write_scc(priv, R9, FHWRES | MIE | NV);
/* Determine type of chip by enabling SDLC/HDLC enhancements */
write_scc(priv, R15, SHDLCE);
if (!read_scc(priv, R15)) {
/* WR7' not present. This is an ordinary Z8530 SCC. */
chip = Z8530;
} else {
/* Put one character in TX FIFO */
write_scc_data(priv, 0, 0);
if (read_scc(priv, R0) & Tx_BUF_EMP) {
/* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
chip = Z85230;
} else {
/* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
chip = Z85C30;
}
}
write_scc(priv, R15, 0);
/* Start IRQ auto-detection */
irqs = probe_irq_on();
/* Enable interrupts */
if (type == TYPE_TWIN) {
outb(0, card_base + TWIN_DMA_CFG);
inb(card_base + TWIN_CLR_TMR1);
inb(card_base + TWIN_CLR_TMR2);
info->twin_serial_cfg = TWIN_EI;
outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
} else {
write_scc(priv, R15, CTSIE);
write_scc(priv, R0, RES_EXT_INT);
write_scc(priv, R1, EXT_INT_ENAB);
}
/* Start timer */
outb(1, tmr_base + TMR_CNT1);
outb(0, tmr_base + TMR_CNT1);
/* Wait and detect IRQ */
time = jiffies;
while (jiffies - time < 2 + HZ / TMR_0_HZ);
irq = probe_irq_off(irqs);
/* Clear pending interrupt, disable interrupts */
if (type == TYPE_TWIN) {
inb(card_base + TWIN_CLR_TMR1);
} else {
write_scc(priv, R1, 0);
write_scc(priv, R15, 0);
write_scc(priv, R0, RES_EXT_INT);
}
if (irq <= 0) {
printk(KERN_ERR
"dmascc: could not find irq of %s at %#3x (irq=%d)\n",
hw[type].name, card_base, irq);
goto out3;
}
/* Set up data structures */
for (i = 0; i < 2; i++) {
dev = info->dev[i];
priv = &info->priv[i];
priv->type = type;
priv->chip = chip;
priv->dev = dev;
priv->info = info;
priv->channel = i;
spin_lock_init(&priv->ring_lock);
priv->register_lock = &info->register_lock;
priv->card_base = card_base;
priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
priv->tmr_ctrl = tmr_base + TMR_CTRL;
priv->tmr_mode = i ? 0xb0 : 0x70;
priv->param.pclk_hz = hw[type].pclk_hz;
priv->param.brg_tc = -1;
priv->param.clocks = TCTRxCP | RCRTxCP;
priv->param.persist = 256;
priv->param.dma = -1;
INIT_WORK(&priv->rx_work, rx_bh);
dev->ml_priv = priv;
sprintf(dev->name, "dmascc%i", 2 * n + i);
dev->base_addr = card_base;
dev->irq = irq;
dev->netdev_ops = &scc_netdev_ops;
dev->header_ops = &ax25_header_ops;
}
if (register_netdev(info->dev[0])) {
printk(KERN_ERR "dmascc: could not register %s\n",
info->dev[0]->name);
goto out3;
}
if (register_netdev(info->dev[1])) {
printk(KERN_ERR "dmascc: could not register %s\n",
info->dev[1]->name);
goto out4;
}
info->next = first;
first = info;
printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
hw[type].name, chipnames[chip], card_base, irq);
return 0;
out4:
unregister_netdev(info->dev[0]);
out3:
if (info->priv[0].type == TYPE_TWIN)
outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
write_scc(&info->priv[0], R9, FHWRES);
free_netdev(info->dev[1]);
out2:
free_netdev(info->dev[0]);
out1:
kfree(info);
out:
return -1;
}
/* Driver functions */
static void write_scc(struct scc_priv *priv, int reg, int val)
{
unsigned long flags;
switch (priv->type) {
case TYPE_S5:
if (reg)
outb(reg, priv->scc_cmd);
outb(val, priv->scc_cmd);
return;
case TYPE_TWIN:
if (reg)
outb_p(reg, priv->scc_cmd);
outb_p(val, priv->scc_cmd);
return;
default:
spin_lock_irqsave(priv->register_lock, flags);
outb_p(0, priv->card_base + PI_DREQ_MASK);
if (reg)
outb_p(reg, priv->scc_cmd);
outb_p(val, priv->scc_cmd);
outb(1, priv->card_base + PI_DREQ_MASK);
spin_unlock_irqrestore(priv->register_lock, flags);
return;
}
}
static void write_scc_data(struct scc_priv *priv, int val, int fast)
{
unsigned long flags;
switch (priv->type) {
case TYPE_S5:
outb(val, priv->scc_data);
return;
case TYPE_TWIN:
outb_p(val, priv->scc_data);
return;
default:
if (fast)
outb_p(val, priv->scc_data);
else {
spin_lock_irqsave(priv->register_lock, flags);
outb_p(0, priv->card_base + PI_DREQ_MASK);
outb_p(val, priv->scc_data);
outb(1, priv->card_base + PI_DREQ_MASK);
spin_unlock_irqrestore(priv->register_lock, flags);
}
return;
}
}
static int read_scc(struct scc_priv *priv, int reg)
{
int rc;
unsigned long flags;
switch (priv->type) {
case TYPE_S5:
if (reg)
outb(reg, priv->scc_cmd);
return inb(priv->scc_cmd);
case TYPE_TWIN:
if (reg)
outb_p(reg, priv->scc_cmd);
return inb_p(priv->scc_cmd);
default:
spin_lock_irqsave(priv->register_lock, flags);
outb_p(0, priv->card_base + PI_DREQ_MASK);
if (reg)
outb_p(reg, priv->scc_cmd);
rc = inb_p(priv->scc_cmd);
outb(1, priv->card_base + PI_DREQ_MASK);
spin_unlock_irqrestore(priv->register_lock, flags);
return rc;
}
}
static int read_scc_data(struct scc_priv *priv)
{
int rc;
unsigned long flags;
switch (priv->type) {
case TYPE_S5:
return inb(priv->scc_data);
case TYPE_TWIN:
return inb_p(priv->scc_data);
default:
spin_lock_irqsave(priv->register_lock, flags);
outb_p(0, priv->card_base + PI_DREQ_MASK);
rc = inb_p(priv->scc_data);
outb(1, priv->card_base + PI_DREQ_MASK);
spin_unlock_irqrestore(priv->register_lock, flags);
return rc;
}
}
static int scc_open(struct net_device *dev)
{
struct scc_priv *priv = dev->ml_priv;
struct scc_info *info = priv->info;
int card_base = priv->card_base;
/* Request IRQ if not already used by other channel */
if (!info->irq_used) {
if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
return -EAGAIN;
}
}
info->irq_used++;
/* Request DMA if required */
if (priv->param.dma >= 0) {
if (request_dma(priv->param.dma, "dmascc")) {
if (--info->irq_used == 0)
free_irq(dev->irq, info);
return -EAGAIN;
} else {
unsigned long flags = claim_dma_lock();
clear_dma_ff(priv->param.dma);
release_dma_lock(flags);
}
}
/* Initialize local variables */
priv->rx_ptr = 0;
priv->rx_over = 0;
priv->rx_head = priv->rx_tail = priv->rx_count = 0;
priv->state = IDLE;
priv->tx_head = priv->tx_tail = priv->tx_count = 0;
priv->tx_ptr = 0;
/* Reset channel */
write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
/* X1 clock, SDLC mode */
write_scc(priv, R4, SDLC | X1CLK);
/* DMA */
write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
/* 8 bit RX char, RX disable */
write_scc(priv, R3, Rx8);
/* 8 bit TX char, TX disable */
write_scc(priv, R5, Tx8);
/* SDLC address field */
write_scc(priv, R6, 0);
/* SDLC flag */
write_scc(priv, R7, FLAG);
switch (priv->chip) {
case Z85C30:
/* Select WR7' */
write_scc(priv, R15, SHDLCE);
/* Auto EOM reset */
write_scc(priv, R7, AUTOEOM);
write_scc(priv, R15, 0);
break;
case Z85230:
/* Select WR7' */
write_scc(priv, R15, SHDLCE);
/* The following bits are set (see 2.5.2.1):
- Automatic EOM reset
- Interrupt request if RX FIFO is half full
This bit should be ignored in DMA mode (according to the
documentation), but actually isn't. The receiver doesn't work if
it is set. Thus, we have to clear it in DMA mode.
- Interrupt/DMA request if TX FIFO is completely empty
a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
compatibility).
b) If cleared, DMA requests may follow each other very quickly,
filling up the TX FIFO.
Advantage: TX works even in case of high bus latency.
Disadvantage: Edge-triggered DMA request circuitry may miss
a request. No more data is delivered, resulting
in a TX FIFO underrun.
Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
The PackeTwin doesn't. I don't know about the PI, but let's
assume it behaves like the PI2.
*/
if (priv->param.dma >= 0) {
if (priv->type == TYPE_TWIN)
write_scc(priv, R7, AUTOEOM | TXFIFOE);
else
write_scc(priv, R7, AUTOEOM);
} else {
write_scc(priv, R7, AUTOEOM | RXFIFOH);
}
write_scc(priv, R15, 0);
break;
}
/* Preset CRC, NRZ(I) encoding */
write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));
/* Configure baud rate generator */
if (priv->param.brg_tc >= 0) {
/* Program BR generator */
write_scc(priv, R12, priv->param.brg_tc & 0xFF);
write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
/* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
PackeTwin, not connected on the PI2); set DPLL source to BRG */
write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
/* Enable DPLL */
write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
} else {
/* Disable BR generator */
write_scc(priv, R14, DTRREQ | BRSRC);
}
/* Configure clocks */
if (priv->type == TYPE_TWIN) {
/* Disable external TX clock receiver */
outb((info->twin_serial_cfg &=
~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
card_base + TWIN_SERIAL_CFG);
}
write_scc(priv, R11, priv->param.clocks);
if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
/* Enable external TX clock receiver */
outb((info->twin_serial_cfg |=
(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
card_base + TWIN_SERIAL_CFG);
}
/* Configure PackeTwin */
if (priv->type == TYPE_TWIN) {
/* Assert DTR, enable interrupts */
outb((info->twin_serial_cfg |= TWIN_EI |
(priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
card_base + TWIN_SERIAL_CFG);
}
/* Read current status */
priv->rr0 = read_scc(priv, R0);
/* Enable DCD interrupt */
write_scc(priv, R15, DCDIE);
netif_start_queue(dev);
return 0;
}
static int scc_close(struct net_device *dev)
{
struct scc_priv *priv = dev->ml_priv;
struct scc_info *info = priv->info;
int card_base = priv->card_base;
netif_stop_queue(dev);
if (priv->type == TYPE_TWIN) {
/* Drop DTR */
outb((info->twin_serial_cfg &=
(priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
card_base + TWIN_SERIAL_CFG);
}
/* Reset channel, free DMA and IRQ */
write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
if (priv->param.dma >= 0) {
if (priv->type == TYPE_TWIN)
outb(0, card_base + TWIN_DMA_CFG);
free_dma(priv->param.dma);
}
if (--info->irq_used == 0)
free_irq(dev->irq, info);
return 0;
}
static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct scc_priv *priv = dev->ml_priv;
switch (cmd) {
case SIOCGSCCPARAM:
if (copy_to_user
(ifr->ifr_data, &priv->param,
sizeof(struct scc_param)))
return -EFAULT;
return 0;
case SIOCSSCCPARAM:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (netif_running(dev))
return -EAGAIN;
if (copy_from_user
(&priv->param, ifr->ifr_data,
sizeof(struct scc_param)))
return -EFAULT;
return 0;
default:
return -EINVAL;
}
}
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
{
struct scc_priv *priv = dev->ml_priv;
unsigned long flags;
int i;
/* Temporarily stop the scheduler feeding us packets */
netif_stop_queue(dev);
/* Transfer data to DMA buffer */
i = priv->tx_head;
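/* The first skb byte is the KISS-style control byte prepended by
the AX.25 stack (cf. rx_bh(), which puts a zero byte back on
receive), so the frame proper starts at offset 1. */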
skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
priv->tx_len[i] = skb->len - 1;
/* Clear interrupts while we touch our circular buffers */
spin_lock_irqsave(&priv->ring_lock, flags);
/* Move the ring buffer's head */
priv->tx_head = (i + 1) % NUM_TX_BUF;
priv->tx_count++;
/* If we just filled up the last buffer, leave queue stopped.
The higher layers must wait until we have a DMA buffer
to accept the data. */
if (priv->tx_count < NUM_TX_BUF)
netif_wake_queue(dev);
/* Set new TX state */
if (priv->state == IDLE) {
/* Assert RTS, start timer */
priv->state = TX_HEAD;
priv->tx_start = jiffies;
write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
write_scc(priv, R15, 0);
start_timer(priv, priv->param.txdelay, 0);
}
/* Turn interrupts back on and free buffer */
spin_unlock_irqrestore(&priv->ring_lock, flags);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
static int scc_set_mac_address(struct net_device *dev, void *sa)
{
memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
dev->addr_len);
return 0;
}
static inline void tx_on(struct scc_priv *priv)
{
int i, n;
unsigned long flags;
if (priv->param.dma >= 0) {
n = (priv->chip == Z85230) ? 3 : 1;
/* Program DMA controller */
flags = claim_dma_lock();
set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
set_dma_addr(priv->param.dma,
(int) priv->tx_buf[priv->tx_tail] + n);
set_dma_count(priv->param.dma,
priv->tx_len[priv->tx_tail] - n);
release_dma_lock(flags);
/* Enable TX underrun interrupt */
write_scc(priv, R15, TxUIE);
/* Configure DREQ */
if (priv->type == TYPE_TWIN)
outb((priv->param.dma == 1) ?
TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
priv->card_base + TWIN_DMA_CFG);
else
write_scc(priv, R1,
EXT_INT_ENAB | WT_FN_RDYFN |
WT_RDY_ENAB);
/* Write first byte(s) */
spin_lock_irqsave(priv->register_lock, flags);
for (i = 0; i < n; i++)
write_scc_data(priv,
priv->tx_buf[priv->tx_tail][i], 1);
enable_dma(priv->param.dma);
spin_unlock_irqrestore(priv->register_lock, flags);
} else {
write_scc(priv, R15, TxUIE);
write_scc(priv, R1,
EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
tx_isr(priv);
}
/* Reset EOM latch if we do not have the AUTOEOM feature */
if (priv->chip == Z8530)
write_scc(priv, R0, RES_EOM_L);
}
static inline void rx_on(struct scc_priv *priv)
{
unsigned long flags;
/* Clear RX FIFO */
while (read_scc(priv, R0) & Rx_CH_AV)
read_scc_data(priv);
priv->rx_over = 0;
if (priv->param.dma >= 0) {
/* Program DMA controller */
flags = claim_dma_lock();
set_dma_mode(priv->param.dma, DMA_MODE_READ);
set_dma_addr(priv->param.dma,
(int) priv->rx_buf[priv->rx_head]);
set_dma_count(priv->param.dma, BUF_SIZE);
release_dma_lock(flags);
enable_dma(priv->param.dma);
/* Configure PackeTwin DMA */
if (priv->type == TYPE_TWIN) {
outb((priv->param.dma == 1) ?
TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
priv->card_base + TWIN_DMA_CFG);
}
/* Sp. cond. intr. only, ext int enable, RX DMA enable */
write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
} else {
/* Reset current frame */
priv->rx_ptr = 0;
/* Intr. on all Rx characters and Sp. cond., ext int enable */
write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
WT_FN_RDYFN);
}
write_scc(priv, R0, ERR_RES);
write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
}
static inline void rx_off(struct scc_priv *priv)
{
/* Disable receiver */
write_scc(priv, R3, Rx8);
/* Disable DREQ / RX interrupt */
if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
outb(0, priv->card_base + TWIN_DMA_CFG);
else
write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
/* Disable DMA */
if (priv->param.dma >= 0)
disable_dma(priv->param.dma);
}
static void start_timer(struct scc_priv *priv, int t, int r15)
{
outb(priv->tmr_mode, priv->tmr_ctrl);
if (t == 0) {
tm_isr(priv);
} else if (t > 0) {
outb(t & 0xFF, priv->tmr_cnt);
outb((t >> 8) & 0xFF, priv->tmr_cnt);
if (priv->type != TYPE_TWIN) {
write_scc(priv, R15, r15 | CTSIE);
priv->rr0 |= CTS;
}
}
}
static inline unsigned char random(void)
{
/* See "Numerical Recipes in C", second edition, p. 284 */
rand = rand * 1664525L + 1013904223L;
return (unsigned char) (rand >> 24);
}
static inline void z8530_isr(struct scc_info *info)
{
int is, i = 100;
while ((is = read_scc(&info->priv[0], R3)) && i--) {
if (is & CHARxIP) {
rx_isr(&info->priv[0]);
} else if (is & CHATxIP) {
tx_isr(&info->priv[0]);
} else if (is & CHAEXT) {
es_isr(&info->priv[0]);
} else if (is & CHBRxIP) {
rx_isr(&info->priv[1]);
} else if (is & CHBTxIP) {
tx_isr(&info->priv[1]);
} else {
es_isr(&info->priv[1]);
}
write_scc(&info->priv[0], R0, RES_H_IUS);
}
if (i < 0) {
printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
is);
}
/* Ok, no interrupts pending from this 8530. The INT line should
be inactive now. */
}
static irqreturn_t scc_isr(int irq, void *dev_id)
{
struct scc_info *info = dev_id;
spin_lock(info->priv[0].register_lock);
/* At this point interrupts are enabled, and the interrupt under service
is already acknowledged, but masked off.
Interrupt processing: We loop until we know that the IRQ line is
low. If another positive edge occurs afterwards during the ISR,
another interrupt will be triggered by the interrupt controller
as soon as the IRQ level is enabled again (see asm/irq.h).
Bottom-half handlers will be processed after scc_isr(). This is
important, since we only have small ringbuffers and want new data
to be fetched/delivered immediately. */
if (info->priv[0].type == TYPE_TWIN) {
int is, card_base = info->priv[0].card_base;
while ((is = ~inb(card_base + TWIN_INT_REG)) &
TWIN_INT_MSK) {
if (is & TWIN_SCC_MSK) {
z8530_isr(info);
} else if (is & TWIN_TMR1_MSK) {
inb(card_base + TWIN_CLR_TMR1);
tm_isr(&info->priv[0]);
} else {
inb(card_base + TWIN_CLR_TMR2);
tm_isr(&info->priv[1]);
}
}
} else
z8530_isr(info);
spin_unlock(info->priv[0].register_lock);
return IRQ_HANDLED;
}
static void rx_isr(struct scc_priv *priv)
{
if (priv->param.dma >= 0) {
/* Check special condition and perform error reset. See 2.4.7.5. */
special_condition(priv, read_scc(priv, R1));
write_scc(priv, R0, ERR_RES);
} else {
/* Check special condition for each character. Error reset not necessary.
Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
int rc;
while (read_scc(priv, R0) & Rx_CH_AV) {
rc = read_scc(priv, R1);
if (priv->rx_ptr < BUF_SIZE)
priv->rx_buf[priv->rx_head][priv->rx_ptr++] =
read_scc_data(priv);
else {
priv->rx_over = 2;
read_scc_data(priv);
}
special_condition(priv, rc);
}
}
}
static void special_condition(struct scc_priv *priv, int rc)
{
int cb;
unsigned long flags;
/* See Figure 2-15. Only overrun and EOF need to be checked. */
if (rc & Rx_OVR) {
/* Receiver overrun */
priv->rx_over = 1;
if (priv->param.dma < 0)
write_scc(priv, R0, ERR_RES);
} else if (rc & END_FR) {
/* End of frame. Get byte count */
if (priv->param.dma >= 0) {
flags = claim_dma_lock();
cb = BUF_SIZE - get_dma_residue(priv->param.dma) -
2;
release_dma_lock(flags);
} else {
cb = priv->rx_ptr - 2;
}
if (priv->rx_over) {
/* We had an overrun */
priv->dev->stats.rx_errors++;
if (priv->rx_over == 2)
priv->dev->stats.rx_length_errors++;
else
priv->dev->stats.rx_fifo_errors++;
priv->rx_over = 0;
} else if (rc & CRC_ERR) {
/* Count invalid CRC only if packet length >= minimum */
if (cb >= 15) {
priv->dev->stats.rx_errors++;
priv->dev->stats.rx_crc_errors++;
}
} else {
if (cb >= 15) {
if (priv->rx_count < NUM_RX_BUF - 1) {
/* Put good frame in FIFO */
priv->rx_len[priv->rx_head] = cb;
priv->rx_head = (priv->rx_head + 1) % NUM_RX_BUF;
priv->rx_count++;
schedule_work(&priv->rx_work);
} else {
priv->dev->stats.rx_errors++;
priv->dev->stats.rx_over_errors++;
}
}
}
/* Get ready for new frame */
if (priv->param.dma >= 0) {
flags = claim_dma_lock();
set_dma_addr(priv->param.dma,
(int) priv->rx_buf[priv->rx_head]);
set_dma_count(priv->param.dma, BUF_SIZE);
release_dma_lock(flags);
} else {
priv->rx_ptr = 0;
}
}
}
static void rx_bh(struct work_struct *ugli_api)
{
struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
int i = priv->rx_tail;
int cb;
unsigned long flags;
struct sk_buff *skb;
unsigned char *data;
spin_lock_irqsave(&priv->ring_lock, flags);
while (priv->rx_count) {
spin_unlock_irqrestore(&priv->ring_lock, flags);
cb = priv->rx_len[i];
/* Allocate buffer */
skb = dev_alloc_skb(cb + 1);
if (skb == NULL) {
/* Drop packet */
priv->dev->stats.rx_dropped++;
} else {
/* Fill buffer */
data = skb_put(skb, cb + 1);
data[0] = 0;
memcpy(&data[1], priv->rx_buf[i], cb);
skb->protocol = ax25_type_trans(skb, priv->dev);
netif_rx(skb);
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += cb;
}
spin_lock_irqsave(&priv->ring_lock, flags);
/* Move tail */
priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
priv->rx_count--;
}
spin_unlock_irqrestore(&priv->ring_lock, flags);
}
static void tx_isr(struct scc_priv *priv)
{
int i = priv->tx_tail, p = priv->tx_ptr;
/* Suspend TX interrupts if we don't want to send anything.
See Figure 2-22. */
if (p == priv->tx_len[i]) {
write_scc(priv, R0, RES_Tx_P);
return;
}
/* Write characters */
while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
write_scc_data(priv, priv->tx_buf[i][p++], 0);
}
/* Reset EOM latch of Z8530 */
if (!priv->tx_ptr && p && priv->chip == Z8530)
write_scc(priv, R0, RES_EOM_L);
priv->tx_ptr = p;
}
static void es_isr(struct scc_priv *priv)
{
int i, rr0, drr0, res;
unsigned long flags;
/* Read status, reset interrupt bit (open latches) */
rr0 = read_scc(priv, R0);
write_scc(priv, R0, RES_EXT_INT);
drr0 = priv->rr0 ^ rr0;
priv->rr0 = rr0;
/* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
it might have already been cleared again by AUTOEOM. */
if (priv->state == TX_DATA) {
/* Get remaining bytes */
i = priv->tx_tail;
if (priv->param.dma >= 0) {
disable_dma(priv->param.dma);
flags = claim_dma_lock();
res = get_dma_residue(priv->param.dma);
release_dma_lock(flags);
} else {
res = priv->tx_len[i] - priv->tx_ptr;
priv->tx_ptr = 0;
}
/* Disable DREQ / TX interrupt */
if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
outb(0, priv->card_base + TWIN_DMA_CFG);
else
write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
if (res) {
/* Update packet statistics */
priv->dev->stats.tx_errors++;
priv->dev->stats.tx_fifo_errors++;
/* Other underrun interrupts may already be waiting */
write_scc(priv, R0, RES_EXT_INT);
write_scc(priv, R0, RES_EXT_INT);
} else {
/* Update packet statistics */
priv->dev->stats.tx_packets++;
priv->dev->stats.tx_bytes += priv->tx_len[i];
/* Remove frame from FIFO */
priv->tx_tail = (i + 1) % NUM_TX_BUF;
priv->tx_count--;
/* Inform upper layers */
netif_wake_queue(priv->dev);
}
/* Switch state */
write_scc(priv, R15, 0);
if (priv->tx_count &&
(jiffies - priv->tx_start) < priv->param.txtimeout) {
priv->state = TX_PAUSE;
start_timer(priv, priv->param.txpause, 0);
} else {
priv->state = TX_TAIL;
start_timer(priv, priv->param.txtail, 0);
}
}
/* DCD transition */
if (drr0 & DCD) {
if (rr0 & DCD) {
switch (priv->state) {
case IDLE:
case WAIT:
priv->state = DCD_ON;
write_scc(priv, R15, 0);
start_timer(priv, priv->param.dcdon, 0);
}
} else {
switch (priv->state) {
case RX_ON:
rx_off(priv);
priv->state = DCD_OFF;
write_scc(priv, R15, 0);
start_timer(priv, priv->param.dcdoff, 0);
}
}
}
/* CTS transition */
if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
tm_isr(priv);
}
static void tm_isr(struct scc_priv *priv)
{
switch (priv->state) {
case TX_HEAD:
case TX_PAUSE:
tx_on(priv);
priv->state = TX_DATA;
break;
case TX_TAIL:
write_scc(priv, R5, TxCRC_ENAB | Tx8);
priv->state = RTS_OFF;
if (priv->type != TYPE_TWIN)
write_scc(priv, R15, 0);
start_timer(priv, priv->param.rtsoff, 0);
break;
case RTS_OFF:
write_scc(priv, R15, DCDIE);
priv->rr0 = read_scc(priv, R0);
if (priv->rr0 & DCD) {
priv->dev->stats.collisions++;
rx_on(priv);
priv->state = RX_ON;
} else {
priv->state = WAIT;
start_timer(priv, priv->param.waittime, DCDIE);
}
break;
case WAIT:
if (priv->tx_count) {
priv->state = TX_HEAD;
priv->tx_start = jiffies;
write_scc(priv, R5,
TxCRC_ENAB | RTS | TxENAB | Tx8);
write_scc(priv, R15, 0);
start_timer(priv, priv->param.txdelay, 0);
} else {
priv->state = IDLE;
if (priv->type != TYPE_TWIN)
write_scc(priv, R15, DCDIE);
}
break;
case DCD_ON:
case DCD_OFF:
write_scc(priv, R15, DCDIE);
priv->rr0 = read_scc(priv, R0);
if (priv->rr0 & DCD) {
rx_on(priv);
priv->state = RX_ON;
} else {
priv->state = WAIT;
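/* p-persistence backoff: random() is uniform over 0..255, so
persist values near 256 make the integer division almost always
0 (transmit at once), while small values defer for up to
255/persist slot times. */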
start_timer(priv,
random() / priv->param.persist *
priv->param.slottime, DCDIE);
}
break;
}
}
| gpl-2.0 |
vet-note/android_kernel_samsung_smdk4210 | sound/pci/echoaudio/darla20.c | 3612 | 2891 | /*
* ALSA driver for Echoaudio soundcards.
* Copyright (C) 2003-2004 Giuliano Pochini <pochini@shiny.it>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#define ECHOGALS_FAMILY
#define ECHOCARD_DARLA20
#define ECHOCARD_NAME "Darla20"
#define ECHOCARD_HAS_MONITOR
/* Pipe indexes */
#define PX_ANALOG_OUT 0 /* 8 */
#define PX_DIGITAL_OUT 8 /* 0 */
#define PX_ANALOG_IN 8 /* 2 */
#define PX_DIGITAL_IN 10 /* 0 */
#define PX_NUM 10
/* Bus indexes */
#define BX_ANALOG_OUT 0 /* 8 */
#define BX_DIGITAL_OUT 8 /* 0 */
#define BX_ANALOG_IN 8 /* 2 */
#define BX_DIGITAL_IN 10 /* 0 */
#define BX_NUM 10
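/* The trailing comments give the pipe/bus counts per type: the
Darla20 has 8 analog outputs and 2 analog inputs and no digital
I/O, so the digital indexes collapse onto their neighbours. */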
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/asoundef.h>
#include <sound/initval.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include "echoaudio.h"
MODULE_FIRMWARE("ea/darla20_dsp.fw");
#define FW_DARLA20_DSP 0
static const struct firmware card_fw[] = {
{0, "darla20_dsp.fw"}
};
static DEFINE_PCI_DEVICE_TABLE(snd_echo_ids) = {
{0x1057, 0x1801, 0xECC0, 0x0010, 0, 0, 0}, /* DSP 56301 Darla20 rev.0 */
{0,}
};
static struct snd_pcm_hardware pcm_hardware_skel = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_SYNC_START,
.formats = SNDRV_PCM_FMTBIT_U8 |
SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_3LE |
SNDRV_PCM_FMTBIT_S32_LE |
SNDRV_PCM_FMTBIT_S32_BE,
.rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
.rate_min = 44100,
.rate_max = 48000,
.channels_min = 1,
.channels_max = 2,
.buffer_bytes_max = 262144,
.period_bytes_min = 32,
.period_bytes_max = 131072,
.periods_min = 2,
.periods_max = 220,
/* One page (4k) contains 512 instructions. I don't know if the hw
supports lists longer than this. In this case periods_max=220 is a
safe limit to make sure the list never exceeds 512 instructions. */
};
#include "darla20_dsp.c"
#include "echoaudio_dsp.c"
#include "echoaudio.c"
| gpl-2.0 |
TeamExodus/kernel_lge_hammerhead | arch/m68k/kernel/sys_m68k.c | 4380 | 13710 | /*
* linux/arch/m68k/kernel/sys_m68k.c
*
* This file contains various random system calls that
* have a non-standard calling sequence on the Linux/m68k
* platform.
*/
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_MMU
#include <asm/tlb.h>
asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code);
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
/*
* This is wrong for sun3 - there PAGE_SIZE is 8Kb,
* so we need to shift the argument down by 1; m68k mmap64(3)
* (in libc) expects the last argument of mmap2 in 4Kb units.
*/
return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr) \
({ \
unsigned long _mmusr, _paddr; \
\
__asm__ __volatile__ (".chip 68040\n\t" \
"ptestr (%1)\n\t" \
"movec %%mmusr,%0\n\t" \
".chip 68k" \
: "=r" (_mmusr) \
: "a" (vaddr)); \
_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
_paddr; \
})
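/* ptestr probes the MMU translation for vaddr and latches the
result in the MMUSR register; if the resident bit (MMU_R_040) is
set, the page-frame part of MMUSR is the physical page address,
otherwise the page is unmapped and 0 is returned. */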
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
unsigned long paddr, i;
switch (scope)
{
case FLUSH_SCOPE_ALL:
switch (cache)
{
case FLUSH_CACHE_DATA:
/* This nop is needed for some broken versions of the 68040. */
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpusha %dc\n\t"
".chip 68k");
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpusha %ic\n\t"
".chip 68k");
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpusha %bc\n\t"
".chip 68k");
break;
}
break;
case FLUSH_SCOPE_LINE:
/* Find the physical address of the first mapped page in the
address range. */
if ((paddr = virt_to_phys_040(addr))) {
paddr += addr & ~(PAGE_MASK | 15);
len = (len + (addr & 15) + 15) >> 4;
} else {
unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
tmp = PAGE_SIZE;
for (;;)
{
if ((paddr = virt_to_phys_040(addr)))
break;
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
}
len = (len + 15) >> 4;
}
i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
while (len--)
{
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushl %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushl %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushl %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
if (!--i && len)
{
/*
* No need to page align here since it is done by
* virt_to_phys_040().
*/
addr += PAGE_SIZE;
i = PAGE_SIZE / 16;
/* Recompute physical address when crossing a page
boundary. */
for (;;)
{
if ((paddr = virt_to_phys_040(addr)))
break;
if (len <= i)
return 0;
len -= i;
addr += PAGE_SIZE;
}
}
else
paddr += 16;
}
break;
default:
case FLUSH_SCOPE_PAGE:
len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
{
if (!(paddr = virt_to_phys_040(addr)))
continue;
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushp %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushp %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushp %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
}
break;
}
return 0;
}
#define virt_to_phys_060(vaddr) \
({ \
unsigned long paddr; \
__asm__ __volatile__ (".chip 68060\n\t" \
"plpar (%0)\n\t" \
".chip 68k" \
: "=a" (paddr) \
: "0" (vaddr)); \
(paddr); /* XXX */ \
})
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
unsigned long paddr, i;
/*
* 68060 manual says:
* cpush %dc : flush DC, remains valid (with our %cacr setup)
* cpush %ic : invalidate IC
* cpush %bc : flush DC + invalidate IC
*/
switch (scope)
{
case FLUSH_SCOPE_ALL:
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ (".chip 68060\n\t"
"cpusha %dc\n\t"
".chip 68k");
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ (".chip 68060\n\t"
"cpusha %ic\n\t"
".chip 68k");
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ (".chip 68060\n\t"
"cpusha %bc\n\t"
".chip 68k");
break;
}
break;
case FLUSH_SCOPE_LINE:
/* Find the physical address of the first mapped page in the
address range. */
len += addr & 15;
addr &= -16;
if (!(paddr = virt_to_phys_060(addr))) {
unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
tmp = PAGE_SIZE;
for (;;)
{
if ((paddr = virt_to_phys_060(addr)))
break;
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
}
}
len = (len + 15) >> 4;
i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
while (len--)
{
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushl %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushl %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushl %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
if (!--i && len)
{
/*
* We just want to jump to the first cache line
* in the next page.
*/
addr += PAGE_SIZE;
addr &= PAGE_MASK;
i = PAGE_SIZE / 16;
/* Recompute physical address when crossing a page
boundary. */
for (;;)
{
if ((paddr = virt_to_phys_060(addr)))
break;
if (len <= i)
return 0;
len -= i;
addr += PAGE_SIZE;
}
}
else
paddr += 16;
}
break;
default:
case FLUSH_SCOPE_PAGE:
len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
addr &= PAGE_MASK; /* Workaround for bug in some
revisions of the 68060 */
for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
{
if (!(paddr = virt_to_phys_060(addr)))
continue;
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushp %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushp %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushp %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
}
break;
}
return 0;
}
/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
struct vm_area_struct *vma;
int ret = -EINVAL;
if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
cache & ~FLUSH_CACHE_BOTH)
goto out;
if (scope == FLUSH_SCOPE_ALL) {
/* Only the superuser may explicitly flush the whole cache. */
ret = -EPERM;
if (!capable(CAP_SYS_ADMIN))
goto out;
} else {
/*
* Verify that the specified address region actually belongs
* to this process.
*/
vma = find_vma (current->mm, addr);
ret = -EINVAL;
/* Check for overflow. */
if (addr + len < addr)
goto out;
if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
goto out;
}
if (CPU_IS_020_OR_030) {
if (scope == FLUSH_SCOPE_LINE && len < 256) {
unsigned long cacr;
__asm__ ("movec %%cacr, %0" : "=r" (cacr));
if (cache & FLUSH_CACHE_INSN)
cacr |= 4;
if (cache & FLUSH_CACHE_DATA)
cacr |= 0x400;
len >>= 2;
while (len--) {
__asm__ __volatile__ ("movec %1, %%caar\n\t"
"movec %0, %%cacr"
: /* no outputs */
: "r" (cacr), "r" (addr));
addr += 4;
}
} else {
/* Flush the whole cache, even if page granularity requested. */
unsigned long cacr;
__asm__ ("movec %%cacr, %0" : "=r" (cacr));
if (cache & FLUSH_CACHE_INSN)
cacr |= 8;
if (cache & FLUSH_CACHE_DATA)
cacr |= 0x800;
__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
}
ret = 0;
goto out;
} else {
/*
* 040 or 060: don't blindly trust 'scope', someone could
* try to flush a few megs of memory.
*/
if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
scope = FLUSH_SCOPE_PAGE;
if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
scope = FLUSH_SCOPE_ALL;
if (CPU_IS_040) {
ret = cache_flush_040 (addr, scope, cache, len);
} else if (CPU_IS_060) {
ret = cache_flush_060 (addr, scope, cache, len);
}
}
out:
return ret;
}
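/*
* Illustrative userspace sketch (not part of the kernel): a JIT
* that has just written code into a buffer might flush it with,
* e.g. (cacheflush() and the constants from <asm/cachectl.h> are
* assumed to be exposed by the m68k C library):
*
*	cacheflush((unsigned long) code, FLUSH_SCOPE_LINE,
*		   FLUSH_CACHE_BOTH, code_len);
*/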
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
D1 (newval). */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
unsigned long __user * mem)
{
/* This was borrowed from ARM's implementation. */
for (;;) {
struct mm_struct *mm = current->mm;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
spinlock_t *ptl;
unsigned long mem_value;
down_read(&mm->mmap_sem);
pgd = pgd_offset(mm, (unsigned long)mem);
if (!pgd_present(*pgd))
goto bad_access;
pmd = pmd_offset(pgd, (unsigned long)mem);
if (!pmd_present(*pmd))
goto bad_access;
pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
if (!pte_present(*pte) || !pte_dirty(*pte)
|| !pte_write(*pte)) {
pte_unmap_unlock(pte, ptl);
goto bad_access;
}
mem_value = *mem;
if (mem_value == oldval)
*mem = newval;
pte_unmap_unlock(pte, ptl);
up_read(&mm->mmap_sem);
return mem_value;
bad_access:
up_read(&mm->mmap_sem);
/* This is not necessarily a bad access; we can get here if
the memory we're trying to write to should be copied-on-write.
Make the kernel do the necessary page stuff, then re-iterate.
Simulate a write access fault to do that. */
{
/* The first argument of the function corresponds to
D1, which is the first field of struct pt_regs. */
struct pt_regs *fp = (struct pt_regs *)&newval;
/* '3' is an RMW flag. */
if (do_page_fault(fp, (unsigned long)mem, 3))
/* If the do_page_fault() failed, we don't
have anything meaningful to return.
There should be a SIGSEGV pending for
the process. */
return 0xdeadbeef;
}
}
}
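/*
* Illustrative userspace sketch (not part of the kernel): on m68k
* CPUs without a usable CAS instruction, a C library could emulate
* compare-and-exchange with this syscall. Register assignments
* follow the comment above; __NR_atomic_cmpxchg_32 is assumed to be
* defined in <asm/unistd.h>.
*
*	register long res asm("%d0") = __NR_atomic_cmpxchg_32;
*	register long nv asm("%d1") = newval;
*	register long ov asm("%d2") = oldval;
*	register unsigned long *p asm("%a0") = mem;
*
*	asm volatile ("trap #0" : "+d" (res)
*		      : "d" (nv), "d" (ov), "a" (p) : "memory");
*	swapped = (res == oldval);
*/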
#else
/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
flush_cache_all();
return 0;
}
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
D1 (newval). */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
unsigned long __user * mem)
{
struct mm_struct *mm = current->mm;
unsigned long mem_value;
down_read(&mm->mmap_sem);
mem_value = *mem;
if (mem_value == oldval)
*mem = newval;
up_read(&mm->mmap_sem);
return mem_value;
}
#endif /* CONFIG_MMU */
asmlinkage int sys_getpagesize(void)
{
return PAGE_SIZE;
}
/*
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
int kernel_execve(const char *filename,
const char *const argv[],
const char *const envp[])
{
register long __res asm ("%d0") = __NR_execve;
register long __a asm ("%d1") = (long)(filename);
register long __b asm ("%d2") = (long)(argv);
register long __c asm ("%d3") = (long)(envp);
asm volatile ("trap #0" : "+d" (__res)
: "d" (__a), "d" (__b), "d" (__c));
return __res;
}
asmlinkage unsigned long sys_get_thread_area(void)
{
return current_thread_info()->tp_value;
}
asmlinkage int sys_set_thread_area(unsigned long tp)
{
current_thread_info()->tp_value = tp;
return 0;
}
asmlinkage int sys_atomic_barrier(void)
{
/* no code needed for uniprocs */
return 0;
}
| gpl-2.0 |
NormandyEGY/android_kernel_nokia_normandy | drivers/nfc/nfcwilink.c | 4636 | 14411 | /*
* Texas Instrument's NFC Driver For Shared Transport.
*
* NFC Driver acts as interface between NCI core and
* TI Shared Transport Layer.
*
* Copyright (C) 2011 Texas Instruments, Inc.
*
* Written by Ilan Elias <ilane@ti.com>
*
* Acknowledgements:
* This file is based on btwilink.c, which was written
* by Raja Mani and Pavan Savoy.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/firmware.h>
#include <linux/nfc.h>
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
#include <linux/ti_wilink_st.h>
#define NFCWILINK_CHNL 12
#define NFCWILINK_OPCODE 7
#define NFCWILINK_MAX_FRAME_SIZE 300
#define NFCWILINK_HDR_LEN 4
#define NFCWILINK_OFFSET_LEN_IN_HDR 1
#define NFCWILINK_LEN_SIZE 2
#define NFCWILINK_REGISTER_TIMEOUT 8000 /* 8 sec */
#define NFCWILINK_CMD_TIMEOUT 5000 /* 5 sec */
#define BTS_FILE_NAME_MAX_SIZE 40
#define BTS_FILE_HDR_MAGIC 0x42535442
#define BTS_FILE_CMD_MAX_LEN 0xff
#define BTS_FILE_ACTION_TYPE_SEND_CMD 1
#define NCI_VS_NFCC_INFO_CMD_GID 0x2f
#define NCI_VS_NFCC_INFO_CMD_OID 0x12
#define NCI_VS_NFCC_INFO_RSP_GID 0x4f
#define NCI_VS_NFCC_INFO_RSP_OID 0x12
struct nfcwilink_hdr {
__u8 chnl;
__u8 opcode;
__le16 len;
} __packed;
struct nci_vs_nfcc_info_cmd {
__u8 gid;
__u8 oid;
__u8 plen;
} __packed;
struct nci_vs_nfcc_info_rsp {
__u8 gid;
__u8 oid;
__u8 plen;
__u8 status;
__u8 hw_id;
__u8 sw_ver_x;
__u8 sw_ver_z;
__u8 patch_id;
} __packed;
struct bts_file_hdr {
__le32 magic;
__le32 ver;
__u8 rfu[24];
__u8 actions[0];
} __packed;
struct bts_file_action {
__le16 type;
__le16 len;
__u8 data[0];
} __packed;
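/* BTS firmware file layout, as parsed by nfcwilink_download_fw()
below: a fixed header (magic 0x42535442, version, 24 reserved
bytes) followed by a sequence of variable-length actions, each a
{ type, len } pair plus len bytes of payload; only actions of
type BTS_FILE_ACTION_TYPE_SEND_CMD are executed. */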
struct nfcwilink {
struct platform_device *pdev;
struct nci_dev *ndev;
unsigned long flags;
char st_register_cb_status;
long (*st_write) (struct sk_buff *);
struct completion completed;
struct nci_vs_nfcc_info_rsp nfcc_info;
};
/* NFCWILINK driver flags */
enum {
NFCWILINK_RUNNING,
NFCWILINK_FW_DOWNLOAD,
};
static int nfcwilink_send(struct sk_buff *skb);
static inline struct sk_buff *nfcwilink_skb_alloc(unsigned int len, gfp_t how)
{
struct sk_buff *skb;
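/* Allocate with NFCWILINK_HDR_LEN of headroom so that
nfcwilink_send() can later skb_push() the 4-byte ST header
without reallocating. */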
skb = alloc_skb(len + NFCWILINK_HDR_LEN, how);
if (skb)
skb_reserve(skb, NFCWILINK_HDR_LEN);
return skb;
}
static void nfcwilink_fw_download_receive(struct nfcwilink *drv,
struct sk_buff *skb)
{
struct nci_vs_nfcc_info_rsp *rsp = (void *)skb->data;
/* Detect NCI_VS_NFCC_INFO_RSP and store the result */
if ((skb->len > 3) && (rsp->gid == NCI_VS_NFCC_INFO_RSP_GID) &&
(rsp->oid == NCI_VS_NFCC_INFO_RSP_OID)) {
memcpy(&drv->nfcc_info, rsp,
sizeof(struct nci_vs_nfcc_info_rsp));
}
kfree_skb(skb);
complete(&drv->completed);
}
static int nfcwilink_get_bts_file_name(struct nfcwilink *drv, char *file_name)
{
struct nci_vs_nfcc_info_cmd *cmd;
struct sk_buff *skb;
unsigned long comp_ret;
int rc;
nfc_dev_dbg(&drv->pdev->dev, "get_bts_file_name entry");
skb = nfcwilink_skb_alloc(sizeof(struct nci_vs_nfcc_info_cmd),
GFP_KERNEL);
if (!skb) {
nfc_dev_err(&drv->pdev->dev,
"no memory for nci_vs_nfcc_info_cmd");
return -ENOMEM;
}
skb->dev = (void *)drv->ndev;
cmd = (struct nci_vs_nfcc_info_cmd *)
skb_put(skb, sizeof(struct nci_vs_nfcc_info_cmd));
cmd->gid = NCI_VS_NFCC_INFO_CMD_GID;
cmd->oid = NCI_VS_NFCC_INFO_CMD_OID;
cmd->plen = 0;
drv->nfcc_info.plen = 0;
rc = nfcwilink_send(skb);
if (rc)
return rc;
comp_ret = wait_for_completion_timeout(&drv->completed,
msecs_to_jiffies(NFCWILINK_CMD_TIMEOUT));
nfc_dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld",
comp_ret);
if (comp_ret == 0) {
nfc_dev_err(&drv->pdev->dev,
"timeout on wait_for_completion_timeout");
return -ETIMEDOUT;
}
nfc_dev_dbg(&drv->pdev->dev, "nci_vs_nfcc_info_rsp: plen %d, status %d",
drv->nfcc_info.plen,
drv->nfcc_info.status);
if ((drv->nfcc_info.plen != 5) || (drv->nfcc_info.status != 0)) {
nfc_dev_err(&drv->pdev->dev,
"invalid nci_vs_nfcc_info_rsp");
return -EINVAL;
}
snprintf(file_name, BTS_FILE_NAME_MAX_SIZE,
"TINfcInit_%d.%d.%d.%d.bts",
drv->nfcc_info.hw_id,
drv->nfcc_info.sw_ver_x,
drv->nfcc_info.sw_ver_z,
drv->nfcc_info.patch_id);
nfc_dev_info(&drv->pdev->dev, "nfcwilink FW file name: %s", file_name);
return 0;
}
static int nfcwilink_send_bts_cmd(struct nfcwilink *drv, __u8 *data, int len)
{
struct nfcwilink_hdr *hdr = (struct nfcwilink_hdr *)data;
struct sk_buff *skb;
unsigned long comp_ret;
int rc;
nfc_dev_dbg(&drv->pdev->dev, "send_bts_cmd entry");
/* verify valid cmd for the NFC channel */
if ((len <= sizeof(struct nfcwilink_hdr)) ||
(len > BTS_FILE_CMD_MAX_LEN) ||
(hdr->chnl != NFCWILINK_CHNL) ||
(hdr->opcode != NFCWILINK_OPCODE)) {
nfc_dev_err(&drv->pdev->dev,
"ignoring invalid bts cmd, len %d, chnl %d, opcode %d",
len, hdr->chnl, hdr->opcode);
return 0;
}
/* remove the ST header */
len -= sizeof(struct nfcwilink_hdr);
data += sizeof(struct nfcwilink_hdr);
skb = nfcwilink_skb_alloc(len, GFP_KERNEL);
if (!skb) {
nfc_dev_err(&drv->pdev->dev, "no memory for bts cmd");
return -ENOMEM;
}
skb->dev = (void *)drv->ndev;
memcpy(skb_put(skb, len), data, len);
rc = nfcwilink_send(skb);
if (rc)
return rc;
comp_ret = wait_for_completion_timeout(&drv->completed,
msecs_to_jiffies(NFCWILINK_CMD_TIMEOUT));
nfc_dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld",
comp_ret);
if (comp_ret == 0) {
nfc_dev_err(&drv->pdev->dev,
"timeout on wait_for_completion_timeout");
return -ETIMEDOUT;
}
return 0;
}
static int nfcwilink_download_fw(struct nfcwilink *drv)
{
unsigned char file_name[BTS_FILE_NAME_MAX_SIZE];
const struct firmware *fw;
__u16 action_type, action_len;
__u8 *ptr;
int len, rc;
nfc_dev_dbg(&drv->pdev->dev, "download_fw entry");
set_bit(NFCWILINK_FW_DOWNLOAD, &drv->flags);
rc = nfcwilink_get_bts_file_name(drv, file_name);
if (rc)
goto exit;
rc = request_firmware(&fw, file_name, &drv->pdev->dev);
if (rc) {
nfc_dev_err(&drv->pdev->dev, "request_firmware failed %d", rc);
/* if the file is not found, don't exit with failure */
if (rc == -ENOENT)
rc = 0;
goto exit;
}
len = fw->size;
ptr = (__u8 *)fw->data;
if ((len == 0) || (ptr == NULL)) {
nfc_dev_dbg(&drv->pdev->dev,
"request_firmware returned size %d", len);
goto release_fw;
}
if (__le32_to_cpu(((struct bts_file_hdr *)ptr)->magic) !=
BTS_FILE_HDR_MAGIC) {
nfc_dev_err(&drv->pdev->dev, "wrong bts magic number");
rc = -EINVAL;
goto release_fw;
}
/* remove the BTS header */
len -= sizeof(struct bts_file_hdr);
ptr += sizeof(struct bts_file_hdr);
while (len > 0) {
action_type =
__le16_to_cpu(((struct bts_file_action *)ptr)->type);
action_len =
__le16_to_cpu(((struct bts_file_action *)ptr)->len);
nfc_dev_dbg(&drv->pdev->dev, "bts_file_action type %d, len %d",
action_type, action_len);
switch (action_type) {
case BTS_FILE_ACTION_TYPE_SEND_CMD:
rc = nfcwilink_send_bts_cmd(drv,
((struct bts_file_action *)ptr)->data,
action_len);
if (rc)
goto release_fw;
break;
}
/* advance to the next action */
len -= (sizeof(struct bts_file_action) + action_len);
ptr += (sizeof(struct bts_file_action) + action_len);
}
release_fw:
release_firmware(fw);
exit:
clear_bit(NFCWILINK_FW_DOWNLOAD, &drv->flags);
return rc;
}
/* Called by ST when registration is complete */
static void nfcwilink_register_complete(void *priv_data, char data)
{
struct nfcwilink *drv = priv_data;
nfc_dev_dbg(&drv->pdev->dev, "register_complete entry");
/* store ST registration status */
drv->st_register_cb_status = data;
/* complete the wait in nfc_st_open() */
complete(&drv->completed);
}
/* Called by ST when receive data is available */
static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
{
struct nfcwilink *drv = priv_data;
int rc;
if (!skb)
return -EFAULT;
if (!drv) {
kfree_skb(skb);
return -EFAULT;
}
nfc_dev_dbg(&drv->pdev->dev, "receive entry, len %d", skb->len);
/* strip the ST header
(apart from the chnl byte, which is not received in the hdr) */
skb_pull(skb, (NFCWILINK_HDR_LEN-1));
if (test_bit(NFCWILINK_FW_DOWNLOAD, &drv->flags)) {
nfcwilink_fw_download_receive(drv, skb);
return 0;
}
skb->dev = (void *) drv->ndev;
/* Forward skb to NCI core layer */
rc = nci_recv_frame(skb);
if (rc < 0) {
nfc_dev_err(&drv->pdev->dev, "nci_recv_frame failed %d", rc);
return rc;
}
return 0;
}
/* protocol structure registered with ST */
static struct st_proto_s nfcwilink_proto = {
.chnl_id = NFCWILINK_CHNL,
.max_frame_size = NFCWILINK_MAX_FRAME_SIZE,
.hdr_len = (NFCWILINK_HDR_LEN-1), /* not including chnl byte */
.offset_len_in_hdr = NFCWILINK_OFFSET_LEN_IN_HDR,
.len_size = NFCWILINK_LEN_SIZE,
.reserve = 0,
.recv = nfcwilink_receive,
.reg_complete_cb = nfcwilink_register_complete,
.write = NULL,
};
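/*
 * st_register() can return -EINPROGRESS, meaning the shared transport
 * completes registration asynchronously and reports the final status
 * through nfcwilink_register_complete(). nfcwilink_open() therefore
 * waits on drv->completed (with a timeout) before inspecting
 * drv->st_register_cb_status.
 */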
static int nfcwilink_open(struct nci_dev *ndev)
{
struct nfcwilink *drv = nci_get_drvdata(ndev);
unsigned long comp_ret;
int rc;
nfc_dev_dbg(&drv->pdev->dev, "open entry");
if (test_and_set_bit(NFCWILINK_RUNNING, &drv->flags)) {
rc = -EBUSY;
goto exit;
}
nfcwilink_proto.priv_data = drv;
init_completion(&drv->completed);
drv->st_register_cb_status = -EINPROGRESS;
rc = st_register(&nfcwilink_proto);
if (rc < 0) {
if (rc == -EINPROGRESS) {
comp_ret = wait_for_completion_timeout(
&drv->completed,
msecs_to_jiffies(NFCWILINK_REGISTER_TIMEOUT));
nfc_dev_dbg(&drv->pdev->dev,
"wait_for_completion_timeout returned %ld",
comp_ret);
if (comp_ret == 0) {
/* timeout */
rc = -ETIMEDOUT;
goto clear_exit;
} else if (drv->st_register_cb_status != 0) {
rc = drv->st_register_cb_status;
nfc_dev_err(&drv->pdev->dev,
"st_register_cb failed %d", rc);
goto clear_exit;
}
} else {
nfc_dev_err(&drv->pdev->dev,
"st_register failed %d", rc);
goto clear_exit;
}
}
/* st_register MUST fill the write callback */
BUG_ON(nfcwilink_proto.write == NULL);
drv->st_write = nfcwilink_proto.write;
rc = nfcwilink_download_fw(drv);
if (rc) {
nfc_dev_err(&drv->pdev->dev, "nfcwilink_download_fw failed %d",
rc);
/* open should succeed, even if the FW download failed */
rc = 0;
}
goto exit;
clear_exit:
clear_bit(NFCWILINK_RUNNING, &drv->flags);
exit:
return rc;
}
static int nfcwilink_close(struct nci_dev *ndev)
{
struct nfcwilink *drv = nci_get_drvdata(ndev);
int rc;
nfc_dev_dbg(&drv->pdev->dev, "close entry");
if (!test_and_clear_bit(NFCWILINK_RUNNING, &drv->flags))
return 0;
rc = st_unregister(&nfcwilink_proto);
if (rc)
nfc_dev_err(&drv->pdev->dev, "st_unregister failed %d", rc);
drv->st_write = NULL;
return rc;
}
static int nfcwilink_send(struct sk_buff *skb)
{
struct nci_dev *ndev = (struct nci_dev *)skb->dev;
struct nfcwilink *drv = nci_get_drvdata(ndev);
struct nfcwilink_hdr hdr = {NFCWILINK_CHNL, NFCWILINK_OPCODE, 0x0000};
long len;
nfc_dev_dbg(&drv->pdev->dev, "send entry, len %d", skb->len);
if (!test_bit(NFCWILINK_RUNNING, &drv->flags)) {
kfree_skb(skb);
return -EINVAL;
}
/* add the ST hdr to the start of the buffer */
hdr.len = cpu_to_le16(skb->len);
memcpy(skb_push(skb, NFCWILINK_HDR_LEN), &hdr, NFCWILINK_HDR_LEN);
/* Insert skb to shared transport layer's transmit queue.
* Freeing skb memory is taken care in shared transport layer,
* so don't free skb memory here.
*/
len = drv->st_write(skb);
if (len < 0) {
kfree_skb(skb);
nfc_dev_err(&drv->pdev->dev, "st_write failed %ld", len);
return -EFAULT;
}
return 0;
}
static struct nci_ops nfcwilink_ops = {
.open = nfcwilink_open,
.close = nfcwilink_close,
.send = nfcwilink_send,
};
static int nfcwilink_probe(struct platform_device *pdev)
{
struct nfcwilink *drv;
int rc;
__u32 protocols;
nfc_dev_dbg(&pdev->dev, "probe entry");
drv = kzalloc(sizeof(struct nfcwilink), GFP_KERNEL);
if (!drv) {
rc = -ENOMEM;
goto exit;
}
drv->pdev = pdev;
protocols = NFC_PROTO_JEWEL_MASK
| NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK
| NFC_PROTO_ISO14443_MASK
| NFC_PROTO_NFC_DEP_MASK;
drv->ndev = nci_allocate_device(&nfcwilink_ops,
protocols,
NFCWILINK_HDR_LEN,
0);
if (!drv->ndev) {
nfc_dev_err(&pdev->dev, "nci_allocate_device failed");
rc = -ENOMEM;
goto free_exit;
}
nci_set_parent_dev(drv->ndev, &pdev->dev);
nci_set_drvdata(drv->ndev, drv);
rc = nci_register_device(drv->ndev);
if (rc < 0) {
nfc_dev_err(&pdev->dev, "nci_register_device failed %d", rc);
goto free_dev_exit;
}
dev_set_drvdata(&pdev->dev, drv);
goto exit;
free_dev_exit:
nci_free_device(drv->ndev);
free_exit:
kfree(drv);
exit:
return rc;
}
static int nfcwilink_remove(struct platform_device *pdev)
{
struct nfcwilink *drv = dev_get_drvdata(&pdev->dev);
struct nci_dev *ndev;
nfc_dev_dbg(&pdev->dev, "remove entry");
if (!drv)
return -EFAULT;
ndev = drv->ndev;
nci_unregister_device(ndev);
nci_free_device(ndev);
kfree(drv);
dev_set_drvdata(&pdev->dev, NULL);
return 0;
}
static struct platform_driver nfcwilink_driver = {
.probe = nfcwilink_probe,
.remove = nfcwilink_remove,
.driver = {
.name = "nfcwilink",
.owner = THIS_MODULE,
},
};
/* ------- Module Init/Exit interfaces ------ */
static int __init nfcwilink_init(void)
{
printk(KERN_INFO "NFC Driver for TI WiLink\n");
return platform_driver_register(&nfcwilink_driver);
}
static void __exit nfcwilink_exit(void)
{
platform_driver_unregister(&nfcwilink_driver);
}
module_init(nfcwilink_init);
module_exit(nfcwilink_exit);
/* ------ Module Info ------ */
MODULE_AUTHOR("Ilan Elias <ilane@ti.com>");
MODULE_DESCRIPTION("NFC Driver for TI Shared Transport");
MODULE_LICENSE("GPL");
| gpl-2.0 |
mcmenaminadrian/Linux-devel | arch/mips/powertv/ioremap.c | 7964 | 4733 | /*
* ioremap.c
*
* Support for mapping between dma_addr_t values and phys_addr_t values.
*
* Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* Author: David VomLehn <dvomlehn@cisco.com>
*
* Description: Defines the platform resources for the SA settop.
*
* NOTE: The bootloader allocates persistent memory at an address which is
* 16 MiB below the end of the highest address in KSEG0. All fixed
* address memory reservations must avoid this region.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/mach-powertv/ioremap.h>
/*
* Define the sizes of and masks for grains in physical and DMA space. The
* values are the same but the types are not.
*/
#define IOR_PHYS_GRAIN ((phys_addr_t) 1 << IOR_LSBITS)
#define IOR_PHYS_GRAIN_MASK (IOR_PHYS_GRAIN - 1)
#define IOR_DMA_GRAIN ((dma_addr_t) 1 << IOR_LSBITS)
#define IOR_DMA_GRAIN_MASK (IOR_DMA_GRAIN - 1)
/*
* Values that, when accessed by an index derived from a phys_addr_t and
* added to phys_addr_t value, yield a DMA address
*/
struct ior_phys_to_dma _ior_phys_to_dma[IOR_NUM_PHYS_TO_DMA];
EXPORT_SYMBOL(_ior_phys_to_dma);
/*
* Values that, when accessed by an index derived from a dma_addr_t and
* added to that dma_addr_t value, yield a physical address
*/
struct ior_dma_to_phys _ior_dma_to_phys[IOR_NUM_DMA_TO_PHYS];
EXPORT_SYMBOL(_ior_dma_to_phys);
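/*
 * Both translations use the same scheme: an address is split at
 * IOR_LSBITS, the bits above the grain index one of the tables above,
 * and the stored (pre-shifted) offset is added to the original address
 * to produce the translated one. The two setup functions below fill in
 * those per-grain offsets for one contiguous chunk of memory.
 */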
/**
* setup_dma_to_phys - set up conversion from DMA to physical addresses
* @dma: DMA address of the start of the memory chunk
* @delta: Value that, when added to the DMA address, will yield the
* physical address
* @s: Number of bytes in the section of memory with the given delta
* between DMA and physical addresses.
*/
static void setup_dma_to_phys(dma_addr_t dma, phys_addr_t delta, dma_addr_t s)
{
int dma_idx, first_idx, last_idx;
phys_addr_t first, last;
/*
* Calculate the first and last indices; both are rounded down to a
* grain boundary, so every grain touched by the chunk is covered.
*/
first = dma & ~IOR_DMA_GRAIN_MASK;
last = (dma + s - 1) & ~IOR_DMA_GRAIN_MASK;
first_idx = first >> IOR_LSBITS; /* Convert to indices */
last_idx = last >> IOR_LSBITS;
for (dma_idx = first_idx; dma_idx <= last_idx; dma_idx++)
_ior_dma_to_phys[dma_idx].offset = delta >> IOR_DMA_SHIFT;
}
/**
* setup_phys_to_dma - set up conversion from physical to DMA addresses
* @phys: Physical address of the start of the memory chunk
* @delta: Value that, when added to the physical address, will yield
* the DMA address
* @s: Number of bytes in the section of memory with the given delta
* between DMA and physical addresses.
*/
static void setup_phys_to_dma(phys_addr_t phys, dma_addr_t delta, phys_addr_t s)
{
int phys_idx, first_idx, last_idx;
phys_addr_t first, last;
/*
* Calculate the first and last indices; both are rounded down to a
* grain boundary, so every grain touched by the chunk is covered.
*/
first = phys & ~IOR_PHYS_GRAIN_MASK;
last = (phys + s - 1) & ~IOR_PHYS_GRAIN_MASK;
first_idx = first >> IOR_LSBITS; /* Convert to indices */
last_idx = last >> IOR_LSBITS;
for (phys_idx = first_idx; phys_idx <= last_idx; phys_idx++)
_ior_phys_to_dma[phys_idx].offset = delta >> IOR_PHYS_SHIFT;
}
/**
* ioremap_add_map - add to the physical and DMA address conversion arrays
* @phys: Process's view of the address of the start of the memory chunk
* @dma: DMA address of the start of the memory chunk
* @size: Size, in bytes, of the chunk of memory
*
* NOTE: It might be obvious, but the assumption is that all @size bytes have
* the same offset between the physical address and the DMA address.
*/
void ioremap_add_map(phys_addr_t phys, phys_addr_t dma, phys_addr_t size)
{
if (size == 0)
return;
if ((dma & IOR_DMA_GRAIN_MASK) != 0 ||
(phys & IOR_PHYS_GRAIN_MASK) != 0 ||
(size & IOR_PHYS_GRAIN_MASK) != 0)
pr_crit("Memory allocation must be in chunks of 0x%x bytes\n",
IOR_PHYS_GRAIN);
setup_dma_to_phys(dma, phys - dma, size);
setup_phys_to_dma(phys, dma - phys, size);
}
| gpl-2.0 |
Tegra4/android_kernel_hp_phobos | drivers/media/dvb/ttpci/av7110_hw.c | 8988 | 31992 | /*
* av7110_hw.c: av7110 low level hardware access and firmware interface
*
* Copyright (C) 1999-2002 Ralph Metzler
* & Marcus Metzler for convergence integrated media GmbH
*
* originally based on code by:
* Copyright (C) 1998,1999 Christian Theiss <mistert@rz.fh-augsburg.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
* Or, point your browser to http://www.gnu.org/copyleft/gpl.html
*
* the project's page is at http://www.linuxtv.org/
*/
/* for debugging ARM communication: */
//#define COM_DEBUG
#include <stdarg.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include "av7110.h"
#include "av7110_hw.h"
#define _NOHANDSHAKE
/****************************************************************************
* DEBI functions
****************************************************************************/
/* This DEBI code is based on the Stradis driver
by Nathan Laredo <laredo@gnu.org> */
int av7110_debiwrite(struct av7110 *av7110, u32 config,
int addr, u32 val, int count)
{
struct saa7146_dev *dev = av7110->dev;
if (count <= 0 || count > 32764) {
printk("%s: invalid count %d\n", __func__, count);
return -1;
}
if (saa7146_wait_for_debi_done(av7110->dev, 0) < 0) {
printk("%s: wait_for_debi_done failed\n", __func__);
return -1;
}
saa7146_write(dev, DEBI_CONFIG, config);
if (count <= 4) /* immediate transfer */
saa7146_write(dev, DEBI_AD, val);
else /* block transfer */
saa7146_write(dev, DEBI_AD, av7110->debi_bus);
saa7146_write(dev, DEBI_COMMAND, (count << 17) | (addr & 0xffff));
saa7146_write(dev, MC2, (2 << 16) | 2);
return 0;
}
u32 av7110_debiread(struct av7110 *av7110, u32 config, int addr, int count)
{
struct saa7146_dev *dev = av7110->dev;
u32 result = 0;
if (count > 32764 || count <= 0) {
printk("%s: invalid count %d\n", __func__, count);
return 0;
}
if (saa7146_wait_for_debi_done(av7110->dev, 0) < 0) {
printk("%s: wait_for_debi_done #1 failed\n", __func__);
return 0;
}
saa7146_write(dev, DEBI_AD, av7110->debi_bus);
saa7146_write(dev, DEBI_COMMAND, (count << 17) | 0x10000 | (addr & 0xffff));
saa7146_write(dev, DEBI_CONFIG, config);
saa7146_write(dev, MC2, (2 << 16) | 2);
if (count > 4)
return count;
if (saa7146_wait_for_debi_done(av7110->dev, 0) < 0) {
printk("%s: wait_for_debi_done #2 failed\n", __func__);
return 0;
}
result = saa7146_read(dev, DEBI_AD);
result &= (0xffffffffUL >> ((4 - count) * 8));
return result;
}
/* av7110 ARM core boot stuff */
#if 0
void av7110_reset_arm(struct av7110 *av7110)
{
saa7146_setgpio(av7110->dev, RESET_LINE, SAA7146_GPIO_OUTLO);
/* Disable DEBI and GPIO irq */
SAA7146_IER_DISABLE(av7110->dev, MASK_19 | MASK_03);
SAA7146_ISR_CLEAR(av7110->dev, MASK_19 | MASK_03);
saa7146_setgpio(av7110->dev, RESET_LINE, SAA7146_GPIO_OUTHI);
msleep(30); /* the firmware needs some time to initialize */
ARM_ResetMailBox(av7110);
SAA7146_ISR_CLEAR(av7110->dev, MASK_19 | MASK_03);
SAA7146_IER_ENABLE(av7110->dev, MASK_03);
av7110->arm_ready = 1;
dprintk(1, "reset ARM\n");
}
#endif /* 0 */
static int waitdebi(struct av7110 *av7110, int adr, int state)
{
int k;
dprintk(4, "%p\n", av7110);
for (k = 0; k < 100; k++) {
if (irdebi(av7110, DEBINOSWAP, adr, 0, 2) == state)
return 0;
udelay(5);
}
return -ETIMEDOUT;
}
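/*
 * load_dram() feeds the firmware to the ARM bootloader in
 * AV7110_BOOT_MAX_SIZE chunks via a simple mailbox handshake: wait for
 * AV7110_BOOT_STATE to read BOOTSTATE_BUFFER_EMPTY, copy the next
 * chunk into one of the two alternating DPRAM boot blocks (toggled by
 * "bootblock ^= 0x1400"), publish the destination address and size,
 * then set BOOTSTATE_BUFFER_FULL to hand the buffer back.
 */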
static int load_dram(struct av7110 *av7110, u32 *data, int len)
{
int i;
int blocks, rest;
u32 base, bootblock = AV7110_BOOT_BLOCK;
dprintk(4, "%p\n", av7110);
blocks = len / AV7110_BOOT_MAX_SIZE;
rest = len % AV7110_BOOT_MAX_SIZE;
base = DRAM_START_CODE;
for (i = 0; i < blocks; i++) {
if (waitdebi(av7110, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_EMPTY) < 0) {
printk(KERN_ERR "dvb-ttpci: load_dram(): timeout at block %d\n", i);
return -ETIMEDOUT;
}
dprintk(4, "writing DRAM block %d\n", i);
mwdebi(av7110, DEBISWAB, bootblock,
((u8 *)data) + i * AV7110_BOOT_MAX_SIZE, AV7110_BOOT_MAX_SIZE);
bootblock ^= 0x1400;
iwdebi(av7110, DEBISWAB, AV7110_BOOT_BASE, swab32(base), 4);
iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_SIZE, AV7110_BOOT_MAX_SIZE, 2);
iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2);
base += AV7110_BOOT_MAX_SIZE;
}
if (rest > 0) {
if (waitdebi(av7110, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_EMPTY) < 0) {
printk(KERN_ERR "dvb-ttpci: load_dram(): timeout at last block\n");
return -ETIMEDOUT;
}
if (rest > 4)
mwdebi(av7110, DEBISWAB, bootblock,
((u8 *)data) + i * AV7110_BOOT_MAX_SIZE, rest);
else
mwdebi(av7110, DEBISWAB, bootblock,
((u8 *)data) + i * AV7110_BOOT_MAX_SIZE - 4, rest + 4);
iwdebi(av7110, DEBISWAB, AV7110_BOOT_BASE, swab32(base), 4);
iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_SIZE, rest, 2);
iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2);
}
if (waitdebi(av7110, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_EMPTY) < 0) {
printk(KERN_ERR "dvb-ttpci: load_dram(): timeout after last block\n");
return -ETIMEDOUT;
}
iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_SIZE, 0, 2);
iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2);
if (waitdebi(av7110, AV7110_BOOT_STATE, BOOTSTATE_AV7110_BOOT_COMPLETE) < 0) {
printk(KERN_ERR "dvb-ttpci: load_dram(): final handshake timeout\n");
return -ETIMEDOUT;
}
return 0;
}
/* we cannot write av7110 DRAM directly, so load a bootloader into
* the DPRAM which implements a simple boot protocol */
int av7110_bootarm(struct av7110 *av7110)
{
const struct firmware *fw;
const char *fw_name = "av7110/bootcode.bin";
struct saa7146_dev *dev = av7110->dev;
u32 ret;
int i;
dprintk(4, "%p\n", av7110);
av7110->arm_ready = 0;
saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTLO);
/* Disable DEBI and GPIO irq */
SAA7146_IER_DISABLE(av7110->dev, MASK_03 | MASK_19);
SAA7146_ISR_CLEAR(av7110->dev, MASK_19 | MASK_03);
/* enable DEBI */
saa7146_write(av7110->dev, MC1, 0x08800880);
saa7146_write(av7110->dev, DD1_STREAM_B, 0x00000000);
saa7146_write(av7110->dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
/* test DEBI */
iwdebi(av7110, DEBISWAP, DPRAM_BASE, 0x76543210, 4);
/* FIXME: Why does Nexus CA require 2x iwdebi for first init? */
iwdebi(av7110, DEBISWAP, DPRAM_BASE, 0x76543210, 4);
if ((ret=irdebi(av7110, DEBINOSWAP, DPRAM_BASE, 0, 4)) != 0x10325476) {
printk(KERN_ERR "dvb-ttpci: debi test in av7110_bootarm() failed: "
"%08x != %08x (check your BIOS 'Plug&Play OS' settings)\n",
ret, 0x10325476);
return -1;
}
for (i = 0; i < 8192; i += 4)
iwdebi(av7110, DEBISWAP, DPRAM_BASE + i, 0x00, 4);
dprintk(2, "debi test OK\n");
/* boot */
dprintk(1, "load boot code\n");
saa7146_setgpio(dev, ARM_IRQ_LINE, SAA7146_GPIO_IRQLO);
//saa7146_setgpio(dev, DEBI_DONE_LINE, SAA7146_GPIO_INPUT);
//saa7146_setgpio(dev, 3, SAA7146_GPIO_INPUT);
ret = request_firmware(&fw, fw_name, &dev->pci->dev);
if (ret) {
printk(KERN_ERR "dvb-ttpci: Failed to load firmware \"%s\"\n",
fw_name);
return ret;
}
mwdebi(av7110, DEBISWAB, DPRAM_BASE, fw->data, fw->size);
release_firmware(fw);
iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2);
if (saa7146_wait_for_debi_done(av7110->dev, 1)) {
printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): "
"saa7146_wait_for_debi_done() timed out\n");
return -ETIMEDOUT;
}
saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTHI);
mdelay(1);
dprintk(1, "load dram code\n");
if (load_dram(av7110, (u32 *)av7110->bin_root, av7110->size_root) < 0) {
printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): "
"load_dram() failed\n");
return -1;
}
saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTLO);
mdelay(1);
dprintk(1, "load dpram code\n");
mwdebi(av7110, DEBISWAB, DPRAM_BASE, av7110->bin_dpram, av7110->size_dpram);
if (saa7146_wait_for_debi_done(av7110->dev, 1)) {
printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): "
"saa7146_wait_for_debi_done() timed out after loading DRAM\n");
return -ETIMEDOUT;
}
saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTHI);
msleep(30); /* the firmware needs some time to initialize */
//ARM_ClearIrq(av7110);
ARM_ResetMailBox(av7110);
SAA7146_ISR_CLEAR(av7110->dev, MASK_19 | MASK_03);
SAA7146_IER_ENABLE(av7110->dev, MASK_03);
av7110->arm_errors = 0;
av7110->arm_ready = 1;
return 0;
}
MODULE_FIRMWARE("av7110/bootcode.bin");
/****************************************************************************
* DEBI command polling
****************************************************************************/
int av7110_wait_msgstate(struct av7110 *av7110, u16 flags)
{
unsigned long start;
u32 stat;
int err;
if (FW_VERSION(av7110->arm_app) <= 0x261c) {
/* not supported by old firmware */
msleep(50);
return 0;
}
/* new firmware */
start = jiffies;
for (;;) {
err = time_after(jiffies, start + ARM_WAIT_FREE);
if (mutex_lock_interruptible(&av7110->dcomlock))
return -ERESTARTSYS;
stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2);
mutex_unlock(&av7110->dcomlock);
if ((stat & flags) == 0)
break;
if (err) {
printk(KERN_ERR "%s: timeout waiting for MSGSTATE %04x\n",
__func__, stat & flags);
return -ETIMEDOUT;
}
msleep(1);
}
return 0;
}
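/*
 * Firmware commands go through a small mailbox in DPRAM. The sequence
 * below polls the COMMAND word until the ARM has cleared it, checks
 * the relevant MSGSTATE overflow/busy flags for queued command types,
 * writes the parameter words at COMMAND + 4 and up, then writes buf[1]
 * and finally buf[0]; writing the command word itself last is what
 * triggers execution on the ARM. Callers must hold av7110->dcomlock.
 */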
static int __av7110_send_fw_cmd(struct av7110 *av7110, u16* buf, int length)
{
int i;
unsigned long start;
char *type = NULL;
u16 flags[2] = {0, 0};
u32 stat;
int err;
// dprintk(4, "%p\n", av7110);
if (!av7110->arm_ready) {
dprintk(1, "arm not ready.\n");
return -ENXIO;
}
start = jiffies;
while (1) {
err = time_after(jiffies, start + ARM_WAIT_FREE);
if (rdebi(av7110, DEBINOSWAP, COMMAND, 0, 2) == 0)
break;
if (err) {
printk(KERN_ERR "dvb-ttpci: %s(): timeout waiting for COMMAND idle\n", __func__);
av7110->arm_errors++;
return -ETIMEDOUT;
}
msleep(1);
}
if (FW_VERSION(av7110->arm_app) <= 0x261f)
wdebi(av7110, DEBINOSWAP, COM_IF_LOCK, 0xffff, 2);
#ifndef _NOHANDSHAKE
start = jiffies;
while (1) {
err = time_after(jiffies, start + ARM_WAIT_SHAKE);
if (rdebi(av7110, DEBINOSWAP, HANDSHAKE_REG, 0, 2) == 0)
break;
if (err) {
printk(KERN_ERR "dvb-ttpci: %s(): timeout waiting for HANDSHAKE_REG\n", __func__);
return -ETIMEDOUT;
}
msleep(1);
}
#endif
switch ((buf[0] >> 8) & 0xff) {
case COMTYPE_PIDFILTER:
case COMTYPE_ENCODER:
case COMTYPE_REC_PLAY:
case COMTYPE_MPEGDECODER:
type = "MSG";
flags[0] = GPMQOver;
flags[1] = GPMQFull;
break;
case COMTYPE_OSD:
type = "OSD";
flags[0] = OSDQOver;
flags[1] = OSDQFull;
break;
case COMTYPE_MISC:
if (FW_VERSION(av7110->arm_app) >= 0x261d) {
type = "MSG";
flags[0] = GPMQOver;
flags[1] = GPMQBusy;
}
break;
default:
break;
}
if (type != NULL) {
/* non-immediate COMMAND type */
start = jiffies;
for (;;) {
err = time_after(jiffies, start + ARM_WAIT_FREE);
stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2);
if (stat & flags[0]) {
printk(KERN_ERR "%s: %s QUEUE overflow\n",
__func__, type);
return -1;
}
if ((stat & flags[1]) == 0)
break;
if (err) {
printk(KERN_ERR "%s: timeout waiting on busy %s QUEUE\n",
__func__, type);
av7110->arm_errors++;
return -ETIMEDOUT;
}
msleep(1);
}
}
for (i = 2; i < length; i++)
wdebi(av7110, DEBINOSWAP, COMMAND + 2 * i, (u32) buf[i], 2);
if (length)
wdebi(av7110, DEBINOSWAP, COMMAND + 2, (u32) buf[1], 2);
else
wdebi(av7110, DEBINOSWAP, COMMAND + 2, 0, 2);
wdebi(av7110, DEBINOSWAP, COMMAND, (u32) buf[0], 2);
if (FW_VERSION(av7110->arm_app) <= 0x261f)
wdebi(av7110, DEBINOSWAP, COM_IF_LOCK, 0x0000, 2);
#ifdef COM_DEBUG
start = jiffies;
while (1) {
err = time_after(jiffies, start + ARM_WAIT_FREE);
if (rdebi(av7110, DEBINOSWAP, COMMAND, 0, 2) == 0)
break;
if (err) {
printk(KERN_ERR "dvb-ttpci: %s(): timeout waiting for COMMAND %d to complete\n",
__func__, (buf[0] >> 8) & 0xff);
return -ETIMEDOUT;
}
msleep(1);
}
stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2);
if (stat & GPMQOver) {
printk(KERN_ERR "dvb-ttpci: %s(): GPMQOver\n", __func__);
return -ENOSPC;
}
else if (stat & OSDQOver) {
printk(KERN_ERR "dvb-ttpci: %s(): OSDQOver\n", __func__);
return -ENOSPC;
}
#endif
return 0;
}
static int av7110_send_fw_cmd(struct av7110 *av7110, u16* buf, int length)
{
int ret;
// dprintk(4, "%p\n", av7110);
if (!av7110->arm_ready) {
dprintk(1, "arm not ready.\n");
return -1;
}
if (mutex_lock_interruptible(&av7110->dcomlock))
return -ERESTARTSYS;
ret = __av7110_send_fw_cmd(av7110, buf, length);
mutex_unlock(&av7110->dcomlock);
if (ret && ret!=-ERESTARTSYS)
printk(KERN_ERR "dvb-ttpci: %s(): av7110_send_fw_cmd error %d\n",
__func__, ret);
return ret;
}
int av7110_fw_cmd(struct av7110 *av7110, int type, int com, int num, ...)
{
va_list args;
u16 buf[num + 2];
int i, ret;
// dprintk(4, "%p\n", av7110);
buf[0] = ((type << 8) | com);
buf[1] = num;
if (num) {
va_start(args, num);
for (i = 0; i < num; i++)
buf[i + 2] = va_arg(args, u32);
va_end(args);
}
ret = av7110_send_fw_cmd(av7110, buf, num + 2);
if (ret && ret != -ERESTARTSYS)
printk(KERN_ERR "dvb-ttpci: av7110_fw_cmd error %d\n", ret);
return ret;
}
#if 0
int av7110_send_ci_cmd(struct av7110 *av7110, u8 subcom, u8 *buf, u8 len)
{
int i, ret;
u16 cmd[18] = { ((COMTYPE_COMMON_IF << 8) + subcom),
16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
dprintk(4, "%p\n", av7110);
for(i = 0; i < len && i < 32; i++)
{
if(i % 2 == 0)
cmd[(i / 2) + 2] = (u16)(buf[i]) << 8;
else
cmd[(i / 2) + 2] |= buf[i];
}
ret = av7110_send_fw_cmd(av7110, cmd, 18);
if (ret && ret != -ERESTARTSYS)
printk(KERN_ERR "dvb-ttpci: av7110_send_ci_cmd error %d\n", ret);
return ret;
}
#endif /* 0 */
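/*
 * av7110_fw_request() is the synchronous request/reply variant: under
 * dcomlock it sends the command, polls COMMAND until the ARM clears it
 * to signal completion, and then reads reply_buf_len 16-bit words back
 * from the COM_BUFF area of DPRAM.
 */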
int av7110_fw_request(struct av7110 *av7110, u16 *request_buf,
int request_buf_len, u16 *reply_buf, int reply_buf_len)
{
int err;
s16 i;
unsigned long start;
#ifdef COM_DEBUG
u32 stat;
#endif
dprintk(4, "%p\n", av7110);
if (!av7110->arm_ready) {
dprintk(1, "arm not ready.\n");
return -1;
}
if (mutex_lock_interruptible(&av7110->dcomlock))
return -ERESTARTSYS;
if ((err = __av7110_send_fw_cmd(av7110, request_buf, request_buf_len)) < 0) {
mutex_unlock(&av7110->dcomlock);
printk(KERN_ERR "dvb-ttpci: av7110_fw_request error %d\n", err);
return err;
}
start = jiffies;
while (1) {
err = time_after(jiffies, start + ARM_WAIT_FREE);
if (rdebi(av7110, DEBINOSWAP, COMMAND, 0, 2) == 0)
break;
if (err) {
printk(KERN_ERR "%s: timeout waiting for COMMAND to complete\n", __func__);
mutex_unlock(&av7110->dcomlock);
return -ETIMEDOUT;
}
#ifdef _NOHANDSHAKE
msleep(1);
#endif
}
#ifndef _NOHANDSHAKE
start = jiffies;
while (1) {
err = time_after(jiffies, start + ARM_WAIT_SHAKE);
if (rdebi(av7110, DEBINOSWAP, HANDSHAKE_REG, 0, 2) == 0)
break;
if (err) {
printk(KERN_ERR "%s: timeout waiting for HANDSHAKE_REG\n", __func__);
mutex_unlock(&av7110->dcomlock);
return -ETIMEDOUT;
}
msleep(1);
}
#endif
#ifdef COM_DEBUG
stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2);
if (stat & GPMQOver) {
printk(KERN_ERR "%s: GPMQOver\n", __func__);
mutex_unlock(&av7110->dcomlock);
return -1;
}
else if (stat & OSDQOver) {
printk(KERN_ERR "%s: OSDQOver\n", __func__);
mutex_unlock(&av7110->dcomlock);
return -1;
}
#endif
for (i = 0; i < reply_buf_len; i++)
reply_buf[i] = rdebi(av7110, DEBINOSWAP, COM_BUFF + 2 * i, 0, 2);
mutex_unlock(&av7110->dcomlock);
return 0;
}
static int av7110_fw_query(struct av7110 *av7110, u16 tag, u16* buf, s16 length)
{
int ret;
ret = av7110_fw_request(av7110, &tag, 0, buf, length);
if (ret)
printk(KERN_ERR "dvb-ttpci: av7110_fw_query error %d\n", ret);
return ret;
}
/****************************************************************************
* Firmware commands
****************************************************************************/
/* get version of the firmware ROM, RTSL, video ucode and ARM application */
int av7110_firmversion(struct av7110 *av7110)
{
u16 buf[20];
u16 tag = ((COMTYPE_REQUEST << 8) + ReqVersion);
dprintk(4, "%p\n", av7110);
if (av7110_fw_query(av7110, tag, buf, 16)) {
printk("dvb-ttpci: failed to boot firmware @ card %d\n",
av7110->dvb_adapter.num);
return -EIO;
}
av7110->arm_fw = (buf[0] << 16) + buf[1];
av7110->arm_rtsl = (buf[2] << 16) + buf[3];
av7110->arm_vid = (buf[4] << 16) + buf[5];
av7110->arm_app = (buf[6] << 16) + buf[7];
av7110->avtype = (buf[8] << 16) + buf[9];
printk("dvb-ttpci: info @ card %d: firm %08x, rtsl %08x, vid %08x, app %08x\n",
av7110->dvb_adapter.num, av7110->arm_fw,
av7110->arm_rtsl, av7110->arm_vid, av7110->arm_app);
/* print firmware capabilities */
if (FW_CI_LL_SUPPORT(av7110->arm_app))
printk("dvb-ttpci: firmware @ card %d supports CI link layer interface\n",
av7110->dvb_adapter.num);
else
printk("dvb-ttpci: no firmware support for CI link layer interface @ card %d\n",
av7110->dvb_adapter.num);
return 0;
}
int av7110_diseqc_send(struct av7110 *av7110, int len, u8 *msg, unsigned long burst)
{
int i, ret;
u16 buf[18] = { ((COMTYPE_AUDIODAC << 8) + SendDiSEqC),
16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
dprintk(4, "%p\n", av7110);
if (len > 10)
len = 10;
buf[1] = len + 2;
buf[2] = len;
if (burst != -1)
buf[3] = burst ? 0x01 : 0x00;
else
buf[3] = 0xffff;
for (i = 0; i < len; i++)
buf[i + 4] = msg[i];
ret = av7110_send_fw_cmd(av7110, buf, 18);
if (ret && ret!=-ERESTARTSYS)
printk(KERN_ERR "dvb-ttpci: av7110_diseqc_send error %d\n", ret);
return ret;
}
#ifdef CONFIG_DVB_AV7110_OSD
static inline int SetColorBlend(struct av7110 *av7110, u8 windownr)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, SetCBlend, 1, windownr);
}
static inline int SetBlend_(struct av7110 *av7110, u8 windownr,
enum av7110_osd_palette_type colordepth, u16 index, u8 blending)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, SetBlend, 4,
windownr, colordepth, index, blending);
}
static inline int SetColor_(struct av7110 *av7110, u8 windownr,
enum av7110_osd_palette_type colordepth, u16 index, u16 colorhi, u16 colorlo)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, SetColor, 5,
windownr, colordepth, index, colorhi, colorlo);
}
static inline int SetFont(struct av7110 *av7110, u8 windownr, u8 fontsize,
u16 colorfg, u16 colorbg)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, Set_Font, 4,
windownr, fontsize, colorfg, colorbg);
}
static int FlushText(struct av7110 *av7110)
{
unsigned long start;
int err;
if (mutex_lock_interruptible(&av7110->dcomlock))
return -ERESTARTSYS;
start = jiffies;
while (1) {
err = time_after(jiffies, start + ARM_WAIT_OSD);
if (rdebi(av7110, DEBINOSWAP, BUFF1_BASE, 0, 2) == 0)
break;
if (err) {
printk(KERN_ERR "dvb-ttpci: %s(): timeout waiting for BUFF1_BASE == 0\n",
__func__);
mutex_unlock(&av7110->dcomlock);
return -ETIMEDOUT;
}
msleep(1);
}
mutex_unlock(&av7110->dcomlock);
return 0;
}
static int WriteText(struct av7110 *av7110, u8 win, u16 x, u16 y, char *buf)
{
int i, ret;
unsigned long start;
int length = strlen(buf) + 1;
u16 cbuf[5] = { (COMTYPE_OSD << 8) + DText, 3, win, x, y };
if (mutex_lock_interruptible(&av7110->dcomlock))
return -ERESTARTSYS;
start = jiffies;
while (1) {
ret = time_after(jiffies, start + ARM_WAIT_OSD);
if (rdebi(av7110, DEBINOSWAP, BUFF1_BASE, 0, 2) == 0)
break;
if (ret) {
printk(KERN_ERR "dvb-ttpci: %s: timeout waiting for BUFF1_BASE == 0\n",
__func__);
mutex_unlock(&av7110->dcomlock);
return -ETIMEDOUT;
}
msleep(1);
}
#ifndef _NOHANDSHAKE
start = jiffies;
while (1) {
ret = time_after(jiffies, start + ARM_WAIT_SHAKE);
if (rdebi(av7110, DEBINOSWAP, HANDSHAKE_REG, 0, 2) == 0)
break;
if (ret) {
printk(KERN_ERR "dvb-ttpci: %s: timeout waiting for HANDSHAKE_REG\n",
__func__);
mutex_unlock(&av7110->dcomlock);
return -ETIMEDOUT;
}
msleep(1);
}
#endif
for (i = 0; i < length / 2; i++)
wdebi(av7110, DEBINOSWAP, BUFF1_BASE + i * 2,
swab16(*(u16 *)(buf + 2 * i)), 2);
if (length & 1)
wdebi(av7110, DEBINOSWAP, BUFF1_BASE + i * 2, 0, 2);
ret = __av7110_send_fw_cmd(av7110, cbuf, 5);
mutex_unlock(&av7110->dcomlock);
if (ret && ret!=-ERESTARTSYS)
printk(KERN_ERR "dvb-ttpci: WriteText error %d\n", ret);
return ret;
}
static inline int DrawLine(struct av7110 *av7110, u8 windownr,
u16 x, u16 y, u16 dx, u16 dy, u16 color)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, DLine, 6,
windownr, x, y, dx, dy, color);
}
static inline int DrawBlock(struct av7110 *av7110, u8 windownr,
u16 x, u16 y, u16 dx, u16 dy, u16 color)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, DBox, 6,
windownr, x, y, dx, dy, color);
}
static inline int HideWindow(struct av7110 *av7110, u8 windownr)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, WHide, 1, windownr);
}
static inline int MoveWindowRel(struct av7110 *av7110, u8 windownr, u16 x, u16 y)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, WMoveD, 3, windownr, x, y);
}
static inline int MoveWindowAbs(struct av7110 *av7110, u8 windownr, u16 x, u16 y)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, WMoveA, 3, windownr, x, y);
}
static inline int DestroyOSDWindow(struct av7110 *av7110, u8 windownr)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, WDestroy, 1, windownr);
}
static inline int CreateOSDWindow(struct av7110 *av7110, u8 windownr,
osd_raw_window_t disptype,
u16 width, u16 height)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, WCreate, 4,
windownr, disptype, width, height);
}
static enum av7110_osd_palette_type bpp2pal[8] = {
Pal1Bit, Pal2Bit, 0, Pal4Bit, 0, 0, 0, Pal8Bit
};
static osd_raw_window_t bpp2bit[8] = {
OSD_BITMAP1, OSD_BITMAP2, 0, OSD_BITMAP4, 0, 0, 0, OSD_BITMAP8
};
static inline int WaitUntilBmpLoaded(struct av7110 *av7110)
{
int ret = wait_event_timeout(av7110->bmpq,
av7110->bmp_state != BMP_LOADING, 10*HZ);
if (ret == 0) {
printk("dvb-ttpci: warning: timeout waiting in LoadBitmap: %d, %d\n",
ret, av7110->bmp_state);
av7110->bmp_state = BMP_NONE;
return -ETIMEDOUT;
}
return 0;
}
static inline int LoadBitmap(struct av7110 *av7110,
u16 dx, u16 dy, int inc, u8 __user * data)
{
u16 format;
int bpp;
int i;
int d, delta;
u8 c;
int ret;
dprintk(4, "%p\n", av7110);
format = bpp2bit[av7110->osdbpp[av7110->osdwin]];
av7110->bmp_state = BMP_LOADING;
if (format == OSD_BITMAP8) {
bpp=8; delta = 1;
} else if (format == OSD_BITMAP4) {
bpp=4; delta = 2;
} else if (format == OSD_BITMAP2) {
bpp=2; delta = 4;
} else if (format == OSD_BITMAP1) {
bpp=1; delta = 8;
} else {
av7110->bmp_state = BMP_NONE;
return -EINVAL;
}
av7110->bmplen = ((dx * dy * bpp + 7) & ~7) / 8;
av7110->bmpp = 0;
if (av7110->bmplen > 32768) {
av7110->bmp_state = BMP_NONE;
return -EINVAL;
}
for (i = 0; i < dy; i++) {
if (copy_from_user(av7110->bmpbuf + 1024 + i * dx, data + i * inc, dx)) {
av7110->bmp_state = BMP_NONE;
return -EINVAL;
}
}
if (format != OSD_BITMAP8) {
for (i = 0; i < dx * dy / delta; i++) {
c = ((u8 *)av7110->bmpbuf)[1024 + i * delta + delta - 1];
for (d = delta - 2; d >= 0; d--) {
c |= (((u8 *)av7110->bmpbuf)[1024 + i * delta + d]
<< ((delta - d - 1) * bpp));
((u8 *)av7110->bmpbuf)[1024 + i] = c;
}
}
}
av7110->bmplen += 1024;
dprintk(4, "av7110_fw_cmd: LoadBmp size %d\n", av7110->bmplen);
ret = av7110_fw_cmd(av7110, COMTYPE_OSD, LoadBmp, 3, format, dx, dy);
if (!ret)
ret = WaitUntilBmpLoaded(av7110);
return ret;
}
static int BlitBitmap(struct av7110 *av7110, u16 x, u16 y)
{
dprintk(4, "%p\n", av7110);
return av7110_fw_cmd(av7110, COMTYPE_OSD, BlitBmp, 4, av7110->osdwin, x, y, 0);
}
static inline int ReleaseBitmap(struct av7110 *av7110)
{
dprintk(4, "%p\n", av7110);
if (av7110->bmp_state != BMP_LOADED && FW_VERSION(av7110->arm_app) < 0x261e)
return -1;
if (av7110->bmp_state == BMP_LOADING)
dprintk(1,"ReleaseBitmap called while BMP_LOADING\n");
av7110->bmp_state = BMP_NONE;
return av7110_fw_cmd(av7110, COMTYPE_OSD, ReleaseBmp, 0);
}
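/*
 * Fixed-point RGB to YCbCr conversion: luma is accumulated as
 * 77*R + 150*G + 29*B, i.e. roughly 256 times the ITU-R 601 weights
 * 0.299/0.587/0.114 (0..65535), and the chroma components are biased
 * around 2048 in a 12-bit range before being scaled down to 8 bits.
 */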
static u32 RGB2YUV(u16 R, u16 G, u16 B)
{
u16 y, u, v;
u16 Y, Cr, Cb;
y = R * 77 + G * 150 + B * 29; /* Luma=0.299R+0.587G+0.114B 0..65535 */
u = 2048 + B * 8 -(y >> 5); /* Cb 0..4095 */
v = 2048 + R * 8 -(y >> 5); /* Cr 0..4095 */
Y = y / 256;
Cb = u / 16;
Cr = v / 16;
return Cr | (Cb << 16) | (Y << 8);
}
static int OSDSetColor(struct av7110 *av7110, u8 color, u8 r, u8 g, u8 b, u8 blend)
{
int ret;
u16 ch, cl;
u32 yuv;
yuv = blend ? RGB2YUV(r,g,b) : 0;
cl = (yuv & 0xffff);
ch = ((yuv >> 16) & 0xffff);
ret = SetColor_(av7110, av7110->osdwin, bpp2pal[av7110->osdbpp[av7110->osdwin]],
color, ch, cl);
if (!ret)
ret = SetBlend_(av7110, av7110->osdwin, bpp2pal[av7110->osdbpp[av7110->osdwin]],
color, ((blend >> 4) & 0x0f));
return ret;
}
static int OSDSetPalette(struct av7110 *av7110, u32 __user * colors, u8 first, u8 last)
{
int i;
int length = last - first + 1;
if (length * 4 > DATA_BUFF3_SIZE)
return -EINVAL;
for (i = 0; i < length; i++) {
u32 color, blend, yuv;
if (get_user(color, colors + i))
return -EFAULT;
blend = (color & 0xF0000000) >> 4;
yuv = blend ? RGB2YUV(color & 0xFF, (color >> 8) & 0xFF,
(color >> 16) & 0xFF) | blend : 0;
yuv = ((yuv & 0xFFFF0000) >> 16) | ((yuv & 0x0000FFFF) << 16);
wdebi(av7110, DEBINOSWAP, DATA_BUFF3_BASE + i * 4, yuv, 4);
}
return av7110_fw_cmd(av7110, COMTYPE_OSD, Set_Palette, 4,
av7110->osdwin,
bpp2pal[av7110->osdbpp[av7110->osdwin]],
first, last);
}
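/*
 * OSDSetBlock() transfers a user-space bitmap in slices because
 * LoadBitmap() can stage at most 32 KiB at a time: lpb is the number
 * of whole lines fitting into one 32 KiB transfer, bnum the number of
 * full slices and brest the remainder. Each slice is loaded and then
 * blitted at its y offset before the bitmap buffer is released.
 */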
static int OSDSetBlock(struct av7110 *av7110, int x0, int y0,
int x1, int y1, int inc, u8 __user * data)
{
uint w, h, bpp, bpl, size, lpb, bnum, brest;
int i;
int rc,release_rc;
w = x1 - x0 + 1;
h = y1 - y0 + 1;
if (inc <= 0)
inc = w;
if (w <= 0 || w > 720 || h <= 0 || h > 576)
return -EINVAL;
bpp = av7110->osdbpp[av7110->osdwin] + 1;
bpl = ((w * bpp + 7) & ~7) / 8;
size = h * bpl;
lpb = (32 * 1024) / bpl;
bnum = size / (lpb * bpl);
brest = size - bnum * lpb * bpl;
if (av7110->bmp_state == BMP_LOADING) {
/* possible if syscall is repeated by -ERESTARTSYS and if firmware cannot abort */
BUG_ON (FW_VERSION(av7110->arm_app) >= 0x261e);
rc = WaitUntilBmpLoaded(av7110);
if (rc)
return rc;
/* just continue. This should work for all fw versions
* if bnum==1 && !brest && LoadBitmap was successful
*/
}
rc = 0;
for (i = 0; i < bnum; i++) {
rc = LoadBitmap(av7110, w, lpb, inc, data);
if (rc)
break;
rc = BlitBitmap(av7110, x0, y0 + i * lpb);
if (rc)
break;
data += lpb * inc;
}
if (!rc && brest) {
rc = LoadBitmap(av7110, w, brest / bpl, inc, data);
if (!rc)
rc = BlitBitmap(av7110, x0, y0 + bnum * lpb);
}
release_rc = ReleaseBitmap(av7110);
if (!rc)
rc = release_rc;
if (rc)
dprintk(1,"returns %d\n",rc);
return rc;
}
int av7110_osd_cmd(struct av7110 *av7110, osd_cmd_t *dc)
{
int ret;
if (mutex_lock_interruptible(&av7110->osd_mutex))
return -ERESTARTSYS;
switch (dc->cmd) {
case OSD_Close:
ret = DestroyOSDWindow(av7110, av7110->osdwin);
break;
case OSD_Open:
av7110->osdbpp[av7110->osdwin] = (dc->color - 1) & 7;
ret = CreateOSDWindow(av7110, av7110->osdwin,
bpp2bit[av7110->osdbpp[av7110->osdwin]],
dc->x1 - dc->x0 + 1, dc->y1 - dc->y0 + 1);
if (ret)
break;
if (!dc->data) {
ret = MoveWindowAbs(av7110, av7110->osdwin, dc->x0, dc->y0);
if (ret)
break;
ret = SetColorBlend(av7110, av7110->osdwin);
}
break;
case OSD_Show:
ret = MoveWindowRel(av7110, av7110->osdwin, 0, 0);
break;
case OSD_Hide:
ret = HideWindow(av7110, av7110->osdwin);
break;
case OSD_Clear:
ret = DrawBlock(av7110, av7110->osdwin, 0, 0, 720, 576, 0);
break;
case OSD_Fill:
ret = DrawBlock(av7110, av7110->osdwin, 0, 0, 720, 576, dc->color);
break;
case OSD_SetColor:
ret = OSDSetColor(av7110, dc->color, dc->x0, dc->y0, dc->x1, dc->y1);
break;
case OSD_SetPalette:
if (FW_VERSION(av7110->arm_app) >= 0x2618)
ret = OSDSetPalette(av7110, dc->data, dc->color, dc->x0);
else {
int i, len = dc->x0-dc->color+1;
u8 __user *colors = (u8 __user *)dc->data;
u8 r, g = 0, b = 0, blend = 0;
ret = 0;
for (i = 0; i<len; i++) {
if (get_user(r, colors + i * 4) ||
get_user(g, colors + i * 4 + 1) ||
get_user(b, colors + i * 4 + 2) ||
get_user(blend, colors + i * 4 + 3)) {
ret = -EFAULT;
break;
}
ret = OSDSetColor(av7110, dc->color + i, r, g, b, blend);
if (ret)
break;
}
}
break;
case OSD_SetPixel:
ret = DrawLine(av7110, av7110->osdwin,
dc->x0, dc->y0, 0, 0, dc->color);
break;
case OSD_SetRow:
dc->y1 = dc->y0;
/* fall through */
case OSD_SetBlock:
ret = OSDSetBlock(av7110, dc->x0, dc->y0, dc->x1, dc->y1, dc->color, dc->data);
break;
case OSD_FillRow:
ret = DrawBlock(av7110, av7110->osdwin, dc->x0, dc->y0,
dc->x1-dc->x0+1, dc->y1, dc->color);
break;
case OSD_FillBlock:
ret = DrawBlock(av7110, av7110->osdwin, dc->x0, dc->y0,
dc->x1 - dc->x0 + 1, dc->y1 - dc->y0 + 1, dc->color);
break;
case OSD_Line:
ret = DrawLine(av7110, av7110->osdwin,
dc->x0, dc->y0, dc->x1 - dc->x0, dc->y1 - dc->y0, dc->color);
break;
case OSD_Text:
{
char textbuf[240];
if (strncpy_from_user(textbuf, dc->data, 240) < 0) {
ret = -EFAULT;
break;
}
textbuf[239] = 0;
if (dc->x1 > 3)
dc->x1 = 3;
ret = SetFont(av7110, av7110->osdwin, dc->x1,
(u16) (dc->color & 0xffff), (u16) (dc->color >> 16));
if (!ret)
ret = FlushText(av7110);
if (!ret)
ret = WriteText(av7110, av7110->osdwin, dc->x0, dc->y0, textbuf);
break;
}
case OSD_SetWindow:
if (dc->x0 < 1 || dc->x0 > 7)
ret = -EINVAL;
else {
av7110->osdwin = dc->x0;
ret = 0;
}
break;
case OSD_MoveWindow:
ret = MoveWindowAbs(av7110, av7110->osdwin, dc->x0, dc->y0);
if (!ret)
ret = SetColorBlend(av7110, av7110->osdwin);
break;
case OSD_OpenRaw:
if (dc->color < OSD_BITMAP1 || dc->color > OSD_CURSOR) {
ret = -EINVAL;
break;
}
if (dc->color >= OSD_BITMAP1 && dc->color <= OSD_BITMAP8HR)
av7110->osdbpp[av7110->osdwin] = (1 << (dc->color & 3)) - 1;
else
av7110->osdbpp[av7110->osdwin] = 0;
ret = CreateOSDWindow(av7110, av7110->osdwin, (osd_raw_window_t)dc->color,
dc->x1 - dc->x0 + 1, dc->y1 - dc->y0 + 1);
if (ret)
break;
if (!dc->data) {
ret = MoveWindowAbs(av7110, av7110->osdwin, dc->x0, dc->y0);
if (!ret)
ret = SetColorBlend(av7110, av7110->osdwin);
}
break;
default:
ret = -EINVAL;
break;
}
mutex_unlock(&av7110->osd_mutex);
if (ret==-ERESTARTSYS)
dprintk(1, "av7110_osd_cmd(%d) returns with -ERESTARTSYS\n",dc->cmd);
else if (ret)
dprintk(1, "av7110_osd_cmd(%d) returns with %d\n",dc->cmd,ret);
return ret;
}
int av7110_osd_capability(struct av7110 *av7110, osd_cap_t *cap)
{
switch (cap->cmd) {
case OSD_CAP_MEMSIZE:
if (FW_4M_SDRAM(av7110->arm_app))
cap->val = 1000000;
else
cap->val = 92000;
return 0;
default:
return -EINVAL;
}
}
#endif /* CONFIG_DVB_AV7110_OSD */
| gpl-2.0 |
crimeofheart/Solid_Kernel-GEE | fs/xfs/xfs_xattr.c | 11548 | 5759 | /*
* Copyright (C) 2008 Christoph Hellwig.
* Portions Copyright (C) 2000-2008 Silicon Graphics, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_acl.h"
#include "xfs_vnodeops.h"
#include <linux/posix_acl_xattr.h>
#include <linux/xattr.h>
static int
xfs_xattr_get(struct dentry *dentry, const char *name,
void *value, size_t size, int xflags)
{
struct xfs_inode *ip = XFS_I(dentry->d_inode);
int error, asize = size;
if (strcmp(name, "") == 0)
return -EINVAL;
/* Convert Linux syscall to XFS internal ATTR flags */
if (!size) {
xflags |= ATTR_KERNOVAL;
value = NULL;
}
error = -xfs_attr_get(ip, (unsigned char *)name, value, &asize, xflags);
if (error)
return error;
return asize;
}
static int
xfs_xattr_set(struct dentry *dentry, const char *name, const void *value,
size_t size, int flags, int xflags)
{
struct xfs_inode *ip = XFS_I(dentry->d_inode);
if (strcmp(name, "") == 0)
return -EINVAL;
/* Convert Linux syscall to XFS internal ATTR flags */
if (flags & XATTR_CREATE)
xflags |= ATTR_CREATE;
if (flags & XATTR_REPLACE)
xflags |= ATTR_REPLACE;
if (!value)
return -xfs_attr_remove(ip, (unsigned char *)name, xflags);
return -xfs_attr_set(ip, (unsigned char *)name,
(void *)value, size, xflags);
}
static const struct xattr_handler xfs_xattr_user_handler = {
.prefix = XATTR_USER_PREFIX,
.flags = 0, /* no flags implies user namespace */
.get = xfs_xattr_get,
.set = xfs_xattr_set,
};
static const struct xattr_handler xfs_xattr_trusted_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.flags = ATTR_ROOT,
.get = xfs_xattr_get,
.set = xfs_xattr_set,
};
static const struct xattr_handler xfs_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.flags = ATTR_SECURE,
.get = xfs_xattr_get,
.set = xfs_xattr_set,
};
const struct xattr_handler *xfs_xattr_handlers[] = {
&xfs_xattr_user_handler,
&xfs_xattr_trusted_handler,
&xfs_xattr_security_handler,
#ifdef CONFIG_XFS_POSIX_ACL
&xfs_xattr_acl_access_handler,
&xfs_xattr_acl_default_handler,
#endif
NULL
};
static unsigned int xfs_xattr_prefix_len(int flags)
{
if (flags & XFS_ATTR_SECURE)
return sizeof("security");
else if (flags & XFS_ATTR_ROOT)
return sizeof("trusted");
else
return sizeof("user");
}
static const char *xfs_xattr_prefix(int flags)
{
if (flags & XFS_ATTR_SECURE)
return xfs_xattr_security_handler.prefix;
else if (flags & XFS_ATTR_ROOT)
return xfs_xattr_trusted_handler.prefix;
else
return xfs_xattr_user_handler.prefix;
}
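/*
 * xfs_vn_listxattr() drives xfs_attr_list_int() with one of two
 * callbacks: xfs_xattr_put_listent() emits NUL-terminated
 * "<prefix><name>" entries into the caller's buffer, while
 * xfs_xattr_put_listent_sizes() only accumulates the required length,
 * which serves the listxattr(2) zero-size query.
 */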
static int
xfs_xattr_put_listent(
struct xfs_attr_list_context *context,
int flags,
unsigned char *name,
int namelen,
int valuelen,
unsigned char *value)
{
unsigned int prefix_len = xfs_xattr_prefix_len(flags);
char *offset;
int arraytop;
ASSERT(context->count >= 0);
/*
* Only show root namespace entries if we are actually allowed to
* see them.
*/
if ((flags & XFS_ATTR_ROOT) && !capable(CAP_SYS_ADMIN))
return 0;
arraytop = context->count + prefix_len + namelen + 1;
if (arraytop > context->firstu) {
context->count = -1; /* insufficient space */
return 1;
}
offset = (char *)context->alist + context->count;
strncpy(offset, xfs_xattr_prefix(flags), prefix_len);
offset += prefix_len;
strncpy(offset, (char *)name, namelen); /* real name */
offset += namelen;
*offset = '\0';
context->count += prefix_len + namelen + 1;
return 0;
}
static int
xfs_xattr_put_listent_sizes(
struct xfs_attr_list_context *context,
int flags,
unsigned char *name,
int namelen,
int valuelen,
unsigned char *value)
{
context->count += xfs_xattr_prefix_len(flags) + namelen + 1;
return 0;
}
static int
list_one_attr(const char *name, const size_t len, void *data,
size_t size, ssize_t *result)
{
char *p = data + *result;
*result += len;
if (!size)
return 0;
if (*result > size)
return -ERANGE;
strcpy(p, name);
return 0;
}
ssize_t
xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size)
{
struct xfs_attr_list_context context;
struct attrlist_cursor_kern cursor = { 0 };
struct inode *inode = dentry->d_inode;
int error;
/*
* First read the regular on-disk attributes.
*/
memset(&context, 0, sizeof(context));
context.dp = XFS_I(inode);
context.cursor = &cursor;
context.resynch = 1;
context.alist = data;
context.bufsize = size;
context.firstu = context.bufsize;
if (size)
context.put_listent = xfs_xattr_put_listent;
else
context.put_listent = xfs_xattr_put_listent_sizes;
xfs_attr_list_int(&context);
if (context.count < 0)
return -ERANGE;
/*
* Then add the two synthetic ACL attributes.
*/
if (posix_acl_access_exists(inode)) {
error = list_one_attr(POSIX_ACL_XATTR_ACCESS,
strlen(POSIX_ACL_XATTR_ACCESS) + 1,
data, size, &context.count);
if (error)
return error;
}
if (posix_acl_default_exists(inode)) {
error = list_one_attr(POSIX_ACL_XATTR_DEFAULT,
strlen(POSIX_ACL_XATTR_DEFAULT) + 1,
data, size, &context.count);
if (error)
return error;
}
return context.count;
}
| gpl-2.0 |
Kalashnikitty/Aurora_D802 | drivers/net/wireless/bcm4334/src/dhd/sys/dhd_sdio.c | 29 | 251283 | /*
* DHD Bus Module for SDIO
*
* Copyright (C) 1999-2013, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
*
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* $Id: dhd_sdio.c 426658 2013-09-30 12:14:01Z $
*/
#include <typedefs.h>
#include <osl.h>
#include <bcmsdh.h>
#ifdef BCMEMBEDIMAGE
#include BCMEMBEDIMAGE
#endif /* BCMEMBEDIMAGE */
#include <bcmdefs.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>
#include <siutils.h>
#include <hndpmu.h>
#include <hndsoc.h>
#include <bcmsdpcm.h>
#if defined(DHD_DEBUG)
#include <hndrte_armtrap.h>
#include <hndrte_cons.h>
#endif /* defined(DHD_DEBUG) */
#include <sbchipc.h>
#include <sbhnddma.h>
#include <sdio.h>
#ifdef BCMSPI
#include <spid.h>
#endif /* BCMSPI */
#include <sbsdio.h>
#include <sbsdpcmdev.h>
#include <bcmsdpcm.h>
#include <bcmsdbus.h>
#include <proto/ethernet.h>
#include <proto/802.1d.h>
#include <proto/802.11.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_dbg.h>
#include <dhdioctl.h>
#include <sdiovar.h>
bool dhd_mp_halting(dhd_pub_t *dhdp);
extern void bcmsdh_waitfor_iodrain(void *sdh);
extern void bcmsdh_reject_ioreqs(void *sdh, bool reject);
extern bool bcmsdh_fatal_error(void *sdh);
#ifndef DHDSDIO_MEM_DUMP_FNAME
#define DHDSDIO_MEM_DUMP_FNAME "mem_dump"
#endif
#define QLEN 256 /* bulk rx and tx queue lengths */
#define FCHI (QLEN - 10)
#define FCLOW (FCHI / 2)
#define PRIOMASK 7
#define TXRETRIES 2 /* # of retries for tx frames */
#ifndef DHD_RXBOUND
#define DHD_RXBOUND 50 /* Default for max rx frames in one scheduling */
#endif
#ifndef DHD_TXBOUND
#define DHD_TXBOUND 20 /* Default for max tx frames in one scheduling */
#endif
#define DHD_TXMINMAX 1 /* Max tx frames if rx still pending */
#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
#define MAX_NVRAMBUF_SIZE 4096 /* max nvram buf size */
#define MAX_DATA_BUF (32 * 1024) /* Must be large enough to hold biggest possible glom */
#ifndef DHD_FIRSTREAD
#define DHD_FIRSTREAD 32
#endif
#if !ISPOWEROF2(DHD_FIRSTREAD)
#error DHD_FIRSTREAD is not a power of 2!
#endif
#ifdef BCMSDIOH_TXGLOM
/* Total length of TX frame header for dongle protocol */
#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN + SDPCM_SWHEADER_LEN)
/* Total length of RX frame for dongle protocol */
#else
/* Total length of TX frame header for dongle protocol */
#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
#endif
#define SDPCM_HDRLEN_RX (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
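/*
 * With BCMSDIOH_TXGLOM the TX header above additionally reserves
 * SDPCM_HWEXT_LEN for the hardware extension header used by glommed
 * frames; the RX header length (SDPCM_HDRLEN_RX) stays at frame tag
 * plus software header either way.
 */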
#ifdef SDTEST
#define SDPCM_RESERVE (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN)
#else
#define SDPCM_RESERVE (SDPCM_HDRLEN + DHD_SDALIGN)
#endif
/* Space for header read, limit for data packets */
#ifndef MAX_HDR_READ
#define MAX_HDR_READ 32
#endif
#if !ISPOWEROF2(MAX_HDR_READ)
#error MAX_HDR_READ is not a power of 2!
#endif
#define MAX_RX_DATASZ 2048
/* Maximum milliseconds to wait for F2 to come up */
#define DHD_WAIT_F2RDY 3000
/* Bump up limit on waiting for HT to account for first startup;
* if the image is doing a CRC calculation before programming the PMU
* for HT availability, it could take a couple hundred ms more, so
* max out at 1 second (1000000 us).
*/
#if (PMU_MAX_TRANSITION_DLY <= 1000000)
#undef PMU_MAX_TRANSITION_DLY
#define PMU_MAX_TRANSITION_DLY 1000000
#endif
/* Value for ChipClockCSR during initial setup */
#define DHD_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ)
#define DHD_INIT_CLKCTL2 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP)
/* Flags for SDH calls */
#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
/* Packet free applicable unconditionally for sdio and sdspi. Conditional if
* bufpool was present for gspi bus.
*/
#define PKTFREE2() if ((bus->bus != SPI_BUS) || bus->usebufpool) \
PKTFREE(bus->dhd->osh, pkt, FALSE);
DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
extern void bcmsdh_set_irq(int flag);
#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
#ifdef PROP_TXSTATUS
extern void dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success);
extern void dhd_wlfc_trigger_pktcommit(dhd_pub_t *dhd);
#ifdef DHDTCPACK_SUPPRESS
extern int dhd_os_wlfc_block(dhd_pub_t *pub);
extern int dhd_os_wlfc_unblock(dhd_pub_t *pub);
#endif /* DHDTCPACK_SUPPRESS */
#endif /* PROP_TXSTATUS */
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
DEFINE_MUTEX(_dhd_sdio_mutex_lock_);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif
#ifdef DHD_DEBUG
/* Device console log buffer state */
#define CONSOLE_LINE_MAX 192
#define CONSOLE_BUFFER_MAX 2024
typedef struct dhd_console {
uint count; /* Poll interval msec counter */
uint log_addr; /* Log struct address (fixed) */
hndrte_log_t log; /* Log struct (host copy) */
uint bufsize; /* Size of log buffer */
uint8 *buf; /* Log buffer (host copy) */
uint last; /* Last buffer read index */
} dhd_console_t;
#endif /* DHD_DEBUG */
#define REMAP_ENAB(bus) ((bus)->remap)
#define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
#define KSO_ENAB(bus) ((bus)->kso)
#define SR_ENAB(bus) ((bus)->_srenab)
#define SLPAUTO_ENAB(bus) ((SR_ENAB(bus)) && ((bus)->_slpauto))
#define MIN_RSRC_ADDR (SI_ENUM_BASE + 0x618)
#define MIN_RSRC_SR 0x3
#define CORE_CAPEXT_ADDR (SI_ENUM_BASE + 0x64c)
#define CORE_CAPEXT_SR_SUPPORTED_MASK (1 << 1)
#define RCTL_MACPHY_DISABLE_MASK (1 << 26)
#define RCTL_LOGIC_DISABLE_MASK (1 << 27)
#define OOB_WAKEUP_ENAB(bus) ((bus)->_oobwakeup)
#define GPIO_DEV_SRSTATE 16 /* Host gpio17 mapped to device gpio0 SR state */
#define GPIO_DEV_SRSTATE_TIMEOUT 320000 /* 320ms */
#define GPIO_DEV_WAKEUP 17 /* Host gpio17 mapped to device gpio1 wakeup */
#define CC_CHIPCTRL2_GPIO1_WAKEUP (1 << 0)
#define CC_CHIPCTRL3_SR_ENG_ENABLE (1 << 2)
#define OVERFLOW_BLKSZ512_WM 48
#define OVERFLOW_BLKSZ512_MES 80
#define CC_PMUCC3 (0x3)
/* Private data for SDIO bus interaction */
typedef struct dhd_bus {
dhd_pub_t *dhd;
bcmsdh_info_t *sdh; /* Handle for BCMSDH calls */
si_t *sih; /* Handle for SI calls */
char *vars; /* Variables (from CIS and/or other) */
uint varsz; /* Size of variables buffer */
uint32 sbaddr; /* Current SB window pointer (-1, invalid) */
sdpcmd_regs_t *regs; /* Registers for SDIO core */
uint sdpcmrev; /* SDIO core revision */
uint armrev; /* CPU core revision */
uint ramrev; /* SOCRAM core revision */
uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */
uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */
uint32 srmemsize; /* Size of SRMEM */
uint32 bus; /* gSPI or SDIO bus */
uint32 hostintmask; /* Copy of Host Interrupt Mask */
uint32 intstatus; /* Intstatus bits (events) pending */
bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */
bool fcstate; /* State of dongle flow-control */
uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */
char *fw_path; /* module_param: path to firmware image */
char *nv_path; /* module_param: path to nvram vars file */
const char *nvram_params; /* user specified nvram params. */
uint blocksize; /* Block size of SDIO transfers */
uint roundup; /* Max roundup limit */
struct pktq txq; /* Queue length used for flow-control */
uint8 flowcontrol; /* per prio flow control bitmask */
uint8 tx_seq; /* Transmit sequence number (next) */
uint8 tx_max; /* Maximum transmit sequence allowed */
uint8 hdrbuf[MAX_HDR_READ + DHD_SDALIGN];
uint8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
uint16 nextlen; /* Next Read Len from last header */
uint8 rx_seq; /* Receive sequence number (expected) */
bool rxskip; /* Skip receive (awaiting NAK ACK) */
void *glomd; /* Packet containing glomming descriptor */
void *glom; /* Packet chain for glommed superframe */
uint glomerr; /* Glom packet read errors */
uint8 *rxbuf; /* Buffer for receiving control packets */
uint rxblen; /* Allocated length of rxbuf */
uint8 *rxctl; /* Aligned pointer into rxbuf */
uint8 *databuf; /* Buffer for receiving big glom packet */
uint8 *dataptr; /* Aligned pointer into databuf */
uint rxlen; /* Length of valid data in buffer */
uint8 sdpcm_ver; /* Bus protocol reported by dongle */
bool intr; /* Use interrupts */
bool poll; /* Use polling */
bool ipend; /* Device interrupt is pending */
bool intdis; /* Interrupts disabled by isr */
uint intrcount; /* Count of device interrupt callbacks */
uint lastintrs; /* Count as of last watchdog timer */
uint spurious; /* Count of spurious interrupts */
uint pollrate; /* Ticks between device polls */
uint polltick; /* Tick counter */
uint pollcnt; /* Count of active polls */
#ifdef DHD_DEBUG
dhd_console_t console; /* Console output polling support */
uint console_addr; /* Console address from shared struct */
#endif /* DHD_DEBUG */
uint regfails; /* Count of R_REG/W_REG failures */
uint clkstate; /* State of sd and backplane clock(s) */
bool activity; /* Activity flag for clock down */
int32 idletime; /* Control for activity timeout */
int32 idlecount; /* Activity timeout counter */
int32 idleclock; /* How to set bus driver when idle */
int32 sd_divisor; /* Speed control to bus driver */
int32 sd_mode; /* Mode control to bus driver */
int32 sd_rxchain; /* If bcmsdh api accepts PKT chains */
bool use_rxchain; /* If dhd should use PKT chains */
bool sleeping; /* Is SDIO bus sleeping? */
uint rxflow_mode; /* Rx flow control mode */
bool rxflow; /* Is rx flow control on */
uint prev_rxlim_hit; /* Is prev rx limit exceeded (per dpc schedule) */
bool alp_only; /* Don't use HT clock (ALP only) */
/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
bool usebufpool;
#ifdef SDTEST
/* external loopback */
bool ext_loop;
uint8 loopid;
/* pktgen configuration */
uint pktgen_freq; /* Ticks between bursts */
uint pktgen_count; /* Packets to send each burst */
uint pktgen_print; /* Bursts between count displays */
uint pktgen_total; /* Stop after this many */
uint pktgen_minlen; /* Minimum packet data len */
uint pktgen_maxlen; /* Maximum packet data len */
uint pktgen_mode; /* Configured mode: tx, rx, or echo */
uint pktgen_stop; /* Number of tx failures causing stop */
/* active pktgen fields */
uint pktgen_tick; /* Tick counter for bursts */
uint pktgen_ptick; /* Burst counter for printing */
uint pktgen_sent; /* Number of test packets generated */
uint pktgen_rcvd; /* Number of test packets received */
uint pktgen_prev_time; /* Time at which previous stats where printed */
uint pktgen_prev_sent; /* Number of test packets generated when
* previous stats were printed
*/
uint pktgen_prev_rcvd; /* Number of test packets received when
* previous stats were printed
*/
uint pktgen_fail; /* Number of failed send attempts */
uint16 pktgen_len; /* Length of next packet to send */
#define PKTGEN_RCV_IDLE (0)
#define PKTGEN_RCV_ONGOING (1)
uint16 pktgen_rcv_state; /* receive state */
uint pktgen_rcvd_rcvsession; /* test pkts rcvd per rcv session. */
#endif /* SDTEST */
/* Some additional counters */
uint tx_sderrs; /* Count of tx attempts with sd errors */
uint fcqueued; /* Tx packets that got queued */
uint rxrtx; /* Count of rtx requests (NAK to dongle) */
uint rx_toolong; /* Receive frames too long to receive */
uint rxc_errors; /* SDIO errors when reading control frames */
uint rx_hdrfail; /* SDIO errors on header reads */
uint rx_badhdr; /* Bad received headers (roosync?) */
uint rx_badseq; /* Mismatched rx sequence number */
uint fc_rcvd; /* Number of flow-control events received */
uint fc_xoff; /* Number which turned on flow-control */
uint fc_xon; /* Number which turned off flow-control */
uint rxglomfail; /* Failed deglom attempts */
uint rxglomframes; /* Number of glom frames (superframes) */
uint rxglompkts; /* Number of packets from glom frames */
uint f2rxhdrs; /* Number of header reads */
uint f2rxdata; /* Number of frame data reads */
uint f2txdata; /* Number of f2 frame writes */
uint f1regdata; /* Number of f1 register accesses */
#ifdef BCMSPI
bool dwordmode;
#endif /* BCMSPI */
uint8 *ctrl_frame_buf;
uint32 ctrl_frame_len;
bool ctrl_frame_stat;
#ifndef BCMSPI
uint32 rxint_mode; /* rx interrupt mode */
#endif /* BCMSPI */
bool remap; /* Contiguous 1MB RAM: 512K socram + 512K devram
* Available with socram rev 16
* Remap region not DMA-able
*/
bool kso;
bool _slpauto;
bool _oobwakeup;
bool _srenab;
bool readframes;
bool reqbussleep;
uint32 resetinstr;
uint32 dongle_ram_base;
#ifdef BCMSDIOH_TXGLOM
void *glom_pkt_arr[SDPCM_MAXGLOM_SIZE]; /* Array of pkts for glomming */
uint16 glom_cnt; /* Number of pkts in the glom array */
uint16 glom_total_len; /* Total length of pkts in glom array */
bool glom_enable; /* Flag to indicate whether tx glom is enabled/disabled */
uint8 glom_mode; /* Glom mode - 0-copy mode, 1 - Multi-descriptor mode */
uint32 glomsize; /* Glom size limitation */
#endif
} dhd_bus_t;
/* clkstate */
#define CLK_NONE 0
#define CLK_SDONLY 1
#define CLK_PENDING 2 /* Not used yet */
#define CLK_AVAIL 3
#define DHD_NOPMU(dhd) (FALSE)
#ifdef DHD_DEBUG
static int qcount[NUMPRIO];
static int tx_packets[NUMPRIO];
#endif /* DHD_DEBUG */
/* Deferred transmit */
const uint dhd_deferred_tx = 1;
extern uint dhd_watchdog_ms;
#ifdef BCMSPI_ANDROID
extern uint *dhd_spi_lockcount;
#endif /* BCMSPI_ANDROID */
extern void dhd_os_wd_timer(void *bus, uint wdtick);
/* Tx/Rx bounds */
uint dhd_txbound;
uint dhd_rxbound;
uint dhd_txminmax = DHD_TXMINMAX;
/* override the RAM size if possible */
#define DONGLE_MIN_RAMSIZE (128 * 1024)
int dhd_dongle_ramsize;
uint dhd_doflow = TRUE;
uint dhd_dpcpoll = FALSE;
module_param(dhd_doflow, uint, 0644);
module_param(dhd_dpcpoll, uint, 0644);
static bool dhd_alignctl;
static bool sd1idle;
static bool retrydata;
#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata)
#ifdef BCMSPI
/* At a watermark around 8 the spid hits underflow error. */
static const uint watermark = 32;
static const uint mesbusyctrl = 0;
#elif defined(SDIO_CRC_ERROR_FIX)
static uint watermark = 48;
static uint mesbusyctrl = 80;
#else
static const uint watermark = 8;
static const uint mesbusyctrl = 0;
#endif /* BCMSPI */
static const uint firstread = DHD_FIRSTREAD;
#define HDATLEN (firstread - (SDPCM_HDRLEN))
/* Retry count for register access failures */
static const uint retry_limit = 2;
/* Force even SD lengths (some host controllers mess up on odd bytes) */
static bool forcealign;
#define ALIGNMENT 4
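/*
* Illustrative sketch (not part of the original source): what the
* forcealign rounding does to a frame length. ROUNDUP() is the Broadcom
* utility macro that rounds up to a multiple of its second argument.
*/
#if 0 /* example only */
uint16 len = 63;
if (forcealign && (len & (ALIGNMENT - 1)))
len = ROUNDUP(len, ALIGNMENT); /* 63 -> 64 */
#endif /* example only */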
#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable);
#endif
#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD)
#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD
#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */
#define PKTALIGN(osh, p, len, align) \
do { \
uint datalign; \
datalign = (uintptr)PKTDATA((osh), (p)); \
datalign = ROUNDUP(datalign, (align)) - datalign; \
ASSERT(datalign < (align)); \
ASSERT(PKTLEN((osh), (p)) >= ((len) + datalign)); \
if (datalign) \
PKTPULL((osh), (p), datalign); \
PKTSETLEN((osh), (p), (len)); \
} while (0)
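/*
* Worked example for PKTALIGN() (illustrative, addresses assumed): if
* PKTDATA() returns 0x1003 and align is 4, then
* datalign = ROUNDUP(0x1003, 4) - 0x1003 = 1,
* so one byte is pulled from the head, leaving the payload start 4-byte
* aligned before PKTSETLEN() trims the packet to 'len'.
*/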
/* Limit on rounding up frames */
static const uint max_roundup = 512;
/* Try doing readahead */
static bool dhd_readahead;
/* To check if there's window offered */
#define DATAOK(bus) \
(((uint8)(bus->tx_max - bus->tx_seq) > 1) && \
(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
/* To check if there's window offered for ctrl frame */
#define TXCTLOK(bus) \
(((uint8)(bus->tx_max - bus->tx_seq) != 0) && \
(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
/* Number of pkts available in dongle for data RX */
#define DATABUFCNT(bus) \
((uint8)(bus->tx_max - bus->tx_seq) - 1)
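/*
* Illustrative note (not part of the original source): the window checks
* above rely on 8-bit wrap-around arithmetic. With tx_seq = 0xFE and
* tx_max = 0x02, (uint8)(bus->tx_max - bus->tx_seq) = 4, so DATAOK() is
* true and DATABUFCNT() reports 3 free device buffers. A stale window
* (tx_seq ahead of tx_max) sets bit 0x80 of the difference, and both
* DATAOK() and TXCTLOK() then refuse to send.
*/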
/* Macros to get register read/write status */
/* NOTE: these assume a local dhd_bus_t *bus! */
#define R_SDREG(regvar, regaddr, retryvar) \
do { \
retryvar = 0; \
do { \
regvar = R_REG(bus->dhd->osh, regaddr); \
} while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
if (retryvar) { \
bus->regfails += (retryvar-1); \
if (retryvar > retry_limit) { \
DHD_ERROR(("%s: FAILED" #regvar "READ, LINE %d\n", \
__FUNCTION__, __LINE__)); \
regvar = 0; \
} \
} \
} while (0)
#define W_SDREG(regval, regaddr, retryvar) \
do { \
retryvar = 0; \
do { \
W_REG(bus->dhd->osh, regaddr, regval); \
} while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
if (retryvar) { \
bus->regfails += (retryvar-1); \
if (retryvar > retry_limit) \
DHD_ERROR(("%s: FAILED REGISTER WRITE, LINE %d\n", \
__FUNCTION__, __LINE__)); \
} \
} while (0)
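/*
* Usage sketch (illustrative only; assumes the local dhd_bus_t *bus the
* macros require): read the shared intstatus register and ack it with
* the built-in retry accounting.
*/
#if 0 /* example only */
uint32 intstatus;
uint retries;
R_SDREG(intstatus, &bus->regs->intstatus, retries);
if (intstatus)
W_SDREG(intstatus, &bus->regs->intstatus, retries); /* write-1-to-clear */
#endif /* example only */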
#define BUS_WAKE(bus) \
do { \
bus->idlecount = 0; \
if ((bus)->sleeping) \
dhdsdio_bussleep((bus), FALSE); \
} while (0)
/*
* Packet-available interrupts from dongle to host can be signalled in 3
* different ways whenever the dongle has a packet ready to transmit to the host.
*
* Mode 0: Dongle writes the software host mailbox and host is interrupted.
* Mode 1: (sdiod core rev >= 4)
* Device sets a new bit in the intstatus whenever there is a packet
* available in fifo. Host can't clear this specific status bit until all the
* packets are read from the FIFO. No need to ack dongle intstatus.
* Mode 2: (sdiod core rev >= 4)
* Device sets a bit in the intstatus, and host acks this by writing
* one to this bit. Dongle won't generate any more packet interrupts
* until host reads all the packets from the dongle and reads a zero,
* confirming that there are no more packets. No need to disable host ints.
* Need to ack the intstatus.
*/
#define SDIO_DEVICE_HMB_RXINT 0 /* default old way */
#define SDIO_DEVICE_RXDATAINT_MODE_0 1 /* from sdiod rev 4 */
#define SDIO_DEVICE_RXDATAINT_MODE_1 2 /* from sdiod rev 4 */
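/*
* Illustrative sketch (not from the original source): how a receive path
* keyed off bus->rxint_mode picks the "frame available" status bit; this
* only mirrors what FRAME_AVAIL_MASK() below encodes.
*/
#if 0 /* example only */
uint32 mask;
if (bus->rxint_mode == SDIO_DEVICE_HMB_RXINT)
mask = I_HMB_FRAME_IND; /* mailbox-based (default old way) */
else
mask = I_XMTDATA_AVAIL; /* intstatus-based (sdiod rev >= 4) */
#endif /* example only */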
#ifdef BCMSPI
#define FRAME_AVAIL_MASK(bus) I_HMB_FRAME_IND
#define DHD_BUS SPI_BUS
/* check packet-available-interrupt in piggybacked dstatus */
#define PKT_AVAILABLE(bus, intstatus) (bcmsdh_get_dstatus(bus->sdh) & STATUS_F2_PKT_AVAILABLE)
#define HOSTINTMASK (I_HMB_FC_CHANGE | I_HMB_HOST_INT)
#define GSPI_PR55150_BAILOUT \
do { \
uint32 dstatussw = bcmsdh_get_dstatus((void *)bus->sdh); \
uint32 dstatushw = bcmsdh_cfg_read_word(bus->sdh, SDIO_FUNC_0, SPID_STATUS_REG, NULL); \
uint32 intstatuserr = 0; \
uint retries = 0; \
\
R_SDREG(intstatuserr, &bus->regs->intstatus, retries); \
printf("dstatussw = 0x%x, dstatushw = 0x%x, intstatus = 0x%x\n", \
dstatussw, dstatushw, intstatuserr); \
\
bus->nextlen = 0; \
*finished = TRUE; \
} while (0)
#else /* BCMSDIO */
#define FRAME_AVAIL_MASK(bus) \
((bus->rxint_mode == SDIO_DEVICE_HMB_RXINT) ? I_HMB_FRAME_IND : I_XMTDATA_AVAIL)
#define DHD_BUS SDIO_BUS
#define PKT_AVAILABLE(bus, intstatus) ((intstatus) & (FRAME_AVAIL_MASK(bus)))
#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
#define GSPI_PR55150_BAILOUT
#endif /* BCMSPI */
#ifdef SDTEST
static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq);
static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint count);
#endif
#ifdef DHD_DEBUG
static int dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size);
static int dhd_serialconsole(dhd_bus_t *bus, bool get, bool enable, int *bcmerror);
#endif /* DHD_DEBUG */
static int dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap);
static int dhdsdio_download_state(dhd_bus_t *bus, bool enter);
static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh);
static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh);
static void dhdsdio_disconnect(void *ptr);
static bool dhdsdio_chipmatch(uint16 chipid);
static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh,
void * regsva, uint16 devid);
static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh);
static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh);
static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation,
bool reset_flag);
static void dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size);
static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
uint8 *buf, uint nbytes,
void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
uint8 *buf, uint nbytes,
void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
#ifdef BCMSDIOH_TXGLOM
static void dhd_bcmsdh_glom_post(dhd_bus_t *bus, uint8 *frame, void *pkt, uint len);
static void dhd_bcmsdh_glom_clear(dhd_bus_t *bus);
#endif
static bool dhdsdio_download_firmware(dhd_bus_t *bus, osl_t *osh, void *sdh);
static int _dhdsdio_download_firmware(dhd_bus_t *bus);
static int dhdsdio_download_code_file(dhd_bus_t *bus, char *image_path);
static int dhdsdio_download_nvram(dhd_bus_t *bus);
#ifdef BCMEMBEDIMAGE
static int dhdsdio_download_code_array(dhd_bus_t *bus);
#endif
static int dhdsdio_bussleep(dhd_bus_t *bus, bool sleep);
static int dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok);
static uint8 dhdsdio_sleepcsr_get(dhd_bus_t *bus);
#ifdef WLMEDIA_HTSF
#include <htsf.h>
extern uint32 dhd_get_htsf(void *dhd, int ifidx);
#endif /* WLMEDIA_HTSF */
static void
dhd_overflow_war(struct dhd_bus *bus)
{
int err;
uint8 devctl, wm, mes;
/* See .ppt in PR for these recommended values */
if (bus->blocksize == 512) {
wm = OVERFLOW_BLKSZ512_WM;
mes = OVERFLOW_BLKSZ512_MES;
} else {
mes = bus->blocksize/4;
wm = bus->blocksize/4;
}
/* Update watermark */
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, wm, &err);
devctl = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
/* Update MES */
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
(mes | SBSDIO_MESBUSYCTRL_ENAB), &err);
DHD_INFO(("Apply overflow WAR: 0x%02x 0x%02x 0x%02x\n",
bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err),
bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, &err),
bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL, &err)));
}
static void
dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size)
{
int32 min_size = DONGLE_MIN_RAMSIZE;
/* Restrict the ramsize to user specified limit */
DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
dhd_dongle_ramsize, min_size));
if ((dhd_dongle_ramsize > min_size) &&
(dhd_dongle_ramsize < (int32)bus->orig_ramsize))
bus->ramsize = dhd_dongle_ramsize;
}
static int
dhdsdio_set_siaddr_window(dhd_bus_t *bus, uint32 address)
{
int err = 0;
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
(address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
if (!err)
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
(address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
if (!err)
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
(address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
return err;
}
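/*
* Note (illustrative): the 32-bit backplane address is programmed a byte
* at a time -- bits 15:8 into SBADDRLOW, bits 23:16 into SBADDRMID and
* bits 31:24 into SBADDRHIGH, each masked to the bits the window register
* implements -- and the remaining low address bits are then supplied as
* the offset of the subsequent function-1 read or write.
*/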
#ifdef BCMSPI
static void
dhdsdio_wkwlan(dhd_bus_t *bus, bool on)
{
int err;
uint32 regdata;
bcmsdh_info_t *sdh = bus->sdh;
if (bus->sih->buscoretype == SDIOD_CORE_ID) {
/* wake up wlan function: WAKE_UP goes out as ht_avail_request and alp_avail_request */
regdata = bcmsdh_cfg_read_word(sdh, SDIO_FUNC_0, SPID_CONFIG, NULL);
DHD_INFO(("F0 REG0 rd = 0x%x\n", regdata));
if (on == TRUE)
regdata |= WAKE_UP;
else
regdata &= ~WAKE_UP;
bcmsdh_cfg_write_word(sdh, SDIO_FUNC_0, SPID_CONFIG, regdata, &err);
}
}
#endif /* BCMSPI */
#ifdef USE_OOB_GPIO1
static int
dhdsdio_oobwakeup_init(dhd_bus_t *bus)
{
uint32 val, addr, data;
bcmsdh_gpioouten(bus->sdh, GPIO_DEV_WAKEUP);
addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
/* Set device for gpio1 wakeup */
bcmsdh_reg_write(bus->sdh, addr, 4, 2);
val = bcmsdh_reg_read(bus->sdh, data, 4);
val |= CC_CHIPCTRL2_GPIO1_WAKEUP;
bcmsdh_reg_write(bus->sdh, data, 4, val);
bus->_oobwakeup = TRUE;
return 0;
}
#endif /* USE_OOB_GPIO1 */
#ifndef BCMSPI
/*
* Query if FW is in SR mode
*/
static bool
dhdsdio_sr_cap(dhd_bus_t *bus)
{
bool cap = FALSE;
uint32 core_capext, addr, data;
if (bus->sih->chip == BCM4324_CHIP_ID) {
addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
bcmsdh_reg_write(bus->sdh, addr, 4, 3);
core_capext = bcmsdh_reg_read(bus->sdh, data, 4);
} else if (bus->sih->chip == BCM4330_CHIP_ID) {
core_capext = FALSE;
} else if ((bus->sih->chip == BCM4335_CHIP_ID) ||
(bus->sih->chip == BCM4339_CHIP_ID) ||
(bus->sih->chip == BCM4350_CHIP_ID)) {
core_capext = TRUE;
} else {
core_capext = bcmsdh_reg_read(bus->sdh, CORE_CAPEXT_ADDR, 4);
core_capext = (core_capext & CORE_CAPEXT_SR_SUPPORTED_MASK);
}
if (!(core_capext))
return FALSE;
if (bus->sih->chip == BCM4324_CHIP_ID) {
/* FIX: Should change to query SR control register instead */
cap = TRUE;
} else if ((bus->sih->chip == BCM4335_CHIP_ID) ||
(bus->sih->chip == BCM4339_CHIP_ID)) {
uint32 enabval = 0;
addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
bcmsdh_reg_write(bus->sdh, addr, 4, CC_PMUCC3);
enabval = bcmsdh_reg_read(bus->sdh, data, 4);
if (bus->sih->chip == BCM4350_CHIP_ID)
enabval &= CC_CHIPCTRL3_SR_ENG_ENABLE;
if (enabval)
cap = TRUE;
} else {
data = bcmsdh_reg_read(bus->sdh,
SI_ENUM_BASE + OFFSETOF(chipcregs_t, retention_ctl), 4);
if ((data & (RCTL_MACPHY_DISABLE_MASK | RCTL_LOGIC_DISABLE_MASK)) == 0)
cap = TRUE;
}
return cap;
}
static int
dhdsdio_srwar_init(dhd_bus_t *bus)
{
bcmsdh_gpio_init(bus->sdh);
#ifdef USE_OOB_GPIO1
dhdsdio_oobwakeup_init(bus);
#endif
return 0;
}
static int
dhdsdio_sr_init(dhd_bus_t *bus)
{
uint8 val;
int err = 0;
if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2))
dhdsdio_srwar_init(bus);
val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL,
1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT, &err);
val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
/* Add CMD14 Support */
dhdsdio_devcap_set(bus,
(SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT | SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT));
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_FORCE_HT, &err);
bus->_slpauto = dhd_slpauto ? TRUE : FALSE;
bus->_srenab = TRUE;
return 0;
}
#endif /* BCMSPI */
/*
* FIX: Be sure the KSO bit is enabled.
* It currently defaults to 0, but it should be 1.
*/
static int
dhdsdio_clk_kso_init(dhd_bus_t *bus)
{
uint8 val;
int err = 0;
/* set flag */
bus->kso = TRUE;
/*
* Enable KeepSdioOn (KSO) bit for normal operation
* Default is 0 (4334A0) so set it. Fixed in B0.
*/
val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, NULL);
if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, val, &err);
if (err)
DHD_ERROR(("%s: SBSDIO_FUNC1_SLEEPCSR err: 0x%x\n", __FUNCTION__, err));
}
return 0;
}
#define KSO_DBG(x)
#define KSO_WAIT_US 50
#if defined(CUSTOMER_HW4)
#define MAX_KSO_ATTEMPTS 64
#else
#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
#endif /* CUSTOMER_HW4 */
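/*
* Illustrative note (not part of the original source): the retry budget
* above bounds the SLEEPCSR polling loop in dhdsdio_clk_kso_enab(), which
* re-reads (and, on mismatch, re-writes) the register every KSO_WAIT_US
* microseconds until the expected KSO/DEVON bits settle or
* MAX_KSO_ATTEMPTS is exhausted.
*/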
static int
dhdsdio_clk_kso_enab(dhd_bus_t *bus, bool on)
{
uint8 wr_val = 0, rd_val, cmp_val, bmask;
int err = 0;
int try_cnt = 0;
KSO_DBG(("%s> op:%s\n", __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR")));
wr_val |= (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
if (on) {
cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK | SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK;
bmask = cmp_val;
OSL_SLEEP(3);
} else {
/* Put device to sleep, turn off KSO */
cmp_val = 0;
bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK;
}
do {
rd_val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err);
if (((rd_val & bmask) == cmp_val) && !err)
break;
KSO_DBG(("%s> KSO wr/rd retry:%d, ERR:%x \n", __FUNCTION__, try_cnt, err));
OSL_DELAY(KSO_WAIT_US);
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
} while (try_cnt++ < MAX_KSO_ATTEMPTS);
if (try_cnt > 2)
KSO_DBG(("%s> op:%s, try_cnt:%d, rd_val:%x, ERR:%x \n",
__FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err));
if (try_cnt > MAX_KSO_ATTEMPTS) {
DHD_ERROR(("%s> op:%s, ERROR: try_cnt:%d, rd_val:%x, ERR:%x \n",
__FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err));
}
return err;
}
static int
dhdsdio_clk_kso_iovar(dhd_bus_t *bus, bool on)
{
int err = 0;
if (on == FALSE) {
BUS_WAKE(bus);
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
DHD_ERROR(("%s: KSO disable clk: 0x%x\n", __FUNCTION__,
bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, &err)));
dhdsdio_clk_kso_enab(bus, FALSE);
} else {
DHD_ERROR(("%s: KSO enable\n", __FUNCTION__));
/* Make sure we have SD bus access */
if (bus->clkstate == CLK_NONE) {
DHD_ERROR(("%s: Request SD clk\n", __FUNCTION__));
dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
}
dhdsdio_clk_kso_enab(bus, TRUE);
DHD_ERROR(("%s: sleepcsr: 0x%x\n", __FUNCTION__,
dhdsdio_sleepcsr_get(bus)));
}
bus->kso = on;
BCM_REFERENCE(err);
return 0;
}
static uint8
dhdsdio_sleepcsr_get(dhd_bus_t *bus)
{
int err = 0;
uint8 val = 0;
val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err);
if (err)
DHD_TRACE(("Failed to read SLEEPCSR: %d\n", err));
return val;
}
uint8
dhdsdio_devcap_get(dhd_bus_t *bus)
{
return bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, NULL);
}
static int
dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap)
{
int err = 0;
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, cap, &err);
if (err)
DHD_ERROR(("%s: devcap set err: 0x%x\n", __FUNCTION__, err));
return 0;
}
static int
dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on)
{
int err = 0, retry;
uint8 val;
retry = 0;
if (on == TRUE) {
/* Enter Sleep */
/* Be sure we request clk before going to sleep
* so we can wake-up with clk request already set
* else device can go back to sleep immediately
*/
if (!SLPAUTO_ENAB(bus))
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
else {
val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
if ((val & SBSDIO_CSR_MASK) == 0) {
DHD_ERROR(("%s: No clock before enter sleep:0x%x\n",
__FUNCTION__, val));
/* Reset clock request */
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
SBSDIO_ALP_AVAIL_REQ, &err);
DHD_ERROR(("%s: clock before sleep:0x%x\n", __FUNCTION__,
bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, &err)));
}
}
DHD_TRACE(("%s: clk before sleep: 0x%x\n", __FUNCTION__,
bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, &err)));
err = dhdsdio_clk_kso_enab(bus, FALSE);
if (OOB_WAKEUP_ENAB(bus))
{
err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, FALSE); /* GPIO_1 is off */
}
} else {
/* Exit Sleep */
/* Make sure we have SD bus access */
if (bus->clkstate == CLK_NONE) {
DHD_TRACE(("%s: Request SD clk\n", __FUNCTION__));
dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
}
if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2)) {
SPINWAIT_SLEEP(sdioh_spinwait_sleep,
(bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) != TRUE),
GPIO_DEV_SRSTATE_TIMEOUT);
if (bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) == FALSE) {
DHD_ERROR(("ERROR: GPIO_DEV_SRSTATE still low!\n"));
}
}
if (OOB_WAKEUP_ENAB(bus))
{
err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, TRUE); /* GPIO_1 is on */
}
do {
err = dhdsdio_clk_kso_enab(bus, TRUE);
if (err)
OSL_SLEEP(10);
} while ((err != 0) && (++retry < 3));
if (err != 0) {
DHD_ERROR(("ERROR: kso set failed retry: %d\n", retry));
err = 0; /* continue anyway */
}
if (err == 0) {
uint8 csr;
/* Wait for device ready during transition to wake-up */
SPINWAIT_SLEEP(sdioh_spinwait_sleep,
(((csr = dhdsdio_sleepcsr_get(bus)) &
SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK) !=
(SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)), (20000));
DHD_TRACE(("%s: ExitSleep sleepcsr: 0x%x\n", __FUNCTION__, csr));
if (!(csr & SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)) {
DHD_ERROR(("%s:ERROR: ExitSleep device NOT Ready! 0x%x\n",
__FUNCTION__, csr));
err = BCME_NODEVICE;
}
SPINWAIT_SLEEP(sdioh_spinwait_sleep,
(((csr = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, &err)) & SBSDIO_HT_AVAIL) !=
(SBSDIO_HT_AVAIL)), (10000));
#ifdef CUSTOMER_HW10
DHD_TRACE(("%s: SBSDIO_FUNC1_CHIPCLKCSR : 0x%x\n", __FUNCTION__, csr));
if ((csr & SBSDIO_HT_AVAIL) != SBSDIO_HT_AVAIL) {
DHD_ERROR(("%s:ERROR: device NOT Ready! 0x%x\n", __FUNCTION__, csr));
err = BCME_NODEVICE;
}
#endif
}
}
/* Update if successful */
if (err == 0)
bus->kso = on ? FALSE : TRUE;
else {
DHD_ERROR(("%s: Sleep request failed: on:%d err:%d\n", __FUNCTION__, on, err));
if (!on && retry > 2)
bus->kso = TRUE;
}
return err;
}
/* Turn backplane clock on or off */
static int
dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok)
{
#define HT_AVAIL_ERROR_MAX 10
static int ht_avail_error = 0;
int err;
uint8 clkctl, clkreq, devctl;
bcmsdh_info_t *sdh;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
clkctl = 0;
sdh = bus->sdh;
if (!KSO_ENAB(bus))
return BCME_OK;
if (SLPAUTO_ENAB(bus)) {
bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
return BCME_OK;
}
if (on) {
/* Request HT Avail */
clkreq = bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
#ifdef BCMSPI
dhdsdio_wkwlan(bus, TRUE);
#endif /* BCMSPI */
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
if (err) {
ht_avail_error++;
if (ht_avail_error < HT_AVAIL_ERROR_MAX) {
DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
else if (ht_avail_error == HT_AVAIL_ERROR_MAX) {
dhd_os_send_hang_message(bus->dhd);
}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
return BCME_ERROR;
} else {
ht_avail_error = 0;
}
/* Check current status */
clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err) {
DHD_ERROR(("%s: HT Avail read error: %d\n", __FUNCTION__, err));
return BCME_ERROR;
}
#if !defined(OOB_INTR_ONLY)
/* Go to pending and await interrupt if appropriate */
if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
/* Allow only clock-available interrupt */
devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
if (err) {
DHD_ERROR(("%s: Devctl access error setting CA: %d\n",
__FUNCTION__, err));
return BCME_ERROR;
}
devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
DHD_INFO(("CLKCTL: set PENDING\n"));
bus->clkstate = CLK_PENDING;
return BCME_OK;
} else
#endif /* !defined (OOB_INTR_ONLY) */
{
if (bus->clkstate == CLK_PENDING) {
/* Cancel CA-only interrupt filter */
devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
}
}
/* Otherwise, wait here (polling) for HT Avail */
if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
SPINWAIT_SLEEP(sdioh_spinwait_sleep,
((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, &err)),
!SBSDIO_CLKAV(clkctl, bus->alp_only)), PMU_MAX_TRANSITION_DLY);
}
if (err) {
DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
return BCME_ERROR;
}
if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n",
__FUNCTION__, PMU_MAX_TRANSITION_DLY, clkctl));
return BCME_ERROR;
}
/* Mark clock available */
bus->clkstate = CLK_AVAIL;
DHD_INFO(("CLKCTL: turned ON\n"));
#if defined(DHD_DEBUG)
if (bus->alp_only == TRUE) {
#if !defined(BCMLXSDMMC)
if (!SBSDIO_ALPONLY(clkctl)) {
DHD_ERROR(("%s: HT Clock, when ALP Only\n", __FUNCTION__));
}
#endif /* !defined(BCMLXSDMMC) */
} else {
if (SBSDIO_ALPONLY(clkctl)) {
DHD_ERROR(("%s: HT Clock should be on.\n", __FUNCTION__));
}
}
#endif /* defined (DHD_DEBUG) */
bus->activity = TRUE;
#ifdef DHD_USE_IDLECOUNT
bus->idlecount = 0;
#endif /* DHD_USE_IDLECOUNT */
} else {
clkreq = 0;
if (bus->clkstate == CLK_PENDING) {
/* Cancel CA-only interrupt filter */
devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
}
bus->clkstate = CLK_SDONLY;
if (!SR_ENAB(bus)) {
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
DHD_INFO(("CLKCTL: turned OFF\n"));
if (err) {
DHD_ERROR(("%s: Failed access turning clock off: %d\n",
__FUNCTION__, err));
return BCME_ERROR;
}
}
#ifdef BCMSPI
dhdsdio_wkwlan(bus, FALSE);
#endif /* BCMSPI */
}
return BCME_OK;
}
/* Change idle/active SD state */
static int
dhdsdio_sdclk(dhd_bus_t *bus, bool on)
{
#ifndef BCMSPI
int err;
int32 iovalue;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
if (on) {
if (bus->idleclock == DHD_IDLE_STOP) {
/* Turn on clock and restore mode */
iovalue = 1;
err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
&iovalue, sizeof(iovalue), TRUE);
if (err) {
DHD_ERROR(("%s: error enabling sd_clock: %d\n",
__FUNCTION__, err));
return BCME_ERROR;
}
iovalue = bus->sd_mode;
err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
&iovalue, sizeof(iovalue), TRUE);
if (err) {
DHD_ERROR(("%s: error changing sd_mode: %d\n",
__FUNCTION__, err));
return BCME_ERROR;
}
} else if (bus->idleclock != DHD_IDLE_ACTIVE) {
/* Restore clock speed */
iovalue = bus->sd_divisor;
err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
&iovalue, sizeof(iovalue), TRUE);
if (err) {
DHD_ERROR(("%s: error restoring sd_divisor: %d\n",
__FUNCTION__, err));
return BCME_ERROR;
}
}
bus->clkstate = CLK_SDONLY;
} else {
/* Stop or slow the SD clock itself */
if ((bus->sd_divisor == -1) || (bus->sd_mode == -1)) {
DHD_TRACE(("%s: can't idle clock, divisor %d mode %d\n",
__FUNCTION__, bus->sd_divisor, bus->sd_mode));
return BCME_ERROR;
}
if (bus->idleclock == DHD_IDLE_STOP) {
if (sd1idle) {
/* Change to SD1 mode and turn off clock */
iovalue = 1;
err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
&iovalue, sizeof(iovalue), TRUE);
if (err) {
DHD_ERROR(("%s: error changing sd_clock: %d\n",
__FUNCTION__, err));
return BCME_ERROR;
}
}
iovalue = 0;
err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
&iovalue, sizeof(iovalue), TRUE);
if (err) {
DHD_ERROR(("%s: error disabling sd_clock: %d\n",
__FUNCTION__, err));
return BCME_ERROR;
}
} else if (bus->idleclock != DHD_IDLE_ACTIVE) {
/* Set divisor to idle value */
iovalue = bus->idleclock;
err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
&iovalue, sizeof(iovalue), TRUE);
if (err) {
DHD_ERROR(("%s: error changing sd_divisor: %d\n",
__FUNCTION__, err));
return BCME_ERROR;
}
}
bus->clkstate = CLK_NONE;
}
#endif /* BCMSPI */
return BCME_OK;
}
/* Transition SD and backplane clock readiness */
static int
dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok)
{
int ret = BCME_OK;
#ifdef DHD_DEBUG
uint oldstate = bus->clkstate;
#endif /* DHD_DEBUG */
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
/* Early exit if we're already there */
if (bus->clkstate == target) {
if (target == CLK_AVAIL) {
dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
bus->activity = TRUE;
#ifdef DHD_USE_IDLECOUNT
bus->idlecount = 0;
#endif /* DHD_USE_IDLECOUNT */
}
return ret;
}
switch (target) {
case CLK_AVAIL:
/* Make sure SD clock is available */
if (bus->clkstate == CLK_NONE)
dhdsdio_sdclk(bus, TRUE);
/* Now request HT Avail on the backplane */
ret = dhdsdio_htclk(bus, TRUE, pendok);
if (ret == BCME_OK) {
dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
bus->activity = TRUE;
#ifdef DHD_USE_IDLECOUNT
bus->idlecount = 0;
#endif /* DHD_USE_IDLECOUNT */
}
break;
case CLK_SDONLY:
/* Remove HT request, or bring up SD clock */
if (bus->clkstate == CLK_NONE)
ret = dhdsdio_sdclk(bus, TRUE);
else if (bus->clkstate == CLK_AVAIL)
ret = dhdsdio_htclk(bus, FALSE, FALSE);
else
DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n",
bus->clkstate, target));
if (ret == BCME_OK) {
dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
}
break;
case CLK_NONE:
/* Make sure to remove HT request */
if (bus->clkstate == CLK_AVAIL)
ret = dhdsdio_htclk(bus, FALSE, FALSE);
/* Now remove the SD clock */
ret = dhdsdio_sdclk(bus, FALSE);
#ifdef DHD_DEBUG
if (dhd_console_ms == 0)
#endif /* DHD_DEBUG */
if (bus->poll == 0)
dhd_os_wd_timer(bus->dhd, 0);
break;
}
#ifdef DHD_DEBUG
DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, bus->clkstate));
#endif /* DHD_DEBUG */
return ret;
}
static int
dhdsdio_bussleep(dhd_bus_t *bus, bool sleep)
{
int err = 0;
bcmsdh_info_t *sdh = bus->sdh;
sdpcmd_regs_t *regs = bus->regs;
uint retries = 0;
DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n",
(sleep ? "SLEEP" : "WAKE"),
(bus->sleeping ? "SLEEP" : "WAKE")));
/* Done if we're already in the requested state */
if (sleep == bus->sleeping)
return BCME_OK;
/* Going to sleep: set the alarm and turn off the lights... */
if (sleep) {
/* Don't sleep if something is pending */
#ifdef CUSTOMER_HW4
if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq) || bus->readframes ||
bus->ctrl_frame_stat)
#else
if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
#endif /* CUSTOMER_HW4 */
return BCME_BUSY;
if (!SLPAUTO_ENAB(bus)) {
/* Disable SDIO interrupts (no longer interested) */
bcmsdh_intr_disable(bus->sdh);
/* Make sure the controller has the bus up */
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
/* Tell device to start using OOB wakeup */
W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
if (retries > retry_limit)
DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
/* Turn off our contribution to the HT clock request */
dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
/* Isolate the bus */
if (bus->sih->chip != BCM4329_CHIP_ID &&
bus->sih->chip != BCM4319_CHIP_ID) {
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
SBSDIO_DEVCTL_PADS_ISO, NULL);
}
} else {
/* Leave interrupts enabled since device can exit sleep and
* interrupt host
*/
err = dhdsdio_clk_devsleep_iovar(bus, TRUE /* sleep */);
}
/* Change state */
bus->sleeping = TRUE;
} else {
/* Waking up: bus power up is ok, set local state */
if (!SLPAUTO_ENAB(bus)) {
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, &err);
/* Force pad isolation off if possible (in case power never toggled) */
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0, NULL);
/* Make sure the controller has the bus up */
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
/* Send misc interrupt to indicate OOB not needed */
W_SDREG(0, &regs->tosbmailboxdata, retries);
if (retries <= retry_limit)
W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
if (retries > retry_limit)
DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
/* Make sure we have SD bus access */
dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
/* Enable interrupts again */
if (bus->intr && (bus->dhd->busstate == DHD_BUS_DATA)) {
bus->intdis = FALSE;
bcmsdh_intr_enable(bus->sdh);
}
} else {
err = dhdsdio_clk_devsleep_iovar(bus, FALSE /* wake */);
}
if (err == 0) {
/* Change state */
bus->sleeping = FALSE;
}
}
return err;
}
#if defined(CUSTOMER_HW4) && defined(USE_DYNAMIC_F2_BLKSIZE)
int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size)
{
int func_blk_size = function_num;
int bcmerr = 0;
int result;
bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", &func_blk_size,
sizeof(int), &result, sizeof(int), IOV_GET);
if (bcmerr != BCME_OK) {
DHD_ERROR(("%s: Get F%d Block size error\n", __FUNCTION__, function_num));
return BCME_ERROR;
}
if (result != block_size) {
DHD_TRACE_HW4(("%s: F%d Block size set from %d to %d\n",
__FUNCTION__, function_num, result, block_size));
func_blk_size = function_num << 16 | block_size;
bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", NULL,
0, &func_blk_size, sizeof(int32), IOV_SET);
if (bcmerr != BCME_OK) {
DHD_ERROR(("%s: Set F2 Block size error\n", __FUNCTION__));
return BCME_ERROR;
}
}
return BCME_OK;
}
#endif /* CUSTOMER_HW4 && USE_DYNAMIC_F2_BLKSIZE */
#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
void
dhd_enable_oob_intr(struct dhd_bus *bus, bool enable)
{
#if defined(BCMSPI_ANDROID)
bcmsdh_intr_enable(bus->sdh);
#elif defined(HW_OOB)
bcmsdh_enable_hw_oob_intr(bus->sdh, enable);
#else
sdpcmd_regs_t *regs = bus->regs;
uint retries = 0;
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
if (enable == TRUE) {
/* Tell device to start using OOB wakeup */
W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
if (retries > retry_limit)
DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
} else {
/* Send misc interrupt to indicate OOB not needed */
W_SDREG(0, &regs->tosbmailboxdata, retries);
if (retries <= retry_limit)
W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
}
/* Turn off our contribution to the HT clock request */
dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
#endif /* !defined(HW_OOB) */
}
#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
#ifdef DHDTCPACK_SUPPRESS
extern bool dhd_use_tcpack_suppress;
/* Please be sure this function is called under dhd_os_tcpacklock() */
void dhd_onoff_tcpack_sup(void *pub, bool on)
{
dhd_pub_t *dhdp = (dhd_pub_t *)pub;
if (dhd_use_tcpack_suppress != on) {
DHD_ERROR(("dhd_onoff_tcpack_sup: %d -> %d\n", dhd_use_tcpack_suppress, on));
dhd_use_tcpack_suppress = on;
dhdp->tcp_ack_info_cnt = 0;
bzero(dhdp->tcp_ack_info_tbl, sizeof(struct tcp_ack_info)*MAXTCPSTREAMS);
} else
DHD_ERROR(("dhd_onoff_tcpack_sup: alread %d\n", on));
return;
}
inline void dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt)
{
uint8 i;
tcp_ack_info_t *tcp_ack_info = NULL;
int tbl_cnt;
dhd_os_tcpacklock(dhdp);
tbl_cnt = dhdp->tcp_ack_info_cnt;
for (i = 0; i < tbl_cnt; i++) {
tcp_ack_info = &dhdp->tcp_ack_info_tbl[i];
if (tcp_ack_info->p_tcpackinqueue == pkt) {
/* This pkt is being transmitted, so remove its tcp_ack_info.
* Compact the array over the removed slot (unless it is the
* last element), then clear the freed tail entry.
*/
if (i < tbl_cnt-1) {
memmove(&dhdp->tcp_ack_info_tbl[i],
&dhdp->tcp_ack_info_tbl[i+1],
sizeof(struct tcp_ack_info)*(tbl_cnt - (i+1)));
}
bzero(&dhdp->tcp_ack_info_tbl[tbl_cnt-1], sizeof(struct tcp_ack_info));
if (--dhdp->tcp_ack_info_cnt < 0) {
DHD_ERROR(("dhdsdio_sendfromq:(ERROR) tcp_ack_info_cnt %d"
" Stop using tcpack_suppress\n", dhdp->tcp_ack_info_cnt));
dhd_onoff_tcpack_sup(dhdp, FALSE);
}
break;
}
}
dhd_os_tcpackunlock(dhdp);
}
bool
dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt)
{
uint8 *eh_header;
uint16 eh_type;
uint8 *ip_header;
uint8 *tcp_header;
uint32 ip_hdr_len;
uint32 cur_framelen;
uint8 bdc_hdr_len = BDC_HEADER_LEN;
uint8 wlfc_hdr_len = 0;
uint8 *data = PKTDATA(dhdp->osh, pkt);
cur_framelen = PKTLEN(dhdp->osh, pkt);
#ifdef PROP_TXSTATUS
/* In this case, BDC header is not pushed in dhd_sendpkt() */
if (dhdp->wlfc_state) {
bdc_hdr_len = 0;
wlfc_hdr_len = 8;
}
#endif
if (cur_framelen < bdc_hdr_len + ETHER_HDR_LEN) {
DHD_TRACE(("dhd_tcpack_suppress: Too short packet length %d\n", cur_framelen));
return FALSE;
}
/* Get rid of BDC header */
eh_header = data + bdc_hdr_len;
cur_framelen -= bdc_hdr_len;
eh_type = eh_header[12] << 8 | eh_header[13];
if (eh_type != ETHER_TYPE_IP) {
DHD_TRACE(("dhd_tcpack_suppress: Not a IP packet 0x%x\n", eh_type));
return FALSE;
}
DHD_TRACE(("dhd_tcpack_suppress: IP pkt! 0x%x\n", eh_type));
ip_header = eh_header + ETHER_HDR_LEN;
cur_framelen -= ETHER_HDR_LEN;
ip_hdr_len = 4 * (ip_header[0] & 0x0f);
if ((ip_header[0] & 0xf0) != 0x40) {
DHD_TRACE(("dhd_tcpack_suppress: Not IPv4!\n"));
return FALSE;
}
if (cur_framelen < ip_hdr_len) {
DHD_ERROR(("dhd_tcpack_suppress: IP packet length %d wrong!\n", cur_framelen));
return FALSE;
}
/* not tcp */
if (ip_header[9] != 0x06) {
DHD_TRACE(("dhd_tcpack_suppress: Not a TCP packet 0x%x\n", ip_header[9]));
return FALSE;
}
DHD_TRACE(("dhd_tcpack_suppress: TCP pkt!\n"));
tcp_header = ip_header + ip_hdr_len;
/* is it an ack ? */
if (tcp_header[13] == 0x10) {
#if defined(DHD_DEBUG)
uint32 tcp_seq_num = tcp_header[4] << 24 | tcp_header[5] << 16 |
tcp_header[6] << 8 | tcp_header[7];
#endif
uint32 tcp_ack_num = tcp_header[8] << 24 | tcp_header[9] << 16 |
tcp_header[10] << 8 | tcp_header[11];
uint16 ip_tcp_ttllen = (ip_header[3] & 0xff) + (ip_header[2] << 8);
uint32 tcp_hdr_len = 4*((tcp_header[12] & 0xf0) >> 4);
DHD_TRACE(("dhd_tcpack_suppress: TCP ACK seq %ud ack %ud\n",
tcp_seq_num, tcp_ack_num));
/* zero length ? */
if (ip_tcp_ttllen == ip_hdr_len + tcp_hdr_len) {
int i;
tcp_ack_info_t *tcp_ack_info = NULL;
DHD_TRACE(("dhd_tcpack_suppress: TCP ACK zero length\n"));
/* Look for tcp_ack_info that has the same
* ip src/dst addrs and tcp src/dst ports
*/
dhd_os_tcpacklock(dhdp);
for (i = 0; i < dhdp->tcp_ack_info_cnt; i++) {
if (dhdp->tcp_ack_info_tbl[i].p_tcpackinqueue &&
!memcmp(&ip_header[12], dhdp->tcp_ack_info_tbl[i].ipaddrs, 8) &&
!memcmp(tcp_header, dhdp->tcp_ack_info_tbl[i].tcpports, 4)) {
tcp_ack_info = &dhdp->tcp_ack_info_tbl[i];
break;
}
}
if (i == dhdp->tcp_ack_info_cnt && i < MAXTCPSTREAMS)
tcp_ack_info = &dhdp->tcp_ack_info_tbl[dhdp->tcp_ack_info_cnt++];
if (!tcp_ack_info) {
DHD_TRACE(("dhd_tcpack_suppress: No empty tcp ack info"
"%d %d %d %d, %d %d %d %d\n",
tcp_header[0], tcp_header[1], tcp_header[2], tcp_header[3],
dhdp->tcp_ack_info_tbl[i].tcpports[0],
dhdp->tcp_ack_info_tbl[i].tcpports[1],
dhdp->tcp_ack_info_tbl[i].tcpports[2],
dhdp->tcp_ack_info_tbl[i].tcpports[3]));
dhd_os_tcpackunlock(dhdp);
return FALSE;
}
if (tcp_ack_info->p_tcpackinqueue) {
if (tcp_ack_num > tcp_ack_info->tcpack_number) {
void *prevpkt = tcp_ack_info->p_tcpackinqueue;
uint8 pushed_len = SDPCM_HDRLEN +
(BDC_HEADER_LEN - bdc_hdr_len) + wlfc_hdr_len;
#ifdef PROP_TXSTATUS
/* In case the previous pkt was delay-enqueued
* but not yet delay-dequeued, it may not have
* any additional header yet.
*/
dhd_os_wlfc_block(dhdp);
if (dhdp->wlfc_state && (PKTLEN(dhdp->osh, prevpkt) ==
tcp_ack_info->ip_tcp_ttllen + ETHER_HDR_LEN))
pushed_len = 0;
#endif
if ((ip_tcp_ttllen == tcp_ack_info->ip_tcp_ttllen) &&
(PKTLEN(dhdp->osh, pkt) ==
PKTLEN(dhdp->osh, prevpkt) - pushed_len)) {
bcopy(PKTDATA(dhdp->osh, pkt),
PKTDATA(dhdp->osh, prevpkt) + pushed_len,
PKTLEN(dhdp->osh, pkt));
PKTFREE(dhdp->osh, pkt, FALSE);
DHD_TRACE(("dhd_tcpack_suppress: pkt 0x%p"
" TCP ACK replace %ud -> %ud\n", prevpkt,
tcp_ack_info->tcpack_number, tcp_ack_num));
tcp_ack_info->tcpack_number = tcp_ack_num;
#ifdef PROP_TXSTATUS
dhd_os_wlfc_unblock(dhdp);
#endif
dhd_os_tcpackunlock(dhdp);
return TRUE;
} else
DHD_TRACE(("dhd_tcpack_suppress: len mismatch"
" %d(%d) %d(%d)\n",
PKTLEN(dhdp->osh, pkt), ip_tcp_ttllen,
PKTLEN(dhdp->osh, prevpkt),
tcp_ack_info->ip_tcp_ttllen));
#ifdef PROP_TXSTATUS
dhd_os_wlfc_unblock(dhdp);
#endif
} else {
#ifdef TCPACK_TEST
void *prevpkt = tcp_ack_info->p_tcpackinqueue;
#endif
DHD_TRACE(("dhd_tcpack_suppress: TCP ACK number reverse"
" prev %ud (0x%p) new %ud (0x%p)\n",
tcp_ack_info->tcpack_number,
tcp_ack_info->p_tcpackinqueue,
tcp_ack_num, pkt));
#ifdef TCPACK_TEST
if (PKTLEN(dhdp->osh, pkt) == PKTLEN(dhdp->osh, prevpkt)) {
PKTFREE(dhdp->osh, pkt, FALSE);
dhd_os_tcpackunlock(dhdp);
return TRUE;
}
#endif
}
} else {
tcp_ack_info->p_tcpackinqueue = pkt;
tcp_ack_info->tcpack_number = tcp_ack_num;
tcp_ack_info->ip_tcp_ttllen = ip_tcp_ttllen;
bcopy(&ip_header[12], tcp_ack_info->ipaddrs, 8);
bcopy(tcp_header, tcp_ack_info->tcpports, 4);
}
dhd_os_tcpackunlock(dhdp);
} else
DHD_TRACE(("dhd_tcpack_suppress: TCP ACK with DATA len %d\n",
ip_tcp_ttllen - ip_hdr_len - tcp_hdr_len));
}
return FALSE;
}
#endif /* DHDTCPACK_SUPPRESS */
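/*
* Worked example for the pure-ACK test in dhd_tcpack_suppress()
* (illustrative, header values assumed): an IPv4 header with IHL 5 gives
* ip_hdr_len = 20; a TCP header with data offset 8 gives tcp_hdr_len = 32.
* If the IP total length field reads 52, ip_tcp_ttllen equals
* ip_hdr_len + tcp_hdr_len, the segment carries no payload, and it is a
* candidate for suppression; any larger total length means the ACK is
* piggybacked on data and is always sent.
*/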
/* Writes a HW/SW header into the packet and sends it. */
/* Assumes: (a) header space already there, (b) caller holds lock */
static int
dhdsdio_txpkt(dhd_bus_t *bus, void *pkt, uint chan, bool free_pkt, bool queue_only)
{
int ret;
osl_t *osh;
uint8 *frame;
uint16 len, pad1 = 0, act_len = 0;
uint32 swheader;
uint retries = 0;
uint32 real_pad = 0;
bcmsdh_info_t *sdh;
void *new;
int i;
int pkt_cnt;
#ifdef BCMSDIOH_TXGLOM
uint8 *frame_tmp;
#endif
#ifdef WLMEDIA_HTSF
char *p;
htsfts_t *htsf_ts;
#endif
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
sdh = bus->sdh;
osh = bus->dhd->osh;
#ifdef DHDTCPACK_SUPPRESS
if (dhd_use_tcpack_suppress) {
dhd_tcpack_check_xmit(bus->dhd, pkt);
}
#endif /* DHDTCPACK_SUPPRESS */
if (bus->dhd->dongle_reset) {
ret = BCME_NOTREADY;
goto done;
}
frame = (uint8*)PKTDATA(osh, pkt);
#ifdef WLMEDIA_HTSF
if (PKTLEN(osh, pkt) >= 100) {
p = PKTDATA(osh, pkt);
htsf_ts = (htsfts_t*) (p + HTSF_HOSTOFFSET + 12);
if (htsf_ts->magic == HTSFMAGIC) {
htsf_ts->c20 = get_cycles();
htsf_ts->t20 = dhd_get_htsf(bus->dhd->info, 0);
}
}
#endif /* WLMEDIA_HTSF */
/* Add alignment padding, allocate new packet if needed */
if ((pad1 = ((uintptr)frame % DHD_SDALIGN))) {
if (PKTHEADROOM(osh, pkt) < pad1) {
DHD_INFO(("%s: insufficient headroom %d for %d pad1\n",
__FUNCTION__, (int)PKTHEADROOM(osh, pkt), pad1));
bus->dhd->tx_realloc++;
new = PKTGET(osh, (PKTLEN(osh, pkt) + DHD_SDALIGN), TRUE);
if (!new) {
DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n",
__FUNCTION__, PKTLEN(osh, pkt) + DHD_SDALIGN));
ret = BCME_NOMEM;
goto done;
}
PKTALIGN(osh, new, PKTLEN(osh, pkt), DHD_SDALIGN);
bcopy(PKTDATA(osh, pkt), PKTDATA(osh, new), PKTLEN(osh, pkt));
if (free_pkt)
PKTFREE(osh, pkt, TRUE);
/* free the pkt if canned one is not used */
free_pkt = TRUE;
pkt = new;
frame = (uint8*)PKTDATA(osh, pkt);
ASSERT(((uintptr)frame % DHD_SDALIGN) == 0);
pad1 = 0;
} else {
PKTPUSH(osh, pkt, pad1);
frame = (uint8*)PKTDATA(osh, pkt);
ASSERT((pad1 + SDPCM_HDRLEN) <= (int) PKTLEN(osh, pkt));
bzero(frame, pad1 + SDPCM_HDRLEN);
}
}
ASSERT(pad1 < DHD_SDALIGN);
/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
len = (uint16)PKTLEN(osh, pkt);
*(uint16*)frame = htol16(len);
*(((uint16*)frame) + 1) = htol16(~len);
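/* e.g. (illustrative) len 0x01F4 is stored as F4 01, ~len (0xFE0B) as 0B FE */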
#ifdef BCMSDIOH_TXGLOM
if (bus->glom_enable) {
uint32 hwheader1 = 0, hwheader2 = 0;
act_len = len;
/* Software tag: channel, sequence number, data offset */
swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) |
((bus->tx_seq + bus->glom_cnt) % SDPCM_SEQUENCE_WRAP) |
(((pad1 + SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN);
htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN + sizeof(swheader));
if (queue_only) {
uint8 alignment = ALIGNMENT;
#if defined(BCMLXSDMMC) && defined(CUSTOMER_HW4)
if (bus->glom_mode == SDPCM_TXGLOM_MDESC)
alignment = DHD_SDALIGN;
#endif /* defined(BCMLXSDMMC) && defined(CUSTOMER_HW4) */
if (forcealign && (len & (alignment - 1)))
len = ROUNDUP(len, alignment);
/* Hardware extension tag */
/* 2 byte frame length, 1 byte unused, 1 byte frame flag,
* 2 byte hdr length, 2 byte pad length
*/
hwheader1 = (act_len - SDPCM_FRAMETAG_LEN) | (0 << 24);
hwheader2 = (len - act_len) << 16;
htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
real_pad = len - act_len;
if (PKTTAILROOM(osh, pkt) < real_pad) {
DHD_INFO(("%s 1: insufficient tailroom %d for %d real_pad\n",
__FUNCTION__, (int)PKTTAILROOM(osh, pkt), real_pad));
if (PKTPADTAILROOM(osh, pkt, real_pad)) {
DHD_ERROR(("CHK1: padding error size %d\n", real_pad));
ret = BCME_NOMEM;
goto done;
}
#ifndef BCMLXSDMMC
else
PKTSETLEN(osh, pkt, act_len);
#endif
}
#ifdef BCMLXSDMMC
PKTSETLEN(osh, pkt, len);
#endif /* BCMLXSDMMC */
/* Post the frame pointer to sdio glom array */
dhd_bcmsdh_glom_post(bus, frame, pkt, len);
/* Save the pkt pointer in bus glom array */
bus->glom_pkt_arr[bus->glom_cnt] = pkt;
bus->glom_total_len += len;
bus->glom_cnt++;
return BCME_OK;
} else {
/* Raise len to next SDIO block to eliminate tail command */
if (bus->roundup && bus->blocksize &&
((bus->glom_total_len + len) > bus->blocksize)) {
uint16 pad2 = bus->blocksize -
((bus->glom_total_len + len) % bus->blocksize);
if ((pad2 <= bus->roundup) && (pad2 < bus->blocksize))
len += pad2;
} else if ((bus->glom_total_len + len) % DHD_SDALIGN) {
len += DHD_SDALIGN
- ((bus->glom_total_len + len) % DHD_SDALIGN);
}
if (forcealign && (len & (ALIGNMENT - 1))) {
len = ROUNDUP(len, ALIGNMENT);
}
/* Hardware extension tag */
/* 2 byte frame length, 1 byte unused, 1 byte frame flag,
* 2 byte hdr length, 2 byte pad length
*/
hwheader1 = (act_len - SDPCM_FRAMETAG_LEN) | (1 << 24);
hwheader2 = (len - act_len) << 16;
htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
real_pad = len - act_len;
if (PKTTAILROOM(osh, pkt) < real_pad) {
DHD_INFO(("%s 2: insufficient tailroom %d"
" for %d real_pad\n",
__FUNCTION__, (int)PKTTAILROOM(osh, pkt), real_pad));
if (PKTPADTAILROOM(osh, pkt, real_pad)) {
DHD_ERROR(("CHK2: padding error size %d."
" %d more pkts are discarded together.\n",
real_pad, bus->glom_cnt));
/* Save the pkt pointer in bus glom array
* Otherwise, this last pkt will not be
* cleaned under "goto done"
*/
bus->glom_pkt_arr[bus->glom_cnt] = pkt;
bus->glom_cnt++;
bus->glom_total_len += len;
ret = BCME_NOMEM;
goto done;
}
#ifndef BCMLXSDMMC
else
PKTSETLEN(osh, pkt, act_len);
#endif
}
#ifdef BCMLXSDMMC
PKTSETLEN(osh, pkt, len);
#endif /* BCMLXSDMMC */
/* Post the frame pointer to sdio glom array */
dhd_bcmsdh_glom_post(bus, frame, pkt, len);
/* Save the pkt pointer in bus glom array */
bus->glom_pkt_arr[bus->glom_cnt] = pkt;
bus->glom_cnt++;
bus->glom_total_len += len;
/* Update the total length on the first pkt */
frame_tmp = (uint8*)PKTDATA(osh, bus->glom_pkt_arr[0]);
*(uint16*)frame_tmp = htol16(bus->glom_total_len);
*(((uint16*)frame_tmp) + 1) = htol16(~bus->glom_total_len);
}
} else
#endif /* BCMSDIOH_TXGLOM */
{
act_len = len;
/* Software tag: channel, sequence number, data offset */
swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
(((pad1 + SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
#ifdef DHD_DEBUG
if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets)) {
tx_packets[PKTPRIO(pkt)]++;
}
if (DHD_BYTES_ON() &&
(((DHD_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) ||
(DHD_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) {
prhex("Tx Frame", frame, len);
} else if (DHD_HDRS_ON()) {
prhex("TxHdr", frame, MIN(len, 16));
}
#endif
#ifndef BCMSPI
/* Raise len to next SDIO block to eliminate tail command */
if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
uint16 pad2 = bus->blocksize - (len % bus->blocksize);
if ((pad2 <= bus->roundup) && (pad2 < bus->blocksize))
#ifdef NOTUSED
if (pad2 <= PKTTAILROOM(osh, pkt))
#endif /* NOTUSED */
len += pad2;
} else if (len % DHD_SDALIGN) {
len += DHD_SDALIGN - (len % DHD_SDALIGN);
}
#endif /* BCMSPI */
/* Some controllers have trouble with odd bytes -- round to even */
if (forcealign && (len & (ALIGNMENT - 1))) {
#ifdef NOTUSED
if (PKTTAILROOM(osh, pkt))
#endif
len = ROUNDUP(len, ALIGNMENT);
#ifdef NOTUSED
else
DHD_ERROR(("%s: sending unrounded %d-byte packet\n", __FUNCTION__, len));
#endif
}
real_pad = len - act_len;
if (PKTTAILROOM(osh, pkt) < real_pad) {
DHD_INFO(("%s 3: insufficient tailroom %d for %d real_pad\n",
__FUNCTION__, (int)PKTTAILROOM(osh, pkt), real_pad));
if (PKTPADTAILROOM(osh, pkt, real_pad)) {
DHD_ERROR(("CHK3: padding error size %d\n", real_pad));
ret = BCME_NOMEM;
goto done;
}
#ifndef BCMLXSDMMC
else
PKTSETLEN(osh, pkt, act_len);
#endif
}
#ifdef BCMLXSDMMC
PKTSETLEN(osh, pkt, len);
#endif /* BCMLXSDMMC */
}
do {
ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
frame, len, pkt, NULL, NULL);
bus->f2txdata++;
ASSERT(ret != BCME_PENDING);
if (ret == BCME_NODEVICE) {
DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__));
} else if (ret < 0) {
/* On failure, abort the command and terminate the frame */
DHD_ERROR(("%s: sdio error %d, abort command and terminate frame.\n",
__FUNCTION__, ret));
bus->tx_sderrs++;
bcmsdh_abort(sdh, SDIO_FUNC_2);
#ifdef BCMSPI
DHD_ERROR(("%s: gSPI transmit error. Check Overflow or F2-fifo-not-ready"
" counters.\n", __FUNCTION__));
#endif /* BCMSPI */
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
SFC_WF_TERM, NULL);
bus->f1regdata++;
for (i = 0; i < 3; i++) {
uint8 hi, lo;
hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
SBSDIO_FUNC1_WFRAMEBCHI, NULL);
lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
SBSDIO_FUNC1_WFRAMEBCLO, NULL);
bus->f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
}
}
if (ret == 0) {
#ifdef BCMSDIOH_TXGLOM
if (bus->glom_enable) {
bus->tx_seq = (bus->tx_seq + bus->glom_cnt) % SDPCM_SEQUENCE_WRAP;
} else
#endif
{
bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
}
}
} while ((ret < 0) && retrydata && retries++ < TXRETRIES);
done:
#ifdef BCMSDIOH_TXGLOM
if (bus->glom_enable && !queue_only) {
dhd_bcmsdh_glom_clear(bus);
pkt_cnt = bus->glom_cnt;
} else
#endif
{
pkt_cnt = 1;
}
/* restore pkt buffer pointer before calling tx complete routine */
while (pkt_cnt) {
#ifdef BCMSDIOH_TXGLOM
uint32 doff;
if (bus->glom_enable) {
#ifdef BCMLXSDMMC
uint32 pad2 = 0;
#endif /* BCMLXSDMMC */
if (!queue_only)
pkt = bus->glom_pkt_arr[bus->glom_cnt - pkt_cnt];
frame = (uint8*)PKTDATA(osh, pkt);
doff = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN);
doff = (doff & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT;
#ifdef BCMLXSDMMC
pad2 = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + 4) >> 16;
PKTSETLEN(osh, pkt, PKTLEN(osh, pkt) - pad2);
#endif /* BCMLXSDMMC */
PKTPULL(osh, pkt, doff);
} else
#endif /* BCMSDIOH_TXGLOM */
{
#ifdef BCMLXSDMMC
if (act_len > 0)
PKTSETLEN(osh, pkt, act_len);
#endif /* BCMLXSDMMC */
PKTPULL(osh, pkt, SDPCM_HDRLEN + pad1);
}
#ifdef PROP_TXSTATUS
if (bus->dhd->wlfc_state) {
dhd_os_sdunlock(bus->dhd);
dhd_wlfc_txcomplete(bus->dhd, pkt, ret == 0);
dhd_os_sdlock(bus->dhd);
} else {
#endif /* PROP_TXSTATUS */
#ifdef SDTEST
if (chan != SDPCM_TEST_CHANNEL) {
dhd_txcomplete(bus->dhd, pkt, ret != 0);
}
#else /* SDTEST */
dhd_txcomplete(bus->dhd, pkt, ret != 0);
#endif /* SDTEST */
if (free_pkt)
PKTFREE(osh, pkt, TRUE);
#ifdef PROP_TXSTATUS
}
#endif
pkt_cnt--;
}
#ifdef BCMSDIOH_TXGLOM
/* Reset the glom array */
if (bus->glom_enable && !queue_only) {
bus->glom_cnt = 0;
bus->glom_total_len = 0;
}
#endif
return ret;
}
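/*
* Frame layout recap (illustrative, from the code above): each SDPCM
* frame written by dhdsdio_txpkt() starts with a little-endian hardware
* tag, [2B len][2B ~len], followed (in tx-glom mode) by the 8-byte
* hardware extension tag, then the software tag,
* [4B swheader: channel | seq | doffset][4B zero],
* and finally the doffset-aligned payload plus optional tail padding up
* to the SDIO block size.
*/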
int
dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
{
int ret = BCME_ERROR;
osl_t *osh;
uint datalen, prec;
#if defined(DHD_TX_DUMP) || defined(DHD_8021X_DUMP)
uint8 *dump_data;
uint16 protocol;
#endif /* DHD_TX_DUMP || DHD_8021X_DUMP */
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
osh = bus->dhd->osh;
datalen = PKTLEN(osh, pkt);
#ifdef SDTEST
/* Push the test header if doing loopback */
if (bus->ext_loop) {
uint8* data;
PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN);
data = PKTDATA(osh, pkt);
*data++ = SDPCM_TEST_ECHOREQ;
*data++ = (uint8)bus->loopid++;
*data++ = (datalen >> 0);
*data++ = (datalen >> 8);
datalen += SDPCM_TEST_HDRLEN;
}
#endif /* SDTEST */
#if defined(DHD_TX_DUMP) || defined(DHD_8021X_DUMP)
dump_data = PKTDATA(osh, pkt);
dump_data += 4; /* skip the 4-byte header */
protocol = (dump_data[12] << 8) | dump_data[13];
if (protocol == ETHER_TYPE_802_1X) {
DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n",
dump_data[14], dump_data[15], dump_data[30]));
}
#endif /* DHD_TX_DUMP || DHD_8021X_DUMP */
#if defined(DHD_TX_DUMP) && defined(DHD_TX_FULL_DUMP)
{
int i;
DHD_ERROR(("TX DUMP\n"));
for (i = 0; i < (datalen - 4); i++) {
DHD_ERROR(("%02X ", dump_data[i]));
if ((i & 15) == 15)
printk("\n");
}
DHD_ERROR(("\n"));
}
#endif /* DHD_TX_DUMP && DHD_TX_FULL_DUMP */
/* Add space for the header */
PKTPUSH(osh, pkt, SDPCM_HDRLEN);
ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2));
prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
#ifndef DHDTHREAD
/* Lock: we're about to use shared data/code (and SDIO) */
dhd_os_sdlock(bus->dhd);
#endif /* DHDTHREAD */
/* Check for existing queue, current flow-control, pending event, or pending clock */
if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq) || bus->dpc_sched ||
(!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) ||
(bus->clkstate != CLK_AVAIL)) {
DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__,
pktq_len(&bus->txq)));
bus->fcqueued++;
/* Priority based enq */
dhd_os_sdlock_txq(bus->dhd);
if (dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec) == FALSE) {
PKTPULL(osh, pkt, SDPCM_HDRLEN);
#ifndef DHDTHREAD
/* Release txqlock before releasing sdlock: this thread
* still holds txqlock here, and a deadlock occurs if dpc()
* grabs sdlock first and then attempts to grab txqlock.
*/
dhd_os_sdunlock_txq(bus->dhd);
dhd_os_sdunlock(bus->dhd);
#endif
#ifdef PROP_TXSTATUS
if (bus->dhd->wlfc_state)
dhd_wlfc_txcomplete(bus->dhd, pkt, FALSE);
else
#endif
dhd_txcomplete(bus->dhd, pkt, FALSE);
#ifndef DHDTHREAD
dhd_os_sdlock(bus->dhd);
dhd_os_sdlock_txq(bus->dhd);
#endif
#ifdef PROP_TXSTATUS
/* let the caller decide whether to free the packet */
if (!bus->dhd->wlfc_state)
#endif
PKTFREE(osh, pkt, TRUE);
ret = BCME_NORESOURCE;
}
else
ret = BCME_OK;
if ((pktq_len(&bus->txq) >= FCHI) && dhd_doflow)
dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
#ifdef DHD_DEBUG
if (pktq_plen(&bus->txq, prec) > qcount[prec])
qcount[prec] = pktq_plen(&bus->txq, prec);
#endif
dhd_os_sdunlock_txq(bus->dhd);
/* Schedule DPC if needed to send queued packet(s) */
if (dhd_deferred_tx && !bus->dpc_sched) {
bus->dpc_sched = TRUE;
dhd_sched_dpc(bus->dhd);
}
} else {
#ifdef DHDTHREAD
/* Lock: we're about to use shared data/code (and SDIO) */
dhd_os_sdlock(bus->dhd);
#endif /* DHDTHREAD */
/* Otherwise, send it now */
BUS_WAKE(bus);
/* Make sure backplane HT clock is on; no pending allowed */
dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
#ifndef SDTEST
ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE, FALSE);
#else
ret = dhdsdio_txpkt(bus, pkt,
(bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), TRUE, FALSE);
#endif
if (ret)
bus->dhd->tx_errors++;
else
bus->dhd->dstats.tx_bytes += datalen;
if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
bus->activity = FALSE;
dhdsdio_clkctl(bus, CLK_NONE, TRUE);
}
#ifdef DHDTHREAD
dhd_os_sdunlock(bus->dhd);
#endif /* DHDTHREAD */
}
#ifndef DHDTHREAD
dhd_os_sdunlock(bus->dhd);
#endif /* DHDTHREAD */
return ret;
}
static uint
dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
{
void *pkt;
uint32 intstatus = 0;
uint retries = 0;
int ret = 0, prec_out;
uint cnt = 0;
uint datalen;
uint8 tx_prec_map;
uint16 txpktqlen = 0;
#ifdef BCMSDIOH_TXGLOM
uint i;
uint8 glom_cnt;
#endif
dhd_pub_t *dhd = bus->dhd;
sdpcmd_regs_t *regs = bus->regs;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
if (!KSO_ENAB(bus)) {
DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
return BCME_NODEVICE;
}
tx_prec_map = ~bus->flowcontrol;
/* Send frames until the limit or some other event */
for (cnt = 0; (cnt < maxframes) && DATAOK(bus); cnt++) {
#ifdef BCMSDIOH_TXGLOM
if (bus->glom_enable) {
void *pkttable[SDPCM_MAXGLOM_SIZE];
dhd_os_sdlock_txq(bus->dhd);
glom_cnt = MIN(DATABUFCNT(bus), bus->glomsize);
glom_cnt = MIN(glom_cnt, pktq_mlen(&bus->txq, tx_prec_map));
glom_cnt = MIN(glom_cnt, maxframes-cnt);
/* Limiting the size to 2pkts in case of copy */
if (bus->glom_mode == SDPCM_TXGLOM_CPY)
glom_cnt = MIN(glom_cnt, 10);
for (i = 0; i < glom_cnt; i++)
pkttable[i] = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
txpktqlen = pktq_len(&bus->txq);
dhd_os_sdunlock_txq(bus->dhd);
if (glom_cnt == 0)
break;
datalen = 0;
for (i = 0; i < glom_cnt; i++) {
uint datalen_tmp = 0;
if ((pkt = pkttable[i]) == NULL) {
/* This case should not happen */
DHD_ERROR(("No pkts in the queue for glomming\n"));
break;
}
datalen_tmp = (PKTLEN(bus->dhd->osh, pkt) - SDPCM_HDRLEN);
#ifndef SDTEST
ret = dhdsdio_txpkt(bus,
pkt,
SDPCM_DATA_CHANNEL,
TRUE,
(i == (glom_cnt-1))? FALSE: TRUE);
#else
ret = dhdsdio_txpkt(bus,
pkt,
(bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL),
TRUE,
(i == (glom_cnt-1))? FALSE: TRUE);
#endif
if (ret == BCME_OK)
datalen += datalen_tmp;
}
cnt += i-1;
} else
#endif /* BCMSDIOH_TXGLOM */
{
dhd_os_sdlock_txq(bus->dhd);
if ((pkt = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out)) == NULL) {
txpktqlen = pktq_len(&bus->txq);
dhd_os_sdunlock_txq(bus->dhd);
break;
}
txpktqlen = pktq_len(&bus->txq);
dhd_os_sdunlock_txq(bus->dhd);
datalen = PKTLEN(bus->dhd->osh, pkt) - SDPCM_HDRLEN;
#ifndef SDTEST
ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE, FALSE);
#else
ret = dhdsdio_txpkt(bus,
pkt,
(bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL),
TRUE,
FALSE);
#endif
}
if (ret)
bus->dhd->tx_errors++;
else
bus->dhd->dstats.tx_bytes += datalen;
/* In poll mode, need to check for other events */
if (!bus->intr && cnt)
{
/* Check device status, signal pending interrupt */
R_SDREG(intstatus, &regs->intstatus, retries);
bus->f2txdata++;
if (bcmsdh_regfail(bus->sdh))
break;
if (intstatus & bus->hostintmask)
bus->ipend = TRUE;
}
}
/* Deflow-control stack if needed */
if (dhd_doflow && dhd->up && (dhd->busstate == DHD_BUS_DATA) &&
dhd->txoff && (txpktqlen < FCLOW))
dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
return cnt;
}
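/*
 * Send the control frame cached in bus->ctrl_frame_buf (queued earlier,
 * e.g. when dhd_bus_txctl() had no tx credit). The sequence number in the
 * cached frame is corrected if it lags bus->tx_seq, the frame is pushed
 * over SDIO function 2 with abort/terminate recovery on error, and any
 * thread waiting on ctrl_frame_stat is woken.
 */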
static void
dhdsdio_sendpendctl(dhd_bus_t *bus)
{
bcmsdh_info_t *sdh = bus->sdh;
int ret, i;
uint8* frame_seq = bus->ctrl_frame_buf + SDPCM_FRAMETAG_LEN;
#ifdef BCMSDIOH_TXGLOM
if (bus->glom_enable)
frame_seq += SDPCM_HWEXT_LEN;
#endif
if (*frame_seq != bus->tx_seq) {
DHD_INFO(("%s IOCTL frame seq lag detected!"
" frm_seq:%d != bus->tx_seq:%d, corrected\n",
__FUNCTION__, *frame_seq, bus->tx_seq));
*frame_seq = bus->tx_seq;
}
ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
(uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len,
NULL, NULL, NULL);
ASSERT(ret != BCME_PENDING);
if (ret == BCME_NODEVICE) {
DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__));
} else if (ret < 0) {
/* On failure, abort the command and terminate the frame */
DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
__FUNCTION__, ret));
bus->tx_sderrs++;
bcmsdh_abort(sdh, SDIO_FUNC_2);
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
SFC_WF_TERM, NULL);
bus->f1regdata++;
for (i = 0; i < 3; i++) {
uint8 hi, lo;
hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
SBSDIO_FUNC1_WFRAMEBCHI, NULL);
lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
SBSDIO_FUNC1_WFRAMEBCLO, NULL);
bus->f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
}
}
if (ret == 0) {
bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
}
bus->ctrl_frame_stat = FALSE;
dhd_wait_event_wakeup(bus->dhd);
}
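/*
 * Transmit a control message to the dongle. The caller must leave
 * SDPCM_HDRLEN bytes of headroom in front of 'msg'; this routine backs
 * the pointer up, adds optional alignment padding, builds the SDPCM
 * hardware and software headers, and sends the frame on SDIO function 2,
 * retrying up to TXRETRIES times. If no tx credit is available the frame
 * is handed to the DPC via bus->ctrl_frame_buf and we wait for it.
 */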
int
dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen)
{
#ifdef CUSTOMER_HW10
#define ERROR_BCME_NODEVICE_MAX 5
static int err_nodevice = 0;
#endif
uint8 *frame;
uint16 len;
uint32 swheader;
uint retries = 0;
bcmsdh_info_t *sdh = bus->sdh;
uint8 doff = 0;
int ret = -1;
int i;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
if (bus->dhd->dongle_reset)
return -EIO;
/* Back up the pointer to make room for the bus header */
frame = msg - SDPCM_HDRLEN;
len = (msglen += SDPCM_HDRLEN);
/* Add alignment padding (optional for ctl frames) */
if (dhd_alignctl) {
if ((doff = ((uintptr)frame % DHD_SDALIGN))) {
frame -= doff;
len += doff;
msglen += doff;
bzero(frame, doff + SDPCM_HDRLEN);
}
ASSERT(doff < DHD_SDALIGN);
}
doff += SDPCM_HDRLEN;
#ifndef BCMSPI
/* Round send length to next SDIO block */
if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
uint16 pad = bus->blocksize - (len % bus->blocksize);
if ((pad <= bus->roundup) && (pad < bus->blocksize))
len += pad;
} else if (len % DHD_SDALIGN) {
len += DHD_SDALIGN - (len % DHD_SDALIGN);
}
#endif /* BCMSPI */
/* Satisfy length-alignment requirements */
if (forcealign && (len & (ALIGNMENT - 1)))
len = ROUNDUP(len, ALIGNMENT);
ASSERT(ISALIGNED((uintptr)frame, 2));
/* Need to lock here to protect txseq and SDIO tx calls */
dhd_os_sdlock(bus->dhd);
BUS_WAKE(bus);
/* Make sure backplane clock is on */
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
*(uint16*)frame = htol16((uint16)msglen);
*(((uint16*)frame) + 1) = htol16(~msglen);
#ifdef BCMSDIOH_TXGLOM
if (bus->glom_enable) {
uint32 hwheader1, hwheader2;
/* Software tag: channel, sequence number, data offset */
swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
| bus->tx_seq
| ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN);
htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN
+ SDPCM_HWEXT_LEN + sizeof(swheader));
hwheader1 = (msglen - SDPCM_FRAMETAG_LEN) | (1 << 24);
hwheader2 = (len - (msglen)) << 16;
htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
*(uint16*)frame = htol16(len);
*(((uint16*)frame) + 1) = htol16(~(len));
} else
#endif /* BCMSDIOH_TXGLOM */
{
/* Software tag: channel, sequence number, data offset */
swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
| bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
}
if (!TXCTLOK(bus)) {
DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n",
__FUNCTION__, bus->tx_max, bus->tx_seq));
bus->ctrl_frame_stat = TRUE;
/* Send from dpc */
bus->ctrl_frame_buf = frame;
bus->ctrl_frame_len = len;
if (!bus->dpc_sched) {
bus->dpc_sched = TRUE;
dhd_sched_dpc(bus->dhd);
}
if (bus->ctrl_frame_stat) {
dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat);
}
if (bus->ctrl_frame_stat == FALSE) {
DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__));
ret = 0;
} else {
bus->dhd->txcnt_timeout++;
if (!bus->dhd->hang_was_sent) {
#ifdef CUSTOMER_HW4
uint32 status, retry = 0;
R_SDREG(status, &bus->regs->intstatus, retry);
DHD_TRACE_HW4(("%s: txcnt_timeout, INT status=0x%08X\n",
__FUNCTION__, status));
DHD_TRACE_HW4(("%s : tx_max : %d, tx_seq : %d, clkstate : %d \n",
__FUNCTION__, bus->tx_max, bus->tx_seq, bus->clkstate));
#endif /* CUSTOMER_HW4 */
DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n",
__FUNCTION__, bus->dhd->txcnt_timeout));
}
ret = -1;
bus->ctrl_frame_stat = FALSE;
goto done;
}
}
bus->dhd->txcnt_timeout = 0;
bus->ctrl_frame_stat = TRUE;
if (ret == -1) {
#ifdef DHD_DEBUG
if (DHD_BYTES_ON() && DHD_CTL_ON()) {
prhex("Tx Frame", frame, len);
} else if (DHD_HDRS_ON()) {
prhex("TxHdr", frame, MIN(len, 16));
}
#endif
do {
ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
frame, len, NULL, NULL, NULL);
ASSERT(ret != BCME_PENDING);
if (ret == BCME_NODEVICE) {
DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__));
} else if (ret < 0) {
/* On failure, abort the command and terminate the frame */
DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
__FUNCTION__, ret));
bus->tx_sderrs++;
bcmsdh_abort(sdh, SDIO_FUNC_2);
#ifdef BCMSPI
DHD_ERROR(("%s: Check Overflow or F2-fifo-not-ready counters."
" gSPI transmit error on control channel.\n",
__FUNCTION__));
#endif /* BCMSPI */
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
SFC_WF_TERM, NULL);
bus->f1regdata++;
for (i = 0; i < 3; i++) {
uint8 hi, lo;
hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
SBSDIO_FUNC1_WFRAMEBCHI, NULL);
lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
SBSDIO_FUNC1_WFRAMEBCLO, NULL);
bus->f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
}
}
if (ret == 0) {
bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
}
} while ((ret < 0) && retries++ < TXRETRIES);
}
bus->ctrl_frame_stat = FALSE;
done:
if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
bus->activity = FALSE;
dhdsdio_clkctl(bus, CLK_NONE, TRUE);
}
dhd_os_sdunlock(bus->dhd);
if (ret)
bus->dhd->tx_ctlerrs++;
else
bus->dhd->tx_ctlpkts++;
if (bus->dhd->txcnt_timeout >= MAX_CNTL_TX_TIMEOUT)
return -ETIMEDOUT;
#ifdef CUSTOMER_HW10
if (ret == BCME_NODEVICE)
err_nodevice++;
else
err_nodevice = 0;
return ret ? ((err_nodevice > ERROR_BCME_NODEVICE_MAX) ? -ETIMEDOUT : -EIO) : 0;
#endif
return ret ? -EIO : 0;
}
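/*
 * Receive a control response from the dongle: block (with timeout) until
 * the rx path deposits a control frame in bus->rxctl, then copy up to
 * 'msglen' bytes into 'msg'. On timeout, dhdsdio_checkdied() is invoked
 * (DHD_DEBUG builds) to report a possible firmware trap or assert.
 */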
int
dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen)
{
int timeleft;
uint rxlen = 0;
bool pending;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
if (bus->dhd->dongle_reset)
return -EIO;
/* Wait until control frame is available */
timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending);
dhd_os_sdlock(bus->dhd);
rxlen = bus->rxlen;
bcopy(bus->rxctl, msg, MIN(msglen, rxlen));
bus->rxlen = 0;
dhd_os_sdunlock(bus->dhd);
if (rxlen) {
DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n",
__FUNCTION__, rxlen, msglen));
} else if (timeleft == 0) {
#ifdef DHD_DEBUG
uint32 status, retry = 0;
R_SDREG(status, &bus->regs->intstatus, retry);
DHD_ERROR(("%s: resumed on timeout, INT status=0x%08X\n",
__FUNCTION__, status));
#else
DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
#endif /* DHD_DEBUG */
#ifdef DHD_DEBUG
dhd_os_sdlock(bus->dhd);
dhdsdio_checkdied(bus, NULL, 0);
dhd_os_sdunlock(bus->dhd);
#endif /* DHD_DEBUG */
} else if (pending == TRUE) {
/* signal pending */
DHD_ERROR(("%s: signal pending\n", __FUNCTION__));
return -EINTR;
} else {
DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
#ifdef DHD_DEBUG
dhd_os_sdlock(bus->dhd);
dhdsdio_checkdied(bus, NULL, 0);
dhd_os_sdunlock(bus->dhd);
#endif /* DHD_DEBUG */
}
if (timeleft == 0) {
if (rxlen == 0)
bus->dhd->rxcnt_timeout++;
DHD_ERROR(("%s: rxcnt_timeout=%d, rxlen=%d\n", __FUNCTION__,
bus->dhd->rxcnt_timeout, rxlen));
}
else
bus->dhd->rxcnt_timeout = 0;
if (rxlen)
bus->dhd->rx_ctlpkts++;
else
bus->dhd->rx_ctlerrs++;
if (bus->dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT)
return -ETIMEDOUT;
if (bus->dhd->dongle_trap_occured)
return -EREMOTEIO;
return rxlen ? (int)rxlen : -EIO;
}
/* IOVar table */
enum {
IOV_INTR = 1,
IOV_POLLRATE,
IOV_SDREG,
IOV_SBREG,
IOV_SDCIS,
IOV_MEMBYTES,
IOV_RAMSIZE,
IOV_RAMSTART,
#ifdef DHD_DEBUG
IOV_CHECKDIED,
IOV_SERIALCONS,
#endif /* DHD_DEBUG */
IOV_SET_DOWNLOAD_STATE,
IOV_SOCRAM_STATE,
IOV_FORCEEVEN,
IOV_SDIOD_DRIVE,
IOV_READAHEAD,
IOV_SDRXCHAIN,
IOV_ALIGNCTL,
IOV_SDALIGN,
IOV_DEVRESET,
IOV_CPU,
#if defined(SDIO_CRC_ERROR_FIX)
IOV_WATERMARK,
IOV_MESBUSYCTRL,
#endif /* SDIO_CRC_ERROR_FIX */
#ifdef SDTEST
IOV_PKTGEN,
IOV_EXTLOOP,
#endif /* SDTEST */
IOV_SPROM,
IOV_TXBOUND,
IOV_RXBOUND,
IOV_TXMINMAX,
IOV_IDLETIME,
IOV_IDLECLOCK,
IOV_SD1IDLE,
IOV_SLEEP,
IOV_DONGLEISOLATION,
IOV_KSO,
IOV_DEVSLEEP,
IOV_DEVCAP,
IOV_VARS,
#ifdef SOFTAP
IOV_FWPATH,
#endif
IOV_TXGLOMSIZE,
IOV_TXGLOMMODE,
IOV_HANGREPORT
};
const bcm_iovar_t dhdsdio_iovars[] = {
{"intr", IOV_INTR, 0, IOVT_BOOL, 0 },
{"sleep", IOV_SLEEP, 0, IOVT_BOOL, 0 },
{"pollrate", IOV_POLLRATE, 0, IOVT_UINT32, 0 },
{"idletime", IOV_IDLETIME, 0, IOVT_INT32, 0 },
{"idleclock", IOV_IDLECLOCK, 0, IOVT_INT32, 0 },
{"sd1idle", IOV_SD1IDLE, 0, IOVT_BOOL, 0 },
{"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int) },
{"ramsize", IOV_RAMSIZE, 0, IOVT_UINT32, 0 },
{"ramstart", IOV_RAMSTART, 0, IOVT_UINT32, 0 },
{"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, IOVT_BOOL, 0 },
{"socram_state", IOV_SOCRAM_STATE, 0, IOVT_BOOL, 0 },
{"vars", IOV_VARS, 0, IOVT_BUFFER, 0 },
{"sdiod_drive", IOV_SDIOD_DRIVE, 0, IOVT_UINT32, 0 },
{"readahead", IOV_READAHEAD, 0, IOVT_BOOL, 0 },
{"sdrxchain", IOV_SDRXCHAIN, 0, IOVT_BOOL, 0 },
{"alignctl", IOV_ALIGNCTL, 0, IOVT_BOOL, 0 },
{"sdalign", IOV_SDALIGN, 0, IOVT_BOOL, 0 },
{"devreset", IOV_DEVRESET, 0, IOVT_BOOL, 0 },
#ifdef DHD_DEBUG
{"sdreg", IOV_SDREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
{"sbreg", IOV_SBREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
{"sd_cis", IOV_SDCIS, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN },
{"forcealign", IOV_FORCEEVEN, 0, IOVT_BOOL, 0 },
{"txbound", IOV_TXBOUND, 0, IOVT_UINT32, 0 },
{"rxbound", IOV_RXBOUND, 0, IOVT_UINT32, 0 },
{"txminmax", IOV_TXMINMAX, 0, IOVT_UINT32, 0 },
{"cpu", IOV_CPU, 0, IOVT_BOOL, 0 },
#ifdef DHD_DEBUG
{"checkdied", IOV_CHECKDIED, 0, IOVT_BUFFER, 0 },
{"serial", IOV_SERIALCONS, 0, IOVT_UINT32, 0 },
#endif /* DHD_DEBUG */
#endif /* DHD_DEBUG */
#ifdef SDTEST
{"extloop", IOV_EXTLOOP, 0, IOVT_BOOL, 0 },
{"pktgen", IOV_PKTGEN, 0, IOVT_BUFFER, sizeof(dhd_pktgen_t) },
#endif /* SDTEST */
#if defined(SDIO_CRC_ERROR_FIX)
{"watermark", IOV_WATERMARK, 0, IOVT_UINT32, 0 },
{"mesbusyctrl", IOV_MESBUSYCTRL, 0, IOVT_UINT32, 0 },
#endif /* SDIO_CRC_ERROR_FIX */
{"devcap", IOV_DEVCAP, 0, IOVT_UINT32, 0 },
{"dngl_isolation", IOV_DONGLEISOLATION, 0, IOVT_UINT32, 0 },
{"kso", IOV_KSO, 0, IOVT_UINT32, 0 },
{"devsleep", IOV_DEVSLEEP, 0, IOVT_UINT32, 0 },
#ifdef SOFTAP
{"fwpath", IOV_FWPATH, 0, IOVT_BUFFER, 0 },
#endif
{"txglomsize", IOV_TXGLOMSIZE, 0, IOVT_UINT32, 0 },
{"txglommode", IOV_TXGLOMMODE, 0, IOVT_UINT32, 0 },
{"fw_hang_report", IOV_HANGREPORT, 0, IOVT_BOOL, 0 },
{NULL, 0, 0, 0, 0 }
};
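/*
 * Print the ratio num/div to two decimal places using integer math only;
 * prints "N/A" when the divisor is zero.
 */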
static void
dhd_dump_pct(struct bcmstrbuf *strbuf, char *desc, uint num, uint div)
{
uint q1, q2;
if (!div) {
bcm_bprintf(strbuf, "%s N/A", desc);
} else {
q1 = num / div;
q2 = (100 * (num - (q1 * div))) / div;
bcm_bprintf(strbuf, "%s %d.%02d", desc, q1, q2);
}
}
void
dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
dhd_bus_t *bus = dhdp->bus;
bcm_bprintf(strbuf, "Bus SDIO structure:\n");
bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n",
bus->hostintmask, bus->intstatus, bus->sdpcm_ver);
bcm_bprintf(strbuf, "fcstate %d qlen %u tx_seq %d, max %d, rxskip %d rxlen %u rx_seq %d\n",
bus->fcstate, pktq_len(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip,
bus->rxlen, bus->rx_seq);
bcm_bprintf(strbuf, "intr %d intrcount %u lastintrs %u spurious %u\n",
bus->intr, bus->intrcount, bus->lastintrs, bus->spurious);
bcm_bprintf(strbuf, "pollrate %u pollcnt %u regfails %u\n",
bus->pollrate, bus->pollcnt, bus->regfails);
bcm_bprintf(strbuf, "\nAdditional counters:\n");
bcm_bprintf(strbuf, "tx_sderrs %u fcqueued %u rxrtx %u rx_toolong %u rxc_errors %u\n",
bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong,
bus->rxc_errors);
bcm_bprintf(strbuf, "rx_hdrfail %u badhdr %u badseq %u\n",
bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq);
bcm_bprintf(strbuf, "fc_rcvd %u, fc_xoff %u, fc_xon %u\n",
bus->fc_rcvd, bus->fc_xoff, bus->fc_xon);
bcm_bprintf(strbuf, "rxglomfail %u, rxglomframes %u, rxglompkts %u\n",
bus->rxglomfail, bus->rxglomframes, bus->rxglompkts);
bcm_bprintf(strbuf, "f2rx (hdrs/data) %u (%u/%u), f2tx %u f1regs %u\n",
(bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs, bus->f2rxdata,
bus->f2txdata, bus->f1regdata);
{
dhd_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->dhd->rx_packets,
(bus->f2rxhdrs + bus->f2rxdata));
dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->rx_packets, bus->f1regdata);
dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->rx_packets,
(bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->rx_packets, bus->intrcount);
bcm_bprintf(strbuf, "\n");
dhd_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts),
bus->dhd->rx_packets);
dhd_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts, bus->rxglomframes);
bcm_bprintf(strbuf, "\n");
dhd_dump_pct(strbuf, "Tx: pkts/f2wr", bus->dhd->tx_packets, bus->f2txdata);
dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->tx_packets, bus->f1regdata);
dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->tx_packets,
(bus->f2txdata + bus->f1regdata));
dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->tx_packets, bus->intrcount);
bcm_bprintf(strbuf, "\n");
dhd_dump_pct(strbuf, "Total: pkts/f2rw",
(bus->dhd->tx_packets + bus->dhd->rx_packets),
(bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata));
dhd_dump_pct(strbuf, ", pkts/f1sd",
(bus->dhd->tx_packets + bus->dhd->rx_packets), bus->f1regdata);
dhd_dump_pct(strbuf, ", pkts/sd",
(bus->dhd->tx_packets + bus->dhd->rx_packets),
(bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
dhd_dump_pct(strbuf, ", pkts/int",
(bus->dhd->tx_packets + bus->dhd->rx_packets), bus->intrcount);
bcm_bprintf(strbuf, "\n\n");
}
#ifdef SDTEST
if (bus->pktgen_count) {
bcm_bprintf(strbuf, "pktgen config and count:\n");
bcm_bprintf(strbuf, "freq %u count %u print %u total %u min %u len %u\n",
bus->pktgen_freq, bus->pktgen_count, bus->pktgen_print,
bus->pktgen_total, bus->pktgen_minlen, bus->pktgen_maxlen);
bcm_bprintf(strbuf, "send attempts %u rcvd %u fail %u\n",
bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
}
#endif /* SDTEST */
#ifdef DHD_DEBUG
bcm_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n",
bus->dpc_sched, (bcmsdh_intr_pending(bus->sdh) ? " " : " not "));
bcm_bprintf(strbuf, "blocksize %u roundup %u\n", bus->blocksize, bus->roundup);
#endif /* DHD_DEBUG */
bcm_bprintf(strbuf, "clkstate %d activity %d idletime %d idlecount %d sleeping %d\n",
bus->clkstate, bus->activity, bus->idletime, bus->idlecount, bus->sleeping);
}
void
dhd_bus_clearcounts(dhd_pub_t *dhdp)
{
dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus;
bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0;
bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0;
bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0;
bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0;
bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0;
bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = bus->f1regdata = 0;
}
#ifdef SDTEST
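/* Copy the current pktgen configuration and counters into the iovar buffer */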
static int
dhdsdio_pktgen_get(dhd_bus_t *bus, uint8 *arg)
{
dhd_pktgen_t pktgen;
pktgen.version = DHD_PKTGEN_VERSION;
pktgen.freq = bus->pktgen_freq;
pktgen.count = bus->pktgen_count;
pktgen.print = bus->pktgen_print;
pktgen.total = bus->pktgen_total;
pktgen.minlen = bus->pktgen_minlen;
pktgen.maxlen = bus->pktgen_maxlen;
pktgen.numsent = bus->pktgen_sent;
pktgen.numrcvd = bus->pktgen_rcvd;
pktgen.numfail = bus->pktgen_fail;
pktgen.mode = bus->pktgen_mode;
pktgen.stop = bus->pktgen_stop;
bcopy(&pktgen, arg, sizeof(pktgen));
return 0;
}
static int
dhdsdio_pktgen_set(dhd_bus_t *bus, uint8 *arg)
{
dhd_pktgen_t pktgen;
uint oldcnt, oldmode;
bcopy(arg, &pktgen, sizeof(pktgen));
if (pktgen.version != DHD_PKTGEN_VERSION)
return BCME_BADARG;
oldcnt = bus->pktgen_count;
oldmode = bus->pktgen_mode;
bus->pktgen_freq = pktgen.freq;
bus->pktgen_count = pktgen.count;
bus->pktgen_print = pktgen.print;
bus->pktgen_total = pktgen.total;
bus->pktgen_minlen = pktgen.minlen;
bus->pktgen_maxlen = pktgen.maxlen;
bus->pktgen_mode = pktgen.mode;
bus->pktgen_stop = pktgen.stop;
bus->pktgen_tick = bus->pktgen_ptick = 0;
bus->pktgen_prev_time = jiffies;
bus->pktgen_len = MAX(bus->pktgen_len, bus->pktgen_minlen);
bus->pktgen_len = MIN(bus->pktgen_len, bus->pktgen_maxlen);
/* Clear counts for a new pktgen (mode change, or was stopped) */
if (bus->pktgen_count && (!oldcnt || oldmode != bus->pktgen_mode)) {
bus->pktgen_sent = bus->pktgen_prev_sent = bus->pktgen_rcvd = 0;
bus->pktgen_prev_rcvd = bus->pktgen_fail = 0;
}
return 0;
}
#endif /* SDTEST */
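/*
 * Enable or disable the SOCRAM-to-devram remap, preserving the current
 * enable/protect settings read back from the chip.
 */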
static void
dhdsdio_devram_remap(dhd_bus_t *bus, bool val)
{
uint8 enable, protect, remap;
si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
remap = val ? TRUE : FALSE;
si_socdevram(bus->sih, TRUE, &enable, &protect, &remap);
}
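/*
 * Read or write 'size' bytes of dongle memory at backplane address
 * 'address'. Transfers are split at SBSDIO backplane-window boundaries,
 * moving the window as needed; the window is restored to the enumeration
 * space before returning.
 */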
static int
dhdsdio_membytes(dhd_bus_t *bus, bool write, uint32 address, uint8 *data, uint size)
{
int bcmerror = 0;
uint32 sdaddr;
uint dsize;
/* In remap mode, addresses beyond SOCRAM are redirected to devram at
 * SOCDEVRAM_BP_ADDR, since remap addresses above orig_ramsize are not
 * backplane accessible
 */
if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address)) {
address -= bus->orig_ramsize;
address += SOCDEVRAM_BP_ADDR;
}
/* Determine initial transfer parameters */
sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
else
dsize = size;
/* Set the backplane window to include the start address */
if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
goto xfer_done;
}
/* Do the transfer(s) */
while (size) {
DHD_INFO(("%s: %s %d bytes at offset 0x%08x in window 0x%08x\n",
__FUNCTION__, (write ? "write" : "read"), dsize, sdaddr,
(address & SBSDIO_SBWINDOW_MASK)));
if ((bcmerror = bcmsdh_rwdata(bus->sdh, write, sdaddr, data, dsize))) {
DHD_ERROR(("%s: membytes transfer failed\n", __FUNCTION__));
break;
}
/* Adjust for next transfer (if any) */
if ((size -= dsize)) {
data += dsize;
address += dsize;
if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
break;
}
sdaddr = 0;
dsize = MIN(SBSDIO_SB_OFT_ADDR_LIMIT, size);
}
}
xfer_done:
/* Return the window to backplane enumeration space for core access */
if (dhdsdio_set_siaddr_window(bus, bcmsdh_cur_sbwad(bus->sdh))) {
DHD_ERROR(("%s: FAILED to set window back to 0x%x\n", __FUNCTION__,
bcmsdh_cur_sbwad(bus->sdh)));
}
return bcmerror;
}
#ifdef DHD_DEBUG
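/*
 * Locate and read the sdpcm_shared_t structure the firmware leaves near
 * the top of RAM: the last word of memory holds its address, which is
 * rejected if it is zero or still looks like the NVRAM length token
 * (~len<<16 | len). The structure version is then checked against the
 * host's SDPCM_SHARED_VERSION.
 */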
static int
dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh)
{
uint32 addr;
int rv, i;
uint32 shaddr = 0;
shaddr = bus->dongle_ram_base + bus->ramsize - 4;
i = 0;
do {
/* Read last word in memory to determine address of sdpcm_shared structure */
if ((rv = dhdsdio_membytes(bus, FALSE, shaddr, (uint8 *)&addr, 4)) < 0)
return rv;
addr = ltoh32(addr);
DHD_INFO(("sdpcm_shared address 0x%08X\n", addr));
/*
* Check if addr is valid.
* NVRAM length at the end of memory should have been overwritten.
*/
if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) {
if ((bus->srmemsize > 0) && (i++ == 0)) {
shaddr -= bus->srmemsize;
} else {
DHD_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n",
__FUNCTION__, addr));
return BCME_ERROR;
}
} else
break;
} while (i < 2);
/* Read hndrte_shared structure */
if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)sh, sizeof(sdpcm_shared_t))) < 0)
return rv;
/* Endianness */
sh->flags = ltoh32(sh->flags);
sh->trap_addr = ltoh32(sh->trap_addr);
sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
sh->assert_file_addr = ltoh32(sh->assert_file_addr);
sh->assert_line = ltoh32(sh->assert_line);
sh->console_addr = ltoh32(sh->console_addr);
sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
if ((sh->flags & SDPCM_SHARED_VERSION_MASK) == 3 && SDPCM_SHARED_VERSION == 1)
return BCME_OK;
if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
DHD_ERROR(("%s: sdpcm_shared version %d in dhd "
"is different than sdpcm_shared version %d in dongle\n",
__FUNCTION__, SDPCM_SHARED_VERSION,
sh->flags & SDPCM_SHARED_VERSION_MASK));
return BCME_ERROR;
}
return BCME_OK;
}
#define CONSOLE_LINE_MAX 192
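/*
 * Poll the dongle's in-memory console: read the hndrte console log
 * descriptor, fetch the circular buffer over SDIO, and print each
 * complete line since the last call as a "CONSOLE: ..." message.
 */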
static int
dhdsdio_readconsole(dhd_bus_t *bus)
{
dhd_console_t *c = &bus->console;
uint8 line[CONSOLE_LINE_MAX], ch;
uint32 n, idx, addr;
int rv;
/* Don't do anything until FWREADY updates console address */
if (bus->console_addr == 0)
return 0;
if (!KSO_ENAB(bus))
return 0;
/* Read console log struct */
addr = bus->console_addr + OFFSETOF(hndrte_cons_t, log);
if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
return rv;
/* Allocate console buffer (one time only) */
if (c->buf == NULL) {
c->bufsize = ltoh32(c->log.buf_size);
if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
return BCME_NOMEM;
}
idx = ltoh32(c->log.idx);
/* Protect against corrupt value */
if (idx > c->bufsize)
return BCME_ERROR;
/* Skip reading the console buffer if the index pointer has not moved */
if (idx == c->last)
return BCME_OK;
/* Read the console buffer */
addr = ltoh32(c->log.buf);
if ((rv = dhdsdio_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
return rv;
while (c->last != idx) {
for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
if (c->last == idx) {
/* This would output a partial line. Instead, back up
* the buffer pointer and output this line next time around.
*/
if (c->last >= n)
c->last -= n;
else
c->last = c->bufsize - n;
goto break2;
}
ch = c->buf[c->last];
c->last = (c->last + 1) % c->bufsize;
if (ch == '\n')
break;
line[n] = ch;
}
if (n > 0) {
if (line[n - 1] == '\r')
n--;
line[n] = 0;
printf("CONSOLE: %s\n", line);
}
}
break2:
return BCME_OK;
}
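/*
 * Inspect the dongle after an apparent death (e.g. an rx ctrl timeout):
 * read sdpcm_shared, report a firmware assert (expression/file/line) or
 * trap (including ARM register state), and dump the firmware console
 * buffer contents.
 */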
static int
dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size)
{
int bcmerror = 0;
uint msize = 512;
char *mbuffer = NULL;
char *console_buffer = NULL;
uint maxstrlen = 256;
char *str = NULL;
trap_t tr;
sdpcm_shared_t sdpcm_shared;
struct bcmstrbuf strbuf;
uint32 console_ptr, console_size, console_index;
uint8 line[CONSOLE_LINE_MAX], ch;
uint32 n, i, addr;
int rv;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
if (DHD_NOCHECKDIED_ON())
return 0;
if (data == NULL) {
/*
* Called after an rx ctrl timeout; "data" is NULL.
* Allocate memory to trace the trap or assert.
*/
size = msize;
mbuffer = data = MALLOC(bus->dhd->osh, msize);
if (mbuffer == NULL) {
DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
bcmerror = BCME_NOMEM;
goto done;
}
}
if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
bcmerror = BCME_NOMEM;
goto done;
}
if ((bcmerror = dhdsdio_readshared(bus, &sdpcm_shared)) < 0)
goto done;
bcm_binit(&strbuf, data, size);
bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
sdpcm_shared.msgtrace_addr, sdpcm_shared.console_addr);
if ((sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
/* NOTE: Misspelled assert is intentional - DO NOT FIX.
* (Avoids conflict with real asserts for programmatic parsing of output.)
*/
bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
}
if ((sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) {
/* NOTE: Misspelled assert is intentional - DO NOT FIX.
* (Avoids conflict with real asserts for programmatic parsing of output.)
*/
bcm_bprintf(&strbuf, "No trap%s in dongle",
(sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT)
?"/assrt" :"");
} else {
if (sdpcm_shared.flags & SDPCM_SHARED_ASSERT) {
/* Download assert */
bcm_bprintf(&strbuf, "Dongle assert");
if (sdpcm_shared.assert_exp_addr != 0) {
str[0] = '\0';
if ((bcmerror = dhdsdio_membytes(bus, FALSE,
sdpcm_shared.assert_exp_addr,
(uint8 *)str, maxstrlen)) < 0)
goto done;
str[maxstrlen - 1] = '\0';
bcm_bprintf(&strbuf, " expr \"%s\"", str);
}
if (sdpcm_shared.assert_file_addr != 0) {
str[0] = '\0';
if ((bcmerror = dhdsdio_membytes(bus, FALSE,
sdpcm_shared.assert_file_addr,
(uint8 *)str, maxstrlen)) < 0)
goto done;
str[maxstrlen - 1] = '\0';
bcm_bprintf(&strbuf, " file \"%s\"", str);
}
bcm_bprintf(&strbuf, " line %d ", sdpcm_shared.assert_line);
}
if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) {
bus->dhd->dongle_trap_occured = TRUE;
if ((bcmerror = dhdsdio_membytes(bus, FALSE,
sdpcm_shared.trap_addr,
(uint8*)&tr, sizeof(trap_t))) < 0)
goto done;
bcm_bprintf(&strbuf,
"Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
"lp 0x%x, rpc 0x%x Trap offset 0x%x, "
"r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
"r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr),
ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc),
ltoh32(sdpcm_shared.trap_addr),
ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3),
ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7));
addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log);
if ((rv = dhdsdio_membytes(bus, FALSE, addr,
(uint8 *)&console_ptr, sizeof(console_ptr))) < 0)
goto printbuf;
addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log.buf_size);
if ((rv = dhdsdio_membytes(bus, FALSE, addr,
(uint8 *)&console_size, sizeof(console_size))) < 0)
goto printbuf;
addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log.idx);
if ((rv = dhdsdio_membytes(bus, FALSE, addr,
(uint8 *)&console_index, sizeof(console_index))) < 0)
goto printbuf;
console_ptr = ltoh32(console_ptr);
console_size = ltoh32(console_size);
console_index = ltoh32(console_index);
if (console_size > CONSOLE_BUFFER_MAX ||
!(console_buffer = MALLOC(bus->dhd->osh, console_size)))
goto printbuf;
if ((rv = dhdsdio_membytes(bus, FALSE, console_ptr,
(uint8 *)console_buffer, console_size)) < 0)
goto printbuf;
for (i = 0, n = 0; i < console_size; i += n + 1) {
for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
ch = console_buffer[(console_index + i + n) % console_size];
if (ch == '\n')
break;
line[n] = ch;
}
if (n > 0) {
if (line[n - 1] == '\r')
n--;
line[n] = 0;
/* Don't use DHD_ERROR macro since we print
* a lot of information quickly. The macro
* will truncate a lot of the printfs
*/
if (dhd_msg_level & DHD_ERROR_VAL)
printf("CONSOLE: %s\n", line);
}
}
}
}
printbuf:
if (sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) {
DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
}
done:
if (mbuffer)
MFREE(bus->dhd->osh, mbuffer, msize);
if (str)
MFREE(bus->dhd->osh, str, maxstrlen);
if (console_buffer)
MFREE(bus->dhd->osh, console_buffer, console_size);
return bcmerror;
}
#endif /* #ifdef DHD_DEBUG */
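/*
 * Cache the NVRAM variable block passed from above; it is written to the
 * top of dongle RAM later by dhdsdio_write_vars(). Only permitted while
 * the bus is down.
 */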
int
dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len)
{
int bcmerror = BCME_OK;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
/* Basic sanity checks */
if (bus->dhd->up) {
bcmerror = BCME_NOTDOWN;
goto err;
}
if (!len) {
bcmerror = BCME_BUFTOOSHORT;
goto err;
}
/* Free the old ones and replace with passed variables */
if (bus->vars)
MFREE(bus->dhd->osh, bus->vars, bus->varsz);
bus->vars = MALLOC(bus->dhd->osh, len);
bus->varsz = bus->vars ? len : 0;
if (bus->vars == NULL) {
bcmerror = BCME_NOMEM;
goto err;
}
/* Copy the passed variables, which should include the terminating double-null */
bcopy(arg, bus->vars, bus->varsz);
err:
return bcmerror;
}
#ifdef DHD_DEBUG
#define CC_PLL_CHIPCTRL_SERIAL_ENAB (1 << 24)
#define CC_CHIPCTRL_JTAG_SEL (1 << 3)
#define CC_CHIPCTRL_GPIO_SEL (0x3)
#define CC_PLL_CHIPCTRL_SERIAL_ENAB_4334 (1 << 28)
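/*
 * Get or set the serial-console enable bit for chips that support it
 * (4330 via chipcontrol, 4334/43340/43341 via PMU chipcontrol plus the
 * JTAG/GPIO pin mux). Returns the resulting uart-enable state, or -1 on
 * an SDIO register access failure (reported through *bcmerror).
 */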
static int
dhd_serialconsole(dhd_bus_t *bus, bool set, bool enable, int *bcmerror)
{
int int_val;
uint32 addr, data, uart_enab = 0;
uint32 jtag_sel = CC_CHIPCTRL_JTAG_SEL;
uint32 gpio_sel = CC_CHIPCTRL_GPIO_SEL;
addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
*bcmerror = 0;
bcmsdh_reg_write(bus->sdh, addr, 4, 1);
if (bcmsdh_regfail(bus->sdh)) {
*bcmerror = BCME_SDIO_ERROR;
return -1;
}
int_val = bcmsdh_reg_read(bus->sdh, data, 4);
if (bcmsdh_regfail(bus->sdh)) {
*bcmerror = BCME_SDIO_ERROR;
return -1;
}
if (bus->sih->chip == BCM4330_CHIP_ID) {
uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB;
}
else if (bus->sih->chip == BCM4334_CHIP_ID ||
bus->sih->chip == BCM43340_CHIP_ID ||
bus->sih->chip == BCM43341_CHIP_ID ||
0) {
if (enable) {
/* Moved to PMU chipcontrol 1 from 4330 */
int_val &= ~gpio_sel;
int_val |= jtag_sel;
} else {
int_val |= gpio_sel;
int_val &= ~jtag_sel;
}
uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB_4334;
}
if (!set)
return (int_val & uart_enab);
if (enable)
int_val |= uart_enab;
else
int_val &= ~uart_enab;
bcmsdh_reg_write(bus->sdh, data, 4, int_val);
if (bcmsdh_regfail(bus->sdh)) {
*bcmerror = BCME_SDIO_ERROR;
return -1;
}
if (bus->sih->chip == BCM4330_CHIP_ID) {
uint32 chipcontrol;
addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol);
chipcontrol = bcmsdh_reg_read(bus->sdh, addr, 4);
chipcontrol &= ~jtag_sel;
if (enable) {
chipcontrol |= jtag_sel;
chipcontrol &= ~gpio_sel;
}
bcmsdh_reg_write(bus->sdh, addr, 4, chipcontrol);
}
return (int_val & uart_enab);
}
#endif
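/*
 * Central handler for the bus iovars in dhdsdio_iovars[]: takes the
 * sdlock, blocks most iovars while the dongle is in reset, wakes the bus
 * and enables the backplane clock where needed, then dispatches on
 * 'actionid'.
 */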
static int
dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
void *params, int plen, void *arg, int len, int val_size)
{
int bcmerror = 0;
int32 int_val = 0;
bool bool_val = 0;
DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
__FUNCTION__, actionid, name, params, plen, arg, len, val_size));
if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
goto exit;
if (plen >= (int)sizeof(int_val))
bcopy(params, &int_val, sizeof(int_val));
bool_val = (int_val != 0) ? TRUE : FALSE;
/* Some ioctls use the bus */
dhd_os_sdlock(bus->dhd);
/* Check if dongle is in reset. If so, only allow DEVRESET iovars */
if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
actionid == IOV_GVAL(IOV_DEVRESET))) {
bcmerror = BCME_NOTREADY;
goto exit;
}
/*
* Special handling for keepSdioOn: New SDIO Wake-up Mechanism
*/
if ((vi->varid == IOV_KSO) && (IOV_ISSET(actionid))) {
dhdsdio_clk_kso_iovar(bus, bool_val);
goto exit;
} else if ((vi->varid == IOV_DEVSLEEP) && (IOV_ISSET(actionid))) {
{
dhdsdio_clk_devsleep_iovar(bus, bool_val);
if (!SLPAUTO_ENAB(bus) && (bool_val == FALSE) && (bus->ipend)) {
DHD_ERROR(("INT pending in devsleep 1, dpc_sched: %d\n",
bus->dpc_sched));
if (!bus->dpc_sched) {
bus->dpc_sched = TRUE;
dhd_sched_dpc(bus->dhd);
}
}
}
goto exit;
}
/* Handle sleep stuff before any clock mucking */
if (vi->varid == IOV_SLEEP) {
if (IOV_ISSET(actionid)) {
bcmerror = dhdsdio_bussleep(bus, bool_val);
} else {
int_val = (int32)bus->sleeping;
bcopy(&int_val, arg, val_size);
}
goto exit;
}
/* Request clock to allow SDIO accesses */
if (!bus->dhd->dongle_reset) {
BUS_WAKE(bus);
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
}
switch (actionid) {
case IOV_GVAL(IOV_INTR):
int_val = (int32)bus->intr;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_INTR):
bus->intr = bool_val;
bus->intdis = FALSE;
if (bus->dhd->up) {
if (bus->intr) {
DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
bcmsdh_intr_enable(bus->sdh);
} else {
DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
bcmsdh_intr_disable(bus->sdh);
}
}
break;
case IOV_GVAL(IOV_POLLRATE):
int_val = (int32)bus->pollrate;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_POLLRATE):
bus->pollrate = (uint)int_val;
bus->poll = (bus->pollrate != 0);
break;
case IOV_GVAL(IOV_IDLETIME):
int_val = bus->idletime;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_IDLETIME):
if ((int_val < 0) && (int_val != DHD_IDLE_IMMEDIATE)) {
bcmerror = BCME_BADARG;
} else {
bus->idletime = int_val;
}
break;
case IOV_GVAL(IOV_IDLECLOCK):
int_val = (int32)bus->idleclock;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_IDLECLOCK):
bus->idleclock = int_val;
break;
case IOV_GVAL(IOV_SD1IDLE):
int_val = (int32)sd1idle;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_SD1IDLE):
sd1idle = bool_val;
break;
case IOV_SVAL(IOV_MEMBYTES):
case IOV_GVAL(IOV_MEMBYTES):
{
uint32 address;
uint size, dsize;
uint8 *data;
bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
ASSERT(plen >= 2*sizeof(int));
address = (uint32)int_val;
bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
size = (uint)int_val;
/* Do some validation */
dsize = set ? plen - (2 * sizeof(int)) : len;
if (dsize < size) {
DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
__FUNCTION__, (set ? "set" : "get"), address, size, dsize));
bcmerror = BCME_BADARG;
break;
}
DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__,
(set ? "write" : "read"), size, address));
/* check if CR4 */
if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
/*
* If address is start of RAM (i.e. a downloaded image),
* store the reset instruction to be written in 0
*/
if (address == bus->dongle_ram_base) {
bus->resetinstr = *(((uint32*)params) + 2);
}
} else {
/* If we know about SOCRAM, check for a fit */
if ((bus->orig_ramsize) &&
((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize)))
{
uint8 enable, protect, remap;
si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
if (!enable || protect) {
DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n",
__FUNCTION__, bus->orig_ramsize, size, address));
DHD_ERROR(("%s: socram enable %d, protect %d\n",
__FUNCTION__, enable, protect));
bcmerror = BCME_BADARG;
break;
}
if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) {
uint32 devramsize = si_socdevram_size(bus->sih);
if ((address < SOCDEVRAM_ARM_ADDR) ||
(address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) {
DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n",
__FUNCTION__, address, size));
DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n",
__FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize));
bcmerror = BCME_BADARG;
break;
}
/* Translate the ARM address to the real backplane address */
address -= SOCDEVRAM_ARM_ADDR;
address += SOCDEVRAM_BP_ADDR;
DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n",
__FUNCTION__, (set ? "write" : "read"), size, address));
} else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) {
/* Can not access remap region while devram remap bit is set
* ROM content would be returned in this case
*/
DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n",
__FUNCTION__, address));
bcmerror = BCME_ERROR;
break;
}
}
}
/* Generate the actual data pointer */
data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
/* Call to do the transfer */
bcmerror = dhdsdio_membytes(bus, set, address, data, size);
break;
}
case IOV_GVAL(IOV_RAMSIZE):
int_val = (int32)bus->ramsize;
bcopy(&int_val, arg, val_size);
break;
case IOV_GVAL(IOV_RAMSTART):
int_val = (int32)bus->dongle_ram_base;
bcopy(&int_val, arg, val_size);
break;
case IOV_GVAL(IOV_SDIOD_DRIVE):
int_val = (int32)dhd_sdiod_drive_strength;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_SDIOD_DRIVE):
dhd_sdiod_drive_strength = int_val;
si_sdiod_drive_strength_init(bus->sih, bus->dhd->osh, dhd_sdiod_drive_strength);
break;
case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
bcmerror = dhdsdio_download_state(bus, bool_val);
break;
case IOV_SVAL(IOV_SOCRAM_STATE):
bcmerror = dhdsdio_download_state(bus, bool_val);
break;
case IOV_SVAL(IOV_VARS):
bcmerror = dhdsdio_downloadvars(bus, arg, len);
break;
case IOV_GVAL(IOV_READAHEAD):
int_val = (int32)dhd_readahead;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_READAHEAD):
if (bool_val && !dhd_readahead)
bus->nextlen = 0;
dhd_readahead = bool_val;
break;
case IOV_GVAL(IOV_SDRXCHAIN):
int_val = (int32)bus->use_rxchain;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_SDRXCHAIN):
if (bool_val && !bus->sd_rxchain)
bcmerror = BCME_UNSUPPORTED;
else
bus->use_rxchain = bool_val;
break;
#ifndef BCMSPI
case IOV_GVAL(IOV_ALIGNCTL):
int_val = (int32)dhd_alignctl;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_ALIGNCTL):
dhd_alignctl = bool_val;
break;
#endif /* BCMSPI */
case IOV_GVAL(IOV_SDALIGN):
int_val = DHD_SDALIGN;
bcopy(&int_val, arg, val_size);
break;
#ifdef DHD_DEBUG
case IOV_GVAL(IOV_VARS):
if (bus->varsz < (uint)len)
bcopy(bus->vars, arg, bus->varsz);
else
bcmerror = BCME_BUFTOOSHORT;
break;
#endif /* DHD_DEBUG */
#ifdef DHD_DEBUG
case IOV_GVAL(IOV_SDREG):
{
sdreg_t *sd_ptr;
uint32 addr, size;
sd_ptr = (sdreg_t *)params;
addr = (uintptr)bus->regs + sd_ptr->offset;
size = sd_ptr->func;
int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
if (bcmsdh_regfail(bus->sdh))
bcmerror = BCME_SDIO_ERROR;
bcopy(&int_val, arg, sizeof(int32));
break;
}
case IOV_SVAL(IOV_SDREG):
{
sdreg_t *sd_ptr;
uint32 addr, size;
sd_ptr = (sdreg_t *)params;
addr = (uintptr)bus->regs + sd_ptr->offset;
size = sd_ptr->func;
bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value);
if (bcmsdh_regfail(bus->sdh))
bcmerror = BCME_SDIO_ERROR;
break;
}
/* Same as above, but the offset is relative to the backplane enumeration
 * base (SI_ENUM_BASE) rather than the SDIO core registers
 */
case IOV_GVAL(IOV_SBREG):
{
sdreg_t sdreg;
uint32 addr, size;
bcopy(params, &sdreg, sizeof(sdreg));
addr = SI_ENUM_BASE + sdreg.offset;
size = sdreg.func;
int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
if (bcmsdh_regfail(bus->sdh))
bcmerror = BCME_SDIO_ERROR;
bcopy(&int_val, arg, sizeof(int32));
break;
}
case IOV_SVAL(IOV_SBREG):
{
sdreg_t sdreg;
uint32 addr, size;
bcopy(params, &sdreg, sizeof(sdreg));
addr = SI_ENUM_BASE + sdreg.offset;
size = sdreg.func;
bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value);
if (bcmsdh_regfail(bus->sdh))
bcmerror = BCME_SDIO_ERROR;
break;
}
case IOV_GVAL(IOV_SDCIS):
{
*(char *)arg = 0;
bcmstrcat(arg, "\nFunc 0\n");
bcmsdh_cis_read(bus->sdh, 0x10, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
bcmstrcat(arg, "\nFunc 1\n");
bcmsdh_cis_read(bus->sdh, 0x11, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
bcmstrcat(arg, "\nFunc 2\n");
bcmsdh_cis_read(bus->sdh, 0x12, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
break;
}
case IOV_GVAL(IOV_FORCEEVEN):
int_val = (int32)forcealign;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_FORCEEVEN):
forcealign = bool_val;
break;
case IOV_GVAL(IOV_TXBOUND):
int_val = (int32)dhd_txbound;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_TXBOUND):
dhd_txbound = (uint)int_val;
break;
case IOV_GVAL(IOV_RXBOUND):
int_val = (int32)dhd_rxbound;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_RXBOUND):
dhd_rxbound = (uint)int_val;
break;
case IOV_GVAL(IOV_TXMINMAX):
int_val = (int32)dhd_txminmax;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_TXMINMAX):
dhd_txminmax = (uint)int_val;
break;
case IOV_GVAL(IOV_SERIALCONS):
int_val = dhd_serialconsole(bus, FALSE, 0, &bcmerror);
if (bcmerror != 0)
break;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_SERIALCONS):
dhd_serialconsole(bus, TRUE, bool_val, &bcmerror);
break;
#endif /* DHD_DEBUG */
#ifdef SDTEST
case IOV_GVAL(IOV_EXTLOOP):
int_val = (int32)bus->ext_loop;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_EXTLOOP):
bus->ext_loop = bool_val;
break;
case IOV_GVAL(IOV_PKTGEN):
bcmerror = dhdsdio_pktgen_get(bus, arg);
break;
case IOV_SVAL(IOV_PKTGEN):
bcmerror = dhdsdio_pktgen_set(bus, arg);
break;
#endif /* SDTEST */
#if defined(SDIO_CRC_ERROR_FIX)
case IOV_GVAL(IOV_WATERMARK):
int_val = (int32)watermark;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_WATERMARK):
watermark = (uint)int_val;
watermark = (watermark > SBSDIO_WATERMARK_MASK) ? SBSDIO_WATERMARK_MASK : watermark;
DHD_ERROR(("Setting watermark as 0x%x.\n", watermark));
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, (uint8)watermark, NULL);
break;
case IOV_GVAL(IOV_MESBUSYCTRL):
int_val = (int32)mesbusyctrl;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_MESBUSYCTRL):
mesbusyctrl = (uint)int_val;
mesbusyctrl = (mesbusyctrl > SBSDIO_MESBUSYCTRL_MASK)
? SBSDIO_MESBUSYCTRL_MASK : mesbusyctrl;
DHD_ERROR(("Setting mesbusyctrl as 0x%x.\n", mesbusyctrl));
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
((uint8)mesbusyctrl | 0x80), NULL);
break;
#endif /* SDIO_CRC_ERROR_FIX */
case IOV_GVAL(IOV_DONGLEISOLATION):
int_val = bus->dhd->dongle_isolation;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_DONGLEISOLATION):
bus->dhd->dongle_isolation = bool_val;
break;
case IOV_SVAL(IOV_DEVRESET):
DHD_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d busstate=%d\n",
__FUNCTION__, bool_val, bus->dhd->dongle_reset,
bus->dhd->busstate));
ASSERT(bus->dhd->osh);
/* ASSERT(bus->cl_devid); */
dhd_bus_devreset(bus->dhd, (uint8)bool_val);
break;
#ifdef SOFTAP
case IOV_GVAL(IOV_FWPATH):
{
uint32 fw_path_len;
fw_path_len = strlen(bus->fw_path);
DHD_INFO(("[softap] get fwpath, l=%d\n", len));
if (fw_path_len > len-1) {
bcmerror = BCME_BUFTOOSHORT;
break;
}
if (fw_path_len) {
bcopy(bus->fw_path, arg, fw_path_len);
((uchar*)arg)[fw_path_len] = 0;
}
break;
}
case IOV_SVAL(IOV_FWPATH):
DHD_INFO(("[softap] set fwpath, idx=%d\n", int_val));
switch (int_val) {
case 1:
bus->fw_path = fw_path; /* ordinary one */
break;
case 2:
bus->fw_path = fw_path2;
break;
default:
bcmerror = BCME_BADARG;
break;
}
DHD_INFO(("[softap] new fw path: %s\n", (bus->fw_path[0] ? bus->fw_path : "NULL")));
break;
#endif /* SOFTAP */
case IOV_GVAL(IOV_DEVRESET):
DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __FUNCTION__));
/* Get its status */
int_val = (bool) bus->dhd->dongle_reset;
bcopy(&int_val, arg, val_size);
break;
case IOV_GVAL(IOV_KSO):
int_val = dhdsdio_sleepcsr_get(bus);
bcopy(&int_val, arg, val_size);
break;
case IOV_GVAL(IOV_DEVCAP):
int_val = dhdsdio_devcap_get(bus);
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_DEVCAP):
dhdsdio_devcap_set(bus, (uint8) int_val);
break;
#ifdef BCMSDIOH_TXGLOM
case IOV_GVAL(IOV_TXGLOMSIZE):
int_val = (int32)bus->glomsize;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_TXGLOMSIZE):
if (int_val > SDPCM_MAXGLOM_SIZE) {
bcmerror = BCME_ERROR;
} else {
bus->glomsize = (uint)int_val;
}
break;
case IOV_GVAL(IOV_TXGLOMMODE):
int_val = (int32)bus->glom_mode;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_TXGLOMMODE):
if ((int_val != SDPCM_TXGLOM_CPY) && (int_val != SDPCM_TXGLOM_MDESC)) {
bcmerror = BCME_RANGE;
} else {
if ((bus->glom_mode = bcmsdh_set_mode(bus->sdh, (uint)int_val)) != int_val)
bcmerror = BCME_ERROR;
}
break;
#endif /* BCMSDIOH_TXGLOM */
case IOV_SVAL(IOV_HANGREPORT):
bus->dhd->hang_report = bool_val;
DHD_ERROR(("%s: Set hang_report as %d\n", __FUNCTION__, bus->dhd->hang_report));
break;
case IOV_GVAL(IOV_HANGREPORT):
int_val = (int32)bus->dhd->hang_report;
bcopy(&int_val, arg, val_size);
break;
default:
bcmerror = BCME_UNSUPPORTED;
break;
}
exit:
if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
bus->activity = FALSE;
dhdsdio_clkctl(bus, CLK_NONE, TRUE);
}
dhd_os_sdunlock(bus->dhd);
return bcmerror;
}
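/*
 * Write the cached NVRAM variables to the top of dongle RAM and store the
 * length token (~words<<16 | words) in the last word so the firmware can
 * locate them. DHD_DEBUG builds read the image back and compare it to
 * catch download corruption.
 */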
static int
dhdsdio_write_vars(dhd_bus_t *bus)
{
int bcmerror = 0;
uint32 varsize, phys_size;
uint32 varaddr;
uint8 *vbuffer;
uint32 varsizew;
#ifdef DHD_DEBUG
uint8 *nvram_ularray;
#endif /* DHD_DEBUG */
/* Even if there are no vars to be written, we still need to set the ramsize. */
varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
varaddr = (bus->ramsize - 4) - varsize;
varaddr += bus->dongle_ram_base;
if (bus->vars) {
if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 7)) {
if (((varaddr & 0x3C) == 0x3C) && (varsize > 4)) {
DHD_ERROR(("PR85623WAR in place\n"));
varsize += 4;
varaddr -= 4;
}
}
vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
if (!vbuffer)
return BCME_NOMEM;
bzero(vbuffer, varsize);
bcopy(bus->vars, vbuffer, bus->varsz);
/* Write the vars list */
bcmerror = dhdsdio_membytes(bus, TRUE, varaddr, vbuffer, varsize);
#ifdef DHD_DEBUG
/* Verify NVRAM bytes */
DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
if (!nvram_ularray)
return BCME_NOMEM;
/* Upload image to verify downloaded contents. */
memset(nvram_ularray, 0xaa, varsize);
/* Read the vars list to temp buffer for comparison */
bcmerror = dhdsdio_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
if (bcmerror) {
DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
__FUNCTION__, bcmerror, varsize, varaddr));
}
/* Compare the org NVRAM with the one read from RAM */
if (memcmp(vbuffer, nvram_ularray, varsize)) {
DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
} else
DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
__FUNCTION__));
MFREE(bus->dhd->osh, nvram_ularray, varsize);
#endif /* DHD_DEBUG */
MFREE(bus->dhd->osh, vbuffer, varsize);
}
phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
phys_size += bus->dongle_ram_base;
/* adjust to the user specified RAM */
DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
phys_size, bus->ramsize));
DHD_INFO(("Vars are at %d, orig varsize is %d\n",
varaddr, varsize));
varsize = ((phys_size - 4) - varaddr);
/*
* Determine the length token:
* Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
*/
if (bcmerror) {
varsizew = 0;
} else {
varsizew = varsize / 4;
varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
varsizew = htol32(varsizew);
}
DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
/* Write the length token to the last word */
bcmerror = dhdsdio_membytes(bus, TRUE, (phys_size - 4),
(uint8*)&varsizew, 4);
return bcmerror;
}
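/*
 * Enter or exit firmware-download state. Entering disables the ARM core
 * (or halts CR4) and resets SOCRAM; exiting writes the vars, switches
 * back to the SDIO core, and resets the ARM so the downloaded image boots
 * from RAM.
 */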
static int
dhdsdio_download_state(dhd_bus_t *bus, bool enter)
{
uint retries;
int bcmerror = 0;
int foundcr4 = 0;
if (!bus->sih)
return BCME_ERROR;
/* To enter download state, disable ARM and reset SOCRAM.
* To exit download state, simply reset ARM (default is RAM boot).
*/
if (enter) {
bus->alp_only = TRUE;
if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
!(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
foundcr4 = 1;
} else {
DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
bcmerror = BCME_ERROR;
goto fail;
}
}
if (!foundcr4) {
si_core_disable(bus->sih, 0);
if (bcmsdh_regfail(bus->sdh)) {
bcmerror = BCME_SDIO_ERROR;
goto fail;
}
if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
bcmerror = BCME_ERROR;
goto fail;
}
si_core_reset(bus->sih, 0, 0);
if (bcmsdh_regfail(bus->sdh)) {
DHD_ERROR(("%s: Failure trying reset SOCRAM core?\n",
__FUNCTION__));
bcmerror = BCME_SDIO_ERROR;
goto fail;
}
/* Disable remap for download */
if (REMAP_ENAB(bus) && si_socdevram_remap_isenb(bus->sih))
dhdsdio_devram_remap(bus, FALSE);
/* Clear the top bit of memory */
if (bus->ramsize) {
uint32 zeros = 0;
if (dhdsdio_membytes(bus, TRUE, bus->ramsize - 4,
(uint8*)&zeros, 4) < 0) {
bcmerror = BCME_SDIO_ERROR;
goto fail;
}
}
} else {
/* For CR4,
* Halt ARM
* Remove ARM reset
* Read RAM base address [0x18_0000]
* [next] Download firmware
* [done at else] Populate the reset vector
* [done at else] Remove ARM halt
*/
/* Halt ARM & remove reset */
si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
}
} else {
if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
bcmerror = BCME_ERROR;
goto fail;
}
if (!si_iscoreup(bus->sih)) {
DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
bcmerror = BCME_ERROR;
goto fail;
}
if ((bcmerror = dhdsdio_write_vars(bus))) {
DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
goto fail;
}
/* Enable remap before ARM reset but after vars.
* No backplane access in remap mode
*/
if (REMAP_ENAB(bus) && !si_socdevram_remap_isenb(bus->sih))
dhdsdio_devram_remap(bus, TRUE);
if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
!si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
bcmerror = BCME_ERROR;
goto fail;
}
W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
!(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
bcmerror = BCME_ERROR;
goto fail;
}
} else {
/* CR4 has no SOCRAM, but it has TCMs */
/* Write vars */
if ((bcmerror = dhdsdio_write_vars(bus))) {
DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
goto fail;
}
if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
!si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
bcmerror = BCME_ERROR;
goto fail;
}
W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
/* switch back to arm core again */
if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
bcmerror = BCME_ERROR;
goto fail;
}
/* write address 0 with reset instruction */
bcmerror = dhdsdio_membytes(bus, TRUE, 0,
(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
/* now remove reset and halt and continue to run CR4 */
}
si_core_reset(bus->sih, 0, 0);
if (bcmsdh_regfail(bus->sdh)) {
DHD_ERROR(("%s: Failure trying to reset ARM core?\n", __FUNCTION__));
bcmerror = BCME_SDIO_ERROR;
goto fail;
}
/* Allow HT Clock now that the ARM is running. */
bus->alp_only = FALSE;
bus->dhd->busstate = DHD_BUS_LOAD;
}
fail:
/* Always return to SDIOD core */
if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0))
si_setcore(bus->sih, SDIOD_CORE_ID, 0);
return bcmerror;
}
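/*
 * Bus-level iovar entry point. The name is looked up in dhdsdio_iovars[];
 * anything not found there is passed through to the SDIO host driver,
 * with follow-up reads after sd_divisor/sd_mode/sd_blocksize sets so the
 * cached bus parameters stay in sync.
 */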
int
dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
void *params, int plen, void *arg, int len, bool set)
{
dhd_bus_t *bus = dhdp->bus;
const bcm_iovar_t *vi = NULL;
int bcmerror = 0;
int val_size;
uint32 actionid;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
ASSERT(name);
ASSERT(len >= 0);
/* Get MUST have return space */
ASSERT(set || (arg && len));
/* Set does NOT take qualifiers */
ASSERT(!set || (!params && !plen));
/* Look up var locally; if not found pass to host driver */
if ((vi = bcm_iovar_lookup(dhdsdio_iovars, name)) == NULL) {
dhd_os_sdlock(bus->dhd);
BUS_WAKE(bus);
/* Turn on clock in case SD command needs backplane */
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
bcmerror = bcmsdh_iovar_op(bus->sdh, name, params, plen, arg, len, set);
/* Check for bus configuration changes of interest */
/* If it was divisor change, read the new one */
if (set && strcmp(name, "sd_divisor") == 0) {
if (bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
&bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
bus->sd_divisor = -1;
DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
} else {
DHD_INFO(("%s: noted %s update, value now %d\n",
__FUNCTION__, name, bus->sd_divisor));
}
}
/* If it was a mode change, read the new one */
if (set && strcmp(name, "sd_mode") == 0) {
if (bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
&bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
bus->sd_mode = -1;
DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
} else {
DHD_INFO(("%s: noted %s update, value now %d\n",
__FUNCTION__, name, bus->sd_mode));
}
}
/* Similar check for blocksize change */
if (set && strcmp(name, "sd_blocksize") == 0) {
int32 fnum = 2;
if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32),
&bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
bus->blocksize = 0;
DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
} else {
DHD_INFO(("%s: noted %s update, value now %d\n",
__FUNCTION__, "sd_blocksize", bus->blocksize));
if ((bus->sih->chip == BCM4335_CHIP_ID) ||
(bus->sih->chip == BCM4339_CHIP_ID))
dhd_overflow_war(bus);
}
}
bus->roundup = MIN(max_roundup, bus->blocksize);
if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
bus->activity = FALSE;
dhdsdio_clkctl(bus, CLK_NONE, TRUE);
}
dhd_os_sdunlock(bus->dhd);
goto exit;
}
DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
name, (set ? "set" : "get"), len, plen));
/* set up 'params' pointer in case this is a set command so that
* the convenience int and bool code can be common to set and get
*/
if (params == NULL) {
params = arg;
plen = len;
}
if (vi->type == IOVT_VOID)
val_size = 0;
else if (vi->type == IOVT_BUFFER)
val_size = len;
else
/* all other types are integer sized */
val_size = sizeof(int);
actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
bcmerror = dhdsdio_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
exit:
return bcmerror;
}
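/*
 * Take the bus down: disable and clear device interrupts, turn off SDIO
 * function 2, drop to the SD-only clock, flush the tx queue and any
 * partial glom, and wake anyone blocked on a control response.
 */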
void
dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
{
osl_t *osh;
uint32 local_hostintmask;
uint8 saveclk;
uint retries;
int err;
if (!bus->dhd)
return;
osh = bus->dhd->osh;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
bcmsdh_waitlockfree(NULL);
if (enforce_mutex)
dhd_os_sdlock(bus->dhd);
if ((bus->dhd->busstate == DHD_BUS_DOWN) || bus->dhd->hang_was_sent) {
/* If the firmware has already hung, disable any interrupts */
bus->dhd->busstate = DHD_BUS_DOWN;
bus->hostintmask = 0;
bcmsdh_intr_disable(bus->sdh);
} else {
BUS_WAKE(bus);
/* Change our idea of bus state */
bus->dhd->busstate = DHD_BUS_DOWN;
if (KSO_ENAB(bus)) {
/* Enable clock for device interrupts */
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
/* Disable and clear interrupts at the chip level also */
W_SDREG(0, &bus->regs->hostintmask, retries);
local_hostintmask = bus->hostintmask;
bus->hostintmask = 0;
/* Force clocks on backplane to be sure F2 interrupt propagates */
saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (!err) {
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
(saveclk | SBSDIO_FORCE_HT), &err);
}
if (err) {
DHD_ERROR(("%s: Failed to force clock for F2: err %d\n",
__FUNCTION__, err));
}
/* Turn off the bus (F2), free any pending packets */
DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
bcmsdh_intr_disable(bus->sdh);
#ifndef BCMSPI
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
#endif /* !BCMSPI */
/* Clear any pending interrupts now that F2 is disabled */
W_SDREG(local_hostintmask, &bus->regs->intstatus, retries);
}
/* Turn off the backplane clock (only) */
dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
}
/* Clear the data packet queues */
pktq_flush(osh, &bus->txq, TRUE, NULL, 0);
/* Clear any held glomming stuff */
if (bus->glomd)
PKTFREE(osh, bus->glomd, FALSE);
if (bus->glom)
PKTFREE(osh, bus->glom, FALSE);
bus->glom = bus->glomd = NULL;
/* Clear rx control and wake any waiters */
bus->rxlen = 0;
dhd_os_ioctl_resp_wake(bus->dhd);
/* Reset some F2 state stuff */
bus->rxskip = FALSE;
bus->tx_seq = bus->rx_seq = 0;
bus->tx_max = 4;
if (enforce_mutex)
dhd_os_sdunlock(bus->dhd);
}
#ifdef BCMSDIOH_TXGLOM
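/*
 * Enable or disable tx glomming. Enabling first sets the dongle's
 * "bus:rxglom" iovar; glom_enable is turned on only if that call
 * succeeds.
 */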
void
dhd_txglom_enable(dhd_pub_t *dhdp, bool enable)
{
dhd_bus_t *bus = dhdp->bus;
char buf[256];
uint32 rxglom;
int32 ret;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
if (enable) {
rxglom = 1;
memset(buf, 0, sizeof(buf));
bcm_mkiovar("bus:rxglom",
(void *)&rxglom,
4, buf, sizeof(buf));
ret = dhd_wl_ioctl_cmd(dhdp,
WLC_SET_VAR, buf,
sizeof(buf), TRUE, 0);
if (!(ret < 0)) {
bus->glom_enable = TRUE;
}
} else {
bus->glom_enable = FALSE;
}
}
#endif /* BCMSDIOH_TXGLOM */
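/*
 * Bring the bus up for data transfer: force the backplane clock on,
 * announce the SDPCM protocol version via tosbmailboxdata, enable SDIO
 * function 2, wait for the dongle to signal readiness (IOR2, or the gSPI
 * F2 rx-FIFO-ready status), then program the host interrupt mask and
 * enable interrupts.
 */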
int
dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
{
dhd_bus_t *bus = dhdp->bus;
dhd_timeout_t tmo;
uint retries = 0;
uint8 ready, enable;
int err, ret = 0;
#ifdef BCMSPI
uint32 dstatus = 0; /* gSPI device-status bits */
#else /* BCMSPI */
uint8 saveclk;
#endif /* BCMSPI */
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
ASSERT(bus->dhd);
if (!bus->dhd)
return 0;
if (enforce_mutex)
dhd_os_sdlock(bus->dhd);
/* Make sure backplane clock is on, needed to generate F2 interrupt */
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
if (bus->clkstate != CLK_AVAIL) {
DHD_ERROR(("%s: clock state is wrong. state = %d\n", __FUNCTION__, bus->clkstate));
ret = -1;
goto exit;
}
#ifdef BCMSPI
/* fake "ready" for spi, wake-wlan would have already enabled F1 and F2 */
ready = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
enable = 0;
/* Give the dongle some time to do its thing and set IOR2 */
dhd_timeout_start(&tmo, WAIT_F2RXFIFORDY * WAIT_F2RXFIFORDY_DELAY * 1000);
while (!enable && !dhd_timeout_expired(&tmo)) {
dstatus = bcmsdh_cfg_read_word(bus->sdh, SDIO_FUNC_0, SPID_STATUS_REG, NULL);
if (dstatus & STATUS_F2_RX_READY)
enable = TRUE;
}
if (enable) {
DHD_ERROR(("Took %u usec before dongle is ready\n", tmo.elapsed));
enable = ready;
} else {
DHD_ERROR(("dstatus when timed out on f2-fifo not ready = 0x%x\n", dstatus));
DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed));
ret = -1;
goto exit;
}
#else /* !BCMSPI */
/* Force clocks on backplane to be sure F2 interrupt propagates */
saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (!err) {
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
(saveclk | SBSDIO_FORCE_HT), &err);
}
if (err) {
DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err));
ret = -1;
goto exit;
}
/* Enable function 2 (frame transfers) */
W_SDREG((SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT),
&bus->regs->tosbmailboxdata, retries);
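/* The write above announces the host's SDPCM protocol version to the dongle
 * through the mailbox-data register before F2 is enabled.
 */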
enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
/* Give the dongle some time to do its thing and set IOR2 */
dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000);
ready = 0;
while (ready != enable && !dhd_timeout_expired(&tmo))
ready = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL);
#endif /* !BCMSPI */
DHD_ERROR(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n",
__FUNCTION__, enable, ready, tmo.elapsed));
/* If F2 successfully enabled, set core and enable interrupts */
if (ready == enable) {
/* Make sure we're talking to the core. */
if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)))
bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0);
ASSERT(bus->regs != NULL);
/* Set up the interrupt mask and enable interrupts */
bus->hostintmask = HOSTINTMASK;
/* corerev 4 could use the newer interrupt logic to detect the frames */
#ifndef BCMSPI
if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 4) &&
(bus->rxint_mode != SDIO_DEVICE_HMB_RXINT)) {
bus->hostintmask &= ~I_HMB_FRAME_IND;
bus->hostintmask |= I_XMTDATA_AVAIL;
}
#endif /* BCMSPI */
W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
#ifdef SDIO_CRC_ERROR_FIX
if (bus->blocksize < 512) {
mesbusyctrl = watermark = bus->blocksize / 4;
}
#endif /* SDIO_CRC_ERROR_FIX */
if (!((bus->sih->chip == BCM4335_CHIP_ID) ||
(bus->sih->chip == BCM4339_CHIP_ID))) {
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK,
(uint8)watermark, &err);
}
#ifdef SDIO_CRC_ERROR_FIX
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
(uint8)mesbusyctrl|0x80, &err);
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
SBSDIO_DEVCTL_EN_F2_BLK_WATERMARK, NULL);
#endif /* SDIO_CRC_ERROR_FIX */
/* Set bus state according to enable result */
dhdp->busstate = DHD_BUS_DATA;
/* bcmsdh_intr_unmask(bus->sdh); */
bus->intdis = FALSE;
if (bus->intr) {
DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
#ifndef BCMSPI_ANDROID
bcmsdh_intr_enable(bus->sdh);
#endif /* !BCMSPI_ANDROID */
} else {
DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
bcmsdh_intr_disable(bus->sdh);
}
}
#ifndef BCMSPI
else {
/* Disable F2 again */
enable = SDIO_FUNC_ENABLE_1;
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
}
if (dhdsdio_sr_cap(bus)) {
dhdsdio_sr_init(bus);
/* Mask the chip active interrupt permanently */
bus->hostintmask &= ~I_CHIPACTIVE;
W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
DHD_INFO(("%s: disable I_CHIPACTIVE in hostintmask[0x%08x]\n",
__FUNCTION__, bus->hostintmask));
}
else
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
#endif /* !BCMSPI */
/* If we didn't come up, turn off backplane clock */
if (dhdp->busstate != DHD_BUS_DATA)
dhdsdio_clkctl(bus, CLK_NONE, FALSE);
exit:
if (enforce_mutex)
dhd_os_sdunlock(bus->dhd);
return ret;
}
static void
dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx)
{
bcmsdh_info_t *sdh = bus->sdh;
sdpcmd_regs_t *regs = bus->regs;
uint retries = 0;
uint16 lastrbc;
uint8 hi, lo;
int err;
DHD_ERROR(("%s: %sterminate frame%s\n", __FUNCTION__,
(abort ? "abort command, " : ""), (rtx ? ", send NAK" : "")));
if (!KSO_ENAB(bus)) {
DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
return;
}
if (abort) {
bcmsdh_abort(sdh, SDIO_FUNC_2);
}
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err);
if (err) {
DHD_ERROR(("%s: SBSDIO_FUNC1_FRAMECTRL cmd err\n", __FUNCTION__));
goto fail;
}
bus->f1regdata++;
/* Wait until the packet has been flushed (device/FIFO stable) */
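/* SBSDIO_FUNC1_RFRAMEBCHI/LO expose the remaining byte count of the frame
 * being flushed as a split 16-bit value; poll until it drains to zero.
 */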
for (lastrbc = retries = 0xffff; retries > 0; retries--) {
hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCHI, NULL);
lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCLO, &err);
if (err) {
DHD_ERROR(("%s: SBSDIO_FUNC1_RFAMEBCLO cmd err\n", __FUNCTION__));
goto fail;
}
bus->f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
DHD_ERROR(("%s: count growing: last 0x%04x now 0x%04x\n",
__FUNCTION__, lastrbc, ((hi << 8) + lo)));
}
lastrbc = (hi << 8) + lo;
}
if (!retries) {
DHD_ERROR(("%s: count never zeroed: last 0x%04x\n", __FUNCTION__, lastrbc));
} else {
DHD_INFO(("%s: flush took %d iterations\n", __FUNCTION__, (0xffff - retries)));
}
if (rtx) {
bus->rxrtx++;
W_SDREG(SMB_NAK, &regs->tosbmailbox, retries);
bus->f1regdata++;
if (retries <= retry_limit) {
bus->rxskip = TRUE;
}
}
/* Clear partial in any case */
bus->nextlen = 0;
fail:
/* If we can't reach the device, signal failure */
if (err || bcmsdh_regfail(sdh))
bus->dhd->busstate = DHD_BUS_DOWN;
}
static void
dhdsdio_read_control(dhd_bus_t *bus, uint8 *hdr, uint len, uint doff)
{
bcmsdh_info_t *sdh = bus->sdh;
uint rdlen, pad;
int sdret;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
/* Control data already received in aligned rxctl */
if ((bus->bus == SPI_BUS) && (!bus->usebufpool))
goto gotpkt;
ASSERT(bus->rxbuf);
/* Set rxctl for frame (w/optional alignment) */
bus->rxctl = bus->rxbuf;
if (dhd_alignctl) {
bus->rxctl += firstread;
if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
bus->rxctl += (DHD_SDALIGN - pad);
bus->rxctl -= firstread;
}
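/* The adjustment above aligns (rxctl + firstread) on a DHD_SDALIGN boundary:
 * the first firstread bytes arrived with the header, so the remainder of the
 * frame is received at an aligned address.
 */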
ASSERT(bus->rxctl >= bus->rxbuf);
/* Copy the already-read portion over */
bcopy(hdr, bus->rxctl, firstread);
if (len <= firstread)
goto gotpkt;
/* Copy the full data pkt in gSPI case and process ioctl. */
if (bus->bus == SPI_BUS) {
bcopy(hdr, bus->rxctl, len);
goto gotpkt;
}
/* Raise rdlen to next SDIO block to avoid tail command */
rdlen = len - firstread;
if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
pad = bus->blocksize - (rdlen % bus->blocksize);
if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
((len + pad) < bus->dhd->maxctl))
rdlen += pad;
} else if (rdlen % DHD_SDALIGN) {
rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
}
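/* Example: blocksize 64, rdlen 100 -> pad 28; if pad <= roundup, the read
 * grows to 128 bytes so the transfer ends on a block boundary and no tail
 * command is needed.
 */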
/* Satisfy length-alignment requirements */
if (forcealign && (rdlen & (ALIGNMENT - 1)))
rdlen = ROUNDUP(rdlen, ALIGNMENT);
/* Drop if the read is too big or it exceeds our maximum */
if ((rdlen + firstread) > bus->dhd->maxctl) {
DHD_ERROR(("%s: %d-byte control read exceeds %d-byte buffer\n",
__FUNCTION__, rdlen, bus->dhd->maxctl));
bus->dhd->rx_errors++;
dhdsdio_rxfail(bus, FALSE, FALSE);
goto done;
}
if ((len - doff) > bus->dhd->maxctl) {
DHD_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
__FUNCTION__, len, (len - doff), bus->dhd->maxctl));
bus->dhd->rx_errors++; bus->rx_toolong++;
dhdsdio_rxfail(bus, FALSE, FALSE);
goto done;
}
/* Read remainder of frame body into the rxctl buffer */
sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
(bus->rxctl + firstread), rdlen, NULL, NULL, NULL);
bus->f2rxdata++;
ASSERT(sdret != BCME_PENDING);
/* Control frame failures need retransmission */
if (sdret < 0) {
DHD_ERROR(("%s: read %d control bytes failed: %d\n", __FUNCTION__, rdlen, sdret));
bus->rxc_errors++; /* dhd.rx_ctlerrs is higher level */
dhdsdio_rxfail(bus, TRUE, TRUE);
goto done;
}
gotpkt:
#ifdef DHD_DEBUG
if (DHD_BYTES_ON() && DHD_CTL_ON()) {
prhex("RxCtrl", bus->rxctl, len);
}
#endif
/* Point to valid data and indicate its length */
bus->rxctl += doff;
bus->rxlen = len - doff;
done:
/* Awake any waiters */
dhd_os_ioctl_resp_wake(bus->dhd);
}
int
dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len,
void **pkt, uint32 *pkt_count);
static uint8
dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq)
{
uint16 dlen, totlen;
uint8 *dptr, num = 0;
uint16 sublen, check;
void *pfirst, *plast, *pnext;
void * list_tail[DHD_MAX_IFS] = { NULL };
void * list_head[DHD_MAX_IFS] = { NULL };
uint8 idx;
osl_t *osh = bus->dhd->osh;
int errcode;
uint8 chan, seq, doff, sfdoff;
uint8 txmax;
uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN];
uint reorder_info_len;
int ifidx = 0;
bool usechain = bus->use_rxchain;
/* If packets, issue read(s) and send up packet chain */
/* Return sequence numbers consumed? */
DHD_TRACE(("dhdsdio_rxglom: start: glomd %p glom %p\n", bus->glomd, bus->glom));
/* If there's a descriptor, generate the packet chain */
if (bus->glomd) {
dhd_os_sdlock_rxq(bus->dhd);
pfirst = plast = pnext = NULL;
dlen = (uint16)PKTLEN(osh, bus->glomd);
dptr = PKTDATA(osh, bus->glomd);
if (!dlen || (dlen & 1)) {
DHD_ERROR(("%s: bad glomd len (%d), ignore descriptor\n",
__FUNCTION__, dlen));
dlen = 0;
}
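/* The descriptor body is an array of little-endian uint16 subframe lengths;
 * walk it and pre-allocate one aligned packet per subframe so the whole
 * superframe can be read (or copied) directly into the chain.
 */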
for (totlen = num = 0; dlen; num++) {
/* Get (and move past) next length */
sublen = ltoh16_ua(dptr);
dlen -= sizeof(uint16);
dptr += sizeof(uint16);
if ((sublen < SDPCM_HDRLEN_RX) ||
((num == 0) && (sublen < (2 * SDPCM_HDRLEN_RX)))) {
DHD_ERROR(("%s: descriptor len %d bad: %d\n",
__FUNCTION__, num, sublen));
pnext = NULL;
break;
}
if (sublen % DHD_SDALIGN) {
DHD_ERROR(("%s: sublen %d not a multiple of %d\n",
__FUNCTION__, sublen, DHD_SDALIGN));
usechain = FALSE;
}
totlen += sublen;
/* For last frame, adjust read len so total is a block multiple */
if (!dlen) {
sublen += (ROUNDUP(totlen, bus->blocksize) - totlen);
totlen = ROUNDUP(totlen, bus->blocksize);
}
/* Allocate/chain packet for next subframe */
if ((pnext = PKTGET(osh, sublen + DHD_SDALIGN, FALSE)) == NULL) {
DHD_ERROR(("%s: PKTGET failed, num %d len %d\n",
__FUNCTION__, num, sublen));
break;
}
ASSERT(!PKTLINK(pnext));
if (!pfirst) {
ASSERT(!plast);
pfirst = plast = pnext;
} else {
ASSERT(plast);
PKTSETNEXT(osh, plast, pnext);
plast = pnext;
}
/* Adhere to start alignment requirements */
PKTALIGN(osh, pnext, sublen, DHD_SDALIGN);
}
/* If all allocations succeeded, save packet chain in bus structure */
if (pnext) {
DHD_GLOM(("%s: allocated %d-byte packet chain for %d subframes\n",
__FUNCTION__, totlen, num));
if (DHD_GLOM_ON() && bus->nextlen) {
if (totlen != bus->nextlen) {
DHD_GLOM(("%s: glomdesc mismatch: nextlen %d glomdesc %d "
"rxseq %d\n", __FUNCTION__, bus->nextlen,
totlen, rxseq));
}
}
bus->glom = pfirst;
pfirst = pnext = NULL;
} else {
if (pfirst)
PKTFREE(osh, pfirst, FALSE);
bus->glom = NULL;
num = 0;
}
/* Done with descriptor packet */
PKTFREE(osh, bus->glomd, FALSE);
bus->glomd = NULL;
bus->nextlen = 0;
dhd_os_sdunlock_rxq(bus->dhd);
}
/* Ok -- either we just generated a packet chain, or had one from before */
if (bus->glom) {
if (DHD_GLOM_ON()) {
DHD_GLOM(("%s: attempt superframe read, packet chain:\n", __FUNCTION__));
for (pnext = bus->glom; pnext; pnext = PKTNEXT(osh, pnext)) {
DHD_GLOM((" %p: %p len 0x%04x (%d)\n",
pnext, (uint8*)PKTDATA(osh, pnext),
PKTLEN(osh, pnext), PKTLEN(osh, pnext)));
}
}
pfirst = bus->glom;
dlen = (uint16)pkttotlen(osh, pfirst);
/* Do an SDIO read for the superframe. Configurable iovar to
* read directly into the chained packet, or allocate a large
* packet and copy into the chain.
*/
if (usechain) {
errcode = dhd_bcmsdh_recv_buf(bus,
bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
F2SYNC, (uint8*)PKTDATA(osh, pfirst),
dlen, pfirst, NULL, NULL);
} else if (bus->dataptr) {
errcode = dhd_bcmsdh_recv_buf(bus,
bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
F2SYNC, bus->dataptr,
dlen, NULL, NULL, NULL);
sublen = (uint16)pktfrombuf(osh, pfirst, 0, dlen, bus->dataptr);
if (sublen != dlen) {
DHD_ERROR(("%s: FAILED TO COPY, dlen %d sublen %d\n",
__FUNCTION__, dlen, sublen));
errcode = -1;
}
pnext = NULL;
} else {
DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", dlen));
errcode = -1;
}
bus->f2rxdata++;
ASSERT(errcode != BCME_PENDING);
/* On failure, kill the superframe, allow a couple retries */
if (errcode < 0) {
DHD_ERROR(("%s: glom read of %d bytes failed: %d\n",
__FUNCTION__, dlen, errcode));
bus->dhd->rx_errors++;
if (bus->glomerr++ < 3) {
dhdsdio_rxfail(bus, TRUE, TRUE);
} else {
bus->glomerr = 0;
dhdsdio_rxfail(bus, TRUE, FALSE);
dhd_os_sdlock_rxq(bus->dhd);
PKTFREE(osh, bus->glom, FALSE);
dhd_os_sdunlock_rxq(bus->dhd);
bus->rxglomfail++;
bus->glom = NULL;
}
return 0;
}
#ifdef DHD_DEBUG
if (DHD_GLOM_ON()) {
prhex("SUPERFRAME", PKTDATA(osh, pfirst),
MIN(PKTLEN(osh, pfirst), 48));
}
#endif
/* Validate the superframe header */
dptr = (uint8 *)PKTDATA(osh, pfirst);
sublen = ltoh16_ua(dptr);
check = ltoh16_ua(dptr + sizeof(uint16));
chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
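/* nextlen in the SW header is expressed in 16-byte units; shifting left by
 * 4 converts it to bytes for the bounds check below.
 */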
if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
DHD_INFO(("%s: got frame w/nextlen too large (%d) seq %d\n",
__FUNCTION__, bus->nextlen, seq));
bus->nextlen = 0;
}
doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
errcode = 0;
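/* HW header sanity: the 16-bit length is followed by its one's complement,
 * so a valid header has (sublen ^ check) == 0xFFFF, i.e. ~(sublen ^ check)
 * truncated to 16 bits is zero. Example: len 0x0040 pairs with check 0xFFBF.
 */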
if ((uint16)~(sublen^check)) {
DHD_ERROR(("%s (superframe): HW hdr error: len/check 0x%04x/0x%04x\n",
__FUNCTION__, sublen, check));
errcode = -1;
} else if (ROUNDUP(sublen, bus->blocksize) != dlen) {
DHD_ERROR(("%s (superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n",
__FUNCTION__, sublen, ROUNDUP(sublen, bus->blocksize), dlen));
errcode = -1;
} else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) != SDPCM_GLOM_CHANNEL) {
DHD_ERROR(("%s (superframe): bad channel %d\n", __FUNCTION__,
SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN])));
errcode = -1;
} else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
DHD_ERROR(("%s (superframe): got second descriptor?\n", __FUNCTION__));
errcode = -1;
} else if ((doff < SDPCM_HDRLEN_RX) ||
(doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN_RX))) {
DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d pkt %d min %d\n",
__FUNCTION__, doff, sublen, PKTLEN(osh, pfirst),
SDPCM_HDRLEN_RX));
errcode = -1;
}
/* Check sequence number of superframe SW header */
if (rxseq != seq) {
DHD_INFO(("%s: (superframe) rx_seq %d, expected %d\n",
__FUNCTION__, seq, rxseq));
bus->rx_badseq++;
rxseq = seq;
}
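/* txmax and tx_seq are 8-bit rolling counters, so the subtraction below is
 * modulo 256; a tx credit window wider than 0x40 frames is implausible and
 * treated as corruption, keeping the previous window instead.
 */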
/* Check window for sanity */
if ((uint8)(txmax - bus->tx_seq) > 0x40) {
DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
__FUNCTION__, txmax, bus->tx_seq));
txmax = bus->tx_max;
}
bus->tx_max = txmax;
/* Remove superframe header, remember offset */
PKTPULL(osh, pfirst, doff);
sfdoff = doff;
/* Validate all the subframe headers */
for (num = 0, pnext = pfirst; pnext && !errcode;
num++, pnext = PKTNEXT(osh, pnext)) {
dptr = (uint8 *)PKTDATA(osh, pnext);
dlen = (uint16)PKTLEN(osh, pnext);
sublen = ltoh16_ua(dptr);
check = ltoh16_ua(dptr + sizeof(uint16));
chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
#ifdef DHD_DEBUG
if (DHD_GLOM_ON()) {
prhex("subframe", dptr, 32);
}
#endif
if ((uint16)~(sublen^check)) {
DHD_ERROR(("%s (subframe %d): HW hdr error: "
"len/check 0x%04x/0x%04x\n",
__FUNCTION__, num, sublen, check));
errcode = -1;
} else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN_RX)) {
DHD_ERROR(("%s (subframe %d): length mismatch: "
"len 0x%04x, expect 0x%04x\n",
__FUNCTION__, num, sublen, dlen));
errcode = -1;
} else if ((chan != SDPCM_DATA_CHANNEL) &&
(chan != SDPCM_EVENT_CHANNEL)) {
DHD_ERROR(("%s (subframe %d): bad channel %d\n",
__FUNCTION__, num, chan));
errcode = -1;
} else if ((doff < SDPCM_HDRLEN_RX) || (doff > sublen)) {
DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n",
__FUNCTION__, num, doff, sublen, SDPCM_HDRLEN_RX));
errcode = -1;
}
}
if (errcode) {
/* Terminate frame on error, request a couple retries */
if (bus->glomerr++ < 3) {
/* Restore superframe header space */
PKTPUSH(osh, pfirst, sfdoff);
dhdsdio_rxfail(bus, TRUE, TRUE);
} else {
bus->glomerr = 0;
dhdsdio_rxfail(bus, TRUE, FALSE);
dhd_os_sdlock_rxq(bus->dhd);
PKTFREE(osh, bus->glom, FALSE);
dhd_os_sdunlock_rxq(bus->dhd);
bus->rxglomfail++;
bus->glom = NULL;
}
bus->nextlen = 0;
return 0;
}
/* Basic SD framing looks ok - process each packet (header) */
bus->glom = NULL;
plast = NULL;
dhd_os_sdlock_rxq(bus->dhd);
for (num = 0; pfirst; rxseq++, pfirst = pnext) {
pnext = PKTNEXT(osh, pfirst);
PKTSETNEXT(osh, pfirst, NULL);
dptr = (uint8 *)PKTDATA(osh, pfirst);
sublen = ltoh16_ua(dptr);
chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n",
__FUNCTION__, num, pfirst, PKTDATA(osh, pfirst),
PKTLEN(osh, pfirst), sublen, chan, seq));
ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL));
if (rxseq != seq) {
DHD_GLOM(("%s: rx_seq %d, expected %d\n",
__FUNCTION__, seq, rxseq));
bus->rx_badseq++;
rxseq = seq;
}
#ifdef DHD_DEBUG
if (DHD_BYTES_ON() && DHD_DATA_ON()) {
prhex("Rx Subframe Data", dptr, dlen);
}
#endif
PKTSETLEN(osh, pfirst, sublen);
PKTPULL(osh, pfirst, doff);
reorder_info_len = sizeof(reorder_info_buf);
if (PKTLEN(osh, pfirst) == 0) {
PKTFREE(bus->dhd->osh, pfirst, FALSE);
continue;
} else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pfirst, reorder_info_buf,
&reorder_info_len) != 0) {
DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
bus->dhd->rx_errors++;
PKTFREE(osh, pfirst, FALSE);
continue;
}
if (reorder_info_len) {
uint32 free_buf_count;
void *ppfirst;
ppfirst = pfirst;
/* Reordering info from the firmware */
dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf,
reorder_info_len, &ppfirst, &free_buf_count);
if (free_buf_count == 0) {
continue;
}
else {
void *temp;
/* go to the end of the chain and attach the pnext there */
temp = ppfirst;
while (PKTNEXT(osh, temp) != NULL) {
temp = PKTNEXT(osh, temp);
}
pfirst = temp;
if (list_tail[ifidx] == NULL)
list_head[ifidx] = ppfirst;
else
PKTSETNEXT(osh, list_tail[ifidx], ppfirst);
list_tail[ifidx] = pfirst;
}
num += (uint8)free_buf_count;
}
else {
/* this packet will go up, link back into chain and count it */
if (list_tail[ifidx] == NULL) {
list_head[ifidx] = list_tail[ifidx] = pfirst;
}
else {
PKTSETNEXT(osh, list_tail[ifidx], pfirst);
list_tail[ifidx] = pfirst;
}
num++;
}
#ifdef DHD_DEBUG
if (DHD_GLOM_ON()) {
DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) nxt/lnk %p/%p\n",
__FUNCTION__, num, pfirst,
PKTDATA(osh, pfirst), PKTLEN(osh, pfirst),
PKTNEXT(osh, pfirst), PKTLINK(pfirst)));
prhex("", (uint8 *)PKTDATA(osh, pfirst),
MIN(PKTLEN(osh, pfirst), 32));
}
#endif /* DHD_DEBUG */
}
dhd_os_sdunlock_rxq(bus->dhd);
for (idx = 0; idx < DHD_MAX_IFS; idx++) {
if (list_head[idx]) {
void *temp;
uint8 cnt = 0;
temp = list_head[idx];
do {
temp = PKTNEXT(osh, temp);
cnt++;
} while (temp);
if (cnt) {
dhd_os_sdunlock(bus->dhd);
dhd_rx_frame(bus->dhd, idx, list_head[idx], cnt, 0);
dhd_os_sdlock(bus->dhd);
}
}
}
bus->rxglomframes++;
bus->rxglompkts += num;
}
return num;
}
/* Return TRUE if there may be more frames to read */
static uint
dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
{
osl_t *osh = bus->dhd->osh;
bcmsdh_info_t *sdh = bus->sdh;
uint16 len, check; /* Extracted hardware header fields */
uint8 chan, seq, doff; /* Extracted software header fields */
uint8 fcbits; /* Extracted fcbits from software header */
uint8 delta;
void *pkt; /* Packet for event or data frames */
uint16 pad; /* Number of pad bytes to read */
uint16 rdlen; /* Total number of bytes to read */
uint8 rxseq; /* Next sequence number to expect */
uint rxleft = 0; /* Remaining number of frames allowed */
int sdret; /* Return code from bcmsdh calls */
uint8 txmax; /* Maximum tx sequence offered */
#ifdef BCMSPI
uint32 dstatus = 0; /* gSPI device-status bits */
#endif /* BCMSPI */
bool len_consistent; /* Result of comparing readahead len and len from hw-hdr */
uint8 *rxbuf;
int ifidx = 0;
uint rxcount = 0; /* Total frames read */
uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN];
uint reorder_info_len;
uint pkt_count;
#if defined(DHD_DEBUG) || defined(SDTEST)
bool sdtest = FALSE; /* To limit message spew from test mode */
#endif
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
bus->readframes = TRUE;
if (!KSO_ENAB(bus)) {
DHD_ERROR(("%s: KSO off\n", __FUNCTION__));
bus->readframes = FALSE;
return 0;
}
ASSERT(maxframes);
#ifdef SDTEST
/* Allow pktgen to override maxframes */
if (bus->pktgen_count && (bus->pktgen_mode == DHD_PKTGEN_RECV)) {
maxframes = bus->pktgen_count;
sdtest = TRUE;
}
#endif
/* Not finished unless we encounter no more frames indication */
*finished = FALSE;
#ifdef BCMSPI
/* Get pktlen from gSPI device F0 reg. */
if (bus->bus == SPI_BUS) {
/* Peek in dstatus bits and find out size to do rx-read. */
dstatus = bcmsdh_get_dstatus(bus->sdh);
if (dstatus == 0)
DHD_ERROR(("%s:ZERO spi dstatus, a case observed in PR61352 hit !!!\n",
__FUNCTION__));
DHD_TRACE(("Device status from regread = 0x%x\n", dstatus));
DHD_TRACE(("Device status from bit-reconstruction = 0x%x\n",
bcmsdh_get_dstatus((void *)bus->sdh)));
if ((dstatus & STATUS_F2_PKT_AVAILABLE) && (((dstatus & STATUS_UNDERFLOW)) == 0)) {
bus->nextlen = ((dstatus & STATUS_F2_PKT_LEN_MASK) >>
STATUS_F2_PKT_LEN_SHIFT);
/* '0' size with pkt-available interrupt is eqvt to 2048 bytes */
bus->nextlen = (bus->nextlen == 0) ? SPI_MAX_PKT_LEN : bus->nextlen;
if (bus->dwordmode)
bus->nextlen = bus->nextlen << 2;
DHD_TRACE(("Entering %s: length to be read from gSPI = %d\n",
__FUNCTION__, bus->nextlen));
} else {
if (dstatus & STATUS_F2_PKT_AVAILABLE)
DHD_ERROR(("Underflow during %s.\n", __FUNCTION__));
else
DHD_ERROR(("False pkt-available intr.\n"));
*finished = TRUE;
return (maxframes - rxleft);
}
}
#endif /* BCMSPI */
for (rxseq = bus->rx_seq, rxleft = maxframes;
!bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN;
rxseq++, rxleft--) {
#ifdef DHDTHREAD
/* tx more to improve rx performance */
if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) {
dhdsdio_sendpendctl(bus);
} else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) {
dhdsdio_sendfromq(bus, dhd_txbound);
}
#endif /* DHDTHREAD */
/* Handle glomming separately */
if (bus->glom || bus->glomd) {
uint8 cnt;
DHD_GLOM(("%s: calling rxglom: glomd %p, glom %p\n",
__FUNCTION__, bus->glomd, bus->glom));
cnt = dhdsdio_rxglom(bus, rxseq);
DHD_GLOM(("%s: rxglom returned %d\n", __FUNCTION__, cnt));
rxseq += cnt - 1;
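/* rxglom consumed cnt sequence numbers, but the loop increment adds one
 * more, so advance rxseq by cnt - 1 here.
 */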
rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
continue;
}
/* Try doing single read if we can */
if (dhd_readahead && bus->nextlen) {
uint16 nextlen = bus->nextlen;
bus->nextlen = 0;
if (bus->bus == SPI_BUS) {
rdlen = len = nextlen;
}
else {
rdlen = len = nextlen << 4;
/* Pad read to blocksize for efficiency */
if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
pad = bus->blocksize - (rdlen % bus->blocksize);
if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
((rdlen + pad + firstread) < MAX_RX_DATASZ))
rdlen += pad;
} else if (rdlen % DHD_SDALIGN) {
rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
}
}
/* We use bus->rxctl buffer in WinXP for initial control pkt receives.
* Later we use buffer-poll for data as well as control packets.
* This is required because dhd receives full frame in gSPI unlike SDIO.
* After the frame is received we have to distinguish whether it is data
* or non-data frame.
*/
/* Allocate a packet buffer */
dhd_os_sdlock_rxq(bus->dhd);
if (!(pkt = PKTGET(osh, rdlen + DHD_SDALIGN, FALSE))) {
if (bus->bus == SPI_BUS) {
bus->usebufpool = FALSE;
bus->rxctl = bus->rxbuf;
if (dhd_alignctl) {
bus->rxctl += firstread;
if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
bus->rxctl += (DHD_SDALIGN - pad);
bus->rxctl -= firstread;
}
ASSERT(bus->rxctl >= bus->rxbuf);
rxbuf = bus->rxctl;
/* Read the entire frame */
sdret = dhd_bcmsdh_recv_buf(bus,
bcmsdh_cur_sbwad(sdh),
SDIO_FUNC_2,
F2SYNC, rxbuf, rdlen,
NULL, NULL, NULL);
bus->f2rxdata++;
ASSERT(sdret != BCME_PENDING);
#ifdef BCMSPI
if (bcmsdh_get_dstatus((void *)bus->sdh) &
STATUS_UNDERFLOW) {
bus->nextlen = 0;
*finished = TRUE;
DHD_ERROR(("%s: read %d control bytes failed "
"due to spi underflow\n",
__FUNCTION__, rdlen));
/* dhd.rx_ctlerrs is higher level */
bus->rxc_errors++;
dhd_os_sdunlock_rxq(bus->dhd);
continue;
}
#endif /* BCMSPI */
/* Control frame failures need retransmission */
if (sdret < 0) {
DHD_ERROR(("%s: read %d control bytes failed: %d\n",
__FUNCTION__, rdlen, sdret));
/* dhd.rx_ctlerrs is higher level */
bus->rxc_errors++;
dhd_os_sdunlock_rxq(bus->dhd);
dhdsdio_rxfail(bus, TRUE,
(bus->bus == SPI_BUS) ? FALSE : TRUE);
continue;
}
} else {
/* Give up on data, request rtx of events */
DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d "
"expected rxseq %d\n",
__FUNCTION__, len, rdlen, rxseq));
/* Just go try again w/normal header read */
dhd_os_sdunlock_rxq(bus->dhd);
continue;
}
} else {
if (bus->bus == SPI_BUS)
bus->usebufpool = TRUE;
ASSERT(!PKTLINK(pkt));
PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
rxbuf = (uint8 *)PKTDATA(osh, pkt);
/* Read the entire frame */
sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh),
SDIO_FUNC_2,
F2SYNC, rxbuf, rdlen,
pkt, NULL, NULL);
bus->f2rxdata++;
ASSERT(sdret != BCME_PENDING);
#ifdef BCMSPI
if (bcmsdh_get_dstatus((void *)bus->sdh) & STATUS_UNDERFLOW) {
bus->nextlen = 0;
*finished = TRUE;
DHD_ERROR(("%s (nextlen): read %d bytes failed due "
"to spi underflow\n",
__FUNCTION__, rdlen));
PKTFREE(bus->dhd->osh, pkt, FALSE);
bus->dhd->rx_errors++;
dhd_os_sdunlock_rxq(bus->dhd);
continue;
}
#endif /* BCMSPI */
if (sdret < 0) {
DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n",
__FUNCTION__, rdlen, sdret));
PKTFREE(bus->dhd->osh, pkt, FALSE);
bus->dhd->rx_errors++;
dhd_os_sdunlock_rxq(bus->dhd);
/* Force retry w/normal header read. Don't attempt NAK for
* gSPI
*/
dhdsdio_rxfail(bus, TRUE,
(bus->bus == SPI_BUS) ? FALSE : TRUE);
continue;
}
}
dhd_os_sdunlock_rxq(bus->dhd);
/* Now check the header */
bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN_RX);
/* Extract hardware header fields */
len = ltoh16_ua(bus->rxhdr);
check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
/* All zeros means readahead info was bad */
if (!(len|check)) {
DHD_INFO(("%s (nextlen): read zeros in HW header???\n",
__FUNCTION__));
dhd_os_sdlock_rxq(bus->dhd);
PKTFREE2();
dhd_os_sdunlock_rxq(bus->dhd);
GSPI_PR55150_BAILOUT;
continue;
}
/* Validate check bytes */
if ((uint16)~(len^check)) {
DHD_ERROR(("%s (nextlen): HW hdr error: nextlen/len/check"
" 0x%04x/0x%04x/0x%04x\n", __FUNCTION__, nextlen,
len, check));
dhd_os_sdlock_rxq(bus->dhd);
PKTFREE2();
dhd_os_sdunlock_rxq(bus->dhd);
bus->rx_badhdr++;
dhdsdio_rxfail(bus, FALSE, FALSE);
GSPI_PR55150_BAILOUT;
continue;
}
/* Validate frame length */
if (len < SDPCM_HDRLEN_RX) {
DHD_ERROR(("%s (nextlen): HW hdr length invalid: %d\n",
__FUNCTION__, len));
dhd_os_sdlock_rxq(bus->dhd);
PKTFREE2();
dhd_os_sdunlock_rxq(bus->dhd);
GSPI_PR55150_BAILOUT;
continue;
}
/* Check for consistency with readahead info */
#ifdef BCMSPI
if (bus->bus == SPI_BUS) {
if (bus->dwordmode) {
uint16 spilen;
if ((bus->sih->chip == BCM4329_CHIP_ID) &&
(bus->sih->chiprev == 2))
spilen = ROUNDUP(len, 16);
else
spilen = ROUNDUP(len, 4);
len_consistent = (nextlen != spilen);
} else
len_consistent = (nextlen != len);
} else
#endif /* BCMSPI */
len_consistent = (nextlen != (ROUNDUP(len, 16) >> 4));
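/* The readahead length was advertised in 16-byte units, hence the comparison
 * against the actual HW length rounded up to the same granularity.
 */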
if (len_consistent) {
/* Mismatch, force retry w/normal header (may be >4K) */
DHD_ERROR(("%s (nextlen): mismatch, nextlen %d len %d rnd %d; "
"expected rxseq %d\n",
__FUNCTION__, nextlen, len, ROUNDUP(len, 16), rxseq));
dhd_os_sdlock_rxq(bus->dhd);
PKTFREE2();
dhd_os_sdunlock_rxq(bus->dhd);
dhdsdio_rxfail(bus, TRUE, (bus->bus == SPI_BUS) ? FALSE : TRUE);
GSPI_PR55150_BAILOUT;
continue;
}
/* Extract software header fields */
chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
#ifdef BCMSPI
/* Save the readahead length if there is one */
if (bus->bus == SPI_BUS) {
/* Use reconstructed dstatus bits and find out readahead size */
dstatus = bcmsdh_get_dstatus((void *)bus->sdh);
DHD_INFO(("Device status from bit-reconstruction = 0x%x\n",
bcmsdh_get_dstatus((void *)bus->sdh)));
if (dstatus & STATUS_F2_PKT_AVAILABLE) {
bus->nextlen = ((dstatus & STATUS_F2_PKT_LEN_MASK) >>
STATUS_F2_PKT_LEN_SHIFT);
bus->nextlen = (bus->nextlen == 0) ?
SPI_MAX_PKT_LEN : bus->nextlen;
if (bus->dwordmode)
bus->nextlen = bus->nextlen << 2;
DHD_INFO(("readahead len from gSPI = %d \n",
bus->nextlen));
bus->dhd->rx_readahead_cnt++;
} else {
bus->nextlen = 0;
*finished = TRUE;
}
} else {
#endif /* BCMSPI */
bus->nextlen =
bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
DHD_INFO(("%s (nextlen): got frame w/nextlen too large"
" (%d), seq %d\n", __FUNCTION__, bus->nextlen,
seq));
bus->nextlen = 0;
}
bus->dhd->rx_readahead_cnt++;
#ifdef BCMSPI
}
#endif /* BCMSPI */
/* Handle Flow Control */
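/* fcbits is a per-precedence flow-control bitmap: bits newly set relative
 * to bus->flowcontrol count as XOFF events, newly cleared bits as XON.
 */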
fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
delta = 0;
if (~bus->flowcontrol & fcbits) {
bus->fc_xoff++;
delta = 1;
}
if (bus->flowcontrol & ~fcbits) {
bus->fc_xon++;
delta = 1;
}
if (delta) {
bus->fc_rcvd++;
bus->flowcontrol = fcbits;
}
/* Check and update sequence number */
if (rxseq != seq) {
DHD_INFO(("%s (nextlen): rx_seq %d, expected %d\n",
__FUNCTION__, seq, rxseq));
bus->rx_badseq++;
rxseq = seq;
}
/* Check window for sanity */
if ((uint8)(txmax - bus->tx_seq) > 0x40) {
#ifdef BCMSPI
if ((bus->bus == SPI_BUS) && !(dstatus & STATUS_F2_RX_READY)) {
DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
__FUNCTION__, txmax, bus->tx_seq));
txmax = bus->tx_seq + 2;
} else {
#endif /* BCMSPI */
DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
__FUNCTION__, txmax, bus->tx_seq));
txmax = bus->tx_max;
#ifdef BCMSPI
}
#endif /* BCMSPI */
}
bus->tx_max = txmax;
#ifdef DHD_DEBUG
if (DHD_BYTES_ON() && DHD_DATA_ON()) {
prhex("Rx Data", rxbuf, len);
} else if (DHD_HDRS_ON()) {
prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN_RX);
}
#endif
if (chan == SDPCM_CONTROL_CHANNEL) {
if (bus->bus == SPI_BUS) {
dhdsdio_read_control(bus, rxbuf, len, doff);
if (bus->usebufpool) {
dhd_os_sdlock_rxq(bus->dhd);
PKTFREE(bus->dhd->osh, pkt, FALSE);
dhd_os_sdunlock_rxq(bus->dhd);
}
continue;
} else {
DHD_ERROR(("%s (nextlen): readahead on control"
" packet %d?\n", __FUNCTION__, seq));
/* Force retry w/normal header read */
bus->nextlen = 0;
dhdsdio_rxfail(bus, FALSE, TRUE);
dhd_os_sdlock_rxq(bus->dhd);
PKTFREE2();
dhd_os_sdunlock_rxq(bus->dhd);
continue;
}
}
if ((bus->bus == SPI_BUS) && !bus->usebufpool) {
DHD_ERROR(("Received %d bytes on %d channel. Running out of "
"rx pktbuf's or not yet malloced.\n", len, chan));
continue;
}
/* Validate data offset */
if ((doff < SDPCM_HDRLEN_RX) || (doff > len)) {
DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n",
__FUNCTION__, doff, len, SDPCM_HDRLEN_RX));
dhd_os_sdlock_rxq(bus->dhd);
PKTFREE2();
dhd_os_sdunlock_rxq(bus->dhd);
ASSERT(0);
dhdsdio_rxfail(bus, FALSE, FALSE);
continue;
}
/* All done with this one -- now deliver the packet */
goto deliver;
}
/* gSPI frames should not be handled in fractions */
if (bus->bus == SPI_BUS) {
break;
}
/* Read frame header (hardware and software) */
sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
bus->rxhdr, firstread, NULL, NULL, NULL);
bus->f2rxhdrs++;
ASSERT(sdret != BCME_PENDING);
if (sdret < 0) {
DHD_ERROR(("%s: RXHEADER FAILED: %d\n", __FUNCTION__, sdret));
bus->rx_hdrfail++;
dhdsdio_rxfail(bus, TRUE, TRUE);
continue;
}
#ifdef DHD_DEBUG
if (DHD_BYTES_ON() || DHD_HDRS_ON()) {
prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN_RX);
}
#endif
/* Extract hardware header fields */
len = ltoh16_ua(bus->rxhdr);
check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
/* All zeros means no more frames */
if (!(len|check)) {
*finished = TRUE;
break;
}
/* Validate check bytes */
if ((uint16)~(len^check)) {
DHD_ERROR(("%s: HW hdr error: len/check 0x%04x/0x%04x\n",
__FUNCTION__, len, check));
bus->rx_badhdr++;
dhdsdio_rxfail(bus, FALSE, FALSE);
continue;
}
/* Validate frame length */
if (len < SDPCM_HDRLEN_RX) {
DHD_ERROR(("%s: HW hdr length invalid: %d\n", __FUNCTION__, len));
continue;
}
/* Extract software header fields */
chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
/* Validate data offset */
if ((doff < SDPCM_HDRLEN_RX) || (doff > len)) {
DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d seq %d\n",
__FUNCTION__, doff, len, SDPCM_HDRLEN_RX, seq));
bus->rx_badhdr++;
ASSERT(0);
dhdsdio_rxfail(bus, FALSE, FALSE);
continue;
}
/* Save the readahead length if there is one */
bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
DHD_INFO(("%s (nextlen): got frame w/nextlen too large (%d), seq %d\n",
__FUNCTION__, bus->nextlen, seq));
bus->nextlen = 0;
}
/* Handle Flow Control */
fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
delta = 0;
if (~bus->flowcontrol & fcbits) {
bus->fc_xoff++;
delta = 1;
}
if (bus->flowcontrol & ~fcbits) {
bus->fc_xon++;
delta = 1;
}
if (delta) {
bus->fc_rcvd++;
bus->flowcontrol = fcbits;
}
/* Check and update sequence number */
if (rxseq != seq) {
DHD_INFO(("%s: rx_seq %d, expected %d\n", __FUNCTION__, seq, rxseq));
bus->rx_badseq++;
rxseq = seq;
}
/* Check window for sanity */
if ((uint8)(txmax - bus->tx_seq) > 0x40) {
DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
__FUNCTION__, txmax, bus->tx_seq));
txmax = bus->tx_max;
}
bus->tx_max = txmax;
/* Call a separate function for control frames */
if (chan == SDPCM_CONTROL_CHANNEL) {
dhdsdio_read_control(bus, bus->rxhdr, len, doff);
continue;
}
ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL) ||
(chan == SDPCM_TEST_CHANNEL) || (chan == SDPCM_GLOM_CHANNEL));
/* Length to read */
rdlen = (len > firstread) ? (len - firstread) : 0;
/* May pad read to blocksize for efficiency */
if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
pad = bus->blocksize - (rdlen % bus->blocksize);
if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
((rdlen + pad + firstread) < MAX_RX_DATASZ))
rdlen += pad;
} else if (rdlen % DHD_SDALIGN) {
rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
}
/* Satisfy length-alignment requirements */
if (forcealign && (rdlen & (ALIGNMENT - 1)))
rdlen = ROUNDUP(rdlen, ALIGNMENT);
if ((rdlen + firstread) > MAX_RX_DATASZ) {
/* Too long -- skip this frame */
DHD_ERROR(("%s: too long: len %d rdlen %d\n", __FUNCTION__, len, rdlen));
bus->dhd->rx_errors++; bus->rx_toolong++;
dhdsdio_rxfail(bus, FALSE, FALSE);
continue;
}
dhd_os_sdlock_rxq(bus->dhd);
if (!(pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), FALSE))) {
/* Give up on data, request rtx of events */
DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n",
__FUNCTION__, rdlen, chan));
bus->dhd->rx_dropped++;
dhd_os_sdunlock_rxq(bus->dhd);
dhdsdio_rxfail(bus, FALSE, RETRYCHAN(chan));
continue;
}
dhd_os_sdunlock_rxq(bus->dhd);
ASSERT(!PKTLINK(pkt));
/* Leave room for what we already read, and align remainder */
ASSERT(firstread < (PKTLEN(osh, pkt)));
PKTPULL(osh, pkt, firstread);
PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
/* Read the remaining frame data */
sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
((uint8 *)PKTDATA(osh, pkt)), rdlen, pkt, NULL, NULL);
bus->f2rxdata++;
ASSERT(sdret != BCME_PENDING);
if (sdret < 0) {
DHD_ERROR(("%s: read %d %s bytes failed: %d\n", __FUNCTION__, rdlen,
((chan == SDPCM_EVENT_CHANNEL) ? "event" :
((chan == SDPCM_DATA_CHANNEL) ? "data" : "test")), sdret));
dhd_os_sdlock_rxq(bus->dhd);
PKTFREE(bus->dhd->osh, pkt, FALSE);
dhd_os_sdunlock_rxq(bus->dhd);
bus->dhd->rx_errors++;
dhdsdio_rxfail(bus, TRUE, RETRYCHAN(chan));
continue;
}
/* Copy the already-read portion */
PKTPUSH(osh, pkt, firstread);
bcopy(bus->rxhdr, PKTDATA(osh, pkt), firstread);
#ifdef DHD_DEBUG
if (DHD_BYTES_ON() && DHD_DATA_ON()) {
prhex("Rx Data", PKTDATA(osh, pkt), len);
}
#endif
deliver:
/* Save superframe descriptor and allocate packet frame */
if (chan == SDPCM_GLOM_CHANNEL) {
if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
DHD_GLOM(("%s: got glom descriptor, %d bytes:\n",
__FUNCTION__, len));
#ifdef DHD_DEBUG
if (DHD_GLOM_ON()) {
prhex("Glom Data", PKTDATA(osh, pkt), len);
}
#endif
PKTSETLEN(osh, pkt, len);
ASSERT(doff == SDPCM_HDRLEN_RX);
PKTPULL(osh, pkt, SDPCM_HDRLEN_RX);
bus->glomd = pkt;
} else {
DHD_ERROR(("%s: glom superframe w/o descriptor!\n", __FUNCTION__));
dhdsdio_rxfail(bus, FALSE, FALSE);
}
continue;
}
/* Fill in packet len and prio, deliver upward */
PKTSETLEN(osh, pkt, len);
PKTPULL(osh, pkt, doff);
#ifdef SDTEST
/* Test channel packets are processed separately */
if (chan == SDPCM_TEST_CHANNEL) {
dhdsdio_testrcv(bus, pkt, seq);
continue;
}
#endif /* SDTEST */
if (PKTLEN(osh, pkt) == 0) {
dhd_os_sdlock_rxq(bus->dhd);
PKTFREE(bus->dhd->osh, pkt, FALSE);
dhd_os_sdunlock_rxq(bus->dhd);
continue;
} else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt, reorder_info_buf,
&reorder_info_len) != 0) {
DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
dhd_os_sdlock_rxq(bus->dhd);
PKTFREE(bus->dhd->osh, pkt, FALSE);
dhd_os_sdunlock_rxq(bus->dhd);
bus->dhd->rx_errors++;
continue;
}
if (reorder_info_len) {
/* Reordering info from the firmware */
dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf, reorder_info_len,
&pkt, &pkt_count);
if (pkt_count == 0)
continue;
}
else
pkt_count = 1;
/* Unlock during rx call */
dhd_os_sdunlock(bus->dhd);
dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, chan);
dhd_os_sdlock(bus->dhd);
}
rxcount = maxframes - rxleft;
#ifdef DHD_DEBUG
/* Message if we hit the limit */
if (!rxleft && !sdtest)
DHD_DATA(("%s: hit rx limit of %d frames\n", __FUNCTION__, maxframes));
else
#endif /* DHD_DEBUG */
DHD_DATA(("%s: processed %d frames\n", __FUNCTION__, rxcount));
/* Back off rxseq if awaiting rtx, update rx_seq */
if (bus->rxskip)
rxseq--;
bus->rx_seq = rxseq;
if (bus->reqbussleep)
{
dhdsdio_bussleep(bus, TRUE);
bus->reqbussleep = FALSE;
}
bus->readframes = FALSE;
return rxcount;
}
static uint32
dhdsdio_hostmail(dhd_bus_t *bus)
{
sdpcmd_regs_t *regs = bus->regs;
uint32 intstatus = 0;
uint32 hmb_data;
uint8 fcbits;
uint retries = 0;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
/* Read mailbox data and ack that we did so */
R_SDREG(hmb_data, &regs->tohostmailboxdata, retries);
if (retries <= retry_limit)
W_SDREG(SMB_INT_ACK, &regs->tosbmailbox, retries);
bus->f1regdata += 2;
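/* hmb_data multiplexes event flags (NAK handled, device/firmware ready,
 * flow control, firmware halt) with the dongle's SDPCM protocol version,
 * carried in HMB_DATA_VERSION_MASK on DEVREADY/FWREADY.
 */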
/* Dongle recomposed rx frames, accept them again */
if (hmb_data & HMB_DATA_NAKHANDLED) {
DHD_INFO(("Dongle reports NAK handled, expect rtx of %d\n", bus->rx_seq));
if (!bus->rxskip) {
DHD_ERROR(("%s: unexpected NAKHANDLED!\n", __FUNCTION__));
}
bus->rxskip = FALSE;
intstatus |= FRAME_AVAIL_MASK(bus);
}
/*
* DEVREADY does not occur with gSPI.
*/
if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
bus->sdpcm_ver = (hmb_data & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT;
if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
DHD_ERROR(("Version mismatch, dongle reports %d, expecting %d\n",
bus->sdpcm_ver, SDPCM_PROT_VERSION));
else
DHD_INFO(("Dongle ready, protocol version %d\n", bus->sdpcm_ver));
#ifndef BCMSPI
/* make sure corecontrol is set properly for SDIO_DEVICE_RXDATAINT_MODE_1 */
if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
(bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1)) {
uint32 val;
val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
val &= ~CC_XMTDATAAVAIL_MODE;
val |= CC_XMTDATAAVAIL_CTRL;
W_REG(bus->dhd->osh, &bus->regs->corecontrol, val);
val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
}
#endif /* BCMSPI */
#ifdef DHD_DEBUG
/* Retrieve console state address now that firmware should have updated it */
{
sdpcm_shared_t shared;
if (dhdsdio_readshared(bus, &shared) == 0)
bus->console_addr = shared.console_addr;
}
#endif /* DHD_DEBUG */
}
/*
 * Flow control has been moved into the RX headers, so this out-of-band
 * method is no longer used. Leave it here to remain backward compatible
 * with older dongles.
 */
if (hmb_data & HMB_DATA_FC) {
fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT;
if (fcbits & ~bus->flowcontrol)
bus->fc_xoff++;
if (bus->flowcontrol & ~fcbits)
bus->fc_xon++;
bus->fc_rcvd++;
bus->flowcontrol = fcbits;
}
#ifdef DHD_DEBUG
/* At least print a message if FW halted */
if (hmb_data & HMB_DATA_FWHALT) {
DHD_ERROR(("INTERNAL ERROR: FIRMWARE HALTED : set BUS DOWN\n"));
dhdsdio_checkdied(bus, NULL, 0);
bus->dhd->busstate = DHD_BUS_DOWN;
}
#endif /* DHD_DEBUG */
/* Shouldn't be any others */
if (hmb_data & ~(HMB_DATA_DEVREADY |
HMB_DATA_FWHALT |
HMB_DATA_NAKHANDLED |
HMB_DATA_FC |
HMB_DATA_FWREADY |
HMB_DATA_FCDATA_MASK |
HMB_DATA_VERSION_MASK)) {
DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data));
}
return intstatus;
}
static bool
dhdsdio_dpc(dhd_bus_t *bus)
{
bcmsdh_info_t *sdh = bus->sdh;
sdpcmd_regs_t *regs = bus->regs;
uint32 intstatus, newstatus = 0;
uint retries = 0;
uint rxlimit = dhd_rxbound; /* Rx frames to read before resched */
uint txlimit = dhd_txbound; /* Tx frames to send before resched */
uint framecnt = 0; /* Temporary counter of tx/rx frames */
bool rxdone = TRUE; /* Flag for no more read data */
bool resched = FALSE; /* Flag indicating resched wanted */
#if defined(CUSTOMER_HW4)
bool is_resched_by_readframe = FALSE;
#endif
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
dhd_os_sdlock(bus->dhd);
if (bus->dhd->busstate == DHD_BUS_DOWN) {
DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
bus->intstatus = 0;
dhd_os_sdunlock(bus->dhd);
return FALSE;
}
/* Start with leftover status bits */
intstatus = bus->intstatus;
if (!SLPAUTO_ENAB(bus) && !KSO_ENAB(bus)) {
DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
goto exit;
}
/* If waiting for HTAVAIL, check status */
if (!SLPAUTO_ENAB(bus) && (bus->clkstate == CLK_PENDING)) {
int err;
uint8 clkctl, devctl = 0;
#ifdef DHD_DEBUG
/* Check for inconsistent device control */
devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
if (err) {
DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err));
bus->dhd->busstate = DHD_BUS_DOWN;
} else {
ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY);
}
#endif /* DHD_DEBUG */
/* Read CSR, if clock on switch to AVAIL, else ignore */
clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err) {
DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err));
bus->dhd->busstate = DHD_BUS_DOWN;
}
DHD_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl, clkctl));
if (SBSDIO_HTAV(clkctl)) {
devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
if (err) {
DHD_ERROR(("%s: error reading DEVCTL: %d\n",
__FUNCTION__, err));
bus->dhd->busstate = DHD_BUS_DOWN;
}
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
if (err) {
DHD_ERROR(("%s: error writing DEVCTL: %d\n",
__FUNCTION__, err));
bus->dhd->busstate = DHD_BUS_DOWN;
}
bus->clkstate = CLK_AVAIL;
} else {
goto clkwait;
}
}
BUS_WAKE(bus);
/* Make sure backplane clock is on */
dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
if (bus->clkstate != CLK_AVAIL)
goto clkwait;
/* Pending interrupt indicates new device status */
if (bus->ipend) {
bus->ipend = FALSE;
R_SDREG(newstatus, &regs->intstatus, retries);
bus->f1regdata++;
if (bcmsdh_regfail(bus->sdh))
newstatus = 0;
newstatus &= bus->hostintmask;
bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
if (newstatus) {
bus->f1regdata++;
#ifndef BCMSPI
if ((bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_0) &&
(newstatus == I_XMTDATA_AVAIL)) {
}
else
#endif /* BCMSPI */
W_SDREG(newstatus, &regs->intstatus, retries);
}
}
/* Merge new bits with previous */
intstatus |= newstatus;
bus->intstatus = 0;
/* Handle flow-control change: read new state in case our ack
* crossed another change interrupt. If change still set, assume
* FC ON for safety, let next loop through do the debounce.
*/
if (intstatus & I_HMB_FC_CHANGE) {
intstatus &= ~I_HMB_FC_CHANGE;
W_SDREG(I_HMB_FC_CHANGE, &regs->intstatus, retries);
R_SDREG(newstatus, &regs->intstatus, retries);
bus->f1regdata += 2;
bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
intstatus |= (newstatus & bus->hostintmask);
}
/* Just being here means nothing more to do for chipactive */
if (intstatus & I_CHIPACTIVE) {
/* ASSERT(bus->clkstate == CLK_AVAIL); */
intstatus &= ~I_CHIPACTIVE;
}
/* Handle host mailbox indication */
if (intstatus & I_HMB_HOST_INT) {
intstatus &= ~I_HMB_HOST_INT;
intstatus |= dhdsdio_hostmail(bus);
}
/* Generally don't ask for these, can get CRC errors... */
if (intstatus & I_WR_OOSYNC) {
DHD_ERROR(("Dongle reports WR_OOSYNC\n"));
intstatus &= ~I_WR_OOSYNC;
}
if (intstatus & I_RD_OOSYNC) {
DHD_ERROR(("Dongle reports RD_OOSYNC\n"));
intstatus &= ~I_RD_OOSYNC;
}
if (intstatus & I_SBINT) {
DHD_ERROR(("Dongle reports SBINT\n"));
intstatus &= ~I_SBINT;
}
/* Would be active due to wake-wlan in gSPI */
if (intstatus & I_CHIPACTIVE) {
DHD_INFO(("Dongle reports CHIPACTIVE\n"));
intstatus &= ~I_CHIPACTIVE;
}
/* Ignore frame indications if rxskip is set */
if (bus->rxskip) {
intstatus &= ~FRAME_AVAIL_MASK(bus);
}
/* On frame indication, read available frames */
if (PKT_AVAILABLE(bus, intstatus)) {
framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone);
if (rxdone || bus->rxskip)
intstatus &= ~FRAME_AVAIL_MASK(bus);
rxlimit -= MIN(framecnt, rxlimit);
}
/* Keep still-pending events for next scheduling */
bus->intstatus = intstatus;
clkwait:
/* Re-enable interrupts to detect new device events (mailbox, rx frame)
* or clock availability. (Allows tx loop to check ipend if desired.)
* (Unless register access seems hosed, as we may not be able to ACK...)
*/
if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh)) {
DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n",
__FUNCTION__, rxdone, framecnt));
bus->intdis = FALSE;
#if defined(OOB_INTR_ONLY)
bcmsdh_oob_intr_set(1);
#endif /* defined(OOB_INTR_ONLY) */
bcmsdh_intr_enable(sdh);
#ifdef BCMSPI_ANDROID
if (*dhd_spi_lockcount == 0)
bcmsdh_oob_intr_set(1);
#endif /* BCMSPI_ANDROID */
}
#if defined(OOB_INTR_ONLY) && !defined(HW_OOB)
/* In case of SW-OOB (edge trigger),
 * check the interrupt status in the dongle again after enabling the irq on
 * the host, and reschedule the dpc if an interrupt is pending in the dongle.
 * There is a chance of missing an OOB interrupt while the irq is disabled on
 * the host. No need to do this with HW-OOB (level trigger).
 */
R_SDREG(newstatus, &regs->intstatus, retries);
if (bcmsdh_regfail(bus->sdh))
newstatus = 0;
if (newstatus & bus->hostintmask) {
bus->ipend = TRUE;
resched = TRUE;
}
#endif /* defined(OOB_INTR_ONLY) && !defined(HW_OOB) */
#ifdef PROP_TXSTATUS
dhd_wlfc_trigger_pktcommit(bus->dhd);
#endif
if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL))
dhdsdio_sendpendctl(bus);
/* Send queued frames (limit 1 if rx may still be pending) */
else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) {
framecnt = rxdone ? txlimit : MIN(txlimit, dhd_txminmax);
framecnt = dhdsdio_sendfromq(bus, framecnt);
txlimit -= framecnt;
}
/* Resched the DPC if ctrl cmd is pending on bus credit */
if (bus->ctrl_frame_stat) {
#ifdef CUSTOMER_HW10
OSL_SLEEP(50);
#endif
resched = TRUE;
}
/* Resched if events or tx frames are pending, else await next interrupt */
/* On failed register access, all bets are off: no resched or interrupts */
if ((bus->dhd->busstate == DHD_BUS_DOWN) || bcmsdh_regfail(sdh)) {
if ((bus->sih && bus->sih->buscorerev >= 12) && !(dhdsdio_sleepcsr_get(bus) &
SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
/* Bus failed because of KSO */
DHD_ERROR(("%s: Bus failed due to KSO\n", __FUNCTION__));
bus->kso = FALSE;
} else {
DHD_ERROR(("%s: failed backplane access over SDIO, halting operation\n",
__FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
bus->intstatus = 0;
}
} else if (bus->clkstate == CLK_PENDING) {
/* Awaiting I_CHIPACTIVE; don't resched */
} else if (bus->intstatus || bus->ipend ||
(!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) ||
PKT_AVAILABLE(bus, bus->intstatus)) { /* Read multiple frames */
resched = TRUE;
}
bus->dpc_sched = resched;
/* If we're done for now, turn off clock request. */
if ((bus->idletime == DHD_IDLE_IMMEDIATE) && (bus->clkstate != CLK_PENDING)) {
bus->activity = FALSE;
dhdsdio_clkctl(bus, CLK_NONE, FALSE);
}
exit:
if (!resched && dhd_dpcpoll) {
if (dhdsdio_readframes(bus, dhd_rxbound, &rxdone) != 0) {
resched = TRUE;
#if defined(CUSTOMER_HW4)
is_resched_by_readframe = TRUE;
#endif
}
}
dhd_os_sdunlock(bus->dhd);
#if defined(CUSTOMER_HW4)
if (bus->dhd->dhd_bug_on) {
DHD_ERROR(("%s: resched = %d ctrl_frame_stat = %d intstatus 0x%08x"
" ipend = %d pktq_mlen = %d is_resched_by_readframe = %d \n",
__FUNCTION__, resched, bus->ctrl_frame_stat,
bus->intstatus, bus->ipend,
pktq_mlen(&bus->txq, ~bus->flowcontrol), is_resched_by_readframe));
bus->dhd->dhd_bug_on = FALSE;
}
#endif
return resched;
}
bool
dhd_bus_dpc(struct dhd_bus *bus)
{
bool resched;
/* Call the DPC directly. */
DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
resched = dhdsdio_dpc(bus);
return resched;
}
void
dhdsdio_isr(void *arg)
{
dhd_bus_t *bus = (dhd_bus_t*)arg;
bcmsdh_info_t *sdh;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
if (!bus) {
DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__));
return;
}
sdh = bus->sdh;
if (bus->dhd->busstate == DHD_BUS_DOWN) {
DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
return;
}
/* Count the interrupt call */
bus->intrcount++;
bus->ipend = TRUE;
/* Shouldn't get this interrupt if we're sleeping? */
if (!SLPAUTO_ENAB(bus)) {
if (bus->sleeping) {
DHD_ERROR(("INTERRUPT WHILE SLEEPING??\n"));
return;
} else if (!KSO_ENAB(bus)) {
DHD_ERROR(("ISR in devsleep 1\n"));
}
}
/* Disable additional interrupts (is this needed now)? */
if (bus->intr) {
DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
} else {
DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n"));
}
#ifdef BCMSPI_ANDROID
bcmsdh_oob_intr_set(0);
#endif /* BCMSPI_ANDROID */
bcmsdh_intr_disable(sdh);
bus->intdis = TRUE;
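/* With SDIO_ISR_THREAD the DPC runs synchronously here under a wake lock;
 * otherwise it is only flagged and left for the scheduler to dispatch.
 */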
#if defined(SDIO_ISR_THREAD)
DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
DHD_OS_WAKE_LOCK(bus->dhd);
dhdsdio_dpc(bus);
DHD_OS_WAKE_UNLOCK(bus->dhd);
#else
bus->dpc_sched = TRUE;
dhd_sched_dpc(bus->dhd);
#endif
}
#ifdef SDTEST
static void
dhdsdio_pktgen_init(dhd_bus_t *bus)
{
/* Default to specified length, or full range */
if (dhd_pktgen_len) {
bus->pktgen_maxlen = MIN(dhd_pktgen_len, MAX_PKTGEN_LEN);
bus->pktgen_minlen = bus->pktgen_maxlen;
} else {
bus->pktgen_maxlen = MAX_PKTGEN_LEN;
bus->pktgen_minlen = 0;
}
bus->pktgen_len = (uint16)bus->pktgen_minlen;
/* Default to per-watchdog burst with 10s print time */
bus->pktgen_freq = 1;
bus->pktgen_print = dhd_watchdog_ms ? (10000 / dhd_watchdog_ms) : 0;
bus->pktgen_count = (dhd_pktgen * dhd_watchdog_ms + 999) / 1000;
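/* The +999 rounds the division by 1000 up, so any nonzero rate still
 * produces at least one packet per watchdog tick.
 */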
/* Default to echo mode */
bus->pktgen_mode = DHD_PKTGEN_ECHO;
bus->pktgen_stop = 1;
}
static void
dhdsdio_pktgen(dhd_bus_t *bus)
{
void *pkt;
uint8 *data;
uint pktcount;
uint fillbyte;
osl_t *osh = bus->dhd->osh;
uint16 len;
ulong time_lapse;
uint sent_pkts;
uint rcvd_pkts;
/* Display current count if appropriate */
if (bus->pktgen_print && (++bus->pktgen_ptick >= bus->pktgen_print)) {
bus->pktgen_ptick = 0;
printf("%s: send attempts %d, rcvd %d, errors %d\n",
__FUNCTION__, bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
/* Print throughput stats only for constant length packet runs */
if (bus->pktgen_minlen == bus->pktgen_maxlen) {
time_lapse = jiffies - bus->pktgen_prev_time;
bus->pktgen_prev_time = jiffies;
sent_pkts = bus->pktgen_sent - bus->pktgen_prev_sent;
bus->pktgen_prev_sent = bus->pktgen_sent;
rcvd_pkts = bus->pktgen_rcvd - bus->pktgen_prev_rcvd;
bus->pktgen_prev_rcvd = bus->pktgen_rcvd;
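/* pktgen_len is in bytes and time_lapse in jiffies: bytes per millisecond
 * is numerically kilobytes per second, and multiplying by 8 gives kbps.
 */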
printf("%s: Tx Throughput %d kbps, Rx Throughput %d kbps\n",
__FUNCTION__,
(sent_pkts * bus->pktgen_len / jiffies_to_msecs(time_lapse)) * 8,
(rcvd_pkts * bus->pktgen_len / jiffies_to_msecs(time_lapse)) * 8);
}
}
/* For recv mode, just make sure dongle has started sending */
if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
if (bus->pktgen_rcv_state == PKTGEN_RCV_IDLE) {
bus->pktgen_rcv_state = PKTGEN_RCV_ONGOING;
dhdsdio_sdtest_set(bus, bus->pktgen_total);
}
return;
}
/* Otherwise, generate or request the specified number of packets */
for (pktcount = 0; pktcount < bus->pktgen_count; pktcount++) {
/* Stop if total has been reached */
if (bus->pktgen_total && (bus->pktgen_sent >= bus->pktgen_total)) {
bus->pktgen_count = 0;
break;
}
/* Allocate an appropriate-sized packet */
if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) {
len = SDPCM_TEST_PKT_CNT_FLD_LEN;
} else {
len = bus->pktgen_len;
}
if (!(pkt = PKTGET(osh, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN),
TRUE))) {
DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
break;
}
PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
/* Write test header cmd and extra based on mode */
switch (bus->pktgen_mode) {
case DHD_PKTGEN_ECHO:
*data++ = SDPCM_TEST_ECHOREQ;
*data++ = (uint8)bus->pktgen_sent;
break;
case DHD_PKTGEN_SEND:
*data++ = SDPCM_TEST_DISCARD;
*data++ = (uint8)bus->pktgen_sent;
break;
case DHD_PKTGEN_RXBURST:
*data++ = SDPCM_TEST_BURST;
*data++ = (uint8)bus->pktgen_count; /* Just for backward compatibility */
break;
default:
DHD_ERROR(("Unrecognized pktgen mode %d\n", bus->pktgen_mode));
PKTFREE(osh, pkt, TRUE);
bus->pktgen_count = 0;
return;
}
/* Write test header length field */
*data++ = (bus->pktgen_len >> 0);
*data++ = (bus->pktgen_len >> 8);
/* Write the frame count in a 4-byte field adjacent to the SDPCM test header for
* burst mode
*/
if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) {
*data++ = (uint8)(bus->pktgen_count >> 0);
*data++ = (uint8)(bus->pktgen_count >> 8);
*data++ = (uint8)(bus->pktgen_count >> 16);
*data++ = (uint8)(bus->pktgen_count >> 24);
} else {
/* Then fill in the remainder -- N/A for burst */
for (fillbyte = 0; fillbyte < len; fillbyte++)
*data++ = SDPCM_TEST_FILL(fillbyte, (uint8)bus->pktgen_sent);
}
#ifdef DHD_DEBUG
if (DHD_BYTES_ON() && DHD_DATA_ON()) {
data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
prhex("dhdsdio_pktgen: Tx Data", data, PKTLEN(osh, pkt) - SDPCM_HDRLEN);
}
#endif
/* Send it */
if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE, FALSE)) {
bus->pktgen_fail++;
if (bus->pktgen_stop && bus->pktgen_stop == bus->pktgen_fail)
bus->pktgen_count = 0;
}
bus->pktgen_sent++;
/* Bump length if not fixed, wrap at max */
if (++bus->pktgen_len > bus->pktgen_maxlen)
bus->pktgen_len = (uint16)bus->pktgen_minlen;
/* Special case for burst mode: just send one request! */
if (bus->pktgen_mode == DHD_PKTGEN_RXBURST)
break;
}
}
static void
dhdsdio_sdtest_set(dhd_bus_t *bus, uint count)
{
void *pkt;
uint8 *data;
osl_t *osh = bus->dhd->osh;
/* Allocate the packet */
if (!(pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN +
SDPCM_TEST_PKT_CNT_FLD_LEN + DHD_SDALIGN, TRUE))) {
DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
return;
}
PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN +
SDPCM_TEST_PKT_CNT_FLD_LEN), DHD_SDALIGN);
data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
/* Fill in the test header */
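/* Layout: cmd byte (SDPCM_TEST_SEND), start/stop flag, 16-bit LE max
 * packet length, then a 32-bit LE packet count.
 */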
*data++ = SDPCM_TEST_SEND;
*data++ = (count > 0)?TRUE:FALSE;
*data++ = (bus->pktgen_maxlen >> 0);
*data++ = (bus->pktgen_maxlen >> 8);
*data++ = (uint8)(count >> 0);
*data++ = (uint8)(count >> 8);
*data++ = (uint8)(count >> 16);
*data++ = (uint8)(count >> 24);
/* Send it */
if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE, FALSE))
bus->pktgen_fail++;
}
static void
dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
{
osl_t *osh = bus->dhd->osh;
uint8 *data;
uint pktlen;
uint8 cmd;
uint8 extra;
uint16 len;
uint16 offset;
/* Check for min length */
if ((pktlen = PKTLEN(osh, pkt)) < SDPCM_TEST_HDRLEN) {
DHD_ERROR(("dhdsdio_restrcv: toss runt frame, pktlen %d\n", pktlen));
PKTFREE(osh, pkt, FALSE);
return;
}
/* Extract header fields */
data = PKTDATA(osh, pkt);
cmd = *data++;
extra = *data++;
len = *data++; len += *data++ << 8;
DHD_TRACE(("%s:cmd:%d, xtra:%d,len:%d\n", __FUNCTION__, cmd, extra, len));
/* Check length for relevant commands */
if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ || cmd == SDPCM_TEST_ECHORSP) {
if (pktlen != len + SDPCM_TEST_HDRLEN) {
DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, pktlen %d seq %d"
" cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
PKTFREE(osh, pkt, FALSE);
return;
}
}
/* Process as per command */
switch (cmd) {
case SDPCM_TEST_ECHOREQ:
/* Rx->Tx turnaround ok (even on NDIS w/current implementation) */
*(uint8 *)(PKTDATA(osh, pkt)) = SDPCM_TEST_ECHORSP;
if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE, FALSE) == 0) {
bus->pktgen_sent++;
} else {
bus->pktgen_fail++;
PKTFREE(osh, pkt, FALSE);
}
bus->pktgen_rcvd++;
break;
case SDPCM_TEST_ECHORSP:
if (bus->ext_loop) {
PKTFREE(osh, pkt, FALSE);
bus->pktgen_rcvd++;
break;
}
for (offset = 0; offset < len; offset++, data++) {
if (*data != SDPCM_TEST_FILL(offset, extra)) {
DHD_ERROR(("dhdsdio_testrcv: echo data mismatch: "
"offset %d (len %d) expect 0x%02x rcvd 0x%02x\n",
offset, len, SDPCM_TEST_FILL(offset, extra), *data));
break;
}
}
PKTFREE(osh, pkt, FALSE);
bus->pktgen_rcvd++;
break;
case SDPCM_TEST_DISCARD:
{
int i = 0;
uint8 *prn = data;
uint8 testval = extra;
for (i = 0; i < len; i++) {
if (*prn != testval) {
DHD_ERROR(("DIErr@Pkt#:%d,Ix:%d, expected:0x%x, got:0x%x\n",
bus->pktgen_rcvd_rcvsession, i, testval, *prn));
}
/* Advance through the fill pattern on every byte, not just on mismatch */
prn++; testval++;
}
}
PKTFREE(osh, pkt, FALSE);
bus->pktgen_rcvd++;
break;
case SDPCM_TEST_BURST:
case SDPCM_TEST_SEND:
default:
DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, pktlen %d seq %d"
" cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
PKTFREE(osh, pkt, FALSE);
break;
}
/* For recv mode, stop at limit (and tell dongle to stop sending) */
if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
if (bus->pktgen_rcv_state != PKTGEN_RCV_IDLE) {
bus->pktgen_rcvd_rcvsession++;
if (bus->pktgen_total &&
(bus->pktgen_rcvd_rcvsession >= bus->pktgen_total)) {
bus->pktgen_count = 0;
DHD_ERROR(("Pktgen:rcv test complete!\n"));
bus->pktgen_rcv_state = PKTGEN_RCV_IDLE;
dhdsdio_sdtest_set(bus, FALSE);
bus->pktgen_rcvd_rcvsession = 0;
}
}
}
}
#endif /* SDTEST */
extern void
dhd_disable_intr(dhd_pub_t *dhdp)
{
dhd_bus_t *bus;
bus = dhdp->bus;
bcmsdh_intr_disable(bus->sdh);
}
extern bool
dhd_bus_watchdog(dhd_pub_t *dhdp)
{
dhd_bus_t *bus;
DHD_TIMER(("%s: Enter\n", __FUNCTION__));
bus = dhdp->bus;
if (bus->dhd->dongle_reset)
return FALSE;
/* Ignore the timer if simulating bus down */
if (!SLPAUTO_ENAB(bus) && bus->sleeping)
return FALSE;
if (dhdp->busstate == DHD_BUS_DOWN)
return FALSE;
/* Poll period: check device if appropriate. */
if (!SLPAUTO_ENAB(bus) && (bus->poll && (++bus->polltick >= bus->pollrate))) {
uint32 intstatus = 0;
/* Reset poll tick */
bus->polltick = 0;
/* Check device if no interrupts */
if (!bus->intr || (bus->intrcount == bus->lastintrs)) {
#ifndef BCMSPI
if (!bus->dpc_sched) {
uint8 devpend;
devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0,
SDIOD_CCCR_INTPEND, NULL);
intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2);
}
#else
if (!bus->dpc_sched) {
uint32 devpend;
devpend = bcmsdh_cfg_read_word(bus->sdh, SDIO_FUNC_0,
SPID_STATUS_REG, NULL);
intstatus = devpend & STATUS_F2_PKT_AVAILABLE;
}
#endif /* !BCMSPI */
/* If there is something, make like the ISR and schedule the DPC */
if (intstatus) {
bus->pollcnt++;
bus->ipend = TRUE;
if (bus->intr) {
bcmsdh_intr_disable(bus->sdh);
}
bus->dpc_sched = TRUE;
dhd_sched_dpc(bus->dhd);
}
}
/* Update interrupt tracking */
bus->lastintrs = bus->intrcount;
}
#ifdef DHD_DEBUG
/* Poll for console output periodically */
if (dhdp->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
bus->console.count += dhd_watchdog_ms;
if (bus->console.count >= dhd_console_ms) {
bus->console.count -= dhd_console_ms;
/* Make sure backplane clock is on */
if (SLPAUTO_ENAB(bus))
dhdsdio_bussleep(bus, FALSE);
else
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
if (dhdsdio_readconsole(bus) < 0)
dhd_console_ms = 0; /* On error, stop trying */
}
}
#endif /* DHD_DEBUG */
#ifdef SDTEST
/* Generate packets if configured */
if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) {
/* Make sure backplane clock is on */
if (SLPAUTO_ENAB(bus))
dhdsdio_bussleep(bus, FALSE);
else
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
bus->pktgen_tick = 0;
dhdsdio_pktgen(bus);
}
#endif
/* On idle timeout clear activity flag and/or turn off clock */
#ifdef DHD_USE_IDLECOUNT
if (bus->activity)
bus->activity = FALSE;
else {
bus->idlecount++;
if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) {
DHD_TIMER(("%s: DHD Idle state!!\n", __FUNCTION__));
if (SLPAUTO_ENAB(bus)) {
if (dhdsdio_bussleep(bus, TRUE) != BCME_BUSY)
dhd_os_wd_timer(bus->dhd, 0);
} else
dhdsdio_clkctl(bus, CLK_NONE, FALSE);
bus->idlecount = 0;
}
}
#else
if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
if (++bus->idlecount >= bus->idletime) {
bus->idlecount = 0;
if (bus->activity) {
bus->activity = FALSE;
if (SLPAUTO_ENAB(bus)) {
if (!bus->readframes)
dhdsdio_bussleep(bus, TRUE);
else
bus->reqbussleep = TRUE;
}
else
dhdsdio_clkctl(bus, CLK_NONE, FALSE);
}
}
}
#endif /* DHD_USE_IDLECOUNT */
return bus->ipend;
}
#ifdef DHD_DEBUG
extern int
dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen)
{
dhd_bus_t *bus = dhdp->bus;
uint32 addr, val;
int rv;
void *pkt;
/* Address could be zero if CONSOLE := 0 in dongle Makefile */
if (bus->console_addr == 0)
return BCME_UNSUPPORTED;
/* Exclusive bus access */
dhd_os_sdlock(bus->dhd);
/* Don't allow input if dongle is in reset */
if (bus->dhd->dongle_reset) {
dhd_os_sdunlock(bus->dhd);
return BCME_NOTREADY;
}
/* Request clock to allow SDIO accesses */
BUS_WAKE(bus);
/* No pend allowed since txpkt is called later, ht clk has to be on */
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
/* Zero cbuf_index */
addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf_idx);
val = htol32(0);
if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
goto done;
/* Write message into cbuf */
addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf);
if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
goto done;
/* Write length into vcons_in */
addr = bus->console_addr + OFFSETOF(hndrte_cons_t, vcons_in);
val = htol32(msglen);
if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
goto done;
/* Bump dongle by sending an empty packet on the event channel.
* sdpcm_sendup (RX) checks for virtual console input.
*/
if ((pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, TRUE)) != NULL)
dhdsdio_txpkt(bus, pkt, SDPCM_EVENT_CHANNEL, TRUE, FALSE);
done:
if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
bus->activity = FALSE;
dhdsdio_clkctl(bus, CLK_NONE, TRUE);
}
dhd_os_sdunlock(bus->dhd);
return rv;
}
#endif /* DHD_DEBUG */
#ifdef DHD_DEBUG
static void
dhd_dump_cis(uint fn, uint8 *cis)
{
uint byte, tag, tdata;
DHD_INFO(("Function %d CIS:\n", fn));
for (tdata = byte = 0; byte < SBSDIO_CIS_SIZE_LIMIT; byte++) {
if ((byte % 16) == 0)
DHD_INFO((" "));
DHD_INFO(("%02x ", cis[byte]));
if ((byte % 16) == 15)
DHD_INFO(("\n"));
if (!tdata--) {
tag = cis[byte];
if (tag == 0xff)
break;
else if (!tag)
tdata = 0;
else if ((byte + 1) < SBSDIO_CIS_SIZE_LIMIT)
tdata = cis[byte + 1] + 1;
else
DHD_INFO(("]"));
}
}
if ((byte % 16) != 15)
DHD_INFO(("\n"));
}
#endif /* DHD_DEBUG */
static bool
dhdsdio_chipmatch(uint16 chipid)
{
if (chipid == BCM4325_CHIP_ID)
return TRUE;
if (chipid == BCM4329_CHIP_ID)
return TRUE;
if (chipid == BCM4315_CHIP_ID)
return TRUE;
if (chipid == BCM4319_CHIP_ID)
return TRUE;
if (chipid == BCM4336_CHIP_ID)
return TRUE;
if (chipid == BCM4330_CHIP_ID)
return TRUE;
if (chipid == BCM43237_CHIP_ID)
return TRUE;
if (chipid == BCM43362_CHIP_ID)
return TRUE;
if (chipid == BCM4314_CHIP_ID)
return TRUE;
if (chipid == BCM43242_CHIP_ID)
return TRUE;
if (chipid == BCM43340_CHIP_ID)
return TRUE;
if (chipid == BCM43341_CHIP_ID)
return TRUE;
if (chipid == BCM43143_CHIP_ID)
return TRUE;
if (chipid == BCM43342_CHIP_ID)
return TRUE;
if (chipid == BCM4334_CHIP_ID)
return TRUE;
if (chipid == BCM43239_CHIP_ID)
return TRUE;
if (chipid == BCM4324_CHIP_ID)
return TRUE;
if (chipid == BCM4335_CHIP_ID)
return TRUE;
if (chipid == BCM4339_CHIP_ID)
return TRUE;
if (chipid == BCM4350_CHIP_ID)
return TRUE;
return FALSE;
}
static void *
dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot,
uint16 func, uint bustype, void *regsva, osl_t * osh, void *sdh)
{
int ret;
dhd_bus_t *bus;
#ifdef GET_CUSTOM_MAC_ENABLE
struct ether_addr ea_addr;
#endif /* GET_CUSTOM_MAC_ENABLE */
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
if (mutex_is_locked(&_dhd_sdio_mutex_lock_) == 0) {
DHD_ERROR(("%s : no mutex held. set lock\n", __FUNCTION__));
}
else {
DHD_ERROR(("%s : mutex is locked!. wait for unlocking\n", __FUNCTION__));
}
mutex_lock(&_dhd_sdio_mutex_lock_);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif
/* Init global variables at run-time, not as part of the declaration.
* This is required to support init/de-init of the driver. Initialization
* of globals as part of the declaration results in non-deterministic
* behavior since the value of the globals may be different on the
* first time that the driver is initialized vs subsequent initializations.
*/
dhd_txbound = DHD_TXBOUND;
dhd_rxbound = DHD_RXBOUND;
#ifdef BCMSPI
dhd_alignctl = FALSE;
#else
dhd_alignctl = TRUE;
#endif /* BCMSPI */
sd1idle = TRUE;
dhd_readahead = TRUE;
retrydata = FALSE;
#if !defined(PLATFORM_MPS) && !defined(CUSTOMER_HW4)
dhd_doflow = FALSE;
#else
dhd_doflow = TRUE;
#endif /* !PLATFORM_MPS && !CUSTOMER_HW4 */
dhd_dongle_ramsize = 0;
dhd_txminmax = DHD_TXMINMAX;
#ifdef BCMSPI
forcealign = FALSE;
#else
forcealign = TRUE;
#endif /* !BCMSPI */
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __FUNCTION__, venid, devid));
/* We make assumptions about address window mappings */
ASSERT((uintptr)regsva == SI_ENUM_BASE);
/* BCMSDH passes venid and devid based on CIS parsing -- but low-power start
* means early parse could fail, so here we should get either an ID
* we recognize OR (-1) indicating we must request power first.
*/
/* Check the Vendor ID */
switch (venid) {
case 0x0000:
case VENDOR_BROADCOM:
break;
default:
DHD_ERROR(("%s: unknown vendor: 0x%04x\n",
__FUNCTION__, venid));
goto forcereturn;
}
/* Check the Device ID and make sure it's one that we support */
switch (devid) {
case BCM4325_D11DUAL_ID: /* 4325 802.11a/g id */
case BCM4325_D11G_ID: /* 4325 802.11g 2.4Ghz band id */
case BCM4325_D11A_ID: /* 4325 802.11a 5Ghz band id */
DHD_INFO(("%s: found 4325 Dongle\n", __FUNCTION__));
break;
case BCM4329_D11N_ID: /* 4329 802.11n dualband device */
case BCM4329_D11N2G_ID: /* 4329 802.11n 2.4G device */
case BCM4329_D11N5G_ID: /* 4329 802.11n 5G device */
case 0x4329:
DHD_INFO(("%s: found 4329 Dongle\n", __FUNCTION__));
break;
case BCM4315_D11DUAL_ID: /* 4315 802.11a/g id */
case BCM4315_D11G_ID: /* 4315 802.11g id */
case BCM4315_D11A_ID: /* 4315 802.11a id */
DHD_INFO(("%s: found 4315 Dongle\n", __FUNCTION__));
break;
case BCM4319_D11N_ID: /* 4319 802.11n id */
case BCM4319_D11N2G_ID: /* 4319 802.11n2g id */
case BCM4319_D11N5G_ID: /* 4319 802.11n5g id */
DHD_INFO(("%s: found 4319 Dongle\n", __FUNCTION__));
break;
case 0:
DHD_INFO(("%s: allow device id 0, will check chip internals\n",
__FUNCTION__));
break;
default:
DHD_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n",
__FUNCTION__, venid, devid));
goto forcereturn;
}
if (osh == NULL) {
/* Ask the OS interface part for an OSL handle */
if (!(osh = dhd_osl_attach(sdh, DHD_BUS))) {
DHD_ERROR(("%s: osl_attach failed!\n", __FUNCTION__));
goto forcereturn;
}
}
/* Allocate private bus interface state */
if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) {
DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
goto fail;
}
bzero(bus, sizeof(dhd_bus_t));
bus->sdh = sdh;
bus->cl_devid = (uint16)devid;
bus->bus = DHD_BUS;
bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */
/* attach the common module */
dhd_common_init(osh);
/* attempt to attach to the dongle */
if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) {
DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__));
goto fail;
}
/* Attach to the dhd/OS/network interface */
if (!(bus->dhd = dhd_attach(osh, bus, SDPCM_RESERVE))) {
DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
goto fail;
}
/* Allocate buffers */
if (!(dhdsdio_probe_malloc(bus, osh, sdh))) {
DHD_ERROR(("%s: dhdsdio_probe_malloc failed\n", __FUNCTION__));
goto fail;
}
if (!(dhdsdio_probe_init(bus, osh, sdh))) {
DHD_ERROR(("%s: dhdsdio_probe_init failed\n", __FUNCTION__));
goto fail;
}
if (bus->intr) {
/* Register interrupt callback, but mask it (not operational yet). */
DHD_INTR(("%s: disable SDIO interrupts (not interested yet)\n", __FUNCTION__));
bcmsdh_intr_disable(sdh);
if ((ret = bcmsdh_intr_reg(sdh, dhdsdio_isr, bus)) != 0) {
DHD_ERROR(("%s: FAILED: bcmsdh_intr_reg returned %d\n",
__FUNCTION__, ret));
goto fail;
}
DHD_INTR(("%s: registered SDIO interrupt function ok\n", __FUNCTION__));
} else {
DHD_INFO(("%s: SDIO interrupt function is NOT registered due to polling mode\n",
__FUNCTION__));
}
DHD_INFO(("%s: completed!!\n", __FUNCTION__));
#ifdef GET_CUSTOM_MAC_ENABLE
/* Read MAC address from external customer place */
memset(&ea_addr, 0, sizeof(ea_addr));
ret = dhd_custom_get_mac_address(ea_addr.octet);
if (!ret) {
memcpy(bus->dhd->mac.octet, (void *)&ea_addr, ETHER_ADDR_LEN);
}
#endif /* GET_CUSTOM_MAC_ENABLE */
/* if firmware path present try to download and bring up bus */
bus->dhd->hang_report = TRUE;
if (dhd_download_fw_on_driverload) {
if ((ret = dhd_bus_start(bus->dhd)) != 0) {
DHD_ERROR(("%s: dhd_bus_start failed\n", __FUNCTION__));
goto fail;
}
}
#if defined(CUSTOMER_HW4)
else {
/* Set a random MAC address at boot time */
get_random_bytes(&bus->dhd->mac.octet[3], 3);
/* Adding BRCM OUI */
bus->dhd->mac.octet[0] = 0;
bus->dhd->mac.octet[1] = 0x90;
bus->dhd->mac.octet[2] = 0x4C;
}
#endif /* CUSTOMER_HW4 */
/* Ok, have the per-port tell the stack we're open for business */
if (dhd_net_attach(bus->dhd, 0) != 0) {
DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__));
goto fail;
}
#if defined(CUSTOMER_HW4) && defined(BCMHOST_XTAL_PU_TIME_MOD)
bcmsdh_reg_write(bus->sdh, 0x18000620, 2, 11);
bcmsdh_reg_write(bus->sdh, 0x18000628, 4, 0x00F80001);
#endif
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_unlock(&_dhd_sdio_mutex_lock_);
DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__));
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif
return bus;
fail:
dhdsdio_release(bus, osh);
forcereturn:
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_unlock(&_dhd_sdio_mutex_lock_);
DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__));
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif
return NULL;
}
static bool
dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva,
uint16 devid)
{
#ifndef BCMSPI
int err = 0;
uint8 clkctl = 0;
#endif /* !BCMSPI */
bus->alp_only = TRUE;
bus->sih = NULL;
/* Return the window to backplane enumeration space for core access */
if (dhdsdio_set_siaddr_window(bus, SI_ENUM_BASE)) {
DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__));
}
#ifndef BCMSPI /* wake-wlan in gSPI will bring up the htavail/alpavail clocks. */
/* Force PLL off until si_attach() programs PLL control regs */
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, DHD_INIT_CLKCTL1, &err);
if (!err)
clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) {
DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
err, DHD_INIT_CLKCTL1, clkctl));
goto fail;
}
#endif /* !BCMSPI */
#ifdef DHD_DEBUG
if (DHD_INFO_ON()) {
uint fn, numfn;
uint8 *cis[SDIOD_MAX_IOFUNCS];
int err = 0;
#ifndef BCMSPI
numfn = bcmsdh_query_iofnum(sdh);
ASSERT(numfn <= SDIOD_MAX_IOFUNCS);
/* Make sure ALP is available before trying to read CIS */
SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
!SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY);
/* Now request ALP be put on the bus */
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
DHD_INIT_CLKCTL2, &err);
OSL_DELAY(65);
#else
numfn = 0; /* internally func is hardcoded to 1 as gSPI has cis on F1 only */
#endif /* !BCMSPI */
for (fn = 0; fn <= numfn; fn++) {
if (!(cis[fn] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) {
DHD_INFO(("dhdsdio_probe: fn %d cis malloc failed\n", fn));
break;
}
bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT);
if ((err = bcmsdh_cis_read(sdh, fn, cis[fn], SBSDIO_CIS_SIZE_LIMIT))) {
DHD_INFO(("dhdsdio_probe: fn %d cis read err %d\n", fn, err));
MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
break;
}
dhd_dump_cis(fn, cis[fn]);
}
while (fn-- > 0) {
ASSERT(cis[fn]);
MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
}
if (err) {
DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n"));
goto fail;
}
}
#endif /* DHD_DEBUG */
/* si_attach() will provide an SI handle and scan the backplane */
if (!(bus->sih = si_attach((uint)devid, osh, regsva, DHD_BUS, sdh,
&bus->vars, &bus->varsz))) {
DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
goto fail;
}
#ifdef DHD_DEBUG
DHD_ERROR(("F1 signature OK, socitype:0x%x chip:0x%4x rev:0x%x pkg:0x%x\n",
bus->sih->socitype, bus->sih->chip, bus->sih->chiprev,
bus->sih->chippkg));
#endif /* DHD_DEBUG */
bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev);
if (!dhdsdio_chipmatch((uint16)bus->sih->chip)) {
DHD_ERROR(("%s: unsupported chip: 0x%04x\n",
__FUNCTION__, bus->sih->chip));
goto fail;
}
if (bus->sih->buscorerev >= 12)
dhdsdio_clk_kso_init(bus);
else
bus->kso = TRUE;
if (CST4330_CHIPMODE_SDIOD(bus->sih->chipst)) {
}
si_sdiod_drive_strength_init(bus->sih, osh, dhd_sdiod_drive_strength);
/* Get info on the ARM and SOCRAM cores... */
if (!DHD_NOPMU(bus)) {
if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
bus->armrev = si_corerev(bus->sih);
} else {
DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
goto fail;
}
if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
goto fail;
}
} else {
/* CR4 derives the RAM size from its TCMs rather than from SOCRAM */
if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
goto fail;
}
/* also populate base address */
switch ((uint16)bus->sih->chip) {
case BCM4335_CHIP_ID:
case BCM4339_CHIP_ID:
bus->dongle_ram_base = CR4_4335_RAM_BASE;
break;
case BCM4350_CHIP_ID:
bus->dongle_ram_base = CR4_4350_RAM_BASE;
break;
case BCM4360_CHIP_ID:
bus->dongle_ram_base = CR4_4360_RAM_BASE;
break;
default:
bus->dongle_ram_base = 0;
DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
__FUNCTION__, bus->dongle_ram_base));
}
}
bus->ramsize = bus->orig_ramsize;
if (dhd_dongle_ramsize)
dhd_dongle_setramsize(bus, dhd_dongle_ramsize);
DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
bus->srmemsize = si_socram_srmem_size(bus->sih);
}
/* ...but normally deal with the SDPCMDEV core */
if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) &&
!(bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0))) {
DHD_ERROR(("%s: failed to find SDIODEV core!\n", __FUNCTION__));
goto fail;
}
bus->sdpcmrev = si_corerev(bus->sih);
/* Set core control so an SDIO reset does a backplane reset */
OR_REG(osh, &bus->regs->corecontrol, CC_BPRESEN);
#ifndef BCMSPI
bus->rxint_mode = SDIO_DEVICE_HMB_RXINT;
if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
(bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1))
{
uint32 val;
val = R_REG(osh, &bus->regs->corecontrol);
val &= ~CC_XMTDATAAVAIL_MODE;
val |= CC_XMTDATAAVAIL_CTRL;
W_REG(osh, &bus->regs->corecontrol, val);
}
#endif /* BCMSPI */
pktq_init(&bus->txq, (PRIOMASK + 1), QLEN);
/* Locate an appropriately-aligned portion of hdrbuf */
bus->rxhdr = (uint8 *)ROUNDUP((uintptr)&bus->hdrbuf[0], DHD_SDALIGN);
/* Set the poll and/or interrupt flags */
bus->intr = (bool)dhd_intr;
if ((bus->poll = (bool)dhd_poll))
bus->pollrate = 1;
#ifdef BCMSDIOH_TXGLOM
/* Setting default Glom mode */
bus->glom_mode = bcmsdh_set_mode(bus->sdh, SDPCM_DEFGLOM_MODE);
/* Setting default Glom size */
bus->glomsize = SDPCM_DEFGLOM_SIZE;
#endif
return TRUE;
fail:
if (bus->sih != NULL) {
si_detach(bus->sih);
bus->sih = NULL;
}
return FALSE;
}
static bool
dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh)
{
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
if (bus->dhd->maxctl) {
bus->rxblen = ROUNDUP((bus->dhd->maxctl + SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN;
if (!(bus->rxbuf = DHD_OS_PREALLOC(osh, DHD_PREALLOC_RXBUF, bus->rxblen))) {
DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n",
__FUNCTION__, bus->rxblen));
goto fail;
}
}
/* Allocate buffer to receive glomed packet */
if (!(bus->databuf = DHD_OS_PREALLOC(osh, DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) {
DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n",
__FUNCTION__, MAX_DATA_BUF));
/* release rxbuf which was already allocated above */
if (bus->rxbuf)
DHD_OS_PREFREE(osh, bus->rxbuf, bus->rxblen);
goto fail;
}
/* Align the buffer */
if ((uintptr)bus->databuf % DHD_SDALIGN)
bus->dataptr = bus->databuf + (DHD_SDALIGN - ((uintptr)bus->databuf % DHD_SDALIGN));
else
bus->dataptr = bus->databuf;
return TRUE;
fail:
return FALSE;
}
static bool
dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh)
{
int32 fnum;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
#ifdef CUSTOMER_HW10
bus->_srenab = FALSE;
#endif
#ifdef SDTEST
dhdsdio_pktgen_init(bus);
#endif /* SDTEST */
#ifndef BCMSPI
/* Disable F2 to clear any intermediate frame state on the dongle */
bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
#endif /* !BCMSPI */
bus->dhd->busstate = DHD_BUS_DOWN;
bus->sleeping = FALSE;
bus->rxflow = FALSE;
bus->prev_rxlim_hit = 0;
#ifndef BCMSPI
/* Done with backplane-dependent accesses, can drop clock... */
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
#endif /* !BCMSPI */
/* ...and initialize clock/power states */
bus->clkstate = CLK_SDONLY;
bus->idletime = (int32)dhd_idletime;
bus->idleclock = DHD_IDLE_ACTIVE;
/* Query the SD clock speed */
if (bcmsdh_iovar_op(sdh, "sd_divisor", NULL, 0,
&bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_divisor"));
bus->sd_divisor = -1;
} else {
DHD_INFO(("%s: Initial value for %s is %d\n",
__FUNCTION__, "sd_divisor", bus->sd_divisor));
}
/* Query the SD bus mode */
if (bcmsdh_iovar_op(sdh, "sd_mode", NULL, 0,
&bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_mode"));
bus->sd_mode = -1;
} else {
DHD_INFO(("%s: Initial value for %s is %d\n",
__FUNCTION__, "sd_mode", bus->sd_mode));
}
/* Query the F2 block size, set roundup accordingly */
fnum = 2;
if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32),
&bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
bus->blocksize = 0;
DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
} else {
DHD_INFO(("%s: Initial value for %s is %d\n",
__FUNCTION__, "sd_blocksize", bus->blocksize));
if ((bus->sih->chip == BCM4335_CHIP_ID) ||
(bus->sih->chip == BCM4339_CHIP_ID))
dhd_overflow_war(bus);
}
bus->roundup = MIN(max_roundup, bus->blocksize);
/* Query if bus module supports packet chaining, default to use if supported */
if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0,
&bus->sd_rxchain, sizeof(int32), FALSE) != BCME_OK) {
bus->sd_rxchain = FALSE;
} else {
DHD_INFO(("%s: bus module (through bcmsdh API) %s chaining\n",
__FUNCTION__, (bus->sd_rxchain ? "supports" : "does not support")));
}
bus->use_rxchain = (bool)bus->sd_rxchain;
return TRUE;
}
bool
dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
char *pfw_path, char *pnv_path)
{
bool ret;
bus->fw_path = pfw_path;
bus->nv_path = pnv_path;
ret = dhdsdio_download_firmware(bus, osh, bus->sdh);
#ifdef BCMSPI
#ifdef GSPI_DWORD_MODE
/* Enable the dwordmode in gSPI before first F2 transaction */
if ((bus->sih->chip == BCM4329_CHIP_ID) && (bus->sih->chiprev > 1)) {
bcmsdh_dwordmode(bus->sdh, TRUE);
bus->dwordmode = TRUE;
}
#endif /* GSPI_DWORD_MODE */
#endif /* BCMSPI */
return ret;
}
static bool
dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh)
{
bool ret;
DHD_OS_WAKE_LOCK(bus->dhd);
/* Download the firmware */
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
ret = _dhdsdio_download_firmware(bus) == 0;
dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
DHD_OS_WAKE_UNLOCK(bus->dhd);
return ret;
}
/* Detach and free everything */
static void
dhdsdio_release(dhd_bus_t *bus, osl_t *osh)
{
bool dongle_isolation = FALSE;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
if (bus) {
ASSERT(osh);
if (bus->dhd) {
dongle_isolation = bus->dhd->dongle_isolation;
dhd_detach(bus->dhd);
}
/* De-register interrupt handler */
bcmsdh_intr_disable(bus->sdh);
bcmsdh_intr_dereg(bus->sdh);
if (bus->dhd) {
dhdsdio_release_dongle(bus, osh, dongle_isolation, TRUE);
dhd_free(bus->dhd);
bus->dhd = NULL;
}
dhdsdio_release_malloc(bus, osh);
#ifdef DHD_DEBUG
if (bus->console.buf != NULL)
MFREE(osh, bus->console.buf, bus->console.bufsize);
#endif
MFREE(osh, bus, sizeof(dhd_bus_t));
}
if (osh)
dhd_osl_detach(osh);
DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
}
static void
dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh)
{
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
if (bus->dhd && bus->dhd->dongle_reset)
return;
if (bus->rxbuf) {
#ifndef CONFIG_DHD_USE_STATIC_BUF
MFREE(osh, bus->rxbuf, bus->rxblen);
#endif
bus->rxctl = bus->rxbuf = NULL;
bus->rxlen = 0;
}
if (bus->databuf) {
#ifndef CONFIG_DHD_USE_STATIC_BUF
MFREE(osh, bus->databuf, MAX_DATA_BUF);
#endif
bus->databuf = NULL;
}
if (bus->vars && bus->varsz) {
MFREE(osh, bus->vars, bus->varsz);
bus->vars = NULL;
}
}
static void
dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
{
DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
bus->dhd, bus->dhd->dongle_reset));
if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag)
return;
if (bus->sih) {
#if !defined(BCMLXSDMMC)
if (bus->dhd) {
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
}
if (KSO_ENAB(bus) && (dongle_isolation == FALSE))
si_watchdog(bus->sih, 4);
#endif /* !defined(BCMLXSDMMC) */
if (bus->dhd) {
dhdsdio_clkctl(bus, CLK_NONE, FALSE);
}
si_detach(bus->sih);
bus->sih = NULL;
if (bus->vars && bus->varsz)
MFREE(osh, bus->vars, bus->varsz);
bus->vars = NULL;
}
DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
}
static void
dhdsdio_disconnect(void *ptr)
{
dhd_bus_t *bus = (dhd_bus_t *)ptr;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
if (mutex_is_locked(&_dhd_sdio_mutex_lock_) == 0) {
DHD_ERROR(("%s : no mutex held. set lock\n", __FUNCTION__));
}
else {
DHD_ERROR(("%s : mutex is locked!. wait for unlocking\n", __FUNCTION__));
}
mutex_lock(&_dhd_sdio_mutex_lock_);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif
if (bus) {
ASSERT(bus->dhd);
dhdsdio_release(bus, bus->dhd->osh);
}
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_unlock(&_dhd_sdio_mutex_lock_);
DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__));
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif /* LINUX */
DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
}
/* Register/Unregister functions are called by the main DHD entry
* point (e.g. module insertion) to link with the bus driver, in
* order to look for or await the device.
*/
static bcmsdh_driver_t dhd_sdio = {
dhdsdio_probe,
dhdsdio_disconnect
};
int
dhd_bus_register(void)
{
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
return bcmsdh_register(&dhd_sdio);
}
void
dhd_bus_unregister(void)
{
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
bcmsdh_unregister();
}
#if defined(BCMLXSDMMC)
/* Register a dummy SDIO client driver in order to be notified of new SDIO device */
int dhd_bus_reg_sdio_notify(void* semaphore)
{
return bcmsdh_reg_sdio_notify(semaphore);
}
void dhd_bus_unreg_sdio_notify(void)
{
bcmsdh_unreg_sdio_notify();
}
#endif /* defined(BCMLXSDMMC) */
#ifdef BCMEMBEDIMAGE
static int
dhdsdio_download_code_array(struct dhd_bus *bus)
{
int bcmerror = -1;
int offset = 0;
unsigned char *ularray = NULL;
DHD_INFO(("%s: download embedded firmware...\n", __FUNCTION__));
/* Download image */
while ((offset + MEMBLOCK) < sizeof(dlarray)) {
/* check if CR4 */
if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
/* if address is 0, store the reset instruction to be written in 0 */
if (offset == 0) {
bus->resetinstr = *(((uint32*)dlarray));
/* Add start of RAM address to the address given by user */
offset += bus->dongle_ram_base;
}
}
bcmerror = dhdsdio_membytes(bus, TRUE, offset,
(uint8 *) (dlarray + offset), MEMBLOCK);
if (bcmerror) {
DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
__FUNCTION__, bcmerror, MEMBLOCK, offset));
goto err;
}
offset += MEMBLOCK;
}
if (offset < sizeof(dlarray)) {
bcmerror = dhdsdio_membytes(bus, TRUE, offset,
(uint8 *) (dlarray + offset), sizeof(dlarray) - offset);
if (bcmerror) {
DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
__FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
goto err;
}
}
#ifdef DHD_DEBUG
/* Upload and compare the downloaded code */
{
ularray = MALLOC(bus->dhd->osh, bus->ramsize);
/* Upload image to verify downloaded contents. */
offset = 0;
memset(ularray, 0xaa, bus->ramsize);
while ((offset + MEMBLOCK) < sizeof(dlarray)) {
bcmerror = dhdsdio_membytes(bus, FALSE, offset, ularray + offset, MEMBLOCK);
if (bcmerror) {
DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
__FUNCTION__, bcmerror, MEMBLOCK, offset));
goto err;
}
offset += MEMBLOCK;
}
if (offset < sizeof(dlarray)) {
bcmerror = dhdsdio_membytes(bus, FALSE, offset,
ularray + offset, sizeof(dlarray) - offset);
if (bcmerror) {
DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
__FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
goto err;
}
}
if (memcmp(dlarray, ularray, sizeof(dlarray))) {
DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
__FUNCTION__, dlimagename, dlimagever, dlimagedate));
goto err;
} else
DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
__FUNCTION__, dlimagename, dlimagever, dlimagedate));
}
#endif /* DHD_DEBUG */
err:
if (ularray)
MFREE(bus->dhd->osh, ularray, bus->ramsize);
return bcmerror;
}
#endif /* BCMEMBEDIMAGE */
static int
dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path)
{
int bcmerror = -1;
int offset = 0;
int len;
void *image = NULL;
uint8 *memblock = NULL, *memptr;
DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
image = dhd_os_open_image(pfw_path);
if (image == NULL)
goto err;
memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
if (memblock == NULL) {
DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
goto err;
}
if ((uint32)(uintptr)memblock % DHD_SDALIGN)
memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
/* Download image */
while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) {
if (len < 0) {
DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
bcmerror = BCME_ERROR;
goto err;
}
/* check if CR4 */
if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
/* if address is 0, store the reset instruction to be written in 0 */
if (offset == 0) {
bus->resetinstr = *(((uint32*)memptr));
/* Add start of RAM address to the address given by user */
offset += bus->dongle_ram_base;
}
}
bcmerror = dhdsdio_membytes(bus, TRUE, offset, memptr, len);
if (bcmerror) {
DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
__FUNCTION__, bcmerror, MEMBLOCK, offset));
goto err;
}
offset += MEMBLOCK;
}
err:
if (memblock)
MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
if (image)
dhd_os_close_image(image);
return bcmerror;
}
/*
EXAMPLE: nvram_array
nvram_array format:
name=value
End each assignment with a newline, and terminate the array with an empty
string followed by a newline.
For example:
unsigned char nvram_array[] = {"name1=value1\n", "name2=value2\n", "\n"};
Hex values start with 0x, and mac addr format: xx:xx:xx:xx:xx:xx.
Search "EXAMPLE: nvram_array" to see how the array is activated.
*/
void
dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params)
{
bus->nvram_params = nvram_params;
}
static int
dhdsdio_download_nvram(struct dhd_bus *bus)
{
int bcmerror = -1;
uint len;
void * image = NULL;
char * memblock = NULL;
char *bufp;
char *pnv_path;
bool nvram_file_exists;
pnv_path = bus->nv_path;
nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
if (!nvram_file_exists && (bus->nvram_params == NULL))
return (0);
if (nvram_file_exists) {
image = dhd_os_open_image(pnv_path);
if (image == NULL)
goto err;
}
memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
if (memblock == NULL) {
DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
__FUNCTION__, MAX_NVRAMBUF_SIZE));
goto err;
}
/* Download variables */
if (nvram_file_exists) {
len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image);
}
else {
len = strlen(bus->nvram_params);
ASSERT(len <= MAX_NVRAMBUF_SIZE);
memcpy(memblock, bus->nvram_params, len);
}
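/* Normalize the text into NUL-separated name=value pairs, pad the result to a
* 4-byte boundary, and append a final NUL before downloading (hence len + 1).
*/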
if (len > 0 && len < MAX_NVRAMBUF_SIZE) {
bufp = (char *)memblock;
bufp[len] = 0;
len = process_nvram_vars(bufp, len);
if (len % 4) {
len += 4 - (len % 4);
}
bufp += len;
*bufp++ = 0;
if (len)
bcmerror = dhdsdio_downloadvars(bus, memblock, len + 1);
if (bcmerror) {
DHD_ERROR(("%s: error downloading vars: %d\n",
__FUNCTION__, bcmerror));
}
}
else {
DHD_ERROR(("%s: error reading nvram file: %d\n",
__FUNCTION__, len));
bcmerror = BCME_SDIO_ERROR;
}
err:
if (memblock)
MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
if (image)
dhd_os_close_image(image);
return bcmerror;
}
static int
_dhdsdio_download_firmware(struct dhd_bus *bus)
{
int bcmerror = -1;
bool embed = FALSE; /* download embedded firmware */
bool dlok = FALSE; /* download firmware succeeded */
/* Out immediately if no image to download */
if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
#ifdef BCMEMBEDIMAGE
embed = TRUE;
#else
return 0;
#endif
}
/* Keep arm in reset */
if (dhdsdio_download_state(bus, TRUE)) {
DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
goto err;
}
/* External image takes precedence if specified */
if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
if (dhdsdio_download_code_file(bus, bus->fw_path)) {
DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
#ifdef BCMEMBEDIMAGE
embed = TRUE;
#else
goto err;
#endif
}
else {
embed = FALSE;
dlok = TRUE;
}
}
#ifdef BCMEMBEDIMAGE
if (embed) {
if (dhdsdio_download_code_array(bus)) {
DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
goto err;
}
else {
dlok = TRUE;
}
}
#else
BCM_REFERENCE(embed);
#endif
if (!dlok) {
DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
goto err;
}
/* EXAMPLE: nvram_array */
/* If a valid nvram_array is specified as above, it can be passed down to the dongle */
/* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
/* External nvram takes precedence if specified */
if (dhdsdio_download_nvram(bus)) {
DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
goto err;
}
/* Take arm out of reset */
if (dhdsdio_download_state(bus, FALSE)) {
DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
goto err;
}
bcmerror = 0;
err:
return bcmerror;
}
static int
dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
void *pkt, bcmsdh_cmplt_fn_t complete, void *handle)
{
int status;
if (!KSO_ENAB(bus)) {
DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
return BCME_NODEVICE;
}
status = bcmsdh_recv_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle);
return status;
}
static int
dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
void *pkt, bcmsdh_cmplt_fn_t complete, void *handle)
{
if (!KSO_ENAB(bus)) {
DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
return BCME_NODEVICE;
}
return (bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle));
}
#ifdef BCMSDIOH_TXGLOM
static void
dhd_bcmsdh_glom_post(dhd_bus_t *bus, uint8 *frame, void *pkt, uint len)
{
bcmsdh_glom_post(bus->sdh, frame, pkt, len);
}
static void
dhd_bcmsdh_glom_clear(dhd_bus_t *bus)
{
bcmsdh_glom_clear(bus->sdh);
}
#endif
uint
dhd_bus_chip(struct dhd_bus *bus)
{
ASSERT(bus->sih != NULL);
return bus->sih->chip;
}
void *
dhd_bus_pub(struct dhd_bus *bus)
{
return bus->dhd;
}
void *
dhd_bus_txq(struct dhd_bus *bus)
{
return &bus->txq;
}
uint
dhd_bus_hdrlen(struct dhd_bus *bus)
{
return SDPCM_HDRLEN;
}
int
dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
{
int bcmerror = 0;
dhd_bus_t *bus;
bus = dhdp->bus;
if (flag == TRUE) {
if (!bus->dhd->dongle_reset) {
dhd_os_sdlock(dhdp);
dhd_os_wd_timer(dhdp, 0);
#if !defined(IGNORE_ETH0_DOWN)
/* Force flow control as protection when stop comes before ifconfig_down */
dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
#endif /* !defined(IGNORE_ETH0_DOWN) */
/* Expect app to have torn down any connection before calling */
/* Stop the bus, disable F2 */
dhd_bus_stop(bus, FALSE);
#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
/* Clean up any pending IRQ */
bcmsdh_set_irq(FALSE);
#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
/* Clean tx/rx buffer pointers, detach from the dongle */
dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE);
bus->dhd->dongle_reset = TRUE;
bus->dhd->up = FALSE;
#ifdef BCMSDIOH_TXGLOM
dhd_txglom_enable(dhdp, FALSE);
#endif
dhd_os_sdunlock(dhdp);
DHD_TRACE(("%s: WLAN OFF DONE\n", __FUNCTION__));
/* App can now remove power from device */
} else
bcmerror = BCME_SDIO_ERROR;
} else {
/* App must have restored power to device before calling */
DHD_TRACE(("\n\n%s: == WLAN ON ==\n", __FUNCTION__));
if (bus->dhd->dongle_reset) {
/* Turn on WLAN */
#ifdef DHDTHREAD
dhd_os_sdlock(dhdp);
#endif /* DHDTHREAD */
/* Reset SD client */
bcmsdh_reset(bus->sdh);
/* Attempt to re-attach & download */
if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh,
(uint32 *)SI_ENUM_BASE,
bus->cl_devid)) {
/* Attempt to download binary to the dongle */
if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) &&
dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh)) {
/* Re-init bus, enable F2 transfer */
bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE);
if (bcmerror == BCME_OK) {
#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
/* make sure the OOB interrupt gets registered */
if (!bcmsdh_is_oob_intr_registered()) {
sdioh_start(NULL, 1);
bcmsdh_register_oob_intr(dhdp);
dhdp->iswl = TRUE;
}
bcmsdh_set_irq(TRUE);
dhd_enable_oob_intr(bus, TRUE);
#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
bus->dhd->dongle_reset = FALSE;
bus->dhd->up = TRUE;
#if !defined(IGNORE_ETH0_DOWN)
/* Restore flow control */
dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
#endif
dhd_os_wd_timer(dhdp, dhd_watchdog_ms);
DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__));
} else {
dhd_bus_stop(bus, FALSE);
dhdsdio_release_dongle(bus, bus->dhd->osh,
TRUE, FALSE);
}
} else
bcmerror = BCME_SDIO_ERROR;
} else
bcmerror = BCME_SDIO_ERROR;
#ifdef DHDTHREAD
dhd_os_sdunlock(dhdp);
#endif /* DHDTHREAD */
} else {
bcmerror = BCME_SDIO_ERROR;
DHD_INFO(("%s called when dongle is not in reset\n",
__FUNCTION__));
DHD_INFO(("Will call dhd_bus_start instead\n"));
sdioh_start(NULL, 1);
if ((bcmerror = dhd_bus_start(dhdp)) != 0)
DHD_ERROR(("%s: dhd_bus_start fail with %d\n",
__FUNCTION__, bcmerror));
}
}
return bcmerror;
}
/* Get Chip ID version */
uint dhd_bus_chip_id(dhd_pub_t *dhdp)
{
dhd_bus_t *bus = dhdp->bus;
return bus->sih->chip;
}
/* Get Chip Rev ID version */
uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
{
dhd_bus_t *bus = dhdp->bus;
return bus->sih->chiprev;
}
/* Get Chip Pkg ID version */
uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
{
dhd_bus_t *bus = dhdp->bus;
return bus->sih->chippkg;
}
int
dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size)
{
dhd_bus_t *bus;
bus = dhdp->bus;
return dhdsdio_membytes(bus, set, address, data, size);
}
#if defined(SUPPORT_MULTIPLE_REVISION)
/* Just print chip revision for BCM4330 */
static int
concate_revision_bcm4330(dhd_bus_t *bus)
{
uint chipver;
chipver = bus->sih->chiprev;
if (chipver == 3)
DHD_ERROR(("---- CHIP bcm4330 B1 ----\n"));
else if (chipver == 4)
DHD_ERROR(("---- CHIP bcm4330 B2 ----\n"));
else
DHD_ERROR(("----- Invalid chip version -----\n"));
return 0;
}
static int
concate_revision_bcm4334(dhd_bus_t *bus,
char *fw_path, int fw_path_len, char *nv_path, int nv_path_len)
{
#define REV_ID_ADDR 0x1E008F90
#define BCM4334_B1_UNIQUE 0x30312E36
uint chipver;
uint32 unique_id;
uint8 data[4];
#if defined(SUPPORT_MULTIPLE_CHIPS)
char chipver_tag[10] = "_4334";
#else
char chipver_tag[4] = "";
#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
DHD_TRACE(("%s: BCM4334 Multiple Revision Check\n", __FUNCTION__));
if (bus->sih->chip != BCM4334_CHIP_ID) {
DHD_ERROR(("%s:Chip is not BCM4334\n", __FUNCTION__));
return -1;
}
chipver = bus->sih->chiprev;
if (chipver == 0x2) {
dhdsdio_membytes(bus, FALSE, REV_ID_ADDR, data, 4);
unique_id = load32_ua(data);
if (unique_id == BCM4334_B1_UNIQUE)
chipver = 0x01;
}
DHD_ERROR(("CHIP VER = [0x%x]\n", chipver));
if (chipver == 1) {
DHD_ERROR(("----- CHIP bcm4334_B0 -----\n"));
strcat(chipver_tag, "_b0");
} else if (chipver == 2) {
DHD_ERROR(("----- CHIP bcm4334_B1 -----\n"));
strcat(chipver_tag, "_b1");
} else if (chipver == 3) {
DHD_ERROR(("----- CHIP bcm4334_B2 -----\n"));
strcat(chipver_tag, "_b2");
}
else {
DHD_ERROR(("----- Invalid chip version -----\n"));
return -1;
}
strcat(fw_path, chipver_tag);
#if defined(SUPPORT_MULTIPLE_CHIPS)
strcat(nv_path, chipver_tag);
#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
#undef REV_ID_ADDR
#undef BCM4334_B1_UNIQUE
return 0;
}
static int
concate_revision_bcm4335
(dhd_bus_t *bus, char *fw_path, int fw_path_len, char *nv_path, int nv_path_len)
{
uint chipver;
#if defined(SUPPORT_MULTIPLE_CHIPS)
char chipver_tag[10] = "_4335";
#else
char chipver_tag[4] = {0, };
#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
DHD_TRACE(("%s: BCM4335 Multiple Revision Check\n", __FUNCTION__));
if (bus->sih->chip != BCM4335_CHIP_ID) {
DHD_ERROR(("%s:Chip is not BCM4335\n", __FUNCTION__));
return -1;
}
chipver = bus->sih->chiprev;
DHD_ERROR(("CHIP VER = [0x%x]\n", chipver));
if (chipver == 0x0) {
DHD_ERROR(("----- CHIP bcm4335_A0 -----\n"));
strcat(chipver_tag, "_a0");
} else if (chipver == 0x1) {
DHD_ERROR(("----- CHIP bcm4335_B0 -----\n"));
#if defined(SUPPORT_MULTIPLE_CHIPS)
strcat(chipver_tag, "_b0");
#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
}
strcat(fw_path, chipver_tag);
strcat(nv_path, chipver_tag);
return 0;
}
static int
concate_revision_bcm4339
(dhd_bus_t *bus, char *fw_path, int fw_path_len, char *nv_path, int nv_path_len)
{
uint chipver;
#if defined(SUPPORT_MULTIPLE_CHIPS)
char chipver_tag[10] = "_4339";
#else
char chipver_tag[4] = {0, };
#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
DHD_TRACE(("%s: BCM4339 Multiple Revision Check\n", __FUNCTION__));
if (bus->sih->chip != BCM4339_CHIP_ID) {
DHD_ERROR(("%s:Chip is not BCM4339\n", __FUNCTION__));
return -1;
}
chipver = bus->sih->chiprev;
DHD_ERROR(("CHIP VER = [0x%x]\n", chipver));
if (chipver == 0x1) {
DHD_ERROR(("----- CHIP bcm4339_A0 -----\n"));
strcat(chipver_tag, "_a0");
} else {
DHD_ERROR(("----- CHIP bcm4339 unknown revision %d -----\n",
chipver));
}
strcat(fw_path, chipver_tag);
strcat(nv_path, chipver_tag);
return 0;
}
static int concate_revision_bcm43241(dhd_bus_t *bus,
char *fw_path, int fw_path_len, char *nv_path, int nv_path_len)
{
uint32 chip_id, chip_ver;
#if defined(SUPPORT_MULTIPLE_CHIPS)
char chipver_tag[10] = "_43241";
#else
char chipver_tag[4] = {0, };
#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
DHD_TRACE(("%s: BCM43241 Multiple Revision Check\n", __FUNCTION__));
chip_id = bus->sih->chip;
chip_ver = bus->sih->chiprev;
if (chip_ver == 2) {
DHD_ERROR(("----- CHIP bcm43241_B0 -----\n"));
strcat(chipver_tag, "_b0");
} else if (chip_ver == 5) {
DHD_ERROR(("----- CHIP bcm43241_B4 -----\n"));
strcat(chipver_tag, "_b4");
} else {
DHD_ERROR(("----- Invalid chip version -----\n"));
return -1;
}
strcat(fw_path, chipver_tag);
strcat(nv_path, chipver_tag);
return 0;
}
static int
concate_revision_bcm43341
(dhd_bus_t *bus, char *fw_path, int fw_path_len, char *nv_path, int nv_path_len)
{
uint chipver;
#if defined(SUPPORT_MULTIPLE_CHIPS)
char chipver_tag[10] = "_43341";
#else
char chipver_tag[4] = {0, };
#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
DHD_TRACE(("%s: BCM43341 Multiple Revision Check\n", __FUNCTION__));
if (bus->sih->chip != BCM43341_CHIP_ID) {
DHD_ERROR(("%s:Chip is not BCM43341\n", __FUNCTION__));
return -1;
}
chipver = bus->sih->chiprev;
DHD_ERROR(("CHIP VER = [0x%x]\n", chipver));
if (chipver == 0x1) {
DHD_ERROR(("----- CHIP bcm43341_A0 -----\n"));
strcat(chipver_tag, "_a0");
} else if (chipver == 0x2) {
DHD_ERROR(("----- CHIP bcm43341_B0 -----\n"));
strcat(chipver_tag, "_b0");
} else {
DHD_ERROR(("----- CHIP bcm43341 unknown revision %d -----\n",
chipver));
}
strcat(fw_path, chipver_tag);
strcat(nv_path, chipver_tag);
return 0;
}
int
concate_revision(dhd_bus_t *bus, char *fw_path, int fw_path_len, char *nv_path, int nv_path_len)
{
int res = 0;
if (!bus || !bus->sih) {
DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
return -1;
}
switch (bus->sih->chip) {
case BCM4330_CHIP_ID:
res = concate_revision_bcm4330(bus);
break;
case BCM4334_CHIP_ID:
res = concate_revision_bcm4334(bus, fw_path, fw_path_len,
nv_path, nv_path_len);
break;
case BCM4335_CHIP_ID:
res = concate_revision_bcm4335(bus, fw_path, fw_path_len,
nv_path, nv_path_len);
break;
case BCM4339_CHIP_ID:
res = concate_revision_bcm4339(bus, fw_path, fw_path_len,
nv_path, nv_path_len);
break;
case BCM4324_CHIP_ID:
res = concate_revision_bcm43241(bus, fw_path, fw_path_len, nv_path, nv_path_len);
break;
case BCM43341_CHIP_ID:
res = concate_revision_bcm43341(bus, fw_path, fw_path_len,
nv_path, nv_path_len);
break;
default:
DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
return res;
}
if (res == 0) {
}
return res;
}
#endif /* SUPPORT_MULTIPLE_REVISION */
int
dhd_enableOOB(dhd_pub_t *dhd, bool sleep)
{
dhd_bus_t *bus = dhd->bus;
sdpcmd_regs_t *regs = bus->regs;
uint retries = 0;
if (sleep) {
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
/* Tell device to start using OOB wakeup */
W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
if (retries > retry_limit) {
DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
return BCME_BUSY;
}
/* Turn off our contribution to the HT clock request */
dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
} else {
/* Make sure the controller has the bus up */
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
/* Send misc interrupt to indicate OOB not needed */
W_SDREG(0, &regs->tosbmailboxdata, retries);
if (retries <= retry_limit)
W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
if (retries > retry_limit)
DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
/* Make sure we have SD bus access */
dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
}
return BCME_OK;
}
void
dhd_bus_pktq_flush(dhd_pub_t *dhdp)
{
dhd_bus_t *bus = dhdp->bus;
/* Clear the data packet queues */
pktq_flush(dhdp->osh, &bus->txq, TRUE, NULL, 0);
}
int
dhd_sr_config(dhd_pub_t *dhd, bool on)
{
dhd_bus_t *bus = dhd->bus;
if (!bus->_srenab)
return -1;
return dhdsdio_clk_devsleep_iovar(bus, on);
}
uint16
dhd_get_chipid(dhd_pub_t *dhd)
{
dhd_bus_t *bus = dhd->bus;
if (bus && bus->sih)
return (uint16)bus->sih->chip;
else
return 0;
}
#if defined( CUSTOMER_HW10 )
void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time)
{
if ( !dhdp || !dhdp->bus ) {
DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
return;
}
dhdp->bus->idletime = idle_time;
}
void dhd_bus_getidletime(dhd_pub_t *dhdp, int* idle_time)
{
if (!dhdp || !dhdp->bus) {
DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
return;
}
if (!idle_time) {
DHD_ERROR(("%s:Arg idle_time is NULL\n", __FUNCTION__));
return;
}
*idle_time = dhdp->bus->idletime;
}
#endif /* CUSTOMER_HW10 */
| gpl-2.0 |
sourabgu/linux | kernel/trace/trace_event_perf.c | 285 | 9161 | /*
* trace event based perf event profiling/tracing
*
* Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
* Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
*/
#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];
/*
* Force it to be aligned to unsigned long to avoid misaligned access
* surprises
*/
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
perf_trace_t;
/* Count the events in use (per event id, not per instance) */
static int total_ref_count;
static int perf_trace_event_perm(struct trace_event_call *tp_event,
struct perf_event *p_event)
{
if (tp_event->perf_perm) {
int ret = tp_event->perf_perm(tp_event, p_event);
if (ret)
return ret;
}
/*
* We checked and allowed to create parent,
* allow children without checking.
*/
if (p_event->parent)
return 0;
/*
* It's ok to check current process (owner) permissions in here,
* because code below is called only via perf_event_open syscall.
*/
/* The ftrace function trace is allowed only for root. */
if (ftrace_event_is_function(tp_event)) {
if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
return -EPERM;
if (!is_sampling_event(p_event))
return 0;
/*
* We don't allow user space callchains for function trace
* event, due to issues with page faults while tracing page
* fault handler and its overall trickiness nature.
*/
if (!p_event->attr.exclude_callchain_user)
return -EINVAL;
/*
* Same reason to disable user stack dump as for user space
* callchains above.
*/
if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
return -EINVAL;
}
/* No tracing, just counting, so no obvious leak */
if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
return 0;
/* Some events are ok to be traced by non-root users... */
if (p_event->attach_state == PERF_ATTACH_TASK) {
if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
return 0;
}
/*
* ...otherwise raw tracepoint data can be a severe data leak,
* only allow root to have these.
*/
if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
return -EPERM;
return 0;
}
static int perf_trace_event_reg(struct trace_event_call *tp_event,
struct perf_event *p_event)
{
struct hlist_head __percpu *list;
int ret = -ENOMEM;
int cpu;
p_event->tp_event = tp_event;
if (tp_event->perf_refcount++ > 0)
return 0;
list = alloc_percpu(struct hlist_head);
if (!list)
goto fail;
for_each_possible_cpu(cpu)
INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));
tp_event->perf_events = list;
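/*
* The per-CPU raw sample buffers are shared by every trace event in the
* system, so allocate them only when the first event registers.
*/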
if (!total_ref_count) {
char __percpu *buf;
int i;
for (i = 0; i < PERF_NR_CONTEXTS; i++) {
buf = (char __percpu *)alloc_percpu(perf_trace_t);
if (!buf)
goto fail;
perf_trace_buf[i] = buf;
}
}
ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
if (ret)
goto fail;
total_ref_count++;
return 0;
fail:
if (!total_ref_count) {
int i;
for (i = 0; i < PERF_NR_CONTEXTS; i++) {
free_percpu(perf_trace_buf[i]);
perf_trace_buf[i] = NULL;
}
}
if (!--tp_event->perf_refcount) {
free_percpu(tp_event->perf_events);
tp_event->perf_events = NULL;
}
return ret;
}
static void perf_trace_event_unreg(struct perf_event *p_event)
{
struct trace_event_call *tp_event = p_event->tp_event;
int i;
if (--tp_event->perf_refcount > 0)
goto out;
tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);
/*
* Ensure our callback won't be called anymore. The buffers
* will be freed after that.
*/
tracepoint_synchronize_unregister();
free_percpu(tp_event->perf_events);
tp_event->perf_events = NULL;
if (!--total_ref_count) {
for (i = 0; i < PERF_NR_CONTEXTS; i++) {
free_percpu(perf_trace_buf[i]);
perf_trace_buf[i] = NULL;
}
}
out:
module_put(tp_event->mod);
}
static int perf_trace_event_open(struct perf_event *p_event)
{
struct trace_event_call *tp_event = p_event->tp_event;
return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}
static void perf_trace_event_close(struct perf_event *p_event)
{
struct trace_event_call *tp_event = p_event->tp_event;
tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}
static int perf_trace_event_init(struct trace_event_call *tp_event,
struct perf_event *p_event)
{
int ret;
ret = perf_trace_event_perm(tp_event, p_event);
if (ret)
return ret;
ret = perf_trace_event_reg(tp_event, p_event);
if (ret)
return ret;
ret = perf_trace_event_open(p_event);
if (ret) {
perf_trace_event_unreg(p_event);
return ret;
}
return 0;
}
int perf_trace_init(struct perf_event *p_event)
{
struct trace_event_call *tp_event;
u64 event_id = p_event->attr.config;
int ret = -EINVAL;
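/*
* Find the trace event whose id matches attr.config, taking a module
* reference so the event cannot vanish while the perf event exists.
*/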
mutex_lock(&event_mutex);
list_for_each_entry(tp_event, &ftrace_events, list) {
if (tp_event->event.type == event_id &&
tp_event->class && tp_event->class->reg &&
try_module_get(tp_event->mod)) {
ret = perf_trace_event_init(tp_event, p_event);
if (ret)
module_put(tp_event->mod);
break;
}
}
mutex_unlock(&event_mutex);
return ret;
}
void perf_trace_destroy(struct perf_event *p_event)
{
mutex_lock(&event_mutex);
perf_trace_event_close(p_event);
perf_trace_event_unreg(p_event);
mutex_unlock(&event_mutex);
}
int perf_trace_add(struct perf_event *p_event, int flags)
{
struct trace_event_call *tp_event = p_event->tp_event;
struct hlist_head __percpu *pcpu_list;
struct hlist_head *list;
pcpu_list = tp_event->perf_events;
if (WARN_ON_ONCE(!pcpu_list))
return -EINVAL;
if (!(flags & PERF_EF_START))
p_event->hw.state = PERF_HES_STOPPED;
list = this_cpu_ptr(pcpu_list);
hlist_add_head_rcu(&p_event->hlist_entry, list);
return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}
void perf_trace_del(struct perf_event *p_event, int flags)
{
struct trace_event_call *tp_event = p_event->tp_event;
hlist_del_rcu(&p_event->hlist_entry);
tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
char *raw_data;
int rctx;
BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
"perf buffer not large enough"))
return NULL;
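/*
* Reserve a swevent recursion context so nested hits (e.g. an NMI landing
* inside an interrupt handler) each use a distinct per-CPU buffer.
*/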
*rctxp = rctx = perf_swevent_get_recursion_context();
if (rctx < 0)
return NULL;
if (regs)
*regs = this_cpu_ptr(&__perf_regs[rctx]);
raw_data = this_cpu_ptr(perf_trace_buf[rctx]);
/* zero the dead bytes from align to not leak stack to user */
memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);
void perf_trace_buf_update(void *record, u16 type)
{
struct trace_entry *entry = record;
int pc = preempt_count();
unsigned long flags;
local_save_flags(flags);
tracing_generic_entry_update(entry, flags, pc);
entry->type = type;
}
NOKPROBE_SYMBOL(perf_trace_buf_update);
#ifdef CONFIG_FUNCTION_TRACER
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
struct ftrace_entry *entry;
struct hlist_head *head;
struct pt_regs regs;
int rctx;
head = this_cpu_ptr(event_function.perf_events);
if (hlist_empty(head))
return;
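/*
* ENTRY_SIZE: the entry plus the u32 size header, padded to a u64 boundary,
* minus the header again; the raw-sample code re-adds the header when
* recording, so the total stays u64-aligned.
*/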
#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
sizeof(u64)) - sizeof(u32))
BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);
memset(&regs, 0, sizeof(regs));
perf_fetch_caller_regs(&regs);
entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
if (!entry)
return;
entry->ip = ip;
entry->parent_ip = parent_ip;
perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
1, &regs, head, NULL);
#undef ENTRY_SIZE
}
static int perf_ftrace_function_register(struct perf_event *event)
{
struct ftrace_ops *ops = &event->ftrace_ops;
ops->flags |= FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RCU;
ops->func = perf_ftrace_function_call;
return register_ftrace_function(ops);
}
static int perf_ftrace_function_unregister(struct perf_event *event)
{
struct ftrace_ops *ops = &event->ftrace_ops;
int ret = unregister_ftrace_function(ops);
ftrace_free_filter(ops);
return ret;
}
static void perf_ftrace_function_enable(struct perf_event *event)
{
ftrace_function_local_enable(&event->ftrace_ops);
}
static void perf_ftrace_function_disable(struct perf_event *event)
{
ftrace_function_local_disable(&event->ftrace_ops);
}
int perf_ftrace_event_register(struct trace_event_call *call,
enum trace_reg type, void *data)
{
switch (type) {
case TRACE_REG_REGISTER:
case TRACE_REG_UNREGISTER:
break;
case TRACE_REG_PERF_REGISTER:
case TRACE_REG_PERF_UNREGISTER:
return 0;
case TRACE_REG_PERF_OPEN:
return perf_ftrace_function_register(data);
case TRACE_REG_PERF_CLOSE:
return perf_ftrace_function_unregister(data);
case TRACE_REG_PERF_ADD:
perf_ftrace_function_enable(data);
return 0;
case TRACE_REG_PERF_DEL:
perf_ftrace_function_disable(data);
return 0;
}
return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */
| gpl-2.0 |
lolhi/at1-S0834211 | net/ipv4/xfrm4_policy.c | 541 | 7318 | /*
* xfrm4_policy.c
*
* Changes:
* Kazunori MIYAZAWA @USAGI
* YOSHIFUJI Hideaki @USAGI
* Split up af-specific portion
*
*/
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/inetdevice.h>
#include <linux/if_tunnel.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>
static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
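/*
 * Build a flowi4 for the given source/destination and resolve it
 * through the IPv4 routing table. Returns the route's dst_entry or an
 * ERR_PTR on lookup failure.
 */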
static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
int tos,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr)
{
struct rtable *rt;
memset(fl4, 0, sizeof(*fl4));
fl4->daddr = daddr->a4;
fl4->flowi4_tos = tos;
if (saddr)
fl4->saddr = saddr->a4;
rt = __ip_route_output_key(net, fl4);
if (!IS_ERR(rt))
return &rt->dst;
return ERR_CAST(rt);
}
static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr)
{
struct flowi4 fl4;
return __xfrm4_dst_lookup(net, &fl4, tos, saddr, daddr);
}
static int xfrm4_get_saddr(struct net *net,
xfrm_address_t *saddr, xfrm_address_t *daddr)
{
struct dst_entry *dst;
struct flowi4 fl4;
dst = __xfrm4_dst_lookup(net, &fl4, 0, NULL, daddr);
if (IS_ERR(dst))
return -EHOSTUNREACH;
saddr->a4 = fl4.saddr;
dst_release(dst);
return 0;
}
static int xfrm4_get_tos(const struct flowi *fl)
{
return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos; /* Strip ECN bits */
}
static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
int nfheader_len)
{
return 0;
}
static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
const struct flowi *fl)
{
struct rtable *rt = (struct rtable *)xdst->route;
const struct flowi4 *fl4 = &fl->u.ip4;
rt->rt_key_dst = fl4->daddr;
rt->rt_key_src = fl4->saddr;
rt->rt_key_tos = fl4->flowi4_tos;
rt->rt_route_iif = fl4->flowi4_iif;
rt->rt_iif = fl4->flowi4_iif;
rt->rt_oif = fl4->flowi4_oif;
rt->rt_mark = fl4->flowi4_mark;
xdst->u.dst.dev = dev;
dev_hold(dev);
xdst->u.rt.peer = rt->peer;
if (rt->peer)
atomic_inc(&rt->peer->refcnt);
/* Sheit... I remember I did this right. Apparently,
* it was magically lost, so this code needs audit */
xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST |
RTCF_LOCAL);
xdst->u.rt.rt_type = rt->rt_type;
xdst->u.rt.rt_src = rt->rt_src;
xdst->u.rt.rt_dst = rt->rt_dst;
xdst->u.rt.rt_gateway = rt->rt_gateway;
xdst->u.rt.rt_spec_dst = rt->rt_spec_dst;
return 0;
}
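/*
 * Extract the flow selector from an skb: addresses, TOS, protocol and,
 * for known transports, ports, ICMP type/code, IPsec SPI or GRE key.
 * With @reverse set, source and destination are swapped.
 */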
static void
_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
{
const struct iphdr *iph = ip_hdr(skb);
u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
struct flowi4 *fl4 = &fl->u.ip4;
memset(fl4, 0, sizeof(struct flowi4));
fl4->flowi4_mark = skb->mark;
if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) {
switch (iph->protocol) {
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
case IPPROTO_TCP:
case IPPROTO_SCTP:
case IPPROTO_DCCP:
if (xprth + 4 < skb->data ||
pskb_may_pull(skb, xprth + 4 - skb->data)) {
__be16 *ports = (__be16 *)xprth;
fl4->fl4_sport = ports[!!reverse];
fl4->fl4_dport = ports[!reverse];
}
break;
case IPPROTO_ICMP:
if (pskb_may_pull(skb, xprth + 2 - skb->data)) {
u8 *icmp = xprth;
fl4->fl4_icmp_type = icmp[0];
fl4->fl4_icmp_code = icmp[1];
}
break;
case IPPROTO_ESP:
if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
__be32 *ehdr = (__be32 *)xprth;
fl4->fl4_ipsec_spi = ehdr[0];
}
break;
case IPPROTO_AH:
if (pskb_may_pull(skb, xprth + 8 - skb->data)) {
__be32 *ah_hdr = (__be32 *)xprth;
fl4->fl4_ipsec_spi = ah_hdr[1];
}
break;
case IPPROTO_COMP:
if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
__be16 *ipcomp_hdr = (__be16 *)xprth;
fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
}
break;
case IPPROTO_GRE:
if (pskb_may_pull(skb, xprth + 12 - skb->data)) {
__be16 *greflags = (__be16 *)xprth;
__be32 *gre_hdr = (__be32 *)xprth;
if (greflags[0] & GRE_KEY) {
if (greflags[0] & GRE_CSUM)
gre_hdr++;
fl4->fl4_gre_key = gre_hdr[1];
}
}
break;
default:
fl4->fl4_ipsec_spi = 0;
break;
}
}
fl4->flowi4_proto = iph->protocol;
fl4->daddr = reverse ? iph->saddr : iph->daddr;
fl4->saddr = reverse ? iph->daddr : iph->saddr;
fl4->flowi4_tos = iph->tos;
}
static inline int xfrm4_garbage_collect(struct dst_ops *ops)
{
struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
xfrm4_policy_afinfo.garbage_collect(net);
return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
}
static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
struct dst_entry *path = xdst->route;
path->ops->update_pmtu(path, mtu);
}
static void xfrm4_dst_destroy(struct dst_entry *dst)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
dst_destroy_metrics_generic(dst);
if (likely(xdst->u.rt.peer))
inet_putpeer(xdst->u.rt.peer);
xfrm_dst_destroy(xdst);
}
static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
int unregister)
{
if (!unregister)
return;
xfrm_dst_ifdown(dst, dev);
}
static struct dst_ops xfrm4_dst_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
.gc = xfrm4_garbage_collect,
.update_pmtu = xfrm4_update_pmtu,
.cow_metrics = dst_cow_metrics_generic,
.destroy = xfrm4_dst_destroy,
.ifdown = xfrm4_dst_ifdown,
.local_out = __ip_local_out,
.gc_thresh = 1024,
};
static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
.family = AF_INET,
.dst_ops = &xfrm4_dst_ops,
.dst_lookup = xfrm4_dst_lookup,
.get_saddr = xfrm4_get_saddr,
.decode_session = _decode_session4,
.get_tos = xfrm4_get_tos,
.init_path = xfrm4_init_path,
.fill_dst = xfrm4_fill_dst,
.blackhole_route = ipv4_blackhole_route,
};
#ifdef CONFIG_SYSCTL
static struct ctl_table xfrm4_policy_table[] = {
{
.procname = "xfrm4_gc_thresh",
.data = &init_net.xfrm.xfrm4_dst_ops.gc_thresh,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
};
static struct ctl_table_header *sysctl_hdr;
#endif
static void __init xfrm4_policy_init(void)
{
xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
}
static void __exit xfrm4_policy_fini(void)
{
#ifdef CONFIG_SYSCTL
if (sysctl_hdr)
unregister_net_sysctl_table(sysctl_hdr);
#endif
xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo);
}
void __init xfrm4_init(int rt_max_size)
{
/*
* Select a default value for gc_thresh based on the main route
* table hash size. The worst case scenario is ipsec operating in
* transport mode, in which case we create a dst_entry per socket.
* The xfrm gc algorithm starts trying to remove entries at
* gc_thresh and prevents new allocations at 2*gc_thresh, so let's
* set the initial xfrm gc_thresh value to rt_max_size/2. That lets
* us store one ipsec connection per route table entry and start
* cleaning when we're 1/2 full.
*/
xfrm4_dst_ops.gc_thresh = rt_max_size/2;
dst_entries_init(&xfrm4_dst_ops);
xfrm4_state_init();
xfrm4_policy_init();
#ifdef CONFIG_SYSCTL
sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path,
xfrm4_policy_table);
#endif
}
| gpl-2.0 |
TEAM-Gummy/kernel_asus_grouper | kernel/trace/trace_stack.c | 541 | 7789 | /*
* Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
*
*/
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"
#define STACK_TRACE_ENTRIES 500
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
{ [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
static struct stack_trace max_stack_trace = {
.max_entries = STACK_TRACE_ENTRIES,
.entries = stack_dump_trace,
};
static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);
int stack_tracer_enabled;
static int last_stack_tracer_enabled;
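/*
 * Measure how much of the current thread stack is in use and, when a
 * new maximum is seen, record the backtrace plus a per-entry depth
 * index under max_stack_lock.
 */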
static inline void check_stack(void)
{
unsigned long this_size, flags;
unsigned long *p, *top, *start;
int i;
this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
this_size = THREAD_SIZE - this_size;
if (this_size <= max_stack_size)
return;
/* we do not handle interrupt stacks yet */
if (!object_is_on_stack(&this_size))
return;
local_irq_save(flags);
arch_spin_lock(&max_stack_lock);
/* a race could have already updated it */
if (this_size <= max_stack_size)
goto out;
max_stack_size = this_size;
max_stack_trace.nr_entries = 0;
max_stack_trace.skip = 3;
save_stack_trace(&max_stack_trace);
/*
* Now find where in the stack these are.
*/
i = 0;
start = &this_size;
top = (unsigned long *)
(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
/*
* Loop through all the entries. Some entries may, for whatever
* reason, be missing from the stack, so we have to account for
* them. If they are all present, this loop runs only once. This
* code only runs on a new max, so it is far from a fast path.
*/
while (i < max_stack_trace.nr_entries) {
int found = 0;
stack_dump_index[i] = this_size;
p = start;
for (; p < top && i < max_stack_trace.nr_entries; p++) {
if (*p == stack_dump_trace[i]) {
this_size = stack_dump_index[i++] =
(top - p) * sizeof(unsigned long);
found = 1;
/* Start the search from here */
start = p + 1;
}
}
if (!found)
i++;
}
out:
arch_spin_unlock(&max_stack_lock);
local_irq_restore(flags);
}
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
int cpu;
if (unlikely(!ftrace_enabled || stack_trace_disabled))
return;
preempt_disable_notrace();
cpu = raw_smp_processor_id();
/* no atomic needed; this variable is only modified by this cpu */
if (per_cpu(trace_active, cpu)++ != 0)
goto out;
check_stack();
out:
per_cpu(trace_active, cpu)--;
/* prevent recursion in schedule */
preempt_enable_notrace();
}
static struct ftrace_ops trace_ops __read_mostly =
{
.func = stack_trace_call,
.flags = FTRACE_OPS_FL_GLOBAL,
};
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
unsigned long *ptr = filp->private_data;
char buf[64];
int r;
r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
if (r > sizeof(buf))
r = sizeof(buf);
return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *ppos)
{
long *ptr = filp->private_data;
unsigned long val, flags;
int ret;
int cpu;
ret = kstrtoul_from_user(ubuf, count, 10, &val);
if (ret)
return ret;
local_irq_save(flags);
/*
* In case we trace inside arch_spin_lock() or after (NMI),
* we would cause a circular lock, so we also need to increase
* the percpu trace_active here.
*/
cpu = smp_processor_id();
per_cpu(trace_active, cpu)++;
arch_spin_lock(&max_stack_lock);
*ptr = val;
arch_spin_unlock(&max_stack_lock);
per_cpu(trace_active, cpu)--;
local_irq_restore(flags);
return count;
}
static const struct file_operations stack_max_size_fops = {
.open = tracing_open_generic,
.read = stack_max_size_read,
.write = stack_max_size_write,
.llseek = default_llseek,
};
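/*
 * seq_file iterators for the stack_trace file. t_start() disables
 * interrupts, bumps the per-cpu trace_active count to fend off
 * recursion and takes max_stack_lock; t_stop() undoes all of that, so
 * the dump stays consistent across the whole iteration.
 */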
static void *
__next(struct seq_file *m, loff_t *pos)
{
long n = *pos - 1;
if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
return NULL;
m->private = (void *)n;
return &m->private;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
(*pos)++;
return __next(m, pos);
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
int cpu;
local_irq_disable();
cpu = smp_processor_id();
per_cpu(trace_active, cpu)++;
arch_spin_lock(&max_stack_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
return __next(m, pos);
}
static void t_stop(struct seq_file *m, void *p)
{
int cpu;
arch_spin_unlock(&max_stack_lock);
cpu = smp_processor_id();
per_cpu(trace_active, cpu)--;
local_irq_enable();
}
static int trace_lookup_stack(struct seq_file *m, long i)
{
unsigned long addr = stack_dump_trace[i];
return seq_printf(m, "%pS\n", (void *)addr);
}
static void print_disabled(struct seq_file *m)
{
seq_puts(m, "#\n"
"# Stack tracer disabled\n"
"#\n"
"# To enable the stack tracer, either add 'stacktrace' to the\n"
"# kernel command line\n"
"# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
"#\n");
}
static int t_show(struct seq_file *m, void *v)
{
long i;
int size;
if (v == SEQ_START_TOKEN) {
seq_printf(m, " Depth Size Location"
" (%d entries)\n"
" ----- ---- --------\n",
max_stack_trace.nr_entries - 1);
if (!stack_tracer_enabled && !max_stack_size)
print_disabled(m);
return 0;
}
i = *(long *)v;
if (i >= max_stack_trace.nr_entries ||
stack_dump_trace[i] == ULONG_MAX)
return 0;
if (i+1 == max_stack_trace.nr_entries ||
stack_dump_trace[i+1] == ULONG_MAX)
size = stack_dump_index[i];
else
size = stack_dump_index[i] - stack_dump_index[i+1];
seq_printf(m, "%3ld) %8d %5d ", i, stack_dump_index[i], size);
trace_lookup_stack(m, i);
return 0;
}
static const struct seq_operations stack_trace_seq_ops = {
.start = t_start,
.next = t_next,
.stop = t_stop,
.show = t_show,
};
static int stack_trace_open(struct inode *inode, struct file *file)
{
return seq_open(file, &stack_trace_seq_ops);
}
static const struct file_operations stack_trace_fops = {
.open = stack_trace_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
int
stack_trace_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret;
mutex_lock(&stack_sysctl_mutex);
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (ret || !write ||
(last_stack_tracer_enabled == !!stack_tracer_enabled))
goto out;
last_stack_tracer_enabled = !!stack_tracer_enabled;
if (stack_tracer_enabled)
register_ftrace_function(&trace_ops);
else
unregister_ftrace_function(&trace_ops);
out:
mutex_unlock(&stack_sysctl_mutex);
return ret;
}
static __init int enable_stacktrace(char *str)
{
stack_tracer_enabled = 1;
last_stack_tracer_enabled = 1;
return 1;
}
__setup("stacktrace", enable_stacktrace);
static __init int stack_trace_init(void)
{
struct dentry *d_tracer;
d_tracer = tracing_init_dentry();
trace_create_file("stack_max_size", 0644, d_tracer,
&max_stack_size, &stack_max_size_fops);
trace_create_file("stack_trace", 0444, d_tracer,
NULL, &stack_trace_fops);
if (stack_tracer_enabled)
register_ftrace_function(&trace_ops);
return 0;
}
device_initcall(stack_trace_init);
| gpl-2.0 |
osmc/vero-linux | net/ieee802154/nl-phy.c | 797 | 8080 | /*
* Netlink interface for IEEE 802.15.4 stack
*
* Copyright 2007, 2008 Siemens AG
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Written by:
* Sergey Lapin <slapin@ossfans.org>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
* Maxim Osipov <maxim.osipov@siemens.com>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/if_arp.h>
#include <net/netlink.h>
#include <net/genetlink.h>
#include <net/cfg802154.h>
#include <net/af_ieee802154.h>
#include <net/ieee802154_netdev.h>
#include <net/rtnetlink.h> /* for rtnl_{un,}lock */
#include <linux/nl802154.h>
#include "ieee802154.h"
#include "rdev-ops.h"
#include "core.h"
static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
u32 seq, int flags, struct wpan_phy *phy)
{
void *hdr;
int i, pages = 0;
uint32_t *buf = kzalloc(32 * sizeof(uint32_t), GFP_KERNEL);
pr_debug("%s\n", __func__);
if (!buf)
return -EMSGSIZE;
hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
IEEE802154_LIST_PHY);
if (!hdr)
goto out;
rtnl_lock();
if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) ||
nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel))
goto nla_put_failure;
for (i = 0; i < 32; i++) {
if (phy->supported.channels[i])
buf[pages++] = phy->supported.channels[i] | (i << 27);
}
if (pages &&
nla_put(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST,
pages * sizeof(uint32_t), buf))
goto nla_put_failure;
rtnl_unlock();
kfree(buf);
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
rtnl_unlock();
genlmsg_cancel(msg, hdr);
out:
kfree(buf);
return -EMSGSIZE;
}
int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info)
{
/* Request for interface name, index, type, IEEE address,
* PAN Id, short address
*/
struct sk_buff *msg;
struct wpan_phy *phy;
const char *name;
int rc = -ENOBUFS;
pr_debug("%s\n", __func__);
if (!info->attrs[IEEE802154_ATTR_PHY_NAME])
return -EINVAL;
name = nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]);
if (name[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0')
return -EINVAL; /* phy name should be null-terminated */
phy = wpan_phy_find(name);
if (!phy)
return -ENODEV;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
goto out_dev;
rc = ieee802154_nl_fill_phy(msg, info->snd_portid, info->snd_seq,
0, phy);
if (rc < 0)
goto out_free;
wpan_phy_put(phy);
return genlmsg_reply(msg, info);
out_free:
nlmsg_free(msg);
out_dev:
wpan_phy_put(phy);
return rc;
}
struct dump_phy_data {
struct sk_buff *skb;
struct netlink_callback *cb;
int idx, s_idx;
};
static int ieee802154_dump_phy_iter(struct wpan_phy *phy, void *_data)
{
int rc;
struct dump_phy_data *data = _data;
pr_debug("%s\n", __func__);
if (data->idx++ < data->s_idx)
return 0;
rc = ieee802154_nl_fill_phy(data->skb,
NETLINK_CB(data->cb->skb).portid,
data->cb->nlh->nlmsg_seq,
NLM_F_MULTI,
phy);
if (rc < 0) {
data->idx--;
return rc;
}
return 0;
}
int ieee802154_dump_phy(struct sk_buff *skb, struct netlink_callback *cb)
{
struct dump_phy_data data = {
.cb = cb,
.skb = skb,
.s_idx = cb->args[0],
.idx = 0,
};
pr_debug("%s\n", __func__);
wpan_phy_for_each(ieee802154_dump_phy_iter, &data);
cb->args[0] = data.idx;
return skb->len;
}
int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
struct wpan_phy *phy;
const char *name;
const char *devname;
int rc = -ENOBUFS;
struct net_device *dev;
int type = __IEEE802154_DEV_INVALID;
unsigned char name_assign_type;
pr_debug("%s\n", __func__);
if (!info->attrs[IEEE802154_ATTR_PHY_NAME])
return -EINVAL;
name = nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]);
if (name[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0')
return -EINVAL; /* phy name should be null-terminated */
if (info->attrs[IEEE802154_ATTR_DEV_NAME]) {
devname = nla_data(info->attrs[IEEE802154_ATTR_DEV_NAME]);
if (devname[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1]
!= '\0')
return -EINVAL; /* dev name should be null-terminated */
name_assign_type = NET_NAME_USER;
} else {
devname = "wpan%d";
name_assign_type = NET_NAME_ENUM;
}
if (strlen(devname) >= IFNAMSIZ)
return -ENAMETOOLONG;
phy = wpan_phy_find(name);
if (!phy)
return -ENODEV;
msg = ieee802154_nl_new_reply(info, 0, IEEE802154_ADD_IFACE);
if (!msg)
goto out_dev;
if (info->attrs[IEEE802154_ATTR_HW_ADDR] &&
nla_len(info->attrs[IEEE802154_ATTR_HW_ADDR]) !=
IEEE802154_ADDR_LEN) {
rc = -EINVAL;
goto nla_put_failure;
}
if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) {
type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]);
if (type >= __IEEE802154_DEV_MAX) {
rc = -EINVAL;
goto nla_put_failure;
}
}
dev = rdev_add_virtual_intf_deprecated(wpan_phy_to_rdev(phy), devname,
name_assign_type, type);
if (IS_ERR(dev)) {
rc = PTR_ERR(dev);
goto nla_put_failure;
}
dev_hold(dev);
if (info->attrs[IEEE802154_ATTR_HW_ADDR]) {
struct sockaddr addr;
addr.sa_family = ARPHRD_IEEE802154;
nla_memcpy(&addr.sa_data, info->attrs[IEEE802154_ATTR_HW_ADDR],
IEEE802154_ADDR_LEN);
/* strangely enough, some callbacks (inetdev_event) from
* dev_set_mac_address require RTNL_LOCK
*/
rtnl_lock();
rc = dev_set_mac_address(dev, &addr);
rtnl_unlock();
if (rc)
goto dev_unregister;
}
if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
goto nla_put_failure;
dev_put(dev);
wpan_phy_put(phy);
return ieee802154_nl_reply(msg, info);
dev_unregister:
rtnl_lock(); /* del_iface must be called with RTNL lock */
rdev_del_virtual_intf_deprecated(wpan_phy_to_rdev(phy), dev);
dev_put(dev);
rtnl_unlock();
nla_put_failure:
nlmsg_free(msg);
out_dev:
wpan_phy_put(phy);
return rc;
}
int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
struct wpan_phy *phy;
const char *name;
int rc;
struct net_device *dev;
pr_debug("%s\n", __func__);
if (!info->attrs[IEEE802154_ATTR_DEV_NAME])
return -EINVAL;
name = nla_data(info->attrs[IEEE802154_ATTR_DEV_NAME]);
if (name[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1] != '\0')
return -EINVAL; /* name should be null-terminated */
dev = dev_get_by_name(genl_info_net(info), name);
if (!dev)
return -ENODEV;
phy = dev->ieee802154_ptr->wpan_phy;
BUG_ON(!phy);
get_device(&phy->dev);
rc = -EINVAL;
/* phy name is optional, but should be checked if it's given */
if (info->attrs[IEEE802154_ATTR_PHY_NAME]) {
struct wpan_phy *phy2;
const char *pname =
nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]);
if (pname[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1]
!= '\0')
/* name should be null-terminated */
goto out_dev;
phy2 = wpan_phy_find(pname);
if (!phy2)
goto out_dev;
if (phy != phy2) {
wpan_phy_put(phy2);
goto out_dev;
}
}
rc = -ENOBUFS;
msg = ieee802154_nl_new_reply(info, 0, IEEE802154_DEL_IFACE);
if (!msg)
goto out_dev;
rtnl_lock();
rdev_del_virtual_intf_deprecated(wpan_phy_to_rdev(phy), dev);
/* We don't have the device anymore */
dev_put(dev);
dev = NULL;
rtnl_unlock();
if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, name))
goto nla_put_failure;
wpan_phy_put(phy);
return ieee802154_nl_reply(msg, info);
nla_put_failure:
nlmsg_free(msg);
out_dev:
wpan_phy_put(phy);
if (dev)
dev_put(dev);
return rc;
}
| gpl-2.0 |
mirror-androidarmv6/android_kernel_lge_msm7x27 | arch/x86/kernel/dumpstack_64.c | 797 | 8625 | /*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
*/
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <asm/stacktrace.h>
#include "dumpstack.h"
#define N_EXCEPTION_STACKS_END \
(N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2)
static char x86_stack_ids[][8] = {
[ DEBUG_STACK-1 ] = "#DB",
[ NMI_STACK-1 ] = "NMI",
[ DOUBLEFAULT_STACK-1 ] = "#DF",
[ STACKFAULT_STACK-1 ] = "#SS",
[ MCE_STACK-1 ] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
[ N_EXCEPTION_STACKS ...
N_EXCEPTION_STACKS_END ] = "#DB[?]"
#endif
};
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
unsigned *usedp, char **idp)
{
unsigned k;
/*
* Iterate over all exception stacks, and figure out whether
* 'stack' is in one of them:
*/
for (k = 0; k < N_EXCEPTION_STACKS; k++) {
unsigned long end = per_cpu(orig_ist, cpu).ist[k];
/*
* Is 'stack' above this exception frame's end?
* If yes then skip to the next frame.
*/
if (stack >= end)
continue;
/*
* Is 'stack' above this exception frame's start address?
* If yes then we found the right frame.
*/
if (stack >= end - EXCEPTION_STKSZ) {
/*
* Make sure we only iterate through an exception
* stack once. If it comes up for the second time
* then there's something wrong going on - just
* break out and return NULL:
*/
if (*usedp & (1U << k))
break;
*usedp |= 1U << k;
*idp = x86_stack_ids[k];
return (unsigned long *)end;
}
/*
* If this is a debug stack, and if it has a larger size than
* the usual exception stacks, then 'stack' might still
* be within the lower portion of the debug stack:
*/
#if DEBUG_STKSZ > EXCEPTION_STKSZ
if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
unsigned j = N_EXCEPTION_STACKS - 1;
/*
* Black magic. A large debug stack is composed of
* multiple exception stack entries, which we
* iterate through now. Don't look:
*/
do {
++j;
end -= EXCEPTION_STKSZ;
x86_stack_ids[j][4] = '1' +
(j - N_EXCEPTION_STACKS);
} while (stack < end - EXCEPTION_STKSZ);
if (*usedp & (1U << j))
break;
*usedp |= 1U << j;
*idp = x86_stack_ids[j];
return (unsigned long *)end;
}
#endif
}
return NULL;
}
static inline int
in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
unsigned long *irq_stack_end)
{
return (stack >= irq_stack && stack < irq_stack_end);
}
/*
* We are returning from the irq stack and going back to the previous one.
* If the previous stack is also in the irq stack, then bp in the first
* frame of the irq stack points to the previous, interrupted one.
* Otherwise we have another level of indirection: We first save
* the bp of the previous stack, then we switch the stack to the irq one
* and save a new bp that links to the previous one.
* (See save_args())
*/
static inline unsigned long
fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
unsigned long *irq_stack, unsigned long *irq_stack_end)
{
#ifdef CONFIG_FRAME_POINTER
struct stack_frame *frame = (struct stack_frame *)bp;
unsigned long next;
if (!in_irq_stack(stack, irq_stack, irq_stack_end)) {
if (!probe_kernel_address(&frame->next_frame, next))
return next;
else
WARN_ONCE(1, "Perf: bad frame pointer = %p in "
"callchain\n", &frame->next_frame);
}
#endif
return bp;
}
/*
* x86-64 can have up to three kernel stacks:
* process stack
* interrupt stack
* severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
*/
void dump_trace(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data)
{
const unsigned cpu = get_cpu();
unsigned long *irq_stack_end =
(unsigned long *)per_cpu(irq_stack_ptr, cpu);
unsigned used = 0;
struct thread_info *tinfo;
int graph = 0;
if (!task)
task = current;
if (!stack) {
unsigned long dummy;
stack = &dummy;
if (task && task != current)
stack = (unsigned long *)task->thread.sp;
}
#ifdef CONFIG_FRAME_POINTER
if (!bp) {
if (task == current) {
/* Grab bp right from our regs */
get_bp(bp);
} else {
/* bp is the last reg pushed by switch_to */
bp = *(unsigned long *) task->thread.sp;
}
}
#endif
/*
* Print function call entries in all stacks, starting at the
* current stack address. If the stacks consist of nested
* exceptions, each one is unwound in turn until the process
* stack is reached.
*/
tinfo = task_thread_info(task);
for (;;) {
char *id;
unsigned long *estack_end;
estack_end = in_exception_stack(cpu, (unsigned long)stack,
&used, &id);
if (estack_end) {
if (ops->stack(data, id) < 0)
break;
bp = ops->walk_stack(tinfo, stack, bp, ops,
data, estack_end, &graph);
ops->stack(data, "<EOE>");
/*
* We link to the next stack via the
* second-to-last pointer (index -2 to end) in the
* exception stack:
*/
stack = (unsigned long *) estack_end[-2];
continue;
}
if (irq_stack_end) {
unsigned long *irq_stack;
irq_stack = irq_stack_end -
(IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);
if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
if (ops->stack(data, "IRQ") < 0)
break;
bp = ops->walk_stack(tinfo, stack, bp,
ops, data, irq_stack_end, &graph);
/*
* We link to the next stack (which would normally be
* the process stack) via the last
* pointer (index -1 to end) in the IRQ stack:
*/
stack = (unsigned long *) (irq_stack_end[-1]);
bp = fixup_bp_irq_link(bp, stack, irq_stack,
irq_stack_end);
irq_stack_end = NULL;
ops->stack(data, "EOI");
continue;
}
}
break;
}
/*
* This handles the process stack:
*/
bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
put_cpu();
}
EXPORT_SYMBOL(dump_trace);
void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *sp, unsigned long bp, char *log_lvl)
{
unsigned long *irq_stack_end;
unsigned long *irq_stack;
unsigned long *stack;
int cpu;
int i;
preempt_disable();
cpu = smp_processor_id();
irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
irq_stack = (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);
/*
* Debugging aid: "show_stack(NULL, NULL);" prints the
* back trace for this cpu:
*/
if (sp == NULL) {
if (task)
sp = (unsigned long *)task->thread.sp;
else
sp = (unsigned long *)&sp;
}
stack = sp;
for (i = 0; i < kstack_depth_to_print; i++) {
if (stack >= irq_stack && stack <= irq_stack_end) {
if (stack == irq_stack_end) {
stack = (unsigned long *) (irq_stack_end[-1]);
printk(" <EOI> ");
}
} else {
if (((long) stack & (THREAD_SIZE-1)) == 0)
break;
}
if (i && ((i % STACKSLOTS_PER_LINE) == 0))
printk("\n%s", log_lvl);
printk(" %016lx", *stack++);
touch_nmi_watchdog();
}
preempt_enable();
printk("\n");
show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
void show_registers(struct pt_regs *regs)
{
int i;
unsigned long sp;
const int cpu = smp_processor_id();
struct task_struct *cur = current;
sp = regs->sp;
printk("CPU %d ", cpu);
print_modules();
__show_regs(regs, 1);
printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
cur->comm, cur->pid, task_thread_info(cur), cur);
/*
* When in-kernel, we also print out the stack and code at the
* time of the fault..
*/
if (!user_mode(regs)) {
unsigned int code_prologue = code_bytes * 43 / 64;
unsigned int code_len = code_bytes;
unsigned char c;
u8 *ip;
printk(KERN_EMERG "Stack:\n");
show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
regs->bp, KERN_EMERG);
printk(KERN_EMERG "Code: ");
ip = (u8 *)regs->ip - code_prologue;
if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
/* try starting at IP */
ip = (u8 *)regs->ip;
code_len = code_len - code_prologue + 1;
}
for (i = 0; i < code_len; i++, ip++) {
if (ip < (u8 *)PAGE_OFFSET ||
probe_kernel_address(ip, c)) {
printk(" Bad RIP value.");
break;
}
if (ip == (u8 *)regs->ip)
printk("<%02x> ", c);
else
printk("%02x ", c);
}
}
printk("\n");
}
int is_valid_bugaddr(unsigned long ip)
{
unsigned short ud2;
if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
return 0;
return ud2 == 0x0b0f;
}
| gpl-2.0 |
AiJiaZone/linux-4.0 | virt/drivers/usb/host/ehci-tilegx.c | 1309 | 5200 | /*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/*
* Tilera TILE-Gx USB EHCI host controller driver.
*/
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/usb/tilegx.h>
#include <linux/usb.h>
#include <asm/homecache.h>
#include <gxio/iorpc_usb_host.h>
#include <gxio/usb_host.h>
static void tilegx_start_ehc(void)
{
}
static void tilegx_stop_ehc(void)
{
}
static int tilegx_ehci_setup(struct usb_hcd *hcd)
{
int ret = ehci_init(hcd);
/*
* Some drivers do:
*
* struct ehci_hcd *ehci = hcd_to_ehci(hcd);
* ehci->need_io_watchdog = 0;
*
* here, but since this is a new driver we're going to leave the
* watchdog enabled. Later we may try to turn it off and see
* whether we run into any problems.
*/
return ret;
}
static const struct hc_driver ehci_tilegx_hc_driver = {
.description = hcd_name,
.product_desc = "Tile-Gx EHCI",
.hcd_priv_size = sizeof(struct ehci_hcd),
/*
* Generic hardware linkage.
*/
.irq = ehci_irq,
.flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* Basic lifecycle operations.
*/
.reset = tilegx_ehci_setup,
.start = ehci_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
/*
* Managing I/O requests and associated device resources.
*/
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
.endpoint_reset = ehci_endpoint_reset,
/*
* Scheduling support.
*/
.get_frame_number = ehci_get_frame,
/*
* Root hub support.
*/
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
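/*
 * Probe one TILE-Gx EHCI controller: initialize the GXIO context,
 * create the HCD, map the MMIO registers, set up the per-cpu IRQ and
 * register client memory before adding the HCD to the USB core.
 */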
static int ehci_hcd_tilegx_drv_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
pte_t pte = { 0 };
int my_cpu = smp_processor_id();
int ret;
if (usb_disabled())
return -ENODEV;
/*
* Try to initialize our GXIO context; if we can't, the device
* doesn't exist.
*/
if (gxio_usb_host_init(&pdata->usb_ctx, pdata->dev_index, 1) != 0)
return -ENXIO;
hcd = usb_create_hcd(&ehci_tilegx_hc_driver, &pdev->dev,
dev_name(&pdev->dev));
if (!hcd) {
ret = -ENOMEM;
goto err_hcd;
}
/*
* We don't use rsrc_start to map in our registers, but it seems like
* we ought to set it to something, so we use the register VA.
*/
hcd->rsrc_start =
(ulong) gxio_usb_host_get_reg_start(&pdata->usb_ctx);
hcd->rsrc_len = gxio_usb_host_get_reg_len(&pdata->usb_ctx);
hcd->regs = gxio_usb_host_get_reg_start(&pdata->usb_ctx);
tilegx_start_ehc();
ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs;
ehci->regs =
hcd->regs + HC_LENGTH(ehci, readl(&ehci->caps->hc_capbase));
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = readl(&ehci->caps->hcs_params);
/* Create our IRQs and register them. */
pdata->irq = irq_alloc_hwirq(-1);
if (!pdata->irq) {
ret = -ENXIO;
goto err_no_irq;
}
tile_irq_activate(pdata->irq, TILE_IRQ_PERCPU);
/* Configure interrupts. */
ret = gxio_usb_host_cfg_interrupt(&pdata->usb_ctx,
cpu_x(my_cpu), cpu_y(my_cpu),
KERNEL_PL, pdata->irq);
if (ret) {
ret = -ENXIO;
goto err_have_irq;
}
/* Register all of our memory. */
pte = pte_set_home(pte, PAGE_HOME_HASH);
ret = gxio_usb_host_register_client_memory(&pdata->usb_ctx, pte, 0);
if (ret) {
ret = -ENXIO;
goto err_have_irq;
}
ret = usb_add_hcd(hcd, pdata->irq, IRQF_SHARED);
if (ret == 0) {
platform_set_drvdata(pdev, hcd);
device_wakeup_enable(hcd->self.controller);
return ret;
}
err_have_irq:
irq_free_hwirq(pdata->irq);
err_no_irq:
tilegx_stop_ehc();
usb_put_hcd(hcd);
err_hcd:
gxio_usb_host_destroy(&pdata->usb_ctx);
return ret;
}
static int ehci_hcd_tilegx_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
tilegx_stop_ehc();
gxio_usb_host_destroy(&pdata->usb_ctx);
irq_free_hwirq(pdata->irq);
return 0;
}
static void ehci_hcd_tilegx_drv_shutdown(struct platform_device *pdev)
{
usb_hcd_platform_shutdown(pdev);
ehci_hcd_tilegx_drv_remove(pdev);
}
static struct platform_driver ehci_hcd_tilegx_driver = {
.probe = ehci_hcd_tilegx_drv_probe,
.remove = ehci_hcd_tilegx_drv_remove,
.shutdown = ehci_hcd_tilegx_drv_shutdown,
.driver = {
.name = "tilegx-ehci",
}
};
MODULE_ALIAS("platform:tilegx-ehci");
| gpl-2.0 |
papi92/android_kernel_samsung_g906s | drivers/gpu/drm/mgag200/mgag200_fb.c | 2077 | 7476 | /*
* Copyright 2010 Matt Turner.
* Copyright 2012 Red Hat
*
* This file is subject to the terms and conditions of the GNU General
* Public License version 2. See the file COPYING in the main
* directory of this archive for more details.
*
* Authors: Matthew Garrett
* Matt Turner
* Dave Airlie
*/
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <linux/fb.h>
#include "mgag200_drv.h"
static void mga_dirty_update(struct mga_fbdev *mfbdev,
int x, int y, int width, int height)
{
int i;
struct drm_gem_object *obj;
struct mgag200_bo *bo;
int src_offset, dst_offset;
int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
int ret;
bool unmap = false;
bool store_for_later = false;
int x2, y2;
unsigned long flags;
obj = mfbdev->mfb.obj;
bo = gem_to_mga_bo(obj);
/*
* Try to reserve the BO; if we fail with -EBUSY the BO is
* being moved, so store up the damage until later.
*/
ret = mgag200_bo_reserve(bo, true);
if (ret) {
if (ret != -EBUSY)
return;
store_for_later = true;
}
x2 = x + width - 1;
y2 = y + height - 1;
spin_lock_irqsave(&mfbdev->dirty_lock, flags);
if (mfbdev->y1 < y)
y = mfbdev->y1;
if (mfbdev->y2 > y2)
y2 = mfbdev->y2;
if (mfbdev->x1 < x)
x = mfbdev->x1;
if (mfbdev->x2 > x2)
x2 = mfbdev->x2;
if (store_for_later) {
mfbdev->x1 = x;
mfbdev->x2 = x2;
mfbdev->y1 = y;
mfbdev->y2 = y2;
spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
return;
}
mfbdev->x1 = mfbdev->y1 = INT_MAX;
mfbdev->x2 = mfbdev->y2 = 0;
spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
if (!bo->kmap.virtual) {
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret) {
DRM_ERROR("failed to kmap fb updates\n");
mgag200_bo_unreserve(bo);
return;
}
unmap = true;
}
for (i = y; i <= y2; i++) {
/* assume equal stride for now */
src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp);
}
if (unmap)
ttm_bo_kunmap(&bo->kmap);
mgag200_bo_unreserve(bo);
}
static void mga_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
struct mga_fbdev *mfbdev = info->par;
sys_fillrect(info, rect);
mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
rect->height);
}
static void mga_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
struct mga_fbdev *mfbdev = info->par;
sys_copyarea(info, area);
mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
area->height);
}
static void mga_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct mga_fbdev *mfbdev = info->par;
sys_imageblit(info, image);
mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
image->height);
}
static struct fb_ops mgag200fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = mga_fillrect,
.fb_copyarea = mga_copyarea,
.fb_imageblit = mga_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
};
static int mgag200fb_create_object(struct mga_fbdev *afbdev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **gobj_p)
{
struct drm_device *dev = afbdev->helper.dev;
u32 size;
struct drm_gem_object *gobj;
int ret = 0;
size = mode_cmd->pitches[0] * mode_cmd->height;
ret = mgag200_gem_create(dev, size, true, &gobj);
if (ret)
return ret;
*gobj_p = gobj;
return ret;
}
static int mgag200fb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
struct drm_device *dev = mfbdev->helper.dev;
struct drm_mode_fb_cmd2 mode_cmd;
struct mga_device *mdev = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_gem_object *gobj = NULL;
struct device *device = &dev->pdev->dev;
struct mgag200_bo *bo;
int ret;
void *sysram;
int size;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
size = mode_cmd.pitches[0] * mode_cmd.height;
ret = mgag200fb_create_object(mfbdev, &mode_cmd, &gobj);
if (ret) {
DRM_ERROR("failed to create fbcon backing object %d\n", ret);
return ret;
}
bo = gem_to_mga_bo(gobj);
sysram = vmalloc(size);
if (!sysram)
return -ENOMEM;
info = framebuffer_alloc(0, device);
if (info == NULL)
return -ENOMEM;
info->par = mfbdev;
ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
if (ret)
return ret;
mfbdev->sysram = sysram;
mfbdev->size = size;
fb = &mfbdev->mfb.base;
/* setup helper */
mfbdev->helper.fb = fb;
mfbdev->helper.fbdev = info;
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret) {
DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
ret = -ENOMEM;
goto out;
}
strcpy(info->fix.id, "mgadrmfb");
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &mgag200fb_ops;
/* setup aperture base/size for vesafb takeover */
info->apertures = alloc_apertures(1);
if (!info->apertures) {
ret = -ENOMEM;
goto out;
}
info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
info->apertures->ranges[0].size = mdev->mc.vram_size;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &mfbdev->helper, sizes->fb_width,
sizes->fb_height);
info->screen_base = sysram;
info->screen_size = size;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
DRM_DEBUG_KMS("allocated %dx%d\n",
fb->width, fb->height);
return 0;
out:
return ret;
}
static int mga_fbdev_destroy(struct drm_device *dev,
struct mga_fbdev *mfbdev)
{
struct fb_info *info;
struct mga_framebuffer *mfb = &mfbdev->mfb;
if (mfbdev->helper.fbdev) {
info = mfbdev->helper.fbdev;
unregister_framebuffer(info);
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
if (mfb->obj) {
drm_gem_object_unreference_unlocked(mfb->obj);
mfb->obj = NULL;
}
drm_fb_helper_fini(&mfbdev->helper);
vfree(mfbdev->sysram);
drm_framebuffer_unregister_private(&mfb->base);
drm_framebuffer_cleanup(&mfb->base);
return 0;
}
static struct drm_fb_helper_funcs mga_fb_helper_funcs = {
.gamma_set = mga_crtc_fb_gamma_set,
.gamma_get = mga_crtc_fb_gamma_get,
.fb_probe = mgag200fb_create,
};
int mgag200_fbdev_init(struct mga_device *mdev)
{
struct mga_fbdev *mfbdev;
int ret;
mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL);
if (!mfbdev)
return -ENOMEM;
mdev->mfbdev = mfbdev;
mfbdev->helper.funcs = &mga_fb_helper_funcs;
spin_lock_init(&mfbdev->dirty_lock);
ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
mdev->num_crtc, MGAG200FB_CONN_LIMIT);
if (ret)
return ret;
drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(mdev->dev);
drm_fb_helper_initial_config(&mfbdev->helper, 32);
return 0;
}
void mgag200_fbdev_fini(struct mga_device *mdev)
{
if (!mdev->mfbdev)
return;
mga_fbdev_destroy(mdev->dev, mdev->mfbdev);
}
| gpl-2.0 |
omnirom/android_kernel_asus_tf300t | drivers/input/serio/altera_ps2.c | 3101 | 4838 | /*
* Altera University Program PS2 controller driver
*
* Copyright (C) 2008 Thomas Chou <thomas@wytron.com.tw>
*
* Based on sa1111ps2.c, which is:
* Copyright (C) 2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of.h>
#define DRV_NAME "altera_ps2"
struct ps2if {
struct serio *io;
struct resource *iomem_res;
void __iomem *base;
unsigned irq;
};
/*
* Read all bytes waiting in the PS2 port. There should be
* at most one, but we loop for safety.
*/
static irqreturn_t altera_ps2_rxint(int irq, void *dev_id)
{
struct ps2if *ps2if = dev_id;
unsigned int status;
int handled = IRQ_NONE;
while ((status = readl(ps2if->base)) & 0xffff0000) {
serio_interrupt(ps2if->io, status & 0xff, 0);
handled = IRQ_HANDLED;
}
return handled;
}
/*
* Write a byte to the PS2 port.
*/
static int altera_ps2_write(struct serio *io, unsigned char val)
{
struct ps2if *ps2if = io->port_data;
writel(val, ps2if->base);
return 0;
}
static int altera_ps2_open(struct serio *io)
{
struct ps2if *ps2if = io->port_data;
/* clear fifo */
while (readl(ps2if->base) & 0xffff0000)
/* empty */;
writel(1, ps2if->base + 4); /* enable rx irq */
return 0;
}
static void altera_ps2_close(struct serio *io)
{
struct ps2if *ps2if = io->port_data;
writel(0, ps2if->base); /* disable rx irq */
}
/*
* Add one device to this driver.
*/
static int __devinit altera_ps2_probe(struct platform_device *pdev)
{
struct ps2if *ps2if;
struct serio *serio;
int error, irq;
ps2if = kzalloc(sizeof(struct ps2if), GFP_KERNEL);
serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (!ps2if || !serio) {
error = -ENOMEM;
goto err_free_mem;
}
serio->id.type = SERIO_8042;
serio->write = altera_ps2_write;
serio->open = altera_ps2_open;
serio->close = altera_ps2_close;
strlcpy(serio->name, dev_name(&pdev->dev), sizeof(serio->name));
strlcpy(serio->phys, dev_name(&pdev->dev), sizeof(serio->phys));
serio->port_data = ps2if;
serio->dev.parent = &pdev->dev;
ps2if->io = serio;
ps2if->iomem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (ps2if->iomem_res == NULL) {
error = -ENOENT;
goto err_free_mem;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
error = -ENXIO;
goto err_free_mem;
}
ps2if->irq = irq;
if (!request_mem_region(ps2if->iomem_res->start,
resource_size(ps2if->iomem_res), pdev->name)) {
error = -EBUSY;
goto err_free_mem;
}
ps2if->base = ioremap(ps2if->iomem_res->start,
resource_size(ps2if->iomem_res));
if (!ps2if->base) {
error = -ENOMEM;
goto err_free_res;
}
error = request_irq(ps2if->irq, altera_ps2_rxint, 0, pdev->name, ps2if);
if (error) {
dev_err(&pdev->dev, "could not allocate IRQ %d: %d\n",
ps2if->irq, error);
goto err_unmap;
}
dev_info(&pdev->dev, "base %p, irq %d\n", ps2if->base, ps2if->irq);
serio_register_port(ps2if->io);
platform_set_drvdata(pdev, ps2if);
return 0;
err_unmap:
iounmap(ps2if->base);
err_free_res:
release_mem_region(ps2if->iomem_res->start,
resource_size(ps2if->iomem_res));
err_free_mem:
kfree(ps2if);
kfree(serio);
return error;
}
/*
* Remove one device from this driver.
*/
static int __devexit altera_ps2_remove(struct platform_device *pdev)
{
struct ps2if *ps2if = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
serio_unregister_port(ps2if->io);
free_irq(ps2if->irq, ps2if);
iounmap(ps2if->base);
release_mem_region(ps2if->iomem_res->start,
resource_size(ps2if->iomem_res));
kfree(ps2if);
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id altera_ps2_match[] = {
{ .compatible = "ALTR,ps2-1.0", },
{},
};
MODULE_DEVICE_TABLE(of, altera_ps2_match);
#else /* CONFIG_OF */
#define altera_ps2_match NULL
#endif /* CONFIG_OF */
/*
* Our device driver structure
*/
static struct platform_driver altera_ps2_driver = {
.probe = altera_ps2_probe,
.remove = __devexit_p(altera_ps2_remove),
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
.of_match_table = altera_ps2_match,
},
};
static int __init altera_ps2_init(void)
{
return platform_driver_register(&altera_ps2_driver);
}
module_init(altera_ps2_init);
static void __exit altera_ps2_exit(void)
{
platform_driver_unregister(&altera_ps2_driver);
}
module_exit(altera_ps2_exit);
MODULE_DESCRIPTION("Altera University Program PS2 controller driver");
MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| gpl-2.0 |
GalaxyTab4/android_kernel_samsung_matissevewifi | fs/ocfs2/export.c | 3357 | 6341 | /* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* export.c
*
* Functions to facilitate NFS exporting
*
* Copyright (C) 2002, 2005 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "dir.h"
#include "dlmglue.h"
#include "dcache.h"
#include "export.h"
#include "inode.h"
#include "buffer_head_io.h"
#include "suballoc.h"
#include "ocfs2_trace.h"
struct ocfs2_inode_handle
{
u64 ih_blkno;
u32 ih_generation;
};
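/*
 * Turn an NFS file handle into a dentry. Stale handles are caught by
 * testing the inode allocator bit under the NFS sync lock and by
 * comparing the handle's generation against the inode's.
 */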
static struct dentry *ocfs2_get_dentry(struct super_block *sb,
struct ocfs2_inode_handle *handle)
{
struct inode *inode;
struct ocfs2_super *osb = OCFS2_SB(sb);
u64 blkno = handle->ih_blkno;
int status, set;
struct dentry *result;
trace_ocfs2_get_dentry_begin(sb, handle, (unsigned long long)blkno);
if (blkno == 0) {
result = ERR_PTR(-ESTALE);
goto bail;
}
inode = ocfs2_ilookup(sb, blkno);
/*
* If the inode exists in memory, we only need to check its
* generation number.
*/
if (inode)
goto check_gen;
/*
* This will synchronize us against ocfs2_delete_inode() on
* all nodes
*/
status = ocfs2_nfs_sync_lock(osb, 1);
if (status < 0) {
mlog(ML_ERROR, "getting nfs sync lock(EX) failed %d\n", status);
goto check_err;
}
status = ocfs2_test_inode_bit(osb, blkno, &set);
trace_ocfs2_get_dentry_test_bit(status, set);
if (status < 0) {
if (status == -EINVAL) {
/*
* The blkno NFS gave us doesn't even show up
* as an inode; we return -ESTALE to be
* nice.
*/
status = -ESTALE;
} else
mlog(ML_ERROR, "test inode bit failed %d\n", status);
goto unlock_nfs_sync;
}
/* If the inode allocator bit is clear, this inode must be stale */
if (!set) {
status = -ESTALE;
goto unlock_nfs_sync;
}
inode = ocfs2_iget(osb, blkno, 0, 0);
unlock_nfs_sync:
ocfs2_nfs_sync_unlock(osb, 1);
check_err:
if (status < 0) {
if (status == -ESTALE) {
trace_ocfs2_get_dentry_stale((unsigned long long)blkno,
handle->ih_generation);
}
result = ERR_PTR(status);
goto bail;
}
if (IS_ERR(inode)) {
mlog_errno(PTR_ERR(inode));
result = (void *)inode;
goto bail;
}
check_gen:
if (handle->ih_generation != inode->i_generation) {
iput(inode);
trace_ocfs2_get_dentry_generation((unsigned long long)blkno,
handle->ih_generation,
inode->i_generation);
result = ERR_PTR(-ESTALE);
goto bail;
}
result = d_obtain_alias(inode);
if (IS_ERR(result))
mlog_errno(PTR_ERR(result));
bail:
trace_ocfs2_get_dentry_end(result);
return result;
}
static struct dentry *ocfs2_get_parent(struct dentry *child)
{
int status;
u64 blkno;
struct dentry *parent;
struct inode *dir = child->d_inode;
trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name,
(unsigned long long)OCFS2_I(dir)->ip_blkno);
status = ocfs2_inode_lock(dir, NULL, 0);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
parent = ERR_PTR(status);
goto bail;
}
status = ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno);
if (status < 0) {
parent = ERR_PTR(-ENOENT);
goto bail_unlock;
}
parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0));
bail_unlock:
ocfs2_inode_unlock(dir, 0);
bail:
trace_ocfs2_get_parent_end(parent);
return parent;
}
static int ocfs2_encode_fh(struct inode *inode, u32 *fh_in, int *max_len,
struct inode *parent)
{
int len = *max_len;
int type = 1;
u64 blkno;
u32 generation;
__le32 *fh = (__force __le32 *) fh_in;
#ifdef TRACE_HOOKS_ARE_NOT_BRAINDEAD_IN_YOUR_OPINION
#error "You go ahead and fix that mess, then. Somehow"
trace_ocfs2_encode_fh_begin(dentry, dentry->d_name.len,
dentry->d_name.name,
fh, len, connectable);
#endif
if (parent && (len < 6)) {
*max_len = 6;
type = FILEID_INVALID;
goto bail;
} else if (len < 3) {
*max_len = 3;
type = FILEID_INVALID;
goto bail;
}
blkno = OCFS2_I(inode)->ip_blkno;
generation = inode->i_generation;
trace_ocfs2_encode_fh_self((unsigned long long)blkno, generation);
len = 3;
fh[0] = cpu_to_le32((u32)(blkno >> 32));
fh[1] = cpu_to_le32((u32)(blkno & 0xffffffff));
fh[2] = cpu_to_le32(generation);
if (parent) {
blkno = OCFS2_I(parent)->ip_blkno;
generation = parent->i_generation;
fh[3] = cpu_to_le32((u32)(blkno >> 32));
fh[4] = cpu_to_le32((u32)(blkno & 0xffffffff));
fh[5] = cpu_to_le32(generation);
len = 6;
type = 2;
trace_ocfs2_encode_fh_parent((unsigned long long)blkno,
generation);
}
*max_len = len;
bail:
trace_ocfs2_encode_fh_type(type);
return type;
}
static struct dentry *ocfs2_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
struct ocfs2_inode_handle handle;
if (fh_len < 3 || fh_type > 2)
return NULL;
handle.ih_blkno = (u64)le32_to_cpu(fid->raw[0]) << 32;
handle.ih_blkno |= (u64)le32_to_cpu(fid->raw[1]);
handle.ih_generation = le32_to_cpu(fid->raw[2]);
return ocfs2_get_dentry(sb, &handle);
}
static struct dentry *ocfs2_fh_to_parent(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
struct ocfs2_inode_handle parent;
if (fh_type != 2 || fh_len < 6)
return NULL;
parent.ih_blkno = (u64)le32_to_cpu(fid->raw[3]) << 32;
parent.ih_blkno |= (u64)le32_to_cpu(fid->raw[4]);
parent.ih_generation = le32_to_cpu(fid->raw[5]);
return ocfs2_get_dentry(sb, &parent);
}
const struct export_operations ocfs2_export_ops = {
.encode_fh = ocfs2_encode_fh,
.fh_to_dentry = ocfs2_fh_to_dentry,
.fh_to_parent = ocfs2_fh_to_parent,
.get_parent = ocfs2_get_parent,
};
| gpl-2.0 |
lyapota/m8_sense_lollipop | drivers/net/can/c_can/c_can.c | 3357 | 31834 | /*
* CAN bus driver for Bosch C_CAN controller
*
* Copyright (C) 2010 ST Microelectronics
* Bhupesh Sharma <bhupesh.sharma@st.com>
*
* Borrowed heavily from the C_CAN driver originally written by:
* Copyright (C) 2007
* - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
* - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
*
* TX and RX NAPI implementation has been borrowed from at91 CAN driver
* written by:
* Copyright
* (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
* (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
*
* Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B.
* Bosch C_CAN user manual can be obtained from:
* http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
* users_manual_c_can.pdf
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include "c_can.h"
/* control register */
#define CONTROL_TEST BIT(7)
#define CONTROL_CCE BIT(6)
#define CONTROL_DISABLE_AR BIT(5)
#define CONTROL_ENABLE_AR (0 << 5)
#define CONTROL_EIE BIT(3)
#define CONTROL_SIE BIT(2)
#define CONTROL_IE BIT(1)
#define CONTROL_INIT BIT(0)
/* test register */
#define TEST_RX BIT(7)
#define TEST_TX1 BIT(6)
#define TEST_TX2 BIT(5)
#define TEST_LBACK BIT(4)
#define TEST_SILENT BIT(3)
#define TEST_BASIC BIT(2)
/* status register */
#define STATUS_BOFF BIT(7)
#define STATUS_EWARN BIT(6)
#define STATUS_EPASS BIT(5)
#define STATUS_RXOK BIT(4)
#define STATUS_TXOK BIT(3)
/* error counter register */
#define ERR_CNT_TEC_MASK 0xff
#define ERR_CNT_TEC_SHIFT 0
#define ERR_CNT_REC_SHIFT 8
#define ERR_CNT_REC_MASK (0x7f << ERR_CNT_REC_SHIFT)
#define ERR_CNT_RP_SHIFT 15
#define ERR_CNT_RP_MASK (0x1 << ERR_CNT_RP_SHIFT)
/* bit-timing register */
#define BTR_BRP_MASK 0x3f
#define BTR_BRP_SHIFT 0
#define BTR_SJW_SHIFT 6
#define BTR_SJW_MASK (0x3 << BTR_SJW_SHIFT)
#define BTR_TSEG1_SHIFT 8
#define BTR_TSEG1_MASK (0xf << BTR_TSEG1_SHIFT)
#define BTR_TSEG2_SHIFT 12
#define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT)
/* brp extension register */
#define BRP_EXT_BRPE_MASK 0x0f
#define BRP_EXT_BRPE_SHIFT 0
/* IFx command request */
#define IF_COMR_BUSY BIT(15)
/* IFx command mask */
#define IF_COMM_WR BIT(7)
#define IF_COMM_MASK BIT(6)
#define IF_COMM_ARB BIT(5)
#define IF_COMM_CONTROL BIT(4)
#define IF_COMM_CLR_INT_PND BIT(3)
#define IF_COMM_TXRQST BIT(2)
#define IF_COMM_DATAA BIT(1)
#define IF_COMM_DATAB BIT(0)
#define IF_COMM_ALL (IF_COMM_MASK | IF_COMM_ARB | \
IF_COMM_CONTROL | IF_COMM_TXRQST | \
IF_COMM_DATAA | IF_COMM_DATAB)
/* IFx arbitration */
#define IF_ARB_MSGVAL BIT(15)
#define IF_ARB_MSGXTD BIT(14)
#define IF_ARB_TRANSMIT BIT(13)
/* IFx message control */
#define IF_MCONT_NEWDAT BIT(15)
#define IF_MCONT_MSGLST BIT(14)
#define IF_MCONT_CLR_MSGLST (0 << 14)
#define IF_MCONT_INTPND BIT(13)
#define IF_MCONT_UMASK BIT(12)
#define IF_MCONT_TXIE BIT(11)
#define IF_MCONT_RXIE BIT(10)
#define IF_MCONT_RMTEN BIT(9)
#define IF_MCONT_TXRQST BIT(8)
#define IF_MCONT_EOB BIT(7)
#define IF_MCONT_DLC_MASK 0xf
/*
* IFx register masks:
* allow easy operation on 16-bit registers when the
* argument is 32-bit instead
*/
#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)
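/*
 * Illustrative sketch (not part of the driver): the extended CAN id
 * 0x15555555 is split across the two 16-bit halves of a register pair as
 * IFX_WRITE_LOW_16BIT(0x15555555) == 0x5555 and
 * IFX_WRITE_HIGH_16BIT(0x15555555) == 0x1555.
 */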
/* message object split */
#define C_CAN_NO_OF_OBJECTS 32
#define C_CAN_MSG_OBJ_RX_NUM 16
#define C_CAN_MSG_OBJ_TX_NUM 16
#define C_CAN_MSG_OBJ_RX_FIRST 1
#define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \
C_CAN_MSG_OBJ_RX_NUM - 1)
#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1)
#define C_CAN_MSG_OBJ_TX_LAST (C_CAN_MSG_OBJ_TX_FIRST + \
C_CAN_MSG_OBJ_TX_NUM - 1)
#define C_CAN_MSG_OBJ_RX_SPLIT 9
#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1)
#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)
#define RECEIVE_OBJECT_BITS 0x0000ffff
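/*
 * Worked partition for the defaults above (illustrative only): message
 * objects 1..16 form the receive FIFO and objects 17..32 are reserved
 * for transmission; the receive FIFO is split at object 9, so the "low"
 * group is 1..8 (C_CAN_MSG_RX_LOW_LAST == 8) and the "high" group is
 * 9..16.
 */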
/* status interrupt */
#define STATUS_INTERRUPT 0x8000
/* global interrupt masks */
#define ENABLE_ALL_INTERRUPTS 1
#define DISABLE_ALL_INTERRUPTS 0
/* minimum timeout for checking BUSY status */
#define MIN_TIMEOUT_VALUE 6
/* napi related */
#define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM
/* c_can lec values */
enum c_can_lec_type {
LEC_NO_ERROR = 0,
LEC_STUFF_ERROR,
LEC_FORM_ERROR,
LEC_ACK_ERROR,
LEC_BIT1_ERROR,
LEC_BIT0_ERROR,
LEC_CRC_ERROR,
LEC_UNUSED,
};
/*
* c_can error types:
* Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
*/
enum c_can_bus_error_types {
C_CAN_NO_ERROR = 0,
C_CAN_BUS_OFF,
C_CAN_ERROR_WARNING,
C_CAN_ERROR_PASSIVE,
};
static struct can_bittiming_const c_can_bittiming_const = {
.name = KBUILD_MODNAME,
.tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
.tseg1_max = 16,
.tseg2_min = 1, /* Time segment 2 = phase_seg2 */
.tseg2_max = 8,
.sjw_max = 4,
.brp_min = 1,
.brp_max = 1024, /* 6-bit BRP field + 4-bit BRPE field */
.brp_inc = 1,
};
static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
{
return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
C_CAN_MSG_OBJ_TX_FIRST;
}
static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv)
{
return (priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) +
C_CAN_MSG_OBJ_TX_FIRST;
}
static u32 c_can_read_reg32(struct c_can_priv *priv, void *reg)
{
u32 val = priv->read_reg(priv, reg);
val |= ((u32) priv->read_reg(priv, reg + 2)) << 16;
return val;
}
static void c_can_enable_all_interrupts(struct c_can_priv *priv,
int enable)
{
unsigned int cntrl_save = priv->read_reg(priv,
&priv->regs->control);
if (enable)
cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
else
cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
priv->write_reg(priv, &priv->regs->control, cntrl_save);
}
static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
{
int count = MIN_TIMEOUT_VALUE;
while (count && priv->read_reg(priv,
&priv->regs->ifregs[iface].com_req) &
IF_COMR_BUSY) {
count--;
udelay(1);
}
if (!count)
return 1;
return 0;
}
static inline void c_can_object_get(struct net_device *dev,
int iface, int objno, int mask)
{
struct c_can_priv *priv = netdev_priv(dev);
/*
* As per the specs, after writing the message object number to the
* IF command request register, the transfer between the interface
* register and message RAM must complete within 6 CAN-CLK
* periods.
*/
priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
IFX_WRITE_LOW_16BIT(mask));
priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
IFX_WRITE_LOW_16BIT(objno));
if (c_can_msg_obj_is_busy(priv, iface))
netdev_err(dev, "timed out in object get\n");
}
static inline void c_can_object_put(struct net_device *dev,
int iface, int objno, int mask)
{
struct c_can_priv *priv = netdev_priv(dev);
/*
* As per the specs, after writing the message object number to the
* IF command request register, the transfer between the interface
* register and message RAM must complete within 6 CAN-CLK
* periods.
*/
priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
(IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
IFX_WRITE_LOW_16BIT(objno));
if (c_can_msg_obj_is_busy(priv, iface))
netdev_err(dev, "timed out in object put\n");
}
static void c_can_write_msg_object(struct net_device *dev,
int iface, struct can_frame *frame, int objno)
{
int i;
u16 flags = 0;
unsigned int id;
struct c_can_priv *priv = netdev_priv(dev);
if (!(frame->can_id & CAN_RTR_FLAG))
flags |= IF_ARB_TRANSMIT;
if (frame->can_id & CAN_EFF_FLAG) {
id = frame->can_id & CAN_EFF_MASK;
flags |= IF_ARB_MSGXTD;
} else
id = ((frame->can_id & CAN_SFF_MASK) << 18);
flags |= IF_ARB_MSGVAL;
priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
IFX_WRITE_LOW_16BIT(id));
priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, flags |
IFX_WRITE_HIGH_16BIT(id));
for (i = 0; i < frame->can_dlc; i += 2) {
priv->write_reg(priv, &priv->regs->ifregs[iface].data[i / 2],
frame->data[i] | (frame->data[i + 1] << 8));
}
/* enable interrupt for this message object */
priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
frame->can_dlc);
c_can_object_put(dev, iface, objno, IF_COMM_ALL);
}
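/*
 * Worked example (illustrative only): for a standard data frame with
 * can_id == 0x123, the id is left-aligned into the 29-bit arbitration
 * field, i.e. id == 0x123 << 18 == 0x048C0000, so arb1 receives 0x0000
 * and arb2 receives IF_ARB_MSGVAL | IF_ARB_TRANSMIT | 0x048C.
 */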
static inline void c_can_mark_rx_msg_obj(struct net_device *dev,
int iface, int ctrl_mask,
int obj)
{
struct c_can_priv *priv = netdev_priv(dev);
priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND));
c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
}
static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
int iface,
int ctrl_mask)
{
int i;
struct c_can_priv *priv = netdev_priv(dev);
for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
ctrl_mask & ~(IF_MCONT_MSGLST |
IF_MCONT_INTPND | IF_MCONT_NEWDAT));
c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
}
}
static inline void c_can_activate_rx_msg_obj(struct net_device *dev,
int iface, int ctrl_mask,
int obj)
{
struct c_can_priv *priv = netdev_priv(dev);
priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
ctrl_mask & ~(IF_MCONT_MSGLST |
IF_MCONT_INTPND | IF_MCONT_NEWDAT));
c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
}
static void c_can_handle_lost_msg_obj(struct net_device *dev,
int iface, int objno)
{
struct c_can_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
struct can_frame *frame;
netdev_err(dev, "msg lost in buffer %d\n", objno);
c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
IF_MCONT_CLR_MSGLST);
c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);
/* create an error msg */
skb = alloc_can_err_skb(dev, &frame);
if (unlikely(!skb))
return;
frame->can_id |= CAN_ERR_CRTL;
frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
stats->rx_errors++;
stats->rx_over_errors++;
netif_receive_skb(skb);
}
static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
{
u16 flags, data;
int i;
unsigned int val;
struct c_can_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
struct can_frame *frame;
skb = alloc_can_skb(dev, &frame);
if (!skb) {
stats->rx_dropped++;
return -ENOMEM;
}
frame->can_dlc = get_can_dlc(ctrl & 0x0F);
flags = priv->read_reg(priv, &priv->regs->ifregs[iface].arb2);
val = priv->read_reg(priv, &priv->regs->ifregs[iface].arb1) |
(flags << 16);
if (flags & IF_ARB_MSGXTD)
frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG;
else
frame->can_id = (val >> 18) & CAN_SFF_MASK;
if (flags & IF_ARB_TRANSMIT)
frame->can_id |= CAN_RTR_FLAG;
else {
for (i = 0; i < frame->can_dlc; i += 2) {
data = priv->read_reg(priv,
&priv->regs->ifregs[iface].data[i / 2]);
frame->data[i] = data;
frame->data[i + 1] = data >> 8;
}
}
netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += frame->can_dlc;
return 0;
}
static void c_can_setup_receive_object(struct net_device *dev, int iface,
int objno, unsigned int mask,
unsigned int id, unsigned int mcont)
{
struct c_can_priv *priv = netdev_priv(dev);
priv->write_reg(priv, &priv->regs->ifregs[iface].mask1,
IFX_WRITE_LOW_16BIT(mask));
priv->write_reg(priv, &priv->regs->ifregs[iface].mask2,
IFX_WRITE_HIGH_16BIT(mask));
priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
IFX_WRITE_LOW_16BIT(id));
priv->write_reg(priv, &priv->regs->ifregs[iface].arb2,
(IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, mcont);
c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
c_can_read_reg32(priv, &priv->regs->msgval1));
}
static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
{
struct c_can_priv *priv = netdev_priv(dev);
priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 0);
priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, 0);
priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 0);
c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
c_can_read_reg32(priv, &priv->regs->msgval1));
}
static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
{
int val = c_can_read_reg32(priv, &priv->regs->txrqst1);
/*
* the transmission request register's bit n-1 corresponds to
* message object n, so we must account for that offset here.
*/
if (val & (1 << (objno - 1)))
return 1;
return 0;
}
static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
u32 msg_obj_no;
struct c_can_priv *priv = netdev_priv(dev);
struct can_frame *frame = (struct can_frame *)skb->data;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
msg_obj_no = get_tx_next_msg_obj(priv);
/* prepare message object for transmission */
c_can_write_msg_object(dev, 0, frame, msg_obj_no);
can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
/*
* we have to stop the queue in case of a wrap around or
* if the next TX message object is still in use
*/
priv->tx_next++;
if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
(priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
netif_stop_queue(dev);
return NETDEV_TX_OK;
}
static int c_can_set_bittiming(struct net_device *dev)
{
unsigned int reg_btr, reg_brpe, ctrl_save;
u8 brp, brpe, sjw, tseg1, tseg2;
u32 ten_bit_brp;
struct c_can_priv *priv = netdev_priv(dev);
const struct can_bittiming *bt = &priv->can.bittiming;
/* c_can provides 6-bit BRP and 4-bit BRPE fields */
ten_bit_brp = bt->brp - 1;
brp = ten_bit_brp & BTR_BRP_MASK;
brpe = ten_bit_brp >> 6;
sjw = bt->sjw - 1;
tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
tseg2 = bt->phase_seg2 - 1;
reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
(tseg2 << BTR_TSEG2_SHIFT);
reg_brpe = brpe & BRP_EXT_BRPE_MASK;
netdev_info(dev,
"setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
ctrl_save = priv->read_reg(priv, &priv->regs->control);
priv->write_reg(priv, &priv->regs->control,
ctrl_save | CONTROL_CCE | CONTROL_INIT);
priv->write_reg(priv, &priv->regs->btr, reg_btr);
priv->write_reg(priv, &priv->regs->brp_ext, reg_brpe);
priv->write_reg(priv, &priv->regs->control, ctrl_save);
return 0;
}
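/*
 * Worked example of the BRP/BRPE split (illustrative only): a prescaler
 * of bt->brp == 100 gives ten_bit_brp == 99, so brp == (99 & 0x3f) == 35
 * and brpe == (99 >> 6) == 1; the hardware recombines them as
 * ((brpe << 6) | brp) + 1 == 100.
 */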
/*
* Configure C_CAN message objects for Tx and Rx purposes:
* C_CAN provides a total of 32 message objects that can be configured
* either for Tx or Rx purposes. Here the first 16 message objects are used as
* a reception FIFO. The end of the reception FIFO is signified by the EoB bit
* being SET. The remaining 16 message objects are kept aside for Tx purposes.
* See user guide document for further details on configuring message
* objects.
*/
static void c_can_configure_msg_objects(struct net_device *dev)
{
int i;
/* first invalidate all message objects */
for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
c_can_inval_msg_object(dev, 0, i);
/* setup receive message objects */
for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
c_can_setup_receive_object(dev, 0, i, 0, 0,
(IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
c_can_setup_receive_object(dev, 0, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
}
/*
* Configure C_CAN chip:
* - enable/disable auto-retransmission
* - set operating mode
* - configure message objects
*/
static void c_can_chip_config(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
/* enable automatic retransmission */
priv->write_reg(priv, &priv->regs->control,
CONTROL_ENABLE_AR);
if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
(priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
/* loopback + silent mode : useful for hot self-test */
priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
priv->write_reg(priv, &priv->regs->test,
TEST_LBACK | TEST_SILENT);
} else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
/* loopback mode : useful for self-test function */
priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
priv->write_reg(priv, &priv->regs->test, TEST_LBACK);
} else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
/* silent mode : bus-monitoring mode */
priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
priv->write_reg(priv, &priv->regs->test, TEST_SILENT);
} else
/* normal mode */
priv->write_reg(priv, &priv->regs->control,
CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
/* configure message objects */
c_can_configure_msg_objects(dev);
/* set a `lec` value so that we can check for updates later */
priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
/* set bittiming params */
c_can_set_bittiming(dev);
}
static void c_can_start(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
/* basic c_can configuration */
c_can_chip_config(dev);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
/* reset tx helper pointers */
priv->tx_next = priv->tx_echo = 0;
/* enable status change, error and module interrupts */
c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
}
static void c_can_stop(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
/* disable all interrupts */
c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
/* set the state as STOPPED */
priv->can.state = CAN_STATE_STOPPED;
}
static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
{
switch (mode) {
case CAN_MODE_START:
c_can_start(dev);
netif_wake_queue(dev);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int c_can_get_berr_counter(const struct net_device *dev,
struct can_berr_counter *bec)
{
unsigned int reg_err_counter;
struct c_can_priv *priv = netdev_priv(dev);
reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
ERR_CNT_REC_SHIFT;
bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
return 0;
}
/*
* theory of operation:
*
* priv->tx_echo holds the number of the oldest can_frame put for
* transmission into the hardware, but not yet ACKed by the CAN tx
* complete IRQ.
*
* We iterate from priv->tx_echo to priv->tx_next and check whether each
* packet has been transmitted; if so, we echo it back to the CAN
* framework. If we discover a frame that has not yet been transmitted,
* we stop looking.
*/
static void c_can_do_tx(struct net_device *dev)
{
u32 val;
u32 msg_obj_no;
struct c_can_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
msg_obj_no = get_tx_echo_msg_obj(priv);
val = c_can_read_reg32(priv, &priv->regs->txrqst1);
if (!(val & (1 << (msg_obj_no - 1)))) {
can_get_echo_skb(dev,
msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
stats->tx_bytes += priv->read_reg(priv,
&priv->regs->ifregs[0].msg_cntrl)
& IF_MCONT_DLC_MASK;
stats->tx_packets++;
c_can_inval_msg_object(dev, 0, msg_obj_no);
}
}
/* restart queue if wrap-around occurred or if it stalled on the last packet */
if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
netif_wake_queue(dev);
}
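/*
 * Worked example of the echo bookkeeping (illustrative only): with
 * C_CAN_MSG_OBJ_TX_FIRST == 17 and C_CAN_NEXT_MSG_OBJ_MASK == 15,
 * tx_echo == 16 and tx_next == 18 map to message objects 17 and 19;
 * the loop above then acks objects 17 and 18 once their TXRQST bits
 * clear, leaving tx_echo == 18.
 */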
/*
* theory of operation:
*
* the c_can core saves a received CAN message into the first free
* message object it finds (starting with the lowest). Bits NEWDAT and
* INTPND are set for this message object, indicating that a new message
* has arrived. To work around the out-of-order reception this implies,
* we keep two groups of message objects whose partitioning is defined
* by C_CAN_MSG_OBJ_RX_SPLIT.
*
* To ensure in-order frame reception we use the following
* approach while re-activating a message object to receive further
* frames:
* - if the current message object number is lower than
* C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
* the INTPND bit.
* - if the current message object number is equal to
* C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
* receive message objects.
* - if the current message object number is greater than
* C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
* only this message object.
*/
static int c_can_do_rx_poll(struct net_device *dev, int quota)
{
u32 num_rx_pkts = 0;
unsigned int msg_obj, msg_ctrl_save;
struct c_can_priv *priv = netdev_priv(dev);
u32 val = c_can_read_reg32(priv, &priv->regs->intpnd1);
for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
val = c_can_read_reg32(priv, &priv->regs->intpnd1),
msg_obj++) {
/*
* the interrupt pending register's bit n-1 corresponds to
* message object n, so we must account for that offset here.
*/
if (val & (1 << (msg_obj - 1))) {
c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
~IF_COMM_TXRQST);
msg_ctrl_save = priv->read_reg(priv,
&priv->regs->ifregs[0].msg_cntrl);
if (msg_ctrl_save & IF_MCONT_EOB)
return num_rx_pkts;
if (msg_ctrl_save & IF_MCONT_MSGLST) {
c_can_handle_lost_msg_obj(dev, 0, msg_obj);
num_rx_pkts++;
quota--;
continue;
}
if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
continue;
/* read the data from the message object */
c_can_read_msg_object(dev, 0, msg_ctrl_save);
if (msg_obj < C_CAN_MSG_RX_LOW_LAST)
c_can_mark_rx_msg_obj(dev, 0,
msg_ctrl_save, msg_obj);
else if (msg_obj > C_CAN_MSG_RX_LOW_LAST)
/* activate this msg obj */
c_can_activate_rx_msg_obj(dev, 0,
msg_ctrl_save, msg_obj);
else if (msg_obj == C_CAN_MSG_RX_LOW_LAST)
/* activate all lower message objects */
c_can_activate_all_lower_rx_msg_obj(dev,
0, msg_ctrl_save);
num_rx_pkts++;
quota--;
}
}
return num_rx_pkts;
}
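/*
 * Worked example of the reactivation policy (illustrative only), with
 * C_CAN_MSG_RX_LOW_LAST == 8: a frame in object 5 only gets INTPND
 * cleared (NEWDAT stays set), a frame in object 8 triggers clearing of
 * NEWDAT on all of objects 1..8, and a frame in object 12 gets NEWDAT
 * cleared on object 12 alone.
 */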
static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
{
return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
(priv->current_status & LEC_UNUSED);
}
static int c_can_handle_state_change(struct net_device *dev,
enum c_can_bus_error_types error_type)
{
unsigned int reg_err_counter;
unsigned int rx_err_passive;
struct c_can_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
struct can_berr_counter bec;
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
if (unlikely(!skb))
return 0;
c_can_get_berr_counter(dev, &bec);
reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
ERR_CNT_RP_SHIFT;
switch (error_type) {
case C_CAN_ERROR_WARNING:
/* error warning state */
priv->can.can_stats.error_warning++;
priv->can.state = CAN_STATE_ERROR_WARNING;
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
break;
case C_CAN_ERROR_PASSIVE:
/* error passive state */
priv->can.can_stats.error_passive++;
priv->can.state = CAN_STATE_ERROR_PASSIVE;
cf->can_id |= CAN_ERR_CRTL;
if (rx_err_passive)
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
if (bec.txerr > 127)
cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
break;
case C_CAN_BUS_OFF:
/* bus-off state */
priv->can.state = CAN_STATE_BUS_OFF;
cf->can_id |= CAN_ERR_BUSOFF;
/*
* disable all interrupts in bus-off mode to ensure that
* the CPU is not hogged down
*/
c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
can_bus_off(dev);
break;
default:
break;
}
netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
return 1;
}
static int c_can_handle_bus_err(struct net_device *dev,
enum c_can_lec_type lec_type)
{
struct c_can_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
/*
* Early exit if there is no LEC update or no error.
* No LEC update means that no CAN bus event has been detected
* since the CPU wrote the 0x7 value to the status reg.
*/
if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
return 0;
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
if (unlikely(!skb))
return 0;
/*
* check for 'last error code' which tells us the
* type of the last error to occur on the CAN bus
*/
/* common for all type of bus errors */
priv->can.can_stats.bus_error++;
stats->rx_errors++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
cf->data[2] |= CAN_ERR_PROT_UNSPEC;
switch (lec_type) {
case LEC_STUFF_ERROR:
netdev_dbg(dev, "stuff error\n");
cf->data[2] |= CAN_ERR_PROT_STUFF;
break;
case LEC_FORM_ERROR:
netdev_dbg(dev, "form error\n");
cf->data[2] |= CAN_ERR_PROT_FORM;
break;
case LEC_ACK_ERROR:
netdev_dbg(dev, "ack error\n");
cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
CAN_ERR_PROT_LOC_ACK_DEL);
break;
case LEC_BIT1_ERROR:
netdev_dbg(dev, "bit1 error\n");
cf->data[2] |= CAN_ERR_PROT_BIT1;
break;
case LEC_BIT0_ERROR:
netdev_dbg(dev, "bit0 error\n");
cf->data[2] |= CAN_ERR_PROT_BIT0;
break;
case LEC_CRC_ERROR:
netdev_dbg(dev, "CRC error\n");
cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
CAN_ERR_PROT_LOC_CRC_DEL);
break;
default:
break;
}
/* set a `lec` value so that we can check for updates later */
priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
return 1;
}
static int c_can_poll(struct napi_struct *napi, int quota)
{
u16 irqstatus;
int lec_type = 0;
int work_done = 0;
struct net_device *dev = napi->dev;
struct c_can_priv *priv = netdev_priv(dev);
irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
if (!irqstatus)
goto end;
/* status events have the highest priority */
if (irqstatus == STATUS_INTERRUPT) {
priv->current_status = priv->read_reg(priv,
&priv->regs->status);
/* handle Tx/Rx events */
if (priv->current_status & STATUS_TXOK)
priv->write_reg(priv, &priv->regs->status,
priv->current_status & ~STATUS_TXOK);
if (priv->current_status & STATUS_RXOK)
priv->write_reg(priv, &priv->regs->status,
priv->current_status & ~STATUS_RXOK);
/* handle state changes */
if ((priv->current_status & STATUS_EWARN) &&
(!(priv->last_status & STATUS_EWARN))) {
netdev_dbg(dev, "entered error warning state\n");
work_done += c_can_handle_state_change(dev,
C_CAN_ERROR_WARNING);
}
if ((priv->current_status & STATUS_EPASS) &&
(!(priv->last_status & STATUS_EPASS))) {
netdev_dbg(dev, "entered error passive state\n");
work_done += c_can_handle_state_change(dev,
C_CAN_ERROR_PASSIVE);
}
if ((priv->current_status & STATUS_BOFF) &&
(!(priv->last_status & STATUS_BOFF))) {
netdev_dbg(dev, "entered bus off state\n");
work_done += c_can_handle_state_change(dev,
C_CAN_BUS_OFF);
}
/* handle bus recovery events */
if ((!(priv->current_status & STATUS_BOFF)) &&
(priv->last_status & STATUS_BOFF)) {
netdev_dbg(dev, "left bus off state\n");
priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
if ((!(priv->current_status & STATUS_EPASS)) &&
(priv->last_status & STATUS_EPASS)) {
netdev_dbg(dev, "left error passive state\n");
priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
priv->last_status = priv->current_status;
/* handle lec errors on the bus */
lec_type = c_can_has_and_handle_berr(priv);
if (lec_type)
work_done += c_can_handle_bus_err(dev, lec_type);
} else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) &&
(irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) {
/* handle events corresponding to receive message objects */
work_done += c_can_do_rx_poll(dev, (quota - work_done));
} else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) &&
(irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) {
/* handle events corresponding to transmit message objects */
c_can_do_tx(dev);
}
end:
if (work_done < quota) {
napi_complete(napi);
/* enable all IRQs */
c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
}
return work_done;
}
static irqreturn_t c_can_isr(int irq, void *dev_id)
{
u16 irqstatus;
struct net_device *dev = (struct net_device *)dev_id;
struct c_can_priv *priv = netdev_priv(dev);
irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
if (!irqstatus)
return IRQ_NONE;
/* disable all interrupts and schedule the NAPI */
c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
napi_schedule(&priv->napi);
return IRQ_HANDLED;
}
static int c_can_open(struct net_device *dev)
{
int err;
struct c_can_priv *priv = netdev_priv(dev);
/* open the can device */
err = open_candev(dev);
if (err) {
netdev_err(dev, "failed to open can device\n");
return err;
}
/* register interrupt handler */
err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
dev);
if (err < 0) {
netdev_err(dev, "failed to request interrupt\n");
goto exit_irq_fail;
}
/* start the c_can controller */
c_can_start(dev);
napi_enable(&priv->napi);
netif_start_queue(dev);
return 0;
exit_irq_fail:
close_candev(dev);
return err;
}
static int c_can_close(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
netif_stop_queue(dev);
napi_disable(&priv->napi);
c_can_stop(dev);
free_irq(dev->irq, dev);
close_candev(dev);
return 0;
}
struct net_device *alloc_c_can_dev(void)
{
struct net_device *dev;
struct c_can_priv *priv;
dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
if (!dev)
return NULL;
priv = netdev_priv(dev);
netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
priv->dev = dev;
priv->can.bittiming_const = &c_can_bittiming_const;
priv->can.do_set_mode = c_can_set_mode;
priv->can.do_get_berr_counter = c_can_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING;
return dev;
}
EXPORT_SYMBOL_GPL(alloc_c_can_dev);
void free_c_can_dev(struct net_device *dev)
{
free_candev(dev);
}
EXPORT_SYMBOL_GPL(free_c_can_dev);
static const struct net_device_ops c_can_netdev_ops = {
.ndo_open = c_can_open,
.ndo_stop = c_can_close,
.ndo_start_xmit = c_can_start_xmit,
};
int register_c_can_dev(struct net_device *dev)
{
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &c_can_netdev_ops;
return register_candev(dev);
}
EXPORT_SYMBOL_GPL(register_c_can_dev);
void unregister_c_can_dev(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
/* disable all interrupts */
c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
unregister_candev(dev);
}
EXPORT_SYMBOL_GPL(unregister_c_can_dev);
MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");
| gpl-2.0 |
Mustaavalkosta/htc7x30-3.0 | net/sunrpc/xprtrdma/svc_rdma.c | 3357 | 8357 | /*
* Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the BSD-type
* license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* Neither the name of the Network Appliance, Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Author: Tom Tucker <tom@opengridcomputing.com>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/svc_rdma.h>
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
/* RPC/RDMA parameters */
unsigned int svcrdma_ord = RPCRDMA_ORD;
static unsigned int min_ord = 1;
static unsigned int max_ord = 4096;
unsigned int svcrdma_max_requests = RPCRDMA_MAX_REQUESTS;
static unsigned int min_max_requests = 4;
static unsigned int max_max_requests = 16384;
unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
static unsigned int min_max_inline = 4096;
static unsigned int max_max_inline = 65536;
atomic_t rdma_stat_recv;
atomic_t rdma_stat_read;
atomic_t rdma_stat_write;
atomic_t rdma_stat_sq_starve;
atomic_t rdma_stat_rq_starve;
atomic_t rdma_stat_rq_poll;
atomic_t rdma_stat_rq_prod;
atomic_t rdma_stat_sq_poll;
atomic_t rdma_stat_sq_prod;
/* Temporary NFS request map and context caches */
struct kmem_cache *svc_rdma_map_cachep;
struct kmem_cache *svc_rdma_ctxt_cachep;
struct workqueue_struct *svc_rdma_wq;
/*
* This function implements reading and resetting an atomic_t stat
* variable through read/write to a proc file. Any write to the file
* resets the associated statistic to zero. Any read returns its
* current value.
*/
static int read_reset_stat(ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
atomic_t *stat = (atomic_t *)table->data;
if (!stat)
return -EINVAL;
if (write)
atomic_set(stat, 0);
else {
char str_buf[32];
char *data;
int len = snprintf(str_buf, 32, "%d\n", atomic_read(stat));
if (len >= 32)
return -EFAULT;
len = strlen(str_buf);
if (*ppos > len) {
*lenp = 0;
return 0;
}
data = &str_buf[*ppos];
len -= *ppos;
if (len > *lenp)
len = *lenp;
if (len && copy_to_user(buffer, data, len))
return -EFAULT;
*lenp = len;
*ppos += len;
}
return 0;
}
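/*
 * Usage sketch (assuming the sysctl tree registered below): reading
 * /proc/sys/sunrpc/svc_rdma/rdma_stat_read returns the current counter,
 * while any write resets it to zero, e.g.:
 *
 *   cat /proc/sys/sunrpc/svc_rdma/rdma_stat_read
 *   echo 0 > /proc/sys/sunrpc/svc_rdma/rdma_stat_read
 */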
static struct ctl_table_header *svcrdma_table_header;
static ctl_table svcrdma_parm_table[] = {
{
.procname = "max_requests",
.data = &svcrdma_max_requests,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_max_requests,
.extra2 = &max_max_requests
},
{
.procname = "max_req_size",
.data = &svcrdma_max_req_size,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_max_inline,
.extra2 = &max_max_inline
},
{
.procname = "max_outbound_read_requests",
.data = &svcrdma_ord,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_ord,
.extra2 = &max_ord,
},
{
.procname = "rdma_stat_read",
.data = &rdma_stat_read,
.maxlen = sizeof(atomic_t),
.mode = 0644,
.proc_handler = read_reset_stat,
},
{
.procname = "rdma_stat_recv",
.data = &rdma_stat_recv,
.maxlen = sizeof(atomic_t),
.mode = 0644,
.proc_handler = read_reset_stat,
},
{
.procname = "rdma_stat_write",
.data = &rdma_stat_write,
.maxlen = sizeof(atomic_t),
.mode = 0644,
.proc_handler = read_reset_stat,
},
{
.procname = "rdma_stat_sq_starve",
.data = &rdma_stat_sq_starve,
.maxlen = sizeof(atomic_t),
.mode = 0644,
.proc_handler = read_reset_stat,
},
{
.procname = "rdma_stat_rq_starve",
.data = &rdma_stat_rq_starve,
.maxlen = sizeof(atomic_t),
.mode = 0644,
.proc_handler = read_reset_stat,
},
{
.procname = "rdma_stat_rq_poll",
.data = &rdma_stat_rq_poll,
.maxlen = sizeof(atomic_t),
.mode = 0644,
.proc_handler = read_reset_stat,
},
{
.procname = "rdma_stat_rq_prod",
.data = &rdma_stat_rq_prod,
.maxlen = sizeof(atomic_t),
.mode = 0644,
.proc_handler = read_reset_stat,
},
{
.procname = "rdma_stat_sq_poll",
.data = &rdma_stat_sq_poll,
.maxlen = sizeof(atomic_t),
.mode = 0644,
.proc_handler = read_reset_stat,
},
{
.procname = "rdma_stat_sq_prod",
.data = &rdma_stat_sq_prod,
.maxlen = sizeof(atomic_t),
.mode = 0644,
.proc_handler = read_reset_stat,
},
{ },
};
static ctl_table svcrdma_table[] = {
{
.procname = "svc_rdma",
.mode = 0555,
.child = svcrdma_parm_table
},
{ },
};
static ctl_table svcrdma_root_table[] = {
{
.procname = "sunrpc",
.mode = 0555,
.child = svcrdma_table
},
{ },
};
void svc_rdma_cleanup(void)
{
dprintk("SVCRDMA Module Removed, deregister RPC RDMA transport\n");
destroy_workqueue(svc_rdma_wq);
if (svcrdma_table_header) {
unregister_sysctl_table(svcrdma_table_header);
svcrdma_table_header = NULL;
}
svc_unreg_xprt_class(&svc_rdma_class);
kmem_cache_destroy(svc_rdma_map_cachep);
kmem_cache_destroy(svc_rdma_ctxt_cachep);
}
int svc_rdma_init(void)
{
dprintk("SVCRDMA Module Init, register RPC RDMA transport\n");
dprintk("\tsvcrdma_ord : %d\n", svcrdma_ord);
dprintk("\tmax_requests : %d\n", svcrdma_max_requests);
dprintk("\tsq_depth : %d\n",
svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT);
dprintk("\tmax_inline : %d\n", svcrdma_max_req_size);
svc_rdma_wq = alloc_workqueue("svc_rdma", 0, 0);
if (!svc_rdma_wq)
return -ENOMEM;
if (!svcrdma_table_header)
svcrdma_table_header =
register_sysctl_table(svcrdma_root_table);
/* Create the temporary map cache */
svc_rdma_map_cachep = kmem_cache_create("svc_rdma_map_cache",
sizeof(struct svc_rdma_req_map),
0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!svc_rdma_map_cachep) {
printk(KERN_INFO "Could not allocate map cache.\n");
goto err0;
}
/* Create the temporary context cache */
svc_rdma_ctxt_cachep =
kmem_cache_create("svc_rdma_ctxt_cache",
sizeof(struct svc_rdma_op_ctxt),
0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!svc_rdma_ctxt_cachep) {
printk(KERN_INFO "Could not allocate WR ctxt cache.\n");
goto err1;
}
/* Register RDMA with the SVC transport switch */
svc_reg_xprt_class(&svc_rdma_class);
return 0;
err1:
kmem_cache_destroy(svc_rdma_map_cachep);
err0:
unregister_sysctl_table(svcrdma_table_header);
destroy_workqueue(svc_rdma_wq);
return -ENOMEM;
}
MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("SVC RDMA Transport");
MODULE_LICENSE("Dual BSD/GPL");
module_init(svc_rdma_init);
module_exit(svc_rdma_cleanup);
| gpl-2.0 |
EPDCenterSpain/bq-DC-v1 | sound/pci/echoaudio/indigodjx.c | 3613 | 2982 | /*
* ALSA driver for Echoaudio soundcards.
* Copyright (C) 2009 Giuliano Pochini <pochini@shiny.it>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#define INDIGO_FAMILY
#define ECHOCARD_INDIGO_DJX
#define ECHOCARD_NAME "Indigo DJx"
#define ECHOCARD_HAS_SUPER_INTERLEAVE
#define ECHOCARD_HAS_VMIXER
#define ECHOCARD_HAS_STEREO_BIG_ENDIAN32
/* Pipe indexes */
#define PX_ANALOG_OUT 0 /* 8 */
#define PX_DIGITAL_OUT 8 /* 0 */
#define PX_ANALOG_IN 8 /* 0 */
#define PX_DIGITAL_IN 8 /* 0 */
#define PX_NUM 8
/* Bus indexes */
#define BX_ANALOG_OUT 0 /* 4 */
#define BX_DIGITAL_OUT 4 /* 0 */
#define BX_ANALOG_IN 4 /* 0 */
#define BX_DIGITAL_IN 4 /* 0 */
#define BX_NUM 4
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/asoundef.h>
#include <sound/initval.h>
#include <asm/atomic.h>
#include "echoaudio.h"
MODULE_FIRMWARE("ea/loader_dsp.fw");
MODULE_FIRMWARE("ea/indigo_djx_dsp.fw");
#define FW_361_LOADER 0
#define FW_INDIGO_DJX_DSP 1
static const struct firmware card_fw[] = {
{0, "loader_dsp.fw"},
{0, "indigo_djx_dsp.fw"}
};
static DEFINE_PCI_DEVICE_TABLE(snd_echo_ids) = {
{0x1057, 0x3410, 0xECC0, 0x00E0, 0, 0, 0}, /* Indigo DJx*/
{0,}
};
static struct snd_pcm_hardware pcm_hardware_skel = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_SYNC_START,
.formats = SNDRV_PCM_FMTBIT_U8 |
SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_3LE |
SNDRV_PCM_FMTBIT_S32_LE |
SNDRV_PCM_FMTBIT_S32_BE,
.rates = SNDRV_PCM_RATE_32000 |
SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000 |
SNDRV_PCM_RATE_64000 |
SNDRV_PCM_RATE_88200 |
SNDRV_PCM_RATE_96000,
.rate_min = 32000,
.rate_max = 96000,
.channels_min = 1,
.channels_max = 4,
.buffer_bytes_max = 262144,
.period_bytes_min = 32,
.period_bytes_max = 131072,
.periods_min = 2,
.periods_max = 220,
};
#include "indigodjx_dsp.c"
#include "indigo_express_dsp.c"
#include "echoaudio_dsp.c"
#include "echoaudio.c"
| gpl-2.0 |
pasterp/kernel_S8515 | drivers/mtd/nand/sm_common.c | 4893 | 4711 | /*
* Copyright © 2009 - Maxim Levitsky
* Common routines & support for xD format
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/mtd/nand.h>
#include <linux/module.h>
#include "sm_common.h"
static struct nand_ecclayout nand_oob_sm = {
.eccbytes = 6,
.eccpos = {8, 9, 10, 13, 14, 15},
.oobfree = {
{.offset = 0 , .length = 4}, /* reserved */
{.offset = 6 , .length = 2}, /* LBA1 */
{.offset = 11, .length = 2} /* LBA2 */
}
};
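/*
 * Resulting 16-byte OOB map for the layout above (illustrative sketch;
 * bytes 4-5 are left free for the data-status/bad-block markers, cf.
 * chip->badblockpos == 0x05 below):
 *   0-3 reserved, 4-5 status/bad-block, 6-7 LBA1,
 *   8-10 ECC, 11-12 LBA2, 13-15 ECC
 */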
/* NOTE: This layout is not compatible with SmartMedia, */
/* because the 256 byte devices have a page dependent oob layout. */
/* However it does preserve the bad block markers. */
/* If you use smftl, it will bypass this and work correctly; */
/* if you don't, you break SmartMedia compliance anyway. */
static struct nand_ecclayout nand_oob_sm_small = {
.eccbytes = 3,
.eccpos = {0, 1, 2},
.oobfree = {
{.offset = 3 , .length = 2}, /* reserved */
{.offset = 6 , .length = 2}, /* LBA1 */
}
};
static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_oob_ops ops;
struct sm_oob oob;
int ret, error = 0;
memset(&oob, -1, SM_OOB_SIZE);
oob.block_status = 0x0F;
/* As long as this function is called on erase block boundaries
it will work correctly for 256 byte nand */
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
ops.oobbuf = (void *)&oob;
ops.datbuf = NULL;
ret = mtd_write_oob(mtd, ofs, &ops);
if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
printk(KERN_NOTICE
"sm_common: can't mark sector at %i as bad\n",
(int)ofs);
error = -EIO;
} else
mtd->ecc_stats.badblocks++;
return error;
}
static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
{"SmartMedia 1MiB 5V", 0x6e, 256, 1, 0x1000, 0},
{"SmartMedia 1MiB 3,3V", 0xe8, 256, 1, 0x1000, 0},
{"SmartMedia 1MiB 3,3V", 0xec, 256, 1, 0x1000, 0},
{"SmartMedia 2MiB 3,3V", 0xea, 256, 2, 0x1000, 0},
{"SmartMedia 2MiB 5V", 0x64, 256, 2, 0x1000, 0},
{"SmartMedia 2MiB 3,3V ROM", 0x5d, 512, 2, 0x2000, NAND_ROM},
{"SmartMedia 4MiB 3,3V", 0xe3, 512, 4, 0x2000, 0},
{"SmartMedia 4MiB 3,3/5V", 0xe5, 512, 4, 0x2000, 0},
{"SmartMedia 4MiB 5V", 0x6b, 512, 4, 0x2000, 0},
{"SmartMedia 4MiB 3,3V ROM", 0xd5, 512, 4, 0x2000, NAND_ROM},
{"SmartMedia 8MiB 3,3V", 0xe6, 512, 8, 0x2000, 0},
{"SmartMedia 8MiB 3,3V ROM", 0xd6, 512, 8, 0x2000, NAND_ROM},
{"SmartMedia 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
{"SmartMedia 16MiB 3,3V ROM", 0x57, 512, 16, 0x4000, NAND_ROM},
{"SmartMedia 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
{"SmartMedia 32MiB 3,3V ROM", 0x58, 512, 32, 0x4000, NAND_ROM},
{"SmartMedia 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
{"SmartMedia 64MiB 3,3V ROM", 0xd9, 512, 64, 0x4000, NAND_ROM},
{"SmartMedia 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
{"SmartMedia 128MiB 3,3V ROM", 0xda, 512, 128, 0x4000, NAND_ROM},
{"SmartMedia 256MiB 3,3V", 0x71, 512, 256, 0x4000 },
{"SmartMedia 256MiB 3,3V ROM", 0x5b, 512, 256, 0x4000, NAND_ROM},
{NULL,}
};
#define XD_TYPEM (NAND_NO_AUTOINCR | NAND_BROKEN_XD)
static struct nand_flash_dev nand_xd_flash_ids[] = {
{"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
{"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
{"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
{"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
{"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, XD_TYPEM},
{"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, XD_TYPEM},
{"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, XD_TYPEM},
{"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, XD_TYPEM},
{NULL,}
};
int sm_register_device(struct mtd_info *mtd, int smartmedia)
{
struct nand_chip *chip = mtd->priv;
int ret;
chip->options |= NAND_SKIP_BBTSCAN;
/* Scan for card properties */
ret = nand_scan_ident(mtd, 1, smartmedia ?
nand_smartmedia_flash_ids : nand_xd_flash_ids);
if (ret)
return ret;
/* Bad block marker position */
chip->badblockpos = 0x05;
chip->badblockbits = 7;
chip->block_markbad = sm_block_markbad;
/* ECC layout */
if (mtd->writesize == SM_SECTOR_SIZE)
chip->ecc.layout = &nand_oob_sm;
else if (mtd->writesize == SM_SMALL_PAGE)
chip->ecc.layout = &nand_oob_sm_small;
else
return -ENODEV;
ret = nand_scan_tail(mtd);
if (ret)
return ret;
return mtd_device_register(mtd, NULL, 0);
}
EXPORT_SYMBOL_GPL(sm_register_device);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
MODULE_DESCRIPTION("Common SmartMedia/xD functions");
| gpl-2.0 |
fideoman/Alucard-Kernel-jfltexx | drivers/staging/rtl8712/rtl871x_sta_mgt.c | 7453 | 9548 | /******************************************************************************
* rtl871x_sta_mgt.c
*
* Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
* Linux device driver for RTL8192SU
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
* Contact information:
* WLAN FAE <wlanfae@realtek.com>
* Larry Finger <Larry.Finger@lwfinger.net>
*
******************************************************************************/
#define _RTL871X_STA_MGT_C_
#include "osdep_service.h"
#include "drv_types.h"
#include "recv_osdep.h"
#include "xmit_osdep.h"
#include "sta_info.h"
static void _init_stainfo(struct sta_info *psta)
{
memset((u8 *)psta, 0, sizeof(struct sta_info));
spin_lock_init(&psta->lock);
_init_listhead(&psta->list);
_init_listhead(&psta->hash_list);
_r8712_init_sta_xmit_priv(&psta->sta_xmitpriv);
_r8712_init_sta_recv_priv(&psta->sta_recvpriv);
_init_listhead(&psta->asoc_list);
_init_listhead(&psta->auth_list);
}
u32 _r8712_init_sta_priv(struct sta_priv *pstapriv)
{
struct sta_info *psta;
s32 i;
pstapriv->pallocated_stainfo_buf = _malloc(sizeof(struct sta_info) *
NUM_STA + 4);
if (pstapriv->pallocated_stainfo_buf == NULL)
return _FAIL;
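/*
 * Align pstainfo_buf to a 4-byte boundary within the oversized
 * allocation: e.g. a buffer starting at 0x1003 yields
 * 0x1003 + 4 - (0x1003 & 3) == 0x1004 (illustrative address only).
 */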
pstapriv->pstainfo_buf = pstapriv->pallocated_stainfo_buf + 4 -
((addr_t)(pstapriv->pallocated_stainfo_buf) & 3);
_init_queue(&pstapriv->free_sta_queue);
spin_lock_init(&pstapriv->sta_hash_lock);
pstapriv->asoc_sta_count = 0;
_init_queue(&pstapriv->sleep_q);
_init_queue(&pstapriv->wakeup_q);
psta = (struct sta_info *)(pstapriv->pstainfo_buf);
for (i = 0; i < NUM_STA; i++) {
_init_stainfo(psta);
_init_listhead(&(pstapriv->sta_hash[i]));
list_insert_tail(&psta->list,
get_list_head(&pstapriv->free_sta_queue));
psta++;
}
_init_listhead(&pstapriv->asoc_list);
_init_listhead(&pstapriv->auth_list);
return _SUCCESS;
}
/* this function frees the lock/semaphore resources of all stainfos */
static void mfree_all_stainfo(struct sta_priv *pstapriv)
{
unsigned long irqL;
struct list_head *plist, *phead;
struct sta_info *psta = NULL;
spin_lock_irqsave(&pstapriv->sta_hash_lock, irqL);
phead = get_list_head(&pstapriv->free_sta_queue);
plist = get_next(phead);
while ((end_of_queue_search(phead, plist)) == false) {
psta = LIST_CONTAINOR(plist, struct sta_info, list);
plist = get_next(plist);
}
spin_unlock_irqrestore(&pstapriv->sta_hash_lock, irqL);
}
static void mfree_sta_priv_lock(struct sta_priv *pstapriv)
{
mfree_all_stainfo(pstapriv); /* must be done before freeing sta_hash_lock */
}
u32 _r8712_free_sta_priv(struct sta_priv *pstapriv)
{
if (pstapriv) {
mfree_sta_priv_lock(pstapriv);
kfree(pstapriv->pallocated_stainfo_buf);
}
return _SUCCESS;
}
struct sta_info *r8712_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
uint tmp_aid;
s32 index;
struct list_head *phash_list;
struct sta_info *psta;
struct __queue *pfree_sta_queue;
struct recv_reorder_ctrl *preorder_ctrl;
int i = 0;
u16 wRxSeqInitialValue = 0xffff;
unsigned long flags;
pfree_sta_queue = &pstapriv->free_sta_queue;
spin_lock_irqsave(&(pfree_sta_queue->lock), flags);
if (_queue_empty(pfree_sta_queue) == true)
psta = NULL;
else {
psta = LIST_CONTAINOR(get_next(&pfree_sta_queue->queue),
struct sta_info, list);
list_delete(&(psta->list));
tmp_aid = psta->aid;
_init_stainfo(psta);
memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
index = wifi_mac_hash(hwaddr);
if (index >= NUM_STA) {
psta = NULL;
goto exit;
}
phash_list = &(pstapriv->sta_hash[index]);
list_insert_tail(&psta->hash_list, phash_list);
pstapriv->asoc_sta_count++;
/* For the SMC router, the sequence number of the first packet of the
 * WPS handshake will be 0. In that case the packet would be dropped by
 * the recv_decache function if 0x00 were used as the default value for
 * the tid_rxseq variable, so we initialize tid_rxseq to 0xffff instead.
 */
for (i = 0; i < 16; i++)
memcpy(&psta->sta_recvpriv.rxcache.tid_rxseq[i],
&wRxSeqInitialValue, 2);
/* for A-MPDU Rx reordering buffer control */
for (i = 0; i < 16 ; i++) {
preorder_ctrl = &psta->recvreorder_ctrl[i];
preorder_ctrl->padapter = pstapriv->padapter;
preorder_ctrl->indicate_seq = 0xffff;
preorder_ctrl->wend_b = 0xffff;
preorder_ctrl->wsize_b = 64;
_init_queue(&preorder_ctrl->pending_recvframe_queue);
r8712_init_recv_timer(preorder_ctrl);
}
}
exit:
spin_unlock_irqrestore(&(pfree_sta_queue->lock), flags);
return psta;
}
/* using pstapriv->sta_hash_lock to protect */
void r8712_free_stainfo(struct _adapter *padapter, struct sta_info *psta)
{
int i;
unsigned long irqL0;
struct __queue *pfree_sta_queue;
struct recv_reorder_ctrl *preorder_ctrl;
struct sta_xmit_priv *pstaxmitpriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct sta_priv *pstapriv = &padapter->stapriv;
if (psta == NULL)
return;
pfree_sta_queue = &pstapriv->free_sta_queue;
pstaxmitpriv = &psta->sta_xmitpriv;
spin_lock_irqsave(&(pxmitpriv->vo_pending.lock), irqL0);
r8712_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending);
list_delete(&(pstaxmitpriv->vo_q.tx_pending));
spin_unlock_irqrestore(&(pxmitpriv->vo_pending.lock), irqL0);
spin_lock_irqsave(&(pxmitpriv->vi_pending.lock), irqL0);
r8712_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending);
list_delete(&(pstaxmitpriv->vi_q.tx_pending));
spin_unlock_irqrestore(&(pxmitpriv->vi_pending.lock), irqL0);
spin_lock_irqsave(&(pxmitpriv->bk_pending.lock), irqL0);
r8712_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending);
list_delete(&(pstaxmitpriv->bk_q.tx_pending));
spin_unlock_irqrestore(&(pxmitpriv->bk_pending.lock), irqL0);
spin_lock_irqsave(&(pxmitpriv->be_pending.lock), irqL0);
r8712_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending);
list_delete(&(pstaxmitpriv->be_q.tx_pending));
spin_unlock_irqrestore(&(pxmitpriv->be_pending.lock), irqL0);
list_delete(&psta->hash_list);
pstapriv->asoc_sta_count--;
/* re-init sta_info; 20061114 */
_r8712_init_sta_xmit_priv(&psta->sta_xmitpriv);
_r8712_init_sta_recv_priv(&psta->sta_recvpriv);
/* for A-MPDU Rx reordering buffer control,
* cancel reordering_ctrl_timer */
for (i = 0; i < 16; i++) {
preorder_ctrl = &psta->recvreorder_ctrl[i];
_cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer);
}
spin_lock(&(pfree_sta_queue->lock));
/* insert into free_sta_queue; 20061114 */
list_insert_tail(&psta->list, get_list_head(pfree_sta_queue));
spin_unlock(&(pfree_sta_queue->lock));
}
/* free all stainfo which in sta_hash[all] */
void r8712_free_all_stainfo(struct _adapter *padapter)
{
unsigned long irqL;
struct list_head *plist, *phead;
s32 index;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct sta_info *pbcmc_stainfo = r8712_get_bcmc_stainfo(padapter);
if (pstapriv->asoc_sta_count == 1)
return;
spin_lock_irqsave(&pstapriv->sta_hash_lock, irqL);
for (index = 0; index < NUM_STA; index++) {
phead = &(pstapriv->sta_hash[index]);
plist = get_next(phead);
while ((end_of_queue_search(phead, plist)) == false) {
psta = LIST_CONTAINOR(plist,
struct sta_info, hash_list);
plist = get_next(plist);
if (pbcmc_stainfo != psta)
r8712_free_stainfo(padapter, psta);
}
}
spin_unlock_irqrestore(&pstapriv->sta_hash_lock, irqL);
}
/* any station allocated can be searched by hash list */
struct sta_info *r8712_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
unsigned long irqL;
struct list_head *plist, *phead;
struct sta_info *psta = NULL;
u32 index;
if (hwaddr == NULL)
return NULL;
index = wifi_mac_hash(hwaddr);
spin_lock_irqsave(&pstapriv->sta_hash_lock, irqL);
phead = &(pstapriv->sta_hash[index]);
plist = get_next(phead);
while ((end_of_queue_search(phead, plist)) == false) {
psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
if ((!memcmp(psta->hwaddr, hwaddr, ETH_ALEN))) {
/* if found the matched address */
break;
}
psta = NULL;
plist = get_next(plist);
}
spin_unlock_irqrestore(&pstapriv->sta_hash_lock, irqL);
return psta;
}
void r8712_init_bcmc_stainfo(struct _adapter *padapter)
{
struct sta_info *psta;
struct tx_servq *ptxservq;
unsigned char bcast_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct sta_priv *pstapriv = &padapter->stapriv;
psta = r8712_alloc_stainfo(pstapriv, bcast_addr);
if (psta == NULL)
return;
ptxservq = &(psta->sta_xmitpriv.be_q);
}
struct sta_info *r8712_get_bcmc_stainfo(struct _adapter *padapter)
{
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
psta = r8712_get_stainfo(pstapriv, bc_addr);
return psta;
}
u8 r8712_access_ctrl(struct wlan_acl_pool *pacl_list, u8 *mac_addr)
{
return true;
}
| gpl-2.0 |
stargo/android_kernel_amazon_ford | fs/ext3/xattr_security.c | 7709 | 1991 | /*
* linux/fs/ext3/xattr_security.c
* Handler for storing security labels as extended attributes.
*/
#include <linux/security.h>
#include "ext3.h"
#include "xattr.h"
static size_t
ext3_xattr_security_list(struct dentry *dentry, char *list, size_t list_size,
const char *name, size_t name_len, int type)
{
const size_t prefix_len = XATTR_SECURITY_PREFIX_LEN;
const size_t total_len = prefix_len + name_len + 1;
if (list && total_len <= list_size) {
memcpy(list, XATTR_SECURITY_PREFIX, prefix_len);
memcpy(list+prefix_len, name, name_len);
list[prefix_len + name_len] = '\0';
}
return total_len;
}
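/*
 * Worked example (illustrative only): for name == "selinux"
 * (name_len == 7) the emitted entry is the NUL-terminated string
 * "security.selinux", i.e. total_len == 9 + 7 + 1 == 17 bytes.
 */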
static int
ext3_xattr_security_get(struct dentry *dentry, const char *name,
void *buffer, size_t size, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
return ext3_xattr_get(dentry->d_inode, EXT3_XATTR_INDEX_SECURITY,
name, buffer, size);
}
static int
ext3_xattr_security_set(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
return ext3_xattr_set(dentry->d_inode, EXT3_XATTR_INDEX_SECURITY,
name, value, size, flags);
}
int ext3_initxattrs(struct inode *inode, const struct xattr *xattr_array,
void *fs_info)
{
const struct xattr *xattr;
handle_t *handle = fs_info;
int err = 0;
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
err = ext3_xattr_set_handle(handle, inode,
EXT3_XATTR_INDEX_SECURITY,
xattr->name, xattr->value,
xattr->value_len, 0);
if (err < 0)
break;
}
return err;
}
int
ext3_init_security(handle_t *handle, struct inode *inode, struct inode *dir,
const struct qstr *qstr)
{
return security_inode_init_security(inode, dir, qstr,
&ext3_initxattrs, handle);
}
const struct xattr_handler ext3_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.list = ext3_xattr_security_list,
.get = ext3_xattr_security_get,
.set = ext3_xattr_security_set,
};
| gpl-2.0 |
Philippe12/linux-3.4-a20 | arch/mips/mm/cerr-sb1.c | 8733 | 16695 | /*
* Copyright (C) 2001,2002,2003 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/sched.h>
#include <asm/mipsregs.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#if !defined(CONFIG_SIBYTE_BUS_WATCHER) || defined(CONFIG_SIBYTE_BW_TRACE)
#include <asm/io.h>
#include <asm/sibyte/sb1250_scd.h>
#endif
/*
* We'd like to dump the L2_ECC_TAG register on errors, but errata make
* that unsafe... So for now we don't. (BCM1250/BCM112x erratum SOC-48.)
*/
#undef DUMP_L2_ECC_TAG_ON_ERROR
/* SB1 definitions */
/* XXX should come from config1 XXX */
#define SB1_CACHE_INDEX_MASK 0x1fe0
#define CP0_ERRCTL_RECOVERABLE (1 << 31)
#define CP0_ERRCTL_DCACHE (1 << 30)
#define CP0_ERRCTL_ICACHE (1 << 29)
#define CP0_ERRCTL_MULTIBUS (1 << 23)
#define CP0_ERRCTL_MC_TLB (1 << 15)
#define CP0_ERRCTL_MC_TIMEOUT (1 << 14)
#define CP0_CERRI_TAG_PARITY (1 << 29)
#define CP0_CERRI_DATA_PARITY (1 << 28)
#define CP0_CERRI_EXTERNAL (1 << 26)
#define CP0_CERRI_IDX_VALID(c) (!((c) & CP0_CERRI_EXTERNAL))
#define CP0_CERRI_DATA (CP0_CERRI_DATA_PARITY)
#define CP0_CERRD_MULTIPLE (1 << 31)
#define CP0_CERRD_TAG_STATE (1 << 30)
#define CP0_CERRD_TAG_ADDRESS (1 << 29)
#define CP0_CERRD_DATA_SBE (1 << 28)
#define CP0_CERRD_DATA_DBE (1 << 27)
#define CP0_CERRD_EXTERNAL (1 << 26)
#define CP0_CERRD_LOAD (1 << 25)
#define CP0_CERRD_STORE (1 << 24)
#define CP0_CERRD_FILLWB (1 << 23)
#define CP0_CERRD_COHERENCY (1 << 22)
#define CP0_CERRD_DUPTAG (1 << 21)
#define CP0_CERRD_DPA_VALID(c) (!((c) & CP0_CERRD_EXTERNAL))
#define CP0_CERRD_IDX_VALID(c) \
(((c) & (CP0_CERRD_LOAD | CP0_CERRD_STORE)) ? (!((c) & CP0_CERRD_EXTERNAL)) : 0)
#define CP0_CERRD_CAUSES \
(CP0_CERRD_LOAD | CP0_CERRD_STORE | CP0_CERRD_FILLWB | CP0_CERRD_COHERENCY | CP0_CERRD_DUPTAG)
#define CP0_CERRD_TYPES \
(CP0_CERRD_TAG_STATE | CP0_CERRD_TAG_ADDRESS | CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE | CP0_CERRD_EXTERNAL)
#define CP0_CERRD_DATA (CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE)
static uint32_t extract_ic(unsigned short addr, int data);
static uint32_t extract_dc(unsigned short addr, int data);
static inline void breakout_errctl(unsigned int val)
{
if (val & CP0_ERRCTL_RECOVERABLE)
printk(" recoverable");
if (val & CP0_ERRCTL_DCACHE)
printk(" dcache");
if (val & CP0_ERRCTL_ICACHE)
printk(" icache");
if (val & CP0_ERRCTL_MULTIBUS)
printk(" multiple-buserr");
printk("\n");
}
static inline void breakout_cerri(unsigned int val)
{
if (val & CP0_CERRI_TAG_PARITY)
printk(" tag-parity");
if (val & CP0_CERRI_DATA_PARITY)
printk(" data-parity");
if (val & CP0_CERRI_EXTERNAL)
printk(" external");
printk("\n");
}
static inline void breakout_cerrd(unsigned int val)
{
switch (val & CP0_CERRD_CAUSES) {
case CP0_CERRD_LOAD:
printk(" load,");
break;
case CP0_CERRD_STORE:
printk(" store,");
break;
case CP0_CERRD_FILLWB:
printk(" fill/wb,");
break;
case CP0_CERRD_COHERENCY:
printk(" coherency,");
break;
case CP0_CERRD_DUPTAG:
printk(" duptags,");
break;
default:
printk(" NO CAUSE,");
break;
}
if (!(val & CP0_CERRD_TYPES))
printk(" NO TYPE");
else {
if (val & CP0_CERRD_MULTIPLE)
printk(" multi-err");
if (val & CP0_CERRD_TAG_STATE)
printk(" tag-state");
if (val & CP0_CERRD_TAG_ADDRESS)
printk(" tag-address");
if (val & CP0_CERRD_DATA_SBE)
printk(" data-SBE");
if (val & CP0_CERRD_DATA_DBE)
printk(" data-DBE");
if (val & CP0_CERRD_EXTERNAL)
printk(" external");
}
printk("\n");
}
#ifndef CONFIG_SIBYTE_BUS_WATCHER
static void check_bus_watcher(void)
{
uint32_t status, l2_err, memio_err;
#ifdef DUMP_L2_ECC_TAG_ON_ERROR
uint64_t l2_tag;
#endif
/* Destructive read, clears register and interrupt */
status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS));
/* Bit 31 is always on, but there's no #define for that */
if (status & ~(1UL << 31)) {
l2_err = csr_in32(IOADDR(A_BUS_L2_ERRORS));
#ifdef DUMP_L2_ECC_TAG_ON_ERROR
l2_tag = in64(IOADDR(A_L2_ECC_TAG));
#endif
memio_err = csr_in32(IOADDR(A_BUS_MEM_IO_ERRORS));
printk("Bus watcher error counters: %08x %08x\n", l2_err, memio_err);
printk("\nLast recorded signature:\n");
printk("Request %02x from %d, answered by %d with Dcode %d\n",
(unsigned int)(G_SCD_BERR_TID(status) & 0x3f),
(int)(G_SCD_BERR_TID(status) >> 6),
(int)G_SCD_BERR_RID(status),
(int)G_SCD_BERR_DCODE(status));
#ifdef DUMP_L2_ECC_TAG_ON_ERROR
printk("Last L2 tag w/ bad ECC: %016llx\n", l2_tag);
#endif
} else {
printk("Bus watcher indicates no error\n");
}
}
#else
extern void check_bus_watcher(void);
#endif
asmlinkage void sb1_cache_error(void)
{
uint32_t errctl, cerr_i, cerr_d, dpalo, dpahi, eepc, res;
unsigned long long cerr_dpa;
#ifdef CONFIG_SIBYTE_BW_TRACE
/* Freeze the trace buffer now */
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
csr_out32(M_BCM1480_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG));
#else
csr_out32(M_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG));
#endif
printk("Trace buffer frozen\n");
#endif
printk("Cache error exception on CPU %x:\n",
(read_c0_prid() >> 25) & 0x7);
__asm__ __volatile__ (
" .set push\n\t"
" .set mips64\n\t"
" .set noat\n\t"
" mfc0 %0, $26\n\t"
" mfc0 %1, $27\n\t"
" mfc0 %2, $27, 1\n\t"
" dmfc0 $1, $27, 3\n\t"
" dsrl32 %3, $1, 0 \n\t"
" sll %4, $1, 0 \n\t"
" mfc0 %5, $30\n\t"
" .set pop"
: "=r" (errctl), "=r" (cerr_i), "=r" (cerr_d),
"=r" (dpahi), "=r" (dpalo), "=r" (eepc));
cerr_dpa = (((uint64_t)dpahi) << 32) | dpalo;
printk(" c0_errorepc == %08x\n", eepc);
printk(" c0_errctl == %08x", errctl);
breakout_errctl(errctl);
if (errctl & CP0_ERRCTL_ICACHE) {
printk(" c0_cerr_i == %08x", cerr_i);
breakout_cerri(cerr_i);
if (CP0_CERRI_IDX_VALID(cerr_i)) {
/* Check index of EPC, allowing for delay slot */
if (((eepc & SB1_CACHE_INDEX_MASK) != (cerr_i & SB1_CACHE_INDEX_MASK)) &&
((eepc & SB1_CACHE_INDEX_MASK) != ((cerr_i & SB1_CACHE_INDEX_MASK) - 4)))
printk(" cerr_i idx doesn't match eepc\n");
else {
res = extract_ic(cerr_i & SB1_CACHE_INDEX_MASK,
(cerr_i & CP0_CERRI_DATA) != 0);
if (!(res & cerr_i))
printk("...didn't see indicated icache problem\n");
}
}
}
if (errctl & CP0_ERRCTL_DCACHE) {
printk(" c0_cerr_d == %08x", cerr_d);
breakout_cerrd(cerr_d);
if (CP0_CERRD_DPA_VALID(cerr_d)) {
printk(" c0_cerr_dpa == %010llx\n", cerr_dpa);
if (!CP0_CERRD_IDX_VALID(cerr_d)) {
res = extract_dc(cerr_dpa & SB1_CACHE_INDEX_MASK,
(cerr_d & CP0_CERRD_DATA) != 0);
if (!(res & cerr_d))
printk("...didn't see indicated dcache problem\n");
} else {
if ((cerr_dpa & SB1_CACHE_INDEX_MASK) != (cerr_d & SB1_CACHE_INDEX_MASK))
printk(" cerr_d idx doesn't match cerr_dpa\n");
else {
res = extract_dc(cerr_d & SB1_CACHE_INDEX_MASK,
(cerr_d & CP0_CERRD_DATA) != 0);
if (!(res & cerr_d))
printk("...didn't see indicated problem\n");
}
}
}
}
check_bus_watcher();
/*
* Calling panic() when a fatal cache error occurs scrambles the
* state of the system (and the cache), making it difficult to
* investigate after the fact. However, if you just stall the CPU,
* the other CPU may keep on running, which is typically very
* undesirable.
*/
#ifdef CONFIG_SB1_CERR_STALL
while (1)
;
#else
panic("unhandled cache error");
#endif
}
/* Parity lookup table. */
static const uint8_t parity[256] = {
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0
};
/* Masks to select bits for Hamming parity, mask_72_64[i] for bit[i] */
static const uint64_t mask_72_64[8] = {
0x0738C808099264FFULL,
0x38C808099264FF07ULL,
0xC808099264FF0738ULL,
0x08099264FF0738C8ULL,
0x099264FF0738C808ULL,
0x9264FF0738C80809ULL,
0x64FF0738C8080992ULL,
0xFF0738C808099264ULL
};
/* Calculate the parity on a range of bits */
static char range_parity(uint64_t dword, int max, int min)
{
char parity = 0;
int i;
dword >>= min;
for (i=max-min; i>=0; i--) {
if (dword & 0x1)
parity = !parity;
dword >>= 1;
}
return parity;
}
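/*
 * Worked example (illustrative only): range_parity(0x0b, 3, 0) scans the
 * four low bits of 0x0b = 1011b, sees three ones and returns 1 (odd
 * parity); range_parity(0x0b, 2, 1) looks only at bits 2..1 = 01b and
 * also returns 1.
 */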
/* Calculate the 4-bit even byte-parity for an instruction */
static unsigned char inst_parity(uint32_t word)
{
int i, j;
char parity = 0;
for (j=0; j<4; j++) {
char byte_parity = 0;
for (i=0; i<8; i++) {
if (word & 0x80000000)
byte_parity = !byte_parity;
word <<= 1;
}
parity <<= 1;
parity |= byte_parity;
}
return parity;
}
static uint32_t extract_ic(unsigned short addr, int data)
{
unsigned short way;
int valid;
uint32_t taghi, taglolo, taglohi;
unsigned long long taglo, va;
uint64_t tlo_tmp;
uint8_t lru;
int res = 0;
printk("Icache index 0x%04x ", addr);
for (way = 0; way < 4; way++) {
/* Index-load-tag-I */
__asm__ __volatile__ (
" .set push \n\t"
" .set noreorder \n\t"
" .set mips64 \n\t"
" .set noat \n\t"
" cache 4, 0(%3) \n\t"
" mfc0 %0, $29 \n\t"
" dmfc0 $1, $28 \n\t"
" dsrl32 %1, $1, 0 \n\t"
" sll %2, $1, 0 \n\t"
" .set pop"
: "=r" (taghi), "=r" (taglohi), "=r" (taglolo)
: "r" ((way << 13) | addr));
taglo = ((unsigned long long)taglohi << 32) | taglolo;
if (way == 0) {
lru = (taghi >> 14) & 0xff;
printk("[Bank %d Set 0x%02x] LRU > %d %d %d %d > MRU\n",
((addr >> 5) & 0x3), /* bank */
((addr >> 7) & 0x3f), /* index */
(lru & 0x3),
((lru >> 2) & 0x3),
((lru >> 4) & 0x3),
((lru >> 6) & 0x3));
}
va = (taglo & 0xC0000FFFFFFFE000ULL) | addr;
if ((taglo & (1 << 31)) && (((taglo >> 62) & 0x3) == 3))
va |= 0x3FFFF00000000000ULL;
valid = ((taghi >> 29) & 1);
if (valid) {
tlo_tmp = taglo & 0xfff3ff;
if (((taglo >> 10) & 1) ^ range_parity(tlo_tmp, 23, 0)) {
printk(" ** bad parity in VTag0/G/ASID\n");
res |= CP0_CERRI_TAG_PARITY;
}
if (((taglo >> 11) & 1) ^ range_parity(taglo, 63, 24)) {
printk(" ** bad parity in R/VTag1\n");
res |= CP0_CERRI_TAG_PARITY;
}
}
if (valid ^ ((taghi >> 27) & 1)) {
printk(" ** bad parity for valid bit\n");
res |= CP0_CERRI_TAG_PARITY;
}
printk(" %d [VA %016llx] [Vld? %d] raw tags: %08X-%016llX\n",
way, va, valid, taghi, taglo);
if (data) {
uint32_t datahi, insta, instb;
uint8_t predecode;
int offset;
/* (hit all banks and ways) */
for (offset = 0; offset < 4; offset++) {
/* Index-load-data-I */
__asm__ __volatile__ (
" .set push\n\t"
" .set noreorder\n\t"
" .set mips64\n\t"
" .set noat\n\t"
" cache 6, 0(%3) \n\t"
" mfc0 %0, $29, 1\n\t"
" dmfc0 $1, $28, 1\n\t"
" dsrl32 %1, $1, 0 \n\t"
" sll %2, $1, 0 \n\t"
" .set pop \n"
: "=r" (datahi), "=r" (insta), "=r" (instb)
: "r" ((way << 13) | addr | (offset << 3)));
predecode = (datahi >> 8) & 0xff;
if (((datahi >> 16) & 1) != (uint32_t)range_parity(predecode, 7, 0)) {
printk(" ** bad parity in predecode\n");
res |= CP0_CERRI_DATA_PARITY;
}
/* XXXKW should/could check predecode bits themselves */
if (((datahi >> 4) & 0xf) ^ inst_parity(insta)) {
printk(" ** bad parity in instruction a\n");
res |= CP0_CERRI_DATA_PARITY;
}
if ((datahi & 0xf) ^ inst_parity(instb)) {
printk(" ** bad parity in instruction b\n");
res |= CP0_CERRI_DATA_PARITY;
}
printk(" %05X-%08X%08X", datahi, insta, instb);
}
printk("\n");
}
}
return res;
}
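/*
 * Note on the return value (illustrative): extract_ic() accumulates a
 * CP0_CERRI_* bit for every parity problem it can reproduce by probing
 * the tags and data of all four ways; sb1_cache_error() then verifies
 * that the bits reported in cerr_i were actually observed (res & cerr_i).
 */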
/* Compute the ECC for a data doubleword */
static uint8_t dc_ecc(uint64_t dword)
{
uint64_t t;
uint32_t w;
uint8_t p;
int i;
p = 0;
for (i = 7; i >= 0; i--)
{
p <<= 1;
t = dword & mask_72_64[i];
w = (uint32_t)(t >> 32);
p ^= (parity[w>>24] ^ parity[(w>>16) & 0xFF]
^ parity[(w>>8) & 0xFF] ^ parity[w & 0xFF]);
w = (uint32_t)(t & 0xFFFFFFFF);
p ^= (parity[w>>24] ^ parity[(w>>16) & 0xFF]
^ parity[(w>>8) & 0xFF] ^ parity[w & 0xFF]);
}
return p;
}
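/*
 * Illustrative note (not part of the handler): check bit i of the byte
 * returned by dc_ecc() is the parity of (dword & mask_72_64[i]),
 * accumulated byte-wise through the parity[] lookup table above.
 * extract_dc() XORs the recomputed byte with the stored one and treats
 * exactly one differing bit as a single-bit error (SBE), anything else
 * as a double-bit error (DBE).
 */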
struct dc_state {
unsigned char val;
char *name;
};
static struct dc_state dc_states[] = {
{ 0x00, "INVALID" },
{ 0x0f, "COH-SHD" },
{ 0x13, "NCO-E-C" },
{ 0x19, "NCO-E-D" },
{ 0x16, "COH-E-C" },
{ 0x1c, "COH-E-D" },
{ 0xff, "*ERROR*" }
};
#define DC_TAG_VALID(state) \
(((state) == 0x0) || ((state) == 0xf) || ((state) == 0x13) || \
((state) == 0x19) || ((state) == 0x16) || ((state) == 0x1c))
static char *dc_state_str(unsigned char state)
{
struct dc_state *dsc = dc_states;
while (dsc->val != 0xff) {
if (dsc->val == state)
break;
dsc++;
}
return dsc->name;
}
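/*
 * Usage example (illustrative): dc_state_str(0x16) walks dc_states[] and
 * returns "COH-E-C"; any state not in the table falls through to the
 * 0xff sentinel and yields "*ERROR*".
 */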
static uint32_t extract_dc(unsigned short addr, int data)
{
int valid, way;
unsigned char state;
uint32_t taghi, taglolo, taglohi;
unsigned long long taglo, pa;
uint8_t ecc, lru;
int res = 0;
printk("Dcache index 0x%04x ", addr);
for (way = 0; way < 4; way++) {
__asm__ __volatile__ (
" .set push\n\t"
" .set noreorder\n\t"
" .set mips64\n\t"
" .set noat\n\t"
" cache 5, 0(%3)\n\t" /* Index-load-tag-D */
" mfc0 %0, $29, 2\n\t"
" dmfc0 $1, $28, 2\n\t"
" dsrl32 %1, $1, 0\n\t"
" sll %2, $1, 0\n\t"
" .set pop"
: "=r" (taghi), "=r" (taglohi), "=r" (taglolo)
: "r" ((way << 13) | addr));
taglo = ((unsigned long long)taglohi << 32) | taglolo;
pa = (taglo & 0xFFFFFFE000ULL) | addr;
if (way == 0) {
lru = (taghi >> 14) & 0xff;
printk("[Bank %d Set 0x%02x] LRU > %d %d %d %d > MRU\n",
((addr >> 11) & 0x2) | ((addr >> 5) & 1), /* bank */
((addr >> 6) & 0x3f), /* index */
(lru & 0x3),
((lru >> 2) & 0x3),
((lru >> 4) & 0x3),
((lru >> 6) & 0x3));
}
state = (taghi >> 25) & 0x1f;
valid = DC_TAG_VALID(state);
printk(" %d [PA %010llx] [state %s (%02x)] raw tags: %08X-%016llX\n",
way, pa, dc_state_str(state), state, taghi, taglo);
if (valid) {
if (((taglo >> 11) & 1) ^ range_parity(taglo, 39, 26)) {
printk(" ** bad parity in PTag1\n");
res |= CP0_CERRD_TAG_ADDRESS;
}
if (((taglo >> 10) & 1) ^ range_parity(taglo, 25, 13)) {
printk(" ** bad parity in PTag0\n");
res |= CP0_CERRD_TAG_ADDRESS;
}
} else {
res |= CP0_CERRD_TAG_STATE;
}
if (data) {
uint32_t datalohi, datalolo, datahi;
unsigned long long datalo;
int offset;
char bad_ecc = 0;
for (offset = 0; offset < 4; offset++) {
/* Index-load-data-D */
__asm__ __volatile__ (
" .set push\n\t"
" .set noreorder\n\t"
" .set mips64\n\t"
" .set noat\n\t"
" cache 7, 0(%3)\n\t" /* Index-load-data-D */
" mfc0 %0, $29, 3\n\t"
" dmfc0 $1, $28, 3\n\t"
" dsrl32 %1, $1, 0 \n\t"
" sll %2, $1, 0 \n\t"
" .set pop"
: "=r" (datahi), "=r" (datalohi), "=r" (datalolo)
: "r" ((way << 13) | addr | (offset << 3)));
datalo = ((unsigned long long)datalohi << 32) | datalolo;
ecc = dc_ecc(datalo);
if (ecc != datahi) {
int bits;
bad_ecc |= 1 << (3-offset);
ecc ^= datahi;
bits = hweight8(ecc);
res |= (bits == 1) ? CP0_CERRD_DATA_SBE : CP0_CERRD_DATA_DBE;
}
printk(" %02X-%016llX", datahi, datalo);
}
printk("\n");
if (bad_ecc)
printk(" dwords w/ bad ECC: %d %d %d %d\n",
!!(bad_ecc & 8), !!(bad_ecc & 4),
!!(bad_ecc & 2), !!(bad_ecc & 1));
}
}
return res;
}
| gpl-2.0 |
agrabren/android_kernel_htc_shooter | sound/pci/pcxhr/pcxhr_mix22.c | 9757 | 28127 | /*
* Driver for Digigram pcxhr compatible soundcards
*
* mixer interface for stereo cards
*
* Copyright (c) 2004 by Digigram <alsa@digigram.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/asoundef.h>
#include "pcxhr.h"
#include "pcxhr_core.h"
#include "pcxhr_mix22.h"
/* registers used on the DSP and Xilinx (port 2) : HR stereo cards only */
#define PCXHR_DSP_RESET 0x20
#define PCXHR_XLX_CFG 0x24
#define PCXHR_XLX_RUER 0x28
#define PCXHR_XLX_DATA 0x2C
#define PCXHR_XLX_STATUS 0x30
#define PCXHR_XLX_LOFREQ 0x34
#define PCXHR_XLX_HIFREQ 0x38
#define PCXHR_XLX_CSUER 0x3C
#define PCXHR_XLX_SELMIC 0x40
#define PCXHR_DSP 2
/* byte access only ! */
#define PCXHR_INPB(mgr, x) inb((mgr)->port[PCXHR_DSP] + (x))
#define PCXHR_OUTPB(mgr, x, data) outb((data), (mgr)->port[PCXHR_DSP] + (x))
/* values for PCHR_DSP_RESET register */
#define PCXHR_DSP_RESET_DSP 0x01
#define PCXHR_DSP_RESET_MUTE 0x02
#define PCXHR_DSP_RESET_CODEC 0x08
#define PCXHR_DSP_RESET_GPO_OFFSET 5
#define PCXHR_DSP_RESET_GPO_MASK 0x60
/* values for PCHR_XLX_CFG register */
#define PCXHR_CFG_SYNCDSP_MASK 0x80
#define PCXHR_CFG_DEPENDENCY_MASK 0x60
#define PCXHR_CFG_INDEPENDANT_SEL 0x00
#define PCXHR_CFG_MASTER_SEL 0x40
#define PCXHR_CFG_SLAVE_SEL 0x20
#define PCXHR_CFG_DATA_UER1_SEL_MASK 0x10 /* 0 (UER0), 1(UER1) */
#define PCXHR_CFG_DATAIN_SEL_MASK 0x08 /* 0 (ana), 1 (UER) */
#define PCXHR_CFG_SRC_MASK 0x04 /* 0 (Bypass), 1 (SRC active) */
#define PCXHR_CFG_CLOCK_UER1_SEL_MASK 0x02 /* 0 (UER0), 1(UER1) */
#define PCXHR_CFG_CLOCKIN_SEL_MASK 0x01 /* 0 (internal), 1 (AES/EBU) */
/* values for PCHR_XLX_DATA register */
#define PCXHR_DATA_CODEC 0x80
#define AKM_POWER_CONTROL_CMD 0xA007
#define AKM_RESET_ON_CMD 0xA100
#define AKM_RESET_OFF_CMD 0xA103
#define AKM_CLOCK_INF_55K_CMD 0xA240
#define AKM_CLOCK_SUP_55K_CMD 0xA24D
#define AKM_MUTE_CMD 0xA38D
#define AKM_UNMUTE_CMD 0xA30D
#define AKM_LEFT_LEVEL_CMD 0xA600
#define AKM_RIGHT_LEVEL_CMD 0xA700
/* values for PCHR_XLX_STATUS register - READ */
#define PCXHR_STAT_SRC_LOCK 0x01
#define PCXHR_STAT_LEVEL_IN 0x02
#define PCXHR_STAT_GPI_OFFSET 2
#define PCXHR_STAT_GPI_MASK 0x0C
#define PCXHR_STAT_MIC_CAPS 0x10
/* values for PCHR_XLX_STATUS register - WRITE */
#define PCXHR_STAT_FREQ_SYNC_MASK 0x01
#define PCXHR_STAT_FREQ_UER1_MASK 0x02
#define PCXHR_STAT_FREQ_SAVE_MASK 0x80
/* values for PCHR_XLX_CSUER register */
#define PCXHR_SUER1_BIT_U_READ_MASK 0x80
#define PCXHR_SUER1_BIT_C_READ_MASK 0x40
#define PCXHR_SUER1_DATA_PRESENT_MASK 0x20
#define PCXHR_SUER1_CLOCK_PRESENT_MASK 0x10
#define PCXHR_SUER_BIT_U_READ_MASK 0x08
#define PCXHR_SUER_BIT_C_READ_MASK 0x04
#define PCXHR_SUER_DATA_PRESENT_MASK 0x02
#define PCXHR_SUER_CLOCK_PRESENT_MASK 0x01
#define PCXHR_SUER_BIT_U_WRITE_MASK 0x02
#define PCXHR_SUER_BIT_C_WRITE_MASK 0x01
/* values for PCXHR_XLX_SELMIC register - WRITE */
#define PCXHR_SELMIC_PREAMPLI_OFFSET 2
#define PCXHR_SELMIC_PREAMPLI_MASK 0x0C
#define PCXHR_SELMIC_PHANTOM_ALIM 0x80
static const unsigned char g_hr222_p_level[] = {
0x00, /* [000] -49.5 dB: AKM[000] = -1.#INF dB (mute) */
0x01, /* [001] -49.0 dB: AKM[001] = -48.131 dB (diff=0.86920 dB) */
0x01, /* [002] -48.5 dB: AKM[001] = -48.131 dB (diff=0.36920 dB) */
0x01, /* [003] -48.0 dB: AKM[001] = -48.131 dB (diff=0.13080 dB) */
0x01, /* [004] -47.5 dB: AKM[001] = -48.131 dB (diff=0.63080 dB) */
0x01, /* [005] -47.0 dB: AKM[001] = -48.131 dB (diff=1.13080 dB) */
0x01, /* [006] -46.5 dB: AKM[001] = -48.131 dB (diff=1.63080 dB) */
0x01, /* [007] -46.0 dB: AKM[001] = -48.131 dB (diff=2.13080 dB) */
0x01, /* [008] -45.5 dB: AKM[001] = -48.131 dB (diff=2.63080 dB) */
0x02, /* [009] -45.0 dB: AKM[002] = -42.110 dB (diff=2.88980 dB) */
0x02, /* [010] -44.5 dB: AKM[002] = -42.110 dB (diff=2.38980 dB) */
0x02, /* [011] -44.0 dB: AKM[002] = -42.110 dB (diff=1.88980 dB) */
0x02, /* [012] -43.5 dB: AKM[002] = -42.110 dB (diff=1.38980 dB) */
0x02, /* [013] -43.0 dB: AKM[002] = -42.110 dB (diff=0.88980 dB) */
0x02, /* [014] -42.5 dB: AKM[002] = -42.110 dB (diff=0.38980 dB) */
0x02, /* [015] -42.0 dB: AKM[002] = -42.110 dB (diff=0.11020 dB) */
0x02, /* [016] -41.5 dB: AKM[002] = -42.110 dB (diff=0.61020 dB) */
0x02, /* [017] -41.0 dB: AKM[002] = -42.110 dB (diff=1.11020 dB) */
0x02, /* [018] -40.5 dB: AKM[002] = -42.110 dB (diff=1.61020 dB) */
0x03, /* [019] -40.0 dB: AKM[003] = -38.588 dB (diff=1.41162 dB) */
0x03, /* [020] -39.5 dB: AKM[003] = -38.588 dB (diff=0.91162 dB) */
0x03, /* [021] -39.0 dB: AKM[003] = -38.588 dB (diff=0.41162 dB) */
0x03, /* [022] -38.5 dB: AKM[003] = -38.588 dB (diff=0.08838 dB) */
0x03, /* [023] -38.0 dB: AKM[003] = -38.588 dB (diff=0.58838 dB) */
0x03, /* [024] -37.5 dB: AKM[003] = -38.588 dB (diff=1.08838 dB) */
0x04, /* [025] -37.0 dB: AKM[004] = -36.090 dB (diff=0.91040 dB) */
0x04, /* [026] -36.5 dB: AKM[004] = -36.090 dB (diff=0.41040 dB) */
0x04, /* [027] -36.0 dB: AKM[004] = -36.090 dB (diff=0.08960 dB) */
0x04, /* [028] -35.5 dB: AKM[004] = -36.090 dB (diff=0.58960 dB) */
0x05, /* [029] -35.0 dB: AKM[005] = -34.151 dB (diff=0.84860 dB) */
0x05, /* [030] -34.5 dB: AKM[005] = -34.151 dB (diff=0.34860 dB) */
0x05, /* [031] -34.0 dB: AKM[005] = -34.151 dB (diff=0.15140 dB) */
0x05, /* [032] -33.5 dB: AKM[005] = -34.151 dB (diff=0.65140 dB) */
0x06, /* [033] -33.0 dB: AKM[006] = -32.568 dB (diff=0.43222 dB) */
0x06, /* [034] -32.5 dB: AKM[006] = -32.568 dB (diff=0.06778 dB) */
0x06, /* [035] -32.0 dB: AKM[006] = -32.568 dB (diff=0.56778 dB) */
0x07, /* [036] -31.5 dB: AKM[007] = -31.229 dB (diff=0.27116 dB) */
0x07, /* [037] -31.0 dB: AKM[007] = -31.229 dB (diff=0.22884 dB) */
0x08, /* [038] -30.5 dB: AKM[008] = -30.069 dB (diff=0.43100 dB) */
0x08, /* [039] -30.0 dB: AKM[008] = -30.069 dB (diff=0.06900 dB) */
0x09, /* [040] -29.5 dB: AKM[009] = -29.046 dB (diff=0.45405 dB) */
0x09, /* [041] -29.0 dB: AKM[009] = -29.046 dB (diff=0.04595 dB) */
0x0a, /* [042] -28.5 dB: AKM[010] = -28.131 dB (diff=0.36920 dB) */
0x0a, /* [043] -28.0 dB: AKM[010] = -28.131 dB (diff=0.13080 dB) */
0x0b, /* [044] -27.5 dB: AKM[011] = -27.303 dB (diff=0.19705 dB) */
0x0b, /* [045] -27.0 dB: AKM[011] = -27.303 dB (diff=0.30295 dB) */
0x0c, /* [046] -26.5 dB: AKM[012] = -26.547 dB (diff=0.04718 dB) */
0x0d, /* [047] -26.0 dB: AKM[013] = -25.852 dB (diff=0.14806 dB) */
0x0e, /* [048] -25.5 dB: AKM[014] = -25.208 dB (diff=0.29176 dB) */
0x0e, /* [049] -25.0 dB: AKM[014] = -25.208 dB (diff=0.20824 dB) */
0x0f, /* [050] -24.5 dB: AKM[015] = -24.609 dB (diff=0.10898 dB) */
0x10, /* [051] -24.0 dB: AKM[016] = -24.048 dB (diff=0.04840 dB) */
0x11, /* [052] -23.5 dB: AKM[017] = -23.522 dB (diff=0.02183 dB) */
0x12, /* [053] -23.0 dB: AKM[018] = -23.025 dB (diff=0.02535 dB) */
0x13, /* [054] -22.5 dB: AKM[019] = -22.556 dB (diff=0.05573 dB) */
0x14, /* [055] -22.0 dB: AKM[020] = -22.110 dB (diff=0.11020 dB) */
0x15, /* [056] -21.5 dB: AKM[021] = -21.686 dB (diff=0.18642 dB) */
0x17, /* [057] -21.0 dB: AKM[023] = -20.896 dB (diff=0.10375 dB) */
0x18, /* [058] -20.5 dB: AKM[024] = -20.527 dB (diff=0.02658 dB) */
0x1a, /* [059] -20.0 dB: AKM[026] = -19.831 dB (diff=0.16866 dB) */
0x1b, /* [060] -19.5 dB: AKM[027] = -19.504 dB (diff=0.00353 dB) */
0x1d, /* [061] -19.0 dB: AKM[029] = -18.883 dB (diff=0.11716 dB) */
0x1e, /* [062] -18.5 dB: AKM[030] = -18.588 dB (diff=0.08838 dB) */
0x20, /* [063] -18.0 dB: AKM[032] = -18.028 dB (diff=0.02780 dB) */
0x22, /* [064] -17.5 dB: AKM[034] = -17.501 dB (diff=0.00123 dB) */
0x24, /* [065] -17.0 dB: AKM[036] = -17.005 dB (diff=0.00475 dB) */
0x26, /* [066] -16.5 dB: AKM[038] = -16.535 dB (diff=0.03513 dB) */
0x28, /* [067] -16.0 dB: AKM[040] = -16.090 dB (diff=0.08960 dB) */
0x2b, /* [068] -15.5 dB: AKM[043] = -15.461 dB (diff=0.03857 dB) */
0x2d, /* [069] -15.0 dB: AKM[045] = -15.067 dB (diff=0.06655 dB) */
0x30, /* [070] -14.5 dB: AKM[048] = -14.506 dB (diff=0.00598 dB) */
0x33, /* [071] -14.0 dB: AKM[051] = -13.979 dB (diff=0.02060 dB) */
0x36, /* [072] -13.5 dB: AKM[054] = -13.483 dB (diff=0.01707 dB) */
0x39, /* [073] -13.0 dB: AKM[057] = -13.013 dB (diff=0.01331 dB) */
0x3c, /* [074] -12.5 dB: AKM[060] = -12.568 dB (diff=0.06778 dB) */
0x40, /* [075] -12.0 dB: AKM[064] = -12.007 dB (diff=0.00720 dB) */
0x44, /* [076] -11.5 dB: AKM[068] = -11.481 dB (diff=0.01937 dB) */
0x48, /* [077] -11.0 dB: AKM[072] = -10.984 dB (diff=0.01585 dB) */
0x4c, /* [078] -10.5 dB: AKM[076] = -10.515 dB (diff=0.01453 dB) */
0x51, /* [079] -10.0 dB: AKM[081] = -9.961 dB (diff=0.03890 dB) */
0x55, /* [080] -9.5 dB: AKM[085] = -9.542 dB (diff=0.04243 dB) */
0x5a, /* [081] -9.0 dB: AKM[090] = -9.046 dB (diff=0.04595 dB) */
0x60, /* [082] -8.5 dB: AKM[096] = -8.485 dB (diff=0.01462 dB) */
0x66, /* [083] -8.0 dB: AKM[102] = -7.959 dB (diff=0.04120 dB) */
0x6c, /* [084] -7.5 dB: AKM[108] = -7.462 dB (diff=0.03767 dB) */
0x72, /* [085] -7.0 dB: AKM[114] = -6.993 dB (diff=0.00729 dB) */
0x79, /* [086] -6.5 dB: AKM[121] = -6.475 dB (diff=0.02490 dB) */
0x80, /* [087] -6.0 dB: AKM[128] = -5.987 dB (diff=0.01340 dB) */
0x87, /* [088] -5.5 dB: AKM[135] = -5.524 dB (diff=0.02413 dB) */
0x8f, /* [089] -5.0 dB: AKM[143] = -5.024 dB (diff=0.02408 dB) */
0x98, /* [090] -4.5 dB: AKM[152] = -4.494 dB (diff=0.00607 dB) */
0xa1, /* [091] -4.0 dB: AKM[161] = -3.994 dB (diff=0.00571 dB) */
0xaa, /* [092] -3.5 dB: AKM[170] = -3.522 dB (diff=0.02183 dB) */
0xb5, /* [093] -3.0 dB: AKM[181] = -2.977 dB (diff=0.02277 dB) */
0xbf, /* [094] -2.5 dB: AKM[191] = -2.510 dB (diff=0.01014 dB) */
0xcb, /* [095] -2.0 dB: AKM[203] = -1.981 dB (diff=0.01912 dB) */
0xd7, /* [096] -1.5 dB: AKM[215] = -1.482 dB (diff=0.01797 dB) */
0xe3, /* [097] -1.0 dB: AKM[227] = -1.010 dB (diff=0.01029 dB) */
0xf1, /* [098] -0.5 dB: AKM[241] = -0.490 dB (diff=0.00954 dB) */
0xff, /* [099] +0.0 dB: AKM[255] = +0.000 dB (diff=0.00000 dB) */
};
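/*
 * Reading the table above (illustrative note): the index is the board
 * level in 0.5 dB steps starting at -49.5 dB, the value the nearest AKM
 * attenuation code; index 99 (+0.0 dB) maps to 0xff, which
 * hr222_set_hw_playback_level() below adds to AKM_LEFT_LEVEL_CMD or
 * AKM_RIGHT_LEVEL_CMD to form the 16-bit codec command.
 */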
static void hr222_config_akm(struct pcxhr_mgr *mgr, unsigned short data)
{
unsigned short mask = 0x8000;
/* activate access to codec registers */
PCXHR_INPB(mgr, PCXHR_XLX_HIFREQ);
while (mask) {
PCXHR_OUTPB(mgr, PCXHR_XLX_DATA,
data & mask ? PCXHR_DATA_CODEC : 0);
mask >>= 1;
}
/* terminate access to codec registers */
PCXHR_INPB(mgr, PCXHR_XLX_RUER);
}
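/*
 * Example (illustrative): hr222_config_akm(mgr, AKM_MUTE_CMD) shifts the
 * 16-bit word 0xA38D out MSB first, one PCXHR_XLX_DATA write per bit,
 * bracketed by the two dummy reads that open and close codec access.
 */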
static int hr222_set_hw_playback_level(struct pcxhr_mgr *mgr,
int idx, int level)
{
unsigned short cmd;
if (idx > 1 ||
level < 0 ||
level >= ARRAY_SIZE(g_hr222_p_level))
return -EINVAL;
if (idx == 0)
cmd = AKM_LEFT_LEVEL_CMD;
else
cmd = AKM_RIGHT_LEVEL_CMD;
/* conversion from PmBoardCodedLevel to AKM nonlinear programming */
cmd += g_hr222_p_level[level];
hr222_config_akm(mgr, cmd);
return 0;
}
static int hr222_set_hw_capture_level(struct pcxhr_mgr *mgr,
int level_l, int level_r, int level_mic)
{
/* program all input levels at the same time */
unsigned int data;
int i;
if (!mgr->capture_chips)
return -EINVAL; /* no PCX22 */
data = ((level_mic & 0xff) << 24); /* mic is mono, but apply */
data |= ((level_mic & 0xff) << 16); /* the level on both channels */
data |= ((level_r & 0xff) << 8); /* line input right channel */
data |= (level_l & 0xff); /* line input left channel */
PCXHR_INPB(mgr, PCXHR_XLX_DATA); /* activate input codec */
/* send 32 bits (4 x 8 bits) */
for (i = 0; i < 32; i++, data <<= 1) {
PCXHR_OUTPB(mgr, PCXHR_XLX_DATA,
(data & 0x80000000) ? PCXHR_DATA_CODEC : 0);
}
PCXHR_INPB(mgr, PCXHR_XLX_RUER); /* close input level codec */
return 0;
}
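/*
 * Example of the word built above (illustrative values): level_l = 0x10,
 * level_r = 0x20, level_mic = 0x30 gives data = 0x30302010 -- mic level
 * duplicated in bits 31..16, right line level in bits 15..8, left line
 * level in bits 7..0 -- bit-banged MSB first like the AKM commands.
 */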
static void hr222_micro_boost(struct pcxhr_mgr *mgr, int level);
int hr222_sub_init(struct pcxhr_mgr *mgr)
{
unsigned char reg;
mgr->board_has_analog = 1; /* analog always available */
mgr->xlx_cfg = PCXHR_CFG_SYNCDSP_MASK;
reg = PCXHR_INPB(mgr, PCXHR_XLX_STATUS);
if (reg & PCXHR_STAT_MIC_CAPS)
mgr->board_has_mic = 1; /* microphone available */
snd_printdd("MIC input available = %d\n", mgr->board_has_mic);
/* reset codec */
PCXHR_OUTPB(mgr, PCXHR_DSP_RESET,
PCXHR_DSP_RESET_DSP);
msleep(5);
mgr->dsp_reset = PCXHR_DSP_RESET_DSP |
PCXHR_DSP_RESET_MUTE |
PCXHR_DSP_RESET_CODEC;
PCXHR_OUTPB(mgr, PCXHR_DSP_RESET, mgr->dsp_reset);
/* hr222_write_gpo(mgr, 0); does the same */
msleep(5);
/* config AKM */
hr222_config_akm(mgr, AKM_POWER_CONTROL_CMD);
hr222_config_akm(mgr, AKM_CLOCK_INF_55K_CMD);
hr222_config_akm(mgr, AKM_UNMUTE_CMD);
hr222_config_akm(mgr, AKM_RESET_OFF_CMD);
/* init micro boost */
hr222_micro_boost(mgr, 0);
return 0;
}
/* calc PLL register */
/* TODO: there is a very similar function in pcxhr.c */
static int hr222_pll_freq_register(unsigned int freq,
unsigned int *pllreg,
unsigned int *realfreq)
{
unsigned int reg;
if (freq < 6900 || freq > 219000)
return -EINVAL;
reg = (28224000 * 2) / freq;
reg = (reg - 1) / 2;
if (reg < 0x100)
*pllreg = reg + 0xC00;
else if (reg < 0x200)
*pllreg = reg + 0x800;
else if (reg < 0x400)
*pllreg = reg & 0x1ff;
else if (reg < 0x800) {
*pllreg = ((reg >> 1) & 0x1ff) + 0x200;
reg &= ~1;
} else {
*pllreg = ((reg >> 2) & 0x1ff) + 0x400;
reg &= ~3;
}
if (realfreq)
*realfreq = (28224000 / (reg + 1));
return 0;
}
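/*
 * Worked example (illustrative only): for freq = 44100,
 * reg = (28224000 * 2) / 44100 = 1280, then (1280 - 1) / 2 = 639;
 * 639 falls in the 0x200..0x3ff range, so *pllreg = (639 & 0x1ff) = 0x7f
 * and *realfreq = 28224000 / (639 + 1) = 44100 Hz exactly.
 */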
int hr222_sub_set_clock(struct pcxhr_mgr *mgr,
unsigned int rate,
int *changed)
{
unsigned int speed, pllreg = 0;
int err;
unsigned realfreq = rate;
switch (mgr->use_clock_type) {
case HR22_CLOCK_TYPE_INTERNAL:
err = hr222_pll_freq_register(rate, &pllreg, &realfreq);
if (err)
return err;
mgr->xlx_cfg &= ~(PCXHR_CFG_CLOCKIN_SEL_MASK |
PCXHR_CFG_CLOCK_UER1_SEL_MASK);
break;
case HR22_CLOCK_TYPE_AES_SYNC:
mgr->xlx_cfg |= PCXHR_CFG_CLOCKIN_SEL_MASK;
mgr->xlx_cfg &= ~PCXHR_CFG_CLOCK_UER1_SEL_MASK;
break;
case HR22_CLOCK_TYPE_AES_1:
if (!mgr->board_has_aes1)
return -EINVAL;
mgr->xlx_cfg |= (PCXHR_CFG_CLOCKIN_SEL_MASK |
PCXHR_CFG_CLOCK_UER1_SEL_MASK);
break;
default:
return -EINVAL;
}
hr222_config_akm(mgr, AKM_MUTE_CMD);
if (mgr->use_clock_type == HR22_CLOCK_TYPE_INTERNAL) {
PCXHR_OUTPB(mgr, PCXHR_XLX_HIFREQ, pllreg >> 8);
PCXHR_OUTPB(mgr, PCXHR_XLX_LOFREQ, pllreg & 0xff);
}
/* set clock source */
PCXHR_OUTPB(mgr, PCXHR_XLX_CFG, mgr->xlx_cfg);
/* codec speed modes */
speed = rate < 55000 ? 0 : 1;
if (mgr->codec_speed != speed) {
mgr->codec_speed = speed;
if (speed == 0)
hr222_config_akm(mgr, AKM_CLOCK_INF_55K_CMD);
else
hr222_config_akm(mgr, AKM_CLOCK_SUP_55K_CMD);
}
mgr->sample_rate_real = realfreq;
mgr->cur_clock_type = mgr->use_clock_type;
if (changed)
*changed = 1;
hr222_config_akm(mgr, AKM_UNMUTE_CMD);
snd_printdd("set_clock to %dHz (realfreq=%d pllreg=%x)\n",
rate, realfreq, pllreg);
return 0;
}
int hr222_get_external_clock(struct pcxhr_mgr *mgr,
enum pcxhr_clock_type clock_type,
int *sample_rate)
{
int rate, calc_rate = 0;
unsigned int ticks;
unsigned char mask, reg;
if (clock_type == HR22_CLOCK_TYPE_AES_SYNC) {
mask = (PCXHR_SUER_CLOCK_PRESENT_MASK |
PCXHR_SUER_DATA_PRESENT_MASK);
reg = PCXHR_STAT_FREQ_SYNC_MASK;
} else if (clock_type == HR22_CLOCK_TYPE_AES_1 && mgr->board_has_aes1) {
mask = (PCXHR_SUER1_CLOCK_PRESENT_MASK |
PCXHR_SUER1_DATA_PRESENT_MASK);
reg = PCXHR_STAT_FREQ_UER1_MASK;
} else {
snd_printdd("get_external_clock : type %d not supported\n",
clock_type);
return -EINVAL; /* other clocks not supported */
}
if ((PCXHR_INPB(mgr, PCXHR_XLX_CSUER) & mask) != mask) {
snd_printdd("get_external_clock(%d) = 0 Hz\n", clock_type);
*sample_rate = 0;
return 0; /* no external clock locked */
}
PCXHR_OUTPB(mgr, PCXHR_XLX_STATUS, reg); /* calculate freq */
/* save the measured clock frequency */
reg |= PCXHR_STAT_FREQ_SAVE_MASK;
if (mgr->last_reg_stat != reg) {
udelay(500); /* wait min 2 cycles of lowest freq (8000) */
mgr->last_reg_stat = reg;
}
PCXHR_OUTPB(mgr, PCXHR_XLX_STATUS, reg); /* save */
/* get the frequency */
ticks = (unsigned int)PCXHR_INPB(mgr, PCXHR_XLX_CFG);
ticks = (ticks & 0x03) << 8;
ticks |= (unsigned int)PCXHR_INPB(mgr, PCXHR_DSP_RESET);
if (ticks != 0)
calc_rate = 28224000 / ticks;
/* rounding */
if (calc_rate > 184200)
rate = 192000;
else if (calc_rate > 152200)
rate = 176400;
else if (calc_rate > 112000)
rate = 128000;
else if (calc_rate > 92100)
rate = 96000;
else if (calc_rate > 76100)
rate = 88200;
else if (calc_rate > 56000)
rate = 64000;
else if (calc_rate > 46050)
rate = 48000;
else if (calc_rate > 38050)
rate = 44100;
else if (calc_rate > 28000)
rate = 32000;
else if (calc_rate > 23025)
rate = 24000;
else if (calc_rate > 19025)
rate = 22050;
else if (calc_rate > 14000)
rate = 16000;
else if (calc_rate > 11512)
rate = 12000;
else if (calc_rate > 9512)
rate = 11025;
else if (calc_rate > 7000)
rate = 8000;
else
rate = 0;
snd_printdd("External clock is at %d Hz (measured %d Hz)\n",
rate, calc_rate);
*sample_rate = rate;
return 0;
}
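/*
 * Example of the rounding above (illustrative, assuming the counter
 * measures 28224000 / rate ticks): a locked 48 kHz AES source gives
 * ticks = 588, so calc_rate = 28224000 / 588 = 48000; that is above the
 * 46050 threshold and below 56000, so the standard rate 48000 is
 * reported.
 */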
int hr222_read_gpio(struct pcxhr_mgr *mgr, int is_gpi, int *value)
{
if (is_gpi) {
unsigned char reg = PCXHR_INPB(mgr, PCXHR_XLX_STATUS);
*value = (int)(reg & PCXHR_STAT_GPI_MASK) >>
PCXHR_STAT_GPI_OFFSET;
} else {
*value = (int)(mgr->dsp_reset & PCXHR_DSP_RESET_GPO_MASK) >>
PCXHR_DSP_RESET_GPO_OFFSET;
}
return 0;
}
int hr222_write_gpo(struct pcxhr_mgr *mgr, int value)
{
unsigned char reg = mgr->dsp_reset & ~PCXHR_DSP_RESET_GPO_MASK;
reg |= (unsigned char)(value << PCXHR_DSP_RESET_GPO_OFFSET) &
PCXHR_DSP_RESET_GPO_MASK;
PCXHR_OUTPB(mgr, PCXHR_DSP_RESET, reg);
mgr->dsp_reset = reg;
return 0;
}
int hr222_update_analog_audio_level(struct snd_pcxhr *chip,
int is_capture, int channel)
{
snd_printdd("hr222_update_analog_audio_level(%s chan=%d)\n",
is_capture ? "capture" : "playback", channel);
if (is_capture) {
int level_l, level_r, level_mic;
/* we have to update all levels */
if (chip->analog_capture_active) {
level_l = chip->analog_capture_volume[0];
level_r = chip->analog_capture_volume[1];
} else {
level_l = HR222_LINE_CAPTURE_LEVEL_MIN;
level_r = HR222_LINE_CAPTURE_LEVEL_MIN;
}
if (chip->mic_active)
level_mic = chip->mic_volume;
else
level_mic = HR222_MICRO_CAPTURE_LEVEL_MIN;
return hr222_set_hw_capture_level(chip->mgr,
level_l, level_r, level_mic);
} else {
int vol;
if (chip->analog_playback_active[channel])
vol = chip->analog_playback_volume[channel];
else
vol = HR222_LINE_PLAYBACK_LEVEL_MIN;
return hr222_set_hw_playback_level(chip->mgr, channel, vol);
}
}
/*texts[5] = {"Line", "Digital", "Digi+SRC", "Mic", "Line+Mic"}*/
#define SOURCE_LINE 0
#define SOURCE_DIGITAL 1
#define SOURCE_DIGISRC 2
#define SOURCE_MIC 3
#define SOURCE_LINEMIC 4
int hr222_set_audio_source(struct snd_pcxhr *chip)
{
int digital = 0;
/* default analog source */
chip->mgr->xlx_cfg &= ~(PCXHR_CFG_SRC_MASK |
PCXHR_CFG_DATAIN_SEL_MASK |
PCXHR_CFG_DATA_UER1_SEL_MASK);
if (chip->audio_capture_source == SOURCE_DIGISRC) {
chip->mgr->xlx_cfg |= PCXHR_CFG_SRC_MASK;
digital = 1;
} else {
if (chip->audio_capture_source == SOURCE_DIGITAL)
digital = 1;
}
if (digital) {
chip->mgr->xlx_cfg |= PCXHR_CFG_DATAIN_SEL_MASK;
if (chip->mgr->board_has_aes1) {
/* get data from the AES1 plug */
chip->mgr->xlx_cfg |= PCXHR_CFG_DATA_UER1_SEL_MASK;
}
/* chip->mic_active = 0; */
/* chip->analog_capture_active = 0; */
} else {
int update_lvl = 0;
chip->analog_capture_active = 0;
chip->mic_active = 0;
if (chip->audio_capture_source == SOURCE_LINE ||
chip->audio_capture_source == SOURCE_LINEMIC) {
if (chip->analog_capture_active == 0)
update_lvl = 1;
chip->analog_capture_active = 1;
}
if (chip->audio_capture_source == SOURCE_MIC ||
chip->audio_capture_source == SOURCE_LINEMIC) {
if (chip->mic_active == 0)
update_lvl = 1;
chip->mic_active = 1;
}
if (update_lvl) {
/* capture: update all 3 mutes/unmutes with one call */
hr222_update_analog_audio_level(chip, 1, 0);
}
}
/* set the source infos (max 3 bits modified) */
PCXHR_OUTPB(chip->mgr, PCXHR_XLX_CFG, chip->mgr->xlx_cfg);
return 0;
}
int hr222_iec958_capture_byte(struct snd_pcxhr *chip,
int aes_idx, unsigned char *aes_bits)
{
unsigned char idx = (unsigned char)(aes_idx * 8);
unsigned char temp = 0;
unsigned char mask = chip->mgr->board_has_aes1 ?
PCXHR_SUER1_BIT_C_READ_MASK : PCXHR_SUER_BIT_C_READ_MASK;
int i;
for (i = 0; i < 8; i++) {
PCXHR_OUTPB(chip->mgr, PCXHR_XLX_RUER, idx++); /* idx < 192 */
temp <<= 1;
if (PCXHR_INPB(chip->mgr, PCXHR_XLX_CSUER) & mask)
temp |= 1;
}
snd_printdd("read iec958 AES %d byte %d = 0x%x\n",
chip->chip_idx, aes_idx, temp);
*aes_bits = temp;
return 0;
}
int hr222_iec958_update_byte(struct snd_pcxhr *chip,
int aes_idx, unsigned char aes_bits)
{
int i;
unsigned char new_bits = aes_bits;
unsigned char old_bits = chip->aes_bits[aes_idx];
unsigned char idx = (unsigned char)(aes_idx * 8);
for (i = 0; i < 8; i++) {
if ((old_bits & 0x01) != (new_bits & 0x01)) {
/* idx < 192 */
PCXHR_OUTPB(chip->mgr, PCXHR_XLX_RUER, idx);
/* write C and U bit */
PCXHR_OUTPB(chip->mgr, PCXHR_XLX_CSUER, new_bits&0x01 ?
PCXHR_SUER_BIT_C_WRITE_MASK : 0);
}
idx++;
old_bits >>= 1;
new_bits >>= 1;
}
chip->aes_bits[aes_idx] = aes_bits;
return 0;
}
static void hr222_micro_boost(struct pcxhr_mgr *mgr, int level)
{
unsigned char boost_mask;
boost_mask = (unsigned char) (level << PCXHR_SELMIC_PREAMPLI_OFFSET);
if (boost_mask & (~PCXHR_SELMIC_PREAMPLI_MASK))
return; /* only values from 0 to 3 are accepted */
mgr->xlx_selmic &= ~PCXHR_SELMIC_PREAMPLI_MASK;
mgr->xlx_selmic |= boost_mask;
PCXHR_OUTPB(mgr, PCXHR_XLX_SELMIC, mgr->xlx_selmic);
snd_printdd("hr222_micro_boost : set %x\n", boost_mask);
}
static void hr222_phantom_power(struct pcxhr_mgr *mgr, int power)
{
if (power)
mgr->xlx_selmic |= PCXHR_SELMIC_PHANTOM_ALIM;
else
mgr->xlx_selmic &= ~PCXHR_SELMIC_PHANTOM_ALIM;
PCXHR_OUTPB(mgr, PCXHR_XLX_SELMIC, mgr->xlx_selmic);
snd_printdd("hr222_phantom_power : set %d\n", power);
}
/* mic level */
static const DECLARE_TLV_DB_SCALE(db_scale_mic_hr222, -9850, 50, 650);
static int hr222_mic_vol_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = HR222_MICRO_CAPTURE_LEVEL_MIN; /* -98 dB */
/* gains from 9 dB to 31.5 dB not recommended; use micboost instead */
uinfo->value.integer.max = HR222_MICRO_CAPTURE_LEVEL_MAX; /* +7 dB */
return 0;
}
static int hr222_mic_vol_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
mutex_lock(&chip->mgr->mixer_mutex);
ucontrol->value.integer.value[0] = chip->mic_volume;
mutex_unlock(&chip->mgr->mixer_mutex);
return 0;
}
static int hr222_mic_vol_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
int changed = 0;
mutex_lock(&chip->mgr->mixer_mutex);
if (chip->mic_volume != ucontrol->value.integer.value[0]) {
changed = 1;
chip->mic_volume = ucontrol->value.integer.value[0];
hr222_update_analog_audio_level(chip, 1, 0);
}
mutex_unlock(&chip->mgr->mixer_mutex);
return changed;
}
static struct snd_kcontrol_new hr222_control_mic_level = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ),
.name = "Mic Capture Volume",
.info = hr222_mic_vol_info,
.get = hr222_mic_vol_get,
.put = hr222_mic_vol_put,
.tlv = { .p = db_scale_mic_hr222 },
};
/* mic boost level */
static const DECLARE_TLV_DB_SCALE(db_scale_micboost_hr222, 0, 1800, 5400);
static int hr222_mic_boost_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = 0; /* 0 dB */
uinfo->value.integer.max = 3; /* 54 dB */
return 0;
}
static int hr222_mic_boost_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
mutex_lock(&chip->mgr->mixer_mutex);
ucontrol->value.integer.value[0] = chip->mic_boost;
mutex_unlock(&chip->mgr->mixer_mutex);
return 0;
}
static int hr222_mic_boost_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
int changed = 0;
mutex_lock(&chip->mgr->mixer_mutex);
if (chip->mic_boost != ucontrol->value.integer.value[0]) {
changed = 1;
chip->mic_boost = ucontrol->value.integer.value[0];
hr222_micro_boost(chip->mgr, chip->mic_boost);
}
mutex_unlock(&chip->mgr->mixer_mutex);
return changed;
}
static struct snd_kcontrol_new hr222_control_mic_boost = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ),
.name = "MicBoost Capture Volume",
.info = hr222_mic_boost_info,
.get = hr222_mic_boost_get,
.put = hr222_mic_boost_put,
.tlv = { .p = db_scale_micboost_hr222 },
};
/******************* Phantom power switch *******************/
#define hr222_phantom_power_info snd_ctl_boolean_mono_info
static int hr222_phantom_power_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
mutex_lock(&chip->mgr->mixer_mutex);
ucontrol->value.integer.value[0] = chip->phantom_power;
mutex_unlock(&chip->mgr->mixer_mutex);
return 0;
}
static int hr222_phantom_power_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
int power, changed = 0;
mutex_lock(&chip->mgr->mixer_mutex);
power = !!ucontrol->value.integer.value[0];
if (chip->phantom_power != power) {
hr222_phantom_power(chip->mgr, power);
chip->phantom_power = power;
changed = 1;
}
mutex_unlock(&chip->mgr->mixer_mutex);
return changed;
}
static struct snd_kcontrol_new hr222_phantom_power_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Phantom Power Switch",
.info = hr222_phantom_power_info,
.get = hr222_phantom_power_get,
.put = hr222_phantom_power_put,
};
int hr222_add_mic_controls(struct snd_pcxhr *chip)
{
int err;
if (!chip->mgr->board_has_mic)
return 0;
/* controls */
err = snd_ctl_add(chip->card, snd_ctl_new1(&hr222_control_mic_level,
chip));
if (err < 0)
return err;
err = snd_ctl_add(chip->card, snd_ctl_new1(&hr222_control_mic_boost,
chip));
if (err < 0)
return err;
err = snd_ctl_add(chip->card, snd_ctl_new1(&hr222_phantom_power_switch,
chip));
return err;
}
| gpl-2.0 |
Nicklas373/AoiCore-Kernel-MSM8627-CM13 | arch/arm/mach-iop13xx/irq.c | 11805 | 5193 | /*
* iop13xx IRQ handling / support functions
* Copyright (c) 2005-2006, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/sysctl.h>
#include <asm/uaccess.h>
#include <asm/mach/irq.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/msi.h>
/* INTCTL0 CP6 R0 Page 4
*/
static u32 read_intctl_0(void)
{
u32 val;
asm volatile("mrc p6, 0, %0, c0, c4, 0":"=r" (val));
return val;
}
static void write_intctl_0(u32 val)
{
asm volatile("mcr p6, 0, %0, c0, c4, 0"::"r" (val));
}
/* INTCTL1 CP6 R1 Page 4
*/
static u32 read_intctl_1(void)
{
u32 val;
asm volatile("mrc p6, 0, %0, c1, c4, 0":"=r" (val));
return val;
}
static void write_intctl_1(u32 val)
{
asm volatile("mcr p6, 0, %0, c1, c4, 0"::"r" (val));
}
/* INTCTL2 CP6 R2 Page 4
*/
static u32 read_intctl_2(void)
{
u32 val;
asm volatile("mrc p6, 0, %0, c2, c4, 0":"=r" (val));
return val;
}
static void write_intctl_2(u32 val)
{
asm volatile("mcr p6, 0, %0, c2, c4, 0"::"r" (val));
}
/* INTCTL3 CP6 R3 Page 4
*/
static u32 read_intctl_3(void)
{
u32 val;
asm volatile("mrc p6, 0, %0, c3, c4, 0":"=r" (val));
return val;
}
static void write_intctl_3(u32 val)
{
asm volatile("mcr p6, 0, %0, c3, c4, 0"::"r" (val));
}
/* INTSTR0 CP6 R0 Page 5
*/
static void write_intstr_0(u32 val)
{
asm volatile("mcr p6, 0, %0, c0, c5, 0"::"r" (val));
}
/* INTSTR1 CP6 R1 Page 5
*/
static void write_intstr_1(u32 val)
{
asm volatile("mcr p6, 0, %0, c1, c5, 0"::"r" (val));
}
/* INTSTR2 CP6 R2 Page 5
*/
static void write_intstr_2(u32 val)
{
asm volatile("mcr p6, 0, %0, c2, c5, 0"::"r" (val));
}
/* INTSTR3 CP6 R3 Page 5
*/
static void write_intstr_3(u32 val)
{
asm volatile("mcr p6, 0, %0, c3, c5, 0"::"r" (val));
}
/* INTBASE CP6 R0 Page 2
*/
static void write_intbase(u32 val)
{
asm volatile("mcr p6, 0, %0, c0, c2, 0"::"r" (val));
}
/* INTSIZE CP6 R2 Page 2
*/
static void write_intsize(u32 val)
{
asm volatile("mcr p6, 0, %0, c2, c2, 0"::"r" (val));
}
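/*
 * Note on the accessors above: each CP6 register is addressed by a
 * (CRn, CRm) pair -- CRn selects the register, CRm the page.  They
 * could be generated by a macro; a hypothetical sketch (not used here):
 */
#if 0
#define DEFINE_INTCTL_READ(n)						\
static u32 read_intctl_##n(void)					\
{									\
	u32 val;							\
	asm volatile("mrc p6, 0, %0, c" #n ", c4, 0" : "=r" (val));	\
	return val;							\
}
#endif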
/* 0 = interrupt masked, 1 = interrupt not masked */
static void
iop13xx_irq_mask0 (struct irq_data *d)
{
write_intctl_0(read_intctl_0() & ~(1 << (d->irq - 0)));
}
static void
iop13xx_irq_mask1 (struct irq_data *d)
{
write_intctl_1(read_intctl_1() & ~(1 << (d->irq - 32)));
}
static void
iop13xx_irq_mask2 (struct irq_data *d)
{
write_intctl_2(read_intctl_2() & ~(1 << (d->irq - 64)));
}
static void
iop13xx_irq_mask3 (struct irq_data *d)
{
write_intctl_3(read_intctl_3() & ~(1 << (d->irq - 96)));
}
static void
iop13xx_irq_unmask0(struct irq_data *d)
{
write_intctl_0(read_intctl_0() | (1 << (d->irq - 0)));
}
static void
iop13xx_irq_unmask1(struct irq_data *d)
{
write_intctl_1(read_intctl_1() | (1 << (d->irq - 32)));
}
static void
iop13xx_irq_unmask2(struct irq_data *d)
{
write_intctl_2(read_intctl_2() | (1 << (d->irq - 64)));
}
static void
iop13xx_irq_unmask3(struct irq_data *d)
{
write_intctl_3(read_intctl_3() | (1 << (d->irq - 96)));
}
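/*
 * Example (illustrative): IRQ 40 lands in the second bank, so
 * iop13xx_irq_unmask1() sets bit 40 - 32 = 8 in INTCTL1 and
 * iop13xx_irq_mask1() clears it; masking doubles as the ack for these
 * level-triggered sources, as the irq_chip definitions below show.
 */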
static struct irq_chip iop13xx_irqchip1 = {
.name = "IOP13xx-1",
.irq_ack = iop13xx_irq_mask0,
.irq_mask = iop13xx_irq_mask0,
.irq_unmask = iop13xx_irq_unmask0,
};
static struct irq_chip iop13xx_irqchip2 = {
.name = "IOP13xx-2",
.irq_ack = iop13xx_irq_mask1,
.irq_mask = iop13xx_irq_mask1,
.irq_unmask = iop13xx_irq_unmask1,
};
static struct irq_chip iop13xx_irqchip3 = {
.name = "IOP13xx-3",
.irq_ack = iop13xx_irq_mask2,
.irq_mask = iop13xx_irq_mask2,
.irq_unmask = iop13xx_irq_unmask2,
};
static struct irq_chip iop13xx_irqchip4 = {
.name = "IOP13xx-4",
.irq_ack = iop13xx_irq_mask3,
.irq_mask = iop13xx_irq_mask3,
.irq_unmask = iop13xx_irq_unmask3,
};
extern void iop_init_cp6_handler(void);
void __init iop13xx_init_irq(void)
{
unsigned int i;
iop_init_cp6_handler();
/* disable all interrupts */
write_intctl_0(0);
write_intctl_1(0);
write_intctl_2(0);
write_intctl_3(0);
/* treat all as IRQ */
write_intstr_0(0);
write_intstr_1(0);
write_intstr_2(0);
write_intstr_3(0);
/* initialize the interrupt vector generator */
write_intbase(INTBASE);
write_intsize(INTSIZE_4);
for(i = 0; i <= IRQ_IOP13XX_HPI; i++) {
if (i < 32)
irq_set_chip(i, &iop13xx_irqchip1);
else if (i < 64)
irq_set_chip(i, &iop13xx_irqchip2);
else if (i < 96)
irq_set_chip(i, &iop13xx_irqchip3);
else
irq_set_chip(i, &iop13xx_irqchip4);
irq_set_handler(i, handle_level_irq);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
iop13xx_msi_init();
}
| gpl-2.0 |
rukin5197/android_kernel_htc_msm7x30 | arch/blackfin/kernel/pseudodbg.c | 13085 | 5423 | /* The fake debug assert instructions
*
* Copyright 2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
const char * const greg_names[] = {
"R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
"P0", "P1", "P2", "P3", "P4", "P5", "SP", "FP",
"I0", "I1", "I2", "I3", "M0", "M1", "M2", "M3",
"B0", "B1", "B2", "B3", "L0", "L1", "L2", "L3",
"A0.X", "A0.W", "A1.X", "A1.W", "<res>", "<res>", "ASTAT", "RETS",
"<res>", "<res>", "<res>", "<res>", "<res>", "<res>", "<res>", "<res>",
"LC0", "LT0", "LB0", "LC1", "LT1", "LB1", "CYCLES", "CYCLES2",
"USP", "SEQSTAT", "SYSCFG", "RETI", "RETX", "RETN", "RETE", "EMUDAT",
};
static const char *get_allreg_name(int grp, int reg)
{
return greg_names[(grp << 3) | reg];
}
/*
* Unfortunately, the pt_regs structure is not laid out the same way as the
* hardware register file, so we need to do some fix ups.
*
* CYCLES is not stored in the pt_regs structure, so we just read it from
* the hardware.
*
* Don't support:
* - All reserved registers
* - All registers in group 7 (supervisor-only)
*/
static bool fix_up_reg(struct pt_regs *fp, long *value, int grp, int reg)
{
long *val = &fp->r0;
unsigned long tmp;
/* Only do Dregs and Pregs for now */
if (grp == 5 ||
(grp == 4 && (reg == 4 || reg == 5)) ||
(grp == 7))
return false;
if (grp == 0 || (grp == 1 && reg < 6))
val -= (reg + 8 * grp);
else if (grp == 1 && reg == 6)
val = &fp->usp;
else if (grp == 1 && reg == 7)
val = &fp->fp;
else if (grp == 2) {
val = &fp->i0;
val -= reg;
} else if (grp == 3 && reg >= 4) {
val = &fp->l0;
val -= (reg - 4);
} else if (grp == 3 && reg < 4) {
val = &fp->b0;
val -= reg;
} else if (grp == 4 && reg < 4) {
val = &fp->a0x;
val -= reg;
} else if (grp == 4 && reg == 6)
val = &fp->astat;
else if (grp == 4 && reg == 7)
val = &fp->rets;
else if (grp == 6 && reg < 6) {
val = &fp->lc0;
val -= reg;
} else if (grp == 6 && reg == 6) {
__asm__ __volatile__("%0 = cycles;\n" : "=d"(tmp));
val = &tmp;
} else if (grp == 6 && reg == 7) {
__asm__ __volatile__("%0 = cycles2;\n" : "=d"(tmp));
val = &tmp;
}
*value = *val;
return true;
}
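/*
 * Example of the fix-ups above (illustrative): R2 is grp 0, reg 2, so
 * val = &fp->r0 - 2, which works because pt_regs declares r7..r0 in
 * descending order ending at r0.  CYCLES (grp 6, reg 6) has no pt_regs
 * slot at all and is read live from the hardware into the local tmp.
 */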
#define PseudoDbg_Assert_opcode 0xf0000000
#define PseudoDbg_Assert_expected_bits 0
#define PseudoDbg_Assert_expected_mask 0xffff
#define PseudoDbg_Assert_regtest_bits 16
#define PseudoDbg_Assert_regtest_mask 0x7
#define PseudoDbg_Assert_grp_bits 19
#define PseudoDbg_Assert_grp_mask 0x7
#define PseudoDbg_Assert_dbgop_bits 22
#define PseudoDbg_Assert_dbgop_mask 0x3
#define PseudoDbg_Assert_dontcare_bits 24
#define PseudoDbg_Assert_dontcare_mask 0x7
#define PseudoDbg_Assert_code_bits 27
#define PseudoDbg_Assert_code_mask 0x1f
/*
* DBGA - debug assert
*/
bool execute_pseudodbg_assert(struct pt_regs *fp, unsigned int opcode)
{
int expected = ((opcode >> PseudoDbg_Assert_expected_bits) & PseudoDbg_Assert_expected_mask);
int dbgop = ((opcode >> (PseudoDbg_Assert_dbgop_bits)) & PseudoDbg_Assert_dbgop_mask);
int grp = ((opcode >> (PseudoDbg_Assert_grp_bits)) & PseudoDbg_Assert_grp_mask);
int regtest = ((opcode >> (PseudoDbg_Assert_regtest_bits)) & PseudoDbg_Assert_regtest_mask);
long value;
if ((opcode & 0xFF000000) != PseudoDbg_Assert_opcode)
return false;
if (!fix_up_reg(fp, &value, grp, regtest))
return false;
if (dbgop == 0 || dbgop == 2) {
/* DBGA ( regs_lo , uimm16 ) */
/* DBGAL ( regs , uimm16 ) */
if (expected != (value & 0xFFFF)) {
pr_notice("DBGA (%s.L,0x%x) failure, got 0x%x\n",
get_allreg_name(grp, regtest),
expected, (unsigned int)(value & 0xFFFF));
return false;
}
} else if (dbgop == 1 || dbgop == 3) {
/* DBGA ( regs_hi , uimm16 ) */
/* DBGAH ( regs , uimm16 ) */
if (expected != ((value >> 16) & 0xFFFF)) {
pr_notice("DBGA (%s.H,0x%x) failure, got 0x%x\n",
get_allreg_name(grp, regtest),
expected, (unsigned int)((value >> 16) & 0xFFFF));
return false;
}
}
fp->pc += 4;
return true;
}
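/*
 * Example decode (illustrative, hand-built opcode): 0xf0001234 is
 * DBGA (R0.L, 0x1234) -- dbgop 0, grp 0 and regtest 0 select the low
 * half of R0, and the assert passes only if (R0 & 0xFFFF) == 0x1234;
 * otherwise the pr_notice() above fires and false tells the caller to
 * fall back to normal exception handling.
 */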
#define PseudoDbg_opcode 0xf8000000
#define PseudoDbg_reg_bits 0
#define PseudoDbg_reg_mask 0x7
#define PseudoDbg_grp_bits 3
#define PseudoDbg_grp_mask 0x7
#define PseudoDbg_fn_bits 6
#define PseudoDbg_fn_mask 0x3
#define PseudoDbg_code_bits 8
#define PseudoDbg_code_mask 0xff
/*
* DBG - debug (dump a register value out)
*/
bool execute_pseudodbg(struct pt_regs *fp, unsigned int opcode)
{
int grp, fn, reg;
long value, value1;
if ((opcode & 0xFF000000) != PseudoDbg_opcode)
return false;
opcode >>= 16;
grp = ((opcode >> PseudoDbg_grp_bits) & PseudoDbg_reg_mask);
fn = ((opcode >> PseudoDbg_fn_bits) & PseudoDbg_fn_mask);
reg = ((opcode >> PseudoDbg_reg_bits) & PseudoDbg_reg_mask);
if (fn == 3 && (reg == 0 || reg == 1)) {
if (!fix_up_reg(fp, &value, 4, 2 * reg))
return false;
if (!fix_up_reg(fp, &value1, 4, 2 * reg + 1))
return false;
pr_notice("DBG A%i = %02lx%08lx\n", reg, value & 0xFF, value1);
fp->pc += 2;
return true;
} else if (fn == 0) {
if (!fix_up_reg(fp, &value, grp, reg))
return false;
pr_notice("DBG %s = %08lx\n", get_allreg_name(grp, reg), value);
fp->pc += 2;
return true;
}
return false;
}
| gpl-2.0 |
silverbullettechnology/linux-adi | arch/avr32/boards/atstk1000/atstk1003.c | 13597 | 3765 | /*
* ATSTK1003 daughterboard-specific init code
*
* Copyright (C) 2007 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/spi/at73c213.h>
#include <linux/spi/spi.h>
#include <linux/atmel-mci.h>
#include <asm/setup.h>
#include <mach/at32ap700x.h>
#include <mach/board.h>
#include <mach/init.h>
#include <mach/portmux.h>
#include "atstk1000.h"
/* Oscillator frequencies. These are board specific */
unsigned long at32_board_osc_rates[3] = {
[0] = 32768, /* 32.768 kHz on RTC osc */
[1] = 20000000, /* 20 MHz on osc0 */
[2] = 12000000, /* 12 MHz on osc1 */
};
#ifdef CONFIG_BOARD_ATSTK1000_EXTDAC
static struct at73c213_board_info at73c213_data = {
.ssc_id = 0,
.shortname = "AVR32 STK1000 external DAC",
};
#endif
#ifndef CONFIG_BOARD_ATSTK100X_SW1_CUSTOM
static struct spi_board_info spi0_board_info[] __initdata = {
#ifdef CONFIG_BOARD_ATSTK1000_EXTDAC
{
/* AT73C213 */
.modalias = "at73c213",
.max_speed_hz = 200000,
.chip_select = 0,
.mode = SPI_MODE_1,
.platform_data = &at73c213_data,
},
#endif
/*
* We can control the LTV350QV LCD panel, but there isn't much
* point since we don't have an LCD controller...
*/
};
#endif
#ifdef CONFIG_BOARD_ATSTK100X_SPI1
static struct spi_board_info spi1_board_info[] __initdata = { {
/* patch in custom entries here */
} };
#endif
#ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM
static struct mci_platform_data __initdata mci0_data = {
.slot[0] = {
.bus_width = 4,
.detect_pin = -ENODEV,
.wp_pin = -ENODEV,
},
};
#endif
#ifdef CONFIG_BOARD_ATSTK1000_EXTDAC
static void __init atstk1003_setup_extdac(void)
{
struct clk *gclk;
struct clk *pll;
gclk = clk_get(NULL, "gclk0");
if (IS_ERR(gclk))
goto err_gclk;
pll = clk_get(NULL, "pll0");
if (IS_ERR(pll))
goto err_pll;
if (clk_set_parent(gclk, pll)) {
pr_debug("STK1000: failed to set pll0 as parent for DAC clock\n");
goto err_set_clk;
}
at32_select_periph(GPIO_PIOA_BASE, (1 << 30), GPIO_PERIPH_A, 0);
at73c213_data.dac_clk = gclk;
err_set_clk:
clk_put(pll);
err_pll:
clk_put(gclk);
err_gclk:
return;
}
#else
static void __init atstk1003_setup_extdac(void)
{
}
#endif /* CONFIG_BOARD_ATSTK1000_EXTDAC */
void __init setup_board(void)
{
#ifdef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM
at32_map_usart(0, 1, 0); /* USART 0/B: /dev/ttyS1, IRDA */
#else
at32_map_usart(1, 0, 0); /* USART 1/A: /dev/ttyS0, DB9 */
#endif
/* USART 2/unused: expansion connector */
at32_map_usart(3, 2, 0); /* USART 3/C: /dev/ttyS2, DB9 */
at32_setup_serial_console(0);
}
static int __init atstk1003_init(void)
{
/*
* ATSTK1000 uses 32-bit SDRAM interface. Reserve the
* SDRAM-specific pins so that nobody messes with them.
*/
at32_reserve_pin(GPIO_PIOE_BASE, ATMEL_EBI_PE_DATA_ALL);
#ifdef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM
at32_add_device_usart(1);
#else
at32_add_device_usart(0);
#endif
at32_add_device_usart(2);
#ifndef CONFIG_BOARD_ATSTK100X_SW1_CUSTOM
at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info));
#endif
#ifdef CONFIG_BOARD_ATSTK100X_SPI1
at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info));
#endif
#ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM
at32_add_device_mci(0, &mci0_data);
#endif
at32_add_device_usba(0, NULL);
#ifndef CONFIG_BOARD_ATSTK100X_SW3_CUSTOM
at32_add_device_ssc(0, ATMEL_SSC_TX);
#endif
atstk1000_setup_j2_leds();
atstk1003_setup_extdac();
return 0;
}
postcore_initcall(atstk1003_init);
| gpl-2.0 |
Stefan-Schmidt/linux-2.6 | drivers/usb/musb/cppi_dma.c | 30 | 44469 | /*
* Copyright (C) 2005-2006 by Texas Instruments
*
* This file implements a DMA interface using TI's CPPI DMA.
* For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
* The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
*/
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include "musb_core.h"
#include "musb_debug.h"
#include "cppi_dma.h"
/* CPPI DMA status 7-mar-2006:
*
* - See musb_{host,gadget}.c for more info
*
* - Correct RX DMA generally forces the engine into irq-per-packet mode,
* which can easily saturate the CPU under non-mass-storage loads.
*
* NOTES 24-aug-2006 (2.6.18-rc4):
*
* - peripheral RXDMA wedged in a test with packets of length 512/512/1.
* evidently after the 1 byte packet was received and acked, the queue
* of BDs got garbaged so it wouldn't empty the fifo. (rxcsr 0x2003,
* and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
* 004001ff 00000001 .. 8feff860) Host was just getting NAKed on tx
* of its next (512 byte) packet. IRQ issues?
*
* REVISIT: the "transfer DMA" glue between CPPI and USB fifos will
* evidently also directly update the RX and TX CSRs ... so audit all
* host and peripheral side DMA code to avoid CSR access after DMA has
* been started.
*/
/* REVISIT now we can avoid preallocating these descriptors; or
* more simply, switch to a global freelist not per-channel ones.
* Note: at full speed, 64 descriptors == 4K bulk data.
*/
#define NUM_TXCHAN_BD 64
#define NUM_RXCHAN_BD 64
static inline void cpu_drain_writebuffer(void)
{
wmb();
#ifdef CONFIG_CPU_ARM926T
/* REVISIT this "should not be needed",
* but lack of it sure seemed to hurt ...
*/
asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
#endif
}
static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
{
struct cppi_descriptor *bd = c->freelist;
if (bd)
c->freelist = bd->next;
return bd;
}
static inline void
cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
{
if (!bd)
return;
bd->next = c->freelist;
c->freelist = bd;
}
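/*
 * The two helpers above keep a simple per-channel LIFO free list:
 * cppi_bd_alloc() pops the head, cppi_bd_free() pushes a descriptor
 * back.  Minimal usage sketch (illustrative, error handling elided):
 */
#if 0
	struct cppi_descriptor *bd = cppi_bd_alloc(c);
	if (bd) {
		/* ... fill in the hardware fields and queue bd ... */
		cppi_bd_free(c, bd);	/* recycle once the DMA is done */
	}
#endif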
/*
* Start DMA controller
*
* Initialize the DMA controller as necessary.
*/
/* zero out entire rx state RAM entry for the channel */
static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
{
musb_writel(&rx->rx_skipbytes, 0, 0);
musb_writel(&rx->rx_head, 0, 0);
musb_writel(&rx->rx_sop, 0, 0);
musb_writel(&rx->rx_current, 0, 0);
musb_writel(&rx->rx_buf_current, 0, 0);
musb_writel(&rx->rx_len_len, 0, 0);
musb_writel(&rx->rx_cnt_cnt, 0, 0);
}
/* zero out entire tx state RAM entry for the channel */
static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
{
musb_writel(&tx->tx_head, 0, 0);
musb_writel(&tx->tx_buf, 0, 0);
musb_writel(&tx->tx_current, 0, 0);
musb_writel(&tx->tx_buf_current, 0, 0);
musb_writel(&tx->tx_info, 0, 0);
musb_writel(&tx->tx_rem_len, 0, 0);
/* musb_writel(&tx->tx_dummy, 0, 0); */
musb_writel(&tx->tx_complete, 0, ptr);
}
static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
{
int j;
/* initialize channel fields */
c->head = NULL;
c->tail = NULL;
c->last_processed = NULL;
c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
c->controller = cppi;
c->is_rndis = 0;
c->freelist = NULL;
/* build the BD Free list for the channel */
for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
struct cppi_descriptor *bd;
dma_addr_t dma;
bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
bd->dma = dma;
cppi_bd_free(c, bd);
}
}
static int cppi_channel_abort(struct dma_channel *);
static void cppi_pool_free(struct cppi_channel *c)
{
struct cppi *cppi = c->controller;
struct cppi_descriptor *bd;
(void) cppi_channel_abort(&c->channel);
c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
c->controller = NULL;
/* free all its bds */
bd = c->last_processed;
do {
if (bd)
dma_pool_free(cppi->pool, bd, bd->dma);
bd = cppi_bd_alloc(c);
} while (bd);
c->last_processed = NULL;
}
static int __init cppi_controller_start(struct dma_controller *c)
{
struct cppi *controller;
void __iomem *tibase;
int i;
controller = container_of(c, struct cppi, controller);
/* do whatever is necessary to start controller */
for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
controller->tx[i].transmit = true;
controller->tx[i].index = i;
}
for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
controller->rx[i].transmit = false;
controller->rx[i].index = i;
}
/* setup BD list on a per channel basis */
for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
cppi_pool_init(controller, controller->tx + i);
for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
cppi_pool_init(controller, controller->rx + i);
tibase = controller->tibase;
INIT_LIST_HEAD(&controller->tx_complete);
/* initialise tx/rx channel head pointers to zero */
for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
struct cppi_channel *tx_ch = controller->tx + i;
struct cppi_tx_stateram __iomem *tx;
INIT_LIST_HEAD(&tx_ch->tx_complete);
tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
tx_ch->state_ram = tx;
cppi_reset_tx(tx, 0);
}
for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
struct cppi_channel *rx_ch = controller->rx + i;
struct cppi_rx_stateram __iomem *rx;
INIT_LIST_HEAD(&rx_ch->tx_complete);
rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
rx_ch->state_ram = rx;
cppi_reset_rx(rx);
}
/* enable individual cppi channels */
musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
DAVINCI_DMA_ALL_CHANNELS_ENABLE);
musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
DAVINCI_DMA_ALL_CHANNELS_ENABLE);
/* enable tx/rx CPPI control */
musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
/* disable RNDIS mode, also host rx RNDIS autorequest */
musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);
return 0;
}
/*
* Stop DMA controller
*
* De-Init the DMA controller as necessary.
*/
static int cppi_controller_stop(struct dma_controller *c)
{
struct cppi *controller;
void __iomem *tibase;
int i;
controller = container_of(c, struct cppi, controller);
tibase = controller->tibase;
/* DISABLE INDIVIDUAL CHANNEL Interrupts */
musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
DAVINCI_DMA_ALL_CHANNELS_ENABLE);
musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
DAVINCI_DMA_ALL_CHANNELS_ENABLE);
DBG(1, "Tearing down RX and TX Channels\n");
for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
/* FIXME restructure of txdma to use bds like rxdma */
controller->tx[i].last_processed = NULL;
cppi_pool_free(controller->tx + i);
}
for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
cppi_pool_free(controller->rx + i);
/* in Tx Case proper teardown is supported. We resort to disabling
* Tx/Rx CPPI after cleanup of Tx channels. Before TX teardown is
* complete TX CPPI cannot be disabled.
*/
/* disable tx/rx cppi */
musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
return 0;
}
/* While dma channel is allocated, we only want the core irqs active
* for fault reports, otherwise we'd get irqs that we don't care about.
* Except for TX irqs, where dma done != fifo empty and reusable ...
*
* NOTE: docs don't say either way, but irq masking **enables** irqs.
*
* REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
*/
static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
{
musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
}
static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
{
musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
}
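/* Illustrative mapping assumed by the helpers above: RX endpoint N
 * uses bit N+8 of the DaVinci USB interrupt mask, so e.g. enabling
 * the core RX irq for endpoint 1 sets bit 9 (1 << 9 == 0x200).
 */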
/*
* Allocate a CPPI Channel for DMA. With CPPI, channels are bound to
* each transfer direction of a non-control endpoint, so allocating
* (and deallocating) is mostly a way to notice bad housekeeping on
* the software side. We assume the irqs are always active.
*/
static struct dma_channel *
cppi_channel_allocate(struct dma_controller *c,
struct musb_hw_ep *ep, u8 transmit)
{
struct cppi *controller;
u8 index;
struct cppi_channel *cppi_ch;
void __iomem *tibase;
controller = container_of(c, struct cppi, controller);
tibase = controller->tibase;
/* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
index = ep->epnum - 1;
/* return the corresponding CPPI Channel Handle, and
* probably disable the non-CPPI irq until we need it.
*/
if (transmit) {
if (index >= ARRAY_SIZE(controller->tx)) {
DBG(1, "no %cX%d CPPI channel\n", 'T', index);
return NULL;
}
cppi_ch = controller->tx + index;
} else {
if (index >= ARRAY_SIZE(controller->rx)) {
DBG(1, "no %cX%d CPPI channel\n", 'R', index);
return NULL;
}
cppi_ch = controller->rx + index;
core_rxirq_disable(tibase, ep->epnum);
}
/* REVISIT make this an error later once the same driver code works
* with the other DMA engine too
*/
if (cppi_ch->hw_ep)
DBG(1, "re-allocating DMA%d %cX channel %p\n",
index, transmit ? 'T' : 'R', cppi_ch);
cppi_ch->hw_ep = ep;
cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
cppi_ch->channel.max_len = 0x7fffffff;
DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
return &cppi_ch->channel;
}
/* Release a CPPI Channel. */
static void cppi_channel_release(struct dma_channel *channel)
{
struct cppi_channel *c;
void __iomem *tibase;
/* REVISIT: for paranoia, check state and abort if needed... */
c = container_of(channel, struct cppi_channel, channel);
tibase = c->controller->tibase;
if (!c->hw_ep)
DBG(1, "releasing idle DMA channel %p\n", c);
else if (!c->transmit)
core_rxirq_enable(tibase, c->index + 1);
/* for now, leave its cppi IRQ enabled (we won't trigger it) */
c->hw_ep = NULL;
channel->status = MUSB_DMA_STATUS_UNKNOWN;
}
/* Context: controller irqlocked */
static void
cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
{
void __iomem *base = c->controller->mregs;
struct cppi_rx_stateram __iomem *rx = c->state_ram;
musb_ep_select(base, c->index + 1);
DBG(level, "RX DMA%d%s: %d left, csr %04x, "
"%08x H%08x S%08x C%08x, "
"B%08x L%08x %08x .. %08x"
"\n",
c->index, tag,
musb_readl(c->controller->tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
musb_readw(c->hw_ep->regs, MUSB_RXCSR),
musb_readl(&rx->rx_skipbytes, 0),
musb_readl(&rx->rx_head, 0),
musb_readl(&rx->rx_sop, 0),
musb_readl(&rx->rx_current, 0),
musb_readl(&rx->rx_buf_current, 0),
musb_readl(&rx->rx_len_len, 0),
musb_readl(&rx->rx_cnt_cnt, 0),
musb_readl(&rx->rx_complete, 0)
);
}
/* Context: controller irqlocked */
static void
cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
{
void __iomem *base = c->controller->mregs;
struct cppi_tx_stateram __iomem *tx = c->state_ram;
musb_ep_select(base, c->index + 1);
DBG(level, "TX DMA%d%s: csr %04x, "
"H%08x S%08x C%08x %08x, "
"F%08x L%08x .. %08x"
"\n",
c->index, tag,
musb_readw(c->hw_ep->regs, MUSB_TXCSR),
musb_readl(&tx->tx_head, 0),
musb_readl(&tx->tx_buf, 0),
musb_readl(&tx->tx_current, 0),
musb_readl(&tx->tx_buf_current, 0),
musb_readl(&tx->tx_info, 0),
musb_readl(&tx->tx_rem_len, 0),
/* dummy/unused word 6 */
musb_readl(&tx->tx_complete, 0)
);
}
/* Context: controller irqlocked */
static inline void
cppi_rndis_update(struct cppi_channel *c, int is_rx,
void __iomem *tibase, int is_rndis)
{
/* we may need to change the rndis flag for this cppi channel */
if (c->is_rndis != is_rndis) {
u32 value = musb_readl(tibase, DAVINCI_RNDIS_REG);
u32 temp = 1 << (c->index);
if (is_rx)
temp <<= 16;
if (is_rndis)
value |= temp;
else
value &= ~temp;
musb_writel(tibase, DAVINCI_RNDIS_REG, value);
c->is_rndis = is_rndis;
}
}
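/* Register layout implied by the code above (illustrative): TX
 * channels occupy bits 0..15 of DAVINCI_RNDIS_REG and RX channels
 * bits 16..31, so enabling RNDIS on RX channel 2 sets bit 18,
 * (1 << 2) << 16 == 0x40000.
 */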
#ifdef CONFIG_USB_MUSB_DEBUG
static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
{
pr_debug("RXBD/%s %08x: "
"nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
tag, bd->dma,
bd->hw_next, bd->hw_bufp, bd->hw_off_len,
bd->hw_options);
}
#endif
static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
{
#ifdef CONFIG_USB_MUSB_DEBUG
struct cppi_descriptor *bd;
if (!_dbg_level(level))
return;
cppi_dump_rx(level, rx, tag);
if (rx->last_processed)
cppi_dump_rxbd("last", rx->last_processed);
for (bd = rx->head; bd; bd = bd->next)
cppi_dump_rxbd("active", bd);
#endif
}
/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
* so we won't ever use it (see "CPPI RX Woes" below).
*/
static inline int cppi_autoreq_update(struct cppi_channel *rx,
void __iomem *tibase, int onepacket, unsigned n_bds)
{
u32 val;
#ifdef RNDIS_RX_IS_USABLE
u32 tmp;
/* assert(is_host_active(musb)) */
/* start from "AutoReq never" */
tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
val = tmp & ~((0x3) << (rx->index * 2));
/* HCD arranged reqpkt for packet #1. we arrange int
* for all but the last one, maybe in two segments.
*/
if (!onepacket) {
#if 0
/* use two segments, autoreq "all" then the last "never" */
val |= ((0x3) << (rx->index * 2));
n_bds--;
#else
/* one segment, autoreq "all-but-last" */
val |= ((0x1) << (rx->index * 2));
#endif
}
if (val != tmp) {
int n = 100;
/* make sure that autoreq is updated before continuing */
musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
do {
tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
if (tmp == val)
break;
cpu_relax();
} while (n-- > 0);
}
#endif
/* REQPKT is turned off after each segment */
if (n_bds && rx->channel.actual_len) {
void __iomem *regs = rx->hw_ep->regs;
val = musb_readw(regs, MUSB_RXCSR);
if (!(val & MUSB_RXCSR_H_REQPKT)) {
val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
musb_writew(regs, MUSB_RXCSR, val);
/* flush write buffer */
val = musb_readw(regs, MUSB_RXCSR);
}
}
return n_bds;
}
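/* Illustrative reading of the autoreq handling above: each RX channel
 * owns a 2-bit field at bit (index * 2) of DAVINCI_AUTOREQ_REG, where
 * 0x0 means "autoreq never", 0x1 "all but the last packet" and 0x3
 * "always".
 */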
/* Buffer enqueuing Logic:
*
* - RX builds new queues each time, to help handle routine "early
* termination" cases (faults, including errors and short reads)
* more correctly.
*
* - for now, TX reuses the same queue of BDs every time
*
* REVISIT long term, we want a normal dynamic model.
* ... the goal will be to append to the
* existing queue, processing completed "dma buffers" (segments) on the fly.
*
* Otherwise we force an IRQ latency between requests, which slows us a lot
* (especially in "transparent" dma). Unfortunately that model seems to be
* inherent in the DMA model from the Mentor code, except in the rare case
* of transfers big enough (~128+ KB) that we could append "middle" segments
* in the TX paths. (RX can't do this, see below.)
*
* That's true even in the CPPI-friendly iso case, where most urbs have
* several small segments provided in a group and where the "packet at a time"
* "transparent" DMA model is always correct, even on the RX side.
*/
/*
* CPPI TX:
* ========
* TX is a lot more reasonable than RX; it doesn't need to run in
* irq-per-packet mode very often. RNDIS mode seems to behave too
* (except how it handles the exactly-N-packets case). Building a
* txdma queue with multiple requests (urb or usb_request) looks
* like it would work ... but fault handling would need much testing.
*
* The main issue with TX mode RNDIS relates to transfer lengths that
* are an exact multiple of the packet length. It appears that there's
* a hiccup in that case (maybe the DMA completes before the ZLP gets
* written?) boiling down to not being able to rely on CPPI writing any
* terminating zero length packet before the next transfer is written.
* So that's punted to PIO; better yet, gadget drivers can avoid it.
*
* Plus, there's allegedly an undocumented constraint that rndis transfer
* length be a multiple of 64 bytes ... but the chip doesn't act that
* way, and we really don't _want_ that behavior anyway.
*
* On TX, "transparent" mode works ... although experiments have shown
* problems trying to use the SOP/EOP bits in different USB packets.
*
* REVISIT try to handle terminating zero length packets using CPPI
* instead of doing it by PIO after an IRQ. (Meanwhile, make Ethernet
* links avoid that issue by forcing them to avoid zlps.)
*/
static void
cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
{
unsigned maxpacket = tx->maxpacket;
dma_addr_t addr = tx->buf_dma + tx->offset;
size_t length = tx->buf_len - tx->offset;
struct cppi_descriptor *bd;
unsigned n_bds;
unsigned i;
struct cppi_tx_stateram __iomem *tx_ram = tx->state_ram;
int rndis;
/* TX can use the CPPI "rndis" mode, where we can probably fit this
* transfer in one BD and one IRQ. The only time we would NOT want
* to use it is when hardware constraints prevent it, or if we'd
* trigger the "send a ZLP?" confusion.
*/
rndis = (maxpacket & 0x3f) == 0
&& length > maxpacket
&& length < 0xffff
&& (length % maxpacket) != 0;
if (rndis) {
maxpacket = length;
n_bds = 1;
} else {
n_bds = length / maxpacket;
if (!length || (length % maxpacket))
n_bds++;
n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
length = min(n_bds * maxpacket, length);
}
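/* e.g. (illustrative): a 1536-byte transfer at maxpacket 512 is an
 * exact multiple, so the rndis test above fails and this path queues
 * n_bds = 1536/512 = 3 full-packet BDs; a 300-byte transfer gets
 * n_bds = 0 + 1 = 1, carrying one partial packet.
 */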
DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
tx->index,
maxpacket,
rndis ? "rndis" : "transparent",
n_bds,
addr, length);
cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);
/* assuming here that channel_program is called during
* transfer initiation ... current code maintains state
* for one outstanding request only (no queues, not even
* the implicit ones of an iso urb).
*/
bd = tx->freelist;
tx->head = bd;
tx->last_processed = NULL;
/* FIXME use BD pool like RX side does, and just queue
* the minimum number for this request.
*/
/* Prepare queue of BDs first, then hand it to hardware.
* All BDs except maybe the last should be of full packet
* size; for RNDIS there _is_ only that last packet.
*/
for (i = 0; i < n_bds; ) {
if (++i < n_bds && bd->next)
bd->hw_next = bd->next->dma;
else
bd->hw_next = 0;
bd->hw_bufp = tx->buf_dma + tx->offset;
/* FIXME set EOP only on the last packet,
* SOP only on the first ... avoid IRQs
*/
if ((tx->offset + maxpacket) <= tx->buf_len) {
tx->offset += maxpacket;
bd->hw_off_len = maxpacket;
bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
| CPPI_OWN_SET | maxpacket;
} else {
/* only this one may be a partial USB Packet */
u32 partial_len;
partial_len = tx->buf_len - tx->offset;
tx->offset = tx->buf_len;
bd->hw_off_len = partial_len;
bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
| CPPI_OWN_SET | partial_len;
if (partial_len == 0)
bd->hw_options |= CPPI_ZERO_SET;
}
DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
bd, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options);
/* update the last BD enqueued to the list */
tx->tail = bd;
bd = bd->next;
}
/* BDs live in DMA-coherent memory, but writes might be pending */
cpu_drain_writebuffer();
/* Write to the HeadPtr in state RAM to trigger */
musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);
cppi_dump_tx(5, tx, "/S");
}
/*
* CPPI RX Woes:
* =============
* Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte
* packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
* (Full speed transfers have similar scenarios.)
*
* The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
* and the next packet goes into a buffer that's queued later; while (b) fills
* the buffer with 1024 bytes. How to do that with CPPI?
*
* - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
* (b) loses **BADLY** because nothing (!) happens when that second packet
* fills the buffer, much less when a third one arrives. (Which makes this
* not a "true" RNDIS mode. In the RNDIS protocol short-packet termination
* is optional, and it's fine if peripherals -- not hosts! -- pad messages
* out to end-of-buffer. Standard PCI host controller DMA descriptors
* implement that mode by default ... which is no accident.)
*
* - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
* converse problems: (b) is handled right, but (a) loses badly. CPPI RX
* ignores SOP/EOP markings and processes both of those BDs; so both packets
* are loaded into the buffer (with a 212 byte gap between them), and the next
* buffer queued will NOT get its 300 bytes of data. (It seems like SOP/EOP
* are intended as outputs for RX queues, not inputs...)
*
* - A variant of "transparent" mode -- one BD at a time -- is the only way to
* reliably make both cases work, with software handling both cases correctly
* and at the significant penalty of needing an IRQ per packet. (The lack of
* I/O overlap can be slightly ameliorated by enabling double buffering.)
*
* So how to get rid of IRQ-per-packet? The transparent multi-BD case could
* be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
* (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
* with guaranteed driver level fault recovery and scrubbing out what's left
* of that garbaged datastream.
*
* But there seems to be no way to identify the cases where CPPI RNDIS mode
* is appropriate -- which do NOT include RNDIS host drivers, but do include
* the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
* So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
* that applies best on the peripheral side (and which could fail rudely).
*
* Leaving only "transparent" mode; we avoid multi-bd modes in almost all
* cases other than mass storage class. Otherwise we're correct but slow,
* since CPPI penalizes our need for a "true RNDIS" default mode.
*/
/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
*
* IFF
* (a) peripheral mode ... since rndis peripherals could pad their
* writes to hosts, causing i/o failure; or we'd have to cope with
* a largely unknowable variety of host side protocol variants
* (b) and short reads are NOT errors ... since full reads would
* cause those same i/o failures
* (c) and read length is
* - less than 64KB (max per cppi descriptor)
* - not a multiple of 4096 (g_zero default, full reads typical)
* - N (>1) packets long, ditto (full reads not EXPECTED)
* THEN
* try rx rndis mode
*
* Cost of heuristic failing: RXDMA wedges at the end of transfers that
* fill out the whole buffer. Buggy host side usb network drivers could
* trigger that, but "in the field" such bugs seem to be all but unknown.
*
* So this module parameter lets the heuristic be disabled. When using
* gadgetfs, the heuristic will probably need to be disabled.
*/
static bool cppi_rx_rndis = true;
module_param(cppi_rx_rndis, bool, 0);
MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
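/* Usage sketch (module name assumed, not taken from this file):
 * loading with "modprobe musb_hdrc cppi_rx_rndis=0" would force the
 * safe irq-per-packet behavior, e.g. when using gadgetfs.
 */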
/**
* cppi_next_rx_segment - dma read for the next chunk of a buffer
* @musb: the controller
* @rx: dma channel
* @onepacket: true unless caller treats short reads as errors, and
* performs fault recovery above usbcore.
* Context: controller irqlocked
*
* See above notes about why we can't use multi-BD RX queues except in
* rare cases (mass storage class), and can never use the hardware "rndis"
* mode (since it's not a "true" RNDIS mode) with complete safety.
*
* It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
* code to recover from corrupted datastreams after each short transfer.
*/
static void
cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
{
unsigned maxpacket = rx->maxpacket;
dma_addr_t addr = rx->buf_dma + rx->offset;
size_t length = rx->buf_len - rx->offset;
struct cppi_descriptor *bd, *tail;
unsigned n_bds;
unsigned i;
void __iomem *tibase = musb->ctrl_base;
int is_rndis = 0;
struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram;
if (onepacket) {
/* almost every USB driver, host or peripheral side */
n_bds = 1;
/* maybe apply the heuristic above */
if (cppi_rx_rndis
&& is_peripheral_active(musb)
&& length > maxpacket
&& (length & ~0xffff) == 0
&& (length & 0x0fff) != 0
&& (length & (maxpacket - 1)) == 0) {
maxpacket = length;
is_rndis = 1;
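/* e.g. (illustrative): a 3072-byte peripheral read at maxpacket
 * 512 passes every test above and uses RX RNDIS mode; an
 * 8192-byte read fails the (length & 0x0fff) check and stays
 * in one-packet mode.
 */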
}
} else {
/* virtually nothing except mass storage class */
if (length > 0xffff) {
n_bds = 0xffff / maxpacket;
length = n_bds * maxpacket;
} else {
n_bds = length / maxpacket;
if (length % maxpacket)
n_bds++;
}
if (n_bds == 1)
onepacket = 1;
else
n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
}
/* In host mode, autorequest logic can generate some IN tokens; it's
* tricky since we can't leave REQPKT set in RXCSR after the transfer
* finishes. So: multipacket transfers involve two or more segments.
* And always at least two IRQs ... RNDIS mode is not an option.
*/
if (is_host_active(musb))
n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);
cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);
length = min(n_bds * maxpacket, length);
DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
"dma 0x%x len %u %u/%u\n",
rx->index, maxpacket,
onepacket
? (is_rndis ? "rndis" : "onepacket")
: "multipacket",
n_bds,
musb_readl(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
& 0xffff,
addr, length, rx->channel.actual_len, rx->buf_len);
/* only queue one segment at a time, since the hardware prevents
* correct queue shutdown after unexpected short packets
*/
bd = cppi_bd_alloc(rx);
rx->head = bd;
/* Build BDs for all packets in this segment */
for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
u32 bd_len;
if (i) {
bd = cppi_bd_alloc(rx);
if (!bd)
break;
tail->next = bd;
tail->hw_next = bd->dma;
}
bd->hw_next = 0;
/* all but the last packet will be maxpacket size */
if (maxpacket < length)
bd_len = maxpacket;
else
bd_len = length;
bd->hw_bufp = addr;
addr += bd_len;
rx->offset += bd_len;
bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
bd->buflen = bd_len;
bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
length -= bd_len;
}
/* we always expect at least one reusable BD! */
if (!tail) {
WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
return;
} else if (i < n_bds)
WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);
tail->next = NULL;
tail->hw_next = 0;
bd = rx->head;
rx->tail = tail;
/* short reads and other faults should terminate this entire
* dma segment. we want one "dma packet" per dma segment, not
* one per USB packet, terminating the whole queue at once...
* NOTE that current hardware seems to ignore SOP and EOP.
*/
bd->hw_options |= CPPI_SOP_SET;
tail->hw_options |= CPPI_EOP_SET;
#ifdef CONFIG_USB_MUSB_DEBUG
if (_dbg_level(5)) {
struct cppi_descriptor *d;
for (d = rx->head; d; d = d->next)
cppi_dump_rxbd("S", d);
}
#endif
/* in case the preceding transfer left some state... */
tail = rx->last_processed;
if (tail) {
tail->next = bd;
tail->hw_next = bd->dma;
}
core_rxirq_enable(tibase, rx->index + 1);
/* BDs live in DMA-coherent memory, but writes might be pending */
cpu_drain_writebuffer();
/* REVISIT specs say to write this AFTER the BUFCNT register
* below ... but that loses badly.
*/
musb_writel(&rx_ram->rx_head, 0, bd->dma);
/* bufferCount must be at least 3, and zeroes on completion
* unless it underflows below zero, or stops at two, or keeps
* growing ... grr.
*/
i = musb_readl(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
& 0xffff;
if (!i)
musb_writel(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
n_bds + 2);
else if (n_bds > (i - 3))
musb_writel(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
n_bds - (i - 3));
i = musb_readl(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
& 0xffff;
if (i < (2 + n_bds)) {
DBG(2, "bufcnt%d underrun - %d (for %d)\n",
rx->index, i, n_bds);
musb_writel(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
n_bds + 2);
}
cppi_dump_rx(4, rx, "/S");
}
/**
* cppi_channel_program - program channel for data transfer
* @ch: the channel
* @maxpacket: max packet size
* @mode: For RX, 1 unless the usb protocol driver promised to treat
* all short reads as errors and kick in high level fault recovery.
* For TX, ignored because of RNDIS mode races/glitches.
* @dma_addr: dma address of buffer
* @len: length of buffer
* Context: controller irqlocked
*/
static int cppi_channel_program(struct dma_channel *ch,
u16 maxpacket, u8 mode,
dma_addr_t dma_addr, u32 len)
{
struct cppi_channel *cppi_ch;
struct cppi *controller;
struct musb *musb;
cppi_ch = container_of(ch, struct cppi_channel, channel);
controller = cppi_ch->controller;
musb = controller->musb;
switch (ch->status) {
case MUSB_DMA_STATUS_BUS_ABORT:
case MUSB_DMA_STATUS_CORE_ABORT:
/* fault irq handler should have handled cleanup */
WARNING("%cX DMA%d not cleaned up after abort!\n",
cppi_ch->transmit ? 'T' : 'R',
cppi_ch->index);
/* WARN_ON(1); */
break;
case MUSB_DMA_STATUS_BUSY:
WARNING("program active channel? %cX DMA%d\n",
cppi_ch->transmit ? 'T' : 'R',
cppi_ch->index);
/* WARN_ON(1); */
break;
case MUSB_DMA_STATUS_UNKNOWN:
DBG(1, "%cX DMA%d not allocated!\n",
cppi_ch->transmit ? 'T' : 'R',
cppi_ch->index);
/* FALLTHROUGH */
case MUSB_DMA_STATUS_FREE:
break;
}
ch->status = MUSB_DMA_STATUS_BUSY;
/* set transfer parameters, then queue up its first segment */
cppi_ch->buf_dma = dma_addr;
cppi_ch->offset = 0;
cppi_ch->maxpacket = maxpacket;
cppi_ch->buf_len = len;
cppi_ch->channel.actual_len = 0;
/* TX channel? or RX? */
if (cppi_ch->transmit)
cppi_next_tx_segment(musb, cppi_ch);
else
cppi_next_rx_segment(musb, cppi_ch, mode);
return true;
}
static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
{
struct cppi_channel *rx = &cppi->rx[ch];
struct cppi_rx_stateram __iomem *state = rx->state_ram;
struct cppi_descriptor *bd;
struct cppi_descriptor *last = rx->last_processed;
bool completed = false;
bool acked = false;
int i;
dma_addr_t safe2ack;
void __iomem *regs = rx->hw_ep->regs;
cppi_dump_rx(6, rx, "/K");
bd = last ? last->next : rx->head;
if (!bd)
return false;
/* run through all completed BDs */
for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
(safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
i++, bd = bd->next) {
u16 len;
/* catch latest BD writes from CPPI */
rmb();
if (!completed && (bd->hw_options & CPPI_OWN_SET))
break;
DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
"off.len %08x opt.len %08x (%d)\n",
bd->dma, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options,
rx->channel.actual_len);
/* actual packet received length */
if ((bd->hw_options & CPPI_SOP_SET) && !completed)
len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
else
len = 0;
if (bd->hw_options & CPPI_EOQ_MASK)
completed = true;
if (!completed && len < bd->buflen) {
/* NOTE: when we get a short packet, RXCSR_H_REQPKT
* must have been cleared, and no more DMA packets may
* be active in the queue... TI docs didn't say, but
* CPPI ignores those BDs even though OWN is still set.
*/
completed = true;
DBG(3, "rx short %d/%d (%d)\n",
len, bd->buflen,
rx->channel.actual_len);
}
/* If we got here, we expect to ack at least one BD; meanwhile
* CPPI may be completing other BDs while we scan this list...
*
* RACE: we can notice OWN cleared before CPPI raises the
* matching irq by writing that BD as the completion pointer.
* In such cases, stop scanning and wait for the irq, avoiding
* lost acks and states where BD ownership is unclear.
*/
if (bd->dma == safe2ack) {
musb_writel(&state->rx_complete, 0, safe2ack);
safe2ack = musb_readl(&state->rx_complete, 0);
acked = true;
if (bd->dma == safe2ack)
safe2ack = 0;
}
rx->channel.actual_len += len;
cppi_bd_free(rx, last);
last = bd;
/* stop scanning on end-of-segment */
if (bd->hw_next == 0)
completed = true;
}
rx->last_processed = last;
/* dma abort, lost ack, or ... */
if (!acked && last) {
int csr;
if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
musb_writel(&state->rx_complete, 0, safe2ack);
if (safe2ack == 0) {
cppi_bd_free(rx, last);
rx->last_processed = NULL;
/* if we land here on the host side, H_REQPKT will
* be clear and we need to restart the queue...
*/
WARN_ON(rx->head);
}
musb_ep_select(cppi->mregs, rx->index + 1);
csr = musb_readw(regs, MUSB_RXCSR);
if (csr & MUSB_RXCSR_DMAENAB) {
DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
rx->index,
rx->head, rx->tail,
rx->last_processed
? rx->last_processed->dma
: 0,
completed ? ", completed" : "",
csr);
cppi_dump_rxq(4, "/what?", rx);
}
}
if (!completed) {
int csr;
rx->head = bd;
/* REVISIT seems like "autoreq all but EOP" doesn't...
* setting it here "should" be racy, but seems to work
*/
csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
if (is_host_active(cppi->musb)
&& bd
&& !(csr & MUSB_RXCSR_H_REQPKT)) {
csr |= MUSB_RXCSR_H_REQPKT;
musb_writew(regs, MUSB_RXCSR,
MUSB_RXCSR_H_WZC_BITS | csr);
csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
}
} else {
rx->head = NULL;
rx->tail = NULL;
}
cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
return completed;
}
irqreturn_t cppi_interrupt(int irq, void *dev_id)
{
struct musb *musb = dev_id;
struct cppi *cppi;
void __iomem *tibase;
struct musb_hw_ep *hw_ep = NULL;
u32 rx, tx;
int i, index;
unsigned long uninitialized_var(flags);
cppi = container_of(musb->dma_controller, struct cppi, controller);
if (cppi->irq)
spin_lock_irqsave(&musb->lock, flags);
tibase = musb->ctrl_base;
tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
if (!tx && !rx) {
if (cppi->irq)
spin_unlock_irqrestore(&musb->lock, flags);
return IRQ_NONE;
}
DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx);
/* process TX channels */
for (index = 0; tx; tx = tx >> 1, index++) {
struct cppi_channel *tx_ch;
struct cppi_tx_stateram __iomem *tx_ram;
bool completed = false;
struct cppi_descriptor *bd;
if (!(tx & 1))
continue;
tx_ch = cppi->tx + index;
tx_ram = tx_ch->state_ram;
/* FIXME need a cppi_tx_scan() routine, which
* can also be called from abort code
*/
cppi_dump_tx(5, tx_ch, "/E");
bd = tx_ch->head;
/*
* If head is NULL then this could mean that an abort interrupt
* needs to be acknowledged.
*/
if (NULL == bd) {
DBG(1, "null BD\n");
musb_writel(&tx_ram->tx_complete, 0, 0);
continue;
}
/* run through all completed BDs */
for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
i++, bd = bd->next) {
u16 len;
/* catch latest BD writes from CPPI */
rmb();
if (bd->hw_options & CPPI_OWN_SET)
break;
DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n",
bd, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options);
len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
tx_ch->channel.actual_len += len;
tx_ch->last_processed = bd;
/* write completion register to acknowledge
* processing of completed BDs, and possibly
* release the IRQ; EOQ might not be set ...
*
* REVISIT use the same ack strategy as rx
*
* REVISIT have observed bit 18 set; huh??
*/
/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
musb_writel(&tx_ram->tx_complete, 0, bd->dma);
/* stop scanning on end-of-segment */
if (bd->hw_next == 0)
completed = true;
}
/* on end of segment, maybe go to next one */
if (completed) {
/* cppi_dump_tx(4, tx_ch, "/complete"); */
/* transfer more, or report completion */
if (tx_ch->offset >= tx_ch->buf_len) {
tx_ch->head = NULL;
tx_ch->tail = NULL;
tx_ch->channel.status = MUSB_DMA_STATUS_FREE;
hw_ep = tx_ch->hw_ep;
musb_dma_completion(musb, index + 1, 1);
} else {
/* Bigger transfer than we could fit in
* that first batch of descriptors...
*/
cppi_next_tx_segment(musb, tx_ch);
}
} else
tx_ch->head = bd;
}
/* Start processing the RX block */
for (index = 0; rx; rx = rx >> 1, index++) {
if (rx & 1) {
struct cppi_channel *rx_ch;
rx_ch = cppi->rx + index;
/* let incomplete dma segments finish */
if (!cppi_rx_scan(cppi, index))
continue;
/* start another dma segment if needed */
if (rx_ch->channel.actual_len != rx_ch->buf_len
&& rx_ch->channel.actual_len
== rx_ch->offset) {
cppi_next_rx_segment(musb, rx_ch, 1);
continue;
}
/* all segments completed! */
rx_ch->channel.status = MUSB_DMA_STATUS_FREE;
hw_ep = rx_ch->hw_ep;
core_rxirq_disable(tibase, index + 1);
musb_dma_completion(musb, index + 1, 0);
}
}
/* write to CPPI EOI register to re-enable interrupts */
musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
if (cppi->irq)
spin_unlock_irqrestore(&musb->lock, flags);
return IRQ_HANDLED;
}
/* Instantiate a software object representing a DMA controller. */
struct dma_controller *__init
dma_controller_create(struct musb *musb, void __iomem *mregs)
{
struct cppi *controller;
struct device *dev = musb->controller;
struct platform_device *pdev = to_platform_device(dev);
int irq = platform_get_irq(pdev, 1);
controller = kzalloc(sizeof *controller, GFP_KERNEL);
if (!controller)
return NULL;
controller->mregs = mregs;
controller->tibase = mregs - DAVINCI_BASE_OFFSET;
controller->musb = musb;
controller->controller.start = cppi_controller_start;
controller->controller.stop = cppi_controller_stop;
controller->controller.channel_alloc = cppi_channel_allocate;
controller->controller.channel_release = cppi_channel_release;
controller->controller.channel_program = cppi_channel_program;
controller->controller.channel_abort = cppi_channel_abort;
/* NOTE: allocating from on-chip SRAM would give the least
* contention for memory access, if that ever matters here.
*/
/* setup BufferPool */
controller->pool = dma_pool_create("cppi",
controller->musb->controller,
sizeof(struct cppi_descriptor),
CPPI_DESCRIPTOR_ALIGN, 0);
if (!controller->pool) {
kfree(controller);
return NULL;
}
if (irq > 0) {
if (request_irq(irq, cppi_interrupt, 0, "cppi-dma", musb)) {
dev_err(dev, "request_irq %d failed!\n", irq);
dma_controller_destroy(&controller->controller);
return NULL;
}
controller->irq = irq;
}
return &controller->controller;
}
/*
* Destroy a previously-instantiated DMA controller.
*/
void dma_controller_destroy(struct dma_controller *c)
{
struct cppi *cppi;
cppi = container_of(c, struct cppi, controller);
if (cppi->irq)
free_irq(cppi->irq, cppi->musb);
/* assert: caller stopped the controller first */
dma_pool_destroy(cppi->pool);
kfree(cppi);
}
/*
* Context: controller irqlocked, endpoint selected
*/
static int cppi_channel_abort(struct dma_channel *channel)
{
struct cppi_channel *cppi_ch;
struct cppi *controller;
void __iomem *mbase;
void __iomem *tibase;
void __iomem *regs;
u32 value;
struct cppi_descriptor *queue;
cppi_ch = container_of(channel, struct cppi_channel, channel);
controller = cppi_ch->controller;
switch (channel->status) {
case MUSB_DMA_STATUS_BUS_ABORT:
case MUSB_DMA_STATUS_CORE_ABORT:
/* from RX or TX fault irq handler */
case MUSB_DMA_STATUS_BUSY:
/* the hardware needs shutting down */
regs = cppi_ch->hw_ep->regs;
break;
case MUSB_DMA_STATUS_UNKNOWN:
case MUSB_DMA_STATUS_FREE:
return 0;
default:
return -EINVAL;
}
if (!cppi_ch->transmit && cppi_ch->head)
cppi_dump_rxq(3, "/abort", cppi_ch);
mbase = controller->mregs;
tibase = controller->tibase;
queue = cppi_ch->head;
cppi_ch->head = NULL;
cppi_ch->tail = NULL;
/* REVISIT should rely on caller having done this,
* and caller should rely on us not changing it.
* peripheral code is safe ... check host too.
*/
musb_ep_select(mbase, cppi_ch->index + 1);
if (cppi_ch->transmit) {
struct cppi_tx_stateram __iomem *tx_ram;
/* REVISIT put timeouts on these controller handshakes */
cppi_dump_tx(6, cppi_ch, " (teardown)");
/* teardown DMA engine then usb core */
do {
value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
} while (!(value & CPPI_TEAR_READY));
musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);
tx_ram = cppi_ch->state_ram;
do {
value = musb_readl(&tx_ram->tx_complete, 0);
} while (0xFFFFFFFC != value);
/* FIXME clean up the transfer state ... here?
* the completion routine should get called with
* an appropriate status code.
*/
value = musb_readw(regs, MUSB_TXCSR);
value &= ~MUSB_TXCSR_DMAENAB;
value |= MUSB_TXCSR_FLUSHFIFO;
musb_writew(regs, MUSB_TXCSR, value);
musb_writew(regs, MUSB_TXCSR, value);
/*
* 1. Write to completion Ptr value 0x1(bit 0 set)
* (write back mode)
* 2. Wait for abort interrupt and then put the channel in
* compare mode by writing 1 to the tx_complete register.
*/
cppi_reset_tx(tx_ram, 1);
cppi_ch->head = NULL;
musb_writel(&tx_ram->tx_complete, 0, 1);
cppi_dump_tx(5, cppi_ch, " (done teardown)");
/* REVISIT tx side _should_ clean up the same way
* as the RX side ... this does no cleanup at all!
*/
} else /* RX */ {
u16 csr;
/* NOTE: docs don't guarantee any of this works ... we
* expect that if the usb core stops telling the cppi core
* to pull more data from it, then it'll be safe to flush
* current RX DMA state iff any pending fifo transfer is done.
*/
core_rxirq_disable(tibase, cppi_ch->index + 1);
/* for host, ensure ReqPkt is never set again */
if (is_host_active(cppi_ch->controller->musb)) {
value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
value &= ~((0x3) << (cppi_ch->index * 2));
musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
}
csr = musb_readw(regs, MUSB_RXCSR);
/* for host, clear (just) ReqPkt at end of current packet(s) */
if (is_host_active(cppi_ch->controller->musb)) {
csr |= MUSB_RXCSR_H_WZC_BITS;
csr &= ~MUSB_RXCSR_H_REQPKT;
} else
csr |= MUSB_RXCSR_P_WZC_BITS;
/* clear dma enable */
csr &= ~(MUSB_RXCSR_DMAENAB);
musb_writew(regs, MUSB_RXCSR, csr);
csr = musb_readw(regs, MUSB_RXCSR);
/* Quiesce: wait for current dma to finish (if not cleanup).
* We can't use bit zero of stateram->rx_sop, since that
* refers to an entire "DMA packet" not just emptying the
* current fifo. Most segments need multiple usb packets.
*/
if (channel->status == MUSB_DMA_STATUS_BUSY)
udelay(50);
/* scan the current list, reporting any data that was
* transferred and acking any IRQ
*/
cppi_rx_scan(controller, cppi_ch->index);
/* clobber the existing state once it's idle
*
* NOTE: arguably, we should also wait for all the other
* RX channels to quiesce (how??) and then temporarily
* disable RXCPPI_CTRL_REG ... but it seems that we can
* rely on the controller restarting from state ram, with
* only RXCPPI_BUFCNT state being bogus. BUFCNT will
* correct itself after the next DMA transfer though.
*
* REVISIT does using rndis mode change that?
*/
cppi_reset_rx(cppi_ch->state_ram);
/* next DMA request _should_ load cppi head ptr */
/* ... we don't "free" that list, only mutate it in place. */
cppi_dump_rx(5, cppi_ch, " (done abort)");
/* clean up previously pending bds */
cppi_bd_free(cppi_ch, cppi_ch->last_processed);
cppi_ch->last_processed = NULL;
while (queue) {
struct cppi_descriptor *tmp = queue->next;
cppi_bd_free(cppi_ch, queue);
queue = tmp;
}
}
channel->status = MUSB_DMA_STATUS_FREE;
cppi_ch->buf_dma = 0;
cppi_ch->offset = 0;
cppi_ch->buf_len = 0;
cppi_ch->maxpacket = 0;
return 0;
}
/* TBD Queries:
*
* Power Management ... probably turn off cppi during suspend, restart;
* check state ram? Clocking is presumably shared with usb core.
*/
| gpl-2.0 |
xbmc/android | lib/ffmpeg/libavformat/rl2.c | 30 | 9106 | /*
* RL2 Format Demuxer
* Copyright (c) 2008 Sascha Sommer (saschasommer@freenet.de)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* RL2 file demuxer
* @file
* @author Sascha Sommer (saschasommer@freenet.de)
* @see http://wiki.multimedia.cx/index.php?title=RL2
*
* extradata:
* 2 byte le initial drawing offset within 320x200 viewport
* 4 byte le number of used colors
* 256 * 3 bytes rgb palette
* optional background_frame
*/
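/* Byte-offset sketch of the extradata layout documented above
 * (illustrative; the demuxer copies it as one flat buffer):
 * offset 0: u16le initial drawing offset
 * offset 2: u32le number of used colors
 * offset 6: 256 * 3 palette bytes
 * offset 774 (== EXTRADATA1_SIZE): optional background frame,
 * appended only for RLV3 files with back_size > 0
 */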
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "avformat.h"
#include "internal.h"
#define EXTRADATA1_SIZE (6 + 256 * 3) ///< video base, clr, palette
#define FORM_TAG MKBETAG('F', 'O', 'R', 'M')
#define RLV2_TAG MKBETAG('R', 'L', 'V', '2')
#define RLV3_TAG MKBETAG('R', 'L', 'V', '3')
typedef struct Rl2DemuxContext {
unsigned int index_pos[2]; ///< indexes in the sample tables
} Rl2DemuxContext;
/**
* check if the file is in rl2 format
* @param p probe buffer
* @return 0 when the probe buffer does not contain rl2 data, > 0 otherwise
*/
static int rl2_probe(AVProbeData *p)
{
if(AV_RB32(&p->buf[0]) != FORM_TAG)
return 0;
if(AV_RB32(&p->buf[8]) != RLV2_TAG &&
AV_RB32(&p->buf[8]) != RLV3_TAG)
return 0;
return AVPROBE_SCORE_MAX;
}
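/* Illustrative header bytes that pass the probe above:
 * 46 4F 52 4D xx xx xx xx 52 4C 56 32 ("FORM" .... "RLV2"),
 * i.e. the big-endian FORM tag at offset 0 and RLV2 or RLV3
 * at offset 8.
 */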
/**
* read rl2 header data and setup the avstreams
* @param s demuxer context
* @param ap format parameters
* @return 0 on success, AVERROR otherwise
*/
static av_cold int rl2_read_header(AVFormatContext *s,
AVFormatParameters *ap)
{
AVIOContext *pb = s->pb;
AVStream *st;
unsigned int frame_count;
unsigned int audio_frame_counter = 0;
unsigned int video_frame_counter = 0;
unsigned int back_size;
unsigned short sound_rate;
unsigned short rate;
unsigned short channels;
unsigned short def_sound_size;
unsigned int signature;
unsigned int pts_den = 11025; /* video only case */
unsigned int pts_num = 1103;
unsigned int* chunk_offset = NULL;
int* chunk_size = NULL;
int* audio_size = NULL;
int i;
int ret = 0;
avio_skip(pb,4); /* skip FORM tag */
back_size = avio_rl32(pb); /**< get size of the background frame */
signature = avio_rb32(pb);
avio_skip(pb, 4); /* data size */
frame_count = avio_rl32(pb);
/* disallow back_sizes and frame_counts that may lead to overflows later */
if(back_size > INT_MAX/2 || frame_count > INT_MAX / sizeof(uint32_t))
return AVERROR_INVALIDDATA;
avio_skip(pb, 2); /* encoding method */
sound_rate = avio_rl16(pb);
rate = avio_rl16(pb);
channels = avio_rl16(pb);
def_sound_size = avio_rl16(pb);
/** setup video stream */
st = avformat_new_stream(s, NULL);
if(!st)
return AVERROR(ENOMEM);
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = CODEC_ID_RL2;
st->codec->codec_tag = 0; /* no fourcc */
st->codec->width = 320;
st->codec->height = 200;
/** allocate and fill extradata */
st->codec->extradata_size = EXTRADATA1_SIZE;
if(signature == RLV3_TAG && back_size > 0)
st->codec->extradata_size += back_size;
st->codec->extradata = av_mallocz(st->codec->extradata_size +
FF_INPUT_BUFFER_PADDING_SIZE);
if(!st->codec->extradata)
return AVERROR(ENOMEM);
if(avio_read(pb,st->codec->extradata,st->codec->extradata_size) !=
st->codec->extradata_size)
return AVERROR(EIO);
/** setup audio stream if present */
if(sound_rate){
if(channels <= 0)
return AVERROR_INVALIDDATA;
pts_num = def_sound_size;
pts_den = rate;
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = CODEC_ID_PCM_U8;
st->codec->codec_tag = 1;
st->codec->channels = channels;
st->codec->bits_per_coded_sample = 8;
st->codec->sample_rate = rate;
st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
st->codec->bits_per_coded_sample;
st->codec->block_align = st->codec->channels *
st->codec->bits_per_coded_sample / 8;
avpriv_set_pts_info(st,32,1,rate);
}
avpriv_set_pts_info(s->streams[0], 32, pts_num, pts_den);
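/* Worked example (illustrative): the video-only defaults give a
 * timebase of 1103/11025 s, roughly 0.1 s per tick or ~10 fps; with
 * audio present, each video tick instead spans one audio chunk of
 * def_sound_size/rate seconds.
 */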
chunk_size = av_malloc(frame_count * sizeof(uint32_t));
audio_size = av_malloc(frame_count * sizeof(uint32_t));
chunk_offset = av_malloc(frame_count * sizeof(uint32_t));
if(!chunk_size || !audio_size || !chunk_offset){
av_free(chunk_size);
av_free(audio_size);
av_free(chunk_offset);
return AVERROR(ENOMEM);
}
/** read offset and size tables */
for(i=0; i < frame_count;i++)
chunk_size[i] = avio_rl32(pb);
for(i=0; i < frame_count;i++)
chunk_offset[i] = avio_rl32(pb);
for(i=0; i < frame_count;i++)
audio_size[i] = avio_rl32(pb) & 0xFFFF;
/** build the sample index */
for(i=0;i<frame_count;i++){
if(chunk_size[i] < 0 || audio_size[i] > chunk_size[i]){
ret = AVERROR_INVALIDDATA;
break;
}
if(sound_rate && audio_size[i]){
av_add_index_entry(s->streams[1], chunk_offset[i],
audio_frame_counter,audio_size[i], 0, AVINDEX_KEYFRAME);
audio_frame_counter += audio_size[i] / channels;
}
av_add_index_entry(s->streams[0], chunk_offset[i] + audio_size[i],
video_frame_counter,chunk_size[i]-audio_size[i],0,AVINDEX_KEYFRAME);
++video_frame_counter;
}
av_free(chunk_size);
av_free(audio_size);
av_free(chunk_offset);
return ret;
}
/**
* read a single audio or video packet
* @param s demuxer context
* @param pkt the packet to be filled
* @return 0 on success, AVERROR otherwise
*/
static int rl2_read_packet(AVFormatContext *s,
AVPacket *pkt)
{
Rl2DemuxContext *rl2 = s->priv_data;
AVIOContext *pb = s->pb;
AVIndexEntry *sample = NULL;
int i;
int ret = 0;
int stream_id = -1;
int64_t pos = INT64_MAX;
/** check if there is a valid video or audio entry that can be used */
for(i=0; i<s->nb_streams; i++){
if(rl2->index_pos[i] < s->streams[i]->nb_index_entries
&& s->streams[i]->index_entries[ rl2->index_pos[i] ].pos < pos){
sample = &s->streams[i]->index_entries[ rl2->index_pos[i] ];
pos= sample->pos;
stream_id= i;
}
}
if(stream_id == -1)
return AVERROR(EIO);
++rl2->index_pos[stream_id];
/** position the stream (will probably be there anyway) */
avio_seek(pb, sample->pos, SEEK_SET);
/** fill the packet */
ret = av_get_packet(pb, pkt, sample->size);
if(ret != sample->size){
av_free_packet(pkt);
return AVERROR(EIO);
}
pkt->stream_index = stream_id;
pkt->pts = sample->timestamp;
return ret;
}
/**
* seek to a new timestamp
* @param s demuxer context
* @param stream_index index of the stream that should be seeked
* @param timestamp wanted timestamp
* @param flags direction and seeking mode
* @return 0 on success, -1 otherwise
*/
static int rl2_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
AVStream *st = s->streams[stream_index];
Rl2DemuxContext *rl2 = s->priv_data;
int i;
int index = av_index_search_timestamp(st, timestamp, flags);
if(index < 0)
return -1;
rl2->index_pos[stream_index] = index;
timestamp = st->index_entries[index].timestamp;
for(i=0; i < s->nb_streams; i++){
AVStream *st2 = s->streams[i];
index = av_index_search_timestamp(st2,
av_rescale_q(timestamp, st->time_base, st2->time_base),
flags | AVSEEK_FLAG_BACKWARD);
if(index < 0)
index = 0;
rl2->index_pos[i] = index;
}
return 0;
}
AVInputFormat ff_rl2_demuxer = {
.name = "rl2",
.long_name = NULL_IF_CONFIG_SMALL("RL2 format"),
.priv_data_size = sizeof(Rl2DemuxContext),
.read_probe = rl2_probe,
.read_header = rl2_read_header,
.read_packet = rl2_read_packet,
.read_seek = rl2_read_seek,
};
| gpl-2.0 |
ccotter/linux-deterministic | kernel/rcutree.c | 30 | 92675 | /*
* Read-Copy Update mechanism for mutual exclusion
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright IBM Corporation, 2008
*
* Authors: Dipankar Sarma <dipankar@in.ibm.com>
* Manfred Spraul <manfred@colorfullife.com>
* Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
*
* Based on the original work by Paul McKenney <paulmck@us.ibm.com>
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
*
* For detailed explanation of Read-Copy Update mechanism see -
* Documentation/RCU
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include "rcutree.h"
#include <trace/events/rcu.h>
#include "rcu.h"
/* Data structures. */
static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
#define RCU_STATE_INITIALIZER(sname, cr) { \
.level = { &sname##_state.node[0] }, \
.call = cr, \
.fqs_state = RCU_GP_IDLE, \
.gpnum = -300, \
.completed = -300, \
.onofflock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.onofflock), \
.orphan_nxttail = &sname##_state.orphan_nxtlist, \
.orphan_donetail = &sname##_state.orphan_donelist, \
.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
.onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
.name = #sname, \
}
struct rcu_state rcu_sched_state =
RCU_STATE_INITIALIZER(rcu_sched, call_rcu_sched);
DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh, call_rcu_bh);
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
static struct rcu_state *rcu_state;
LIST_HEAD(rcu_struct_flavors);
/* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
static int num_rcu_lvl[] = { /* Number of rcu_nodes at specified level. */
NUM_RCU_LVL_0,
NUM_RCU_LVL_1,
NUM_RCU_LVL_2,
NUM_RCU_LVL_3,
NUM_RCU_LVL_4,
};
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
/*
* The rcu_scheduler_active variable transitions from zero to one just
* before the first task is spawned. So when this variable is zero, RCU
* can assume that there is but one task, allowing RCU to (for example)
* optimize synchronize_sched() to a simple barrier(). When this variable
* is one, RCU must actually do all the hard work required to detect real
* grace periods. This variable is also used to suppress boot-time false
* positives from lockdep-RCU error checking.
*/
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
/*
* The rcu_scheduler_fully_active variable transitions from zero to one
* during the early_initcall() processing, which is after the scheduler
* is capable of creating new tasks. So RCU processing (for example,
* creating tasks for RCU priority boosting) must be delayed until after
* rcu_scheduler_fully_active transitions from zero to one. We also
* currently delay invocation of any RCU callbacks until after this point.
*
* It might later prove better for people registering RCU callbacks during
* early boot to take responsibility for these callbacks, but one step at
* a time.
*/
static int rcu_scheduler_fully_active __read_mostly;
#ifdef CONFIG_RCU_BOOST
/*
* Control variables for per-CPU and per-rcu_node kthreads. These
* handle all flavors of RCU.
*/
static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
/*
* Track the rcutorture test sequence number and the update version
* number within a given test. The rcutorture_testseq is incremented
* on every rcutorture module load and unload, so has an odd value
* when a test is running. The rcutorture_vernum is set to zero
* when rcutorture starts and is incremented on each rcutorture update.
* These variables enable correlating rcutorture output with the
* RCU tracing information.
*/
unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;
/*
* Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
* permit this function to be invoked without holding the root rcu_node
* structure's ->lock, but of course results can be subject to change.
*/
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
}
/*
* Note a quiescent state. Because we do not need to know
* how many quiescent states passed, just if there was at least
* one since the start of the grace period, this just sets a flag.
* The caller must have disabled preemption.
*/
void rcu_sched_qs(int cpu)
{
struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
if (rdp->passed_quiesce == 0)
trace_rcu_grace_period("rcu_sched", rdp->gpnum, "cpuqs");
rdp->passed_quiesce = 1;
}
void rcu_bh_qs(int cpu)
{
struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
if (rdp->passed_quiesce == 0)
trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
rdp->passed_quiesce = 1;
}
/*
* Note a context switch. This is a quiescent state for RCU-sched,
* and requires special handling for preemptible RCU.
* The caller must have disabled preemption.
*/
void rcu_note_context_switch(int cpu)
{
trace_rcu_utilization("Start context switch");
rcu_sched_qs(cpu);
rcu_preempt_note_context_switch(cpu);
trace_rcu_utilization("End context switch");
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);
DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
.dynticks = ATOMIC_INIT(1),
#if defined(CONFIG_RCU_USER_QS) && !defined(CONFIG_RCU_USER_QS_FORCE)
.ignore_user_qs = true,
#endif
};
static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */
static int qhimark = 10000; /* If this many pending, ignore blimit. */
static int qlowmark = 100; /* Once only this many pending, use blimit. */
module_param(blimit, int, 0444);
module_param(qhimark, int, 0444);
module_param(qlowmark, int, 0444);
int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);
static ulong jiffies_till_first_fqs = RCU_JIFFIES_TILL_FORCE_QS;
static ulong jiffies_till_next_fqs = RCU_JIFFIES_TILL_FORCE_QS;
module_param(jiffies_till_first_fqs, ulong, 0644);
module_param(jiffies_till_next_fqs, ulong, 0644);
static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *));
static void force_quiescent_state(struct rcu_state *rsp);
static int rcu_pending(int cpu);
/*
* Return the number of RCU-sched batches processed thus far for debug & stats.
*/
long rcu_batches_completed_sched(void)
{
return rcu_sched_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
/*
* Return the number of RCU BH batches processed thus far for debug & stats.
*/
long rcu_batches_completed_bh(void)
{
return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
/*
* Force a quiescent state for RCU BH.
*/
void rcu_bh_force_quiescent_state(void)
{
force_quiescent_state(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
/*
* Record the number of times rcutorture tests have been initiated and
* terminated. This information allows the debugfs tracing stats to be
* correlated to the rcutorture messages, even when the rcutorture module
* is being repeatedly loaded and unloaded. In other words, we cannot
* store this state in rcutorture itself.
*/
void rcutorture_record_test_transition(void)
{
rcutorture_testseq++;
rcutorture_vernum = 0;
}
EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
/*
* Record the number of writer passes through the current rcutorture test.
* This is also used to correlate debugfs tracing stats with the rcutorture
* messages.
*/
void rcutorture_record_progress(unsigned long vernum)
{
rcutorture_vernum++;
}
EXPORT_SYMBOL_GPL(rcutorture_record_progress);
/*
* Force a quiescent state for RCU-sched.
*/
void rcu_sched_force_quiescent_state(void)
{
force_quiescent_state(&rcu_sched_state);
}
EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
/*
* Does the CPU have callbacks ready to be invoked?
*/
static int
cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
{
return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL];
}
/*
* Does the current CPU require a yet-as-unscheduled grace period?
*/
static int
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
return *rdp->nxttail[RCU_DONE_TAIL +
ACCESS_ONCE(rsp->completed) != rdp->completed] &&
!rcu_gp_in_progress(rsp);
}
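/* Note on the subscript above (illustrative): "+" binds tighter than
 * "!=", so the index is (RCU_DONE_TAIL + rsp->completed) != rdp->completed,
 * which evaluates to 0 or 1. Since RCU_DONE_TAIL is 0, this checks
 * RCU_DONE_TAIL when this CPU has seen the most recent grace-period
 * completion and RCU_WAIT_TAIL otherwise.
 */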
/*
* Return the root node of the specified rcu_state structure.
*/
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
{
return &rsp->node[0];
}
/*
* rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
*
* If the new value of the ->dynticks_nesting counter now is zero,
* we really have entered idle, and must do the appropriate accounting.
* The caller must have disabled interrupts.
*/
static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
bool user)
{
trace_rcu_dyntick("Start", oldval, 0);
if (!user && !is_idle_task(current)) {
struct task_struct *idle = idle_task(smp_processor_id());
trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
ftrace_dump(DUMP_ORIG);
WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
current->pid, current->comm,
idle->pid, idle->comm); /* must be idle task! */
}
rcu_prepare_for_idle(smp_processor_id());
/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
smp_mb__before_atomic_inc(); /* See above. */
atomic_inc(&rdtp->dynticks);
smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
/*
* It is illegal to enter an extended quiescent state while
* in an RCU read-side critical section.
*/
rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
"Illegal idle entry in RCU read-side critical section.");
rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),
"Illegal idle entry in RCU-bh read-side critical section.");
rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),
"Illegal idle entry in RCU-sched read-side critical section.");
}
/*
* Enter an RCU extended quiescent state, which can be either the
* idle loop or adaptive-tickless usermode execution.
*/
static void rcu_eqs_enter(bool user)
{
long long oldval;
struct rcu_dynticks *rdtp;
rdtp = &__get_cpu_var(rcu_dynticks);
oldval = rdtp->dynticks_nesting;
WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
rdtp->dynticks_nesting = 0;
else
rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
rcu_eqs_enter_common(rdtp, oldval, user);
}
/**
* rcu_idle_enter - inform RCU that current CPU is entering idle
*
* Enter idle mode, in other words, -leave- the mode in which RCU
* read-side critical sections can occur. (Though RCU read-side
* critical sections can occur in irq handlers in idle, a possibility
* handled by irq_enter() and irq_exit().)
*
* We crowbar the ->dynticks_nesting field to zero to allow for
* the possibility of usermode upcalls having messed up our count
* of interrupt nesting level during the prior busy period.
*/
void rcu_idle_enter(void)
{
unsigned long flags;
local_irq_save(flags);
rcu_eqs_enter(false);
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);
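/*
* Illustrative sketch (not from this file): an architecture's idle loop
* is expected to bracket its low-power wait with these hooks, roughly:
*
*	while (1) {
*		rcu_idle_enter();
*		arch_wait_for_interrupt();  (hypothetical arch hook)
*		rcu_idle_exit();
*		schedule_preempt_disabled();
*	}
*/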
#ifdef CONFIG_RCU_USER_QS
/**
* rcu_user_enter - inform RCU that we are resuming userspace.
*
* Enter RCU idle mode right before resuming userspace. No use of RCU
* is permitted between this call and rcu_user_exit(). This way the
* CPU doesn't need to maintain the tick for RCU maintenance purposes
* when the CPU runs in userspace.
*/
void rcu_user_enter(void)
{
unsigned long flags;
struct rcu_dynticks *rdtp;
/*
* Some contexts may involve an exception occurring in an irq,
* leading to the following nesting:
* rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
* This would mess up the dyntick_nesting count though. And rcu_irq_*()
* helpers are enough to protect RCU uses inside the exception. So
* just return immediately if we detect we are in an IRQ.
*/
if (in_interrupt())
return;
WARN_ON_ONCE(!current->mm);
local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
if (!rdtp->ignore_user_qs && !rdtp->in_user) {
rdtp->in_user = true;
rcu_eqs_enter(true);
}
local_irq_restore(flags);
}
/**
* rcu_user_enter_after_irq - inform RCU that we are going to resume userspace
* after the current irq returns.
*
* This is similar to rcu_user_enter() but in the context of a non-nesting
* irq. After this call, RCU enters into idle mode when the interrupt
* returns.
*/
void rcu_user_enter_after_irq(void)
{
unsigned long flags;
struct rcu_dynticks *rdtp;
local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
/* Ensure this irq is interrupting a non-idle RCU state. */
WARN_ON_ONCE(!(rdtp->dynticks_nesting & DYNTICK_TASK_MASK));
rdtp->dynticks_nesting = 1;
local_irq_restore(flags);
}
#endif /* CONFIG_RCU_USER_QS */
/**
* rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
*
* Exit from an interrupt handler, which might possibly result in entering
* idle mode, in other words, leaving the mode in which read-side critical
* sections can occur.
*
* This code assumes that the idle loop never does anything that might
* result in unbalanced calls to irq_enter() and irq_exit(). If your
* architecture violates this assumption, RCU will give you what you
* deserve, good and hard. But very infrequently and irreproducibly.
*
* Use things like work queues to work around this limitation.
*
* You have been warned.
*/
void rcu_irq_exit(void)
{
unsigned long flags;
long long oldval;
struct rcu_dynticks *rdtp;
local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
oldval = rdtp->dynticks_nesting;
rdtp->dynticks_nesting--;
WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
if (rdtp->dynticks_nesting)
trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
else
rcu_eqs_enter_common(rdtp, oldval, true);
local_irq_restore(flags);
}
/*
* rcu_eqs_exit_common - current CPU moving away from extended quiescent state
*
* If the old value of the ->dynticks_nesting counter was zero,
* we really have exited idle, and must do the appropriate accounting.
* The caller must have disabled interrupts.
*/
static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
int user)
{
smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
atomic_inc(&rdtp->dynticks);
/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
smp_mb__after_atomic_inc(); /* See above. */
WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
rcu_cleanup_after_idle(smp_processor_id());
trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
if (!user && !is_idle_task(current)) {
struct task_struct *idle = idle_task(smp_processor_id());
trace_rcu_dyntick("Error on exit: not idle task",
oldval, rdtp->dynticks_nesting);
ftrace_dump(DUMP_ORIG);
WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
current->pid, current->comm,
idle->pid, idle->comm); /* must be idle task! */
}
}
/*
* Exit an RCU extended quiescent state, which can be either the
* idle loop or adaptive-tickless usermode execution.
*/
static void rcu_eqs_exit(bool user)
{
struct rcu_dynticks *rdtp;
long long oldval;
rdtp = &__get_cpu_var(rcu_dynticks);
oldval = rdtp->dynticks_nesting;
WARN_ON_ONCE(oldval < 0);
if (oldval & DYNTICK_TASK_NEST_MASK)
rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
else
rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
rcu_eqs_exit_common(rdtp, oldval, user);
}
/**
* rcu_idle_exit - inform RCU that current CPU is leaving idle
*
* Exit idle mode, in other words, -enter- the mode in which RCU
* read-side critical sections can occur.
*
* We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
* allow for the possibility of usermode upcalls messing up our count
* of interrupt nesting level during the busy period that is just
* now starting.
*/
void rcu_idle_exit(void)
{
unsigned long flags;
local_irq_save(flags);
rcu_eqs_exit(false);
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);
#ifdef CONFIG_RCU_USER_QS
/**
* rcu_user_exit - inform RCU that we are exiting userspace.
*
* Exit RCU idle mode while entering the kernel, which may run
* an RCU read-side critical section at any time.
*/
void rcu_user_exit(void)
{
unsigned long flags;
struct rcu_dynticks *rdtp;
/*
* Some contexts may involve an exception occurring in an irq,
* leading to the following nesting:
* rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
* This would mess up the dyntick_nesting count though. And rcu_irq_*()
* helpers are enough to protect RCU uses inside the exception. So
* just return immediately if we detect we are in an IRQ.
*/
if (in_interrupt())
return;
local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
if (rdtp->in_user) {
rdtp->in_user = false;
rcu_eqs_exit(true);
}
local_irq_restore(flags);
}
/**
* rcu_user_exit_after_irq - inform RCU that we won't resume to userspace
* idle mode after the current non-nesting irq returns.
*
* This is similar to rcu_user_exit() but in the context of an irq.
* This is called when the irq has interrupted a userspace RCU idle mode
* context. When the current non-nesting interrupt returns after this call,
* the CPU won't restore the RCU idle mode.
*/
void rcu_user_exit_after_irq(void)
{
unsigned long flags;
struct rcu_dynticks *rdtp;
local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
/* Ensure we are interrupting an RCU idle mode. */
WARN_ON_ONCE(rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK);
rdtp->dynticks_nesting += DYNTICK_TASK_EXIT_IDLE;
local_irq_restore(flags);
}
#endif /* CONFIG_RCU_USER_QS */
/**
* rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
*
* Enter an interrupt handler, which might possibly result in exiting
* idle mode, in other words, entering the mode in which read-side critical
* sections can occur.
*
* Note that the Linux kernel is fully capable of entering an interrupt
* handler that it never exits, for example when doing upcalls to
* user mode! This code assumes that the idle loop never does upcalls to
* user mode. If your architecture does do upcalls from the idle loop (or
* does anything else that results in unbalanced calls to the irq_enter()
* and irq_exit() functions), RCU will give you what you deserve, good
* and hard. But very infrequently and irreproducibly.
*
* Use things like work queues to work around this limitation.
*
* You have been warned.
*/
void rcu_irq_enter(void)
{
unsigned long flags;
struct rcu_dynticks *rdtp;
long long oldval;
local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
oldval = rdtp->dynticks_nesting;
rdtp->dynticks_nesting++;
WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
if (oldval)
trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
else
rcu_eqs_exit_common(rdtp, oldval, true);
local_irq_restore(flags);
}
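/*
* Worked trace of ->dynticks_nesting (illustrative): with the CPU idle
* (nesting == 0), the outermost interrupt's rcu_irq_enter() sees
* oldval == 0 and exits the extended quiescent state (0 -> 1). A
* nested interrupt merely counts (1 -> 2 -> 1), and the outermost
* rcu_irq_exit() (1 -> 0) re-enters the extended quiescent state via
* rcu_eqs_enter_common().
*/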
/**
* rcu_nmi_enter - inform RCU of entry to NMI context
*
* If the CPU was idle with dynamic ticks active, and there is no
* irq handler running, this updates rdtp->dynticks_nmi to let the
* RCU grace-period handling know that the CPU is active.
*/
void rcu_nmi_enter(void)
{
struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
if (rdtp->dynticks_nmi_nesting == 0 &&
(atomic_read(&rdtp->dynticks) & 0x1))
return;
rdtp->dynticks_nmi_nesting++;
smp_mb__before_atomic_inc(); /* Force delay from prior write. */
atomic_inc(&rdtp->dynticks);
/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
smp_mb__after_atomic_inc(); /* See above. */
WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
}
/**
* rcu_nmi_exit - inform RCU of exit from NMI context
*
* If the CPU was idle with dynamic ticks active, and there is no
* irq handler running, this updates rdtp->dynticks_nmi to let the
* RCU grace-period handling know that the CPU is no longer active.
*/
void rcu_nmi_exit(void)
{
struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
if (rdtp->dynticks_nmi_nesting == 0 ||
--rdtp->dynticks_nmi_nesting != 0)
return;
/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
smp_mb__before_atomic_inc(); /* See above. */
atomic_inc(&rdtp->dynticks);
smp_mb__after_atomic_inc(); /* Force delay to next write. */
WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
}
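/*
* Worked example of the ->dynticks parity protocol (illustrative): an
* even counter value means the CPU is in an extended quiescent state,
* odd means it is active. An NMI arriving in idle finds ->dynticks
* even, and rcu_nmi_enter()/rcu_nmi_exit() bracket the handler with
* increments (even -> odd -> even). An NMI arriving in non-idle
* context finds the counter already odd and leaves it alone.
*/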
/**
* rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
*
* If the current CPU is in its idle loop and is in neither an interrupt
* nor an NMI handler, return true.
*/
int rcu_is_cpu_idle(void)
{
int ret;
preempt_disable();
ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
preempt_enable();
return ret;
}
EXPORT_SYMBOL(rcu_is_cpu_idle);
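/*
* Illustrative usage sketch: debug code can assert that RCU is
* watching before entering a read-side critical section, e.g.:
*
*	WARN_ON_ONCE(rcu_is_cpu_idle());
*	rcu_read_lock();
*	...
*	rcu_read_unlock();
*/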
#ifdef CONFIG_RCU_USER_QS
void rcu_user_hooks_switch(struct task_struct *prev,
struct task_struct *next)
{
struct rcu_dynticks *rdtp;
/* Interrupts are disabled in context switch */
rdtp = &__get_cpu_var(rcu_dynticks);
if (!rdtp->ignore_user_qs) {
clear_tsk_thread_flag(prev, TIF_NOHZ);
set_tsk_thread_flag(next, TIF_NOHZ);
}
}
#endif /* #ifdef CONFIG_RCU_USER_QS */
#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
/*
* Is the current CPU online? Disable preemption to avoid false positives
* that could otherwise happen due to the current CPU number being sampled,
* this task being preempted, its old CPU being taken offline, resuming
* on some other CPU, then determining that its old CPU is now offline.
* It is OK to use RCU on an offline processor during initial boot, hence
* the check for rcu_scheduler_fully_active. Note also that it is OK
* for a CPU coming online to use RCU for one jiffy prior to marking itself
* online in the cpu_online_mask. Similarly, it is OK for a CPU going
* offline to continue to use RCU for one jiffy after marking itself
* offline in the cpu_online_mask. This leniency is necessary given the
* non-atomic nature of the online and offline processing, for example,
* the fact that a CPU enters the scheduler after completing the CPU_DYING
* notifiers.
*
* This is also why RCU internally marks CPUs online during the
* CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
*
* Disable checking if in an NMI handler because we cannot safely report
* errors from NMI handlers anyway.
*/
bool rcu_lockdep_current_cpu_online(void)
{
struct rcu_data *rdp;
struct rcu_node *rnp;
bool ret;
if (in_nmi())
return 1;
preempt_disable();
rdp = &__get_cpu_var(rcu_sched_data);
rnp = rdp->mynode;
ret = (rdp->grpmask & rnp->qsmaskinit) ||
!rcu_scheduler_fully_active;
preempt_enable();
return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
/**
* rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
*
* If the current CPU is idle or running at a first-level (not nested)
* interrupt from idle, return true. The caller must have at least
* disabled preemption.
*/
int rcu_is_cpu_rrupt_from_idle(void)
{
return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
}
/*
* Snapshot the specified CPU's dynticks counter so that we can later
* credit them with an implicit quiescent state. Return 1 if this CPU
* is in dynticks idle mode, which is an extended quiescent state.
*/
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
return (rdp->dynticks_snap & 0x1) == 0;
}
/*
* Return true if the specified CPU has passed through a quiescent
* state by virtue of being in or having passed through a dynticks
* idle state since the last call to dyntick_save_progress_counter()
* for this same CPU, or by virtue of having been offline.
*/
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
unsigned int curr;
unsigned int snap;
curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
snap = (unsigned int)rdp->dynticks_snap;
/*
* If the CPU passed through or entered a dynticks idle phase with
* no active irq/NMI handlers, then we can safely pretend that the CPU
* already acknowledged the request to pass through a quiescent
* state. Either way, that CPU cannot possibly be in an RCU
* read-side critical section that started before the beginning
* of the current RCU grace period.
*/
if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "dti");
rdp->dynticks_fqs++;
return 1;
}
/*
* Check for the CPU being offline, but only if the grace period
* is old enough. We don't need to worry about the CPU changing
* state: If we see it offline even once, it has been through a
* quiescent state.
*
* The reason for insisting that the grace period be at least
* one jiffy old is that CPUs that are not quite online and that
* have just gone offline can still execute RCU read-side critical
* sections.
*/
if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies))
return 0; /* Grace period is not old enough. */
barrier();
if (cpu_is_offline(rdp->cpu)) {
trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl");
rdp->offline_fqs++;
return 1;
}
return 0;
}
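/*
* Worked example of the test above (illustrative): suppose
* dyntick_save_progress_counter() sampled snap == 9 (odd, so the CPU
* was active). If curr == 10, the CPU is idle right now
* ((curr & 0x1) == 0) and is credited with a quiescent state. If
* curr == 11, the CPU entered and left idle at least once
* (11 >= 9 + 2), which also counts. Only curr == 9, the CPU having
* stayed busy throughout, forces us to keep waiting.
*/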
static int jiffies_till_stall_check(void)
{
int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);
/*
* Limit check must be consistent with the Kconfig limits
* for CONFIG_RCU_CPU_STALL_TIMEOUT.
*/
if (till_stall_check < 3) {
ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
till_stall_check = 3;
} else if (till_stall_check > 300) {
ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
till_stall_check = 300;
}
return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
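/*
* Illustrative arithmetic: with rcu_cpu_stall_timeout == 21 and
* HZ == 1000, this returns 21000 + RCU_STALL_DELAY_DELTA jiffies.
* Out-of-range module-parameter values are clamped to the Kconfig
* limits of 3 and 300 seconds before use.
*/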
static void record_gp_stall_check_time(struct rcu_state *rsp)
{
rsp->gp_start = jiffies;
rsp->jiffies_stall = jiffies + jiffies_till_stall_check();
}
static void print_other_cpu_stall(struct rcu_state *rsp)
{
int cpu;
long delta;
unsigned long flags;
int ndetected = 0;
struct rcu_node *rnp = rcu_get_root(rsp);
/* Only let one CPU complain about others per time interval. */
raw_spin_lock_irqsave(&rnp->lock, flags);
delta = jiffies - rsp->jiffies_stall;
if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
raw_spin_unlock_irqrestore(&rnp->lock, flags);
return;
}
rsp->jiffies_stall = jiffies + 3 * jiffies_till_stall_check() + 3;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
/*
* OK, time to rat on our buddy...
* See Documentation/RCU/stallwarn.txt for info on how to debug
* RCU CPU stall warnings.
*/
printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks:",
rsp->name);
print_cpu_stall_info_begin();
rcu_for_each_leaf_node(rsp, rnp) {
raw_spin_lock_irqsave(&rnp->lock, flags);
ndetected += rcu_print_task_stall(rnp);
if (rnp->qsmask != 0) {
for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
if (rnp->qsmask & (1UL << cpu)) {
print_cpu_stall_info(rsp,
rnp->grplo + cpu);
ndetected++;
}
}
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
/*
* Now rat on any tasks that got kicked up to the root rcu_node
* due to CPU offlining.
*/
rnp = rcu_get_root(rsp);
raw_spin_lock_irqsave(&rnp->lock, flags);
ndetected += rcu_print_task_stall(rnp);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
print_cpu_stall_info_end();
printk(KERN_CONT "(detected by %d, t=%ld jiffies)\n",
smp_processor_id(), (long)(jiffies - rsp->gp_start));
if (ndetected == 0)
printk(KERN_ERR "INFO: Stall ended before state dump start\n");
else if (!trigger_all_cpu_backtrace())
dump_stack();
/* Complain about tasks blocking the grace period. */
rcu_print_detail_task_stall(rsp);
force_quiescent_state(rsp); /* Kick them all. */
}
static void print_cpu_stall(struct rcu_state *rsp)
{
unsigned long flags;
struct rcu_node *rnp = rcu_get_root(rsp);
/*
* OK, time to rat on ourselves...
* See Documentation/RCU/stallwarn.txt for info on how to debug
* RCU CPU stall warnings.
*/
printk(KERN_ERR "INFO: %s self-detected stall on CPU", rsp->name);
print_cpu_stall_info_begin();
print_cpu_stall_info(rsp, smp_processor_id());
print_cpu_stall_info_end();
printk(KERN_CONT " (t=%lu jiffies)\n", jiffies - rsp->gp_start);
if (!trigger_all_cpu_backtrace())
dump_stack();
raw_spin_lock_irqsave(&rnp->lock, flags);
if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
rsp->jiffies_stall = jiffies +
3 * jiffies_till_stall_check() + 3;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
set_need_resched(); /* kick ourselves to get things going. */
}
static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
unsigned long j;
unsigned long js;
struct rcu_node *rnp;
if (rcu_cpu_stall_suppress)
return;
j = ACCESS_ONCE(jiffies);
js = ACCESS_ONCE(rsp->jiffies_stall);
rnp = rdp->mynode;
if (rcu_gp_in_progress(rsp) &&
(ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {
/* We haven't checked in, so go dump stack. */
print_cpu_stall(rsp);
} else if (rcu_gp_in_progress(rsp) &&
ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
/* They had a few time units to dump stack, so complain. */
print_other_cpu_stall(rsp);
}
}
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
rcu_cpu_stall_suppress = 1;
return NOTIFY_DONE;
}
/**
* rcu_cpu_stall_reset - prevent further stall warnings in current grace period
*
* Set the stall-warning timeout way off into the future, thus preventing
* any RCU CPU stall-warning messages from appearing in the current set of
* RCU grace periods.
*
* The caller must disable hard irqs.
*/
void rcu_cpu_stall_reset(void)
{
struct rcu_state *rsp;
for_each_rcu_flavor(rsp)
rsp->jiffies_stall = jiffies + ULONG_MAX / 2;
}
static struct notifier_block rcu_panic_block = {
.notifier_call = rcu_panic,
};
static void __init check_cpu_stall_init(void)
{
atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
}
/*
* Update CPU-local rcu_data state to record the newly noticed grace period.
* This is used both when we started the grace period and when we notice
* that someone else started the grace period. The caller must hold the
* ->lock of the leaf rcu_node structure corresponding to the current CPU,
* and must have irqs disabled.
*/
static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
{
if (rdp->gpnum != rnp->gpnum) {
/*
* If the current grace period is waiting for this CPU,
* set up to detect a quiescent state, otherwise don't
* go looking for one.
*/
rdp->gpnum = rnp->gpnum;
trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart");
rdp->passed_quiesce = 0;
rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
zero_cpu_stall_ticks(rdp);
}
}
static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
{
unsigned long flags;
struct rcu_node *rnp;
local_irq_save(flags);
rnp = rdp->mynode;
if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
!raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
local_irq_restore(flags);
return;
}
__note_new_gpnum(rsp, rnp, rdp);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
/*
* Did someone else start a new RCU grace period since we last
* checked? Update local state appropriately if so. Must be called
* on the CPU corresponding to rdp.
*/
static int
check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
{
unsigned long flags;
int ret = 0;
local_irq_save(flags);
if (rdp->gpnum != rsp->gpnum) {
note_new_gpnum(rsp, rdp);
ret = 1;
}
local_irq_restore(flags);
return ret;
}
/*
* Initialize the specified rcu_data structure's callback list to empty.
*/
static void init_callback_list(struct rcu_data *rdp)
{
int i;
rdp->nxtlist = NULL;
for (i = 0; i < RCU_NEXT_SIZE; i++)
rdp->nxttail[i] = &rdp->nxtlist;
}
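/*
* Illustrative view of the freshly initialized segmented list: every
* tail pointer references the (empty) head, and the segments fill in
* as callbacks age through grace periods:
*
*	nxtlist -> NULL
*	nxttail[RCU_DONE_TAIL] ---------> &nxtlist
*	nxttail[RCU_WAIT_TAIL] ---------> &nxtlist
*	nxttail[RCU_NEXT_READY_TAIL] ---> &nxtlist
*	nxttail[RCU_NEXT_TAIL] ---------> &nxtlist
*
* Callbacks between nxtlist and *nxttail[RCU_DONE_TAIL] are ready to
* invoke; later segments await the current or a future grace period.
*/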
/*
* Advance this CPU's callbacks, but only if the current grace period
* has ended. This may be called only from the CPU to which the rdp
* belongs. In addition, the corresponding leaf rcu_node structure's
* ->lock must be held by the caller, with irqs disabled.
*/
static void
__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
{
/* Did another grace period end? */
if (rdp->completed != rnp->completed) {
/* Advance callbacks. No harm if list empty. */
rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
/* Remember that we saw this grace-period completion. */
rdp->completed = rnp->completed;
trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuend");
/*
* If we were in an extended quiescent state, we may have
* missed some grace periods that other CPUs handled on
* our behalf. Catch up with this state to avoid noting
* spurious new grace periods. If another grace period
* has started, then rnp->gpnum will have advanced, so
* we will detect this later on. Of course, any quiescent
* states we found for the old GP are now invalid.
*/
if (ULONG_CMP_LT(rdp->gpnum, rdp->completed)) {
rdp->gpnum = rdp->completed;
rdp->passed_quiesce = 0;
}
/*
* If RCU does not need a quiescent state from this CPU,
* then make sure that this CPU doesn't go looking for one.
*/
if ((rnp->qsmask & rdp->grpmask) == 0)
rdp->qs_pending = 0;
}
}
/*
* Advance this CPU's callbacks, but only if the current grace period
* has ended. This may be called only from the CPU to which the rdp
* belongs.
*/
static void
rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
{
unsigned long flags;
struct rcu_node *rnp;
local_irq_save(flags);
rnp = rdp->mynode;
if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
!raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
local_irq_restore(flags);
return;
}
__rcu_process_gp_end(rsp, rnp, rdp);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
/*
* Do per-CPU grace-period initialization for the running CPU. The caller
* must hold the lock of the leaf rcu_node structure corresponding to
* this CPU.
*/
static void
rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
{
/* Prior grace period ended, so advance callbacks for current CPU. */
__rcu_process_gp_end(rsp, rnp, rdp);
/* Set state so that this CPU will detect the next quiescent state. */
__note_new_gpnum(rsp, rnp, rdp);
}
/*
* Initialize a new grace period.
*/
static int rcu_gp_init(struct rcu_state *rsp)
{
struct rcu_data *rdp;
struct rcu_node *rnp = rcu_get_root(rsp);
raw_spin_lock_irq(&rnp->lock);
rsp->gp_flags = 0; /* Clear all flags: New grace period. */
if (rcu_gp_in_progress(rsp)) {
/* Grace period already in progress, don't start another. */
raw_spin_unlock_irq(&rnp->lock);
return 0;
}
/* Advance to a new grace period and initialize state. */
rsp->gpnum++;
trace_rcu_grace_period(rsp->name, rsp->gpnum, "start");
record_gp_stall_check_time(rsp);
raw_spin_unlock_irq(&rnp->lock);
/* Exclude any concurrent CPU-hotplug operations. */
mutex_lock(&rsp->onoff_mutex);
/*
* Set the quiescent-state-needed bits in all the rcu_node
* structures for all currently online CPUs in breadth-first order,
* starting from the root rcu_node structure, relying on the layout
* of the tree within the rsp->node[] array. Note that other CPUs
* will access only the leaves of the hierarchy, thus seeing that no
* grace period is in progress, at least until the corresponding
* leaf node has been initialized. In addition, we have excluded
* CPU-hotplug operations.
*
* The grace period cannot complete until the initialization
* process finishes, because this kthread handles both.
*/
rcu_for_each_node_breadth_first(rsp, rnp) {
raw_spin_lock_irq(&rnp->lock);
rdp = this_cpu_ptr(rsp->rda);
rcu_preempt_check_blocked_tasks(rnp);
rnp->qsmask = rnp->qsmaskinit;
rnp->gpnum = rsp->gpnum;
WARN_ON_ONCE(rnp->completed != rsp->completed);
rnp->completed = rsp->completed;
if (rnp == rdp->mynode)
rcu_start_gp_per_cpu(rsp, rnp, rdp);
rcu_preempt_boost_start_gp(rnp);
trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
rnp->level, rnp->grplo,
rnp->grphi, rnp->qsmask);
raw_spin_unlock_irq(&rnp->lock);
#ifdef CONFIG_PROVE_RCU_DELAY
if ((random32() % (rcu_num_nodes * 8)) == 0)
schedule_timeout_uninterruptible(2);
#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
cond_resched();
}
mutex_unlock(&rsp->onoff_mutex);
return 1;
}
/*
* Do one round of quiescent-state forcing.
*/
int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
{
int fqs_state = fqs_state_in;
struct rcu_node *rnp = rcu_get_root(rsp);
rsp->n_force_qs++;
if (fqs_state == RCU_SAVE_DYNTICK) {
/* Collect dyntick-idle snapshots. */
force_qs_rnp(rsp, dyntick_save_progress_counter);
fqs_state = RCU_FORCE_QS;
} else {
/* Handle dyntick-idle and offline CPUs. */
force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
}
/* Clear flag to prevent immediate re-entry. */
if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
raw_spin_lock_irq(&rnp->lock);
rsp->gp_flags &= ~RCU_GP_FLAG_FQS;
raw_spin_unlock_irq(&rnp->lock);
}
return fqs_state;
}
/*
* Clean up after the old grace period.
*/
static void rcu_gp_cleanup(struct rcu_state *rsp)
{
unsigned long gp_duration;
struct rcu_data *rdp;
struct rcu_node *rnp = rcu_get_root(rsp);
raw_spin_lock_irq(&rnp->lock);
gp_duration = jiffies - rsp->gp_start;
if (gp_duration > rsp->gp_max)
rsp->gp_max = gp_duration;
/*
* We know the grace period is complete, but to everyone else
* it appears to still be ongoing. But it is also the case
* that to everyone else it looks like there is nothing that
* they can do to advance the grace period. It is therefore
* safe for us to drop the lock in order to mark the grace
* period as completed in all of the rcu_node structures.
*/
raw_spin_unlock_irq(&rnp->lock);
/*
* Propagate new ->completed value to rcu_node structures so
* that other CPUs don't have to wait until the start of the next
* grace period to process their callbacks. This also avoids
* some nasty RCU grace-period initialization races by forcing
* the end of the current grace period to be completely recorded in
* all of the rcu_node structures before the beginning of the next
* grace period is recorded in any of the rcu_node structures.
*/
rcu_for_each_node_breadth_first(rsp, rnp) {
raw_spin_lock_irq(&rnp->lock);
rnp->completed = rsp->gpnum;
raw_spin_unlock_irq(&rnp->lock);
cond_resched();
}
rnp = rcu_get_root(rsp);
raw_spin_lock_irq(&rnp->lock);
rsp->completed = rsp->gpnum; /* Declare grace period done. */
trace_rcu_grace_period(rsp->name, rsp->completed, "end");
rsp->fqs_state = RCU_GP_IDLE;
rdp = this_cpu_ptr(rsp->rda);
if (cpu_needs_another_gp(rsp, rdp))
rsp->gp_flags = 1;
raw_spin_unlock_irq(&rnp->lock);
}
/*
* Body of kthread that handles grace periods.
*/
static int __noreturn rcu_gp_kthread(void *arg)
{
int fqs_state;
unsigned long j;
int ret;
struct rcu_state *rsp = arg;
struct rcu_node *rnp = rcu_get_root(rsp);
for (;;) {
/* Handle grace-period start. */
for (;;) {
wait_event_interruptible(rsp->gp_wq,
rsp->gp_flags &
RCU_GP_FLAG_INIT);
if ((rsp->gp_flags & RCU_GP_FLAG_INIT) &&
rcu_gp_init(rsp))
break;
cond_resched();
flush_signals(current);
}
/* Handle quiescent-state forcing. */
fqs_state = RCU_SAVE_DYNTICK;
j = jiffies_till_first_fqs;
if (j > HZ) {
j = HZ;
jiffies_till_first_fqs = HZ;
}
for (;;) {
rsp->jiffies_force_qs = jiffies + j;
ret = wait_event_interruptible_timeout(rsp->gp_wq,
(rsp->gp_flags & RCU_GP_FLAG_FQS) ||
(!ACCESS_ONCE(rnp->qsmask) &&
!rcu_preempt_blocked_readers_cgp(rnp)),
j);
/* If grace period done, leave loop. */
if (!ACCESS_ONCE(rnp->qsmask) &&
!rcu_preempt_blocked_readers_cgp(rnp))
break;
/* If time for quiescent-state forcing, do it. */
if (ret == 0 || (rsp->gp_flags & RCU_GP_FLAG_FQS)) {
fqs_state = rcu_gp_fqs(rsp, fqs_state);
cond_resched();
} else {
/* Deal with stray signal. */
cond_resched();
flush_signals(current);
}
j = jiffies_till_next_fqs;
if (j > HZ) {
j = HZ;
jiffies_till_next_fqs = HZ;
} else if (j < 1) {
j = 1;
jiffies_till_next_fqs = 1;
}
}
/* Handle grace-period end. */
rcu_gp_cleanup(rsp);
}
}
/*
* Start a new RCU grace period if warranted, re-initializing the hierarchy
* in preparation for detecting the next grace period. The caller must hold
* the root node's ->lock, which is released before return. Hard irqs must
* be disabled.
*
* Note that it is legal for a dying CPU (which is marked as offline) to
* invoke this function. This can happen when the dying CPU reports its
* quiescent state.
*/
static void
rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
__releases(rcu_get_root(rsp)->lock)
{
struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
struct rcu_node *rnp = rcu_get_root(rsp);
if (!rsp->gp_kthread ||
!cpu_needs_another_gp(rsp, rdp)) {
/*
* Either we have not yet spawned the grace-period
* task or this CPU does not need another grace period.
* Either way, don't start a new grace period.
*/
raw_spin_unlock_irqrestore(&rnp->lock, flags);
return;
}
rsp->gp_flags = RCU_GP_FLAG_INIT;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
wake_up(&rsp->gp_wq);
}
/*
* Report a full set of quiescent states to the specified rcu_state
* data structure. This involves cleaning up after the prior grace
* period and letting rcu_start_gp() start up the next grace period
* if one is needed. Note that the caller must hold rnp->lock, as
* required by rcu_start_gp(), which will release it.
*/
static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
__releases(rcu_get_root(rsp)->lock)
{
WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
}
/*
* Similar to rcu_report_qs_rdp(), for which it is a helper function.
* Allows quiescent states for a group of CPUs to be reported at one go
* to the specified rcu_node structure, though all the CPUs in the group
* must be represented by the same rcu_node structure (which need not be
* a leaf rcu_node structure, though it often will be). That structure's
* lock must be held upon entry, and it is released before return.
*/
static void
rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
struct rcu_node *rnp, unsigned long flags)
__releases(rnp->lock)
{
struct rcu_node *rnp_c;
/* Walk up the rcu_node hierarchy. */
for (;;) {
if (!(rnp->qsmask & mask)) {
/* Our bit has already been cleared, so done. */
raw_spin_unlock_irqrestore(&rnp->lock, flags);
return;
}
rnp->qsmask &= ~mask;
trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
mask, rnp->qsmask, rnp->level,
rnp->grplo, rnp->grphi,
!!rnp->gp_tasks);
if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
/* Other bits still set at this level, so done. */
raw_spin_unlock_irqrestore(&rnp->lock, flags);
return;
}
mask = rnp->grpmask;
if (rnp->parent == NULL) {
/* No more levels. Exit loop holding root lock. */
break;
}
raw_spin_unlock_irqrestore(&rnp->lock, flags);
rnp_c = rnp;
rnp = rnp->parent;
raw_spin_lock_irqsave(&rnp->lock, flags);
WARN_ON_ONCE(rnp_c->qsmask);
}
/*
* Get here if we are the last CPU to pass through a quiescent
* state for this grace period. Invoke rcu_report_qs_rsp()
* to clean up and start the next grace period if one is needed.
*/
rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
}
/*
* Record a quiescent state for the specified CPU to that CPU's rcu_data
* structure. This must be either called from the specified CPU, or
* called when the specified CPU is known to be offline (and when it is
* also known that no other CPU is concurrently trying to help the offline
* CPU). The lastcomp argument is used to make sure we are still in the
* grace period of interest. We don't want to end the current grace period
* based on quiescent states detected in an earlier grace period!
*/
static void
rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
{
unsigned long flags;
unsigned long mask;
struct rcu_node *rnp;
rnp = rdp->mynode;
raw_spin_lock_irqsave(&rnp->lock, flags);
if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum ||
rnp->completed == rnp->gpnum) {
/*
* The grace period in which this quiescent state was
* recorded has ended, so don't report it upwards.
* We will instead need a new quiescent state that lies
* within the current grace period.
*/
rdp->passed_quiesce = 0; /* need qs for new gp. */
raw_spin_unlock_irqrestore(&rnp->lock, flags);
return;
}
mask = rdp->grpmask;
if ((rnp->qsmask & mask) == 0) {
raw_spin_unlock_irqrestore(&rnp->lock, flags);
} else {
rdp->qs_pending = 0;
/*
* This GP can't end until this CPU checks in, so all of our
* callbacks can be processed during the next GP.
*/
rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
rcu_report_qs_rnp(mask, rsp, rnp, flags); /* releases rnp->lock */
}
}
/*
* Check to see if there is a new grace period of which this CPU
* is not yet aware, and if so, set up local rcu_data state for it.
* Otherwise, see if this CPU has just passed through its first
* quiescent state for this grace period, and record that fact if so.
*/
static void
rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
{
/* If there is now a new grace period, record and return. */
if (check_for_new_grace_period(rsp, rdp))
return;
/*
* Does this CPU still need to do its part for current grace period?
* If no, return and let the other CPUs do their part as well.
*/
if (!rdp->qs_pending)
return;
/*
* Was there a quiescent state since the beginning of the grace
* period? If no, then exit and wait for the next call.
*/
if (!rdp->passed_quiesce)
return;
/*
* Tell RCU we are done (but rcu_report_qs_rdp() will be the
* judge of that).
*/
rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
}
#ifdef CONFIG_HOTPLUG_CPU
/*
* Send the specified CPU's RCU callbacks to the orphanage. The
* specified CPU must be offline, and the caller must hold the
* ->onofflock.
*/
static void
rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
struct rcu_node *rnp, struct rcu_data *rdp)
{
/*
* Orphan the callbacks. First adjust the counts. This is safe
* because ->onofflock excludes _rcu_barrier()'s adoption of
* the callbacks, thus no memory barrier is required.
*/
if (rdp->nxtlist != NULL) {
rsp->qlen_lazy += rdp->qlen_lazy;
rsp->qlen += rdp->qlen;
rdp->n_cbs_orphaned += rdp->qlen;
rdp->qlen_lazy = 0;
ACCESS_ONCE(rdp->qlen) = 0;
}
/*
* Next, move those callbacks still needing a grace period to
* the orphanage, where some other CPU will pick them up.
* Some of the callbacks might have gone partway through a grace
* period, but that is too bad. They get to start over because we
* cannot assume that grace periods are synchronized across CPUs.
* We don't bother updating the ->nxttail[] array yet, instead
* we just reset the whole thing later on.
*/
if (*rdp->nxttail[RCU_DONE_TAIL] != NULL) {
*rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL];
rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL];
*rdp->nxttail[RCU_DONE_TAIL] = NULL;
}
/*
* Then move the ready-to-invoke callbacks to the orphanage,
* where some other CPU will pick them up. These will not be
* required to pass through another grace period: They are done.
*/
if (rdp->nxtlist != NULL) {
*rsp->orphan_donetail = rdp->nxtlist;
rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
}
/* Finally, initialize the rcu_data structure's list to empty. */
init_callback_list(rdp);
}
/*
* Adopt the RCU callbacks from the specified rcu_state structure's
* orphanage. The caller must hold the ->onofflock.
*/
static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
{
int i;
struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
/* Do the accounting first. */
rdp->qlen_lazy += rsp->qlen_lazy;
rdp->qlen += rsp->qlen;
rdp->n_cbs_adopted += rsp->qlen;
if (rsp->qlen_lazy != rsp->qlen)
rcu_idle_count_callbacks_posted();
rsp->qlen_lazy = 0;
rsp->qlen = 0;
/*
* We do not need a memory barrier here because the only way we
* can get here if there is an rcu_barrier() in flight is if
* we are the task doing the rcu_barrier().
*/
/* First adopt the ready-to-invoke callbacks. */
if (rsp->orphan_donelist != NULL) {
*rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL];
*rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist;
for (i = RCU_NEXT_SIZE - 1; i >= RCU_DONE_TAIL; i--)
if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
rdp->nxttail[i] = rsp->orphan_donetail;
rsp->orphan_donelist = NULL;
rsp->orphan_donetail = &rsp->orphan_donelist;
}
/* And then adopt the callbacks that still need a grace period. */
if (rsp->orphan_nxtlist != NULL) {
*rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist;
rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail;
rsp->orphan_nxtlist = NULL;
rsp->orphan_nxttail = &rsp->orphan_nxtlist;
}
}
/*
* Trace the fact that this CPU is going offline.
*/
static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
{
RCU_TRACE(unsigned long mask);
RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda));
RCU_TRACE(struct rcu_node *rnp = rdp->mynode);
RCU_TRACE(mask = rdp->grpmask);
trace_rcu_grace_period(rsp->name,
rnp->gpnum + 1 - !!(rnp->qsmask & mask),
"cpuofl");
}
/*
* The CPU has been completely removed, and some other CPU is reporting
* this fact from process context. Do the remainder of the cleanup,
* including orphaning the outgoing CPU's RCU callbacks, and also
* adopting them. There can only be one CPU hotplug operation at a time,
* so no other CPU can be attempting to update rcu_cpu_kthread_task.
*/
static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
{
unsigned long flags;
unsigned long mask;
int need_report = 0;
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
/* Adjust any no-longer-needed kthreads. */
rcu_boost_kthread_setaffinity(rnp, -1);
/* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */
/* Exclude any attempts to start a new grace period. */
mutex_lock(&rsp->onoff_mutex);
raw_spin_lock_irqsave(&rsp->onofflock, flags);
/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
rcu_adopt_orphan_cbs(rsp);
/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
mask = rdp->grpmask; /* rnp->grplo is constant. */
do {
raw_spin_lock(&rnp->lock); /* irqs already disabled. */
rnp->qsmaskinit &= ~mask;
if (rnp->qsmaskinit != 0) {
if (rnp != rdp->mynode)
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
break;
}
if (rnp == rdp->mynode)
need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
else
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
mask = rnp->grpmask;
rnp = rnp->parent;
} while (rnp != NULL);
/*
* We still hold the leaf rcu_node structure lock here, and
* irqs are still disabled. The reason for this subterfuge is
* that invoking rcu_report_unblock_qs_rnp() with ->onofflock
* held leads to deadlock.
*/
raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
rnp = rdp->mynode;
if (need_report & RCU_OFL_TASKS_NORM_GP)
rcu_report_unblock_qs_rnp(rnp, flags);
else
raw_spin_unlock_irqrestore(&rnp->lock, flags);
if (need_report & RCU_OFL_TASKS_EXP_GP)
rcu_report_exp_rnp(rsp, rnp, true);
WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
"rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
cpu, rdp->qlen, rdp->nxtlist);
init_callback_list(rdp);
/* Disallow further callbacks on this CPU. */
rdp->nxttail[RCU_NEXT_TAIL] = NULL;
mutex_unlock(&rsp->onoff_mutex);
}
#else /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
{
}
static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
{
}
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
/*
* Invoke any RCU callbacks that have made it to the end of their grace
* period. Throttle as specified by rdp->blimit.
*/
static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
{
unsigned long flags;
struct rcu_head *next, *list, **tail;
int bl, count, count_lazy, i;
/* If no callbacks are ready, just return. */
if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
need_resched(), is_idle_task(current),
rcu_is_callbacks_kthread());
return;
}
/*
* Extract the list of ready callbacks, disabling to prevent
* races with call_rcu() from interrupt handlers.
*/
local_irq_save(flags);
WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
bl = rdp->blimit;
trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl);
list = rdp->nxtlist;
rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
*rdp->nxttail[RCU_DONE_TAIL] = NULL;
tail = rdp->nxttail[RCU_DONE_TAIL];
for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
rdp->nxttail[i] = &rdp->nxtlist;
local_irq_restore(flags);
/* Invoke callbacks. */
count = count_lazy = 0;
while (list) {
next = list->next;
prefetch(next);
debug_rcu_head_unqueue(list);
if (__rcu_reclaim(rsp->name, list))
count_lazy++;
list = next;
/* Stop only if limit reached and CPU has something to do. */
if (++count >= bl &&
(need_resched() ||
(!is_idle_task(current) && !rcu_is_callbacks_kthread())))
break;
}
local_irq_save(flags);
trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
is_idle_task(current),
rcu_is_callbacks_kthread());
/* Update count, and requeue any remaining callbacks. */
if (list != NULL) {
*tail = rdp->nxtlist;
rdp->nxtlist = list;
for (i = 0; i < RCU_NEXT_SIZE; i++)
if (&rdp->nxtlist == rdp->nxttail[i])
rdp->nxttail[i] = tail;
else
break;
}
smp_mb(); /* List handling before counting for rcu_barrier(). */
rdp->qlen_lazy -= count_lazy;
ACCESS_ONCE(rdp->qlen) -= count;
rdp->n_cbs_invoked += count;
/* Reinstate batch limit if we have worked down the excess. */
if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
rdp->blimit = blimit;
/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
rdp->qlen_last_fqs_check = 0;
rdp->n_force_qs_snap = rsp->n_force_qs;
} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
rdp->qlen_last_fqs_check = rdp->qlen;
WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));
local_irq_restore(flags);
/* Re-invoke RCU core processing if there are callbacks remaining. */
if (cpu_has_callbacks_ready_to_invoke(rdp))
invoke_rcu_core();
}
/*
* Check to see if this CPU is in a non-context-switch quiescent state
* (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
* Also schedule RCU core processing.
*
* This function must be called from hardirq context. It is normally
* invoked from the scheduling-clock interrupt. If rcu_pending returns
* false, there is no point in invoking rcu_check_callbacks().
*/
void rcu_check_callbacks(int cpu, int user)
{
trace_rcu_utilization("Start scheduler-tick");
increment_cpu_stall_ticks();
if (user || rcu_is_cpu_rrupt_from_idle()) {
/*
* Get here if this CPU took its interrupt from user
* mode or from the idle loop, and if this is not a
* nested interrupt. In this case, the CPU is in
* a quiescent state, so note it.
*
* No memory barrier is required here because both
* rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
* variables that other CPUs neither access nor modify,
* at least not while the corresponding CPU is online.
*/
rcu_sched_qs(cpu);
rcu_bh_qs(cpu);
} else if (!in_softirq()) {
/*
* Get here if this CPU did not take its interrupt from
* softirq, in other words, if it is not interrupting
* an rcu_bh read-side critical section. This is therefore an
* rcu_bh quiescent state, so note it.
*/
rcu_bh_qs(cpu);
}
rcu_preempt_check_callbacks(cpu);
if (rcu_pending(cpu))
invoke_rcu_core();
trace_rcu_utilization("End scheduler-tick");
}
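/*
* Illustrative call path (per mainline kernels of this era, assumed to
* match this tree): the scheduling-clock interrupt reaches here via
*
*	update_process_times(user_tick)
*	  -> rcu_check_callbacks(cpu, user_tick)
*/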
/*
* Scan the leaf rcu_node structures, processing dyntick state for any that
* have not yet encountered a quiescent state, using the function specified.
* Also initiate boosting for any threads blocked on the root rcu_node.
*
* The caller must have suppressed start of new grace periods.
*/
static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
{
unsigned long bit;
int cpu;
unsigned long flags;
unsigned long mask;
struct rcu_node *rnp;
rcu_for_each_leaf_node(rsp, rnp) {
cond_resched();
mask = 0;
raw_spin_lock_irqsave(&rnp->lock, flags);
if (!rcu_gp_in_progress(rsp)) {
raw_spin_unlock_irqrestore(&rnp->lock, flags);
return;
}
if (rnp->qsmask == 0) {
rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
continue;
}
cpu = rnp->grplo;
bit = 1;
for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
if ((rnp->qsmask & bit) != 0 &&
f(per_cpu_ptr(rsp->rda, cpu)))
mask |= bit;
}
if (mask != 0) {
/* rcu_report_qs_rnp() releases rnp->lock. */
rcu_report_qs_rnp(mask, rsp, rnp, flags);
continue;
}
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
rnp = rcu_get_root(rsp);
if (rnp->qsmask == 0) {
raw_spin_lock_irqsave(&rnp->lock, flags);
rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
}
}
/*
* Force quiescent states on reluctant CPUs, and also detect which
* CPUs are in dyntick-idle mode.
*/
static void force_quiescent_state(struct rcu_state *rsp)
{
unsigned long flags;
bool ret;
struct rcu_node *rnp;
struct rcu_node *rnp_old = NULL;
/* Funnel through hierarchy to reduce memory contention. */
rnp = per_cpu_ptr(rsp->rda, raw_smp_processor_id())->mynode;
for (; rnp != NULL; rnp = rnp->parent) {
ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
!raw_spin_trylock(&rnp->fqslock);
if (rnp_old != NULL)
raw_spin_unlock(&rnp_old->fqslock);
if (ret) {
rsp->n_force_qs_lh++;
return;
}
rnp_old = rnp;
}
/* rnp_old == rcu_get_root(rsp), rnp == NULL. */
/* Reached the root of the rcu_node tree, acquire lock. */
raw_spin_lock_irqsave(&rnp_old->lock, flags);
raw_spin_unlock(&rnp_old->fqslock);
if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
rsp->n_force_qs_lh++;
raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
return; /* Someone beat us to it. */
}
rsp->gp_flags |= RCU_GP_FLAG_FQS;
raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
}
/*
* This does the RCU core processing work for the specified rcu_state
* and rcu_data structures. This may be called only from the CPU to
* which the rdp belongs.
*/
static void
__rcu_process_callbacks(struct rcu_state *rsp)
{
unsigned long flags;
struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
WARN_ON_ONCE(rdp->beenonline == 0);
/*
* Advance callbacks in response to end of earlier grace
* period that some other CPU ended.
*/
rcu_process_gp_end(rsp, rdp);
/* Update RCU state based on any recent quiescent states. */
rcu_check_quiescent_state(rsp, rdp);
/* Does this CPU require a not-yet-started grace period? */
if (cpu_needs_another_gp(rsp, rdp)) {
raw_spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
rcu_start_gp(rsp, flags); /* releases above lock */
}
/* If there are callbacks ready, invoke them. */
if (cpu_has_callbacks_ready_to_invoke(rdp))
invoke_rcu_callbacks(rsp, rdp);
}
/*
* Do RCU core processing for the current CPU.
*/
static void rcu_process_callbacks(struct softirq_action *unused)
{
struct rcu_state *rsp;
if (cpu_is_offline(smp_processor_id()))
return;
trace_rcu_utilization("Start RCU core");
for_each_rcu_flavor(rsp)
__rcu_process_callbacks(rsp);
trace_rcu_utilization("End RCU core");
}
/*
* Schedule RCU callback invocation. If the specified type of RCU
* does not support RCU priority boosting, just do a direct call,
* otherwise wake up the per-CPU kernel kthread. Note that because we
* are running on the current CPU with interrupts disabled, the
* rcu_cpu_kthread_task cannot disappear out from under us.
*/
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
return;
if (likely(!rsp->boost)) {
rcu_do_batch(rsp, rdp);
return;
}
invoke_rcu_callbacks_kthread();
}
static void invoke_rcu_core(void)
{
raise_softirq(RCU_SOFTIRQ);
}
/*
* Handle any core-RCU processing required by a call_rcu() invocation.
*/
static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
struct rcu_head *head, unsigned long flags)
{
/*
* If called from an extended quiescent state, invoke the RCU
* core in order to force a re-evaluation of RCU's idleness.
*/
if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
invoke_rcu_core();
/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
return;
/*
* Force the grace period if too many callbacks or too long waiting.
* Enforce hysteresis, and don't invoke force_quiescent_state()
* if some other CPU has recently done so. Also, don't bother
* invoking force_quiescent_state() if the newly enqueued callback
* is the only one waiting for a grace period to complete.
*/
if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
/* Are we ignoring a completed grace period? */
rcu_process_gp_end(rsp, rdp);
check_for_new_grace_period(rsp, rdp);
/* Start a new grace period if one not already started. */
if (!rcu_gp_in_progress(rsp)) {
unsigned long nestflag;
struct rcu_node *rnp_root = rcu_get_root(rsp);
raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock */
} else {
/* Give the grace period a kick. */
rdp->blimit = LONG_MAX;
if (rsp->n_force_qs == rdp->n_force_qs_snap &&
*rdp->nxttail[RCU_DONE_TAIL] != head)
force_quiescent_state(rsp);
rdp->n_force_qs_snap = rsp->n_force_qs;
rdp->qlen_last_fqs_check = rdp->qlen;
}
}
}
static void
__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
struct rcu_state *rsp, bool lazy)
{
unsigned long flags;
struct rcu_data *rdp;
WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
debug_rcu_head_queue(head);
head->func = func;
head->next = NULL;
/*
* Opportunistically note grace-period endings and beginnings.
* Note that we might see a beginning right after we see an
* end, but never vice versa, since this CPU has to pass through
* a quiescent state betweentimes.
*/
local_irq_save(flags);
rdp = this_cpu_ptr(rsp->rda);
/* Add the callback to our list. */
if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL)) {
/* __call_rcu() is illegal on offline CPU; leak the callback. */
WARN_ON_ONCE(1);
local_irq_restore(flags);
return;
}
ACCESS_ONCE(rdp->qlen)++;
if (lazy)
rdp->qlen_lazy++;
else
rcu_idle_count_callbacks_posted();
smp_mb(); /* Count before adding callback for rcu_barrier(). */
*rdp->nxttail[RCU_NEXT_TAIL] = head;
rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
if (__is_kfree_rcu_offset((unsigned long)func))
trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
rdp->qlen_lazy, rdp->qlen);
else
trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
/* Go handle any RCU core processing required. */
__call_rcu_core(rsp, rdp, head, flags);
local_irq_restore(flags);
}
/*
* Queue an RCU-sched callback for invocation after a grace period.
*/
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
__call_rcu(head, func, &rcu_sched_state, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
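/*
* Illustrative usage sketch (struct foo and foo_reclaim() are
* hypothetical): embed the rcu_head in the protected object and free
* the object from the callback:
*
*	struct foo {
*		struct rcu_head rh;
*		...
*	};
*
*	static void foo_reclaim(struct rcu_head *rh)
*	{
*		kfree(container_of(rh, struct foo, rh));
*	}
*
*	call_rcu_sched(&p->rh, foo_reclaim);
*/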
/*
* Queue an RCU callback for invocation after a quicker grace period.
*/
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
__call_rcu(head, func, &rcu_bh_state, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
/*
* Because a context switch is a grace period for RCU-sched and RCU-bh,
* any blocking grace-period wait automatically implies a grace period
* if there is only one CPU online at any point in time during execution
* of either synchronize_sched() or synchronize_rcu_bh(). It is OK to
* occasionally incorrectly indicate that there are multiple CPUs online
* when there was in fact only one the whole time, as this just adds
* some overhead: RCU still operates correctly.
*/
static inline int rcu_blocking_is_gp(void)
{
int ret;
might_sleep(); /* Check for RCU read-side critical section. */
preempt_disable();
ret = num_online_cpus() <= 1;
preempt_enable();
return ret;
}
/**
* synchronize_sched - wait until an rcu-sched grace period has elapsed.
*
* Control will return to the caller some time after a full rcu-sched
* grace period has elapsed, in other words after all currently executing
* rcu-sched read-side critical sections have completed. These read-side
* critical sections are delimited by rcu_read_lock_sched() and
* rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
* local_irq_disable(), and so on may be used in place of
* rcu_read_lock_sched().
*
* This means that all preempt_disable code sequences, including NMI and
* hardware-interrupt handlers, in progress on entry will have completed
* before this primitive returns. However, this does not guarantee that
* softirq handlers will have completed, since in some kernels, these
* handlers can run in process context, and can block.
*
* This primitive provides the guarantees made by the (now removed)
* synchronize_kernel() API. In contrast, synchronize_rcu() only
* guarantees that rcu_read_lock() sections will have completed.
* In "classic RCU", these two guarantees happen to be one and
* the same, but can differ in realtime RCU implementations.
*/
void synchronize_sched(void)
{
rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
!lock_is_held(&rcu_lock_map) &&
!lock_is_held(&rcu_sched_lock_map),
"Illegal synchronize_sched() in RCU-sched read-side critical section");
if (rcu_blocking_is_gp())
return;
wait_rcu_gp(call_rcu_sched);
}
EXPORT_SYMBOL_GPL(synchronize_sched);
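/*
* Illustrative usage sketch (global_ptr is hypothetical): unpublish a
* structure, wait out all pre-existing preempt-disabled readers, then
* free it:
*
*	old = global_ptr;
*	rcu_assign_pointer(global_ptr, NULL);
*	synchronize_sched();  (all prior readers have now finished)
*	kfree(old);
*/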
/**
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
*
* Control will return to the caller some time after a full rcu_bh grace
* period has elapsed, in other words after all currently executing rcu_bh
* read-side critical sections have completed. RCU read-side critical
* sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
* and may be nested.
*/
void synchronize_rcu_bh(void)
{
rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
!lock_is_held(&rcu_lock_map) &&
!lock_is_held(&rcu_sched_lock_map),
"Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
if (rcu_blocking_is_gp())
return;
wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
static int synchronize_sched_expedited_cpu_stop(void *data)
{
/*
* There must be a full memory barrier on each affected CPU
* between the time that try_stop_cpus() is called and the
* time that it returns.
*
* In the current initial implementation of cpu_stop, the
* above condition is already met when the control reaches
* this point and the following smp_mb() is not strictly
* necessary. Do smp_mb() anyway for documentation and
* robustness against future implementation changes.
*/
smp_mb(); /* See above comment block. */
return 0;
}
/**
* synchronize_sched_expedited - Brute-force RCU-sched grace period
*
* Wait for an RCU-sched grace period to elapse, but use a "big hammer"
* approach to force the grace period to end quickly. This consumes
* significant time on all CPUs and is unfriendly to real-time workloads,
* and is thus not recommended for any sort of common-case code. In fact,
* if you are using synchronize_sched_expedited() in a loop, please
* restructure your code to batch your updates, and then use a single
* synchronize_sched() instead.
*
* Note that it is illegal to call this function while holding any lock
* that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
* to call this function from a CPU-hotplug notifier. Failing to observe
* these restrictions will result in deadlock.
*
* This implementation can be thought of as an application of ticket
* locking to RCU, with sync_sched_expedited_started and
* sync_sched_expedited_done taking on the roles of the halves
* of the ticket-lock word. Each task atomically increments
* sync_sched_expedited_started upon entry, snapshotting the old value,
* then attempts to stop all the CPUs. If this succeeds, then each
* CPU will have executed a context switch, resulting in an RCU-sched
* grace period. We are then done, so we use atomic_cmpxchg() to
* update sync_sched_expedited_done to match our snapshot -- but
* only if someone else has not already advanced past our snapshot.
*
* On the other hand, if try_stop_cpus() fails, we check the value
* of sync_sched_expedited_done. If it has advanced past our
* initial snapshot, then someone else must have forced a grace period
* some time after we took our snapshot. In this case, our work is
* done for us, and we can simply return. Otherwise, we try again,
* but keep our initial snapshot for purposes of checking for someone
* doing our work for us.
*
* If we fail too many times in a row, we fall back to synchronize_sched().
*/
void synchronize_sched_expedited(void)
{
int firstsnap, s, snap, trycount = 0;
/* Note that atomic_inc_return() implies full memory barrier. */
firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
get_online_cpus();
WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
/*
* Each pass through the following loop attempts to force a
* context switch on each CPU.
*/
while (try_stop_cpus(cpu_online_mask,
synchronize_sched_expedited_cpu_stop,
NULL) == -EAGAIN) {
put_online_cpus();
/* No joy, try again later. Or just synchronize_sched(). */
if (trycount++ < 10) {
udelay(trycount * num_online_cpus());
} else {
synchronize_sched();
return;
}
/* Check to see if someone else did our work for us. */
s = atomic_read(&sync_sched_expedited_done);
if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
smp_mb(); /* ensure test happens before caller kfree */
return;
}
/*
* Refetching sync_sched_expedited_started allows later
* callers to piggyback on our grace period. We retry
* after they started, so our grace period works for them,
* and they started after our first try, so their grace
* period works for us.
*/
get_online_cpus();
snap = atomic_read(&sync_sched_expedited_started);
smp_mb(); /* ensure read is before try_stop_cpus(). */
}
/*
* Everyone up to our most recent fetch is covered by our grace
* period. Update the counter, but only if our work is still
* relevant -- which it won't be if someone who started later
* than we did beat us to the punch.
*/
do {
s = atomic_read(&sync_sched_expedited_done);
if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
smp_mb(); /* ensure test happens before caller kfree */
break;
}
} while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
put_online_cpus();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
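/*
 * For orientation only, a user-level sketch of the ticket-lock analogy
 * described above (hypothetical code, not part of the original file):
 *
 * static atomic_t next_ticket, now_serving;
 *
 * static void ticket_lock(void)
 * {
 * int t = atomic_inc_return(&next_ticket) - 1; (take a ticket)
 * while (atomic_read(&now_serving) != t)
 * cpu_relax(); (spin until our ticket is served)
 * }
 *
 * static void ticket_unlock(void)
 * {
 * atomic_inc(&now_serving);
 * }
 *
 * sync_sched_expedited_started plays the role of next_ticket and
 * sync_sched_expedited_done that of now_serving, except that any task
 * may advance "done" past our ticket, which is how later callers
 * piggyback on grace periods forced by earlier ones.
 */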
/*
* Check to see if there is any immediate RCU-related work to be done
* by the current CPU, for the specified type of RCU, returning 1 if so.
* The checks are in order of increasing expense: checks that can be
* carried out against CPU-local state are performed first. However,
* we must check for CPU stalls first, else we might not get a chance.
*/
static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
{
struct rcu_node *rnp = rdp->mynode;
rdp->n_rcu_pending++;
/* Check for CPU stalls, if enabled. */
check_cpu_stall(rsp, rdp);
/* Is the RCU core waiting for a quiescent state from this CPU? */
if (rcu_scheduler_fully_active &&
rdp->qs_pending && !rdp->passed_quiesce) {
rdp->n_rp_qs_pending++;
} else if (rdp->qs_pending && rdp->passed_quiesce) {
rdp->n_rp_report_qs++;
return 1;
}
/* Does this CPU have callbacks ready to invoke? */
if (cpu_has_callbacks_ready_to_invoke(rdp)) {
rdp->n_rp_cb_ready++;
return 1;
}
/* Has RCU gone idle with this CPU needing another grace period? */
if (cpu_needs_another_gp(rsp, rdp)) {
rdp->n_rp_cpu_needs_gp++;
return 1;
}
/* Has another RCU grace period completed? */
if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
rdp->n_rp_gp_completed++;
return 1;
}
/* Has a new RCU grace period started? */
if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
rdp->n_rp_gp_started++;
return 1;
}
/* nothing to do */
rdp->n_rp_need_nothing++;
return 0;
}
/*
* Check to see if there is any immediate RCU-related work to be done
* by the current CPU, returning 1 if so. This function is part of the
* RCU implementation; it is -not- an exported member of the RCU API.
*/
static int rcu_pending(int cpu)
{
struct rcu_state *rsp;
for_each_rcu_flavor(rsp)
if (__rcu_pending(rsp, per_cpu_ptr(rsp->rda, cpu)))
return 1;
return 0;
}
/*
* Check to see if any future RCU-related work will need to be done
* by the current CPU, even if none need be done immediately, returning
* 1 if so.
*/
static int rcu_cpu_has_callbacks(int cpu)
{
struct rcu_state *rsp;
/* RCU callbacks either ready or pending? */
for_each_rcu_flavor(rsp)
if (per_cpu_ptr(rsp->rda, cpu)->nxtlist)
return 1;
return 0;
}
/*
* Helper function for _rcu_barrier() tracing. If tracing is disabled,
* the compiler is expected to optimize this away.
*/
static void _rcu_barrier_trace(struct rcu_state *rsp, char *s,
int cpu, unsigned long done)
{
trace_rcu_barrier(rsp->name, s, cpu,
atomic_read(&rsp->barrier_cpu_count), done);
}
/*
* RCU callback function for _rcu_barrier(). If we are last, wake
* up the task executing _rcu_barrier().
*/
static void rcu_barrier_callback(struct rcu_head *rhp)
{
struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
struct rcu_state *rsp = rdp->rsp;
if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
_rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
complete(&rsp->barrier_completion);
} else {
_rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
}
}
/*
* Called with preemption disabled, and from cross-cpu IRQ context.
*/
static void rcu_barrier_func(void *type)
{
struct rcu_state *rsp = type;
struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
_rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
atomic_inc(&rsp->barrier_cpu_count);
rsp->call(&rdp->barrier_head, rcu_barrier_callback);
}
/*
* Orchestrate the specified type of RCU barrier, waiting for all
* RCU callbacks of the specified type to complete.
*/
static void _rcu_barrier(struct rcu_state *rsp)
{
int cpu;
struct rcu_data *rdp;
unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
unsigned long snap_done;
_rcu_barrier_trace(rsp, "Begin", -1, snap);
/* Take mutex to serialize concurrent rcu_barrier() requests. */
mutex_lock(&rsp->barrier_mutex);
/*
* Ensure that all prior references, including to ->n_barrier_done,
* are ordered before the _rcu_barrier() machinery.
*/
smp_mb(); /* See above block comment. */
/*
* Recheck ->n_barrier_done to see if others did our work for us.
* This means checking ->n_barrier_done for an even-to-odd-to-even
* transition. The "if" expression below therefore rounds the old
* value up to the next even number and adds two before comparing.
*/
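/*
 * Worked example: with snap == 2 (even: no barrier in flight), the
 * threshold is ((2 + 1) & ~0x1) + 2 == 4, i.e. one complete barrier
 * (odd = started, even = finished) after our snapshot. With snap == 3
 * (odd: a barrier was mid-flight), the threshold is 6, because the
 * in-flight barrier may have missed our callbacks and cannot count.
 */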
snap_done = ACCESS_ONCE(rsp->n_barrier_done);
_rcu_barrier_trace(rsp, "Check", -1, snap_done);
if (ULONG_CMP_GE(snap_done, ((snap + 1) & ~0x1) + 2)) {
_rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
smp_mb(); /* caller's subsequent code after above check. */
mutex_unlock(&rsp->barrier_mutex);
return;
}
/*
* Increment ->n_barrier_done to avoid duplicate work. Use
* ACCESS_ONCE() to prevent the compiler from speculating
* the increment to precede the early-exit check.
*/
ACCESS_ONCE(rsp->n_barrier_done)++;
WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
/*
* Initialize the count to one rather than to zero in order to
* avoid a too-soon return to zero in case of a short grace period
* (or preemption of this task). Exclude CPU-hotplug operations
* to ensure that no offline CPU has callbacks queued.
*/
init_completion(&rsp->barrier_completion);
atomic_set(&rsp->barrier_cpu_count, 1);
get_online_cpus();
/*
* Force each CPU with callbacks to register a new callback.
* When that callback is invoked, we will know that all of the
* corresponding CPU's preceding callbacks have been invoked.
*/
for_each_online_cpu(cpu) {
rdp = per_cpu_ptr(rsp->rda, cpu);
if (ACCESS_ONCE(rdp->qlen)) {
_rcu_barrier_trace(rsp, "OnlineQ", cpu,
rsp->n_barrier_done);
smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
} else {
_rcu_barrier_trace(rsp, "OnlineNQ", cpu,
rsp->n_barrier_done);
}
}
put_online_cpus();
/*
* Now that we have an rcu_barrier_callback() callback on each
* CPU, and have thus counted each of them, remove the initial count.
*/
if (atomic_dec_and_test(&rsp->barrier_cpu_count))
complete(&rsp->barrier_completion);
/* Increment ->n_barrier_done to prevent duplicate work. */
smp_mb(); /* Keep increment after above mechanism. */
ACCESS_ONCE(rsp->n_barrier_done)++;
WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
smp_mb(); /* Keep increment before caller's subsequent code. */
/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
wait_for_completion(&rsp->barrier_completion);
/* Other rcu_barrier() invocations can now safely proceed. */
mutex_unlock(&rsp->barrier_mutex);
}
/**
* rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
*/
void rcu_barrier_bh(void)
{
_rcu_barrier(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
/**
* rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
*/
void rcu_barrier_sched(void)
{
_rcu_barrier(&rcu_sched_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);
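/*
 * Contrast with the synchronize_*() primitives above (illustrative,
 * with a hypothetical callback name): synchronize_sched() waits only for
 * pre-existing readers, while rcu_barrier_sched() waits for pre-existing
 * callbacks to be invoked, e.g.:
 *
 * call_rcu_sched(&p->rh, free_foo_cb);
 * rcu_barrier_sched(); (free_foo_cb has now run)
 *
 * which is what makes it safe to unload a module whose code contains
 * such callback functions.
 */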
/*
* Do boot-time initialization of a CPU's per-CPU RCU data.
*/
static void __init
rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
{
unsigned long flags;
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
struct rcu_node *rnp = rcu_get_root(rsp);
/* Set up local state, ensuring consistent view of global state. */
raw_spin_lock_irqsave(&rnp->lock, flags);
rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
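/*
 * grpmask is this CPU's bit within its leaf node's bitmasks: for
 * example, cpu == 21 on a leaf covering grplo 16 .. grphi 31 gets
 * grpmask == 1UL << 5.
 */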
init_callback_list(rdp);
rdp->qlen_lazy = 0;
ACCESS_ONCE(rdp->qlen) = 0;
rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
#ifdef CONFIG_RCU_USER_QS
WARN_ON_ONCE(rdp->dynticks->in_user);
#endif
rdp->cpu = cpu;
rdp->rsp = rsp;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
/*
* Initialize a CPU's per-CPU RCU data. Note that only one online or
* offline event can be happening at a given time. Note also that we
* can accept some slop in the rsp->completed access due to the fact
* that this CPU cannot possibly have any RCU callbacks in flight yet.
*/
static void __cpuinit
rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
{
unsigned long flags;
unsigned long mask;
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
struct rcu_node *rnp = rcu_get_root(rsp);
/* Exclude new grace periods. */
mutex_lock(&rsp->onoff_mutex);
/* Set up local state, ensuring consistent view of global state. */
raw_spin_lock_irqsave(&rnp->lock, flags);
rdp->beenonline = 1; /* We have now been online. */
rdp->preemptible = preemptible;
rdp->qlen_last_fqs_check = 0;
rdp->n_force_qs_snap = rsp->n_force_qs;
rdp->blimit = blimit;
init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
atomic_set(&rdp->dynticks->dynticks,
(atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
rcu_prepare_for_idle_init(cpu);
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
/* Add CPU to rcu_node bitmasks. */
rnp = rdp->mynode;
mask = rdp->grpmask;
do {
/* Exclude any attempts to start a new GP on small systems. */
raw_spin_lock(&rnp->lock); /* irqs already disabled. */
rnp->qsmaskinit |= mask;
mask = rnp->grpmask;
if (rnp == rdp->mynode) {
/*
* If there is a grace period in progress, we will
* set up to wait for it next time we run the
* RCU core code.
*/
rdp->gpnum = rnp->completed;
rdp->completed = rnp->completed;
rdp->passed_quiesce = 0;
rdp->qs_pending = 0;
trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuonl");
}
raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
rnp = rnp->parent;
} while (rnp != NULL && !(rnp->qsmaskinit & mask));
local_irq_restore(flags);
mutex_unlock(&rsp->onoff_mutex);
}
static void __cpuinit rcu_prepare_cpu(int cpu)
{
struct rcu_state *rsp;
for_each_rcu_flavor(rsp)
rcu_init_percpu_data(cpu, rsp,
strcmp(rsp->name, "rcu_preempt") == 0);
}
/*
* Handle CPU online/offline notification events.
*/
static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
struct rcu_node *rnp = rdp->mynode;
struct rcu_state *rsp;
trace_rcu_utilization("Start CPU hotplug");
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
rcu_prepare_cpu(cpu);
rcu_prepare_kthreads(cpu);
break;
case CPU_ONLINE:
case CPU_DOWN_FAILED:
rcu_boost_kthread_setaffinity(rnp, -1);
break;
case CPU_DOWN_PREPARE:
rcu_boost_kthread_setaffinity(rnp, cpu);
break;
case CPU_DYING:
case CPU_DYING_FROZEN:
/*
* The whole machine is "stopped" except this CPU, so we can
* touch any data without introducing corruption. We send the
* dying CPU's callbacks to an arbitrarily chosen online CPU.
*/
for_each_rcu_flavor(rsp)
rcu_cleanup_dying_cpu(rsp);
rcu_cleanup_after_idle(cpu);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
for_each_rcu_flavor(rsp)
rcu_cleanup_dead_cpu(cpu, rsp);
break;
default:
break;
}
trace_rcu_utilization("End CPU hotplug");
return NOTIFY_OK;
}
/*
* Spawn the kthread that handles this RCU flavor's grace periods.
*/
static int __init rcu_spawn_gp_kthread(void)
{
unsigned long flags;
struct rcu_node *rnp;
struct rcu_state *rsp;
struct task_struct *t;
for_each_rcu_flavor(rsp) {
t = kthread_run(rcu_gp_kthread, rsp, rsp->name);
BUG_ON(IS_ERR(t));
rnp = rcu_get_root(rsp);
raw_spin_lock_irqsave(&rnp->lock, flags);
rsp->gp_kthread = t;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
return 0;
}
early_initcall(rcu_spawn_gp_kthread);
/*
* This function is invoked towards the end of the scheduler's initialization
* process. Before this is called, the idle task might contain
* RCU read-side critical sections (during which time, this idle
* task is booting the system). After this function is called, the
* idle tasks are prohibited from containing RCU read-side critical
* sections. This function also enables RCU lockdep checking.
*/
void rcu_scheduler_starting(void)
{
WARN_ON(num_online_cpus() != 1);
WARN_ON(nr_context_switches() > 0);
rcu_scheduler_active = 1;
}
/*
* Compute the per-level fanout, either using the exact fanout specified
* or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
*/
#ifdef CONFIG_RCU_FANOUT_EXACT
static void __init rcu_init_levelspread(struct rcu_state *rsp)
{
int i;
for (i = rcu_num_lvls - 1; i > 0; i--)
rsp->levelspread[i] = CONFIG_RCU_FANOUT;
rsp->levelspread[0] = rcu_fanout_leaf;
}
#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
static void __init rcu_init_levelspread(struct rcu_state *rsp)
{
int ccur;
int cprv;
int i;
cprv = nr_cpu_ids;
for (i = rcu_num_lvls - 1; i >= 0; i--) {
ccur = rsp->levelcnt[i];
rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
cprv = ccur;
}
}
#endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
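/*
 * Worked example for the balanced case: with nr_cpu_ids == 6 and
 * levelcnt[] == {1, 2}, the leaf pass computes levelspread[1] ==
 * (6 + 2 - 1) / 2 == 3 (each leaf covers up to three CPUs) and the root
 * pass computes levelspread[0] == (2 + 1 - 1) / 1 == 2 (the root covers
 * both leaves). The expression is simply a ceiling division.
 */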
/*
* Helper function for rcu_init() that initializes one rcu_state structure.
*/
static void __init rcu_init_one(struct rcu_state *rsp,
struct rcu_data __percpu *rda)
{
static char *buf[] = { "rcu_node_0",
"rcu_node_1",
"rcu_node_2",
"rcu_node_3" }; /* Match MAX_RCU_LVLS */
static char *fqs[] = { "rcu_node_fqs_0",
"rcu_node_fqs_1",
"rcu_node_fqs_2",
"rcu_node_fqs_3" }; /* Match MAX_RCU_LVLS */
int cpustride = 1;
int i;
int j;
struct rcu_node *rnp;
BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
/* Initialize the level-tracking arrays. */
for (i = 0; i < rcu_num_lvls; i++)
rsp->levelcnt[i] = num_rcu_lvl[i];
for (i = 1; i < rcu_num_lvls; i++)
rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
rcu_init_levelspread(rsp);
/* Initialize the elements themselves, starting from the leaves. */
for (i = rcu_num_lvls - 1; i >= 0; i--) {
cpustride *= rsp->levelspread[i];
rnp = rsp->level[i];
for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
raw_spin_lock_init(&rnp->lock);
lockdep_set_class_and_name(&rnp->lock,
&rcu_node_class[i], buf[i]);
raw_spin_lock_init(&rnp->fqslock);
lockdep_set_class_and_name(&rnp->fqslock,
&rcu_fqs_class[i], fqs[i]);
rnp->gpnum = rsp->gpnum;
rnp->completed = rsp->completed;
rnp->qsmask = 0;
rnp->qsmaskinit = 0;
rnp->grplo = j * cpustride;
rnp->grphi = (j + 1) * cpustride - 1;
if (rnp->grphi >= NR_CPUS)
rnp->grphi = NR_CPUS - 1;
if (i == 0) {
rnp->grpnum = 0;
rnp->grpmask = 0;
rnp->parent = NULL;
} else {
rnp->grpnum = j % rsp->levelspread[i - 1];
rnp->grpmask = 1UL << rnp->grpnum;
rnp->parent = rsp->level[i - 1] +
j / rsp->levelspread[i - 1];
}
rnp->level = i;
INIT_LIST_HEAD(&rnp->blkd_tasks);
}
}
rsp->rda = rda;
init_waitqueue_head(&rsp->gp_wq);
rnp = rsp->level[rcu_num_lvls - 1];
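/* Attach each possible CPU to the first leaf whose ->grphi covers it;
 * the leaves were laid out above in increasing CPU order, so a single
 * forward walk of the leaf level suffices.
 */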
for_each_possible_cpu(i) {
while (i > rnp->grphi)
rnp++;
per_cpu_ptr(rsp->rda, i)->mynode = rnp;
rcu_boot_init_percpu_data(i, rsp);
}
list_add(&rsp->flavors, &rcu_struct_flavors);
}
/*
* Compute the rcu_node tree geometry from kernel parameters. This cannot
* replace the definitions in rcutree.h because those are needed to size
* the ->node array in the rcu_state structure.
*/
static void __init rcu_init_geometry(void)
{
int i;
int j;
int n = nr_cpu_ids;
int rcu_capacity[MAX_RCU_LVLS + 1];
/* If the compile-time values are accurate, just leave. */
if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF &&
nr_cpu_ids == NR_CPUS)
return;
/*
* Compute the number of nodes that can be handled by an rcu_node tree
* with the given number of levels. Setting rcu_capacity[0] makes
* some of the arithmetic easier.
*/
rcu_capacity[0] = 1;
rcu_capacity[1] = rcu_fanout_leaf;
for (i = 2; i <= MAX_RCU_LVLS; i++)
rcu_capacity[i] = rcu_capacity[i - 1] * CONFIG_RCU_FANOUT;
/*
* The boot-time rcu_fanout_leaf parameter is only permitted
* to increase the leaf-level fanout, not decrease it. Of course,
* the leaf-level fanout cannot exceed the number of bits in
* the rcu_node masks. Finally, the tree must be able to accommodate
* the configured number of CPUs. Complain and fall back to the
* compile-time values if these limits are exceeded.
*/
if (rcu_fanout_leaf < CONFIG_RCU_FANOUT_LEAF ||
rcu_fanout_leaf > sizeof(unsigned long) * 8 ||
n > rcu_capacity[MAX_RCU_LVLS]) {
WARN_ON(1);
return;
}
/* Calculate the number of rcu_nodes at each level of the tree. */
for (i = 1; i <= MAX_RCU_LVLS; i++)
if (n <= rcu_capacity[i]) {
for (j = 0; j <= i; j++)
num_rcu_lvl[j] =
DIV_ROUND_UP(n, rcu_capacity[i - j]);
rcu_num_lvls = i;
for (j = i + 1; j <= MAX_RCU_LVLS; j++)
num_rcu_lvl[j] = 0;
break;
}
/* Calculate the total number of rcu_node structures. */
rcu_num_nodes = 0;
for (i = 0; i <= MAX_RCU_LVLS; i++)
rcu_num_nodes += num_rcu_lvl[i];
rcu_num_nodes -= n;
}
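/*
 * Worked example (reachable only when boot parameters changed the
 * defaults): with nr_cpu_ids == 64, rcu_fanout_leaf == 16, and
 * CONFIG_RCU_FANOUT == 64, the capacities are {1, 16, 1024}, so two
 * levels suffice: num_rcu_lvl[] becomes {1, 4, 64} and rcu_num_nodes ==
 * 1 + 4 + 64 - 64 == 5, one root plus four leaves. The per-CPU count is
 * subtracted because the lowest level consists of rcu_data structures,
 * not rcu_node structures.
 */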
void __init rcu_init(void)
{
int cpu;
rcu_bootup_announce();
rcu_init_geometry();
rcu_init_one(&rcu_sched_state, &rcu_sched_data);
rcu_init_one(&rcu_bh_state, &rcu_bh_data);
__rcu_init_preempt();
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
/*
* We don't need protection against CPU-hotplug here because
* this is called early in boot, before either interrupts
* or the scheduler are operational.
*/
cpu_notifier(rcu_cpu_notify, 0);
for_each_online_cpu(cpu)
rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
check_cpu_stall_init();
}
#include "rcutree_plugin.h"
| gpl-2.0 |
tplavcic/percona-xtrabackup | storage/innobase/xtrabackup/src/libarchive/libarchive/test/test_read_format_isorr_bz2.c | 30 | 9027 | /*-
* Copyright (c) 2003-2007 Tim Kientzle
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test.h"
__FBSDID("$FreeBSD: head/lib/libarchive/test/test_read_format_isorr_bz2.c 201247 2009-12-30 05:59:21Z kientzle $");
/*
PLEASE use old cdrtools; the mkisofs version is 2.01.
That version of mkisofs generates an incorrect "SL" System Use Entry for RRIP.
Execute the following command to rebuild the data for this program:
tail -n +34 test_read_format_isorr_bz2.c | /bin/sh
rm -rf /tmp/iso
mkdir /tmp/iso
mkdir /tmp/iso/dir
echo "hello" >/tmp/iso/file
dd if=/dev/zero count=1 bs=12345678 >>/tmp/iso/file
ln /tmp/iso/file /tmp/iso/hardlink
(cd /tmp/iso; ln -s file symlink)
(cd /tmp/iso; ln -s /tmp/ symlink2)
(cd /tmp/iso; ln -s /tmp/../ symlink3)
(cd /tmp/iso; ln -s .././../tmp/ symlink4)
(cd /tmp/iso; ln -s .///file symlink5)
(cd /tmp/iso; ln -s /tmp//../ symlink6)
TZ=utc touch -afhm -t 197001020000.01 /tmp/iso /tmp/iso/file /tmp/iso/dir
TZ=utc touch -afhm -t 197001030000.02 /tmp/iso/symlink /tmp/iso/symlink5
F=test_read_format_iso_rockridge.iso.Z
mkhybrid -R -uid 1 -gid 2 /tmp/iso | compress > $F
uuencode $F $F > $F.uu
exit 1
*/
DEFINE_TEST(test_read_format_isorr_bz2)
{
const char *refname = "test_read_format_iso_rockridge.iso.Z";
struct archive_entry *ae;
struct archive *a;
const void *p;
size_t size;
off_t offset;
int i;
extract_reference_file(refname);
assert((a = archive_read_new()) != NULL);
assertEqualInt(0, archive_read_support_compression_all(a));
assertEqualInt(0, archive_read_support_format_all(a));
assertEqualInt(ARCHIVE_OK,
archive_read_open_filename(a, refname, 10240));
/* Retrieve each of the 10 entries on the ISO image and
* verify that each one is what we expect. */
for (i = 0; i < 10; ++i) {
assertEqualInt(0, archive_read_next_header(a, &ae));
if (strcmp(".", archive_entry_pathname(ae)) == 0) {
/* '.' root directory. */
assertEqualInt(AE_IFDIR, archive_entry_filetype(ae));
assertEqualInt(2048, archive_entry_size(ae));
/* Now, we read timestamp recorded by RRIP "TF". */
assertEqualInt(86401, archive_entry_mtime(ae));
assertEqualInt(0, archive_entry_mtime_nsec(ae));
/* Now, we read links recorded by RRIP "PX". */
assertEqualInt(3, archive_entry_stat(ae)->st_nlink);
assertEqualInt(1, archive_entry_uid(ae));
assertEqualIntA(a, ARCHIVE_EOF,
archive_read_data_block(a, &p, &size, &offset));
assertEqualInt((int)size, 0);
} else if (strcmp("dir", archive_entry_pathname(ae)) == 0) {
/* A directory. */
assertEqualString("dir", archive_entry_pathname(ae));
assertEqualInt(AE_IFDIR, archive_entry_filetype(ae));
assertEqualInt(2048, archive_entry_size(ae));
assertEqualInt(86401, archive_entry_mtime(ae));
assertEqualInt(86401, archive_entry_atime(ae));
assertEqualInt(2, archive_entry_stat(ae)->st_nlink);
assertEqualInt(1, archive_entry_uid(ae));
assertEqualInt(2, archive_entry_gid(ae));
} else if (strcmp("hardlink", archive_entry_pathname(ae)) == 0) {
/* A regular file. */
assertEqualString("hardlink", archive_entry_pathname(ae));
assertEqualInt(AE_IFREG, archive_entry_filetype(ae));
assertEqualInt(12345684, archive_entry_size(ae));
assertEqualInt(0,
archive_read_data_block(a, &p, &size, &offset));
assertEqualInt(0, offset);
assertEqualMem(p, "hello\n", 6);
assertEqualInt(86401, archive_entry_mtime(ae));
assertEqualInt(86401, archive_entry_atime(ae));
assertEqualInt(2, archive_entry_stat(ae)->st_nlink);
assertEqualInt(1, archive_entry_uid(ae));
assertEqualInt(2, archive_entry_gid(ae));
} else if (strcmp("file", archive_entry_pathname(ae)) == 0) {
/* A hardlink to the regular file. */
/* Note: If "hardlink" gets returned before "file",
* then "hardlink" will get returned as a regular file
* and "file" will get returned as the hardlink.
* This test should tolerate that, since it's a
* perfectly permissible thing for libarchive to do. */
assertEqualString("file", archive_entry_pathname(ae));
assertEqualInt(AE_IFREG, archive_entry_filetype(ae));
assertEqualString("hardlink", archive_entry_hardlink(ae));
assertEqualInt(0, archive_entry_size_is_set(ae));
assertEqualInt(0, archive_entry_size(ae));
assertEqualInt(86401, archive_entry_mtime(ae));
assertEqualInt(86401, archive_entry_atime(ae));
assertEqualInt(2, archive_entry_stat(ae)->st_nlink);
assertEqualInt(1, archive_entry_uid(ae));
assertEqualInt(2, archive_entry_gid(ae));
} else if (strcmp("symlink", archive_entry_pathname(ae)) == 0) {
/* A symlink to the regular file. */
assertEqualInt(AE_IFLNK, archive_entry_filetype(ae));
assertEqualString("file", archive_entry_symlink(ae));
assertEqualInt(0, archive_entry_size(ae));
assertEqualInt(172802, archive_entry_mtime(ae));
assertEqualInt(172802, archive_entry_atime(ae));
assertEqualInt(1, archive_entry_stat(ae)->st_nlink);
assertEqualInt(1, archive_entry_uid(ae));
assertEqualInt(2, archive_entry_gid(ae));
} else if (strcmp("symlink2", archive_entry_pathname(ae)) == 0) {
/* A symlink to /tmp (an absolute path) */
assertEqualInt(AE_IFLNK, archive_entry_filetype(ae));
assertEqualString("/tmp", archive_entry_symlink(ae));
assertEqualInt(0, archive_entry_size(ae));
assertEqualInt(1, archive_entry_stat(ae)->st_nlink);
assertEqualInt(1, archive_entry_uid(ae));
assertEqualInt(2, archive_entry_gid(ae));
} else if (strcmp("symlink3", archive_entry_pathname(ae)) == 0) {
/* A symlink to /tmp/.. (with a ".." component) */
assertEqualInt(AE_IFLNK, archive_entry_filetype(ae));
assertEqualString("/tmp/..", archive_entry_symlink(ae));
assertEqualInt(0, archive_entry_size(ae));
assertEqualInt(1, archive_entry_stat(ae)->st_nlink);
assertEqualInt(1, archive_entry_uid(ae));
assertEqualInt(2, archive_entry_gid(ae));
} else if (strcmp("symlink4", archive_entry_pathname(ae)) == 0) {
/* A symlink to a path with ".." and "." components */
assertEqualInt(AE_IFLNK, archive_entry_filetype(ae));
assertEqualString(".././../tmp",
archive_entry_symlink(ae));
assertEqualInt(0, archive_entry_size(ae));
assertEqualInt(1, archive_entry_stat(ae)->st_nlink);
assertEqualInt(1, archive_entry_uid(ae));
assertEqualInt(2, archive_entry_gid(ae));
} else if (strcmp("symlink5", archive_entry_pathname(ae)) == 0) {
/* A symlink to the regular file with "/" components. */
assertEqualInt(AE_IFLNK, archive_entry_filetype(ae));
assertEqualString(".///file", archive_entry_symlink(ae));
assertEqualInt(0, archive_entry_size(ae));
assertEqualInt(172802, archive_entry_mtime(ae));
assertEqualInt(172802, archive_entry_atime(ae));
assertEqualInt(1, archive_entry_stat(ae)->st_nlink);
assertEqualInt(1, archive_entry_uid(ae));
assertEqualInt(2, archive_entry_gid(ae));
} else if (strcmp("symlink6", archive_entry_pathname(ae)) == 0) {
/* A symlink to /tmp//..
* (with "/" and ".." components) */
assertEqualInt(AE_IFLNK, archive_entry_filetype(ae));
assertEqualString("/tmp//..", archive_entry_symlink(ae));
assertEqualInt(0, archive_entry_size(ae));
assertEqualInt(1, archive_entry_stat(ae)->st_nlink);
assertEqualInt(1, archive_entry_uid(ae));
assertEqualInt(2, archive_entry_gid(ae));
} else {
failure("Saw a file that shouldn't have been there");
assertEqualString(archive_entry_pathname(ae), "");
}
}
/* End of archive. */
assertEqualInt(ARCHIVE_EOF, archive_read_next_header(a, &ae));
/* Verify archive format. */
assertEqualInt(archive_compression(a), ARCHIVE_COMPRESSION_COMPRESS);
assertEqualInt(archive_format(a), ARCHIVE_FORMAT_ISO9660_ROCKRIDGE);
/* Close the archive. */
assertEqualInt(0, archive_read_close(a));
assertEqualInt(0, archive_read_finish(a));
}
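/*
 * For orientation, a minimal stand-alone read loop using this era's
 * libarchive API looks roughly like the sketch below (assumptions: a
 * plain C program linked against libarchive 2.x; error handling elided):
 *
 * #include <archive.h>
 * #include <archive_entry.h>
 * #include <stdio.h>
 *
 * static void list_archive(const char *path)
 * {
 * struct archive *a = archive_read_new();
 * struct archive_entry *ae;
 *
 * archive_read_support_compression_all(a);
 * archive_read_support_format_all(a);
 * archive_read_open_filename(a, path, 10240);
 * while (archive_read_next_header(a, &ae) == ARCHIVE_OK)
 * printf("%s\n", archive_entry_pathname(ae));
 * archive_read_close(a);
 * archive_read_finish(a);
 * }
 *
 * The test above uses exactly these calls, plus archive_read_data_block()
 * to inspect file contents and the assert*() wrappers from test.h.
 */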
| gpl-2.0 |
neuros/linux-davinci-2.6 | drivers/usb/gadget/goku_udc.c | 30 | 47736 | /*
* Toshiba TC86C001 ("Goku-S") USB Device Controller driver
*
* Copyright (C) 2000-2002 Lineo
* by Stuart Lynne, Tom Rushworth, and Bruce Balden
* Copyright (C) 2002 Toshiba Corporation
* Copyright (C) 2003 MontaVista Software (source@mvista.com)
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
/*
* This device has ep0 and three semi-configurable bulk/interrupt endpoints.
*
* - Endpoint numbering is fixed: ep{1,2,3}-bulk
* - Gadget drivers can choose ep maxpacket (8/16/32/64)
* - Gadget drivers can choose direction (IN, OUT)
* - DMA works with ep1 (OUT transfers) and ep2 (IN transfers).
*/
// #define VERBOSE /* extra debug messages (success too) */
// #define USB_TRACE /* packet-level success messages */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb_gadget.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>
#include "goku_udc.h"
#define DRIVER_DESC "TC86C001 USB Device Controller"
#define DRIVER_VERSION "30-Oct 2003"
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
static const char driver_name [] = "goku_udc";
static const char driver_desc [] = DRIVER_DESC;
MODULE_AUTHOR("source@mvista.com");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
/*
* IN dma behaves ok under testing, though the IN-dma abort paths don't
* seem to behave quite as expected. Used by default.
*
* OUT dma documents design problems handling the common "short packet"
* transfer termination policy; it couldn't be enabled by default, even
* if the OUT-dma abort problems had a resolution.
*/
static unsigned use_dma = 1;
#if 0
//#include <linux/moduleparam.h>
/* "modprobe goku_udc use_dma=1" etc
* 0 to disable dma
* 1 to use IN dma only (normal operation)
* 2 to use IN and OUT dma
*/
module_param(use_dma, uint, S_IRUGO);
#endif
/*-------------------------------------------------------------------------*/
static void nuke(struct goku_ep *, int status);
static inline void
command(struct goku_udc_regs __iomem *regs, int command, unsigned epnum)
{
writel(COMMAND_EP(epnum) | command, &regs->Command);
udelay(300);
}
static int
goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
struct goku_udc *dev;
struct goku_ep *ep;
u32 mode;
u16 max;
unsigned long flags;
ep = container_of(_ep, struct goku_ep, ep);
if (!_ep || !desc || ep->desc
|| desc->bDescriptorType != USB_DT_ENDPOINT)
return -EINVAL;
dev = ep->dev;
if (ep == &dev->ep[0])
return -EINVAL;
if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
if (ep->num != (desc->bEndpointAddress & 0x0f))
return -EINVAL;
switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
break;
default:
return -EINVAL;
}
if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK)
!= EPxSTATUS_EP_INVALID)
return -EBUSY;
/* enabling the no-toggle interrupt mode would need an api hook */
mode = 0;
max = le16_to_cpu(get_unaligned(&desc->wMaxPacketSize));
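/* The fall-through below encodes maxpacket in bits 4:3 of the mode
 * register: 8 -> 0x00, 16 -> 0x08, 32 -> 0x10, 64 -> 0x18 (mode is
 * bumped once per size step, then shifted left by three).
 */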
switch (max) {
case 64: mode++;
case 32: mode++;
case 16: mode++;
case 8: mode <<= 3;
break;
default:
return -EINVAL;
}
mode |= 2 << 1; /* bulk, or intr-with-toggle */
/* ep1/ep2 dma direction is chosen early; it works in the other
* direction, with pio. be cautious with out-dma.
*/
ep->is_in = (USB_DIR_IN & desc->bEndpointAddress) != 0;
if (ep->is_in) {
mode |= 1;
ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
} else {
ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT);
if (ep->dma)
DBG(dev, "%s out-dma hides short packets\n",
ep->ep.name);
}
spin_lock_irqsave(&ep->dev->lock, flags);
/* ep1 and ep2 can do double buffering and/or dma */
if (ep->num < 3) {
struct goku_udc_regs __iomem *regs = ep->dev->regs;
u32 tmp;
/* double buffer except (for now) with pio in */
tmp = ((ep->dma || !ep->is_in)
? 0x10 /* double buffered */
: 0x11 /* single buffer */
) << ep->num;
tmp |= readl(&regs->EPxSingle);
writel(tmp, &regs->EPxSingle);
tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num;
tmp |= readl(&regs->EPxBCS);
writel(tmp, &regs->EPxBCS);
}
writel(mode, ep->reg_mode);
command(ep->dev->regs, COMMAND_RESET, ep->num);
ep->ep.maxpacket = max;
ep->stopped = 0;
ep->desc = desc;
spin_unlock_irqrestore(&ep->dev->lock, flags);
DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name,
ep->is_in ? "IN" : "OUT",
ep->dma ? "dma" : "pio",
max);
return 0;
}
static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep)
{
struct goku_udc *dev = ep->dev;
if (regs) {
command(regs, COMMAND_INVALID, ep->num);
if (ep->num) {
if (ep->num == UDC_MSTWR_ENDPOINT)
dev->int_enable &= ~(INT_MSTWREND
|INT_MSTWRTMOUT);
else if (ep->num == UDC_MSTRD_ENDPOINT)
dev->int_enable &= ~INT_MSTRDEND;
dev->int_enable &= ~INT_EPxDATASET (ep->num);
} else
dev->int_enable &= ~INT_EP0;
writel(dev->int_enable, &regs->int_enable);
readl(&regs->int_enable);
if (ep->num < 3) {
struct goku_udc_regs __iomem *r = ep->dev->regs;
u32 tmp;
tmp = readl(&r->EPxSingle);
tmp &= ~(0x11 << ep->num);
writel(tmp, &r->EPxSingle);
tmp = readl(&r->EPxBCS);
tmp &= ~(0x11 << ep->num);
writel(tmp, &r->EPxBCS);
}
/* reset dma in case we're still using it */
if (ep->dma) {
u32 master;
master = readl(&regs->dma_master) & MST_RW_BITS;
if (ep->num == UDC_MSTWR_ENDPOINT) {
master &= ~MST_W_BITS;
master |= MST_WR_RESET;
} else {
master &= ~MST_R_BITS;
master |= MST_RD_RESET;
}
writel(master, &regs->dma_master);
}
}
ep->ep.maxpacket = MAX_FIFO_SIZE;
ep->desc = NULL;
ep->stopped = 1;
ep->irqs = 0;
ep->dma = 0;
}
static int goku_ep_disable(struct usb_ep *_ep)
{
struct goku_ep *ep;
struct goku_udc *dev;
unsigned long flags;
ep = container_of(_ep, struct goku_ep, ep);
if (!_ep || !ep->desc)
return -ENODEV;
dev = ep->dev;
if (dev->ep0state == EP0_SUSPEND)
return -EBUSY;
VDBG(dev, "disable %s\n", _ep->name);
spin_lock_irqsave(&dev->lock, flags);
nuke(ep, -ESHUTDOWN);
ep_reset(dev->regs, ep);
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
/*-------------------------------------------------------------------------*/
static struct usb_request *
goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
struct goku_request *req;
if (!_ep)
return NULL;
req = kzalloc(sizeof *req, gfp_flags);
if (!req)
return NULL;
req->req.dma = DMA_ADDR_INVALID;
INIT_LIST_HEAD(&req->queue);
return &req->req;
}
static void
goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
struct goku_request *req;
if (!_ep || !_req)
return;
req = container_of(_req, struct goku_request, req);
WARN_ON(!list_empty(&req->queue));
kfree(req);
}
/*-------------------------------------------------------------------------*/
static void
done(struct goku_ep *ep, struct goku_request *req, int status)
{
struct goku_udc *dev;
unsigned stopped = ep->stopped;
list_del_init(&req->queue);
if (likely(req->req.status == -EINPROGRESS))
req->req.status = status;
else
status = req->req.status;
dev = ep->dev;
if (req->mapped) {
pci_unmap_single(dev->pdev, req->req.dma, req->req.length,
ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
req->req.dma = DMA_ADDR_INVALID;
req->mapped = 0;
}
#ifndef USB_TRACE
if (status && status != -ESHUTDOWN)
#endif
VDBG(dev, "complete %s req %p stat %d len %u/%u\n",
ep->ep.name, &req->req, status,
req->req.actual, req->req.length);
/* don't modify queue heads during completion callback */
ep->stopped = 1;
spin_unlock(&dev->lock);
req->req.complete(&ep->ep, &req->req);
spin_lock(&dev->lock);
ep->stopped = stopped;
}
/*-------------------------------------------------------------------------*/
static inline int
write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max)
{
unsigned length, count;
length = min(req->req.length - req->req.actual, max);
req->req.actual += length;
count = length;
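/* The FIFO register appears to be byte-wide (the read side casts each
 * readl() to u8), so each 32-bit write below moves exactly one byte.
 */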
while (likely(count--))
writel(*buf++, fifo);
return length;
}
// return: 0 = still running, 1 = completed, negative = errno
static int write_fifo(struct goku_ep *ep, struct goku_request *req)
{
struct goku_udc *dev = ep->dev;
u32 tmp;
u8 *buf;
unsigned count;
int is_last;
tmp = readl(&dev->regs->DataSet);
buf = req->req.buf + req->req.actual;
prefetch(buf);
dev = ep->dev;
if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN))
return -EL2HLT;
/* NOTE: just single-buffered PIO-IN for now. */
if (unlikely((tmp & DATASET_A(ep->num)) != 0))
return 0;
/* clear our "packet available" irq */
if (ep->num != 0)
writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status);
count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket);
/* last packet often short (sometimes a zlp, especially on ep0) */
if (unlikely(count != ep->ep.maxpacket)) {
writel(~(1<<ep->num), &dev->regs->EOP);
if (ep->num == 0) {
dev->ep[0].stopped = 1;
dev->ep0state = EP0_STATUS;
}
is_last = 1;
} else {
if (likely(req->req.length != req->req.actual)
|| req->req.zero)
is_last = 0;
else
is_last = 1;
}
#if 0 /* printk seemed to trash is_last...*/
//#ifdef USB_TRACE
VDBG(dev, "wrote %s %u bytes%s IN %u left %p\n",
ep->ep.name, count, is_last ? "/last" : "",
req->req.length - req->req.actual, req);
#endif
/* requests complete when all IN data is in the FIFO,
* or sometimes later, if a zlp was needed.
*/
if (is_last) {
done(ep, req, 0);
return 1;
}
return 0;
}
static int read_fifo(struct goku_ep *ep, struct goku_request *req)
{
struct goku_udc_regs __iomem *regs;
u32 size, set;
u8 *buf;
unsigned bufferspace, is_short, dbuff;
regs = ep->dev->regs;
top:
buf = req->req.buf + req->req.actual;
prefetchw(buf);
if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT))
return -EL2HLT;
dbuff = (ep->num == 1 || ep->num == 2);
do {
/* ack dataset irq matching the status we'll handle */
if (ep->num != 0)
writel(~INT_EPxDATASET(ep->num), &regs->int_status);
set = readl(&regs->DataSet) & DATASET_AB(ep->num);
size = readl(&regs->EPxSizeLA[ep->num]);
bufferspace = req->req.length - req->req.actual;
/* usually do nothing without an OUT packet */
if (likely(ep->num != 0 || bufferspace != 0)) {
if (unlikely(set == 0))
break;
/* use ep1/ep2 double-buffering for OUT */
if (!(size & PACKET_ACTIVE))
size = readl(&regs->EPxSizeLB[ep->num]);
if (!(size & PACKET_ACTIVE)) /* "can't happen" */
break;
size &= DATASIZE; /* EPxSizeH == 0 */
/* ep0out no-out-data case for set_config, etc */
} else
size = 0;
/* read all bytes from this packet */
req->req.actual += size;
is_short = (size < ep->ep.maxpacket);
#ifdef USB_TRACE
VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n",
ep->ep.name, size, is_short ? "/S" : "",
req, req->req.actual, req->req.length);
#endif
while (likely(size-- != 0)) {
u8 byte = (u8) readl(ep->reg_fifo);
if (unlikely(bufferspace == 0)) {
/* this happens when the driver's buffer
* is smaller than what the host sent.
* discard the extra data in this packet.
*/
if (req->req.status != -EOVERFLOW)
DBG(ep->dev, "%s overflow %u\n",
ep->ep.name, size);
req->req.status = -EOVERFLOW;
} else {
*buf++ = byte;
bufferspace--;
}
}
/* completion */
if (unlikely(is_short || req->req.actual == req->req.length)) {
if (unlikely(ep->num == 0)) {
/* non-control endpoints now usable? */
if (ep->dev->req_config)
writel(ep->dev->configured
? USBSTATE_CONFIGURED
: 0,
&regs->UsbState);
/* ep0out status stage */
writel(~(1<<0), &regs->EOP);
ep->stopped = 1;
ep->dev->ep0state = EP0_STATUS;
}
done(ep, req, 0);
/* empty the second buffer asap */
if (dbuff && !list_empty(&ep->queue)) {
req = list_entry(ep->queue.next,
struct goku_request, queue);
goto top;
}
return 1;
}
} while (dbuff);
return 0;
}
static inline void
pio_irq_enable(struct goku_udc *dev,
struct goku_udc_regs __iomem *regs, int epnum)
{
dev->int_enable |= INT_EPxDATASET (epnum);
writel(dev->int_enable, &regs->int_enable);
/* write may still be posted */
}
static inline void
pio_irq_disable(struct goku_udc *dev,
struct goku_udc_regs __iomem *regs, int epnum)
{
dev->int_enable &= ~INT_EPxDATASET (epnum);
writel(dev->int_enable, &regs->int_enable);
/* write may still be posted */
}
static inline void
pio_advance(struct goku_ep *ep)
{
struct goku_request *req;
if (unlikely(list_empty (&ep->queue)))
return;
req = list_entry(ep->queue.next, struct goku_request, queue);
(ep->is_in ? write_fifo : read_fifo)(ep, req);
}
/*-------------------------------------------------------------------------*/
// return: 0 = q running, 1 = q stopped, negative = errno
static int start_dma(struct goku_ep *ep, struct goku_request *req)
{
struct goku_udc_regs __iomem *regs = ep->dev->regs;
u32 master;
u32 start = req->req.dma;
u32 end = start + req->req.length - 1;
master = readl(&regs->dma_master) & MST_RW_BITS;
/* re-init the bits affecting IN dma; careful with zlps */
if (likely(ep->is_in)) {
if (unlikely(master & MST_RD_ENA)) {
DBG (ep->dev, "start, IN active dma %03x!!\n",
master);
// return -EL2HLT;
}
writel(end, &regs->in_dma_end);
writel(start, &regs->in_dma_start);
master &= ~MST_R_BITS;
if (unlikely(req->req.length == 0))
master = MST_RD_ENA | MST_RD_EOPB;
else if ((req->req.length % ep->ep.maxpacket) != 0
|| req->req.zero)
master = MST_RD_ENA | MST_EOPB_ENA;
else
master = MST_RD_ENA | MST_EOPB_DIS;
ep->dev->int_enable |= INT_MSTRDEND;
/* Goku DMA-OUT merges short packets, which plays poorly with
* protocols where short packets mark the transfer boundaries.
* The chip supports a nonstandard policy with INT_MSTWRTMOUT,
* ending transfers after 3 SOFs; we don't turn it on.
*/
} else {
if (unlikely(master & MST_WR_ENA)) {
DBG (ep->dev, "start, OUT active dma %03x!!\n",
master);
// return -EL2HLT;
}
writel(end, &regs->out_dma_end);
writel(start, &regs->out_dma_start);
master &= ~MST_W_BITS;
master |= MST_WR_ENA | MST_TIMEOUT_DIS;
ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT;
}
writel(master, &regs->dma_master);
writel(ep->dev->int_enable, &regs->int_enable);
return 0;
}
static void dma_advance(struct goku_udc *dev, struct goku_ep *ep)
{
struct goku_request *req;
struct goku_udc_regs __iomem *regs = ep->dev->regs;
u32 master;
master = readl(&regs->dma_master);
if (unlikely(list_empty(&ep->queue))) {
stop:
if (ep->is_in)
dev->int_enable &= ~INT_MSTRDEND;
else
dev->int_enable &= ~(INT_MSTWREND|INT_MSTWRTMOUT);
writel(dev->int_enable, &regs->int_enable);
return;
}
req = list_entry(ep->queue.next, struct goku_request, queue);
/* normal hw dma completion (not abort) */
if (likely(ep->is_in)) {
if (unlikely(master & MST_RD_ENA))
return;
req->req.actual = readl(&regs->in_dma_current);
} else {
if (unlikely(master & MST_WR_ENA))
return;
/* hardware merges short packets, and also hides packet
* overruns. a partial packet MAY be in the fifo here.
*/
req->req.actual = readl(&regs->out_dma_current);
}
req->req.actual -= req->req.dma;
req->req.actual++;
#ifdef USB_TRACE
VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n",
ep->ep.name, ep->is_in ? "IN" : "OUT",
req->req.actual, req->req.length, req);
#endif
done(ep, req, 0);
if (list_empty(&ep->queue))
goto stop;
req = list_entry(ep->queue.next, struct goku_request, queue);
(void) start_dma(ep, req);
}
static void abort_dma(struct goku_ep *ep, int status)
{
struct goku_udc_regs __iomem *regs = ep->dev->regs;
struct goku_request *req;
u32 curr, master;
/* NAK future host requests, hoping the implicit delay lets the
* dma engine finish reading (or writing) its latest packet and
* empty the dma buffer (up to 16 bytes).
*
* This avoids needing to clean up a partial packet in the fifo;
* we can't do that for IN without side effects to HALT and TOGGLE.
*/
command(regs, COMMAND_FIFO_DISABLE, ep->num);
req = list_entry(ep->queue.next, struct goku_request, queue);
master = readl(&regs->dma_master) & MST_RW_BITS;
/* FIXME using these resets isn't usably documented. this may
* not work unless it's followed by disabling the endpoint.
*
* FIXME the OUT reset path doesn't even behave consistently.
*/
if (ep->is_in) {
if (unlikely((readl(&regs->dma_master) & MST_RD_ENA) == 0))
goto finished;
curr = readl(&regs->in_dma_current);
writel(curr, &regs->in_dma_end);
writel(curr, &regs->in_dma_start);
master &= ~MST_R_BITS;
master |= MST_RD_RESET;
writel(master, &regs->dma_master);
if (readl(&regs->dma_master) & MST_RD_ENA)
DBG(ep->dev, "IN dma active after reset!\n");
} else {
if (unlikely((readl(&regs->dma_master) & MST_WR_ENA) == 0))
goto finished;
curr = readl(&regs->out_dma_current);
writel(curr, &regs->out_dma_end);
writel(curr, &regs->out_dma_start);
master &= ~MST_W_BITS;
master |= MST_WR_RESET;
writel(master, &regs->dma_master);
if (readl(&regs->dma_master) & MST_WR_ENA)
DBG(ep->dev, "OUT dma active after reset!\n");
}
req->req.actual = (curr - req->req.dma) + 1;
req->req.status = status;
VDBG(ep->dev, "%s %s %s %d/%d\n", __FUNCTION__, ep->ep.name,
ep->is_in ? "IN" : "OUT",
req->req.actual, req->req.length);
command(regs, COMMAND_FIFO_ENABLE, ep->num);
return;
finished:
/* dma already completed; no abort needed */
command(regs, COMMAND_FIFO_ENABLE, ep->num);
req->req.actual = req->req.length;
req->req.status = 0;
}
/*-------------------------------------------------------------------------*/
static int
goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
struct goku_request *req;
struct goku_ep *ep;
struct goku_udc *dev;
unsigned long flags;
int status;
/* always require a cpu-view buffer so pio works */
req = container_of(_req, struct goku_request, req);
if (unlikely(!_req || !_req->complete
|| !_req->buf || !list_empty(&req->queue)))
return -EINVAL;
ep = container_of(_ep, struct goku_ep, ep);
if (unlikely(!_ep || (!ep->desc && ep->num != 0)))
return -EINVAL;
dev = ep->dev;
if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
return -ESHUTDOWN;
/* can't touch registers when suspended */
if (dev->ep0state == EP0_SUSPEND)
return -EBUSY;
/* set up dma mapping in case the caller didn't */
if (ep->dma && _req->dma == DMA_ADDR_INVALID) {
_req->dma = pci_map_single(dev->pdev, _req->buf, _req->length,
ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
req->mapped = 1;
}
#ifdef USB_TRACE
VDBG(dev, "%s queue req %p, len %u buf %p\n",
_ep->name, _req, _req->length, _req->buf);
#endif
spin_lock_irqsave(&dev->lock, flags);
_req->status = -EINPROGRESS;
_req->actual = 0;
/* for ep0 IN without premature status, zlp is required and
* writing EOP starts the status stage (OUT).
*/
if (unlikely(ep->num == 0 && ep->is_in))
_req->zero = 1;
/* kickstart this i/o queue? */
status = 0;
if (list_empty(&ep->queue) && likely(!ep->stopped)) {
/* dma: done after dma completion IRQ (or error)
* pio: done after last fifo operation
*/
if (ep->dma)
status = start_dma(ep, req);
else
status = (ep->is_in ? write_fifo : read_fifo)(ep, req);
if (unlikely(status != 0)) {
if (status > 0)
status = 0;
req = NULL;
}
} /* else pio or dma irq handler advances the queue. */
if (likely(req != 0))
list_add_tail(&req->queue, &ep->queue);
if (likely(!list_empty(&ep->queue))
&& likely(ep->num != 0)
&& !ep->dma
&& !(dev->int_enable & INT_EPxDATASET (ep->num)))
pio_irq_enable(dev, dev->regs, ep->num);
spin_unlock_irqrestore(&dev->lock, flags);
/* pci writes may still be posted */
return status;
}
/* dequeue ALL requests */
static void nuke(struct goku_ep *ep, int status)
{
struct goku_request *req;
ep->stopped = 1;
if (list_empty(&ep->queue))
return;
if (ep->dma)
abort_dma(ep, status);
while (!list_empty(&ep->queue)) {
req = list_entry(ep->queue.next, struct goku_request, queue);
done(ep, req, status);
}
}
/* dequeue JUST ONE request */
static int goku_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
struct goku_request *req;
struct goku_ep *ep;
struct goku_udc *dev;
unsigned long flags;
ep = container_of(_ep, struct goku_ep, ep);
if (!_ep || !_req || (!ep->desc && ep->num != 0))
return -EINVAL;
dev = ep->dev;
if (!dev->driver)
return -ESHUTDOWN;
/* we can't touch (dma) registers when suspended */
if (dev->ep0state == EP0_SUSPEND)
return -EBUSY;
VDBG(dev, "%s %s %s %s %p\n", __FUNCTION__, _ep->name,
ep->is_in ? "IN" : "OUT",
ep->dma ? "dma" : "pio",
_req);
spin_lock_irqsave(&dev->lock, flags);
/* make sure it's actually queued on this endpoint */
list_for_each_entry (req, &ep->queue, queue) {
if (&req->req == _req)
break;
}
if (&req->req != _req) {
spin_unlock_irqrestore (&dev->lock, flags);
return -EINVAL;
}
if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
abort_dma(ep, -ECONNRESET);
done(ep, req, -ECONNRESET);
dma_advance(dev, ep);
} else if (!list_empty(&req->queue))
done(ep, req, -ECONNRESET);
else
req = NULL;
spin_unlock_irqrestore(&dev->lock, flags);
return req ? 0 : -EOPNOTSUPP;
}
/*-------------------------------------------------------------------------*/
static void goku_clear_halt(struct goku_ep *ep)
{
// assert (ep->num !=0)
VDBG(ep->dev, "%s clear halt\n", ep->ep.name);
command(ep->dev->regs, COMMAND_SETDATA0, ep->num);
command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num);
if (ep->stopped) {
ep->stopped = 0;
if (ep->dma) {
struct goku_request *req;
if (list_empty(&ep->queue))
return;
req = list_entry(ep->queue.next, struct goku_request,
queue);
(void) start_dma(ep, req);
} else
pio_advance(ep);
}
}
static int goku_set_halt(struct usb_ep *_ep, int value)
{
struct goku_ep *ep;
unsigned long flags;
int retval = 0;
if (!_ep)
return -ENODEV;
ep = container_of (_ep, struct goku_ep, ep);
if (ep->num == 0) {
if (value) {
ep->dev->ep0state = EP0_STALL;
ep->dev->ep[0].stopped = 1;
} else
return -EINVAL;
/* don't change EPxSTATUS_EP_INVALID to READY */
} else if (!ep->desc) {
DBG(ep->dev, "%s %s inactive?\n", __FUNCTION__, ep->ep.name);
return -EINVAL;
}
spin_lock_irqsave(&ep->dev->lock, flags);
if (!list_empty(&ep->queue))
retval = -EAGAIN;
else if (ep->is_in && value
/* data in (either) packet buffer? */
&& (readl(&ep->dev->regs->DataSet)
& DATASET_AB(ep->num)))
retval = -EAGAIN;
else if (!value)
goku_clear_halt(ep);
else {
ep->stopped = 1;
VDBG(ep->dev, "%s set halt\n", ep->ep.name);
command(ep->dev->regs, COMMAND_STALL, ep->num);
readl(ep->reg_status);
}
spin_unlock_irqrestore(&ep->dev->lock, flags);
return retval;
}
static int goku_fifo_status(struct usb_ep *_ep)
{
struct goku_ep *ep;
struct goku_udc_regs __iomem *regs;
u32 size;
if (!_ep)
return -ENODEV;
ep = container_of(_ep, struct goku_ep, ep);
/* size is only reported sanely for OUT */
if (ep->is_in)
return -EOPNOTSUPP;
/* ignores 16-byte dma buffer; SizeH == 0 */
regs = ep->dev->regs;
size = readl(&regs->EPxSizeLA[ep->num]) & DATASIZE;
size += readl(&regs->EPxSizeLB[ep->num]) & DATASIZE;
VDBG(ep->dev, "%s %s %u\n", __FUNCTION__, ep->ep.name, size);
return size;
}
static void goku_fifo_flush(struct usb_ep *_ep)
{
struct goku_ep *ep;
struct goku_udc_regs __iomem *regs;
u32 size;
if (!_ep)
return;
ep = container_of(_ep, struct goku_ep, ep);
VDBG(ep->dev, "%s %s\n", __FUNCTION__, ep->ep.name);
/* don't change EPxSTATUS_EP_INVALID to READY */
if (!ep->desc && ep->num != 0) {
DBG(ep->dev, "%s %s inactive?\n", __FUNCTION__, ep->ep.name);
return;
}
regs = ep->dev->regs;
size = readl(&regs->EPxSizeLA[ep->num]);
size &= DATASIZE;
/* Non-desirable behavior: FIFO_CLEAR also clears the
* endpoint halt feature. For OUT, we _could_ just read
* the bytes out (PIO, if !ep->dma); for in, no choice.
*/
if (size)
command(regs, COMMAND_FIFO_CLEAR, ep->num);
}
static struct usb_ep_ops goku_ep_ops = {
.enable = goku_ep_enable,
.disable = goku_ep_disable,
.alloc_request = goku_alloc_request,
.free_request = goku_free_request,
.queue = goku_queue,
.dequeue = goku_dequeue,
.set_halt = goku_set_halt,
.fifo_status = goku_fifo_status,
.fifo_flush = goku_fifo_flush,
};
/*-------------------------------------------------------------------------*/
static int goku_get_frame(struct usb_gadget *_gadget)
{
return -EOPNOTSUPP;
}
static const struct usb_gadget_ops goku_ops = {
.get_frame = goku_get_frame,
// no remote wakeup
// not selfpowered
};
/*-------------------------------------------------------------------------*/
static inline char *dmastr(void)
{
if (use_dma == 0)
return "(dma disabled)";
else if (use_dma == 2)
return "(dma IN and OUT)";
else
return "(dma IN)";
}
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static const char proc_node_name [] = "driver/udc";
#define FOURBITS "%s%s%s%s"
#define EIGHTBITS FOURBITS FOURBITS
static void
dump_intmask(const char *label, u32 mask, char **next, unsigned *size)
{
int t;
/* int_status is the same format ... */
t = scnprintf(*next, *size,
"%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n",
label, mask,
(mask & INT_PWRDETECT) ? " power" : "",
(mask & INT_SYSERROR) ? " sys" : "",
(mask & INT_MSTRDEND) ? " in-dma" : "",
(mask & INT_MSTWRTMOUT) ? " wrtmo" : "",
(mask & INT_MSTWREND) ? " out-dma" : "",
(mask & INT_MSTWRSET) ? " wrset" : "",
(mask & INT_ERR) ? " err" : "",
(mask & INT_SOF) ? " sof" : "",
(mask & INT_EP3NAK) ? " ep3nak" : "",
(mask & INT_EP2NAK) ? " ep2nak" : "",
(mask & INT_EP1NAK) ? " ep1nak" : "",
(mask & INT_EP3DATASET) ? " ep3" : "",
(mask & INT_EP2DATASET) ? " ep2" : "",
(mask & INT_EP1DATASET) ? " ep1" : "",
(mask & INT_STATUSNAK) ? " ep0snak" : "",
(mask & INT_STATUS) ? " ep0status" : "",
(mask & INT_SETUP) ? " setup" : "",
(mask & INT_ENDPOINT0) ? " ep0" : "",
(mask & INT_USBRESET) ? " reset" : "",
(mask & INT_SUSPEND) ? " suspend" : "");
*size -= t;
*next += t;
}
static int
udc_proc_read(char *buffer, char **start, off_t off, int count,
int *eof, void *_dev)
{
char *buf = buffer;
struct goku_udc *dev = _dev;
struct goku_udc_regs __iomem *regs = dev->regs;
char *next = buf;
unsigned size = count;
unsigned long flags;
int i, t, is_usb_connected;
u32 tmp;
if (off != 0)
return 0;
local_irq_save(flags);
/* basic device status */
tmp = readl(&regs->power_detect);
is_usb_connected = tmp & PW_DETECT;
t = scnprintf(next, size,
"%s - %s\n"
"%s version: %s %s\n"
"Gadget driver: %s\n"
"Host %s, %s\n"
"\n",
pci_name(dev->pdev), driver_desc,
driver_name, DRIVER_VERSION, dmastr(),
dev->driver ? dev->driver->driver.name : "(none)",
is_usb_connected
? ((tmp & PW_PULLUP) ? "full speed" : "powered")
: "disconnected",
({char *state;
switch(dev->ep0state){
case EP0_DISCONNECT: state = "ep0_disconnect"; break;
case EP0_IDLE: state = "ep0_idle"; break;
case EP0_IN: state = "ep0_in"; break;
case EP0_OUT: state = "ep0_out"; break;
case EP0_STATUS: state = "ep0_status"; break;
case EP0_STALL: state = "ep0_stall"; break;
case EP0_SUSPEND: state = "ep0_suspend"; break;
default: state = "ep0_?"; break;
} state; })
);
size -= t;
next += t;
dump_intmask("int_status", readl(®s->int_status), &next, &size);
dump_intmask("int_enable", readl(®s->int_enable), &next, &size);
if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0)
goto done;
/* registers for (active) device and ep0 */
t = scnprintf(next, size, "\nirqs %lu\ndataset %02x "
"single.bcs %02x.%02x state %x addr %u\n",
dev->irqs, readl(&regs->DataSet),
readl(&regs->EPxSingle), readl(&regs->EPxBCS),
readl(&regs->UsbState),
readl(&regs->address));
size -= t;
next += t;
tmp = readl(&regs->dma_master);
t = scnprintf(next, size,
"dma %03X =" EIGHTBITS "%s %s\n", tmp,
(tmp & MST_EOPB_DIS) ? " eopb-" : "",
(tmp & MST_EOPB_ENA) ? " eopb+" : "",
(tmp & MST_TIMEOUT_DIS) ? " tmo-" : "",
(tmp & MST_TIMEOUT_ENA) ? " tmo+" : "",
(tmp & MST_RD_EOPB) ? " eopb" : "",
(tmp & MST_RD_RESET) ? " in_reset" : "",
(tmp & MST_WR_RESET) ? " out_reset" : "",
(tmp & MST_RD_ENA) ? " IN" : "",
(tmp & MST_WR_ENA) ? " OUT" : "",
(tmp & MST_CONNECTION)
? "ep1in/ep2out"
: "ep1out/ep2in");
size -= t;
next += t;
/* dump endpoint queues */
for (i = 0; i < 4; i++) {
struct goku_ep *ep = &dev->ep [i];
struct goku_request *req;
if (i && !ep->desc)
continue;
tmp = readl(ep->reg_status);
t = scnprintf(next, size,
"%s %s max %u %s, irqs %lu, "
"status %02x (%s) " FOURBITS "\n",
ep->ep.name,
ep->is_in ? "in" : "out",
ep->ep.maxpacket,
ep->dma ? "dma" : "pio",
ep->irqs,
tmp, ({ char *s;
switch (tmp & EPxSTATUS_EP_MASK) {
case EPxSTATUS_EP_READY:
s = "ready"; break;
case EPxSTATUS_EP_DATAIN:
s = "packet"; break;
case EPxSTATUS_EP_FULL:
s = "full"; break;
case EPxSTATUS_EP_TX_ERR: // host will retry
s = "tx_err"; break;
case EPxSTATUS_EP_RX_ERR:
s = "rx_err"; break;
case EPxSTATUS_EP_BUSY: /* ep0 only */
s = "busy"; break;
case EPxSTATUS_EP_STALL:
s = "stall"; break;
case EPxSTATUS_EP_INVALID: // these "can't happen"
s = "invalid"; break;
default:
s = "?"; break;
}; s; }),
(tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0",
(tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
(tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
(tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : ""
);
if (t <= 0 || t > size)
goto done;
size -= t;
next += t;
if (list_empty(&ep->queue)) {
t = scnprintf(next, size, "\t(nothing queued)\n");
if (t <= 0 || t > size)
goto done;
size -= t;
next += t;
continue;
}
list_for_each_entry(req, &ep->queue, queue) {
if (ep->dma && req->queue.prev == &ep->queue) {
if (i == UDC_MSTRD_ENDPOINT)
tmp = readl(&regs->in_dma_current);
else
tmp = readl(&regs->out_dma_current);
tmp -= req->req.dma;
tmp++;
} else
tmp = req->req.actual;
t = scnprintf(next, size,
"\treq %p len %u/%u buf %p\n",
&req->req, tmp, req->req.length,
req->req.buf);
if (t <= 0 || t > size)
goto done;
size -= t;
next += t;
}
}
done:
local_irq_restore(flags);
*eof = 1;
return count - size;
}
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
/*-------------------------------------------------------------------------*/
static void udc_reinit (struct goku_udc *dev)
{
static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" };
unsigned i;
INIT_LIST_HEAD (&dev->gadget.ep_list);
dev->gadget.ep0 = &dev->ep [0].ep;
dev->gadget.speed = USB_SPEED_UNKNOWN;
dev->ep0state = EP0_DISCONNECT;
dev->irqs = 0;
for (i = 0; i < 4; i++) {
struct goku_ep *ep = &dev->ep[i];
ep->num = i;
ep->ep.name = names[i];
ep->reg_fifo = &dev->regs->ep_fifo [i];
ep->reg_status = &dev->regs->ep_status [i];
ep->reg_mode = &dev->regs->ep_mode[i];
ep->ep.ops = &goku_ep_ops;
list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
ep->dev = dev;
INIT_LIST_HEAD (&ep->queue);
ep_reset(NULL, ep);
}
dev->ep[0].reg_mode = NULL;
dev->ep[0].ep.maxpacket = MAX_EP0_SIZE;
list_del_init (&dev->ep[0].ep.ep_list);
}
static void udc_reset(struct goku_udc *dev)
{
struct goku_udc_regs __iomem *regs = dev->regs;
writel(0, &regs->power_detect);
writel(0, &regs->int_enable);
readl(&regs->int_enable);
dev->int_enable = 0;
/* deassert reset, leave USB D+ at hi-Z (no pullup)
* don't let INT_PWRDETECT sequence begin
*/
udelay(250);
writel(PW_RESETB, &regs->power_detect);
readl(&regs->int_enable);
}
static void ep0_start(struct goku_udc *dev)
{
struct goku_udc_regs __iomem *regs = dev->regs;
unsigned i;
VDBG(dev, "%s\n", __FUNCTION__);
udc_reset(dev);
udc_reinit (dev);
//writel(MST_EOPB_ENA | MST_TIMEOUT_ENA, &regs->dma_master);
/* hw handles set_address, set_feature, get_status; maybe more */
writel( G_REQMODE_SET_INTF | G_REQMODE_GET_INTF
| G_REQMODE_SET_CONF | G_REQMODE_GET_CONF
| G_REQMODE_GET_DESC
| G_REQMODE_CLEAR_FEAT
, &regs->reqmode);
for (i = 0; i < 4; i++)
dev->ep[i].irqs = 0;
/* can't modify descriptors after writing UsbReady */
for (i = 0; i < DESC_LEN; i++)
writel(0, &regs->descriptors[i]);
writel(0, &regs->UsbReady);
/* expect ep0 requests when the host drops reset */
writel(PW_RESETB | PW_PULLUP, &regs->power_detect);
dev->int_enable = INT_DEVWIDE | INT_EP0;
writel(dev->int_enable, &dev->regs->int_enable);
readl(&regs->int_enable);
dev->gadget.speed = USB_SPEED_FULL;
dev->ep0state = EP0_IDLE;
}
static void udc_enable(struct goku_udc *dev)
{
/* start enumeration now, or after power detect irq */
if (readl(&dev->regs->power_detect) & PW_DETECT)
ep0_start(dev);
else {
DBG(dev, "%s\n", __FUNCTION__);
dev->int_enable = INT_PWRDETECT;
writel(dev->int_enable, &dev->regs->int_enable);
}
}
/*-------------------------------------------------------------------------*/
/* keeping it simple:
* - one bus driver, initted first;
* - one function driver, initted second
*/
static struct goku_udc *the_controller;
/* when a driver is successfully registered, it will receive
* control requests including set_configuration(), which enables
* non-control requests. then usb traffic follows until a
* disconnect is reported. then a host may connect again, or
* the driver might get unbound.
*/
int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
struct goku_udc *dev = the_controller;
int retval;
if (!driver
|| driver->speed != USB_SPEED_FULL
|| !driver->bind
|| !driver->disconnect
|| !driver->setup)
return -EINVAL;
if (!dev)
return -ENODEV;
if (dev->driver)
return -EBUSY;
/* hook up the driver */
driver->driver.bus = NULL;
dev->driver = driver;
dev->gadget.dev.driver = &driver->driver;
retval = driver->bind(&dev->gadget);
if (retval) {
DBG(dev, "bind to driver %s --> error %d\n",
driver->driver.name, retval);
dev->driver = NULL;
dev->gadget.dev.driver = NULL;
return retval;
}
/* then enable host detection and ep0; and we're ready
* for set_configuration as well as eventual disconnect.
*/
udc_enable(dev);
DBG(dev, "registered gadget driver '%s'\n", driver->driver.name);
return 0;
}
EXPORT_SYMBOL(usb_gadget_register_driver);
static void
stop_activity(struct goku_udc *dev, struct usb_gadget_driver *driver)
{
unsigned i;
DBG (dev, "%s\n", __FUNCTION__);
if (dev->gadget.speed == USB_SPEED_UNKNOWN)
driver = NULL;
/* disconnect gadget driver after quiescing hw and the driver */
udc_reset (dev);
for (i = 0; i < 4; i++)
nuke(&dev->ep [i], -ESHUTDOWN);
if (driver) {
spin_unlock(&dev->lock);
driver->disconnect(&dev->gadget);
spin_lock(&dev->lock);
}
if (dev->driver)
udc_enable(dev);
}
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
struct goku_udc *dev = the_controller;
unsigned long flags;
if (!dev)
return -ENODEV;
if (!driver || driver != dev->driver || !driver->unbind)
return -EINVAL;
spin_lock_irqsave(&dev->lock, flags);
dev->driver = NULL;
stop_activity(dev, driver);
spin_unlock_irqrestore(&dev->lock, flags);
driver->unbind(&dev->gadget);
DBG(dev, "unregistered driver '%s'\n", driver->driver.name);
return 0;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);
/*-------------------------------------------------------------------------*/
static void ep0_setup(struct goku_udc *dev)
{
struct goku_udc_regs __iomem *regs = dev->regs;
struct usb_ctrlrequest ctrl;
int tmp;
/* read SETUP packet and enter DATA stage */
ctrl.bRequestType = readl(&regs->bRequestType);
ctrl.bRequest = readl(&regs->bRequest);
ctrl.wValue = cpu_to_le16((readl(&regs->wValueH) << 8)
| readl(&regs->wValueL));
ctrl.wIndex = cpu_to_le16((readl(&regs->wIndexH) << 8)
| readl(&regs->wIndexL));
ctrl.wLength = cpu_to_le16((readl(&regs->wLengthH) << 8)
| readl(&regs->wLengthL));
writel(0, &regs->SetupRecv);
nuke(&dev->ep[0], 0);
dev->ep[0].stopped = 0;
if (likely(ctrl.bRequestType & USB_DIR_IN)) {
dev->ep[0].is_in = 1;
dev->ep0state = EP0_IN;
/* detect early status stages */
writel(ICONTROL_STATUSNAK, &dev->regs->IntControl);
} else {
dev->ep[0].is_in = 0;
dev->ep0state = EP0_OUT;
/* NOTE: CLEAR_FEATURE is done in software so that we can
* synchronize transfer restarts after bulk IN stalls. data
* won't even enter the fifo until the halt is cleared.
*/
switch (ctrl.bRequest) {
case USB_REQ_CLEAR_FEATURE:
switch (ctrl.bRequestType) {
case USB_RECIP_ENDPOINT:
tmp = le16_to_cpu(ctrl.wIndex) & 0x0f;
/* active endpoint */
if (tmp > 3 || (!dev->ep[tmp].desc && tmp != 0))
goto stall;
if (ctrl.wIndex & __constant_cpu_to_le16(
USB_DIR_IN)) {
if (!dev->ep[tmp].is_in)
goto stall;
} else {
if (dev->ep[tmp].is_in)
goto stall;
}
if (ctrl.wValue != __constant_cpu_to_le16(
USB_ENDPOINT_HALT))
goto stall;
if (tmp)
goku_clear_halt(&dev->ep[tmp]);
succeed:
/* start ep0out status stage */
writel(~(1<<0), &regs->EOP);
dev->ep[0].stopped = 1;
dev->ep0state = EP0_STATUS;
return;
case USB_RECIP_DEVICE:
/* device remote wakeup: always clear */
if (ctrl.wValue != __constant_cpu_to_le16(1))
goto stall;
VDBG(dev, "clear dev remote wakeup\n");
goto succeed;
case USB_RECIP_INTERFACE:
goto stall;
default: /* pass to gadget driver */
break;
}
break;
default:
break;
}
}
#ifdef USB_TRACE
VDBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
ctrl.bRequestType, ctrl.bRequest,
le16_to_cpu(ctrl.wValue), le16_to_cpu(ctrl.wIndex),
le16_to_cpu(ctrl.wLength));
#endif
/* hw wants to know when we're configured (or not) */
dev->req_config = (ctrl.bRequest == USB_REQ_SET_CONFIGURATION
&& ctrl.bRequestType == USB_RECIP_DEVICE);
if (unlikely(dev->req_config))
dev->configured = (ctrl.wValue != __constant_cpu_to_le16(0));
/* delegate everything to the gadget driver.
* it may respond after this irq handler returns.
*/
spin_unlock (&dev->lock);
tmp = dev->driver->setup(&dev->gadget, &ctrl);
spin_lock (&dev->lock);
if (unlikely(tmp < 0)) {
stall:
#ifdef USB_TRACE
VDBG(dev, "req %02x.%02x protocol STALL; err %d\n",
ctrl.bRequestType, ctrl.bRequest, tmp);
#endif
command(regs, COMMAND_STALL, 0);
dev->ep[0].stopped = 1;
dev->ep0state = EP0_STALL;
}
/* expect at least one data or status stage irq */
}
#define ACK(irqbit) { \
stat &= ~irqbit; \
writel(~irqbit, &regs->int_status); \
handled = 1; \
}
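/* Note on ACK(): this controller clears int_status bits by writing the
* complemented mask (the same writel(~stat, ...) idiom appears in the
* power-detect path below); the macro also drops the bit from the cached
* stat word and records that the irq was handled.
*/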
static irqreturn_t goku_irq(int irq, void *_dev)
{
struct goku_udc *dev = _dev;
struct goku_udc_regs __iomem *regs = dev->regs;
struct goku_ep *ep;
u32 stat, handled = 0;
unsigned i, rescans = 5;
spin_lock(&dev->lock);
rescan:
stat = readl(&regs->int_status) & dev->int_enable;
if (!stat)
goto done;
dev->irqs++;
/* device-wide irqs */
if (unlikely(stat & INT_DEVWIDE)) {
if (stat & INT_SYSERROR) {
ERROR(dev, "system error\n");
stop_activity(dev, dev->driver);
stat = 0;
handled = 1;
// FIXME have a neater way to prevent re-enumeration
dev->driver = NULL;
goto done;
}
if (stat & INT_PWRDETECT) {
writel(~stat, &regs->int_status);
if (readl(&dev->regs->power_detect) & PW_DETECT) {
VDBG(dev, "connect\n");
ep0_start(dev);
} else {
DBG(dev, "disconnect\n");
if (dev->gadget.speed == USB_SPEED_FULL)
stop_activity(dev, dev->driver);
dev->ep0state = EP0_DISCONNECT;
dev->int_enable = INT_DEVWIDE;
writel(dev->int_enable, &dev->regs->int_enable);
}
stat = 0;
handled = 1;
goto done;
}
if (stat & INT_SUSPEND) {
ACK(INT_SUSPEND);
if (readl(&regs->ep_status[0]) & EPxSTATUS_SUSPEND) {
switch (dev->ep0state) {
case EP0_DISCONNECT:
case EP0_SUSPEND:
goto pm_next;
default:
break;
}
DBG(dev, "USB suspend\n");
dev->ep0state = EP0_SUSPEND;
if (dev->gadget.speed != USB_SPEED_UNKNOWN
&& dev->driver
&& dev->driver->suspend) {
spin_unlock(&dev->lock);
dev->driver->suspend(&dev->gadget);
spin_lock(&dev->lock);
}
} else {
if (dev->ep0state != EP0_SUSPEND) {
DBG(dev, "bogus USB resume %d\n",
dev->ep0state);
goto pm_next;
}
DBG(dev, "USB resume\n");
dev->ep0state = EP0_IDLE;
if (dev->gadget.speed != USB_SPEED_UNKNOWN
&& dev->driver
&& dev->driver->resume) {
spin_unlock(&dev->lock);
dev->driver->resume(&dev->gadget);
spin_lock(&dev->lock);
}
}
}
pm_next:
if (stat & INT_USBRESET) { /* hub reset done */
ACK(INT_USBRESET);
INFO(dev, "USB reset done, gadget %s\n",
dev->driver->driver.name);
}
// and INT_ERR on some endpoint's crc/bitstuff/... problem
}
/* progress ep0 setup, data, or status stages.
* no transition {EP0_STATUS, EP0_STALL} --> EP0_IDLE; saves irqs
*/
if (stat & INT_SETUP) {
ACK(INT_SETUP);
dev->ep[0].irqs++;
ep0_setup(dev);
}
if (stat & INT_STATUSNAK) {
ACK(INT_STATUSNAK|INT_ENDPOINT0);
if (dev->ep0state == EP0_IN) {
ep = &dev->ep[0];
ep->irqs++;
nuke(ep, 0);
writel(~(1<<0), &regs->EOP);
dev->ep0state = EP0_STATUS;
}
}
if (stat & INT_ENDPOINT0) {
ACK(INT_ENDPOINT0);
ep = &dev->ep[0];
ep->irqs++;
pio_advance(ep);
}
/* dma completion */
if (stat & INT_MSTRDEND) { /* IN */
ACK(INT_MSTRDEND);
ep = &dev->ep[UDC_MSTRD_ENDPOINT];
ep->irqs++;
dma_advance(dev, ep);
}
if (stat & INT_MSTWREND) { /* OUT */
ACK(INT_MSTWREND);
ep = &dev->ep[UDC_MSTWR_ENDPOINT];
ep->irqs++;
dma_advance(dev, ep);
}
if (stat & INT_MSTWRTMOUT) { /* OUT */
ACK(INT_MSTWRTMOUT);
ep = &dev->ep[UDC_MSTWR_ENDPOINT];
ep->irqs++;
ERROR(dev, "%s write timeout ?\n", ep->ep.name);
// reset dma? then dma_advance()
}
/* pio */
for (i = 1; i < 4; i++) {
u32 tmp = INT_EPxDATASET(i);
if (!(stat & tmp))
continue;
ep = &dev->ep[i];
pio_advance(ep);
if (list_empty (&ep->queue))
pio_irq_disable(dev, regs, i);
stat &= ~tmp;
handled = 1;
ep->irqs++;
}
if (rescans--)
goto rescan;
done:
(void)readl(&regs->int_enable);
spin_unlock(&dev->lock);
if (stat)
DBG(dev, "unhandled irq status: %05x (%05x, %05x)\n", stat,
readl(&regs->int_status), dev->int_enable);
return IRQ_RETVAL(handled);
}
#undef ACK
/*-------------------------------------------------------------------------*/
static void gadget_release(struct device *_dev)
{
struct goku_udc *dev = dev_get_drvdata(_dev);
kfree(dev);
}
/* tear down the binding between this driver and the pci device */
static void goku_remove(struct pci_dev *pdev)
{
struct goku_udc *dev = pci_get_drvdata(pdev);
DBG(dev, "%s\n", __FUNCTION__);
BUG_ON(dev->driver);
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
remove_proc_entry(proc_node_name, NULL);
#endif
if (dev->regs)
udc_reset(dev);
if (dev->got_irq)
free_irq(pdev->irq, dev);
if (dev->regs)
iounmap(dev->regs);
if (dev->got_region)
release_mem_region(pci_resource_start (pdev, 0),
pci_resource_len (pdev, 0));
if (dev->enabled)
pci_disable_device(pdev);
device_unregister(&dev->gadget.dev);
pci_set_drvdata(pdev, NULL);
dev->regs = NULL;
the_controller = NULL;
INFO(dev, "unbind\n");
}
/* wrap this driver around the specified pci device, but
* don't respond over USB until a gadget driver binds to us.
*/
static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct goku_udc *dev = NULL;
unsigned long resource, len;
void __iomem *base = NULL;
int retval;
/* if you want to support more than one controller in a system,
* usb_gadget_{register,unregister}_driver() must change.
*/
if (the_controller) {
WARN(dev, "ignoring %s\n", pci_name(pdev));
return -EBUSY;
}
if (!pdev->irq) {
printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev));
retval = -ENODEV;
goto done;
}
/* alloc, and start init */
dev = kzalloc (sizeof *dev, GFP_KERNEL);
if (dev == NULL){
pr_debug("enomem %s\n", pci_name(pdev));
retval = -ENOMEM;
goto done;
}
spin_lock_init(&dev->lock);
dev->pdev = pdev;
dev->gadget.ops = &goku_ops;
/* the "gadget" abstracts/virtualizes the controller */
strcpy(dev->gadget.dev.bus_id, "gadget");
dev->gadget.dev.parent = &pdev->dev;
dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
dev->gadget.dev.release = gadget_release;
dev->gadget.name = driver_name;
/* now all the pci goodies ... */
retval = pci_enable_device(pdev);
if (retval < 0) {
DBG(dev, "can't enable, %d\n", retval);
goto done;
}
dev->enabled = 1;
resource = pci_resource_start(pdev, 0);
len = pci_resource_len(pdev, 0);
if (!request_mem_region(resource, len, driver_name)) {
DBG(dev, "controller already in use\n");
retval = -EBUSY;
goto done;
}
dev->got_region = 1;
base = ioremap_nocache(resource, len);
if (base == NULL) {
DBG(dev, "can't map memory\n");
retval = -EFAULT;
goto done;
}
dev->regs = (struct goku_udc_regs __iomem *) base;
pci_set_drvdata(pdev, dev);
INFO(dev, "%s\n", driver_desc);
INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
/* init to known state, then setup irqs */
udc_reset(dev);
udc_reinit (dev);
if (request_irq(pdev->irq, goku_irq, IRQF_SHARED/*|IRQF_SAMPLE_RANDOM*/,
driver_name, dev) != 0) {
DBG(dev, "request interrupt %d failed\n", pdev->irq);
retval = -EBUSY;
goto done;
}
dev->got_irq = 1;
if (use_dma)
pci_set_master(pdev);
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev);
#endif
/* done */
the_controller = dev;
retval = device_register(&dev->gadget.dev);
if (retval == 0)
return 0;
done:
if (dev)
goku_remove (pdev);
return retval;
}
/*-------------------------------------------------------------------------*/
static struct pci_device_id pci_ids [] = { {
.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
.class_mask = ~0,
.vendor = 0x102f, /* Toshiba */
.device = 0x0107, /* this UDC */
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
static struct pci_driver goku_pci_driver = {
.name = (char *) driver_name,
.id_table = pci_ids,
.probe = goku_probe,
.remove = goku_remove,
/* FIXME add power management support */
};
static int __init init (void)
{
return pci_register_driver (&goku_pci_driver);
}
module_init (init);
static void __exit cleanup (void)
{
pci_unregister_driver (&goku_pci_driver);
}
module_exit (cleanup);
| gpl-2.0 |
Grarak/android_kernel_oneplus_msm8996 | drivers/misc/type-c-tusb320.c | 30 | 24191 | /*
* tusb320.c -- TUSB320 USB TYPE-C Controller device driver
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/switch.h>
#include <linux/input.h>
#include <linux/timer.h>
#include <linux/wakelock.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/version.h>
#include <linux/power_supply.h>
#include <linux/type-c_notifier.h>
/******************************************************************************/
#define USE_TIMER_WHEN_DFP_TO_DETETC_UFP
#define OTG_WL_HOLD_TIME 5000
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38))
#define KERNEL_ABOVE_2_6_38
#endif
#ifdef KERNEL_ABOVE_2_6_38
#define sstrtoul(...) kstrtoul(__VA_ARGS__)
#else
#define sstrtoul(...) strict_strtoul(__VA_ARGS__)
#endif
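/*
* sstrtoul() resolves to kstrtoul() on kernels newer than 2.6.38 and to the
* legacy strict_strtoul() otherwise; both share the (buf, base, &result)
* signature used by config_mode() below, e.g. (illustrative only):
*
*	unsigned long val;
*	if (sstrtoul("4", 10, &val))
*		return -EINVAL;
*/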
#define TUSB320_ATTACH 1
#define TUSB320_DETACH 0
enum tusb320_type{
TUSB320_TYPE_NONE = 0,
TUSB320_TYPE_AUDIO,
TUSB320_TYPE_DEBUG,
TUSB320_TYPE_POWER_ACC,
TUSB320_TYPE_SOURCE,
TUSB320_TYPE_SINK
};
enum tusb320_bc_lvl{
TUSB320_BC_LVL_RA = 0,
TUSB320_BC_LVL_USB,
TUSB320_BC_LVL_1P5,
TUSB320_BC_LVL_3A
};
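/* These levels track the Type-C current advertisement decoded from CSR 0x08
* in the irq handler below: USB is the default 500/900 mA, 1P5 is 1.5 A and
* 3A is 3 A; RA is assumed here to denote the Ra (powered-cable) termination
* from the Type-C spec, as this driver never reports it directly.
*/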
enum tusb320_drp_toggle{
TUSB320_TOGGLE_SNK35_SRC15 = 0, // default
TUSB320_TOGGLE_SNK30_SRC20,
TUSB320_TOGGLE_SNK25_SRC25,
TUSB320_TOGGLE_SNK20_SRC30,
};
enum tusb320_host_cur{
TUSB320_HOST_CUR_NO = 0, // no current
TUSB320_HOST_CUR_80, // default USB
TUSB320_HOST_CUR_180, // 1.5A
TUSB320_HOST_CUR_330, // 3A
};
enum tusb320_orient{
TUSB320_ORIENT_NO_CONN = 0,
TUSB320_ORIENT_CC1_CC,
TUSB320_ORIENT_CC2_CC,
TUSB320_ORIENT_FAULT
};
enum tusb320_config_modes{
TUSB320_MODE_SRC = 0,
TUSB320_MODE_SRC_ACC,
TUSB320_MODE_SNK,
TUSB320_MODE_SNK_ACC,
TUSB320_MODE_DRP,
TUSB320_MODE_DRP_ACC
};
struct tusb320_info {
struct i2c_client *i2c;
struct device *dev_t;
struct mutex mutex;
struct class *fusb_class;
struct power_supply *usb_psy;
int irq;
int OTG_USB_ID_irq;
//int switch_sel_gpio;
int irq_gpio;
int ID_gpio;
enum tusb320_type fusb_type;
enum tusb320_orient fusb_orient;
struct timer_list s_timer;
struct work_struct otg_work;
struct workqueue_struct *otg_wqueue;
struct wake_lock otg_wl;
};
static u8 Try_Snk_Attempt = 0x00; /* global flag for the Try.SNK attempt */
static int tusb320_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest)
{
struct tusb320_info *info = i2c_get_clientdata(i2c);
int ret;
mutex_lock(&info->mutex);
ret = i2c_smbus_read_byte_data(i2c, reg);
mutex_unlock(&info->mutex);
if (ret < 0) {
printk("%s:reg(0x%x), ret(%d)\n", __func__, reg, ret);
return ret;
}
ret &= 0xff;
*dest = ret;
return 0;
}
static int tusb320_write_reg(struct i2c_client *i2c, u8 reg, u8 value)
{
struct tusb320_info *info = i2c_get_clientdata(i2c);
int ret;
mutex_lock(&info->mutex);
ret = i2c_smbus_write_byte_data(i2c, reg, value);
mutex_unlock(&info->mutex);
if (ret < 0)
printk("%s:reg(0x%x), ret(%d)\n",
__func__, reg, ret);
return ret;
}
/*
static int tusb320_update_reg(struct i2c_client *i2c, u8 reg, u8 val, u8 mask)
{
struct tusb320_info *info = i2c_get_clientdata(i2c);
int ret;
mutex_lock(&info->mutex);
ret = i2c_smbus_read_byte_data(i2c, reg);
if (ret >= 0) {
u8 old_val = ret & 0xff;
u8 new_val = (val & mask) | (old_val & (~mask));
ret = i2c_smbus_write_byte_data(i2c, reg, new_val);
}
mutex_unlock(&info->mutex);
return ret;
}
*/
static int tusb320_clear_intr(struct i2c_client *i2c)
{
struct tusb320_info *info = i2c_get_clientdata(i2c);
int ret;
mutex_lock(&info->mutex);
/*
ret = i2c_smbus_read_byte_data(i2c, 0x09);
if (ret >= 0) {
u8 old_val = ret & 0xff;
u8 new_val = (old_val | 0x10);
ret = i2c_smbus_write_byte_data(i2c, 0x09, new_val);
}*/
ret = i2c_smbus_write_byte_data(i2c, 0x09, 0x10);
mutex_unlock(&info->mutex);
return ret;
}
#ifdef USE_TIMER_WHEN_DFP_TO_DETETC_UFP
static int tusb320_soft_reset(struct i2c_client *i2c)
{
struct tusb320_info *info = i2c_get_clientdata(i2c);
int ret;
mutex_lock(&info->mutex);
/*
ret = i2c_smbus_read_byte_data(i2c, 0x0a);
if (ret >= 0) {
u8 old_val = ret & 0xff;
u8 new_val = (old_val | 0x08);
ret = i2c_smbus_write_byte_data(i2c, 0x0a, new_val);
}*/
ret = i2c_smbus_write_byte_data(i2c, 0x0a, 0x08);
mutex_unlock(&info->mutex);
return ret;
}
static int tusb320_change_DRP_duty_cycle_and_clear_intr(struct i2c_client *i2c)
{
struct tusb320_info *info = i2c_get_clientdata(i2c);
int ret;
mutex_lock(&info->mutex);
/*
ret = i2c_smbus_read_byte_data(i2c, 0x09);
if (ret >= 0) {
u8 old_val = ret & 0xff;
u8 new_val = (old_val | 0x10);
ret = i2c_smbus_write_byte_data(i2c, 0x09, new_val);
}*/
ret = i2c_smbus_write_byte_data(i2c, 0x09, 0x16);
mutex_unlock(&info->mutex);
return ret;
}
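/* Going by the commented-out write_csr() calls above, bit 4 (0x10) of CSR
* 0x09 is the interrupt-clear bit, so writing 0x16 clears the interrupt while
* also rewriting the DRP duty-cycle field; the exact bit layout is assumed
* from the vendor reference code rather than confirmed against the datasheet.
*/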
#if 0
static void timer_handle(unsigned long data)
{
struct tusb320_info *info = (struct tusb320_info *)data;
Try_Snk_Attempt = 0x00; //Clear flag
tusb320_clear_intr(info->i2c);//write_csr(0x09, 0x10); //Set DRP duty cycle to default
}
static void Start_WatchDog_Timer(struct tusb320_info *info)
{
init_timer(&info->s_timer);
info->s_timer.function = &timer_handle;
info->s_timer.expires = jiffies + msecs_to_jiffies(25); /* 25ms */
info->s_timer.data = (unsigned long)info;
add_timer(&info->s_timer);
}
static void Clear_Stop_Watchdog_timer(struct tusb320_info *info)
{
//del_timer_sync(&info->s_timer);//should not be called in interrupt context
del_timer(&info->s_timer);
}
#endif
#endif
static ssize_t show_current_type(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tusb320_info *info = dev_get_drvdata(dev);
switch(info->fusb_type){
case TUSB320_TYPE_AUDIO:
return sprintf(buf, "TUSB320_TYPE_AUDIO\n");
case TUSB320_TYPE_DEBUG:
return sprintf(buf, "TUSB320_TYPE_DEBUG\n");
case TUSB320_TYPE_POWER_ACC:
return sprintf(buf, "TUSB320_TYPE_POWER_ACC\n");
case TUSB320_TYPE_SOURCE:
return sprintf(buf, "TUSB320_SOURCE\n");
case TUSB320_TYPE_SINK:
return sprintf(buf, "TUSB320_TYPE_SINK\n");
default:
return sprintf(buf, "Not Connected\n");
}
}
static ssize_t show_CC_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tusb320_info *info = dev_get_drvdata(dev);
switch(info->fusb_orient){
case TUSB320_ORIENT_CC1_CC:
return sprintf(buf, "cc1\n");
case TUSB320_ORIENT_CC2_CC:
return sprintf(buf, "cc2\n");
case TUSB320_ORIENT_NO_CONN:
return sprintf(buf, "disconnect\n");
case TUSB320_ORIENT_FAULT:
default:
return sprintf(buf, "fault\n");
}
}
#define REG_MOD 0x0a
static ssize_t config_mode(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int error;
unsigned long data;
u8 rdata;
struct tusb320_info *info = dev_get_drvdata(dev);
error = sstrtoul(buf, 10, &data);
if(error)
return error;
if(data == TUSB320_MODE_SRC)
tusb320_write_reg(info->i2c, REG_MOD, 0x20);
else if(data == TUSB320_MODE_SNK)
tusb320_write_reg(info->i2c, REG_MOD, 0x10);
else if(data == TUSB320_MODE_DRP)
tusb320_write_reg(info->i2c, REG_MOD, 0x30);
else
printk("%s: Argument is not match!!\n", __func__);
tusb320_read_reg(info->i2c, REG_MOD, &rdata);
printk("%s: REG_MOD(0x%02x)\n", __func__, rdata);
return count;
}
static DEVICE_ATTR(type, S_IRUGO, show_current_type, NULL);
static DEVICE_ATTR(CC_state, S_IRUGO, show_CC_state, NULL);
static DEVICE_ATTR(mode, S_IWUSR, NULL, config_mode);
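/* Illustrative sysfs usage (the class/device names come from the probe
* routine below, so the exact path is an assumption): writing one of the
* tusb320_config_modes values into `mode` reprograms the port role, e.g.
*
*	echo 4 > /sys/class/type-c-tusb320/tusb320/mode	(TUSB320_MODE_DRP)
*
* config_mode() only honors SRC (0), SNK (2) and DRP (4).
*/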
static void tusb320_check_type(struct tusb320_info *info, u8 type)
{
if(type == 0x00)
{
info->fusb_type = TUSB320_TYPE_NONE;
}
else if(type == 0x40)
{
info->fusb_type = TUSB320_TYPE_SOURCE;
}
else if(type == 0x80)
{
info->fusb_type = TUSB320_TYPE_SINK;
}
else if(type == 0xc0)
{
info->fusb_type = TUSB320_TYPE_POWER_ACC;
}
else
{
printk("%s: No device type!\n", __func__);
}
}
static void tusb320_check_orient(struct tusb320_info *info, u8 status)
{
if(status)
{
info->fusb_orient = TUSB320_ORIENT_CC2_CC;
printk("%s: Orient is CC2 %d\n", __func__,info->fusb_orient);
}else{
info->fusb_orient = TUSB320_ORIENT_CC1_CC;
printk("%s: Orient is CC1 %d\n", __func__,info->fusb_orient);
}
}
static irqreturn_t tusb320_irq_thread(int irq, void *handle)
{
u8 intr;
u8 current_mode;
u8 Attached_State = 0x00;
struct tusb320_info *info = (struct tusb320_info *)handle;
/*
* #ifdef USE_TIMER_WHEN_DFP_TO_DETETC_UFP
* Clear_Stop_Watchdog_timer(info);
* #endif
*/
tusb320_read_reg(info->i2c, 0x09, &intr);
dev_err(&info->i2c->dev,"\n%s: from type<%d> int(0x%02x)\n", __func__,info->fusb_type, intr);
tusb320_check_type(info, intr & 0xc0);
dev_err(&info->i2c->dev,"%s: TYPE is %d\n", __func__, info->fusb_type);
tusb320_check_orient(info,intr & 0x20);
dev_err(&info->i2c->dev,"%s: Orient is %d\n", __func__, info->fusb_orient);
if(info->fusb_type == TUSB320_TYPE_NONE)//the TUSB320 CC status defaults to CC2 rather than "no connection"
{
info->fusb_orient = TUSB320_ORIENT_NO_CONN;
dev_err(&info->i2c->dev,"%s: Orient correct to %d\n", __func__, info->fusb_orient);
}
if(info->fusb_type == TUSB320_TYPE_SINK)//DFP attached
{
// SINK
tusb320_read_reg(info->i2c, 0x08, &current_mode);
dev_err(&info->i2c->dev,"\n%s: current_mode(0x%02x)\n", __func__, current_mode);
if((current_mode&0x30) == 0x30){
dev_err(&info->i2c->dev,"%s: 3A!\n", __func__);
bc_notifier_call_chain(TUSB320_BC_LVL_3A);
}
if((current_mode&0x30) == 0x00 || (current_mode&0x30) == 0x20){
dev_err(&info->i2c->dev,"%s: default(500/900)ma!\n", __func__);
bc_notifier_call_chain(TUSB320_BC_LVL_USB);
}
if((current_mode&0x30) == 0x10){
dev_err(&info->i2c->dev,"%s: 1.5A!\n", __func__);
bc_notifier_call_chain(TUSB320_BC_LVL_1P5);
}
}
Attached_State = intr & 0xc0;
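/* Try.SNK sequence, apparently following the flow from TI's reference code:
* when a DFP is first seen (Attached_State == 0x40), soft-reset the part
* twice 25 ms apart and widen the DRP duty cycle toward sink, giving a DRP
* partner the chance to present as host; the next interrupt clears the
* attempt flag and acts on the resolved role.
*/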
#ifdef USE_TIMER_WHEN_DFP_TO_DETETC_UFP
if (Try_Snk_Attempt) //Try.Snk has been attempted.
{
if(Attached_State != 0x00)
Try_Snk_Attempt = 0x00; //Clear flag.
if (Attached_State == 0x40) //DFP
{
//Do DFP routine.
//Enable VBUS.
//TODO: trigger the OTG isr, open the 5V VBUS and switch the USB PHY to host mode
}
else //UFP or Accessory or Unattached
{
//Do UFP, or Accessory, Unattach Routines
}
tusb320_clear_intr(info->i2c);//write_csr(0x09, 0x10); //Clear Interrupt status and set DRP duty cycle to default
}
else //Try.Snk has NOT been attempted
{
if (Attached_State == 0x40) //DFP. Perform Try.SNK.
{
Try_Snk_Attempt = 0x01; //Set Try_Snk flag.
tusb320_soft_reset(info->i2c);//write_csr(0x0A, 0x08); //Set Soft Reset
mdelay(25);//wait_time(25); // Wait 25ms.
tusb320_soft_reset(info->i2c);//write_csr(0x0A, 0x08); // Set Soft Reset
//Start_WatchDog_Timer(info);
tusb320_change_DRP_duty_cycle_and_clear_intr(info->i2c);//write_csr(0x09, 0x16); // Change DRP duty Cycle and Interrupt status
}
else // NOT DFP
{
//Do UFP ,or Accessory, Unattach Routines
Try_Snk_Attempt = 0x00;
tusb320_clear_intr(info->i2c);//write_csr(0x09, 0x10); //Clear Interrupt Status
}
}
#else
tusb320_clear_intr(info->i2c);//write_csr(0x09, 0x10); //Clear Interrupt Status
#endif
return IRQ_HANDLED;
}
static void tusb320_reboot_scan_otg(struct tusb320_info *info)
{
u8 intr;
u8 current_mode;
u8 Attached_State = 0x00;
tusb320_read_reg(info->i2c, 0x09, &intr);
dev_err(&info->i2c->dev,"\n%s: from type<%d> int(0x%02x)\n", __func__,info->fusb_type, intr);
tusb320_check_type(info, intr & 0xc0);
dev_err(&info->i2c->dev,"%s: TYPE is %d\n", __func__, info->fusb_type);
tusb320_check_orient(info,intr & 0x20);
dev_err(&info->i2c->dev,"%s: Orient is %d\n", __func__, info->fusb_orient);
if(info->fusb_type == TUSB320_TYPE_NONE)//the TUSB320 CC status defaults to CC2 rather than "no connection"
{
info->fusb_orient = TUSB320_ORIENT_NO_CONN;
dev_err(&info->i2c->dev,"%s: Orient correct to %d\n", __func__, info->fusb_orient);
}
if(info->fusb_type == TUSB320_TYPE_SINK)//DFP attached
{
// SINK
tusb320_read_reg(info->i2c, 0x08, &current_mode);
dev_err(&info->i2c->dev,"\n%s: current_mode(0x%02x)\n", __func__, current_mode);
if((current_mode&0x30) == 0x30){
dev_err(&info->i2c->dev,"%s: 3A!\n", __func__);
bc_notifier_call_chain(TUSB320_BC_LVL_3A);
}
if((current_mode&0x30) == 0x00 || (current_mode&0x30) == 0x20){
dev_err(&info->i2c->dev,"%s: default(500/900)ma!\n", __func__);
bc_notifier_call_chain(TUSB320_BC_LVL_USB);
}
if((current_mode&0x30) == 0x10){
dev_err(&info->i2c->dev,"%s: 1.5A!\n", __func__);
bc_notifier_call_chain(TUSB320_BC_LVL_1P5);
}
}
Attached_State = intr & 0xc0;
#ifdef USE_TIMER_WHEN_DFP_TO_DETETC_UFP
if (Try_Snk_Attempt) //Try.Snk has been attempted.
{
if(Attached_State != 0x00)
Try_Snk_Attempt = 0x00; //Clear flag.
if (Attached_State == 0x40) //DFP
{
//Do DFP routine.
//Enable VBUS.
//TODO: trigger the OTG isr, open the 5V VBUS and switch the USB PHY to host mode
}
else //UFP or Accessory or Unattached
{
//Do UFP, or Accessory, Unattach Routines
}
tusb320_clear_intr(info->i2c);//write_csr(0x09, 0x10); //Clear Interrupt status and set DRP duty cycle to default
}
else //Try.Snk has NOT been attempted
{
if (Attached_State == 0x40) //DFP. Perform Try.SNK.
{
Try_Snk_Attempt = 0x01; //Set Try_Snk flag.
tusb320_soft_reset(info->i2c);//write_csr(0x0A, 0x08); //Set Soft Reset
mdelay(25);//wait_time(25); // Wait 25ms.
tusb320_soft_reset(info->i2c);//write_csr(0x0A, 0x08); // Set Soft Reset
//Start_WatchDog_Timer(info);
tusb320_change_DRP_duty_cycle_and_clear_intr(info->i2c);//write_csr(0x09, 0x16); // Change DRP duty Cycle and Interrupt status
}
else // NOT DFP
{
//Do UFP ,or Accessory, Unattach Routines
Try_Snk_Attempt = 0x00;
tusb320_clear_intr(info->i2c);//write_csr(0x09, 0x10); //Clear Interrupt Status
}
}
#else
tusb320_clear_intr(info->i2c);//write_csr(0x09, 0x10); //Clear Interrupt Status
#endif
}
static void tusb320_otg_work(struct work_struct *work)
{
struct tusb320_info *info = container_of(work, struct tusb320_info, otg_work);
bool otg_present = !gpio_get_value(info->ID_gpio);
dev_err(&info->i2c->dev,"%s : otg_present = (%d)\n",__func__,otg_present);
power_supply_set_usb_otg(info->usb_psy, otg_present ? 1 : 0);
}
static irqreturn_t tusb320_otg_irq_thread(int irq, void *handle)
{
struct tusb320_info *info = (struct tusb320_info *)handle;
wake_lock_timeout(&info->otg_wl, msecs_to_jiffies(OTG_WL_HOLD_TIME));
queue_work(info->otg_wqueue, &info->otg_work);
return IRQ_HANDLED;
}
static void tusb320_initialization(struct tusb320_info *info)
{
info->fusb_type = TUSB320_TYPE_NONE;
info->fusb_orient= TUSB320_ORIENT_NO_CONN;
}
static int tusb320_gpio_configure(struct tusb320_info *info)
{
int retval = 0;
if (gpio_is_valid(info->irq_gpio)) {
/* configure the tusb320 interrupt gpio */
retval = gpio_request(info->irq_gpio,
"tusb320_irq_gpio");
if (retval) {
dev_err(&info->i2c->dev,
"unable to request gpio [%d]\n",
info->irq_gpio);
goto err_irq_gpio_req;
}
retval = gpio_direction_input(info->irq_gpio);
if (retval) {
dev_err(&info->i2c->dev,
"unable to set dir for gpio[%d]\n",
info->irq_gpio);
goto err_irq_gpio_dir;
}
info->irq = gpio_to_irq(info->irq_gpio);
if (info->irq < 0) {
dev_err(&info->i2c->dev,
"Unable to get irq number for GPIO %d, error %d\n",
info->irq_gpio, info->irq);
retval = info->irq;
goto err_irq_gpio_dir;
}
} else {
dev_err(&info->i2c->dev,
"irq gpio not provided\n");
retval = -EINVAL;
goto err_irq_gpio_req;
}
if (gpio_is_valid(info->ID_gpio)) {
/* configure the OTG USB ID gpio as an input */
retval = gpio_request(info->ID_gpio,
"tusb320_OTG_USB_ID_gpio");
if (retval) {
dev_err(&info->i2c->dev, "unable to request gpio [%d]\n",
info->ID_gpio);
goto err_irq_gpio_to_irq;
}
retval = gpio_direction_input(info->ID_gpio);
if (retval) {
dev_err(&info->i2c->dev,
"unable to set dir for gpio [%d]\n",
info->ID_gpio);
goto err_irq_gpio_to_irq;
}
info->OTG_USB_ID_irq = gpio_to_irq(info->ID_gpio);
if (info->OTG_USB_ID_irq < 0) {
dev_err(&info->i2c->dev,
"Unable to get irq number for GPIO %d, error %d\n",
info->ID_gpio, info->OTG_USB_ID_irq);
retval = info->OTG_USB_ID_irq;
goto err_irq_gpio_dir;
}
} else {
dev_err(&info->i2c->dev,
"OTG_USB_ID gpio not provided\n");
retval = -EINVAL;
goto err_id_gpio_dir;
}
dev_err(&info->i2c->dev,"%s OK!\n",__func__);
return 0;
err_id_gpio_dir:
if (gpio_is_valid(info->ID_gpio))
gpio_free(info->ID_gpio);
err_irq_gpio_to_irq:
/* no irq has been requested at this point, so fall through to gpio cleanup */
err_irq_gpio_dir:
if (gpio_is_valid(info->irq_gpio))
gpio_free(info->irq_gpio);
err_irq_gpio_req:
return retval;
}
#include <linux/notifier.h>
#include <linux/reboot.h>
static struct tusb320_info *ginfo;
static int tusb320_power_down_callback(
struct notifier_block *nfb, unsigned long action, void *data)
{
if(ginfo == NULL){
return NOTIFY_OK;
}
switch (action) {
case SYS_POWER_OFF:
case SYS_RESTART:
case SYS_HALT:
pr_info("%s : action=0x%x\n", __func__, (unsigned)action);
disable_irq_wake(ginfo->OTG_USB_ID_irq);
free_irq(ginfo->OTG_USB_ID_irq, ginfo);
//set to UFP to get a chance to make sure get charging from other DRP typeC device
tusb320_write_reg(ginfo->i2c, REG_MOD, 0x10);//0x0a,0x10 MOD_SNK
power_supply_set_usb_otg(ginfo->usb_psy, 0);
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
static struct notifier_block tusb320_reboot_notifier = {
.notifier_call = tusb320_power_down_callback,
};
static int tusb320_probe(
struct i2c_client *client, const struct i2c_device_id *id)
{
int ret = 0;
u8 rdata = 0;
struct tusb320_info *info;
struct device_node *np = client->dev.of_node;
struct power_supply *usb_psy;
usb_psy = power_supply_get_by_name("usb");
if (!usb_psy) {
dev_dbg(&client->dev, "USB power_supply not found, defer probe\n");
return -EPROBE_DEFER;
}
info = kzalloc(sizeof(struct tusb320_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->i2c = client;
info->usb_psy = usb_psy;
i2c_set_clientdata(client, info);
mutex_init(&info->mutex);
dev_err(&client->dev, "%s\n",__func__);
tusb320_read_reg(info->i2c, 0x00, &rdata);
if(rdata != 0x30){
dev_err(&client->dev, "DEV_ID(0x%x)!= 0x30\n",rdata);
ret = -EINVAL;
goto parse_dt_failed;
}
else
{
dev_err(&client->dev, "DEV_ID(0x%x) OK!\n",rdata);
}
/* USBID, irq gpio info */
info->ID_gpio = of_get_named_gpio(np,"tusb320,ID-gpio", 0);
info->irq_gpio = of_get_named_gpio(np,"tusb320,irq-gpio", 0);
ret = tusb320_gpio_configure(info);
if(ret != 0)
{
goto parse_dt_failed;
}
info->fusb_class = class_create(THIS_MODULE, "type-c-tusb320");
info->dev_t = device_create(info->fusb_class, NULL, 0, NULL, "tusb320");
device_create_file(info->dev_t, &dev_attr_type);
device_create_file(info->dev_t, &dev_attr_mode);
device_create_file(info->dev_t, &dev_attr_CC_state);
dev_set_drvdata(info->dev_t, info);
info->otg_wqueue = create_singlethread_workqueue("tusb320_otg_wqueue");
INIT_WORK(&info->otg_work, tusb320_otg_work);
ret = request_threaded_irq(info->irq, NULL, tusb320_irq_thread,
IRQF_TRIGGER_LOW | IRQF_ONESHOT, "tusb320_irq", info);
if (ret){
dev_err(&client->dev, "failed to reqeust IRQ\n");
goto request_irq_failed;
}
wake_lock_init(&info->otg_wl, WAKE_LOCK_SUSPEND, "tusb320_otg_wl");
/* Update initial state to USB */
if(!gpio_get_value(info->ID_gpio)){
tusb320_otg_irq_thread(info->OTG_USB_ID_irq, info);
tusb320_reboot_scan_otg(info);/*Anderson: trigger an OTG scan after reboot*/
}
/*
ret = request_threaded_irq(info->OTG_USB_ID_irq, NULL, tusb320_otg_irq_thread,
IRQF_TRIGGER_FALLING| IRQF_ONESHOT, "tusb320_otg_irq", info);
*/
ret = request_irq(info->OTG_USB_ID_irq, tusb320_otg_irq_thread,
IRQF_TRIGGER_FALLING|IRQF_TRIGGER_RISING, "tusb320_otg_irq", info);
if (ret){
dev_err(&client->dev, "failed to reqeust OTG IRQ\n");
goto request_irq_failed;
}
ret = enable_irq_wake(info->irq);
if (ret < 0){
dev_err(&client->dev,
"failed to enable wakeup src %d\n", ret);
goto enable_irq_failed;
}
ret = enable_irq_wake(info->OTG_USB_ID_irq);
if (ret < 0){
dev_err(&client->dev,
"failed to enable wakeup src %d\n", ret);
goto enable_irq_failed;
}
ginfo = info;
register_reboot_notifier(&tusb320_reboot_notifier);
tusb320_initialization(info);
dev_err(&info->i2c->dev,"%s OK!\n",__func__);
return 0;
enable_irq_failed:
free_irq(info->irq, info); /* dev_id must match what request_irq() was given */
free_irq(info->OTG_USB_ID_irq, info);
request_irq_failed:
destroy_workqueue(info->otg_wqueue);
device_remove_file(info->dev_t, &dev_attr_type);
device_remove_file(info->dev_t, &dev_attr_mode);
device_remove_file(info->dev_t, &dev_attr_CC_state);
device_destroy(info->fusb_class, 0);
class_destroy(info->fusb_class);
parse_dt_failed:
mutex_destroy(&info->mutex);
i2c_set_clientdata(client, NULL);
kfree(info);
return ret;
}
static int tusb320_remove(struct i2c_client *client)
{
struct tusb320_info *info = i2c_get_clientdata(client);
if (client->irq) {
disable_irq_wake(client->irq);
free_irq(client->irq, info);
}
unregister_reboot_notifier(&tusb320_reboot_notifier);
destroy_workqueue(info->otg_wqueue);
device_remove_file(info->dev_t, &dev_attr_type);
device_remove_file(info->dev_t, &dev_attr_mode);
device_remove_file(info->dev_t, &dev_attr_CC_state);
device_destroy(info->fusb_class, 0);
class_destroy(info->fusb_class);
mutex_destroy(&info->mutex);
i2c_set_clientdata(client, NULL);
kfree(info);
return 0;
}
static int tusb320_suspend(struct i2c_client *client, pm_message_t message)
{
return 0;
}
static int tusb320_resume(struct i2c_client *client)
{
return 0;
}
static const struct of_device_id tusb320_id[] = {
{.compatible = "tusb320"},
{},
};
MODULE_DEVICE_TABLE(of, tusb320_id);
static const struct i2c_device_id tusb320_i2c_id[] = {
{ "tusb320", 0 },
{ }
};
static struct i2c_driver tusb320_i2c_driver = {
.driver = {
.name = "tusb320",
.of_match_table = of_match_ptr(tusb320_id),
},
.probe = tusb320_probe,
.remove = tusb320_remove,
.suspend = tusb320_suspend,
.resume = tusb320_resume,
.id_table = tusb320_i2c_id,
};
static __init int tusb320_i2c_init(void)
{
return i2c_add_driver(&tusb320_i2c_driver);
}
static __exit void tusb320_i2c_exit(void)
{
i2c_del_driver(&tusb320_i2c_driver);
}
module_init(tusb320_i2c_init);
module_exit(tusb320_i2c_exit);
MODULE_AUTHOR("oem@OP");
MODULE_DESCRIPTION("I2C bus driver for TUSB320 USB Type-C");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
derekhe/huawei-g330d-u8825d-kernel | sound/soc/msm/qdsp6v2/q6asm.c | 30 | 90719 | /*
* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/msm_audio.h>
#include <linux/android_pmem.h>
#include <linux/memory_alloc.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <linux/atomic.h>
#include <asm/ioctls.h>
#include <mach/memory.h>
#include <mach/debug_mm.h>
#include <mach/peripheral-loader.h>
#include <mach/qdsp6v2/audio_acdb.h>
#include <mach/qdsp6v2/rtac.h>
#include <mach/msm_subsystem_map.h>
#include <sound/apr_audio-v2.h>
#include <sound/q6asm-v2.h>
#define TRUE 0x01
#define FALSE 0x00
#define READDONE_IDX_STATUS 0
#define READDONE_IDX_BUFADD_LSW 1
#define READDONE_IDX_BUFADD_MSW 2
#define READDONE_IDX_MEMMAP_HDL 3
#define READDONE_IDX_SIZE 4
#define READDONE_IDX_OFFSET 5
#define READDONE_IDX_LSW_TS 6
#define READDONE_IDX_MSW_TS 7
#define READDONE_IDX_FLAGS 8
#define READDONE_IDX_NUMFRAMES 9
#define READDONE_IDX_SEQ_ID 10
/* TODO, combine them together */
static DEFINE_MUTEX(session_lock);
struct asm_mmap {
atomic_t ref_cnt;
void *apr;
};
static struct asm_mmap this_mmap;
/* session id: 0 reserved */
static struct audio_client *session[SESSION_MAX+1];
struct asm_buffer_node {
struct list_head list;
uint32_t buf_addr_lsw;
uint32_t mmap_hdl;
};
static int32_t q6asm_mmapcallback(struct apr_client_data *data, void *priv);
static int32_t q6asm_callback(struct apr_client_data *data, void *priv);
static void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr,
uint32_t pkt_size, uint32_t cmd_flg);
static void q6asm_add_hdr_async(struct audio_client *ac, struct apr_hdr *hdr,
uint32_t pkt_size, uint32_t cmd_flg);
static int q6asm_memory_map_regions(struct audio_client *ac, int dir,
uint32_t bufsz, uint32_t bufcnt);
static int q6asm_memory_unmap_regions(struct audio_client *ac, int dir,
uint32_t bufsz, uint32_t bufcnt);
static void q6asm_reset_buf_state(struct audio_client *ac);
#ifdef CONFIG_DEBUG_FS
#define OUT_BUFFER_SIZE 56
#define IN_BUFFER_SIZE 24
static struct timeval out_cold_tv;
static struct timeval out_warm_tv;
static struct timeval out_cont_tv;
static struct timeval in_cont_tv;
static long out_enable_flag;
static long in_enable_flag;
static struct dentry *out_dentry;
static struct dentry *in_dentry;
static int in_cont_index;
/*This var is used to keep track of first write done for cold output latency */
static int out_cold_index;
static char *out_buffer;
static char *in_buffer;
static int audio_output_latency_dbgfs_open(struct inode *inode,
struct file *file)
{
file->private_data = inode->i_private;
return 0;
}
static ssize_t audio_output_latency_dbgfs_read(struct file *file,
char __user *buf, size_t count, loff_t *ppos)
{
snprintf(out_buffer, OUT_BUFFER_SIZE, "%ld,%ld,%ld,%ld,%ld,%ld,",\
out_cold_tv.tv_sec, out_cold_tv.tv_usec, out_warm_tv.tv_sec,\
out_warm_tv.tv_usec, out_cont_tv.tv_sec, out_cont_tv.tv_usec);
return simple_read_from_buffer(buf, OUT_BUFFER_SIZE, ppos,
out_buffer, OUT_BUFFER_SIZE);
}
static ssize_t audio_output_latency_dbgfs_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
char *temp;
if (count > 2*sizeof(char))
return -EINVAL;
/* one extra, zeroed byte keeps the buffer NUL-terminated for strict_strtol() */
temp = kzalloc(2*sizeof(char) + 1, GFP_KERNEL);
out_cold_index = 0;
if (temp) {
if (copy_from_user(temp, buf, count)) {
kfree(temp);
return -EFAULT;
}
if (!strict_strtol(temp, 10, &out_enable_flag)) {
kfree(temp);
return count;
}
kfree(temp);
}
return -EINVAL;
}
static const struct file_operations audio_output_latency_debug_fops = {
.open = audio_output_latency_dbgfs_open,
.read = audio_output_latency_dbgfs_read,
.write = audio_output_latency_dbgfs_write
};
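/* Illustrative use of the node above (the path assumes debugfs is mounted at
* the conventional /sys/kernel/debug):
*
*	echo 1 > /sys/kernel/debug/audio_out_latency_measurement_node
*	cat /sys/kernel/debug/audio_out_latency_measurement_node
*
* The read returns "cold_sec,cold_usec,warm_sec,warm_usec,cont_sec,cont_usec,"
* exactly as formatted by audio_output_latency_dbgfs_read().
*/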
static int audio_input_latency_dbgfs_open(struct inode *inode,
struct file *file)
{
file->private_data = inode->i_private;
return 0;
}
static ssize_t audio_input_latency_dbgfs_read(struct file *file,
char __user *buf, size_t count, loff_t *ppos)
{
snprintf(in_buffer, IN_BUFFER_SIZE, "%ld,%ld,",\
in_cont_tv.tv_sec, in_cont_tv.tv_usec);
return simple_read_from_buffer(buf, IN_BUFFER_SIZE, ppos,
in_buffer, IN_BUFFER_SIZE);
}
static ssize_t audio_input_latency_dbgfs_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
char *temp;
if (count > 2*sizeof(char))
return -EINVAL;
/* one extra, zeroed byte keeps the buffer NUL-terminated for strict_strtol() */
temp = kzalloc(2*sizeof(char) + 1, GFP_KERNEL);
if (temp) {
if (copy_from_user(temp, buf, count)) {
kfree(temp);
return -EFAULT;
}
if (!strict_strtol(temp, 10, &in_enable_flag)) {
kfree(temp);
return count;
}
kfree(temp);
}
return -EINVAL;
}
static const struct file_operations audio_input_latency_debug_fops = {
.open = audio_input_latency_dbgfs_open,
.read = audio_input_latency_dbgfs_read,
.write = audio_input_latency_dbgfs_write
};
static void config_debug_fs_write_cb(void)
{
if (out_enable_flag) {
/* For first Write done log the time and reset
out_cold_index*/
if (out_cold_index != 1) {
do_gettimeofday(&out_cold_tv);
pr_debug("COLD: apr_send_pkt at %ld"
"sec %ld microsec\n",\
out_cold_tv.tv_sec,\
out_cold_tv.tv_usec);
out_cold_index = 1;
}
pr_debug("out_enable_flag %ld",\
out_enable_flag);
}
}
static void config_debug_fs_read_cb(void)
{
if (in_enable_flag) {
/* when in_cont_index == 7, DSP would be
* writing into the 8th 512 byte buffer and this
* timestamp is tapped here. Once done it then writes
* to the 9th 512 byte buffer. These two buffers (8th, 9th)
* reach the test application in 5th iteration and that
* timestamp is tapped at user level. The difference
* of these two timestamps gives us the time between
* the time at which dsp started filling the sample
* required and when it reached the test application.
* Hence continuous input latency
*/
if (in_cont_index == 7) {
do_gettimeofday(&in_cont_tv);
pr_err("In_CONT:previous read buffer done"
"at %ld sec %ld microsec\n",\
in_cont_tv.tv_sec, in_cont_tv.tv_usec);
}
in_cont_index++;
}
}
static void config_debug_fs_reset_index(void)
{
in_cont_index = 0;
}
static void config_debug_fs_run(void)
{
if (out_enable_flag) {
do_gettimeofday(&out_cold_tv);
pr_debug("COLD: apr_send_pkt at %ld sec %ld microsec\n",\
out_cold_tv.tv_sec, out_cold_tv.tv_usec);
}
}
static void config_debug_fs_write(struct audio_buffer *ab)
{
if (out_enable_flag) {
char zero_pattern[2] = {0x00, 0x00};
/* If the first two bytes are non-zero and the last two
bytes are zero, it is the warm output pattern */
if ((strncmp(((char *)ab->data), zero_pattern, 2)) &&
(!strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
do_gettimeofday(&out_warm_tv);
pr_debug("WARM:apr_send_pkt at"
"%ld sec %ld microsec\n", out_warm_tv.tv_sec,\
out_warm_tv.tv_usec);
pr_debug("Warm Pattern Matched");
}
/* If the first two bytes are zero and the last two bytes
are non-zero, it is the cont output pattern */
else if ((!strncmp(((char *)ab->data), zero_pattern, 2))
&& (strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
do_gettimeofday(&out_cont_tv);
pr_debug("CONT:apr_send_pkt at"
"%ld sec %ld microsec\n", out_cont_tv.tv_sec,\
out_cont_tv.tv_usec);
pr_debug("Cont Pattern Matched");
}
}
}
static void config_debug_fs_init(void)
{
out_buffer = kmalloc(OUT_BUFFER_SIZE, GFP_KERNEL);
out_dentry = debugfs_create_file("audio_out_latency_measurement_node",\
S_IFREG | S_IRUGO | S_IWUGO,\
NULL, NULL, &audio_output_latency_debug_fops);
if (IS_ERR(out_dentry))
pr_err("debugfs_create_file failed\n");
in_buffer = kmalloc(IN_BUFFER_SIZE, GFP_KERNEL);
in_dentry = debugfs_create_file("audio_in_latency_measurement_node",\
S_IFREG | S_IRUGO | S_IWUGO,\
NULL, NULL, &audio_input_latency_debug_fops);
if (IS_ERR(in_dentry))
pr_err("debugfs_create_file failed\n");
}
#else
static void config_debug_fs_write(struct audio_buffer *ab)
{
return;
}
static void config_debug_fs_run(void)
{
return;
}
static void config_debug_fs_reset_index(void)
{
return;
}
static void config_debug_fs_read_cb(void)
{
return;
}
static void config_debug_fs_write_cb(void)
{
return;
}
static void config_debug_fs_init(void)
{
return;
}
#endif
static int q6asm_session_alloc(struct audio_client *ac)
{
int n;
mutex_lock(&session_lock);
for (n = 1; n <= SESSION_MAX; n++) {
if (!session[n]) {
session[n] = ac;
mutex_unlock(&session_lock);
return n;
}
}
mutex_unlock(&session_lock);
return -ENOMEM;
}
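/* Sessions are handed out linearly from the table above: index 0 stays
* reserved, valid ids run 1..SESSION_MAX, and -ENOMEM signals that every
* slot is taken; q6asm_session_free() below zeroes the slot again under the
* same session_lock.
*/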
static void q6asm_session_free(struct audio_client *ac)
{
pr_debug("%s: sessionid[%d]\n", __func__, ac->session);
rtac_remove_popp_from_adm_devices(ac->session);
mutex_lock(&session_lock);
session[ac->session] = 0;
mutex_unlock(&session_lock);
ac->session = 0;
return;
}
int q6asm_audio_client_buf_free(unsigned int dir,
struct audio_client *ac)
{
struct audio_port_data *port;
int cnt = 0;
int rc = 0;
pr_debug("%s: Session id %d\n", __func__, ac->session);
mutex_lock(&ac->cmd_lock);
if (ac->io_mode == SYNC_IO_MODE) {
port = &ac->port[dir];
if (!port->buf) {
mutex_unlock(&ac->cmd_lock);
return 0;
}
cnt = port->max_buf_cnt - 1;
if (cnt >= 0) {
rc = q6asm_memory_unmap_regions(ac, dir,
port->buf[0].size,
port->max_buf_cnt);
if (rc < 0)
pr_err("%s CMD Memory_unmap_regions failed\n",
__func__);
}
while (cnt >= 0) {
if (port->buf[cnt].data) {
ion_unmap_kernel(port->buf[cnt].client,
port->buf[cnt].handle);
ion_free(port->buf[cnt].client,
port->buf[cnt].handle);
ion_client_destroy(port->buf[cnt].client);
port->buf[cnt].data = NULL;
port->buf[cnt].phys = 0;
--(port->max_buf_cnt);
}
--cnt;
}
kfree(port->buf);
port->buf = NULL;
}
mutex_unlock(&ac->cmd_lock);
return 0;
}
int q6asm_audio_client_buf_free_contiguous(unsigned int dir,
struct audio_client *ac)
{
struct audio_port_data *port;
int cnt = 0;
int rc = 0;
pr_debug("%s: Session id %d\n", __func__, ac->session);
mutex_lock(&ac->cmd_lock);
port = &ac->port[dir];
if (!port->buf) {
mutex_unlock(&ac->cmd_lock);
return 0;
}
cnt = port->max_buf_cnt - 1;
if (cnt >= 0) {
rc = q6asm_memory_unmap(ac, port->buf[0].phys, dir);
if (rc < 0)
pr_err("%s CMD Memory_unmap_regions failed\n",
__func__);
}
if (port->buf[0].data) {
ion_unmap_kernel(port->buf[0].client, port->buf[0].handle);
ion_free(port->buf[0].client, port->buf[0].handle);
ion_client_destroy(port->buf[0].client);
pr_debug("%s:data[%p]phys[%p][%p]"
", client[%p] handle[%p]\n",
__func__,
(void *)port->buf[0].data,
(void *)port->buf[0].phys,
(void *)&port->buf[0].phys,
(void *)port->buf[0].client,
(void *)port->buf[0].handle);
}
while (cnt >= 0) {
port->buf[cnt].data = NULL;
port->buf[cnt].phys = 0;
cnt--;
}
port->max_buf_cnt = 0;
kfree(port->buf);
port->buf = NULL;
mutex_unlock(&ac->cmd_lock);
return 0;
}
int q6asm_mmap_apr_dereg(void)
{
if (atomic_read(&this_mmap.ref_cnt) <= 0) {
pr_err("%s: APR Common Port Already Closed\n", __func__);
goto done;
}
atomic_dec(&this_mmap.ref_cnt);
if (atomic_read(&this_mmap.ref_cnt) == 0) {
apr_deregister(this_mmap.apr);
pr_debug("%s:APR De-Register common port\n", __func__);
}
done:
return 0;
}
void q6asm_audio_client_free(struct audio_client *ac)
{
int loopcnt;
struct audio_port_data *port;
if (!ac || !ac->session)
return;
pr_debug("%s: Session id %d\n", __func__, ac->session);
if (ac->io_mode == SYNC_IO_MODE) {
for (loopcnt = 0; loopcnt <= OUT; loopcnt++) {
port = &ac->port[loopcnt];
if (!port->buf)
continue;
pr_debug("%s:loopcnt = %d\n", __func__, loopcnt);
q6asm_audio_client_buf_free(loopcnt, ac);
}
}
apr_deregister(ac->apr);
ac->mmap_apr = NULL;
q6asm_session_free(ac);
q6asm_mmap_apr_dereg();
pr_debug("%s: APR De-Register\n", __func__);
/*done:*/
kfree(ac);
return;
}
int q6asm_set_io_mode(struct audio_client *ac, uint32_t mode)
{
if (ac == NULL) {
pr_err("%s APR handle NULL\n", __func__);
return -EINVAL;
}
if ((mode == ASYNC_IO_MODE) || (mode == SYNC_IO_MODE)) {
ac->io_mode = mode;
pr_debug("%s:Set Mode to %d\n", __func__, ac->io_mode);
return 0;
} else {
pr_err("%s:Not an valid IO Mode:%d\n", __func__, ac->io_mode);
return -EINVAL;
}
}
void *q6asm_mmap_apr_reg(void)
{
if (atomic_read(&this_mmap.ref_cnt) == 0) {
this_mmap.apr = apr_register("ADSP", "ASM", \
(apr_fn)q6asm_mmapcallback,\
0x0FFFFFFFF, &this_mmap);
if (this_mmap.apr == NULL) {
pr_debug("%s Unable to register"
"APR ASM common port\n", __func__);
goto fail;
}
}
atomic_inc(&this_mmap.ref_cnt);
return this_mmap.apr;
fail:
return NULL;
}
struct audio_client *q6asm_audio_client_alloc(app_cb cb, void *priv)
{
struct audio_client *ac;
int n;
int lcnt = 0;
ac = kzalloc(sizeof(struct audio_client), GFP_KERNEL);
if (!ac)
return NULL;
n = q6asm_session_alloc(ac);
if (n <= 0)
goto fail_session;
ac->session = n;
ac->cb = cb;
ac->priv = priv;
ac->io_mode = SYNC_IO_MODE;
ac->apr = apr_register("ADSP", "ASM", \
(apr_fn)q6asm_callback,\
((ac->session) << 8 | 0x0001),\
ac);
if (ac->apr == NULL) {
pr_err("%s Registration with APR failed\n", __func__);
goto fail;
}
rtac_set_asm_handle(n, ac->apr);
pr_debug("%s Registering the common port with APR\n", __func__);
ac->mmap_apr = q6asm_mmap_apr_reg();
if (ac->mmap_apr == NULL)
goto fail;
init_waitqueue_head(&ac->cmd_wait);
INIT_LIST_HEAD(&ac->port[0].mem_map_handle);
INIT_LIST_HEAD(&ac->port[1].mem_map_handle);
pr_debug("%s: mem_map_handle list init'ed\n", __func__);
mutex_init(&ac->cmd_lock);
for (lcnt = 0; lcnt <= OUT; lcnt++) {
mutex_init(&ac->port[lcnt].lock);
spin_lock_init(&ac->port[lcnt].dsp_lock);
}
atomic_set(&ac->cmd_state, 0);
pr_debug("%s: session[%d]\n", __func__, ac->session);
return ac;
fail:
q6asm_audio_client_free(ac);
return NULL;
fail_session:
kfree(ac);
return NULL;
}
struct audio_client *q6asm_get_audio_client(int session_id)
{
if ((session_id <= 0) || (session_id > SESSION_MAX)) {
pr_err("%s: invalid session: %d\n", __func__, session_id);
goto err;
}
if (!session[session_id]) {
pr_err("%s: session not active: %d\n", __func__, session_id);
goto err;
}
return session[session_id];
err:
return NULL;
}
int q6asm_audio_client_buf_alloc(unsigned int dir,
struct audio_client *ac,
unsigned int bufsz,
unsigned int bufcnt)
{
int cnt = 0;
int rc = 0;
struct audio_buffer *buf;
int len;
if (!(ac) || ((dir != IN) && (dir != OUT)))
return -EINVAL;
pr_debug("%s: session[%d]bufsz[%d]bufcnt[%d]\n", __func__, ac->session,
bufsz, bufcnt);
if (ac->session <= 0 || ac->session > 8)
goto fail;
if (ac->io_mode == SYNC_IO_MODE) {
if (ac->port[dir].buf) {
pr_debug("%s: buffer already allocated\n", __func__);
return 0;
}
mutex_lock(&ac->cmd_lock);
buf = kzalloc(((sizeof(struct audio_buffer))*bufcnt),
GFP_KERNEL);
if (!buf) {
mutex_unlock(&ac->cmd_lock);
goto fail;
}
ac->port[dir].buf = buf;
while (cnt < bufcnt) {
if (bufsz > 0) {
if (!buf[cnt].data) {
buf[cnt].client = msm_ion_client_create
(UINT_MAX, "audio_client");
if (IS_ERR_OR_NULL((void *)
buf[cnt].client)) {
pr_err("%s: ION create client"
" for AUDIO failed\n",
__func__);
goto fail;
}
buf[cnt].handle = ion_alloc
(buf[cnt].client, bufsz, SZ_4K,
(0x1 << ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL((void *)
buf[cnt].handle)) {
pr_err("%s: ION memory"
" allocation for AUDIO failed\n",
__func__);
goto fail;
}
rc = ion_phys(buf[cnt].client,
buf[cnt].handle,
(ion_phys_addr_t *)
&buf[cnt].phys,
(size_t *)&len);
if (rc) {
pr_err("%s: ION Get Physical"
" for AUDIO failed, rc = %d\n",
__func__, rc);
goto fail;
}
buf[cnt].data = ion_map_kernel
(buf[cnt].client, buf[cnt].handle,
0);
if (IS_ERR_OR_NULL((void *)
buf[cnt].data)) {
pr_err("%s: ION memory"
" mapping for AUDIO failed\n", __func__);
goto fail;
}
memset((void *)buf[cnt].data, 0, bufsz);
buf[cnt].used = 1;
buf[cnt].size = bufsz;
buf[cnt].actual_size = bufsz;
pr_debug("%s data[%p]phys[%p][%p]\n",
__func__,
(void *)buf[cnt].data,
(void *)buf[cnt].phys,
(void *)&buf[cnt].phys);
cnt++;
}
}
}
ac->port[dir].max_buf_cnt = cnt;
mutex_unlock(&ac->cmd_lock);
rc = q6asm_memory_map_regions(ac, dir, bufsz, cnt);
if (rc < 0) {
pr_err("%s:CMD Memory_map_regions failed\n", __func__);
goto fail;
}
}
return 0;
fail:
q6asm_audio_client_buf_free(dir, ac);
return -EINVAL;
}
int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir,
struct audio_client *ac,
unsigned int bufsz,
unsigned int bufcnt)
{
int cnt = 0;
int rc = 0;
struct audio_buffer *buf;
int len;
if (!(ac) || ((dir != IN) && (dir != OUT)))
return -EINVAL;
pr_debug("%s: session[%d]bufsz[%d]bufcnt[%d]\n",
__func__, ac->session,
bufsz, bufcnt);
if (ac->session <= 0 || ac->session > 8)
goto fail;
if (ac->port[dir].buf) {
pr_debug("%s: buffer already allocated\n", __func__);
return 0;
}
mutex_lock(&ac->cmd_lock);
buf = kzalloc(((sizeof(struct audio_buffer))*bufcnt),
GFP_KERNEL);
if (!buf) {
mutex_unlock(&ac->cmd_lock);
goto fail;
}
ac->port[dir].buf = buf;
buf[0].client = msm_ion_client_create(UINT_MAX, "audio_client");
if (IS_ERR_OR_NULL((void *)buf[0].client)) {
pr_err("%s: ION create client for AUDIO failed\n", __func__);
goto fail;
}
buf[0].handle = ion_alloc(buf[0].client, bufsz * bufcnt, SZ_4K,
(0x1 << ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL((void *) buf[0].handle)) {
pr_err("%s: ION memory allocation for AUDIO failed\n",
__func__);
goto fail;
}
rc = ion_phys(buf[0].client, buf[0].handle,
(ion_phys_addr_t *)&buf[0].phys, (size_t *)&len);
if (rc) {
pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
__func__, rc);
goto fail;
}
buf[0].data = ion_map_kernel(buf[0].client, buf[0].handle, 0);
if (IS_ERR_OR_NULL((void *) buf[0].data)) {
pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
goto fail;
}
	/* the IS_ERR_OR_NULL() check above already rejected a NULL mapping */
	memset((void *)buf[0].data, 0, (bufsz * bufcnt));
buf[0].used = dir ^ 1;
buf[0].size = bufsz;
buf[0].actual_size = bufsz;
cnt = 1;
while (cnt < bufcnt) {
if (bufsz > 0) {
buf[cnt].data = buf[0].data + (cnt * bufsz);
buf[cnt].phys = buf[0].phys + (cnt * bufsz);
if (!buf[cnt].data) {
pr_err("%s Buf alloc failed\n",
__func__);
mutex_unlock(&ac->cmd_lock);
goto fail;
}
buf[cnt].used = dir ^ 1;
buf[cnt].size = bufsz;
buf[cnt].actual_size = bufsz;
pr_debug("%s data[%p]phys[%p][%p]\n", __func__,
(void *)buf[cnt].data,
(void *)buf[cnt].phys,
(void *)&buf[cnt].phys);
}
cnt++;
}
ac->port[dir].max_buf_cnt = cnt;
mutex_unlock(&ac->cmd_lock);
rc = q6asm_memory_map_regions(ac, dir, bufsz, cnt);
if (rc < 0) {
pr_err("%s:CMD Memory_map_regions failed\n", __func__);
goto fail;
}
return 0;
fail:
q6asm_audio_client_buf_free_contiguous(dir, ac);
return -EINVAL;
}
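/*
 * q6asm_mmapcallback()
 *
 * APR callback for the shared memory-map service: completes pending
 * map/unmap commands (clears cmd_state, wakes cmd_wait) and forwards
 * other events to the client callback. Session id and direction are
 * recovered from the packet token.
 */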
static int32_t q6asm_mmapcallback(struct apr_client_data *data, void *priv)
{
	uint32_t sid = 0;
	uint32_t dir = 0;
	uint32_t *payload;
	unsigned long dsp_flags;
	struct audio_client *ac = NULL;
	struct audio_port_data *port;
	if (!data) {
		pr_err("%s: Invalid CB\n", __func__);
		return 0;
	}
	payload = data->payload;
if (data->opcode == RESET_EVENTS) {
pr_debug("%s: Reset event is received: %d %d apr[%p]\n",
__func__,
data->reset_event,
data->reset_proc,
this_mmap.apr);
apr_reset(this_mmap.apr);
atomic_set(&this_mmap.ref_cnt, 0);
this_mmap.apr = NULL;
return 0;
}
	sid = (data->token >> 8) & 0x0F;
	dir = (data->token & 0x0F);
	ac = q6asm_get_audio_client(sid);
	if (!ac) {
		pr_err("%s: no audio client for session[%d]\n",
			__func__, sid);
		return 0;
	}
pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x]"
"token[0x%x]payload_s[%d] src[%d] dest[%d]sid[%d]dir[%d]\n",
__func__, payload[0], payload[1], data->opcode, data->token,
data->payload_size, data->src_port, data->dest_port, sid, dir);
pr_debug("%s:Payload = [0x%x] status[0x%x]\n",
__func__, payload[0], payload[1]);
if (data->opcode == APR_BASIC_RSP_RESULT) {
switch (payload[0]) {
case ASM_CMD_SHARED_MEM_MAP_REGIONS:
case ASM_CMD_SHARED_MEM_UNMAP_REGIONS:
if (atomic_read(&ac->cmd_state)) {
atomic_set(&ac->cmd_state, 0);
wake_up(&ac->cmd_wait);
}
pr_debug("%s:Payload = [0x%x] status[0x%x]\n",
__func__, payload[0], payload[1]);
break;
default:
pr_debug("%s:command[0x%x] not expecting rsp\n",
__func__, payload[0]);
break;
}
return 0;
}
port = &ac->port[dir];
switch (data->opcode) {
case ASM_CMDRSP_SHARED_MEM_MAP_REGIONS:{
pr_debug("%s:PL#0[0x%x]PL#1 [0x%x] dir=%x s_id=%x\n",
__func__, payload[0], payload[1], dir, sid);
spin_lock_irqsave(&port->dsp_lock, dsp_flags);
if (atomic_read(&ac->cmd_state)) {
ac->port[dir].tmp_hdl = payload[0];
atomic_set(&ac->cmd_state, 0);
wake_up(&ac->cmd_wait);
}
spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
break;
}
case ASM_CMD_SHARED_MEM_UNMAP_REGIONS:{
pr_debug("%s:PL#0[0x%x]PL#1 [0x%x]\n",
__func__, payload[0], payload[1]);
spin_lock_irqsave(&port->dsp_lock, dsp_flags);
if (atomic_read(&ac->cmd_state)) {
atomic_set(&ac->cmd_state, 0);
wake_up(&ac->cmd_wait);
}
spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
break;
}
default:
pr_debug("%s:command[0x%x]success [0x%x]\n",
__func__, payload[0], payload[1]);
}
if (ac->cb)
ac->cb(data->opcode, data->token,
data->payload, ac->priv);
return 0;
}
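/*
 * q6asm_callback()
 *
 * Per-session APR callback: completes pending stream/session commands
 * by clearing cmd_state and waking cmd_wait, recycles data buffers on
 * WRITE_DONE/READ_DONE events in sync I/O mode, and forwards events to
 * the registered client callback.
 */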
static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
{
int i = 0;
struct audio_client *ac = (struct audio_client *)priv;
uint32_t token;
unsigned long dsp_flags;
uint32_t *payload;
if ((ac == NULL) || (data == NULL)) {
pr_err("ac or priv NULL\n");
return -EINVAL;
}
if (ac->session <= 0 || ac->session > 8) {
pr_err("%s:Session ID is invalid, session = %d\n", __func__,
ac->session);
return -EINVAL;
}
payload = data->payload;
if (data->opcode == RESET_EVENTS) {
pr_debug("q6asm_callback: Reset event is received: %d %d apr[%p]\n",
data->reset_event, data->reset_proc, ac->apr);
if (ac->cb)
ac->cb(data->opcode, data->token,
(uint32_t *)data->payload, ac->priv);
apr_reset(ac->apr);
return 0;
}
pr_debug("%s: session[%d]opcode[0x%x]"
"token[0x%x]payload_s[%d] src[%d] dest[%d]\n", __func__,
ac->session, data->opcode,
data->token, data->payload_size, data->src_port,
data->dest_port);
if ((data->opcode != ASM_DATA_EVENT_RENDERED_EOS) &&
(data->opcode != ASM_DATA_EVENT_EOS))
pr_debug("%s:Payload = [0x%x] status[0x%x]\n",
__func__, payload[0], payload[1]);
if (data->opcode == APR_BASIC_RSP_RESULT) {
token = data->token;
switch (payload[0]) {
case ASM_STREAM_CMD_SET_PP_PARAMS_V2:
if (rtac_make_asm_callback(ac->session, payload,
data->payload_size))
break;
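			/* else fall through: handle as a normal command response */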
case ASM_SESSION_CMD_PAUSE:
case ASM_DATA_CMD_EOS:
case ASM_STREAM_CMD_CLOSE:
case ASM_STREAM_CMD_FLUSH:
case ASM_SESSION_CMD_RUN_V2:
case ASM_SESSION_CMD_REGISTER_FORX_OVERFLOW_EVENTS:
case ASM_STREAM_CMD_FLUSH_READBUFS:
pr_debug("%s:Payload = [0x%x]\n", __func__, payload[0]);
if (token != ac->session) {
pr_err("%s:Invalid session[%d] rxed expected[%d]",
__func__, token, ac->session);
return -EINVAL;
}
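			/* fall through: these commands share the wake-up below */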
case ASM_STREAM_CMD_OPEN_READ_V2:
case ASM_STREAM_CMD_OPEN_WRITE_V2:
case ASM_STREAM_CMD_OPEN_READWRITE_V2:
case ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2:
case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
pr_debug("%s:Payload = [0x%x]stat[0x%x]\n",
__func__, payload[0], payload[1]);
if (atomic_read(&ac->cmd_state)) {
atomic_set(&ac->cmd_state, 0);
wake_up(&ac->cmd_wait);
}
if (ac->cb)
ac->cb(data->opcode, data->token,
(uint32_t *)data->payload, ac->priv);
break;
default:
pr_debug("%s:command[0x%x] not expecting rsp\n",
__func__, payload[0]);
break;
}
return 0;
}
switch (data->opcode) {
case ASM_DATA_EVENT_WRITE_DONE_V2:{
struct audio_port_data *port = &ac->port[IN];
pr_debug("%s: Rxed opcode[0x%x] status[0x%x] token[%d]",
__func__, payload[0], payload[1],
data->token);
if (ac->io_mode == SYNC_IO_MODE) {
if (port->buf == NULL) {
pr_err("%s: Unexpected Write Done\n",
__func__);
return -EINVAL;
}
spin_lock_irqsave(&port->dsp_lock, dsp_flags);
if (port->buf[data->token].phys !=
payload[0]) {
pr_err("Buf expected[%p]rxed[%p]\n",\
(void *)port->buf[data->token].phys,\
(void *)payload[0]);
spin_unlock_irqrestore(&port->dsp_lock,
dsp_flags);
return -EINVAL;
}
token = data->token;
port->buf[token].used = 1;
spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
config_debug_fs_write_cb();
for (i = 0; i < port->max_buf_cnt; i++)
pr_debug("%d ", port->buf[i].used);
}
break;
}
case ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2:
rtac_make_asm_callback(ac->session, payload,
data->payload_size);
break;
case ASM_DATA_EVENT_READ_DONE_V2:{
struct audio_port_data *port = &ac->port[OUT];
config_debug_fs_read_cb();
pr_debug("%s:R-D: status=%d buff_add=%x act_size=%d offset=%d\n",
__func__, payload[READDONE_IDX_STATUS],
payload[READDONE_IDX_BUFADD_LSW],
payload[READDONE_IDX_SIZE],
payload[READDONE_IDX_OFFSET]);
pr_debug("%s:R-D:msw_ts=%d lsw_ts=%d memmap_hdl=%x flags=%d id=%d num=%d\n",
__func__, payload[READDONE_IDX_MSW_TS],
payload[READDONE_IDX_LSW_TS],
payload[READDONE_IDX_MEMMAP_HDL],
payload[READDONE_IDX_FLAGS],
payload[READDONE_IDX_SEQ_ID],
payload[READDONE_IDX_NUMFRAMES]);
if (ac->io_mode == SYNC_IO_MODE) {
if (port->buf == NULL) {
pr_err("%s: Unexpected Write Done\n", __func__);
return -EINVAL;
}
spin_lock_irqsave(&port->dsp_lock, dsp_flags);
token = data->token;
port->buf[token].used = 0;
if (port->buf[token].phys !=
payload[READDONE_IDX_BUFADD_LSW]) {
pr_err("Buf expected[%p]rxed[%p]\n",\
(void *)port->buf[token].phys,\
(void *)payload[READDONE_IDX_BUFADD_LSW]);
spin_unlock_irqrestore(&port->dsp_lock,
dsp_flags);
break;
}
port->buf[token].actual_size =
payload[READDONE_IDX_SIZE];
spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
}
break;
}
case ASM_DATA_EVENT_EOS:
case ASM_DATA_EVENT_RENDERED_EOS:
pr_debug("%s:EOS ACK received: rxed opcode[0x%x]\n",
__func__, data->opcode);
break;
case ASM_SESSION_EVENTX_OVERFLOW:
pr_err("ASM_SESSION_EVENTX_OVERFLOW\n");
break;
case ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3:
pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3, "
"payload[0] = %d, payload[1] = %d, "
"payload[2] = %d\n", __func__,
payload[0], payload[1], payload[2]);
ac->time_stamp = (uint64_t)(((uint64_t)payload[1] << 32) |
payload[2]);
if (atomic_read(&ac->cmd_state)) {
atomic_set(&ac->cmd_state, 0);
wake_up(&ac->cmd_wait);
}
break;
case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
case ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY:
pr_debug("%s: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, "
"payload[0] = %d, payload[1] = %d, "
"payload[2] = %d, payload[3] = %d\n", __func__,
payload[0], payload[1], payload[2],
payload[3]);
break;
}
if (ac->cb)
ac->cb(data->opcode, data->token,
data->payload, ac->priv);
return 0;
}
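/*
 * q6asm_is_cpu_buf_avail()
 *
 * Return the next CPU-owned buffer for the given direction, or NULL if
 * none is free. Marks the buffer busy and advances cpu_buf under the
 * port lock.
 */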
void *q6asm_is_cpu_buf_avail(int dir, struct audio_client *ac, uint32_t *size,
uint32_t *index)
{
void *data;
unsigned char idx;
struct audio_port_data *port;
if (!ac || ((dir != IN) && (dir != OUT)))
return NULL;
if (ac->io_mode == SYNC_IO_MODE) {
port = &ac->port[dir];
mutex_lock(&port->lock);
idx = port->cpu_buf;
if (port->buf == NULL) {
pr_debug("%s:Buffer pointer null\n", __func__);
mutex_unlock(&port->lock);
return NULL;
}
		/*
		 * dir 0: used == 0 means buf in use
		 * dir 1: used == 1 means buf in use
		 */
		if (port->buf[idx].used == dir) {
			/*
			 * To make it more robust, we could loop and get the
			 * next avail buf; it's risky though.
			 */
			pr_debug("%s:Next buf idx[0x%x] not available, dir[%d]\n",
				__func__, idx, dir);
			mutex_unlock(&port->lock);
			return NULL;
		}
*size = port->buf[idx].actual_size;
*index = port->cpu_buf;
data = port->buf[idx].data;
pr_debug("%s:session[%d]index[%d] data[%p]size[%d]\n",
__func__,
ac->session,
port->cpu_buf,
data, *size);
		/*
		 * Advance cpu_buf on every call so the caller does not
		 * need a separate API to mark the buffer consumed.
		 */
port->buf[idx].used = dir;
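		/* note: max_buf_cnt is assumed to be a power of two here */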
port->cpu_buf = ((port->cpu_buf + 1) & (port->max_buf_cnt - 1));
mutex_unlock(&port->lock);
return data;
}
return NULL;
}
void *q6asm_is_cpu_buf_avail_nolock(int dir, struct audio_client *ac,
uint32_t *size, uint32_t *index)
{
void *data;
unsigned char idx;
struct audio_port_data *port;
if (!ac || ((dir != IN) && (dir != OUT)))
return NULL;
port = &ac->port[dir];
idx = port->cpu_buf;
if (port->buf == NULL) {
pr_debug("%s:Buffer pointer null\n", __func__);
return NULL;
}
/*
* dir 0: used = 0 means buf in use
* dir 1: used = 1 means buf in use
*/
	if (port->buf[idx].used == dir) {
		/*
		 * To make it more robust, we could loop and get the
		 * next avail buf; it's risky though.
		 */
		pr_debug("%s:Next buf idx[0x%x] not available, dir[%d]\n",
			__func__, idx, dir);
return NULL;
}
*size = port->buf[idx].actual_size;
*index = port->cpu_buf;
data = port->buf[idx].data;
pr_debug("%s:session[%d]index[%d] data[%p]size[%d]\n",
__func__, ac->session, port->cpu_buf,
data, *size);
	/*
	 * Advance cpu_buf on every call so the caller does not need a
	 * separate API to mark the buffer consumed.
	 */
port->buf[idx].used = dir;
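	/* note: max_buf_cnt is assumed to be a power of two here */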
port->cpu_buf = ((port->cpu_buf + 1) & (port->max_buf_cnt - 1));
return data;
}
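/*
 * q6asm_is_dsp_buf_avail()
 *
 * Return 0 when the DSP-side buffer index has not caught up with the
 * CPU-side index (i.e. a buffer can be queued), -1 otherwise. OUT
 * direction and sync I/O mode only.
 */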
int q6asm_is_dsp_buf_avail(int dir, struct audio_client *ac)
{
int ret = -1;
struct audio_port_data *port;
uint32_t idx;
if (!ac || (dir != OUT))
return ret;
if (ac->io_mode == SYNC_IO_MODE) {
port = &ac->port[dir];
mutex_lock(&port->lock);
idx = port->dsp_buf;
if (port->buf[idx].used == (dir ^ 1)) {
			/* To make it more robust, we could loop and get the
			   next avail buf; it's risky though */
pr_err("Next buf idx[0x%x] not available, dir[%d]\n",
idx, dir);
mutex_unlock(&port->lock);
return ret;
}
pr_debug("%s: session[%d]dsp_buf=%d cpu_buf=%d\n", __func__,
ac->session, port->dsp_buf, port->cpu_buf);
ret = ((port->dsp_buf != port->cpu_buf) ? 0 : -1);
mutex_unlock(&port->lock);
}
return ret;
}
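/*
 * q6asm_add_hdr()
 *
 * Fill in a synchronous APR command header for this session (under
 * cmd_lock) and, when cmd_flg is set, arm cmd_state so the caller can
 * wait on cmd_wait for the response.
 */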
static void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr,
uint32_t pkt_size, uint32_t cmd_flg)
{
pr_debug("%s:pkt_size=%d cmd_flg=%d session=%d\n", __func__, pkt_size,
cmd_flg, ac->session);
mutex_lock(&ac->cmd_lock);
hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
APR_HDR_LEN(sizeof(struct apr_hdr)),\
APR_PKT_VER);
hdr->src_svc = ((struct apr_svc *)ac->apr)->id;
hdr->src_domain = APR_DOMAIN_APPS;
hdr->dest_svc = APR_SVC_ASM;
hdr->dest_domain = APR_DOMAIN_ADSP;
hdr->src_port = ((ac->session << 8) & 0xFF00) | 0x01;
hdr->dest_port = ((ac->session << 8) & 0xFF00) | 0x01;
if (cmd_flg) {
hdr->token = ac->session;
atomic_set(&ac->cmd_state, 1);
}
hdr->pkt_size = pkt_size;
mutex_unlock(&ac->cmd_lock);
return;
}
static void q6asm_add_hdr_async(struct audio_client *ac, struct apr_hdr *hdr,
uint32_t pkt_size, uint32_t cmd_flg)
{
pr_debug("pkt_size = %d, cmd_flg = %d, session = %d\n",
pkt_size, cmd_flg, ac->session);
hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
APR_HDR_LEN(sizeof(struct apr_hdr)),\
APR_PKT_VER);
hdr->src_svc = ((struct apr_svc *)ac->apr)->id;
hdr->src_domain = APR_DOMAIN_APPS;
hdr->dest_svc = APR_SVC_ASM;
hdr->dest_domain = APR_DOMAIN_ADSP;
hdr->src_port = ((ac->session << 8) & 0xFF00) | 0x01;
hdr->dest_port = ((ac->session << 8) & 0xFF00) | 0x01;
if (cmd_flg) {
hdr->token = ac->session;
atomic_set(&ac->cmd_state, 1);
}
hdr->pkt_size = pkt_size;
return;
}
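/*
 * q6asm_add_mmaphdr()
 *
 * Header setup for memory map/unmap commands; these go through the
 * shared mmap APR handle, so src/dest ports stay zero and the caller
 * supplies the token.
 */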
static void q6asm_add_mmaphdr(struct audio_client *ac, struct apr_hdr *hdr,
u32 pkt_size, u32 cmd_flg, u32 token)
{
pr_debug("%s:pkt size=%d cmd_flg=%d\n", __func__, pkt_size, cmd_flg);
hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
hdr->src_port = 0;
hdr->dest_port = 0;
if (cmd_flg) {
hdr->token = token;
atomic_set(&ac->cmd_state, 1);
}
hdr->pkt_size = pkt_size;
return;
}
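/*
 * q6asm_open_read()
 *
 * Open a Tx (record) stream configured for the requested encoder
 * format and wait up to five seconds for the DSP acknowledgement.
 */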
int q6asm_open_read(struct audio_client *ac,
uint32_t format)
{
int rc = 0x00;
struct asm_stream_cmd_open_read_v2 open;
uint16_t bits_per_sample = 16;
config_debug_fs_reset_index();
if ((ac == NULL) || (ac->apr == NULL)) {
pr_err("%s: APR handle NULL\n", __func__);
return -EINVAL;
}
pr_debug("%s:session[%d]", __func__, ac->session);
q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ_V2;
/* Stream prio : High, provide meta info with encoded frames */
open.src_endpointype = ASM_END_POINT_DEVICE_MATRIX;
open.preprocopo_id = get_asm_topology();
if (open.preprocopo_id == 0)
open.preprocopo_id = ASM_STREAM_POSTPROC_TOPO_ID_DEFAULT;
open.bits_per_sample = bits_per_sample;
switch (format) {
case FORMAT_LINEAR_PCM:
open.mode_flags = 0x00;
open.enc_cfg_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
break;
case FORMAT_MPEG4_AAC:
open.mode_flags = BUFFER_META_ENABLE;
open.enc_cfg_id = ASM_MEDIA_FMT_AAC_V2;
break;
case FORMAT_V13K:
open.mode_flags = BUFFER_META_ENABLE;
open.enc_cfg_id = ASM_MEDIA_FMT_V13K_FS;
break;
case FORMAT_EVRC:
open.mode_flags = BUFFER_META_ENABLE;
open.enc_cfg_id = ASM_MEDIA_FMT_EVRC_FS;
break;
case FORMAT_AMRNB:
		open.mode_flags = BUFFER_META_ENABLE;
open.enc_cfg_id = ASM_MEDIA_FMT_AMRNB_FS;
break;
case FORMAT_AMRWB:
		open.mode_flags = BUFFER_META_ENABLE;
open.enc_cfg_id = ASM_MEDIA_FMT_AMRWB_FS;
break;
default:
pr_err("Invalid format[%d]\n", format);
goto fail_cmd;
}
rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
if (rc < 0) {
pr_err("open failed op[0x%x]rc[%d]\n", \
open.hdr.opcode, rc);
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("%s: timeout. waited for open read rc[%d]\n", __func__,
rc);
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
int q6asm_open_write(struct audio_client *ac, uint32_t format)
{
int rc = 0x00;
struct asm_stream_cmd_open_write_v2 open;
if ((ac == NULL) || (ac->apr == NULL)) {
pr_err("%s: APR handle NULL\n", __func__);
return -EINVAL;
}
pr_debug("%s: session[%d] wr_format[0x%x]", __func__, ac->session,
format);
q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
open.hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE_V2;
open.mode_flags = 0x00;
	/* sink endpoint : matrix */
	open.sink_endpointype = ASM_END_POINT_DEVICE_MATRIX;
open.bits_per_sample = 16;
open.postprocopo_id = get_asm_topology();
if (open.postprocopo_id == 0)
open.postprocopo_id = ASM_STREAM_POSTPROC_TOPO_ID_DEFAULT;
switch (format) {
case FORMAT_LINEAR_PCM:
open.dec_fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
break;
case FORMAT_MPEG4_AAC:
open.dec_fmt_id = ASM_MEDIA_FMT_AAC_V2;
break;
case FORMAT_MPEG4_MULTI_AAC:
open.dec_fmt_id = ASM_MEDIA_FMT_DOLBY_AAC;
break;
case FORMAT_WMA_V9:
open.dec_fmt_id = ASM_MEDIA_FMT_WMA_V9_V2;
break;
case FORMAT_WMA_V10PRO:
open.dec_fmt_id = ASM_MEDIA_FMT_WMA_V10PRO_V2;
break;
case FORMAT_MP3:
open.dec_fmt_id = ASM_MEDIA_FMT_MP3;
break;
default:
pr_err("%s: Invalid format[%d]\n", __func__, format);
goto fail_cmd;
}
rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
if (rc < 0) {
pr_err("%s: open failed op[0x%x]rc[%d]\n", \
__func__, open.hdr.opcode, rc);
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("%s: timeout. waited for open write rc[%d]\n", __func__,
rc);
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
int q6asm_open_read_write(struct audio_client *ac,
uint32_t rd_format,
uint32_t wr_format)
{
int rc = 0x00;
struct asm_stream_cmd_open_readwrite_v2 open;
if ((ac == NULL) || (ac->apr == NULL)) {
pr_err("APR handle NULL\n");
return -EINVAL;
}
pr_debug("%s: session[%d]", __func__, ac->session);
pr_debug("wr_format[0x%x]rd_format[0x%x]",
wr_format, rd_format);
q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
open.hdr.opcode = ASM_STREAM_CMD_OPEN_READWRITE_V2;
open.mode_flags = BUFFER_META_ENABLE;
open.bits_per_sample = 16;
	open.postprocopo_id = get_asm_topology();
if (open.postprocopo_id == 0)
open.postprocopo_id = ASM_STREAM_POSTPROC_TOPO_ID_DEFAULT;
switch (wr_format) {
case FORMAT_LINEAR_PCM:
open.dec_fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
break;
case FORMAT_MPEG4_AAC:
open.dec_fmt_id = ASM_MEDIA_FMT_AAC_V2;
break;
case FORMAT_MPEG4_MULTI_AAC:
open.dec_fmt_id = ASM_MEDIA_FMT_DOLBY_AAC;
break;
case FORMAT_WMA_V9:
open.dec_fmt_id = ASM_MEDIA_FMT_WMA_V9_V2;
break;
case FORMAT_WMA_V10PRO:
open.dec_fmt_id = ASM_MEDIA_FMT_WMA_V10PRO_V2;
break;
case FORMAT_AMRNB:
open.dec_fmt_id = ASM_MEDIA_FMT_AMRNB_FS;
break;
case FORMAT_AMRWB:
open.dec_fmt_id = ASM_MEDIA_FMT_AMRWB_FS;
break;
case FORMAT_V13K:
open.dec_fmt_id = ASM_MEDIA_FMT_V13K_FS;
break;
case FORMAT_EVRC:
open.dec_fmt_id = ASM_MEDIA_FMT_EVRC_FS;
break;
case FORMAT_EVRCB:
open.dec_fmt_id = ASM_MEDIA_FMT_EVRCB_FS;
break;
case FORMAT_EVRCWB:
open.dec_fmt_id = ASM_MEDIA_FMT_EVRCWB_FS;
break;
case FORMAT_MP3:
open.dec_fmt_id = ASM_MEDIA_FMT_MP3;
break;
default:
pr_err("Invalid format[%d]\n", wr_format);
goto fail_cmd;
}
switch (rd_format) {
case FORMAT_LINEAR_PCM:
open.enc_cfg_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
break;
case FORMAT_MPEG4_AAC:
open.enc_cfg_id = ASM_MEDIA_FMT_AAC_V2;
break;
case FORMAT_V13K:
open.enc_cfg_id = ASM_MEDIA_FMT_V13K_FS;
break;
case FORMAT_EVRC:
open.enc_cfg_id = ASM_MEDIA_FMT_EVRC_FS;
break;
case FORMAT_AMRNB:
open.enc_cfg_id = ASM_MEDIA_FMT_AMRNB_FS;
break;
case FORMAT_AMRWB:
open.enc_cfg_id = ASM_MEDIA_FMT_AMRWB_FS;
break;
default:
pr_err("Invalid format[%d]\n", rd_format);
goto fail_cmd;
}
pr_debug("%s:rdformat[0x%x]wrformat[0x%x]\n", __func__,
open.enc_cfg_id, open.dec_fmt_id);
rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
if (rc < 0) {
pr_err("open failed op[0x%x]rc[%d]\n", \
open.hdr.opcode, rc);
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("timeout. waited for open read-write rc[%d]\n", rc);
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
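/*
 * q6asm_run()
 *
 * Start the session at the given timestamp and block until the DSP
 * acknowledges; q6asm_run_nowait() is the asynchronous variant.
 */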
int q6asm_run(struct audio_client *ac, uint32_t flags,
uint32_t msw_ts, uint32_t lsw_ts)
{
struct asm_session_cmd_run_v2 run;
int rc;
if (!ac || ac->apr == NULL) {
pr_err("APR handle NULL\n");
return -EINVAL;
}
pr_debug("%s session[%d]", __func__, ac->session);
q6asm_add_hdr(ac, &run.hdr, sizeof(run), TRUE);
run.hdr.opcode = ASM_SESSION_CMD_RUN_V2;
run.flags = flags;
run.time_lsw = lsw_ts;
run.time_msw = msw_ts;
config_debug_fs_run();
rc = apr_send_pkt(ac->apr, (uint32_t *) &run);
if (rc < 0) {
pr_err("Commmand run failed[%d]", rc);
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("timeout. waited for run success rc[%d]", rc);
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
int q6asm_run_nowait(struct audio_client *ac, uint32_t flags,
uint32_t msw_ts, uint32_t lsw_ts)
{
struct asm_session_cmd_run_v2 run;
int rc;
if (!ac || ac->apr == NULL) {
pr_err("%s:APR handle NULL\n", __func__);
return -EINVAL;
}
pr_debug("session[%d]", ac->session);
q6asm_add_hdr_async(ac, &run.hdr, sizeof(run), TRUE);
run.hdr.opcode = ASM_SESSION_CMD_RUN_V2;
run.flags = flags;
run.time_lsw = lsw_ts;
run.time_msw = msw_ts;
rc = apr_send_pkt(ac->apr, (uint32_t *) &run);
if (rc < 0) {
pr_err("%s:Commmand run failed[%d]", __func__, rc);
return -EINVAL;
}
return 0;
}
int q6asm_enc_cfg_blk_aac(struct audio_client *ac,
uint32_t frames_per_buf,
uint32_t sample_rate, uint32_t channels,
uint32_t bit_rate, uint32_t mode, uint32_t format)
{
struct asm_aac_enc_cfg_v2 enc_cfg;
int rc = 0;
pr_debug("%s:session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d]"
"format[%d]", __func__, ac->session, frames_per_buf,
sample_rate, channels, bit_rate, mode, format);
q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
enc_cfg.encdec.param_size = sizeof(struct asm_aac_enc_cfg_v2) -
sizeof(struct asm_stream_cmd_set_encdec_param);
enc_cfg.encblk.frames_per_buf = frames_per_buf;
enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size -
sizeof(struct asm_enc_cfg_blk_param_v2);
enc_cfg.bit_rate = bit_rate;
enc_cfg.enc_mode = mode;
enc_cfg.aac_fmt_flag = format;
enc_cfg.channel_cfg = channels;
enc_cfg.sample_rate = sample_rate;
rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
if (rc < 0) {
pr_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM);
rc = -EINVAL;
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("timeout. waited for FORMAT_UPDATE\n");
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
int q6asm_set_encdec_chan_map(struct audio_client *ac,
uint32_t num_channels)
{
	/* TODO: channel-map configuration is not implemented yet */
return 0;
}
int q6asm_enc_cfg_blk_pcm(struct audio_client *ac,
uint32_t rate, uint32_t channels)
{
struct asm_multi_channel_pcm_enc_cfg_v2 enc_cfg;
u8 *channel_mapping;
u32 frames_per_buf = 0;
int rc = 0;
pr_debug("%s: Session %d, rate = %d, channels = %d\n", __func__,
ac->session, rate, channels);
q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
enc_cfg.encdec.param_size = sizeof(enc_cfg) - sizeof(enc_cfg.hdr) -
sizeof(enc_cfg.encdec);
enc_cfg.encblk.frames_per_buf = frames_per_buf;
enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size -
sizeof(struct asm_enc_cfg_blk_param_v2);
enc_cfg.num_channels = channels;
enc_cfg.bits_per_sample = 16;
enc_cfg.sample_rate = rate;
enc_cfg.is_signed = 1;
	channel_mapping = enc_cfg.channel_mapping;
memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
if (channels == 1) {
channel_mapping[0] = PCM_CHANNEL_FL;
} else if (channels == 2) {
channel_mapping[0] = PCM_CHANNEL_FL;
channel_mapping[1] = PCM_CHANNEL_FR;
} else if (channels == 6) {
channel_mapping[0] = PCM_CHANNEL_FC;
channel_mapping[1] = PCM_CHANNEL_FL;
channel_mapping[2] = PCM_CHANNEL_FR;
channel_mapping[3] = PCM_CHANNEL_LB;
channel_mapping[4] = PCM_CHANNEL_RB;
channel_mapping[5] = PCM_CHANNEL_LFE;
} else {
pr_err("%s: ERROR.unsupported num_ch = %u\n", __func__,
channels);
return -EINVAL;
}
rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
if (rc < 0) {
pr_err("Comamnd open failed\n");
rc = -EINVAL;
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("timeout opcode[0x%x] ", enc_cfg.hdr.opcode);
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
int q6asm_enable_sbrps(struct audio_client *ac,
uint32_t sbr_ps_enable)
{
struct asm_aac_sbr_ps_flag_param sbrps;
u32 frames_per_buf = 0;
int rc = 0;
pr_debug("%s: Session %d\n", __func__, ac->session);
q6asm_add_hdr(ac, &sbrps.hdr, sizeof(sbrps), TRUE);
sbrps.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
sbrps.encdec.param_id = ASM_PARAM_ID_AAC_SBR_PS_FLAG;
sbrps.encdec.param_size = sizeof(struct asm_aac_sbr_ps_flag_param) -
sizeof(struct asm_stream_cmd_set_encdec_param);
sbrps.encblk.frames_per_buf = frames_per_buf;
sbrps.encblk.enc_cfg_blk_size = sbrps.encdec.param_size -
sizeof(struct asm_enc_cfg_blk_param_v2);
sbrps.sbr_ps_flag = sbr_ps_enable;
rc = apr_send_pkt(ac->apr, (uint32_t *) &sbrps);
if (rc < 0) {
pr_err("Command opcode[0x%x]paramid[0x%x] failed\n",
ASM_STREAM_CMD_SET_ENCDEC_PARAM,
ASM_PARAM_ID_AAC_SBR_PS_FLAG);
rc = -EINVAL;
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("timeout opcode[0x%x] ", sbrps.hdr.opcode);
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
int q6asm_cfg_dual_mono_aac(struct audio_client *ac,
uint16_t sce_left, uint16_t sce_right)
{
struct asm_aac_dual_mono_mapping_param dual_mono;
u32 frames_per_buf = 0;
int rc = 0;
pr_debug("%s: Session %d, sce_left = %d, sce_right = %d\n",
__func__, ac->session, sce_left, sce_right);
q6asm_add_hdr(ac, &dual_mono.hdr, sizeof(dual_mono), TRUE);
dual_mono.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
dual_mono.encdec.param_id = ASM_PARAM_ID_AAC_DUAL_MONO_MAPPING;
	/* size the payload from the dual-mono struct itself, matching the
	 * pattern of the other SET_ENCDEC_PARAM helpers in this file */
	dual_mono.encdec.param_size =
			sizeof(struct asm_aac_dual_mono_mapping_param) -
			sizeof(struct asm_stream_cmd_set_encdec_param);
dual_mono.encblk.frames_per_buf = frames_per_buf;
dual_mono.encblk.enc_cfg_blk_size = dual_mono.encdec.param_size -
sizeof(struct asm_enc_cfg_blk_param_v2);
dual_mono.left_channel_sce = sce_left;
dual_mono.right_channel_sce = sce_right;
rc = apr_send_pkt(ac->apr, (uint32_t *) &dual_mono);
if (rc < 0) {
pr_err("%s:Command opcode[0x%x]paramid[0x%x] failed\n",
__func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM,
ASM_PARAM_ID_AAC_DUAL_MONO_MAPPING);
rc = -EINVAL;
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("%s:timeout opcode[0x%x]\n", __func__,
dual_mono.hdr.opcode);
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
int q6asm_enc_cfg_blk_qcelp(struct audio_client *ac, uint32_t frames_per_buf,
uint16_t min_rate, uint16_t max_rate,
uint16_t reduced_rate_level, uint16_t rate_modulation_cmd)
{
struct asm_v13k_enc_cfg enc_cfg;
int rc = 0;
pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x]"
"reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]", __func__,
ac->session, frames_per_buf, min_rate, max_rate,
reduced_rate_level, rate_modulation_cmd);
q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
enc_cfg.encdec.param_size = sizeof(struct asm_v13k_enc_cfg) -
sizeof(struct asm_stream_cmd_set_encdec_param);
enc_cfg.encblk.frames_per_buf = frames_per_buf;
enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size -
sizeof(struct asm_enc_cfg_blk_param_v2);
enc_cfg.min_rate = min_rate;
enc_cfg.max_rate = max_rate;
enc_cfg.reduced_rate_cmd = reduced_rate_level;
enc_cfg.rate_mod_cmd = rate_modulation_cmd;
rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
if (rc < 0) {
pr_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM);
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("timeout. waited for setencdec v13k resp\n");
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
int q6asm_enc_cfg_blk_evrc(struct audio_client *ac, uint32_t frames_per_buf,
uint16_t min_rate, uint16_t max_rate,
uint16_t rate_modulation_cmd)
{
struct asm_evrc_enc_cfg enc_cfg;
int rc = 0;
pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x]"
"rate_modulation_cmd[0x%4x]", __func__, ac->session,
frames_per_buf, min_rate, max_rate, rate_modulation_cmd);
q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
enc_cfg.encdec.param_size = sizeof(struct asm_evrc_enc_cfg) -
sizeof(struct asm_stream_cmd_set_encdec_param);
enc_cfg.encblk.frames_per_buf = frames_per_buf;
enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size -
sizeof(struct asm_enc_cfg_blk_param_v2);
enc_cfg.min_rate = min_rate;
enc_cfg.max_rate = max_rate;
enc_cfg.rate_mod_cmd = rate_modulation_cmd;
enc_cfg.reserved = 0;
rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
if (rc < 0) {
pr_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM);
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("timeout. waited for encdec evrc\n");
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
int q6asm_enc_cfg_blk_amrnb(struct audio_client *ac, uint32_t frames_per_buf,
uint16_t band_mode, uint16_t dtx_enable)
{
struct asm_amrnb_enc_cfg enc_cfg;
int rc = 0;
pr_debug("%s:session[%d]frames[%d]band_mode[0x%4x]dtx_enable[0x%4x]",
__func__, ac->session, frames_per_buf, band_mode, dtx_enable);
q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
enc_cfg.encdec.param_size = sizeof(struct asm_amrnb_enc_cfg) -
sizeof(struct asm_stream_cmd_set_encdec_param);
enc_cfg.encblk.frames_per_buf = frames_per_buf;
enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size -
sizeof(struct asm_enc_cfg_blk_param_v2);
enc_cfg.enc_mode = band_mode;
enc_cfg.dtx_mode = dtx_enable;
rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
if (rc < 0) {
pr_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM);
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("timeout. waited for set encdec amrnb\n");
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
int q6asm_enc_cfg_blk_amrwb(struct audio_client *ac, uint32_t frames_per_buf,
uint16_t band_mode, uint16_t dtx_enable)
{
struct asm_amrwb_enc_cfg enc_cfg;
int rc = 0;
pr_debug("%s:session[%d]frames[%d]band_mode[0x%4x]dtx_enable[0x%4x]",
__func__, ac->session, frames_per_buf, band_mode, dtx_enable);
q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
enc_cfg.encdec.param_size = sizeof(struct asm_amrwb_enc_cfg) -
sizeof(struct asm_stream_cmd_set_encdec_param);
enc_cfg.encblk.frames_per_buf = frames_per_buf;
enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size -
sizeof(struct asm_enc_cfg_blk_param_v2);
enc_cfg.enc_mode = band_mode;
enc_cfg.dtx_mode = dtx_enable;
rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
if (rc < 0) {
pr_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM);
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("timeout. waited for FORMAT_UPDATE\n");
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
int q6asm_media_format_block_aac(struct audio_client *ac,
struct asm_aac_cfg *cfg)
{
return q6asm_media_format_block_multi_aac(ac, cfg);
}
int q6asm_media_format_block_pcm(struct audio_client *ac,
uint32_t rate, uint32_t channels)
{
struct asm_multi_channel_pcm_fmt_blk_v2 fmt;
u8 *channel_mapping;
int rc = 0;
pr_debug("%s:session[%d]rate[%d]ch[%d]\n", __func__, ac->session, rate,
channels);
q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
sizeof(fmt.fmt_blk);
fmt.num_channels = channels;
fmt.bits_per_sample = 16;
fmt.sample_rate = rate;
fmt.is_signed = 1;
channel_mapping = fmt.channel_mapping;
memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
if (channels == 1) {
channel_mapping[0] = PCM_CHANNEL_FL;
} else if (channels == 2) {
channel_mapping[0] = PCM_CHANNEL_FL;
channel_mapping[1] = PCM_CHANNEL_FR;
} else if (channels == 6) {
channel_mapping[0] = PCM_CHANNEL_FC;
channel_mapping[1] = PCM_CHANNEL_FL;
channel_mapping[2] = PCM_CHANNEL_FR;
channel_mapping[3] = PCM_CHANNEL_LB;
channel_mapping[4] = PCM_CHANNEL_RB;
channel_mapping[5] = PCM_CHANNEL_LFE;
} else {
pr_err("%s: ERROR.unsupported num_ch = %u\n", __func__,
channels);
return -EINVAL;
}
rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
if (rc < 0) {
pr_err("%s:Comamnd open failed\n", __func__);
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("%s:timeout. waited for format update\n", __func__);
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
int q6asm_media_format_block_multi_aac(struct audio_client *ac,
struct asm_aac_cfg *cfg)
{
struct asm_aac_fmt_blk_v2 fmt;
int rc = 0;
pr_debug("%s:session[%d]rate[%d]ch[%d]\n", __func__, ac->session,
cfg->sample_rate, cfg->ch_cfg);
q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
sizeof(fmt.fmt_blk);
fmt.aac_fmt_flag = cfg->format;
fmt.audio_objype = cfg->aot;
/* If zero, PCE is assumed to be available in bitstream*/
fmt.total_size_of_PCE_bits = 0;
fmt.channel_config = cfg->ch_cfg;
fmt.sample_rate = cfg->sample_rate;
pr_info("%s:format=%x cfg_size=%d aac-cfg=%x aot=%d ch=%d sr=%d\n",
__func__, fmt.aac_fmt_flag, fmt.fmt_blk.fmt_blk_size,
fmt.aac_fmt_flag,
fmt.audio_objype,
fmt.channel_config,
fmt.sample_rate);
rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
if (rc < 0) {
pr_err("%s:Comamnd open failed\n", __func__);
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__);
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
int q6asm_media_format_block_wma(struct audio_client *ac,
void *cfg)
{
struct asm_wmastdv9_fmt_blk_v2 fmt;
struct asm_wma_cfg *wma_cfg = (struct asm_wma_cfg *)cfg;
int rc = 0;
pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d],"
"balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n",
ac->session, wma_cfg->format_tag, wma_cfg->sample_rate,
wma_cfg->ch_cfg, wma_cfg->avg_bytes_per_sec,
wma_cfg->block_align, wma_cfg->valid_bits_per_sample,
wma_cfg->ch_mask, wma_cfg->encode_opt);
q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
	/* media format updates are sent with the MEDIA_FMT_UPDATE command
	 * opcode, as in the PCM/AAC paths; the payload identifies WMA v9 */
	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
fmt.fmtag = wma_cfg->format_tag;
fmt.num_channels = wma_cfg->ch_cfg;
fmt.sample_rate = wma_cfg->sample_rate;
fmt.avg_bytes_per_sec = wma_cfg->avg_bytes_per_sec;
fmt.blk_align = wma_cfg->block_align;
fmt.bits_per_sample =
wma_cfg->valid_bits_per_sample;
fmt.channel_mask = wma_cfg->ch_mask;
fmt.enc_options = wma_cfg->encode_opt;
rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
if (rc < 0) {
pr_err("%s:Comamnd open failed\n", __func__);
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__);
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
int q6asm_media_format_block_wmapro(struct audio_client *ac,
void *cfg)
{
struct asm_wmaprov10_fmt_blk_v2 fmt;
struct asm_wmapro_cfg *wmapro_cfg = (struct asm_wmapro_cfg *)cfg;
int rc = 0;
pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d],"
"balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x],"
"adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n",
ac->session, wmapro_cfg->format_tag, wmapro_cfg->sample_rate,
wmapro_cfg->ch_cfg, wmapro_cfg->avg_bytes_per_sec,
wmapro_cfg->block_align, wmapro_cfg->valid_bits_per_sample,
wmapro_cfg->ch_mask, wmapro_cfg->encode_opt,
wmapro_cfg->adv_encode_opt, wmapro_cfg->adv_encode_opt2);
q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
	/* media format updates are sent with the MEDIA_FMT_UPDATE command
	 * opcode, as in the PCM/AAC paths; the payload identifies WMA Pro */
	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
fmt.fmtag = wmapro_cfg->format_tag;
fmt.num_channels = wmapro_cfg->ch_cfg;
fmt.sample_rate = wmapro_cfg->sample_rate;
fmt.avg_bytes_per_sec =
wmapro_cfg->avg_bytes_per_sec;
fmt.blk_align = wmapro_cfg->block_align;
fmt.bits_per_sample = wmapro_cfg->valid_bits_per_sample;
fmt.channel_mask = wmapro_cfg->ch_mask;
fmt.enc_options = wmapro_cfg->encode_opt;
fmt.usAdvancedEncodeOpt = wmapro_cfg->adv_encode_opt;
fmt.advanced_enc_options2 = wmapro_cfg->adv_encode_opt2;
rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
if (rc < 0) {
pr_err("%s:Comamnd open failed\n", __func__);
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__);
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
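/*
 * q6asm_memory_map()
 *
 * Map the given buffer(s) to the DSP and record the returned memory
 * map handle against buf_add on the port's mem_map_handle list, for
 * use by later reads, writes and the eventual unmap.
 */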
int q6asm_memory_map(struct audio_client *ac, uint32_t buf_add, int dir,
uint32_t bufsz, uint32_t bufcnt)
{
struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
struct avs_shared_map_region_payload *mregions = NULL;
struct audio_port_data *port = NULL;
struct audio_buffer *ab = NULL;
void *mmap_region_cmd = NULL;
void *payload = NULL;
struct asm_buffer_node *buffer_node = NULL;
int rc = 0;
int i = 0;
int cmd_size = 0;
if (!ac || ac->apr == NULL || ac->mmap_apr == NULL) {
pr_err("APR handle NULL\n");
return -EINVAL;
}
pr_debug("%s: Session[%d]\n", __func__, ac->session);
	buffer_node = kmalloc(sizeof(struct asm_buffer_node), GFP_KERNEL);
	if (!buffer_node)
		return -ENOMEM;
	cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
			+ sizeof(struct avs_shared_map_region_payload) * bufcnt;
	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (mmap_region_cmd == NULL) {
		pr_err("%s: Mem alloc failed\n", __func__);
		kfree(buffer_node);
		return -ENOMEM;
	}
mmap_regions = (struct avs_cmd_shared_mem_map_regions *)
mmap_region_cmd;
q6asm_add_mmaphdr(ac, &mmap_regions->hdr, cmd_size,
TRUE, ((ac->session << 8) | dir));
mmap_regions->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS;
mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_EBI_POOL;
mmap_regions->num_regions = bufcnt & 0x00ff;
mmap_regions->property_flag = 0x00;
pr_debug("map_regions->nregions = %d\n", mmap_regions->num_regions);
payload = ((u8 *) mmap_region_cmd +
sizeof(struct avs_cmd_shared_mem_map_regions));
mregions = (struct avs_shared_map_region_payload *)payload;
ac->port[dir].tmp_hdl = 0;
port = &ac->port[dir];
for (i = 0; i < bufcnt; i++) {
ab = &port->buf[i];
mregions->shm_addr_lsw = ab->phys;
/* Using only 32 bit address */
mregions->shm_addr_msw = 0;
mregions->mem_size_bytes = ab->size;
++mregions;
}
rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) mmap_region_cmd);
if (rc < 0) {
pr_err("mmap op[0x%x]rc[%d]\n",
mmap_regions->hdr.opcode, rc);
rc = -EINVAL;
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0 &&
ac->port[dir].tmp_hdl), 5*HZ);
if (!rc) {
pr_err("timeout. waited for memory_map\n");
rc = -EINVAL;
goto fail_cmd;
}
buffer_node->buf_addr_lsw = buf_add;
buffer_node->mmap_hdl = ac->port[dir].tmp_hdl;
list_add_tail(&buffer_node->list, &ac->port[dir].mem_map_handle);
ac->port[dir].tmp_hdl = 0;
	rc = 0;
fail_cmd:
	if (rc)
		kfree(buffer_node);
	kfree(mmap_region_cmd);
	return rc;
}
int q6asm_memory_unmap(struct audio_client *ac, uint32_t buf_add, int dir)
{
struct avs_cmd_shared_mem_unmap_regions mem_unmap;
struct asm_buffer_node *buf_node = NULL;
struct list_head *ptr, *next;
int rc = 0;
if (!ac || ac->apr == NULL || this_mmap.apr == NULL) {
pr_err("APR handle NULL\n");
return -EINVAL;
}
pr_debug("%s: Session[%d]\n", __func__, ac->session);
q6asm_add_mmaphdr(ac, &mem_unmap.hdr,
sizeof(struct avs_cmd_shared_mem_unmap_regions),
TRUE, ((ac->session << 8) | dir));
mem_unmap.hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) {
buf_node = list_entry(ptr, struct asm_buffer_node,
list);
if (buf_node->buf_addr_lsw == buf_add) {
pr_info("%s: Found the element\n", __func__);
mem_unmap.mem_map_handle = buf_node->mmap_hdl;
break;
}
}
pr_debug("%s: mem_unmap-mem_map_handle: 0x%x",
__func__, mem_unmap.mem_map_handle);
rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) &mem_unmap);
if (rc < 0) {
pr_err("mem_unmap op[0x%x]rc[%d]\n",
mem_unmap.hdr.opcode, rc);
rc = -EINVAL;
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5 * HZ);
if (!rc) {
pr_err("timeout. waited for memory_map\n");
rc = -EINVAL;
goto fail_cmd;
}
list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) {
buf_node = list_entry(ptr, struct asm_buffer_node,
list);
if (buf_node->buf_addr_lsw == buf_add) {
list_del(&buf_node->list);
kfree(buf_node);
}
}
rc = 0;
fail_cmd:
return rc;
}
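/*
 * q6asm_memory_map_regions()
 *
 * Map the port's buffers to the DSP as one contiguous region of
 * bufsz * bufcnt bytes starting at buf[0], and record the shared map
 * handle against each buffer's physical address.
 */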
static int q6asm_memory_map_regions(struct audio_client *ac, int dir,
uint32_t bufsz, uint32_t bufcnt)
{
struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
struct avs_shared_map_region_payload *mregions = NULL;
struct audio_port_data *port = NULL;
struct audio_buffer *ab = NULL;
void *mmap_region_cmd = NULL;
void *payload = NULL;
struct asm_buffer_node *buffer_node = NULL;
int rc = 0;
int i = 0;
int cmd_size = 0;
if (!ac || ac->apr == NULL || ac->mmap_apr == NULL) {
pr_err("APR handle NULL\n");
return -EINVAL;
}
pr_debug("%s: Session[%d]\n", __func__, ac->session);
cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
+ (sizeof(struct avs_shared_map_region_payload));
	buffer_node = kzalloc(sizeof(struct asm_buffer_node) * bufcnt,
			GFP_KERNEL);
	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if ((mmap_region_cmd == NULL) || (buffer_node == NULL)) {
		pr_err("%s: Mem alloc failed\n", __func__);
		kfree(buffer_node);
		kfree(mmap_region_cmd);
		return -ENOMEM;
	}
mmap_regions = (struct avs_cmd_shared_mem_map_regions *)
mmap_region_cmd;
q6asm_add_mmaphdr(ac, &mmap_regions->hdr, cmd_size, TRUE,
((ac->session << 8) | dir));
pr_debug("mmap_region=0x%p token=0x%x\n",
mmap_regions, ((ac->session << 8) | dir));
mmap_regions->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS;
mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_EBI_POOL;
	mmap_regions->num_regions = 1; /* one contiguous region covers all buffers */
mmap_regions->property_flag = 0x00;
pr_debug("map_regions->nregions = %d\n", mmap_regions->num_regions);
payload = ((u8 *) mmap_region_cmd +
sizeof(struct avs_cmd_shared_mem_map_regions));
mregions = (struct avs_shared_map_region_payload *)payload;
ac->port[dir].tmp_hdl = 0;
port = &ac->port[dir];
ab = &port->buf[0];
mregions->shm_addr_lsw = ab->phys;
/* Using only 32 bit address */
mregions->shm_addr_msw = 0;
mregions->mem_size_bytes = (bufsz * bufcnt);
rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) mmap_region_cmd);
if (rc < 0) {
pr_err("mmap_regions op[0x%x]rc[%d]\n",
mmap_regions->hdr.opcode, rc);
rc = -EINVAL;
goto fail_cmd;
}
	rc = wait_event_timeout(ac->cmd_wait,
			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("timeout. waited for memory_map\n");
rc = -EINVAL;
goto fail_cmd;
}
mutex_lock(&ac->cmd_lock);
for (i = 0; i < bufcnt; i++) {
ab = &port->buf[i];
buffer_node[i].buf_addr_lsw = ab->phys;
buffer_node[i].mmap_hdl = ac->port[dir].tmp_hdl;
list_add_tail(&buffer_node[i].list,
&ac->port[dir].mem_map_handle);
pr_debug("%s: i=%d, bufadd[i] = 0x%x, maphdl[i] = 0x%x\n",
__func__, i, buffer_node[i].buf_addr_lsw,
buffer_node[i].mmap_hdl);
}
ac->port[dir].tmp_hdl = 0;
mutex_unlock(&ac->cmd_lock);
	rc = 0;
	pr_debug("%s: exit\n", __func__);
fail_cmd:
	if (rc)
		kfree(buffer_node);
	kfree(mmap_region_cmd);
	return rc;
}
static int q6asm_memory_unmap_regions(struct audio_client *ac, int dir,
uint32_t bufsz, uint32_t bufcnt)
{
struct avs_cmd_shared_mem_unmap_regions mem_unmap;
struct audio_port_data *port = NULL;
struct asm_buffer_node *buf_node = NULL;
struct list_head *ptr, *next;
uint32_t buf_add;
int rc = 0;
int cmd_size = 0;
if (!ac || ac->apr == NULL || ac->mmap_apr == NULL) {
pr_err("APR handle NULL\n");
return -EINVAL;
}
pr_debug("%s: Session[%d]\n", __func__, ac->session);
cmd_size = sizeof(struct avs_cmd_shared_mem_unmap_regions);
q6asm_add_mmaphdr(ac, &mem_unmap.hdr, cmd_size,
TRUE, ((ac->session << 8) | dir));
port = &ac->port[dir];
buf_add = (uint32_t)port->buf->phys;
mem_unmap.hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) {
buf_node = list_entry(ptr, struct asm_buffer_node,
list);
if (buf_node->buf_addr_lsw == buf_add) {
pr_debug("%s: Found the element\n", __func__);
mem_unmap.mem_map_handle = buf_node->mmap_hdl;
break;
}
}
pr_debug("%s: mem_unmap-mem_map_handle: 0x%x",
__func__, mem_unmap.mem_map_handle);
rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) &mem_unmap);
if (rc < 0) {
pr_err("mmap_regions op[0x%x]rc[%d]\n",
mem_unmap.hdr.opcode, rc);
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("timeout. waited for memory_unmap\n");
goto fail_cmd;
}
list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) {
buf_node = list_entry(ptr, struct asm_buffer_node,
list);
if (buf_node->buf_addr_lsw == buf_add) {
list_del(&buf_node->list);
kfree(buf_node);
}
}
rc = 0;
fail_cmd:
return rc;
}
int q6asm_set_lrgain(struct audio_client *ac, int left_gain, int right_gain)
{
struct asm_volume_ctrl_lr_chan_gain lrgain;
int sz = 0;
int rc = 0;
sz = sizeof(struct asm_volume_ctrl_lr_chan_gain);
q6asm_add_hdr_async(ac, &lrgain.hdr, sz, TRUE);
lrgain.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
lrgain.param.data_payload_addr_lsw = 0;
lrgain.param.data_payload_addr_msw = 0;
lrgain.param.mem_map_handle = 0;
lrgain.param.data_payload_size = sizeof(lrgain) -
sizeof(lrgain.hdr) - sizeof(lrgain.param);
lrgain.data.module_id = ASM_MODULE_ID_VOL_CTRL;
lrgain.data.param_id = ASM_PARAM_ID_VOL_CTRL_LR_CHANNEL_GAIN;
lrgain.data.param_size = lrgain.param.data_payload_size -
sizeof(lrgain.data);
lrgain.data.reserved = 0;
lrgain.l_chan_gain = left_gain;
lrgain.r_chan_gain = right_gain;
rc = apr_send_pkt(ac->apr, (uint32_t *) &lrgain);
if (rc < 0) {
pr_err("%s: set-params send failed paramid[0x%x]\n", __func__,
lrgain.data.param_id);
rc = -EINVAL;
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
lrgain.data.param_id);
rc = -EINVAL;
goto fail_cmd;
}
rc = 0;
fail_cmd:
return rc;
}
int q6asm_set_mute(struct audio_client *ac, int muteflag)
{
struct asm_volume_ctrl_mute_config mute;
int sz = 0;
int rc = 0;
sz = sizeof(struct asm_volume_ctrl_mute_config);
q6asm_add_hdr_async(ac, &mute.hdr, sz, TRUE);
mute.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
mute.param.data_payload_addr_lsw = 0;
mute.param.data_payload_addr_msw = 0;
mute.param.mem_map_handle = 0;
mute.param.data_payload_size = sizeof(mute) -
sizeof(mute.hdr) - sizeof(mute.param);
mute.data.module_id = ASM_MODULE_ID_VOL_CTRL;
mute.data.param_id = ASM_PARAM_ID_VOL_CTRL_MUTE_CONFIG;
mute.data.param_size = mute.param.data_payload_size - sizeof(mute.data);
mute.data.reserved = 0;
mute.mute_flag = muteflag;
rc = apr_send_pkt(ac->apr, (uint32_t *) &mute);
if (rc < 0) {
pr_err("%s: set-params send failed paramid[0x%x]\n", __func__,
mute.data.param_id);
rc = -EINVAL;
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
mute.data.param_id);
rc = -EINVAL;
goto fail_cmd;
}
rc = 0;
fail_cmd:
return rc;
}
int q6asm_set_volume(struct audio_client *ac, int volume)
{
struct asm_volume_ctrl_master_gain vol;
int sz = 0;
int rc = 0;
sz = sizeof(struct asm_volume_ctrl_master_gain);
q6asm_add_hdr_async(ac, &vol.hdr, sz, TRUE);
vol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
vol.param.data_payload_addr_lsw = 0;
vol.param.data_payload_addr_msw = 0;
vol.param.mem_map_handle = 0;
vol.param.data_payload_size = sizeof(vol) -
sizeof(vol.hdr) - sizeof(vol.param);
vol.data.module_id = ASM_MODULE_ID_VOL_CTRL;
vol.data.param_id = ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN;
vol.data.param_size = vol.param.data_payload_size - sizeof(vol.data);
vol.data.reserved = 0;
vol.master_gain = volume;
rc = apr_send_pkt(ac->apr, (uint32_t *) &vol);
if (rc < 0) {
pr_err("%s: set-params send failed paramid[0x%x]\n", __func__,
vol.data.param_id);
rc = -EINVAL;
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
vol.data.param_id);
rc = -EINVAL;
goto fail_cmd;
}
rc = 0;
fail_cmd:
return rc;
}
int q6asm_set_softpause(struct audio_client *ac,
struct asm_softpause_params *pause_param)
{
struct asm_soft_pause_params softpause;
int sz = 0;
int rc = 0;
sz = sizeof(struct asm_soft_pause_params);
q6asm_add_hdr_async(ac, &softpause.hdr, sz, TRUE);
softpause.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
softpause.param.data_payload_addr_lsw = 0;
softpause.param.data_payload_addr_msw = 0;
softpause.param.mem_map_handle = 0;
softpause.param.data_payload_size = sizeof(softpause) -
sizeof(softpause.hdr) - sizeof(softpause.param);
softpause.data.module_id = ASM_MODULE_ID_VOL_CTRL;
softpause.data.param_id = ASM_PARAM_ID_SOFT_PAUSE_PARAMETERS;
softpause.data.param_size = softpause.param.data_payload_size -
sizeof(softpause.data);
softpause.data.reserved = 0;
softpause.enable_flag = pause_param->enable;
softpause.period = pause_param->period;
softpause.step = pause_param->step;
softpause.ramping_curve = pause_param->rampingcurve;
rc = apr_send_pkt(ac->apr, (uint32_t *) &softpause);
if (rc < 0) {
pr_err("%s: set-params send failed paramid[0x%x]\n", __func__,
softpause.data.param_id);
rc = -EINVAL;
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
softpause.data.param_id);
rc = -EINVAL;
goto fail_cmd;
}
rc = 0;
fail_cmd:
return rc;
}
int q6asm_set_softvolume(struct audio_client *ac,
struct asm_softvolume_params *softvol_param)
{
struct asm_soft_step_volume_params softvol;
int sz = 0;
int rc = 0;
sz = sizeof(struct asm_soft_step_volume_params);
q6asm_add_hdr_async(ac, &softvol.hdr, sz, TRUE);
softvol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
softvol.param.data_payload_addr_lsw = 0;
softvol.param.data_payload_addr_msw = 0;
softvol.param.mem_map_handle = 0;
softvol.param.data_payload_size = sizeof(softvol) -
sizeof(softvol.hdr) - sizeof(softvol.param);
softvol.data.module_id = ASM_MODULE_ID_VOL_CTRL;
softvol.data.param_id = ASM_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS;
softvol.data.param_size = softvol.param.data_payload_size -
sizeof(softvol.data);
softvol.data.reserved = 0;
softvol.period = softvol_param->period;
softvol.step = softvol_param->step;
softvol.ramping_curve = softvol_param->rampingcurve;
rc = apr_send_pkt(ac->apr, (uint32_t *) &softvol);
if (rc < 0) {
pr_err("%s: set-params send failed paramid[0x%x]\n", __func__,
softvol.data.param_id);
rc = -EINVAL;
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
softvol.data.param_id);
rc = -EINVAL;
goto fail_cmd;
}
rc = 0;
fail_cmd:
return rc;
}
int q6asm_equalizer(struct audio_client *ac, void *eq_p)
{
struct asm_eq_params eq;
struct msm_audio_eq_stream_config *eq_params = NULL;
int i = 0;
int sz = 0;
int rc = 0;
if (eq_p == NULL) {
pr_err("%s[%d]: Invalid Eq param\n", __func__, ac->session);
rc = -EINVAL;
goto fail_cmd;
}
sz = sizeof(struct asm_eq_params);
eq_params = (struct msm_audio_eq_stream_config *) eq_p;
q6asm_add_hdr(ac, &eq.hdr, sz, TRUE);
eq.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
eq.param.data_payload_addr_lsw = 0;
eq.param.data_payload_addr_msw = 0;
eq.param.mem_map_handle = 0;
eq.param.data_payload_size = sizeof(eq) -
sizeof(eq.hdr) - sizeof(eq.param);
eq.data.module_id = ASM_MODULE_ID_EQUALIZER;
eq.data.param_id = ASM_PARAM_ID_EQUALIZER_PARAMETERS;
eq.data.param_size = eq.param.data_payload_size - sizeof(eq.data);
eq.enable_flag = eq_params->enable;
eq.num_bands = eq_params->num_bands;
pr_debug("%s: enable:%d numbands:%d\n", __func__, eq_params->enable,
eq_params->num_bands);
for (i = 0; i < eq_params->num_bands; i++) {
eq.eq_bands[i].band_idx =
eq_params->eq_bands[i].band_idx;
eq.eq_bands[i].filterype =
eq_params->eq_bands[i].filter_type;
eq.eq_bands[i].center_freq_hz =
eq_params->eq_bands[i].center_freq_hz;
eq.eq_bands[i].filter_gain =
eq_params->eq_bands[i].filter_gain;
eq.eq_bands[i].q_factor =
eq_params->eq_bands[i].q_factor;
pr_debug("%s: filter_type:%u bandnum:%d\n", __func__,
eq_params->eq_bands[i].filter_type, i);
pr_debug("%s: center_freq_hz:%u bandnum:%d\n", __func__,
eq_params->eq_bands[i].center_freq_hz, i);
pr_debug("%s: filter_gain:%d bandnum:%d\n", __func__,
eq_params->eq_bands[i].filter_gain, i);
pr_debug("%s: q_factor:%d bandnum:%d\n", __func__,
eq_params->eq_bands[i].q_factor, i);
}
rc = apr_send_pkt(ac->apr, (uint32_t *)&eq);
if (rc < 0) {
pr_err("%s: set-params send failed paramid[0x%x]\n", __func__,
eq.data.param_id);
rc = -EINVAL;
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
eq.data.param_id);
rc = -EINVAL;
goto fail_cmd;
}
rc = 0;
fail_cmd:
return rc;
}
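/*
 * q6asm_read()
 *
 * Queue the next DSP-owned capture buffer to the DSP via
 * ASM_DATA_CMD_READ_V2, using the buffer index as the packet token.
 * Synchronous I/O mode only.
 */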
int q6asm_read(struct audio_client *ac)
{
struct asm_data_cmd_read_v2 read;
struct asm_buffer_node *buf_node = NULL;
struct list_head *ptr, *next;
struct audio_buffer *ab;
int dsp_buf;
struct audio_port_data *port;
int rc;
if (!ac || ac->apr == NULL) {
pr_err("APR handle NULL\n");
return -EINVAL;
}
if (ac->io_mode == SYNC_IO_MODE) {
port = &ac->port[OUT];
q6asm_add_hdr(ac, &read.hdr, sizeof(read), FALSE);
mutex_lock(&port->lock);
dsp_buf = port->dsp_buf;
ab = &port->buf[dsp_buf];
pr_debug("%s:session[%d]dsp-buf[%d][%p]cpu_buf[%d][%p]\n",
__func__,
ac->session,
dsp_buf,
(void *)port->buf[dsp_buf].data,
port->cpu_buf,
(void *)port->buf[port->cpu_buf].phys);
read.hdr.opcode = ASM_DATA_CMD_READ_V2;
read.buf_addr_lsw = ab->phys;
read.buf_addr_msw = 0;
list_for_each_safe(ptr, next, &ac->port[OUT].mem_map_handle) {
buf_node = list_entry(ptr, struct asm_buffer_node,
list);
if (buf_node->buf_addr_lsw == (uint32_t) ab->phys)
read.mem_map_handle = buf_node->mmap_hdl;
}
pr_debug("memory_map handle in q6asm_read: [%0x]:",
read.mem_map_handle);
read.buf_size = ab->size;
read.seq_id = port->dsp_buf;
read.hdr.token = port->dsp_buf;
port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1);
mutex_unlock(&port->lock);
pr_debug("%s:buf add[0x%x] token[%d] uid[%d]\n", __func__,
read.buf_addr_lsw,
read.hdr.token,
read.seq_id);
rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
if (rc < 0) {
pr_err("read op[0x%x]rc[%d]\n", read.hdr.opcode, rc);
goto fail_cmd;
}
return 0;
}
fail_cmd:
return -EINVAL;
}
int q6asm_read_nolock(struct audio_client *ac)
{
struct asm_data_cmd_read_v2 read;
struct asm_buffer_node *buf_node = NULL;
struct list_head *ptr, *next;
struct audio_buffer *ab;
int dsp_buf;
struct audio_port_data *port;
int rc;
if (!ac || ac->apr == NULL) {
pr_err("APR handle NULL\n");
return -EINVAL;
}
if (ac->io_mode == SYNC_IO_MODE) {
port = &ac->port[OUT];
q6asm_add_hdr_async(ac, &read.hdr, sizeof(read), FALSE);
dsp_buf = port->dsp_buf;
ab = &port->buf[dsp_buf];
pr_debug("%s:session[%d]dsp-buf[%d][%p]cpu_buf[%d][%p]\n",
__func__,
ac->session,
dsp_buf,
(void *)port->buf[dsp_buf].data,
port->cpu_buf,
(void *)port->buf[port->cpu_buf].phys);
read.hdr.opcode = ASM_DATA_CMD_READ_V2;
read.buf_addr_lsw = ab->phys;
read.buf_addr_msw = 0;
read.buf_size = ab->size;
read.seq_id = port->dsp_buf;
read.hdr.token = port->dsp_buf;
list_for_each_safe(ptr, next, &ac->port[OUT].mem_map_handle) {
buf_node = list_entry(ptr, struct asm_buffer_node,
list);
if (buf_node->buf_addr_lsw == (uint32_t)ab->phys) {
read.mem_map_handle = buf_node->mmap_hdl;
break;
}
}
port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1);
pr_debug("%s:buf add[0x%x] token[%d] uid[%d]\n", __func__,
read.buf_addr_lsw,
read.hdr.token,
read.seq_id);
pr_debug("q6asm_read_nolock mem-map handle is %x",
read.mem_map_handle);
rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
if (rc < 0) {
pr_err("read op[0x%x]rc[%d]\n", read.hdr.opcode, rc);
goto fail_cmd;
}
return 0;
}
fail_cmd:
return -EINVAL;
}
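/*
 * q6asm_async_write()
 *
 * Queue an AIO write; the caller-supplied uid is carried in the packet
 * token and seq_id so the completion can be matched to the request.
 */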
int q6asm_async_write(struct audio_client *ac,
struct audio_aio_write_param *param)
{
int rc = 0;
struct asm_data_cmd_write_v2 write;
struct asm_buffer_node *buf_node = NULL;
struct list_head *ptr, *next;
struct audio_buffer *ab;
struct audio_port_data *port;
if (!ac || ac->apr == NULL) {
pr_err("%s: APR handle NULL\n", __func__);
return -EINVAL;
}
q6asm_add_hdr_async(ac, &write.hdr, sizeof(write), FALSE);
port = &ac->port[IN];
ab = &port->buf[port->dsp_buf];
	/* Pass the caller-supplied uid as token for the AIO scheme */
	write.hdr.token = param->uid;
write.hdr.opcode = ASM_DATA_CMD_WRITE_V2;
write.buf_addr_lsw = param->paddr;
write.buf_addr_msw = 0x00;
write.buf_size = param->len;
write.timestamp_msw = param->msw_ts;
write.timestamp_lsw = param->lsw_ts;
pr_debug("%s: token[0x%x], buf_addr_lsw[0x%x], buf_size[0x%x],"
"ts_msw[0x%x], ts_lsw[0x%x]\n",
__func__, write.hdr.token, write.buf_addr_lsw,
write.buf_size, write.timestamp_msw,
write.timestamp_lsw);
/* Use 0xFF00 for disabling timestamps */
if (param->flags == 0xFF00)
write.flags = (0x00000000 | (param->flags & 0x800000FF));
else
write.flags = (0x80000000 | param->flags);
write.seq_id = param->uid;
list_for_each_safe(ptr, next, &ac->port[IN].mem_map_handle) {
buf_node = list_entry(ptr, struct asm_buffer_node,
list);
if (buf_node->buf_addr_lsw == (uint32_t)write.buf_addr_lsw) {
write.mem_map_handle = buf_node->mmap_hdl;
pr_debug("%s:buf_node->mmap_hdl = 0x%x,"
"write.mem_map_handle = 0x%x\n",
__func__,
buf_node->mmap_hdl,
(uint32_t)write.mem_map_handle);
break;
}
}
pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x],"
"mem_map_handle[0x%x]\n", __func__, ac->session,
write.buf_addr_lsw, write.buf_size, write.mem_map_handle);
rc = apr_send_pkt(ac->apr, (uint32_t *) &write);
if (rc < 0) {
pr_debug("[%s] write op[0x%x]rc[%d]\n", __func__,
write.hdr.opcode, rc);
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
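/*
* Illustrative sketch (not part of the driver): mirrors the flag handling
* above. Bit 31 of write.flags tells the DSP the timestamp words are
* valid; the special request value 0xFF00 is masked so that bit 31 (and
* everything outside bits 0-7) stays clear and timestamps are ignored.
*/
#if 0
static inline u32 example_asm_write_flags(u32 flags)
{
if (flags == 0xFF00) /* caller asked for no timestamps */
return flags & 0x800000FF; /* bit 31 clear: ts invalid */
return 0x80000000 | flags; /* bit 31 set: ts valid */
}
#endif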
int q6asm_async_read(struct audio_client *ac,
struct audio_aio_read_param *param)
{
int rc = 0;
struct asm_data_cmd_read_v2 read;
struct asm_buffer_node *buf_node = NULL;
struct list_head *ptr, *next;
if (!ac || ac->apr == NULL) {
pr_err("%s: APR handle NULL\n", __func__);
return -EINVAL;
}
q6asm_add_hdr_async(ac, &read.hdr, sizeof(read), FALSE);
/* Pass physical address as token for AIO scheme */
read.hdr.token = param->paddr;
read.hdr.opcode = ASM_DATA_CMD_READ_V2;
read.buf_addr_lsw = param->paddr;
read.buf_addr_msw = 0;
read.buf_size = param->len;
read.seq_id = param->uid;
list_for_each_safe(ptr, next, &ac->port[IN].mem_map_handle) {
buf_node = list_entry(ptr, struct asm_buffer_node,
list);
if (buf_node->buf_addr_lsw == param->paddr) {
read.mem_map_handle = buf_node->mmap_hdl;
break;
}
}
pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x]", __func__, ac->session,
read.buf_addr_lsw, read.buf_size);
rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
if (rc < 0) {
pr_debug("[%s] read op[0x%x]rc[%d]\n", __func__,
read.hdr.opcode, rc);
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
uint32_t lsw_ts, uint32_t flags)
{
int rc = 0;
struct asm_data_cmd_write_v2 write;
struct asm_buffer_node *buf_node = NULL;
struct audio_port_data *port;
struct audio_buffer *ab;
int dsp_buf = 0;
if (!ac || ac->apr == NULL) {
pr_err("APR handle NULL\n");
return -EINVAL;
}
pr_debug("%s: session[%d] len=%d", __func__, ac->session, len);
if (ac->io_mode == SYNC_IO_MODE) {
port = &ac->port[IN];
q6asm_add_hdr(ac, &write.hdr, sizeof(write),
FALSE);
mutex_lock(&port->lock);
dsp_buf = port->dsp_buf;
ab = &port->buf[dsp_buf];
write.hdr.token = port->dsp_buf;
write.hdr.opcode = ASM_DATA_CMD_WRITE_V2;
write.buf_addr_lsw = ab->phys;
write.buf_addr_msw = 0;
write.buf_size = len;
write.seq_id = port->dsp_buf;
write.timestamp_lsw = lsw_ts;
write.timestamp_msw = msw_ts;
/* Use 0xFF00 for disabling timestamps */
if (flags == 0xFF00)
write.flags = (0x00000000 | (flags & 0x800000FF));
else
write.flags = (0x80000000 | flags);
port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1);
buf_node = list_first_entry(&ac->port[IN].mem_map_handle,
struct asm_buffer_node,
list);
write.mem_map_handle = buf_node->mmap_hdl;
pr_debug("%s:ab->phys[0x%x]bufadd[0x%x]"
"token[0x%x]buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]"
, __func__,
ab->phys,
write.buf_addr_lsw,
write.hdr.token,
write.seq_id,
write.buf_size,
write.mem_map_handle);
mutex_unlock(&port->lock);
config_debug_fs_write(ab);
rc = apr_send_pkt(ac->apr, (uint32_t *) &write);
if (rc < 0) {
pr_err("write op[0x%x]rc[%d]\n", write.hdr.opcode, rc);
goto fail_cmd;
}
pr_debug("%s: WRITE SUCCESS\n", __func__);
return 0;
}
fail_cmd:
return -EINVAL;
}
int q6asm_write_nolock(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
uint32_t lsw_ts, uint32_t flags)
{
int rc = 0;
struct asm_data_cmd_write_v2 write;
struct asm_buffer_node *buf_node = NULL;
struct audio_port_data *port;
struct audio_buffer *ab;
int dsp_buf = 0;
if (!ac || ac->apr == NULL) {
pr_err("APR handle NULL\n");
return -EINVAL;
}
pr_debug("%s: session[%d] len=%d", __func__, ac->session, len);
if (ac->io_mode == SYNC_IO_MODE) {
port = &ac->port[IN];
q6asm_add_hdr_async(ac, &write.hdr, sizeof(write),
FALSE);
dsp_buf = port->dsp_buf;
ab = &port->buf[dsp_buf];
write.hdr.token = port->dsp_buf;
write.hdr.opcode = ASM_DATA_CMD_WRITE_V2;
write.buf_addr_lsw = ab->phys;
write.buf_addr_msw = 0;
write.buf_size = len;
write.seq_id = port->dsp_buf;
write.timestamp_lsw = lsw_ts;
write.timestamp_msw = msw_ts;
buf_node = list_first_entry(&ac->port[IN].mem_map_handle,
struct asm_buffer_node,
list);
write.mem_map_handle = buf_node->mmap_hdl;
/* Use 0xFF00 for disabling timestamps */
if (flags == 0xFF00)
write.flags = (0x00000000 | (flags & 0x800000FF));
else
write.flags = (0x80000000 | flags);
port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1);
pr_err("%s:ab->phys[0x%x]bufadd[0x%x]token[0x%x]"
"buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]"
, __func__,
ab->phys,
write.buf_addr_lsw,
write.hdr.token,
write.seq_id,
write.buf_size,
write.mem_map_handle);
rc = apr_send_pkt(ac->apr, (uint32_t *) &write);
if (rc < 0) {
pr_err("write op[0x%x]rc[%d]\n", write.hdr.opcode, rc);
goto fail_cmd;
}
pr_debug("%s: WRITE SUCCESS\n", __func__);
return 0;
}
fail_cmd:
return -EINVAL;
}
uint64_t q6asm_get_session_time(struct audio_client *ac)
{
struct apr_hdr hdr;
int rc;
if (!ac || ac->apr == NULL) {
pr_err("APR handle NULL\n");
return -EINVAL;
}
q6asm_add_hdr(ac, &hdr, sizeof(hdr), TRUE);
hdr.opcode = ASM_SESSION_CMD_GET_SESSIONTIME_V3;
atomic_set(&ac->cmd_state, 1);
pr_debug("%s: session[%d]opcode[0x%x]\n", __func__,
ac->session,
hdr.opcode);
rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr);
if (rc < 0) {
pr_err("Commmand 0x%x failed\n", hdr.opcode);
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("%s: timeout in getting session time from DSP\n",
__func__);
goto fail_cmd;
}
return ac->time_stamp;
fail_cmd:
return -EINVAL;
}
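/*
* Illustrative sketch (not part of the driver): blocking commands in this
* file share one handshake - cmd_state is set to 1, the APR packet is
* sent, and the caller sleeps on cmd_wait until the response callback
* clears cmd_state. Note the function above returns -EINVAL through a
* uint64_t, so a hypothetical caller must treat that value specially:
*/
#if 0
static int example_session_time(struct audio_client *ac, uint64_t *ts)
{
uint64_t t = q6asm_get_session_time(ac); /* may block up to 5s */

if (t == (uint64_t)-EINVAL) /* send failure or DSP timeout */
return -EINVAL;
*ts = t;
return 0;
}
#endif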
int q6asm_cmd(struct audio_client *ac, int cmd)
{
struct apr_hdr hdr;
int rc;
atomic_t *state;
int cnt = 0;
if (!ac || ac->apr == NULL) {
pr_err("APR handle NULL\n");
return -EINVAL;
}
q6asm_add_hdr(ac, &hdr, sizeof(hdr), TRUE);
switch (cmd) {
case CMD_PAUSE:
pr_debug("%s:CMD_PAUSE\n", __func__);
hdr.opcode = ASM_SESSION_CMD_PAUSE;
state = &ac->cmd_state;
break;
case CMD_FLUSH:
pr_debug("%s:CMD_FLUSH\n", __func__);
hdr.opcode = ASM_STREAM_CMD_FLUSH;
state = &ac->cmd_state;
break;
case CMD_OUT_FLUSH:
pr_debug("%s:CMD_OUT_FLUSH\n", __func__);
hdr.opcode = ASM_STREAM_CMD_FLUSH_READBUFS;
state = &ac->cmd_state;
break;
case CMD_EOS:
pr_debug("%s:CMD_EOS\n", __func__);
hdr.opcode = ASM_DATA_CMD_EOS;
atomic_set(&ac->cmd_state, 0);
state = &ac->cmd_state;
break;
case CMD_CLOSE:
pr_debug("%s:CMD_CLOSE\n", __func__);
hdr.opcode = ASM_STREAM_CMD_CLOSE;
state = &ac->cmd_state;
break;
default:
pr_err("Invalid format[%d]\n", cmd);
goto fail_cmd;
}
pr_debug("%s:session[%d]opcode[0x%x] ", __func__,
ac->session,
hdr.opcode);
rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr);
if (rc < 0) {
pr_err("Commmand 0x%x failed\n", hdr.opcode);
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait, (atomic_read(state) == 0), 5*HZ);
if (!rc) {
pr_err("timeout. waited for response opcode[0x%x]\n",
hdr.opcode);
goto fail_cmd;
}
if (cmd == CMD_FLUSH)
q6asm_reset_buf_state(ac);
if (cmd == CMD_CLOSE) {
/* check if the DSP returned all buffers */
if (ac->port[IN].buf) {
for (cnt = 0; cnt < ac->port[IN].max_buf_cnt;
cnt++) {
if (ac->port[IN].buf[cnt].used == IN) {
pr_debug("Write Buf[%d] not returned\n",
cnt);
}
}
}
if (ac->port[OUT].buf) {
for (cnt = 0; cnt < ac->port[OUT].max_buf_cnt; cnt++) {
if (ac->port[OUT].buf[cnt].used == OUT) {
pr_debug("Read Buf[%d] not returned\n",
cnt);
}
}
}
}
return 0;
fail_cmd:
return -EINVAL;
}
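/*
* Illustrative sketch (not part of the driver): q6asm_cmd() blocks for up
* to five seconds per command waiting for the DSP acknowledgement, so it
* must be called from process context. A hypothetical pause-then-flush
* sequence on an already-running session:
*/
#if 0
static int example_pause_and_flush(struct audio_client *ac)
{
int rc;

rc = q6asm_cmd(ac, CMD_PAUSE); /* blocks until the DSP acks */
if (rc < 0)
return rc;
/* CMD_FLUSH additionally resets the local buffer state on success */
return q6asm_cmd(ac, CMD_FLUSH);
}
#endif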
int q6asm_cmd_nowait(struct audio_client *ac, int cmd)
{
struct apr_hdr hdr;
int rc;
if (!ac || ac->apr == NULL) {
pr_err("%s:APR handle NULL\n", __func__);
return -EINVAL;
}
q6asm_add_hdr_async(ac, &hdr, sizeof(hdr), TRUE);
switch (cmd) {
case CMD_PAUSE:
pr_debug("%s:CMD_PAUSE\n", __func__);
hdr.opcode = ASM_SESSION_CMD_PAUSE;
break;
case CMD_EOS:
pr_debug("%s:CMD_EOS\n", __func__);
hdr.opcode = ASM_DATA_CMD_EOS;
break;
default:
pr_err("%s:Invalid format[%d]\n", __func__, cmd);
goto fail_cmd;
}
pr_debug("%s:session[%d]opcode[0x%x] ", __func__,
ac->session,
hdr.opcode);
rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr);
if (rc < 0) {
pr_err("%s:Commmand 0x%x failed\n", __func__, hdr.opcode);
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
static void q6asm_reset_buf_state(struct audio_client *ac)
{
int cnt = 0;
int loopcnt = 0;
struct audio_port_data *port = NULL;
if (ac->io_mode == SYNC_IO_MODE) {
mutex_lock(&ac->cmd_lock);
for (loopcnt = 0; loopcnt <= OUT; loopcnt++) {
port = &ac->port[loopcnt];
cnt = port->max_buf_cnt - 1;
port->dsp_buf = 0;
port->cpu_buf = 0;
while (cnt >= 0) {
if (!port->buf)
break; /* no buffers allocated; "continue" here would loop forever */
port->buf[cnt].used = 1;
cnt--;
}
}
mutex_unlock(&ac->cmd_lock);
}
}
int q6asm_reg_tx_overflow(struct audio_client *ac, uint16_t enable)
{
struct asm_session_cmd_regx_overflow tx_overflow;
int rc;
if (!ac || ac->apr == NULL) {
pr_err("APR handle NULL\n");
return -EINVAL;
}
pr_debug("%s:session[%d]enable[%d]\n", __func__,
ac->session, enable);
q6asm_add_hdr(ac, &tx_overflow.hdr, sizeof(tx_overflow), TRUE);
tx_overflow.hdr.opcode =
ASM_SESSION_CMD_REGISTER_FORX_OVERFLOW_EVENTS;
/* tx overflow event: enable */
tx_overflow.enable_flag = enable;
rc = apr_send_pkt(ac->apr, (uint32_t *) &tx_overflow);
if (rc < 0) {
pr_err("tx overflow op[0x%x]rc[%d]\n", \
tx_overflow.hdr.opcode, rc);
goto fail_cmd;
}
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
pr_err("timeout. waited for tx overflow\n");
goto fail_cmd;
}
return 0;
fail_cmd:
return -EINVAL;
}
int q6asm_get_apr_service_id(int session_id)
{
pr_debug("%s\n", __func__);
if (session_id < 0 || session_id > SESSION_MAX) {
pr_err("%s: invalid session_id = %d\n", __func__, session_id);
return -EINVAL;
}
if (!session[session_id]) {
pr_err("%s: session %d not active\n", __func__, session_id);
return -EINVAL;
}
return ((struct apr_svc *)session[session_id]->apr)->id;
}
static int __init q6asm_init(void)
{
pr_debug("%s\n", __func__);
memset(session, 0, sizeof(session));
config_debug_fs_init();
return 0;
}
device_initcall(q6asm_init);
| gpl-2.0 |
danielkutik/android_kernel_google_msm | drivers/media/platform/msm/camera_v1/io/msm_io_8960.c | 286 | 2952 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/regulator/consumer.h>
#include <linux/mfd/pm8xxx/pm8921.h>
#include <mach/gpio.h>
#include <mach/gpiomux.h>
#include <mach/board.h>
#include <mach/camera.h>
#include <mach/vreg.h>
#include <mach/camera.h>
#include <mach/clk.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#define BUFF_SIZE_128 128
void msm_camio_clk_rate_set_2(struct clk *clk, int rate)
{
clk_set_rate(clk, rate);
}
void msm_camio_bus_scale_cfg(struct msm_bus_scale_pdata *cam_bus_scale_table,
enum msm_bus_perf_setting perf_setting)
{
static uint32_t bus_perf_client;
int rc = 0;
switch (perf_setting) {
case S_INIT:
bus_perf_client =
msm_bus_scale_register_client(cam_bus_scale_table);
if (!bus_perf_client) {
CDBG("%s: Registration Failed!!!\n", __func__);
bus_perf_client = 0;
return;
}
CDBG("%s: S_INIT rc = %u\n", __func__, bus_perf_client);
break;
case S_EXIT:
if (bus_perf_client) {
CDBG("%s: S_EXIT\n", __func__);
msm_bus_scale_unregister_client(bus_perf_client);
} else
CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
break;
case S_PREVIEW:
if (bus_perf_client) {
rc = msm_bus_scale_client_update_request(
bus_perf_client, 1);
CDBG("%s: S_PREVIEW rc = %d\n", __func__, rc);
} else
CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
break;
case S_VIDEO:
if (bus_perf_client) {
rc = msm_bus_scale_client_update_request(
bus_perf_client, 2);
CDBG("%s: S_VIDEO rc = %d\n", __func__, rc);
} else
CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
break;
case S_CAPTURE:
if (bus_perf_client) {
rc = msm_bus_scale_client_update_request(
bus_perf_client, 3);
CDBG("%s: S_CAPTURE rc = %d\n", __func__, rc);
} else
CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
break;
case S_ZSL:
if (bus_perf_client) {
rc = msm_bus_scale_client_update_request(
bus_perf_client, 4);
CDBG("%s: S_ZSL rc = %d\n", __func__, rc);
} else
CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
break;
case S_LIVESHOT:
if (bus_perf_client) {
rc = msm_bus_scale_client_update_request(
bus_perf_client, 5);
CDBG("%s: S_LIVESHOT rc = %d\n", __func__, rc);
} else
CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
break;
case S_DEFAULT:
break;
default:
pr_warning("%s: INVALID CASE\n", __func__);
}
}
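/*
* Illustrative sketch (not part of the driver): the function above keeps a
* single static bus client and maps each perf_setting onto a vote index
* in the platform's bus-scale usecase table. A hypothetical session,
* assuming a valid table from board data:
*/
#if 0
static void example_camera_session(struct msm_bus_scale_pdata *tbl)
{
msm_camio_bus_scale_cfg(tbl, S_INIT); /* register the client */
msm_camio_bus_scale_cfg(tbl, S_PREVIEW); /* vote usecase index 1 */
msm_camio_bus_scale_cfg(tbl, S_VIDEO); /* re-vote usecase index 2 */
msm_camio_bus_scale_cfg(tbl, S_EXIT); /* unregister the client */
}
#endif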
| gpl-2.0 |
vmayoral/snappy-kernel | arch/powerpc/platforms/cell/spu_base.c | 1054 | 19628 | /*
* Low-level SPU handling
*
* (C) Copyright IBM Deutschland Entwicklung GmbH 2005
*
* Author: Arnd Bergmann <arndb@de.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#undef DEBUG
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <linux/syscore_ops.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/xmon.h>
#include <asm/prom.h>
#include <asm/kexec.h>
const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);
const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);
struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);
/*
* The spufs fault-handling code needs to call force_sig_info to raise signals
* on DMA errors. Export it here to avoid general kernel-wide access to this
* function
*/
EXPORT_SYMBOL_GPL(force_sig_info);
/*
* Protects cbe_spu_info and spu->number.
*/
static DEFINE_SPINLOCK(spu_lock);
/*
* List of all spus in the system.
*
* This list is iterated by callers from irq context and callers that
* want to sleep. Thus modifications need to be done with both
* spu_full_list_lock and spu_full_list_mutex held, while iterating
* through it requires either of these locks.
*
* In addition, spu_full_list_lock protects all assignments to
* spu->mm.
*/
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);
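/*
* Illustrative sketch (not part of the driver): the rule documented above -
* both locks to modify the list, either one to walk it - in code form.
* The writer side matches the order used by create_spu() below; the
* reader side is what an interrupt-context walker would use.
*/
#if 0
static void example_add_spu(struct spu *spu)
{
unsigned long flags;

mutex_lock(&spu_full_list_mutex); /* may sleep: take first */
spin_lock_irqsave(&spu_full_list_lock, flags);
list_add(&spu->full_list, &spu_full_list);
spin_unlock_irqrestore(&spu_full_list_lock, flags);
mutex_unlock(&spu_full_list_mutex);
}

static void example_walk_spus_atomic(void (*fn)(struct spu *))
{
struct spu *spu;
unsigned long flags;

spin_lock_irqsave(&spu_full_list_lock, flags); /* irq-safe walk */
list_for_each_entry(spu, &spu_full_list, full_list)
fn(spu);
spin_unlock_irqrestore(&spu_full_list_lock, flags);
}
#endif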
void spu_invalidate_slbs(struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
unsigned long flags;
spin_lock_irqsave(&spu->register_lock, flags);
if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
out_be64(&priv2->slb_invalidate_all_W, 0UL);
spin_unlock_irqrestore(&spu->register_lock, flags);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);
/* This is called by the MM core when a segment size is changed, to
* request a flush of all the SPEs using a given mm
*/
void spu_flush_all_slbs(struct mm_struct *mm)
{
struct spu *spu;
unsigned long flags;
spin_lock_irqsave(&spu_full_list_lock, flags);
list_for_each_entry(spu, &spu_full_list, full_list) {
if (spu->mm == mm)
spu_invalidate_slbs(spu);
}
spin_unlock_irqrestore(&spu_full_list_lock, flags);
}
/* The hack below stinks... try to do something better one of
* these days... Does it even work properly with NR_CPUS == 1 ?
*/
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;
/* Global TLBIE broadcast required with SPEs. */
bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr);
}
void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
unsigned long flags;
spin_lock_irqsave(&spu_full_list_lock, flags);
spu->mm = mm;
spin_unlock_irqrestore(&spu_full_list_lock, flags);
if (mm)
mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);
int spu_64k_pages_available(void)
{
return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
}
EXPORT_SYMBOL_GPL(spu_64k_pages_available);
static void spu_restart_dma(struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
else {
set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
mb();
}
}
static inline void spu_load_slb(struct spu *spu, int slbe, struct copro_slb *slb)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n",
__func__, slbe, slb->vsid, slb->esid);
out_be64(&priv2->slb_index_W, slbe);
/* set invalid before writing vsid */
out_be64(&priv2->slb_esid_RW, 0);
/* now it's safe to write the vsid */
out_be64(&priv2->slb_vsid_RW, slb->vsid);
/* setting the new esid makes the entry valid again */
out_be64(&priv2->slb_esid_RW, slb->esid);
}
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
struct copro_slb slb;
int ret;
ret = copro_calculate_slb(spu->mm, ea, &slb);
if (ret)
return ret;
spu_load_slb(spu, spu->slb_replace, &slb);
spu->slb_replace++;
if (spu->slb_replace >= 8)
spu->slb_replace = 0;
spu_restart_dma(spu);
spu->stats.slb_flt++;
return 0;
}
extern int hash_page(unsigned long ea, unsigned long access,
unsigned long trap, unsigned long dsisr); /* XXX */
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
int ret;
pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea);
/*
* Handle kernel space hash faults immediately. User hash
* faults need to be deferred to process context.
*/
if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
(REGION_ID(ea) != USER_REGION_ID)) {
spin_unlock(&spu->register_lock);
ret = hash_page(ea, _PAGE_PRESENT, 0x300, dsisr);
spin_lock(&spu->register_lock);
if (!ret) {
spu_restart_dma(spu);
return 0;
}
}
spu->class_1_dar = ea;
spu->class_1_dsisr = dsisr;
spu->stop_callback(spu, 1);
spu->class_1_dar = 0;
spu->class_1_dsisr = 0;
return 0;
}
static void __spu_kernel_slb(void *addr, struct copro_slb *slb)
{
unsigned long ea = (unsigned long)addr;
u64 llp;
if (REGION_ID(ea) == KERNEL_REGION_ID)
llp = mmu_psize_defs[mmu_linear_psize].sllp;
else
llp = mmu_psize_defs[mmu_virtual_psize].sllp;
slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
SLB_VSID_KERNEL | llp;
slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
}
/**
* Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
* address @new_addr is present.
*/
static inline int __slb_present(struct copro_slb *slbs, int nr_slbs,
void *new_addr)
{
unsigned long ea = (unsigned long)new_addr;
int i;
for (i = 0; i < nr_slbs; i++)
if (!((slbs[i].esid ^ ea) & ESID_MASK))
return 1;
return 0;
}
/**
* Setup the SPU kernel SLBs, in preparation for a context save/restore. We
* need to map both the context save area, and the save/restore code.
*
* Because the lscsa and code may cross segment boundaries, we check to see
* if mappings are required for the start and end of each range. We currently
* assume that the mappings are smaller than one segment - if not, something
* is seriously wrong.
*/
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
void *code, int code_size)
{
struct copro_slb slbs[4];
int i, nr_slbs = 0;
/* start and end addresses of both mappings */
void *addrs[] = {
lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
code, code + code_size - 1
};
/* check the set of addresses, and create a new entry in the slbs array
* if there isn't already a SLB for that address */
for (i = 0; i < ARRAY_SIZE(addrs); i++) {
if (__slb_present(slbs, nr_slbs, addrs[i]))
continue;
__spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
nr_slbs++;
}
spin_lock_irq(&spu->register_lock);
/* Add the set of SLBs */
for (i = 0; i < nr_slbs; i++)
spu_load_slb(spu, i, &slbs[i]);
spin_unlock_irq(&spu->register_lock);
}
EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
struct spu *spu;
unsigned long stat, mask;
spu = data;
spin_lock(&spu->register_lock);
mask = spu_int_mask_get(spu, 0);
stat = spu_int_stat_get(spu, 0) & mask;
spu->class_0_pending |= stat;
spu->class_0_dar = spu_mfc_dar_get(spu);
spu->stop_callback(spu, 0);
spu->class_0_pending = 0;
spu->class_0_dar = 0;
spu_int_stat_clear(spu, 0, stat);
spin_unlock(&spu->register_lock);
return IRQ_HANDLED;
}
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
struct spu *spu;
unsigned long stat, mask, dar, dsisr;
spu = data;
/* atomically read & clear class1 status. */
spin_lock(&spu->register_lock);
mask = spu_int_mask_get(spu, 1);
stat = spu_int_stat_get(spu, 1) & mask;
dar = spu_mfc_dar_get(spu);
dsisr = spu_mfc_dsisr_get(spu);
if (stat & CLASS1_STORAGE_FAULT_INTR)
spu_mfc_dsisr_set(spu, 0ul);
spu_int_stat_clear(spu, 1, stat);
pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
dar, dsisr);
if (stat & CLASS1_SEGMENT_FAULT_INTR)
__spu_trap_data_seg(spu, dar);
if (stat & CLASS1_STORAGE_FAULT_INTR)
__spu_trap_data_map(spu, dar, dsisr);
if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
;
if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
;
spu->class_1_dsisr = 0;
spu->class_1_dar = 0;
spin_unlock(&spu->register_lock);
return stat ? IRQ_HANDLED : IRQ_NONE;
}
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
struct spu *spu;
unsigned long stat;
unsigned long mask;
const int mailbox_intrs =
CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;
spu = data;
spin_lock(&spu->register_lock);
stat = spu_int_stat_get(spu, 2);
mask = spu_int_mask_get(spu, 2);
/* ignore interrupts we're not waiting for */
stat &= mask;
/* mailbox interrupts are level triggered. mask them now before
* acknowledging */
if (stat & mailbox_intrs)
spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
/* acknowledge all interrupts before the callbacks */
spu_int_stat_clear(spu, 2, stat);
pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
if (stat & CLASS2_MAILBOX_INTR)
spu->ibox_callback(spu);
if (stat & CLASS2_SPU_STOP_INTR)
spu->stop_callback(spu, 2);
if (stat & CLASS2_SPU_HALT_INTR)
spu->stop_callback(spu, 2);
if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
spu->mfc_callback(spu);
if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
spu->wbox_callback(spu);
spu->stats.class2_intr++;
spin_unlock(&spu->register_lock);
return stat ? IRQ_HANDLED : IRQ_NONE;
}
static int spu_request_irqs(struct spu *spu)
{
int ret = 0;
if (spu->irqs[0] != NO_IRQ) {
snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
spu->number);
ret = request_irq(spu->irqs[0], spu_irq_class_0,
0, spu->irq_c0, spu);
if (ret)
goto bail0;
}
if (spu->irqs[1] != NO_IRQ) {
snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
spu->number);
ret = request_irq(spu->irqs[1], spu_irq_class_1,
0, spu->irq_c1, spu);
if (ret)
goto bail1;
}
if (spu->irqs[2] != NO_IRQ) {
snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
spu->number);
ret = request_irq(spu->irqs[2], spu_irq_class_2,
0, spu->irq_c2, spu);
if (ret)
goto bail2;
}
return 0;
bail2:
if (spu->irqs[1] != NO_IRQ)
free_irq(spu->irqs[1], spu);
bail1:
if (spu->irqs[0] != NO_IRQ)
free_irq(spu->irqs[0], spu);
bail0:
return ret;
}
static void spu_free_irqs(struct spu *spu)
{
if (spu->irqs[0] != NO_IRQ)
free_irq(spu->irqs[0], spu);
if (spu->irqs[1] != NO_IRQ)
free_irq(spu->irqs[1], spu);
if (spu->irqs[2] != NO_IRQ)
free_irq(spu->irqs[2], spu);
}
void spu_init_channels(struct spu *spu)
{
static const struct {
unsigned channel;
unsigned count;
} zero_list[] = {
{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
}, count_list[] = {
{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
};
struct spu_priv2 __iomem *priv2;
int i;
priv2 = spu->priv2;
/* initialize all channel data to zero */
for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
int count;
out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
for (count = 0; count < zero_list[i].count; count++)
out_be64(&priv2->spu_chnldata_RW, 0);
}
/* initialize channel counts to meaningful values */
for (i = 0; i < ARRAY_SIZE(count_list); i++) {
out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
}
}
EXPORT_SYMBOL_GPL(spu_init_channels);
static struct bus_type spu_subsys = {
.name = "spu",
.dev_name = "spu",
};
int spu_add_dev_attr(struct device_attribute *attr)
{
struct spu *spu;
mutex_lock(&spu_full_list_mutex);
list_for_each_entry(spu, &spu_full_list, full_list)
device_create_file(&spu->dev, attr);
mutex_unlock(&spu_full_list_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(spu_add_dev_attr);
int spu_add_dev_attr_group(struct attribute_group *attrs)
{
struct spu *spu;
int rc = 0;
mutex_lock(&spu_full_list_mutex);
list_for_each_entry(spu, &spu_full_list, full_list) {
rc = sysfs_create_group(&spu->dev.kobj, attrs);
/* we're in trouble here, but try unwinding anyway */
if (rc) {
printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
__func__, attrs->name);
list_for_each_entry_continue_reverse(spu,
&spu_full_list, full_list)
sysfs_remove_group(&spu->dev.kobj, attrs);
break;
}
}
mutex_unlock(&spu_full_list_mutex);
return rc;
}
EXPORT_SYMBOL_GPL(spu_add_dev_attr_group);
void spu_remove_dev_attr(struct device_attribute *attr)
{
struct spu *spu;
mutex_lock(&spu_full_list_mutex);
list_for_each_entry(spu, &spu_full_list, full_list)
device_remove_file(&spu->dev, attr);
mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_dev_attr);
void spu_remove_dev_attr_group(struct attribute_group *attrs)
{
struct spu *spu;
mutex_lock(&spu_full_list_mutex);
list_for_each_entry(spu, &spu_full_list, full_list)
sysfs_remove_group(&spu->dev.kobj, attrs);
mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_dev_attr_group);
static int spu_create_dev(struct spu *spu)
{
int ret;
spu->dev.id = spu->number;
spu->dev.bus = &spu_subsys;
ret = device_register(&spu->dev);
if (ret) {
printk(KERN_ERR "Can't register SPU %d with sysfs\n",
spu->number);
return ret;
}
sysfs_add_device_to_node(&spu->dev, spu->node);
return 0;
}
static int __init create_spu(void *data)
{
struct spu *spu;
int ret;
static int number;
unsigned long flags;
ret = -ENOMEM;
spu = kzalloc(sizeof (*spu), GFP_KERNEL);
if (!spu)
goto out;
spu->alloc_state = SPU_FREE;
spin_lock_init(&spu->register_lock);
spin_lock(&spu_lock);
spu->number = number++;
spin_unlock(&spu_lock);
ret = spu_create_spu(spu, data);
if (ret)
goto out_free;
spu_mfc_sdr_setup(spu);
spu_mfc_sr1_set(spu, 0x33);
ret = spu_request_irqs(spu);
if (ret)
goto out_destroy;
ret = spu_create_dev(spu);
if (ret)
goto out_free_irqs;
mutex_lock(&cbe_spu_info[spu->node].list_mutex);
list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
cbe_spu_info[spu->node].n_spus++;
mutex_unlock(&cbe_spu_info[spu->node].list_mutex);
mutex_lock(&spu_full_list_mutex);
spin_lock_irqsave(&spu_full_list_lock, flags);
list_add(&spu->full_list, &spu_full_list);
spin_unlock_irqrestore(&spu_full_list_lock, flags);
mutex_unlock(&spu_full_list_mutex);
spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
spu->stats.tstamp = ktime_get_ns();
INIT_LIST_HEAD(&spu->aff_list);
goto out;
out_free_irqs:
spu_free_irqs(spu);
out_destroy:
spu_destroy_spu(spu);
out_free:
kfree(spu);
out:
return ret;
}
static const char *spu_state_names[] = {
"user", "system", "iowait", "idle"
};
static unsigned long long spu_acct_time(struct spu *spu,
enum spu_utilization_state state)
{
unsigned long long time = spu->stats.times[state];
/*
* If the spu is idle or the context is stopped, utilization
* statistics are not updated. Apply the time delta from the
* last recorded state of the spu.
*/
if (spu->stats.util_state == state)
time += ktime_get_ns() - spu->stats.tstamp;
return time / NSEC_PER_MSEC;
}
static ssize_t spu_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct spu *spu = container_of(dev, struct spu, dev);
return sprintf(buf, "%s %llu %llu %llu %llu "
"%llu %llu %llu %llu %llu %llu %llu %llu\n",
spu_state_names[spu->stats.util_state],
spu_acct_time(spu, SPU_UTIL_USER),
spu_acct_time(spu, SPU_UTIL_SYSTEM),
spu_acct_time(spu, SPU_UTIL_IOWAIT),
spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
spu->stats.vol_ctx_switch,
spu->stats.invol_ctx_switch,
spu->stats.slb_flt,
spu->stats.hash_flt,
spu->stats.min_flt,
spu->stats.maj_flt,
spu->stats.class2_intr,
spu->stats.libassist);
}
static DEVICE_ATTR(stat, 0444, spu_stat_show, NULL);
#ifdef CONFIG_KEXEC
struct crash_spu_info {
struct spu *spu;
u32 saved_spu_runcntl_RW;
u32 saved_spu_status_R;
u32 saved_spu_npc_RW;
u64 saved_mfc_sr1_RW;
u64 saved_mfc_dar;
u64 saved_mfc_dsisr;
};
#define CRASH_NUM_SPUS 16 /* Enough for current hardware */
static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];
static void crash_kexec_stop_spus(void)
{
struct spu *spu;
int i;
u64 tmp;
for (i = 0; i < CRASH_NUM_SPUS; i++) {
if (!crash_spu_info[i].spu)
continue;
spu = crash_spu_info[i].spu;
crash_spu_info[i].saved_spu_runcntl_RW =
in_be32(&spu->problem->spu_runcntl_RW);
crash_spu_info[i].saved_spu_status_R =
in_be32(&spu->problem->spu_status_R);
crash_spu_info[i].saved_spu_npc_RW =
in_be32(&spu->problem->spu_npc_RW);
crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu);
crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu);
tmp = spu_mfc_sr1_get(spu);
crash_spu_info[i].saved_mfc_sr1_RW = tmp;
tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
spu_mfc_sr1_set(spu, tmp);
__delay(200);
}
}
static void crash_register_spus(struct list_head *list)
{
struct spu *spu;
int ret;
list_for_each_entry(spu, list, full_list) {
if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
continue;
crash_spu_info[spu->number].spu = spu;
}
ret = crash_shutdown_register(&crash_kexec_stop_spus);
if (ret)
printk(KERN_ERR "Could not register SPU crash handler");
}
#else
static inline void crash_register_spus(struct list_head *list)
{
}
#endif
static void spu_shutdown(void)
{
struct spu *spu;
mutex_lock(&spu_full_list_mutex);
list_for_each_entry(spu, &spu_full_list, full_list) {
spu_free_irqs(spu);
spu_destroy_spu(spu);
}
mutex_unlock(&spu_full_list_mutex);
}
static struct syscore_ops spu_syscore_ops = {
.shutdown = spu_shutdown,
};
static int __init init_spu_base(void)
{
int i, ret = 0;
for (i = 0; i < MAX_NUMNODES; i++) {
mutex_init(&cbe_spu_info[i].list_mutex);
INIT_LIST_HEAD(&cbe_spu_info[i].spus);
}
if (!spu_management_ops)
goto out;
/* create system subsystem for spus */
ret = subsys_system_register(&spu_subsys, NULL);
if (ret)
goto out;
ret = spu_enumerate_spus(create_spu);
if (ret < 0) {
printk(KERN_WARNING "%s: Error initializing spus\n",
__func__);
goto out_unregister_subsys;
}
if (ret > 0)
fb_append_extra_logo(&logo_spe_clut224, ret);
mutex_lock(&spu_full_list_mutex);
xmon_register_spus(&spu_full_list);
crash_register_spus(&spu_full_list);
mutex_unlock(&spu_full_list_mutex);
spu_add_dev_attr(&dev_attr_stat);
register_syscore_ops(&spu_syscore_ops);
spu_init_affinity();
return 0;
out_unregister_subsys:
bus_unregister(&spu_subsys);
out:
return ret;
}
module_init(init_spu_base);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
| gpl-2.0 |
RidaShamasneh/nethunter_kernel_g5 | arch/sparc/kernel/auxio_32.c | 1822 | 3618 | /* auxio.c: Probing for the Sparc AUXIO register at boot time.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/export.h>
#include <asm/oplib.h>
#include <asm/io.h>
#include <asm/auxio.h>
#include <asm/string.h> /* memset(), Linux has no bzero() */
#include <asm/cpu_type.h>
#include "kernel.h"
/* Probe and map in the Auxiliary I/O register */
/* auxio_register is not static because it is referenced
* in entry.S::floppy_tdone
*/
void __iomem *auxio_register = NULL;
static DEFINE_SPINLOCK(auxio_lock);
void __init auxio_probe(void)
{
phandle node, auxio_nd;
struct linux_prom_registers auxregs[1];
struct resource r;
switch (sparc_cpu_model) {
case sparc_leon:
case sun4d:
return;
default:
break;
}
node = prom_getchild(prom_root_node);
auxio_nd = prom_searchsiblings(node, "auxiliary-io");
if (!auxio_nd) {
node = prom_searchsiblings(node, "obio");
node = prom_getchild(node);
auxio_nd = prom_searchsiblings(node, "auxio");
if (!auxio_nd) {
#ifdef CONFIG_PCI
/* There may be auxio on Ebus */
return;
#else
if(prom_searchsiblings(node, "leds")) {
/* VME chassis sun4m machine, no auxio exists. */
return;
}
prom_printf("Cannot find auxio node, cannot continue...\n");
prom_halt();
#endif
}
}
if (prom_getproperty(auxio_nd, "reg", (char *) auxregs, sizeof(auxregs)) <= 0)
return;
prom_apply_obio_ranges(auxregs, 0x1);
/* Map the register both read and write */
r.flags = auxregs[0].which_io & 0xF;
r.start = auxregs[0].phys_addr;
r.end = auxregs[0].phys_addr + auxregs[0].reg_size - 1;
auxio_register = of_ioremap(&r, 0, auxregs[0].reg_size, "auxio");
/* Fix the address on sun4m. */
if ((((unsigned long) auxregs[0].phys_addr) & 3) == 3)
auxio_register += (3 - ((unsigned long)auxio_register & 3));
set_auxio(AUXIO_LED, 0);
}
unsigned char get_auxio(void)
{
if (auxio_register)
return sbus_readb(auxio_register);
return 0;
}
EXPORT_SYMBOL(get_auxio);
void set_auxio(unsigned char bits_on, unsigned char bits_off)
{
unsigned char regval;
unsigned long flags;
spin_lock_irqsave(&auxio_lock, flags);
switch (sparc_cpu_model) {
case sun4m:
if (!auxio_register)
break; /* VME chassis sun4m, no auxio. */
regval = sbus_readb(auxio_register);
sbus_writeb(((regval | bits_on) & ~bits_off) | AUXIO_ORMEIN4M,
auxio_register);
break;
case sun4d:
break;
default:
panic("Can't set AUXIO register on this machine.");
}
spin_unlock_irqrestore(&auxio_lock, flags);
}
EXPORT_SYMBOL(set_auxio);
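/*
* Illustrative sketch (not part of the driver): set_auxio() does a
* read-modify-write under auxio_lock, so callers only name the bits to
* turn on and off (on sun4m; other models panic). For example, driving
* the chassis LED:
*/
#if 0
static void example_set_led(int on)
{
if (on)
set_auxio(AUXIO_LED, 0); /* set the LED bit */
else
set_auxio(0, AUXIO_LED); /* clear the LED bit */
}
#endif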
/* sun4m power control register (AUXIO2) */
volatile u8 __iomem *auxio_power_register = NULL;
void __init auxio_power_probe(void)
{
struct linux_prom_registers regs;
phandle node;
struct resource r;
/* Attempt to find the sun4m power control node. */
node = prom_getchild(prom_root_node);
node = prom_searchsiblings(node, "obio");
node = prom_getchild(node);
node = prom_searchsiblings(node, "power");
if (node == 0 || (s32)node == -1)
return;
/* Map the power control register. */
if (prom_getproperty(node, "reg", (char *)®s, sizeof(regs)) <= 0)
return;
prom_apply_obio_ranges(&regs, 1);
memset(&r, 0, sizeof(r));
r.flags = regs.which_io & 0xF;
r.start = regs.phys_addr;
r.end = regs.phys_addr + regs.reg_size - 1;
auxio_power_register =
(u8 __iomem *)of_ioremap(&r, 0, regs.reg_size, "auxpower");
/* Display a quick message on the console. */
if (auxio_power_register)
printk(KERN_INFO "Power off control detected.\n");
}
| gpl-2.0 |
Falklore/shamu | drivers/scsi/lpfc/lpfc_bsg.c | 2078 | 151390 | /*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2009-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*******************************************************************/
#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
struct lpfc_bsg_event {
struct list_head node;
struct kref kref;
wait_queue_head_t wq;
/* Event type and waiter identifiers */
uint32_t type_mask;
uint32_t req_id;
uint32_t reg_id;
/* next two flags are here for the auto-delete logic */
unsigned long wait_time_stamp;
int waiting;
/* seen and not seen events */
struct list_head events_to_get;
struct list_head events_to_see;
/* driver data associated with the job */
void *dd_data;
};
struct lpfc_bsg_iocb {
struct lpfc_iocbq *cmdiocbq;
struct lpfc_dmabuf *rmp;
struct lpfc_nodelist *ndlp;
};
struct lpfc_bsg_mbox {
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *mb;
struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
uint8_t *ext; /* extended mailbox data */
uint32_t mbOffset; /* from app */
uint32_t inExtWLen; /* from app */
uint32_t outExtWLen; /* from app */
};
#define MENLO_DID 0x0000FC0E
struct lpfc_bsg_menlo {
struct lpfc_iocbq *cmdiocbq;
struct lpfc_dmabuf *rmp;
};
#define TYPE_EVT 1
#define TYPE_IOCB 2
#define TYPE_MBOX 3
#define TYPE_MENLO 4
struct bsg_job_data {
uint32_t type;
struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */
union {
struct lpfc_bsg_event *evt;
struct lpfc_bsg_iocb iocb;
struct lpfc_bsg_mbox mbox;
struct lpfc_bsg_menlo menlo;
} context_un;
};
struct event_data {
struct list_head node;
uint32_t type;
uint32_t immed_dat;
void *data;
uint32_t len;
};
#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10
enum ELX_LOOPBACK_CMD {
ELX_LOOPBACK_XRI_SETUP,
ELX_LOOPBACK_DATA,
};
#define ELX_LOOPBACK_HEADER_SZ \
(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
struct lpfc_dmabufext {
struct lpfc_dmabuf dma;
uint32_t size;
uint32_t flag;
};
static void
lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
{
struct lpfc_dmabuf *mlast, *next_mlast;
if (mlist) {
list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
list) {
lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
list_del(&mlast->list);
kfree(mlast);
}
lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
kfree(mlist);
}
return;
}
static struct lpfc_dmabuf *
lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
int outbound_buffers, struct ulp_bde64 *bpl,
int *bpl_entries)
{
struct lpfc_dmabuf *mlist = NULL;
struct lpfc_dmabuf *mp;
unsigned int bytes_left = size;
/* Verify we can support the size specified */
if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
return NULL;
/* Determine the number of dma buffers to allocate */
*bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
size/LPFC_BPL_SIZE);
/* Allocate dma buffer and place in BPL passed */
while (bytes_left) {
/* Allocate dma buffer */
mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!mp) {
if (mlist)
lpfc_free_bsg_buffers(phba, mlist);
return NULL;
}
INIT_LIST_HEAD(&mp->list);
mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
if (!mp->virt) {
kfree(mp);
if (mlist)
lpfc_free_bsg_buffers(phba, mlist);
return NULL;
}
/* Queue it to a linked list */
if (!mlist)
mlist = mp;
else
list_add_tail(&mp->list, &mlist->list);
/* Add buffer to buffer pointer list */
if (outbound_buffers)
bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
else
bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
bpl->tus.f.bdeSize = (uint16_t)
(bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
bytes_left);
bytes_left -= bpl->tus.f.bdeSize;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
bpl++;
}
return mlist;
}
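/*
* Illustrative note (not part of the driver): the bpl_entries computation
* above is a hand-rolled round-up division; the equivalent with the
* <linux/kernel.h> helper would be:
*/
#if 0
static inline int example_bpl_entries(unsigned int size)
{
return DIV_ROUND_UP(size, LPFC_BPL_SIZE); /* same result as above */
}
#endif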
static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
struct fc_bsg_buffer *bsg_buffers,
unsigned int bytes_to_transfer, int to_buffers)
{
struct lpfc_dmabuf *mp;
unsigned int transfer_bytes, bytes_copied = 0;
unsigned int sg_offset, dma_offset;
unsigned char *dma_address, *sg_address;
LIST_HEAD(temp_list);
struct sg_mapping_iter miter;
unsigned long flags;
unsigned int sg_flags = SG_MITER_ATOMIC;
bool sg_valid;
list_splice_init(&dma_buffers->list, &temp_list);
list_add(&dma_buffers->list, &temp_list);
sg_offset = 0;
if (to_buffers)
sg_flags |= SG_MITER_FROM_SG;
else
sg_flags |= SG_MITER_TO_SG;
sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
sg_flags);
local_irq_save(flags);
sg_valid = sg_miter_next(&miter);
list_for_each_entry(mp, &temp_list, list) {
dma_offset = 0;
while (bytes_to_transfer && sg_valid &&
(dma_offset < LPFC_BPL_SIZE)) {
dma_address = mp->virt + dma_offset;
if (sg_offset) {
/* Continue previous partial transfer of sg */
sg_address = miter.addr + sg_offset;
transfer_bytes = miter.length - sg_offset;
} else {
sg_address = miter.addr;
transfer_bytes = miter.length;
}
if (bytes_to_transfer < transfer_bytes)
transfer_bytes = bytes_to_transfer;
if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
transfer_bytes = LPFC_BPL_SIZE - dma_offset;
if (to_buffers)
memcpy(dma_address, sg_address, transfer_bytes);
else
memcpy(sg_address, dma_address, transfer_bytes);
dma_offset += transfer_bytes;
sg_offset += transfer_bytes;
bytes_to_transfer -= transfer_bytes;
bytes_copied += transfer_bytes;
if (sg_offset >= miter.length) {
sg_offset = 0;
sg_valid = sg_miter_next(&miter);
}
}
}
sg_miter_stop(&miter);
local_irq_restore(flags);
list_del_init(&dma_buffers->list);
list_splice(&temp_list, &dma_buffers->list);
return bytes_copied;
}
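/*
* Illustrative sketch (not part of the driver): lpfc_bsg_copy_data() walks
* the bsg scatterlist with the sg_miter API (<linux/scatterlist.h>).
* Because it passes SG_MITER_ATOMIC, interrupts must stay disabled for
* the whole iteration; the minimal shape of such a walk is:
*/
#if 0
static unsigned int example_sum_sg(struct scatterlist *sgl, int nents)
{
struct sg_mapping_iter miter;
unsigned long flags;
unsigned int total = 0;

sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
local_irq_save(flags); /* atomic kmaps: no interrupts */
while (sg_miter_next(&miter))
total += miter.length; /* miter.addr is mapped here */
sg_miter_stop(&miter);
local_irq_restore(flags);
return total;
}
#endif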
/**
* lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
* @phba: Pointer to HBA context object.
* @cmdiocbq: Pointer to command iocb.
* @rspiocbq: Pointer to response iocb.
*
* This function is the completion handler for iocbs issued using
* lpfc_bsg_send_mgmt_cmd function. This function is called by the
* ring event handler function without any lock held. This function
* can be called from both worker thread context and interrupt
* context. This function also can be called from another thread which
* cleans up the SLI layer objects.
* This function copies the contents of the response into the bsg job's
* reply payload (or records an error status), releases the command
* resources, and completes the job if it is still active.
**/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocbq,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
struct fc_bsg_job *job;
IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp, *rmp;
struct lpfc_nodelist *ndlp;
struct lpfc_bsg_iocb *iocb;
unsigned long flags;
unsigned int rsp_size;
int rc = 0;
dd_data = cmdiocbq->context1;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
iocb = &dd_data->context_un.iocb;
ndlp = iocb->ndlp;
rmp = iocb->rmp;
cmp = cmdiocbq->context2;
bmp = cmdiocbq->context3;
rsp = &rspiocbq->iocb;
/* Copy the completed data or set the error status */
if (job) {
if (rsp->ulpStatus) {
if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;
case IOERR_INVALID_RPI:
rc = -EFAULT;
break;
default:
rc = -EACCES;
break;
}
} else {
rc = -EACCES;
}
} else {
rsp_size = rsp->un.genreq64.bdl.bdeSize;
job->reply->reply_payload_rcv_len =
lpfc_bsg_copy_data(rmp, &job->reply_payload,
rsp_size, 0);
}
}
lpfc_free_bsg_buffers(phba, cmp);
lpfc_free_bsg_buffers(phba, rmp);
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
kfree(bmp);
lpfc_sli_release_iocbq(phba, cmdiocbq);
lpfc_nlp_put(ndlp);
kfree(dd_data);
/* Complete the job if the job is still active */
if (job) {
job->reply->result = rc;
job->job_done(job);
}
return;
}
/**
* lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
* @job: fc_bsg_job to handle
**/
static int
lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata = job->rport->dd_data;
struct lpfc_nodelist *ndlp = rdata->pnode;
struct ulp_bde64 *bpl = NULL;
uint32_t timeout;
struct lpfc_iocbq *cmdiocbq = NULL;
IOCB_t *cmd;
struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
int request_nseg;
int reply_nseg;
struct bsg_job_data *dd_data;
uint32_t creg_val;
int rc = 0;
int iocb_stat;
/* in case no data is transferred */
job->reply->reply_payload_rcv_len = 0;
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2733 Failed allocation of dd_data\n");
rc = -ENOMEM;
goto no_dd_data;
}
if (!lpfc_nlp_get(ndlp)) {
rc = -ENODEV;
goto no_ndlp;
}
if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
rc = -ENODEV;
goto free_ndlp;
}
cmdiocbq = lpfc_sli_get_iocbq(phba);
if (!cmdiocbq) {
rc = -ENOMEM;
goto free_ndlp;
}
cmd = &cmdiocbq->iocb;
bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!bmp) {
rc = -ENOMEM;
goto free_cmdiocbq;
}
bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
if (!bmp->virt) {
rc = -ENOMEM;
goto free_bmp;
}
INIT_LIST_HEAD(&bmp->list);
bpl = (struct ulp_bde64 *) bmp->virt;
request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
1, bpl, &request_nseg);
if (!cmp) {
rc = -ENOMEM;
goto free_bmp;
}
lpfc_bsg_copy_data(cmp, &job->request_payload,
job->request_payload.payload_len, 1);
bpl += request_nseg;
reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
bpl, &reply_nseg);
if (!rmp) {
rc = -ENOMEM;
goto free_cmp;
}
cmd->un.genreq64.bdl.ulpIoTag32 = 0;
cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
cmd->un.genreq64.bdl.bdeSize =
(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
cmd->un.genreq64.w5.hcsw.Dfctl = 0;
cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
cmd->ulpBdeCount = 1;
cmd->ulpLe = 1;
cmd->ulpClass = CLASS3;
cmd->ulpContext = ndlp->nlp_rpi;
if (phba->sli_rev == LPFC_SLI_REV4)
cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
cmd->ulpOwner = OWN_CHIP;
cmdiocbq->vport = phba->pport;
cmdiocbq->context3 = bmp;
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
timeout = phba->fc_ratov * 2;
cmd->ulpTimeout = timeout;
cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
cmdiocbq->context1 = dd_data;
cmdiocbq->context2 = cmp;
cmdiocbq->context3 = bmp;
cmdiocbq->context_un.ndlp = ndlp;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
dd_data->context_un.iocb.ndlp = ndlp;
dd_data->context_un.iocb.rmp = rmp;
job->dd_data = dd_data;
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
if (lpfc_readl(phba->HCregaddr, &creg_val)) {
rc = -EIO;
goto free_rmp;
}
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
}
iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
if (iocb_stat == IOCB_SUCCESS)
return 0; /* done for now */
else if (iocb_stat == IOCB_BUSY)
rc = -EAGAIN;
else
rc = -EIO;
/* iocb failed so cleanup */
free_rmp:
lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
if (bmp->virt)
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
kfree(bmp);
free_cmdiocbq:
lpfc_sli_release_iocbq(phba, cmdiocbq);
free_ndlp:
lpfc_nlp_put(ndlp);
no_ndlp:
kfree(dd_data);
no_dd_data:
/* make error code available to userspace */
job->reply->result = rc;
job->dd_data = NULL;
return rc;
}
/**
* lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
* @phba: Pointer to HBA context object.
* @cmdiocbq: Pointer to command iocb.
* @rspiocbq: Pointer to response iocb.
*
* This function is the completion handler for iocbs issued using
* lpfc_bsg_rport_els function. This function is called by the
* ring event handler function without any lock held. This function
* can be called from both worker thread context and interrupt
* context. This function also can be called from other thread which
* cleans up the SLI layer objects.
* This function copies the contents of the response into the bsg job's
* reply payload or, for an LS_RJT, fills in the reject data; it then
* releases the command resources and completes the job if it is still
* active.
**/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocbq,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
struct fc_bsg_job *job;
IOCB_t *rsp;
struct lpfc_nodelist *ndlp;
struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
struct fc_bsg_ctels_reply *els_reply;
uint8_t *rjt_data;
unsigned long flags;
unsigned int rsp_size;
int rc = 0;
dd_data = cmdiocbq->context1;
ndlp = dd_data->context_un.iocb.ndlp;
cmdiocbq->context1 = ndlp;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
rsp = &rspiocbq->iocb;
pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
prsp = (struct lpfc_dmabuf *)pcmd->list.next;
/* Copy the completed job data or determine the job status if job is
* still active
*/
if (job) {
if (rsp->ulpStatus == IOSTAT_SUCCESS) {
rsp_size = rsp->un.elsreq64.bdl.bdeSize;
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
prsp->virt,
rsp_size);
} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
job->reply->reply_payload_rcv_len =
sizeof(struct fc_bsg_ctels_reply);
/* LS_RJT data returned in word 4 */
rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
els_reply = &job->reply->reply_data.ctels_reply;
els_reply->status = FC_CTELS_STATUS_REJECT;
els_reply->rjt_data.action = rjt_data[3];
els_reply->rjt_data.reason_code = rjt_data[2];
els_reply->rjt_data.reason_explanation = rjt_data[1];
els_reply->rjt_data.vendor_unique = rjt_data[0];
} else {
rc = -EIO;
}
}
lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, cmdiocbq);
kfree(dd_data);
/* Complete the job if the job is still active */
if (job) {
job->reply->result = rc;
job->job_done(job);
}
return;
}
/**
* lpfc_bsg_rport_els - send an ELS command from a bsg request
* @job: fc_bsg_job to handle
**/
static int
lpfc_bsg_rport_els(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata = job->rport->dd_data;
struct lpfc_nodelist *ndlp = rdata->pnode;
uint32_t elscmd;
uint32_t cmdsize;
uint32_t rspsize;
struct lpfc_iocbq *cmdiocbq;
uint16_t rpi = 0;
struct bsg_job_data *dd_data;
uint32_t creg_val;
int rc = 0;
/* in case no data is transferred */
job->reply->reply_payload_rcv_len = 0;
/* verify the els command is not greater than the
* maximum ELS transfer size.
*/
if (job->request_payload.payload_len > FCELSSIZE) {
rc = -EINVAL;
goto no_dd_data;
}
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2735 Failed allocation of dd_data\n");
rc = -ENOMEM;
goto no_dd_data;
}
elscmd = job->request->rqst_data.r_els.els_code;
cmdsize = job->request_payload.payload_len;
rspsize = job->reply_payload.payload_len;
if (!lpfc_nlp_get(ndlp)) {
rc = -ENODEV;
goto free_dd_data;
}
/* We will use the dma buffers allocated by lpfc_prep_els_iocb for the
* command and response, so that if the job times out and the request
* is freed, we won't DMA into memory that is no longer allocated for
* the request.
*/
cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
ndlp->nlp_DID, elscmd);
if (!cmdiocbq) {
rc = -EIO;
goto release_ndlp;
}
rpi = ndlp->nlp_rpi;
/* Transfer the request payload to allocated command dma buffer */
sg_copy_to_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
cmdsize);
if (phba->sli_rev == LPFC_SLI_REV4)
cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
else
cmdiocbq->iocb.ulpContext = rpi;
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->context1 = dd_data;
cmdiocbq->context_un.ndlp = ndlp;
cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
dd_data->context_un.iocb.ndlp = ndlp;
dd_data->context_un.iocb.rmp = NULL;
job->dd_data = dd_data;
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
if (lpfc_readl(phba->HCregaddr, &creg_val)) {
rc = -EIO;
goto linkdown_err;
}
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
}
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
if (rc == IOCB_SUCCESS)
return 0; /* done for now */
else if (rc == IOCB_BUSY)
rc = -EAGAIN;
else
rc = -EIO;
linkdown_err:
cmdiocbq->context1 = ndlp;
lpfc_els_free_iocb(phba, cmdiocbq);
release_ndlp:
lpfc_nlp_put(ndlp);
free_dd_data:
kfree(dd_data);
no_dd_data:
/* make error code available to userspace */
job->reply->result = rc;
job->dd_data = NULL;
return rc;
}
/**
* lpfc_bsg_event_free - frees an allocated event structure
* @kref: Pointer to a kref.
*
* Called from kref_put. Back cast the kref into an event structure address.
* Free any events to get, delete associated nodes, free any events to see,
* free any data then free the event itself.
**/
static void
lpfc_bsg_event_free(struct kref *kref)
{
struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
kref);
struct event_data *ed;
list_del(&evt->node);
while (!list_empty(&evt->events_to_get)) {
ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
list_del(&ed->node);
kfree(ed->data);
kfree(ed);
}
while (!list_empty(&evt->events_to_see)) {
ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
list_del(&ed->node);
kfree(ed->data);
kfree(ed);
}
kfree(evt->dd_data);
kfree(evt);
}
/**
* lpfc_bsg_event_ref - increments the kref for an event
* @evt: Pointer to an event structure.
**/
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
kref_get(&evt->kref);
}
/**
* lpfc_bsg_event_unref - Uses kref_put to free an event structure
* @evt: Pointer to an event structure.
**/
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
kref_put(&evt->kref, lpfc_bsg_event_free);
}
/**
* lpfc_bsg_event_new - allocate and initialize a event structure
* @ev_mask: Mask of events.
* @ev_reg_id: Event reg id.
* @ev_req_id: Event request id.
**/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
if (!evt)
return NULL;
INIT_LIST_HEAD(&evt->events_to_get);
INIT_LIST_HEAD(&evt->events_to_see);
evt->type_mask = ev_mask;
evt->req_id = ev_req_id;
evt->reg_id = ev_reg_id;
evt->wait_time_stamp = jiffies;
evt->dd_data = NULL;
init_waitqueue_head(&evt->wq);
kref_init(&evt->kref);
return evt;
}
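/*
* Illustrative sketch (not part of the driver): event lifetime is kref
* managed - lpfc_bsg_event_new() starts the count at 1, waiters take
* extra references, and the final lpfc_bsg_event_unref() lands in
* lpfc_bsg_event_free(), which also unlinks evt->node. A hypothetical
* balanced use, registered under ct_ev_lock as the driver does:
*/
#if 0
static void example_event_use(struct lpfc_hba *phba)
{
struct lpfc_bsg_event *evt;
unsigned long flags;

evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, 0, SLI_CT_ELX_LOOPBACK);
if (!evt)
return;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_add(&evt->node, &phba->ct_ev_waiters);
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* ... wait on evt->wq and consume events_to_get here ... */
lpfc_bsg_event_unref(evt); /* final unref: unlinks and frees evt */
}
#endif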
/**
* diag_cmd_data_free - Frees an lpfc dma buffer extension
* @phba: Pointer to HBA context object.
* @mlist: Pointer to an lpfc dma buffer extension.
**/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
struct lpfc_dmabufext *mlast;
struct pci_dev *pcidev;
struct list_head head, *curr, *next;
if ((!mlist) || (!lpfc_is_link_up(phba) &&
(phba->link_flag & LS_LOOPBACK_MODE))) {
return 0;
}
pcidev = phba->pcidev;
list_add_tail(&head, &mlist->dma.list);
list_for_each_safe(curr, next, &head) {
mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
if (mlast->dma.virt)
dma_free_coherent(&pcidev->dev,
mlast->size,
mlast->dma.virt,
mlast->dma.phys);
kfree(mlast);
}
return 0;
}
/**
* lpfc_bsg_ct_unsol_event - process an unsolicited CT command
* @phba: Pointer to HBA context object.
* @pring: Pointer to the SLI ring on which the iocb was received.
* @piocbq: Pointer to the unsolicited receive iocb.
*
* This function is called when an unsolicited CT command is received. It
* forwards the event to any processes registered to receive CT events.
**/
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocbq)
{
uint32_t evt_req_id = 0;
uint32_t cmd;
uint32_t len;
struct lpfc_dmabuf *dmabuf = NULL;
struct lpfc_bsg_event *evt;
struct event_data *evt_dat = NULL;
struct lpfc_iocbq *iocbq;
size_t offset = 0;
struct list_head head;
struct ulp_bde64 *bde;
dma_addr_t dma_addr;
int i;
struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
struct lpfc_hbq_entry *hbqe;
struct lpfc_sli_ct_request *ct_req;
struct fc_bsg_job *job = NULL;
struct bsg_job_data *dd_data = NULL;
unsigned long flags;
int size = 0;
INIT_LIST_HEAD(&head);
list_add_tail(&head, &piocbq->list);
if (piocbq->iocb.ulpBdeCount == 0 ||
piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
goto error_ct_unsol_exit;
if (phba->link_state == LPFC_HBA_ERROR ||
(!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
goto error_ct_unsol_exit;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
dmabuf = bdeBuf1;
else {
dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
piocbq->iocb.un.cont64[0].addrLow);
dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
}
if (dmabuf == NULL)
goto error_ct_unsol_exit;
ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
evt_req_id = ct_req->FsType;
cmd = ct_req->CommandResponse.bits.CmdRsp;
len = ct_req->CommandResponse.bits.Size;
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
if (!(evt->type_mask & FC_REG_CT_EVENT) ||
evt->req_id != evt_req_id)
continue;
lpfc_bsg_event_ref(evt);
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
if (evt_dat == NULL) {
spin_lock_irqsave(&phba->ct_ev_lock, flags);
lpfc_bsg_event_unref(evt);
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2614 Memory allocation failed for "
"CT event\n");
break;
}
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
/* take accumulated byte count from the last iocbq */
iocbq = list_entry(head.prev, typeof(*iocbq), list);
evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
} else {
list_for_each_entry(iocbq, &head, list) {
for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
evt_dat->len +=
iocbq->iocb.un.cont64[i].tus.f.bdeSize;
}
}
evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
if (evt_dat->data == NULL) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2615 Memory allocation failed for "
"CT event data, size %d\n",
evt_dat->len);
kfree(evt_dat);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
lpfc_bsg_event_unref(evt);
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
goto error_ct_unsol_exit;
}
list_for_each_entry(iocbq, &head, list) {
size = 0;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
bdeBuf1 = iocbq->context2;
bdeBuf2 = iocbq->context3;
}
for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
if (phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED) {
if (i == 0) {
hbqe = (struct lpfc_hbq_entry *)
&iocbq->iocb.un.ulpWord[0];
size = hbqe->bde.tus.f.bdeSize;
dmabuf = bdeBuf1;
} else if (i == 1) {
hbqe = (struct lpfc_hbq_entry *)
&iocbq->iocb.unsli3.
sli3Words[4];
size = hbqe->bde.tus.f.bdeSize;
dmabuf = bdeBuf2;
}
if ((offset + size) > evt_dat->len)
size = evt_dat->len - offset;
} else {
size = iocbq->iocb.un.cont64[i].
tus.f.bdeSize;
bde = &iocbq->iocb.un.cont64[i];
dma_addr = getPaddr(bde->addrHigh,
bde->addrLow);
dmabuf = lpfc_sli_ringpostbuf_get(phba,
pring, dma_addr);
}
if (!dmabuf) {
lpfc_printf_log(phba, KERN_ERR,
LOG_LIBDFC, "2616 No dmabuf "
"found for iocbq 0x%p\n",
iocbq);
kfree(evt_dat->data);
kfree(evt_dat);
spin_lock_irqsave(&phba->ct_ev_lock,
flags);
lpfc_bsg_event_unref(evt);
spin_unlock_irqrestore(
&phba->ct_ev_lock, flags);
goto error_ct_unsol_exit;
}
memcpy((char *)(evt_dat->data) + offset,
dmabuf->virt, size);
offset += size;
if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
!(phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED)) {
lpfc_sli_ringpostbuf_put(phba, pring,
dmabuf);
} else {
switch (cmd) {
case ELX_LOOPBACK_DATA:
if (phba->sli_rev <
LPFC_SLI_REV4)
diag_cmd_data_free(phba,
(struct lpfc_dmabufext
*)dmabuf);
break;
case ELX_LOOPBACK_XRI_SETUP:
if ((phba->sli_rev ==
LPFC_SLI_REV2) ||
(phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED
)) {
lpfc_in_buf_free(phba,
dmabuf);
} else {
lpfc_post_buffer(phba,
pring,
1);
}
break;
default:
if (!(phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED))
lpfc_post_buffer(phba,
pring,
1);
break;
}
}
}
}
spin_lock_irqsave(&phba->ct_ev_lock, flags);
if (phba->sli_rev == LPFC_SLI_REV4) {
evt_dat->immed_dat = phba->ctx_idx;
phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
/* Provide warning for over-run of the ct_ctx array */
if (phba->ct_ctx[evt_dat->immed_dat].valid ==
UNSOL_VALID)
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
"2717 CT context array entry "
"[%d] over-run: oxid:x%x, "
"sid:x%x\n", phba->ctx_idx,
phba->ct_ctx[
evt_dat->immed_dat].oxid,
phba->ct_ctx[
evt_dat->immed_dat].SID);
phba->ct_ctx[evt_dat->immed_dat].rxid =
piocbq->iocb.ulpContext;
phba->ct_ctx[evt_dat->immed_dat].oxid =
piocbq->iocb.unsli3.rcvsli3.ox_id;
phba->ct_ctx[evt_dat->immed_dat].SID =
piocbq->iocb.un.rcvels.remoteID;
phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
} else
evt_dat->immed_dat = piocbq->iocb.ulpContext;
evt_dat->type = FC_REG_CT_EVENT;
list_add(&evt_dat->node, &evt->events_to_see);
if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
wake_up_interruptible(&evt->wq);
lpfc_bsg_event_unref(evt);
break;
}
list_move(evt->events_to_see.prev, &evt->events_to_get);
dd_data = (struct bsg_job_data *)evt->dd_data;
job = dd_data->set_job;
dd_data->set_job = NULL;
lpfc_bsg_event_unref(evt);
if (job) {
job->reply->reply_payload_rcv_len = size;
/* make error code available to userspace */
job->reply->result = 0;
job->dd_data = NULL;
/* complete the job back to userspace */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
job->job_done(job);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
}
}
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
error_ct_unsol_exit:
if (!list_empty(&head))
list_del(&head);
if ((phba->sli_rev < LPFC_SLI_REV4) &&
(evt_req_id == SLI_CT_ELX_LOOPBACK))
return 0;
return 1;
}
/**
* lpfc_bsg_ct_unsol_abort - handle a CT abort to the management plane
* @phba: Pointer to HBA context object.
* @dmabuf: pointer to a dmabuf that describes the FC sequence
*
* This function handles an abort of a CT command toward the management plane
* for an SLI4 port.
*
* If a pending context for a CT command to the management plane is present,
* this function clears that context and returns 1 (handled); otherwise, it
* returns 0, indicating no such context exists.
**/
int
lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
struct fc_frame_header fc_hdr;
struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
int ctx_idx, handled = 0;
uint16_t oxid, rxid;
uint32_t sid;
memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
continue;
if (phba->ct_ctx[ctx_idx].rxid != rxid)
continue;
if (phba->ct_ctx[ctx_idx].oxid != oxid)
continue;
if (phba->ct_ctx[ctx_idx].SID != sid)
continue;
phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
handled = 1;
}
return handled;
}
/**
* lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
* @job: SET_EVENT fc_bsg_job
**/
static int
lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct set_ct_event *event_req;
struct lpfc_bsg_event *evt;
int rc = 0;
struct bsg_job_data *dd_data = NULL;
uint32_t ev_mask;
unsigned long flags;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2612 Received SET_CT_EVENT below minimum "
"size\n");
rc = -EINVAL;
goto job_error;
}
event_req = (struct set_ct_event *)
job->request->rqst_data.h_vendor.vendor_cmd;
ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
FC_REG_EVENT_MASK);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
if (evt->reg_id == event_req->ev_reg_id) {
lpfc_bsg_event_ref(evt);
evt->wait_time_stamp = jiffies;
dd_data = (struct bsg_job_data *)evt->dd_data;
break;
}
}
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
if (&evt->node == &phba->ct_ev_waiters) {
/* no event waiting struct yet - first call */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (dd_data == NULL) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2734 Failed allocation of dd_data\n");
rc = -ENOMEM;
goto job_error;
}
evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
event_req->ev_req_id);
if (!evt) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2617 Failed allocation of event "
"waiter\n");
rc = -ENOMEM;
goto job_error;
}
dd_data->type = TYPE_EVT;
dd_data->set_job = NULL;
dd_data->context_un.evt = evt;
evt->dd_data = (void *)dd_data;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_add(&evt->node, &phba->ct_ev_waiters);
lpfc_bsg_event_ref(evt);
evt->wait_time_stamp = jiffies;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
}
spin_lock_irqsave(&phba->ct_ev_lock, flags);
evt->waiting = 1;
dd_data->set_job = job; /* for unsolicited command */
job->dd_data = dd_data; /* for fc transport timeout callback*/
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
return 0; /* call job done later */
job_error:
kfree(dd_data);
job->dd_data = NULL;
return rc;
}
/**
* lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
* @job: GET_EVENT fc_bsg_job
**/
static int
lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct get_ct_event *event_req;
struct get_ct_event_reply *event_reply;
struct lpfc_bsg_event *evt;
struct event_data *evt_dat = NULL;
unsigned long flags;
int rc = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2613 Received GET_CT_EVENT request below "
"minimum size\n");
rc = -EINVAL;
goto job_error;
}
event_req = (struct get_ct_event *)
job->request->rqst_data.h_vendor.vendor_cmd;
event_reply = (struct get_ct_event_reply *)
job->reply->reply_data.vendor_reply.vendor_rsp;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
if (evt->reg_id == event_req->ev_reg_id) {
if (list_empty(&evt->events_to_get))
break;
lpfc_bsg_event_ref(evt);
evt->wait_time_stamp = jiffies;
evt_dat = list_entry(evt->events_to_get.prev,
struct event_data, node);
list_del(&evt_dat->node);
break;
}
}
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* The app may continue to ask for event data until it gets
* an error indicating that there isn't any more
*/
if (evt_dat == NULL) {
job->reply->reply_payload_rcv_len = 0;
rc = -ENOENT;
goto job_error;
}
if (evt_dat->len > job->request_payload.payload_len) {
evt_dat->len = job->request_payload.payload_len;
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2618 Truncated event data at %d "
"bytes\n",
job->request_payload.payload_len);
}
event_reply->type = evt_dat->type;
event_reply->immed_data = evt_dat->immed_dat;
if (evt_dat->len > 0)
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
evt_dat->data, evt_dat->len);
else
job->reply->reply_payload_rcv_len = 0;
if (evt_dat) {
kfree(evt_dat->data);
kfree(evt_dat);
}
spin_lock_irqsave(&phba->ct_ev_lock, flags);
lpfc_bsg_event_unref(evt);
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
job->dd_data = NULL;
job->reply->result = 0;
job->job_done(job);
return 0;
job_error:
job->dd_data = NULL;
job->reply->result = rc;
return rc;
}
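/*
* Illustrative flow (a sketch, not additional driver logic): a management
* application pairs the two vendor commands above. SET_EVENT registers a
* waiter and returns immediately; GET_EVENT then drains queued event data,
* oldest entry first, until the driver reports -ENOENT:
*
*	lpfc_bsg_hba_set_event(job);	(returns 0, job completes later)
*	(unsolicited CT arrives; lpfc_bsg_ct_unsol_event() queues it)
*	lpfc_bsg_hba_get_event(job);	(copies one event to userspace)
*	lpfc_bsg_hba_get_event(job);	(-ENOENT once the queue is empty)
*/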
/**
* lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
* @phba: Pointer to HBA context object.
* @cmdiocbq: Pointer to command iocb.
* @rspiocbq: Pointer to response iocb.
*
* This function is the completion handler for iocbs issued via the
* lpfc_issue_ct_rsp function. It is called by the ring event handler
* function without any lock held, and can run from worker thread context,
* interrupt context, or another thread that cleans up the SLI layer
* objects. It translates any iocb error status into an errno, releases the
* buffers, iocb, ndlp reference, and tracking structure tied to the
* command and, if the bsg job is still active, sets the job result and
* completes the job back to userspace.
**/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocbq,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
struct fc_bsg_job *job;
IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp;
struct lpfc_nodelist *ndlp;
unsigned long flags;
int rc = 0;
dd_data = cmdiocbq->context1;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
ndlp = dd_data->context_un.iocb.ndlp;
cmp = cmdiocbq->context2;
bmp = cmdiocbq->context3;
rsp = &rspiocbq->iocb;
/* Copy the completed job data or set the error status */
if (job) {
if (rsp->ulpStatus) {
if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;
case IOERR_INVALID_RPI:
rc = -EFAULT;
break;
default:
rc = -EACCES;
break;
}
} else {
rc = -EACCES;
}
} else {
job->reply->reply_payload_rcv_len = 0;
}
}
lpfc_free_bsg_buffers(phba, cmp);
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
kfree(bmp);
lpfc_sli_release_iocbq(phba, cmdiocbq);
lpfc_nlp_put(ndlp);
kfree(dd_data);
/* Complete the job if the job is still active */
if (job) {
job->reply->result = rc;
job->job_done(job);
}
return;
}
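/*
* Locking note (an observation on the handler above, not new behavior):
* reading dd_data->set_job and clearing job->dd_data under ct_ev_lock is
* what serializes this completion against the bsg timeout handler; once
* job->dd_data is NULL the timeout path has nothing left to abort, so the
* job is completed exactly once.
*/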
/**
* lpfc_issue_ct_rsp - issue a ct response
* @phba: Pointer to HBA context object.
* @job: Pointer to the job object.
* @tag: tag index value into the port's context exchange array.
* @cmp: Pointer to a dma buffer descriptor chain holding the response payload.
* @bmp: Pointer to a dma buffer descriptor for the buffer pointer list.
* @num_entry: Number of entries in the BDE list.
**/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
int num_entry)
{
IOCB_t *icmd;
struct lpfc_iocbq *ctiocb = NULL;
int rc = 0;
struct lpfc_nodelist *ndlp = NULL;
struct bsg_job_data *dd_data;
uint32_t creg_val;
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2736 Failed allocation of dd_data\n");
rc = -ENOMEM;
goto no_dd_data;
}
/* Allocate buffer for command iocb */
ctiocb = lpfc_sli_get_iocbq(phba);
if (!ctiocb) {
rc = -ENOMEM;
goto no_ctiocb;
}
icmd = &ctiocb->iocb;
icmd->un.xseq64.bdl.ulpIoTag32 = 0;
icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
icmd->un.xseq64.w5.hcsw.Dfctl = 0;
icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
/* Fill in rest of iocb */
icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
icmd->ulpBdeCount = 1;
icmd->ulpLe = 1;
icmd->ulpClass = CLASS3;
if (phba->sli_rev == LPFC_SLI_REV4) {
/* Do not issue unsol response if oxid not marked as valid */
if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
rc = IOCB_ERROR;
goto issue_ct_rsp_exit;
}
icmd->ulpContext = phba->ct_ctx[tag].rxid;
icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
if (!ndlp) {
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
"2721 ndlp null for oxid %x SID %x\n",
icmd->ulpContext,
phba->ct_ctx[tag].SID);
rc = IOCB_ERROR;
goto issue_ct_rsp_exit;
}
/* Check if the ndlp is active */
if (!NLP_CHK_NODE_ACT(ndlp)) {
rc = IOCB_ERROR;
goto issue_ct_rsp_exit;
}
/* get a reference count so the ndlp doesn't go away while
* we respond
*/
if (!lpfc_nlp_get(ndlp)) {
rc = IOCB_ERROR;
goto issue_ct_rsp_exit;
}
icmd->un.ulpWord[3] =
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
/* The exchange is done, mark the entry as invalid */
phba->ct_ctx[tag].valid = UNSOL_INVALID;
} else
icmd->ulpContext = (ushort) tag;
icmd->ulpTimeout = phba->fc_ratov * 2;
/* Xmit CT response on exchange <xid> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
ctiocb->iocb_cmpl = NULL;
ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
ctiocb->vport = phba->pport;
ctiocb->context1 = dd_data;
ctiocb->context2 = cmp;
ctiocb->context3 = bmp;
ctiocb->context_un.ndlp = ndlp;
ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = ctiocb;
dd_data->context_un.iocb.ndlp = ndlp;
dd_data->context_un.iocb.rmp = NULL;
job->dd_data = dd_data;
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
if (lpfc_readl(phba->HCregaddr, &creg_val)) {
rc = -IOCB_ERROR;
goto issue_ct_rsp_exit;
}
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
}
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
if (rc == IOCB_SUCCESS)
return 0; /* done for now */
issue_ct_rsp_exit:
lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
kfree(dd_data);
no_dd_data:
return rc;
}
/**
* lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
* @job: SEND_MGMT_RESP fc_bsg_job
**/
static int
lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
job->request->rqst_data.h_vendor.vendor_cmd;
struct ulp_bde64 *bpl;
struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
int bpl_entries;
uint32_t tag = mgmt_resp->tag;
unsigned long reqbfrcnt =
(unsigned long)job->request_payload.payload_len;
int rc = 0;
/* in case no data is transferred */
job->reply->reply_payload_rcv_len = 0;
if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
rc = -ERANGE;
goto send_mgmt_rsp_exit;
}
bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!bmp) {
rc = -ENOMEM;
goto send_mgmt_rsp_exit;
}
bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
if (!bmp->virt) {
rc = -ENOMEM;
goto send_mgmt_rsp_free_bmp;
}
INIT_LIST_HEAD(&bmp->list);
bpl = (struct ulp_bde64 *) bmp->virt;
bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
1, bpl, &bpl_entries);
if (!cmp) {
rc = -ENOMEM;
goto send_mgmt_rsp_free_bmp;
}
lpfc_bsg_copy_data(cmp, &job->request_payload,
job->request_payload.payload_len, 1);
rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);
if (rc == IOCB_SUCCESS)
return 0; /* done for now */
rc = -EACCES;
lpfc_free_bsg_buffers(phba, cmp);
send_mgmt_rsp_free_bmp:
if (bmp->virt)
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
kfree(bmp);
send_mgmt_rsp_exit:
/* make error code available to userspace */
job->reply->result = rc;
job->dd_data = NULL;
return rc;
}
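/*
* Buffer layout sketch for the response path above (illustrative): bmp
* holds the buffer pointer list (BPL) of ulp_bde64 entries while cmp
* chains the page sized payload buffers those entries point at:
*
*	bmp->virt (BPL page)            cmp chain (payload)
*	  bpl[0]  ------------------->  buffer 0 (up to 4KB)
*	  bpl[1]  ------------------->  buffer 1
*	  ...                           ...
*
* lpfc_issue_ct_rsp() then references the whole chain through a single
* BUFF_TYPE_BLP_64 BDE sized num_entry * sizeof(struct ulp_bde64).
*/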
/**
* lpfc_bsg_diag_mode_enter - prepare the driver for device diag loopback mode
* @phba: Pointer to HBA context object.
*
* This function is responsible for preparing the driver for diag loopback
* on the device.
*/
static int
lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
{
struct lpfc_vport **vports;
struct Scsi_Host *shost;
struct lpfc_sli *psli;
struct lpfc_sli_ring *pring;
int i = 0;
psli = &phba->sli;
if (!psli)
return -ENODEV;
pring = &psli->ring[LPFC_FCP_RING];
if (!pring)
return -ENODEV;
if ((phba->link_state == LPFC_HBA_ERROR) ||
(psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
return -EACCES;
vports = lpfc_create_vport_work_array(phba);
if (vports) {
for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
scsi_block_requests(shost);
}
lpfc_destroy_vport_work_array(phba, vports);
} else {
shost = lpfc_shost_from_vport(phba->pport);
scsi_block_requests(shost);
}
while (!list_empty(&pring->txcmplq)) {
if (i++ > 500) /* wait up to 5 seconds */
break;
msleep(10);
}
return 0;
}
/**
* lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
* @phba: Pointer to HBA context object.
*
* This function is responsible for the driver's exit processing from
* device diag loopback mode.
*/
static void
lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
{
struct Scsi_Host *shost;
struct lpfc_vport **vports;
int i;
vports = lpfc_create_vport_work_array(phba);
if (vports) {
for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
scsi_unblock_requests(shost);
}
lpfc_destroy_vport_work_array(phba, vports);
} else {
shost = lpfc_shost_from_vport(phba->pport);
scsi_unblock_requests(shost);
}
return;
}
/**
* lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
* @phba: Pointer to HBA context object.
* @job: LPFC_BSG_VENDOR_DIAG_MODE
*
* This function is responsible for placing an sli3 port into diagnostic
* loopback mode in order to perform a diagnostic loopback test.
* All new scsi requests are blocked, a small delay is used to allow the
* scsi requests to complete, then the link is brought down. If the link
* is placed in loopback mode then scsi requests are again allowed
* so the scsi mid-layer doesn't give up on the port.
* All of this is done in-line.
*/
static int
lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
struct diag_mode_set *loopback_mode;
uint32_t link_flags;
uint32_t timeout;
LPFC_MBOXQ_t *pmboxq = NULL;
int mbxstatus = MBX_SUCCESS;
int i = 0;
int rc = 0;
/* no data to return just the return code */
job->reply->reply_payload_rcv_len = 0;
if (job->request_len < sizeof(struct fc_bsg_request) +
sizeof(struct diag_mode_set)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2738 Received DIAG MODE request size:%d "
"below the minimum size:%d\n",
job->request_len,
(int)(sizeof(struct fc_bsg_request) +
sizeof(struct diag_mode_set)));
rc = -EINVAL;
goto job_error;
}
rc = lpfc_bsg_diag_mode_enter(phba);
if (rc)
goto job_error;
/* bring the link to diagnostic mode */
loopback_mode = (struct diag_mode_set *)
job->request->rqst_data.h_vendor.vendor_cmd;
link_flags = loopback_mode->type;
timeout = loopback_mode->timeout * 100;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
rc = -ENOMEM;
goto loopback_mode_exit;
}
memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
pmboxq->u.mb.mbxOwner = OWN_HOST;
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
/* wait for link down before proceeding */
i = 0;
while (phba->link_state != LPFC_LINK_DOWN) {
if (i++ > timeout) {
rc = -ETIMEDOUT;
goto loopback_mode_exit;
}
msleep(10);
}
memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
if (link_flags == INTERNAL_LOOP_BACK)
pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
else
pmboxq->u.mb.un.varInitLnk.link_flags =
FLAGS_TOPOLOGY_MODE_LOOP;
pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
pmboxq->u.mb.mbxOwner = OWN_HOST;
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
LPFC_MBOX_TMO);
if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
rc = -ENODEV;
else {
spin_lock_irq(&phba->hbalock);
phba->link_flag |= LS_LOOPBACK_MODE;
spin_unlock_irq(&phba->hbalock);
/* wait for the link attention interrupt */
msleep(100);
i = 0;
while (phba->link_state != LPFC_HBA_READY) {
if (i++ > timeout) {
rc = -ETIMEDOUT;
break;
}
msleep(10);
}
}
} else
rc = -ENODEV;
loopback_mode_exit:
lpfc_bsg_diag_mode_exit(phba);
/*
* Let SLI layer release mboxq if mbox command completed after timeout.
*/
if (pmboxq && mbxstatus != MBX_TIMEOUT)
mempool_free(pmboxq, phba->mbox_mem_pool);
job_error:
/* make error code available to userspace */
job->reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
job->job_done(job);
return rc;
}
/**
* lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
* @phba: Pointer to HBA context object.
* @diag: Flag to set the link to diagnostic or normal operation state.
*
* This function is responsible for issuing a sli4 mailbox command for setting
* link to either diag state or normal operation state.
*/
static int
lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
{
LPFC_MBOXQ_t *pmboxq;
struct lpfc_mbx_set_link_diag_state *link_diag_state;
uint32_t req_len, alloc_len;
int mbxstatus = MBX_SUCCESS, rc;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq)
return -ENOMEM;
req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
sizeof(struct lpfc_sli4_cfg_mhdr));
alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
req_len, LPFC_SLI4_MBX_EMBED);
if (alloc_len != req_len) {
rc = -ENOMEM;
goto link_diag_state_set_out;
}
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
diag, phba->sli4_hba.lnk_info.lnk_tp,
phba->sli4_hba.lnk_info.lnk_no);
link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
phba->sli4_hba.lnk_info.lnk_no);
bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
phba->sli4_hba.lnk_info.lnk_tp);
if (diag)
bf_set(lpfc_mbx_set_diag_state_diag,
&link_diag_state->u.req, 1);
else
bf_set(lpfc_mbx_set_diag_state_diag,
&link_diag_state->u.req, 0);
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
rc = 0;
else
rc = -ENODEV;
link_diag_state_set_out:
if (pmboxq && (mbxstatus != MBX_TIMEOUT))
mempool_free(pmboxq, phba->mbox_mem_pool);
return rc;
}
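/*
* Usage sketch (illustrative): callers in this file bracket a diagnostic
* operation with this helper, entering the diag state before the test and
* restoring normal link operation afterwards:
*
*	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
*	if (!rc) {
*		(run loopback setup or link diag test)
*		rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
*	}
*/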
/**
* lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
* @phba: Pointer to HBA context object.
*
* This function is responsible for issuing a sli4 mailbox command for setting
* up internal loopback diagnostic.
*/
static int
lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *pmboxq;
uint32_t req_len, alloc_len;
struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
int mbxstatus = MBX_SUCCESS, rc = 0;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq)
return -ENOMEM;
req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
sizeof(struct lpfc_sli4_cfg_mhdr));
alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
req_len, LPFC_SLI4_MBX_EMBED);
if (alloc_len != req_len) {
mempool_free(pmboxq, phba->mbox_mem_pool);
return -ENOMEM;
}
link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
bf_set(lpfc_mbx_set_diag_state_link_num,
&link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
bf_set(lpfc_mbx_set_diag_state_link_type,
&link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"3127 Failed setup loopback mode mailbox "
"command, rc:x%x, status:x%x\n", mbxstatus,
pmboxq->u.mb.mbxStatus);
rc = -ENODEV;
}
if (pmboxq && (mbxstatus != MBX_TIMEOUT))
mempool_free(pmboxq, phba->mbox_mem_pool);
return rc;
}
/**
* lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
* @phba: Pointer to HBA context object.
*
* This function sets up SLI4 FC port registrations for a diagnostic run,
* which includes all the rpis, the vfi, and the vpi.
*/
static int
lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
{
int rc;
if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"3136 Port still had vfi registered: "
"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
phba->pport->fc_myDID, phba->fcf.fcfi,
phba->sli4_hba.vfi_ids[phba->pport->vfi],
phba->vpi_ids[phba->pport->vpi]);
return -EINVAL;
}
rc = lpfc_issue_reg_vfi(phba->pport);
return rc;
}
/**
* lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
* @phba: Pointer to HBA context object.
* @job: LPFC_BSG_VENDOR_DIAG_MODE
*
* This function is responsible for placing an sli4 port into diagnostic
* loopback mode in order to perform a diagnostic loopback test.
*/
static int
lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
struct diag_mode_set *loopback_mode;
uint32_t link_flags, timeout;
int i, rc = 0;
/* no data to return just the return code */
job->reply->reply_payload_rcv_len = 0;
if (job->request_len < sizeof(struct fc_bsg_request) +
sizeof(struct diag_mode_set)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"3011 Received DIAG MODE request size:%d "
"below the minimum size:%d\n",
job->request_len,
(int)(sizeof(struct fc_bsg_request) +
sizeof(struct diag_mode_set)));
rc = -EINVAL;
goto job_error;
}
rc = lpfc_bsg_diag_mode_enter(phba);
if (rc)
goto job_error;
/* indicate we are in loopback diagnostic mode */
spin_lock_irq(&phba->hbalock);
phba->link_flag |= LS_LOOPBACK_MODE;
spin_unlock_irq(&phba->hbalock);
/* reset port to start from scratch */
rc = lpfc_selective_reset(phba);
if (rc)
goto job_error;
/* bring the link to diagnostic mode */
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3129 Bring link to diagnostic state.\n");
loopback_mode = (struct diag_mode_set *)
job->request->rqst_data.h_vendor.vendor_cmd;
link_flags = loopback_mode->type;
timeout = loopback_mode->timeout * 100;
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"3130 Failed to bring link to diagnostic "
"state, rc:x%x\n", rc);
goto loopback_mode_exit;
}
/* wait for link down before proceeding */
i = 0;
while (phba->link_state != LPFC_LINK_DOWN) {
if (i++ > timeout) {
rc = -ETIMEDOUT;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3131 Timeout waiting for link to "
"diagnostic mode, timeout:%d ms\n",
timeout * 10);
goto loopback_mode_exit;
}
msleep(10);
}
/* set up loopback mode */
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3132 Set up loopback mode:x%x\n", link_flags);
if (link_flags == INTERNAL_LOOP_BACK)
rc = lpfc_sli4_bsg_set_internal_loopback(phba);
else if (link_flags == EXTERNAL_LOOP_BACK)
rc = lpfc_hba_init_link_fc_topology(phba,
FLAGS_TOPOLOGY_MODE_PT_PT,
MBX_NOWAIT);
else {
rc = -EINVAL;
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"3141 Loopback mode:x%x not supported\n",
link_flags);
goto loopback_mode_exit;
}
if (!rc) {
/* wait for the link attention interrupt */
msleep(100);
i = 0;
while (phba->link_state < LPFC_LINK_UP) {
if (i++ > timeout) {
rc = -ETIMEDOUT;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3137 Timeout waiting for link up "
"in loopback mode, timeout:%d ms\n",
timeout * 10);
break;
}
msleep(10);
}
}
/* port resource registration setup for loopback diagnostic */
if (!rc) {
/* set up a non-zero myDID for loopback test */
phba->pport->fc_myDID = 1;
rc = lpfc_sli4_diag_fcport_reg_setup(phba);
} else
goto loopback_mode_exit;
if (!rc) {
/* wait for the port ready */
msleep(100);
i = 0;
while (phba->link_state != LPFC_HBA_READY) {
if (i++ > timeout) {
rc = -ETIMEDOUT;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3133 Timeout waiting for port "
"loopback mode ready, timeout:%d ms\n",
timeout * 10);
break;
}
msleep(10);
}
}
loopback_mode_exit:
/* clear loopback diagnostic mode */
if (rc) {
spin_lock_irq(&phba->hbalock);
phba->link_flag &= ~LS_LOOPBACK_MODE;
spin_unlock_irq(&phba->hbalock);
}
lpfc_bsg_diag_mode_exit(phba);
job_error:
/* make error code available to userspace */
job->reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
job->job_done(job);
return rc;
}
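/*
* Sequence sketch of the SLI4 loopback setup above (illustrative summary):
*
*	1. flag LS_LOOPBACK_MODE and reset the port via lpfc_selective_reset()
*	2. lpfc_sli4_bsg_set_link_diag_state(phba, 1) and wait for link down
*	3. select internal loopback or external point-to-point topology
*	4. wait for link up, set fc_myDID to 1, register vfi/vpi/rpi resources
*	5. wait for LPFC_HBA_READY, or time out and clear LS_LOOPBACK_MODE
*/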
/**
* lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
* @job: LPFC_BSG_VENDOR_DIAG_MODE
*
* This function is responsible for checking and dispatching the bsg diag
* command from the user to the proper driver action routine.
*/
static int
lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
{
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
int rc;
shost = job->shost;
if (!shost)
return -ENODEV;
vport = (struct lpfc_vport *)job->shost->hostdata;
if (!vport)
return -ENODEV;
phba = vport->phba;
if (!phba)
return -ENODEV;
if (phba->sli_rev < LPFC_SLI_REV4)
rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_2)
rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
else
rc = -ENODEV;
return rc;
}
/**
* lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
* @job: LPFC_BSG_VENDOR_DIAG_MODE_END
*
* This function is responsible for checking and dispatching the bsg diag
* command from the user to the proper driver action routine.
*/
static int
lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
{
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
struct diag_mode_set *loopback_mode_end_cmd;
uint32_t timeout;
int rc, i;
shost = job->shost;
if (!shost)
return -ENODEV;
vport = (struct lpfc_vport *)job->shost->hostdata;
if (!vport)
return -ENODEV;
phba = vport->phba;
if (!phba)
return -ENODEV;
if (phba->sli_rev < LPFC_SLI_REV4)
return -ENODEV;
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_2)
return -ENODEV;
/* clear loopback diagnostic mode */
spin_lock_irq(&phba->hbalock);
phba->link_flag &= ~LS_LOOPBACK_MODE;
spin_unlock_irq(&phba->hbalock);
loopback_mode_end_cmd = (struct diag_mode_set *)
job->request->rqst_data.h_vendor.vendor_cmd;
timeout = loopback_mode_end_cmd->timeout * 100;
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"3139 Failed to bring link to diagnostic "
"state, rc:x%x\n", rc);
goto loopback_mode_end_exit;
}
/* wait for link down before proceeding */
i = 0;
while (phba->link_state != LPFC_LINK_DOWN) {
if (i++ > timeout) {
rc = -ETIMEDOUT;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3140 Timeout waiting for link to "
"diagnostic mode_end, timeout:%d ms\n",
timeout * 10);
/* there is nothing much we can do here */
break;
}
msleep(10);
}
/* reset port resource registrations */
rc = lpfc_selective_reset(phba);
phba->pport->fc_myDID = 0;
loopback_mode_end_exit:
/* make return code available to userspace */
job->reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
job->job_done(job);
return rc;
}
/**
* lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
* @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
*
* This function performs an SLI4 diag link test request from the user
* application.
*/
static int
lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
{
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
LPFC_MBOXQ_t *pmboxq;
struct sli4_link_diag *link_diag_test_cmd;
uint32_t req_len, alloc_len;
uint32_t timeout;
struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
union lpfc_sli4_cfg_shdr *shdr;
uint32_t shdr_status, shdr_add_status;
struct diag_status *diag_status_reply;
int mbxstatus, rc = 0;
shost = job->shost;
if (!shost) {
rc = -ENODEV;
goto job_error;
}
vport = (struct lpfc_vport *)job->shost->hostdata;
if (!vport) {
rc = -ENODEV;
goto job_error;
}
phba = vport->phba;
if (!phba) {
rc = -ENODEV;
goto job_error;
}
if (phba->sli_rev < LPFC_SLI_REV4) {
rc = -ENODEV;
goto job_error;
}
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_2) {
rc = -ENODEV;
goto job_error;
}
if (job->request_len < sizeof(struct fc_bsg_request) +
sizeof(struct sli4_link_diag)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"3013 Received LINK DIAG TEST request "
" size:%d below the minimum size:%d\n",
job->request_len,
(int)(sizeof(struct fc_bsg_request) +
sizeof(struct sli4_link_diag)));
rc = -EINVAL;
goto job_error;
}
rc = lpfc_bsg_diag_mode_enter(phba);
if (rc)
goto job_error;
link_diag_test_cmd = (struct sli4_link_diag *)
job->request->rqst_data.h_vendor.vendor_cmd;
timeout = link_diag_test_cmd->timeout * 100;
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
if (rc)
goto job_error;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
rc = -ENOMEM;
goto link_diag_test_exit;
}
req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
sizeof(struct lpfc_sli4_cfg_mhdr));
alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
req_len, LPFC_SLI4_MBX_EMBED);
if (alloc_len != req_len) {
rc = -ENOMEM;
goto link_diag_test_exit;
}
run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
phba->sli4_hba.lnk_info.lnk_no);
bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
phba->sli4_hba.lnk_info.lnk_tp);
bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
link_diag_test_cmd->test_id);
bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
link_diag_test_cmd->loops);
bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
link_diag_test_cmd->test_version);
bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
link_diag_test_cmd->error_action);
mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
shdr = (union lpfc_sli4_cfg_shdr *)
&pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || mbxstatus) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"3010 Run link diag test mailbox failed with "
"mbx_status x%x status x%x, add_status x%x\n",
mbxstatus, shdr_status, shdr_add_status);
}
diag_status_reply = (struct diag_status *)
job->reply->reply_data.vendor_reply.vendor_rsp;
if (job->reply_len <
sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"3012 Received Run link diag test reply "
"below minimum size (%d): reply_len:%d\n",
(int)(sizeof(struct fc_bsg_request) +
sizeof(struct diag_status)),
job->reply_len);
rc = -EINVAL;
goto job_error;
}
diag_status_reply->mbox_status = mbxstatus;
diag_status_reply->shdr_status = shdr_status;
diag_status_reply->shdr_add_status = shdr_add_status;
link_diag_test_exit:
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
if (pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
lpfc_bsg_diag_mode_exit(phba);
job_error:
/* make error code available to userspace */
job->reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
job->job_done(job);
return rc;
}
/**
* lpfcdiag_loop_self_reg - obtains a remote port login id
* @phba: Pointer to HBA context object
* @rpi: Pointer to a remote port login id
*
* This function obtains a remote port login id so the diag loopback test
* can send and receive its own unsolicited CT command.
**/
static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
{
LPFC_MBOXQ_t *mbox;
struct lpfc_dmabuf *dmabuff;
int status;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
if (phba->sli_rev < LPFC_SLI_REV4)
status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
(uint8_t *)&phba->pport->fc_sparam,
mbox, *rpi);
else {
*rpi = lpfc_sli4_alloc_rpi(phba);
status = lpfc_reg_rpi(phba, phba->pport->vpi,
phba->pport->fc_myDID,
(uint8_t *)&phba->pport->fc_sparam,
mbox, *rpi);
}
if (status) {
mempool_free(mbox, phba->mbox_mem_pool);
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_sli4_free_rpi(phba, *rpi);
return -ENOMEM;
}
dmabuff = (struct lpfc_dmabuf *) mbox->context1;
mbox->context1 = NULL;
mbox->context2 = NULL;
status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
kfree(dmabuff);
if (status != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_sli4_free_rpi(phba, *rpi);
return -ENODEV;
}
if (phba->sli_rev < LPFC_SLI_REV4)
*rpi = mbox->u.mb.un.varWords[0];
lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
kfree(dmabuff);
mempool_free(mbox, phba->mbox_mem_pool);
return 0;
}
/**
* lpfcdiag_loop_self_unreg - unregister the rpi
* @phba: Pointer to HBA context object
* @rpi: Remote port login id
*
* This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
**/
static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
{
LPFC_MBOXQ_t *mbox;
int status;
/* Allocate mboxq structure */
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox == NULL)
return -ENOMEM;
if (phba->sli_rev < LPFC_SLI_REV4)
lpfc_unreg_login(phba, 0, rpi, mbox);
else
lpfc_unreg_login(phba, phba->pport->vpi,
phba->sli4_hba.rpi_ids[rpi], mbox);
status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
if (status != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
return -EIO;
}
mempool_free(mbox, phba->mbox_mem_pool);
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_sli4_free_rpi(phba, rpi);
return 0;
}
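/*
* Illustrative pairing (a sketch): the loopback test brackets its traffic
* with the two self-registration helpers above so the port can address
* itself through a temporary rpi:
*
*	rc = lpfcdiag_loop_self_reg(phba, &rpi);
*	if (!rc) {
*		(issue loopback CT traffic using rpi)
*		lpfcdiag_loop_self_unreg(phba, rpi);
*	}
*/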
/**
* lpfcdiag_loop_get_xri - obtains the transmit and receive ids
* @phba: Pointer to HBA context object
* @rpi: Remote port login id
* @txxri: Pointer to transmit exchange id
* @rxxri: Pointer to response exchange id
*
* This function obtains the transmit and receive ids required to send
* an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
* flags are used so the unsolicited response handler is able to process
* the ct command sent on the same port.
**/
static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
uint16_t *txxri, uint16_t *rxxri)
{
struct lpfc_bsg_event *evt;
struct lpfc_iocbq *cmdiocbq, *rspiocbq;
IOCB_t *cmd, *rsp;
struct lpfc_dmabuf *dmabuf;
struct ulp_bde64 *bpl = NULL;
struct lpfc_sli_ct_request *ctreq = NULL;
int ret_val = 0;
int time_left;
int iocb_stat = 0;
unsigned long flags;
*txxri = 0;
*rxxri = 0;
evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
SLI_CT_ELX_LOOPBACK);
if (!evt)
return -ENOMEM;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_add(&evt->node, &phba->ct_ev_waiters);
lpfc_bsg_event_ref(evt);
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
cmdiocbq = lpfc_sli_get_iocbq(phba);
rspiocbq = lpfc_sli_get_iocbq(phba);
dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (dmabuf) {
dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
if (dmabuf->virt) {
INIT_LIST_HEAD(&dmabuf->list);
bpl = (struct ulp_bde64 *) dmabuf->virt;
memset(bpl, 0, sizeof(*bpl));
ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
bpl->addrHigh =
le32_to_cpu(putPaddrHigh(dmabuf->phys +
sizeof(*bpl)));
bpl->addrLow =
le32_to_cpu(putPaddrLow(dmabuf->phys +
sizeof(*bpl)));
bpl->tus.f.bdeFlags = 0;
bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
}
}
if (cmdiocbq == NULL || rspiocbq == NULL ||
dmabuf == NULL || bpl == NULL || ctreq == NULL ||
dmabuf->virt == NULL) {
ret_val = -ENOMEM;
goto err_get_xri_exit;
}
cmd = &cmdiocbq->iocb;
rsp = &rspiocbq->iocb;
memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
ctreq->RevisionId.bits.InId = 0;
ctreq->FsType = SLI_CT_ELX_LOOPBACK;
ctreq->FsSubType = 0;
ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
ctreq->CommandResponse.bits.Size = 0;
cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
cmd->un.xseq64.w5.hcsw.Fctl = LA;
cmd->un.xseq64.w5.hcsw.Dfctl = 0;
cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
cmd->ulpBdeCount = 1;
cmd->ulpLe = 1;
cmd->ulpClass = CLASS3;
cmd->ulpContext = rpi;
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
rspiocbq,
(phba->fc_ratov * 2)
+ LPFC_DRVR_TIMEOUT);
if (iocb_stat) {
ret_val = -EIO;
goto err_get_xri_exit;
}
*txxri = rsp->ulpContext;
evt->waiting = 1;
evt->wait_time_stamp = jiffies;
time_left = wait_event_interruptible_timeout(
evt->wq, !list_empty(&evt->events_to_see),
msecs_to_jiffies(1000 *
((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
if (list_empty(&evt->events_to_see))
ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
else {
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_move(evt->events_to_see.prev, &evt->events_to_get);
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
*rxxri = (list_entry(evt->events_to_get.prev,
typeof(struct event_data),
node))->immed_dat;
}
evt->waiting = 0;
err_get_xri_exit:
spin_lock_irqsave(&phba->ct_ev_lock, flags);
lpfc_bsg_event_unref(evt); /* release ref */
lpfc_bsg_event_unref(evt); /* delete */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
if (dmabuf) {
if (dmabuf->virt)
lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
}
if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
lpfc_sli_release_iocbq(phba, cmdiocbq);
if (rspiocbq)
lpfc_sli_release_iocbq(phba, rspiocbq);
return ret_val;
}
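/*
* Timeout note (an observation, not new logic): the wait above, like the
* loopback run later in this file, bounds the sleep at
* msecs_to_jiffies(1000 * ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)).
* A zero return from wait_event_interruptible_timeout() with no queued
* event maps to -ETIMEDOUT; a non-zero return with no event means the
* wait was interrupted and maps to -EINTR.
*/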
/**
* lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
* @phba: Pointer to HBA context object
*
* This function allocates a BSG_MBOX_SIZE (4KB) page sized dma buffer and
* returns the pointer to the buffer.
**/
static struct lpfc_dmabuf *
lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
{
struct lpfc_dmabuf *dmabuf;
struct pci_dev *pcidev = phba->pcidev;
/* allocate dma buffer struct */
dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!dmabuf)
return NULL;
INIT_LIST_HEAD(&dmabuf->list);
/* now, allocate dma buffer */
dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
&(dmabuf->phys), GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
return NULL;
}
memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
return dmabuf;
}
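/*
* A minimal pairing sketch (illustrative): every buffer obtained from
* lpfc_bsg_dma_page_alloc() is returned through lpfc_bsg_dma_page_free()
* below, either directly or via lpfc_bsg_dma_page_list_free():
*
*	dmabuf = lpfc_bsg_dma_page_alloc(phba);
*	if (dmabuf) {
*		(use dmabuf->virt / dmabuf->phys, BSG_MBOX_SIZE bytes)
*		lpfc_bsg_dma_page_free(phba, dmabuf);
*	}
*/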
/**
* lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
* @phba: Pointer to HBA context object.
* @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
*
* This routine simply frees a dma buffer and its associated buffer
* descriptor referred by @dmabuf.
**/
static void
lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
{
struct pci_dev *pcidev = phba->pcidev;
if (!dmabuf)
return;
if (dmabuf->virt)
dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
return;
}
/**
* lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
* @phba: Pointer to HBA context object.
* @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
*
* This routine simply frees all dma buffers and their associated buffer
* descriptors referred by @dmabuf_list.
**/
static void
lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
struct list_head *dmabuf_list)
{
struct lpfc_dmabuf *dmabuf, *next_dmabuf;
if (list_empty(dmabuf_list))
return;
list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
list_del_init(&dmabuf->list);
lpfc_bsg_dma_page_free(phba, dmabuf);
}
return;
}
/**
* diag_cmd_data_alloc - fills in a bde struct with dma buffers
* @phba: Pointer to HBA context object
* @bpl: Pointer to 64 bit bde structure
* @size: Number of bytes to process
* @nocopydata: Flag indicating that no data will be copied into the buffers
*
* This function allocates page size buffers, chains them into an
* lpfc_dmabufext list, and fills in one 64-bit BDE per buffer. When
* @nocopydata is clear the buffers are zeroed and flagged for input;
* otherwise they are left as-is and synced for the device. The chained
* list of page size buffers is returned.
**/
static struct lpfc_dmabufext *
diag_cmd_data_alloc(struct lpfc_hba *phba,
struct ulp_bde64 *bpl, uint32_t size,
int nocopydata)
{
struct lpfc_dmabufext *mlist = NULL;
struct lpfc_dmabufext *dmp;
int cnt, offset = 0, i = 0;
struct pci_dev *pcidev;
pcidev = phba->pcidev;
while (size) {
/* We get chunks of 4K */
if (size > BUF_SZ_4K)
cnt = BUF_SZ_4K;
else
cnt = size;
/* allocate struct lpfc_dmabufext buffer header */
dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
if (!dmp)
goto out;
INIT_LIST_HEAD(&dmp->dma.list);
/* Queue it to a linked list */
if (mlist)
list_add_tail(&dmp->dma.list, &mlist->dma.list);
else
mlist = dmp;
/* allocate buffer */
dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
cnt,
&(dmp->dma.phys),
GFP_KERNEL);
if (!dmp->dma.virt)
goto out;
dmp->size = cnt;
if (nocopydata) {
bpl->tus.f.bdeFlags = 0;
pci_dma_sync_single_for_device(phba->pcidev,
dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
} else {
memset((uint8_t *)dmp->dma.virt, 0, cnt);
bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
}
/* build buffer ptr list for IOCB */
bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
bpl->tus.f.bdeSize = (ushort) cnt;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
bpl++;
i++;
offset += cnt;
size -= cnt;
}
mlist->flag = i;
return mlist;
out:
diag_cmd_data_free(phba, mlist);
return NULL;
}
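/*
* Worked example (illustrative): for size = 10000 with BUF_SZ_4K = 4096,
* the loop above allocates three buffers of 4096, 4096 and 1808 bytes,
* fills three BDE entries, and stores the BDE count in mlist->flag (3
* here), which callers read back as num_bde.
*/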
/**
* lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
* @phba: Pointer to HBA context object
* @rxxri: Receive exchange id
* @len: Number of data bytes
*
* This function allocates and posts a data buffer of sufficient size to receive
* an unsolicited CT command.
**/
static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
size_t len)
{
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
struct lpfc_iocbq *cmdiocbq;
IOCB_t *cmd = NULL;
struct list_head head, *curr, *next;
struct lpfc_dmabuf *rxbmp;
struct lpfc_dmabuf *dmp;
struct lpfc_dmabuf *mp[2] = {NULL, NULL};
struct ulp_bde64 *rxbpl = NULL;
uint32_t num_bde;
struct lpfc_dmabufext *rxbuffer = NULL;
int ret_val = 0;
int iocb_stat;
int i = 0;
cmdiocbq = lpfc_sli_get_iocbq(phba);
rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (rxbmp != NULL) {
rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
if (rxbmp->virt) {
INIT_LIST_HEAD(&rxbmp->list);
rxbpl = (struct ulp_bde64 *) rxbmp->virt;
rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
}
}
if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
ret_val = -ENOMEM;
goto err_post_rxbufs_exit;
}
/* Queue buffers for the receive exchange */
num_bde = (uint32_t)rxbuffer->flag;
dmp = &rxbuffer->dma;
cmd = &cmdiocbq->iocb;
i = 0;
INIT_LIST_HEAD(&head);
list_add_tail(&head, &dmp->list);
list_for_each_safe(curr, next, &head) {
mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
list_del(curr);
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
cmd->un.quexri64cx.buff.bde.addrHigh =
putPaddrHigh(mp[i]->phys);
cmd->un.quexri64cx.buff.bde.addrLow =
putPaddrLow(mp[i]->phys);
cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
((struct lpfc_dmabufext *)mp[i])->size;
cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
cmd->ulpCommand = CMD_QUE_XRI64_CX;
cmd->ulpPU = 0;
cmd->ulpLe = 1;
cmd->ulpBdeCount = 1;
cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
} else {
cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
cmd->un.cont64[i].tus.f.bdeSize =
((struct lpfc_dmabufext *)mp[i])->size;
cmd->ulpBdeCount = ++i;
if ((--num_bde > 0) && (i < 2))
continue;
cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
cmd->ulpLe = 1;
}
cmd->ulpClass = CLASS3;
cmd->ulpContext = rxxri;
iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
0);
if (iocb_stat == IOCB_ERROR) {
diag_cmd_data_free(phba,
(struct lpfc_dmabufext *)mp[0]);
if (mp[1])
diag_cmd_data_free(phba,
(struct lpfc_dmabufext *)mp[1]);
dmp = list_entry(next, struct lpfc_dmabuf, list);
ret_val = -EIO;
goto err_post_rxbufs_exit;
}
lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
if (mp[1]) {
lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
mp[1] = NULL;
}
/* The iocb was freed by lpfc_sli_issue_iocb */
cmdiocbq = lpfc_sli_get_iocbq(phba);
if (!cmdiocbq) {
dmp = list_entry(next, struct lpfc_dmabuf, list);
ret_val = -EIO;
goto err_post_rxbufs_exit;
}
cmd = &cmdiocbq->iocb;
i = 0;
}
list_del(&head);
err_post_rxbufs_exit:
if (rxbmp) {
if (rxbmp->virt)
lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
kfree(rxbmp);
}
if (cmdiocbq)
lpfc_sli_release_iocbq(phba, cmdiocbq);
return ret_val;
}
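/*
* Posting pattern (a sketch of the loop above): in HBQ mode each buffer is
* posted with its own CMD_QUE_XRI64_CX iocb, while in non-HBQ mode up to
* two buffers are packed into the cont64[] array of one
* CMD_QUE_XRI_BUF64_CX iocb before it is issued; either way the buffers
* are then tracked with lpfc_sli_ringpostbuf_put() for later retrieval.
*/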
/**
* lpfc_bsg_diag_loopback_run - run loopback on a port by issuing a ct cmd to itself
* @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
*
* This function receives a user data buffer to be transmitted and received on
* the same port, the link must be up and in loopback mode prior
* to being called.
* 1. A kernel buffer is allocated to copy the user data into.
* 2. The port registers with "itself".
* 3. The transmit and receive exchange ids are obtained.
* 4. The receive exchange id is posted.
* 5. A new els loopback event is created.
* 6. The command and response iocbs are allocated.
* 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
*
* This function is meant to be called n times while the port is in loopback
* so it is the app's responsibility to issue a reset to take the port out
* of loopback mode.
**/
static int
lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct diag_mode_test *diag_mode;
struct lpfc_bsg_event *evt;
struct event_data *evdat;
struct lpfc_sli *psli = &phba->sli;
uint32_t size;
uint32_t full_size;
size_t segment_len = 0, segment_offset = 0, current_offset = 0;
uint16_t rpi = 0;
struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
IOCB_t *cmd, *rsp = NULL;
struct lpfc_sli_ct_request *ctreq;
struct lpfc_dmabuf *txbmp;
struct ulp_bde64 *txbpl = NULL;
struct lpfc_dmabufext *txbuffer = NULL;
struct list_head head;
struct lpfc_dmabuf *curr;
uint16_t txxri = 0, rxxri;
uint32_t num_bde;
uint8_t *ptr = NULL, *rx_databuf = NULL;
int rc = 0;
int time_left;
int iocb_stat;
unsigned long flags;
void *dataout = NULL;
uint32_t total_mem;
/* in case no data is returned, just return the return code */
job->reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2739 Received DIAG TEST request below minimum "
"size\n");
rc = -EINVAL;
goto loopback_test_exit;
}
if (job->request_payload.payload_len !=
job->reply_payload.payload_len) {
rc = -EINVAL;
goto loopback_test_exit;
}
diag_mode = (struct diag_mode_test *)
job->request->rqst_data.h_vendor.vendor_cmd;
if ((phba->link_state == LPFC_HBA_ERROR) ||
(psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
(!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
rc = -EACCES;
goto loopback_test_exit;
}
if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
rc = -EACCES;
goto loopback_test_exit;
}
size = job->request_payload.payload_len;
full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
rc = -ERANGE;
goto loopback_test_exit;
}
if (full_size >= BUF_SZ_4K) {
/*
* Allocate memory for ioctl data. If buffer is bigger than 64k,
* then we allocate 64k and re-use that buffer over and over to
* xfer the whole block. This is because Linux kernel has a
* problem allocating more than 120k of kernel space memory. Saw
* problem with GET_FCPTARGETMAPPING...
*/
if (size <= (64 * 1024))
total_mem = full_size;
else
total_mem = 64 * 1024;
} else
/* Allocate memory for ioctl data */
total_mem = BUF_SZ_4K;
dataout = kmalloc(total_mem, GFP_KERNEL);
if (dataout == NULL) {
rc = -ENOMEM;
goto loopback_test_exit;
}
ptr = dataout;
ptr += ELX_LOOPBACK_HEADER_SZ;
sg_copy_to_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
ptr, size);
rc = lpfcdiag_loop_self_reg(phba, &rpi);
if (rc)
goto loopback_test_exit;
if (phba->sli_rev < LPFC_SLI_REV4) {
rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
if (rc) {
lpfcdiag_loop_self_unreg(phba, rpi);
goto loopback_test_exit;
}
rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
if (rc) {
lpfcdiag_loop_self_unreg(phba, rpi);
goto loopback_test_exit;
}
}
evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
SLI_CT_ELX_LOOPBACK);
if (!evt) {
lpfcdiag_loop_self_unreg(phba, rpi);
rc = -ENOMEM;
goto loopback_test_exit;
}
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_add(&evt->node, &phba->ct_ev_waiters);
lpfc_bsg_event_ref(evt);
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
cmdiocbq = lpfc_sli_get_iocbq(phba);
if (phba->sli_rev < LPFC_SLI_REV4)
rspiocbq = lpfc_sli_get_iocbq(phba);
txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (txbmp) {
txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
if (txbmp->virt) {
INIT_LIST_HEAD(&txbmp->list);
txbpl = (struct ulp_bde64 *) txbmp->virt;
txbuffer = diag_cmd_data_alloc(phba,
txbpl, full_size, 0);
}
}
if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
rc = -ENOMEM;
goto err_loopback_test_exit;
}
if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
rc = -ENOMEM;
goto err_loopback_test_exit;
}
cmd = &cmdiocbq->iocb;
if (phba->sli_rev < LPFC_SLI_REV4)
rsp = &rspiocbq->iocb;
INIT_LIST_HEAD(&head);
list_add_tail(&head, &txbuffer->dma.list);
list_for_each_entry(curr, &head, list) {
segment_len = ((struct lpfc_dmabufext *)curr)->size;
if (current_offset == 0) {
ctreq = curr->virt;
memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
ctreq->RevisionId.bits.InId = 0;
ctreq->FsType = SLI_CT_ELX_LOOPBACK;
ctreq->FsSubType = 0;
ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
ctreq->CommandResponse.bits.Size = size;
segment_offset = ELX_LOOPBACK_HEADER_SZ;
} else
segment_offset = 0;
BUG_ON(segment_offset >= segment_len);
memcpy(curr->virt + segment_offset,
ptr + current_offset,
segment_len - segment_offset);
current_offset += segment_len - segment_offset;
BUG_ON(current_offset > size);
}
list_del(&head);
/* Build the XMIT_SEQUENCE iocb */
num_bde = (uint32_t)txbuffer->flag;
cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
cmd->un.xseq64.w5.hcsw.Dfctl = 0;
cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
cmd->ulpBdeCount = 1;
cmd->ulpLe = 1;
cmd->ulpClass = CLASS3;
if (phba->sli_rev < LPFC_SLI_REV4) {
cmd->ulpContext = txxri;
} else {
cmd->un.xseq64.bdl.ulpIoTag32 = 0;
cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
cmdiocbq->context3 = txbmp;
cmdiocbq->sli4_xritag = NO_XRI;
cmd->unsli3.rcvsli3.ox_id = 0xffff;
}
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
rspiocbq, (phba->fc_ratov * 2) +
LPFC_DRVR_TIMEOUT);
if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) &&
(rsp->ulpStatus != IOCB_SUCCESS))) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"3126 Failed loopback test issue iocb: "
"iocb_stat:x%x\n", iocb_stat);
rc = -EIO;
goto err_loopback_test_exit;
}
evt->waiting = 1;
time_left = wait_event_interruptible_timeout(
evt->wq, !list_empty(&evt->events_to_see),
msecs_to_jiffies(1000 *
((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
evt->waiting = 0;
if (list_empty(&evt->events_to_see)) {
rc = (time_left) ? -EINTR : -ETIMEDOUT;
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"3125 Not receiving unsolicited event, "
"rc:x%x\n", rc);
} else {
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_move(evt->events_to_see.prev, &evt->events_to_get);
evdat = list_entry(evt->events_to_get.prev,
typeof(*evdat), node);
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
rx_databuf = evdat->data;
if (evdat->len != full_size) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"1603 Loopback test did not receive expected "
"data length. actual length 0x%x expected "
"length 0x%x\n",
evdat->len, full_size);
rc = -EIO;
} else if (rx_databuf == NULL)
rc = -EIO;
else {
rc = IOCB_SUCCESS;
/* skip over elx loopback header */
rx_databuf += ELX_LOOPBACK_HEADER_SZ;
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
rx_databuf, size);
job->reply->reply_payload_rcv_len = size;
}
}
err_loopback_test_exit:
lpfcdiag_loop_self_unreg(phba, rpi);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
lpfc_bsg_event_unref(evt); /* release ref */
lpfc_bsg_event_unref(evt); /* delete */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
if (cmdiocbq != NULL)
lpfc_sli_release_iocbq(phba, cmdiocbq);
if (rspiocbq != NULL)
lpfc_sli_release_iocbq(phba, rspiocbq);
if (txbmp != NULL) {
if (txbpl != NULL) {
if (txbuffer != NULL)
diag_cmd_data_free(phba, txbuffer);
lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
}
kfree(txbmp);
}
loopback_test_exit:
kfree(dataout);
/* make error code available to userspace */
job->reply->result = rc;
job->dd_data = NULL;
/* complete the job back to userspace if no error */
if (rc == IOCB_SUCCESS)
job->job_done(job);
return rc;
}
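/*
* Illustrative sketch (not driver code): how the transmit payload is
* framed across the DMA segments in the copy loop above. The first
* segment carries the ELX loopback header followed by data; every
* later segment carries data only. HDR_SZ and seg_fill() are
* hypothetical stand-ins, and HDR_SZ < seg_len is assumed.
*/
#if 0
#include <stdint.h>
#include <string.h>
#define HDR_SZ 32 /* stand-in for ELX_LOOPBACK_HEADER_SZ */
static size_t seg_fill(uint8_t *seg, size_t seg_len, int first,
const uint8_t *src, size_t remaining)
{
size_t off = first ? HDR_SZ : 0; /* header only in segment 0 */
size_t n = seg_len - off;
if (n > remaining)
n = remaining;
if (first)
memset(seg, 0, HDR_SZ); /* caller fills the real header fields */
memcpy(seg + off, src, n);
return n; /* bytes of payload consumed from src */
}
#endif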
/**
* lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
* @job: GET_DFC_REV fc_bsg_job
**/
static int
lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct get_mgmt_rev *event_req;
struct get_mgmt_rev_reply *event_reply;
int rc = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2740 Received GET_DFC_REV request below "
"minimum size\n");
rc = -EINVAL;
goto job_error;
}
event_req = (struct get_mgmt_rev *)
job->request->rqst_data.h_vendor.vendor_cmd;
event_reply = (struct get_mgmt_rev_reply *)
job->reply->reply_data.vendor_reply.vendor_rsp;
if (job->reply_len <
sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2741 Received GET_DFC_REV reply below "
"minimum size\n");
rc = -EINVAL;
goto job_error;
}
event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
job_error:
job->reply->result = rc;
if (rc == 0)
job->job_done(job);
return rc;
}
/**
* lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
* @phba: Pointer to HBA context object.
* @pmboxq: Pointer to mailbox command.
*
* This is completion handler function for mailbox commands issued from
* lpfc_bsg_issue_mbox function. This function is called by the
* mailbox event handler function with no lock held. This function
* will wake up thread waiting on the wait queue pointed by context1
* of the mailbox.
**/
void
lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
struct fc_bsg_job *job;
uint32_t size;
unsigned long flags;
uint8_t *pmb, *pmb_buf;
dd_data = pmboxq->context1;
/*
* The outgoing buffer is readily referenced from the dma buffer;
* we just need to get the header part from the mailboxq structure.
*/
pmb = (uint8_t *)&pmboxq->u.mb;
pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* Copy the mailbox data to the job if it is still active */
if (job) {
size = job->reply_payload.payload_len;
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmb_buf, size);
}
dd_data->set_job = NULL;
mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
kfree(dd_data);
/* Complete the job if the job is still active */
if (job) {
job->reply->result = 0;
job->job_done(job);
}
return;
}
/**
* lpfc_bsg_check_cmd_access - test for a supported mailbox command
* @phba: Pointer to HBA context object.
* @mb: Pointer to a mailbox object.
* @vport: Pointer to a vport object.
*
* Some commands require the port to be offline, some may not be called from
* the application.
**/
static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
MAILBOX_t *mb, struct lpfc_vport *vport)
{
/* return negative error values for bsg job */
switch (mb->mbxCommand) {
/* Offline only */
case MBX_INIT_LINK:
case MBX_DOWN_LINK:
case MBX_CONFIG_LINK:
case MBX_CONFIG_RING:
case MBX_RESET_RING:
case MBX_UNREG_LOGIN:
case MBX_CLEAR_LA:
case MBX_DUMP_CONTEXT:
case MBX_RUN_DIAGS:
case MBX_RESTART:
case MBX_SET_MASK:
if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2743 Command 0x%x is illegal in on-line "
"state\n",
mb->mbxCommand);
return -EPERM;
}
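/* Fall through: once the offline-only check above passes, these
* commands are treated like the always-permitted group below.
*/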
case MBX_WRITE_NV:
case MBX_WRITE_VPARMS:
case MBX_LOAD_SM:
case MBX_READ_NV:
case MBX_READ_CONFIG:
case MBX_READ_RCONFIG:
case MBX_READ_STATUS:
case MBX_READ_XRI:
case MBX_READ_REV:
case MBX_READ_LNK_STAT:
case MBX_DUMP_MEMORY:
case MBX_DOWN_LOAD:
case MBX_UPDATE_CFG:
case MBX_KILL_BOARD:
case MBX_LOAD_AREA:
case MBX_LOAD_EXP_ROM:
case MBX_BEACON:
case MBX_DEL_LD_ENTRY:
case MBX_SET_DEBUG:
case MBX_WRITE_WWN:
case MBX_SLI4_CONFIG:
case MBX_READ_EVENT_LOG:
case MBX_READ_EVENT_LOG_STATUS:
case MBX_WRITE_EVENT_LOG:
case MBX_PORT_CAPABILITIES:
case MBX_PORT_IOV_CONTROL:
case MBX_RUN_BIU_DIAG64:
break;
case MBX_SET_VARIABLE:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"1226 mbox: set_variable 0x%x, 0x%x\n",
mb->un.varWords[0],
mb->un.varWords[1]);
if ((mb->un.varWords[0] == SETVAR_MLOMNT)
&& (mb->un.varWords[1] == 1)) {
phba->wait_4_mlo_maint_flg = 1;
} else if (mb->un.varWords[0] == SETVAR_MLORST) {
spin_lock_irq(&phba->hbalock);
phba->link_flag &= ~LS_LOOPBACK_MODE;
spin_unlock_irq(&phba->hbalock);
phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
}
break;
case MBX_READ_SPARM64:
case MBX_READ_TOPOLOGY:
case MBX_REG_LOGIN:
case MBX_REG_LOGIN64:
case MBX_CONFIG_PORT:
case MBX_RUN_BIU_DIAG:
default:
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2742 Unknown Command 0x%x\n",
mb->mbxCommand);
return -EPERM;
}
return 0; /* ok */
}
/**
* lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
* @phba: Pointer to HBA context object.
*
* This routine cleans up and resets the BSG handling of a multi-buffer
* mailbox command session.
**/
static void
lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
{
if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
return;
/* free all memory, including dma buffers */
lpfc_bsg_dma_page_list_free(phba,
&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
/* multi-buffer write mailbox command pass-through complete */
memset((char *)&phba->mbox_ext_buf_ctx, 0,
sizeof(struct lpfc_mbox_ext_buf_ctx));
INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
return;
}
/**
* lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
* @phba: Pointer to HBA context object.
* @pmboxq: Pointer to mailbox command.
*
* This routine handles BSG job completion for mailbox commands with
* multiple external buffers.
**/
static struct fc_bsg_job *
lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
struct fc_bsg_job *job;
uint8_t *pmb, *pmb_buf;
unsigned long flags;
uint32_t size;
int rc = 0;
struct lpfc_dmabuf *dmabuf;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
uint8_t *pmbx;
dd_data = pmboxq->context1;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/*
* The outgoing buffer is readily referenced from the dma buffer;
* we just need to get the header part from the mailboxq structure.
*/
pmb = (uint8_t *)&pmboxq->u.mb;
pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
/* Copy the byte swapped response mailbox back to the user */
memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
/* if there is any non-embedded extended data copy that too */
dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
if (!bsg_bf_get(lpfc_mbox_hdr_emb,
&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
pmbx = (uint8_t *)dmabuf->virt;
/* byte swap the extended data following the mailbox command */
lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
&pmbx[sizeof(MAILBOX_t)],
sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
}
/* Complete the job if the job is still active */
if (job) {
size = job->reply_payload.payload_len;
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmb_buf, size);
/* result for success */
job->reply->result = 0;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2937 SLI_CONFIG ext-buffer mailbox command "
"(x%x/x%x) complete bsg job done, bsize:%d\n",
phba->mbox_ext_buf_ctx.nembType,
phba->mbox_ext_buf_ctx.mboxType, size);
lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
phba->mbox_ext_buf_ctx.nembType,
phba->mbox_ext_buf_ctx.mboxType,
dma_ebuf, sta_pos_addr,
phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2938 SLI_CONFIG ext-buffer maibox "
"command (x%x/x%x) failure, rc:x%x\n",
phba->mbox_ext_buf_ctx.nembType,
phba->mbox_ext_buf_ctx.mboxType, rc);
}
/* state change */
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
kfree(dd_data);
return job;
}
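/*
* Illustrative sketch (not driver code): the in-place byte swap done
* above by calling lpfc_sli_pcimem_bcopy() with source == destination.
* A minimal stand-alone equivalent, assuming the data is an array of
* 32-bit words:
*/
#if 0
#include <stdint.h>
static void swab32_inplace(uint32_t *p, unsigned int bytes)
{
unsigned int i;
for (i = 0; i < bytes / 4; i++)
p[i] = ((p[i] & 0x000000ffu) << 24) |
((p[i] & 0x0000ff00u) << 8) |
((p[i] & 0x00ff0000u) >> 8) |
((p[i] & 0xff000000u) >> 24);
}
#endif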
/**
* lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
* @phba: Pointer to HBA context object.
* @pmboxq: Pointer to mailbox command.
*
* This is completion handler function for mailbox read commands with multiple
* external buffers.
**/
static void
lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct fc_bsg_job *job;
job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
/* handle the BSG job with mailbox command */
if (!job)
pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2939 SLI_CONFIG ext-buffer rd maibox command "
"complete, ctxState:x%x, mbxStatus:x%x\n",
phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
lpfc_bsg_mbox_ext_session_reset(phba);
/* free base driver mailbox structure memory */
mempool_free(pmboxq, phba->mbox_mem_pool);
/* if the job is still active, call job done */
if (job)
job->job_done(job);
return;
}
/**
* lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
* @phba: Pointer to HBA context object.
* @pmboxq: Pointer to mailbox command.
*
* This is completion handler function for mailbox write commands with multiple
* external buffers.
**/
static void
lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct fc_bsg_job *job;
job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
/* handle the BSG job with the mailbox command */
if (!job)
pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2940 SLI_CONFIG ext-buffer wr maibox command "
"complete, ctxState:x%x, mbxStatus:x%x\n",
phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
/* free all memory, including dma buffers */
mempool_free(pmboxq, phba->mbox_mem_pool);
lpfc_bsg_mbox_ext_session_reset(phba);
/* if the job is still active, call job done */
if (job)
job->job_done(job);
return;
}
static void
lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
struct lpfc_dmabuf *ext_dmabuf)
{
struct lpfc_sli_config_mbox *sli_cfg_mbx;
/* pointer to the start of mailbox command */
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
if (nemb_tp == nemb_mse) {
if (index == 0) {
sli_cfg_mbx->un.sli_config_emb0_subsys.
mse[index].pa_hi =
putPaddrHigh(mbx_dmabuf->phys +
sizeof(MAILBOX_t));
sli_cfg_mbx->un.sli_config_emb0_subsys.
mse[index].pa_lo =
putPaddrLow(mbx_dmabuf->phys +
sizeof(MAILBOX_t));
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2943 SLI_CONFIG(mse)[%d], "
"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
index,
sli_cfg_mbx->un.sli_config_emb0_subsys.
mse[index].buf_len,
sli_cfg_mbx->un.sli_config_emb0_subsys.
mse[index].pa_hi,
sli_cfg_mbx->un.sli_config_emb0_subsys.
mse[index].pa_lo);
} else {
sli_cfg_mbx->un.sli_config_emb0_subsys.
mse[index].pa_hi =
putPaddrHigh(ext_dmabuf->phys);
sli_cfg_mbx->un.sli_config_emb0_subsys.
mse[index].pa_lo =
putPaddrLow(ext_dmabuf->phys);
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2944 SLI_CONFIG(mse)[%d], "
"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
index,
sli_cfg_mbx->un.sli_config_emb0_subsys.
mse[index].buf_len,
sli_cfg_mbx->un.sli_config_emb0_subsys.
mse[index].pa_hi,
sli_cfg_mbx->un.sli_config_emb0_subsys.
mse[index].pa_lo);
}
} else {
if (index == 0) {
sli_cfg_mbx->un.sli_config_emb1_subsys.
hbd[index].pa_hi =
putPaddrHigh(mbx_dmabuf->phys +
sizeof(MAILBOX_t));
sli_cfg_mbx->un.sli_config_emb1_subsys.
hbd[index].pa_lo =
putPaddrLow(mbx_dmabuf->phys +
sizeof(MAILBOX_t));
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3007 SLI_CONFIG(hbd)[%d], "
"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
index,
bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
&sli_cfg_mbx->un.
sli_config_emb1_subsys.hbd[index]),
sli_cfg_mbx->un.sli_config_emb1_subsys.
hbd[index].pa_hi,
sli_cfg_mbx->un.sli_config_emb1_subsys.
hbd[index].pa_lo);
} else {
sli_cfg_mbx->un.sli_config_emb1_subsys.
hbd[index].pa_hi =
putPaddrHigh(ext_dmabuf->phys);
sli_cfg_mbx->un.sli_config_emb1_subsys.
hbd[index].pa_lo =
putPaddrLow(ext_dmabuf->phys);
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3008 SLI_CONFIG(hbd)[%d], "
"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
index,
bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
&sli_cfg_mbx->un.
sli_config_emb1_subsys.hbd[index]),
sli_cfg_mbx->un.sli_config_emb1_subsys.
hbd[index].pa_hi,
sli_cfg_mbx->un.sli_config_emb1_subsys.
hbd[index].pa_lo);
}
}
return;
}
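/*
* Illustrative sketch (not driver code): the pa_hi/pa_lo fields set
* above are simply a 64-bit DMA address split into two 32-bit words,
* which is what putPaddrHigh()/putPaddrLow() provide on a 64-bit
* platform. A minimal stand-alone version:
*/
#if 0
#include <stdint.h>
static inline uint32_t pa_high(uint64_t addr) { return (uint32_t)(addr >> 32); }
static inline uint32_t pa_low(uint64_t addr) { return (uint32_t)addr; }
#endif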
/**
* lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
* @phba: Pointer to HBA context object.
* @job: Pointer to the fc_bsg_job object.
* @nemb_tp: Enumerate of non-embedded mailbox command type.
* @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine performs the SLI_CONFIG (0x9B) read mailbox command operation
* with non-embedded external buffers.
**/
static int
lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
enum nemb_type nemb_tp,
struct lpfc_dmabuf *dmabuf)
{
struct lpfc_sli_config_mbox *sli_cfg_mbx;
struct dfc_mbox_req *mbox_req;
struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
uint32_t ext_buf_cnt, ext_buf_index;
struct lpfc_dmabuf *ext_dmabuf = NULL;
struct bsg_job_data *dd_data = NULL;
LPFC_MBOXQ_t *pmboxq = NULL;
MAILBOX_t *pmb;
uint8_t *pmbx;
int rc, i;
mbox_req =
(struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
/* pointer to the start of mailbox command */
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
if (nemb_tp == nemb_mse) {
ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2945 Handled SLI_CONFIG(mse) rd, "
"ext_buf_cnt(%d) out of range(%d)\n",
ext_buf_cnt,
LPFC_MBX_SLI_CONFIG_MAX_MSE);
rc = -ERANGE;
goto job_error;
}
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2941 Handled SLI_CONFIG(mse) rd, "
"ext_buf_cnt:%d\n", ext_buf_cnt);
} else {
/* sanity check on interface type for support */
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_2) {
rc = -ENODEV;
goto job_error;
}
/* nemb_tp == nemb_hbd */
ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2946 Handled SLI_CONFIG(hbd) rd, "
"ext_buf_cnt(%d) out of range(%d)\n",
ext_buf_cnt,
LPFC_MBX_SLI_CONFIG_MAX_HBD);
rc = -ERANGE;
goto job_error;
}
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2942 Handled SLI_CONFIG(hbd) rd, "
"ext_buf_cnt:%d\n", ext_buf_cnt);
}
/* before dma descriptor setup */
lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
sta_pre_addr, dmabuf, ext_buf_cnt);
/* reject non-embedded mailbox command with no external buffer */
if (ext_buf_cnt == 0) {
rc = -EPERM;
goto job_error;
} else if (ext_buf_cnt > 1) {
/* additional external read buffers */
for (i = 1; i < ext_buf_cnt; i++) {
ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
if (!ext_dmabuf) {
rc = -ENOMEM;
goto job_error;
}
list_add_tail(&ext_dmabuf->list,
&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
}
}
/* bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
rc = -ENOMEM;
goto job_error;
}
/* mailbox command structure for base driver */
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
rc = -ENOMEM;
goto job_error;
}
memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
/* for the first external buffer */
lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
/* for the rest of external buffer descriptors if any */
if (ext_buf_cnt > 1) {
ext_buf_index = 1;
list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
&phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
ext_buf_index, dmabuf,
curr_dmabuf);
ext_buf_index++;
}
}
/* after dma descriptor setup */
lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
sta_pos_addr, dmabuf, ext_buf_cnt);
/* construct base driver mbox command */
pmb = &pmboxq->u.mb;
pmbx = (uint8_t *)dmabuf->virt;
memcpy(pmb, pmbx, sizeof(*pmb));
pmb->mbxOwner = OWN_HOST;
pmboxq->vport = phba->pport;
/* multi-buffer handling context */
phba->mbox_ext_buf_ctx.nembType = nemb_tp;
phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
/* callback for multi-buffer read mailbox command */
pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
/* context fields to callback function */
pmboxq->context1 = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
job->dd_data = dd_data;
/* state change */
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
/*
* Non-embedded mailbox subcommand data gets byte swapped here because
* the lower level driver code only does the first 64 mailbox words.
*/
if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
(nemb_tp == nemb_mse))
lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
&pmbx[sizeof(MAILBOX_t)],
sli_cfg_mbx->un.sli_config_emb0_subsys.
mse[0].buf_len);
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2947 Issued SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
return SLI_CONFIG_HANDLED;
}
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2948 Failed to issue SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
rc = -EPIPE;
job_error:
if (pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
lpfc_bsg_dma_page_list_free(phba,
&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
kfree(dd_data);
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
return rc;
}
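/*
* Illustrative sketch (not driver code): the allocate-or-unwind pattern
* used above for the additional external read buffers. Buffer 0 lives
* in the mailbox page itself, so only cnt - 1 extra buffers are
* allocated; on any failure everything already on the list is freed.
* buf_t, buf_alloc() and buf_free() are hypothetical stand-ins.
*/
#if 0
#include <stdlib.h>
typedef struct buf { struct buf *next; } buf_t;
static buf_t *buf_alloc(void) { return calloc(1, sizeof(buf_t)); }
static void buf_free(buf_t *b) { free(b); }
static int alloc_ext_bufs(buf_t **head, int cnt)
{
buf_t *b;
int i;
*head = NULL;
for (i = 1; i < cnt; i++) {
b = buf_alloc();
if (!b)
goto unwind;
b->next = *head; /* push onto the session list */
*head = b;
}
return 0;
unwind:
while ((b = *head)) {
*head = b->next;
buf_free(b);
}
return -1; /* the driver returns -ENOMEM here */
}
#endif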
/**
* lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
* @phba: Pointer to HBA context object.
* @job: Pointer to the fc_bsg_job object.
* @nemb_tp: Enumerate of non-embedded mailbox command type.
* @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine performs the SLI_CONFIG (0x9B) write mailbox command operation
* with non-embedded external buffers.
**/
static int
lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
enum nemb_type nemb_tp,
struct lpfc_dmabuf *dmabuf)
{
struct dfc_mbox_req *mbox_req;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
uint32_t ext_buf_cnt;
struct bsg_job_data *dd_data = NULL;
LPFC_MBOXQ_t *pmboxq = NULL;
MAILBOX_t *pmb;
uint8_t *mbx;
int rc = SLI_CONFIG_NOT_HANDLED, i;
mbox_req =
(struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
/* pointer to the start of mailbox command */
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
if (nemb_tp == nemb_mse) {
ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2953 Failed SLI_CONFIG(mse) wr, "
"ext_buf_cnt(%d) out of range(%d)\n",
ext_buf_cnt,
LPFC_MBX_SLI_CONFIG_MAX_MSE);
return -ERANGE;
}
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2949 Handled SLI_CONFIG(mse) wr, "
"ext_buf_cnt:%d\n", ext_buf_cnt);
} else {
/* sanity check on interface type for support */
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_2)
return -ENODEV;
/* nemb_tp == nemb_hbd */
ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2954 Failed SLI_CONFIG(hbd) wr, "
"ext_buf_cnt(%d) out of range(%d)\n",
ext_buf_cnt,
LPFC_MBX_SLI_CONFIG_MAX_HBD);
return -ERANGE;
}
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2950 Handled SLI_CONFIG(hbd) wr, "
"ext_buf_cnt:%d\n", ext_buf_cnt);
}
/* before dma buffer descriptor setup */
lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
sta_pre_addr, dmabuf, ext_buf_cnt);
if (ext_buf_cnt == 0)
return -EPERM;
/* for the first external buffer */
lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
/* after dma descriptor setup */
lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
sta_pos_addr, dmabuf, ext_buf_cnt);
/* log the additional external buffer lengths for reference */
for (i = 1; i < ext_buf_cnt; i++) {
if (nemb_tp == nemb_mse)
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
i, sli_cfg_mbx->un.sli_config_emb0_subsys.
mse[i].buf_len);
else
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
&sli_cfg_mbx->un.sli_config_emb1_subsys.
hbd[i]));
}
/* multi-buffer handling context */
phba->mbox_ext_buf_ctx.nembType = nemb_tp;
phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
if (ext_buf_cnt == 1) {
/* bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
rc = -ENOMEM;
goto job_error;
}
/* mailbox command structure for base driver */
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
rc = -ENOMEM;
goto job_error;
}
memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
pmb = &pmboxq->u.mb;
mbx = (uint8_t *)dmabuf->virt;
memcpy(pmb, mbx, sizeof(*pmb));
pmb->mbxOwner = OWN_HOST;
pmboxq->vport = phba->pport;
/* callback for multi-buffer write mailbox command */
pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
/* context fields to callback function */
pmboxq->context1 = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
job->dd_data = dd_data;
/* state change */
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2955 Issued SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
return SLI_CONFIG_HANDLED;
}
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2956 Failed to issue SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
rc = -EPIPE;
goto job_error;
}
/* wait for additional external buffers */
job->reply->result = 0;
job->job_done(job);
return SLI_CONFIG_HANDLED;
job_error:
if (pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
kfree(dd_data);
return rc;
}
/**
* lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
* @phba: Pointer to HBA context object.
* @job: Pointer to the fc_bsg_job object.
* @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine handles the SLI_CONFIG (0x9B) mailbox command with non-embedded
* external buffers, including both 0x9B with non-embedded MSEs and 0x9B
* with embedded subsystem 0x1 and opcodes with external HBDs.
**/
static int
lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
struct lpfc_sli_config_mbox *sli_cfg_mbx;
uint32_t subsys;
uint32_t opcode;
int rc = SLI_CONFIG_NOT_HANDLED;
/* state change on new multi-buffer pass-through mailbox command */
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
if (!bsg_bf_get(lpfc_mbox_hdr_emb,
&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
&sli_cfg_mbx->un.sli_config_emb0_subsys);
opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
&sli_cfg_mbx->un.sli_config_emb0_subsys);
if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
switch (opcode) {
case FCOE_OPCODE_READ_FCF:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2957 Handled SLI_CONFIG "
"subsys_fcoe, opcode:x%x\n",
opcode);
rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
nemb_mse, dmabuf);
break;
case FCOE_OPCODE_ADD_FCF:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2958 Handled SLI_CONFIG "
"subsys_fcoe, opcode:x%x\n",
opcode);
rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
nemb_mse, dmabuf);
break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2959 Reject SLI_CONFIG "
"subsys_fcoe, opcode:x%x\n",
opcode);
rc = -EPERM;
break;
}
} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
switch (opcode) {
case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3106 Handled SLI_CONFIG "
"subsys_comn, opcode:x%x\n",
opcode);
rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
nemb_mse, dmabuf);
break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3107 Reject SLI_CONFIG "
"subsys_comn, opcode:x%x\n",
opcode);
rc = -EPERM;
break;
}
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2977 Reject SLI_CONFIG "
"subsys:x%d, opcode:x%x\n",
subsys, opcode);
rc = -EPERM;
}
} else {
subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
&sli_cfg_mbx->un.sli_config_emb1_subsys);
opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
&sli_cfg_mbx->un.sli_config_emb1_subsys);
if (subsys == SLI_CONFIG_SUBSYS_COMN) {
switch (opcode) {
case COMN_OPCODE_READ_OBJECT:
case COMN_OPCODE_READ_OBJECT_LIST:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2960 Handled SLI_CONFIG "
"subsys_comn, opcode:x%x\n",
opcode);
rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
nemb_hbd, dmabuf);
break;
case COMN_OPCODE_WRITE_OBJECT:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2961 Handled SLI_CONFIG "
"subsys_comn, opcode:x%x\n",
opcode);
rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
nemb_hbd, dmabuf);
break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2962 Not handled SLI_CONFIG "
"subsys_comn, opcode:x%x\n",
opcode);
rc = SLI_CONFIG_NOT_HANDLED;
break;
}
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2978 Not handled SLI_CONFIG "
"subsys:x%d, opcode:x%x\n",
subsys, opcode);
rc = SLI_CONFIG_NOT_HANDLED;
}
}
/* reset state on an unhandled new multi-buffer mailbox command */
if (rc != SLI_CONFIG_HANDLED)
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
return rc;
}
/**
* lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
* @phba: Pointer to HBA context object.
*
* This routine requests the abort of a pass-through mailbox command with
* multiple external buffers due to an error condition.
**/
static void
lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
{
if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
else
lpfc_bsg_mbox_ext_session_reset(phba);
return;
}
/**
* lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
* @phba: Pointer to HBA context object.
* @job: Pointer to the fc_bsg_job object.
*
* This routine extracts the next mailbox read external buffer back to
* user space through BSG.
**/
static int
lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
struct lpfc_sli_config_mbox *sli_cfg_mbx;
struct lpfc_dmabuf *dmabuf;
uint8_t *pbuf;
uint32_t size;
uint32_t index;
index = phba->mbox_ext_buf_ctx.seqNum;
phba->mbox_ext_buf_ctx.seqNum++;
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
&sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2963 SLI_CONFIG (mse) ext-buffer rd get "
"buffer[%d], size:%d\n", index, size);
} else {
size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
&sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2964 SLI_CONFIG (hbd) ext-buffer rd get "
"buffer[%d], size:%d\n", index, size);
}
if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
return -EPIPE;
dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
struct lpfc_dmabuf, list);
list_del_init(&dmabuf->list);
/* after dma buffer descriptor setup */
lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
mbox_rd, dma_ebuf, sta_pos_addr,
dmabuf, index);
pbuf = (uint8_t *)dmabuf->virt;
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pbuf, size);
lpfc_bsg_dma_page_free(phba, dmabuf);
if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
"command session done\n");
lpfc_bsg_mbox_ext_session_reset(phba);
}
job->reply->result = 0;
job->job_done(job);
return SLI_CONFIG_HANDLED;
}
/**
* lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
* @phba: Pointer to HBA context object.
* @job: Pointer to the fc_bsg_job object.
* @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine sets up the next mailbox write external buffer obtained
* from user space through BSG.
**/
static int
lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
struct lpfc_sli_config_mbox *sli_cfg_mbx;
struct bsg_job_data *dd_data = NULL;
LPFC_MBOXQ_t *pmboxq = NULL;
MAILBOX_t *pmb;
enum nemb_type nemb_tp;
uint8_t *pbuf;
uint32_t size;
uint32_t index;
int rc;
index = phba->mbox_ext_buf_ctx.seqNum;
phba->mbox_ext_buf_ctx.seqNum++;
nemb_tp = phba->mbox_ext_buf_ctx.nembType;
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
rc = -ENOMEM;
goto job_error;
}
pbuf = (uint8_t *)dmabuf->virt;
size = job->request_payload.payload_len;
sg_copy_to_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
pbuf, size);
if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2966 SLI_CONFIG (mse) ext-buffer wr set "
"buffer[%d], size:%d\n",
phba->mbox_ext_buf_ctx.seqNum, size);
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2967 SLI_CONFIG (hbd) ext-buffer wr set "
"buffer[%d], size:%d\n",
phba->mbox_ext_buf_ctx.seqNum, size);
}
/* set up external buffer descriptor and add to external buffer list */
lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
phba->mbox_ext_buf_ctx.mbx_dmabuf,
dmabuf);
list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
/* after write dma buffer */
lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
mbox_wr, dma_ebuf, sta_pos_addr,
dmabuf, index);
if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2968 SLI_CONFIG ext-buffer wr all %d "
"ebuffers received\n",
phba->mbox_ext_buf_ctx.numBuf);
/* mailbox command structure for base driver */
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
rc = -ENOMEM;
goto job_error;
}
memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
pmb = &pmboxq->u.mb;
memcpy(pmb, pbuf, sizeof(*pmb));
pmb->mbxOwner = OWN_HOST;
pmboxq->vport = phba->pport;
/* callback for multi-buffer write mailbox command */
pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
/* context fields to callback function */
pmboxq->context1 = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
job->dd_data = dd_data;
/* state change */
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2969 Issued SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
return SLI_CONFIG_HANDLED;
}
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2970 Failed to issue SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
rc = -EPIPE;
goto job_error;
}
/* wait for additional external buffers */
job->reply->result = 0;
job->job_done(job);
return SLI_CONFIG_HANDLED;
job_error:
lpfc_bsg_dma_page_free(phba, dmabuf);
kfree(dd_data);
return rc;
}
/**
* lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
* @phba: Pointer to HBA context object.
* @job: Pointer to the fc_bsg_job object.
* @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
* command with multiple non-embedded external buffers.
**/
static int
lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
int rc;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2971 SLI_CONFIG buffer (type:x%x)\n",
phba->mbox_ext_buf_ctx.mboxType);
if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2972 SLI_CONFIG rd buffer state "
"mismatch:x%x\n",
phba->mbox_ext_buf_ctx.state);
lpfc_bsg_mbox_ext_abort(phba);
return -EPIPE;
}
rc = lpfc_bsg_read_ebuf_get(phba, job);
if (rc == SLI_CONFIG_HANDLED)
lpfc_bsg_dma_page_free(phba, dmabuf);
} else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2973 SLI_CONFIG wr buffer state "
"mismatch:x%x\n",
phba->mbox_ext_buf_ctx.state);
lpfc_bsg_mbox_ext_abort(phba);
return -EPIPE;
}
rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
}
return rc;
}
/**
* lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
* @phba: Pointer to HBA context object.
* @job: Pointer to the fc_bsg_job object.
* @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine checks for and handles non-embedded multi-buffer SLI_CONFIG
* (0x9B) mailbox commands and external buffers.
**/
static int
lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
struct dfc_mbox_req *mbox_req;
int rc = SLI_CONFIG_NOT_HANDLED;
mbox_req =
(struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
/* mbox command with/without single external buffer */
if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
return rc;
/* mbox command and first external buffer */
if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
if (mbox_req->extSeqNum == 1) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2974 SLI_CONFIG mailbox: tag:%d, "
"seq:%d\n", mbox_req->extMboxTag,
mbox_req->extSeqNum);
rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
return rc;
} else
goto sli_cfg_ext_error;
}
/*
* handle additional external buffers
*/
/* check broken pipe conditions */
if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
goto sli_cfg_ext_error;
if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
goto sli_cfg_ext_error;
if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
goto sli_cfg_ext_error;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2975 SLI_CONFIG mailbox external buffer: "
"extSta:x%x, tag:%d, seq:%d\n",
phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
mbox_req->extSeqNum);
rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
return rc;
sli_cfg_ext_error:
/* all other cases, broken pipe */
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2976 SLI_CONFIG mailbox broken pipe: "
"ctxSta:x%x, ctxNumBuf:%d "
"ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
phba->mbox_ext_buf_ctx.state,
phba->mbox_ext_buf_ctx.numBuf,
phba->mbox_ext_buf_ctx.mbxTag,
phba->mbox_ext_buf_ctx.seqNum,
mbox_req->extMboxTag, mbox_req->extSeqNum);
lpfc_bsg_mbox_ext_session_reset(phba);
return -EPIPE;
}
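/*
* Illustrative sketch (not driver code): the continuity rules enforced
* above for additional external buffers. A follow-on request is valid
* only if its tag matches the open session, its sequence number is the
* next one expected, and it does not run past the buffer count;
* anything else is treated as a broken pipe.
*/
#if 0
static int ebuf_seq_ok(unsigned int ctx_tag, unsigned int ctx_seq,
unsigned int ctx_nbuf, unsigned int req_tag, unsigned int req_seq)
{
return req_tag == ctx_tag &&
req_seq == ctx_seq + 1 &&
req_seq <= ctx_nbuf;
}
#endif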
/**
* lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
* @phba: Pointer to HBA context object.
* @job: Pointer to the fc_bsg_job object.
* @vport: Pointer to a vport object.
*
* Allocate a tracking object and mailbox command memory, get a mailbox
* from the mailbox pool, and copy the caller's mailbox command.
*
* If offline and the sli is active we need to poll for the command (the port
* is being reset) and complete the job; otherwise issue the mailbox command
* and let our completion handler finish the command.
**/
static uint32_t
lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
struct lpfc_vport *vport)
{
LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
/* a 4k buffer to hold the mb and extended data from/to the bsg */
uint8_t *pmbx = NULL;
struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
struct lpfc_dmabuf *dmabuf = NULL;
struct dfc_mbox_req *mbox_req;
struct READ_EVENT_LOG_VAR *rdEventLog;
uint32_t transmit_length, receive_length, mode;
struct lpfc_mbx_sli4_config *sli4_config;
struct lpfc_mbx_nembed_cmd *nembed_sge;
struct mbox_header *header;
struct ulp_bde64 *bde;
uint8_t *ext = NULL;
int rc = 0;
uint8_t *from;
uint32_t size;
/* in case no data is transferred */
job->reply->reply_payload_rcv_len = 0;
/* sanity check to protect driver */
if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
job->request_payload.payload_len > BSG_MBOX_SIZE) {
rc = -ERANGE;
goto job_done;
}
/*
* Don't allow mailbox commands to be sent when blocked or when in
* the middle of discovery
*/
if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
rc = -EAGAIN;
goto job_done;
}
mbox_req =
(struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
/* check if requested extended data lengths are valid */
if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
(mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
rc = -ERANGE;
goto job_done;
}
dmabuf = lpfc_bsg_dma_page_alloc(phba);
if (!dmabuf || !dmabuf->virt) {
rc = -ENOMEM;
goto job_done;
}
/* Get the mailbox command or external buffer from BSG */
pmbx = (uint8_t *)dmabuf->virt;
size = job->request_payload.payload_len;
sg_copy_to_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt, pmbx, size);
/* Handle possible SLI_CONFIG with non-embedded payloads */
if (phba->sli_rev == LPFC_SLI_REV4) {
rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
if (rc == SLI_CONFIG_HANDLED)
goto job_cont;
if (rc)
goto job_done;
/* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
}
rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
if (rc != 0)
goto job_done; /* must be negative */
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2727 Failed allocation of dd_data\n");
rc = -ENOMEM;
goto job_done;
}
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
rc = -ENOMEM;
goto job_done;
}
memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
pmb = &pmboxq->u.mb;
memcpy(pmb, pmbx, sizeof(*pmb));
pmb->mbxOwner = OWN_HOST;
pmboxq->vport = vport;
/* If HBA encountered an error attention, allow only DUMP
* or RESTART mailbox commands until the HBA is restarted.
*/
if (phba->pport->stopped &&
pmb->mbxCommand != MBX_DUMP_MEMORY &&
pmb->mbxCommand != MBX_RESTART &&
pmb->mbxCommand != MBX_WRITE_VPARMS &&
pmb->mbxCommand != MBX_WRITE_WWN)
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"2797 mbox: Issued mailbox cmd "
"0x%x while in stopped state.\n",
pmb->mbxCommand);
/* extended mailbox commands will need an extended buffer */
if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
from = pmbx;
ext = from + sizeof(MAILBOX_t);
pmboxq->context2 = ext;
pmboxq->in_ext_byte_len =
mbox_req->inExtWLen * sizeof(uint32_t);
pmboxq->out_ext_byte_len =
mbox_req->outExtWLen * sizeof(uint32_t);
pmboxq->mbox_offset_word = mbox_req->mbOffset;
}
/* biu diag will need a kernel buffer to transfer the data;
* allocate our own buffer and set up the mailbox command to
* use ours
*/
if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
transmit_length = pmb->un.varWords[1];
receive_length = pmb->un.varWords[4];
/* transmit length cannot be greater than receive length or
* mailbox extension size
*/
if ((transmit_length > receive_length) ||
(transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
rc = -ERANGE;
goto job_done;
}
pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
+ pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
+ pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
rdEventLog = &pmb->un.varRdEventLog;
receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
mode = bf_get(lpfc_event_log, rdEventLog);
/* receive length cannot be greater than mailbox
* extension size
*/
if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
rc = -ERANGE;
goto job_done;
}
/* mode zero uses a bde like biu diags command */
if (mode == 0) {
pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+ sizeof(MAILBOX_t));
pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+ sizeof(MAILBOX_t));
}
} else if (phba->sli_rev == LPFC_SLI_REV4) {
/* Let type 4 (well known data) through because the data is
* returned in varwords[4-8];
* otherwise check the receive length and fetch the buffer addr
*/
if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
(pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
/* rebuild the command for sli4 using our own buffers
* like we do for biu diags
*/
receive_length = pmb->un.varWords[2];
/* receive length cannot be greater than mailbox
* extension size
*/
if (receive_length == 0) {
rc = -ERANGE;
goto job_done;
}
pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+ sizeof(MAILBOX_t));
pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+ sizeof(MAILBOX_t));
} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
pmb->un.varUpdateCfg.co) {
bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
/* bde size cannot be greater than mailbox ext size */
if (bde->tus.f.bdeSize >
BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
rc = -ERANGE;
goto job_done;
}
bde->addrHigh = putPaddrHigh(dmabuf->phys
+ sizeof(MAILBOX_t));
bde->addrLow = putPaddrLow(dmabuf->phys
+ sizeof(MAILBOX_t));
} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
/* Handling non-embedded SLI_CONFIG mailbox command */
sli4_config = &pmboxq->u.mqe.un.sli4_config;
if (!bf_get(lpfc_mbox_hdr_emb,
&sli4_config->header.cfg_mhdr)) {
/* rebuild the command for sli4 using our
* own buffers like we do for biu diags
*/
header = (struct mbox_header *)
&pmb->un.varWords[0];
nembed_sge = (struct lpfc_mbx_nembed_cmd *)
&pmb->un.varWords[0];
receive_length = nembed_sge->sge[0].length;
/* receive length cannot be greater than
* mailbox extension size
*/
if ((receive_length == 0) ||
(receive_length >
BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
rc = -ERANGE;
goto job_done;
}
nembed_sge->sge[0].pa_hi =
putPaddrHigh(dmabuf->phys
+ sizeof(MAILBOX_t));
nembed_sge->sge[0].pa_lo =
putPaddrLow(dmabuf->phys
+ sizeof(MAILBOX_t));
}
}
}
dd_data->context_un.mbox.dmabuffers = dmabuf;
/* setup wake call as IOCB callback */
pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
/* setup context field to pass wait_queue pointer to wake function */
pmboxq->context1 = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
dd_data->context_un.mbox.ext = ext;
dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
job->dd_data = dd_data;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
goto job_done;
}
/* job finished, copy the data */
memcpy(pmbx, pmb, sizeof(*pmb));
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmbx, size);
/* not waiting; mbox already done */
rc = 0;
goto job_done;
}
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
return 1; /* job started */
job_done:
/* common exit for error or job completed inline */
if (pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
lpfc_bsg_dma_page_free(phba, dmabuf);
kfree(dd_data);
job_cont:
return rc;
}
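/*
* Illustrative sketch (not driver code): the single DMA page used above
* holds the mailbox image at offset 0 and any extension or rebuilt
* payload immediately after it, which is why every address programmed
* above is dmabuf->phys + sizeof(MAILBOX_t). MBOX_SZ below is a
* hypothetical stand-in for sizeof(MAILBOX_t).
*/
#if 0
#include <stdint.h>
#define MBOX_SZ 256 /* hypothetical; the driver uses sizeof(MAILBOX_t) */
static uint64_t ext_data_phys(uint64_t page_phys)
{
return page_phys + MBOX_SZ; /* extension data follows the mailbox */
}
#endif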
/**
* lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
* @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
**/
static int
lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct dfc_mbox_req *mbox_req;
int rc = 0;
/* mix-and-match backward compatibility */
job->reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2737 Mix-and-match backward compatibility "
"between MBOX_REQ old size:%d and "
"new request size:%d\n",
(int)(job->request_len -
sizeof(struct fc_bsg_request)),
(int)sizeof(struct dfc_mbox_req));
mbox_req = (struct dfc_mbox_req *)
job->request->rqst_data.h_vendor.vendor_cmd;
mbox_req->extMboxTag = 0;
mbox_req->extSeqNum = 0;
}
rc = lpfc_bsg_issue_mbox(phba, job, vport);
if (rc == 0) {
/* job done */
job->reply->result = 0;
job->dd_data = NULL;
job->job_done(job);
} else if (rc == 1)
/* job submitted, will complete later */
rc = 0; /* return zero, no error */
else {
/* some error occurred */
job->reply->result = rc;
job->dd_data = NULL;
}
return rc;
}
/**
* lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
* @phba: Pointer to HBA context object.
* @cmdiocbq: Pointer to command iocb.
* @rspiocbq: Pointer to response iocb.
*
* This function is the completion handler for iocbs issued using
* lpfc_menlo_cmd function. This function is called by the
* ring event handler function without any lock held. This function
* can be called from both worker thread context and interrupt
* context. This function also can be called from another thread which
* cleans up the SLI layer objects.
* This function copies the contents of the response iocb to the
* response iocb memory object provided by the caller of
* lpfc_sli_issue_iocb_wait and then wakes up the thread which
* sleeps for the iocb completion.
**/
static void
lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocbq,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
struct fc_bsg_job *job;
IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp, *rmp;
struct lpfc_bsg_menlo *menlo;
unsigned long flags;
struct menlo_response *menlo_resp;
unsigned int rsp_size;
int rc = 0;
dd_data = cmdiocbq->context1;
cmp = cmdiocbq->context2;
bmp = cmdiocbq->context3;
menlo = &dd_data->context_un.menlo;
rmp = menlo->rmp;
rsp = &rspiocbq->iocb;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* Copy the job data or set the failing status for the job */
if (job) {
/* always return the xri, this would be used in the case
* of a menlo download to allow the data to be sent as a
* continuation of the exchange.
*/
menlo_resp = (struct menlo_response *)
job->reply->reply_data.vendor_reply.vendor_rsp;
menlo_resp->xri = rsp->ulpContext;
if (rsp->ulpStatus) {
if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;
case IOERR_INVALID_RPI:
rc = -EFAULT;
break;
default:
rc = -EACCES;
break;
}
} else {
rc = -EACCES;
}
} else {
rsp_size = rsp->un.genreq64.bdl.bdeSize;
job->reply->reply_payload_rcv_len =
lpfc_bsg_copy_data(rmp, &job->reply_payload,
rsp_size, 0);
}
}
lpfc_sli_release_iocbq(phba, cmdiocbq);
lpfc_free_bsg_buffers(phba, cmp);
lpfc_free_bsg_buffers(phba, rmp);
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
kfree(bmp);
kfree(dd_data);
/* Complete the job if active */
if (job) {
job->reply->result = rc;
job->job_done(job);
}
return;
}
/**
* lpfc_menlo_cmd - send an ioctl for menlo hardware
* @job: fc_bsg_job to handle
*
* This function issues a gen request 64 CR ioctl for all menlo cmd requests;
* all the command completions will return the xri for the command.
* For menlo data requests a gen request 64 CX is used to continue the exchange
* supplied in the menlo request header xri field.
**/
static int
lpfc_menlo_cmd(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocbq;
IOCB_t *cmd;
int rc = 0;
struct menlo_command *menlo_cmd;
struct menlo_response *menlo_resp;
struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
int request_nseg;
int reply_nseg;
struct bsg_job_data *dd_data;
struct ulp_bde64 *bpl = NULL;
/* in case no data is returned, just report the return code */
job->reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) +
sizeof(struct menlo_command)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2784 Received MENLO_CMD request below "
"minimum size\n");
rc = -ERANGE;
goto no_dd_data;
}
if (job->reply_len <
sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2785 Received MENLO_CMD reply below "
"minimum size\n");
rc = -ERANGE;
goto no_dd_data;
}
if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2786 Adapter does not support menlo "
"commands\n");
rc = -EPERM;
goto no_dd_data;
}
menlo_cmd = (struct menlo_command *)
job->request->rqst_data.h_vendor.vendor_cmd;
menlo_resp = (struct menlo_response *)
job->reply->reply_data.vendor_reply.vendor_rsp;
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2787 Failed allocation of dd_data\n");
rc = -ENOMEM;
goto no_dd_data;
}
bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!bmp) {
rc = -ENOMEM;
goto free_dd;
}
bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
if (!bmp->virt) {
rc = -ENOMEM;
goto free_bmp;
}
INIT_LIST_HEAD(&bmp->list);
bpl = (struct ulp_bde64 *)bmp->virt;
request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
1, bpl, &request_nseg);
if (!cmp) {
rc = -ENOMEM;
goto free_bmp;
}
lpfc_bsg_copy_data(cmp, &job->request_payload,
job->request_payload.payload_len, 1);
bpl += request_nseg;
reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
bpl, &reply_nseg);
if (!rmp) {
rc = -ENOMEM;
goto free_cmp;
}
cmdiocbq = lpfc_sli_get_iocbq(phba);
if (!cmdiocbq) {
rc = -ENOMEM;
goto free_rmp;
}
cmd = &cmdiocbq->iocb;
cmd->un.genreq64.bdl.ulpIoTag32 = 0;
cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
cmd->un.genreq64.bdl.bdeSize =
(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
cmd->un.genreq64.w5.hcsw.Dfctl = 0;
cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
cmd->ulpBdeCount = 1;
cmd->ulpClass = CLASS3;
cmd->ulpOwner = OWN_CHIP;
cmd->ulpLe = 1; /* LE bit: last element in the list */
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
/* We want the firmware to time out before we do */
cmd->ulpTimeout = MENLO_TIMEOUT - 5;
cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
cmdiocbq->context1 = dd_data;
cmdiocbq->context2 = cmp;
cmdiocbq->context3 = bmp;
if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
cmd->ulpPU = MENLO_PU; /* 3 */
cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
cmd->ulpContext = MENLO_CONTEXT; /* 0 */
} else {
cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
cmd->ulpPU = 1;
cmd->un.ulpWord[4] = 0;
cmd->ulpContext = menlo_cmd->xri;
}
dd_data->type = TYPE_MENLO;
dd_data->set_job = job;
dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
dd_data->context_un.menlo.rmp = rmp;
job->dd_data = dd_data;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
MENLO_TIMEOUT - 5);
if (rc == IOCB_SUCCESS)
return 0; /* done for now */
lpfc_sli_release_iocbq(phba, cmdiocbq);
free_rmp:
lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
if (bmp->virt)
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
kfree(bmp);
free_dd:
kfree(dd_data);
no_dd_data:
/* make error code available to userspace */
job->reply->result = rc;
job->dd_data = NULL;
return rc;
}
/**
* lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
* @job: fc_bsg_job to handle
**/
static int
lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
{
int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
int rc;
switch (command) {
case LPFC_BSG_VENDOR_SET_CT_EVENT:
rc = lpfc_bsg_hba_set_event(job);
break;
case LPFC_BSG_VENDOR_GET_CT_EVENT:
rc = lpfc_bsg_hba_get_event(job);
break;
case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
rc = lpfc_bsg_send_mgmt_rsp(job);
break;
case LPFC_BSG_VENDOR_DIAG_MODE:
rc = lpfc_bsg_diag_loopback_mode(job);
break;
case LPFC_BSG_VENDOR_DIAG_MODE_END:
rc = lpfc_sli4_bsg_diag_mode_end(job);
break;
case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
rc = lpfc_bsg_diag_loopback_run(job);
break;
case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
rc = lpfc_sli4_bsg_link_diag_test(job);
break;
case LPFC_BSG_VENDOR_GET_MGMT_REV:
rc = lpfc_bsg_get_dfc_rev(job);
break;
case LPFC_BSG_VENDOR_MBOX:
rc = lpfc_bsg_mbox_cmd(job);
break;
case LPFC_BSG_VENDOR_MENLO_CMD:
case LPFC_BSG_VENDOR_MENLO_DATA:
rc = lpfc_menlo_cmd(job);
break;
default:
rc = -EINVAL;
job->reply->reply_payload_rcv_len = 0;
/* make error code available to userspace */
job->reply->result = rc;
break;
}
return rc;
}
/**
* lpfc_bsg_request - handle a bsg request from the FC transport
* @job: fc_bsg_job to handle
**/
int
lpfc_bsg_request(struct fc_bsg_job *job)
{
uint32_t msgcode;
int rc;
msgcode = job->request->msgcode;
switch (msgcode) {
case FC_BSG_HST_VENDOR:
rc = lpfc_bsg_hst_vendor(job);
break;
case FC_BSG_RPT_ELS:
rc = lpfc_bsg_rport_els(job);
break;
case FC_BSG_RPT_CT:
rc = lpfc_bsg_send_mgmt_cmd(job);
break;
default:
rc = -EINVAL;
job->reply->reply_payload_rcv_len = 0;
/* make error code available to userspace */
job->reply->result = rc;
break;
}
return rc;
}
/**
* lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
* @job: fc_bsg_job that has timed out
*
* This function just aborts the job's IOCB. The aborted IOCB will return to
* the waiting function which will handle passing the error back to userspace
**/
int
lpfc_bsg_timeout(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
struct bsg_job_data *dd_data;
unsigned long flags;
int rc = 0;
LIST_HEAD(completions);
struct lpfc_iocbq *check_iocb, *next_iocb;
/* if job's driver data is NULL, the command completed or is in the
* process of completing. In this case, return status to request
* so the timeout is retried. This avoids double completion issues
* and the request will be pulled off the timer queue when the
* command's completion handler executes. Otherwise, prevent the
* command's completion handler from executing the job done callback
* and continue processing to abort the outstanding command.
*/
spin_lock_irqsave(&phba->ct_ev_lock, flags);
dd_data = (struct bsg_job_data *)job->dd_data;
if (dd_data) {
dd_data->set_job = NULL;
job->dd_data = NULL;
} else {
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
return -EAGAIN;
}
switch (dd_data->type) {
case TYPE_IOCB:
/* Check to see if IOCB was issued to the port or not. If not,
* remove it from the txq queue and call cancel iocbs.
* Otherwise, call abort iotag
*/
cmdiocb = dd_data->context_un.iocb.cmdiocbq;
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
list) {
if (check_iocb == cmdiocb) {
list_move_tail(&check_iocb->list, &completions);
break;
}
}
if (list_empty(&completions))
lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
spin_unlock_irq(&phba->hbalock);
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
if (!list_empty(&completions)) {
lpfc_sli_cancel_iocbs(phba, &completions,
IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
}
break;
case TYPE_EVT:
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
break;
case TYPE_MBOX:
/* Update the ext buf ctx state if needed */
if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
break;
case TYPE_MENLO:
/* Check to see if IOCB was issued to the port or not. If not,
* remove it from the txq queue and call cancel iocbs.
* Otherwise, call abort iotag.
*/
cmdiocb = dd_data->context_un.menlo.cmdiocbq;
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
list) {
if (check_iocb == cmdiocb) {
list_move_tail(&check_iocb->list, &completions);
break;
}
}
if (list_empty(&completions))
lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
spin_unlock_irq(&phba->hbalock);
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
if (!list_empty(&completions)) {
lpfc_sli_cancel_iocbs(phba, &completions,
IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
}
break;
default:
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
break;
}
/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
* otherwise an error message will be displayed on the console
* so always return success (zero)
*/
return rc;
}
| gpl-2.0 |
kumajaya/android_kernel_samsung_lt01 | drivers/tty/vt/vt.c | 2078 | 103989 | /*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* Hopefully this will be a rather complete VT102 implementation.
*
* Beeping thanks to John T Kohl.
*
* Virtual Consoles, Screen Blanking, Screen Dumping, Color, Graphics
* Chars, and VT100 enhancements by Peter MacDonald.
*
* Copy and paste function by Andrew Haylett,
* some enhancements by Alessandro Rubini.
*
* Code to check for different video-cards mostly by Galen Hunt,
* <g-hunt@ee.utah.edu>
*
* Rudimentary ISO 10646/Unicode/UTF-8 character set support by
* Markus Kuhn, <mskuhn@immd4.informatik.uni-erlangen.de>.
*
* Dynamic allocation of consoles, aeb@cwi.nl, May 1994
* Resizing of consoles, aeb, 940926
*
* Code for xterm like mouse click reporting by Peter Orbaek 20-Jul-94
* <poe@daimi.aau.dk>
*
* User-defined bell sound, new setterm control sequences and printk
* redirection by Martin Mares <mj@k332.feld.cvut.cz> 19-Nov-95
*
* APM screenblank bug fixed Takashi Manabe <manabe@roy.dsl.tutics.tut.jp>
*
* Merge with the abstract console driver by Geert Uytterhoeven
* <geert@linux-m68k.org>, Jan 1997.
*
* Original m68k console driver modifications by
*
* - Arno Griffioen <arno@usn.nl>
* - David Carter <carter@cs.bris.ac.uk>
*
* The abstract console driver provides a generic interface for a text
* console. It supports VGA text mode, frame buffer based graphical consoles
* and special graphics processors that are only accessible through some
* registers (e.g. a TMS340x0 GSP).
*
* The interface to the hardware is specified using a special structure
* (struct consw) which contains function pointers to console operations
* (see <linux/console.h> for more information).
*
* Support for changeable cursor shape
* by Pavel Machek <pavel@atrey.karlin.mff.cuni.cz>, August 1997
*
* Ported to i386 and con_scrolldelta fixed
* by Emmanuel Marty <core@ggi-project.org>, April 1998
*
* Resurrected character buffers in videoram plus lots of other trickery
* by Martin Mares <mj@atrey.karlin.mff.cuni.cz>, July 1998
*
* Removed old-style timers, introduced console_timer, made timer
* deletion SMP-safe. 17Jun00, Andrew Morton
*
* Removed console_lock, enabled interrupts across all console operations
* 13 March 2001, Andrew Morton
*
* Fixed UTF-8 mode so alternate charset modes always work according
* to control sequences interpreted in do_con_trol function
* preserving backward VT100 semigraphics compatibility,
* malformed UTF sequences represented as sequences of replacement glyphs,
* original codes or '?' as a last resort if replacement glyph is undefined
* by Adam Tla/lka <atlka@pg.gda.pl>, Aug 2006
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kd.h>
#include <linux/slab.h>
#include <linux/major.h>
#include <linux/mm.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/vt_kern.h>
#include <linux/selection.h>
#include <linux/tiocl.h>
#include <linux/kbd_kern.h>
#include <linux/consolemap.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pm.h>
#include <linux/font.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/io.h>
#include <asm/system.h>
#include <linux/uaccess.h>
#include <linux/kdb.h>
#include <linux/ctype.h>
#define MAX_NR_CON_DRIVER 16
#define CON_DRIVER_FLAG_MODULE 1
#define CON_DRIVER_FLAG_INIT 2
#define CON_DRIVER_FLAG_ATTR 4
struct con_driver {
const struct consw *con;
const char *desc;
struct device *dev;
int node;
int first;
int last;
int flag;
};
static struct con_driver registered_con_driver[MAX_NR_CON_DRIVER];
const struct consw *conswitchp;
/* A bitmap for codes <32. A bit of 1 indicates that the code
* corresponding to that bit number invokes some special action
* (such as cursor movement) and should not be displayed as a
* glyph unless the disp_ctrl mode is explicitly enabled.
*/
#define CTRL_ACTION 0x0d00ff81
#define CTRL_ALWAYS 0x0800f501 /* Cannot be overridden by disp_ctrl */
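/* Worked example (illustrative): BEL is code 7, and bit 7 of
* CTRL_ACTION (0x0d00ff81) is set, so a raw 0x07 normally rings the
* bell instead of printing a glyph unless disp_ctrl is enabled.
* ESC is code 27; bit 27 of CTRL_ALWAYS (0x0800f501) is also set, so
* ESC is always interpreted and never displayed.
*/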
/*
* Here are the default bell parameters: 750 Hz, 1/8th of a second
*/
#define DEFAULT_BELL_PITCH 750
#define DEFAULT_BELL_DURATION (HZ/8)
struct vc vc_cons [MAX_NR_CONSOLES];
#ifndef VT_SINGLE_DRIVER
static const struct consw *con_driver_map[MAX_NR_CONSOLES];
#endif
static int con_open(struct tty_struct *, struct file *);
static void vc_init(struct vc_data *vc, unsigned int rows,
unsigned int cols, int do_clear);
static void gotoxy(struct vc_data *vc, int new_x, int new_y);
static void save_cur(struct vc_data *vc);
static void reset_terminal(struct vc_data *vc, int do_clear);
static void con_flush_chars(struct tty_struct *tty);
static int set_vesa_blanking(char __user *p);
static void set_cursor(struct vc_data *vc);
static void hide_cursor(struct vc_data *vc);
static void console_callback(struct work_struct *ignored);
static void blank_screen_t(unsigned long dummy);
static void set_palette(struct vc_data *vc);
static int printable; /* Is console ready for printing? */
int default_utf8 = true;
module_param(default_utf8, int, S_IRUGO | S_IWUSR);
int global_cursor_default = -1;
module_param(global_cursor_default, int, S_IRUGO | S_IWUSR);
static int cur_default = CUR_DEFAULT;
module_param(cur_default, int, S_IRUGO | S_IWUSR);
/*
* ignore_poke: don't unblank the screen when things are typed. This is
* mainly for the privacy of braille terminal users.
*/
static int ignore_poke;
int do_poke_blanked_console;
int console_blanked;
static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */
static int vesa_off_interval;
static int blankinterval = 10*60;
core_param(consoleblank, blankinterval, int, 0444);
static DECLARE_WORK(console_work, console_callback);
/*
* fg_console is the current virtual console,
* last_console is the last used one,
* want_console is the console we want to switch to,
* saved_* variants are for save/restore around kernel debugger enter/leave
*/
int fg_console;
int last_console;
int want_console = -1;
static int saved_fg_console;
static int saved_last_console;
static int saved_want_console;
static int saved_vc_mode;
static int saved_console_blanked;
/*
* For each existing display, we have a pointer to console currently visible
* on that display, allowing consoles other than fg_console to be refreshed
* appropriately. Unless the low-level driver supplies its own display_fg
* variable, we use this one for the "master display".
*/
static struct vc_data *master_display_fg;
/*
* Unfortunately, we need to delay tty echo when we're currently writing to the
* console since the code is (and always was) not re-entrant, so we schedule
* all flip requests to process context with schedule-task() and run it from
* console_callback().
*/
/*
* For the same reason, we defer scrollback to the console callback.
*/
static int scrollback_delta;
/*
* Hook so that the power management routines can (un)blank
* the console on our behalf.
*/
int (*console_blank_hook)(int);
static DEFINE_TIMER(console_timer, blank_screen_t, 0, 0);
static int blank_state;
static int blank_timer_expired;
enum {
blank_off = 0,
blank_normal_wait,
blank_vesa_wait,
};
/*
* /sys/class/tty/tty0/
*
* the attribute 'active' contains the name of the current vc
* console and it supports poll() to detect vc switches
*/
static struct device *tty0dev;
/*
* Notifier list for console events.
*/
static ATOMIC_NOTIFIER_HEAD(vt_notifier_list);
int register_vt_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&vt_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(register_vt_notifier);
int unregister_vt_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_unregister(&vt_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vt_notifier);
static void notify_write(struct vc_data *vc, unsigned int unicode)
{
struct vt_notifier_param param = { .vc = vc, .c = unicode };
atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
}
static void notify_update(struct vc_data *vc)
{
struct vt_notifier_param param = { .vc = vc };
atomic_notifier_call_chain(&vt_notifier_list, VT_UPDATE, &param);
}
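/* Minimal usage sketch (illustrative only, not part of this file):
* a module can observe console output through the chain above.
*
*	static int my_vt_cb(struct notifier_block *nb,
*			    unsigned long action, void *data)
*	{
*		struct vt_notifier_param *p = data;
*
*		if (action == VT_WRITE)
*			pr_debug("vt%d wrote U+%04x\n", p->vc->vc_num, p->c);
*		return NOTIFY_OK;
*	}
*	static struct notifier_block my_nb = { .notifier_call = my_vt_cb };
*
* followed by register_vt_notifier(&my_nb) at init time and
* unregister_vt_notifier(&my_nb) on exit.
*/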
/*
* Low-Level Functions
*/
#define IS_FG(vc) ((vc)->vc_num == fg_console)
#ifdef VT_BUF_VRAM_ONLY
#define DO_UPDATE(vc) 0
#else
#define DO_UPDATE(vc) (CON_IS_VISIBLE(vc) && !console_blanked)
#endif
static inline unsigned short *screenpos(struct vc_data *vc, int offset, int viewed)
{
unsigned short *p;
if (!viewed)
p = (unsigned short *)(vc->vc_origin + offset);
else if (!vc->vc_sw->con_screen_pos)
p = (unsigned short *)(vc->vc_visible_origin + offset);
else
p = vc->vc_sw->con_screen_pos(vc, offset);
return p;
}
/* Called from the keyboard irq path.. */
static inline void scrolldelta(int lines)
{
/* FIXME */
/* scrolldelta needs some kind of consistency lock, but the BKL was not,
and still is not, protecting against the scheduled back end */
scrollback_delta += lines;
schedule_console_callback();
}
void schedule_console_callback(void)
{
schedule_work(&console_work);
}
static void scrup(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
{
unsigned short *d, *s;
if (t+nr >= b)
nr = b - t - 1;
if (b > vc->vc_rows || t >= b || nr < 1)
return;
if (CON_IS_VISIBLE(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_UP, nr))
return;
d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr));
scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row);
scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_video_erase_char,
vc->vc_size_row * nr);
}
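/* Illustrative: scrup(vc, 0, vc->vc_rows, 1) shifts the whole visible
* region up one line and fills the freed bottom line with
* vc_video_erase_char; t is inclusive, b exclusive.
*/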
static void scrdown(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
{
unsigned short *s;
unsigned int step;
if (t+nr >= b)
nr = b - t - 1;
if (b > vc->vc_rows || t >= b || nr < 1)
return;
if (CON_IS_VISIBLE(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_DOWN, nr))
return;
s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
step = vc->vc_cols * nr;
scr_memmovew(s + step, s, (b - t - nr) * vc->vc_size_row);
scr_memsetw(s, vc->vc_video_erase_char, 2 * step);
}
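/* Descriptive note: do_update_region() redraws count characters
* starting at start, batching consecutive cells that share the same
* attribute byte into single con_putcs calls to minimize driver
* round trips.
*/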
static void do_update_region(struct vc_data *vc, unsigned long start, int count)
{
#ifndef VT_BUF_VRAM_ONLY
unsigned int xx, yy, offset;
u16 *p;
p = (u16 *) start;
if (!vc->vc_sw->con_getxy) {
offset = (start - vc->vc_origin) / 2;
xx = offset % vc->vc_cols;
yy = offset / vc->vc_cols;
} else {
int nxx, nyy;
start = vc->vc_sw->con_getxy(vc, start, &nxx, &nyy);
xx = nxx; yy = nyy;
}
for(;;) {
u16 attrib = scr_readw(p) & 0xff00;
int startx = xx;
u16 *q = p;
while (xx < vc->vc_cols && count) {
if (attrib != (scr_readw(p) & 0xff00)) {
if (p > q)
vc->vc_sw->con_putcs(vc, q, p-q, yy, startx);
startx = xx;
q = p;
attrib = scr_readw(p) & 0xff00;
}
p++;
xx++;
count--;
}
if (p > q)
vc->vc_sw->con_putcs(vc, q, p-q, yy, startx);
if (!count)
break;
xx = 0;
yy++;
if (vc->vc_sw->con_getxy) {
p = (u16 *)start;
start = vc->vc_sw->con_getxy(vc, start, NULL, NULL);
}
}
#endif
}
void update_region(struct vc_data *vc, unsigned long start, int count)
{
WARN_CONSOLE_UNLOCKED();
if (DO_UPDATE(vc)) {
hide_cursor(vc);
do_update_region(vc, start, count);
set_cursor(vc);
}
}
/* Structure of attributes is hardware-dependent */
static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink,
u8 _underline, u8 _reverse, u8 _italic)
{
if (vc->vc_sw->con_build_attr)
return vc->vc_sw->con_build_attr(vc, _color, _intensity,
_blink, _underline, _reverse, _italic);
#ifndef VT_BUF_VRAM_ONLY
/*
* ++roman: I completely changed the attribute format for monochrome
* mode (!can_do_color). The formerly used MDA (monochrome display
* adapter) format didn't allow the combination of certain effects.
* Now the attribute is just a bit vector:
* Bit 0..1: intensity (0..2)
* Bit 2 : underline
* Bit 3 : reverse
* Bit 7 : blink
*/
{
u8 a = _color;
if (!vc->vc_can_do_color)
return _intensity |
(_italic ? 2 : 0) |
(_underline ? 4 : 0) |
(_reverse ? 8 : 0) |
(_blink ? 0x80 : 0);
if (_italic)
a = (a & 0xF0) | vc->vc_itcolor;
else if (_underline)
a = (a & 0xf0) | vc->vc_ulcolor;
else if (_intensity == 0)
a = (a & 0xf0) | vc->vc_ulcolor;
if (_reverse)
a = ((a) & 0x88) | ((((a) >> 4) | ((a) << 4)) & 0x77);
if (_blink)
a ^= 0x80;
if (_intensity == 2)
a ^= 0x08;
if (vc->vc_hi_font_mask == 0x100)
a <<= 1;
return a;
}
#else
return 0;
#endif
}
static void update_attr(struct vc_data *vc)
{
vc->vc_attr = build_attr(vc, vc->vc_color, vc->vc_intensity,
vc->vc_blink, vc->vc_underline,
vc->vc_reverse ^ vc->vc_decscnm, vc->vc_italic);
vc->vc_video_erase_char = (build_attr(vc, vc->vc_color, 1, vc->vc_blink, 0, vc->vc_decscnm, 0) << 8) | ' ';
}
/* Note: inverting the screen twice should revert to the original state */
void invert_screen(struct vc_data *vc, int offset, int count, int viewed)
{
unsigned short *p;
WARN_CONSOLE_UNLOCKED();
count /= 2;
p = screenpos(vc, offset, viewed);
if (vc->vc_sw->con_invert_region)
vc->vc_sw->con_invert_region(vc, p, count);
#ifndef VT_BUF_VRAM_ONLY
else {
u16 *q = p;
int cnt = count;
u16 a;
if (!vc->vc_can_do_color) {
while (cnt--) {
a = scr_readw(q);
a ^= 0x0800;
scr_writew(a, q);
q++;
}
} else if (vc->vc_hi_font_mask == 0x100) {
while (cnt--) {
a = scr_readw(q);
a = ((a) & 0x11ff) | (((a) & 0xe000) >> 4) | (((a) & 0x0e00) << 4);
scr_writew(a, q);
q++;
}
} else {
while (cnt--) {
a = scr_readw(q);
a = ((a) & 0x88ff) | (((a) & 0x7000) >> 4) | (((a) & 0x0700) << 4);
scr_writew(a, q);
q++;
}
}
}
#endif
if (DO_UPDATE(vc))
do_update_region(vc, (unsigned long) p, count);
}
/* used by selection: complement pointer position */
void complement_pos(struct vc_data *vc, int offset)
{
static int old_offset = -1;
static unsigned short old;
static unsigned short oldx, oldy;
WARN_CONSOLE_UNLOCKED();
if (old_offset != -1 && old_offset >= 0 &&
old_offset < vc->vc_screenbuf_size) {
scr_writew(old, screenpos(vc, old_offset, 1));
if (DO_UPDATE(vc))
vc->vc_sw->con_putc(vc, old, oldy, oldx);
}
old_offset = offset;
if (offset != -1 && offset >= 0 &&
offset < vc->vc_screenbuf_size) {
unsigned short new;
unsigned short *p;
p = screenpos(vc, offset, 1);
old = scr_readw(p);
new = old ^ vc->vc_complement_mask;
scr_writew(new, p);
if (DO_UPDATE(vc)) {
oldx = (offset >> 1) % vc->vc_cols;
oldy = (offset >> 1) / vc->vc_cols;
vc->vc_sw->con_putc(vc, new, oldy, oldx);
}
}
}
static void insert_char(struct vc_data *vc, unsigned int nr)
{
unsigned short *p, *q = (unsigned short *)vc->vc_pos;
p = q + vc->vc_cols - nr - vc->vc_x;
while (--p >= q)
scr_writew(scr_readw(p), p + nr);
scr_memsetw(q, vc->vc_video_erase_char, nr * 2);
vc->vc_need_wrap = 0;
if (DO_UPDATE(vc)) {
unsigned short oldattr = vc->vc_attr;
vc->vc_sw->con_bmove(vc, vc->vc_y, vc->vc_x, vc->vc_y, vc->vc_x + nr, 1,
vc->vc_cols - vc->vc_x - nr);
vc->vc_attr = vc->vc_video_erase_char >> 8;
while (nr--)
vc->vc_sw->con_putc(vc, vc->vc_video_erase_char, vc->vc_y, vc->vc_x + nr);
vc->vc_attr = oldattr;
}
}
static void delete_char(struct vc_data *vc, unsigned int nr)
{
unsigned int i = vc->vc_x;
unsigned short *p = (unsigned short *)vc->vc_pos;
while (++i <= vc->vc_cols - nr) {
scr_writew(scr_readw(p+nr), p);
p++;
}
scr_memsetw(p, vc->vc_video_erase_char, nr * 2);
vc->vc_need_wrap = 0;
if (DO_UPDATE(vc)) {
unsigned short oldattr = vc->vc_attr;
vc->vc_sw->con_bmove(vc, vc->vc_y, vc->vc_x + nr, vc->vc_y, vc->vc_x, 1,
vc->vc_cols - vc->vc_x - nr);
vc->vc_attr = vc->vc_video_erase_char >> 8;
while (nr--)
vc->vc_sw->con_putc(vc, vc->vc_video_erase_char, vc->vc_y,
vc->vc_cols - 1 - nr);
vc->vc_attr = oldattr;
}
}
static int softcursor_original;
static void add_softcursor(struct vc_data *vc)
{
int i = scr_readw((u16 *) vc->vc_pos);
u32 type = vc->vc_cursor_type;
if (! (type & 0x10)) return;
if (softcursor_original != -1) return;
softcursor_original = i;
i |= ((type >> 8) & 0xff00 );
i ^= ((type) & 0xff00 );
if ((type & 0x20) && ((softcursor_original & 0x7000) == (i & 0x7000))) i ^= 0x7000;
if ((type & 0x40) && ((i & 0x700) == ((i & 0x7000) >> 4))) i ^= 0x0700;
scr_writew(i, (u16 *) vc->vc_pos);
if (DO_UPDATE(vc))
vc->vc_sw->con_putc(vc, i, vc->vc_y, vc->vc_x);
}
static void hide_softcursor(struct vc_data *vc)
{
if (softcursor_original != -1) {
scr_writew(softcursor_original, (u16 *)vc->vc_pos);
if (DO_UPDATE(vc))
vc->vc_sw->con_putc(vc, softcursor_original,
vc->vc_y, vc->vc_x);
softcursor_original = -1;
}
}
static void hide_cursor(struct vc_data *vc)
{
if (vc == sel_cons)
clear_selection();
vc->vc_sw->con_cursor(vc, CM_ERASE);
hide_softcursor(vc);
}
static void set_cursor(struct vc_data *vc)
{
if (!IS_FG(vc) || console_blanked ||
vc->vc_mode == KD_GRAPHICS)
return;
if (vc->vc_deccm) {
if (vc == sel_cons)
clear_selection();
add_softcursor(vc);
if ((vc->vc_cursor_type & 0x0f) != 1)
vc->vc_sw->con_cursor(vc, CM_DRAW);
} else
hide_cursor(vc);
}
static void set_origin(struct vc_data *vc)
{
WARN_CONSOLE_UNLOCKED();
if (!CON_IS_VISIBLE(vc) ||
!vc->vc_sw->con_set_origin ||
!vc->vc_sw->con_set_origin(vc))
vc->vc_origin = (unsigned long)vc->vc_screenbuf;
vc->vc_visible_origin = vc->vc_origin;
vc->vc_scr_end = vc->vc_origin + vc->vc_screenbuf_size;
vc->vc_pos = vc->vc_origin + vc->vc_size_row * vc->vc_y + 2 * vc->vc_x;
}
static inline void save_screen(struct vc_data *vc)
{
WARN_CONSOLE_UNLOCKED();
if (vc->vc_sw->con_save_screen)
vc->vc_sw->con_save_screen(vc);
}
/*
* Redrawing of screen
*/
static void clear_buffer_attributes(struct vc_data *vc)
{
unsigned short *p = (unsigned short *)vc->vc_origin;
int count = vc->vc_screenbuf_size / 2;
int mask = vc->vc_hi_font_mask | 0xff;
for (; count > 0; count--, p++) {
scr_writew((scr_readw(p)&mask) | (vc->vc_video_erase_char & ~mask), p);
}
}
void redraw_screen(struct vc_data *vc, int is_switch)
{
int redraw = 0;
WARN_CONSOLE_UNLOCKED();
if (!vc) {
/* strange ... */
/* printk("redraw_screen: tty %d not allocated ??\n", new_console+1); */
return;
}
if (is_switch) {
struct vc_data *old_vc = vc_cons[fg_console].d;
if (old_vc == vc)
return;
if (!CON_IS_VISIBLE(vc))
redraw = 1;
*vc->vc_display_fg = vc;
fg_console = vc->vc_num;
hide_cursor(old_vc);
if (!CON_IS_VISIBLE(old_vc)) {
save_screen(old_vc);
set_origin(old_vc);
}
if (tty0dev)
sysfs_notify(&tty0dev->kobj, NULL, "active");
} else {
hide_cursor(vc);
redraw = 1;
}
if (redraw) {
int update;
int old_was_color = vc->vc_can_do_color;
set_origin(vc);
update = vc->vc_sw->con_switch(vc);
set_palette(vc);
/*
* If console changed from mono<->color, the best we can do
* is to clear the buffer attributes. As it currently stands,
* rebuilding new attributes from the old buffer is not doable
* without overly complex code.
*/
if (old_was_color != vc->vc_can_do_color) {
update_attr(vc);
clear_buffer_attributes(vc);
}
/* Forcibly update if we're panicing */
if ((update && vc->vc_mode != KD_GRAPHICS) ||
vt_force_oops_output(vc))
do_update_region(vc, vc->vc_origin, vc->vc_screenbuf_size / 2);
}
set_cursor(vc);
if (is_switch) {
set_leds();
compute_shiftstate();
notify_update(vc);
}
}
/*
* Allocation, freeing and resizing of VTs.
*/
int vc_cons_allocated(unsigned int i)
{
return (i < MAX_NR_CONSOLES && vc_cons[i].d);
}
static void visual_init(struct vc_data *vc, int num, int init)
{
/* ++Geert: vc->vc_sw->con_init determines console size */
if (vc->vc_sw)
module_put(vc->vc_sw->owner);
vc->vc_sw = conswitchp;
#ifndef VT_SINGLE_DRIVER
if (con_driver_map[num])
vc->vc_sw = con_driver_map[num];
#endif
__module_get(vc->vc_sw->owner);
vc->vc_num = num;
vc->vc_display_fg = &master_display_fg;
vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir;
vc->vc_uni_pagedir = 0;
vc->vc_hi_font_mask = 0;
vc->vc_complement_mask = 0;
vc->vc_can_do_color = 0;
vc->vc_panic_force_write = false;
vc->vc_sw->con_init(vc, init);
if (!vc->vc_complement_mask)
vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
vc->vc_s_complement_mask = vc->vc_complement_mask;
vc->vc_size_row = vc->vc_cols << 1;
vc->vc_screenbuf_size = vc->vc_rows * vc->vc_size_row;
}
int vc_allocate(unsigned int currcons) /* return 0 on success */
{
WARN_CONSOLE_UNLOCKED();
if (currcons >= MAX_NR_CONSOLES)
return -ENXIO;
if (!vc_cons[currcons].d) {
struct vc_data *vc;
struct vt_notifier_param param;
/* prevent users from taking too much memory */
if (currcons >= MAX_NR_USER_CONSOLES && !capable(CAP_SYS_RESOURCE))
return -EPERM;
/* due to the granularity of kmalloc, we waste some memory here */
/* the alloc is done in two steps, to optimize the common situation
of a 25x80 console (structsize=216, screenbuf_size=4000) */
/* although the numbers above are not valid since long ago, the
point is still up-to-date and the comment still has its value
even if only as a historical artifact. --mj, July 1998 */
param.vc = vc = kzalloc(sizeof(struct vc_data), GFP_KERNEL);
if (!vc)
return -ENOMEM;
vc_cons[currcons].d = vc;
tty_port_init(&vc->port);
INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
visual_init(vc, currcons, 1);
if (!*vc->vc_uni_pagedir_loc)
con_set_default_unimap(vc);
vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
if (!vc->vc_screenbuf) {
kfree(vc);
vc_cons[currcons].d = NULL;
return -ENOMEM;
}
/* If no drivers have overridden us and the user didn't pass a
boot option, default to displaying the cursor */
if (global_cursor_default == -1)
global_cursor_default = 1;
vc_init(vc, vc->vc_rows, vc->vc_cols, 1);
vcs_make_sysfs(currcons);
atomic_notifier_call_chain(&vt_notifier_list, VT_ALLOCATE, &param);
}
return 0;
}
static inline int resize_screen(struct vc_data *vc, int width, int height,
int user)
{
/* Resizes the resolution of the display adapter */
int err = 0;
if (vc->vc_mode != KD_GRAPHICS && vc->vc_sw->con_resize)
err = vc->vc_sw->con_resize(vc, width, height, user);
return err;
}
/*
* Change # of rows and columns (0 means unchanged/the size of fg_console)
* [this is to be used together with some user program
* like resize that changes the hardware videomode]
*/
#define VC_RESIZE_MAXCOL (32767)
#define VC_RESIZE_MAXROW (32767)
/**
* vc_do_resize - resizing method for the tty
* @tty: tty being resized
* @real_tty: real tty (different to tty if a pty/tty pair)
* @vc: virtual console private data
* @cols: columns
* @lines: lines
*
* Resize a virtual console, clipping according to the actual constraints.
* If the caller passes a tty structure then update the termios winsize
* information and perform any necessary signal handling.
*
* Caller must hold the console semaphore. Takes the termios mutex and
* ctrl_lock of the tty IFF a tty is passed.
*/
static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
unsigned int cols, unsigned int lines)
{
unsigned long old_origin, new_origin, new_scr_end, rlth, rrem, err = 0;
unsigned long end;
unsigned int old_rows, old_row_size;
unsigned int new_cols, new_rows, new_row_size, new_screen_size;
unsigned int user;
unsigned short *newscreen;
WARN_CONSOLE_UNLOCKED();
if (!vc)
return -ENXIO;
user = vc->vc_resize_user;
vc->vc_resize_user = 0;
if (cols > VC_RESIZE_MAXCOL || lines > VC_RESIZE_MAXROW)
return -EINVAL;
new_cols = (cols ? cols : vc->vc_cols);
new_rows = (lines ? lines : vc->vc_rows);
new_row_size = new_cols << 1;
new_screen_size = new_row_size * new_rows;
if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
return 0;
newscreen = kmalloc(new_screen_size, GFP_USER);
if (!newscreen)
return -ENOMEM;
old_rows = vc->vc_rows;
old_row_size = vc->vc_size_row;
err = resize_screen(vc, new_cols, new_rows, user);
if (err) {
kfree(newscreen);
return err;
}
vc->vc_rows = new_rows;
vc->vc_cols = new_cols;
vc->vc_size_row = new_row_size;
vc->vc_screenbuf_size = new_screen_size;
rlth = min(old_row_size, new_row_size);
rrem = new_row_size - rlth;
old_origin = vc->vc_origin;
new_origin = (long) newscreen;
new_scr_end = new_origin + new_screen_size;
if (vc->vc_y > new_rows) {
if (old_rows - vc->vc_y < new_rows) {
/*
* Cursor near the bottom, copy contents from the
* bottom of buffer
*/
old_origin += (old_rows - new_rows) * old_row_size;
} else {
/*
* Cursor is in no man's land, copy 1/2 screenful
* from the top and bottom of cursor position
*/
old_origin += (vc->vc_y - new_rows/2) * old_row_size;
}
}
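/*
* Worked example (illustrative): shrinking 50 rows to 25 with the
* cursor on row 48 takes the first branch (old_rows - vc_y == 2 < 25),
* so the bottom 25 rows are kept by advancing old_origin by
* (50 - 25) * old_row_size bytes.
*/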
end = old_origin + old_row_size * min(old_rows, new_rows);
update_attr(vc);
while (old_origin < end) {
scr_memcpyw((unsigned short *) new_origin,
(unsigned short *) old_origin, rlth);
if (rrem)
scr_memsetw((void *)(new_origin + rlth),
vc->vc_video_erase_char, rrem);
old_origin += old_row_size;
new_origin += new_row_size;
}
if (new_scr_end > new_origin)
scr_memsetw((void *)new_origin, vc->vc_video_erase_char,
new_scr_end - new_origin);
kfree(vc->vc_screenbuf);
vc->vc_screenbuf = newscreen;
vc->vc_screenbuf_size = new_screen_size;
set_origin(vc);
/* do part of a reset_terminal() */
vc->vc_top = 0;
vc->vc_bottom = vc->vc_rows;
gotoxy(vc, vc->vc_x, vc->vc_y);
save_cur(vc);
if (tty) {
/* Rewrite the requested winsize data with the actual
resulting sizes */
struct winsize ws;
memset(&ws, 0, sizeof(ws));
ws.ws_row = vc->vc_rows;
ws.ws_col = vc->vc_cols;
ws.ws_ypixel = vc->vc_scan_lines;
tty_do_resize(tty, &ws);
}
if (CON_IS_VISIBLE(vc))
update_screen(vc);
vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
return err;
}
/**
* vc_resize - resize a VT
* @vc: virtual console
* @cols: columns
* @rows: rows
*
* Resize a virtual console as seen from the console end of things. We
* use the common vc_do_resize methods to update the structures. The
* caller must hold the console sem to protect console internals and
* vc->port.tty
*/
int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows)
{
return vc_do_resize(vc->port.tty, vc, cols, rows);
}
/**
* vt_resize - resize a VT
* @tty: tty to resize
* @ws: winsize attributes
*
* Resize a virtual terminal. This is called by the tty layer as we
* register our own handler for resizing. The mutual helper does all
* the actual work.
*
* Takes the console sem and the called methods then take the tty
* termios_mutex and the tty ctrl_lock in that order.
*/
static int vt_resize(struct tty_struct *tty, struct winsize *ws)
{
struct vc_data *vc = tty->driver_data;
int ret;
console_lock();
ret = vc_do_resize(tty, vc, ws->ws_col, ws->ws_row);
console_unlock();
return ret;
}
void vc_deallocate(unsigned int currcons)
{
WARN_CONSOLE_UNLOCKED();
if (vc_cons_allocated(currcons)) {
struct vc_data *vc = vc_cons[currcons].d;
struct vt_notifier_param param = { .vc = vc };
atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, &param);
vcs_remove_sysfs(currcons);
vc->vc_sw->con_deinit(vc);
put_pid(vc->vt_pid);
module_put(vc->vc_sw->owner);
kfree(vc->vc_screenbuf);
if (currcons >= MIN_NR_CONSOLES)
kfree(vc);
vc_cons[currcons].d = NULL;
}
}
/*
* VT102 emulator
*/
#define set_kbd(vc, x) set_vc_kbd_mode(kbd_table + (vc)->vc_num, (x))
#define clr_kbd(vc, x) clr_vc_kbd_mode(kbd_table + (vc)->vc_num, (x))
#define is_kbd(vc, x) vc_kbd_mode(kbd_table + (vc)->vc_num, (x))
#define decarm VC_REPEAT
#define decckm VC_CKMODE
#define kbdapplic VC_APPLIC
#define lnm VC_CRLF
/*
* this is what the terminal answers to a ESC-Z or csi0c query.
*/
#define VT100ID "\033[?1;2c"
#define VT102ID "\033[?6c"
unsigned char color_table[] = { 0, 4, 2, 6, 1, 5, 3, 7,
8,12,10,14, 9,13,11,15 };
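/* Illustrative: ANSI colours use red/green/blue bit order while VGA
* attributes use blue/green/red, so ESC[31m (red) maps through
* color_table[1] == 4 into the attribute's low nibble.
*/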
/* the default colour table, for VGA+ colour systems */
int default_red[] = {0x00,0xaa,0x00,0xaa,0x00,0xaa,0x00,0xaa,
0x55,0xff,0x55,0xff,0x55,0xff,0x55,0xff};
int default_grn[] = {0x00,0x00,0xaa,0x55,0x00,0x00,0xaa,0xaa,
0x55,0x55,0xff,0xff,0x55,0x55,0xff,0xff};
int default_blu[] = {0x00,0x00,0x00,0x00,0xaa,0xaa,0xaa,0xaa,
0x55,0x55,0x55,0x55,0xff,0xff,0xff,0xff};
module_param_array(default_red, int, NULL, S_IRUGO | S_IWUSR);
module_param_array(default_grn, int, NULL, S_IRUGO | S_IWUSR);
module_param_array(default_blu, int, NULL, S_IRUGO | S_IWUSR);
/*
* gotoxy() must verify all boundaries, because the arguments
* might also be negative. If the given position is out of
* bounds, the cursor is placed at the nearest margin.
*/
static void gotoxy(struct vc_data *vc, int new_x, int new_y)
{
int min_y, max_y;
if (new_x < 0)
vc->vc_x = 0;
else {
if (new_x >= vc->vc_cols)
vc->vc_x = vc->vc_cols - 1;
else
vc->vc_x = new_x;
}
if (vc->vc_decom) {
min_y = vc->vc_top;
max_y = vc->vc_bottom;
} else {
min_y = 0;
max_y = vc->vc_rows;
}
if (new_y < min_y)
vc->vc_y = min_y;
else if (new_y >= max_y)
vc->vc_y = max_y - 1;
else
vc->vc_y = new_y;
vc->vc_pos = vc->vc_origin + vc->vc_y * vc->vc_size_row + (vc->vc_x<<1);
vc->vc_need_wrap = 0;
}
/* for absolute user moves, when decom is set */
static void gotoxay(struct vc_data *vc, int new_x, int new_y)
{
gotoxy(vc, new_x, vc->vc_decom ? (vc->vc_top + new_y) : new_y);
}
void scrollback(struct vc_data *vc, int lines)
{
if (!lines)
lines = vc->vc_rows / 2;
scrolldelta(-lines);
}
void scrollfront(struct vc_data *vc, int lines)
{
if (!lines)
lines = vc->vc_rows / 2;
scrolldelta(lines);
}
static void lf(struct vc_data *vc)
{
/* don't scroll if above bottom of scrolling region, or
* if below scrolling region
*/
if (vc->vc_y + 1 == vc->vc_bottom)
scrup(vc, vc->vc_top, vc->vc_bottom, 1);
else if (vc->vc_y < vc->vc_rows - 1) {
vc->vc_y++;
vc->vc_pos += vc->vc_size_row;
}
vc->vc_need_wrap = 0;
notify_write(vc, '\n');
}
static void ri(struct vc_data *vc)
{
/* don't scroll if below top of scrolling region, or
* if above scrolling region
*/
if (vc->vc_y == vc->vc_top)
scrdown(vc, vc->vc_top, vc->vc_bottom, 1);
else if (vc->vc_y > 0) {
vc->vc_y--;
vc->vc_pos -= vc->vc_size_row;
}
vc->vc_need_wrap = 0;
}
static inline void cr(struct vc_data *vc)
{
vc->vc_pos -= vc->vc_x << 1;
vc->vc_need_wrap = vc->vc_x = 0;
notify_write(vc, '\r');
}
static inline void bs(struct vc_data *vc)
{
if (vc->vc_x) {
vc->vc_pos -= 2;
vc->vc_x--;
vc->vc_need_wrap = 0;
notify_write(vc, '\b');
}
}
static inline void del(struct vc_data *vc)
{
/* ignored */
}
static void csi_J(struct vc_data *vc, int vpar)
{
unsigned int count;
unsigned short * start;
switch (vpar) {
case 0: /* erase from cursor to end of display */
count = (vc->vc_scr_end - vc->vc_pos) >> 1;
start = (unsigned short *)vc->vc_pos;
if (DO_UPDATE(vc)) {
/* do in two stages */
vc->vc_sw->con_clear(vc, vc->vc_y, vc->vc_x, 1,
vc->vc_cols - vc->vc_x);
vc->vc_sw->con_clear(vc, vc->vc_y + 1, 0,
vc->vc_rows - vc->vc_y - 1,
vc->vc_cols);
}
break;
case 1: /* erase from start to cursor */
count = ((vc->vc_pos - vc->vc_origin) >> 1) + 1;
start = (unsigned short *)vc->vc_origin;
if (DO_UPDATE(vc)) {
/* do in two stages */
vc->vc_sw->con_clear(vc, 0, 0, vc->vc_y,
vc->vc_cols);
vc->vc_sw->con_clear(vc, vc->vc_y, 0, 1,
vc->vc_x + 1);
}
break;
case 3: /* erase scroll-back buffer (and whole display) */
scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
vc->vc_screenbuf_size >> 1);
set_origin(vc);
if (CON_IS_VISIBLE(vc))
update_screen(vc);
/* fall through */
case 2: /* erase whole display */
count = vc->vc_cols * vc->vc_rows;
start = (unsigned short *)vc->vc_origin;
if (DO_UPDATE(vc))
vc->vc_sw->con_clear(vc, 0, 0,
vc->vc_rows,
vc->vc_cols);
break;
default:
return;
}
scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
vc->vc_need_wrap = 0;
}
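/* Illustrative: ESC[2J enters csi_J() with vpar == 2 and blanks all
* vc_cols * vc_rows cells with vc_video_erase_char; ESC[3J also wipes
* the scrollback buffer first, then falls through to the same path.
*/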
static void csi_K(struct vc_data *vc, int vpar)
{
unsigned int count;
unsigned short * start;
switch (vpar) {
case 0: /* erase from cursor to end of line */
count = vc->vc_cols - vc->vc_x;
start = (unsigned short *)vc->vc_pos;
if (DO_UPDATE(vc))
vc->vc_sw->con_clear(vc, vc->vc_y, vc->vc_x, 1,
vc->vc_cols - vc->vc_x);
break;
case 1: /* erase from start of line to cursor */
start = (unsigned short *)(vc->vc_pos - (vc->vc_x << 1));
count = vc->vc_x + 1;
if (DO_UPDATE(vc))
vc->vc_sw->con_clear(vc, vc->vc_y, 0, 1,
vc->vc_x + 1);
break;
case 2: /* erase whole line */
start = (unsigned short *)(vc->vc_pos - (vc->vc_x << 1));
count = vc->vc_cols;
if (DO_UPDATE(vc))
vc->vc_sw->con_clear(vc, vc->vc_y, 0, 1,
vc->vc_cols);
break;
default:
return;
}
scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
vc->vc_need_wrap = 0;
}
static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar positions */
{ /* not vt100? */
int count;
if (!vpar)
vpar++;
count = (vpar > vc->vc_cols - vc->vc_x) ? (vc->vc_cols - vc->vc_x) : vpar;
scr_memsetw((unsigned short *)vc->vc_pos, vc->vc_video_erase_char, 2 * count);
if (DO_UPDATE(vc))
vc->vc_sw->con_clear(vc, vc->vc_y, vc->vc_x, 1, count);
vc->vc_need_wrap = 0;
}
static void default_attr(struct vc_data *vc)
{
vc->vc_intensity = 1;
vc->vc_italic = 0;
vc->vc_underline = 0;
vc->vc_reverse = 0;
vc->vc_blink = 0;
vc->vc_color = vc->vc_def_color;
}
/* console_lock is held */
static void csi_m(struct vc_data *vc)
{
int i;
for (i = 0; i <= vc->vc_npar; i++)
switch (vc->vc_par[i]) {
case 0: /* all attributes off */
default_attr(vc);
break;
case 1:
vc->vc_intensity = 2;
break;
case 2:
vc->vc_intensity = 0;
break;
case 3:
vc->vc_italic = 1;
break;
case 4:
vc->vc_underline = 1;
break;
case 5:
vc->vc_blink = 1;
break;
case 7:
vc->vc_reverse = 1;
break;
case 10: /* ANSI X3.64-1979 (SCO-ish?)
* Select primary font, don't display
* control chars if defined, don't set
* bit 8 on output.
*/
vc->vc_translate = set_translate(vc->vc_charset == 0
? vc->vc_G0_charset
: vc->vc_G1_charset, vc);
vc->vc_disp_ctrl = 0;
vc->vc_toggle_meta = 0;
break;
case 11: /* ANSI X3.64-1979 (SCO-ish?)
* Select first alternate font, lets
* chars < 32 be displayed as ROM chars.
*/
vc->vc_translate = set_translate(IBMPC_MAP, vc);
vc->vc_disp_ctrl = 1;
vc->vc_toggle_meta = 0;
break;
case 12: /* ANSI X3.64-1979 (SCO-ish?)
* Select second alternate font, toggle
* high bit before displaying as ROM char.
*/
vc->vc_translate = set_translate(IBMPC_MAP, vc);
vc->vc_disp_ctrl = 1;
vc->vc_toggle_meta = 1;
break;
case 21:
case 22:
vc->vc_intensity = 1;
break;
case 23:
vc->vc_italic = 0;
break;
case 24:
vc->vc_underline = 0;
break;
case 25:
vc->vc_blink = 0;
break;
case 27:
vc->vc_reverse = 0;
break;
case 38: /* ANSI X3.64-1979 (SCO-ish?)
* Enables underscore, white foreground
* with white underscore (Linux - use
* default foreground).
*/
vc->vc_color = (vc->vc_def_color & 0x0f) | (vc->vc_color & 0xf0);
vc->vc_underline = 1;
break;
case 39: /* ANSI X3.64-1979 (SCO-ish?)
* Disable underline option.
* Reset colour to default? It did this
* before...
*/
vc->vc_color = (vc->vc_def_color & 0x0f) | (vc->vc_color & 0xf0);
vc->vc_underline = 0;
break;
case 49:
vc->vc_color = (vc->vc_def_color & 0xf0) | (vc->vc_color & 0x0f);
break;
default:
if (vc->vc_par[i] >= 30 && vc->vc_par[i] <= 37)
vc->vc_color = color_table[vc->vc_par[i] - 30]
| (vc->vc_color & 0xf0);
else if (vc->vc_par[i] >= 40 && vc->vc_par[i] <= 47)
vc->vc_color = (color_table[vc->vc_par[i] - 40] << 4)
| (vc->vc_color & 0x0f);
break;
}
update_attr(vc);
}
static void respond_string(const char *p, struct tty_struct *tty)
{
while (*p) {
tty_insert_flip_char(tty, *p, 0);
p++;
}
con_schedule_flip(tty);
}
static void cursor_report(struct vc_data *vc, struct tty_struct *tty)
{
char buf[40];
sprintf(buf, "\033[%d;%dR", vc->vc_y + (vc->vc_decom ? vc->vc_top + 1 : 1), vc->vc_x + 1);
respond_string(buf, tty);
}
static inline void status_report(struct tty_struct *tty)
{
respond_string("\033[0n", tty); /* Terminal ok */
}
static inline void respond_ID(struct tty_struct * tty)
{
respond_string(VT102ID, tty);
}
void mouse_report(struct tty_struct *tty, int butt, int mrx, int mry)
{
char buf[8];
sprintf(buf, "\033[M%c%c%c", (char)(' ' + butt), (char)('!' + mrx),
(char)('!' + mry));
respond_string(buf, tty);
}
/* invoked via ioctl(TIOCLINUX) and through set_selection */
int mouse_reporting(void)
{
return vc_cons[fg_console].d->vc_report_mouse;
}
/* console_lock is held */
static void set_mode(struct vc_data *vc, int on_off)
{
int i;
for (i = 0; i <= vc->vc_npar; i++)
if (vc->vc_ques) {
switch(vc->vc_par[i]) { /* DEC private modes set/reset */
case 1: /* Cursor keys send ^[Ox/^[[x */
if (on_off)
set_kbd(vc, decckm);
else
clr_kbd(vc, decckm);
break;
case 3: /* 80/132 mode switch unimplemented */
vc->vc_deccolm = on_off;
#if 0
vc_resize(deccolm ? 132 : 80, vc->vc_rows);
/* this alone does not suffice; some user mode
utility has to change the hardware regs */
#endif
break;
case 5: /* Inverted screen on/off */
if (vc->vc_decscnm != on_off) {
vc->vc_decscnm = on_off;
invert_screen(vc, 0, vc->vc_screenbuf_size, 0);
update_attr(vc);
}
break;
case 6: /* Origin relative/absolute */
vc->vc_decom = on_off;
gotoxay(vc, 0, 0);
break;
case 7: /* Autowrap on/off */
vc->vc_decawm = on_off;
break;
case 8: /* Autorepeat on/off */
if (on_off)
set_kbd(vc, decarm);
else
clr_kbd(vc, decarm);
break;
case 9:
vc->vc_report_mouse = on_off ? 1 : 0;
break;
case 25: /* Cursor on/off */
vc->vc_deccm = on_off;
break;
case 1000:
vc->vc_report_mouse = on_off ? 2 : 0;
break;
}
} else {
switch(vc->vc_par[i]) { /* ANSI modes set/reset */
case 3: /* Monitor (display ctrls) */
vc->vc_disp_ctrl = on_off;
break;
case 4: /* Insert Mode on/off */
vc->vc_decim = on_off;
break;
case 20: /* Lf, Enter == CrLf/Lf */
if (on_off)
set_kbd(vc, lnm);
else
clr_kbd(vc, lnm);
break;
}
}
}
/* console_lock is held */
static void setterm_command(struct vc_data *vc)
{
switch(vc->vc_par[0]) {
case 1: /* set color for underline mode */
if (vc->vc_can_do_color &&
vc->vc_par[1] < 16) {
vc->vc_ulcolor = color_table[vc->vc_par[1]];
if (vc->vc_underline)
update_attr(vc);
}
break;
case 2: /* set color for half intensity mode */
if (vc->vc_can_do_color &&
vc->vc_par[1] < 16) {
vc->vc_halfcolor = color_table[vc->vc_par[1]];
if (vc->vc_intensity == 0)
update_attr(vc);
}
break;
case 8: /* store colors as defaults */
vc->vc_def_color = vc->vc_attr;
if (vc->vc_hi_font_mask == 0x100)
vc->vc_def_color >>= 1;
default_attr(vc);
update_attr(vc);
break;
case 9: /* set blanking interval */
blankinterval = ((vc->vc_par[1] < 60) ? vc->vc_par[1] : 60) * 60;
poke_blanked_console();
break;
case 10: /* set bell frequency in Hz */
if (vc->vc_npar >= 1)
vc->vc_bell_pitch = vc->vc_par[1];
else
vc->vc_bell_pitch = DEFAULT_BELL_PITCH;
break;
case 11: /* set bell duration in msec */
if (vc->vc_npar >= 1)
vc->vc_bell_duration = (vc->vc_par[1] < 2000) ?
vc->vc_par[1] * HZ / 1000 : 0;
else
vc->vc_bell_duration = DEFAULT_BELL_DURATION;
break;
case 12: /* bring specified console to the front */
if (vc->vc_par[1] >= 1 && vc_cons_allocated(vc->vc_par[1] - 1))
set_console(vc->vc_par[1] - 1);
break;
case 13: /* unblank the screen */
poke_blanked_console();
break;
case 14: /* set vesa powerdown interval */
vesa_off_interval = ((vc->vc_par[1] < 60) ? vc->vc_par[1] : 60) * 60 * HZ;
break;
case 15: /* activate the previous console */
set_console(last_console);
break;
}
}
/* console_lock is held */
static void csi_at(struct vc_data *vc, unsigned int nr)
{
if (nr > vc->vc_cols - vc->vc_x)
nr = vc->vc_cols - vc->vc_x;
else if (!nr)
nr = 1;
insert_char(vc, nr);
}
/* console_lock is held */
static void csi_L(struct vc_data *vc, unsigned int nr)
{
if (nr > vc->vc_rows - vc->vc_y)
nr = vc->vc_rows - vc->vc_y;
else if (!nr)
nr = 1;
scrdown(vc, vc->vc_y, vc->vc_bottom, nr);
vc->vc_need_wrap = 0;
}
/* console_lock is held */
static void csi_P(struct vc_data *vc, unsigned int nr)
{
if (nr > vc->vc_cols - vc->vc_x)
nr = vc->vc_cols - vc->vc_x;
else if (!nr)
nr = 1;
delete_char(vc, nr);
}
/* console_lock is held */
static void csi_M(struct vc_data *vc, unsigned int nr)
{
if (nr > vc->vc_rows - vc->vc_y)
nr = vc->vc_rows - vc->vc_y;
else if (!nr)
nr = 1;
scrup(vc, vc->vc_y, vc->vc_bottom, nr);
vc->vc_need_wrap = 0;
}
/* console_lock is held (except via vc_init->reset_terminal) */
static void save_cur(struct vc_data *vc)
{
vc->vc_saved_x = vc->vc_x;
vc->vc_saved_y = vc->vc_y;
vc->vc_s_intensity = vc->vc_intensity;
vc->vc_s_italic = vc->vc_italic;
vc->vc_s_underline = vc->vc_underline;
vc->vc_s_blink = vc->vc_blink;
vc->vc_s_reverse = vc->vc_reverse;
vc->vc_s_charset = vc->vc_charset;
vc->vc_s_color = vc->vc_color;
vc->vc_saved_G0 = vc->vc_G0_charset;
vc->vc_saved_G1 = vc->vc_G1_charset;
}
/* console_lock is held */
static void restore_cur(struct vc_data *vc)
{
gotoxy(vc, vc->vc_saved_x, vc->vc_saved_y);
vc->vc_intensity = vc->vc_s_intensity;
vc->vc_italic = vc->vc_s_italic;
vc->vc_underline = vc->vc_s_underline;
vc->vc_blink = vc->vc_s_blink;
vc->vc_reverse = vc->vc_s_reverse;
vc->vc_charset = vc->vc_s_charset;
vc->vc_color = vc->vc_s_color;
vc->vc_G0_charset = vc->vc_saved_G0;
vc->vc_G1_charset = vc->vc_saved_G1;
vc->vc_translate = set_translate(vc->vc_charset ? vc->vc_G1_charset : vc->vc_G0_charset, vc);
update_attr(vc);
vc->vc_need_wrap = 0;
}
enum { ESnormal, ESesc, ESsquare, ESgetpars, ESgotpars, ESfunckey,
EShash, ESsetG0, ESsetG1, ESpercent, ESignore, ESnonstd,
ESpalette };
/* console_lock is held (except via vc_init()) */
static void reset_terminal(struct vc_data *vc, int do_clear)
{
vc->vc_top = 0;
vc->vc_bottom = vc->vc_rows;
vc->vc_state = ESnormal;
vc->vc_ques = 0;
vc->vc_translate = set_translate(LAT1_MAP, vc);
vc->vc_G0_charset = LAT1_MAP;
vc->vc_G1_charset = GRAF_MAP;
vc->vc_charset = 0;
vc->vc_need_wrap = 0;
vc->vc_report_mouse = 0;
vc->vc_utf = default_utf8;
vc->vc_utf_count = 0;
vc->vc_disp_ctrl = 0;
vc->vc_toggle_meta = 0;
vc->vc_decscnm = 0;
vc->vc_decom = 0;
vc->vc_decawm = 1;
vc->vc_deccm = global_cursor_default;
vc->vc_decim = 0;
set_kbd(vc, decarm);
clr_kbd(vc, decckm);
clr_kbd(vc, kbdapplic);
clr_kbd(vc, lnm);
kbd_table[vc->vc_num].lockstate = 0;
kbd_table[vc->vc_num].slockstate = 0;
kbd_table[vc->vc_num].ledmode = LED_SHOW_FLAGS;
kbd_table[vc->vc_num].ledflagstate = kbd_table[vc->vc_num].default_ledflagstate;
/* do not do set_leds here because this causes an endless tasklet loop
when the keyboard hasn't been initialized yet */
vc->vc_cursor_type = cur_default;
vc->vc_complement_mask = vc->vc_s_complement_mask;
default_attr(vc);
update_attr(vc);
vc->vc_tab_stop[0] = 0x01010100;
vc->vc_tab_stop[1] =
vc->vc_tab_stop[2] =
vc->vc_tab_stop[3] =
vc->vc_tab_stop[4] =
vc->vc_tab_stop[5] =
vc->vc_tab_stop[6] =
vc->vc_tab_stop[7] = 0x01010101;
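/* Illustrative: each vc_tab_stop word covers 32 columns, one bit per
* column. 0x01010101 sets bit 0 of every byte, i.e. a stop every 8th
* column; word 0 uses 0x01010100 so column 0 itself is not a stop.
*/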
vc->vc_bell_pitch = DEFAULT_BELL_PITCH;
vc->vc_bell_duration = DEFAULT_BELL_DURATION;
gotoxy(vc, 0, 0);
save_cur(vc);
if (do_clear)
csi_J(vc, 2);
}
/* console_lock is held */
static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
{
/*
* Control characters can be used in the _middle_
* of an escape sequence.
*/
switch (c) {
case 0:
return;
case 7:
if (vc->vc_bell_duration)
kd_mksound(vc->vc_bell_pitch, vc->vc_bell_duration);
return;
case 8:
bs(vc);
return;
case 9:
vc->vc_pos -= (vc->vc_x << 1);
while (vc->vc_x < vc->vc_cols - 1) {
vc->vc_x++;
if (vc->vc_tab_stop[vc->vc_x >> 5] & (1 << (vc->vc_x & 31)))
break;
}
vc->vc_pos += (vc->vc_x << 1);
notify_write(vc, '\t');
return;
case 10: case 11: case 12:
lf(vc);
if (!is_kbd(vc, lnm))
return;
case 13:
cr(vc);
return;
case 14:
vc->vc_charset = 1;
vc->vc_translate = set_translate(vc->vc_G1_charset, vc);
vc->vc_disp_ctrl = 1;
return;
case 15:
vc->vc_charset = 0;
vc->vc_translate = set_translate(vc->vc_G0_charset, vc);
vc->vc_disp_ctrl = 0;
return;
case 24: case 26:
vc->vc_state = ESnormal;
return;
case 27:
vc->vc_state = ESesc;
return;
case 127:
del(vc);
return;
case 128+27:
vc->vc_state = ESsquare;
return;
}
switch(vc->vc_state) {
case ESesc:
vc->vc_state = ESnormal;
switch (c) {
case '[':
vc->vc_state = ESsquare;
return;
case ']':
vc->vc_state = ESnonstd;
return;
case '%':
vc->vc_state = ESpercent;
return;
case 'E':
cr(vc);
lf(vc);
return;
case 'M':
ri(vc);
return;
case 'D':
lf(vc);
return;
case 'H':
vc->vc_tab_stop[vc->vc_x >> 5] |= (1 << (vc->vc_x & 31));
return;
case 'Z':
respond_ID(tty);
return;
case '7':
save_cur(vc);
return;
case '8':
restore_cur(vc);
return;
case '(':
vc->vc_state = ESsetG0;
return;
case ')':
vc->vc_state = ESsetG1;
return;
case '#':
vc->vc_state = EShash;
return;
case 'c':
reset_terminal(vc, 1);
return;
case '>': /* Numeric keypad */
clr_kbd(vc, kbdapplic);
return;
case '=': /* Appl. keypad */
set_kbd(vc, kbdapplic);
return;
}
return;
case ESnonstd:
if (c=='P') { /* palette escape sequence */
for (vc->vc_npar = 0; vc->vc_npar < NPAR; vc->vc_npar++)
vc->vc_par[vc->vc_npar] = 0;
vc->vc_npar = 0;
vc->vc_state = ESpalette;
return;
} else if (c=='R') { /* reset palette */
reset_palette(vc);
vc->vc_state = ESnormal;
} else
vc->vc_state = ESnormal;
return;
case ESpalette:
if (isxdigit(c)) {
vc->vc_par[vc->vc_npar++] = hex_to_bin(c);
if (vc->vc_npar == 7) {
int i = vc->vc_par[0] * 3, j = 1;
vc->vc_palette[i] = 16 * vc->vc_par[j++];
vc->vc_palette[i++] += vc->vc_par[j++];
vc->vc_palette[i] = 16 * vc->vc_par[j++];
vc->vc_palette[i++] += vc->vc_par[j++];
vc->vc_palette[i] = 16 * vc->vc_par[j++];
vc->vc_palette[i] += vc->vc_par[j];
set_palette(vc);
vc->vc_state = ESnormal;
}
} else
vc->vc_state = ESnormal;
return;
case ESsquare:
for (vc->vc_npar = 0; vc->vc_npar < NPAR; vc->vc_npar++)
vc->vc_par[vc->vc_npar] = 0;
vc->vc_npar = 0;
vc->vc_state = ESgetpars;
if (c == '[') { /* Function key */
vc->vc_state=ESfunckey;
return;
}
vc->vc_ques = (c == '?');
if (vc->vc_ques)
return;
case ESgetpars:
if (c == ';' && vc->vc_npar < NPAR - 1) {
vc->vc_npar++;
return;
} else if (c>='0' && c<='9') {
vc->vc_par[vc->vc_npar] *= 10;
vc->vc_par[vc->vc_npar] += c - '0';
return;
} else
vc->vc_state = ESgotpars;
case ESgotpars:
vc->vc_state = ESnormal;
switch(c) {
case 'h':
set_mode(vc, 1);
return;
case 'l':
set_mode(vc, 0);
return;
case 'c':
if (vc->vc_ques) {
if (vc->vc_par[0])
vc->vc_cursor_type = vc->vc_par[0] | (vc->vc_par[1] << 8) | (vc->vc_par[2] << 16);
else
vc->vc_cursor_type = cur_default;
return;
}
break;
case 'm':
if (vc->vc_ques) {
clear_selection();
if (vc->vc_par[0])
vc->vc_complement_mask = vc->vc_par[0] << 8 | vc->vc_par[1];
else
vc->vc_complement_mask = vc->vc_s_complement_mask;
return;
}
break;
case 'n':
if (!vc->vc_ques) {
if (vc->vc_par[0] == 5)
status_report(tty);
else if (vc->vc_par[0] == 6)
cursor_report(vc, tty);
}
return;
}
if (vc->vc_ques) {
vc->vc_ques = 0;
return;
}
switch(c) {
case 'G': case '`':
if (vc->vc_par[0])
vc->vc_par[0]--;
gotoxy(vc, vc->vc_par[0], vc->vc_y);
return;
case 'A':
if (!vc->vc_par[0])
vc->vc_par[0]++;
gotoxy(vc, vc->vc_x, vc->vc_y - vc->vc_par[0]);
return;
case 'B': case 'e':
if (!vc->vc_par[0])
vc->vc_par[0]++;
gotoxy(vc, vc->vc_x, vc->vc_y + vc->vc_par[0]);
return;
case 'C': case 'a':
if (!vc->vc_par[0])
vc->vc_par[0]++;
gotoxy(vc, vc->vc_x + vc->vc_par[0], vc->vc_y);
return;
case 'D':
if (!vc->vc_par[0])
vc->vc_par[0]++;
gotoxy(vc, vc->vc_x - vc->vc_par[0], vc->vc_y);
return;
case 'E':
if (!vc->vc_par[0])
vc->vc_par[0]++;
gotoxy(vc, 0, vc->vc_y + vc->vc_par[0]);
return;
case 'F':
if (!vc->vc_par[0])
vc->vc_par[0]++;
gotoxy(vc, 0, vc->vc_y - vc->vc_par[0]);
return;
case 'd':
if (vc->vc_par[0])
vc->vc_par[0]--;
gotoxay(vc, vc->vc_x ,vc->vc_par[0]);
return;
case 'H': case 'f':
if (vc->vc_par[0])
vc->vc_par[0]--;
if (vc->vc_par[1])
vc->vc_par[1]--;
gotoxay(vc, vc->vc_par[1], vc->vc_par[0]);
return;
case 'J':
csi_J(vc, vc->vc_par[0]);
return;
case 'K':
csi_K(vc, vc->vc_par[0]);
return;
case 'L':
csi_L(vc, vc->vc_par[0]);
return;
case 'M':
csi_M(vc, vc->vc_par[0]);
return;
case 'P':
csi_P(vc, vc->vc_par[0]);
return;
case 'c':
if (!vc->vc_par[0])
respond_ID(tty);
return;
case 'g':
if (!vc->vc_par[0])
vc->vc_tab_stop[vc->vc_x >> 5] &= ~(1 << (vc->vc_x & 31));
else if (vc->vc_par[0] == 3) {
vc->vc_tab_stop[0] =
vc->vc_tab_stop[1] =
vc->vc_tab_stop[2] =
vc->vc_tab_stop[3] =
vc->vc_tab_stop[4] =
vc->vc_tab_stop[5] =
vc->vc_tab_stop[6] =
vc->vc_tab_stop[7] = 0;
}
return;
case 'm':
csi_m(vc);
return;
case 'q': /* DECLL - but only 3 leds */
/* map 0,1,2,3 to 0,1,2,4 */
if (vc->vc_par[0] < 4)
setledstate(kbd_table + vc->vc_num,
(vc->vc_par[0] < 3) ? vc->vc_par[0] : 4);
return;
case 'r':
if (!vc->vc_par[0])
vc->vc_par[0]++;
if (!vc->vc_par[1])
vc->vc_par[1] = vc->vc_rows;
/* Minimum allowed region is 2 lines */
if (vc->vc_par[0] < vc->vc_par[1] &&
vc->vc_par[1] <= vc->vc_rows) {
vc->vc_top = vc->vc_par[0] - 1;
vc->vc_bottom = vc->vc_par[1];
gotoxay(vc, 0, 0);
}
return;
case 's':
save_cur(vc);
return;
case 'u':
restore_cur(vc);
return;
case 'X':
csi_X(vc, vc->vc_par[0]);
return;
case '@':
csi_at(vc, vc->vc_par[0]);
return;
case ']': /* setterm functions */
setterm_command(vc);
return;
}
return;
case ESpercent:
vc->vc_state = ESnormal;
switch (c) {
case '@': /* defined in ISO 2022 */
vc->vc_utf = 0;
return;
case 'G': /* prelim official escape code */
case '8': /* retained for compatibility */
vc->vc_utf = 1;
return;
}
return;
case ESfunckey:
vc->vc_state = ESnormal;
return;
case EShash:
vc->vc_state = ESnormal;
if (c == '8') {
/* DEC screen alignment test. kludge :-) */
vc->vc_video_erase_char =
(vc->vc_video_erase_char & 0xff00) | 'E';
csi_J(vc, 2);
vc->vc_video_erase_char =
(vc->vc_video_erase_char & 0xff00) | ' ';
do_update_region(vc, vc->vc_origin, vc->vc_screenbuf_size / 2);
}
return;
case ESsetG0:
if (c == '0')
vc->vc_G0_charset = GRAF_MAP;
else if (c == 'B')
vc->vc_G0_charset = LAT1_MAP;
else if (c == 'U')
vc->vc_G0_charset = IBMPC_MAP;
else if (c == 'K')
vc->vc_G0_charset = USER_MAP;
if (vc->vc_charset == 0)
vc->vc_translate = set_translate(vc->vc_G0_charset, vc);
vc->vc_state = ESnormal;
return;
case ESsetG1:
if (c == '0')
vc->vc_G1_charset = GRAF_MAP;
else if (c == 'B')
vc->vc_G1_charset = LAT1_MAP;
else if (c == 'U')
vc->vc_G1_charset = IBMPC_MAP;
else if (c == 'K')
vc->vc_G1_charset = USER_MAP;
if (vc->vc_charset == 1)
vc->vc_translate = set_translate(vc->vc_G1_charset, vc);
vc->vc_state = ESnormal;
return;
default:
vc->vc_state = ESnormal;
}
}
/* is_double_width() is based on the wcwidth() implementation by
* Markus Kuhn -- 2007-05-26 (Unicode 5.0)
* Latest version: http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
*/
struct interval {
uint32_t first;
uint32_t last;
};
static int bisearch(uint32_t ucs, const struct interval *table, int max)
{
int min = 0;
int mid;
if (ucs < table[0].first || ucs > table[max].last)
return 0;
while (max >= min) {
mid = (min + max) / 2;
if (ucs > table[mid].last)
min = mid + 1;
else if (ucs < table[mid].first)
max = mid - 1;
else
return 1;
}
return 0;
}
static int is_double_width(uint32_t ucs)
{
static const struct interval double_width[] = {
{ 0x1100, 0x115F }, { 0x2329, 0x232A }, { 0x2E80, 0x303E },
{ 0x3040, 0xA4CF }, { 0xAC00, 0xD7A3 }, { 0xF900, 0xFAFF },
{ 0xFE10, 0xFE19 }, { 0xFE30, 0xFE6F }, { 0xFF00, 0xFF60 },
{ 0xFFE0, 0xFFE6 }, { 0x20000, 0x2FFFD }, { 0x30000, 0x3FFFD }
};
return bisearch(ucs, double_width, ARRAY_SIZE(double_width) - 1);
}
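/* Illustrative: U+AC00, the first Hangul syllable, falls inside the
* { 0xAC00, 0xD7A3 } interval, so is_double_width(0xac00) returns 1
* and the glyph later occupies two columns (width == 2 below).
*/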
/* acquires console_lock */
static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
#ifdef VT_BUF_VRAM_ONLY
#define FLUSH do { } while(0);
#else
#define FLUSH if (draw_x >= 0) { \
vc->vc_sw->con_putcs(vc, (u16 *)draw_from, (u16 *)draw_to - (u16 *)draw_from, vc->vc_y, draw_x); \
draw_x = -1; \
}
#endif
int c, tc, ok, n = 0, draw_x = -1;
unsigned int currcons;
unsigned long draw_from = 0, draw_to = 0;
struct vc_data *vc;
unsigned char vc_attr;
struct vt_notifier_param param;
uint8_t rescan;
uint8_t inverse;
uint8_t width;
u16 himask, charmask;
if (in_interrupt())
return count;
might_sleep();
console_lock();
vc = tty->driver_data;
if (vc == NULL) {
printk(KERN_ERR "vt: argh, driver_data is NULL !\n");
console_unlock();
return 0;
}
currcons = vc->vc_num;
if (!vc_cons_allocated(currcons)) {
/* could this happen? */
pr_warn_once("con_write: tty %d not allocated\n", currcons+1);
console_unlock();
return 0;
}
himask = vc->vc_hi_font_mask;
charmask = himask ? 0x1ff : 0xff;
/* undraw cursor first */
if (IS_FG(vc))
hide_cursor(vc);
param.vc = vc;
while (!tty->stopped && count) {
int orig = *buf;
c = orig;
buf++;
n++;
count--;
rescan = 0;
inverse = 0;
width = 1;
/* Do no translation at all in control states */
if (vc->vc_state != ESnormal) {
tc = c;
} else if (vc->vc_utf && !vc->vc_disp_ctrl) {
/* Combine UTF-8 into Unicode in vc_utf_char.
* vc_utf_count is the number of continuation bytes still
* expected to arrive.
* vc_npar is the number of continuation bytes arrived so
* far
*/
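/* Illustrative example (assumed input, not from the source): for the
* 3-byte sequence 0xE2 0x82 0xAC the lead byte matches
* (c & 0xf0) == 0xe0, so vc_utf_count becomes 2 and vc_utf_char starts
* as 0x02; each continuation byte below shifts in its low six bits,
* yielding 0x20AC, i.e. U+20AC (the euro sign).
*/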
rescan_last_byte:
if ((c & 0xc0) == 0x80) {
/* Continuation byte received */
static const uint32_t utf8_length_changes[] = { 0x0000007f, 0x000007ff, 0x0000ffff, 0x001fffff, 0x03ffffff, 0x7fffffff };
if (vc->vc_utf_count) {
vc->vc_utf_char = (vc->vc_utf_char << 6) | (c & 0x3f);
vc->vc_npar++;
if (--vc->vc_utf_count) {
/* Still need some bytes */
continue;
}
/* Got a whole character */
c = vc->vc_utf_char;
/* Reject overlong sequences */
if (c <= utf8_length_changes[vc->vc_npar - 1] ||
c > utf8_length_changes[vc->vc_npar])
c = 0xfffd;
} else {
/* Unexpected continuation byte */
vc->vc_utf_count = 0;
c = 0xfffd;
}
} else {
/* Single ASCII byte or first byte of a sequence received */
if (vc->vc_utf_count) {
/* Continuation byte expected */
rescan = 1;
vc->vc_utf_count = 0;
c = 0xfffd;
} else if (c > 0x7f) {
/* First byte of a multibyte sequence received */
vc->vc_npar = 0;
if ((c & 0xe0) == 0xc0) {
vc->vc_utf_count = 1;
vc->vc_utf_char = (c & 0x1f);
} else if ((c & 0xf0) == 0xe0) {
vc->vc_utf_count = 2;
vc->vc_utf_char = (c & 0x0f);
} else if ((c & 0xf8) == 0xf0) {
vc->vc_utf_count = 3;
vc->vc_utf_char = (c & 0x07);
} else if ((c & 0xfc) == 0xf8) {
vc->vc_utf_count = 4;
vc->vc_utf_char = (c & 0x03);
} else if ((c & 0xfe) == 0xfc) {
vc->vc_utf_count = 5;
vc->vc_utf_char = (c & 0x01);
} else {
/* 254 and 255 are invalid */
c = 0xfffd;
}
if (vc->vc_utf_count) {
/* Still need some bytes */
continue;
}
}
/* Nothing to do if an ASCII byte was received */
}
/* End of UTF-8 decoding. */
/* c is the received character, or U+FFFD for invalid sequences. */
/* Replace invalid Unicode code points with U+FFFD too */
if ((c >= 0xd800 && c <= 0xdfff) || c == 0xfffe || c == 0xffff)
c = 0xfffd;
tc = c;
} else { /* no utf or alternate charset mode */
tc = vc_translate(vc, c);
}
param.c = tc;
if (atomic_notifier_call_chain(&vt_notifier_list, VT_PREWRITE,
&param) == NOTIFY_STOP)
continue;
/* If the original code was a control character we
* only allow a glyph to be displayed if the code is
* not normally used (such as for cursor movement) or
* if the disp_ctrl mode has been explicitly enabled.
* Certain characters (as given by the CTRL_ALWAYS
* bitmap) are always displayed as control characters,
* as the console would be pretty useless without
* them; to display an arbitrary font position use the
* direct-to-font zone in UTF-8 mode.
*/
ok = tc && (c >= 32 ||
!(vc->vc_disp_ctrl ? (CTRL_ALWAYS >> c) & 1 :
vc->vc_utf || ((CTRL_ACTION >> c) & 1)))
&& (c != 127 || vc->vc_disp_ctrl)
&& (c != 128+27);
if (vc->vc_state == ESnormal && ok) {
if (vc->vc_utf && !vc->vc_disp_ctrl) {
if (is_double_width(c))
width = 2;
}
/* Now try to find out how to display it */
tc = conv_uni_to_pc(vc, tc);
if (tc & ~charmask) {
if (tc == -1 || tc == -2) {
continue; /* nothing to display */
}
/* Glyph not found */
if ((!(vc->vc_utf && !vc->vc_disp_ctrl) || c < 128) && !(c & ~charmask)) {
/* In legacy mode use the glyph we get by a 1:1 mapping.
This would make absolutely no sense with Unicode in mind,
but do this for ASCII characters since a font may lack
Unicode mapping info and we don't want to end up with
having question marks only. */
tc = c;
} else {
/* Display U+FFFD. If it's not found, display an inverse question mark. */
tc = conv_uni_to_pc(vc, 0xfffd);
if (tc < 0) {
inverse = 1;
tc = conv_uni_to_pc(vc, '?');
if (tc < 0) tc = '?';
}
}
}
if (!inverse) {
vc_attr = vc->vc_attr;
} else {
/* invert vc_attr */
if (!vc->vc_can_do_color) {
vc_attr = (vc->vc_attr) ^ 0x08;
} else if (vc->vc_hi_font_mask == 0x100) {
vc_attr = ((vc->vc_attr) & 0x11) | (((vc->vc_attr) & 0xe0) >> 4) | (((vc->vc_attr) & 0x0e) << 4);
} else {
vc_attr = ((vc->vc_attr) & 0x88) | (((vc->vc_attr) & 0x70) >> 4) | (((vc->vc_attr) & 0x07) << 4);
}
FLUSH
}
while (1) {
if (vc->vc_need_wrap || vc->vc_decim)
FLUSH
if (vc->vc_need_wrap) {
cr(vc);
lf(vc);
}
if (vc->vc_decim)
insert_char(vc, 1);
scr_writew(himask ?
((vc_attr << 8) & ~himask) + ((tc & 0x100) ? himask : 0) + (tc & 0xff) :
(vc_attr << 8) + tc,
(u16 *) vc->vc_pos);
if (DO_UPDATE(vc) && draw_x < 0) {
draw_x = vc->vc_x;
draw_from = vc->vc_pos;
}
if (vc->vc_x == vc->vc_cols - 1) {
vc->vc_need_wrap = vc->vc_decawm;
draw_to = vc->vc_pos + 2;
} else {
vc->vc_x++;
draw_to = (vc->vc_pos += 2);
}
if (!--width) break;
tc = conv_uni_to_pc(vc, ' '); /* A space is printed in the second column */
if (tc < 0) tc = ' ';
}
notify_write(vc, c);
if (inverse) {
FLUSH
}
if (rescan) {
rescan = 0;
inverse = 0;
width = 1;
c = orig;
goto rescan_last_byte;
}
continue;
}
FLUSH
do_con_trol(tty, vc, orig);
}
FLUSH
console_conditional_schedule();
console_unlock();
notify_update(vc);
return n;
#undef FLUSH
}
/*
* This is the console switching callback.
*
* Doing console switching in a process context allows
* us to do the switches asynchronously (needed when we want
* to switch due to a keyboard interrupt). Synchronization
* with other console code and prevention of re-entrancy is
* ensured with console_lock.
*/
static void console_callback(struct work_struct *ignored)
{
console_lock();
if (want_console >= 0) {
if (want_console != fg_console &&
vc_cons_allocated(want_console)) {
hide_cursor(vc_cons[fg_console].d);
change_console(vc_cons[want_console].d);
/* we only changed when the console had already
been allocated - a new console is not created
in an interrupt routine */
}
want_console = -1;
}
if (do_poke_blanked_console) { /* do not unblank for a LED change */
do_poke_blanked_console = 0;
poke_blanked_console();
}
if (scrollback_delta) {
struct vc_data *vc = vc_cons[fg_console].d;
clear_selection();
if (vc->vc_mode == KD_TEXT)
vc->vc_sw->con_scrolldelta(vc, scrollback_delta);
scrollback_delta = 0;
}
if (blank_timer_expired) {
do_blank_screen(0);
blank_timer_expired = 0;
}
notify_update(vc_cons[fg_console].d);
console_unlock();
}
int set_console(int nr)
{
struct vc_data *vc = vc_cons[fg_console].d;
if (!vc_cons_allocated(nr) || vt_dont_switch ||
(vc->vt_mode.mode == VT_AUTO && vc->vc_mode == KD_GRAPHICS)) {
/*
* Console switch will fail in console_callback() or
* change_console() so there is no point scheduling
* the callback
*
* Existing set_console() users don't check the return
* value so this shouldn't break anything
*/
return -EINVAL;
}
want_console = nr;
schedule_console_callback();
return 0;
}
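/*
* Illustrative usage (hypothetical caller): a caller such as the keyboard
* handler can request an asynchronous switch to the third VC with
* set_console(2); console numbers are zero-based here, so this corresponds
* to /dev/tty3.
*/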
struct tty_driver *console_driver;
#ifdef CONFIG_VT_CONSOLE
/**
* vt_kmsg_redirect() - Sets/gets the kernel message console
* @new: The new virtual terminal number or -1 if the console should stay
* unchanged
*
* By default, the kernel messages are always printed on the current virtual
* console. However, the user may modify that default with the
* TIOCL_SETKMSGREDIRECT ioctl call.
*
* This function sets the kernel message console to be @new. It returns the old
* virtual console number. The virtual terminal number 0 (both as parameter and
* return value) means no redirection (i.e. always printed on the currently
* active console).
*
* The parameter -1 means that only the current console is returned, but the
* value is not modified. You may use the macro vt_get_kmsg_redirect() in that
* case to make the code more understandable.
*
* When the kernel is compiled without CONFIG_VT_CONSOLE, this function ignores
* the parameter and always returns 0.
*/
int vt_kmsg_redirect(int new)
{
static int kmsg_con;
if (new != -1)
return xchg(&kmsg_con, new);
else
return kmsg_con;
}
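/*
* Illustrative userspace usage (not part of this file): redirecting kernel
* messages to tty3 goes through the TIOCLINUX ioctl handled by tioclinux()
* below, with the target console in the byte after the subcode:
*
*	char buf[2] = { TIOCL_SETKMSGREDIRECT, 3 };
*	ioctl(fd, TIOCLINUX, buf);   (fd: any VT, needs CAP_SYS_ADMIN)
*/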
/*
* Console on virtual terminal
*
* The console must be locked when we get here.
*/
static void vt_console_print(struct console *co, const char *b, unsigned count)
{
struct vc_data *vc = vc_cons[fg_console].d;
unsigned char c;
static DEFINE_SPINLOCK(printing_lock);
const ushort *start;
ushort cnt = 0;
ushort myx;
int kmsg_console;
/* console busy or not yet initialized */
if (!printable)
return;
if (!spin_trylock(&printing_lock))
return;
kmsg_console = vt_get_kmsg_redirect();
if (kmsg_console && vc_cons_allocated(kmsg_console - 1))
vc = vc_cons[kmsg_console - 1].d;
/* read `x' only after setting currcons properly (otherwise
the `x' macro will read the x of the foreground console). */
myx = vc->vc_x;
if (!vc_cons_allocated(fg_console)) {
/* impossible */
/* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */
goto quit;
}
if (vc->vc_mode != KD_TEXT && !vt_force_oops_output(vc))
goto quit;
/* undraw cursor first */
if (IS_FG(vc))
hide_cursor(vc);
start = (ushort *)vc->vc_pos;
/* Contrived structure to try to emulate original need_wrap behaviour.
* Problems are caused when we have need_wrap set on a '\n' character. */
while (count--) {
c = *b++;
if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
if (cnt > 0) {
if (CON_IS_VISIBLE(vc))
vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
vc->vc_x += cnt;
if (vc->vc_need_wrap)
vc->vc_x--;
cnt = 0;
}
if (c == 8) { /* backspace */
bs(vc);
start = (ushort *)vc->vc_pos;
myx = vc->vc_x;
continue;
}
if (c != 13)
lf(vc);
cr(vc);
start = (ushort *)vc->vc_pos;
myx = vc->vc_x;
if (c == 10 || c == 13)
continue;
}
scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos);
notify_write(vc, c);
cnt++;
if (myx == vc->vc_cols - 1) {
vc->vc_need_wrap = 1;
continue;
}
vc->vc_pos += 2;
myx++;
}
if (cnt > 0) {
if (CON_IS_VISIBLE(vc))
vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
vc->vc_x += cnt;
if (vc->vc_x == vc->vc_cols) {
vc->vc_x--;
vc->vc_need_wrap = 1;
}
}
set_cursor(vc);
notify_update(vc);
quit:
spin_unlock(&printing_lock);
}
static struct tty_driver *vt_console_device(struct console *c, int *index)
{
*index = c->index ? c->index-1 : fg_console;
return console_driver;
}
static struct console vt_console_driver = {
.name = "tty",
.write = vt_console_print,
.device = vt_console_device,
.unblank = unblank_screen,
.flags = CON_PRINTBUFFER,
.index = -1,
};
#endif
/*
* Handling of Linux-specific VC ioctls
*/
/*
* Generally a bit racy with respect to console_lock().
*
* There are some functions which don't need it.
*
* There are some functions which can sleep for arbitrary periods
* (paste_selection) but we don't need the lock there anyway.
*
* set_selection has locking, and definitely needs it
*/
int tioclinux(struct tty_struct *tty, unsigned long arg)
{
char type, data;
char __user *p = (char __user *)arg;
int lines;
int ret;
if (current->signal->tty != tty && !capable(CAP_SYS_ADMIN))
return -EPERM;
if (get_user(type, p))
return -EFAULT;
ret = 0;
switch (type)
{
case TIOCL_SETSEL:
console_lock();
ret = set_selection((struct tiocl_selection __user *)(p+1), tty);
console_unlock();
break;
case TIOCL_PASTESEL:
ret = paste_selection(tty);
break;
case TIOCL_UNBLANKSCREEN:
console_lock();
unblank_screen();
console_unlock();
break;
case TIOCL_SELLOADLUT:
ret = sel_loadlut(p);
break;
case TIOCL_GETSHIFTSTATE:
/*
* Make it possible to react to Shift+Mousebutton.
* Note that 'shift_state' is an undocumented
* kernel-internal variable; programs not closely
* related to the kernel should not use this.
*/
data = shift_state;
ret = __put_user(data, p);
break;
case TIOCL_GETMOUSEREPORTING:
data = mouse_reporting();
ret = __put_user(data, p);
break;
case TIOCL_SETVESABLANK:
ret = set_vesa_blanking(p);
break;
case TIOCL_GETKMSGREDIRECT:
data = vt_get_kmsg_redirect();
ret = __put_user(data, p);
break;
case TIOCL_SETKMSGREDIRECT:
if (!capable(CAP_SYS_ADMIN)) {
ret = -EPERM;
} else {
if (get_user(data, p+1))
ret = -EFAULT;
else
vt_kmsg_redirect(data);
}
break;
case TIOCL_GETFGCONSOLE:
ret = fg_console;
break;
case TIOCL_SCROLLCONSOLE:
if (get_user(lines, (s32 __user *)(p+4))) {
ret = -EFAULT;
} else {
scrollfront(vc_cons[fg_console].d, lines);
ret = 0;
}
break;
case TIOCL_BLANKSCREEN: /* until explicitly unblanked, not only poked */
console_lock();
ignore_poke = 1;
do_blank_screen(0);
console_unlock();
break;
case TIOCL_BLANKEDSCREEN:
ret = console_blanked;
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
/*
* /dev/ttyN handling
*/
static int con_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
int retval;
retval = do_con_write(tty, buf, count);
con_flush_chars(tty);
return retval;
}
static int con_put_char(struct tty_struct *tty, unsigned char ch)
{
if (in_interrupt())
return 0; /* n_r3964 calls put_char() from interrupt context */
return do_con_write(tty, &ch, 1);
}
static int con_write_room(struct tty_struct *tty)
{
if (tty->stopped)
return 0;
return 32768; /* No limit, really; we're not buffering */
}
static int con_chars_in_buffer(struct tty_struct *tty)
{
return 0; /* we're not buffering */
}
/*
* con_throttle and con_unthrottle are only used for
* paste_selection(), which has to stuff in a large number of
* characters...
*/
static void con_throttle(struct tty_struct *tty)
{
}
static void con_unthrottle(struct tty_struct *tty)
{
struct vc_data *vc = tty->driver_data;
wake_up_interruptible(&vc->paste_wait);
}
/*
* Turn the Scroll-Lock LED on when the tty is stopped
*/
static void con_stop(struct tty_struct *tty)
{
int console_num;
if (!tty)
return;
console_num = tty->index;
if (!vc_cons_allocated(console_num))
return;
set_vc_kbd_led(kbd_table + console_num, VC_SCROLLOCK);
set_leds();
}
/*
* Turn the Scroll-Lock LED off when the console is started
*/
static void con_start(struct tty_struct *tty)
{
int console_num;
if (!tty)
return;
console_num = tty->index;
if (!vc_cons_allocated(console_num))
return;
clr_vc_kbd_led(kbd_table + console_num, VC_SCROLLOCK);
set_leds();
}
static void con_flush_chars(struct tty_struct *tty)
{
struct vc_data *vc;
if (in_interrupt()) /* from flush_to_ldisc */
return;
/* if we race with con_close(), vt may be null */
console_lock();
vc = tty->driver_data;
if (vc)
set_cursor(vc);
console_unlock();
}
/*
* Allocate the console screen memory.
*/
static int con_open(struct tty_struct *tty, struct file *filp)
{
unsigned int currcons = tty->index;
int ret = 0;
console_lock();
if (tty->driver_data == NULL) {
ret = vc_allocate(currcons);
if (ret == 0) {
struct vc_data *vc = vc_cons[currcons].d;
/* Still being freed */
if (vc->port.tty) {
console_unlock();
return -ERESTARTSYS;
}
tty->driver_data = vc;
vc->port.tty = tty;
if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
tty->winsize.ws_row = vc_cons[currcons].d->vc_rows;
tty->winsize.ws_col = vc_cons[currcons].d->vc_cols;
}
if (vc->vc_utf)
tty->termios->c_iflag |= IUTF8;
else
tty->termios->c_iflag &= ~IUTF8;
console_unlock();
return ret;
}
}
console_unlock();
return ret;
}
static void con_close(struct tty_struct *tty, struct file *filp)
{
/* Nothing to do - we defer to shutdown */
}
static void con_shutdown(struct tty_struct *tty)
{
struct vc_data *vc = tty->driver_data;
BUG_ON(vc == NULL);
console_lock();
vc->port.tty = NULL;
console_unlock();
tty_shutdown(tty);
}
static int default_italic_color = 2; // green (ASCII)
static int default_underline_color = 3; // cyan (ASCII)
module_param_named(italic, default_italic_color, int, S_IRUGO | S_IWUSR);
module_param_named(underline, default_underline_color, int, S_IRUGO | S_IWUSR);
static void vc_init(struct vc_data *vc, unsigned int rows,
unsigned int cols, int do_clear)
{
int j, k ;
vc->vc_cols = cols;
vc->vc_rows = rows;
vc->vc_size_row = cols << 1;
vc->vc_screenbuf_size = vc->vc_rows * vc->vc_size_row;
set_origin(vc);
vc->vc_pos = vc->vc_origin;
reset_vc(vc);
for (j=k=0; j<16; j++) {
vc->vc_palette[k++] = default_red[j] ;
vc->vc_palette[k++] = default_grn[j] ;
vc->vc_palette[k++] = default_blu[j] ;
}
vc->vc_def_color = 0x07; /* white */
vc->vc_ulcolor = default_underline_color;
vc->vc_itcolor = default_italic_color;
vc->vc_halfcolor = 0x08; /* grey */
init_waitqueue_head(&vc->paste_wait);
reset_terminal(vc, do_clear);
}
/*
* This routine initializes console interrupts, and does nothing
* else. If you want the screen to clear, call tty_write with
* the appropriate escape-sequence.
*/
static int __init con_init(void)
{
const char *display_desc = NULL;
struct vc_data *vc;
unsigned int currcons = 0, i;
console_lock();
if (conswitchp)
display_desc = conswitchp->con_startup();
if (!display_desc) {
fg_console = 0;
console_unlock();
return 0;
}
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
struct con_driver *con_driver = &registered_con_driver[i];
if (con_driver->con == NULL) {
con_driver->con = conswitchp;
con_driver->desc = display_desc;
con_driver->flag = CON_DRIVER_FLAG_INIT;
con_driver->first = 0;
con_driver->last = MAX_NR_CONSOLES - 1;
break;
}
}
for (i = 0; i < MAX_NR_CONSOLES; i++)
con_driver_map[i] = conswitchp;
if (blankinterval) {
blank_state = blank_normal_wait;
mod_timer(&console_timer, jiffies + (blankinterval * HZ));
}
for (currcons = 0; currcons < MIN_NR_CONSOLES; currcons++) {
vc_cons[currcons].d = vc = kzalloc(sizeof(struct vc_data), GFP_NOWAIT);
INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
tty_port_init(&vc->port);
visual_init(vc, currcons, 1);
vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT);
vc_init(vc, vc->vc_rows, vc->vc_cols,
currcons || !vc->vc_sw->con_save_screen);
}
currcons = fg_console = 0;
master_display_fg = vc = vc_cons[currcons].d;
set_origin(vc);
save_screen(vc);
gotoxy(vc, vc->vc_x, vc->vc_y);
csi_J(vc, 0);
update_screen(vc);
pr_info("Console: %s %s %dx%d",
vc->vc_can_do_color ? "colour" : "mono",
display_desc, vc->vc_cols, vc->vc_rows);
printable = 1;
printk("\n");
console_unlock();
#ifdef CONFIG_VT_CONSOLE
register_console(&vt_console_driver);
#endif
return 0;
}
console_initcall(con_init);
static const struct tty_operations con_ops = {
.open = con_open,
.close = con_close,
.write = con_write,
.write_room = con_write_room,
.put_char = con_put_char,
.flush_chars = con_flush_chars,
.chars_in_buffer = con_chars_in_buffer,
.ioctl = vt_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = vt_compat_ioctl,
#endif
.stop = con_stop,
.start = con_start,
.throttle = con_throttle,
.unthrottle = con_unthrottle,
.resize = vt_resize,
.shutdown = con_shutdown
};
static struct cdev vc0_cdev;
static ssize_t show_tty_active(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "tty%d\n", fg_console + 1);
}
static DEVICE_ATTR(active, S_IRUGO, show_tty_active, NULL);
int __init vty_init(const struct file_operations *console_fops)
{
cdev_init(&vc0_cdev, console_fops);
if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1, "/dev/vc/0") < 0)
panic("Couldn't register /dev/tty0 driver\n");
tty0dev = device_create(tty_class, NULL, MKDEV(TTY_MAJOR, 0), NULL, "tty0");
if (IS_ERR(tty0dev))
tty0dev = NULL;
else
WARN_ON(device_create_file(tty0dev, &dev_attr_active) < 0);
vcs_init();
console_driver = alloc_tty_driver(MAX_NR_CONSOLES);
if (!console_driver)
panic("Couldn't allocate console driver\n");
console_driver->owner = THIS_MODULE;
console_driver->name = "tty";
console_driver->name_base = 1;
console_driver->major = TTY_MAJOR;
console_driver->minor_start = 1;
console_driver->type = TTY_DRIVER_TYPE_CONSOLE;
console_driver->init_termios = tty_std_termios;
if (default_utf8)
console_driver->init_termios.c_iflag |= IUTF8;
console_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS;
tty_set_operations(console_driver, &con_ops);
if (tty_register_driver(console_driver))
panic("Couldn't register console driver\n");
kbd_init();
console_map_init();
#ifdef CONFIG_MDA_CONSOLE
mda_console_init();
#endif
return 0;
}
#ifndef VT_SINGLE_DRIVER
static struct class *vtconsole_class;
static int bind_con_driver(const struct consw *csw, int first, int last,
int deflt)
{
struct module *owner = csw->owner;
const char *desc = NULL;
struct con_driver *con_driver;
int i, j = -1, k = -1, retval = -ENODEV;
if (!try_module_get(owner))
return -ENODEV;
console_lock();
/* check if driver is registered */
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
con_driver = &registered_con_driver[i];
if (con_driver->con == csw) {
desc = con_driver->desc;
retval = 0;
break;
}
}
if (retval)
goto err;
if (!(con_driver->flag & CON_DRIVER_FLAG_INIT)) {
csw->con_startup();
con_driver->flag |= CON_DRIVER_FLAG_INIT;
}
if (deflt) {
if (conswitchp)
module_put(conswitchp->owner);
__module_get(owner);
conswitchp = csw;
}
first = max(first, con_driver->first);
last = min(last, con_driver->last);
for (i = first; i <= last; i++) {
int old_was_color;
struct vc_data *vc = vc_cons[i].d;
if (con_driver_map[i])
module_put(con_driver_map[i]->owner);
__module_get(owner);
con_driver_map[i] = csw;
if (!vc || !vc->vc_sw)
continue;
j = i;
if (CON_IS_VISIBLE(vc)) {
k = i;
save_screen(vc);
}
old_was_color = vc->vc_can_do_color;
vc->vc_sw->con_deinit(vc);
vc->vc_origin = (unsigned long)vc->vc_screenbuf;
visual_init(vc, i, 0);
set_origin(vc);
update_attr(vc);
/* If the console changed between mono <-> color, then
* the attributes in the screenbuf will be wrong. The
* following resets all attributes to something sane.
*/
if (old_was_color != vc->vc_can_do_color)
clear_buffer_attributes(vc);
}
pr_info("Console: switching ");
if (!deflt)
printk("consoles %d-%d ", first+1, last+1);
if (j >= 0) {
struct vc_data *vc = vc_cons[j].d;
printk("to %s %s %dx%d\n",
vc->vc_can_do_color ? "colour" : "mono",
desc, vc->vc_cols, vc->vc_rows);
if (k >= 0) {
vc = vc_cons[k].d;
update_screen(vc);
}
} else
printk("to %s\n", desc);
retval = 0;
err:
console_unlock();
module_put(owner);
return retval;
};
#ifdef CONFIG_VT_HW_CONSOLE_BINDING
static int con_is_graphics(const struct consw *csw, int first, int last)
{
int i, retval = 0;
for (i = first; i <= last; i++) {
struct vc_data *vc = vc_cons[i].d;
if (vc && vc->vc_mode == KD_GRAPHICS) {
retval = 1;
break;
}
}
return retval;
}
/**
* unbind_con_driver - unbind a console driver
* @csw: pointer to console driver to unregister
* @first: first in range of consoles that @csw should be unbound from
* @last: last in range of consoles that @csw should be unbound from
* @deflt: should next bound console driver be default after @csw is unbound?
*
* To unbind a driver from all possible consoles, pass 0 as @first and
* %MAX_NR_CONSOLES as @last.
*
* @deflt controls whether the console that ends up replacing @csw should be
* the default console.
*
* RETURNS:
* -ENODEV if @csw isn't a registered console driver or can't be unregistered
* or 0 on success.
*/
int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
{
struct module *owner = csw->owner;
const struct consw *defcsw = NULL;
struct con_driver *con_driver = NULL, *con_back = NULL;
int i, retval = -ENODEV;
if (!try_module_get(owner))
return -ENODEV;
console_lock();
/* check if driver is registered and if it is unbindable */
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
con_driver = &registered_con_driver[i];
if (con_driver->con == csw &&
con_driver->flag & CON_DRIVER_FLAG_MODULE) {
retval = 0;
break;
}
}
if (retval) {
console_unlock();
goto err;
}
retval = -ENODEV;
/* check if backup driver exists */
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
con_back = &registered_con_driver[i];
if (con_back->con &&
!(con_back->flag & CON_DRIVER_FLAG_MODULE)) {
defcsw = con_back->con;
retval = 0;
break;
}
}
if (retval) {
console_unlock();
goto err;
}
if (!con_is_bound(csw)) {
console_unlock();
goto err;
}
first = max(first, con_driver->first);
last = min(last, con_driver->last);
for (i = first; i <= last; i++) {
if (con_driver_map[i] == csw) {
module_put(csw->owner);
con_driver_map[i] = NULL;
}
}
if (!con_is_bound(defcsw)) {
const struct consw *defconsw = conswitchp;
defcsw->con_startup();
con_back->flag |= CON_DRIVER_FLAG_INIT;
/*
* vgacon may change the default driver to point
* to dummycon, we restore it here...
*/
conswitchp = defconsw;
}
if (!con_is_bound(csw))
con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
console_unlock();
/* ignore return value, binding should not fail */
bind_con_driver(defcsw, first, last, deflt);
err:
module_put(owner);
return retval;
}
EXPORT_SYMBOL(unbind_con_driver);
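/*
* Illustrative call (hypothetical driver): a modular console driver that
* wants to detach from every console it may be bound to, making the
* replacement driver the default, would use
*
*	unbind_con_driver(&my_consw, 0, MAX_NR_CONSOLES - 1, 1);
*
* where my_consw is the driver's own struct consw.
*/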
static int vt_bind(struct con_driver *con)
{
const struct consw *defcsw = NULL, *csw = NULL;
int i, more = 1, first = -1, last = -1, deflt = 0;
if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE) ||
con_is_graphics(con->con, con->first, con->last))
goto err;
csw = con->con;
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
struct con_driver *con = &registered_con_driver[i];
if (con->con && !(con->flag & CON_DRIVER_FLAG_MODULE)) {
defcsw = con->con;
break;
}
}
if (!defcsw)
goto err;
while (more) {
more = 0;
for (i = con->first; i <= con->last; i++) {
if (con_driver_map[i] == defcsw) {
if (first == -1)
first = i;
last = i;
more = 1;
} else if (first != -1)
break;
}
if (first == 0 && last == MAX_NR_CONSOLES -1)
deflt = 1;
if (first != -1)
bind_con_driver(csw, first, last, deflt);
first = -1;
last = -1;
deflt = 0;
}
err:
return 0;
}
static int vt_unbind(struct con_driver *con)
{
const struct consw *csw = NULL;
int i, more = 1, first = -1, last = -1, deflt = 0;
if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE) ||
con_is_graphics(con->con, con->first, con->last))
goto err;
csw = con->con;
while (more) {
more = 0;
for (i = con->first; i <= con->last; i++) {
if (con_driver_map[i] == csw) {
if (first == -1)
first = i;
last = i;
more = 1;
} else if (first != -1)
break;
}
if (first == 0 && last == MAX_NR_CONSOLES -1)
deflt = 1;
if (first != -1)
unbind_con_driver(csw, first, last, deflt);
first = -1;
last = -1;
deflt = 0;
}
err:
return 0;
}
#else
static inline int vt_bind(struct con_driver *con)
{
return 0;
}
static inline int vt_unbind(struct con_driver *con)
{
return 0;
}
#endif /* CONFIG_VT_HW_CONSOLE_BINDING */
static ssize_t store_bind(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct con_driver *con = dev_get_drvdata(dev);
int bind = simple_strtoul(buf, NULL, 0);
if (bind)
vt_bind(con);
else
vt_unbind(con);
return count;
}
static ssize_t show_bind(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct con_driver *con = dev_get_drvdata(dev);
int bind = con_is_bound(con->con);
return snprintf(buf, PAGE_SIZE, "%i\n", bind);
}
static ssize_t show_name(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct con_driver *con = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%s %s\n",
(con->flag & CON_DRIVER_FLAG_MODULE) ? "(M)" : "(S)",
con->desc);
}
static struct device_attribute device_attrs[] = {
__ATTR(bind, S_IRUGO|S_IWUSR, show_bind, store_bind),
__ATTR(name, S_IRUGO, show_name, NULL),
};
static int vtconsole_init_device(struct con_driver *con)
{
int i;
int error = 0;
con->flag |= CON_DRIVER_FLAG_ATTR;
dev_set_drvdata(con->dev, con);
for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
error = device_create_file(con->dev, &device_attrs[i]);
if (error)
break;
}
if (error) {
while (--i >= 0)
device_remove_file(con->dev, &device_attrs[i]);
con->flag &= ~CON_DRIVER_FLAG_ATTR;
}
return error;
}
static void vtconsole_deinit_device(struct con_driver *con)
{
int i;
if (con->flag & CON_DRIVER_FLAG_ATTR) {
for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
device_remove_file(con->dev, &device_attrs[i]);
con->flag &= ~CON_DRIVER_FLAG_ATTR;
}
}
/**
* con_is_bound - checks if driver is bound to the console
* @csw: console driver
*
* RETURNS: zero if unbound, nonzero if bound
*
* Drivers can call this and if zero, they should release
* all resources allocated on con_startup()
*/
int con_is_bound(const struct consw *csw)
{
int i, bound = 0;
for (i = 0; i < MAX_NR_CONSOLES; i++) {
if (con_driver_map[i] == csw) {
bound = 1;
break;
}
}
return bound;
}
EXPORT_SYMBOL(con_is_bound);
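/*
* Illustrative pattern (hypothetical driver): the check described above
* typically appears in a driver's module exit path:
*
*	if (!con_is_bound(&my_consw))
*		my_driver_free_startup_resources();
*/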
/**
* con_debug_enter - prepare the console for the kernel debugger
* @sw: console driver
*
* Called when the console is taken over by the kernel debugger, this
* function needs to save the current console state, then put the console
* into a state suitable for the kernel debugger.
*
* RETURNS:
* Zero on success, nonzero if a failure occurred when trying to prepare
* the console for the debugger.
*/
int con_debug_enter(struct vc_data *vc)
{
int ret = 0;
saved_fg_console = fg_console;
saved_last_console = last_console;
saved_want_console = want_console;
saved_vc_mode = vc->vc_mode;
saved_console_blanked = console_blanked;
vc->vc_mode = KD_TEXT;
console_blanked = 0;
if (vc->vc_sw->con_debug_enter)
ret = vc->vc_sw->con_debug_enter(vc);
#ifdef CONFIG_KGDB_KDB
/* Set the initial LINES variable if it is not already set */
if (vc->vc_rows < 999) {
int linecount;
char lns[4];
const char *setargs[3] = {
"set",
"LINES",
lns,
};
if (kdbgetintenv(setargs[0], &linecount)) {
snprintf(lns, 4, "%i", vc->vc_rows);
kdb_set(2, setargs);
}
}
#endif /* CONFIG_KGDB_KDB */
return ret;
}
EXPORT_SYMBOL_GPL(con_debug_enter);
/**
* con_debug_leave - restore console state
* @sw: console driver
*
* Restore the console state to what it was before the kernel debugger
* was invoked.
*
* RETURNS:
* Zero on success, nonzero if a failure occurred when trying to restore
* the console.
*/
int con_debug_leave(void)
{
struct vc_data *vc;
int ret = 0;
fg_console = saved_fg_console;
last_console = saved_last_console;
want_console = saved_want_console;
console_blanked = saved_console_blanked;
vc_cons[fg_console].d->vc_mode = saved_vc_mode;
vc = vc_cons[fg_console].d;
if (vc->vc_sw->con_debug_leave)
ret = vc->vc_sw->con_debug_leave(vc);
return ret;
}
EXPORT_SYMBOL_GPL(con_debug_leave);
/**
* register_con_driver - register console driver to console layer
* @csw: console driver
* @first: the first console to take over, minimum value is 0
* @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
*
* DESCRIPTION: This function registers a console driver which can later
* bind to a range of consoles specified by @first and @last. It will
* also initialize the console driver by calling con_startup().
*/
int register_con_driver(const struct consw *csw, int first, int last)
{
struct module *owner = csw->owner;
struct con_driver *con_driver;
const char *desc;
int i, retval = 0;
if (!try_module_get(owner))
return -ENODEV;
console_lock();
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
con_driver = &registered_con_driver[i];
/* already registered */
if (con_driver->con == csw)
retval = -EBUSY;
}
if (retval)
goto err;
desc = csw->con_startup();
if (!desc)
goto err;
retval = -EINVAL;
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
con_driver = &registered_con_driver[i];
if (con_driver->con == NULL) {
con_driver->con = csw;
con_driver->desc = desc;
con_driver->node = i;
con_driver->flag = CON_DRIVER_FLAG_MODULE |
CON_DRIVER_FLAG_INIT;
con_driver->first = first;
con_driver->last = last;
retval = 0;
break;
}
}
if (retval)
goto err;
con_driver->dev = device_create(vtconsole_class, NULL,
MKDEV(0, con_driver->node),
NULL, "vtcon%i",
con_driver->node);
if (IS_ERR(con_driver->dev)) {
printk(KERN_WARNING "Unable to create device for %s; "
"errno = %ld\n", con_driver->desc,
PTR_ERR(con_driver->dev));
con_driver->dev = NULL;
} else {
vtconsole_init_device(con_driver);
}
err:
console_unlock();
module_put(owner);
return retval;
}
EXPORT_SYMBOL(register_con_driver);
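/*
* Illustrative call (hypothetical driver): registering for the full console
* range without binding yet:
*
*	err = register_con_driver(&my_consw, 0, MAX_NR_CONSOLES - 1);
*
* Binding happens later, e.g. via bind_con_driver() or a write to the
* sysfs `bind' attribute (store_bind() above).
*/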
/**
* unregister_con_driver - unregister console driver from console layer
* @csw: console driver
*
* DESCRIPTION: All drivers that register to the console layer must
* call this function upon exit, or if the console driver is in a state
* where it won't be able to handle console services, such as the
* framebuffer console without loaded framebuffer drivers.
*
* The driver must unbind first prior to unregistration.
*/
int unregister_con_driver(const struct consw *csw)
{
int i, retval = -ENODEV;
console_lock();
/* cannot unregister a bound driver */
if (con_is_bound(csw))
goto err;
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
struct con_driver *con_driver = &registered_con_driver[i];
if (con_driver->con == csw &&
con_driver->flag & CON_DRIVER_FLAG_MODULE) {
vtconsole_deinit_device(con_driver);
device_destroy(vtconsole_class,
MKDEV(0, con_driver->node));
con_driver->con = NULL;
con_driver->desc = NULL;
con_driver->dev = NULL;
con_driver->node = 0;
con_driver->flag = 0;
con_driver->first = 0;
con_driver->last = 0;
retval = 0;
break;
}
}
err:
console_unlock();
return retval;
}
EXPORT_SYMBOL(unregister_con_driver);
/*
* If we support more console drivers, this function is used
* when a driver wants to take over some existing consoles
* and become the default driver for newly opened ones.
*
* take_over_console is basically a register followed by bind
*/
int take_over_console(const struct consw *csw, int first, int last, int deflt)
{
int err;
err = register_con_driver(csw, first, last);
/* if we get a busy error we still want to bind the console driver
* and return success, as we may have unbound the console driver
* but not unregistered it.
*/
if (err == -EBUSY)
err = 0;
if (!err)
bind_con_driver(csw, first, last, deflt);
return err;
}
/*
* give_up_console is a wrapper around unregister_con_driver. It will only
* work if the driver is fully unbound.
*/
void give_up_console(const struct consw *csw)
{
unregister_con_driver(csw);
}
static int __init vtconsole_class_init(void)
{
int i;
vtconsole_class = class_create(THIS_MODULE, "vtconsole");
if (IS_ERR(vtconsole_class)) {
printk(KERN_WARNING "Unable to create vt console class; "
"errno = %ld\n", PTR_ERR(vtconsole_class));
vtconsole_class = NULL;
}
/* Add system drivers to sysfs */
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
struct con_driver *con = &registered_con_driver[i];
if (con->con && !con->dev) {
con->dev = device_create(vtconsole_class, NULL,
MKDEV(0, con->node),
NULL, "vtcon%i",
con->node);
if (IS_ERR(con->dev)) {
printk(KERN_WARNING "Unable to create "
"device for %s; errno = %ld\n",
con->desc, PTR_ERR(con->dev));
con->dev = NULL;
} else {
vtconsole_init_device(con);
}
}
}
return 0;
}
postcore_initcall(vtconsole_class_init);
#endif
/*
* Screen blanking
*/
static int set_vesa_blanking(char __user *p)
{
unsigned int mode;
if (get_user(mode, p + 1))
return -EFAULT;
vesa_blank_mode = (mode < 4) ? mode : 0;
return 0;
}
void do_blank_screen(int entering_gfx)
{
struct vc_data *vc = vc_cons[fg_console].d;
int i;
WARN_CONSOLE_UNLOCKED();
if (console_blanked) {
if (blank_state == blank_vesa_wait) {
blank_state = blank_off;
vc->vc_sw->con_blank(vc, vesa_blank_mode + 1, 0);
}
return;
}
/* entering graphics mode? */
if (entering_gfx) {
hide_cursor(vc);
save_screen(vc);
vc->vc_sw->con_blank(vc, -1, 1);
console_blanked = fg_console + 1;
blank_state = blank_off;
set_origin(vc);
return;
}
if (blank_state != blank_normal_wait)
return;
blank_state = blank_off;
/* don't blank graphics */
if (vc->vc_mode != KD_TEXT) {
console_blanked = fg_console + 1;
return;
}
hide_cursor(vc);
del_timer_sync(&console_timer);
blank_timer_expired = 0;
save_screen(vc);
/* In case we need to reset origin, blanking hook returns 1 */
i = vc->vc_sw->con_blank(vc, vesa_off_interval ? 1 : (vesa_blank_mode + 1), 0);
console_blanked = fg_console + 1;
if (i)
set_origin(vc);
if (console_blank_hook && console_blank_hook(1))
return;
if (vesa_off_interval && vesa_blank_mode) {
blank_state = blank_vesa_wait;
mod_timer(&console_timer, jiffies + vesa_off_interval);
}
vt_event_post(VT_EVENT_BLANK, vc->vc_num, vc->vc_num);
}
EXPORT_SYMBOL(do_blank_screen);
/*
* Called by timer as well as from vt_console_driver
*/
void do_unblank_screen(int leaving_gfx)
{
struct vc_data *vc;
/* This should now always be called from a "sane" (read: can schedule)
* context for the sake of the low level drivers, except in the special
* case of oops_in_progress
*/
if (!oops_in_progress)
might_sleep();
WARN_CONSOLE_UNLOCKED();
ignore_poke = 0;
if (!console_blanked)
return;
if (!vc_cons_allocated(fg_console)) {
/* impossible */
pr_warning("unblank_screen: tty %d not allocated ??\n",
fg_console+1);
return;
}
vc = vc_cons[fg_console].d;
/* Try to unblank in oops case too */
if (vc->vc_mode != KD_TEXT && !vt_force_oops_output(vc))
return; /* but leave console_blanked != 0 */
if (blankinterval) {
mod_timer(&console_timer, jiffies + (blankinterval * HZ));
blank_state = blank_normal_wait;
}
console_blanked = 0;
if (vc->vc_sw->con_blank(vc, 0, leaving_gfx) || vt_force_oops_output(vc))
/* Low-level driver cannot restore -> do it ourselves */
update_screen(vc);
if (console_blank_hook)
console_blank_hook(0);
set_palette(vc);
set_cursor(vc);
vt_event_post(VT_EVENT_UNBLANK, vc->vc_num, vc->vc_num);
}
EXPORT_SYMBOL(do_unblank_screen);
/*
* This is called by the outside world to cause a forced unblank, mostly for
* oopses. Currently, I just call do_unblank_screen(0), but we could eventually
* call it with 1 as an argument and so force a mode restore... that may kill
* X or at least garbage the screen but would also make the Oops visible...
*/
void unblank_screen(void)
{
do_unblank_screen(0);
}
/*
* We defer the timer blanking to work queue so it can take the console mutex
* (console operations can still happen at irq time, but only from printk,
* which holds the console mutex). Not perfect yet, but better than no locking.
*/
static void blank_screen_t(unsigned long dummy)
{
if (unlikely(!keventd_up())) {
mod_timer(&console_timer, jiffies + (blankinterval * HZ));
return;
}
blank_timer_expired = 1;
schedule_work(&console_work);
}
void poke_blanked_console(void)
{
WARN_CONSOLE_UNLOCKED();
/* Add this so we quickly catch whoever might call us in a non-safe
* context. Nowadays, unblank_screen() isn't to be called in
* atomic contexts and is allowed to schedule (with the special case
* of oops_in_progress, but that isn't of any concern for this
* function). --BenH.
*/
might_sleep();
/* This isn't perfectly race-free, but a race here would be mostly
* harmless: at worst, we'll do a spurious blank, and that is unlikely.
*/
del_timer(&console_timer);
blank_timer_expired = 0;
if (ignore_poke || !vc_cons[fg_console].d || vc_cons[fg_console].d->vc_mode == KD_GRAPHICS)
return;
if (console_blanked)
unblank_screen();
else if (blankinterval) {
mod_timer(&console_timer, jiffies + (blankinterval * HZ));
blank_state = blank_normal_wait;
}
}
/*
* Palettes
*/
static void set_palette(struct vc_data *vc)
{
WARN_CONSOLE_UNLOCKED();
if (vc->vc_mode != KD_GRAPHICS)
vc->vc_sw->con_set_palette(vc, color_table);
}
static int set_get_cmap(unsigned char __user *arg, int set)
{
int i, j, k;
WARN_CONSOLE_UNLOCKED();
for (i = 0; i < 16; i++)
if (set) {
get_user(default_red[i], arg++);
get_user(default_grn[i], arg++);
get_user(default_blu[i], arg++);
} else {
put_user(default_red[i], arg++);
put_user(default_grn[i], arg++);
put_user(default_blu[i], arg++);
}
if (set) {
for (i = 0; i < MAX_NR_CONSOLES; i++)
if (vc_cons_allocated(i)) {
for (j = k = 0; j < 16; j++) {
vc_cons[i].d->vc_palette[k++] = default_red[j];
vc_cons[i].d->vc_palette[k++] = default_grn[j];
vc_cons[i].d->vc_palette[k++] = default_blu[j];
}
set_palette(vc_cons[i].d);
}
}
return 0;
}
/*
* Load palette into the DAC registers. arg points to a colour
* map, 3 bytes per colour, 16 colours, range from 0 to 255.
*/
int con_set_cmap(unsigned char __user *arg)
{
int rc;
console_lock();
rc = set_get_cmap (arg,1);
console_unlock();
return rc;
}
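/*
* Illustrative layout (follows from the comment above): the user buffer is
* 48 bytes, with arg[0..2] = R,G,B of colour 0, arg[3..5] = colour 1, and
* so on up to arg[45..47] = colour 15.
*/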
int con_get_cmap(unsigned char __user *arg)
{
int rc;
console_lock();
rc = set_get_cmap (arg,0);
console_unlock();
return rc;
}
void reset_palette(struct vc_data *vc)
{
int j, k;
for (j=k=0; j<16; j++) {
vc->vc_palette[k++] = default_red[j];
vc->vc_palette[k++] = default_grn[j];
vc->vc_palette[k++] = default_blu[j];
}
set_palette(vc);
}
/*
* Font switching
*
* Currently we only support fonts up to 32 pixels wide, at a maximum height
* of 32 pixels. Userspace fontdata is stored with 32 bytes (shorts/ints,
* depending on width) reserved for each character, which is kinda wasteful,
* but this is done in order to maintain compatibility with the EGA/VGA fonts.
* It is up to the actual low-level console driver to convert the data into
* its favorite format (maybe we should add a `fontoffset' field to the
* `display' structure so we won't have to convert the fontdata all the time).
* /Jes
*/
#define max_font_size 65536
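/*
* Worked example (illustrative): an 8x16 font with 256 characters occupies
* (8+7)/8 * 32 * 256 = 8192 bytes in this format, well under max_font_size;
* the same formula is used for `c' and `size' in the functions below.
*/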
static int con_font_get(struct vc_data *vc, struct console_font_op *op)
{
struct console_font font;
int rc = -EINVAL;
int c;
if (vc->vc_mode != KD_TEXT)
return -EINVAL;
if (op->data) {
font.data = kmalloc(max_font_size, GFP_KERNEL);
if (!font.data)
return -ENOMEM;
} else
font.data = NULL;
console_lock();
if (vc->vc_sw->con_font_get)
rc = vc->vc_sw->con_font_get(vc, &font);
else
rc = -ENOSYS;
console_unlock();
if (rc)
goto out;
c = (font.width+7)/8 * 32 * font.charcount;
if (op->data && font.charcount > op->charcount)
rc = -ENOSPC;
if (!(op->flags & KD_FONT_FLAG_OLD)) {
if (font.width > op->width || font.height > op->height)
rc = -ENOSPC;
} else {
if (font.width != 8)
rc = -EIO;
else if ((op->height && font.height > op->height) ||
font.height > 32)
rc = -ENOSPC;
}
if (rc)
goto out;
op->height = font.height;
op->width = font.width;
op->charcount = font.charcount;
if (op->data && copy_to_user(op->data, font.data, c))
rc = -EFAULT;
out:
kfree(font.data);
return rc;
}
static int con_font_set(struct vc_data *vc, struct console_font_op *op)
{
struct console_font font;
int rc = -EINVAL;
int size;
if (vc->vc_mode != KD_TEXT)
return -EINVAL;
if (!op->data)
return -EINVAL;
if (op->charcount > 512)
return -EINVAL;
if (!op->height) { /* Need to guess font height [compat] */
int h, i;
u8 __user *charmap = op->data;
u8 tmp;
/* If from KDFONTOP ioctl, don't allow things which can be done in userland,
so that we can get rid of this soon */
if (!(op->flags & KD_FONT_FLAG_OLD))
return -EINVAL;
for (h = 32; h > 0; h--)
for (i = 0; i < op->charcount; i++) {
if (get_user(tmp, &charmap[32*i+h-1]))
return -EFAULT;
if (tmp)
goto nonzero;
}
return -EINVAL;
nonzero:
op->height = h;
}
if (op->width <= 0 || op->width > 32 || op->height > 32)
return -EINVAL;
size = (op->width+7)/8 * 32 * op->charcount;
if (size > max_font_size)
return -ENOSPC;
font.charcount = op->charcount;
font.height = op->height;
font.width = op->width;
font.data = memdup_user(op->data, size);
if (IS_ERR(font.data))
return PTR_ERR(font.data);
console_lock();
if (vc->vc_sw->con_font_set)
rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
else
rc = -ENOSYS;
console_unlock();
kfree(font.data);
return rc;
}
static int con_font_default(struct vc_data *vc, struct console_font_op *op)
{
struct console_font font = {.width = op->width, .height = op->height};
char name[MAX_FONT_NAME];
char *s = name;
int rc;
if (vc->vc_mode != KD_TEXT)
return -EINVAL;
if (!op->data)
s = NULL;
else if (strncpy_from_user(name, op->data, MAX_FONT_NAME - 1) < 0)
return -EFAULT;
else
name[MAX_FONT_NAME - 1] = 0;
console_lock();
if (vc->vc_sw->con_font_default)
rc = vc->vc_sw->con_font_default(vc, &font, s);
else
rc = -ENOSYS;
console_unlock();
if (!rc) {
op->width = font.width;
op->height = font.height;
}
return rc;
}
static int con_font_copy(struct vc_data *vc, struct console_font_op *op)
{
int con = op->height;
int rc;
if (vc->vc_mode != KD_TEXT)
return -EINVAL;
console_lock();
if (!vc->vc_sw->con_font_copy)
rc = -ENOSYS;
else if (con < 0 || !vc_cons_allocated(con))
rc = -ENOTTY;
else if (con == vc->vc_num) /* nothing to do */
rc = 0;
else
rc = vc->vc_sw->con_font_copy(vc, con);
console_unlock();
return rc;
}
int con_font_op(struct vc_data *vc, struct console_font_op *op)
{
switch (op->op) {
case KD_FONT_OP_SET:
return con_font_set(vc, op);
case KD_FONT_OP_GET:
return con_font_get(vc, op);
case KD_FONT_OP_SET_DEFAULT:
return con_font_default(vc, op);
case KD_FONT_OP_COPY:
return con_font_copy(vc, op);
}
return -ENOSYS;
}
/*
* Interface exported to selection and vcs.
*/
/* used by selection */
u16 screen_glyph(struct vc_data *vc, int offset)
{
u16 w = scr_readw(screenpos(vc, offset, 1));
u16 c = w & 0xff;
if (w & vc->vc_hi_font_mask)
c |= 0x100;
return c;
}
EXPORT_SYMBOL_GPL(screen_glyph);
/* used by vcs - note the word offset */
unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed)
{
return screenpos(vc, 2 * w_offset, viewed);
}
void getconsxy(struct vc_data *vc, unsigned char *p)
{
p[0] = vc->vc_x;
p[1] = vc->vc_y;
}
void putconsxy(struct vc_data *vc, unsigned char *p)
{
hide_cursor(vc);
gotoxy(vc, p[0], p[1]);
set_cursor(vc);
}
u16 vcs_scr_readw(struct vc_data *vc, const u16 *org)
{
if ((unsigned long)org == vc->vc_pos && softcursor_original != -1)
return softcursor_original;
return scr_readw(org);
}
void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org)
{
scr_writew(val, org);
if ((unsigned long)org == vc->vc_pos) {
softcursor_original = -1;
add_softcursor(vc);
}
}
void vcs_scr_updated(struct vc_data *vc)
{
notify_update(vc);
}
/*
* Visible symbols for modules
*/
EXPORT_SYMBOL(color_table);
EXPORT_SYMBOL(default_red);
EXPORT_SYMBOL(default_grn);
EXPORT_SYMBOL(default_blu);
EXPORT_SYMBOL(update_region);
EXPORT_SYMBOL(redraw_screen);
EXPORT_SYMBOL(vc_resize);
EXPORT_SYMBOL(fg_console);
EXPORT_SYMBOL(console_blank_hook);
EXPORT_SYMBOL(console_blanked);
EXPORT_SYMBOL(vc_cons);
EXPORT_SYMBOL(global_cursor_default);
#ifndef VT_SINGLE_DRIVER
EXPORT_SYMBOL(take_over_console);
EXPORT_SYMBOL(give_up_console);
#endif
| gpl-2.0 |
dmansfield/linux | arch/arm/mach-imx/hotplug.c | 2334 | 1536 | /*
* Copyright 2011 Freescale Semiconductor, Inc.
* Copyright 2011 Linaro Ltd.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <asm/cp15.h>
#include <asm/proc-fns.h>
#include "common.h"
static inline void cpu_enter_lowpower(void)
{
unsigned int v;
asm volatile(
"mcr p15, 0, %1, c7, c5, 0\n"
" mcr p15, 0, %1, c7, c10, 4\n"
/*
* Turn off coherency
*/
" mrc p15, 0, %0, c1, c0, 1\n"
" bic %0, %0, %3\n"
" mcr p15, 0, %0, c1, c0, 1\n"
" mrc p15, 0, %0, c1, c0, 0\n"
" bic %0, %0, %2\n"
" mcr p15, 0, %0, c1, c0, 0\n"
: "=&r" (v)
: "r" (0), "Ir" (CR_C), "Ir" (0x40)
: "cc");
}
/*
* platform-specific code to shutdown a CPU
*
* Called with IRQs disabled
*/
void imx_cpu_die(unsigned int cpu)
{
cpu_enter_lowpower();
/*
* We use the cpu jumping argument register to sync with
* imx_cpu_kill(), which is running on cpu0 and waiting for
* the register to be cleared before killing the cpu.
*/
imx_set_cpu_arg(cpu, ~0);
while (1)
cpu_do_idle();
}
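/*
* Illustrative timeline of the handshake described above (not code):
*
*	dying CPU (imx_cpu_die)		cpu0 (imx_cpu_kill)
*	imx_set_cpu_arg(cpu, ~0)	spins while imx_get_cpu_arg(cpu) == 0
*	cpu_do_idle() forever		imx_enable_cpu(cpu, false)
*					imx_set_cpu_arg(cpu, 0)
*/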
int imx_cpu_kill(unsigned int cpu)
{
unsigned long timeout = jiffies + msecs_to_jiffies(50);
while (imx_get_cpu_arg(cpu) == 0)
if (time_after(jiffies, timeout))
return 0;
imx_enable_cpu(cpu, false);
imx_set_cpu_arg(cpu, 0);
return 1;
}
| gpl-2.0 |
Jackeagle/kernel_stock_e53g | fs/hfs/extent.c | 4382 | 14179 | /*
* linux/fs/hfs/extent.c
*
* Copyright (C) 1995-1997 Paul H. Hargrove
* (C) 2003 Ardis Technologies <roman@ardistech.com>
* This file may be distributed under the terms of the GNU General Public License.
*
* This file contains the functions related to the extents B-tree.
*/
#include <linux/pagemap.h>
#include "hfs_fs.h"
#include "btree.h"
/*================ File-local functions ================*/
/*
* build_key
*/
static void hfs_ext_build_key(hfs_btree_key *key, u32 cnid, u16 block, u8 type)
{
key->key_len = 7;
key->ext.FkType = type;
key->ext.FNum = cpu_to_be32(cnid);
key->ext.FABN = cpu_to_be16(block);
}
/*
* hfs_ext_keycmp()
*
* Description:
* This is the comparison function used for the extents B-tree. In
* comparing extent B-tree entries, the file id is the most
* significant field (compared as unsigned ints); the fork type is
* the second most significant field (compared as unsigned chars);
* and the allocation block number field is the least significant
* (compared as unsigned ints).
* Input Variable(s):
* struct hfs_ext_key *key1: pointer to the first key to compare
* struct hfs_ext_key *key2: pointer to the second key to compare
* Output Variable(s):
* NONE
* Returns:
* int: negative if key1<key2, positive if key1>key2, and 0 if key1==key2
* Preconditions:
* key1 and key2 point to "valid" (struct hfs_ext_key)s.
* Postconditions:
* This function has no side-effects */
int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2)
{
__be32 fnum1, fnum2;
__be16 block1, block2;
fnum1 = key1->ext.FNum;
fnum2 = key2->ext.FNum;
if (fnum1 != fnum2)
return be32_to_cpu(fnum1) < be32_to_cpu(fnum2) ? -1 : 1;
if (key1->ext.FkType != key2->ext.FkType)
return key1->ext.FkType < key2->ext.FkType ? -1 : 1;
block1 = key1->ext.FABN;
block2 = key2->ext.FABN;
if (block1 == block2)
return 0;
return be16_to_cpu(block1) < be16_to_cpu(block2) ? -1 : 1;
}
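/*
* Worked example (illustrative): for key1 = {FNum 8, FkType data, FABN 0}
* and key2 = {FNum 8, FkType data, FABN 16}, the file ids and fork types
* compare equal, so the allocation block numbers decide: 0 < 16, and the
* function returns -1.
*/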
/*
* hfs_ext_find_block
*
* Find a block within an extent record
*/
static u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off)
{
int i;
u16 count;
for (i = 0; i < 3; ext++, i++) {
count = be16_to_cpu(ext->count);
if (off < count)
return be16_to_cpu(ext->block) + off;
off -= count;
}
/* panic? */
return 0;
}
static int hfs_ext_block_count(struct hfs_extent *ext)
{
int i;
u16 count = 0;
for (i = 0; i < 3; ext++, i++)
count += be16_to_cpu(ext->count);
return count;
}
static u16 hfs_ext_lastblock(struct hfs_extent *ext)
{
int i;
ext += 2;
for (i = 0; i < 2; ext--, i++)
if (ext->count)
break;
return be16_to_cpu(ext->block) + be16_to_cpu(ext->count);
}
static int __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
{
int res;
hfs_ext_build_key(fd->search_key, inode->i_ino, HFS_I(inode)->cached_start,
HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA);
res = hfs_brec_find(fd);
if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) {
if (res != -ENOENT)
return res;
hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec));
HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
} else {
if (res)
return res;
hfs_bnode_write(fd->bnode, HFS_I(inode)->cached_extents, fd->entryoffset, fd->entrylength);
HFS_I(inode)->flags &= ~HFS_FLG_EXT_DIRTY;
}
return 0;
}
int hfs_ext_write_extent(struct inode *inode)
{
struct hfs_find_data fd;
int res = 0;
if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) {
res = hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
if (res)
return res;
res = __hfs_ext_write_extent(inode, &fd);
hfs_find_exit(&fd);
}
return res;
}
static inline int __hfs_ext_read_extent(struct hfs_find_data *fd, struct hfs_extent *extent,
u32 cnid, u32 block, u8 type)
{
int res;
hfs_ext_build_key(fd->search_key, cnid, block, type);
fd->key->ext.FNum = 0;
res = hfs_brec_find(fd);
if (res && res != -ENOENT)
return res;
if (fd->key->ext.FNum != fd->search_key->ext.FNum ||
fd->key->ext.FkType != fd->search_key->ext.FkType)
return -ENOENT;
if (fd->entrylength != sizeof(hfs_extent_rec))
return -EIO;
hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfs_extent_rec));
return 0;
}
static inline int __hfs_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block)
{
int res;
if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) {
res = __hfs_ext_write_extent(inode, fd);
if (res)
return res;
}
res = __hfs_ext_read_extent(fd, HFS_I(inode)->cached_extents, inode->i_ino,
block, HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA);
if (!res) {
HFS_I(inode)->cached_start = be16_to_cpu(fd->key->ext.FABN);
HFS_I(inode)->cached_blocks = hfs_ext_block_count(HFS_I(inode)->cached_extents);
} else {
HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
}
return res;
}
static int hfs_ext_read_extent(struct inode *inode, u16 block)
{
struct hfs_find_data fd;
int res;
if (block >= HFS_I(inode)->cached_start &&
block < HFS_I(inode)->cached_start + HFS_I(inode)->cached_blocks)
return 0;
res = hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
if (!res) {
res = __hfs_ext_cache_extent(&fd, inode, block);
hfs_find_exit(&fd);
}
return res;
}
static void hfs_dump_extent(struct hfs_extent *extent)
{
int i;
hfs_dbg(EXTENT, " ");
for (i = 0; i < 3; i++)
hfs_dbg_cont(EXTENT, " %u:%u",
be16_to_cpu(extent[i].block),
be16_to_cpu(extent[i].count));
hfs_dbg_cont(EXTENT, "\n");
}
static int hfs_add_extent(struct hfs_extent *extent, u16 offset,
u16 alloc_block, u16 block_count)
{
u16 count, start;
int i;
hfs_dump_extent(extent);
for (i = 0; i < 3; extent++, i++) {
count = be16_to_cpu(extent->count);
if (offset == count) {
start = be16_to_cpu(extent->block);
if (alloc_block != start + count) {
if (++i >= 3)
return -ENOSPC;
extent++;
extent->block = cpu_to_be16(alloc_block);
} else
block_count += count;
extent->count = cpu_to_be16(block_count);
return 0;
} else if (offset < count)
break;
offset -= count;
}
/* panic? */
return -EIO;
}
static int hfs_free_extents(struct super_block *sb, struct hfs_extent *extent,
u16 offset, u16 block_nr)
{
u16 count, start;
int i;
hfs_dump_extent(extent);
for (i = 0; i < 3; extent++, i++) {
count = be16_to_cpu(extent->count);
if (offset == count)
goto found;
else if (offset < count)
break;
offset -= count;
}
/* panic? */
return -EIO;
found:
for (;;) {
start = be16_to_cpu(extent->block);
if (count <= block_nr) {
hfs_clear_vbm_bits(sb, start, count);
extent->block = 0;
extent->count = 0;
block_nr -= count;
} else {
count -= block_nr;
hfs_clear_vbm_bits(sb, start + count, block_nr);
extent->count = cpu_to_be16(count);
block_nr = 0;
}
if (!block_nr || !i)
return 0;
i--;
extent--;
count = be16_to_cpu(extent->count);
}
}
int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type)
{
struct hfs_find_data fd;
u32 total_blocks, blocks, start;
u32 cnid = be32_to_cpu(file->FlNum);
struct hfs_extent *extent;
int res, i;
if (type == HFS_FK_DATA) {
total_blocks = be32_to_cpu(file->PyLen);
extent = file->ExtRec;
} else {
total_blocks = be32_to_cpu(file->RPyLen);
extent = file->RExtRec;
}
total_blocks /= HFS_SB(sb)->alloc_blksz;
if (!total_blocks)
return 0;
blocks = 0;
for (i = 0; i < 3; extent++, i++)
blocks += be16_to_cpu(extent[i].count);
res = hfs_free_extents(sb, extent, blocks, blocks);
if (res)
return res;
if (total_blocks == blocks)
return 0;
res = hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
if (res)
return res;
do {
res = __hfs_ext_read_extent(&fd, extent, cnid, total_blocks, type);
if (res)
break;
start = be16_to_cpu(fd.key->ext.FABN);
hfs_free_extents(sb, extent, total_blocks - start, total_blocks);
hfs_brec_remove(&fd);
total_blocks = start;
} while (total_blocks > blocks);
hfs_find_exit(&fd);
return res;
}
/*
* hfs_get_block
*/
int hfs_get_block(struct inode *inode, sector_t block,
struct buffer_head *bh_result, int create)
{
struct super_block *sb;
u16 dblock, ablock;
int res;
sb = inode->i_sb;
/* Convert inode block to disk allocation block */
ablock = (u32)block / HFS_SB(sb)->fs_div;
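/* Worked example (illustrative): with 512-byte fs blocks inside
* 4096-byte allocation blocks, fs_div is 8, so inode block 19 maps to
* allocation block 2, at offset 3 within it. */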
if (block >= HFS_I(inode)->fs_blocks) {
if (block > HFS_I(inode)->fs_blocks || !create)
return -EIO;
if (ablock >= HFS_I(inode)->alloc_blocks) {
res = hfs_extend_file(inode);
if (res)
return res;
}
} else
create = 0;
if (ablock < HFS_I(inode)->first_blocks) {
dblock = hfs_ext_find_block(HFS_I(inode)->first_extents, ablock);
goto done;
}
mutex_lock(&HFS_I(inode)->extents_lock);
res = hfs_ext_read_extent(inode, ablock);
if (!res)
dblock = hfs_ext_find_block(HFS_I(inode)->cached_extents,
ablock - HFS_I(inode)->cached_start);
else {
mutex_unlock(&HFS_I(inode)->extents_lock);
return -EIO;
}
mutex_unlock(&HFS_I(inode)->extents_lock);
done:
map_bh(bh_result, sb, HFS_SB(sb)->fs_start +
dblock * HFS_SB(sb)->fs_div +
(u32)block % HFS_SB(sb)->fs_div);
if (create) {
set_buffer_new(bh_result);
HFS_I(inode)->phys_size += sb->s_blocksize;
HFS_I(inode)->fs_blocks++;
inode_add_bytes(inode, sb->s_blocksize);
mark_inode_dirty(inode);
}
return 0;
}
int hfs_extend_file(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
u32 start, len, goal;
int res;
mutex_lock(&HFS_I(inode)->extents_lock);
if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks)
goal = hfs_ext_lastblock(HFS_I(inode)->first_extents);
else {
res = hfs_ext_read_extent(inode, HFS_I(inode)->alloc_blocks);
if (res)
goto out;
goal = hfs_ext_lastblock(HFS_I(inode)->cached_extents);
}
len = HFS_I(inode)->clump_blocks;
start = hfs_vbm_search_free(sb, goal, &len);
if (!len) {
res = -ENOSPC;
goto out;
}
hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks) {
if (!HFS_I(inode)->first_blocks) {
hfs_dbg(EXTENT, "first extents\n");
/* no extents yet */
HFS_I(inode)->first_extents[0].block = cpu_to_be16(start);
HFS_I(inode)->first_extents[0].count = cpu_to_be16(len);
res = 0;
} else {
/* try to append to extents in inode */
res = hfs_add_extent(HFS_I(inode)->first_extents,
HFS_I(inode)->alloc_blocks,
start, len);
if (res == -ENOSPC)
goto insert_extent;
}
if (!res) {
hfs_dump_extent(HFS_I(inode)->first_extents);
HFS_I(inode)->first_blocks += len;
}
} else {
res = hfs_add_extent(HFS_I(inode)->cached_extents,
HFS_I(inode)->alloc_blocks -
HFS_I(inode)->cached_start,
start, len);
if (!res) {
hfs_dump_extent(HFS_I(inode)->cached_extents);
HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
HFS_I(inode)->cached_blocks += len;
} else if (res == -ENOSPC)
goto insert_extent;
}
out:
mutex_unlock(&HFS_I(inode)->extents_lock);
if (!res) {
HFS_I(inode)->alloc_blocks += len;
mark_inode_dirty(inode);
if (inode->i_ino < HFS_FIRSTUSER_CNID)
set_bit(HFS_FLG_ALT_MDB_DIRTY, &HFS_SB(sb)->flags);
set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
hfs_mark_mdb_dirty(sb);
}
return res;
insert_extent:
hfs_dbg(EXTENT, "insert new extent\n");
res = hfs_ext_write_extent(inode);
if (res)
goto out;
memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec));
HFS_I(inode)->cached_extents[0].block = cpu_to_be16(start);
HFS_I(inode)->cached_extents[0].count = cpu_to_be16(len);
hfs_dump_extent(HFS_I(inode)->cached_extents);
HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW;
HFS_I(inode)->cached_start = HFS_I(inode)->alloc_blocks;
HFS_I(inode)->cached_blocks = len;
res = 0;
goto out;
}
void hfs_file_truncate(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
struct hfs_find_data fd;
u16 blk_cnt, alloc_cnt, start;
u32 size;
int res;
hfs_dbg(INODE, "truncate: %lu, %Lu -> %Lu\n",
inode->i_ino, (long long)HFS_I(inode)->phys_size,
inode->i_size);
if (inode->i_size > HFS_I(inode)->phys_size) {
struct address_space *mapping = inode->i_mapping;
void *fsdata;
struct page *page;
/* XXX: Can use generic_cont_expand? */
size = inode->i_size - 1;
res = pagecache_write_begin(NULL, mapping, size+1, 0,
AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
if (!res) {
res = pagecache_write_end(NULL, mapping, size+1, 0, 0,
page, fsdata);
}
if (res)
inode->i_size = HFS_I(inode)->phys_size;
return;
} else if (inode->i_size == HFS_I(inode)->phys_size)
return;
size = inode->i_size + HFS_SB(sb)->alloc_blksz - 1;
blk_cnt = size / HFS_SB(sb)->alloc_blksz;
alloc_cnt = HFS_I(inode)->alloc_blocks;
if (blk_cnt == alloc_cnt)
goto out;
mutex_lock(&HFS_I(inode)->extents_lock);
res = hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
if (res) {
mutex_unlock(&HFS_I(inode)->extents_lock);
/* XXX: We lack error handling of hfs_file_truncate() */
return;
}
while (1) {
if (alloc_cnt == HFS_I(inode)->first_blocks) {
hfs_free_extents(sb, HFS_I(inode)->first_extents,
alloc_cnt, alloc_cnt - blk_cnt);
hfs_dump_extent(HFS_I(inode)->first_extents);
HFS_I(inode)->first_blocks = blk_cnt;
break;
}
res = __hfs_ext_cache_extent(&fd, inode, alloc_cnt);
if (res)
break;
start = HFS_I(inode)->cached_start;
hfs_free_extents(sb, HFS_I(inode)->cached_extents,
alloc_cnt - start, alloc_cnt - blk_cnt);
hfs_dump_extent(HFS_I(inode)->cached_extents);
if (blk_cnt > start) {
HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
break;
}
alloc_cnt = start;
HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
hfs_brec_remove(&fd);
}
hfs_find_exit(&fd);
mutex_unlock(&HFS_I(inode)->extents_lock);
HFS_I(inode)->alloc_blocks = blk_cnt;
out:
HFS_I(inode)->phys_size = inode->i_size;
HFS_I(inode)->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
inode_set_bytes(inode, HFS_I(inode)->fs_blocks << sb->s_blocksize_bits);
mark_inode_dirty(inode);
}
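/* Editor's note on the rounding above (not in the original source):
 * adding alloc_blksz - 1 before dividing rounds up, so with a
 * hypothetical 4096-byte allocation block an i_size of 1 byte gives
 * blk_cnt = (1 + 4095) / 4096 = 1 and one block stays allocated.
 */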
| gpl-2.0 |
CaptainThrowback/kernel_htc_m8whl_3.30.654.2 | net/ipv4/ip_input.c | 4382 | 13276 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* The Internet Protocol (IP) module.
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Donald Becker, <becker@super.org>
* Alan Cox, <alan@lxorguk.ukuu.org.uk>
* Richard Underwood
* Stefan Becker, <stefanb@yello.ping.de>
* Jorge Cwik, <jorge@laser.satlink.net>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
*
*
* Fixes:
* Alan Cox : Commented a couple of minor bits of surplus code
* Alan Cox : Undefining IP_FORWARD doesn't include the code
* (just stops a compiler warning).
* Alan Cox : Frames with >=MAX_ROUTE record routes, strict routes or loose routes
* are junked rather than corrupting things.
* Alan Cox : Frames to bad broadcast subnets are dumped
* We used to process them non broadcast and
* boy could that cause havoc.
* Alan Cox : ip_forward sets the free flag on the
* new frame it queues. Still crap because
* it copies the frame but at least it
* doesn't eat memory too.
* Alan Cox : Generic queue code and memory fixes.
* Fred Van Kempen : IP fragment support (borrowed from NET2E)
* Gerhard Koerting: Forward fragmented frames correctly.
* Gerhard Koerting: Fixes to my fix of the above 8-).
* Gerhard Koerting: IP interface addressing fix.
* Linus Torvalds : More robustness checks
* Alan Cox : Even more checks: Still not as robust as it ought to be
* Alan Cox : Save IP header pointer for later
* Alan Cox : ip option setting
* Alan Cox : Use ip_tos/ip_ttl settings
* Alan Cox : Fragmentation bogosity removed
* (Thanks to Mark.Bush@prg.ox.ac.uk)
* Dmitry Gorodchanin : Send of a raw packet crash fix.
* Alan Cox : Silly ip bug when an overlength
* fragment turns up. Now frees the
* queue.
* Linus Torvalds/ : Memory leakage on fragmentation
* Alan Cox : handling.
* Gerhard Koerting: Forwarding uses IP priority hints
* Teemu Rantanen : Fragment problems.
* Alan Cox : General cleanup, comments and reformat
* Alan Cox : SNMP statistics
* Alan Cox : BSD address rule semantics. Also see
* UDP as there is a nasty checksum issue
* if you do things the wrong way.
* Alan Cox : Always defrag, moved IP_FORWARD to the config.in file
* Alan Cox : IP options adjust sk->priority.
* Pedro Roque : Fix mtu/length error in ip_forward.
* Alan Cox : Avoid ip_chk_addr when possible.
* Richard Underwood : IP multicasting.
* Alan Cox : Cleaned up multicast handlers.
* Alan Cox : RAW sockets demultiplex in the BSD style.
* Gunther Mayer : Fix the SNMP reporting typo
* Alan Cox : Always in group 224.0.0.1
* Pauline Middelink : Fast ip_checksum update when forwarding
* Masquerading support.
* Alan Cox : Multicast loopback error for 224.0.0.1
* Alan Cox : IP_MULTICAST_LOOP option.
* Alan Cox : Use notifiers.
* Bjorn Ekwall : Removed ip_csum (from slhc.c too)
* Bjorn Ekwall : Moved ip_fast_csum to ip.h (inline!)
* Stefan Becker : Send out ICMP HOST REDIRECT
* Arnt Gulbrandsen : ip_build_xmit
* Alan Cox : Per socket routing cache
* Alan Cox : Fixed routing cache, added header cache.
* Alan Cox : Loopback didn't work right in original ip_build_xmit - fixed it.
* Alan Cox : Only send ICMP_REDIRECT if src/dest are the same net.
* Alan Cox : Incoming IP option handling.
* Alan Cox : Set saddr on raw output frames as per BSD.
* Alan Cox : Stopped broadcast source route explosions.
* Alan Cox : Can disable source routing
* Takeshi Sone : Masquerading didn't work.
* Dave Bonn,Alan Cox : Faster IP forwarding whenever possible.
* Alan Cox : Memory leaks, tramples, misc debugging.
* Alan Cox : Fixed multicast (by popular demand 8))
* Alan Cox : Fixed forwarding (by even more popular demand 8))
* Alan Cox : Fixed SNMP statistics [I think]
* Gerhard Koerting : IP fragmentation forwarding fix
* Alan Cox : Device lock against page fault.
* Alan Cox : IP_HDRINCL facility.
* Werner Almesberger : Zero fragment bug
* Alan Cox : RAW IP frame length bug
* Alan Cox : Outgoing firewall on build_xmit
* A.N.Kuznetsov : IP_OPTIONS support throughout the kernel
* Alan Cox : Multicast routing hooks
* Jos Vos : Do accounting *before* call_in_firewall
* Willy Konynenberg : Transparent proxying support
*
*
*
* To Fix:
* IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
* and could be made very efficient with the addition of some virtual memory hacks to permit
* the allocation of a buffer that can then be 'grown' by twiddling page tables.
* Output fragmentation wants updating along with the buffer management to use a single
* interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
* output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
* fragmentation anyway.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) "IPv4: " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
#include <linux/netfilter_ipv4.h>
#include <net/xfrm.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
/*
* Process Router Attention IP option (RFC 2113)
*/
bool ip_call_ra_chain(struct sk_buff *skb)
{
struct ip_ra_chain *ra;
u8 protocol = ip_hdr(skb)->protocol;
struct sock *last = NULL;
struct net_device *dev = skb->dev;
for (ra = rcu_dereference(ip_ra_chain); ra; ra = rcu_dereference(ra->next)) {
struct sock *sk = ra->sk;
/* If socket is bound to an interface, only report
* the packet if it came from that interface.
*/
if (sk && inet_sk(sk)->inet_num == protocol &&
(!sk->sk_bound_dev_if ||
sk->sk_bound_dev_if == dev->ifindex) &&
net_eq(sock_net(sk), dev_net(dev))) {
if (ip_is_fragment(ip_hdr(skb))) {
if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN))
return true;
}
if (last) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
raw_rcv(last, skb2);
}
last = sk;
}
}
if (last) {
raw_rcv(last, skb);
return true;
}
return false;
}
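/* Editor's note (not in the original source): the loop above follows
 * the usual "clone for every listener but the last" pattern - each
 * earlier match gets an skb_clone() while the final match consumes the
 * original skb, so the common single-listener case needs no copy.
 */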
static int ip_local_deliver_finish(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
__skb_pull(skb, ip_hdrlen(skb));
/* Point into the IP datagram, just past the header. */
skb_reset_transport_header(skb);
rcu_read_lock();
{
int protocol = ip_hdr(skb)->protocol;
int hash, raw;
const struct net_protocol *ipprot;
resubmit:
raw = raw_local_deliver(skb, protocol);
hash = protocol & (MAX_INET_PROTOS - 1);
ipprot = rcu_dereference(inet_protos[hash]);
if (ipprot != NULL) {
int ret;
if (!net_eq(net, &init_net) && !ipprot->netns_ok) {
if (net_ratelimit())
printk("%s: proto %d isn't netns-ready\n",
__func__, protocol);
kfree_skb(skb);
goto out;
}
if (!ipprot->no_policy) {
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
kfree_skb(skb);
goto out;
}
nf_reset(skb);
}
ret = ipprot->handler(skb);
if (ret < 0) {
protocol = -ret;
goto resubmit;
}
IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
} else {
if (!raw) {
if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
icmp_send(skb, ICMP_DEST_UNREACH,
ICMP_PROT_UNREACH, 0);
}
} else
IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
kfree_skb(skb);
}
}
out:
rcu_read_unlock();
return 0;
}
/*
* Deliver IP Packets to the higher protocol layers.
*/
int ip_local_deliver(struct sk_buff *skb)
{
/*
* Reassemble IP fragments.
*/
if (ip_is_fragment(ip_hdr(skb))) {
if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
return 0;
}
return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
ip_local_deliver_finish);
}
static inline bool ip_rcv_options(struct sk_buff *skb)
{
struct ip_options *opt;
const struct iphdr *iph;
struct net_device *dev = skb->dev;
/* It looks like overkill, because not all
IP options require packet mangling.
But it is the easiest for now, especially taking
into account that the combination of IP options
and a running sniffer is an extremely rare condition.
--ANK (980813)
*/
if (skb_cow(skb, skb_headroom(skb))) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
goto drop;
}
iph = ip_hdr(skb);
opt = &(IPCB(skb)->opt);
opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
if (ip_options_compile(dev_net(dev), opt, skb)) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
goto drop;
}
if (unlikely(opt->srr)) {
struct in_device *in_dev = __in_dev_get_rcu(dev);
if (in_dev) {
if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
if (IN_DEV_LOG_MARTIANS(in_dev) &&
net_ratelimit())
pr_info("source route option %pI4 -> %pI4\n",
&iph->saddr, &iph->daddr);
goto drop;
}
}
if (ip_options_rcv_srr(skb))
goto drop;
}
return false;
drop:
return true;
}
static int ip_rcv_finish(struct sk_buff *skb)
{
const struct iphdr *iph = ip_hdr(skb);
struct rtable *rt;
/*
* Initialise the virtual path cache for the packet. It describes
* how the packet travels inside Linux networking.
*/
if (skb_dst(skb) == NULL) {
int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
iph->tos, skb->dev);
if (unlikely(err)) {
if (err == -EHOSTUNREACH)
IP_INC_STATS_BH(dev_net(skb->dev),
IPSTATS_MIB_INADDRERRORS);
else if (err == -ENETUNREACH)
IP_INC_STATS_BH(dev_net(skb->dev),
IPSTATS_MIB_INNOROUTES);
else if (err == -EXDEV)
NET_INC_STATS_BH(dev_net(skb->dev),
LINUX_MIB_IPRPFILTER);
goto drop;
}
}
#ifdef CONFIG_IP_ROUTE_CLASSID
if (unlikely(skb_dst(skb)->tclassid)) {
struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
u32 idx = skb_dst(skb)->tclassid;
st[idx&0xFF].o_packets++;
st[idx&0xFF].o_bytes += skb->len;
st[(idx>>16)&0xFF].i_packets++;
st[(idx>>16)&0xFF].i_bytes += skb->len;
}
#endif
if (iph->ihl > 5 && ip_rcv_options(skb))
goto drop;
rt = skb_rtable(skb);
if (rt->rt_type == RTN_MULTICAST) {
IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INMCAST,
skb->len);
} else if (rt->rt_type == RTN_BROADCAST)
IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INBCAST,
skb->len);
return dst_input(skb);
drop:
kfree_skb(skb);
return NET_RX_DROP;
}
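/* Editor's sketch of the classid packing assumed above (not in the
 * original source): a tclassid of 0x00ab00cd accounts output counters
 * in slot 0xcd (idx & 0xFF) and input counters in slot 0xab
 * ((idx >> 16) & 0xFF).
 */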
/*
* Main IP Receive routine.
*/
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
const struct iphdr *iph;
u32 len;
/* When the interface is in promisc. mode, drop all the crap
* that it receives, do not try to analyse it.
*/
if (skb->pkt_type == PACKET_OTHERHOST)
goto drop;
IP_UPD_PO_STATS_BH(dev_net(dev), IPSTATS_MIB_IN, skb->len);
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
goto out;
}
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto inhdr_error;
iph = ip_hdr(skb);
/*
* RFC1122: 3.2.1.2 MUST silently discard any IP frame that fails the checksum.
*
* Is the datagram acceptable?
*
* 1. Length at least the size of an ip header
* 2. Version of 4
* 3. Checksums correctly. [Speed optimisation for later, skip loopback checksums]
* 4. Doesn't have a bogus length
*/
if (iph->ihl < 5 || iph->version != 4)
goto inhdr_error;
if (!pskb_may_pull(skb, iph->ihl*4))
goto inhdr_error;
iph = ip_hdr(skb);
if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
goto inhdr_error;
len = ntohs(iph->tot_len);
if (skb->len < len) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
goto drop;
} else if (len < (iph->ihl*4))
goto inhdr_error;
/* Our transport medium may have padded the buffer out. Now we know it
* is IP we can trim to the true length of the frame.
* Note this now means skb->len holds ntohs(iph->tot_len).
*/
if (pskb_trim_rcsum(skb, len)) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
goto drop;
}
/* Remove any debris in the socket control block */
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
/* Must drop socket now because of tproxy. */
skb_orphan(skb);
return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, dev, NULL,
ip_rcv_finish);
inhdr_error:
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
drop:
kfree_skb(skb);
out:
return NET_RX_DROP;
}
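/* Editor's sketch (not in the original source) of the checks above on
 * a minimal datagram: ihl = 5 means a 20-byte header, so at least 20
 * bytes must be linear, version must be 4, the ones' complement sum
 * over ihl*4 bytes must verify, and tot_len must be >= 20 and
 * <= skb->len; anything past tot_len is link-layer padding that
 * pskb_trim_rcsum() strips.
 */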
| gpl-2.0 |
DevSwift/Kernel-3.4-U8500 | arch/powerpc/platforms/powermac/low_i2c.c | 4382 | 37281 | /*
* arch/powerpc/platforms/powermac/low_i2c.c
*
* Copyright (C) 2003-2005 Ben. Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* The linux i2c layer isn't completely suitable for our needs for various
* reasons ranging from too late initialisation to semantics not perfectly
* matching some requirements of the apple platform functions etc...
*
* This file thus provides a simple low level unified i2c interface for
* powermac that covers the various types of i2c busses used in Apple machines.
* For now, keywest, PMU and SMU, though we could add Cuda, or other bit
* banging busses found on older chipsets in earlier machines if we ever need
* one of them.
*
* The drivers in this file are synchronous/blocking. In addition, the
* keywest one is fairly slow due to the use of msleep instead of interrupts
* as the interrupt is currently used by i2c-keywest. In the long run, we
* might want to get rid of those high-level interfaces to linux i2c layer
* either completely (converting all drivers) or replacing them all with a
* single stub driver on top of this one. Once done, the interrupt will be
* available for our use.
*/
#undef DEBUG
#undef DEBUG_LOW
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/mutex.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <asm/keylargo.h>
#include <asm/uninorth.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/smu.h>
#include <asm/pmac_pfunc.h>
#include <asm/pmac_low_i2c.h>
#ifdef DEBUG
#define DBG(x...) do {\
printk(KERN_DEBUG "low_i2c:" x); \
} while(0)
#else
#define DBG(x...)
#endif
#ifdef DEBUG_LOW
#define DBG_LOW(x...) do {\
printk(KERN_DEBUG "low_i2c:" x); \
} while(0)
#else
#define DBG_LOW(x...)
#endif
static int pmac_i2c_force_poll = 1;
/*
* A bus structure. Each bus in the system has such a structure associated.
*/
struct pmac_i2c_bus
{
struct list_head link;
struct device_node *controller;
struct device_node *busnode;
int type;
int flags;
struct i2c_adapter adapter;
void *hostdata;
int channel; /* some hosts have multiple */
int mode; /* current mode */
struct mutex mutex;
int opened;
int polled; /* open mode */
struct platform_device *platform_dev;
/* ops */
int (*open)(struct pmac_i2c_bus *bus);
void (*close)(struct pmac_i2c_bus *bus);
int (*xfer)(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
u32 subaddr, u8 *data, int len);
};
static LIST_HEAD(pmac_i2c_busses);
/*
* Keywest implementation
*/
struct pmac_i2c_host_kw
{
struct mutex mutex; /* Access mutex for use by
* i2c-keywest */
void __iomem *base; /* register base address */
int bsteps; /* register stepping */
int speed; /* speed */
int irq;
u8 *data;
unsigned len;
int state;
int rw;
int polled;
int result;
struct completion complete;
spinlock_t lock;
struct timer_list timeout_timer;
};
/* Register indices */
typedef enum {
reg_mode = 0,
reg_control,
reg_status,
reg_isr,
reg_ier,
reg_addr,
reg_subaddr,
reg_data
} reg_t;
/* The Tumbler audio equalizer can be really slow sometimes */
#define KW_POLL_TIMEOUT (2*HZ)
/* Mode register */
#define KW_I2C_MODE_100KHZ 0x00
#define KW_I2C_MODE_50KHZ 0x01
#define KW_I2C_MODE_25KHZ 0x02
#define KW_I2C_MODE_DUMB 0x00
#define KW_I2C_MODE_STANDARD 0x04
#define KW_I2C_MODE_STANDARDSUB 0x08
#define KW_I2C_MODE_COMBINED 0x0C
#define KW_I2C_MODE_MODE_MASK 0x0C
#define KW_I2C_MODE_CHAN_MASK 0xF0
/* Control register */
#define KW_I2C_CTL_AAK 0x01
#define KW_I2C_CTL_XADDR 0x02
#define KW_I2C_CTL_STOP 0x04
#define KW_I2C_CTL_START 0x08
/* Status register */
#define KW_I2C_STAT_BUSY 0x01
#define KW_I2C_STAT_LAST_AAK 0x02
#define KW_I2C_STAT_LAST_RW 0x04
#define KW_I2C_STAT_SDA 0x08
#define KW_I2C_STAT_SCL 0x10
/* IER & ISR registers */
#define KW_I2C_IRQ_DATA 0x01
#define KW_I2C_IRQ_ADDR 0x02
#define KW_I2C_IRQ_STOP 0x04
#define KW_I2C_IRQ_START 0x08
#define KW_I2C_IRQ_MASK 0x0F
/* State machine states */
enum {
state_idle,
state_addr,
state_read,
state_write,
state_stop,
state_dead
};
#define WRONG_STATE(name) do {\
printk(KERN_DEBUG "KW: wrong state. Got %s, state: %s " \
"(isr: %02x)\n", \
name, __kw_state_names[host->state], isr); \
} while(0)
static const char *__kw_state_names[] = {
"state_idle",
"state_addr",
"state_read",
"state_write",
"state_stop",
"state_dead"
};
static inline u8 __kw_read_reg(struct pmac_i2c_host_kw *host, reg_t reg)
{
return readb(host->base + (((unsigned int)reg) << host->bsteps));
}
static inline void __kw_write_reg(struct pmac_i2c_host_kw *host,
reg_t reg, u8 val)
{
writeb(val, host->base + (((unsigned)reg) << host->bsteps));
(void)__kw_read_reg(host, reg_subaddr);
}
#define kw_write_reg(reg, val) __kw_write_reg(host, reg, val)
#define kw_read_reg(reg) __kw_read_reg(host, reg)
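/* Editor's sketch (not in the original source) of the register layout
 * implied by bsteps: with the default AAPL,address-step of 0x10,
 * bsteps is 4, so reg_status (index 2) sits at base + (2 << 4) =
 * base + 0x20 and reg_data (index 7) at base + 0x70.
 */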
static u8 kw_i2c_wait_interrupt(struct pmac_i2c_host_kw *host)
{
int i, j;
u8 isr;
for (i = 0; i < 1000; i++) {
isr = kw_read_reg(reg_isr) & KW_I2C_IRQ_MASK;
if (isr != 0)
return isr;
/* This code is used with the timebase frozen, we cannot rely
* on udelay nor schedule when in polled mode !
* For now, just use a bogus loop....
*/
if (host->polled) {
for (j = 1; j < 100000; j++)
mb();
} else
msleep(1);
}
return isr;
}
static void kw_i2c_do_stop(struct pmac_i2c_host_kw *host, int result)
{
kw_write_reg(reg_control, KW_I2C_CTL_STOP);
host->state = state_stop;
host->result = result;
}
static void kw_i2c_handle_interrupt(struct pmac_i2c_host_kw *host, u8 isr)
{
u8 ack;
DBG_LOW("kw_handle_interrupt(%s, isr: %x)\n",
__kw_state_names[host->state], isr);
if (host->state == state_idle) {
printk(KERN_WARNING "low_i2c: Keywest got an out of state"
" interrupt, ignoring\n");
kw_write_reg(reg_isr, isr);
return;
}
if (isr == 0) {
printk(KERN_WARNING "low_i2c: Timeout in i2c transfer"
" on keywest !\n");
if (host->state != state_stop) {
kw_i2c_do_stop(host, -EIO);
return;
}
ack = kw_read_reg(reg_status);
if (ack & KW_I2C_STAT_BUSY)
kw_write_reg(reg_status, 0);
host->state = state_idle;
kw_write_reg(reg_ier, 0x00);
if (!host->polled)
complete(&host->complete);
return;
}
if (isr & KW_I2C_IRQ_ADDR) {
ack = kw_read_reg(reg_status);
if (host->state != state_addr) {
WRONG_STATE("KW_I2C_IRQ_ADDR");
kw_i2c_do_stop(host, -EIO);
}
if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
host->result = -ENXIO;
host->state = state_stop;
DBG_LOW("KW: NAK on address\n");
} else {
if (host->len == 0)
kw_i2c_do_stop(host, 0);
else if (host->rw) {
host->state = state_read;
if (host->len > 1)
kw_write_reg(reg_control,
KW_I2C_CTL_AAK);
} else {
host->state = state_write;
kw_write_reg(reg_data, *(host->data++));
host->len--;
}
}
kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR);
}
if (isr & KW_I2C_IRQ_DATA) {
if (host->state == state_read) {
*(host->data++) = kw_read_reg(reg_data);
host->len--;
kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
if (host->len == 0)
host->state = state_stop;
else if (host->len == 1)
kw_write_reg(reg_control, 0);
} else if (host->state == state_write) {
ack = kw_read_reg(reg_status);
if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
DBG_LOW("KW: nack on data write\n");
host->result = -EFBIG;
host->state = state_stop;
} else if (host->len) {
kw_write_reg(reg_data, *(host->data++));
host->len--;
} else
kw_i2c_do_stop(host, 0);
} else {
WRONG_STATE("KW_I2C_IRQ_DATA");
if (host->state != state_stop)
kw_i2c_do_stop(host, -EIO);
}
kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
}
if (isr & KW_I2C_IRQ_STOP) {
kw_write_reg(reg_isr, KW_I2C_IRQ_STOP);
if (host->state != state_stop) {
WRONG_STATE("KW_I2C_IRQ_STOP");
host->result = -EIO;
}
host->state = state_idle;
if (!host->polled)
complete(&host->complete);
}
/* Below should only happen in manual mode which we don't use ... */
if (isr & KW_I2C_IRQ_START)
kw_write_reg(reg_isr, KW_I2C_IRQ_START);
}
/* Interrupt handler */
static irqreturn_t kw_i2c_irq(int irq, void *dev_id)
{
struct pmac_i2c_host_kw *host = dev_id;
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
del_timer(&host->timeout_timer);
kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr));
if (host->state != state_idle) {
host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
add_timer(&host->timeout_timer);
}
spin_unlock_irqrestore(&host->lock, flags);
return IRQ_HANDLED;
}
static void kw_i2c_timeout(unsigned long data)
{
struct pmac_i2c_host_kw *host = (struct pmac_i2c_host_kw *)data;
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
/*
* If the timer is pending, that means we raced with the
* irq, in which case we just return
*/
if (timer_pending(&host->timeout_timer))
goto skip;
kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr));
if (host->state != state_idle) {
host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
add_timer(&host->timeout_timer);
}
skip:
spin_unlock_irqrestore(&host->lock, flags);
}
static int kw_i2c_open(struct pmac_i2c_bus *bus)
{
struct pmac_i2c_host_kw *host = bus->hostdata;
mutex_lock(&host->mutex);
return 0;
}
static void kw_i2c_close(struct pmac_i2c_bus *bus)
{
struct pmac_i2c_host_kw *host = bus->hostdata;
mutex_unlock(&host->mutex);
}
static int kw_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
u32 subaddr, u8 *data, int len)
{
struct pmac_i2c_host_kw *host = bus->hostdata;
u8 mode_reg = host->speed;
int use_irq = host->irq != NO_IRQ && !bus->polled;
/* Setup mode & subaddress if any */
switch(bus->mode) {
case pmac_i2c_mode_dumb:
return -EINVAL;
case pmac_i2c_mode_std:
mode_reg |= KW_I2C_MODE_STANDARD;
if (subsize != 0)
return -EINVAL;
break;
case pmac_i2c_mode_stdsub:
mode_reg |= KW_I2C_MODE_STANDARDSUB;
if (subsize != 1)
return -EINVAL;
break;
case pmac_i2c_mode_combined:
mode_reg |= KW_I2C_MODE_COMBINED;
if (subsize != 1)
return -EINVAL;
break;
}
/* Setup channel & clear pending irqs */
kw_write_reg(reg_isr, kw_read_reg(reg_isr));
kw_write_reg(reg_mode, mode_reg | (bus->channel << 4));
kw_write_reg(reg_status, 0);
/* Set up address and r/w bit, strip possible stale bus number from
* address top bits
*/
kw_write_reg(reg_addr, addrdir & 0xff);
/* Set up the sub address */
if ((mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_STANDARDSUB
|| (mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_COMBINED)
kw_write_reg(reg_subaddr, subaddr);
/* Prepare for async operations */
host->data = data;
host->len = len;
host->state = state_addr;
host->result = 0;
host->rw = (addrdir & 1);
host->polled = bus->polled;
/* Enable interrupt if not using polled mode and interrupt is
* available
*/
if (use_irq) {
/* Clear completion */
INIT_COMPLETION(host->complete);
/* Ack stale interrupts */
kw_write_reg(reg_isr, kw_read_reg(reg_isr));
/* Arm timeout */
host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
add_timer(&host->timeout_timer);
/* Enable emission */
kw_write_reg(reg_ier, KW_I2C_IRQ_MASK);
}
/* Start sending address */
kw_write_reg(reg_control, KW_I2C_CTL_XADDR);
/* Wait for completion */
if (use_irq)
wait_for_completion(&host->complete);
else {
while(host->state != state_idle) {
unsigned long flags;
u8 isr = kw_i2c_wait_interrupt(host);
spin_lock_irqsave(&host->lock, flags);
kw_i2c_handle_interrupt(host, isr);
spin_unlock_irqrestore(&host->lock, flags);
}
}
/* Disable emission */
kw_write_reg(reg_ier, 0);
return host->result;
}
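/* Editor's sketch (not in the original source) of the mode register
 * assembled above: a combined transfer at 25kHz on channel 1 gives
 * KW_I2C_MODE_25KHZ | KW_I2C_MODE_COMBINED | (1 << 4) =
 * 0x02 | 0x0c | 0x10 = 0x1e, with the channel landing in the
 * KW_I2C_MODE_CHAN_MASK (0xF0) bits.
 */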
static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
{
struct pmac_i2c_host_kw *host;
const u32 *psteps, *prate, *addrp;
u32 steps;
host = kzalloc(sizeof(struct pmac_i2c_host_kw), GFP_KERNEL);
if (host == NULL) {
printk(KERN_ERR "low_i2c: Can't allocate host for %s\n",
np->full_name);
return NULL;
}
/* Apple is kind enough to provide a valid AAPL,address property
* on all i2c keywest nodes so far ... we would have to fallback
* to macio parsing if that wasn't the case
*/
addrp = of_get_property(np, "AAPL,address", NULL);
if (addrp == NULL) {
printk(KERN_ERR "low_i2c: Can't find address for %s\n",
np->full_name);
kfree(host);
return NULL;
}
mutex_init(&host->mutex);
init_completion(&host->complete);
spin_lock_init(&host->lock);
init_timer(&host->timeout_timer);
host->timeout_timer.function = kw_i2c_timeout;
host->timeout_timer.data = (unsigned long)host;
psteps = of_get_property(np, "AAPL,address-step", NULL);
steps = psteps ? (*psteps) : 0x10;
for (host->bsteps = 0; (steps & 0x01) == 0; host->bsteps++)
steps >>= 1;
/* Select interface rate */
host->speed = KW_I2C_MODE_25KHZ;
prate = of_get_property(np, "AAPL,i2c-rate", NULL);
if (prate) switch(*prate) {
case 100:
host->speed = KW_I2C_MODE_100KHZ;
break;
case 50:
host->speed = KW_I2C_MODE_50KHZ;
break;
case 25:
host->speed = KW_I2C_MODE_25KHZ;
break;
}
host->irq = irq_of_parse_and_map(np, 0);
if (host->irq == NO_IRQ)
printk(KERN_WARNING
"low_i2c: Failed to map interrupt for %s\n",
np->full_name);
host->base = ioremap((*addrp), 0x1000);
if (host->base == NULL) {
printk(KERN_ERR "low_i2c: Can't map registers for %s\n",
np->full_name);
kfree(host);
return NULL;
}
/* Make sure IRQ is disabled */
kw_write_reg(reg_ier, 0);
/* Request chip interrupt. We set IRQF_NO_SUSPEND because we don't
* want that interrupt disabled between the 2 passes of driver
* suspend or we'll have issues running the pfuncs
*/
if (request_irq(host->irq, kw_i2c_irq, IRQF_NO_SUSPEND,
"keywest i2c", host))
host->irq = NO_IRQ;
printk(KERN_INFO "KeyWest i2c @0x%08x irq %d %s\n",
*addrp, host->irq, np->full_name);
return host;
}
static void __init kw_i2c_add(struct pmac_i2c_host_kw *host,
struct device_node *controller,
struct device_node *busnode,
int channel)
{
struct pmac_i2c_bus *bus;
bus = kzalloc(sizeof(struct pmac_i2c_bus), GFP_KERNEL);
if (bus == NULL)
return;
bus->controller = of_node_get(controller);
bus->busnode = of_node_get(busnode);
bus->type = pmac_i2c_bus_keywest;
bus->hostdata = host;
bus->channel = channel;
bus->mode = pmac_i2c_mode_std;
bus->open = kw_i2c_open;
bus->close = kw_i2c_close;
bus->xfer = kw_i2c_xfer;
mutex_init(&bus->mutex);
if (controller == busnode)
bus->flags = pmac_i2c_multibus;
list_add(&bus->link, &pmac_i2c_busses);
printk(KERN_INFO " channel %d bus %s\n", channel,
(controller == busnode) ? "<multibus>" : busnode->full_name);
}
static void __init kw_i2c_probe(void)
{
struct device_node *np, *child, *parent;
/* Probe keywest-i2c busses */
for_each_compatible_node(np, "i2c","keywest-i2c") {
struct pmac_i2c_host_kw *host;
int multibus;
/* Found one, init a host structure */
host = kw_i2c_host_init(np);
if (host == NULL)
continue;
/* Now check if we have a multibus setup (old style) or if we
* have proper bus nodes. Note that the "new" way (proper bus
* nodes) might cause us to not create some busses that are
* kept hidden in the device-tree. In the future, we might
* want to work around that by creating busses without a node
* but not for now
*/
child = of_get_next_child(np, NULL);
multibus = !child || strcmp(child->name, "i2c-bus");
of_node_put(child);
/* For a multibus setup, we get the bus count based on the
* parent type
*/
if (multibus) {
int chans, i;
parent = of_get_parent(np);
if (parent == NULL)
continue;
chans = parent->name[0] == 'u' ? 2 : 1;
for (i = 0; i < chans; i++)
kw_i2c_add(host, np, np, i);
} else {
for (child = NULL;
(child = of_get_next_child(np, child)) != NULL;) {
const u32 *reg = of_get_property(child,
"reg", NULL);
if (reg == NULL)
continue;
kw_i2c_add(host, np, child, *reg);
}
}
}
}
/*
*
* PMU implementation
*
*/
#ifdef CONFIG_ADB_PMU
/*
* i2c command block to the PMU
*/
struct pmu_i2c_hdr {
u8 bus;
u8 mode;
u8 bus2;
u8 address;
u8 sub_addr;
u8 comb_addr;
u8 count;
u8 data[];
};
static void pmu_i2c_complete(struct adb_request *req)
{
complete(req->arg);
}
static int pmu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
u32 subaddr, u8 *data, int len)
{
struct adb_request *req = bus->hostdata;
struct pmu_i2c_hdr *hdr = (struct pmu_i2c_hdr *)&req->data[1];
struct completion comp;
int read = addrdir & 1;
int retry;
int rc = 0;
/* For now, limit ourselves to 16 bytes transfers */
if (len > 16)
return -EINVAL;
init_completion(&comp);
for (retry = 0; retry < 16; retry++) {
memset(req, 0, sizeof(struct adb_request));
hdr->bus = bus->channel;
hdr->count = len;
switch(bus->mode) {
case pmac_i2c_mode_std:
if (subsize != 0)
return -EINVAL;
hdr->address = addrdir;
hdr->mode = PMU_I2C_MODE_SIMPLE;
break;
case pmac_i2c_mode_stdsub:
case pmac_i2c_mode_combined:
if (subsize != 1)
return -EINVAL;
hdr->address = addrdir & 0xfe;
hdr->comb_addr = addrdir;
hdr->sub_addr = subaddr;
if (bus->mode == pmac_i2c_mode_stdsub)
hdr->mode = PMU_I2C_MODE_STDSUB;
else
hdr->mode = PMU_I2C_MODE_COMBINED;
break;
default:
return -EINVAL;
}
INIT_COMPLETION(comp);
req->data[0] = PMU_I2C_CMD;
req->reply[0] = 0xff;
req->nbytes = sizeof(struct pmu_i2c_hdr) + 1;
req->done = pmu_i2c_complete;
req->arg = ∁
if (!read && len) {
memcpy(hdr->data, data, len);
req->nbytes += len;
}
rc = pmu_queue_request(req);
if (rc)
return rc;
wait_for_completion(&comp);
if (req->reply[0] == PMU_I2C_STATUS_OK)
break;
msleep(15);
}
if (req->reply[0] != PMU_I2C_STATUS_OK)
return -EIO;
for (retry = 0; retry < 16; retry++) {
memset(req, 0, sizeof(struct adb_request));
/* I know that looks like a lot, slow as hell, but darwin
* does it so let's be on the safe side for now
*/
msleep(15);
hdr->bus = PMU_I2C_BUS_STATUS;
INIT_COMPLETION(comp);
req->data[0] = PMU_I2C_CMD;
req->reply[0] = 0xff;
req->nbytes = 2;
req->done = pmu_i2c_complete;
req->arg = ∁
rc = pmu_queue_request(req);
if (rc)
return rc;
wait_for_completion(&comp);
if (req->reply[0] == PMU_I2C_STATUS_OK && !read)
return 0;
if (req->reply[0] == PMU_I2C_STATUS_DATAREAD && read) {
int rlen = req->reply_len - 1;
if (rlen != len) {
printk(KERN_WARNING "low_i2c: PMU returned %d"
" bytes, expected %d !\n", rlen, len);
return -EIO;
}
if (len)
memcpy(data, &req->reply[1], len);
return 0;
}
}
return -EIO;
}
static void __init pmu_i2c_probe(void)
{
struct pmac_i2c_bus *bus;
struct device_node *busnode;
int channel, sz;
if (!pmu_present())
return;
/* There might or might not be a "pmu-i2c" node, we use that
* or via-pmu itself, whatever we find. I haven't seen a machine
* with separate bus nodes, so we assume a multibus setup
*/
busnode = of_find_node_by_name(NULL, "pmu-i2c");
if (busnode == NULL)
busnode = of_find_node_by_name(NULL, "via-pmu");
if (busnode == NULL)
return;
printk(KERN_INFO "PMU i2c %s\n", busnode->full_name);
/*
* We add bus 1 and 2 only for now, bus 0 is "special"
*/
for (channel = 1; channel <= 2; channel++) {
sz = sizeof(struct pmac_i2c_bus) + sizeof(struct adb_request);
bus = kzalloc(sz, GFP_KERNEL);
if (bus == NULL)
return;
bus->controller = busnode;
bus->busnode = busnode;
bus->type = pmac_i2c_bus_pmu;
bus->channel = channel;
bus->mode = pmac_i2c_mode_std;
bus->hostdata = bus + 1;
bus->xfer = pmu_i2c_xfer;
mutex_init(&bus->mutex);
bus->flags = pmac_i2c_multibus;
list_add(&bus->link, &pmac_i2c_busses);
printk(KERN_INFO " channel %d bus <multibus>\n", channel);
}
}
#endif /* CONFIG_ADB_PMU */
/*
*
* SMU implementation
*
*/
#ifdef CONFIG_PMAC_SMU
static void smu_i2c_complete(struct smu_i2c_cmd *cmd, void *misc)
{
complete(misc);
}
static int smu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
u32 subaddr, u8 *data, int len)
{
struct smu_i2c_cmd *cmd = bus->hostdata;
struct completion comp;
int read = addrdir & 1;
int rc = 0;
if ((read && len > SMU_I2C_READ_MAX) ||
((!read) && len > SMU_I2C_WRITE_MAX))
return -EINVAL;
memset(cmd, 0, sizeof(struct smu_i2c_cmd));
cmd->info.bus = bus->channel;
cmd->info.devaddr = addrdir;
cmd->info.datalen = len;
switch(bus->mode) {
case pmac_i2c_mode_std:
if (subsize != 0)
return -EINVAL;
cmd->info.type = SMU_I2C_TRANSFER_SIMPLE;
break;
case pmac_i2c_mode_stdsub:
case pmac_i2c_mode_combined:
if (subsize > 3 || subsize < 1)
return -EINVAL;
cmd->info.sublen = subsize;
/* that's big-endian only but heh ! */
memcpy(&cmd->info.subaddr, ((char *)&subaddr) + (4 - subsize),
subsize);
if (bus->mode == pmac_i2c_mode_stdsub)
cmd->info.type = SMU_I2C_TRANSFER_STDSUB;
else
cmd->info.type = SMU_I2C_TRANSFER_COMBINED;
break;
default:
return -EINVAL;
}
if (!read && len)
memcpy(cmd->info.data, data, len);
init_completion(&comp);
cmd->done = smu_i2c_complete;
cmd->misc = ∁
rc = smu_queue_i2c(cmd);
if (rc < 0)
return rc;
wait_for_completion(&comp);
rc = cmd->status;
if (read && len)
memcpy(data, cmd->info.data, len);
return rc < 0 ? rc : 0;
}
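/* Editor's sketch (not in the original source) of the big-endian
 * subaddress copy above: for subsize = 2 and a hypothetical
 * subaddr = 0x1234, the u32 holds bytes 00 00 12 34 on powerpc, so
 * copying subsize bytes from offset 4 - 2 puts 0x12 0x34 into
 * cmd->info.subaddr.
 */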
static void __init smu_i2c_probe(void)
{
struct device_node *controller, *busnode;
struct pmac_i2c_bus *bus;
const u32 *reg;
int sz;
if (!smu_present())
return;
controller = of_find_node_by_name(NULL, "smu-i2c-control");
if (controller == NULL)
controller = of_find_node_by_name(NULL, "smu");
if (controller == NULL)
return;
printk(KERN_INFO "SMU i2c %s\n", controller->full_name);
/* Look for children; note that they might not be of the right
* type as older device trees mix i2c busses and other things
* at the same level
*/
for (busnode = NULL;
(busnode = of_get_next_child(controller, busnode)) != NULL;) {
if (strcmp(busnode->type, "i2c") &&
strcmp(busnode->type, "i2c-bus"))
continue;
reg = of_get_property(busnode, "reg", NULL);
if (reg == NULL)
continue;
sz = sizeof(struct pmac_i2c_bus) + sizeof(struct smu_i2c_cmd);
bus = kzalloc(sz, GFP_KERNEL);
if (bus == NULL)
return;
bus->controller = controller;
bus->busnode = of_node_get(busnode);
bus->type = pmac_i2c_bus_smu;
bus->channel = *reg;
bus->mode = pmac_i2c_mode_std;
bus->hostdata = bus + 1;
bus->xfer = smu_i2c_xfer;
mutex_init(&bus->mutex);
bus->flags = 0;
list_add(&bus->link, &pmac_i2c_busses);
printk(KERN_INFO " channel %x bus %s\n",
bus->channel, busnode->full_name);
}
}
#endif /* CONFIG_PMAC_SMU */
/*
*
* Core code
*
*/
struct pmac_i2c_bus *pmac_i2c_find_bus(struct device_node *node)
{
struct device_node *p = of_node_get(node);
struct device_node *prev = NULL;
struct pmac_i2c_bus *bus;
while(p) {
list_for_each_entry(bus, &pmac_i2c_busses, link) {
if (p == bus->busnode) {
if (prev && bus->flags & pmac_i2c_multibus) {
const u32 *reg;
reg = of_get_property(prev, "reg",
NULL);
if (!reg)
continue;
if (((*reg) >> 8) != bus->channel)
continue;
}
of_node_put(p);
of_node_put(prev);
return bus;
}
}
of_node_put(prev);
prev = p;
p = of_get_parent(p);
}
return NULL;
}
EXPORT_SYMBOL_GPL(pmac_i2c_find_bus);
u8 pmac_i2c_get_dev_addr(struct device_node *device)
{
const u32 *reg = of_get_property(device, "reg", NULL);
if (reg == NULL)
return 0;
return (*reg) & 0xff;
}
EXPORT_SYMBOL_GPL(pmac_i2c_get_dev_addr);
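/* Editor's sketch (not in the original source) of the "reg" encoding
 * assumed here and in pmac_i2c_find_bus(): a hypothetical device reg
 * of 0x1c8 means i2c address 0xc8 ((*reg) & 0xff) on multibus
 * channel 1 ((*reg) >> 8).
 */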
struct device_node *pmac_i2c_get_controller(struct pmac_i2c_bus *bus)
{
return bus->controller;
}
EXPORT_SYMBOL_GPL(pmac_i2c_get_controller);
struct device_node *pmac_i2c_get_bus_node(struct pmac_i2c_bus *bus)
{
return bus->busnode;
}
EXPORT_SYMBOL_GPL(pmac_i2c_get_bus_node);
int pmac_i2c_get_type(struct pmac_i2c_bus *bus)
{
return bus->type;
}
EXPORT_SYMBOL_GPL(pmac_i2c_get_type);
int pmac_i2c_get_flags(struct pmac_i2c_bus *bus)
{
return bus->flags;
}
EXPORT_SYMBOL_GPL(pmac_i2c_get_flags);
int pmac_i2c_get_channel(struct pmac_i2c_bus *bus)
{
return bus->channel;
}
EXPORT_SYMBOL_GPL(pmac_i2c_get_channel);
struct i2c_adapter *pmac_i2c_get_adapter(struct pmac_i2c_bus *bus)
{
return &bus->adapter;
}
EXPORT_SYMBOL_GPL(pmac_i2c_get_adapter);
struct pmac_i2c_bus *pmac_i2c_adapter_to_bus(struct i2c_adapter *adapter)
{
struct pmac_i2c_bus *bus;
list_for_each_entry(bus, &pmac_i2c_busses, link)
if (&bus->adapter == adapter)
return bus;
return NULL;
}
EXPORT_SYMBOL_GPL(pmac_i2c_adapter_to_bus);
int pmac_i2c_match_adapter(struct device_node *dev, struct i2c_adapter *adapter)
{
struct pmac_i2c_bus *bus = pmac_i2c_find_bus(dev);
if (bus == NULL)
return 0;
return (&bus->adapter == adapter);
}
EXPORT_SYMBOL_GPL(pmac_i2c_match_adapter);
int pmac_low_i2c_lock(struct device_node *np)
{
struct pmac_i2c_bus *bus, *found = NULL;
list_for_each_entry(bus, &pmac_i2c_busses, link) {
if (np == bus->controller) {
found = bus;
break;
}
}
if (!found)
return -ENODEV;
return pmac_i2c_open(found, 0);
}
EXPORT_SYMBOL_GPL(pmac_low_i2c_lock);
int pmac_low_i2c_unlock(struct device_node *np)
{
struct pmac_i2c_bus *bus, *found = NULL;
list_for_each_entry(bus, &pmac_i2c_busses, link) {
if (np == bus->controller) {
found = bus;
break;
}
}
if (!found)
return -ENODEV;
pmac_i2c_close(found);
return 0;
}
EXPORT_SYMBOL_GPL(pmac_low_i2c_unlock);
int pmac_i2c_open(struct pmac_i2c_bus *bus, int polled)
{
int rc;
mutex_lock(&bus->mutex);
bus->polled = polled || pmac_i2c_force_poll;
bus->opened = 1;
bus->mode = pmac_i2c_mode_std;
if (bus->open && (rc = bus->open(bus)) != 0) {
bus->opened = 0;
mutex_unlock(&bus->mutex);
return rc;
}
return 0;
}
EXPORT_SYMBOL_GPL(pmac_i2c_open);
void pmac_i2c_close(struct pmac_i2c_bus *bus)
{
WARN_ON(!bus->opened);
if (bus->close)
bus->close(bus);
bus->opened = 0;
mutex_unlock(&bus->mutex);
}
EXPORT_SYMBOL_GPL(pmac_i2c_close);
int pmac_i2c_setmode(struct pmac_i2c_bus *bus, int mode)
{
WARN_ON(!bus->opened);
/* Report it to me if you see the error below, as there might be a new
* "combined4" mode that I need to implement for the SMU bus
*/
if (mode < pmac_i2c_mode_dumb || mode > pmac_i2c_mode_combined) {
printk(KERN_ERR "low_i2c: Invalid mode %d requested on"
" bus %s !\n", mode, bus->busnode->full_name);
return -EINVAL;
}
bus->mode = mode;
return 0;
}
EXPORT_SYMBOL_GPL(pmac_i2c_setmode);
int pmac_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
u32 subaddr, u8 *data, int len)
{
int rc;
WARN_ON(!bus->opened);
DBG("xfer() chan=%d, addrdir=0x%x, mode=%d, subsize=%d, subaddr=0x%x,"
" %d bytes, bus %s\n", bus->channel, addrdir, bus->mode, subsize,
subaddr, len, bus->busnode->full_name);
rc = bus->xfer(bus, addrdir, subsize, subaddr, data, len);
#ifdef DEBUG
if (rc)
DBG("xfer error %d\n", rc);
#endif
return rc;
}
EXPORT_SYMBOL_GPL(pmac_i2c_xfer);
/* some quirks for platform function decoding */
enum {
pmac_i2c_quirk_invmask = 0x00000001u,
pmac_i2c_quirk_skip = 0x00000002u,
};
static void pmac_i2c_devscan(void (*callback)(struct device_node *dev,
int quirks))
{
struct pmac_i2c_bus *bus;
struct device_node *np;
static struct whitelist_ent {
char *name;
char *compatible;
int quirks;
} whitelist[] = {
/* XXX Study device-trees & apple drivers to get the quirks
* right !
*/
/* Workaround: It seems that running the clockspreading
* properties on the eMac will cause lockups during boot.
* The machine seems to work fine without that. So for now,
* let's make sure i2c-hwclock doesn't match about "imic"
* clocks and we'll figure out if we really need to do
* something special about those later.
*/
{ "i2c-hwclock", "imic5002", pmac_i2c_quirk_skip },
{ "i2c-hwclock", "imic5003", pmac_i2c_quirk_skip },
{ "i2c-hwclock", NULL, pmac_i2c_quirk_invmask },
{ "i2c-cpu-voltage", NULL, 0},
{ "temp-monitor", NULL, 0 },
{ "supply-monitor", NULL, 0 },
{ NULL, NULL, 0 },
};
/* Only some devices need to have platform functions instantiated
* here. For now, we have a table. Others, like 9554 i2c GPIOs used
* on Xserve, if we ever do a driver for them, will use their own
* platform function instance
*/
list_for_each_entry(bus, &pmac_i2c_busses, link) {
for (np = NULL;
(np = of_get_next_child(bus->busnode, np)) != NULL;) {
struct whitelist_ent *p;
/* If multibus, check if device is on that bus */
if (bus->flags & pmac_i2c_multibus)
if (bus != pmac_i2c_find_bus(np))
continue;
for (p = whitelist; p->name != NULL; p++) {
if (strcmp(np->name, p->name))
continue;
if (p->compatible &&
!of_device_is_compatible(np, p->compatible))
continue;
if (p->quirks & pmac_i2c_quirk_skip)
break;
callback(np, p->quirks);
break;
}
}
}
}
#define MAX_I2C_DATA 64
struct pmac_i2c_pf_inst
{
struct pmac_i2c_bus *bus;
u8 addr;
u8 buffer[MAX_I2C_DATA];
u8 scratch[MAX_I2C_DATA];
int bytes;
int quirks;
};
static void* pmac_i2c_do_begin(struct pmf_function *func, struct pmf_args *args)
{
struct pmac_i2c_pf_inst *inst;
struct pmac_i2c_bus *bus;
bus = pmac_i2c_find_bus(func->node);
if (bus == NULL) {
printk(KERN_ERR "low_i2c: Can't find bus for %s (pfunc)\n",
func->node->full_name);
return NULL;
}
if (pmac_i2c_open(bus, 0)) {
printk(KERN_ERR "low_i2c: Can't open i2c bus for %s (pfunc)\n",
func->node->full_name);
return NULL;
}
/* XXX might need GFP_ATOMIC when called during the suspend process,
* but then, there are already lots of issues with suspending when
* near OOM that need to be resolved, the allocator itself should
* probably make GFP_NOIO implicit during suspend
*/
inst = kzalloc(sizeof(struct pmac_i2c_pf_inst), GFP_KERNEL);
if (inst == NULL) {
pmac_i2c_close(bus);
return NULL;
}
inst->bus = bus;
inst->addr = pmac_i2c_get_dev_addr(func->node);
inst->quirks = (int)(long)func->driver_data;
return inst;
}
static void pmac_i2c_do_end(struct pmf_function *func, void *instdata)
{
struct pmac_i2c_pf_inst *inst = instdata;
if (inst == NULL)
return;
pmac_i2c_close(inst->bus);
kfree(inst);
}
static int pmac_i2c_do_read(PMF_STD_ARGS, u32 len)
{
struct pmac_i2c_pf_inst *inst = instdata;
inst->bytes = len;
return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_read, 0, 0,
inst->buffer, len);
}
static int pmac_i2c_do_write(PMF_STD_ARGS, u32 len, const u8 *data)
{
struct pmac_i2c_pf_inst *inst = instdata;
return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 0, 0,
(u8 *)data, len);
}
/* This function is used to do the masking & OR'ing for the "rmw" type
* callbacks. We should apply the mask and OR in the values in the
* buffer before writing back. The problem is that it seems that
* various darwin drivers implement the mask/or differently, thus
* we need to check the quirks first
*/
static void pmac_i2c_do_apply_rmw(struct pmac_i2c_pf_inst *inst,
u32 len, const u8 *mask, const u8 *val)
{
int i;
if (inst->quirks & pmac_i2c_quirk_invmask) {
for (i = 0; i < len; i ++)
inst->scratch[i] = (inst->buffer[i] & mask[i]) | val[i];
} else {
for (i = 0; i < len; i ++)
inst->scratch[i] = (inst->buffer[i] & ~mask[i])
| (val[i] & mask[i]);
}
}
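/* Editor's sketch (not in the original source): a minimal, compiled-out
 * demonstration of the two quirk semantics above, with made-up values.
 */
#if 0
static void pmac_i2c_rmw_demo(void)
{
u8 buf = 0xf0, mask = 0x0f, val = 0x05;
u8 def = (buf & ~mask) | (val & mask); /* default semantics: 0xf5 */
u8 inv = (buf & mask) | val; /* invmask quirk: 0x05 */
printk(KERN_DEBUG "rmw demo: default=%02x invmask=%02x\n", def, inv);
}
#endif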
static int pmac_i2c_do_rmw(PMF_STD_ARGS, u32 masklen, u32 valuelen,
u32 totallen, const u8 *maskdata,
const u8 *valuedata)
{
struct pmac_i2c_pf_inst *inst = instdata;
if (masklen > inst->bytes || valuelen > inst->bytes ||
totallen > inst->bytes || valuelen > masklen)
return -EINVAL;
pmac_i2c_do_apply_rmw(inst, masklen, maskdata, valuedata);
return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 0, 0,
inst->scratch, totallen);
}
static int pmac_i2c_do_read_sub(PMF_STD_ARGS, u8 subaddr, u32 len)
{
struct pmac_i2c_pf_inst *inst = instdata;
inst->bytes = len;
return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_read, 1, subaddr,
inst->buffer, len);
}
static int pmac_i2c_do_write_sub(PMF_STD_ARGS, u8 subaddr, u32 len,
const u8 *data)
{
struct pmac_i2c_pf_inst *inst = instdata;
return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 1,
subaddr, (u8 *)data, len);
}
static int pmac_i2c_do_set_mode(PMF_STD_ARGS, int mode)
{
struct pmac_i2c_pf_inst *inst = instdata;
return pmac_i2c_setmode(inst->bus, mode);
}
static int pmac_i2c_do_rmw_sub(PMF_STD_ARGS, u8 subaddr, u32 masklen,
u32 valuelen, u32 totallen, const u8 *maskdata,
const u8 *valuedata)
{
struct pmac_i2c_pf_inst *inst = instdata;
if (masklen > inst->bytes || valuelen > inst->bytes ||
totallen > inst->bytes || valuelen > masklen)
return -EINVAL;
pmac_i2c_do_apply_rmw(inst, masklen, maskdata, valuedata);
return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 1,
subaddr, inst->scratch, totallen);
}
static int pmac_i2c_do_mask_and_comp(PMF_STD_ARGS, u32 len,
const u8 *maskdata,
const u8 *valuedata)
{
struct pmac_i2c_pf_inst *inst = instdata;
int i, match;
/* Get return value pointer, it's assumed to be a u32 */
if (!args || !args->count || !args->u[0].p)
return -EINVAL;
/* Check buffer */
if (len > inst->bytes)
return -EINVAL;
for (i = 0, match = 1; match && i < len; i ++)
if ((inst->buffer[i] & maskdata[i]) != valuedata[i])
match = 0;
*args->u[0].p = match;
return 0;
}
static int pmac_i2c_do_delay(PMF_STD_ARGS, u32 duration)
{
msleep((duration + 999) / 1000);
return 0;
}
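/* Editor's note (not in the original source): the pfunc delay duration
 * appears to be in microseconds and is rounded up to whole
 * milliseconds, so duration = 1500 ends up as msleep(2).
 */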
static struct pmf_handlers pmac_i2c_pfunc_handlers = {
.begin = pmac_i2c_do_begin,
.end = pmac_i2c_do_end,
.read_i2c = pmac_i2c_do_read,
.write_i2c = pmac_i2c_do_write,
.rmw_i2c = pmac_i2c_do_rmw,
.read_i2c_sub = pmac_i2c_do_read_sub,
.write_i2c_sub = pmac_i2c_do_write_sub,
.rmw_i2c_sub = pmac_i2c_do_rmw_sub,
.set_i2c_mode = pmac_i2c_do_set_mode,
.mask_and_compare = pmac_i2c_do_mask_and_comp,
.delay = pmac_i2c_do_delay,
};
static void __init pmac_i2c_dev_create(struct device_node *np, int quirks)
{
DBG("dev_create(%s)\n", np->full_name);
pmf_register_driver(np, &pmac_i2c_pfunc_handlers,
(void *)(long)quirks);
}
static void __init pmac_i2c_dev_init(struct device_node *np, int quirks)
{
DBG("dev_create(%s)\n", np->full_name);
pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_INIT, NULL);
}
static void pmac_i2c_dev_suspend(struct device_node *np, int quirks)
{
DBG("dev_suspend(%s)\n", np->full_name);
pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_SLEEP, NULL);
}
static void pmac_i2c_dev_resume(struct device_node *np, int quirks)
{
DBG("dev_resume(%s)\n", np->full_name);
pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_WAKE, NULL);
}
void pmac_pfunc_i2c_suspend(void)
{
pmac_i2c_devscan(pmac_i2c_dev_suspend);
}
void pmac_pfunc_i2c_resume(void)
{
pmac_i2c_devscan(pmac_i2c_dev_resume);
}
/*
* Initialize us: probe all i2c busses on the machine, instantiate
* busses and platform functions as needed.
*/
/* This is non-static as it might be called early by smp code */
int __init pmac_i2c_init(void)
{
static int i2c_inited;
if (i2c_inited)
return 0;
i2c_inited = 1;
/* Probe keywest-i2c busses */
kw_i2c_probe();
#ifdef CONFIG_ADB_PMU
/* Probe PMU i2c busses */
pmu_i2c_probe();
#endif
#ifdef CONFIG_PMAC_SMU
/* Probe SMU i2c busses */
smu_i2c_probe();
#endif
/* Now add platform functions for some known devices */
pmac_i2c_devscan(pmac_i2c_dev_create);
return 0;
}
machine_arch_initcall(powermac, pmac_i2c_init);
/* Since pmac_i2c_init can be called too early for the platform device
* registration, we need to do it at a later time. In our case, subsys
* happens to fit well, though I agree it's a bit of a hack...
*/
static int __init pmac_i2c_create_platform_devices(void)
{
struct pmac_i2c_bus *bus;
int i = 0;
/* In the case where we are initialized from smp_init(), we must
* not use the timer (and thus the irq). It's safe from now on
* though
*/
pmac_i2c_force_poll = 0;
/* Create platform devices */
list_for_each_entry(bus, &pmac_i2c_busses, link) {
bus->platform_dev =
platform_device_alloc("i2c-powermac", i++);
if (bus->platform_dev == NULL)
return -ENOMEM;
bus->platform_dev->dev.platform_data = bus;
platform_device_add(bus->platform_dev);
}
/* Now call platform "init" functions */
pmac_i2c_devscan(pmac_i2c_dev_init);
return 0;
}
machine_subsys_initcall(powermac, pmac_i2c_create_platform_devices);
| gpl-2.0 |