| repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
eugenesan/android_kernel_lge_hammerhead | drivers/net/ethernet/sgi/ioc3-eth.c | 5124 | 44986 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
 * Driver for SGI's IOC3 based Ethernet cards as found on PCI cards.
*
* Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle
* Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc.
*
* References:
* o IOC3 ASIC specification 4.51, 1996-04-18
* o IEEE 802.3 specification, 2000 edition
 * o DP83840A Specification, National Semiconductor, March 1997
*
* To do:
*
* o Handle allocation failures in ioc3_alloc_skb() more gracefully.
* o Handle allocation failures in ioc3_init_rings().
* o Use prefetching for large packets. What is a good lower limit for
* prefetching?
* o We're probably allocating a bit too much memory.
* o Use hardware checksums.
* o Convert to using a IOC3 meta driver.
 * o Which PHYs might possibly be attached to the IOC3 in real life,
* which workarounds are required for them? Do we ever have Lucent's?
* o For the 2.5 branch kill the mii-tool ioctls.
*/
#define IOC3_NAME "ioc3-eth"
#define IOC3_VERSION "2.6.3-4"
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#ifdef CONFIG_SERIAL_8250
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
#endif
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/sn/types.h>
#include <asm/sn/ioc3.h>
#include <asm/pci/bridge.h>
/*
* 64 RX buffers. This is tunable in the range of 16 <= x < 512. The
* value must be a power of two.
*/
#define RX_BUFFS 64
#define ETCSR_FD ((17<<ETCSR_IPGR2_SHIFT) | (11<<ETCSR_IPGR1_SHIFT) | 21)
#define ETCSR_HD ((21<<ETCSR_IPGR2_SHIFT) | (21<<ETCSR_IPGR1_SHIFT) | 21)
/* Private per NIC data of the driver. */
struct ioc3_private {
struct ioc3 *regs;
unsigned long *rxr; /* pointer to receiver ring */
struct ioc3_etxd *txr;
struct sk_buff *rx_skbs[512];
struct sk_buff *tx_skbs[128];
int rx_ci; /* RX consumer index */
int rx_pi; /* RX producer index */
int tx_ci; /* TX consumer index */
int tx_pi; /* TX producer index */
int txqlen;
u32 emcr, ehar_h, ehar_l;
spinlock_t ioc3_lock;
struct mii_if_info mii;
struct pci_dev *pdev;
/* Members used by autonegotiation */
struct timer_list ioc3_timer;
};
static inline struct net_device *priv_netdev(struct ioc3_private *dev)
{
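	/* The private area is laid out right behind the 32-byte aligned
	 * struct net_device (the alloc_etherdev() layout of this kernel
	 * generation, mirrored by the arithmetic below), so stepping back
	 * by the padded struct size recovers the net_device pointer. */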
return (void *)dev - ((sizeof(struct net_device) + 31) & ~31);
}
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void ioc3_set_multicast_list(struct net_device *dev);
static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void ioc3_timeout(struct net_device *dev);
static inline unsigned int ioc3_hash(const unsigned char *addr);
static inline void ioc3_stop(struct ioc3_private *ip);
static void ioc3_init(struct net_device *dev);
static const char ioc3_str[] = "IOC3 Ethernet";
static const struct ethtool_ops ioc3_ethtool_ops;
/* We use this to acquire receive skb's that we can DMA directly into. */
#define IOC3_CACHELINE 128UL
static inline unsigned long aligned_rx_skb_addr(unsigned long addr)
{
return (~addr + 1) & (IOC3_CACHELINE - 1UL);
}
static inline struct sk_buff * ioc3_alloc_skb(unsigned long length,
unsigned int gfp_mask)
{
struct sk_buff *skb;
skb = alloc_skb(length + IOC3_CACHELINE - 1, gfp_mask);
if (likely(skb)) {
int offset = aligned_rx_skb_addr((unsigned long) skb->data);
if (offset)
skb_reserve(skb, offset);
}
return skb;
}
static inline unsigned long ioc3_map(void *ptr, unsigned long vdev)
{
#ifdef CONFIG_SGI_IP27
vdev <<= 57; /* Shift to PCI64_ATTR_VIRTUAL */
return vdev | (0xaUL << PCI64_ATTR_TARG_SHFT) | PCI64_ATTR_PREF |
((unsigned long)ptr & TO_PHYS_MASK);
#else
return virt_to_bus(ptr);
#endif
}
/* BEWARE: The IOC3 documentation documents the size of rx buffers as
1644 while it's actually 1664. This one was nasty to track down ... */
#define RX_OFFSET 10
#define RX_BUF_ALLOC_SIZE (1664 + RX_OFFSET + IOC3_CACHELINE)
/* DMA barrier to separate cached and uncached accesses. */
#define BARRIER() \
__asm__("sync" ::: "memory")
#define IOC3_SIZE 0x100000
/*
* IOC3 is a big endian device
*
* Unorthodox but makes the users of these macros more readable - the pointer
* to the IOC3's memory mapped registers is expected as struct ioc3 * ioc3
* in the environment.
*/
#define ioc3_r_mcr() be32_to_cpu(ioc3->mcr)
#define ioc3_w_mcr(v) do { ioc3->mcr = cpu_to_be32(v); } while (0)
#define ioc3_w_gpcr_s(v) do { ioc3->gpcr_s = cpu_to_be32(v); } while (0)
#define ioc3_r_emcr() be32_to_cpu(ioc3->emcr)
#define ioc3_w_emcr(v) do { ioc3->emcr = cpu_to_be32(v); } while (0)
#define ioc3_r_eisr() be32_to_cpu(ioc3->eisr)
#define ioc3_w_eisr(v) do { ioc3->eisr = cpu_to_be32(v); } while (0)
#define ioc3_r_eier() be32_to_cpu(ioc3->eier)
#define ioc3_w_eier(v) do { ioc3->eier = cpu_to_be32(v); } while (0)
#define ioc3_r_ercsr() be32_to_cpu(ioc3->ercsr)
#define ioc3_w_ercsr(v) do { ioc3->ercsr = cpu_to_be32(v); } while (0)
#define ioc3_r_erbr_h() be32_to_cpu(ioc3->erbr_h)
#define ioc3_w_erbr_h(v) do { ioc3->erbr_h = cpu_to_be32(v); } while (0)
#define ioc3_r_erbr_l() be32_to_cpu(ioc3->erbr_l)
#define ioc3_w_erbr_l(v) do { ioc3->erbr_l = cpu_to_be32(v); } while (0)
#define ioc3_r_erbar() be32_to_cpu(ioc3->erbar)
#define ioc3_w_erbar(v) do { ioc3->erbar = cpu_to_be32(v); } while (0)
#define ioc3_r_ercir() be32_to_cpu(ioc3->ercir)
#define ioc3_w_ercir(v) do { ioc3->ercir = cpu_to_be32(v); } while (0)
#define ioc3_r_erpir() be32_to_cpu(ioc3->erpir)
#define ioc3_w_erpir(v) do { ioc3->erpir = cpu_to_be32(v); } while (0)
#define ioc3_r_ertr() be32_to_cpu(ioc3->ertr)
#define ioc3_w_ertr(v) do { ioc3->ertr = cpu_to_be32(v); } while (0)
#define ioc3_r_etcsr() be32_to_cpu(ioc3->etcsr)
#define ioc3_w_etcsr(v) do { ioc3->etcsr = cpu_to_be32(v); } while (0)
#define ioc3_r_ersr() be32_to_cpu(ioc3->ersr)
#define ioc3_w_ersr(v) do { ioc3->ersr = cpu_to_be32(v); } while (0)
#define ioc3_r_etcdc() be32_to_cpu(ioc3->etcdc)
#define ioc3_w_etcdc(v) do { ioc3->etcdc = cpu_to_be32(v); } while (0)
#define ioc3_r_ebir() be32_to_cpu(ioc3->ebir)
#define ioc3_w_ebir(v) do { ioc3->ebir = cpu_to_be32(v); } while (0)
#define ioc3_r_etbr_h() be32_to_cpu(ioc3->etbr_h)
#define ioc3_w_etbr_h(v) do { ioc3->etbr_h = cpu_to_be32(v); } while (0)
#define ioc3_r_etbr_l() be32_to_cpu(ioc3->etbr_l)
#define ioc3_w_etbr_l(v) do { ioc3->etbr_l = cpu_to_be32(v); } while (0)
#define ioc3_r_etcir() be32_to_cpu(ioc3->etcir)
#define ioc3_w_etcir(v) do { ioc3->etcir = cpu_to_be32(v); } while (0)
#define ioc3_r_etpir() be32_to_cpu(ioc3->etpir)
#define ioc3_w_etpir(v) do { ioc3->etpir = cpu_to_be32(v); } while (0)
#define ioc3_r_emar_h() be32_to_cpu(ioc3->emar_h)
#define ioc3_w_emar_h(v) do { ioc3->emar_h = cpu_to_be32(v); } while (0)
#define ioc3_r_emar_l() be32_to_cpu(ioc3->emar_l)
#define ioc3_w_emar_l(v) do { ioc3->emar_l = cpu_to_be32(v); } while (0)
#define ioc3_r_ehar_h() be32_to_cpu(ioc3->ehar_h)
#define ioc3_w_ehar_h(v) do { ioc3->ehar_h = cpu_to_be32(v); } while (0)
#define ioc3_r_ehar_l() be32_to_cpu(ioc3->ehar_l)
#define ioc3_w_ehar_l(v) do { ioc3->ehar_l = cpu_to_be32(v); } while (0)
#define ioc3_r_micr() be32_to_cpu(ioc3->micr)
#define ioc3_w_micr(v) do { ioc3->micr = cpu_to_be32(v); } while (0)
#define ioc3_r_midr_r() be32_to_cpu(ioc3->midr_r)
#define ioc3_w_midr_r(v) do { ioc3->midr_r = cpu_to_be32(v); } while (0)
#define ioc3_r_midr_w() be32_to_cpu(ioc3->midr_w)
#define ioc3_w_midr_w(v) do { ioc3->midr_w = cpu_to_be32(v); } while (0)
static inline u32 mcr_pack(u32 pulse, u32 sample)
{
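	/* Pack a one-wire bus transaction into MCR: the bus-low pulse
	 * width goes into bits 10 and up, the sample point into bits 2
	 * and up (the time units are presumably fixed by the IOC3
	 * one-wire state machine; inferred from the shifts below). */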
return (pulse << 10) | (sample << 2);
}
static int nic_wait(struct ioc3 *ioc3)
{
u32 mcr;
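	/* Spin until the done bit (bit 1) is set; bit 0 then holds the
	 * level sampled from the one-wire bus. */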
do {
mcr = ioc3_r_mcr();
} while (!(mcr & 2));
return mcr & 1;
}
static int nic_reset(struct ioc3 *ioc3)
{
int presence;
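	/* A long low pulse resets the one-wire bus; any device present
	 * answers with a presence pulse, which nic_wait() samples. */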
ioc3_w_mcr(mcr_pack(500, 65));
presence = nic_wait(ioc3);
ioc3_w_mcr(mcr_pack(0, 500));
nic_wait(ioc3);
return presence;
}
static inline int nic_read_bit(struct ioc3 *ioc3)
{
int result;
ioc3_w_mcr(mcr_pack(6, 13));
result = nic_wait(ioc3);
ioc3_w_mcr(mcr_pack(0, 100));
nic_wait(ioc3);
return result;
}
static inline void nic_write_bit(struct ioc3 *ioc3, int bit)
{
if (bit)
ioc3_w_mcr(mcr_pack(6, 110));
else
ioc3_w_mcr(mcr_pack(80, 30));
nic_wait(ioc3);
}
/*
* Read a byte from an iButton device
*/
static u32 nic_read_byte(struct ioc3 *ioc3)
{
u32 result = 0;
int i;
for (i = 0; i < 8; i++)
result = (result >> 1) | (nic_read_bit(ioc3) << 7);
return result;
}
/*
* Write a byte to an iButton device
*/
static void nic_write_byte(struct ioc3 *ioc3, int byte)
{
int i, bit;
for (i = 8; i; i--) {
bit = byte & 1;
byte >>= 1;
nic_write_bit(ioc3, bit);
}
}
static u64 nic_find(struct ioc3 *ioc3, int *last)
{
int a, b, index, disc;
u64 address = 0;
nic_reset(ioc3);
/* Search ROM. */
nic_write_byte(ioc3, 0xf0);
/* Algorithm from ``Book of iButton Standards''. */
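	/*
	 * Each search step reads a bit and its complement: a == b == 1
	 * means no device responded, a == b == 0 marks a discrepancy
	 * where the attached devices disagree, otherwise a is the bit
	 * value shared by all remaining devices.
	 */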
for (index = 0, disc = 0; index < 64; index++) {
a = nic_read_bit(ioc3);
b = nic_read_bit(ioc3);
if (a && b) {
printk("NIC search failed (not fatal).\n");
*last = 0;
return 0;
}
if (!a && !b) {
if (index == *last) {
address |= 1UL << index;
} else if (index > *last) {
address &= ~(1UL << index);
disc = index;
} else if ((address & (1UL << index)) == 0)
disc = index;
nic_write_bit(ioc3, address & (1UL << index));
continue;
} else {
if (a)
address |= 1UL << index;
else
address &= ~(1UL << index);
nic_write_bit(ioc3, a);
continue;
}
}
*last = disc;
return address;
}
static int nic_init(struct ioc3 *ioc3)
{
const char *unknown = "unknown";
const char *type = unknown;
u8 crc;
u8 serial[6];
int save = 0, i;
while (1) {
u64 reg;
reg = nic_find(ioc3, &save);
switch (reg & 0xff) {
case 0x91:
type = "DS1981U";
break;
default:
if (save == 0) {
/* Let the caller try again. */
return -1;
}
continue;
}
nic_reset(ioc3);
/* Match ROM. */
nic_write_byte(ioc3, 0x55);
for (i = 0; i < 8; i++)
nic_write_byte(ioc3, (reg >> (i << 3)) & 0xff);
reg >>= 8; /* Shift out type. */
for (i = 0; i < 6; i++) {
serial[i] = reg & 0xff;
reg >>= 8;
}
crc = reg & 0xff;
break;
}
printk("Found %s NIC", type);
if (type != unknown)
printk (" registration number %pM, CRC %02x", serial, crc);
printk(".\n");
return 0;
}
/*
* Read the NIC (Number-In-a-Can) device used to store the MAC address on
* SN0 / SN00 nodeboards and PCI cards.
*/
static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
{
struct ioc3 *ioc3 = ip->regs;
u8 nic[14];
int tries = 2; /* There may be some problem with the battery? */
int i;
ioc3_w_gpcr_s(1 << 21);
while (tries--) {
if (!nic_init(ioc3))
break;
udelay(500);
}
if (tries < 0) {
printk("Failed to read MAC address\n");
return;
}
/* Read Memory. */
nic_write_byte(ioc3, 0xf0);
nic_write_byte(ioc3, 0x00);
nic_write_byte(ioc3, 0x00);
for (i = 13; i >= 0; i--)
nic[i] = nic_read_byte(ioc3);
for (i = 2; i < 8; i++)
priv_netdev(ip)->dev_addr[i - 2] = nic[i];
}
/*
* Ok, this is hosed by design. It's necessary to know what machine the
* NIC is in in order to know how to read the NIC address. We also have
 * to know if it's a PCI card or a NIC on the node board ...
*/
static void ioc3_get_eaddr(struct ioc3_private *ip)
{
ioc3_get_eaddr_nic(ip);
printk("Ethernet address is %pM.\n", priv_netdev(ip)->dev_addr);
}
static void __ioc3_set_mac_address(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3 *ioc3 = ip->regs;
ioc3_w_emar_h((dev->dev_addr[5] << 8) | dev->dev_addr[4]);
ioc3_w_emar_l((dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) |
(dev->dev_addr[1] << 8) | dev->dev_addr[0]);
}
static int ioc3_set_mac_address(struct net_device *dev, void *addr)
{
struct ioc3_private *ip = netdev_priv(dev);
struct sockaddr *sa = addr;
memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
spin_lock_irq(&ip->ioc3_lock);
__ioc3_set_mac_address(dev);
spin_unlock_irq(&ip->ioc3_lock);
return 0;
}
/*
 * Caller must hold the ioc3_lock even for MII readers. This is also
* used to protect the transmitter side but it's low contention.
*/
static int ioc3_mdio_read(struct net_device *dev, int phy, int reg)
{
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3 *ioc3 = ip->regs;
while (ioc3_r_micr() & MICR_BUSY);
ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG);
while (ioc3_r_micr() & MICR_BUSY);
return ioc3_r_midr_r() & MIDR_DATA_MASK;
}
static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data)
{
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3 *ioc3 = ip->regs;
while (ioc3_r_micr() & MICR_BUSY);
ioc3_w_midr_w(data);
ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg);
while (ioc3_r_micr() & MICR_BUSY);
}
static int ioc3_mii_init(struct ioc3_private *ip);
static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3 *ioc3 = ip->regs;
dev->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK);
return &dev->stats;
}
static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
{
struct ethhdr *eh = eth_hdr(skb);
uint32_t csum, ehsum;
unsigned int proto;
struct iphdr *ih;
uint16_t *ew;
unsigned char *cp;
/*
* Did hardware handle the checksum at all? The cases we can handle
* are:
*
* - TCP and UDP checksums of IPv4 only.
* - IPv6 would be doable but we keep that for later ...
* - Only unfragmented packets. Did somebody already tell you
* fragmentation is evil?
* - don't care about packet size. Worst case when processing a
* malformed packet we'll try to access the packet at ip header +
* 64 bytes which is still inside the skb. Even in the unlikely
* case where the checksum is right the higher layers will still
* drop the packet as appropriate.
*/
if (eh->h_proto != htons(ETH_P_IP))
return;
ih = (struct iphdr *) ((char *)eh + ETH_HLEN);
if (ip_is_fragment(ih))
return;
proto = ih->protocol;
if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
return;
/* Same as tx - compute csum of pseudo header */
csum = hwsum +
(ih->tot_len - (ih->ihl << 2)) +
htons((uint16_t)ih->protocol) +
(ih->saddr >> 16) + (ih->saddr & 0xffff) +
(ih->daddr >> 16) + (ih->daddr & 0xffff);
/* Sum up ethernet dest addr, src addr and protocol */
ew = (uint16_t *) eh;
ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6];
ehsum = (ehsum & 0xffff) + (ehsum >> 16);
ehsum = (ehsum & 0xffff) + (ehsum >> 16);
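	/* Adding 0xffff ^ x is one's complement subtraction: remove the
	 * Ethernet header sum, which the hardware included but which the
	 * pseudo header checksum must not contain. */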
csum += 0xffff ^ ehsum;
/* In the next step we also subtract the 1's complement
checksum of the trailing ethernet CRC. */
cp = (char *)eh + len; /* points at trailing CRC */
if (len & 1) {
csum += 0xffff ^ (uint16_t) ((cp[1] << 8) | cp[0]);
csum += 0xffff ^ (uint16_t) ((cp[3] << 8) | cp[2]);
} else {
csum += 0xffff ^ (uint16_t) ((cp[0] << 8) | cp[1]);
csum += 0xffff ^ (uint16_t) ((cp[2] << 8) | cp[3]);
}
csum = (csum & 0xffff) + (csum >> 16);
csum = (csum & 0xffff) + (csum >> 16);
if (csum == 0xffff)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static inline void ioc3_rx(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
struct sk_buff *skb, *new_skb;
struct ioc3 *ioc3 = ip->regs;
int rx_entry, n_entry, len;
struct ioc3_erxbuf *rxb;
unsigned long *rxr;
u32 w0, err;
rxr = (unsigned long *) ip->rxr; /* Ring base */
rx_entry = ip->rx_ci; /* RX consume index */
n_entry = ip->rx_pi;
skb = ip->rx_skbs[rx_entry];
rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
w0 = be32_to_cpu(rxb->w0);
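	/* The chip sets ERXBUF_V in w0 once it has filled a buffer; the
	 * flag is cleared again below when the buffer is recycled. */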
while (w0 & ERXBUF_V) {
err = be32_to_cpu(rxb->err); /* It's valid ... */
if (err & ERXBUF_GOODPKT) {
len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
skb_trim(skb, len);
skb->protocol = eth_type_trans(skb, dev);
new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
if (!new_skb) {
				/* Ouch, drop the packet and just recycle it
				   to keep the ring filled. */
dev->stats.rx_dropped++;
new_skb = skb;
goto next;
}
if (likely(dev->features & NETIF_F_RXCSUM))
ioc3_tcpudp_checksum(skb,
w0 & ERXBUF_IPCKSUM_MASK, len);
netif_rx(skb);
ip->rx_skbs[rx_entry] = NULL; /* Poison */
/* Because we reserve afterwards. */
skb_put(new_skb, (1664 + RX_OFFSET));
rxb = (struct ioc3_erxbuf *) new_skb->data;
skb_reserve(new_skb, RX_OFFSET);
dev->stats.rx_packets++; /* Statistics */
dev->stats.rx_bytes += len;
} else {
/* The frame is invalid and the skb never
reached the network layer so we can just
recycle it. */
new_skb = skb;
dev->stats.rx_errors++;
}
if (err & ERXBUF_CRCERR) /* Statistics */
dev->stats.rx_crc_errors++;
if (err & ERXBUF_FRAMERR)
dev->stats.rx_frame_errors++;
next:
ip->rx_skbs[n_entry] = new_skb;
rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1));
rxb->w0 = 0; /* Clear valid flag */
n_entry = (n_entry + 1) & 511; /* Update erpir */
/* Now go on to the next ring entry. */
rx_entry = (rx_entry + 1) & 511;
skb = ip->rx_skbs[rx_entry];
rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
w0 = be32_to_cpu(rxb->w0);
}
ioc3_w_erpir((n_entry << 3) | ERPIR_ARM);
ip->rx_pi = n_entry;
ip->rx_ci = rx_entry;
}
static inline void ioc3_tx(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
unsigned long packets, bytes;
struct ioc3 *ioc3 = ip->regs;
int tx_entry, o_entry;
struct sk_buff *skb;
u32 etcir;
spin_lock(&ip->ioc3_lock);
etcir = ioc3_r_etcir();
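	/* ETCIR holds a byte offset into the TX ring; descriptors are
	 * 128 bytes each, so shifting by 7 yields the ring index
	 * (matching the produce << 7 write in ioc3_start_xmit()). */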
tx_entry = (etcir >> 7) & 127;
o_entry = ip->tx_ci;
packets = 0;
bytes = 0;
while (o_entry != tx_entry) {
packets++;
skb = ip->tx_skbs[o_entry];
bytes += skb->len;
dev_kfree_skb_irq(skb);
ip->tx_skbs[o_entry] = NULL;
o_entry = (o_entry + 1) & 127; /* Next */
etcir = ioc3_r_etcir(); /* More pkts sent? */
tx_entry = (etcir >> 7) & 127;
}
dev->stats.tx_packets += packets;
dev->stats.tx_bytes += bytes;
ip->txqlen -= packets;
if (ip->txqlen < 128)
netif_wake_queue(dev);
ip->tx_ci = o_entry;
spin_unlock(&ip->ioc3_lock);
}
/*
 * Deal with fatal IOC3 errors. This condition might be caused by a hardware
 * or a software problem, so we should try to recover
 * more gracefully if this ever happens. In theory we might be flooded
 * with such error interrupts if something really goes wrong, so we might
 * also consider taking the interface down.
*/
static void ioc3_error(struct net_device *dev, u32 eisr)
{
struct ioc3_private *ip = netdev_priv(dev);
unsigned char *iface = dev->name;
spin_lock(&ip->ioc3_lock);
if (eisr & EISR_RXOFLO)
printk(KERN_ERR "%s: RX overflow.\n", iface);
if (eisr & EISR_RXBUFOFLO)
printk(KERN_ERR "%s: RX buffer overflow.\n", iface);
if (eisr & EISR_RXMEMERR)
printk(KERN_ERR "%s: RX PCI error.\n", iface);
if (eisr & EISR_RXPARERR)
printk(KERN_ERR "%s: RX SSRAM parity error.\n", iface);
if (eisr & EISR_TXBUFUFLO)
printk(KERN_ERR "%s: TX buffer underflow.\n", iface);
if (eisr & EISR_TXMEMERR)
printk(KERN_ERR "%s: TX PCI error.\n", iface);
ioc3_stop(ip);
ioc3_init(dev);
ioc3_mii_init(ip);
netif_wake_queue(dev);
spin_unlock(&ip->ioc3_lock);
}
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
static irqreturn_t ioc3_interrupt(int irq, void *_dev)
{
struct net_device *dev = (struct net_device *)_dev;
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3 *ioc3 = ip->regs;
const u32 enabled = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
EISR_TXEXPLICIT | EISR_TXMEMERR;
u32 eisr;
eisr = ioc3_r_eisr() & enabled;
ioc3_w_eisr(eisr);
(void) ioc3_r_eisr(); /* Flush */
if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
ioc3_error(dev, eisr);
if (eisr & EISR_RXTIMERINT)
ioc3_rx(dev);
if (eisr & EISR_TXEXPLICIT)
ioc3_tx(dev);
return IRQ_HANDLED;
}
static inline void ioc3_setup_duplex(struct ioc3_private *ip)
{
struct ioc3 *ioc3 = ip->regs;
if (ip->mii.full_duplex) {
ioc3_w_etcsr(ETCSR_FD);
ip->emcr |= EMCR_DUPLEX;
} else {
ioc3_w_etcsr(ETCSR_HD);
ip->emcr &= ~EMCR_DUPLEX;
}
ioc3_w_emcr(ip->emcr);
}
static void ioc3_timer(unsigned long data)
{
struct ioc3_private *ip = (struct ioc3_private *) data;
/* Print the link status if it has changed */
mii_check_media(&ip->mii, 1, 0);
ioc3_setup_duplex(ip);
ip->ioc3_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2s */
add_timer(&ip->ioc3_timer);
}
/*
* Try to find a PHY. There is no apparent relation between the MII addresses
* in the SGI documentation and what we find in reality, so we simply probe
* for the PHY. It seems IOC3 PHYs usually live on address 31. One of my
* onboard IOC3s has the special oddity that probing doesn't seem to find it
 * yet the interface seems to work fine, so if probing fails we will, for
 * now, simply default to PHY 31 instead of bailing out.
*/
static int ioc3_mii_init(struct ioc3_private *ip)
{
struct net_device *dev = priv_netdev(ip);
int i, found = 0, res = 0;
int ioc3_phy_workaround = 1;
u16 word;
for (i = 0; i < 32; i++) {
word = ioc3_mdio_read(dev, i, MII_PHYSID1);
if (word != 0xffff && word != 0x0000) {
found = 1;
break; /* Found a PHY */
}
}
if (!found) {
if (ioc3_phy_workaround)
i = 31;
else {
ip->mii.phy_id = -1;
res = -ENODEV;
goto out;
}
}
ip->mii.phy_id = i;
out:
return res;
}
static void ioc3_mii_start(struct ioc3_private *ip)
{
ip->ioc3_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
ip->ioc3_timer.data = (unsigned long) ip;
ip->ioc3_timer.function = ioc3_timer;
add_timer(&ip->ioc3_timer);
}
static inline void ioc3_clean_rx_ring(struct ioc3_private *ip)
{
struct sk_buff *skb;
int i;
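	/* First advance rx_ci to a multiple of 16, carrying the skbs and
	 * ring entries along; the hardware apparently expects a 16-entry
	 * aligned consume index after a restart. */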
for (i = ip->rx_ci; i & 15; i++) {
ip->rx_skbs[ip->rx_pi] = ip->rx_skbs[ip->rx_ci];
ip->rxr[ip->rx_pi++] = ip->rxr[ip->rx_ci++];
}
ip->rx_pi &= 511;
ip->rx_ci &= 511;
for (i = ip->rx_ci; i != ip->rx_pi; i = (i+1) & 511) {
struct ioc3_erxbuf *rxb;
skb = ip->rx_skbs[i];
rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
rxb->w0 = 0;
}
}
static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
{
struct sk_buff *skb;
int i;
for (i=0; i < 128; i++) {
skb = ip->tx_skbs[i];
if (skb) {
ip->tx_skbs[i] = NULL;
dev_kfree_skb_any(skb);
}
ip->txr[i].cmd = 0;
}
ip->tx_pi = 0;
ip->tx_ci = 0;
}
static void ioc3_free_rings(struct ioc3_private *ip)
{
struct sk_buff *skb;
int rx_entry, n_entry;
if (ip->txr) {
ioc3_clean_tx_ring(ip);
free_pages((unsigned long)ip->txr, 2);
ip->txr = NULL;
}
if (ip->rxr) {
n_entry = ip->rx_ci;
rx_entry = ip->rx_pi;
while (n_entry != rx_entry) {
skb = ip->rx_skbs[n_entry];
if (skb)
dev_kfree_skb_any(skb);
n_entry = (n_entry + 1) & 511;
}
free_page((unsigned long)ip->rxr);
ip->rxr = NULL;
}
}
static void ioc3_alloc_rings(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3_erxbuf *rxb;
unsigned long *rxr;
int i;
if (ip->rxr == NULL) {
/* Allocate and initialize rx ring. 4kb = 512 entries */
ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
rxr = (unsigned long *) ip->rxr;
if (!rxr)
printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n");
		/* Now the rx buffers. The RX ring may be larger but
		   we only allocate RX_BUFFS buffers for now. Need to tune
		   this for performance and memory later. */
for (i = 0; i < RX_BUFFS; i++) {
struct sk_buff *skb;
skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
if (!skb) {
show_free_areas(0);
continue;
}
ip->rx_skbs[i] = skb;
/* Because we reserve afterwards. */
skb_put(skb, (1664 + RX_OFFSET));
rxb = (struct ioc3_erxbuf *) skb->data;
rxr[i] = cpu_to_be64(ioc3_map(rxb, 1));
skb_reserve(skb, RX_OFFSET);
}
ip->rx_ci = 0;
ip->rx_pi = RX_BUFFS;
}
if (ip->txr == NULL) {
/* Allocate and initialize tx rings. 16kb = 128 bufs. */
ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
if (!ip->txr)
printk("ioc3_alloc_rings(): __get_free_pages() failed!\n");
ip->tx_pi = 0;
ip->tx_ci = 0;
}
}
static void ioc3_init_rings(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3 *ioc3 = ip->regs;
unsigned long ring;
ioc3_free_rings(ip);
ioc3_alloc_rings(dev);
ioc3_clean_rx_ring(ip);
ioc3_clean_tx_ring(ip);
/* Now the rx ring base, consume & produce registers. */
ring = ioc3_map(ip->rxr, 0);
ioc3_w_erbr_h(ring >> 32);
ioc3_w_erbr_l(ring & 0xffffffff);
ioc3_w_ercir(ip->rx_ci << 3);
ioc3_w_erpir((ip->rx_pi << 3) | ERPIR_ARM);
ring = ioc3_map(ip->txr, 0);
ip->txqlen = 0; /* nothing queued */
/* Now the tx ring base, consume & produce registers. */
ioc3_w_etbr_h(ring >> 32);
ioc3_w_etbr_l(ring & 0xffffffff);
ioc3_w_etpir(ip->tx_pi << 7);
ioc3_w_etcir(ip->tx_ci << 7);
(void) ioc3_r_etcir(); /* Flush */
}
static inline void ioc3_ssram_disc(struct ioc3_private *ip)
{
struct ioc3 *ioc3 = ip->regs;
volatile u32 *ssram0 = &ioc3->ssram[0x0000];
volatile u32 *ssram1 = &ioc3->ssram[0x4000];
unsigned int pattern = 0x5555;
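	/* Size probe: the two test cells are 64 KB apart, so on a small
	 * (64 KB) SSRAM they alias and the readback below fails, in
	 * which case we drop back to the smaller buffer size. */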
/* Assume the larger size SSRAM and enable parity checking */
ioc3_w_emcr(ioc3_r_emcr() | (EMCR_BUFSIZ | EMCR_RAMPAR));
*ssram0 = pattern;
*ssram1 = ~pattern & IOC3_SSRAM_DM;
if ((*ssram0 & IOC3_SSRAM_DM) != pattern ||
(*ssram1 & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
/* set ssram size to 64 KB */
ip->emcr = EMCR_RAMPAR;
ioc3_w_emcr(ioc3_r_emcr() & ~EMCR_BUFSIZ);
} else
ip->emcr = EMCR_BUFSIZ | EMCR_RAMPAR;
}
static void ioc3_init(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3 *ioc3 = ip->regs;
del_timer_sync(&ip->ioc3_timer); /* Kill if running */
ioc3_w_emcr(EMCR_RST); /* Reset */
(void) ioc3_r_emcr(); /* Flush WB */
udelay(4); /* Give it time ... */
ioc3_w_emcr(0);
(void) ioc3_r_emcr();
/* Misc registers */
#ifdef CONFIG_SGI_IP27
ioc3_w_erbar(PCI64_ATTR_BAR >> 32); /* Barrier on last store */
#else
ioc3_w_erbar(0); /* Let PCI API get it right */
#endif
(void) ioc3_r_etcdc(); /* Clear on read */
ioc3_w_ercsr(15); /* RX low watermark */
ioc3_w_ertr(0); /* Interrupt immediately */
__ioc3_set_mac_address(dev);
ioc3_w_ehar_h(ip->ehar_h);
ioc3_w_ehar_l(ip->ehar_l);
ioc3_w_ersr(42); /* XXX should be random */
ioc3_init_rings(dev);
ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN;
ioc3_w_emcr(ip->emcr);
ioc3_w_eier(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
EISR_TXEXPLICIT | EISR_TXMEMERR);
(void) ioc3_r_eier();
}
static inline void ioc3_stop(struct ioc3_private *ip)
{
struct ioc3 *ioc3 = ip->regs;
ioc3_w_emcr(0); /* Shutup */
ioc3_w_eier(0); /* Disable interrupts */
(void) ioc3_r_eier(); /* Flush */
}
static int ioc3_open(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
if (request_irq(dev->irq, ioc3_interrupt, IRQF_SHARED, ioc3_str, dev)) {
printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
return -EAGAIN;
}
ip->ehar_h = 0;
ip->ehar_l = 0;
ioc3_init(dev);
ioc3_mii_start(ip);
netif_start_queue(dev);
return 0;
}
static int ioc3_close(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
del_timer_sync(&ip->ioc3_timer);
netif_stop_queue(dev);
ioc3_stop(ip);
free_irq(dev->irq, dev);
ioc3_free_rings(ip);
return 0;
}
/*
* MENET cards have four IOC3 chips, which are attached to two sets of
* PCI slot resources each: the primary connections are on slots
* 0..3 and the secondaries are on 4..7
*
* All four ethernets are brought out to connectors; six serial ports
* (a pair from each of the first three IOC3s) are brought out to
* MiniDINs; all other subdevices are left swinging in the wind, leave
* them disabled.
*/
static int ioc3_adjacent_is_ioc3(struct pci_dev *pdev, int slot)
{
struct pci_dev *dev = pci_get_slot(pdev->bus, PCI_DEVFN(slot, 0));
int ret = 0;
if (dev) {
if (dev->vendor == PCI_VENDOR_ID_SGI &&
dev->device == PCI_DEVICE_ID_SGI_IOC3)
ret = 1;
pci_dev_put(dev);
}
return ret;
}
static int ioc3_is_menet(struct pci_dev *pdev)
{
return pdev->bus->parent == NULL &&
ioc3_adjacent_is_ioc3(pdev, 0) &&
ioc3_adjacent_is_ioc3(pdev, 1) &&
ioc3_adjacent_is_ioc3(pdev, 2);
}
#ifdef CONFIG_SERIAL_8250
/*
* Note about serial ports and consoles:
* For console output, everyone uses the IOC3 UARTA (offset 0x178)
* connected to the master node (look in ip27_setup_console() and
* ip27prom_console_write()).
*
* For serial (/dev/ttyS0 etc), we can not have hardcoded serial port
* addresses on a partitioned machine. Since we currently use the ioc3
* serial ports, we use dynamic serial port discovery that the serial.c
* driver uses for pci/pnp ports (there is an entry for the SGI ioc3
* boards in pci_boards[]). Unfortunately, UARTA's pio address is greater
* than UARTB's, although UARTA on o200s has traditionally been known as
* port 0. So, we just use one serial port from each ioc3 (since the
* serial driver adds addresses to get to higher ports).
*
* The first one to do a register_console becomes the preferred console
* (if there is no kernel command line console= directive). /dev/console
* (ie 5, 1) is then "aliased" into the device number returned by the
* "device" routine referred to in this console structure
* (ip27prom_console_dev).
*
* Also look in ip27-pci.c:pci_fixup_ioc3() for some comments on working
* around ioc3 oddities in this respect.
*
* The IOC3 serials use a 22MHz clock rate with an additional divider which
* can be programmed in the SCR register if the DLAB bit is set.
*
* Register to interrupt zero because we share the interrupt with
* the serial driver which we don't properly support yet.
*
* Can't use UPF_IOREMAP as the whole of IOC3 resources have already been
* registered.
*/
static void __devinit ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
{
#define COSMISC_CONSTANT 6
struct uart_port port = {
.irq = 0,
.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
.iotype = UPIO_MEM,
.regshift = 0,
.uartclk = (22000000 << 1) / COSMISC_CONSTANT,
.membase = (unsigned char __iomem *) uart,
.mapbase = (unsigned long) uart,
};
unsigned char lcr;
lcr = uart->iu_lcr;
uart->iu_lcr = lcr | UART_LCR_DLAB;
	uart->iu_scr = COSMISC_CONSTANT;
uart->iu_lcr = lcr;
uart->iu_lcr;
serial8250_register_port(&port);
}
static void __devinit ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
{
/*
	 * We need to recognize and skip the fourth MENET serial port as it
	 * does not have a SuperIO chip attached to it; attempting to access
	 * it will result in bus errors. We call something a MENET if PCI
	 * slots 0, 1, 2 and 3 of a master PCI bus all have an IOC3 in them.
	 * This is paranoid but we want to avoid blowing up on a shoehorn PCI
	 * box that happens to have 4 IOC3 cards in it, so it's not paranoid
	 * enough ...
*/
if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3)
return;
/*
* Switch IOC3 to PIO mode. It probably already was but let's be
* paranoid
*/
ioc3->gpcr_s = GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL;
ioc3->gpcr_s;
ioc3->gppr_6 = 0;
ioc3->gppr_6;
ioc3->gppr_7 = 0;
ioc3->gppr_7;
ioc3->sscr_a = ioc3->sscr_a & ~SSCR_DMA_EN;
ioc3->sscr_a;
ioc3->sscr_b = ioc3->sscr_b & ~SSCR_DMA_EN;
ioc3->sscr_b;
/* Disable all SA/B interrupts except for SA/B_INT in SIO_IEC. */
ioc3->sio_iec &= ~ (SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL |
SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER |
SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS |
SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR);
ioc3->sio_iec |= SIO_IR_SA_INT;
ioc3->sscr_a = 0;
ioc3->sio_iec &= ~ (SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL |
SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER |
SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS |
SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR);
ioc3->sio_iec |= SIO_IR_SB_INT;
ioc3->sscr_b = 0;
ioc3_8250_register(&ioc3->sregs.uarta);
ioc3_8250_register(&ioc3->sregs.uartb);
}
#endif
static const struct net_device_ops ioc3_netdev_ops = {
.ndo_open = ioc3_open,
.ndo_stop = ioc3_close,
.ndo_start_xmit = ioc3_start_xmit,
.ndo_tx_timeout = ioc3_timeout,
.ndo_get_stats = ioc3_get_stats,
.ndo_set_rx_mode = ioc3_set_multicast_list,
.ndo_do_ioctl = ioc3_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ioc3_set_mac_address,
.ndo_change_mtu = eth_change_mtu,
};
static int __devinit ioc3_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned int sw_physid1, sw_physid2;
struct net_device *dev = NULL;
struct ioc3_private *ip;
struct ioc3 *ioc3;
unsigned long ioc3_base, ioc3_size;
u32 vendor, model, rev;
int err, pci_using_dac;
/* Configure DMA attributes. */
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!err) {
pci_using_dac = 1;
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err < 0) {
printk(KERN_ERR "%s: Unable to obtain 64 bit DMA "
"for consistent allocations\n", pci_name(pdev));
goto out;
}
} else {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
printk(KERN_ERR "%s: No usable DMA configuration, "
"aborting.\n", pci_name(pdev));
goto out;
}
pci_using_dac = 0;
}
if (pci_enable_device(pdev))
return -ENODEV;
dev = alloc_etherdev(sizeof(struct ioc3_private));
if (!dev) {
err = -ENOMEM;
goto out_disable;
}
if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
err = pci_request_regions(pdev, "ioc3");
if (err)
goto out_free;
SET_NETDEV_DEV(dev, &pdev->dev);
ip = netdev_priv(dev);
dev->irq = pdev->irq;
ioc3_base = pci_resource_start(pdev, 0);
ioc3_size = pci_resource_len(pdev, 0);
ioc3 = (struct ioc3 *) ioremap(ioc3_base, ioc3_size);
if (!ioc3) {
printk(KERN_CRIT "ioc3eth(%s): ioremap failed, goodbye.\n",
pci_name(pdev));
err = -ENOMEM;
goto out_res;
}
ip->regs = ioc3;
#ifdef CONFIG_SERIAL_8250
ioc3_serial_probe(pdev, ioc3);
#endif
spin_lock_init(&ip->ioc3_lock);
init_timer(&ip->ioc3_timer);
ioc3_stop(ip);
ioc3_init(dev);
ip->pdev = pdev;
ip->mii.phy_id_mask = 0x1f;
ip->mii.reg_num_mask = 0x1f;
ip->mii.dev = dev;
ip->mii.mdio_read = ioc3_mdio_read;
ip->mii.mdio_write = ioc3_mdio_write;
ioc3_mii_init(ip);
if (ip->mii.phy_id == -1) {
printk(KERN_CRIT "ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
pci_name(pdev));
err = -ENODEV;
goto out_stop;
}
ioc3_mii_start(ip);
ioc3_ssram_disc(ip);
ioc3_get_eaddr(ip);
/* The IOC3-specific entries in the device structure. */
dev->watchdog_timeo = 5 * HZ;
dev->netdev_ops = &ioc3_netdev_ops;
dev->ethtool_ops = &ioc3_ethtool_ops;
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
dev->features = NETIF_F_IP_CSUM;
sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);
err = register_netdev(dev);
if (err)
goto out_stop;
mii_check_media(&ip->mii, 1, 1);
ioc3_setup_duplex(ip);
vendor = (sw_physid1 << 12) | (sw_physid2 >> 4);
model = (sw_physid2 >> 4) & 0x3f;
rev = sw_physid2 & 0xf;
printk(KERN_INFO "%s: Using PHY %d, vendor 0x%x, model %d, "
"rev %d.\n", dev->name, ip->mii.phy_id, vendor, model, rev);
printk(KERN_INFO "%s: IOC3 SSRAM has %d kbyte.\n", dev->name,
ip->emcr & EMCR_BUFSIZ ? 128 : 64);
return 0;
out_stop:
ioc3_stop(ip);
del_timer_sync(&ip->ioc3_timer);
ioc3_free_rings(ip);
out_res:
pci_release_regions(pdev);
out_free:
free_netdev(dev);
out_disable:
/*
* We should call pci_disable_device(pdev); here if the IOC3 wasn't
* such a weird device ...
*/
out:
return err;
}
static void __devexit ioc3_remove_one (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3 *ioc3 = ip->regs;
unregister_netdev(dev);
del_timer_sync(&ip->ioc3_timer);
iounmap(ioc3);
pci_release_regions(pdev);
free_netdev(dev);
/*
* We should call pci_disable_device(pdev); here if the IOC3 wasn't
* such a weird device ...
*/
}
static DEFINE_PCI_DEVICE_TABLE(ioc3_pci_tbl) = {
{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, ioc3_pci_tbl);
static struct pci_driver ioc3_driver = {
.name = "ioc3-eth",
.id_table = ioc3_pci_tbl,
.probe = ioc3_probe,
.remove = __devexit_p(ioc3_remove_one),
};
static int __init ioc3_init_module(void)
{
return pci_register_driver(&ioc3_driver);
}
static void __exit ioc3_cleanup_module(void)
{
pci_unregister_driver(&ioc3_driver);
}
static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned long data;
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3 *ioc3 = ip->regs;
unsigned int len;
struct ioc3_etxd *desc;
uint32_t w0 = 0;
int produce;
/*
	 * IOC3 has fairly simple-minded checksumming hardware which simply
* adds up the 1's complement checksum for the entire packet and
* inserts it at an offset which can be specified in the descriptor
* into the transmit packet. This means we have to compensate for the
* MAC header which should not be summed and the TCP/UDP pseudo headers
* manually.
*/
if (skb->ip_summed == CHECKSUM_PARTIAL) {
const struct iphdr *ih = ip_hdr(skb);
const int proto = ntohs(ih->protocol);
unsigned int csoff;
uint32_t csum, ehsum;
uint16_t *eh;
		/* The MAC header. skb->mac seems the logical approach
		   to find the MAC header - except it's a NULL pointer ... */
eh = (uint16_t *) skb->data;
/* Sum up dest addr, src addr and protocol */
ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6];
/* Fold ehsum. can't use csum_fold which negates also ... */
ehsum = (ehsum & 0xffff) + (ehsum >> 16);
ehsum = (ehsum & 0xffff) + (ehsum >> 16);
		/* Skip the IP header; its sum is always zero and was
		   already filled in by ip_output.c */
csum = csum_tcpudp_nofold(ih->saddr, ih->daddr,
ih->tot_len - (ih->ihl << 2),
proto, 0xffff ^ ehsum);
csum = (csum & 0xffff) + (csum >> 16); /* Fold again */
csum = (csum & 0xffff) + (csum >> 16);
csoff = ETH_HLEN + (ih->ihl << 2);
if (proto == IPPROTO_UDP) {
csoff += offsetof(struct udphdr, check);
udp_hdr(skb)->check = csum;
}
if (proto == IPPROTO_TCP) {
csoff += offsetof(struct tcphdr, check);
tcp_hdr(skb)->check = csum;
}
w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT);
}
spin_lock_irq(&ip->ioc3_lock);
data = (unsigned long) skb->data;
len = skb->len;
produce = ip->tx_pi;
desc = &ip->txr[produce];
if (len <= 104) {
/* Short packet, let's copy it directly into the ring. */
skb_copy_from_linear_data(skb, desc->data, skb->len);
if (len < ETH_ZLEN) {
/* Very short packet, pad with zeros at the end. */
memset(desc->data + len, 0, ETH_ZLEN - len);
len = ETH_ZLEN;
}
desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V | w0);
desc->bufcnt = cpu_to_be32(len);
} else if ((data ^ (data + len - 1)) & 0x4000) {
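		/* data and data + len - 1 differ in bit 14: the packet
		 * crosses a 16 KB boundary, which a single buffer
		 * apparently must not do, so describe it as two buffers
		 * split at b2. */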
unsigned long b2 = (data | 0x3fffUL) + 1UL;
unsigned long s1 = b2 - data;
unsigned long s2 = data + len - b2;
desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE |
ETXD_B1V | ETXD_B2V | w0);
desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) |
(s2 << ETXD_B2CNT_SHIFT));
desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
desc->p2 = cpu_to_be64(ioc3_map((void *) b2, 1));
} else {
/* Normal sized packet that doesn't cross a page boundary. */
desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0);
desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT);
desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
}
BARRIER();
ip->tx_skbs[produce] = skb; /* Remember skb */
produce = (produce + 1) & 127;
ip->tx_pi = produce;
ioc3_w_etpir(produce << 7); /* Fire ... */
ip->txqlen++;
if (ip->txqlen >= 127)
netif_stop_queue(dev);
spin_unlock_irq(&ip->ioc3_lock);
return NETDEV_TX_OK;
}
static void ioc3_timeout(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
spin_lock_irq(&ip->ioc3_lock);
ioc3_stop(ip);
ioc3_init(dev);
ioc3_mii_init(ip);
ioc3_mii_start(ip);
spin_unlock_irq(&ip->ioc3_lock);
netif_wake_queue(dev);
}
/*
* Given a multicast ethernet address, this routine calculates the
* address's bit index in the logical address filter mask
*/
static inline unsigned int ioc3_hash(const unsigned char *addr)
{
unsigned int temp = 0;
u32 crc;
int bits;
crc = ether_crc_le(ETH_ALEN, addr);
crc &= 0x3f; /* bit reverse lowest 6 bits for hash index */
for (bits = 6; --bits >= 0; ) {
temp <<= 1;
temp |= (crc & 0x1);
crc >>= 1;
}
return temp;
}
static void ioc3_get_drvinfo (struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct ioc3_private *ip = netdev_priv(dev);
strcpy (info->driver, IOC3_NAME);
strcpy (info->version, IOC3_VERSION);
strcpy (info->bus_info, pci_name(ip->pdev));
}
static int ioc3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct ioc3_private *ip = netdev_priv(dev);
int rc;
spin_lock_irq(&ip->ioc3_lock);
rc = mii_ethtool_gset(&ip->mii, cmd);
spin_unlock_irq(&ip->ioc3_lock);
return rc;
}
static int ioc3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct ioc3_private *ip = netdev_priv(dev);
int rc;
spin_lock_irq(&ip->ioc3_lock);
rc = mii_ethtool_sset(&ip->mii, cmd);
spin_unlock_irq(&ip->ioc3_lock);
return rc;
}
static int ioc3_nway_reset(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
int rc;
spin_lock_irq(&ip->ioc3_lock);
rc = mii_nway_restart(&ip->mii);
spin_unlock_irq(&ip->ioc3_lock);
return rc;
}
static u32 ioc3_get_link(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
int rc;
spin_lock_irq(&ip->ioc3_lock);
rc = mii_link_ok(&ip->mii);
spin_unlock_irq(&ip->ioc3_lock);
return rc;
}
static const struct ethtool_ops ioc3_ethtool_ops = {
.get_drvinfo = ioc3_get_drvinfo,
.get_settings = ioc3_get_settings,
.set_settings = ioc3_set_settings,
.nway_reset = ioc3_nway_reset,
.get_link = ioc3_get_link,
};
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct ioc3_private *ip = netdev_priv(dev);
int rc;
spin_lock_irq(&ip->ioc3_lock);
rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL);
spin_unlock_irq(&ip->ioc3_lock);
return rc;
}
static void ioc3_set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3 *ioc3 = ip->regs;
u64 ehar = 0;
netif_stop_queue(dev); /* Lock out others. */
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
ip->emcr |= EMCR_PROMISC;
ioc3_w_emcr(ip->emcr);
(void) ioc3_r_emcr();
} else {
ip->emcr &= ~EMCR_PROMISC;
ioc3_w_emcr(ip->emcr); /* Clear promiscuous. */
(void) ioc3_r_emcr();
if ((dev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(dev) > 64)) {
/* Too many for hashing to make sense or we want all
multicast packets anyway, so skip computing all the
hashes and just accept all packets. */
ip->ehar_h = 0xffffffff;
ip->ehar_l = 0xffffffff;
} else {
netdev_for_each_mc_addr(ha, dev) {
ehar |= (1UL << ioc3_hash(ha->addr));
}
ip->ehar_h = ehar >> 32;
ip->ehar_l = ehar & 0xffffffff;
}
ioc3_w_ehar_h(ip->ehar_h);
ioc3_w_ehar_l(ip->ehar_l);
}
netif_wake_queue(dev); /* Let us get going again. */
}
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
MODULE_LICENSE("GPL");
module_init(ioc3_init_module);
module_exit(ioc3_cleanup_module);
| gpl-2.0 |
Kalashnikitty/Aurora_D802 | arch/arm/mach-ixp4xx/coyote-pci.c | 5380 | 1613 | /*
* arch/arm/mach-ixp4xx/coyote-pci.c
*
* PCI setup routines for ADI Engineering Coyote platform
*
* Copyright (C) 2002 Jungo Software Technologies.
 * Copyright (C) 2003 MontaVista Software, Inc.
*
* Maintainer: Deepak Saxena <dsaxena@mvista.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach/pci.h>
#define SLOT0_DEVID 14
#define SLOT1_DEVID 15
/* PCI controller GPIO to IRQ pin mappings */
#define SLOT0_INTA 6
#define SLOT1_INTA 11
void __init coyote_pci_preinit(void)
{
irq_set_irq_type(IXP4XX_GPIO_IRQ(SLOT0_INTA), IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(IXP4XX_GPIO_IRQ(SLOT1_INTA), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
static int __init coyote_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
if (slot == SLOT0_DEVID)
return IXP4XX_GPIO_IRQ(SLOT0_INTA);
else if (slot == SLOT1_DEVID)
return IXP4XX_GPIO_IRQ(SLOT1_INTA);
else return -1;
}
struct hw_pci coyote_pci __initdata = {
.nr_controllers = 1,
.preinit = coyote_pci_preinit,
.swizzle = pci_std_swizzle,
.setup = ixp4xx_setup,
.scan = ixp4xx_scan_bus,
.map_irq = coyote_map_irq,
};
int __init coyote_pci_init(void)
{
if (machine_is_adi_coyote())
pci_common_init(&coyote_pci);
return 0;
}
subsys_initcall(coyote_pci_init);
| gpl-2.0 |
PRJosh/kernel_msm_mako | arch/arm/mach-ixp4xx/dsmg600-pci.c | 5380 | 2064 | /*
* DSM-G600 board-level PCI initialization
*
* Copyright (C) 2006 Tower Technologies
* Author: Alessandro Zummo <a.zummo@towertech.it>
*
* based on ixdp425-pci.c:
* Copyright (C) 2002 Intel Corporation.
* Copyright (C) 2003-2004 MontaVista Software, Inc.
*
* Maintainer: http://www.nslu2-linux.org/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
#define MAX_DEV 4
#define IRQ_LINES 3
/* PCI controller GPIO to IRQ pin mappings */
#define INTA 11
#define INTB 10
#define INTC 9
#define INTD 8
#define INTE 7
#define INTF 6
void __init dsmg600_pci_preinit(void)
{
irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(IXP4XX_GPIO_IRQ(INTE), IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(IXP4XX_GPIO_IRQ(INTF), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
static int __init dsmg600_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static int pci_irq_table[MAX_DEV][IRQ_LINES] = {
{ IXP4XX_GPIO_IRQ(INTE), -1, -1 },
{ IXP4XX_GPIO_IRQ(INTA), -1, -1 },
{ IXP4XX_GPIO_IRQ(INTB), IXP4XX_GPIO_IRQ(INTC),
IXP4XX_GPIO_IRQ(INTD) },
{ IXP4XX_GPIO_IRQ(INTF), -1, -1 },
};
if (slot >= 1 && slot <= MAX_DEV && pin >= 1 && pin <= IRQ_LINES)
return pci_irq_table[slot - 1][pin - 1];
return -1;
}
struct hw_pci __initdata dsmg600_pci = {
.nr_controllers = 1,
.preinit = dsmg600_pci_preinit,
.swizzle = pci_std_swizzle,
.setup = ixp4xx_setup,
.scan = ixp4xx_scan_bus,
.map_irq = dsmg600_map_irq,
};
int __init dsmg600_pci_init(void)
{
if (machine_is_dsmg600())
pci_common_init(&dsmg600_pci);
return 0;
}
subsys_initcall(dsmg600_pci_init);
| gpl-2.0 |
mastero9017/kernel_n5_racer | arch/arm/mach-footbridge/ebsa285-pci.c | 5380 | 1102 | /*
* linux/arch/arm/mach-footbridge/ebsa285-pci.c
*
* PCI bios-type initialisation for PCI machines
*
* Bits taken from various places.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <asm/irq.h>
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
static int irqmap_ebsa285[] __initdata = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
static int __init ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
if (dev->vendor == PCI_VENDOR_ID_CONTAQ &&
dev->device == PCI_DEVICE_ID_CONTAQ_82C693)
switch (PCI_FUNC(dev->devfn)) {
case 1: return 14;
case 2: return 15;
case 3: return 12;
}
return irqmap_ebsa285[(slot + pin) & 3];
}
static struct hw_pci ebsa285_pci __initdata = {
.swizzle = pci_std_swizzle,
.map_irq = ebsa285_map_irq,
.nr_controllers = 1,
.setup = dc21285_setup,
.scan = dc21285_scan_bus,
.preinit = dc21285_preinit,
.postinit = dc21285_postinit,
};
static int __init ebsa285_init_pci(void)
{
if (machine_is_ebsa285())
pci_common_init(&ebsa285_pci);
return 0;
}
subsys_initcall(ebsa285_init_pci);
| gpl-2.0 |
zsoltm/linux-sunxi-exp | drivers/ide/ide_platform.c | 5636 | 3627 | /*
* Platform IDE driver
*
* Copyright (C) 2007 MontaVista Software
*
* Maintainer: Kumar Gala <galak@kernel.crashing.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ide.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/ata_platform.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
static void __devinit plat_ide_setup_ports(struct ide_hw *hw,
void __iomem *base,
void __iomem *ctrl,
struct pata_platform_info *pdata,
int irq)
{
unsigned long port = (unsigned long)base;
int i;
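	/* The data register sits at the base address; taskfile registers
	 * 1..7 follow at (1 << pdata->ioport_shift) byte spacing. */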
hw->io_ports.data_addr = port;
port += (1 << pdata->ioport_shift);
for (i = 1; i <= 7;
i++, port += (1 << pdata->ioport_shift))
hw->io_ports_array[i] = port;
hw->io_ports.ctl_addr = (unsigned long)ctrl;
hw->irq = irq;
}
static const struct ide_port_info platform_ide_port_info = {
.host_flags = IDE_HFLAG_NO_DMA,
.chipset = ide_generic,
};
static int __devinit plat_ide_probe(struct platform_device *pdev)
{
struct resource *res_base, *res_alt, *res_irq;
void __iomem *base, *alt_base;
struct pata_platform_info *pdata;
struct ide_host *host;
int ret = 0, mmio = 0;
struct ide_hw hw, *hws[] = { &hw };
struct ide_port_info d = platform_ide_port_info;
pdata = pdev->dev.platform_data;
/* get a pointer to the register memory */
res_base = platform_get_resource(pdev, IORESOURCE_IO, 0);
res_alt = platform_get_resource(pdev, IORESOURCE_IO, 1);
if (!res_base || !res_alt) {
res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
res_alt = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res_base || !res_alt) {
ret = -ENOMEM;
goto out;
}
mmio = 1;
}
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res_irq) {
ret = -EINVAL;
goto out;
}
if (mmio) {
base = devm_ioremap(&pdev->dev,
res_base->start, resource_size(res_base));
alt_base = devm_ioremap(&pdev->dev,
res_alt->start, resource_size(res_alt));
} else {
base = devm_ioport_map(&pdev->dev,
res_base->start, resource_size(res_base));
alt_base = devm_ioport_map(&pdev->dev,
res_alt->start, resource_size(res_alt));
}
memset(&hw, 0, sizeof(hw));
plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
hw.dev = &pdev->dev;
d.irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
d.irq_flags |= IRQF_SHARED;
if (mmio)
d.host_flags |= IDE_HFLAG_MMIO;
ret = ide_host_add(&d, hws, 1, &host);
if (ret)
goto out;
platform_set_drvdata(pdev, host);
return 0;
out:
return ret;
}
static int __devexit plat_ide_remove(struct platform_device *pdev)
{
struct ide_host *host = dev_get_drvdata(&pdev->dev);
ide_host_remove(host);
return 0;
}
static struct platform_driver platform_ide_driver = {
.driver = {
.name = "pata_platform",
.owner = THIS_MODULE,
},
.probe = plat_ide_probe,
.remove = __devexit_p(plat_ide_remove),
};
static int __init platform_ide_init(void)
{
return platform_driver_register(&platform_ide_driver);
}
static void __exit platform_ide_exit(void)
{
platform_driver_unregister(&platform_ide_driver);
}
MODULE_DESCRIPTION("Platform IDE driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pata_platform");
module_init(platform_ide_init);
module_exit(platform_ide_exit);
| gpl-2.0 |
Envious-Data/shinano-sirius_msm8974abpro | tools/power/cpupower/utils/helpers/topology.c | 5636 | 3098 | /*
* (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
*
* Licensed under the terms of the GNU GPL License version 2.
*
* ToDo: Needs to be done more properly for AMD/Intel specifics
*/
/* Helper struct for qsort, must be in sync with cpupower_topology.cpu_info */
/* Be careful: Need to pass unsigned to the sort, so that offlined cores are
in the end, but double check for -1 for offlined cpus at other places */
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <helpers/helpers.h>
#include <helpers/sysfs.h>
/* returns -1 on failure, 0 on success */
int sysfs_topology_read_file(unsigned int cpu, const char *fname)
{
unsigned long value;
char linebuf[MAX_LINE_LEN];
char *endp;
char path[SYSFS_PATH_MAX];
snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s",
cpu, fname);
if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0)
return -1;
value = strtoul(linebuf, &endp, 0);
if (endp == linebuf || errno == ERANGE)
return -1;
return value;
}
struct cpuid_core_info {
unsigned int pkg;
	unsigned int core;
unsigned int cpu;
/* flags */
unsigned int is_online:1;
};
static int __compare(const void *t1, const void *t2)
{
struct cpuid_core_info *top1 = (struct cpuid_core_info *)t1;
struct cpuid_core_info *top2 = (struct cpuid_core_info *)t2;
if (top1->pkg < top2->pkg)
return -1;
else if (top1->pkg > top2->pkg)
return 1;
	else if (top1->core < top2->core)
		return -1;
	else if (top1->core > top2->core)
		return 1;
else if (top1->cpu < top2->cpu)
return -1;
else if (top1->cpu > top2->cpu)
return 1;
else
return 0;
}
/*
* Returns amount of cpus, negative on error, cpu_top must be
* passed to cpu_topology_release to free resources
*
* Array is sorted after ->pkg, ->core, then ->cpu
*/
int get_cpu_topology(struct cpupower_topology *cpu_top)
{
int cpu, cpus = sysconf(_SC_NPROCESSORS_CONF);
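	/* _SC_NPROCESSORS_CONF counts all configured CPUs, including
	 * currently offlined ones; those are flagged via is_online
	 * below. */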
	cpu_top->core_info = malloc(sizeof(struct cpuid_core_info) * cpus);
if (cpu_top->core_info == NULL)
return -ENOMEM;
cpu_top->pkgs = cpu_top->cores = 0;
for (cpu = 0; cpu < cpus; cpu++) {
cpu_top->core_info[cpu].cpu = cpu;
cpu_top->core_info[cpu].is_online = sysfs_is_cpu_online(cpu);
cpu_top->core_info[cpu].pkg =
sysfs_topology_read_file(cpu, "physical_package_id");
if ((int)cpu_top->core_info[cpu].pkg != -1 &&
cpu_top->core_info[cpu].pkg > cpu_top->pkgs)
cpu_top->pkgs = cpu_top->core_info[cpu].pkg;
cpu_top->core_info[cpu].core =
sysfs_topology_read_file(cpu, "core_id");
}
cpu_top->pkgs++;
qsort(cpu_top->core_info, cpus, sizeof(struct cpuid_core_info),
__compare);
	/* Intel's core IDs are not numbered consecutively; there may
	 * be a core_id of 3, but none of 2. Assume there always is 0.
	 * Get the number of cores by counting duplicates in a package:
	for (cpu = 0; cpu_top->core_info[cpu].pkg == 0 && cpu < cpus; cpu++) {
if (cpu_top->core_info[cpu].core == 0)
cpu_top->cores++;
*/
return cpus;
}
void cpu_topology_release(struct cpupower_topology cpu_top)
{
free(cpu_top.core_info);
}
| gpl-2.0 |
glfernando/remoteproc | drivers/acpi/apei/erst-dbg.c | 5636 | 5214 | /*
* APEI Error Record Serialization Table debug support
*
* ERST is a way provided by APEI to save and retrieve hardware error
* information to and from a persistent store. This file provide the
* debugging/testing support for ERST kernel support and firmware
* implementation.
*
* Copyright 2010 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <acpi/apei.h>
#include <linux/miscdevice.h>
#include "apei-internal.h"
#define ERST_DBG_PFX "ERST DBG: "
#define ERST_DBG_RECORD_LEN_MAX 0x4000
static void *erst_dbg_buf;
static unsigned int erst_dbg_buf_len;
/* Prevent erst_dbg_read/write from being invoked concurrently */
static DEFINE_MUTEX(erst_dbg_mutex);
static int erst_dbg_open(struct inode *inode, struct file *file)
{
int rc, *pos;
if (erst_disable)
return -ENODEV;
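	/* The record-id iterator cursor is stashed directly in the
	 * pointer-sized private_data field instead of an allocation. */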
pos = (int *)&file->private_data;
rc = erst_get_record_id_begin(pos);
if (rc)
return rc;
return nonseekable_open(inode, file);
}
static int erst_dbg_release(struct inode *inode, struct file *file)
{
erst_get_record_id_end();
return 0;
}
static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
int rc;
u64 record_id;
u32 record_count;
switch (cmd) {
case APEI_ERST_CLEAR_RECORD:
rc = copy_from_user(&record_id, (void __user *)arg,
sizeof(record_id));
if (rc)
return -EFAULT;
return erst_clear(record_id);
case APEI_ERST_GET_RECORD_COUNT:
rc = erst_get_record_count();
if (rc < 0)
return rc;
record_count = rc;
rc = put_user(record_count, (u32 __user *)arg);
if (rc)
return rc;
return 0;
default:
return -ENOTTY;
}
}
static ssize_t erst_dbg_read(struct file *filp, char __user *ubuf,
size_t usize, loff_t *off)
{
int rc, *pos;
ssize_t len = 0;
u64 id;
if (*off)
return -EINVAL;
if (mutex_lock_interruptible(&erst_dbg_mutex) != 0)
return -EINTR;
pos = (int *)&filp->private_data;
retry_next:
rc = erst_get_record_id_next(pos, &id);
if (rc)
goto out;
/* no more record */
if (id == APEI_ERST_INVALID_RECORD_ID)
goto out;
retry:
rc = len = erst_read(id, erst_dbg_buf, erst_dbg_buf_len);
/* The record may be cleared by others, try read next record */
if (rc == -ENOENT)
goto retry_next;
if (rc < 0)
goto out;
if (len > ERST_DBG_RECORD_LEN_MAX) {
pr_warning(ERST_DBG_PFX
"Record (ID: 0x%llx) length is too long: %zd\n",
id, len);
rc = -EIO;
goto out;
}
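	/* the record is larger than the current buffer: allocate a bigger
	 * one, swap it in and re-read the same record */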
if (len > erst_dbg_buf_len) {
void *p;
rc = -ENOMEM;
p = kmalloc(len, GFP_KERNEL);
if (!p)
goto out;
kfree(erst_dbg_buf);
erst_dbg_buf = p;
erst_dbg_buf_len = len;
goto retry;
}
rc = -EINVAL;
if (len > usize)
goto out;
rc = -EFAULT;
if (copy_to_user(ubuf, erst_dbg_buf, len))
goto out;
rc = 0;
out:
mutex_unlock(&erst_dbg_mutex);
return rc ? rc : len;
}
static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf,
size_t usize, loff_t *off)
{
int rc;
struct cper_record_header *rcd;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (usize > ERST_DBG_RECORD_LEN_MAX) {
		pr_err(ERST_DBG_PFX "Record to be written is too long\n");
return -EINVAL;
}
if (mutex_lock_interruptible(&erst_dbg_mutex))
return -EINTR;
if (usize > erst_dbg_buf_len) {
void *p;
rc = -ENOMEM;
p = kmalloc(usize, GFP_KERNEL);
if (!p)
goto out;
kfree(erst_dbg_buf);
erst_dbg_buf = p;
erst_dbg_buf_len = usize;
}
rc = copy_from_user(erst_dbg_buf, ubuf, usize);
if (rc) {
rc = -EFAULT;
goto out;
}
rcd = erst_dbg_buf;
rc = -EINVAL;
if (rcd->record_length != usize)
goto out;
rc = erst_write(erst_dbg_buf);
out:
mutex_unlock(&erst_dbg_mutex);
return rc < 0 ? rc : usize;
}
static const struct file_operations erst_dbg_ops = {
.owner = THIS_MODULE,
.open = erst_dbg_open,
.release = erst_dbg_release,
.read = erst_dbg_read,
.write = erst_dbg_write,
.unlocked_ioctl = erst_dbg_ioctl,
.llseek = no_llseek,
};
static struct miscdevice erst_dbg_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "erst_dbg",
.fops = &erst_dbg_ops,
};
static __init int erst_dbg_init(void)
{
if (erst_disable) {
pr_info(ERST_DBG_PFX "ERST support is disabled.\n");
return -ENODEV;
}
return misc_register(&erst_dbg_dev);
}
static __exit void erst_dbg_exit(void)
{
misc_deregister(&erst_dbg_dev);
kfree(erst_dbg_buf);
}
module_init(erst_dbg_init);
module_exit(erst_dbg_exit);
MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("APEI Error Record Serialization Table debug support");
MODULE_LICENSE("GPL");
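/*
 * A minimal userspace sketch of driving /dev/erst_dbg as implemented
 * above: query the record count via APEI_ERST_GET_RECORD_COUNT, then pull
 * records with read() until it returns 0. This assumes the APEI_ERST_*
 * ioctl definitions (normally private to apei-internal.h) were copied
 * into the tool; process_record() is a hypothetical consumer.
 *
 *	int fd = open("/dev/erst_dbg", O_RDONLY);
 *	unsigned int count;
 *	char buf[0x4000];		// ERST_DBG_RECORD_LEN_MAX
 *	ssize_t len;
 *
 *	if (fd < 0 || ioctl(fd, APEI_ERST_GET_RECORD_COUNT, &count) < 0)
 *		return -1;
 *	while ((len = read(fd, buf, sizeof(buf))) > 0)
 *		process_record(buf, len);
 *	close(fd);
 */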
| gpl-2.0 |
mysteryemotionz/v20j-geeb | arch/arm/mach-imx/ehci-imx27.c | 7940 | 2276 | /*
* Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
* Copyright (C) 2010 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <linux/platform_device.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/mxc_ehci.h>
#define USBCTRL_OTGBASE_OFFSET 0x600
#define MX27_OTG_SIC_SHIFT 29
#define MX27_OTG_SIC_MASK (0x3 << MX27_OTG_SIC_SHIFT)
#define MX27_OTG_PM_BIT (1 << 24)
#define MX27_H2_SIC_SHIFT 21
#define MX27_H2_SIC_MASK (0x3 << MX27_H2_SIC_SHIFT)
#define MX27_H2_PM_BIT (1 << 16)
#define MX27_H2_DT_BIT (1 << 5)
#define MX27_H1_SIC_SHIFT 13
#define MX27_H1_SIC_MASK (0x3 << MX27_H1_SIC_SHIFT)
#define MX27_H1_PM_BIT (1 << 8)
#define MX27_H1_DT_BIT (1 << 4)
int mx27_initialize_usb_hw(int port, unsigned int flags)
{
unsigned int v;
v = readl(MX27_IO_ADDRESS(MX27_USB_BASE_ADDR + USBCTRL_OTGBASE_OFFSET));
switch (port) {
case 0: /* OTG port */
v &= ~(MX27_OTG_SIC_MASK | MX27_OTG_PM_BIT);
v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX27_OTG_SIC_SHIFT;
if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
v |= MX27_OTG_PM_BIT;
break;
case 1: /* H1 port */
v &= ~(MX27_H1_SIC_MASK | MX27_H1_PM_BIT | MX27_H1_DT_BIT);
v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX27_H1_SIC_SHIFT;
if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
v |= MX27_H1_PM_BIT;
if (!(flags & MXC_EHCI_TTL_ENABLED))
v |= MX27_H1_DT_BIT;
break;
case 2: /* H2 port */
v &= ~(MX27_H2_SIC_MASK | MX27_H2_PM_BIT | MX27_H2_DT_BIT);
v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX27_H2_SIC_SHIFT;
if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
v |= MX27_H2_PM_BIT;
if (!(flags & MXC_EHCI_TTL_ENABLED))
v |= MX27_H2_DT_BIT;
break;
default:
return -EINVAL;
}
writel(v, MX27_IO_ADDRESS(MX27_USB_BASE_ADDR + USBCTRL_OTGBASE_OFFSET));
return 0;
}
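/*
 * Usage sketch, modelled on i.MX27 board files (an assumption, not part
 * of this file): boards typically call mx27_initialize_usb_hw() from the
 * .init hook of their mxc_usbh_platform_data before the EHCI controller
 * is brought up:
 *
 *	static int board_usbh2_init(struct platform_device *pdev)
 *	{
 *		return mx27_initialize_usb_hw(pdev->id,
 *					      MXC_EHCI_INTERFACE_DIFF_UNI |
 *					      MXC_EHCI_POWER_PINS_ENABLED);
 *	}
 */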
| gpl-2.0 |
dh-harald/amlogic-kernel | arch/frv/kernel/setup.c | 8452 | 32760 | /* setup.c: FRV specific setup
*
* Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
* - Derived from arch/m68k/kernel/setup.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <generated/utsrelease.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/console.h>
#include <linux/genhd.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/major.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/sections.h>
#include <asm/pgalloc.h>
#include <asm/busctl-regs.h>
#include <asm/serial-regs.h>
#include <asm/timer-regs.h>
#include <asm/irc-regs.h>
#include <asm/spr-regs.h>
#include <asm/mb-regs.h>
#include <asm/mb93493-regs.h>
#include <asm/gdb-stub.h>
#include <asm/io.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <asm/pgtable.h>
#endif
#include "local.h"
#ifdef CONFIG_MB93090_MB00
static void __init mb93090_display(void);
#endif
#ifdef CONFIG_MMU
static void __init setup_linux_memory(void);
#else
static void __init setup_uclinux_memory(void);
#endif
#ifdef CONFIG_MB93090_MB00
static char __initdata mb93090_banner[] = "FJ/RH FR-V Linux";
static char __initdata mb93090_version[] = UTS_RELEASE;
int __nongprelbss mb93090_mb00_detected;
#endif
const char __frv_unknown_system[] = "unknown";
const char __frv_mb93091_cb10[] = "mb93091-cb10";
const char __frv_mb93091_cb11[] = "mb93091-cb11";
const char __frv_mb93091_cb30[] = "mb93091-cb30";
const char __frv_mb93091_cb41[] = "mb93091-cb41";
const char __frv_mb93091_cb60[] = "mb93091-cb60";
const char __frv_mb93091_cb70[] = "mb93091-cb70";
const char __frv_mb93091_cb451[] = "mb93091-cb451";
const char __frv_mb93090_mb00[] = "mb93090-mb00";
const char __frv_mb93493[] = "mb93493";
const char __frv_mb93093[] = "mb93093";
static const char *__nongprelbss cpu_series;
static const char *__nongprelbss cpu_core;
static const char *__nongprelbss cpu_silicon;
static const char *__nongprelbss cpu_mmu;
static const char *__nongprelbss cpu_system;
static const char *__nongprelbss cpu_board1;
static const char *__nongprelbss cpu_board2;
static unsigned long __nongprelbss cpu_psr_all;
static unsigned long __nongprelbss cpu_hsr0_all;
unsigned long __nongprelbss pdm_suspend_mode;
unsigned long __nongprelbss rom_length;
unsigned long __nongprelbss memory_start;
unsigned long __nongprelbss memory_end;
unsigned long __nongprelbss dma_coherent_mem_start;
unsigned long __nongprelbss dma_coherent_mem_end;
unsigned long __initdata __sdram_old_base;
unsigned long __initdata num_mappedpages;
struct cpuinfo_frv __nongprelbss boot_cpu_data;
char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata redboot_command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_PM
#define __pminit
#define __pminitdata
#else
#define __pminit __init
#define __pminitdata __initdata
#endif
struct clock_cmode {
uint8_t xbus, sdram, corebus, core, dsu;
};
#define _frac(N,D) ((N)<<4 | (D))
#define _x0_16 _frac(1,6)
#define _x0_25 _frac(1,4)
#define _x0_33 _frac(1,3)
#define _x0_375 _frac(3,8)
#define _x0_5 _frac(1,2)
#define _x0_66 _frac(2,3)
#define _x0_75 _frac(3,4)
#define _x1 _frac(1,1)
#define _x1_5 _frac(3,2)
#define _x2 _frac(2,1)
#define _x3 _frac(3,1)
#define _x4 _frac(4,1)
#define _x4_5 _frac(9,2)
#define _x6 _frac(6,1)
#define _x8 _frac(8,1)
#define _x9 _frac(9,1)
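/* e.g. _x1_5 == _frac(3,2) == 0x32: the high nibble is the multiplier and
 * the low nibble the divisor, which the CLOCK() macro in
 * determine_clocks() applies as src * 3 / 2 */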
int __nongprelbss clock_p0_current;
int __nongprelbss clock_cm_current;
int __nongprelbss clock_cmode_current;
#ifdef CONFIG_PM
int __nongprelbss clock_cmodes_permitted;
unsigned long __nongprelbss clock_bits_settable;
#endif
static struct clock_cmode __pminitdata undef_clock_cmode = { _x1, _x1, _x1, _x1, _x1 };
static struct clock_cmode __pminitdata clock_cmodes_fr401_fr403[16] = {
[4] = { _x1, _x1, _x2, _x2, _x0_25 },
[5] = { _x1, _x2, _x4, _x4, _x0_5 },
[8] = { _x1, _x1, _x1, _x2, _x0_25 },
[9] = { _x1, _x2, _x2, _x4, _x0_5 },
[11] = { _x1, _x4, _x4, _x8, _x1 },
[12] = { _x1, _x1, _x2, _x4, _x0_5 },
[13] = { _x1, _x2, _x4, _x8, _x1 },
};
static struct clock_cmode __pminitdata clock_cmodes_fr405[16] = {
[0] = { _x1, _x1, _x1, _x1, _x0_5 },
[1] = { _x1, _x1, _x1, _x3, _x0_25 },
[2] = { _x1, _x1, _x2, _x6, _x0_5 },
[3] = { _x1, _x2, _x2, _x6, _x0_5 },
[4] = { _x1, _x1, _x2, _x2, _x0_16 },
[8] = { _x1, _x1, _x1, _x2, _x0_16 },
[9] = { _x1, _x2, _x2, _x4, _x0_33 },
[12] = { _x1, _x1, _x2, _x4, _x0_33 },
[14] = { _x1, _x3, _x3, _x9, _x0_75 },
[15] = { _x1, _x1_5, _x1_5, _x4_5, _x0_375 },
#define CLOCK_CMODES_PERMITTED_FR405 0xd31f
};
static struct clock_cmode __pminitdata clock_cmodes_fr555[16] = {
[0] = { _x1, _x2, _x2, _x4, _x0_33 },
[1] = { _x1, _x3, _x3, _x6, _x0_5 },
[2] = { _x1, _x2, _x4, _x8, _x0_66 },
[3] = { _x1, _x1_5, _x3, _x6, _x0_5 },
[4] = { _x1, _x3, _x3, _x9, _x0_75 },
[5] = { _x1, _x2, _x2, _x6, _x0_5 },
[6] = { _x1, _x1_5, _x1_5, _x4_5, _x0_375 },
};
static const struct clock_cmode __pminitdata *clock_cmodes;
static int __pminitdata clock_doubled;
static struct uart_port __pminitdata __frv_uart0 = {
.uartclk = 0,
.membase = (char *) UART0_BASE,
.irq = IRQ_CPU_UART0,
.regshift = 3,
.iotype = UPIO_MEM,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
};
static struct uart_port __pminitdata __frv_uart1 = {
.uartclk = 0,
.membase = (char *) UART1_BASE,
.irq = IRQ_CPU_UART1,
.regshift = 3,
.iotype = UPIO_MEM,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
};
#if 0
static void __init printk_xampr(unsigned long ampr, unsigned long amlr, char i_d, int n)
{
unsigned long phys, virt, cxn, size;
#ifdef CONFIG_MMU
virt = amlr & 0xffffc000;
cxn = amlr & 0x3fff;
#else
virt = ampr & 0xffffc000;
cxn = 0;
#endif
phys = ampr & xAMPRx_PPFN;
size = 1 << (((ampr & xAMPRx_SS) >> 4) + 17);
printk("%cAMPR%d: va %08lx-%08lx [pa %08lx] %c%c%c%c [cxn:%04lx]\n",
i_d, n,
virt, virt + size - 1,
phys,
ampr & xAMPRx_S ? 'S' : '-',
ampr & xAMPRx_C ? 'C' : '-',
ampr & DAMPRx_WP ? 'W' : '-',
ampr & xAMPRx_V ? 'V' : '-',
cxn
);
}
#endif
/*****************************************************************************/
/*
* dump the memory map
*/
static void __init dump_memory_map(void)
{
#if 0
/* dump the protection map */
printk_xampr(__get_IAMPR(0), __get_IAMLR(0), 'I', 0);
printk_xampr(__get_IAMPR(1), __get_IAMLR(1), 'I', 1);
printk_xampr(__get_IAMPR(2), __get_IAMLR(2), 'I', 2);
printk_xampr(__get_IAMPR(3), __get_IAMLR(3), 'I', 3);
printk_xampr(__get_IAMPR(4), __get_IAMLR(4), 'I', 4);
printk_xampr(__get_IAMPR(5), __get_IAMLR(5), 'I', 5);
printk_xampr(__get_IAMPR(6), __get_IAMLR(6), 'I', 6);
printk_xampr(__get_IAMPR(7), __get_IAMLR(7), 'I', 7);
printk_xampr(__get_IAMPR(8), __get_IAMLR(8), 'I', 8);
printk_xampr(__get_IAMPR(9), __get_IAMLR(9), 'i', 9);
printk_xampr(__get_IAMPR(10), __get_IAMLR(10), 'I', 10);
printk_xampr(__get_IAMPR(11), __get_IAMLR(11), 'I', 11);
printk_xampr(__get_IAMPR(12), __get_IAMLR(12), 'I', 12);
printk_xampr(__get_IAMPR(13), __get_IAMLR(13), 'I', 13);
printk_xampr(__get_IAMPR(14), __get_IAMLR(14), 'I', 14);
printk_xampr(__get_IAMPR(15), __get_IAMLR(15), 'I', 15);
printk_xampr(__get_DAMPR(0), __get_DAMLR(0), 'D', 0);
printk_xampr(__get_DAMPR(1), __get_DAMLR(1), 'D', 1);
printk_xampr(__get_DAMPR(2), __get_DAMLR(2), 'D', 2);
printk_xampr(__get_DAMPR(3), __get_DAMLR(3), 'D', 3);
printk_xampr(__get_DAMPR(4), __get_DAMLR(4), 'D', 4);
printk_xampr(__get_DAMPR(5), __get_DAMLR(5), 'D', 5);
printk_xampr(__get_DAMPR(6), __get_DAMLR(6), 'D', 6);
printk_xampr(__get_DAMPR(7), __get_DAMLR(7), 'D', 7);
printk_xampr(__get_DAMPR(8), __get_DAMLR(8), 'D', 8);
printk_xampr(__get_DAMPR(9), __get_DAMLR(9), 'D', 9);
printk_xampr(__get_DAMPR(10), __get_DAMLR(10), 'D', 10);
printk_xampr(__get_DAMPR(11), __get_DAMLR(11), 'D', 11);
printk_xampr(__get_DAMPR(12), __get_DAMLR(12), 'D', 12);
printk_xampr(__get_DAMPR(13), __get_DAMLR(13), 'D', 13);
printk_xampr(__get_DAMPR(14), __get_DAMLR(14), 'D', 14);
printk_xampr(__get_DAMPR(15), __get_DAMLR(15), 'D', 15);
#endif
#if 0
/* dump the bus controller registers */
printk("LGCR: %08lx\n", __get_LGCR());
printk("Master: %08lx-%08lx CR=%08lx\n",
__get_LEMBR(), __get_LEMBR() + __get_LEMAM(),
__get_LMAICR());
int loop;
for (loop = 1; loop <= 7; loop++) {
unsigned long lcr = __get_LCR(loop), lsbr = __get_LSBR(loop);
printk("CS#%d: %08lx-%08lx %c%c%c%c%c%c%c%c%c\n",
loop,
lsbr, lsbr + __get_LSAM(loop),
lcr & 0x80000000 ? 'r' : '-',
lcr & 0x40000000 ? 'w' : '-',
lcr & 0x08000000 ? 'b' : '-',
lcr & 0x04000000 ? 'B' : '-',
lcr & 0x02000000 ? 'C' : '-',
lcr & 0x01000000 ? 'D' : '-',
lcr & 0x00800000 ? 'W' : '-',
lcr & 0x00400000 ? 'R' : '-',
(lcr & 0x00030000) == 0x00000000 ? '4' :
(lcr & 0x00030000) == 0x00010000 ? '2' :
(lcr & 0x00030000) == 0x00020000 ? '1' :
'-'
);
}
#endif
#if 0
printk("\n");
#endif
} /* end dump_memory_map() */
/*****************************************************************************/
/*
* attempt to detect a VDK motherboard and DAV daughter board on an MB93091 system
*/
#ifdef CONFIG_MB93091_VDK
static void __init detect_mb93091(void)
{
#ifdef CONFIG_MB93090_MB00
/* Detect CB70 without motherboard */
if (!(cpu_system == __frv_mb93091_cb70 && ((*(unsigned short *)0xffc00030) & 0x100))) {
cpu_board1 = __frv_mb93090_mb00;
mb93090_mb00_detected = 1;
}
#endif
#ifdef CONFIG_FUJITSU_MB93493
cpu_board2 = __frv_mb93493;
#endif
} /* end detect_mb93091() */
#endif
/*****************************************************************************/
/*
* determine the CPU type and set appropriate parameters
*
* Family Series CPU Core Silicon Imple Vers
* ----------------------------------------------------------
* FR-V --+-> FR400 --+-> FR401 --+-> MB93401 02 00 [1]
* | | |
* | | +-> MB93401/A 02 01
* | | |
* | | +-> MB93403 02 02
* | |
* | +-> FR405 ----> MB93405 04 00
* |
* +-> FR450 ----> FR451 ----> MB93451 05 00
* |
* +-> FR500 ----> FR501 --+-> MB93501 01 01 [2]
* | |
* | +-> MB93501/A 01 02
* |
* +-> FR550 --+-> FR551 ----> MB93555 03 01
*
* [1] The MB93401 is an obsolete CPU replaced by the MB93401A
* [2] The MB93501 is an obsolete CPU replaced by the MB93501A
*
* Imple is PSR(Processor Status Register)[31:28].
* Vers is PSR(Processor Status Register)[27:24].
*
* A "Silicon" consists of CPU core and some on-chip peripherals.
*/
static void __init determine_cpu(void)
{
unsigned long hsr0 = __get_HSR(0);
unsigned long psr = __get_PSR();
/* work out what selectable services the CPU supports */
__set_PSR(psr | PSR_EM | PSR_EF | PSR_CM | PSR_NEM);
cpu_psr_all = __get_PSR();
__set_PSR(psr);
__set_HSR(0, hsr0 | HSR0_GRLE | HSR0_GRHE | HSR0_FRLE | HSR0_FRHE);
cpu_hsr0_all = __get_HSR(0);
__set_HSR(0, hsr0);
/* derive other service specs from the CPU type */
cpu_series = "unknown";
cpu_core = "unknown";
cpu_silicon = "unknown";
cpu_mmu = "Prot";
cpu_system = __frv_unknown_system;
clock_cmodes = NULL;
clock_doubled = 0;
#ifdef CONFIG_PM
clock_bits_settable = CLOCK_BIT_CM_H | CLOCK_BIT_CM_M | CLOCK_BIT_P0;
#endif
switch (PSR_IMPLE(psr)) {
case PSR_IMPLE_FR401:
cpu_series = "fr400";
cpu_core = "fr401";
pdm_suspend_mode = HSR0_PDM_PLL_RUN;
switch (PSR_VERSION(psr)) {
case PSR_VERSION_FR401_MB93401:
cpu_silicon = "mb93401";
cpu_system = __frv_mb93091_cb10;
clock_cmodes = clock_cmodes_fr401_fr403;
clock_doubled = 1;
break;
case PSR_VERSION_FR401_MB93401A:
cpu_silicon = "mb93401/A";
cpu_system = __frv_mb93091_cb11;
clock_cmodes = clock_cmodes_fr401_fr403;
break;
case PSR_VERSION_FR401_MB93403:
cpu_silicon = "mb93403";
#ifndef CONFIG_MB93093_PDK
cpu_system = __frv_mb93091_cb30;
#else
cpu_system = __frv_mb93093;
#endif
clock_cmodes = clock_cmodes_fr401_fr403;
break;
default:
break;
}
break;
case PSR_IMPLE_FR405:
cpu_series = "fr400";
cpu_core = "fr405";
pdm_suspend_mode = HSR0_PDM_PLL_STOP;
switch (PSR_VERSION(psr)) {
case PSR_VERSION_FR405_MB93405:
cpu_silicon = "mb93405";
cpu_system = __frv_mb93091_cb60;
clock_cmodes = clock_cmodes_fr405;
#ifdef CONFIG_PM
clock_bits_settable |= CLOCK_BIT_CMODE;
clock_cmodes_permitted = CLOCK_CMODES_PERMITTED_FR405;
#endif
/* the FPGA on the CB70 has extra registers
* - it has 0x0046 in the VDK_ID FPGA register at 0x1a0, which is
* how we tell the difference between it and a CB60
*/
if (*(volatile unsigned short *) 0xffc001a0 == 0x0046)
cpu_system = __frv_mb93091_cb70;
break;
default:
break;
}
break;
case PSR_IMPLE_FR451:
cpu_series = "fr450";
cpu_core = "fr451";
pdm_suspend_mode = HSR0_PDM_PLL_STOP;
#ifdef CONFIG_PM
clock_bits_settable |= CLOCK_BIT_CMODE;
clock_cmodes_permitted = CLOCK_CMODES_PERMITTED_FR405;
#endif
switch (PSR_VERSION(psr)) {
case PSR_VERSION_FR451_MB93451:
cpu_silicon = "mb93451";
cpu_mmu = "Prot, SAT, xSAT, DAT";
cpu_system = __frv_mb93091_cb451;
clock_cmodes = clock_cmodes_fr405;
break;
default:
break;
}
break;
case PSR_IMPLE_FR501:
cpu_series = "fr500";
cpu_core = "fr501";
pdm_suspend_mode = HSR0_PDM_PLL_STOP;
switch (PSR_VERSION(psr)) {
case PSR_VERSION_FR501_MB93501: cpu_silicon = "mb93501"; break;
case PSR_VERSION_FR501_MB93501A: cpu_silicon = "mb93501/A"; break;
default:
break;
}
break;
case PSR_IMPLE_FR551:
cpu_series = "fr550";
cpu_core = "fr551";
pdm_suspend_mode = HSR0_PDM_PLL_RUN;
switch (PSR_VERSION(psr)) {
case PSR_VERSION_FR551_MB93555:
cpu_silicon = "mb93555";
cpu_mmu = "Prot, SAT";
cpu_system = __frv_mb93091_cb41;
clock_cmodes = clock_cmodes_fr555;
clock_doubled = 1;
break;
default:
break;
}
break;
default:
break;
}
printk("- Series:%s CPU:%s Silicon:%s\n",
cpu_series, cpu_core, cpu_silicon);
#ifdef CONFIG_MB93091_VDK
detect_mb93091();
#endif
#if defined(CONFIG_MB93093_PDK) && defined(CONFIG_FUJITSU_MB93493)
cpu_board2 = __frv_mb93493;
#endif
} /* end determine_cpu() */
/*****************************************************************************/
/*
* calculate the bus clock speed
*/
void __pminit determine_clocks(int verbose)
{
const struct clock_cmode *mode, *tmode;
unsigned long clkc, psr, quot;
clkc = __get_CLKC();
psr = __get_PSR();
clock_p0_current = !!(clkc & CLKC_P0);
clock_cm_current = clkc & CLKC_CM;
clock_cmode_current = (clkc & CLKC_CMODE) >> CLKC_CMODE_s;
if (verbose)
printk("psr=%08lx hsr0=%08lx clkc=%08lx\n", psr, __get_HSR(0), clkc);
/* the CB70 has some alternative ways of setting the clock speed through switches accessed
* through the FPGA. */
if (cpu_system == __frv_mb93091_cb70) {
unsigned short clkswr = *(volatile unsigned short *) 0xffc00104UL & 0x1fffUL;
if (clkswr & 0x1000)
__clkin_clock_speed_HZ = 60000000UL;
else
__clkin_clock_speed_HZ =
((clkswr >> 8) & 0xf) * 10000000 +
((clkswr >> 4) & 0xf) * 1000000 +
((clkswr ) & 0xf) * 100000;
}
	/* the CB451 also reads its clock speed from the FPGA switches; it was
	 * originally fixed at 24MHz, hence the disabled line below */
else if (cpu_system == __frv_mb93091_cb451) {
//__clkin_clock_speed_HZ = 24000000UL; // CB451-FPGA
unsigned short clkswr = *(volatile unsigned short *) 0xffc00104UL & 0x1fffUL;
if (clkswr & 0x1000)
__clkin_clock_speed_HZ = 60000000UL;
else
__clkin_clock_speed_HZ =
((clkswr >> 8) & 0xf) * 10000000 +
((clkswr >> 4) & 0xf) * 1000000 +
((clkswr ) & 0xf) * 100000;
}
/* otherwise determine the clockspeed from VDK or other registers */
else {
__clkin_clock_speed_HZ = __get_CLKIN();
}
/* look up the appropriate clock relationships table entry */
mode = &undef_clock_cmode;
if (clock_cmodes) {
tmode = &clock_cmodes[(clkc & CLKC_CMODE) >> CLKC_CMODE_s];
if (tmode->xbus)
mode = tmode;
}
#define CLOCK(SRC,RATIO) ((SRC) * (((RATIO) >> 4) & 0x0f) / ((RATIO) & 0x0f))
if (clock_doubled)
__clkin_clock_speed_HZ <<= 1;
__ext_bus_clock_speed_HZ = CLOCK(__clkin_clock_speed_HZ, mode->xbus);
__sdram_clock_speed_HZ = CLOCK(__clkin_clock_speed_HZ, mode->sdram);
__dsu_clock_speed_HZ = CLOCK(__clkin_clock_speed_HZ, mode->dsu);
switch (clkc & CLKC_CM) {
case 0: /* High */
__core_bus_clock_speed_HZ = CLOCK(__clkin_clock_speed_HZ, mode->corebus);
__core_clock_speed_HZ = CLOCK(__clkin_clock_speed_HZ, mode->core);
break;
case 1: /* Medium */
__core_bus_clock_speed_HZ = CLOCK(__clkin_clock_speed_HZ, mode->sdram);
__core_clock_speed_HZ = CLOCK(__clkin_clock_speed_HZ, mode->sdram);
break;
case 2: /* Low; not supported */
case 3: /* UNDEF */
printk("Unsupported CLKC CM %ld\n", clkc & CLKC_CM);
panic("Bye");
}
__res_bus_clock_speed_HZ = __ext_bus_clock_speed_HZ;
if (clkc & CLKC_P0)
__res_bus_clock_speed_HZ >>= 1;
if (verbose) {
printk("CLKIN: %lu.%3.3luMHz\n",
__clkin_clock_speed_HZ / 1000000,
(__clkin_clock_speed_HZ / 1000) % 1000);
printk("CLKS:"
" ext=%luMHz res=%luMHz sdram=%luMHz cbus=%luMHz core=%luMHz dsu=%luMHz\n",
__ext_bus_clock_speed_HZ / 1000000,
__res_bus_clock_speed_HZ / 1000000,
__sdram_clock_speed_HZ / 1000000,
__core_bus_clock_speed_HZ / 1000000,
__core_clock_speed_HZ / 1000000,
__dsu_clock_speed_HZ / 1000000
);
}
/* calculate the number of __delay() loop iterations per sec (2 insn loop) */
__delay_loops_MHz = __core_clock_speed_HZ / (1000000 * 2);
/* set the serial prescaler */
__serial_clock_speed_HZ = __res_bus_clock_speed_HZ;
quot = 1;
while (__serial_clock_speed_HZ / quot / 16 / 65536 > 3000)
quot += 1;
/* double the divisor if P0 is clear, so that if/when P0 is set, it's still achievable
* - we have to be careful - dividing too much can mean we can't get 115200 baud
*/
if (__serial_clock_speed_HZ > 32000000 && !(clkc & CLKC_P0))
quot <<= 1;
__serial_clock_speed_HZ /= quot;
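	/* worked example: a 66MHz residual bus with P0 clear gives quot=2 and
	 * a 33MHz serial clock; 115200 baud then needs a 16x divisor of
	 * 33000000 / (16 * 115200) ~= 17.9, and rounding to 18 yields
	 * ~114583 baud, about 0.5% off nominal */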
__frv_uart0.uartclk = __serial_clock_speed_HZ;
__frv_uart1.uartclk = __serial_clock_speed_HZ;
if (verbose)
printk(" uart=%luMHz\n", __serial_clock_speed_HZ / 1000000 * quot);
while (!(__get_UART0_LSR() & UART_LSR_TEMT))
continue;
while (!(__get_UART1_LSR() & UART_LSR_TEMT))
continue;
__set_UCPVR(quot);
__set_UCPSR(0);
} /* end determine_clocks() */
/*****************************************************************************/
/*
* reserve some DMA consistent memory
*/
#ifdef CONFIG_RESERVE_DMA_COHERENT
static void __init reserve_dma_coherent(void)
{
unsigned long ampr;
/* find the first non-kernel memory tile and steal it */
#define __steal_AMPR(r) \
if (__get_DAMPR(r) & xAMPRx_V) { \
ampr = __get_DAMPR(r); \
__set_DAMPR(r, ampr | xAMPRx_S | xAMPRx_C); \
__set_IAMPR(r, 0); \
goto found; \
}
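	/* each invocation jumps to the "found" label below on success, with
	 * the stolen DAMPR value left in ampr; falling through all of them
	 * means no tile was available */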
__steal_AMPR(1);
__steal_AMPR(2);
__steal_AMPR(3);
__steal_AMPR(4);
__steal_AMPR(5);
__steal_AMPR(6);
if (PSR_IMPLE(__get_PSR()) == PSR_IMPLE_FR551) {
__steal_AMPR(7);
__steal_AMPR(8);
__steal_AMPR(9);
__steal_AMPR(10);
__steal_AMPR(11);
__steal_AMPR(12);
__steal_AMPR(13);
__steal_AMPR(14);
}
/* unable to grant any DMA consistent memory */
printk("No DMA consistent memory reserved\n");
return;
found:
dma_coherent_mem_start = ampr & xAMPRx_PPFN;
ampr &= xAMPRx_SS;
ampr >>= 4;
ampr = 1 << (ampr - 3 + 20);
dma_coherent_mem_end = dma_coherent_mem_start + ampr;
printk("DMA consistent memory reserved %lx-%lx\n",
dma_coherent_mem_start, dma_coherent_mem_end);
} /* end reserve_dma_coherent() */
#endif
/*****************************************************************************/
/*
* calibrate the delay loop
*/
void __cpuinit calibrate_delay(void)
{
loops_per_jiffy = __delay_loops_MHz * (1000000 / HZ);
printk("Calibrating delay loop... %lu.%02lu BogoMIPS\n",
loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100);
} /* end calibrate_delay() */
/*****************************************************************************/
/*
* look through the command line for some things we need to know immediately
*/
static void __init parse_cmdline_early(char *cmdline)
{
if (!cmdline)
return;
while (*cmdline) {
if (*cmdline == ' ')
cmdline++;
/* "mem=XXX[kKmM]" sets SDRAM size to <mem>, overriding the value we worked
* out from the SDRAM controller mask register
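		 * (e.g. "mem=32M" limits the kernel to 32MiB of SDRAM)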
*/
if (!memcmp(cmdline, "mem=", 4)) {
unsigned long long mem_size;
mem_size = memparse(cmdline + 4, &cmdline);
memory_end = memory_start + mem_size;
}
while (*cmdline && *cmdline != ' ')
cmdline++;
}
} /* end parse_cmdline_early() */
/*****************************************************************************/
/*
*
*/
void __init setup_arch(char **cmdline_p)
{
#ifdef CONFIG_MMU
printk("Linux FR-V port done by Red Hat Inc <dhowells@redhat.com>\n");
#else
printk("uClinux FR-V port done by Red Hat Inc <dhowells@redhat.com>\n");
#endif
memcpy(boot_command_line, redboot_command_line, COMMAND_LINE_SIZE);
determine_cpu();
determine_clocks(1);
/* For printk-directly-beats-on-serial-hardware hack */
console_set_baud(115200);
#ifdef CONFIG_GDBSTUB
gdbstub_set_baud(115200);
#endif
#ifdef CONFIG_RESERVE_DMA_COHERENT
reserve_dma_coherent();
#endif
dump_memory_map();
#ifdef CONFIG_MB93090_MB00
if (mb93090_mb00_detected)
mb93090_display();
#endif
/* register those serial ports that are available */
#ifdef CONFIG_FRV_ONCPU_SERIAL
#ifndef CONFIG_GDBSTUB_UART0
__reg(UART0_BASE + UART_IER * 8) = 0;
early_serial_setup(&__frv_uart0);
#endif
#ifndef CONFIG_GDBSTUB_UART1
__reg(UART1_BASE + UART_IER * 8) = 0;
early_serial_setup(&__frv_uart1);
#endif
#endif
/* deal with the command line - RedBoot may have passed one to the kernel */
memcpy(command_line, boot_command_line, sizeof(command_line));
*cmdline_p = &command_line[0];
parse_cmdline_early(command_line);
/* set up the memory description
* - by now the stack is part of the init task */
printk("Memory %08lx-%08lx\n", memory_start, memory_end);
BUG_ON(memory_start == memory_end);
init_mm.start_code = (unsigned long) &_stext;
init_mm.end_code = (unsigned long) &_etext;
init_mm.end_data = (unsigned long) &_edata;
#if 0 /* DAVIDM - don't set brk just in case someone decides to use it */
init_mm.brk = (unsigned long) &_end;
#else
init_mm.brk = (unsigned long) 0;
#endif
#ifdef DEBUG
printk("KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x BSS=0x%06x-0x%06x\n",
(int) &_stext, (int) &_etext,
(int) &_sdata, (int) &_edata,
(int) &_sbss, (int) &_ebss);
#endif
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
conswitchp = &dummy_con;
#endif
#endif
#ifdef CONFIG_MMU
setup_linux_memory();
#else
setup_uclinux_memory();
#endif
/* get kmalloc into gear */
paging_init();
/* init DMA */
frv_dma_init();
#ifdef DEBUG
printk("Done setup_arch\n");
#endif
/* start the decrement timer running */
// asm volatile("movgs %0,timerd" :: "r"(10000000));
// __set_HSR(0, __get_HSR(0) | HSR0_ETMD);
} /* end setup_arch() */
#if 0
/*****************************************************************************/
/*
*
*/
static int __devinit setup_arch_serial(void)
{
/* register those serial ports that are available */
#ifndef CONFIG_GDBSTUB_UART0
early_serial_setup(&__frv_uart0);
#endif
#ifndef CONFIG_GDBSTUB_UART1
early_serial_setup(&__frv_uart1);
#endif
return 0;
} /* end setup_arch_serial() */
late_initcall(setup_arch_serial);
#endif
/*****************************************************************************/
/*
* set up the memory map for normal MMU linux
*/
#ifdef CONFIG_MMU
static void __init setup_linux_memory(void)
{
unsigned long bootmap_size, low_top_pfn, kstart, kend, high_mem;
kstart = (unsigned long) &__kernel_image_start - PAGE_OFFSET;
kend = (unsigned long) &__kernel_image_end - PAGE_OFFSET;
kstart = kstart & PAGE_MASK;
kend = (kend + PAGE_SIZE - 1) & PAGE_MASK;
/* give all the memory to the bootmap allocator, tell it to put the
* boot mem_map immediately following the kernel image
*/
bootmap_size = init_bootmem_node(NODE_DATA(0),
kend >> PAGE_SHIFT, /* map addr */
memory_start >> PAGE_SHIFT, /* start of RAM */
memory_end >> PAGE_SHIFT /* end of RAM */
);
/* pass the memory that the kernel can immediately use over to the bootmem allocator */
max_mapnr = num_physpages = (memory_end - memory_start) >> PAGE_SHIFT;
low_top_pfn = (KERNEL_LOWMEM_END - KERNEL_LOWMEM_START) >> PAGE_SHIFT;
high_mem = 0;
if (num_physpages > low_top_pfn) {
#ifdef CONFIG_HIGHMEM
high_mem = num_physpages - low_top_pfn;
#else
max_mapnr = num_physpages = low_top_pfn;
#endif
}
else {
low_top_pfn = num_physpages;
}
min_low_pfn = memory_start >> PAGE_SHIFT;
max_low_pfn = low_top_pfn;
max_pfn = memory_end >> PAGE_SHIFT;
num_mappedpages = low_top_pfn;
printk(KERN_NOTICE "%ldMB LOWMEM available.\n", low_top_pfn >> (20 - PAGE_SHIFT));
free_bootmem(memory_start, low_top_pfn << PAGE_SHIFT);
#ifdef CONFIG_HIGHMEM
if (high_mem)
printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", high_mem >> (20 - PAGE_SHIFT));
#endif
/* take back the memory occupied by the kernel image and the bootmem alloc map */
reserve_bootmem(kstart, kend - kstart + bootmap_size,
BOOTMEM_DEFAULT);
/* reserve the memory occupied by the initial ramdisk */
#ifdef CONFIG_BLK_DEV_INITRD
if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= (low_top_pfn << PAGE_SHIFT)) {
reserve_bootmem(INITRD_START, INITRD_SIZE,
BOOTMEM_DEFAULT);
initrd_start = INITRD_START + PAGE_OFFSET;
initrd_end = initrd_start + INITRD_SIZE;
}
else {
printk(KERN_ERR
"initrd extends beyond end of memory (0x%08lx > 0x%08lx)\n"
"disabling initrd\n",
INITRD_START + INITRD_SIZE,
low_top_pfn << PAGE_SHIFT);
initrd_start = 0;
}
}
#endif
} /* end setup_linux_memory() */
#endif
/*****************************************************************************/
/*
* set up the memory map for uClinux
*/
#ifndef CONFIG_MMU
static void __init setup_uclinux_memory(void)
{
#ifdef CONFIG_PROTECT_KERNEL
unsigned long dampr;
#endif
unsigned long kend;
int bootmap_size;
kend = (unsigned long) &__kernel_image_end;
kend = (kend + PAGE_SIZE - 1) & PAGE_MASK;
/* give all the memory to the bootmap allocator, tell it to put the
* boot mem_map immediately following the kernel image
*/
bootmap_size = init_bootmem_node(NODE_DATA(0),
kend >> PAGE_SHIFT, /* map addr */
memory_start >> PAGE_SHIFT, /* start of RAM */
memory_end >> PAGE_SHIFT /* end of RAM */
);
/* free all the usable memory */
free_bootmem(memory_start, memory_end - memory_start);
high_memory = (void *) (memory_end & PAGE_MASK);
max_mapnr = num_physpages = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
min_low_pfn = memory_start >> PAGE_SHIFT;
max_low_pfn = memory_end >> PAGE_SHIFT;
max_pfn = max_low_pfn;
/* now take back the bits the core kernel is occupying */
#ifndef CONFIG_PROTECT_KERNEL
reserve_bootmem(kend, bootmap_size, BOOTMEM_DEFAULT);
reserve_bootmem((unsigned long) &__kernel_image_start,
kend - (unsigned long) &__kernel_image_start,
BOOTMEM_DEFAULT);
#else
dampr = __get_DAMPR(0);
dampr &= xAMPRx_SS;
dampr = (dampr >> 4) + 17;
dampr = 1 << dampr;
reserve_bootmem(__get_DAMPR(0) & xAMPRx_PPFN, dampr, BOOTMEM_DEFAULT);
#endif
/* reserve some memory to do uncached DMA through if requested */
#ifdef CONFIG_RESERVE_DMA_COHERENT
if (dma_coherent_mem_start)
reserve_bootmem(dma_coherent_mem_start,
dma_coherent_mem_end - dma_coherent_mem_start,
BOOTMEM_DEFAULT);
#endif
} /* end setup_uclinux_memory() */
#endif
/*****************************************************************************/
/*
* get CPU information for use by procfs
*/
static int show_cpuinfo(struct seq_file *m, void *v)
{
const char *gr, *fr, *fm, *fp, *cm, *nem, *ble;
#ifdef CONFIG_PM
const char *sep;
#endif
gr = cpu_hsr0_all & HSR0_GRHE ? "gr0-63" : "gr0-31";
fr = cpu_hsr0_all & HSR0_FRHE ? "fr0-63" : "fr0-31";
fm = cpu_psr_all & PSR_EM ? ", Media" : "";
fp = cpu_psr_all & PSR_EF ? ", FPU" : "";
cm = cpu_psr_all & PSR_CM ? ", CCCR" : "";
nem = cpu_psr_all & PSR_NEM ? ", NE" : "";
ble = cpu_psr_all & PSR_BE ? "BE" : "LE";
seq_printf(m,
"CPU-Series:\t%s\n"
"CPU-Core:\t%s, %s, %s%s%s\n"
"CPU:\t\t%s\n"
"MMU:\t\t%s\n"
"FP-Media:\t%s%s%s\n"
"System:\t\t%s",
cpu_series,
cpu_core, gr, ble, cm, nem,
cpu_silicon,
cpu_mmu,
fr, fm, fp,
cpu_system);
if (cpu_board1)
seq_printf(m, ", %s", cpu_board1);
if (cpu_board2)
seq_printf(m, ", %s", cpu_board2);
seq_printf(m, "\n");
#ifdef CONFIG_PM
seq_printf(m, "PM-Controls:");
sep = "\t";
if (clock_bits_settable & CLOCK_BIT_CMODE) {
seq_printf(m, "%scmode=0x%04hx", sep, clock_cmodes_permitted);
sep = ", ";
}
if (clock_bits_settable & CLOCK_BIT_CM) {
seq_printf(m, "%scm=0x%lx", sep, clock_bits_settable & CLOCK_BIT_CM);
sep = ", ";
}
if (clock_bits_settable & CLOCK_BIT_P0) {
seq_printf(m, "%sp0=0x3", sep);
sep = ", ";
}
seq_printf(m, "%ssuspend=0x22\n", sep);
#endif
seq_printf(m,
"PM-Status:\tcmode=%d, cm=%d, p0=%d\n",
clock_cmode_current, clock_cm_current, clock_p0_current);
#define print_clk(TAG, VAR) \
seq_printf(m, "Clock-" TAG ":\t%lu.%2.2lu MHz\n", VAR / 1000000, (VAR / 10000) % 100)
print_clk("In", __clkin_clock_speed_HZ);
print_clk("Core", __core_clock_speed_HZ);
print_clk("SDRAM", __sdram_clock_speed_HZ);
print_clk("CBus", __core_bus_clock_speed_HZ);
print_clk("Res", __res_bus_clock_speed_HZ);
print_clk("Ext", __ext_bus_clock_speed_HZ);
print_clk("DSU", __dsu_clock_speed_HZ);
seq_printf(m,
"BogoMips:\t%lu.%02lu\n",
(loops_per_jiffy * HZ) / 500000, ((loops_per_jiffy * HZ) / 5000) % 100);
return 0;
} /* end show_cpuinfo() */
static void *c_start(struct seq_file *m, loff_t *pos)
{
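	/* any non-NULL cookie will do here; show_cpuinfo() ignores v */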
return *pos < NR_CPUS ? (void *) 0x12345678 : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
void arch_gettod(int *year, int *mon, int *day, int *hour,
int *min, int *sec)
{
*year = *mon = *day = *hour = *min = *sec = 0;
}
/*****************************************************************************/
/*
*
*/
#ifdef CONFIG_MB93090_MB00
static void __init mb93090_sendlcdcmd(uint32_t cmd)
{
unsigned long base = __addr_LCD();
int loop;
/* request reading of the busy flag */
__set_LCD(base, LCD_CMD_READ_BUSY);
__set_LCD(base, LCD_CMD_READ_BUSY & ~LCD_E);
/* wait for the busy flag to become clear */
for (loop = 10000; loop > 0; loop--)
if (!(__get_LCD(base) & 0x80))
break;
/* send the command */
__set_LCD(base, cmd);
__set_LCD(base, cmd & ~LCD_E);
} /* end mb93090_sendlcdcmd() */
/*****************************************************************************/
/*
* write to the MB93090 LEDs and LCD
*/
static void __init mb93090_display(void)
{
const char *p;
__set_LEDS(0);
/* set up the LCD */
mb93090_sendlcdcmd(LCD_CMD_CLEAR);
mb93090_sendlcdcmd(LCD_CMD_FUNCSET(1,1,0));
mb93090_sendlcdcmd(LCD_CMD_ON(0,0));
mb93090_sendlcdcmd(LCD_CMD_HOME);
mb93090_sendlcdcmd(LCD_CMD_SET_DD_ADDR(0));
for (p = mb93090_banner; *p; p++)
mb93090_sendlcdcmd(LCD_DATA_WRITE(*p));
mb93090_sendlcdcmd(LCD_CMD_SET_DD_ADDR(64));
for (p = mb93090_version; *p; p++)
mb93090_sendlcdcmd(LCD_DATA_WRITE(*p));
} /* end mb93090_display() */
#endif // CONFIG_MB93090_MB00
| gpl-2.0 |
nighthawk149/xpenology-4.2-kernel | drivers/media/video/sn9c102/sn9c102_ov7630.c | 12804 | 19528 | /***************************************************************************
* Plug-in for OV7630 image sensor connected to the SN9C1xx PC Camera *
* Controllers *
* *
* Copyright (C) 2006-2007 by Luca Risolia <luca.risolia@studio.unibo.it> *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the Free Software *
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
***************************************************************************/
#include "sn9c102_sensor.h"
#include "sn9c102_devtable.h"
static int ov7630_init(struct sn9c102_device* cam)
{
int err = 0;
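	/* the write helpers below return negative codes on failure and are
	 * accumulated into err; any non-zero total fails the whole init */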
switch (sn9c102_get_bridge(cam)) {
case BRIDGE_SN9C101:
case BRIDGE_SN9C102:
err = sn9c102_write_const_regs(cam, {0x00, 0x14}, {0x60, 0x17},
{0x0f, 0x18}, {0x50, 0x19});
err += sn9c102_i2c_write(cam, 0x12, 0x8d);
err += sn9c102_i2c_write(cam, 0x12, 0x0d);
err += sn9c102_i2c_write(cam, 0x11, 0x00);
err += sn9c102_i2c_write(cam, 0x15, 0x35);
err += sn9c102_i2c_write(cam, 0x16, 0x03);
err += sn9c102_i2c_write(cam, 0x17, 0x1c);
err += sn9c102_i2c_write(cam, 0x18, 0xbd);
err += sn9c102_i2c_write(cam, 0x19, 0x06);
err += sn9c102_i2c_write(cam, 0x1a, 0xf6);
err += sn9c102_i2c_write(cam, 0x1b, 0x04);
err += sn9c102_i2c_write(cam, 0x20, 0x44);
err += sn9c102_i2c_write(cam, 0x23, 0xee);
err += sn9c102_i2c_write(cam, 0x26, 0xa0);
err += sn9c102_i2c_write(cam, 0x27, 0x9a);
err += sn9c102_i2c_write(cam, 0x28, 0x20);
err += sn9c102_i2c_write(cam, 0x29, 0x30);
err += sn9c102_i2c_write(cam, 0x2f, 0x3d);
err += sn9c102_i2c_write(cam, 0x30, 0x24);
err += sn9c102_i2c_write(cam, 0x32, 0x86);
err += sn9c102_i2c_write(cam, 0x60, 0xa9);
err += sn9c102_i2c_write(cam, 0x61, 0x42);
err += sn9c102_i2c_write(cam, 0x65, 0x00);
err += sn9c102_i2c_write(cam, 0x69, 0x38);
err += sn9c102_i2c_write(cam, 0x6f, 0x88);
err += sn9c102_i2c_write(cam, 0x70, 0x0b);
err += sn9c102_i2c_write(cam, 0x71, 0x00);
err += sn9c102_i2c_write(cam, 0x74, 0x21);
err += sn9c102_i2c_write(cam, 0x7d, 0xf7);
break;
case BRIDGE_SN9C103:
err = sn9c102_write_const_regs(cam, {0x00, 0x02}, {0x00, 0x03},
{0x1a, 0x04}, {0x20, 0x05},
{0x20, 0x06}, {0x20, 0x07},
{0x03, 0x10}, {0x0a, 0x14},
{0x60, 0x17}, {0x0f, 0x18},
{0x50, 0x19}, {0x1d, 0x1a},
{0x10, 0x1b}, {0x02, 0x1c},
{0x03, 0x1d}, {0x0f, 0x1e},
{0x0c, 0x1f}, {0x00, 0x20},
{0x10, 0x21}, {0x20, 0x22},
{0x30, 0x23}, {0x40, 0x24},
{0x50, 0x25}, {0x60, 0x26},
{0x70, 0x27}, {0x80, 0x28},
{0x90, 0x29}, {0xa0, 0x2a},
{0xb0, 0x2b}, {0xc0, 0x2c},
{0xd0, 0x2d}, {0xe0, 0x2e},
{0xf0, 0x2f}, {0xff, 0x30});
err += sn9c102_i2c_write(cam, 0x12, 0x8d);
err += sn9c102_i2c_write(cam, 0x12, 0x0d);
err += sn9c102_i2c_write(cam, 0x15, 0x34);
err += sn9c102_i2c_write(cam, 0x11, 0x01);
err += sn9c102_i2c_write(cam, 0x1b, 0x04);
err += sn9c102_i2c_write(cam, 0x20, 0x44);
err += sn9c102_i2c_write(cam, 0x23, 0xee);
err += sn9c102_i2c_write(cam, 0x26, 0xa0);
err += sn9c102_i2c_write(cam, 0x27, 0x9a);
err += sn9c102_i2c_write(cam, 0x28, 0x20);
err += sn9c102_i2c_write(cam, 0x29, 0x30);
err += sn9c102_i2c_write(cam, 0x2f, 0x3d);
err += sn9c102_i2c_write(cam, 0x30, 0x24);
err += sn9c102_i2c_write(cam, 0x32, 0x86);
err += sn9c102_i2c_write(cam, 0x60, 0xa9);
err += sn9c102_i2c_write(cam, 0x61, 0x42);
err += sn9c102_i2c_write(cam, 0x65, 0x00);
err += sn9c102_i2c_write(cam, 0x69, 0x38);
err += sn9c102_i2c_write(cam, 0x6f, 0x88);
err += sn9c102_i2c_write(cam, 0x70, 0x0b);
err += sn9c102_i2c_write(cam, 0x71, 0x00);
err += sn9c102_i2c_write(cam, 0x74, 0x21);
err += sn9c102_i2c_write(cam, 0x7d, 0xf7);
break;
case BRIDGE_SN9C105:
case BRIDGE_SN9C120:
err = sn9c102_write_const_regs(cam, {0x40, 0x02}, {0x00, 0x03},
{0x1a, 0x04}, {0x03, 0x10},
{0x0a, 0x14}, {0xe2, 0x17},
{0x0b, 0x18}, {0x00, 0x19},
{0x1d, 0x1a}, {0x10, 0x1b},
{0x02, 0x1c}, {0x03, 0x1d},
{0x0f, 0x1e}, {0x0c, 0x1f},
{0x00, 0x20}, {0x24, 0x21},
{0x3b, 0x22}, {0x47, 0x23},
{0x60, 0x24}, {0x71, 0x25},
{0x80, 0x26}, {0x8f, 0x27},
{0x9d, 0x28}, {0xaa, 0x29},
{0xb8, 0x2a}, {0xc4, 0x2b},
{0xd1, 0x2c}, {0xdd, 0x2d},
{0xe8, 0x2e}, {0xf4, 0x2f},
{0xff, 0x30}, {0x00, 0x3f},
{0xc7, 0x40}, {0x01, 0x41},
{0x44, 0x42}, {0x00, 0x43},
{0x44, 0x44}, {0x00, 0x45},
{0x44, 0x46}, {0x00, 0x47},
{0xc7, 0x48}, {0x01, 0x49},
{0xc7, 0x4a}, {0x01, 0x4b},
{0xc7, 0x4c}, {0x01, 0x4d},
{0x44, 0x4e}, {0x00, 0x4f},
{0x44, 0x50}, {0x00, 0x51},
{0x44, 0x52}, {0x00, 0x53},
{0xc7, 0x54}, {0x01, 0x55},
{0xc7, 0x56}, {0x01, 0x57},
{0xc7, 0x58}, {0x01, 0x59},
{0x44, 0x5a}, {0x00, 0x5b},
{0x44, 0x5c}, {0x00, 0x5d},
{0x44, 0x5e}, {0x00, 0x5f},
{0xc7, 0x60}, {0x01, 0x61},
{0xc7, 0x62}, {0x01, 0x63},
{0xc7, 0x64}, {0x01, 0x65},
{0x44, 0x66}, {0x00, 0x67},
{0x44, 0x68}, {0x00, 0x69},
{0x44, 0x6a}, {0x00, 0x6b},
{0xc7, 0x6c}, {0x01, 0x6d},
{0xc7, 0x6e}, {0x01, 0x6f},
{0xc7, 0x70}, {0x01, 0x71},
{0x44, 0x72}, {0x00, 0x73},
{0x44, 0x74}, {0x00, 0x75},
{0x44, 0x76}, {0x00, 0x77},
{0xc7, 0x78}, {0x01, 0x79},
{0xc7, 0x7a}, {0x01, 0x7b},
{0xc7, 0x7c}, {0x01, 0x7d},
{0x44, 0x7e}, {0x00, 0x7f},
{0x17, 0x84}, {0x00, 0x85},
{0x2e, 0x86}, {0x00, 0x87},
{0x09, 0x88}, {0x00, 0x89},
{0xe8, 0x8a}, {0x0f, 0x8b},
{0xda, 0x8c}, {0x0f, 0x8d},
{0x40, 0x8e}, {0x00, 0x8f},
{0x37, 0x90}, {0x00, 0x91},
{0xcf, 0x92}, {0x0f, 0x93},
{0xfa, 0x94}, {0x0f, 0x95},
{0x00, 0x96}, {0x00, 0x97},
{0x00, 0x98}, {0x66, 0x99},
{0x00, 0x9a}, {0x40, 0x9b},
{0x20, 0x9c}, {0x00, 0x9d},
{0x00, 0x9e}, {0x00, 0x9f},
{0x2d, 0xc0}, {0x2d, 0xc1},
{0x3a, 0xc2}, {0x00, 0xc3},
{0x04, 0xc4}, {0x3f, 0xc5},
{0x00, 0xc6}, {0x00, 0xc7},
{0x50, 0xc8}, {0x3c, 0xc9},
{0x28, 0xca}, {0xd8, 0xcb},
{0x14, 0xcc}, {0xec, 0xcd},
{0x32, 0xce}, {0xdd, 0xcf},
{0x32, 0xd0}, {0xdd, 0xd1},
{0x6a, 0xd2}, {0x50, 0xd3},
{0x60, 0xd4}, {0x00, 0xd5},
{0x00, 0xd6});
err += sn9c102_i2c_write(cam, 0x12, 0x80);
err += sn9c102_i2c_write(cam, 0x12, 0x48);
err += sn9c102_i2c_write(cam, 0x01, 0x80);
err += sn9c102_i2c_write(cam, 0x02, 0x80);
err += sn9c102_i2c_write(cam, 0x03, 0x80);
err += sn9c102_i2c_write(cam, 0x04, 0x10);
err += sn9c102_i2c_write(cam, 0x05, 0x20);
err += sn9c102_i2c_write(cam, 0x06, 0x80);
err += sn9c102_i2c_write(cam, 0x11, 0x00);
err += sn9c102_i2c_write(cam, 0x0c, 0x20);
err += sn9c102_i2c_write(cam, 0x0d, 0x20);
err += sn9c102_i2c_write(cam, 0x15, 0x80);
err += sn9c102_i2c_write(cam, 0x16, 0x03);
err += sn9c102_i2c_write(cam, 0x17, 0x1b);
err += sn9c102_i2c_write(cam, 0x18, 0xbd);
err += sn9c102_i2c_write(cam, 0x19, 0x05);
err += sn9c102_i2c_write(cam, 0x1a, 0xf6);
err += sn9c102_i2c_write(cam, 0x1b, 0x04);
err += sn9c102_i2c_write(cam, 0x21, 0x1b);
err += sn9c102_i2c_write(cam, 0x22, 0x00);
err += sn9c102_i2c_write(cam, 0x23, 0xde);
err += sn9c102_i2c_write(cam, 0x24, 0x10);
err += sn9c102_i2c_write(cam, 0x25, 0x8a);
err += sn9c102_i2c_write(cam, 0x26, 0xa0);
err += sn9c102_i2c_write(cam, 0x27, 0xca);
err += sn9c102_i2c_write(cam, 0x28, 0xa2);
err += sn9c102_i2c_write(cam, 0x29, 0x74);
err += sn9c102_i2c_write(cam, 0x2a, 0x88);
err += sn9c102_i2c_write(cam, 0x2b, 0x34);
err += sn9c102_i2c_write(cam, 0x2c, 0x88);
err += sn9c102_i2c_write(cam, 0x2e, 0x00);
err += sn9c102_i2c_write(cam, 0x2f, 0x00);
err += sn9c102_i2c_write(cam, 0x30, 0x00);
err += sn9c102_i2c_write(cam, 0x32, 0xc2);
err += sn9c102_i2c_write(cam, 0x33, 0x08);
err += sn9c102_i2c_write(cam, 0x4c, 0x40);
err += sn9c102_i2c_write(cam, 0x4d, 0xf3);
err += sn9c102_i2c_write(cam, 0x60, 0x05);
err += sn9c102_i2c_write(cam, 0x61, 0x40);
err += sn9c102_i2c_write(cam, 0x62, 0x12);
err += sn9c102_i2c_write(cam, 0x63, 0x57);
err += sn9c102_i2c_write(cam, 0x64, 0x73);
err += sn9c102_i2c_write(cam, 0x65, 0x00);
err += sn9c102_i2c_write(cam, 0x66, 0x55);
err += sn9c102_i2c_write(cam, 0x67, 0x01);
err += sn9c102_i2c_write(cam, 0x68, 0xac);
err += sn9c102_i2c_write(cam, 0x69, 0x38);
err += sn9c102_i2c_write(cam, 0x6f, 0x1f);
err += sn9c102_i2c_write(cam, 0x70, 0x01);
err += sn9c102_i2c_write(cam, 0x71, 0x00);
err += sn9c102_i2c_write(cam, 0x72, 0x10);
err += sn9c102_i2c_write(cam, 0x73, 0x50);
err += sn9c102_i2c_write(cam, 0x74, 0x20);
err += sn9c102_i2c_write(cam, 0x76, 0x01);
err += sn9c102_i2c_write(cam, 0x77, 0xf3);
err += sn9c102_i2c_write(cam, 0x78, 0x90);
err += sn9c102_i2c_write(cam, 0x79, 0x98);
err += sn9c102_i2c_write(cam, 0x7a, 0x98);
err += sn9c102_i2c_write(cam, 0x7b, 0x00);
err += sn9c102_i2c_write(cam, 0x7c, 0x38);
err += sn9c102_i2c_write(cam, 0x7d, 0xff);
break;
default:
break;
}
return err;
}
static int ov7630_get_ctrl(struct sn9c102_device* cam,
struct v4l2_control* ctrl)
{
enum sn9c102_bridge bridge = sn9c102_get_bridge(cam);
int err = 0;
switch (ctrl->id) {
case V4L2_CID_EXPOSURE:
if ((ctrl->value = sn9c102_i2c_read(cam, 0x10)) < 0)
return -EIO;
break;
case V4L2_CID_RED_BALANCE:
if (bridge == BRIDGE_SN9C105 || bridge == BRIDGE_SN9C120)
ctrl->value = sn9c102_pread_reg(cam, 0x05);
else
ctrl->value = sn9c102_pread_reg(cam, 0x07);
break;
case V4L2_CID_BLUE_BALANCE:
ctrl->value = sn9c102_pread_reg(cam, 0x06);
break;
case SN9C102_V4L2_CID_GREEN_BALANCE:
if (bridge == BRIDGE_SN9C105 || bridge == BRIDGE_SN9C120)
ctrl->value = sn9c102_pread_reg(cam, 0x07);
else
ctrl->value = sn9c102_pread_reg(cam, 0x05);
		break;
case V4L2_CID_GAIN:
if ((ctrl->value = sn9c102_i2c_read(cam, 0x00)) < 0)
return -EIO;
ctrl->value &= 0x3f;
break;
case V4L2_CID_DO_WHITE_BALANCE:
if ((ctrl->value = sn9c102_i2c_read(cam, 0x0c)) < 0)
return -EIO;
ctrl->value &= 0x3f;
break;
case V4L2_CID_WHITENESS:
if ((ctrl->value = sn9c102_i2c_read(cam, 0x0d)) < 0)
return -EIO;
ctrl->value &= 0x3f;
break;
case V4L2_CID_AUTOGAIN:
if ((ctrl->value = sn9c102_i2c_read(cam, 0x13)) < 0)
return -EIO;
ctrl->value &= 0x01;
break;
case V4L2_CID_VFLIP:
if ((ctrl->value = sn9c102_i2c_read(cam, 0x75)) < 0)
return -EIO;
ctrl->value = (ctrl->value & 0x80) ? 1 : 0;
break;
case SN9C102_V4L2_CID_GAMMA:
if ((ctrl->value = sn9c102_i2c_read(cam, 0x14)) < 0)
return -EIO;
ctrl->value = (ctrl->value & 0x02) ? 1 : 0;
break;
case SN9C102_V4L2_CID_BAND_FILTER:
if ((ctrl->value = sn9c102_i2c_read(cam, 0x2d)) < 0)
return -EIO;
ctrl->value = (ctrl->value & 0x02) ? 1 : 0;
break;
default:
return -EINVAL;
}
return err ? -EIO : 0;
}
static int ov7630_set_ctrl(struct sn9c102_device* cam,
const struct v4l2_control* ctrl)
{
enum sn9c102_bridge bridge = sn9c102_get_bridge(cam);
int err = 0;
switch (ctrl->id) {
case V4L2_CID_EXPOSURE:
err += sn9c102_i2c_write(cam, 0x10, ctrl->value);
break;
case V4L2_CID_RED_BALANCE:
if (bridge == BRIDGE_SN9C105 || bridge == BRIDGE_SN9C120)
err += sn9c102_write_reg(cam, ctrl->value, 0x05);
else
err += sn9c102_write_reg(cam, ctrl->value, 0x07);
break;
case V4L2_CID_BLUE_BALANCE:
err += sn9c102_write_reg(cam, ctrl->value, 0x06);
break;
case SN9C102_V4L2_CID_GREEN_BALANCE:
if (bridge == BRIDGE_SN9C105 || bridge == BRIDGE_SN9C120)
err += sn9c102_write_reg(cam, ctrl->value, 0x07);
else
err += sn9c102_write_reg(cam, ctrl->value, 0x05);
break;
case V4L2_CID_GAIN:
err += sn9c102_i2c_write(cam, 0x00, ctrl->value);
break;
case V4L2_CID_DO_WHITE_BALANCE:
err += sn9c102_i2c_write(cam, 0x0c, ctrl->value);
break;
case V4L2_CID_WHITENESS:
err += sn9c102_i2c_write(cam, 0x0d, ctrl->value);
break;
case V4L2_CID_AUTOGAIN:
err += sn9c102_i2c_write(cam, 0x13, ctrl->value |
(ctrl->value << 1));
break;
case V4L2_CID_VFLIP:
err += sn9c102_i2c_write(cam, 0x75, 0x0e | (ctrl->value << 7));
break;
case SN9C102_V4L2_CID_GAMMA:
err += sn9c102_i2c_write(cam, 0x14, ctrl->value << 2);
break;
case SN9C102_V4L2_CID_BAND_FILTER:
err += sn9c102_i2c_write(cam, 0x2d, ctrl->value << 2);
break;
default:
return -EINVAL;
}
return err ? -EIO : 0;
}
static int ov7630_set_crop(struct sn9c102_device* cam,
const struct v4l2_rect* rect)
{
struct sn9c102_sensor* s = sn9c102_get_sensor(cam);
int err = 0;
u8 h_start = 0, v_start = (u8)(rect->top - s->cropcap.bounds.top) + 1;
switch (sn9c102_get_bridge(cam)) {
case BRIDGE_SN9C101:
case BRIDGE_SN9C102:
case BRIDGE_SN9C103:
h_start = (u8)(rect->left - s->cropcap.bounds.left) + 1;
break;
case BRIDGE_SN9C105:
case BRIDGE_SN9C120:
h_start = (u8)(rect->left - s->cropcap.bounds.left) + 4;
break;
default:
break;
}
err += sn9c102_write_reg(cam, h_start, 0x12);
err += sn9c102_write_reg(cam, v_start, 0x13);
return err;
}
static int ov7630_set_pix_format(struct sn9c102_device* cam,
const struct v4l2_pix_format* pix)
{
int err = 0;
switch (sn9c102_get_bridge(cam)) {
case BRIDGE_SN9C101:
case BRIDGE_SN9C102:
case BRIDGE_SN9C103:
if (pix->pixelformat == V4L2_PIX_FMT_SBGGR8)
err += sn9c102_write_reg(cam, 0x50, 0x19);
else
err += sn9c102_write_reg(cam, 0x20, 0x19);
break;
case BRIDGE_SN9C105:
case BRIDGE_SN9C120:
if (pix->pixelformat == V4L2_PIX_FMT_SBGGR8) {
err += sn9c102_write_reg(cam, 0xe5, 0x17);
err += sn9c102_i2c_write(cam, 0x11, 0x04);
} else {
err += sn9c102_write_reg(cam, 0xe2, 0x17);
err += sn9c102_i2c_write(cam, 0x11, 0x02);
}
break;
default:
break;
}
return err;
}
static const struct sn9c102_sensor ov7630 = {
.name = "OV7630",
.maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>",
.supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102 | BRIDGE_SN9C103 |
BRIDGE_SN9C105 | BRIDGE_SN9C120,
.sysfs_ops = SN9C102_I2C_READ | SN9C102_I2C_WRITE,
.frequency = SN9C102_I2C_100KHZ,
.interface = SN9C102_I2C_2WIRES,
.i2c_slave_id = 0x21,
.init = &ov7630_init,
.qctrl = {
{
.id = V4L2_CID_GAIN,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "global gain",
.minimum = 0x00,
.maximum = 0x3f,
.step = 0x01,
.default_value = 0x14,
.flags = 0,
},
{
.id = V4L2_CID_EXPOSURE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "exposure",
.minimum = 0x00,
.maximum = 0xff,
.step = 0x01,
.default_value = 0x60,
.flags = 0,
},
{
.id = V4L2_CID_WHITENESS,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "white balance background: red",
.minimum = 0x00,
.maximum = 0x3f,
.step = 0x01,
.default_value = 0x20,
.flags = 0,
},
{
.id = V4L2_CID_DO_WHITE_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "white balance background: blue",
.minimum = 0x00,
.maximum = 0x3f,
.step = 0x01,
.default_value = 0x20,
.flags = 0,
},
{
.id = V4L2_CID_RED_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "red balance",
.minimum = 0x00,
.maximum = 0x7f,
.step = 0x01,
.default_value = 0x20,
.flags = 0,
},
{
.id = V4L2_CID_BLUE_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "blue balance",
.minimum = 0x00,
.maximum = 0x7f,
.step = 0x01,
.default_value = 0x20,
.flags = 0,
},
{
.id = V4L2_CID_AUTOGAIN,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "auto adjust",
.minimum = 0x00,
.maximum = 0x01,
.step = 0x01,
.default_value = 0x00,
.flags = 0,
},
{
.id = V4L2_CID_VFLIP,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "vertical flip",
.minimum = 0x00,
.maximum = 0x01,
.step = 0x01,
.default_value = 0x01,
.flags = 0,
},
{
.id = SN9C102_V4L2_CID_GREEN_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "green balance",
.minimum = 0x00,
.maximum = 0x7f,
.step = 0x01,
.default_value = 0x20,
.flags = 0,
},
{
.id = SN9C102_V4L2_CID_BAND_FILTER,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "band filter",
.minimum = 0x00,
.maximum = 0x01,
.step = 0x01,
.default_value = 0x00,
.flags = 0,
},
{
.id = SN9C102_V4L2_CID_GAMMA,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "rgb gamma",
.minimum = 0x00,
.maximum = 0x01,
.step = 0x01,
.default_value = 0x00,
.flags = 0,
},
},
.get_ctrl = &ov7630_get_ctrl,
.set_ctrl = &ov7630_set_ctrl,
.cropcap = {
.bounds = {
.left = 0,
.top = 0,
.width = 640,
.height = 480,
},
.defrect = {
.left = 0,
.top = 0,
.width = 640,
.height = 480,
},
},
.set_crop = &ov7630_set_crop,
.pix_format = {
.width = 640,
.height = 480,
.pixelformat = V4L2_PIX_FMT_SN9C10X,
.priv = 8,
},
.set_pix_format = &ov7630_set_pix_format
};
int sn9c102_probe_ov7630(struct sn9c102_device* cam)
{
int pid, ver, err = 0;
switch (sn9c102_get_bridge(cam)) {
case BRIDGE_SN9C101:
case BRIDGE_SN9C102:
err = sn9c102_write_const_regs(cam, {0x01, 0x01}, {0x00, 0x01},
{0x28, 0x17});
break;
case BRIDGE_SN9C103: /* do _not_ change anything! */
err = sn9c102_write_const_regs(cam, {0x09, 0x01}, {0x42, 0x01},
{0x28, 0x17}, {0x44, 0x02});
pid = sn9c102_i2c_try_read(cam, &ov7630, 0x0a);
if (err || pid < 0) /* try a different initialization */
err += sn9c102_write_const_regs(cam, {0x01, 0x01},
{0x00, 0x01});
break;
case BRIDGE_SN9C105:
case BRIDGE_SN9C120:
err = sn9c102_write_const_regs(cam, {0x01, 0xf1}, {0x00, 0xf1},
{0x29, 0x01}, {0x74, 0x02},
{0x0e, 0x01}, {0x44, 0x01});
break;
default:
break;
}
pid = sn9c102_i2c_try_read(cam, &ov7630, 0x0a);
ver = sn9c102_i2c_try_read(cam, &ov7630, 0x0b);
if (err || pid < 0 || ver < 0)
return -EIO;
if (pid != 0x76 || ver != 0x31)
return -ENODEV;
sn9c102_attach_sensor(cam, &ov7630);
return 0;
}
| gpl-2.0 |
MoKee/android_kernel_htc_villec2 | drivers/infiniband/hw/mthca/mthca_profile.c | 13572 | 9370 | /*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/slab.h>
#include "mthca_profile.h"
enum {
MTHCA_RES_QP,
MTHCA_RES_EEC,
MTHCA_RES_SRQ,
MTHCA_RES_CQ,
MTHCA_RES_EQP,
MTHCA_RES_EEEC,
MTHCA_RES_EQ,
MTHCA_RES_RDB,
MTHCA_RES_MCG,
MTHCA_RES_MPT,
MTHCA_RES_MTT,
MTHCA_RES_UAR,
MTHCA_RES_UDAV,
MTHCA_RES_UARC,
MTHCA_RES_NUM
};
enum {
MTHCA_NUM_EQS = 32,
MTHCA_NUM_PDS = 1 << 15
};
s64 mthca_make_profile(struct mthca_dev *dev,
struct mthca_profile *request,
struct mthca_dev_lim *dev_lim,
struct mthca_init_hca_param *init_hca)
{
struct mthca_resource {
u64 size;
u64 start;
int type;
int num;
int log_num;
};
u64 mem_base, mem_avail;
s64 total_size = 0;
struct mthca_resource *profile;
struct mthca_resource tmp;
int i, j;
profile = kzalloc(MTHCA_RES_NUM * sizeof *profile, GFP_KERNEL);
if (!profile)
return -ENOMEM;
profile[MTHCA_RES_QP].size = dev_lim->qpc_entry_sz;
profile[MTHCA_RES_EEC].size = dev_lim->eec_entry_sz;
profile[MTHCA_RES_SRQ].size = dev_lim->srq_entry_sz;
profile[MTHCA_RES_CQ].size = dev_lim->cqc_entry_sz;
profile[MTHCA_RES_EQP].size = dev_lim->eqpc_entry_sz;
profile[MTHCA_RES_EEEC].size = dev_lim->eeec_entry_sz;
profile[MTHCA_RES_EQ].size = dev_lim->eqc_entry_sz;
profile[MTHCA_RES_RDB].size = MTHCA_RDB_ENTRY_SIZE;
profile[MTHCA_RES_MCG].size = MTHCA_MGM_ENTRY_SIZE;
profile[MTHCA_RES_MPT].size = dev_lim->mpt_entry_sz;
profile[MTHCA_RES_MTT].size = dev->limits.mtt_seg_size;
profile[MTHCA_RES_UAR].size = dev_lim->uar_scratch_entry_sz;
profile[MTHCA_RES_UDAV].size = MTHCA_AV_SIZE;
profile[MTHCA_RES_UARC].size = request->uarc_size;
profile[MTHCA_RES_QP].num = request->num_qp;
profile[MTHCA_RES_SRQ].num = request->num_srq;
profile[MTHCA_RES_EQP].num = request->num_qp;
profile[MTHCA_RES_RDB].num = request->num_qp * request->rdb_per_qp;
profile[MTHCA_RES_CQ].num = request->num_cq;
profile[MTHCA_RES_EQ].num = MTHCA_NUM_EQS;
profile[MTHCA_RES_MCG].num = request->num_mcg;
profile[MTHCA_RES_MPT].num = request->num_mpt;
profile[MTHCA_RES_MTT].num = request->num_mtt;
profile[MTHCA_RES_UAR].num = request->num_uar;
profile[MTHCA_RES_UARC].num = request->num_uar;
profile[MTHCA_RES_UDAV].num = request->num_udav;
for (i = 0; i < MTHCA_RES_NUM; ++i) {
profile[i].type = i;
profile[i].log_num = max(ffs(profile[i].num) - 1, 0);
profile[i].size *= profile[i].num;
if (mthca_is_memfree(dev))
profile[i].size = max(profile[i].size, (u64) PAGE_SIZE);
}
if (mthca_is_memfree(dev)) {
mem_base = 0;
mem_avail = dev_lim->hca.arbel.max_icm_sz;
} else {
mem_base = dev->ddr_start;
mem_avail = dev->fw.tavor.fw_start - dev->ddr_start;
}
/*
* Sort the resources in decreasing order of size. Since they
* all have sizes that are powers of 2, we'll be able to keep
* resources aligned to their size and pack them without gaps
* using the sorted order.
*/
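	/* a simple O(n^2) bubble sort is plenty here: MTHCA_RES_NUM is only
	 * 14 entries and this runs once at device initialization */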
for (i = MTHCA_RES_NUM; i > 0; --i)
for (j = 1; j < i; ++j) {
if (profile[j].size > profile[j - 1].size) {
tmp = profile[j];
profile[j] = profile[j - 1];
profile[j - 1] = tmp;
}
}
for (i = 0; i < MTHCA_RES_NUM; ++i) {
if (profile[i].size) {
profile[i].start = mem_base + total_size;
total_size += profile[i].size;
}
if (total_size > mem_avail) {
mthca_err(dev, "Profile requires 0x%llx bytes; "
"won't fit in 0x%llx bytes of context memory.\n",
(unsigned long long) total_size,
(unsigned long long) mem_avail);
kfree(profile);
return -ENOMEM;
}
if (profile[i].size)
mthca_dbg(dev, "profile[%2d]--%2d/%2d @ 0x%16llx "
"(size 0x%8llx)\n",
i, profile[i].type, profile[i].log_num,
(unsigned long long) profile[i].start,
(unsigned long long) profile[i].size);
}
if (mthca_is_memfree(dev))
mthca_dbg(dev, "HCA context memory: reserving %d KB\n",
(int) (total_size >> 10));
else
mthca_dbg(dev, "HCA memory: allocated %d KB/%d KB (%d KB free)\n",
(int) (total_size >> 10), (int) (mem_avail >> 10),
(int) ((mem_avail - total_size) >> 10));
for (i = 0; i < MTHCA_RES_NUM; ++i) {
switch (profile[i].type) {
case MTHCA_RES_QP:
dev->limits.num_qps = profile[i].num;
init_hca->qpc_base = profile[i].start;
init_hca->log_num_qps = profile[i].log_num;
break;
case MTHCA_RES_EEC:
dev->limits.num_eecs = profile[i].num;
init_hca->eec_base = profile[i].start;
init_hca->log_num_eecs = profile[i].log_num;
break;
case MTHCA_RES_SRQ:
dev->limits.num_srqs = profile[i].num;
init_hca->srqc_base = profile[i].start;
init_hca->log_num_srqs = profile[i].log_num;
break;
case MTHCA_RES_CQ:
dev->limits.num_cqs = profile[i].num;
init_hca->cqc_base = profile[i].start;
init_hca->log_num_cqs = profile[i].log_num;
break;
case MTHCA_RES_EQP:
init_hca->eqpc_base = profile[i].start;
break;
case MTHCA_RES_EEEC:
init_hca->eeec_base = profile[i].start;
break;
case MTHCA_RES_EQ:
dev->limits.num_eqs = profile[i].num;
init_hca->eqc_base = profile[i].start;
init_hca->log_num_eqs = profile[i].log_num;
break;
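/*
 * The loop below derives the smallest rdb_shift such that
 * num_qp << rdb_shift covers all RDB entries, i.e. the ceiling of
 * log2(RDBs per QP).  Illustrative numbers: 1024 QPs with 4096 RDB
 * entries give rdb_shift == 2.
 */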
case MTHCA_RES_RDB:
for (dev->qp_table.rdb_shift = 0;
request->num_qp << dev->qp_table.rdb_shift < profile[i].num;
++dev->qp_table.rdb_shift)
; /* nothing */
dev->qp_table.rdb_base = (u32) profile[i].start;
init_hca->rdb_base = profile[i].start;
break;
case MTHCA_RES_MCG:
dev->limits.num_mgms = profile[i].num >> 1;
dev->limits.num_amgms = profile[i].num >> 1;
init_hca->mc_base = profile[i].start;
init_hca->log_mc_entry_sz = ffs(MTHCA_MGM_ENTRY_SIZE) - 1;
init_hca->log_mc_table_sz = profile[i].log_num;
init_hca->mc_hash_sz = 1 << (profile[i].log_num - 1);
break;
case MTHCA_RES_MPT:
dev->limits.num_mpts = profile[i].num;
dev->mr_table.mpt_base = profile[i].start;
init_hca->mpt_base = profile[i].start;
init_hca->log_mpt_sz = profile[i].log_num;
break;
case MTHCA_RES_MTT:
dev->limits.num_mtt_segs = profile[i].num;
dev->mr_table.mtt_base = profile[i].start;
init_hca->mtt_base = profile[i].start;
init_hca->mtt_seg_sz = ffs(dev->limits.mtt_seg_size) - 7;
break;
case MTHCA_RES_UAR:
dev->limits.num_uars = profile[i].num;
init_hca->uar_scratch_base = profile[i].start;
break;
case MTHCA_RES_UDAV:
dev->av_table.ddr_av_base = profile[i].start;
dev->av_table.num_ddr_avs = profile[i].num;
break;
case MTHCA_RES_UARC:
dev->uar_table.uarc_size = request->uarc_size;
dev->uar_table.uarc_base = profile[i].start;
init_hca->uarc_base = profile[i].start;
init_hca->log_uarc_sz = ffs(request->uarc_size) - 13;
init_hca->log_uar_sz = ffs(request->num_uar) - 1;
break;
default:
break;
}
}
/*
* PDs don't take any HCA memory, but we assign them as part
* of the HCA profile anyway.
*/
dev->limits.num_pds = MTHCA_NUM_PDS;
if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT &&
init_hca->log_mpt_sz > 23) {
mthca_warn(dev, "MPT table too large (requested size 2^%d >= 2^24)\n",
init_hca->log_mpt_sz);
mthca_warn(dev, "Disabling memory key throughput optimization.\n");
dev->mthca_flags &= ~MTHCA_FLAG_SINAI_OPT;
}
/*
 * For Tavor, FMRs use ioremapped PCI memory.  On 32-bit systems,
 * mapping all MTT memory may use too much vmalloc space, so we
 * reserve some MTTs for FMR access, taking them out of the MR pool.
 * They don't use additional memory, but we assign them as part of
 * the HCA profile anyway.
 */
if (mthca_is_memfree(dev) || BITS_PER_LONG == 64)
dev->limits.fmr_reserved_mtts = 0;
else
dev->limits.fmr_reserved_mtts = request->fmr_reserved_mtts;
kfree(profile);
return total_size;
}
| gpl-2.0 |
vyatta/vyatta-strongswan | src/libstrongswan/plugins/openssl/openssl_sha1_prf.c | 5 | 2767 | /*
* Copyright (C) 2010 Martin Willi
* Hochschule fuer Technik Rapperswil
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include "openssl_sha1_prf.h"
#include <openssl/sha.h>
typedef struct private_openssl_sha1_prf_t private_openssl_sha1_prf_t;
/**
* Private data of an openssl_sha1_prf_t object.
*/
struct private_openssl_sha1_prf_t {
/**
* Public openssl_sha1_prf_t interface.
*/
openssl_sha1_prf_t public;
/**
* SHA1 context
*/
SHA_CTX ctx;
};
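/*
 * Note: this PRF is not HMAC based.  set_key() below XORs the key
 * directly into the SHA1 initialisation vector, and get_bytes()
 * exports the running intermediate state h0..h4 in big-endian order
 * after each update.  This relies on OpenSSL's legacy SHA_CTX
 * exposing its internal state words.
 */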
METHOD(prf_t, get_bytes, void,
private_openssl_sha1_prf_t *this, chunk_t seed, u_int8_t *bytes)
{
SHA1_Update(&this->ctx, seed.ptr, seed.len);
if (bytes)
{
u_int32_t *hash = (u_int32_t*)bytes;
hash[0] = htonl(this->ctx.h0);
hash[1] = htonl(this->ctx.h1);
hash[2] = htonl(this->ctx.h2);
hash[3] = htonl(this->ctx.h3);
hash[4] = htonl(this->ctx.h4);
}
}
METHOD(prf_t, get_block_size, size_t,
private_openssl_sha1_prf_t *this)
{
return HASH_SIZE_SHA1;
}
METHOD(prf_t, allocate_bytes, void,
private_openssl_sha1_prf_t *this, chunk_t seed, chunk_t *chunk)
{
if (chunk)
{
*chunk = chunk_alloc(HASH_SIZE_SHA1);
get_bytes(this, seed, chunk->ptr);
}
else
{
get_bytes(this, seed, NULL);
}
}
METHOD(prf_t, get_key_size, size_t,
private_openssl_sha1_prf_t *this)
{
return HASH_SIZE_SHA1;
}
METHOD(prf_t, set_key, void,
private_openssl_sha1_prf_t *this, chunk_t key)
{
SHA1_Init(&this->ctx);
if (key.len >= 4)
{
this->ctx.h0 ^= untoh32(key.ptr);
}
if (key.len >= 8)
{
this->ctx.h1 ^= untoh32(key.ptr + 4);
}
if (key.len >= 12)
{
this->ctx.h2 ^= untoh32(key.ptr + 8);
}
if (key.len >= 16)
{
this->ctx.h3 ^= untoh32(key.ptr + 12);
}
if (key.len >= 20)
{
this->ctx.h4 ^= untoh32(key.ptr + 16);
}
}
METHOD(prf_t, destroy, void,
private_openssl_sha1_prf_t *this)
{
free(this);
}
/**
* See header
*/
openssl_sha1_prf_t *openssl_sha1_prf_create(pseudo_random_function_t algo)
{
private_openssl_sha1_prf_t *this;
if (algo != PRF_KEYED_SHA1)
{
return NULL;
}
INIT(this,
.public = {
.prf = {
.get_block_size = _get_block_size,
.get_bytes = _get_bytes,
.allocate_bytes = _allocate_bytes,
.get_key_size = _get_key_size,
.set_key = _set_key,
.destroy = _destroy,
},
},
);
return &this->public;
}
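/*
 * Minimal usage sketch (illustrative only; assumes the generic prf_t
 * interface and chunk_t variables key and seed):
 *
 *   prf_t *prf = &openssl_sha1_prf_create(PRF_KEYED_SHA1)->prf;
 *   u_int8_t out[HASH_SIZE_SHA1];
 *   prf->set_key(prf, key);
 *   prf->get_bytes(prf, seed, out);
 *   prf->destroy(prf);
 */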
| gpl-2.0 |
NaturalGIS/QGIS | src/gui/qgsprojectionselectionwidget.cpp | 5 | 12981 | /***************************************************************************
qgsprojectionselectionwidget.cpp
--------------------------------------
Date : 05.01.2015
Copyright : (C) 2015 Denis Rouzaud
Email : denis.rouzaud@gmail.com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
#include <QHBoxLayout>
#include "qgsprojectionselectionwidget.h"
#include "qgsapplication.h"
#include "qgsprojectionselectiondialog.h"
#include "qgsproject.h"
#include "qgssettings.h"
#include "qgshighlightablecombobox.h"
QgsProjectionSelectionWidget::QgsProjectionSelectionWidget( QWidget *parent )
: QWidget( parent )
{
QHBoxLayout *layout = new QHBoxLayout();
layout->setContentsMargins( 0, 0, 0, 0 );
layout->setSpacing( 6 );
setLayout( layout );
mCrsComboBox = new QgsHighlightableComboBox( this );
mCrsComboBox->addItem( tr( "invalid projection" ), QgsProjectionSelectionWidget::CurrentCrs );
mCrsComboBox->setSizePolicy( QSizePolicy::Ignored, QSizePolicy::Preferred );
mProjectCrs = QgsProject::instance()->crs();
addProjectCrsOption();
QgsSettings settings;
mDefaultCrs = QgsCoordinateReferenceSystem( settings.value( QStringLiteral( "/projections/defaultProjectCrs" ), geoEpsgCrsAuthId(), QgsSettings::App ).toString() );
if ( mDefaultCrs.authid() != mProjectCrs.authid() )
{
//only show default CRS option if it's different to the project CRS, avoids
//needlessly cluttering the widget
addDefaultCrsOption();
}
addRecentCrs();
layout->addWidget( mCrsComboBox );
mButton = new QToolButton( this );
mButton->setIcon( QgsApplication::getThemeIcon( QStringLiteral( "mActionSetProjection.svg" ) ) );
mButton->setToolTip( tr( "Select CRS" ) );
layout->addWidget( mButton );
setFocusPolicy( Qt::StrongFocus );
setFocusProxy( mButton );
setAcceptDrops( true );
connect( mButton, &QToolButton::clicked, this, &QgsProjectionSelectionWidget::selectCrs );
connect( mCrsComboBox, static_cast<void ( QComboBox::* )( int )>( &QComboBox::currentIndexChanged ), this, &QgsProjectionSelectionWidget::comboIndexChanged );
}
QgsCoordinateReferenceSystem QgsProjectionSelectionWidget::crs() const
{
switch ( static_cast< CrsOption >( mCrsComboBox->currentData().toInt() ) )
{
case QgsProjectionSelectionWidget::LayerCrs:
return mLayerCrs;
case QgsProjectionSelectionWidget::ProjectCrs:
return mProjectCrs;
case QgsProjectionSelectionWidget::DefaultCrs:
return mDefaultCrs;
case QgsProjectionSelectionWidget::CurrentCrs:
return mCrs;
case QgsProjectionSelectionWidget::RecentCrs:
{
long srsid = mCrsComboBox->currentData( Qt::UserRole + 1 ).toLongLong();
QgsCoordinateReferenceSystem crs = QgsCoordinateReferenceSystem::fromSrsId( srsid );
return crs;
}
case QgsProjectionSelectionWidget::CrsNotSet:
return QgsCoordinateReferenceSystem();
}
return mCrs;
}
void QgsProjectionSelectionWidget::setOptionVisible( const QgsProjectionSelectionWidget::CrsOption option, const bool visible )
{
int optionIndex = mCrsComboBox->findData( option );
if ( visible && optionIndex < 0 )
{
//add missing CRS option
switch ( option )
{
case QgsProjectionSelectionWidget::LayerCrs:
{
setLayerCrs( mLayerCrs );
return;
}
case QgsProjectionSelectionWidget::ProjectCrs:
{
addProjectCrsOption();
return;
}
case QgsProjectionSelectionWidget::DefaultCrs:
{
addDefaultCrsOption();
return;
}
case QgsProjectionSelectionWidget::CurrentCrs:
{
addCurrentCrsOption();
return;
}
case QgsProjectionSelectionWidget::RecentCrs:
//recently used CRS options cannot be re-added
return;
case QgsProjectionSelectionWidget::CrsNotSet:
{
addNotSetOption();
if ( optionVisible( CurrentCrs ) && !mCrs.isValid() )
{
// hide invalid option if not set option is shown
setOptionVisible( CurrentCrs, false );
}
return;
}
}
}
else if ( !visible && optionIndex >= 0 )
{
//remove CRS option
mCrsComboBox->removeItem( optionIndex );
if ( option == CrsNotSet )
{
setOptionVisible( CurrentCrs, true );
}
}
}
void QgsProjectionSelectionWidget::setNotSetText( const QString &text )
{
mNotSetText = text;
int optionIndex = mCrsComboBox->findData( CrsNotSet );
if ( optionIndex >= 0 )
{
mCrsComboBox->setItemText( optionIndex, mNotSetText );
}
}
void QgsProjectionSelectionWidget::setMessage( const QString &text )
{
mMessage = text;
}
bool QgsProjectionSelectionWidget::optionVisible( QgsProjectionSelectionWidget::CrsOption option ) const
{
int optionIndex = mCrsComboBox->findData( option );
return optionIndex >= 0;
}
void QgsProjectionSelectionWidget::selectCrs()
{
//open the CRS selector dialog, preset to the current CRS
QgsProjectionSelectionDialog dlg( this );
if ( !mMessage.isEmpty() )
dlg.setMessage( mMessage );
dlg.setCrs( mCrs );
if ( optionVisible( QgsProjectionSelectionWidget::CrsOption::CrsNotSet ) )
{
dlg.setShowNoProjection( true );
}
if ( dlg.exec() )
{
mCrsComboBox->blockSignals( true );
mCrsComboBox->setCurrentIndex( mCrsComboBox->findData( QgsProjectionSelectionWidget::CurrentCrs ) );
mCrsComboBox->blockSignals( false );
QgsCoordinateReferenceSystem crs = dlg.crs();
setCrs( crs );
emit crsChanged( crs );
}
else
{
QApplication::restoreOverrideCursor();
}
}
void QgsProjectionSelectionWidget::dragEnterEvent( QDragEnterEvent *event )
{
if ( !( event->possibleActions() & Qt::CopyAction ) )
{
event->ignore();
return;
}
if ( mapLayerFromMimeData( event->mimeData() ) )
{
// dragged an acceptable layer, phew
event->setDropAction( Qt::CopyAction );
event->accept();
mCrsComboBox->setHighlighted( true );
update();
}
else
{
event->ignore();
}
}
void QgsProjectionSelectionWidget::dragLeaveEvent( QDragLeaveEvent *event )
{
if ( mCrsComboBox->isHighlighted() )
{
event->accept();
mCrsComboBox->setHighlighted( false );
update();
}
else
{
event->ignore();
}
}
void QgsProjectionSelectionWidget::dropEvent( QDropEvent *event )
{
if ( !( event->possibleActions() & Qt::CopyAction ) )
{
event->ignore();
return;
}
if ( QgsMapLayer *layer = mapLayerFromMimeData( event->mimeData() ) )
{
// dropped a map layer
setFocus( Qt::MouseFocusReason );
event->setDropAction( Qt::CopyAction );
event->accept();
if ( layer->crs().isValid() )
setCrs( layer->crs() );
}
else
{
event->ignore();
}
mCrsComboBox->setHighlighted( false );
update();
}
void QgsProjectionSelectionWidget::addNotSetOption()
{
mCrsComboBox->insertItem( 0, mNotSetText, QgsProjectionSelectionWidget::CrsNotSet );
if ( !mCrs.isValid() )
whileBlocking( mCrsComboBox )->setCurrentIndex( 0 );
}
void QgsProjectionSelectionWidget::comboIndexChanged( int idx )
{
switch ( static_cast< CrsOption >( mCrsComboBox->itemData( idx ).toInt() ) )
{
case QgsProjectionSelectionWidget::LayerCrs:
emit crsChanged( mLayerCrs );
break;
case QgsProjectionSelectionWidget::ProjectCrs:
emit crsChanged( mProjectCrs );
break;
case QgsProjectionSelectionWidget::CurrentCrs:
emit crsChanged( mCrs );
break;
case QgsProjectionSelectionWidget::DefaultCrs:
emit crsChanged( mDefaultCrs );
break;
case QgsProjectionSelectionWidget::RecentCrs:
{
long srsid = mCrsComboBox->itemData( idx, Qt::UserRole + 1 ).toLongLong();
QgsCoordinateReferenceSystem crs = QgsCoordinateReferenceSystem::fromSrsId( srsid );
emit crsChanged( crs );
break;
}
case QgsProjectionSelectionWidget::CrsNotSet:
emit cleared();
emit crsChanged( QgsCoordinateReferenceSystem() );
break;
}
updateTooltip();
}
void QgsProjectionSelectionWidget::setCrs( const QgsCoordinateReferenceSystem &crs )
{
if ( crs.isValid() )
{
if ( !optionVisible( QgsProjectionSelectionWidget::CurrentCrs ) )
setOptionVisible( QgsProjectionSelectionWidget::CurrentCrs, true );
mCrsComboBox->setItemText( mCrsComboBox->findData( QgsProjectionSelectionWidget::CurrentCrs ),
crsOptionText( crs ) );
mCrsComboBox->blockSignals( true );
mCrsComboBox->setCurrentIndex( mCrsComboBox->findData( QgsProjectionSelectionWidget::CurrentCrs ) );
mCrsComboBox->blockSignals( false );
}
else
{
int crsNotSetIndex = mCrsComboBox->findData( QgsProjectionSelectionWidget::CrsNotSet );
if ( crsNotSetIndex >= 0 )
{
mCrsComboBox->blockSignals( true );
mCrsComboBox->setCurrentIndex( crsNotSetIndex );
mCrsComboBox->blockSignals( false );
}
else
{
mCrsComboBox->setItemText( mCrsComboBox->findData( QgsProjectionSelectionWidget::CurrentCrs ),
crsOptionText( crs ) );
}
}
if ( mCrs != crs )
{
mCrs = crs;
emit crsChanged( crs );
}
updateTooltip();
}
void QgsProjectionSelectionWidget::setLayerCrs( const QgsCoordinateReferenceSystem &crs )
{
int layerItemIndex = mCrsComboBox->findData( QgsProjectionSelectionWidget::LayerCrs );
if ( crs.isValid() )
{
if ( layerItemIndex > -1 )
{
mCrsComboBox->setItemText( layerItemIndex, tr( "Layer CRS: %1" ).arg( crs.userFriendlyIdentifier() ) );
}
else
{
mCrsComboBox->insertItem( firstRecentCrsIndex(), tr( "Layer CRS: %1" ).arg( crs.userFriendlyIdentifier() ), QgsProjectionSelectionWidget::LayerCrs );
}
}
else
{
if ( layerItemIndex > -1 )
{
mCrsComboBox->removeItem( layerItemIndex );
}
}
mLayerCrs = crs;
}
void QgsProjectionSelectionWidget::addProjectCrsOption()
{
if ( mProjectCrs.isValid() )
{
mCrsComboBox->addItem( tr( "Project CRS: %1" ).arg( mProjectCrs.userFriendlyIdentifier() ), QgsProjectionSelectionWidget::ProjectCrs );
}
}
void QgsProjectionSelectionWidget::addDefaultCrsOption()
{
mCrsComboBox->addItem( tr( "Default CRS: %1" ).arg( mDefaultCrs.userFriendlyIdentifier() ), QgsProjectionSelectionWidget::DefaultCrs );
}
void QgsProjectionSelectionWidget::addCurrentCrsOption()
{
int index = optionVisible( CrsNotSet ) ? 1 : 0;
mCrsComboBox->insertItem( index, crsOptionText( mCrs ), QgsProjectionSelectionWidget::CurrentCrs );
}
QString QgsProjectionSelectionWidget::crsOptionText( const QgsCoordinateReferenceSystem &crs )
{
if ( crs.isValid() )
return crs.userFriendlyIdentifier();
else
return tr( "invalid projection" );
}
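// Combo box data layout: every item stores its CrsOption in the default
// data role; recently used entries additionally carry the CRS srsid in
// Qt::UserRole + 1, which crs() and comboIndexChanged() read back.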
void QgsProjectionSelectionWidget::addRecentCrs()
{
const QList< QgsCoordinateReferenceSystem> recentProjections = QgsCoordinateReferenceSystem::recentCoordinateReferenceSystems();
for ( const QgsCoordinateReferenceSystem &crs : recentProjections )
{
long srsid = crs.srsid();
//check if already shown
if ( crsIsShown( srsid ) )
{
continue;
}
if ( crs.isValid() )
{
mCrsComboBox->addItem( crs.userFriendlyIdentifier(), QgsProjectionSelectionWidget::RecentCrs );
mCrsComboBox->setItemData( mCrsComboBox->count() - 1, QVariant( ( long long )srsid ), Qt::UserRole + 1 );
}
}
}
bool QgsProjectionSelectionWidget::crsIsShown( const long srsid ) const
{
return srsid == mLayerCrs.srsid() || srsid == mDefaultCrs.srsid() || srsid == mProjectCrs.srsid();
}
int QgsProjectionSelectionWidget::firstRecentCrsIndex() const
{
for ( int i = 0; i < mCrsComboBox->count(); ++i )
{
if ( static_cast< CrsOption >( mCrsComboBox->itemData( i ).toInt() ) == RecentCrs )
{
return i;
}
}
return -1;
}
void QgsProjectionSelectionWidget::updateTooltip()
{
QgsCoordinateReferenceSystem c = crs();
if ( c.isValid() )
setToolTip( c.toWkt( QgsCoordinateReferenceSystem::WKT2_2018, true ) );
else
setToolTip( QString() );
}
QgsMapLayer *QgsProjectionSelectionWidget::mapLayerFromMimeData( const QMimeData *data ) const
{
const QgsMimeDataUtils::UriList uriList = QgsMimeDataUtils::decodeUriList( data );
for ( const QgsMimeDataUtils::Uri &u : uriList )
{
// is this uri from the current project?
if ( QgsMapLayer *layer = u.mapLayer() )
{
return layer;
}
}
return nullptr;
}
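// Minimal usage sketch (illustrative only; MyDialog and its slot are
// assumed names):
//   QgsProjectionSelectionWidget *w = new QgsProjectionSelectionWidget( parent );
//   w->setCrs( QgsCoordinateReferenceSystem( QStringLiteral( "EPSG:4326" ) ) );
//   connect( w, &QgsProjectionSelectionWidget::crsChanged, this, &MyDialog::onCrsChanged );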
| gpl-2.0 |
gautierhattenberger/paparazzi | sw/airborne/firmwares/fixedwing/main_ap.c | 5 | 9727 | /*
* Copyright (C) 2003-2010 The Paparazzi Team
*
* This file is part of paparazzi.
*
* paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with paparazzi; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
/**
* @file firmwares/fixedwing/main_ap.c
*
* AP ( AutoPilot ) tasks
*
 * This process is responsible for collecting the different sensors' data,
* calling the appropriate estimation algorithms and running the different control loops.
*/
#define MODULES_C
#define ABI_C
#include <math.h>
#include "firmwares/fixedwing/main_ap.h"
#include "mcu.h"
#include "mcu_periph/sys_time.h"
#include "inter_mcu.h"
#include "link_mcu.h"
// Sensors
#if USE_GPS
#include "subsystems/gps.h"
#endif
#if USE_IMU
#include "subsystems/imu.h"
#endif
#if USE_AHRS
#include "subsystems/ahrs.h"
#endif
#if USE_BARO_BOARD
#include "subsystems/sensors/baro.h"
PRINT_CONFIG_MSG_VALUE("USE_BARO_BOARD is TRUE, reading onboard baro: ", BARO_BOARD)
#endif
// autopilot
#include "state.h"
#include "autopilot.h"
#include "firmwares/fixedwing/nav.h"
#include "generated/flight_plan.h"
// datalink & telemetry
#if PERIODIC_TELEMETRY
#include "subsystems/datalink/telemetry.h"
#endif
// modules & settings
#include "subsystems/settings.h"
#include "generated/modules.h"
#include "generated/settings.h"
#if defined RADIO_CONTROL || defined RADIO_CONTROL_AUTO1
#include "modules/settings/rc_settings.h"
#endif
#include "subsystems/abi.h"
#include "led.h"
#ifdef USE_NPS
#include "nps_autopilot.h"
#endif
/* Default trim commands for roll, pitch and yaw */
#ifndef COMMAND_ROLL_TRIM
#define COMMAND_ROLL_TRIM 0
#endif
#ifndef COMMAND_PITCH_TRIM
#define COMMAND_PITCH_TRIM 0
#endif
#ifndef COMMAND_YAW_TRIM
#define COMMAND_YAW_TRIM 0
#endif
/* if PRINT_CONFIG is defined, print some config options */
PRINT_CONFIG_VAR(PERIODIC_FREQUENCY)
#if !USE_GENERATED_AUTOPILOT
PRINT_CONFIG_VAR(NAVIGATION_FREQUENCY)
#endif
PRINT_CONFIG_VAR(CONTROL_FREQUENCY)
/* TELEMETRY_FREQUENCY is defined in generated/periodic_telemetry.h
* defaults to 60Hz or set by TELEMETRY_FREQUENCY configure option in airframe file
*/
#ifndef TELEMETRY_FREQUENCY
#define TELEMETRY_FREQUENCY 60
#endif
PRINT_CONFIG_VAR(TELEMETRY_FREQUENCY)
/* MODULES_FREQUENCY is defined in generated/modules.h
* according to main_freq parameter set for modules in airframe file
*/
PRINT_CONFIG_VAR(MODULES_FREQUENCY)
/* BARO_PERIODIC_FREQUENCY is defined in baro_board.makefile
* defaults to 50Hz or set by BARO_PERIODIC_FREQUENCY configure option in airframe file
*/
#if USE_BARO_BOARD
PRINT_CONFIG_VAR(BARO_PERIODIC_FREQUENCY)
#endif
#if USE_IMU
#ifdef AHRS_PROPAGATE_FREQUENCY
#if (AHRS_PROPAGATE_FREQUENCY > PERIODIC_FREQUENCY)
#warning "PERIODIC_FREQUENCY should be least equal or greater than AHRS_PROPAGATE_FREQUENCY"
INFO_VALUE("it is recommended to configure in your airframe PERIODIC_FREQUENCY to at least ", AHRS_PROPAGATE_FREQUENCY)
#endif
#endif
#endif // USE_IMU
tid_t modules_tid; ///< id for modules_periodic_task() timer
tid_t telemetry_tid; ///< id for telemetry_periodic() timer
tid_t sensors_tid; ///< id for sensors_task() timer
tid_t attitude_tid; ///< id for attitude_loop() timer
#if !USE_GENERATED_AUTOPILOT
tid_t navigation_tid; ///< id for navigation_task() timer
#endif
tid_t monitor_tid; ///< id for monitor_task() timer
#if USE_BARO_BOARD
tid_t baro_tid; ///< id for baro_periodic() timer
#endif
void init_ap(void)
{
#ifndef SINGLE_MCU /** init done in main_fbw in single MCU */
mcu_init();
#endif /* SINGLE_MCU */
/** - start interrupt task */
mcu_int_enable();
#if defined(PPRZ_TRIG_INT_COMPR_FLASH)
pprz_trig_int_init();
#endif
/****** initialize and reset state interface ********/
stateInit();
/************* Sensors initialization ***************/
#if USE_AHRS
ahrs_init();
#endif
#if USE_BARO_BOARD
baro_init();
#endif
/************* Links initialization ***************/
#if defined MCU_SPI_LINK || defined MCU_UART_LINK || defined MCU_CAN_LINK
link_mcu_init();
#endif
/************ Internal status ***************/
autopilot_init();
modules_init();
// call autopilot implementation init after guidance modules init
// it will set startup mode
#if USE_GENERATED_AUTOPILOT
autopilot_generated_init();
#else
autopilot_static_init();
#endif
settings_init();
/**** start timers for periodic functions *****/
sensors_tid = sys_time_register_timer(1. / PERIODIC_FREQUENCY, NULL);
#if !USE_GENERATED_AUTOPILOT
navigation_tid = sys_time_register_timer(1. / NAVIGATION_FREQUENCY, NULL);
#endif
attitude_tid = sys_time_register_timer(1. / CONTROL_FREQUENCY, NULL);
modules_tid = sys_time_register_timer(1. / MODULES_FREQUENCY, NULL);
telemetry_tid = sys_time_register_timer(1. / TELEMETRY_FREQUENCY, NULL);
monitor_tid = sys_time_register_timer(1.0, NULL);
#if USE_BARO_BOARD
baro_tid = sys_time_register_timer(1. / BARO_PERIODIC_FREQUENCY, NULL);
#endif
#if DOWNLINK
downlink_init();
#endif
/* set initial trim values.
* these are passed to fbw via inter_mcu.
*/
PPRZ_MUTEX_LOCK(ap_state_mtx);
ap_state->command_roll_trim = COMMAND_ROLL_TRIM;
ap_state->command_pitch_trim = COMMAND_PITCH_TRIM;
ap_state->command_yaw_trim = COMMAND_YAW_TRIM;
PPRZ_MUTEX_UNLOCK(ap_state_mtx);
#if USE_IMU
// send body_to_imu from here for now
AbiSendMsgBODY_TO_IMU_QUAT(1, orientationGetQuat_f(&imu.body_to_imu));
#endif
}
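/*
 * Poll the timers registered in init_ap() above and run the matching
 * task when one has elapsed; sys_time_check_and_ack_timer() fires once
 * per period, so each task runs at its own configured frequency.
 */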
void handle_periodic_tasks_ap(void)
{
if (sys_time_check_and_ack_timer(sensors_tid)) {
sensors_task();
}
#if USE_BARO_BOARD
if (sys_time_check_and_ack_timer(baro_tid)) {
baro_periodic();
}
#endif
#if USE_GENERATED_AUTOPILOT
if (sys_time_check_and_ack_timer(attitude_tid)) {
autopilot_periodic();
}
#else
// static autopilot
if (sys_time_check_and_ack_timer(navigation_tid)) {
navigation_task();
}
#ifndef AHRS_TRIGGERED_ATTITUDE_LOOP
if (sys_time_check_and_ack_timer(attitude_tid)) {
attitude_loop();
}
#endif
#endif
if (sys_time_check_and_ack_timer(modules_tid)) {
modules_periodic_task();
}
if (sys_time_check_and_ack_timer(monitor_tid)) {
monitor_task();
}
if (sys_time_check_and_ack_timer(telemetry_tid)) {
reporting_task();
LED_PERIODIC();
}
}
/**************************** Periodic tasks ***********************************/
/**
* Send a series of initialisation messages followed by a stream of periodic ones.
* Called at 60Hz.
*/
void reporting_task(void)
{
static uint8_t boot = true;
/* initialisation phase during boot */
if (boot) {
#if DOWNLINK
autopilot_send_version();
#endif
boot = false;
}
/* then report periodically */
else {
//PeriodicSendAp(DefaultChannel, DefaultDevice);
#if PERIODIC_TELEMETRY
periodic_telemetry_send_Ap(DefaultPeriodic, &(DefaultChannel).trans_tx, &(DefaultDevice).device);
#endif
}
}
/** Run at PERIODIC_FREQUENCY (60Hz if not defined) */
void sensors_task(void)
{
//FIXME: this is just a kludge
#if USE_AHRS && defined SITL && !USE_NPS
update_ahrs_from_sim();
#endif
}
#ifdef LOW_BATTERY_KILL_DELAY
#warning LOW_BATTERY_KILL_DELAY has been renamed to CATASTROPHIC_BAT_KILL_DELAY, please update your airframe file!
#endif
/** Maximum time allowed for catastrophic battery level before going into kill mode */
#ifndef CATASTROPHIC_BAT_KILL_DELAY
#define CATASTROPHIC_BAT_KILL_DELAY 5
#endif
/** Maximum distance from HOME waypoint before going into kill mode */
#ifndef KILL_MODE_DISTANCE
#define KILL_MODE_DISTANCE (1.5*MAX_DIST_FROM_HOME)
#endif
/** Default minimal speed for takeoff in m/s */
#ifndef MIN_SPEED_FOR_TAKEOFF
#define MIN_SPEED_FOR_TAKEOFF 5.
#endif
/** monitor stuff run at 1Hz */
void monitor_task(void)
{
if (autopilot.flight_time) {
autopilot.flight_time++;
}
#if defined DATALINK || defined SITL
datalink_time++;
#endif
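/* count consecutive seconds below the catastrophic battery level;
   monitor_task() runs at 1Hz, and the throttle is killed once
   CATASTROPHIC_BAT_KILL_DELAY such seconds have accumulated */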
static uint8_t t = 0;
if (ap_electrical.vsupply < CATASTROPHIC_BAT_LEVEL) {
t++;
} else {
t = 0;
}
#if !USE_GENERATED_AUTOPILOT
// only check for static autopilot
autopilot.kill_throttle |= (t >= CATASTROPHIC_BAT_KILL_DELAY);
autopilot.kill_throttle |= autopilot.launch && (dist2_to_home > Square(KILL_MODE_DISTANCE));
#endif
if (!autopilot.flight_time &&
stateGetHorizontalSpeedNorm_f() > MIN_SPEED_FOR_TAKEOFF) {
autopilot.flight_time = 1;
autopilot.launch = true; /* Not set in non auto launch */
#if DOWNLINK
uint16_t time_sec = sys_time.nb_sec;
DOWNLINK_SEND_TAKEOFF(DefaultChannel, DefaultDevice, &time_sec);
#endif
}
}
/*********** EVENT ***********************************************************/
void event_task_ap(void)
{
#ifndef SINGLE_MCU
/* for SINGLE_MCU done in main_fbw */
/* event functions for mcu peripherals: i2c, usb_serial.. */
mcu_event();
#endif /* SINGLE_MCU */
#if USE_BARO_BOARD
BaroEvent();
#endif
modules_event_task();
#if defined MCU_SPI_LINK || defined MCU_UART_LINK
link_mcu_event_task();
#endif
if (inter_mcu_received_fbw) {
/* receive radio control task from fbw */
inter_mcu_received_fbw = false;
autopilot_on_rc_frame();
}
autopilot_event();
} /* event_task_ap() */
| gpl-2.0 |
prpplague/RCA-DSB772WE | drivers/pci/quirks.c | 5 | 74842 | /*
* This file contains work-arounds for many known PCI hardware
* bugs. Devices present only on certain architectures (host
* bridges et cetera) should be handled in arch-specific code.
*
* Note: any quirks for hotpluggable devices must _NOT_ be declared __init.
*
* Copyright (c) 1999 Martin Mares <mj@ucw.cz>
*
* Init/reset quirks for USB host controllers should be in the
 * USB quirks file, where their drivers can access and reuse it.
*
* The bridge optimization stuff has been removed. If you really
* have a silly BIOS which is unable to set your host bridge right,
* use the PowerTweak utility (see http://powertweak.sourceforge.net).
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/dmi.h>
#include <linux/pci-aspm.h>
#include "pci.h"
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);
int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);
int pcie_mch_quirk;
EXPORT_SYMBOL(pcie_mch_quirk);
#ifdef CONFIG_PCI_QUIRKS
/* The Mellanox Tavor device gives false positive parity errors
* Mark this device with a broken_parity_status, to allow
* PCI scanning code to "skip" this now blacklisted device.
*/
static void __devinit quirk_mellanox_tavor(struct pci_dev *dev)
{
dev->broken_parity_status = 1; /* This device gives false positives */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor);
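/*
 * A note on the DECLARE_PCI_FIXUP_* macros used throughout this file:
 * they register a quirk to run for a vendor/device pair at a given
 * pass -- roughly, EARLY/HEADER during device enumeration, FINAL once
 * enumeration is complete, ENABLE when a driver enables the device,
 * and RESUME/RESUME_EARLY on wake from suspend.  PCI_ANY_ID acts as a
 * wildcard.
 */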
/* Deal with broken BIOSes that neglect to enable passive release,
which can cause problems in combination with the 82441FX/PPro MTRRs */
static void quirk_passive_release(struct pci_dev *dev)
{
struct pci_dev *d = NULL;
unsigned char dlc;
/* We have to make sure a particular bit is set in the PIIX3
ISA bridge, so we have to go out and find it. */
while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
pci_read_config_byte(d, 0x82, &dlc);
if (!(dlc & 1<<1)) {
dev_err(&d->dev, "PIIX3: Enabling Passive Release\n");
dlc |= 1<<1;
pci_write_config_byte(d, 0x82, dlc);
}
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
/* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround
but VIA don't answer queries. If you happen to have good contacts at VIA
ask them for me please -- Alan
This appears to be BIOS rather than version dependent, so presumably there is a
chipset-level fix */
static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev)
{
if (!isa_dma_bridge_buggy) {
isa_dma_bridge_buggy=1;
dev_info(&dev->dev, "Activating ISA DMA hang workarounds\n");
}
}
/*
 * It's not totally clear which chipsets are the problematic ones.
* We know 82C586 and 82C596 variants are affected.
*/
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
/*
* Chipsets where PCI->PCI transfers vanish or hang
*/
static void __devinit quirk_nopcipci(struct pci_dev *dev)
{
if ((pci_pci_problems & PCIPCI_FAIL)==0) {
dev_info(&dev->dev, "Disabling direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_FAIL;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci);
static void __devinit quirk_nopciamd(struct pci_dev *dev)
{
u8 rev;
pci_read_config_byte(dev, 0x08, &rev);
if (rev == 0x13) {
/* Erratum 24 */
dev_info(&dev->dev, "Chipset erratum: Disabling direct PCI/AGP transfers\n");
pci_pci_problems |= PCIAGP_FAIL;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd);
/*
* Triton requires workarounds to be used by the drivers
*/
static void __devinit quirk_triton(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_TRITON)==0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_TRITON;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
/*
* VIA Apollo KT133 needs PCI latency patch
* Made according to a windows driver based patch by George E. Breese
* see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
* Also see http://www.au-ja.org/review-kt133a-1-en.phtml for
* the info on which Mr Breese based his work.
*
* Updated based on further information from the site and also on
* information provided by VIA
*/
static void quirk_vialatency(struct pci_dev *dev)
{
struct pci_dev *p;
u8 busarb;
/* Ok we have a potential problem chipset here. Now see if we have
a buggy southbridge */
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
if (p!=NULL) {
/* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */
/* Check for buggy part revisions */
if (p->revision < 0x40 || p->revision > 0x42)
goto exit;
} else {
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
if (p==NULL) /* No problem parts */
goto exit;
/* Check for buggy part revisions */
if (p->revision < 0x10 || p->revision > 0x12)
goto exit;
}
/*
* Ok we have the problem. Now set the PCI master grant to
* occur every master grant. The apparent bug is that under high
* PCI load (quite common in Linux of course) you can get data
 * loss when the CPU is held off the bus for 3 bus master requests.
 * This happens to include the IDE controllers....
 *
 * VIA only apply this fix when an SB Live! is present but under
 * both Linux and Windows this isn't enough, and we have seen
* corruption without SB Live! but with things like 3 UDMA IDE
* controllers. So we ignore that bit of the VIA recommendation..
*/
pci_read_config_byte(dev, 0x76, &busarb);
/* Set bit 4 and bit 5 of byte 0x76 to 0x01:
"Master priority rotation on every PCI master grant" */
busarb &= ~(1<<5);
busarb |= (1<<4);
pci_write_config_byte(dev, 0x76, busarb);
dev_info(&dev->dev, "Applying VIA southbridge workaround\n");
exit:
pci_dev_put(p);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
/* Must restore this on a resume from RAM */
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
/*
* VIA Apollo VP3 needs ETBF on BT848/878
*/
static void __devinit quirk_viaetbf(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_VIAETBF)==0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_VIAETBF;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_viaetbf);
static void __devinit quirk_vsfx(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_VSFX)==0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_VSFX;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx);
/*
* Ali Magik requires workarounds to be used by the drivers
* that DMA to AGP space. Latency must be set to 0xA and triton
* workaround applied too
* [Info kindly provided by ALi]
*/
static void __init quirk_alimagik(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_ALIMAGIK)==0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
/*
* Natoma has some interesting boundary conditions with Zoran stuff
* at least
*/
static void __devinit quirk_natoma(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_NATOMA)==0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_NATOMA;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
/*
* This chip can cause PCI parity errors if config register 0xA0 is read
* while DMAs are occurring.
*/
static void __devinit quirk_citrine(struct pci_dev *dev)
{
dev->cfg_size = 0xA0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);
/*
* S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
* If it's needed, re-allocate the region.
*/
static void __devinit quirk_s3_64M(struct pci_dev *dev)
{
struct resource *r = &dev->resource[0];
if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) {
r->start = 0;
r->end = 0x3ffffff;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
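/*
 * Helper for the ACPI/SMBus quirks below: align the firmware-programmed
 * port to the region size, convert it from PCI bus to resource space
 * and claim it, so the decoded range shows up as reserved I/O.
 */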
static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region,
unsigned size, int nr, const char *name)
{
region &= ~(size-1);
if (region) {
struct pci_bus_region bus_region;
struct resource *res = dev->resource + nr;
res->name = pci_name(dev);
res->start = region;
res->end = region + size - 1;
res->flags = IORESOURCE_IO;
/* Convert from PCI bus to resource space. */
bus_region.start = res->start;
bus_region.end = res->end;
pcibios_bus_to_resource(dev, res, &bus_region);
pci_claim_resource(dev, nr);
dev_info(&dev->dev, "quirk: region %04x-%04x claimed by %s\n", region, region + size - 1, name);
}
}
/*
* ATI Northbridge setups MCE the processor if you even
* read somewhere between 0x3b0->0x3bb or read 0x3d3
*/
static void __devinit quirk_ati_exploding_mce(struct pci_dev *dev)
{
dev_info(&dev->dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n");
/* We must not look at these I/O locations */
request_region(0x3b0, 0x0C, "RadeonIGP");
request_region(0x3d3, 0x01, "RadeonIGP");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_exploding_mce);
/*
* Let's make the southbridge information explicit instead
* of having to worry about people probing the ACPI areas,
* for example.. (Yes, it happens, and if you read the wrong
* ACPI register it will put the machine to sleep with no
* way of waking it up again. Bummer).
*
* ALI M7101: Two IO regions pointed to by words at
* 0xE0 (64 bytes of ACPI registers)
* 0xE2 (32 bytes of SMB registers)
*/
static void __devinit quirk_ali7101_acpi(struct pci_dev *dev)
{
u16 region;
pci_read_config_word(dev, 0xE0, ®ion);
quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
pci_read_config_word(dev, 0xE2, ®ion);
quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi);
static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
{
u32 devres;
u32 mask, size, base;
pci_read_config_dword(dev, port, &devres);
if ((devres & enable) != enable)
return;
mask = (devres >> 16) & 15;
base = devres & 0xffff;
size = 16;
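/*
 * Derive the decoded window size from the 4-bit mask: keep halving
 * until the candidate bit is contained in the mask.  Illustrative
 * trace: mask == 0x3 walks 16 -> 8 -> 4 and stops once bit == 2 is
 * set in the mask, leaving size == 4.
 */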
for (;;) {
unsigned bit = size >> 1;
if ((bit & mask) == bit)
break;
size = bit;
}
/*
* For now we only print it out. Eventually we'll want to
* reserve it (at least if it's in the 0x1000+ range), but
* let's get enough confirmation reports first.
*/
base &= -size;
dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1);
}
static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
{
u32 devres;
u32 mask, size, base;
pci_read_config_dword(dev, port, &devres);
if ((devres & enable) != enable)
return;
base = devres & 0xffff0000;
mask = (devres & 0x3f) << 16;
size = 128 << 16;
for (;;) {
unsigned bit = size >> 1;
if ((bit & mask) == bit)
break;
size = bit;
}
/*
* For now we only print it out. Eventually we'll want to
* reserve it, but let's get enough confirmation reports first.
*/
base &= -size;
dev_info(&dev->dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1);
}
/*
* PIIX4 ACPI: Two IO regions pointed to by longwords at
* 0x40 (64 bytes of ACPI registers)
* 0x90 (16 bytes of SMB registers)
* and a few strange programmable PIIX4 device resources.
*/
static void __devinit quirk_piix4_acpi(struct pci_dev *dev)
{
u32 region, res_a;
pci_read_config_dword(dev, 0x40, ®ion);
quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
pci_read_config_dword(dev, 0x90, ®ion);
quirk_io_region(dev, region, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");
/* Device resource A has enables for some of the other ones */
pci_read_config_dword(dev, 0x5c, &res_a);
piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21);
piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21);
/* Device resource D is just bitfields for static resources */
/* Device 12 enabled? */
if (res_a & (1 << 29)) {
piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20);
piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7);
}
/* Device 13 enabled? */
if (res_a & (1 << 30)) {
piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20);
piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7);
}
piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20);
piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi);
/*
* ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at
* 0x40 (128 bytes of ACPI, GPIO & TCO registers)
* 0x58 (64 bytes of GPIO I/O space)
*/
static void __devinit quirk_ich4_lpc_acpi(struct pci_dev *dev)
{
u32 region;
pci_read_config_dword(dev, 0x40, ®ion);
quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH4 ACPI/GPIO/TCO");
pci_read_config_dword(dev, 0x58, ®ion);
quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH4 GPIO");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi);
static void __devinit quirk_ich6_lpc_acpi(struct pci_dev *dev)
{
u32 region;
pci_read_config_dword(dev, 0x40, ®ion);
quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH6 ACPI/GPIO/TCO");
pci_read_config_dword(dev, 0x48, ®ion);
quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich6_lpc_acpi);
/*
* VIA ACPI: One IO region pointed to by longword at
* 0x48 or 0x20 (256 bytes of ACPI registers)
*/
static void __devinit quirk_vt82c586_acpi(struct pci_dev *dev)
{
u32 region;
if (dev->revision & 0x10) {
pci_read_config_dword(dev, 0x48, ®ion);
region &= PCI_BASE_ADDRESS_IO_MASK;
quirk_io_region(dev, region, 256, PCI_BRIDGE_RESOURCES, "vt82c586 ACPI");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi);
/*
* VIA VT82C686 ACPI: Three IO region pointed to by (long)words at
* 0x48 (256 bytes of ACPI registers)
* 0x70 (128 bytes of hardware monitoring register)
* 0x90 (16 bytes of SMB registers)
*/
static void __devinit quirk_vt82c686_acpi(struct pci_dev *dev)
{
u16 hm;
u32 smb;
quirk_vt82c586_acpi(dev);
pci_read_config_word(dev, 0x70, &hm);
hm &= PCI_BASE_ADDRESS_IO_MASK;
quirk_io_region(dev, hm, 128, PCI_BRIDGE_RESOURCES + 1, "vt82c686 HW-mon");
pci_read_config_dword(dev, 0x90, &smb);
smb &= PCI_BASE_ADDRESS_IO_MASK;
quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 2, "vt82c686 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi);
/*
* VIA VT8235 ISA Bridge: Two IO regions pointed to by words at
* 0x88 (128 bytes of power management registers)
* 0xd0 (16 bytes of SMB registers)
*/
static void __devinit quirk_vt8235_acpi(struct pci_dev *dev)
{
u16 pm, smb;
pci_read_config_word(dev, 0x88, &pm);
pm &= PCI_BASE_ADDRESS_IO_MASK;
quirk_io_region(dev, pm, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
pci_read_config_word(dev, 0xd0, &smb);
smb &= PCI_BASE_ADDRESS_IO_MASK;
quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 1, "vt8235 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
#ifdef CONFIG_X86_IO_APIC
#include <asm/io_apic.h>
/*
* VIA 686A/B: If an IO-APIC is active, we need to route all on-chip
* devices to the external APIC.
*
* TODO: When we have device-specific interrupt routers,
* this code will go away from quirks.
*/
static void quirk_via_ioapic(struct pci_dev *dev)
{
u8 tmp;
if (nr_ioapics < 1)
tmp = 0; /* nothing routed to external APIC */
else
tmp = 0x1f; /* all known bits (4-0) routed to external APIC */
dev_info(&dev->dev, "%sbling VIA external APIC routing\n",
tmp == 0 ? "Disa" : "Ena");
/* Offset 0x58: External APIC IRQ output control */
pci_write_config_byte (dev, 0x58, tmp);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
/*
 * VIA 8237: Some BIOSes don't set the 'Bypass APIC De-Assert Message' bit.
 * This leads to doubled level interrupt rates.
 * Set this bit to get rid of cycle wastage.
 * Otherwise harmless.
*/
static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
{
u8 misc_control2;
#define BYPASS_APIC_DEASSERT 8
pci_read_config_byte(dev, 0x5B, &misc_control2);
if (!(misc_control2 & BYPASS_APIC_DEASSERT)) {
dev_info(&dev->dev, "Bypassing VIA 8237 APIC De-Assert Message\n");
pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
/*
* The AMD io apic can hang the box when an apic irq is masked.
 * We check all revs >= B0 (though not pre-production parts!) as the bug
 * is currently marked NoFix.
*
* We have multiple reports of hangs with this chipset that went away with
* noapic specified. For the moment we assume it's the erratum. We may be wrong
* of course. However the advice is demonstrably good even if so..
*/
static void __devinit quirk_amd_ioapic(struct pci_dev *dev)
{
if (dev->revision >= 0x02) {
dev_warn(&dev->dev, "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n");
dev_warn(&dev->dev, " : booting with the \"noapic\" option\n");
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic);
static void __init quirk_ioapic_rmw(struct pci_dev *dev)
{
if (dev->devfn == 0 && dev->bus->number == 0)
sis_apic_bug = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw);
#define AMD8131_revA0 0x01
#define AMD8131_revB0 0x11
#define AMD8131_MISC 0x40
#define AMD8131_NIOAMODE_BIT 0
static void quirk_amd_8131_ioapic(struct pci_dev *dev)
{
unsigned char tmp;
if (nr_ioapics == 0)
return;
if (dev->revision == AMD8131_revA0 || dev->revision == AMD8131_revB0) {
dev_info(&dev->dev, "Fixing up AMD8131 IOAPIC mode\n");
pci_read_config_byte( dev, AMD8131_MISC, &tmp);
tmp &= ~(1 << AMD8131_NIOAMODE_BIT);
pci_write_config_byte( dev, AMD8131_MISC, tmp);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic);
#endif /* CONFIG_X86_IO_APIC */
/*
* Some settings of MMRBC can lead to data corruption so block changes.
* See AMD 8131 HyperTransport PCI-X Tunnel Revision Guide
*/
static void __init quirk_amd_8131_mmrbc(struct pci_dev *dev)
{
if (dev->subordinate && dev->revision <= 0x12) {
dev_info(&dev->dev, "AMD8131 rev %x detected; "
"disabling PCI-X MMRBC\n", dev->revision);
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc);
/*
* FIXME: it is questionable that quirk_via_acpi
* is needed. It shows up as an ISA bridge, and does not
* support the PCI_INTERRUPT_LINE register at all. Therefore
* it seems like setting the pci_dev's 'irq' to the
* value of the ACPI SCI interrupt is only done for convenience.
* -jgarzik
*/
static void __devinit quirk_via_acpi(struct pci_dev *d)
{
/*
* VIA ACPI device: SCI IRQ line in PCI config byte 0x42
*/
u8 irq;
pci_read_config_byte(d, 0x42, &irq);
irq &= 0xf;
if (irq && (irq != 2))
d->irq = irq;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi);
/*
* VIA bridges which have VLink
*/
static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18;
static void quirk_via_bridge(struct pci_dev *dev)
{
/* See what bridge we have and find the device ranges */
switch (dev->device) {
case PCI_DEVICE_ID_VIA_82C686:
/* The VT82C686 is special, it attaches to PCI and can have
any device number. All its subdevices are functions of
that single device. */
via_vlink_dev_lo = PCI_SLOT(dev->devfn);
via_vlink_dev_hi = PCI_SLOT(dev->devfn);
break;
case PCI_DEVICE_ID_VIA_8237:
case PCI_DEVICE_ID_VIA_8237A:
via_vlink_dev_lo = 15;
break;
case PCI_DEVICE_ID_VIA_8235:
via_vlink_dev_lo = 16;
break;
case PCI_DEVICE_ID_VIA_8231:
case PCI_DEVICE_ID_VIA_8233_0:
case PCI_DEVICE_ID_VIA_8233A:
case PCI_DEVICE_ID_VIA_8233C_0:
via_vlink_dev_lo = 17;
break;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233_0, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233A, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233C_0, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A, quirk_via_bridge);
/**
* quirk_via_vlink - VIA VLink IRQ number update
* @dev: PCI device
*
* If the device we are dealing with is on a PIC IRQ we need to
 * ensure that the IRQ line register, which usually is not relevant
* for PCI cards, is actually written so that interrupts get sent
* to the right place.
* We only do this on systems where a VIA south bridge was detected,
* and only for VIA devices on the motherboard (see quirk_via_bridge
* above).
*/
static void quirk_via_vlink(struct pci_dev *dev)
{
u8 irq, new_irq;
/* Check if we have VLink at all */
if (via_vlink_dev_lo == -1)
return;
new_irq = dev->irq;
/* Don't quirk interrupts outside the legacy IRQ range */
if (!new_irq || new_irq > 15)
return;
/* Internal device ? */
if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) > via_vlink_dev_hi ||
PCI_SLOT(dev->devfn) < via_vlink_dev_lo)
return;
/* This is an internal VLink device on a PIC interrupt. The BIOS
ought to have set this but may not have, so we redo it */
pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
if (new_irq != irq) {
dev_info(&dev->dev, "VIA VLink IRQ fixup, from %d to %d\n",
irq, new_irq);
udelay(15); /* unknown if delay really needed */
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink);
/*
* VIA VT82C598 has its device ID settable and many BIOSes
* set it to the ID of VT82C597 for backward compatibility.
* We need to switch it off to be able to recognize the real
* type of the chip.
*/
static void __devinit quirk_vt82c598_id(struct pci_dev *dev)
{
pci_write_config_byte(dev, 0xfc, 0);
pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id);
/*
* CardBus controllers have a legacy base address that enables them
* to respond as i82365 pcmcia controllers. We don't want them to
* do this even if the Linux CardBus driver is not loaded, because
* the Linux i82365 driver does not (and should not) handle CardBus.
*/
static void quirk_cardbus_legacy(struct pci_dev *dev)
{
if ((PCI_CLASS_BRIDGE_CARDBUS << 8) ^ dev->class)
return;
pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy);
/*
* Following the PCI ordering rules is optional on the AMD762. I'm not
* sure what the designers were smoking but let's not inhale...
*
 * To be fair to AMD, it follows the spec by default; it's BIOS people
* who turn it off!
*/
static void quirk_amd_ordering(struct pci_dev *dev)
{
u32 pcic;
pci_read_config_dword(dev, 0x4C, &pcic);
if ((pcic&6)!=6) {
pcic |= 6;
dev_warn(&dev->dev, "BIOS failed to enable PCI standards compliance; fixing this error\n");
pci_write_config_dword(dev, 0x4C, pcic);
pci_read_config_dword(dev, 0x84, &pcic);
pcic |= (1<<23); /* Required in this mode */
pci_write_config_dword(dev, 0x84, pcic);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
/*
* DreamWorks provided workaround for Dunord I-3000 problem
*
* This card decodes and responds to addresses not apparently
* assigned to it. We force a larger allocation to ensure that
* nothing gets put too close to it.
*/
static void __devinit quirk_dunord ( struct pci_dev * dev )
{
struct resource *r = &dev->resource [1];
r->start = 0;
r->end = 0xffffff;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord);
/*
* i82380FB mobile docking controller: its PCI-to-PCI bridge
* is subtractive decoding (transparent), and does indicate this
* in the ProgIf. Unfortunately, the ProgIf value is wrong - 0x80
* instead of 0x01.
*/
static void __devinit quirk_transparent_bridge(struct pci_dev *dev)
{
dev->transparent = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk_transparent_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge);
/*
* Common misconfiguration of the MediaGX/Geode PCI master that will
* reduce PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1
* datasheets found at http://www.national.com/ds/GX for info on what
* these bits do. <christer@weinigel.se>
*/
static void quirk_mediagx_master(struct pci_dev *dev)
{
u8 reg;
pci_read_config_byte(dev, 0x41, &reg);
if (reg & 2) {
reg &= ~2;
dev_info(&dev->dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n", reg);
pci_write_config_byte(dev, 0x41, reg);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
/*
* Ensure C0 rev restreaming is off. This is normally done by the
* BIOS but in the odd case that it is not, the result is data
* corruption, hence the presence of a Linux check.
*/
static void quirk_disable_pxb(struct pci_dev *pdev)
{
u16 config;
if (pdev->revision != 0x04) /* Only C0 requires this */
return;
pci_read_config_word(pdev, 0x40, &config);
if (config & (1<<6)) {
config &= ~(1<<6);
pci_write_config_word(pdev, 0x40, config);
dev_info(&pdev->dev, "C0 revision 450NX. Disabling PCI restreaming\n");
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev)
{
/* set sb600/sb700/sb800 sata to ahci mode */
u8 tmp;
pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
if (tmp == 0x01) {
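/*
* A subclass byte of 0x01 means the part came up in IDE mode.
* Bit 0 of config offset 0x40 appears to act as a write-enable
* for the class registers: with it set, the standard prog-if
* byte (0x09) is switched to 1 (AHCI) and the subclass byte
* (0x0a) to 6 (SATA), then the original 0x40 value is restored.
*/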
pci_read_config_byte(pdev, 0x40, &tmp);
pci_write_config_byte(pdev, 0x40, tmp|1);
pci_write_config_byte(pdev, 0x9, 1);
pci_write_config_byte(pdev, 0xa, 6);
pci_write_config_byte(pdev, 0x40, tmp);
pdev->class = PCI_CLASS_STORAGE_SATA_AHCI;
dev_info(&pdev->dev, "set SATA to AHCI mode\n");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
/*
* Serverworks CSB5 IDE does not fully support native mode
*/
static void __devinit quirk_svwks_csb5ide(struct pci_dev *pdev)
{
u8 prog;
pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
if (prog & 5) {
prog &= ~5;
pdev->class &= ~5;
pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
/* PCI layer will sort out resources */
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide);
/*
* Intel 82801CAM ICH3-M datasheet says IDE modes must be the same
*/
static void __init quirk_ide_samemode(struct pci_dev *pdev)
{
u8 prog;
pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) {
dev_info(&pdev->dev, "IDE mode mismatch; forcing legacy mode\n");
prog &= ~5;
pdev->class &= ~5;
pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
/*
* Some ATA devices break if put into D3
*/
static void __devinit quirk_no_ata_d3(struct pci_dev *pdev)
{
/* Quirk the legacy ATA devices only. The AHCI ones are ok */
if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE)
pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID, quirk_no_ata_d3);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID, quirk_no_ata_d3);
/* This was originally an Alpha-specific thing, but it really fits here.
* The i82375 PCI/EISA bridge appears as non-classified. Fix that.
*/
static void __init quirk_eisa_bridge(struct pci_dev *dev)
{
dev->class = PCI_CLASS_BRIDGE_EISA << 8;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge);
/*
* On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
* is not activated. The myth is that Asus said that they do not want the
* users to be irritated by just another PCI Device in the Win98 device
* manager. (see the file prog/hotplug/README.p4b in the lm_sensors
* package 2.7.0 for details)
*
* The SMBus PCI Device can be activated by setting a bit in the ICH LPC
* bridge. Unfortunately, this device has no subvendor/subdevice ID. So it
* becomes necessary to do this tweak in two steps -- the chosen trigger
* is either the Host bridge (preferred) or on-board VGA controller.
*
* Note that we used to unhide the SMBus that way on Toshiba laptops
* (Satellite A40 and Tecra M2) but then found that the thermal management
* was done by SMM code, which could cause unsynchronized concurrent
* accesses to the SMBus registers, with potentially bad effects. Thus you
* should be very careful when adding new entries: if SMM is accessing the
* Intel SMBus, this is a very good reason to leave it hidden.
*
* Likewise, many recent laptops use ACPI for thermal management. If the
* ACPI DSDT code accesses the SMBus, then Linux should not access it
* natively, and keeping the SMBus hidden is the right thing to do. If you
* are about to add an entry in the table below, please first disassemble
* the DSDT and double-check that there is no code accessing the SMBus.
*/
static int asus_hides_smbus;
static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
{
if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB)
switch(dev->subsystem_device) {
case 0x8025: /* P4B-LX */
case 0x8070: /* P4B */
case 0x8088: /* P4B533 */
case 0x1626: /* L3C notebook */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB)
switch(dev->subsystem_device) {
case 0x80b1: /* P4GE-V */
case 0x80b2: /* P4PE */
case 0x8093: /* P4B533-V */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB)
switch(dev->subsystem_device) {
case 0x8030: /* P4T533 */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_7205_0)
switch (dev->subsystem_device) {
case 0x8070: /* P4G8X Deluxe */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH)
switch (dev->subsystem_device) {
case 0x80c9: /* PU-DLS */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
switch (dev->subsystem_device) {
case 0x1751: /* M2N notebook */
case 0x1821: /* M5N notebook */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
switch (dev->subsystem_device) {
case 0x184b: /* W1N notebook */
case 0x186a: /* M6Ne notebook */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
switch (dev->subsystem_device) {
case 0x80f2: /* P4P800-X */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
switch (dev->subsystem_device) {
case 0x1882: /* M6V notebook */
case 0x1977: /* A6VA notebook */
asus_hides_smbus = 1;
}
} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
switch(dev->subsystem_device) {
case 0x088C: /* HP Compaq nc8000 */
case 0x0890: /* HP Compaq nc6000 */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
switch (dev->subsystem_device) {
case 0x12bc: /* HP D330L */
case 0x12bd: /* HP D530 */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
switch (dev->subsystem_device) {
case 0x12bf: /* HP xw4100 */
asus_hides_smbus = 1;
}
} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
switch(dev->subsystem_device) {
case 0xC00C: /* Samsung P35 notebook */
asus_hides_smbus = 1;
}
} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
switch(dev->subsystem_device) {
case 0x0058: /* Compaq Evo N620c */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82810_IG3)
switch(dev->subsystem_device) {
case 0xB16C: /* Compaq Deskpro EP 401963-001 (PCA# 010174) */
/* Motherboard doesn't have Host bridge
* subvendor/subdevice IDs, therefore checking
* its on-board VGA controller */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_IG)
switch(dev->subsystem_device) {
case 0x00b8: /* Compaq Evo D510 CMT */
case 0x00b9: /* Compaq Evo D510 SFF */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
switch (dev->subsystem_device) {
case 0x001A: /* Compaq Deskpro EN SSF P667 815E */
/* Motherboard doesn't have host bridge
* subvendor/subdevice IDs, therefore checking
* its on-board VGA controller */
asus_hides_smbus = 1;
}
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_IG, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge);
static void asus_hides_smbus_lpc(struct pci_dev *dev)
{
u16 val;
if (likely(!asus_hides_smbus))
return;
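/*
* Bit 3 of the LPC bridge register at 0xF2 (a function-disable
* style register on these ICH parts) hides the SMBus function;
* clearing it should make the device visible again.
*/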
pci_read_config_word(dev, 0xF2, &val);
if (val & 0x8) {
pci_write_config_word(dev, 0xF2, val & (~0x8));
pci_read_config_word(dev, 0xF2, &val);
if (val & 0x8)
dev_info(&dev->dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n", val);
else
dev_info(&dev->dev, "Enabled i801 SMBus device\n");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
/* It appears we just have one such device. If not, the WARN_ON() below will fire */
static void __iomem *asus_rcba_base;
static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
{
u32 rcba;
if (likely(!asus_hides_smbus))
return;
WARN_ON(asus_rcba_base);
pci_read_config_dword(dev, 0xF0, &rcba);
/* use bits 31:14, 16 kB aligned */
asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000);
if (asus_rcba_base == NULL)
return;
}
static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
{
u32 val;
if (likely(!asus_hides_smbus || !asus_rcba_base))
return;
/* read the Function Disable register, dword mode only */
val = readl(asus_rcba_base + 0x3418);
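/* 0xFFFFFFF7 clears bit 3 of the Function Disable register, which
* is the bit hiding the SMBus function. */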
writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418); /* enable the SMBus device */
}
static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
{
if (likely(!asus_hides_smbus || !asus_rcba_base))
return;
iounmap(asus_rcba_base);
asus_rcba_base = NULL;
dev_info(&dev->dev, "Enabled ICH6/i801 SMBus device\n");
}
static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
{
asus_hides_smbus_lpc_ich6_suspend(dev);
asus_hides_smbus_lpc_ich6_resume_early(dev);
asus_hides_smbus_lpc_ich6_resume(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6);
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early);
/*
* SiS 96x south bridge: BIOS typically hides SMBus device...
*/
static void quirk_sis_96x_smbus(struct pci_dev *dev)
{
u8 val = 0;
pci_read_config_byte(dev, 0x77, &val);
if (val & 0x10) {
dev_info(&dev->dev, "Enabling SiS 96x SMBus\n");
pci_write_config_byte(dev, 0x77, val & ~0x10);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
/*
* ... This is further complicated by the fact that some SiS96x south
* bridges pretend to be 85C503/5513 instead. In that case see if we
* spotted a compatible north bridge to make sure.
* (pci_find_device doesn't work yet)
*
* We can also enable the sis96x bit in the discovery register..
*/
#define SIS_DETECT_REGISTER 0x40
static void quirk_sis_503(struct pci_dev *dev)
{
u8 reg;
u16 devid;
pci_read_config_byte(dev, SIS_DETECT_REGISTER, &reg);
pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6));
pci_read_config_word(dev, PCI_DEVICE_ID, &devid);
if (((devid & 0xfff0) != 0x0960) && (devid != 0x0018)) {
pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg);
return;
}
/*
* Ok, it now shows up as a 96x.. run the 96x quirk by
* hand in case it has already been processed.
* (depends on link order, which is apparently not guaranteed)
*/
dev->device = devid;
quirk_sis_96x_smbus(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
/*
* On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller
* and MC97 modem controller are disabled when a second PCI soundcard is
* present. This patch, tweaking the VT8237 ISA bridge, enables them.
* -- bjd
*/
static void asus_hides_ac97_lpc(struct pci_dev *dev)
{
u8 val;
int asus_hides_ac97 = 0;
if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
if (dev->device == PCI_DEVICE_ID_VIA_8237)
asus_hides_ac97 = 1;
}
if (!asus_hides_ac97)
return;
pci_read_config_byte(dev, 0x50, &val);
if (val & 0xc0) {
pci_write_config_byte(dev, 0x50, val & (~0xc0));
pci_read_config_byte(dev, 0x50, &val);
if (val & 0xc0)
dev_info(&dev->dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n", val);
else
dev_info(&dev->dev, "Enabled onboard AC97/MC97 devices\n");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
/*
* If we are using libata we can drive this chip properly but must
* do this early on to make the additional device appear during
* the PCI scanning.
*/
static void quirk_jmicron_ata(struct pci_dev *pdev)
{
u32 conf1, conf5, class;
u8 hdr;
/* Only poke fn 0 */
if (PCI_FUNC(pdev->devfn))
return;
pci_read_config_dword(pdev, 0x40, &conf1);
pci_read_config_dword(pdev, 0x80, &conf5);
conf1 &= ~0x00CFF302; /* Clear bit 1, 8, 9, 12-19, 22, 23 */
conf5 &= ~(1 << 24); /* Clear bit 24 */
switch (pdev->device) {
case PCI_DEVICE_ID_JMICRON_JMB360:
/* The controller should be in single function ahci mode */
conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */
break;
case PCI_DEVICE_ID_JMICRON_JMB365:
case PCI_DEVICE_ID_JMICRON_JMB366:
/* Redirect IDE second PATA port to the right spot */
conf5 |= (1 << 24);
/* Fall through */
case PCI_DEVICE_ID_JMICRON_JMB361:
case PCI_DEVICE_ID_JMICRON_JMB363:
/* Enable dual function mode, AHCI on fn 0, IDE fn1 */
/* Set the class codes correctly and then direct IDE 0 */
conf1 |= 0x00C2A1B3; /* Set 0, 1, 4, 5, 7, 8, 13, 15, 17, 22, 23 */
break;
case PCI_DEVICE_ID_JMICRON_JMB368:
/* The controller should be in single function IDE mode */
conf1 |= 0x00C00000; /* Set 22, 23 */
break;
}
pci_write_config_dword(pdev, 0x40, conf1);
pci_write_config_dword(pdev, 0x80, conf5);
/* Update pdev accordingly */
pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr);
pdev->hdr_type = hdr & 0x7f;
pdev->multifunction = !!(hdr & 0x80);
pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class);
pdev->class = class >> 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
#endif
#ifdef CONFIG_X86_IO_APIC
static void __init quirk_alder_ioapic(struct pci_dev *pdev)
{
int i;
if ((pdev->class >> 8) != 0xff00)
return;
/* the first BAR is the location of the IO APIC...we must
* not touch this (and it's already covered by the fixmap), so
* forcibly insert it into the resource tree */
if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0))
insert_resource(&iomem_resource, &pdev->resource[0]);
/* The next five BARs all seem to be rubbish, so just clean
* them out */
for (i=1; i < 6; i++) {
memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
#endif
static void __devinit quirk_pcie_mch(struct pci_dev *pdev)
{
pcie_mch_quirk = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch);
/*
* It's possible for the MSI to get corrupted if SHPC and ACPI
* are used together on certain PXH-based systems.
*/
static void __devinit quirk_pcie_pxh(struct pci_dev *dev)
{
pci_msi_off(dev);
dev->no_msi = 1;
dev_warn(&dev->dev, "PXH quirk detected; SHPC device MSI disabled\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh);
/*
* Some Intel PCI Express chipsets have trouble with downstream
* device power management.
*/
static void quirk_intel_pcie_pm(struct pci_dev * dev)
{
pci_pm_d3_delay = 120;
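/* pci_pm_d3_delay is in milliseconds; the default is the 10 ms
* PCI spec minimum, so 120 ms is deliberately generous. */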
dev->no_d1d2 = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2601, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2602, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2603, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2604, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2605, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2606, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2607, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2608, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
/*
* Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size
* but the PIO transfers won't work if BAR0 falls at the odd 8 bytes.
* Re-allocate the region if needed...
*/
static void __init quirk_tc86c001_ide(struct pci_dev *dev)
{
struct resource *r = &dev->resource[0];
if (r->start & 0x8) {
r->start = 0;
r->end = 0xf;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE,
quirk_tc86c001_ide);
static void __devinit quirk_netmos(struct pci_dev *dev)
{
unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
unsigned int num_serial = dev->subsystem_device & 0xf;
/*
* These Netmos parts are multiport serial devices with optional
* parallel ports. Even when parallel ports are present, they
* are identified as class SERIAL, which means the serial driver
* will claim them. To prevent this, mark them as class OTHER.
* These combo devices should be claimed by parport_serial.
*
* The subdevice ID is of the form 0x00PS, where <P> is the number
* of parallel ports and <S> is the number of serial ports.
*/
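/*
* Worked example (hypothetical ID, not a known board): a subsystem
* device of 0x0023 gives (0x23 & 0xf0) >> 4 = 2 parallel ports and
* 0x23 & 0xf = 3 serial ports.
*/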
switch (dev->device) {
case PCI_DEVICE_ID_NETMOS_9735:
case PCI_DEVICE_ID_NETMOS_9745:
case PCI_DEVICE_ID_NETMOS_9835:
case PCI_DEVICE_ID_NETMOS_9845:
case PCI_DEVICE_ID_NETMOS_9855:
if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_SERIAL &&
num_parallel) {
dev_info(&dev->dev, "Netmos %04x (%u parallel, "
"%u serial); changing class SERIAL to OTHER "
"(use parport_serial)\n",
dev->device, num_parallel, num_serial);
dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) |
(dev->class & 0xff);
}
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos);
static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
{
u16 command, pmcsr;
u8 __iomem *csr;
u8 cmd_hi;
int pm;
switch (dev->device) {
/* PCI IDs taken from drivers/net/e100.c */
case 0x1029:
case 0x1030 ... 0x1034:
case 0x1038 ... 0x103E:
case 0x1050 ... 0x1057:
case 0x1059:
case 0x1064 ... 0x106B:
case 0x1091 ... 0x1095:
case 0x1209:
case 0x1229:
case 0x2449:
case 0x2459:
case 0x245D:
case 0x27DC:
break;
default:
return;
}
/*
* Some firmware hands off the e100 with interrupts enabled,
* which can cause a flood of interrupts if packets are
* received before the driver attaches to the device. So
* disable all e100 interrupts here. The driver will
* re-enable them when it's ready.
*/
pci_read_config_word(dev, PCI_COMMAND, &command);
if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0))
return;
/*
* Check that the device is in the D0 power state. If it's not,
* there is no point in looking any further.
*/
pm = pci_find_capability(dev, PCI_CAP_ID_PM);
if (pm) {
pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0)
return;
}
/* Convert from PCI bus to resource space. */
csr = ioremap(pci_resource_start(dev, 0), 8);
if (!csr) {
dev_warn(&dev->dev, "Can't map e100 registers\n");
return;
}
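/*
* csr + 3 is the high byte of the SCB command word; zero there
* means the interrupt mask bits are clear (interrupts enabled),
* and writing 1 sets the mask bit to silence the chip until the
* driver takes over.
*/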
cmd_hi = readb(csr + 3);
if (cmd_hi == 0) {
dev_warn(&dev->dev, "Firmware left e100 interrupts enabled; "
"disabling\n");
writeb(1, csr + 3);
}
iounmap(csr);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_e100_interrupt);
/*
* The 82575 and 82598 may experience data corruption issues when
* transitioning out of L0s. To prevent this we need to disable L0s
* on the PCIe link.
*/
static void __devinit quirk_disable_aspm_l0s(struct pci_dev *dev)
{
dev_info(&dev->dev, "Disabling L0s\n");
pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
static void __devinit fixup_rev1_53c810(struct pci_dev* dev)
{
/* rev 1 ncr53c810 chips don't set the class at all, which means
* they don't get their resources remapped. Fix that here.
*/
if (dev->class == PCI_CLASS_NOT_DEFINED) {
dev_info(&dev->dev, "NCR 53c810 rev 1 detected; setting PCI class\n");
dev->class = PCI_CLASS_STORAGE_SCSI;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
/* Enable 1k I/O space granularity on the Intel P64H2 */
static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev)
{
u16 en1k;
u8 io_base_lo, io_limit_lo;
unsigned long base, limit;
struct resource *res = dev->resource + PCI_BRIDGE_RESOURCES;
pci_read_config_word(dev, 0x40, &en1k);
if (en1k & 0x200) {
dev_info(&dev->dev, "Enable I/O Space to 1KB granularity\n");
pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
base = (io_base_lo & (PCI_IO_RANGE_MASK | 0x0c)) << 8;
limit = (io_limit_lo & (PCI_IO_RANGE_MASK | 0x0c)) << 8;
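/*
* Worked example (made-up register values): io_base_lo = 0x24 and
* io_limit_lo = 0x2c give base = 0x2400 and limit = 0x2c00, so
* the window below becomes 0x2400-0x2fff -- 1KB granularity
* instead of the usual 4KB.
*/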
if (base <= limit) {
res->start = base;
res->end = limit + 0x3ff;
}
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
/* Fix the IOBL_ADR for 1k I/O space granularity on the Intel P64H2
* The IOBL_ADR gets re-written to 4k boundaries in pci_setup_bridge()
* in drivers/pci/setup-bus.c
*/
static void __devinit quirk_p64h2_1k_io_fix_iobl(struct pci_dev *dev)
{
u16 en1k, iobl_adr, iobl_adr_1k;
struct resource *res = dev->resource + PCI_BRIDGE_RESOURCES;
pci_read_config_word(dev, 0x40, &en1k);
if (en1k & 0x200) {
pci_read_config_word(dev, PCI_IO_BASE, &iobl_adr);
iobl_adr_1k = iobl_adr | (res->start >> 8) | (res->end & 0xfc00);
if (iobl_adr != iobl_adr_1k) {
dev_info(&dev->dev, "Fixing P64H2 IOBL_ADR from 0x%x to 0x%x for 1KB granularity\n",
iobl_adr, iobl_adr_1k);
pci_write_config_word(dev, PCI_IO_BASE, iobl_adr_1k);
}
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io_fix_iobl);
/* Under some circumstances, AER is not linked with extended capabilities.
* Force it to be linked by setting the corresponding control bit in the
* config space.
*/
static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
{
uint8_t b;
if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
if (!(b & 0x20)) {
pci_write_config_byte(dev, 0xf41, b | 0x20);
dev_info(&dev->dev,
"Linking AER extended capability\n");
}
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
quirk_nvidia_ck804_pcie_aer_ext_cap);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
quirk_nvidia_ck804_pcie_aer_ext_cap);
static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
{
/*
* Disable PCI Bus Parking and PCI Master read caching on CX700
* which causes unspecified timing errors with a VT6212L on the PCI
* bus leading to USB2.0 packet loss. The defaults are that these
* features are turned off but some BIOSes turn them on.
*/
uint8_t b;
if (pci_read_config_byte(dev, 0x76, &b) == 0) {
if (b & 0x40) {
/* Turn off PCI Bus Parking */
pci_write_config_byte(dev, 0x76, b ^ 0x40);
dev_info(&dev->dev,
"Disabling VIA CX700 PCI parking\n");
}
}
if (pci_read_config_byte(dev, 0x72, &b) == 0) {
if (b != 0) {
/* Turn off PCI Master read caching */
pci_write_config_byte(dev, 0x72, 0x0);
/* Set PCI Master Bus time-out to "1x16 PCLK" */
pci_write_config_byte(dev, 0x75, 0x1);
/* Disable "Read FIFO Timer" */
pci_write_config_byte(dev, 0x77, 0x0);
dev_info(&dev->dev,
"Disabling VIA CX700 PCI caching\n");
}
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching);
/*
* For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
* VPD end tag will hang the device. This problem was initially
* observed when a vpd entry was created in sysfs
* ('/sys/bus/pci/devices/<id>/vpd'). A read to this sysfs entry
* will dump 32k of data. Reading a full 32k will cause an access
* beyond the VPD end tag causing the device to hang. Once the device
* is hung, the bnx2 driver will not be able to reset the device.
* We believe that it is legal to read beyond the end tag and
* therefore the solution is to limit the read/write length.
*/
static void __devinit quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
{
/*
* Only disable the VPD capability for 5706, 5706S, 5708,
* 5708S and 5709 rev. A
*/
if ((dev->device == PCI_DEVICE_ID_NX2_5706) ||
(dev->device == PCI_DEVICE_ID_NX2_5706S) ||
(dev->device == PCI_DEVICE_ID_NX2_5708) ||
(dev->device == PCI_DEVICE_ID_NX2_5708S) ||
((dev->device == PCI_DEVICE_ID_NX2_5709) &&
(dev->revision & 0xf0) == 0x0)) {
if (dev->vpd)
dev->vpd->len = 0x80;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5706,
quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5706S,
quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5708,
quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5708S,
quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5709,
quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5709S,
quirk_brcm_570x_limit_vpd);
#ifdef CONFIG_PCI_MSI
/* Some chipsets do not support MSI. We cannot easily rely on setting
* PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
* some other busses controlled by the chipset even if Linux is not
* aware of it. Instead of setting the flag on all busses in the
* machine, simply disable MSI globally.
*/
static void __init quirk_disable_all_msi(struct pci_dev *dev)
{
pci_no_msi();
dev_warn(&dev->dev, "MSI quirk detected; MSI disabled\n");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
/* Disable MSI on chipsets that are known to not support it */
static void __devinit quirk_disable_msi(struct pci_dev *dev)
{
if (dev->subordinate) {
dev_warn(&dev->dev, "MSI quirk detected; "
"subordinate MSI disabled\n");
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
/* Go through the list of Hypertransport capabilities and
* return 1 if a HT MSI capability is found and enabled */
static int __devinit msi_ht_cap_enabled(struct pci_dev *dev)
{
int pos, ttl = 48;
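/* ttl bounds the walk so a malformed, looping capability list
* cannot hang us; 48 matches the PCI core's own capability-walk
* limit. */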
pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
while (pos && ttl--) {
u8 flags;
if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
&flags) == 0) {
dev_info(&dev->dev, "Found %s HT MSI Mapping\n",
flags & HT_MSI_FLAGS_ENABLE ?
"enabled" : "disabled");
return (flags & HT_MSI_FLAGS_ENABLE) != 0;
}
pos = pci_find_next_ht_capability(dev, pos,
HT_CAPTYPE_MSI_MAPPING);
}
return 0;
}
/* Check the hypertransport MSI mapping to know whether MSI is enabled or not */
static void __devinit quirk_msi_ht_cap(struct pci_dev *dev)
{
if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
dev_warn(&dev->dev, "MSI quirk detected; "
"subordinate MSI disabled\n");
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
quirk_msi_ht_cap);
/* The nVidia CK804 chipset may have 2 HT MSI mappings.
* MSI is supported if the MSI capability is set in any of these mappings.
*/
static void __devinit quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
{
struct pci_dev *pdev;
if (!dev->subordinate)
return;
/* Check the HT MSI cap on this chipset and the root one.
* A single one having MSI enabled is enough to be sure that MSI is
* supported.
*/
pdev = pci_get_slot(dev->bus, 0);
if (!pdev)
return;
if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) {
dev_warn(&dev->dev, "MSI quirk detected; "
"subordinate MSI disabled\n");
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}
pci_dev_put(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
quirk_nvidia_ck804_msi_ht_cap);
/* Force enable MSI mapping capability on HT bridges */
static void __devinit ht_enable_msi_mapping(struct pci_dev *dev)
{
int pos, ttl = 48;
pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
while (pos && ttl--) {
u8 flags;
if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
&flags) == 0) {
dev_info(&dev->dev, "Enabling HT MSI Mapping\n");
pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
flags | HT_MSI_FLAGS_ENABLE);
}
pos = pci_find_next_ht_capability(dev, pos,
HT_CAPTYPE_MSI_MAPPING);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
ht_enable_msi_mapping);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
ht_enable_msi_mapping);
/* The P5N32-SLI Premium motherboard from Asus has a problem with MSI
* for the MCP55 NIC. It is not yet determined whether the problem
* also affects other devices. For now, turn off MSI for this device.
*/
static void __devinit nvenet_msi_disable(struct pci_dev *dev)
{
if (dmi_name_in_vendors("P5N32-SLI PREMIUM")) {
dev_info(&dev->dev,
"Disabling msi for MCP55 NIC on P5N32-SLI Premium\n");
dev->no_msi = 1;
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_NVENET_15,
nvenet_msi_disable);
static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
{
struct pci_dev *host_bridge;
int pos, ttl = 48;
/*
* HT MSI mapping should be disabled on devices that are below
* a non-Hypertransport host bridge. Locate the host bridge...
*/
host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
if (host_bridge == NULL) {
dev_warn(&dev->dev,
"nv_msi_ht_cap_quirk didn't locate host bridge\n");
return;
}
pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
if (pos != 0) {
/* Host bridge is to HT */
ht_enable_msi_mapping(dev);
return;
}
/* Host bridge is not to HT, disable HT MSI mapping on this device */
pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
while (pos && ttl--) {
u8 flags;
if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
&flags) == 0) {
dev_info(&dev->dev, "Disabling HT MSI mapping");
pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
flags & ~HT_MSI_FLAGS_ENABLE);
}
pos = pci_find_next_ht_capability(dev, pos,
HT_CAPTYPE_MSI_MAPPING);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk);
static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev)
{
dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
}
static void __devinit quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
{
struct pci_dev *p;
/* The SB700 MSI issue will be fixed at the hardware level in
* revision A21; we need to check the PCI revision ID of the
* SMBus controller to determine the SB700 revision.
*/
p = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
NULL);
if (!p)
return;
if ((p->revision < 0x3B) && (p->revision >= 0x30))
dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
pci_dev_put(p);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5780,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5780S,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5714,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5714S,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5715,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5715S,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4390,
quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4391,
quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4392,
quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4393,
quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4394,
quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4373,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
quirk_msi_intx_disable_bug);
#endif /* CONFIG_PCI_MSI */
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end)
{
while (f < end) {
if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) &&
(f->device == dev->device || f->device == (u16) PCI_ANY_ID)) {
dev_dbg(&dev->dev, "calling %pF\n", f->hook);
f->hook(dev);
}
f++;
}
}
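/*
* Usage sketch (illustrative only; 0x1234/0x5678 are made-up IDs,
* not a real device): a new quirk is a plain function plus one
* DECLARE_PCI_FIXUP_* line selecting the pass:
*
* static void quirk_example(struct pci_dev *dev)
* {
* dev_info(&dev->dev, "example quirk ran\n");
* }
* DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_example);
*
* The macro drops a struct pci_fixup entry into the linker section
* that pci_do_fixups() above walks for the corresponding pass.
*/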
extern struct pci_fixup __start_pci_fixups_early[];
extern struct pci_fixup __end_pci_fixups_early[];
extern struct pci_fixup __start_pci_fixups_header[];
extern struct pci_fixup __end_pci_fixups_header[];
extern struct pci_fixup __start_pci_fixups_final[];
extern struct pci_fixup __end_pci_fixups_final[];
extern struct pci_fixup __start_pci_fixups_enable[];
extern struct pci_fixup __end_pci_fixups_enable[];
extern struct pci_fixup __start_pci_fixups_resume[];
extern struct pci_fixup __end_pci_fixups_resume[];
extern struct pci_fixup __start_pci_fixups_resume_early[];
extern struct pci_fixup __end_pci_fixups_resume_early[];
extern struct pci_fixup __start_pci_fixups_suspend[];
extern struct pci_fixup __end_pci_fixups_suspend[];
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
{
struct pci_fixup *start, *end;
switch(pass) {
case pci_fixup_early:
start = __start_pci_fixups_early;
end = __end_pci_fixups_early;
break;
case pci_fixup_header:
start = __start_pci_fixups_header;
end = __end_pci_fixups_header;
break;
case pci_fixup_final:
start = __start_pci_fixups_final;
end = __end_pci_fixups_final;
break;
case pci_fixup_enable:
start = __start_pci_fixups_enable;
end = __end_pci_fixups_enable;
break;
case pci_fixup_resume:
start = __start_pci_fixups_resume;
end = __end_pci_fixups_resume;
break;
case pci_fixup_resume_early:
start = __start_pci_fixups_resume_early;
end = __end_pci_fixups_resume_early;
break;
case pci_fixup_suspend:
start = __start_pci_fixups_suspend;
end = __end_pci_fixups_suspend;
break;
default:
/* stupid compiler warning, you would think with an enum... */
return;
}
pci_do_fixups(dev, start, end);
}
#else
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {}
#endif
EXPORT_SYMBOL(pci_fixup_device);
| gpl-2.0 |
Kevindeving/android_kernel_lge_gee | net/ipv4/tcp.c | 5 | 95033 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Implementation of the Transmission Control Protocol(TCP).
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Mark Evans, <evansmp@uhura.aston.ac.uk>
* Corey Minyard <wf-rch!minyard@relay.EU.net>
* Florian La Roche, <flla@stud.uni-sb.de>
* Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
* Linus Torvalds, <torvalds@cs.helsinki.fi>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
* Matthew Dillon, <dillon@apollo.west.oic.com>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Jorge Cwik, <jorge@laser.satlink.net>
*
* Fixes:
* Alan Cox : Numerous verify_area() calls
* Alan Cox : Set the ACK bit on a reset
* Alan Cox : Stopped it crashing if it closed while
* sk->inuse=1 and was trying to connect
* (tcp_err()).
* Alan Cox : All icmp error handling was broken
* pointers passed where wrong and the
* socket was looked up backwards. Nobody
* tested any icmp error code obviously.
* Alan Cox : tcp_err() now handled properly. It
* wakes people on errors. poll
* behaves and the icmp error race
* has gone by moving it into sock.c
* Alan Cox : tcp_send_reset() fixed to work for
* everything not just packets for
* unknown sockets.
* Alan Cox : tcp option processing.
* Alan Cox : Reset tweaked (still not 100%) [Had
* syn rule wrong]
* Herp Rosmanith : More reset fixes
* Alan Cox : No longer acks invalid rst frames.
* Acking any kind of RST is right out.
* Alan Cox : Sets an ignore me flag on an rst
* receive otherwise odd bits of prattle
* escape still
* Alan Cox : Fixed another acking RST frame bug.
* Should stop LAN workplace lockups.
* Alan Cox : Some tidyups using the new skb list
* facilities
* Alan Cox : sk->keepopen now seems to work
* Alan Cox : Pulls options out correctly on accepts
* Alan Cox : Fixed assorted sk->rqueue->next errors
* Alan Cox : PSH doesn't end a TCP read. Switched a
* bit to skb ops.
* Alan Cox : Tidied tcp_data to avoid a potential
* nasty.
* Alan Cox : Added some better commenting, as the
* tcp is hard to follow
* Alan Cox : Removed incorrect check for 20 * psh
* Michael O'Reilly : ack < copied bug fix.
* Johannes Stille : Misc tcp fixes (not all in yet).
* Alan Cox : FIN with no memory -> CRASH
* Alan Cox : Added socket option proto entries.
* Also added awareness of them to accept.
* Alan Cox : Added TCP options (SOL_TCP)
* Alan Cox : Switched wakeup calls to callbacks,
* so the kernel can layer network
* sockets.
* Alan Cox : Use ip_tos/ip_ttl settings.
* Alan Cox : Handle FIN (more) properly (we hope).
* Alan Cox : RST frames sent on unsynchronised
* state ack error.
* Alan Cox : Put in missing check for SYN bit.
* Alan Cox : Added tcp_select_window() aka NET2E
* window non shrink trick.
* Alan Cox : Added a couple of small NET2E timer
* fixes
* Charles Hedrick : TCP fixes
* Toomas Tamm : TCP window fixes
* Alan Cox : Small URG fix to rlogin ^C ack fight
* Charles Hedrick : Rewrote most of it to actually work
* Linus : Rewrote tcp_read() and URG handling
* completely
* Gerhard Koerting: Fixed some missing timer handling
* Matthew Dillon : Reworked TCP machine states as per RFC
* Gerhard Koerting: PC/TCP workarounds
* Adam Caldwell : Assorted timer/timing errors
* Matthew Dillon : Fixed another RST bug
* Alan Cox : Move to kernel side addressing changes.
* Alan Cox : Beginning work on TCP fastpathing
* (not yet usable)
* Arnt Gulbrandsen: Turbocharged tcp_check() routine.
* Alan Cox : TCP fast path debugging
* Alan Cox : Window clamping
* Michael Riepe : Bug in tcp_check()
* Matt Dillon : More TCP improvements and RST bug fixes
* Matt Dillon : Yet more small nasties removed from the
* TCP code (Be very nice to this man if
* tcp finally works 100%) 8)
* Alan Cox : BSD accept semantics.
* Alan Cox : Reset on closedown bug.
* Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
* Michael Pall : Handle poll() after URG properly in
* all cases.
* Michael Pall : Undo the last fix in tcp_read_urg()
* (multi URG PUSH broke rlogin).
* Michael Pall : Fix the multi URG PUSH problem in
* tcp_readable(), poll() after URG
* works now.
* Michael Pall : recv(...,MSG_OOB) never blocks in the
* BSD api.
* Alan Cox : Changed the semantics of sk->socket to
* fix a race and a signal problem with
* accept() and async I/O.
* Alan Cox : Relaxed the rules on tcp_sendto().
* Yury Shevchuk : Really fixed accept() blocking problem.
* Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
* clients/servers which listen in on
* fixed ports.
* Alan Cox : Cleaned the above up and shrank it to
* a sensible code size.
* Alan Cox : Self connect lockup fix.
* Alan Cox : No connect to multicast.
* Ross Biro : Close unaccepted children on master
* socket close.
* Alan Cox : Reset tracing code.
* Alan Cox : Spurious resets on shutdown.
* Alan Cox : Giant 15 minute/60 second timer error
* Alan Cox : Small whoops in polling before an
* accept.
* Alan Cox : Kept the state trace facility since
* it's handy for debugging.
* Alan Cox : More reset handler fixes.
* Alan Cox : Started rewriting the code based on
* the RFC's for other useful protocol
* references see: Comer, KA9Q NOS, and
* for a reference on the difference
* between specifications and how BSD
* works see the 4.4lite source.
* A.N.Kuznetsov : Don't time wait on completion of tidy
* close.
* Linus Torvalds : Fin/Shutdown & copied_seq changes.
* Linus Torvalds : Fixed BSD port reuse to work first syn
* Alan Cox : Reimplemented timers as per the RFC
* and using multiple timers for sanity.
* Alan Cox : Small bug fixes, and a lot of new
* comments.
* Alan Cox : Fixed dual reader crash by locking
* the buffers (much like datagram.c)
* Alan Cox : Fixed stuck sockets in probe. A probe
* now gets fed up of retrying without
* (even a no space) answer.
* Alan Cox : Extracted closing code better
* Alan Cox : Fixed the closing state machine to
* resemble the RFC.
* Alan Cox : More 'per spec' fixes.
* Jorge Cwik : Even faster checksumming.
* Alan Cox : tcp_data() doesn't ack illegal PSH
* only frames. At least one pc tcp stack
* generates them.
* Alan Cox : Cache last socket.
* Alan Cox : Per route irtt.
* Matt Day : poll()->select() match BSD precisely on error
* Alan Cox : New buffers
* Marc Tamsky : Various sk->prot->retransmits and
* sk->retransmits misupdating fixed.
* Fixed tcp_write_timeout: stuck close,
* and TCP syn retries gets used now.
* Mark Yarvis : In tcp_read_wakeup(), don't send an
* ack if state is TCP_CLOSED.
* Alan Cox : Look up device on a retransmit - routes may
* change. Doesn't yet cope with MSS shrink right
* but it's a start!
* Marc Tamsky : Closing in closing fixes.
* Mike Shaver : RFC1122 verifications.
* Alan Cox : rcv_saddr errors.
* Alan Cox : Block double connect().
* Alan Cox : Small hooks for enSKIP.
* Alexey Kuznetsov: Path MTU discovery.
* Alan Cox : Support soft errors.
* Alan Cox : Fix MTU discovery pathological case
* when the remote claims no mtu!
* Marc Tamsky : TCP_CLOSE fix.
* Colin (G3TNE) : Send a reset on syn ack replies in
* window but wrong (fixes NT lpd problems)
* Pedro Roque : Better TCP window handling, delayed ack.
* Joerg Reuter : No modification of locked buffers in
* tcp_do_retransmit()
* Eric Schenk : Changed receiver side silly window
* avoidance algorithm to BSD style
* algorithm. This doubles throughput
* against machines running Solaris,
* and seems to result in general
* improvement.
* Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
* Willy Konynenberg : Transparent proxying support.
* Mike McLagan : Routing by source
* Keith Owens : Do proper merging with partial SKB's in
* tcp_do_sendmsg to avoid burstiness.
* Eric Schenk : Fix fast close down bug with
* shutdown() followed by close().
* Andi Kleen : Make poll agree with SIGIO
* Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
* lingertime == 0 (RFC 793 ABORT Call)
* Hirokazu Takahashi : Use copy_from_user() instead of
* csum_and_copy_from_user() if possible.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or(at your option) any later version.
*
* Description of States:
*
* TCP_SYN_SENT sent a connection request, waiting for ack
*
* TCP_SYN_RECV received a connection request, sent ack,
* waiting for final ack in three-way handshake.
*
* TCP_ESTABLISHED connection established
*
* TCP_FIN_WAIT1 our side has shutdown, waiting to complete
* transmission of remaining buffered data
*
* TCP_FIN_WAIT2 all buffered data sent, waiting for remote
* to shutdown
*
* TCP_CLOSING both sides have shutdown but we still have
* data we have to finish sending
*
* TCP_TIME_WAIT timeout to catch resent junk before entering
* closed, can only be entered from FIN_WAIT2
* or CLOSING. Required because the other end
* may not have gotten our last ACK causing it
* to retransmit the data packet (which we ignore)
*
* TCP_CLOSE_WAIT remote side has shutdown and is waiting for
* us to finish writing our data and to shutdown
* (we have to close() to move on to LAST_ACK)
*
* TCP_LAST_ACK our side has shutdown after remote has
* shutdown. There may still be data in our
* buffer that we have to finish sending
*
* TCP_CLOSE socket is finished
*/
#define pr_fmt(fmt) "TCP: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/uid_stat.h>
#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/netdma.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
int sysctl_tcp_autocorking __read_mostly = 1;
struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);
atomic_long_t tcp_memory_allocated; /* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
/*
* Current number of TCP sockets.
*/
struct percpu_counter tcp_sockets_allocated;
EXPORT_SYMBOL(tcp_sockets_allocated);
/*
* TCP splice context
*/
struct tcp_splice_state {
struct pipe_inode_info *pipe;
size_t len;
unsigned int flags;
};
/*
* Pressure flag: try to collapse.
* Technical note: it is used by multiple contexts non-atomically.
* All the __sk_mem_schedule() is of this nature: accounting
* is strict, actions are advisory and have some latency.
*/
int tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL(tcp_memory_pressure);
void tcp_enter_memory_pressure(struct sock *sk)
{
if (!tcp_memory_pressure) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
tcp_memory_pressure = 1;
}
}
EXPORT_SYMBOL(tcp_enter_memory_pressure);
/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
u8 res = 0;
if (seconds > 0) {
int period = timeout;
res = 1;
while (seconds > period && res < 255) {
res++;
timeout <<= 1;
if (timeout > rto_max)
timeout = rto_max;
period += timeout;
}
}
return res;
}
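/*
* Worked example (illustrative numbers): secs_to_retrans(10, 1, 60)
* grows the accumulated period 1 -> 3 -> 7 -> 15 and returns 4,
* i.e. a 10 second user timeout maps to 4 retransmissions with a
* 1 second initial timeout doubling toward a 60 second cap.
*/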
/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
int period = 0;
if (retrans > 0) {
period = timeout;
while (--retrans) {
timeout <<= 1;
if (timeout > rto_max)
timeout = rto_max;
period += timeout;
}
}
return period;
}
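/*
* Inverse worked example: retrans_to_secs(4, 1, 60) accumulates
* 1 + 2 + 4 + 8 = 15 seconds, so the two helpers bracket the same
* exponential backoff curve rather than being exact inverses.
*/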
/* Address-family independent initialization for a tcp_sock.
*
* NOTE: A lot of things set to zero explicitly by call to
* sk_alloc() so need not be done here.
*/
void tcp_init_sock(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
skb_queue_head_init(&tp->out_of_order_queue);
tcp_init_xmit_timers(sk);
tcp_prequeue_init(tp);
INIT_LIST_HEAD(&tp->tsq_node);
icsk->icsk_rto = TCP_TIMEOUT_INIT;
tp->mdev = TCP_TIMEOUT_INIT;
/* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed-ACK and congestion control
* algorithms that we must have the following bandaid to talk
* efficiently to them. -DaveM
*/
tp->snd_cwnd = TCP_INIT_CWND;
/* See draft-stevens-tcpca-spec-01 for discussion of the
* initialization of these values.
*/
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tp->snd_cwnd_clamp = ~0;
tp->mss_cache = TCP_MSS_DEFAULT;
tp->reordering = sysctl_tcp_reordering;
icsk->icsk_ca_ops = &tcp_init_congestion_ops;
sk->sk_state = TCP_CLOSE;
sk->sk_write_space = sk_stream_write_space;
sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
icsk->icsk_sync_mss = tcp_sync_mss;
/* TCP Cookie Transactions */
if (sysctl_tcp_cookie_size > 0) {
/* Default, cookies without s_data_payload. */
tp->cookie_values =
kzalloc(sizeof(*tp->cookie_values),
sk->sk_allocation);
if (tp->cookie_values != NULL)
kref_init(&tp->cookie_values->kref);
}
/* Presumed zeroed, in order of appearance:
* cookie_in_always, cookie_out_never,
* s_data_constant, s_data_in, s_data_out
*/
sk->sk_sndbuf = sysctl_tcp_wmem[1];
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
local_bh_disable();
sock_update_memcg(sk);
sk_sockets_allocated_inc(sk);
local_bh_enable();
}
EXPORT_SYMBOL(tcp_init_sock);
/*
* Wait for a TCP event.
*
* Note that we don't need to lock the socket, as the upper poll layers
* take care of normal races (between the test and the event) and we don't
* go look at any of the socket buffers directly.
*/
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
unsigned int mask;
struct sock *sk = sock->sk;
const struct tcp_sock *tp = tcp_sk(sk);
sock_poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_state == TCP_LISTEN)
return inet_csk_listen_poll(sk);
/* Socket is not locked. We are protected from async events
* by poll logic and correct handling of state changes
* made by other threads is impossible in any case.
*/
mask = 0;
/*
* POLLHUP is certainly not done right. But poll() doesn't
* have a notion of HUP in just one direction, and for a
* socket the read side is more interesting.
*
* Some poll() documentation says that POLLHUP is incompatible
* with the POLLOUT/POLLWR flags, so somebody should check this
* all. But careful, it tends to be safer to return too many
* bits than too few, and you can easily break real applications
* if you don't tell them that something has hung up!
*
* Check-me.
*
* Check number 1. POLLHUP is an _UNMASKABLE_ event (see UNIX98 and
* our fs/select.c). It means that after we received EOF,
* poll always returns immediately, making poll() on write()
* impossible in state CLOSE_WAIT. One evident solution is to set
* POLLHUP if and only if shutdown has been made in both directions.
* Actually, it is interesting to look at how Solaris and DUX
* solve this dilemma. I would prefer POLLHUP to be maskable;
* then we could set it on SND_SHUTDOWN. BTW the examples given
* in Stevens' books assume exactly this behaviour, which explains
* why POLLHUP is incompatible with POLLOUT. --ANK
*
* NOTE. Check for TCP_CLOSE is added. The goal is to prevent
* blocking on a fresh not-connected or disconnected socket. --ANK
*/
if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
mask |= POLLHUP;
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLIN | POLLRDNORM | POLLRDHUP;
/* Connected? */
if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
int target = sock_rcvlowat(sk, 0, INT_MAX);
if (tp->urg_seq == tp->copied_seq &&
!sock_flag(sk, SOCK_URGINLINE) &&
tp->urg_data)
target++;
/* Potential race condition. If the read of tp below is
* reordered above the sk->sk_state check, we can be
* illegally awakened in SYN_* states. */
if (tp->rcv_nxt - tp->copied_seq >= target)
mask |= POLLIN | POLLRDNORM;
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else { /* send SIGIO later */
set_bit(SOCK_ASYNC_NOSPACE,
&sk->sk_socket->flags);
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
/* Race breaker. If space is freed after
* wspace test but before the flags are set,
* IO signal will be lost.
*/
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
mask |= POLLOUT | POLLWRNORM;
}
} else
mask |= POLLOUT | POLLWRNORM;
if (tp->urg_data & TCP_URG_VALID)
mask |= POLLPRI;
}
/* This barrier is coupled with smp_wmb() in tcp_reset() */
smp_rmb();
if (sk->sk_err)
mask |= POLLERR;
return mask;
}
EXPORT_SYMBOL(tcp_poll);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
struct tcp_sock *tp = tcp_sk(sk);
int answ;
switch (cmd) {
case SIOCINQ:
if (sk->sk_state == TCP_LISTEN)
return -EINVAL;
lock_sock(sk);
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
answ = 0;
else if (sock_flag(sk, SOCK_URGINLINE) ||
!tp->urg_data ||
before(tp->urg_seq, tp->copied_seq) ||
!before(tp->urg_seq, tp->rcv_nxt)) {
answ = tp->rcv_nxt - tp->copied_seq;
/* Subtract 1, if FIN was received */
if (answ && sock_flag(sk, SOCK_DONE))
answ--;
} else
answ = tp->urg_seq - tp->copied_seq;
release_sock(sk);
break;
case SIOCATMARK:
answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
break;
case SIOCOUTQ:
if (sk->sk_state == TCP_LISTEN)
return -EINVAL;
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
answ = 0;
else
answ = tp->write_seq - tp->snd_una;
break;
case SIOCOUTQNSD:
if (sk->sk_state == TCP_LISTEN)
return -EINVAL;
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
answ = 0;
else
answ = tp->write_seq - tp->snd_nxt;
break;
default:
return -ENOIOCTLCMD;
}
return put_user(answ, (int __user *)arg);
}
EXPORT_SYMBOL(tcp_ioctl);
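/*
* Illustrative user-space sketch (assumptions: fd is a connected TCP
* socket; this is not part of the kernel build): the ioctls handled
* above are reached like this:
*
*	#include <sys/ioctl.h>
*	#include <linux/sockios.h>
*
*	int unread, unsent;
*	ioctl(fd, SIOCINQ, &unread);	(bytes readable: rcv_nxt - copied_seq)
*	ioctl(fd, SIOCOUTQ, &unsent);	(bytes unacked: write_seq - snd_una)
*/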
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
tp->pushed_seq = tp->write_seq;
}
static inline int forced_push(const struct tcp_sock *tp)
{
return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
skb->csum = 0;
tcb->seq = tcb->end_seq = tp->write_seq;
tcb->tcp_flags = TCPHDR_ACK;
tcb->sacked = 0;
skb_header_release(skb);
tcp_add_write_queue_tail(sk, skb);
sk->sk_wmem_queued += skb->truesize;
sk_mem_charge(sk, skb->truesize);
if (tp->nonagle & TCP_NAGLE_PUSH)
tp->nonagle &= ~TCP_NAGLE_PUSH;
}
static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
if (flags & MSG_OOB)
tp->snd_up = tp->write_seq;
}
/* If a not-yet-filled skb is pushed, do not send it if
* we have packets in the Qdisc or NIC queues:
* because TX completion will happen shortly, this gives a chance
* to coalesce future sendmsg() payload into this skb, without
* the need for a timer and with no latency trade-off.
* As packets containing a data payload have a bigger truesize
* than pure ACK (dataless) packets, the last check prevents
* autocorking when we only have an ACK in the Qdisc/NIC queues.
*/
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
int size_goal)
{
return skb->len < size_goal &&
sysctl_tcp_autocorking &&
atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
}
static void tcp_push(struct sock *sk, int flags, int mss_now,
int nonagle, int size_goal)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
if (!tcp_send_head(sk))
return;
skb = tcp_write_queue_tail(sk);
if (!(flags & MSG_MORE) || forced_push(tp))
tcp_mark_push(tp, skb);
tcp_mark_urg(tp, flags);
if (tcp_should_autocork(sk, skb, size_goal)) {
/* avoid atomic op if TSQ_THROTTLED bit is already set */
if (!test_bit(TSQ_THROTTLED, &tp->tsq_flags)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
set_bit(TSQ_THROTTLED, &tp->tsq_flags);
}
return;
}
if (flags & MSG_MORE)
nonagle = TCP_NAGLE_CORK;
__tcp_push_pending_frames(sk, mss_now, nonagle);
}
static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
unsigned int offset, size_t len)
{
struct tcp_splice_state *tss = rd_desc->arg.data;
int ret;
ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
tss->flags);
if (ret > 0)
rd_desc->count -= ret;
return ret;
}
static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
/* Store TCP splice context information in read_descriptor_t. */
read_descriptor_t rd_desc = {
.arg.data = tss,
.count = tss->len,
};
return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}
/**
* tcp_splice_read - splice data from TCP socket to a pipe
* @sock: socket to splice from
* @ppos: position (not valid)
* @pipe: pipe to splice to
* @len: number of bytes to splice
* @flags: splice modifier flags
*
* Description:
* Will read pages from given socket and fill them into a pipe.
*
**/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
struct sock *sk = sock->sk;
struct tcp_splice_state tss = {
.pipe = pipe,
.len = len,
.flags = flags,
};
long timeo;
ssize_t spliced;
int ret;
sock_rps_record_flow(sk);
/*
* We can't seek on a socket input
*/
if (unlikely(*ppos))
return -ESPIPE;
ret = spliced = 0;
lock_sock(sk);
timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
while (tss.len) {
ret = __tcp_splice_read(sk, &tss);
if (ret < 0)
break;
else if (!ret) {
if (spliced)
break;
if (sock_flag(sk, SOCK_DONE))
break;
if (sk->sk_err) {
ret = sock_error(sk);
break;
}
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
if (sk->sk_state == TCP_CLOSE) {
/*
* This occurs when the user tries to read
* from a never-connected socket.
*/
if (!sock_flag(sk, SOCK_DONE))
ret = -ENOTCONN;
break;
}
if (!timeo) {
ret = -EAGAIN;
break;
}
sk_wait_data(sk, &timeo);
if (signal_pending(current)) {
ret = sock_intr_errno(timeo);
break;
}
continue;
}
tss.len -= ret;
spliced += ret;
if (!timeo)
break;
release_sock(sk);
lock_sock(sk);
if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
(sk->sk_shutdown & RCV_SHUTDOWN) ||
signal_pending(current))
break;
}
release_sock(sk);
if (spliced)
return spliced;
return ret;
}
EXPORT_SYMBOL(tcp_splice_read);
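/*
* Illustrative user-space sketch (assumed descriptors; not part of this
* file): splicing from a TCP socket into a pipe is what lands in
* tcp_splice_read() above.
*
*	#define _GNU_SOURCE
*	#include <fcntl.h>
*
*	int pfd[2];
*	pipe(pfd);
*	ssize_t n = splice(sock_fd, NULL, pfd[1], NULL, 65536,
*			   SPLICE_F_MOVE | SPLICE_F_MORE);
*	(then splice pfd[0] onward, e.g. to a file or another socket)
*/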
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
{
struct sk_buff *skb;
/* The TCP header must be at least 32-bit aligned. */
size = ALIGN(size, 4);
skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
if (skb) {
if (sk_wmem_schedule(sk, skb->truesize)) {
skb_reserve(skb, sk->sk_prot->max_header);
/*
* Make sure that we have exactly size bytes
* available to the caller, no more, no less.
*/
skb->reserved_tailroom = skb->end - skb->tail - size;
return skb;
}
__kfree_skb(skb);
} else {
sk->sk_prot->enter_memory_pressure(sk);
sk_stream_moderate_sndbuf(sk);
}
return NULL;
}
static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
int large_allowed)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 xmit_size_goal, old_size_goal;
xmit_size_goal = mss_now;
if (large_allowed && sk_can_gso(sk)) {
xmit_size_goal = ((sk->sk_gso_max_size - 1) -
inet_csk(sk)->icsk_af_ops->net_header_len -
inet_csk(sk)->icsk_ext_hdr_len -
tp->tcp_header_len);
/* TSQ : try to have two TSO segments in flight */
xmit_size_goal = min_t(u32, xmit_size_goal,
sysctl_tcp_limit_output_bytes >> 1);
xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
/* We try hard to avoid divides here */
old_size_goal = tp->xmit_size_goal_segs * mss_now;
if (likely(old_size_goal <= xmit_size_goal &&
old_size_goal + mss_now > xmit_size_goal)) {
xmit_size_goal = old_size_goal;
} else {
tp->xmit_size_goal_segs =
min_t(u16, xmit_size_goal / mss_now,
sk->sk_gso_max_segs);
xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
}
}
return max(xmit_size_goal, mss_now);
}
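/*
* Worked example (assumed, typical defaults; for illustration only): with
* mss_now == 1448, sk_gso_max_size == 65536, a 20-byte IPv4 header, no
* extension headers and a 32-byte TCP header (timestamps on), the raw goal
* is 65535 - 20 - 32 = 65483. The TSQ clamp (131072 / 2 = 65536) does not
* bite, and rounding down to whole segments gives 45 * 1448 = 65160 bytes
* per skb.
*/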
static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
int mss_now;
mss_now = tcp_current_mss(sk);
*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
return mss_now;
}
static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
size_t psize, int flags)
{
struct tcp_sock *tp = tcp_sk(sk);
int mss_now, size_goal;
int err;
ssize_t copied;
long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
/* Wait for a connection to finish. */
if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
goto out_err;
clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
mss_now = tcp_send_mss(sk, &size_goal, flags);
copied = 0;
err = -EPIPE;
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto out_err;
while (psize > 0) {
struct sk_buff *skb = tcp_write_queue_tail(sk);
struct page *page = pages[poffset / PAGE_SIZE];
int copy, i, can_coalesce;
int offset = poffset % PAGE_SIZE;
int size = min_t(size_t, psize, PAGE_SIZE - offset);
if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
if (!skb)
goto wait_for_memory;
skb_entail(sk, skb);
copy = size_goal;
}
if (copy > size)
copy = size;
i = skb_shinfo(skb)->nr_frags;
can_coalesce = skb_can_coalesce(skb, i, page, offset);
if (!can_coalesce && i >= MAX_SKB_FRAGS) {
tcp_mark_push(tp, skb);
goto new_segment;
}
if (!sk_wmem_schedule(sk, copy))
goto wait_for_memory;
if (can_coalesce) {
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
} else {
get_page(page);
skb_fill_page_desc(skb, i, page, offset, copy);
}
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
sk->sk_wmem_queued += copy;
sk_mem_charge(sk, copy);
skb->ip_summed = CHECKSUM_PARTIAL;
tp->write_seq += copy;
TCP_SKB_CB(skb)->end_seq += copy;
skb_shinfo(skb)->gso_segs = 0;
if (!copied)
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
copied += copy;
poffset += copy;
if (!(psize -= copy))
goto out;
if (skb->len < size_goal || (flags & MSG_OOB))
continue;
if (forced_push(tp)) {
tcp_mark_push(tp, skb);
__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
} else if (skb == tcp_send_head(sk))
tcp_push_one(sk, mss_now);
continue;
wait_for_sndbuf:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
tcp_push(sk, flags & ~MSG_MORE, mss_now,
TCP_NAGLE_PUSH, size_goal);
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error;
mss_now = tcp_send_mss(sk, &size_goal, flags);
}
out:
if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
return copied;
do_error:
if (copied)
goto out;
out_err:
return sk_stream_error(sk, flags, err);
}
int tcp_sendpage(struct sock *sk, struct page *page, int offset,
size_t size, int flags)
{
ssize_t res;
if (!(sk->sk_route_caps & NETIF_F_SG) ||
!(sk->sk_route_caps & NETIF_F_ALL_CSUM))
return sock_no_sendpage(sk->sk_socket, page, offset, size,
flags);
lock_sock(sk);
res = do_tcp_sendpages(sk, &page, offset, size, flags);
release_sock(sk);
return res;
}
EXPORT_SYMBOL(tcp_sendpage);
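/*
* Illustrative user-space sketch (assumed descriptors; not part of this
* file): sendfile() on a TCP socket ends up in tcp_sendpage() above when
* the route supports SG and checksum offload, otherwise it falls back to
* sock_no_sendpage():
*
*	off_t off = 0;
*	ssize_t n = sendfile(sock_fd, file_fd, &off, file_size);
*/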
static inline int select_size(const struct sock *sk, bool sg)
{
const struct tcp_sock *tp = tcp_sk(sk);
int tmp = tp->mss_cache;
if (sg) {
if (sk_can_gso(sk)) {
/* Small frames won't use a full page:
* payload will immediately follow the TCP header.
*/
tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
} else {
int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
if (tmp >= pgbreak &&
tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
tmp = pgbreak;
}
}
return tmp;
}
int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t size)
{
struct iovec *iov;
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
int iovlen, flags, err, copied;
int mss_now, size_goal;
bool sg;
long timeo;
lock_sock(sk);
flags = msg->msg_flags;
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
/* Wait for a connection to finish. */
if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
goto out_err;
/* This should be in poll */
clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
mss_now = tcp_send_mss(sk, &size_goal, flags);
/* Ok commence sending. */
iovlen = msg->msg_iovlen;
iov = msg->msg_iov;
copied = 0;
err = -EPIPE;
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto out_err;
sg = !!(sk->sk_route_caps & NETIF_F_SG);
while (--iovlen >= 0) {
size_t seglen = iov->iov_len;
unsigned char __user *from = iov->iov_base;
iov++;
while (seglen > 0) {
int copy = 0;
int max = size_goal;
skb = tcp_write_queue_tail(sk);
if (tcp_send_head(sk)) {
if (skb->ip_summed == CHECKSUM_NONE)
max = mss_now;
copy = max - skb->len;
}
if (copy <= 0) {
new_segment:
/* Allocate a new segment. If the interface is SG,
* allocate an skb that fits into a single page.
*/
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
skb = sk_stream_alloc_skb(sk,
select_size(sk, sg),
sk->sk_allocation);
if (!skb)
goto wait_for_memory;
/*
* Check whether we can use HW checksum.
*/
if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
skb->ip_summed = CHECKSUM_PARTIAL;
skb_entail(sk, skb);
copy = size_goal;
max = size_goal;
}
/* Try to append data to the end of skb. */
if (copy > seglen)
copy = seglen;
/* Where to copy to? */
if (skb_availroom(skb) > 0) {
/* We have some space in skb head. Superb! */
copy = min_t(int, copy, skb_availroom(skb));
err = skb_add_data_nocache(sk, skb, from, copy);
if (err)
goto do_fault;
} else {
int merge = 0;
int i = skb_shinfo(skb)->nr_frags;
struct page *page = sk->sk_sndmsg_page;
int off;
if (page && page_count(page) == 1)
sk->sk_sndmsg_off = 0;
off = sk->sk_sndmsg_off;
if (skb_can_coalesce(skb, i, page, off) &&
off != PAGE_SIZE) {
/* We can extend the last page
* fragment. */
merge = 1;
} else if (i == MAX_SKB_FRAGS || !sg) {
/* Need to add new fragment and cannot
* do this because interface is non-SG,
* or because all the page slots are
* busy. */
tcp_mark_push(tp, skb);
goto new_segment;
} else if (page) {
if (off == PAGE_SIZE) {
put_page(page);
sk->sk_sndmsg_page = page = NULL;
off = 0;
}
} else
off = 0;
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
if (!sk_wmem_schedule(sk, copy))
goto wait_for_memory;
if (!page) {
/* Allocate new cache page. */
if (!(page = sk_stream_alloc_page(sk)))
goto wait_for_memory;
}
/* Time to copy data. We are close to
* the end! */
err = skb_copy_to_page_nocache(sk, from, skb,
page, off, copy);
if (err) {
/* If this page was new, give it to the
* socket so it does not get leaked.
*/
if (!sk->sk_sndmsg_page) {
sk->sk_sndmsg_page = page;
sk->sk_sndmsg_off = 0;
}
goto do_error;
}
/* Update the skb. */
if (merge) {
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
} else {
skb_fill_page_desc(skb, i, page, off, copy);
if (sk->sk_sndmsg_page) {
get_page(page);
} else if (off + copy < PAGE_SIZE) {
get_page(page);
sk->sk_sndmsg_page = page;
}
}
sk->sk_sndmsg_off = off + copy;
}
if (!copied)
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
tp->write_seq += copy;
TCP_SKB_CB(skb)->end_seq += copy;
skb_shinfo(skb)->gso_segs = 0;
from += copy;
copied += copy;
if ((seglen -= copy) == 0 && iovlen == 0)
goto out;
if (skb->len < max || (flags & MSG_OOB))
continue;
if (forced_push(tp)) {
tcp_mark_push(tp, skb);
__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
} else if (skb == tcp_send_head(sk))
tcp_push_one(sk, mss_now);
continue;
wait_for_sndbuf:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
if (copied)
tcp_push(sk, flags & ~MSG_MORE, mss_now,
TCP_NAGLE_PUSH, size_goal);
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error;
mss_now = tcp_send_mss(sk, &size_goal, flags);
}
}
out:
if (copied)
tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
release_sock(sk);
if (copied > 0)
uid_stat_tcp_snd(current_uid(), copied);
return copied;
do_fault:
if (!skb->len) {
tcp_unlink_write_queue(skb, sk);
/* It is the one place in all of TCP, except connection
* reset, where we can be unlinking the send_head.
*/
tcp_check_send_head(sk, skb);
sk_wmem_free_skb(sk, skb);
}
do_error:
if (copied)
goto out;
out_err:
err = sk_stream_error(sk, flags, err);
release_sock(sk);
return err;
}
EXPORT_SYMBOL(tcp_sendmsg);
/*
* Handle reading urgent data. BSD has very simple semantics for
* this, no blocking and very strange errors 8)
*/
static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
{
struct tcp_sock *tp = tcp_sk(sk);
/* No URG data to read. */
if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
tp->urg_data == TCP_URG_READ)
return -EINVAL; /* Yes this is right ! */
if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
return -ENOTCONN;
if (tp->urg_data & TCP_URG_VALID) {
int err = 0;
char c = tp->urg_data;
if (!(flags & MSG_PEEK))
tp->urg_data = TCP_URG_READ;
/* Read urgent data. */
msg->msg_flags |= MSG_OOB;
if (len > 0) {
if (!(flags & MSG_TRUNC))
err = memcpy_toiovec(msg->msg_iov, &c, 1);
len = 1;
} else
msg->msg_flags |= MSG_TRUNC;
return err ? -EFAULT : len;
}
if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
return 0;
/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
* the available implementations agree in this case:
* this call should never block, independent of the
* blocking state of the socket.
* Mike <pall@rz.uni-karlsruhe.de>
*/
return -EAGAIN;
}
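/*
* Illustrative user-space sketch (assumed descriptors; not part of this
* file): one urgent byte travels through the path above. SIOCATMARK (see
* tcp_ioctl) tells the reader when the normal stream has caught up with
* the urgent mark:
*
*	send(peer_fd, "!", 1, MSG_OOB);		(sender marks one byte urgent)
*	char c;
*	recv(fd, &c, 1, MSG_OOB);		(receiver, never blocks here)
*/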
/* Clean up the receive buffer for full frames taken by the user,
* then send an ACK if necessary. COPIED is the number of bytes
* tcp_recvmsg has given to the user so far; it speeds up the
* calculation of whether or not we must ACK for the sake of
* a window update.
*/
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
struct tcp_sock *tp = tcp_sk(sk);
int time_to_ack = 0;
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
"cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
if (inet_csk_ack_scheduled(sk)) {
const struct inet_connection_sock *icsk = inet_csk(sk);
/* Delayed ACKs frequently hit locked sockets during bulk
* receive. */
if (icsk->icsk_ack.blocked ||
/* Once-per-two-segments ACK was not sent by tcp_input.c */
tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
/*
* If this read emptied the read buffer, we send an ACK if
* the connection is not bidirectional, the user drained the
* receive buffer, and there was a small segment
* in the queue.
*/
(copied > 0 &&
((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
!icsk->icsk_ack.pingpong)) &&
!atomic_read(&sk->sk_rmem_alloc)))
time_to_ack = 1;
}
/* We send an ACK if we can now advertise a non-zero window
* which has been raised "significantly".
*
* Even if the window was raised up to infinity, do not send a
* window-open ACK in states where we will not receive more. It is useless.
*/
if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
__u32 rcv_window_now = tcp_receive_window(tp);
/* Optimize, __tcp_select_window() is not cheap. */
if (2*rcv_window_now <= tp->window_clamp) {
__u32 new_window = __tcp_select_window(sk);
/* Send an ACK now if this read freed lots of space
* in our buffer. We can advertise the new window now
* if it is not less than the current one.
* "Lots" means "at least twice" here.
*/
if (new_window && new_window >= 2 * rcv_window_now)
time_to_ack = 1;
}
}
if (time_to_ack)
tcp_send_ack(sk);
}
static void tcp_prequeue_process(struct sock *sk)
{
struct sk_buff *skb;
struct tcp_sock *tp = tcp_sk(sk);
NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
/* RX process wants to run with disabled BHs, though it is not
* necessary */
local_bh_disable();
while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
sk_backlog_rcv(sk, skb);
local_bh_enable();
/* Clear memory counter. */
tp->ucopy.memory = 0;
}
#ifdef CONFIG_NET_DMA
static void tcp_service_net_dma(struct sock *sk, bool wait)
{
dma_cookie_t done, used;
dma_cookie_t last_issued;
struct tcp_sock *tp = tcp_sk(sk);
if (!tp->ucopy.dma_chan)
return;
last_issued = tp->ucopy.dma_cookie;
dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
do {
if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
last_issued, &done,
&used) == DMA_SUCCESS) {
/* Safe to free early-copied skbs now */
__skb_queue_purge(&sk->sk_async_wait_queue);
break;
} else {
struct sk_buff *skb;
while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
(dma_async_is_complete(skb->dma_cookie, done,
used) == DMA_SUCCESS)) {
__skb_dequeue(&sk->sk_async_wait_queue);
kfree_skb(skb);
}
}
} while (wait);
}
#endif
static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
struct sk_buff *skb;
u32 offset;
skb_queue_walk(&sk->sk_receive_queue, skb) {
offset = seq - TCP_SKB_CB(skb)->seq;
if (tcp_hdr(skb)->syn)
offset--;
if (offset < skb->len || tcp_hdr(skb)->fin) {
*off = offset;
return skb;
}
}
return NULL;
}
/*
* This routine provides an alternative to tcp_recvmsg() for routines
* that would like to handle copying from skbuffs directly in 'sendfile'
* fashion.
* Note:
* - It is assumed that the socket was locked by the caller.
* - The routine does not block.
* - At present, there is no support for reading OOB data
* or for 'peeking' the socket using this routine
* (although both would be easy to implement).
*/
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor)
{
struct sk_buff *skb;
struct tcp_sock *tp = tcp_sk(sk);
u32 seq = tp->copied_seq;
u32 offset;
int copied = 0;
if (sk->sk_state == TCP_LISTEN)
return -ENOTCONN;
while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
if (offset < skb->len) {
int used;
size_t len;
len = skb->len - offset;
/* Stop reading if we hit a patch of urgent data */
if (tp->urg_data) {
u32 urg_offset = tp->urg_seq - seq;
if (urg_offset < len)
len = urg_offset;
if (!len)
break;
}
used = recv_actor(desc, skb, offset, len);
if (used < 0) {
if (!copied)
copied = used;
break;
} else if (used <= len) {
seq += used;
copied += used;
offset += used;
}
/*
* If recv_actor drops the lock (e.g. TCP splice
* receive) the skb pointer might be invalid when
* getting here: tcp_collapse might have deleted it
* while aggregating skbs from the socket queue.
*/
skb = tcp_recv_skb(sk, seq-1, &offset);
if (!skb || (offset+1 != skb->len))
break;
}
if (tcp_hdr(skb)->fin) {
sk_eat_skb(sk, skb, 0);
++seq;
break;
}
sk_eat_skb(sk, skb, 0);
if (!desc->count)
break;
tp->copied_seq = seq;
}
tp->copied_seq = seq;
tcp_rcv_space_adjust(sk);
/* Clean up data we have read: This will do ACK frames. */
if (copied > 0) {
tcp_cleanup_rbuf(sk, copied);
uid_stat_tcp_rcv(current_uid(), copied);
}
return copied;
}
EXPORT_SYMBOL(tcp_read_sock);
/*
* This routine copies from a sock struct into the user buffer.
*
* Technical note: in 2.3 we work on a _locked_ socket, so
* tricks with *seq access order and skb->users are not required.
* The code can probably be improved even further.
*/
int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int nonblock, int flags, int *addr_len)
{
struct tcp_sock *tp = tcp_sk(sk);
int copied = 0;
u32 peek_seq;
u32 *seq;
unsigned long used;
int err;
int target; /* Read at least this many bytes */
long timeo;
struct task_struct *user_recv = NULL;
int copied_early = 0;
struct sk_buff *skb;
u32 urg_hole = 0;
lock_sock(sk);
err = -ENOTCONN;
if (sk->sk_state == TCP_LISTEN)
goto out;
timeo = sock_rcvtimeo(sk, nonblock);
/* Urgent data needs to be handled specially. */
if (flags & MSG_OOB)
goto recv_urg;
seq = &tp->copied_seq;
if (flags & MSG_PEEK) {
peek_seq = tp->copied_seq;
seq = &peek_seq;
}
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
#ifdef CONFIG_NET_DMA
tp->ucopy.dma_chan = NULL;
preempt_disable();
skb = skb_peek_tail(&sk->sk_receive_queue);
{
int available = 0;
if (skb)
available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
if ((available < target) &&
(len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
!sysctl_tcp_low_latency &&
net_dma_find_channel()) {
preempt_enable_no_resched();
tp->ucopy.pinned_list =
dma_pin_iovec_pages(msg->msg_iov, len);
} else {
preempt_enable_no_resched();
}
}
#endif
do {
u32 offset;
/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
if (tp->urg_data && tp->urg_seq == *seq) {
if (copied)
break;
if (signal_pending(current)) {
copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
break;
}
}
/* Next get a buffer. */
skb_queue_walk(&sk->sk_receive_queue, skb) {
/* Now that we have two receive queues this
* shouldn't happen.
*/
if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
"recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
*seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
flags))
break;
offset = *seq - TCP_SKB_CB(skb)->seq;
if (tcp_hdr(skb)->syn)
offset--;
if (offset < skb->len)
goto found_ok_skb;
if (tcp_hdr(skb)->fin)
goto found_fin_ok;
WARN(!(flags & MSG_PEEK),
"recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
*seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
}
/* Well, if we have backlog, try to process it now. */
if (copied >= target && !sk->sk_backlog.tail)
break;
if (copied) {
if (sk->sk_err ||
sk->sk_state == TCP_CLOSE ||
(sk->sk_shutdown & RCV_SHUTDOWN) ||
!timeo ||
signal_pending(current))
break;
} else {
if (sock_flag(sk, SOCK_DONE))
break;
if (sk->sk_err) {
copied = sock_error(sk);
break;
}
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
if (sk->sk_state == TCP_CLOSE) {
if (!sock_flag(sk, SOCK_DONE)) {
/* This occurs when the user tries to read
* from a never-connected socket.
*/
copied = -ENOTCONN;
break;
}
break;
}
if (!timeo) {
copied = -EAGAIN;
break;
}
if (signal_pending(current)) {
copied = sock_intr_errno(timeo);
break;
}
}
tcp_cleanup_rbuf(sk, copied);
if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
/* Install new reader */
if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
user_recv = current;
tp->ucopy.task = user_recv;
tp->ucopy.iov = msg->msg_iov;
}
tp->ucopy.len = len;
WARN_ON(tp->copied_seq != tp->rcv_nxt &&
!(flags & (MSG_PEEK | MSG_TRUNC)));
/* Ugly... If the prequeue is not empty, we have to
* process it before releasing the socket; otherwise the
* order will be broken at the second iteration.
* A more elegant solution is required!!!
*
* Look: we have the following (pseudo)queues:
*
* 1. packets in flight
* 2. backlog
* 3. prequeue
* 4. receive_queue
*
* Each queue can be processed only if the next ones
* are empty. At this point we have an empty receive_queue.
* But the prequeue _can_ be non-empty after the 2nd iteration,
* when we jumped to the start of the loop because backlog
* processing added something to the receive_queue.
* We cannot release_sock(), because the backlog contains
* packets that arrived _after_ the prequeued ones.
*
* In short, the algorithm is clear: process all
* the queues in order. We could make this more direct by
* requeueing packets from the backlog to the prequeue when it
* is not empty. That is more elegant, but eats cycles,
* unfortunately.
*/
if (!skb_queue_empty(&tp->ucopy.prequeue))
goto do_prequeue;
/* __ Set realtime policy in scheduler __ */
}
#ifdef CONFIG_NET_DMA
if (tp->ucopy.dma_chan) {
if (tp->rcv_wnd == 0 &&
!skb_queue_empty(&sk->sk_async_wait_queue)) {
tcp_service_net_dma(sk, true);
tcp_cleanup_rbuf(sk, copied);
} else
dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
}
#endif
if (copied >= target) {
/* Do not sleep, just process backlog. */
release_sock(sk);
lock_sock(sk);
} else
sk_wait_data(sk, &timeo);
#ifdef CONFIG_NET_DMA
tcp_service_net_dma(sk, false); /* Don't block */
tp->ucopy.wakeup = 0;
#endif
if (user_recv) {
int chunk;
/* __ Restore normal policy in scheduler __ */
if ((chunk = len - tp->ucopy.len) != 0) {
NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
len -= chunk;
copied += chunk;
}
if (tp->rcv_nxt == tp->copied_seq &&
!skb_queue_empty(&tp->ucopy.prequeue)) {
do_prequeue:
tcp_prequeue_process(sk);
if ((chunk = len - tp->ucopy.len) != 0) {
NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
len -= chunk;
copied += chunk;
}
}
}
if ((flags & MSG_PEEK) &&
(peek_seq - copied - urg_hole != tp->copied_seq)) {
if (net_ratelimit())
printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
current->comm, task_pid_nr(current));
peek_seq = tp->copied_seq;
}
continue;
found_ok_skb:
/* Ok so how much can we use? */
used = skb->len - offset;
if (len < used)
used = len;
/* Do we have urgent data here? */
if (tp->urg_data) {
u32 urg_offset = tp->urg_seq - *seq;
if (urg_offset < used) {
if (!urg_offset) {
if (!sock_flag(sk, SOCK_URGINLINE)) {
++*seq;
urg_hole++;
offset++;
used--;
if (!used)
goto skip_copy;
}
} else
used = urg_offset;
}
}
if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
tp->ucopy.dma_chan = net_dma_find_channel();
if (tp->ucopy.dma_chan) {
tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
tp->ucopy.dma_chan, skb, offset,
msg->msg_iov, used,
tp->ucopy.pinned_list);
if (tp->ucopy.dma_cookie < 0) {
pr_alert("%s: dma_cookie < 0\n",
__func__);
/* Exception. Bailout! */
if (!copied)
copied = -EFAULT;
break;
}
dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
if ((offset + used) == skb->len)
copied_early = 1;
} else
#endif
{
err = skb_copy_datagram_iovec(skb, offset,
msg->msg_iov, used);
if (err) {
/* Exception. Bailout! */
if (!copied)
copied = -EFAULT;
break;
}
}
}
*seq += used;
copied += used;
len -= used;
tcp_rcv_space_adjust(sk);
skip_copy:
if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
tp->urg_data = 0;
tcp_fast_path_check(sk);
}
if (used + offset < skb->len)
continue;
if (tcp_hdr(skb)->fin)
goto found_fin_ok;
if (!(flags & MSG_PEEK)) {
sk_eat_skb(sk, skb, copied_early);
copied_early = 0;
}
continue;
found_fin_ok:
/* Process the FIN. */
++*seq;
if (!(flags & MSG_PEEK)) {
sk_eat_skb(sk, skb, copied_early);
copied_early = 0;
}
break;
} while (len > 0);
if (user_recv) {
if (!skb_queue_empty(&tp->ucopy.prequeue)) {
int chunk;
tp->ucopy.len = copied > 0 ? len : 0;
tcp_prequeue_process(sk);
if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
len -= chunk;
copied += chunk;
}
}
tp->ucopy.task = NULL;
tp->ucopy.len = 0;
}
#ifdef CONFIG_NET_DMA
tcp_service_net_dma(sk, true); /* Wait for queue to drain */
tp->ucopy.dma_chan = NULL;
if (tp->ucopy.pinned_list) {
dma_unpin_iovec_pages(tp->ucopy.pinned_list);
tp->ucopy.pinned_list = NULL;
}
#endif
/* According to UNIX98, msg_name/msg_namelen are ignored
* on a connected socket. I was just happy when I found this 8) --ANK
*/
/* Clean up data we have read: This will do ACK frames. */
tcp_cleanup_rbuf(sk, copied);
release_sock(sk);
if (copied > 0)
uid_stat_tcp_rcv(current_uid(), copied);
return copied;
out:
release_sock(sk);
return err;
recv_urg:
err = tcp_recv_urg(sk, msg, len, flags);
if (err > 0)
uid_stat_tcp_rcv(current_uid(), err);
goto out;
}
EXPORT_SYMBOL(tcp_recvmsg);
void tcp_set_state(struct sock *sk, int state)
{
int oldstate = sk->sk_state;
switch (state) {
case TCP_ESTABLISHED:
if (oldstate != TCP_ESTABLISHED)
TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
break;
case TCP_CLOSE:
if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
sk->sk_prot->unhash(sk);
if (inet_csk(sk)->icsk_bind_hash &&
!(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
inet_put_port(sk);
/* fall through */
default:
if (oldstate == TCP_ESTABLISHED)
TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
}
/* Change state AFTER socket is unhashed to avoid closed
* socket sitting in hash tables.
*/
sk->sk_state = state;
#ifdef STATE_TRACE
SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
#endif
}
EXPORT_SYMBOL_GPL(tcp_set_state);
/*
* State processing on a close. This implements the state shift for
* sending our FIN frame. Note that we only send a FIN for some
* states. A shutdown() may have already sent the FIN, or we may be
* closed.
*/
static const unsigned char new_state[16] = {
/* current state: new state: action: */
/* (Invalid) */ TCP_CLOSE,
/* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
/* TCP_SYN_SENT */ TCP_CLOSE,
/* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
/* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
/* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
/* TCP_TIME_WAIT */ TCP_CLOSE,
/* TCP_CLOSE */ TCP_CLOSE,
/* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
/* TCP_LAST_ACK */ TCP_LAST_ACK,
/* TCP_LISTEN */ TCP_CLOSE,
/* TCP_CLOSING */ TCP_CLOSING,
};
static int tcp_close_state(struct sock *sk)
{
int next = (int)new_state[sk->sk_state];
int ns = next & TCP_STATE_MASK;
tcp_set_state(sk, ns);
return next & TCP_ACTION_FIN;
}
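/*
* Example (reading the table above): for a socket in TCP_ESTABLISHED,
* new_state[] yields TCP_FIN_WAIT1 | TCP_ACTION_FIN, so tcp_close_state()
* moves the socket to FIN_WAIT1 and its non-zero return tells the caller
* to transmit a FIN.
*/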
/*
* Shutdown the sending side of a connection. Much like close except
* that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
*/
void tcp_shutdown(struct sock *sk, int how)
{
/* We need to grab some memory, and put together a FIN,
* and then put it into the queue to be sent.
* Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
*/
if (!(how & SEND_SHUTDOWN))
return;
/* If we've already sent a FIN, or it's a closed state, skip this. */
if ((1 << sk->sk_state) &
(TCPF_ESTABLISHED | TCPF_SYN_SENT |
TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
/* Clear out any half completed packets. FIN if needed. */
if (tcp_close_state(sk))
tcp_send_fin(sk);
}
}
EXPORT_SYMBOL(tcp_shutdown);
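/*
* Illustrative user-space sketch (assumed descriptors and a hypothetical
* consume() helper; not part of this file): a half-close sends the FIN via
* tcp_shutdown() above while the receive side stays open:
*
*	write(fd, req, req_len);
*	shutdown(fd, SHUT_WR);			(FIN: no more data from us)
*	while ((n = read(fd, buf, sizeof(buf))) > 0)
*		consume(buf, n);		(peer's reply still arrives)
*/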
bool tcp_check_oom(struct sock *sk, int shift)
{
bool too_many_orphans, out_of_socket_memory;
too_many_orphans = tcp_too_many_orphans(sk, shift);
out_of_socket_memory = tcp_out_of_memory(sk);
if (too_many_orphans && net_ratelimit())
pr_info("too many orphaned sockets\n");
if (out_of_socket_memory && net_ratelimit())
pr_info("out of memory -- consider tuning tcp_mem\n");
return too_many_orphans || out_of_socket_memory;
}
void tcp_close(struct sock *sk, long timeout)
{
struct sk_buff *skb;
int data_was_unread = 0;
int state;
lock_sock(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
if (sk->sk_state == TCP_LISTEN) {
tcp_set_state(sk, TCP_CLOSE);
/* Special case. */
inet_csk_listen_stop(sk);
goto adjudge_to_death;
}
/* We need to flush the recv. buffs. We do this only on the
* descriptor close, not protocol-sourced closes, because the
* reader process may not have drained the data yet!
*/
while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
tcp_hdr(skb)->fin;
data_was_unread += len;
__kfree_skb(skb);
}
sk_mem_reclaim(sk);
/* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
if (sk->sk_state == TCP_CLOSE)
goto adjudge_to_death;
/* As outlined in RFC 2525, section 2.17, we send a RST here because
* data was lost. To witness the awful effects of the old behavior of
* always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
* GET in an FTP client, suspend the process, wait for the client to
* advertise a zero window, then kill -9 the FTP client, wheee...
* Note: timeout is always zero in such a case.
*/
if (data_was_unread) {
/* Unread data was tossed, zap the connection. */
NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, sk->sk_allocation);
} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
/* Check zero linger _after_ checking for unread data. */
sk->sk_prot->disconnect(sk, 0);
NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
} else if (tcp_close_state(sk)) {
/* We FIN if the application ate all the data before
* zapping the connection.
*/
/* RED-PEN. Formally speaking, we have broken TCP state
* machine. State transitions:
*
* TCP_ESTABLISHED -> TCP_FIN_WAIT1
* TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
* TCP_CLOSE_WAIT -> TCP_LAST_ACK
*
* are legal only when FIN has been sent (i.e. in window),
* rather than queued out of window. Purists blame.
*
* F.e. "RFC state" is ESTABLISHED,
* if Linux state is FIN-WAIT-1, but FIN is still not sent.
*
* The visible deviations are that sometimes
* we enter the time-wait state when it is not really required
* (harmless), and do not send active resets when they are
* required by the specs (TCP_ESTABLISHED and TCP_CLOSE_WAIT, when
* they look like CLOSING or LAST_ACK to Linux).
* Probably, I missed some more holelets.
* --ANK
*/
tcp_send_fin(sk);
}
sk_stream_wait_close(sk, timeout);
adjudge_to_death:
state = sk->sk_state;
sock_hold(sk);
sock_orphan(sk);
/* It is the last release_sock in its life. It will remove backlog. */
release_sock(sk);
/* Now socket is owned by kernel and we acquire BH lock
* to finish close. No need to check for user refs.
*/
local_bh_disable();
bh_lock_sock(sk);
WARN_ON(sock_owned_by_user(sk));
percpu_counter_inc(sk->sk_prot->orphan_count);
/* Have we already been destroyed by a softirq or backlog? */
if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
goto out;
/* This is a (useful) BSD violation of the RFC. There is a
* problem with TCP as specified, in that the other end could
* keep a socket open forever with no application left at this end.
* We use a 3 minute timeout (about the same as BSD) then kill
* our end. If they send after that then tough - BUT: long enough
* that we won't repeat the old "4*rto = almost no time" reset
* mistake.
*
* Nope, it was not a mistake. It is really desired behaviour,
* e.g. on http servers, where such sockets are useless but
* consume significant resources. Let's do it with a special
* linger2 option. --ANK
*/
if (sk->sk_state == TCP_FIN_WAIT2) {
struct tcp_sock *tp = tcp_sk(sk);
if (tp->linger2 < 0) {
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_ATOMIC);
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPABORTONLINGER);
} else {
const int tmo = tcp_fin_time(sk);
if (tmo > TCP_TIMEWAIT_LEN) {
inet_csk_reset_keepalive_timer(sk,
tmo - TCP_TIMEWAIT_LEN);
} else {
tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
goto out;
}
}
}
if (sk->sk_state != TCP_CLOSE) {
sk_mem_reclaim(sk);
if (tcp_check_oom(sk, 0)) {
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_ATOMIC);
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPABORTONMEMORY);
}
}
if (sk->sk_state == TCP_CLOSE)
inet_csk_destroy_sock(sk);
/* Otherwise, socket is reprieved until protocol close. */
out:
bh_unlock_sock(sk);
local_bh_enable();
sock_put(sk);
}
EXPORT_SYMBOL(tcp_close);
/* These states need RST on ABORT according to RFC793 */
static inline int tcp_need_reset(int state)
{
return (1 << state) &
(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
}
int tcp_disconnect(struct sock *sk, int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int err = 0;
int old_state = sk->sk_state;
if (old_state != TCP_CLOSE)
tcp_set_state(sk, TCP_CLOSE);
/* ABORT function of RFC793 */
if (old_state == TCP_LISTEN) {
inet_csk_listen_stop(sk);
} else if (tcp_need_reset(old_state) ||
(tp->snd_nxt != tp->write_seq &&
(1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
/* The last check adjusts for discrepancy of Linux wrt. RFC
* states
*/
tcp_send_active_reset(sk, gfp_any());
sk->sk_err = ECONNRESET;
} else if (old_state == TCP_SYN_SENT)
sk->sk_err = ECONNRESET;
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
tcp_write_queue_purge(sk);
__skb_queue_purge(&tp->out_of_order_queue);
#ifdef CONFIG_NET_DMA
__skb_queue_purge(&sk->sk_async_wait_queue);
#endif
inet->inet_dport = 0;
if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
sk->sk_shutdown = 0;
sock_reset_flag(sk, SOCK_DONE);
tp->srtt = 0;
if ((tp->write_seq += tp->max_window + 2) == 0)
tp->write_seq = 1;
icsk->icsk_backoff = 0;
tp->snd_cwnd = 2;
icsk->icsk_probes_out = 0;
tp->packets_out = 0;
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tp->snd_cwnd_cnt = 0;
tp->bytes_acked = 0;
tp->window_clamp = 0;
tcp_set_ca_state(sk, TCP_CA_Open);
tcp_clear_retrans(tp);
inet_csk_delack_init(sk);
tcp_init_send_head(sk);
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
sk->sk_error_report(sk);
return err;
}
EXPORT_SYMBOL(tcp_disconnect);
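/*
* Illustrative user-space sketch (assumed fd; not part of this file):
* connect() with AF_UNSPEC is the usual way user space reaches
* tcp_disconnect() above, dissolving the association:
*
*	struct sockaddr sa = { .sa_family = AF_UNSPEC };
*	connect(fd, &sa, sizeof(sa));
*/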
/*
* Socket option code for TCP.
*/
static int do_tcp_setsockopt(struct sock *sk, int level,
int optname, char __user *optval, unsigned int optlen)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
int val;
int err = 0;
/* These are data/string values, all the others are ints */
switch (optname) {
case TCP_CONGESTION: {
char name[TCP_CA_NAME_MAX];
if (optlen < 1)
return -EINVAL;
val = strncpy_from_user(name, optval,
min_t(long, TCP_CA_NAME_MAX-1, optlen));
if (val < 0)
return -EFAULT;
name[val] = 0;
lock_sock(sk);
err = tcp_set_congestion_control(sk, name);
release_sock(sk);
return err;
}
case TCP_COOKIE_TRANSACTIONS: {
struct tcp_cookie_transactions ctd;
struct tcp_cookie_values *cvp = NULL;
if (sizeof(ctd) > optlen)
return -EINVAL;
if (copy_from_user(&ctd, optval, sizeof(ctd)))
return -EFAULT;
if (ctd.tcpct_used > sizeof(ctd.tcpct_value) ||
ctd.tcpct_s_data_desired > TCP_MSS_DESIRED)
return -EINVAL;
if (ctd.tcpct_cookie_desired == 0) {
/* default to global value */
} else if ((0x1 & ctd.tcpct_cookie_desired) ||
ctd.tcpct_cookie_desired > TCP_COOKIE_MAX ||
ctd.tcpct_cookie_desired < TCP_COOKIE_MIN) {
return -EINVAL;
}
if (TCP_COOKIE_OUT_NEVER & ctd.tcpct_flags) {
/* Supersedes all other values */
lock_sock(sk);
if (tp->cookie_values != NULL) {
kref_put(&tp->cookie_values->kref,
tcp_cookie_values_release);
tp->cookie_values = NULL;
}
tp->rx_opt.cookie_in_always = 0; /* false */
tp->rx_opt.cookie_out_never = 1; /* true */
release_sock(sk);
return err;
}
/* Allocate ancillary memory before locking.
*/
if (ctd.tcpct_used > 0 ||
(tp->cookie_values == NULL &&
(sysctl_tcp_cookie_size > 0 ||
ctd.tcpct_cookie_desired > 0 ||
ctd.tcpct_s_data_desired > 0))) {
cvp = kzalloc(sizeof(*cvp) + ctd.tcpct_used,
GFP_KERNEL);
if (cvp == NULL)
return -ENOMEM;
kref_init(&cvp->kref);
}
lock_sock(sk);
tp->rx_opt.cookie_in_always =
(TCP_COOKIE_IN_ALWAYS & ctd.tcpct_flags);
tp->rx_opt.cookie_out_never = 0; /* false */
if (tp->cookie_values != NULL) {
if (cvp != NULL) {
/* Changed values are recorded by a changed
* pointer, ensuring the cookie will differ,
* without separately hashing each value later.
*/
kref_put(&tp->cookie_values->kref,
tcp_cookie_values_release);
} else {
cvp = tp->cookie_values;
}
}
if (cvp != NULL) {
cvp->cookie_desired = ctd.tcpct_cookie_desired;
if (ctd.tcpct_used > 0) {
memcpy(cvp->s_data_payload, ctd.tcpct_value,
ctd.tcpct_used);
cvp->s_data_desired = ctd.tcpct_used;
cvp->s_data_constant = 1; /* true */
} else {
/* No constant payload data. */
cvp->s_data_desired = ctd.tcpct_s_data_desired;
cvp->s_data_constant = 0; /* false */
}
tp->cookie_values = cvp;
}
release_sock(sk);
return err;
}
default:
/* fallthru */
break;
}
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case TCP_MAXSEG:
/* Values greater than the interface MTU won't take effect. However,
* at the point when this call is made we typically don't yet
* know which interface is going to be used. */
if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
err = -EINVAL;
break;
}
tp->rx_opt.user_mss = val;
break;
case TCP_NODELAY:
if (val) {
/* TCP_NODELAY is weaker than TCP_CORK, so that
* this option on corked socket is remembered, but
* it is not activated until cork is cleared.
*
* However, when TCP_NODELAY is set we make
* an explicit push, which overrides even TCP_CORK
* for currently queued segments.
*/
tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
tcp_push_pending_frames(sk);
} else {
tp->nonagle &= ~TCP_NAGLE_OFF;
}
break;
case TCP_THIN_LINEAR_TIMEOUTS:
if (val < 0 || val > 1)
err = -EINVAL;
else
tp->thin_lto = val;
break;
case TCP_THIN_DUPACK:
if (val < 0 || val > 1)
err = -EINVAL;
else
tp->thin_dupack = val;
break;
case TCP_CORK:
/* When set indicates to always queue non-full frames.
* Later the user clears this option and we transmit
* any pending partial frames in the queue. This is
* meant to be used alongside sendfile() to get properly
* filled frames when the user (for example) must write
* out headers with a write() call first and then use
* sendfile to send out the data parts.
*
* TCP_CORK can be set together with TCP_NODELAY and it is
* stronger than TCP_NODELAY.
*/
if (val) {
tp->nonagle |= TCP_NAGLE_CORK;
} else {
tp->nonagle &= ~TCP_NAGLE_CORK;
if (tp->nonagle&TCP_NAGLE_OFF)
tp->nonagle |= TCP_NAGLE_PUSH;
tcp_push_pending_frames(sk);
}
break;
case TCP_KEEPIDLE:
if (val < 1 || val > MAX_TCP_KEEPIDLE)
err = -EINVAL;
else {
tp->keepalive_time = val * HZ;
if (sock_flag(sk, SOCK_KEEPOPEN) &&
!((1 << sk->sk_state) &
(TCPF_CLOSE | TCPF_LISTEN))) {
u32 elapsed = keepalive_time_elapsed(tp);
if (tp->keepalive_time > elapsed)
elapsed = tp->keepalive_time - elapsed;
else
elapsed = 0;
inet_csk_reset_keepalive_timer(sk, elapsed);
}
}
break;
case TCP_KEEPINTVL:
if (val < 1 || val > MAX_TCP_KEEPINTVL)
err = -EINVAL;
else
tp->keepalive_intvl = val * HZ;
break;
case TCP_KEEPCNT:
if (val < 1 || val > MAX_TCP_KEEPCNT)
err = -EINVAL;
else
tp->keepalive_probes = val;
break;
case TCP_SYNCNT:
if (val < 1 || val > MAX_TCP_SYNCNT)
err = -EINVAL;
else
icsk->icsk_syn_retries = val;
break;
case TCP_LINGER2:
if (val < 0)
tp->linger2 = -1;
else if (val > sysctl_tcp_fin_timeout / HZ)
tp->linger2 = 0;
else
tp->linger2 = val * HZ;
break;
case TCP_DEFER_ACCEPT:
/* Translate value in seconds to number of retransmits */
icsk->icsk_accept_queue.rskq_defer_accept =
secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
TCP_RTO_MAX / HZ);
break;
case TCP_WINDOW_CLAMP:
if (!val) {
if (sk->sk_state != TCP_CLOSE) {
err = -EINVAL;
break;
}
tp->window_clamp = 0;
} else
tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
SOCK_MIN_RCVBUF / 2 : val;
break;
case TCP_QUICKACK:
if (!val) {
icsk->icsk_ack.pingpong = 1;
} else {
icsk->icsk_ack.pingpong = 0;
if ((1 << sk->sk_state) &
(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
inet_csk_ack_scheduled(sk)) {
icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
tcp_cleanup_rbuf(sk, 1);
if (!(val & 1))
icsk->icsk_ack.pingpong = 1;
}
}
break;
#ifdef CONFIG_TCP_MD5SIG
case TCP_MD5SIG:
/* Read the IP->Key mappings from userspace */
err = tp->af_specific->md5_parse(sk, optval, optlen);
break;
#endif
case TCP_USER_TIMEOUT:
/* Cap the maximum time in ms that TCP will retry or
* retransmit before giving up and aborting (ETIMEDOUT) a
* connection.
*/
if (val < 0)
err = -EINVAL;
else
icsk->icsk_user_timeout = msecs_to_jiffies(val);
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
unsigned int optlen)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
if (level != SOL_TCP)
return icsk->icsk_af_ops->setsockopt(sk, level, optname,
optval, optlen);
return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(tcp_setsockopt);
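/*
* Illustrative user-space sketch (assumed descriptors; not part of this
* file): the header-plus-sendfile pattern described in the TCP_CORK case
* above:
*
*	int on = 1, off = 0;
*	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
*	write(fd, hdr, hdr_len);		(queued, not sent as a runt)
*	sendfile(fd, file_fd, NULL, body_len);
*	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
*						(uncork: flush pending frames)
*/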
#ifdef CONFIG_COMPAT
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level != SOL_TCP)
return inet_csk_compat_setsockopt(sk, level, optname,
optval, optlen);
return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_tcp_setsockopt);
#endif
/* Return information about state of tcp endpoint in API format. */
void tcp_get_info(const struct sock *sk, struct tcp_info *info)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
u32 now = tcp_time_stamp;
memset(info, 0, sizeof(*info));
info->tcpi_state = sk->sk_state;
info->tcpi_ca_state = icsk->icsk_ca_state;
info->tcpi_retransmits = icsk->icsk_retransmits;
info->tcpi_probes = icsk->icsk_probes_out;
info->tcpi_backoff = icsk->icsk_backoff;
if (tp->rx_opt.tstamp_ok)
info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
if (tcp_is_sack(tp))
info->tcpi_options |= TCPI_OPT_SACK;
if (tp->rx_opt.wscale_ok) {
info->tcpi_options |= TCPI_OPT_WSCALE;
info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
}
if (tp->ecn_flags & TCP_ECN_OK)
info->tcpi_options |= TCPI_OPT_ECN;
if (tp->ecn_flags & TCP_ECN_SEEN)
info->tcpi_options |= TCPI_OPT_ECN_SEEN;
info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
info->tcpi_snd_mss = tp->mss_cache;
info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
if (sk->sk_state == TCP_LISTEN) {
info->tcpi_unacked = sk->sk_ack_backlog;
info->tcpi_sacked = sk->sk_max_ack_backlog;
} else {
info->tcpi_unacked = tp->packets_out;
info->tcpi_sacked = tp->sacked_out;
}
info->tcpi_lost = tp->lost_out;
info->tcpi_retrans = tp->retrans_out;
info->tcpi_fackets = tp->fackets_out;
info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
info->tcpi_snd_ssthresh = tp->snd_ssthresh;
info->tcpi_snd_cwnd = tp->snd_cwnd;
info->tcpi_advmss = tp->advmss;
info->tcpi_reordering = tp->reordering;
info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
info->tcpi_rcv_space = tp->rcvq_space.space;
info->tcpi_total_retrans = tp->total_retrans;
if (sk->sk_socket) {
struct file *filep = sk->sk_socket->file;
if (filep)
info->tcpi_count = atomic_read(&filep->f_count);
}
}
EXPORT_SYMBOL_GPL(tcp_get_info);
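/*
* Illustrative user-space sketch (assumed fd; not part of this file): the
* structure filled in above is fetched with getsockopt():
*
*	#include <netinet/tcp.h>
*
*	struct tcp_info ti;
*	socklen_t len = sizeof(ti);
*	getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len);
*	(e.g. ti.tcpi_rtt is the smoothed RTT in microseconds)
*/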
static int do_tcp_getsockopt(struct sock *sk, int level,
int optname, char __user *optval, int __user *optlen)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int val, len;
if (get_user(len, optlen))
return -EFAULT;
len = min_t(unsigned int, len, sizeof(int));
if (len < 0)
return -EINVAL;
switch (optname) {
case TCP_MAXSEG:
val = tp->mss_cache;
if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
val = tp->rx_opt.user_mss;
break;
case TCP_NODELAY:
val = !!(tp->nonagle&TCP_NAGLE_OFF);
break;
case TCP_CORK:
val = !!(tp->nonagle&TCP_NAGLE_CORK);
break;
case TCP_KEEPIDLE:
val = keepalive_time_when(tp) / HZ;
break;
case TCP_KEEPINTVL:
val = keepalive_intvl_when(tp) / HZ;
break;
case TCP_KEEPCNT:
val = keepalive_probes(tp);
break;
case TCP_SYNCNT:
val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
break;
case TCP_LINGER2:
val = tp->linger2;
if (val >= 0)
val = (val ? : sysctl_tcp_fin_timeout) / HZ;
break;
case TCP_DEFER_ACCEPT:
val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
break;
case TCP_WINDOW_CLAMP:
val = tp->window_clamp;
break;
case TCP_INFO: {
struct tcp_info info;
if (get_user(len, optlen))
return -EFAULT;
tcp_get_info(sk, &info);
len = min_t(unsigned int, len, sizeof(info));
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &info, len))
return -EFAULT;
return 0;
}
case TCP_QUICKACK:
val = !icsk->icsk_ack.pingpong;
break;
case TCP_CONGESTION:
if (get_user(len, optlen))
return -EFAULT;
len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
return -EFAULT;
return 0;
case TCP_COOKIE_TRANSACTIONS: {
struct tcp_cookie_transactions ctd;
struct tcp_cookie_values *cvp = tp->cookie_values;
if (get_user(len, optlen))
return -EFAULT;
if (len < sizeof(ctd))
return -EINVAL;
memset(&ctd, 0, sizeof(ctd));
ctd.tcpct_flags = (tp->rx_opt.cookie_in_always ?
TCP_COOKIE_IN_ALWAYS : 0)
| (tp->rx_opt.cookie_out_never ?
TCP_COOKIE_OUT_NEVER : 0);
if (cvp != NULL) {
ctd.tcpct_flags |= (cvp->s_data_in ?
TCP_S_DATA_IN : 0)
| (cvp->s_data_out ?
TCP_S_DATA_OUT : 0);
ctd.tcpct_cookie_desired = cvp->cookie_desired;
ctd.tcpct_s_data_desired = cvp->s_data_desired;
memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0],
cvp->cookie_pair_size);
ctd.tcpct_used = cvp->cookie_pair_size;
}
if (put_user(sizeof(ctd), optlen))
return -EFAULT;
if (copy_to_user(optval, &ctd, sizeof(ctd)))
return -EFAULT;
return 0;
}
case TCP_THIN_LINEAR_TIMEOUTS:
val = tp->thin_lto;
break;
case TCP_THIN_DUPACK:
val = tp->thin_dupack;
break;
case TCP_USER_TIMEOUT:
val = jiffies_to_msecs(icsk->icsk_user_timeout);
break;
default:
return -ENOPROTOOPT;
}
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
int __user *optlen)
{
struct inet_connection_sock *icsk = inet_csk(sk);
if (level != SOL_TCP)
return icsk->icsk_af_ops->getsockopt(sk, level, optname,
optval, optlen);
return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(tcp_getsockopt);
#ifdef CONFIG_COMPAT
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level != SOL_TCP)
return inet_csk_compat_getsockopt(sk, level, optname,
optval, optlen);
return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif
struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct tcphdr *th;
unsigned thlen;
unsigned int seq;
__be32 delta;
unsigned int oldlen;
unsigned int mss;
if (!pskb_may_pull(skb, sizeof(*th)))
goto out;
th = tcp_hdr(skb);
thlen = th->doff * 4;
if (thlen < sizeof(*th))
goto out;
if (!pskb_may_pull(skb, thlen))
goto out;
oldlen = (u16)~skb->len;
__skb_pull(skb, thlen);
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
goto out;
if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
/* Packet is from an untrusted source, reset gso_segs. */
int type = skb_shinfo(skb)->gso_type;
if (unlikely(type &
~(SKB_GSO_TCPV4 |
SKB_GSO_DODGY |
SKB_GSO_TCP_ECN |
SKB_GSO_TCPV6 |
0) ||
!(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
goto out;
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
segs = NULL;
goto out;
}
segs = skb_segment(skb, features);
if (IS_ERR(segs))
goto out;
delta = htonl(oldlen + (thlen + mss));
skb = segs;
th = tcp_hdr(skb);
seq = ntohl(th->seq);
do {
th->fin = th->psh = 0;
th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
(__force u32)delta));
if (skb->ip_summed != CHECKSUM_PARTIAL)
th->check =
csum_fold(csum_partial(skb_transport_header(skb),
thlen, skb->csum));
seq += mss;
skb = skb->next;
th = tcp_hdr(skb);
th->seq = htonl(seq);
th->cwr = 0;
} while (skb->next);
delta = htonl(oldlen + (skb->tail - skb->transport_header) +
skb->data_len);
th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
(__force u32)delta));
if (skb->ip_summed != CHECKSUM_PARTIAL)
th->check = csum_fold(csum_partial(skb_transport_header(skb),
thlen, skb->csum));
out:
return segs;
}
EXPORT_SYMBOL(tcp_tso_segment);
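/*
* Checksum-fixup sketch (restating the arithmetic above, for clarity):
* the pseudo-header checksum covers the TCP length, so when a big skb of
* length L is cut into mss-sized pieces, each piece's new length is
* thlen + mss. oldlen == (u16)~L is "minus the old length" in
* ones-complement arithmetic, so adding htonl(oldlen + (thlen + mss)) to
* th->check subtracts the old length and adds the new one; the final
* segment gets the same treatment with its own, possibly shorter, length.
*/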
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
struct sk_buff **pp = NULL;
struct sk_buff *p;
struct tcphdr *th;
struct tcphdr *th2;
unsigned int len;
unsigned int thlen;
__be32 flags;
unsigned int mss = 1;
unsigned int hlen;
unsigned int off;
int flush = 1;
int i;
off = skb_gro_offset(skb);
hlen = off + sizeof(*th);
th = skb_gro_header_fast(skb, off);
if (skb_gro_header_hard(skb, hlen)) {
th = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!th))
goto out;
}
thlen = th->doff * 4;
if (thlen < sizeof(*th))
goto out;
hlen = off + thlen;
if (skb_gro_header_hard(skb, hlen)) {
th = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!th))
goto out;
}
skb_gro_pull(skb, thlen);
len = skb_gro_len(skb);
flags = tcp_flag_word(th);
for (; (p = *head); head = &p->next) {
if (!NAPI_GRO_CB(p)->same_flow)
continue;
th2 = tcp_hdr(p);
if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
goto found;
}
goto out_check_final;
found:
flush = NAPI_GRO_CB(p)->flush;
flush |= (__force int)(flags & TCP_FLAG_CWR);
flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
for (i = sizeof(*th); i < thlen; i += 4)
flush |= *(u32 *)((u8 *)th + i) ^
*(u32 *)((u8 *)th2 + i);
mss = skb_shinfo(p)->gso_size;
flush |= (len - 1) >= mss;
flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
if (flush || skb_gro_receive(head, skb)) {
mss = 1;
goto out_check_final;
}
p = *head;
th2 = tcp_hdr(p);
tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
out_check_final:
flush = len < mss;
flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
TCP_FLAG_RST | TCP_FLAG_SYN |
TCP_FLAG_FIN));
if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
pp = head;
out:
NAPI_GRO_CB(skb)->flush |= flush;
return pp;
}
EXPORT_SYMBOL(tcp_gro_receive);
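/*
 * Worked example (not part of the original source): with mss == 1448,
 * a held segment p covering seq 1000..2447 merges a new segment only
 * if the new seq is exactly 1000 + 1448 == 2448 (making the seq XOR
 * term above zero) and its payload is no larger than mss; a short
 * (len < mss) or flag-bearing segment still completes the flow via
 * out_check_final.
 */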
int tcp_gro_complete(struct sk_buff *skb)
{
struct tcphdr *th = tcp_hdr(skb);
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
if (th->cwr)
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);
#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool;
static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
{
int cpu;
for_each_possible_cpu(cpu) {
struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
if (p->md5_desc.tfm)
crypto_free_hash(p->md5_desc.tfm);
}
free_percpu(pool);
}
void tcp_free_md5sig_pool(void)
{
struct tcp_md5sig_pool __percpu *pool = NULL;
spin_lock_bh(&tcp_md5sig_pool_lock);
if (--tcp_md5sig_users == 0) {
pool = tcp_md5sig_pool;
tcp_md5sig_pool = NULL;
}
spin_unlock_bh(&tcp_md5sig_pool_lock);
if (pool)
__tcp_free_md5sig_pool(pool);
}
EXPORT_SYMBOL(tcp_free_md5sig_pool);
static struct tcp_md5sig_pool __percpu *
__tcp_alloc_md5sig_pool(struct sock *sk)
{
int cpu;
struct tcp_md5sig_pool __percpu *pool;
pool = alloc_percpu(struct tcp_md5sig_pool);
if (!pool)
return NULL;
for_each_possible_cpu(cpu) {
struct crypto_hash *hash;
hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
if (!hash || IS_ERR(hash))
goto out_free;
per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
}
return pool;
out_free:
__tcp_free_md5sig_pool(pool);
return NULL;
}
struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
{
struct tcp_md5sig_pool __percpu *pool;
int alloc = 0;
retry:
spin_lock_bh(&tcp_md5sig_pool_lock);
pool = tcp_md5sig_pool;
if (tcp_md5sig_users++ == 0) {
alloc = 1;
spin_unlock_bh(&tcp_md5sig_pool_lock);
} else if (!pool) {
tcp_md5sig_users--;
spin_unlock_bh(&tcp_md5sig_pool_lock);
cpu_relax();
goto retry;
} else
spin_unlock_bh(&tcp_md5sig_pool_lock);
if (alloc) {
/* we cannot hold spinlock here because this may sleep. */
struct tcp_md5sig_pool __percpu *p;
p = __tcp_alloc_md5sig_pool(sk);
spin_lock_bh(&tcp_md5sig_pool_lock);
if (!p) {
tcp_md5sig_users--;
spin_unlock_bh(&tcp_md5sig_pool_lock);
return NULL;
}
pool = tcp_md5sig_pool;
if (pool) {
/* oops, it has already been assigned. */
spin_unlock_bh(&tcp_md5sig_pool_lock);
__tcp_free_md5sig_pool(p);
} else {
tcp_md5sig_pool = pool = p;
spin_unlock_bh(&tcp_md5sig_pool_lock);
}
}
return pool;
}
EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
/**
* tcp_get_md5sig_pool - get md5sig_pool for this user
*
 * We use a percpu structure, so if we succeed, we exit with preemption
 * and BH disabled, to make sure another thread or softirq handler
 * won't try to get the same context.
*/
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
struct tcp_md5sig_pool __percpu *p;
local_bh_disable();
spin_lock(&tcp_md5sig_pool_lock);
p = tcp_md5sig_pool;
if (p)
tcp_md5sig_users++;
spin_unlock(&tcp_md5sig_pool_lock);
if (p)
return this_cpu_ptr(p);
local_bh_enable();
return NULL;
}
EXPORT_SYMBOL(tcp_get_md5sig_pool);
void tcp_put_md5sig_pool(void)
{
local_bh_enable();
tcp_free_md5sig_pool();
}
EXPORT_SYMBOL(tcp_put_md5sig_pool);
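/*
 * Illustrative usage sketch (not part of the original source): callers
 * pair tcp_get_md5sig_pool() with tcp_put_md5sig_pool() and must not
 * sleep in between, because the pool is returned with BH disabled.
 * The function name in the example is a hypothetical placeholder.
 */
#if 0	/* example only, not compiled */
static int example_hash_header(const struct tcphdr *th)
{
struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
int err;
if (!hp)
return -ENOMEM;		/* pool not allocated yet */
err = tcp_md5_hash_header(hp, th);
tcp_put_md5sig_pool();		/* re-enables BH */
return err;
}
#endif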
int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
const struct tcphdr *th)
{
struct scatterlist sg;
struct tcphdr hdr;
int err;
/* We are not allowed to change the tcphdr; make a local copy. */
memcpy(&hdr, th, sizeof(hdr));
hdr.check = 0;
/* options aren't included in the hash */
sg_init_one(&sg, &hdr, sizeof(hdr));
err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr));
return err;
}
EXPORT_SYMBOL(tcp_md5_hash_header);
int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
const struct sk_buff *skb, unsigned int header_len)
{
struct scatterlist sg;
const struct tcphdr *tp = tcp_hdr(skb);
struct hash_desc *desc = &hp->md5_desc;
unsigned i;
const unsigned head_data_len = skb_headlen(skb) > header_len ?
skb_headlen(skb) - header_len : 0;
const struct skb_shared_info *shi = skb_shinfo(skb);
struct sk_buff *frag_iter;
sg_init_table(&sg, 1);
sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
if (crypto_hash_update(desc, &sg, head_data_len))
return 1;
for (i = 0; i < shi->nr_frags; ++i) {
const struct skb_frag_struct *f = &shi->frags[i];
unsigned int offset = f->page_offset;
struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
sg_set_page(&sg, page, skb_frag_size(f),
offset_in_page(offset));
if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
return 1;
}
skb_walk_frags(skb, frag_iter)
if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
return 1;
return 0;
}
EXPORT_SYMBOL(tcp_md5_hash_skb_data);
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
{
struct scatterlist sg;
sg_init_one(&sg, key->key, key->keylen);
return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
}
EXPORT_SYMBOL(tcp_md5_hash_key);
#endif
/**
* Each Responder maintains up to two secret values concurrently for
* efficient secret rollover. Each secret value has 4 states:
*
* Generating. (tcp_secret_generating != tcp_secret_primary)
* Generates new Responder-Cookies, but not yet used for primary
* verification. This is a short-term state, typically lasting only
* one round trip time (RTT).
*
* Primary. (tcp_secret_generating == tcp_secret_primary)
* Used both for generation and primary verification.
*
* Retiring. (tcp_secret_retiring != tcp_secret_secondary)
* Used for verification, until the first failure that can be
* verified by the newer Generating secret. At that time, this
* cookie's state is changed to Secondary, and the Generating
* cookie's state is changed to Primary. This is a short-term state,
* typically lasting only one round trip time (RTT).
*
* Secondary. (tcp_secret_retiring == tcp_secret_secondary)
* Used for secondary verification, after primary verification
* failures. This state lasts no more than twice the Maximum Segment
* Lifetime (2MSL). Then, the secret is discarded.
*/
struct tcp_cookie_secret {
/* The secret is divided into two parts. The digest part is the
* equivalent of previously hashing a secret and saving the state,
* and serves as an initialization vector (IV). The message part
* serves as the trailing secret.
*/
u32 secrets[COOKIE_WORKSPACE_WORDS];
unsigned long expires;
};
#define TCP_SECRET_1MSL (HZ * TCP_PAWS_MSL)
#define TCP_SECRET_2MSL (HZ * TCP_PAWS_MSL * 2)
#define TCP_SECRET_LIFE (HZ * 600)
static struct tcp_cookie_secret tcp_secret_one;
static struct tcp_cookie_secret tcp_secret_two;
/* Essentially a circular list, without dynamic allocation. */
static struct tcp_cookie_secret *tcp_secret_generating;
static struct tcp_cookie_secret *tcp_secret_primary;
static struct tcp_cookie_secret *tcp_secret_retiring;
static struct tcp_cookie_secret *tcp_secret_secondary;
static DEFINE_SPINLOCK(tcp_secret_locker);
/* Select a pseudo-random word in the cookie workspace.
*/
static inline u32 tcp_cookie_work(const u32 *ws, const int n)
{
return ws[COOKIE_DIGEST_WORDS + ((COOKIE_MESSAGE_WORDS-1) & ws[n])];
}
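/*
 * Worked example (not part of the original source): the workspace is
 * COOKIE_DIGEST_WORDS digest words followed by COOKIE_MESSAGE_WORDS
 * message words. If COOKIE_MESSAGE_WORDS were 8 and ws[n] == 13, the
 * word returned is ws[COOKIE_DIGEST_WORDS + (13 & 7)], i.e. message
 * word 5, so the value of ws[n] itself steers which word is picked.
 */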
/* Fill bakery[COOKIE_WORKSPACE_WORDS] with generator, updating as needed.
* Called in softirq context.
* Returns: 0 for success.
*/
int tcp_cookie_generator(u32 *bakery)
{
unsigned long jiffy = jiffies;
if (unlikely(time_after_eq(jiffy, tcp_secret_generating->expires))) {
spin_lock_bh(&tcp_secret_locker);
if (!time_after_eq(jiffy, tcp_secret_generating->expires)) {
/* refreshed by another */
memcpy(bakery,
&tcp_secret_generating->secrets[0],
COOKIE_WORKSPACE_WORDS);
} else {
/* still needs refreshing */
get_random_bytes(bakery, COOKIE_WORKSPACE_WORDS);
/* The first time, paranoia assumes that the
* randomization function isn't as strong. But,
* this secret initialization is delayed until
* the last possible moment (packet arrival).
* Although that time is observable, it is
* unpredictably variable. Mash in the most
* volatile clock bits available, and expire the
* secret extra quickly.
*/
if (unlikely(tcp_secret_primary->expires ==
tcp_secret_secondary->expires)) {
struct timespec tv;
getnstimeofday(&tv);
bakery[COOKIE_DIGEST_WORDS+0] ^=
(u32)tv.tv_nsec;
tcp_secret_secondary->expires = jiffy
+ TCP_SECRET_1MSL
+ (0x0f & tcp_cookie_work(bakery, 0));
} else {
tcp_secret_secondary->expires = jiffy
+ TCP_SECRET_LIFE
+ (0xff & tcp_cookie_work(bakery, 1));
tcp_secret_primary->expires = jiffy
+ TCP_SECRET_2MSL
+ (0x1f & tcp_cookie_work(bakery, 2));
}
memcpy(&tcp_secret_secondary->secrets[0],
bakery, COOKIE_WORKSPACE_WORDS);
rcu_assign_pointer(tcp_secret_generating,
tcp_secret_secondary);
rcu_assign_pointer(tcp_secret_retiring,
tcp_secret_primary);
/*
* Neither call_rcu() nor synchronize_rcu() needed.
* Retiring data is not freed. It is replaced after
* further (locked) pointer updates, and a quiet time
* (minimum 1MSL, maximum LIFE - 2MSL).
*/
}
spin_unlock_bh(&tcp_secret_locker);
} else {
rcu_read_lock_bh();
memcpy(bakery,
&rcu_dereference(tcp_secret_generating)->secrets[0],
COOKIE_WORKSPACE_WORDS);
rcu_read_unlock_bh();
}
return 0;
}
EXPORT_SYMBOL(tcp_cookie_generator);
void tcp_done(struct sock *sk)
{
if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
tcp_set_state(sk, TCP_CLOSE);
tcp_clear_xmit_timers(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_state_change(sk);
else
inet_csk_destroy_sock(sk);
}
EXPORT_SYMBOL_GPL(tcp_done);
extern struct tcp_congestion_ops tcp_reno;
static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
if (!str)
return 0;
thash_entries = simple_strtoul(str, &str, 0);
return 1;
}
__setup("thash_entries=", set_thash_entries);
void tcp_init_mem(struct net *net)
{
unsigned long limit = nr_free_buffer_pages() / 8;
limit = max(limit, 128UL);
net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
net->ipv4.sysctl_tcp_mem[1] = limit;
net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
}
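/*
 * Worked example (not part of the original source): on a machine where
 * nr_free_buffer_pages() returns 262144 pages (1 GiB of 4 KiB pages),
 * limit = max(262144 / 8, 128UL) = 32768, so
 *   sysctl_tcp_mem[0] = 24576 pages   (low watermark, 3/4 of limit)
 *   sysctl_tcp_mem[1] = 32768 pages   (pressure threshold)
 *   sysctl_tcp_mem[2] = 49152 pages   (hard limit, twice the low mark)
 */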
void __init tcp_init(void)
{
struct sk_buff *skb = NULL;
unsigned long limit;
int max_rshare, max_wshare, cnt;
unsigned int i;
unsigned long jiffy = jiffies;
BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
percpu_counter_init(&tcp_sockets_allocated, 0);
percpu_counter_init(&tcp_orphan_count, 0);
tcp_hashinfo.bind_bucket_cachep =
kmem_cache_create("tcp_bind_bucket",
sizeof(struct inet_bind_bucket), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
/* Size and allocate the main established and bind bucket
* hash tables.
*
* The methodology is similar to that of the buffer cache.
*/
tcp_hashinfo.ehash =
alloc_large_system_hash("TCP established",
sizeof(struct inet_ehash_bucket),
thash_entries,
(totalram_pages >= 128 * 1024) ?
13 : 15,
0,
NULL,
&tcp_hashinfo.ehash_mask,
0,
thash_entries ? 0 : 512 * 1024);
for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) {
INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
}
if (inet_ehash_locks_alloc(&tcp_hashinfo))
panic("TCP: failed to alloc ehash_locks");
tcp_hashinfo.bhash =
alloc_large_system_hash("TCP bind",
sizeof(struct inet_bind_hashbucket),
tcp_hashinfo.ehash_mask + 1,
(totalram_pages >= 128 * 1024) ?
13 : 15,
0,
&tcp_hashinfo.bhash_size,
NULL,
0,
64 * 1024);
tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
spin_lock_init(&tcp_hashinfo.bhash[i].lock);
INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
}
cnt = tcp_hashinfo.ehash_mask + 1;
tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
sysctl_tcp_max_orphans = cnt / 2;
sysctl_max_syn_backlog = max(128, cnt / 256);
tcp_init_mem(&init_net);
/* Set per-socket limits to no more than 1/128 the pressure threshold */
limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
max_wshare = min(4UL*1024*1024, limit);
max_rshare = min(6UL*1024*1024, limit);
sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
sysctl_tcp_wmem[1] = 16*1024;
sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
sysctl_tcp_rmem[1] = 87380;
sysctl_tcp_rmem[2] = max(87380, max_rshare);
pr_info("Hash tables configured (established %u bind %u)\n",
tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
tcp_register_congestion_control(&tcp_reno);
memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets));
memset(&tcp_secret_two.secrets[0], 0, sizeof(tcp_secret_two.secrets));
tcp_secret_one.expires = jiffy; /* past due */
tcp_secret_two.expires = jiffy; /* past due */
tcp_secret_generating = &tcp_secret_one;
tcp_secret_primary = &tcp_secret_one;
tcp_secret_retiring = &tcp_secret_two;
tcp_secret_secondary = &tcp_secret_two;
tcp_tasklet_init();
}
static int tcp_is_local(struct net *net, __be32 addr) {
struct rtable *rt;
struct flowi4 fl4 = { .daddr = addr };
rt = ip_route_output_key(net, &fl4);
if (IS_ERR_OR_NULL(rt))
return 0;
return rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK);
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static int tcp_is_local6(struct net *net, struct in6_addr *addr) {
struct rt6_info *rt6 = rt6_lookup(net, addr, addr, 0, 0);
return rt6 && rt6->dst.dev && (rt6->dst.dev->flags & IFF_LOOPBACK);
}
#endif
/*
 * tcp_nuke_addr - destroy all sockets on the given local address.
 * If the local address is the unspecified address (0.0.0.0 or ::), destroy
 * all sockets with local addresses that are not configured.
 */
int tcp_nuke_addr(struct net *net, struct sockaddr *addr)
{
int family = addr->sa_family;
unsigned int bucket;
struct in_addr *in = NULL;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct in6_addr *in6 = NULL;
#endif
if (family == AF_INET) {
in = &((struct sockaddr_in *)addr)->sin_addr;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
} else if (family == AF_INET6) {
in6 = &((struct sockaddr_in6 *)addr)->sin6_addr;
#endif
} else {
return -EAFNOSUPPORT;
}
for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
struct hlist_nulls_node *node;
struct sock *sk;
spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
restart:
spin_lock_bh(lock);
sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
struct inet_sock *inet = inet_sk(sk);
if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
continue;
if (sock_flag(sk, SOCK_DEAD))
continue;
if (family == AF_INET) {
__be32 s4 = inet->inet_rcv_saddr;
if (s4 == LOOPBACK4_IPV6)
continue;
if (in->s_addr != s4 &&
!(in->s_addr == INADDR_ANY &&
!tcp_is_local(net, s4)))
continue;
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
if (family == AF_INET6) {
struct in6_addr *s6;
if (!inet->pinet6)
continue;
s6 = &inet->pinet6->rcv_saddr;
if (ipv6_addr_type(s6) == IPV6_ADDR_MAPPED)
continue;
if (!ipv6_addr_equal(in6, s6) &&
!(ipv6_addr_equal(in6, &in6addr_any) &&
!tcp_is_local6(net, s6)))
continue;
}
#endif
sock_hold(sk);
spin_unlock_bh(lock);
local_bh_disable();
bh_lock_sock(sk);
sk->sk_err = ETIMEDOUT;
sk->sk_error_report(sk);
tcp_done(sk);
bh_unlock_sock(sk);
local_bh_enable();
sock_put(sk);
goto restart;
}
spin_unlock_bh(lock);
}
return 0;
}
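/*
 * Illustrative usage sketch (not part of the original source): on
 * Android kernels this is typically reached through the SIOCKILLADDR
 * ioctl when an interface loses its address. Passing the unspecified
 * address destroys every socket whose bound address is no longer
 * configured, e.g. from inside a hypothetical caller function:
 */
#if 0	/* example only, not compiled */
struct sockaddr_in sin = {
.sin_family = AF_INET,
.sin_addr.s_addr = htonl(INADDR_ANY),
};
tcp_nuke_addr(&init_net, (struct sockaddr *)&sin);
#endif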
| gpl-2.0 |
legumbre/gdb-z80 | gdb/m68klinux-nat.c | 5 | 15682 | /* Motorola m68k native support for GNU/Linux.
Copyright (C) 1996, 1998, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
2008, 2009, 2010 Free Software Foundation, Inc.
This file is part of GDB.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
#include "defs.h"
#include "frame.h"
#include "inferior.h"
#include "language.h"
#include "gdbcore.h"
#include "gdb_string.h"
#include "regcache.h"
#include "target.h"
#include "linux-nat.h"
#include "m68k-tdep.h"
#include <sys/param.h>
#include <sys/dir.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <sys/procfs.h>
#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif
#include <sys/file.h>
#include "gdb_stat.h"
#include "floatformat.h"
#include "target.h"
/* Prototypes for supply_gregset etc. */
#include "gregset.h"
/* This table must line up with gdbarch_register_name in "m68k-tdep.c". */
static const int regmap[] =
{
PT_D0, PT_D1, PT_D2, PT_D3, PT_D4, PT_D5, PT_D6, PT_D7,
PT_A0, PT_A1, PT_A2, PT_A3, PT_A4, PT_A5, PT_A6, PT_USP,
PT_SR, PT_PC,
/* PT_FP0, ..., PT_FP7 */
21, 24, 27, 30, 33, 36, 39, 42,
/* PT_FPCR, PT_FPSR, PT_FPIAR */
45, 46, 47
};
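/* Illustrative note (not part of the original source): fetch_register
   and store_register below compute regaddr = 4 * regmap[regno], so
   e.g. FP0 (regmap value 21) lives at u-area offset 84 and, being a
   96-bit extended float, is transferred as three PTRACE_PEEKUSER
   longs.  */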
/* Which ptrace request retrieves which registers?
These apply to the corresponding SET requests as well. */
#define NUM_GREGS (18)
#define MAX_NUM_REGS (NUM_GREGS + 11)
int
getregs_supplies (int regno)
{
return 0 <= regno && regno < NUM_GREGS;
}
int
getfpregs_supplies (int regno)
{
return M68K_FP0_REGNUM <= regno && regno <= M68K_FPI_REGNUM;
}
/* Does the current host support the GETREGS request? */
int have_ptrace_getregs =
#ifdef HAVE_PTRACE_GETREGS
1
#else
0
#endif
;
/* Fetching registers directly from the U area, one at a time. */
/* Fetch one register. */
static void
fetch_register (struct regcache *regcache, int regno)
{
struct gdbarch *gdbarch = get_regcache_arch (regcache);
long regaddr;
int i;
char buf[MAX_REGISTER_SIZE];
int tid;
/* Overload thread id onto process id */
tid = TIDGET (inferior_ptid);
if (tid == 0)
tid = PIDGET (inferior_ptid); /* no thread id, just use process id */
regaddr = 4 * regmap[regno];
for (i = 0; i < register_size (gdbarch, regno); i += sizeof (long))
{
errno = 0;
*(long *) &buf[i] = ptrace (PTRACE_PEEKUSER, tid, regaddr, 0);
regaddr += sizeof (long);
if (errno != 0)
error (_("Couldn't read register %s (#%d): %s."),
gdbarch_register_name (gdbarch, regno),
regno, safe_strerror (errno));
}
regcache_raw_supply (regcache, regno, buf);
}
/* Fetch register values from the inferior.
If REGNO is negative, do this for all registers.
Otherwise, REGNO specifies which register (so we can save time). */
static void
old_fetch_inferior_registers (struct regcache *regcache, int regno)
{
if (regno >= 0)
{
fetch_register (regcache, regno);
}
else
{
for (regno = 0;
regno < gdbarch_num_regs (get_regcache_arch (regcache));
regno++)
{
fetch_register (regcache, regno);
}
}
}
/* Store one register. */
static void
store_register (const struct regcache *regcache, int regno)
{
struct gdbarch *gdbarch = get_regcache_arch (regcache);
long regaddr;
int i;
int tid;
char buf[MAX_REGISTER_SIZE];
/* Overload thread id onto process id */
tid = TIDGET (inferior_ptid);
if (tid == 0)
tid = PIDGET (inferior_ptid); /* no thread id, just use process id */
regaddr = 4 * regmap[regno];
/* Put the contents of regno into a local buffer */
regcache_raw_collect (regcache, regno, buf);
/* Store the local buffer into the inferior a chunk at the time. */
for (i = 0; i < register_size (gdbarch, regno); i += sizeof (long))
{
errno = 0;
ptrace (PTRACE_POKEUSER, tid, regaddr, *(long *) &buf[i]);
regaddr += sizeof (long);
if (errno != 0)
error (_("Couldn't write register %s (#%d): %s."),
gdbarch_register_name (gdbarch, regno),
regno, safe_strerror (errno));
}
}
/* Store our register values back into the inferior.
If REGNO is negative, do this for all registers.
Otherwise, REGNO specifies which register (so we can save time). */
static void
old_store_inferior_registers (const struct regcache *regcache, int regno)
{
if (regno >= 0)
{
store_register (regcache, regno);
}
else
{
for (regno = 0;
regno < gdbarch_num_regs (get_regcache_arch (regcache));
regno++)
{
store_register (regcache, regno);
}
}
}
/* Given a pointer to a general register set in /proc format
(elf_gregset_t *), unpack the register contents and supply
them as gdb's idea of the current register values. */
void
supply_gregset (struct regcache *regcache, const elf_gregset_t *gregsetp)
{
struct gdbarch *gdbarch = get_regcache_arch (regcache);
const elf_greg_t *regp = (const elf_greg_t *) gregsetp;
int regi;
for (regi = M68K_D0_REGNUM;
regi <= gdbarch_sp_regnum (gdbarch);
regi++)
regcache_raw_supply (regcache, regi, &regp[regmap[regi]]);
regcache_raw_supply (regcache, gdbarch_ps_regnum (gdbarch),
&regp[PT_SR]);
regcache_raw_supply (regcache,
gdbarch_pc_regnum (gdbarch), &regp[PT_PC]);
}
/* Fill register REGNO (if it is a general-purpose register) in
*GREGSETPS with the value in GDB's register array. If REGNO is -1,
do this for all registers. */
void
fill_gregset (const struct regcache *regcache,
elf_gregset_t *gregsetp, int regno)
{
elf_greg_t *regp = (elf_greg_t *) gregsetp;
int i;
for (i = 0; i < NUM_GREGS; i++)
if (regno == -1 || regno == i)
regcache_raw_collect (regcache, i, regp + regmap[i]);
}
#ifdef HAVE_PTRACE_GETREGS
/* Fetch all general-purpose registers from process/thread TID and
store their values in GDB's register array. */
static void
fetch_regs (struct regcache *regcache, int tid)
{
elf_gregset_t regs;
if (ptrace (PTRACE_GETREGS, tid, 0, (int) &regs) < 0)
{
if (errno == EIO)
{
/* The kernel we're running on doesn't support the GETREGS
request. Reset `have_ptrace_getregs'. */
have_ptrace_getregs = 0;
return;
}
perror_with_name (_("Couldn't get registers"));
}
supply_gregset (regcache, (const elf_gregset_t *) &regs);
}
/* Store all valid general-purpose registers in GDB's register array
into the process/thread specified by TID. */
static void
store_regs (const struct regcache *regcache, int tid, int regno)
{
elf_gregset_t regs;
if (ptrace (PTRACE_GETREGS, tid, 0, (int) &regs) < 0)
perror_with_name (_("Couldn't get registers"));
fill_gregset (regcache, &regs, regno);
if (ptrace (PTRACE_SETREGS, tid, 0, (int) &regs) < 0)
perror_with_name (_("Couldn't write registers"));
}
#else
static void fetch_regs (struct regcache *regcache, int tid) {}
static void store_regs (const struct regcache *regcache, int tid, int regno) {}
#endif
/* Transferring floating-point registers between GDB, inferiors and cores. */
/* What is the address of fpN within the floating-point register set F? */
#define FPREG_ADDR(f, n) (&(f)->fpregs[(n) * 3])
/* Fill GDB's register array with the floating-point register values in
*FPREGSETP. */
void
supply_fpregset (struct regcache *regcache, const elf_fpregset_t *fpregsetp)
{
struct gdbarch *gdbarch = get_regcache_arch (regcache);
int regi;
for (regi = gdbarch_fp0_regnum (gdbarch);
regi < gdbarch_fp0_regnum (gdbarch) + 8; regi++)
regcache_raw_supply (regcache, regi,
FPREG_ADDR (fpregsetp,
regi - gdbarch_fp0_regnum (gdbarch)));
regcache_raw_supply (regcache, M68K_FPC_REGNUM, &fpregsetp->fpcntl[0]);
regcache_raw_supply (regcache, M68K_FPS_REGNUM, &fpregsetp->fpcntl[1]);
regcache_raw_supply (regcache, M68K_FPI_REGNUM, &fpregsetp->fpcntl[2]);
}
/* Fill register REGNO (if it is a floating-point register) in
*FPREGSETP with the value in GDB's register array. If REGNO is -1,
do this for all registers. */
void
fill_fpregset (const struct regcache *regcache,
elf_fpregset_t *fpregsetp, int regno)
{
struct gdbarch *gdbarch = get_regcache_arch (regcache);
int i;
/* Fill in the floating-point registers. */
for (i = gdbarch_fp0_regnum (gdbarch);
i < gdbarch_fp0_regnum (gdbarch) + 8; i++)
if (regno == -1 || regno == i)
regcache_raw_collect (regcache, i,
FPREG_ADDR (fpregsetp,
i - gdbarch_fp0_regnum (gdbarch)));
/* Fill in the floating-point control registers. */
for (i = M68K_FPC_REGNUM; i <= M68K_FPI_REGNUM; i++)
if (regno == -1 || regno == i)
regcache_raw_collect (regcache, i,
&fpregsetp->fpcntl[i - M68K_FPC_REGNUM]);
}
#ifdef HAVE_PTRACE_GETREGS
/* Fetch all floating-point registers from process/thread TID and store
their values in GDB's register array. */
static void
fetch_fpregs (struct regcache *regcache, int tid)
{
elf_fpregset_t fpregs;
if (ptrace (PTRACE_GETFPREGS, tid, 0, (int) &fpregs) < 0)
perror_with_name (_("Couldn't get floating point status"));
supply_fpregset (regcache, (const elf_fpregset_t *) &fpregs);
}
/* Store all valid floating-point registers in GDB's register array
into the process/thread specified by TID. */
static void
store_fpregs (const struct regcache *regcache, int tid, int regno)
{
elf_fpregset_t fpregs;
if (ptrace (PTRACE_GETFPREGS, tid, 0, (int) &fpregs) < 0)
perror_with_name (_("Couldn't get floating point status"));
fill_fpregset (regcache, &fpregs, regno);
if (ptrace (PTRACE_SETFPREGS, tid, 0, (int) &fpregs) < 0)
perror_with_name (_("Couldn't write floating point status"));
}
#else
static void fetch_fpregs (struct regcache *regcache, int tid) {}
static void store_fpregs (const struct regcache *regcache, int tid, int regno) {}
#endif
/* Transferring arbitrary registers between GDB and inferior. */
/* Fetch register REGNO from the child process. If REGNO is -1, do
this for all registers (including the floating point and SSE
registers). */
static void
m68k_linux_fetch_inferior_registers (struct target_ops *ops,
struct regcache *regcache, int regno)
{
int tid;
/* Use the old method of peeking around in `struct user' if the
GETREGS request isn't available. */
if (! have_ptrace_getregs)
{
old_fetch_inferior_registers (regcache, regno);
return;
}
/* GNU/Linux LWP ID's are process ID's. */
tid = TIDGET (inferior_ptid);
if (tid == 0)
tid = PIDGET (inferior_ptid); /* Not a threaded program. */
/* Use the PTRACE_GETREGS and PTRACE_GETFPREGS requests whenever
possible, since they transfer more registers in one system call,
and we'll cache the results. But remember that fetch_regs can
fail, and reset `have_ptrace_getregs'. */
if (regno == -1)
{
fetch_regs (regcache, tid);
/* The call above might reset `have_ptrace_getregs'. */
if (! have_ptrace_getregs)
{
old_fetch_inferior_registers (regcache, -1);
return;
}
fetch_fpregs (regcache, tid);
return;
}
if (getregs_supplies (regno))
{
fetch_regs (regcache, tid);
return;
}
if (getfpregs_supplies (regno))
{
fetch_fpregs (regcache, tid);
return;
}
internal_error (__FILE__, __LINE__,
_("Got request for bad register number %d."), regno);
}
/* Store register REGNO back into the child process. If REGNO is -1,
do this for all registers (including the floating point and SSE
registers). */
static void
m68k_linux_store_inferior_registers (struct target_ops *ops,
struct regcache *regcache, int regno)
{
int tid;
/* Use the old method of poking around in `struct user' if the
SETREGS request isn't available. */
if (! have_ptrace_getregs)
{
old_store_inferior_registers (regcache, regno);
return;
}
/* GNU/Linux LWP ID's are process ID's. */
tid = TIDGET (inferior_ptid);
if (tid == 0)
tid = PIDGET (inferior_ptid); /* Not a threaded program. */
/* Use the PTRACE_SETREGS and PTRACE_SETFPREGS requests whenever
possible, since they transfer more registers in one system call. */
if (regno == -1)
{
store_regs (regcache, tid, regno);
store_fpregs (regcache, tid, regno);
return;
}
if (getregs_supplies (regno))
{
store_regs (regcache, tid, regno);
return;
}
if (getfpregs_supplies (regno))
{
store_fpregs (regcache, tid, regno);
return;
}
internal_error (__FILE__, __LINE__,
_("Got request to store bad register number %d."), regno);
}
/* Interpreting register set info found in core files. */
/* Provide registers to GDB from a core file.
(We can't use the generic version of this function in
core-regset.c, because we need to use elf_gregset_t instead of
gregset_t.)
CORE_REG_SECT points to an array of bytes, which are the contents
of a `note' from a core file which BFD thinks might contain
register contents. CORE_REG_SIZE is its size.
WHICH says which register set corelow suspects this is:
0 --- the general-purpose register set, in elf_gregset_t format
2 --- the floating-point register set, in elf_fpregset_t format
REG_ADDR isn't used on GNU/Linux. */
static void
fetch_core_registers (struct regcache *regcache,
char *core_reg_sect, unsigned core_reg_size,
int which, CORE_ADDR reg_addr)
{
elf_gregset_t gregset;
elf_fpregset_t fpregset;
switch (which)
{
case 0:
if (core_reg_size != sizeof (gregset))
warning (_("Wrong size gregset in core file."));
else
{
memcpy (&gregset, core_reg_sect, sizeof (gregset));
supply_gregset (regcache, (const elf_gregset_t *) &gregset);
}
break;
case 2:
if (core_reg_size != sizeof (fpregset))
warning (_("Wrong size fpregset in core file."));
else
{
memcpy (&fpregset, core_reg_sect, sizeof (fpregset));
supply_fpregset (regcache, (const elf_fpregset_t *) &fpregset);
}
break;
default:
/* We've covered all the kinds of registers we know about here,
so this must be something we wouldn't know what to do with
anyway. Just ignore it. */
break;
}
}
/* Register that we are able to handle GNU/Linux ELF core file
formats. */
static struct core_fns linux_elf_core_fns =
{
bfd_target_elf_flavour, /* core_flavour */
default_check_format, /* check_format */
default_core_sniffer, /* core_sniffer */
fetch_core_registers, /* core_read_registers */
NULL /* next */
};
void _initialize_m68k_linux_nat (void);
void
_initialize_m68k_linux_nat (void)
{
struct target_ops *t;
/* Fill in the generic GNU/Linux methods. */
t = linux_target ();
/* Add our register access methods. */
t->to_fetch_registers = m68k_linux_fetch_inferior_registers;
t->to_store_registers = m68k_linux_store_inferior_registers;
/* Register the target. */
linux_nat_add_target (t);
deprecated_add_core_fns (&linux_elf_core_fns);
}
| gpl-2.0 |
namgk/kernel-tut | drivers/oprofile/cpu_buffer.c | 5 | 7080 | /**
* @file cpu_buffer.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*
* Each CPU has a local buffer that stores PC value/event
* pairs. We also log context switches when we notice them.
* Eventually each CPU's buffer is processed into the global
* event buffer by sync_buffer().
*
* We use a local buffer for two reasons: an NMI or similar
* interrupt cannot synchronise, and high sampling rates
* would lead to catastrophic global synchronisation if
* a global buffer was used.
*/
#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"
DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
static void wq_sync_buffer(struct work_struct *work);
#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;
void free_cpu_buffers(void)
{
int i;
for_each_online_cpu(i)
vfree(per_cpu(cpu_buffer, i).buffer);
}
int alloc_cpu_buffers(void)
{
int i;
unsigned long buffer_size = fs_cpu_buffer_size;
for_each_online_cpu(i) {
struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
cpu_to_node(i));
if (!b->buffer)
goto fail;
b->last_task = NULL;
b->last_is_kernel = -1;
b->tracing = 0;
b->buffer_size = buffer_size;
b->tail_pos = 0;
b->head_pos = 0;
b->sample_received = 0;
b->sample_lost_overflow = 0;
b->backtrace_aborted = 0;
b->sample_invalid_eip = 0;
b->cpu = i;
INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
}
return 0;
fail:
free_cpu_buffers();
return -ENOMEM;
}
void start_cpu_work(void)
{
int i;
work_enabled = 1;
for_each_online_cpu(i) {
struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
/*
* Spread the work by 1 jiffy per cpu so they don't all
* fire at once.
*/
schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
}
}
void end_cpu_work(void)
{
int i;
work_enabled = 0;
for_each_online_cpu(i) {
struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
cancel_delayed_work(&b->work);
}
flush_scheduled_work();
}
/* Resets the cpu buffer to a sane state. */
void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
{
/* reset these to invalid values; the next sample
* collected will populate the buffer with proper
* values to initialize the buffer
*/
cpu_buf->last_is_kernel = -1;
cpu_buf->last_task = NULL;
}
/* compute number of available slots in cpu_buffer queue */
static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
{
unsigned long head = b->head_pos;
unsigned long tail = b->tail_pos;
if (tail > head)
return (tail - head) - 1;
return tail + (b->buffer_size - head) - 1;
}
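/*
 * Worked example (not part of the original source): with
 * buffer_size == 8, head == 6 and tail == 2, the writer may still
 * fill slots 6, 7 and 0:
 *   tail + (buffer_size - head) - 1  ==  2 + (8 - 6) - 1  ==  3.
 * One slot is always left empty so that a full buffer can be told
 * apart from an empty one.
 */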
static void increment_head(struct oprofile_cpu_buffer * b)
{
unsigned long new_head = b->head_pos + 1;
/* Ensure anything written to the slot before we
* increment is visible */
wmb();
if (new_head < b->buffer_size)
b->head_pos = new_head;
else
b->head_pos = 0;
}
static inline void
add_sample(struct oprofile_cpu_buffer * cpu_buf,
unsigned long pc, unsigned long event)
{
struct op_sample * entry = &cpu_buf->buffer[cpu_buf->head_pos];
entry->eip = pc;
entry->event = event;
increment_head(cpu_buf);
}
static inline void
add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
{
add_sample(buffer, ESCAPE_CODE, value);
}
/* This must be safe from any context. It's safe writing here
* because of the head/tail separation of the writer and reader
* of the CPU buffer.
*
* is_kernel is needed because on some architectures you cannot
* tell if you are in kernel or user space simply by looking at
* pc. We tag this in the buffer by generating kernel enter/exit
* events whenever is_kernel changes
*/
static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
int is_kernel, unsigned long event)
{
struct task_struct * task;
cpu_buf->sample_received++;
if (pc == ESCAPE_CODE) {
cpu_buf->sample_invalid_eip++;
return 0;
}
if (nr_available_slots(cpu_buf) < 3) {
cpu_buf->sample_lost_overflow++;
return 0;
}
is_kernel = !!is_kernel;
task = current;
/* notice a switch from user->kernel or vice versa */
if (cpu_buf->last_is_kernel != is_kernel) {
cpu_buf->last_is_kernel = is_kernel;
add_code(cpu_buf, is_kernel);
}
/* notice a task switch */
if (cpu_buf->last_task != task) {
cpu_buf->last_task = task;
add_code(cpu_buf, (unsigned long)task);
}
add_sample(cpu_buf, pc, event);
return 1;
}
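/*
 * Illustrative buffer layout (not part of the original source): a
 * user->kernel transition plus a task switch followed by one sample
 * is logged as three entries,
 *   { ESCAPE_CODE, 1 }                    kernel-enter marker
 *   { ESCAPE_CODE, (unsigned long)task }  task-switch marker
 *   { pc, event }                         the sample itself
 * which is why log_sample() above demands at least 3 free slots.
 */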
static int oprofile_begin_trace(struct oprofile_cpu_buffer * cpu_buf)
{
if (nr_available_slots(cpu_buf) < 4) {
cpu_buf->sample_lost_overflow++;
return 0;
}
add_code(cpu_buf, CPU_TRACE_BEGIN);
cpu_buf->tracing = 1;
return 1;
}
static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
{
cpu_buf->tracing = 0;
}
void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
unsigned long event, int is_kernel)
{
struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
if (!backtrace_depth) {
log_sample(cpu_buf, pc, is_kernel, event);
return;
}
if (!oprofile_begin_trace(cpu_buf))
return;
/* if log_sample() fail we can't backtrace since we lost the source
* of this event */
if (log_sample(cpu_buf, pc, is_kernel, event))
oprofile_ops.backtrace(regs, backtrace_depth);
oprofile_end_trace(cpu_buf);
}
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
int is_kernel = !user_mode(regs);
unsigned long pc = profile_pc(regs);
oprofile_add_ext_sample(pc, regs, event, is_kernel);
}
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
log_sample(cpu_buf, pc, is_kernel, event);
}
void oprofile_add_trace(unsigned long pc)
{
struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
if (!cpu_buf->tracing)
return;
if (nr_available_slots(cpu_buf) < 1) {
cpu_buf->tracing = 0;
cpu_buf->sample_lost_overflow++;
return;
}
/* A broken frame can give an eip with the same value as an escape code;
* abort the trace if we get it. */
if (pc == ESCAPE_CODE) {
cpu_buf->tracing = 0;
cpu_buf->backtrace_aborted++;
return;
}
add_sample(cpu_buf, pc, 0);
}
/*
* This serves to avoid cpu buffer overflow, and makes sure
* the task mortuary progresses
*
* By using schedule_delayed_work_on and then schedule_delayed_work
* we guarantee this will stay on the correct cpu
*/
static void wq_sync_buffer(struct work_struct *work)
{
struct oprofile_cpu_buffer * b =
container_of(work, struct oprofile_cpu_buffer, work.work);
if (b->cpu != smp_processor_id()) {
printk("WQ on CPU%d, prefer CPU%d\n",
smp_processor_id(), b->cpu);
}
sync_buffer(b->cpu);
/* don't re-add the work if we're shutting down */
if (work_enabled)
schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}
| gpl-2.0 |
mgottschlag/kwin-tiling | libs/kephal/kephal/screens.cpp | 5 | 4583 | /*
* Copyright 2008 Aike J Sommer <dev@aikesommer.name>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2,
* or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details
*
* You should have received a copy of the GNU Library General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "screens.h"
#include "configurations.h"
#include <QApplication>
#include <QDesktopWidget>
#ifdef SCREENS_FACTORY
void SCREENS_FACTORY();
#endif
#include <QDebug>
namespace Kephal {
Screens * Screens::self() {
#ifdef SCREENS_FACTORY
if (Screens::s_instance == 0) {
SCREENS_FACTORY();
}
#endif
return Screens::s_instance;
}
Screens::Screens(QObject * parent)
: QObject(parent)
{
Screens::s_instance = this;
}
Screens::~Screens()
{
Screens::s_instance = 0;
}
Screen * Screens::screen(int id) {
foreach (Screen * screen, screens()) {
if (screen->id() == id) {
return screen;
}
}
return 0;
}
Screen * Screens::primaryScreen()
{
#if 1
return screen(QApplication::desktop()->primaryScreen());
#else
Configuration * config = Configurations::self()->activeConfiguration();
if (! config) {
return 0;
}
int id = config->primaryScreen();
return screen(id);
#endif
}
Screens * Screens::s_instance = 0;
Screen::Screen(QObject * parent)
: QObject(parent)
{
}
QRect Screen::geom() {
return QRect(position(), size());
}
bool Screen::isPrimary() const {
return Screens::self()->primaryScreen() == this;
}
int ScreenUtils::numScreens() {
return Screens::self()->screens().size();
}
QRect ScreenUtils::screenGeometry(int id) {
if (id >= numScreens())
return QRect();
if (id == -1)
return QApplication::desktop()->screenGeometry();
else
return Screens::self()->screen(id)->geom();
}
QSize ScreenUtils::screenSize(int id) {
if (id >= numScreens())
return QSize();
if (id == -1)
return QApplication::desktop()->screenGeometry().size();
else
return Screens::self()->screen(id)->size();
}
QRect ScreenUtils::desktopGeometry() {
//QApplication::desktop()->geometry() was used before,
//but returns the wrong size just after a screen has been added
QRect desktopRect;
for(int i = 0; i < numScreens(); i++){
desktopRect |= screenGeometry(i);
}
return desktopRect;
}
int ScreenUtils::distance(const QRect & r, const QPoint & p) {
if (! r.isValid()) {
return p.manhattanLength();
} else if (r.contains(p)) {
return 0;
} else if (p.x() >= r.left() && p.x() <= r.right()) {
return p.y() < r.top() ? (r.top() - p.y()) : (p.y() - r.bottom());
} else if (p.y() >= r.top() && p.y() <= r.bottom()) {
return p.x() < r.left() ? (r.left() - p.x()) : (p.x() - r.right());
} else if (p.x() < r.left()) {
return ((p.y() < r.top() ? r.topLeft() : r.bottomLeft()) - p).manhattanLength();
} else {
return ((p.y() < r.top() ? r.topRight() : r.bottomRight()) - p).manhattanLength();
}
}
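/*
 * Worked example (not part of the original source): for
 * r = QRect(0, 0, 100, 100) and p = QPoint(150, -20), p lies beyond
 * the right edge and above the top, so the last branch measures from
 * r.topRight() == QPoint(99, 0):
 * (QPoint(99, 0) - p).manhattanLength() == 51 + 20 == 71.
 */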
int ScreenUtils::screenId(QPoint p) {
if (numScreens() == 0) {
return 0;
}
int minDist = distance(screenGeometry(0), p);
int minScreen = 0;
for(int i = 1; i < numScreens() && minDist > 0; i++) {
int dist = distance(screenGeometry(i), p);
if (dist < minDist) {
minDist = dist;
minScreen = i;
}
}
return minScreen;
}
int ScreenUtils::primaryScreenId() {
if (!Screens::self()->primaryScreen()) {
return 0;
}
return Screens::self()->primaryScreen()->id();
}
}
| gpl-2.0 |
lifeinoppo/littlefishlet-scode | RES/REF/linux_1/kernel/blk_drv/hd.c | 5 | 17551 | /*
* linux/kernel/hd.c
*
* (C) 1991 Linus Torvalds
*/
/*
* This is the low-level hd interrupt support. It traverses the
* request-list, using interrupts to jump between functions. As
* all the functions are called within interrupts, we may not
* sleep. Special care is recommended.
*
* modified by Drew Eckhardt to check nr of hd's from the CMOS.
*/
#include <linux/config.h> // Kernel configuration header: defines keyboard language and hard disk type (HD_TYPE) options.
#include <linux/sched.h> // Scheduler header: defines the task structure task_struct, the data of initial task 0,
// and some embedded assembly macros for setting/getting descriptor parameters.
#include <linux/fs.h> // File system header: defines the file table structures (file, buffer_head, m_inode, etc.).
#include <linux/kernel.h> // Kernel header: prototypes of some commonly used kernel functions.
#include <linux/hdreg.h> // Hard disk parameter header: defines hard disk register ports, status codes, the partition table, etc.
#include <asm/system.h> // System header: defines embedded assembly macros for setting/modifying descriptors, interrupt gates, etc.
#include <asm/io.h> // I/O header: defines embedded assembly macros for hardware port input/output.
#include <asm/segment.h> // Segment operation header: defines embedded assembly functions for segment register operations.
#define MAJOR_NR 3 // The major device number of the hard disk is 3.
#include "blk.h" // Block device header: defines the request structure, block device structures, macros, etc.
// Macro function that reads a CMOS parameter byte.
#define CMOS_READ(addr) ({ \
outb_p (0x80 | addr, 0x70); \
inb_p (0x71); \
})
/* Max read/write errors/sector */
#define MAX_ERRORS 7 // Maximum number of errors allowed when reading/writing one sector.
#define MAX_HD 2 // Maximum number of hard disks the system supports.
static void recal_intr (void); // Recalibration handler invoked by the hard disk interrupt routine during a reset (defined below).
static int recalibrate = 1; // Recalibration flag.
static int reset = 1; // Reset flag.
/*
* This struct defines the HD's and their types.
*/
// Fields: number of heads, sectors per track, cylinders, write
// precompensation cylinder, landing zone cylinder, control byte.
struct hd_i_struct
{
int head, sect, cyl, wpcom, lzone, ctl;
};
#ifdef HD_TYPE // If HD_TYPE has been defined in include/linux/config.h,
struct hd_i_struct hd_info[] = { HD_TYPE }; // use the predefined values as the hd_info[] data.
#define NR_HD ((sizeof (hd_info))/(sizeof (struct hd_i_struct))) // Compute the number of hard disks.
#else // Otherwise, set everything to 0.
struct hd_i_struct hd_info[] = { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} };
static int NR_HD = 0;
#endif
// Define the hard disk partition structure, giving each partition's physical
// starting sector number and total sector count. Entries at multiples of 5
// (e.g. hd[0] and hd[5]) represent the parameters of the whole disk.
static struct hd_struct
{
long start_sect;
long nr_sects;
} hd[5 * MAX_HD] = { {0, 0}, };
// Read nr words from port into buf.
#define port_read(port,buf,nr) \
__asm__( "cld;rep;insw":: "d" (port), "D" (buf), "c" (nr): "cx", "di")
// Write nr words from buf to port.
#define port_write(port,buf,nr) \
__asm__( "cld;rep;outsw":: "d" (port), "S" (buf), "c" (nr): "cx", "si")
extern void hd_interrupt (void);
extern void rd_load (void);
/* This may be used only once, enforced by 'static int callable' */
// The argument BIOS is set by the init() subroutine in init/main.c to point
// at 0x90080, where setup.s stored the basic parameter tables (32 bytes) of
// the two hard disks obtained from the BIOS. This function reads the CMOS
// and the hard disk parameter tables to set up the hd[] partition structure,
// then loads the RAM disk image and mounts the root file system.
int sys_setup (void *BIOS)
{
static int callable = 1;
int i, drive;
unsigned char cmos_disks;
struct partition *p;
struct buffer_head *bh;
// callable is 1 at initialization; set it to 0 on entry so that this
// function can only be executed once.
if (!callable)
return -1;
callable = 0;
// If no hard disk parameters were defined in config.h, read them from
// the table at 0x90080.
#ifndef HD_TYPE
for (drive = 0; drive < 2; drive++)
{
hd_info[drive].cyl = *(unsigned short *) BIOS; // Number of cylinders.
hd_info[drive].head = *(unsigned char *) (2 + BIOS); // Number of heads.
hd_info[drive].wpcom = *(unsigned short *) (5 + BIOS); // Write precompensation cylinder.
hd_info[drive].ctl = *(unsigned char *) (8 + BIOS); // Control byte.
hd_info[drive].lzone = *(unsigned short *) (12 + BIOS); // Landing zone cylinder.
hd_info[drive].sect = *(unsigned char *) (14 + BIOS); // Sectors per track.
BIOS += 16; // Each parameter table is 16 bytes; point BIOS at the next table.
}
// When setup.s fetched the BIOS hard disk parameter tables, it zeroed all
// 16 bytes of the second table if only one disk is present, so a non-zero
// cylinder count for the second disk means a second disk exists.
if (hd_info[1].cyl)
NR_HD = 2; // Two hard disks.
else
NR_HD = 1;
#endif
// Set the starting sector number and total sector count of each whole
// disk (the entries at index i*5; see the notes after the listing).
for (i = 0; i < NR_HD; i++)
{
hd[i * 5].start_sect = 0; // Starting sector of the whole disk.
hd[i * 5].nr_sects = hd_info[i].head * hd_info[i].sect * hd_info[i].cyl; // Total sectors on the disk.
}
/*
We query CMOS about hard disks: it could be that
we have a SCSI/ESDI/etc controller that is BIOS
compatible with ST-506, and thus showing up in our
BIOS table, but not register compatible, and therefore
not present in CMOS.
Furthermore, we will assume that our ST-506 drives
<if any> are the primary drives in the system, and
the ones reflected as drive 1 or 2.
The first drive is stored in the high nibble of CMOS
byte 0x12, the second in the low nibble. This will be
either a 4 bit drive type or 0xf indicating use byte 0x19
for an 8 bit type, drive 1, 0x1a for drive 2 in CMOS.
Needless to say, a non-zero value means we have
an AT controller hard disk for that drive.
*/
// Per the explanation above, check whether each disk really is AT-controller
// compatible (for details on the CMOS information see section 4.2.3.1).
if ((cmos_disks = CMOS_READ (0x12)) & 0xf0)
if (cmos_disks & 0x0f)
NR_HD = 2;
else
NR_HD = 1;
else
NR_HD = 0;
// If NR_HD == 0, neither disk is AT-controller compatible and the disk data
// structures are cleared. If NR_HD == 1, clear the second disk's parameters.
for (i = NR_HD; i < 2; i++)
{
hd[i * 5].start_sect = 0;
hd[i * 5].nr_sects = 0;
}
// Read the first block of each hard disk (to get the partition table in its
// first sector). First bread() (fs/buffer.c) reads block 0 of the disk; the
// 0x300 in its argument is the hard disk major device number (see the notes
// after the listing). The partition table at offset 0x1BE of the first
// sector is valid only if the two bytes at offset 0x1fe hold the signature
// '55AA'. Finally the partition entries are copied into the hard disk
// partition structure hd[].
for (drive = 0; drive < NR_HD; drive++)
{
if (!(bh = bread (0x300 + drive * 5, 0)))
{ // 0x300 and 0x305 are the logical device numbers of the two disks.
printk ("Unable to read partition table of drive %d\n\r", drive);
panic ("");
}
if (bh->b_data[510] != 0x55 || (unsigned char) bh->b_data[511] != 0xAA)
{ // Check the '55AA' signature that marks a valid partition table.
printk ("Bad partition table on drive %d\n\r", drive);
panic ("");
}
p = 0x1BE + (void *) bh->b_data; // The partition table is at offset 0x1BE of the first sector.
for (i = 1; i < 5; i++, p++)
{
hd[i + 5 * drive].start_sect = p->start_sect;
hd[i + 5 * drive].nr_sects = p->nr_sects;
}
brelse (bh); // Release the memory buffer page that held the disk block.
}
if (NR_HD) // If disks exist and their partition tables were read, print a message.
printk ("Partition table%s ok.\n\r", (NR_HD > 1) ? "s" : "");
rd_load (); // Load (create) the RAM disk (kernel/blk_drv/ramdisk.c, line 71).
mount_root (); // Mount the root file system (fs/super.c, line 242).
return (0);
}
//// Test for and loop waiting until the drive is ready.
// Reads the hard disk controller status register port HD_STATUS (0x1f7) and
// loops checking the drive-ready bit and the controller-busy bit.
static int controller_ready (void)
{
int retries = 10000;
while (--retries && (inb_p (HD_STATUS) & 0xc0) != 0x40);
return (retries); // Return the number of retries left.
}
//// Check the status after a hard disk command has executed ("win_" is short
// for Winchester disk).
// Reads the command result from the status register. Returns 0 if normal,
// 1 on error; if the command failed, the error register HD_ERROR (0x1f1)
// is read as well.
static int win_result (void)
{
int i = inb_p (HD_STATUS); // Get the status information.
if ((i & (BUSY_STAT | READY_STAT | WRERR_STAT | SEEK_STAT | ERR_STAT))
== (READY_STAT | SEEK_STAT))
return (0); /* ok */
if (i & 1)
i = inb (HD_ERROR); // If ERR_STAT is set, read the error register.
return (1);
}
//// Send a command block to the hard disk controller (see the notes after
// the listing).
// Parameters: drive - disk number (0-1); nsect - number of sectors to
// read/write; sect - starting sector; head - head number; cyl - cylinder
// number; cmd - command code; *intr_addr() - the C handler that the hard
// disk interrupt routine will invoke.
static void hd_out (unsigned int drive, unsigned int nsect, unsigned int sect,
unsigned int head, unsigned int cyl, unsigned int cmd,
void (*intr_addr) (void))
{
register int port asm ("dx"); // The port variable corresponds to register dx.
if (drive > 1 || head > 15) // Only drives 0-1 and heads 0-15 are supported.
panic ("Trying to write bad sector");
if (!controller_ready ()) // Panic if the controller is still not ready after a while.
panic ("HD controller not ready");
do_hd = intr_addr; // do_hd will be called from the hard disk interrupt handler.
outb_p (hd_info[drive].ctl, HD_CMD); // Output the control byte to the control register (0x3f6).
port = HD_DATA; // Set dx to the data register port (0x1f0).
outb_p (hd_info[drive].wpcom >> 2, ++port); // Parameter: write precompensation cylinder (divided by 4).
outb_p (nsect, ++port); // Parameter: total sectors to read/write.
outb_p (sect, ++port); // Parameter: starting sector.
outb_p (cyl, ++port); // Parameter: low 8 bits of the cylinder number.
outb_p (cyl >> 8, ++port); // Parameter: high 8 bits of the cylinder number.
outb_p (0xA0 | (drive << 4) | head, ++port); // Parameter: drive number + head number.
outb (cmd, ++port); // Command: the hard disk control command.
}
//// Wait for the drive to become ready, i.e. loop until the controller's busy
// flag clears. Returns 0 on success when only the ready or seek-complete
// flags are set; returns 1 if the drive stays busy past the timeout.
static int drive_busy (void)
{
unsigned int i;
for (i = 0; i < 10000; i++) // Loop waiting for the ready flag to be set.
if (READY_STAT == (inb_p (HD_STATUS) & (BUSY_STAT | READY_STAT)))
break;
i = inb (HD_STATUS); // Fetch the main controller status byte again.
i &= BUSY_STAT | READY_STAT | SEEK_STAT; // Test the busy, ready and seek-complete bits.
if (i == (READY_STAT | SEEK_STAT)) // Return 0 if only ready/seek-complete are set.
return (0);
printk ("HD controller times out\n\r"); // Otherwise we timed out; print a message and return 1.
return (1);
}
//// Diagnostic reset (recalibration) of the hard disk controller.
static void reset_controller (void)
{
int i;
outb (4, HD_CMD); // Send control byte 4 (reset) to the control register port.
for (i = 0; i < 100; i++)
nop (); // Wait for a while (an empty loop).
outb (hd_info[0].ctl & 0x0f, HD_CMD); // Then send the normal control byte (do not disable retry/ECC).
if (drive_busy ()) // Print an error message if waiting for drive ready timed out.
printk ("HD-controller still busy\n\r");
if ((i = inb (HD_ERROR)) != 1) // Read the error register; anything but 1 (no error) is a failure.
printk ("HD-controller reset failed: %02x\n\r", i);
}
//// Reset hard disk nr. First reset (recalibrate) the controller, then send
// the "set drive parameters" controller command, where recal_intr() is the
// recalibration handler invoked from the hard disk interrupt routine.
static void reset_hd (int nr)
{
reset_controller ();
hd_out (nr, hd_info[nr].sect, hd_info[nr].sect, hd_info[nr].head - 1,
hd_info[nr].cyl, WIN_SPECIFY, &recal_intr);
}
//// Unexpected hard disk interrupt handler.
// Default C handler invoked by the hard disk interrupt routine when an
// unexpected interrupt arrives, i.e. when the handler pointer is NULL.
// See kernel/system_call.s, line 241.
void unexpected_hd_interrupt (void)
{
printk ("Unexpected HD interrupt\n\r");
}
//// Read/write failure handler.
static void bad_rw_intr (void)
{
if (++CURRENT->errors >= MAX_ERRORS) // If this sector has failed 7 or more times,
end_request (0); // end the request and wake any process waiting on it;
// the buffer's uptodate flag stays clear (not updated).
if (CURRENT->errors > MAX_ERRORS / 2) // If it has already failed more than 3 times,
reset = 1; // request a hard disk controller reset.
}
//// Read-operation interrupt handler, called from the hard disk interrupt routine.
static void read_intr (void)
{
if (win_result ())
{ // If the controller is busy, or a read/write or command error occurred,
bad_rw_intr (); // handle the read/write failure,
do_hd_request (); // then reissue the request (with a reset if needed).
return;
}
port_read (HD_DATA, CURRENT->buffer, 256); // Read the data from the data register port into the request buffer.
CURRENT->errors = 0; // Clear the error count.
CURRENT->buffer += 512; // Advance the buffer pointer to the next free area.
CURRENT->sector++; // Advance the starting sector number.
if (--CURRENT->nr_sectors)
{ // If there are still sectors left to read,
do_hd = &read_intr; // set the interrupt C handler pointer to read_intr() again,
return; // because the hard disk interrupt routine clears do_hd
} // each time it is used. See system_call.s.
end_request (1); // All sectors have been read: finish the request,
do_hd_request (); // then service any other pending hard disk requests.
}
//// дÉÈÇøÖжϵ÷Óú¯Êý¡£ÔÚÓ²ÅÌÖжϴ¦Àí³ÌÐòÖб»µ÷Óá£
// ÔÚдÃüÁîÖ´Ðк󣬻á²úÉúÓ²ÅÌÖжÏÐźţ¬Ö´ÐÐÓ²ÅÌÖжϴ¦Àí³ÌÐò£¬´ËʱÔÚÓ²ÅÌÖжϴ¦Àí³ÌÐòÖе÷ÓõÄ
// C º¯ÊýÖ¸Õëdo_hd()ÒѾָÏòwrite_intr()£¬Òò´Ë»áÔÚд²Ù×÷Íê³É£¨»ò³ö´í£©ºó£¬Ö´Ðиú¯Êý¡£
static void write_intr (void)
{
if (win_result ())
{ // Èç¹ûÓ²ÅÌ¿ØÖÆÆ÷·µ»Ø´íÎóÐÅÏ¢£¬
bad_rw_intr (); // ÔòÊ×ÏȽøÐÐÓ²Å̶Áдʧ°Ü´¦Àí£¬
do_hd_request (); // È»ºóÔÙ´ÎÇëÇóÓ²ÅÌ×÷ÏàÓ¦(¸´Î»)´¦Àí£¬
return; // È»ºó·µ»Ø£¨Ò²Í˳öÁË´Ë´ÎÓ²ÅÌÖжϣ©¡£
}
if (--CURRENT->nr_sectors)
{ // ·ñÔò½«ÓûдÉÈÇøÊý¼õ1£¬Èô»¹ÓÐÉÈÇøÒªÐ´£¬Ôò
CURRENT->sector++; // µ±Ç°ÇëÇóÆðʼÉÈÇøºÅ+1£¬
CURRENT->buffer += 512; // µ÷ÕûÇëÇ󻺳åÇøÖ¸Õ룬
do_hd = &write_intr; // ÖÃÓ²ÅÌÖжϳÌÐòµ÷Óú¯ÊýÖ¸ÕëΪwrite_intr()£¬
port_write (HD_DATA, CURRENT->buffer, 256); // ÔÙÏòÊý¾Ý¼Ä´æÆ÷¶Ë¿Úд256 ×Ö½Ú¡£
return; // ·µ»ØµÈ´ýÓ²ÅÌÔÙ´ÎÍê³Éд²Ù×÷ºóµÄÖжϴ¦Àí¡£
}
end_request (1); // ÈôÈ«²¿ÉÈÇøÊý¾ÝÒѾдÍ꣬Ôò´¦ÀíÇëÇó½áÊøÊÂÒË£¬
do_hd_request (); // Ö´ÐÐÆäËüÓ²ÅÌÇëÇó²Ù×÷¡£
}
//// Recalibration (reset) interrupt handler, called from the hard disk
// interrupt routine. If the controller returned an error, handle the
// read/write failure first, then reissue the request (with a reset if needed).
static void recal_intr (void)
{
if (win_result ())
bad_rw_intr ();
do_hd_request ();
}
// Ö´ÐÐÓ²Å̶ÁдÇëÇó²Ù×÷¡£
void do_hd_request (void)
{
int i, r;
unsigned int block, dev;
unsigned int sec, head, cyl;
unsigned int nsect;
INIT_REQUEST; // ¼ì²âÇëÇóÏîµÄºÏ·¨ÐÔ(²Î¼ûkernel/blk_drv/blk.h,127)¡£
// È¡É豸ºÅÖеÄ×ÓÉ豸ºÅ(¼ûÁбíºó¶ÔÓ²ÅÌÉ豸ºÅµÄ˵Ã÷)¡£×ÓÉ豸ºÅ¼´ÊÇÓ²ÅÌÉϵķÖÇøºÅ¡£
dev = MINOR (CURRENT->dev); // CURRENT ¶¨ÒåΪ(blk_dev[MAJOR_NR].current_request)¡£
block = CURRENT->sector; // ÇëÇóµÄÆðʼÉÈÇø¡£
// Èç¹û×ÓÉ豸ºÅ²»´æÔÚ»òÕ߯ðʼÉÈÇø´óÓڸ÷ÖÇøÉÈÇøÊý-2£¬Ôò½áÊø¸ÃÇëÇ󣬲¢Ìø×ªµ½±êºÅrepeat ´¦
// £¨¶¨ÒåÔÚINIT_REQUEST ¿ªÊ¼´¦£©¡£ÒòΪһ´ÎÒªÇó¶Áд2 ¸öÉÈÇø£¨512*2 ×Ö½Ú£©£¬ËùÒÔÇëÇóµÄÉÈÇøºÅ
// ²»ÄÜ´óÓÚ·ÖÇøÖÐ×îºóµ¹ÊýµÚ¶þ¸öÉÈÇøºÅ¡£
if (dev >= 5 * NR_HD || block + 2 > hd[dev].nr_sects)
{
end_request (0);
goto repeat; // ¸Ã±êºÅÔÚblk.h ×îºóÃæ¡£
}
block += hd[dev].start_sect; // ½«ËùÐè¶ÁµÄ¿é¶ÔÓ¦µ½Õû¸öÓ²ÅÌÉϵľø¶ÔÉÈÇøºÅ¡£
dev /= 5; // ´Ëʱdev ´ú±íÓ²Å̺ţ¨0 »ò1£©¡£
// ÏÂÃæÇ¶Èë»ã±à´úÂëÓÃÀ´´ÓÓ²ÅÌÐÅÏ¢½á¹¹Öиù¾ÝÆðʼÉÈÇøºÅºÍÿ´ÅµÀÉÈÇøÊý¼ÆËãÔڴŵÀÖеÄ
// ÉÈÇøºÅ(sec)¡¢ËùÔÚÖùÃæºÅ(cyl)ºÍ´ÅÍ·ºÅ(head)¡£
__asm__ ("divl %4": "=a" (block), "=d" (sec):"" (block), "1" (0),
"r" (hd_info[dev].
sect));
__asm__ ("divl %4": "=a" (cyl), "=d" (head):"" (block), "1" (0),
"r" (hd_info[dev].
head));
sec++;
nsect = CURRENT->nr_sectors; // Óû¶Á/дµÄÉÈÇøÊý¡£
// Èç¹ûreset ÖÃ1£¬ÔòÖ´Ðи´Î»²Ù×÷¡£¸´Î»Ó²Å̺ͿØÖÆÆ÷£¬²¢ÖÃÐèÒªÖØÐÂУÕý±êÖ¾£¬·µ»Ø¡£
if (reset)
{
reset = 0;
recalibrate = 1;
reset_hd (CURRENT_DEV);
return;
}
// Èç¹ûÖØÐÂУÕý±êÖ¾(recalibrate)ÖÃ룬ÔòÊ×Ïȸ´Î»¸Ã±êÖ¾£¬È»ºóÏòÓ²ÅÌ¿ØÖÆÆ÷·¢ËÍÖØÐÂУÕýÃüÁî¡£
if (recalibrate)
{
recalibrate = 0;
hd_out (dev, hd_info[CURRENT_DEV].sect, 0, 0, 0,
WIN_RESTORE, &recal_intr);
return;
}
// Èç¹ûµ±Ç°ÇëÇóÊÇдÉÈÇø²Ù×÷£¬Ôò·¢ËÍдÃüÁѻ·¶Áȡ״̬¼Ä´æÆ÷ÐÅÏ¢²¢ÅжÏÇëÇó·þÎñ±êÖ¾
// DRQ_STAT ÊÇ·ñÖÃλ¡£DRQ_STAT ÊÇÓ²ÅÌ״̬¼Ä´æÆ÷µÄÇëÇó·þÎñ루include/linux/hdreg.h£¬27£©¡£
if (CURRENT->cmd == WRITE)
{
hd_out (dev, nsect, sec, head, cyl, WIN_WRITE, &write_intr);
for (i = 0; i < 3000 && !(r = inb_p (HD_STATUS) & DRQ_STAT); i++)
/* nothing */ ;
// Leave the loop as soon as the data-request bit is set. If it never sets before the loop
// ends, this write attempt failed: go handle the next disk request. Otherwise write one
// sector of data to the controller's data port HD_DATA.
if (!r)
{
bad_rw_intr ();
goto repeat; // the label is at the end of blk.h, i.e. jump to line 301.
}
port_write (HD_DATA, CURRENT->buffer, 256);
// If the current request is a sector read, issue the read command to the controller.
}
else if (CURRENT->cmd == READ)
{
hd_out (dev, nsect, sec, head, cyl, WIN_READ, &read_intr);
}
else
panic ("unknown hd-command");
}
// Hard disk subsystem initialization.
void hd_init (void)
{
blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; // i.e. do_hd_request().
set_intr_gate (0x2E, &hd_interrupt); // install the disk interrupt gate, int 0x2E (46);
// hd_interrupt is in kernel/system_call.s, line 221.
outb_p (inb_p (0x21) & 0xfb, 0x21); // clear bit 2 (the IRQ2 cascade) in the master 8259A
// mask so the slave chip may raise interrupts,
outb (inb_p (0xA1) & 0xbf, 0xA1); // and clear bit 6 (IRQ14) in the slave 8259A mask so
// the disk controller may raise its interrupt.
}
| gpl-2.0 |
smarkwell/asuswrt-merlin | release/src/router/httpd/sysdeps/web-qca.c | 5 | 37008 | /*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
/*
* ASUS Home Gateway Reference Design
* Web Page Configuration Support Routines
*
* Copyright 2004, ASUSTeK Inc.
* All Rights Reserved.
*
* THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
* KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
* SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
*/
#ifdef WEBS
#include <webs.h>
#include <uemf.h>
#include <ej.h>
#else /* !WEBS */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include <unistd.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <assert.h>
#include <httpd.h>
#endif /* WEBS */
#include <typedefs.h>
#include <bcmnvram.h>
#include <bcmutils.h>
#include <shutils.h>
#include <qca.h>
#include <iwlib.h>
//#include <stapriv.h>
#include <ethutils.h>
#include <shared.h>
#include <sys/mman.h>
#ifndef O_BINARY
#define O_BINARY 0
#endif
#ifndef MAP_FAILED
#define MAP_FAILED (-1)
#endif
#define wan_prefix(unit, prefix) snprintf(prefix, sizeof(prefix), "wan%d_", unit)
//static char * rfctime(const time_t *timep);
//static char * reltime(unsigned int seconds);
void reltime(unsigned int seconds, char *buf);
static int wl_status(int eid, webs_t wp, int argc, char_t **argv, int unit);
#include <fcntl.h>
#include <signal.h>
#include <time.h>
#include <sys/klog.h>
#include <sys/wait.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <net/if_arp.h>
#include <dirent.h>
const char *get_wifname(int band)
{
if (band)
return WIF_5G;
else
return WIF_2G;
}
typedef struct _WPS_CONFIGURED_VALUE {
unsigned short Configured; // 1:un-configured/2:configured
char BSSID[18];
char SSID[32 + 1];
char AuthMode[16]; // Open System/Shared Key/WPA-Personal/WPA2-Personal/WPA-Enterprise/WPA2-Enterprise
char Encryp[8]; // None/WEP/TKIP/AES
char DefaultKeyIdx;
char WPAKey[64 + 1];
} WPS_CONFIGURED_VALUE;
/* shared/sysdeps/api-qca.c */
extern u_int ieee80211_mhz2ieee(u_int freq);
extern int get_channel_list_via_driver(int unit, char *buffer, int len);
extern int get_channel_list_via_country(int unit, const char *country_code, char *buffer, int len);
static void getWPSConfig(int unit, WPS_CONFIGURED_VALUE *result)
{
char buf[128];
FILE *fp;
memset(result, 0, sizeof(*result)); /* sizeof(result) is just the pointer size */
sprintf(buf, "hostapd_cli -i%s get_config", get_wifname(unit));
fp = popen(buf, "r");
if (fp) {
while (fgets(buf, sizeof(buf), fp) != NULL) {
char *pt1, *pt2;
chomp(buf);
//BSSID
if ((pt1 = strstr(buf, "bssid="))) {
pt2 = pt1 + strlen("bssid=");
strcpy(result->BSSID, pt2);
}
//SSID
if ((pt1 = strstr(buf, "ssid="))) {
pt2 = pt1 + strlen("ssid=");
strcpy(result->SSID, pt2);
}
//Configured
else if ((pt1 = strstr(buf, "wps_state="))) {
pt2 = pt1 + strlen("wps_state=");
if (!strcmp(pt2, "configured") ||
(!strcmp(pt2, "disabled") && nvram_get_int("w_Setting"))
)
result->Configured = 2;
else
result->Configured = 1;
}
//WPAKey
else if ((pt1 = strstr(buf, "passphrase="))) {
pt2 = pt1 + strlen("passphrase=");
strcpy(result->WPAKey, pt2);
}
//AuthMode
else if ((pt1 = strstr(buf, "key_mgmt="))) {
pt2 = pt1 + strlen("key_mgmt=");
strcpy(result->AuthMode, pt2);/* FIXME: NEED TRANSFORM CONTENT */
}
//Encryp
else if ((pt1 = strstr(buf, "rsn_pairwise_cipher="))) {
pt2 = pt1 + strlen("rsn_pairwise_cipher=");
if (!strcmp(pt2, "NONE"))
strcpy(result->Encryp, "None");
else if (!strncmp(pt2, "WEP", 3))
strcpy(result->Encryp, "WEP");
else if (!strcmp(pt2, "TKIP"))
strcpy(result->Encryp, "TKIP");
else if (!strncmp(pt2, "CCMP", 4))
strcpy(result->Encryp, "AES");
}
}
pclose(fp);
}
//dbg("%s: SSID[%s], Configured[%d], WPAKey[%s], AuthMode[%s], Encryp[%s]\n", __FUNCTION__, result->SSID, result->Configured, result->WPAKey, result->AuthMode, result->Encryp);
}
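/*
 * Typical `hostapd_cli get_config` output that the parser above walks,
 * with illustrative values (the exact field set varies with the hostapd
 * build):
 *
 *   bssid=aa:bb:cc:dd:ee:ff
 *   ssid=ExampleAP
 *   wps_state=configured
 *   passphrase=examplepass
 *   key_mgmt=WPA2-PSK
 *   rsn_pairwise_cipher=CCMP
 */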
char *getAPPhyMode(int unit)
{
static char buf[64]; /* the returned pointer refers into this buffer, so it must outlive the call */
FILE *fp;
int len;
char *pt1, *pt2;
sprintf(buf, "iwpriv %s get_mode", get_wifname(unit));
fp = popen(buf, "r");
if (fp) {
memset(buf, 0, sizeof(buf));
len = fread(buf, 1, sizeof(buf), fp);
pclose(fp);
if (len > 1) {
buf[len-1] = '\0';
pt1 = strstr(buf, "get_mode:");
if (pt1) {
pt2 = pt1 + strlen("get_mode:");
chomp(pt2);
return pt2;
}
}
}
return "";
}
unsigned int getAPChannel(int unit)
{
char buf[8192];
FILE *fp;
int len, i = 0;
char *pt1, *pt2, ch_mhz[5];
sprintf(buf, "iwconfig %s", get_wifname(unit));
fp = popen(buf, "r");
if (fp) {
memset(buf, 0, sizeof(buf));
len = fread(buf, 1, sizeof(buf), fp);
pclose(fp);
if (len > 1) {
buf[len-1] = '\0';
pt1 = strstr(buf, "Frequency:");
if (pt1) {
pt2 = pt1 + strlen("Frequency:");
pt1 = strstr(pt2, " GHz");
if (pt1) {
*pt1 = '\0';
memset(ch_mhz, 0, sizeof(ch_mhz));
len = strlen(pt2);
for (i = 0; i < 5; i++) {
if (i < len) {
if (pt2[i] == '.')
continue;
ch_mhz[strlen(ch_mhz)] = pt2[i]; /* append digit; sprintf'ing a buffer into itself is undefined behaviour */
}
else
ch_mhz[strlen(ch_mhz)] = '0'; /* pad to a full MHz value, e.g. "5.18" -> "5180" */
}
//dbg("Frequency:%s MHz\n", ch_mhz);
return ieee80211_mhz2ieee((unsigned int)atoi(ch_mhz));
}
}
}
}
return 0;
}
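/*
 * Worked example (illustrative): for "Frequency:2.437 GHz" the loop above
 * collects the digits into ch_mhz as "2437"; for "Frequency:5.18 GHz" it
 * pads a trailing zero to get "5180". ieee80211_mhz2ieee() then maps
 * 2437 MHz to channel 6 and 5180 MHz to channel 36.
 */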
long getSTAConnTime(char *ifname, char *bssid)
{
char buf[8192];
FILE *fp;
int len;
char *pt1,*pt2;
sprintf(buf, "hostapd_cli -i%s sta %s", ifname, bssid);
fp = popen(buf, "r");
if (fp) {
memset(buf, 0, sizeof(buf));
len = fread(buf, 1, sizeof(buf), fp);
pclose(fp);
if (len > 1) {
buf[len-1] = '\0';
pt1 = strstr(buf, "connected_time=");
if (pt1) {
pt2 = pt1 + strlen("connected_time=");
chomp(pt2);
return atol(pt2);
}
}
}
return 0;
}
typedef struct _WLANCONFIG_LIST {
char addr[18];
unsigned int aid;
unsigned int chan;
char txrate[6];
char rxrate[6];
unsigned int rssi;
unsigned int idle;
unsigned int txseq;
unsigned int rcseq;
char caps[12];
char acaps[10];
char erp[7];
char state_maxrate[20];
char wps[4];
char conn_time[12];
char rsn[4];
char wme[4];
char mode[31];
} WLANCONFIG_LIST;
#define MAX_STA_NUM 256
typedef struct _WIFI_STA_TABLE {
int Num;
WLANCONFIG_LIST Entry[ MAX_STA_NUM ];
} WIFI_STA_TABLE;
static int getSTAInfo(int unit, WIFI_STA_TABLE *sta_info)
{
#define STA_INFO_PATH "/tmp/wlanconfig_athX_list"
FILE *fp;
int ret = 0;
char *unit_name;
char *p, *ifname;
char *wl_ifnames;
char line_buf[300]; // max 14x
sta_info->Num = 0;
unit_name = strdup(get_wifname(unit));
if (!unit_name)
return ret;
wl_ifnames = strdup(nvram_safe_get("lan_ifnames"));
if (!wl_ifnames) {
free(unit_name);
return ret;
}
p = wl_ifnames;
while ((ifname = strsep(&p, " ")) != NULL) {
while (*ifname == ' ') ++ifname;
if (*ifname == 0) break;
if(strncmp(ifname,unit_name,strlen(unit_name)))
continue;
doSystem("wlanconfig %s list > %s", ifname, STA_INFO_PATH);
fp = fopen(STA_INFO_PATH, "r");
if (fp) {
//fseek(fp, 131, SEEK_SET); // ignore header
fgets(line_buf, sizeof(line_buf), fp); // ignore header
while ( fgets(line_buf, sizeof(line_buf), fp) ) {
WLANCONFIG_LIST *result;
if (sta_info->Num >= MAX_STA_NUM) /* don't overrun the fixed-size table */
break;
result = &sta_info->Entry[sta_info->Num++];
memset(result, 0, sizeof(*result));
sscanf(line_buf, "%s%u%u%s%s%u%u%u%u%s%s%s%s%s%s%s%s%s",
result->addr,
&result->aid,
&result->chan,
result->txrate,
result->rxrate,
&result->rssi,
&result->idle,
&result->txseq,
&result->rcseq,
result->caps,
result->acaps,
result->erp,
result->state_maxrate,
result->wps,
result->conn_time,
result->rsn,
result->wme,
result->mode);
#if 0
dbg("[%s][%u][%u][%s][%s][%u][%u][%u][%u][%s][%s][%s][%s][%s][%s][%s]\n",
result->addr,
result->aid,
result->chan,
result->txrate,
result->rxrate,
result->rssi,
result->idle,
result->txseq,
result->rcseq,
result->caps,
result->acaps,
result->erp,
result->state_maxrate,
result->wps,
result->rsn,
result->wme);
#endif
}
fclose(fp);
unlink(STA_INFO_PATH);
}
}
free(wl_ifnames);
free(unit_name);
return ret;
}
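/*
 * The sscanf() in getSTAInfo() above assumes the first eighteen
 * whitespace-separated columns of a `wlanconfig athX list` row line up
 * with the struct fields (ADDR AID CHAN TXRATE RXRATE RSSI IDLE TXSEQ
 * RXSEQ CAPS ACAPS ERP STATE MAXRATE WPS CONNTIME RSN WME MODE). Column
 * order varies between driver builds, so treat that layout as an
 * assumption rather than a guarantee.
 */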
char* GetBW(int BW)
{
switch(BW)
{
case BW_10:
return "10M";
case BW_20:
return "20M";
case BW_40:
return "40M";
#if defined(RTAC52U) || defined(RTAC51U)
case BW_80:
return "80M";
#endif
default:
return "N/A";
}
}
char* GetPhyMode(int Mode)
{
switch(Mode)
{
case MODE_CCK:
return "CCK";
case MODE_OFDM:
return "OFDM";
case MODE_HTMIX:
return "HTMIX";
case MODE_HTGREENFIELD:
return "GREEN";
#if defined(RTAC52U) || defined(RTAC51U)
case MODE_VHT:
return "VHT";
#endif
default:
return "N/A";
}
}
int MCSMappingRateTable[] =
{2, 4, 11, 22, // CCK
12, 18, 24, 36, 48, 72, 96, 108, // OFDM
13, 26, 39, 52, 78, 104, 117, 130, 26, 52, 78, 104, 156, 208, 234, 260, // 20MHz, 800ns GI, MCS: 0 ~ 15
39, 78, 117, 156, 234, 312, 351, 390, // 20MHz, 800ns GI, MCS: 16 ~ 23
27, 54, 81, 108, 162, 216, 243, 270, 54, 108, 162, 216, 324, 432, 486, 540, // 40MHz, 800ns GI, MCS: 0 ~ 15
81, 162, 243, 324, 486, 648, 729, 810, // 40MHz, 800ns GI, MCS: 16 ~ 23
14, 29, 43, 57, 87, 115, 130, 144, 29, 59, 87, 115, 173, 230, 260, 288, // 20MHz, 400ns GI, MCS: 0 ~ 15
43, 87, 130, 173, 260, 317, 390, 433, // 20MHz, 400ns GI, MCS: 16 ~ 23
30, 60, 90, 120, 180, 240, 270, 300, 60, 120, 180, 240, 360, 480, 540, 600, // 40MHz, 400ns GI, MCS: 0 ~ 15
90, 180, 270, 360, 540, 720, 810, 900,
13, 26, 39, 52, 78, 104, 117, 130, 156, /* 11ac: 20Mhz, 800ns GI, MCS: 0~8 */
27, 54, 81, 108, 162, 216, 243, 270, 324, 360, /*11ac: 40Mhz, 800ns GI, MCS: 0~9 */
59, 117, 176, 234, 351, 468, 527, 585, 702, 780, /*11ac: 80Mhz, 800ns GI, MCS: 0~9 */
14, 29, 43, 57, 87, 115, 130, 144, 173, /* 11ac: 20Mhz, 400ns GI, MCS: 0~8 */
30, 60, 90, 120, 180, 240, 270, 300, 360, 400, /*11ac: 40Mhz, 400ns GI, MCS: 0~9 */
65, 130, 195, 260, 390, 520, 585, 650, 780, 867 /*11ac: 80Mhz, 400ns GI, MCS: 0~9 */
};
#define FN_GETRATE(_fn_, _st_) \
_fn_(_st_ HTSetting) \
{ \
int rate_count = sizeof(MCSMappingRateTable)/sizeof(int); \
int rate_index = 0; \
\
if (HTSetting.field.MODE >= MODE_VHT) \
{ \
if (HTSetting.field.BW == BW_20) { \
rate_index = 108 + \
((unsigned char)HTSetting.field.ShortGI * 29) + \
((unsigned char)HTSetting.field.MCS); \
} \
else if (HTSetting.field.BW == BW_40) { \
rate_index = 117 + \
((unsigned char)HTSetting.field.ShortGI * 29) + \
((unsigned char)HTSetting.field.MCS); \
} \
else if (HTSetting.field.BW == BW_80) { \
rate_index = 127 + \
((unsigned char)HTSetting.field.ShortGI * 29) + \
((unsigned char)HTSetting.field.MCS); \
} \
} \
else \
if (HTSetting.field.MODE >= MODE_HTMIX) \
{ \
rate_index = 12 + ((unsigned char)HTSetting.field.BW *24) + ((unsigned char)HTSetting.field.ShortGI *48) + ((unsigned char)HTSetting.field.MCS); \
} \
else \
if (HTSetting.field.MODE == MODE_OFDM) \
rate_index = (unsigned char)(HTSetting.field.MCS) + 4; \
else if (HTSetting.field.MODE == MODE_CCK) \
rate_index = (unsigned char)(HTSetting.field.MCS); \
\
if (rate_index < 0) \
rate_index = 0; \
\
if (rate_index >= rate_count) \
rate_index = rate_count-1; \
\
return (MCSMappingRateTable[rate_index] * 5)/10; \
}
int FN_GETRATE(getRate, MACHTTRANSMIT_SETTING) //getRate(MACHTTRANSMIT_SETTING)
int FN_GETRATE(getRate_2g, MACHTTRANSMIT_SETTING_2G) //getRate_2g(MACHTTRANSMIT_SETTING_2G)
#if defined(RTAC52U) || defined(RTAC51U)
int FN_GETRATE(getRate_11ac, MACHTTRANSMIT_SETTING_11AC) //getRate_11ac(MACHTTRANSMIT_SETTING_11AC)
#endif
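/*
 * Worked example (a sketch; assumes BW_20 == 0 and BW_40 == 1 as in the
 * usual Ralink/QCA SDK headers): an HT peer at MCS 7, 40 MHz, short GI
 * yields rate_index = 12 + 1*24 + 1*48 + 7 = 91, and
 * MCSMappingRateTable[91] == 300, so getRate() returns (300 * 5) / 10
 * = 150 Mbps.
 */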
int
ej_wl_status(int eid, webs_t wp, int argc, char_t **argv, int unit)
{
int retval = 0;
int ii = 0;
char word[256], *next;
foreach (word, nvram_safe_get("wl_ifnames"), next) {
retval += wl_status(eid, wp, argc, argv, ii);
retval += websWrite(wp, "\n");
ii++;
}
return retval;
}
int
ej_wl_status_2g(int eid, webs_t wp, int argc, char_t **argv)
{
return ej_wl_status(eid, wp, argc, argv, 0);
}
static int
show_wliface_info(webs_t wp, int unit, char *ifname, char *op_mode, char *ssid)
{
int ret = 0;
FILE *fp;
unsigned char mac_addr[ETHER_ADDR_LEN];
char tmpstr[1024], cmd[] = "iwconfig staXYYYYYY";
char *p, ap_bssid[] = "00:00:00:00:00:00XXX";
if (unit < 0 || !ifname || !op_mode || !ssid)
return 0;
memset(&mac_addr, 0, sizeof(mac_addr));
get_iface_hwaddr(ifname, mac_addr);
ret += websWrite(wp, "=======================================================================================\n"); // separator
ret += websWrite(wp, "OP Mode : %s\n", op_mode);
ret += websWrite(wp, "SSID : %s\n", ssid);
sprintf(cmd, "iwconfig %s", ifname);
if ((fp = popen(cmd, "r")) != NULL) {
if (fread(tmpstr, 1, sizeof(tmpstr), fp) > 1) {
*(tmpstr + sizeof(tmpstr) - 1) = '\0';
*ap_bssid = '\0';
if ((p = strstr(tmpstr, "Access Point: ")) != NULL) {
strncpy(ap_bssid, p + 14, 17);
ap_bssid[17] = '\0';
}
ret += websWrite(wp, "BSSID : %s\n", ap_bssid);
}
pclose(fp); /* always close the pipe, even on a short read */
}
ret += websWrite(wp, "MAC address : %02X:%02X:%02X:%02X:%02X:%02X\n",
mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]);
*tmpstr = '\0';
strcpy(tmpstr, getAPPhyMode(unit));
ret += websWrite(wp, "Phy Mode : %s\n", tmpstr);
ret += websWrite(wp, "Channel : %u\n", getAPChannel(unit));
return ret;
}
static int
wl_status(int eid, webs_t wp, int argc, char_t **argv, int unit)
{
int ret = 0, wl_mode_x, i;
WIFI_STA_TABLE *sta_info;
char tmp[128], prefix[] = "wlXXXXXXXXXX_", *ifname, *op_mode;
#if defined(RTCONFIG_WIRELESSREPEATER) && defined(RTCONFIG_PROXYSTA)
if (mediabridge_mode()) {
/* Media bridge mode */
snprintf(prefix, sizeof(prefix), "wl%d.1_", unit);
ifname = nvram_safe_get(strcat_r(prefix, "ifname", tmp));
if (unit != nvram_get_int("wlc_band")) {
snprintf(prefix, sizeof(prefix), "wl%d_", unit);
ret += websWrite(wp, "%s radio is disabled\n",
nvram_match(strcat_r(prefix, "nband", tmp), "1") ? "5 GHz" : "2.4 GHz");
return ret;
}
ret += show_wliface_info(wp, unit, ifname, "Media Bridge", nvram_safe_get("wlc_ssid"));
} else {
#endif
/* Router mode, Repeater and AP mode */
#if defined(RTCONFIG_WIRELESSREPEATER)
if (!unit && repeater_mode()) {
/* Show P-AP information first, if we are about to show 2.4G information in repeater mode. */
snprintf(prefix, sizeof(prefix), "wl%d.1_", nvram_get_int("wlc_band"));
ifname = nvram_safe_get(strcat_r(prefix, "ifname", tmp));
ret += show_wliface_info(wp, unit, ifname, "Repeater", nvram_safe_get("wlc_ssid"));
ret += websWrite(wp, "\n");
}
#endif
snprintf(prefix, sizeof(prefix), "wl%d_", unit);
ifname = nvram_safe_get(strcat_r(prefix, "ifname", tmp));
if (!get_radio_status(ifname)) {
#if defined(BAND_2G_ONLY)
ret += websWrite(wp, "2.4 GHz radio is disabled\n");
#else
ret += websWrite(wp, "%s radio is disabled\n",
nvram_match(strcat_r(prefix, "nband", tmp), "1") ? "5 GHz" : "2.4 GHz");
#endif
return ret;
}
wl_mode_x = nvram_get_int(strcat_r(prefix, "mode_x", tmp));
op_mode = "AP";
if (wl_mode_x == 1)
op_mode = "WDS Only";
else if (wl_mode_x == 2)
op_mode = "Hybrid";
ret += show_wliface_info(wp, unit, ifname, op_mode, nvram_safe_get(strcat_r(prefix, "ssid", tmp)));
ret += websWrite(wp, "\nStations List \n");
ret += websWrite(wp, "----------------------------------------\n");
#if 0 //barton++
ret += websWrite(wp, "%-18s%-4s%-8s%-4s%-4s%-4s%-5s%-5s%-12s\n",
"MAC", "PSM", "PhyMode", "BW", "MCS", "SGI", "STBC", "Rate", "Connect Time");
#else
ret += websWrite(wp, "%-18s%-7s%-7s%-12s\n",
"MAC", "TXRATE", "RXRATE", "Connect Time");
#endif
if ((sta_info = malloc(sizeof(*sta_info))) != NULL) {
getSTAInfo(unit, sta_info);
for(i = 0; i < sta_info->Num; i++) {
ret += websWrite(wp, "%s %6s %6s %8s\n",
sta_info->Entry[i].addr,
sta_info->Entry[i].txrate,
sta_info->Entry[i].rxrate,
sta_info->Entry[i].conn_time
);
}
free(sta_info);
}
#if defined(RTCONFIG_WIRELESSREPEATER) && defined(RTCONFIG_PROXYSTA)
}
#endif
return ret;
}
static int ej_wl_sta_list(int unit, webs_t wp)
{
WIFI_STA_TABLE *sta_info;
char *value;
int firstRow = 1;
int i;
if ((sta_info = malloc(sizeof(*sta_info))) != NULL)
{
getSTAInfo(unit, sta_info);
for(i = 0; i < sta_info->Num; i++)
{
if (firstRow == 1)
firstRow = 0;
else
websWrite(wp, ", ");
websWrite(wp, "[");
websWrite(wp, "\"%s\"", sta_info->Entry[i].addr);
value = "Yes";
websWrite(wp, ", \"%s\"", value);
value = "";
websWrite(wp, ", \"%s\"", value);
websWrite(wp, ", \"%d\"", sta_info->Entry[i].rssi);
websWrite(wp, "]");
}
free(sta_info);
}
return 0;
}
int ej_wl_sta_list_2g(int eid, webs_t wp, int argc, char_t **argv)
{
ej_wl_sta_list(0, wp);
return 0;
}
int ej_wl_sta_list_5g(int eid, webs_t wp, int argc, char_t **argv)
{
ej_wl_sta_list(1, wp);
return 0;
}
char *getWscStatus(int unit)
{
static char buf[512]; /* the returned pointer refers into this buffer, so it must outlive the call */
FILE *fp;
int len;
char *pt1,*pt2;
sprintf(buf, "hostapd_cli -i%s wps_get_status", get_wifname(unit));
fp = popen(buf, "r");
if (fp) {
memset(buf, 0, sizeof(buf));
len = fread(buf, 1, sizeof(buf), fp);
pclose(fp);
if (len > 1) {
buf[len-1] = '\0';
pt1 = strstr(buf, "Last WPS result: ");
if (pt1) {
pt2 = pt1 + strlen("Last WPS result: ");
pt1 = strstr(pt2, "Peer Address: ");
if (pt1) {
*pt1 = '\0';
chomp(pt2);
}
return pt2;
}
}
}
return "";
}
char *getAPPIN(int unit)
{
static char buffer[128];
char cmd[64];
FILE *fp;
int len;
buffer[0] = '\0';
sprintf(cmd, "hostapd_cli -i%s wps_ap_pin get", get_wifname(unit));
fp = popen(cmd, "r");
if (fp) {
len = fread(buffer, 1, sizeof(buffer), fp);
pclose(fp);
if (len > 1) {
buffer[len - 1] = '\0'; /* strip the trailing newline and avoid writing past the buffer on a full read */
//dbg("%s: AP PIN[%s]\n", __FUNCTION__, buffer);
return buffer;
}
}
return "";
}
int
wl_wps_info(int eid, webs_t wp, int argc, char_t **argv, int unit)
{
int j = -1, u = unit;
char tmpstr[128];
WPS_CONFIGURED_VALUE result;
int retval=0;
char tmp[128], prefix[] = "wlXXXXXXXXXX_";
char *wps_sta_pin;
char tag1[] = "<wps_infoXXXXXX>", tag2[] = "</wps_infoXXXXXX>";
#if defined(RTCONFIG_WPSMULTIBAND)
for (j = -1; j < MAX_NR_WL_IF; ++j) {
#endif
switch (j) {
case 0: /* fall through */
case 1:
u = j;
sprintf(tag1, "<wps_info%d>", j);
sprintf(tag2, "</wps_info%d>", j);
break;
case -1: /* fall through */
default:
u = unit;
strcpy(tag1, "<wps_info>");
strcpy(tag2, "</wps_info>");
}
snprintf(prefix, sizeof(prefix), "wl%d_", u);
#if defined(RTCONFIG_WPSMULTIBAND)
if (!nvram_get(strcat_r(prefix, "ifname", tmp)))
continue;
#endif
memset(&result, 0, sizeof(result));
getWPSConfig(u, &result);
if (j == -1)
retval += websWrite(wp, "<wps>\n");
//0. WSC Status
memset(tmpstr, 0, sizeof(tmpstr));
strcpy(tmpstr, getWscStatus(u));
retval += websWrite(wp, "%s%s%s\n", tag1, tmpstr, tag2);
//1. WPS Configured
if (result.Configured==2)
retval += websWrite(wp, "%s%s%s\n", tag1, "Yes", tag2);
else
retval += websWrite(wp, "%s%s%s\n", tag1, "No", tag2);
//2. WPS SSID
memset(tmpstr, 0, sizeof(tmpstr));
char_to_ascii(tmpstr, result.SSID);
retval += websWrite(wp, "%s%s%s\n", tag1, tmpstr, tag2);
//3. WPS AuthMode
retval += websWrite(wp, "%s%s%s\n", tag1, result.AuthMode, tag2);
//4. WPS Encryp
retval += websWrite(wp, "%s%s%s\n", tag1, result.Encryp, tag2);
//5. WPS DefaultKeyIdx
memset(tmpstr, 0, sizeof(tmpstr));
sprintf(tmpstr, "%d", result.DefaultKeyIdx);/* FIXME: TBD */
retval += websWrite(wp, "%s%s%s\n", tag1, tmpstr, tag2);
//6. WPS WPAKey
if (!strlen(result.WPAKey))
retval += websWrite(wp, "%sNone%s\n", tag1, tag2);
else
{
memset(tmpstr, 0, sizeof(tmpstr));
char_to_ascii(tmpstr, result.WPAKey);
retval += websWrite(wp, "%s%s%s\n", tag1, tmpstr, tag2);
}
//7. AP PIN Code
memset(tmpstr, 0, sizeof(tmpstr));
strcpy(tmpstr, getAPPIN(u));
retval += websWrite(wp, "%s%s%s\n", tag1, tmpstr, tag2);
//8. Saved WPAKey
if (!strlen(nvram_safe_get(strcat_r(prefix, "wpa_psk", tmp))))
retval += websWrite(wp, "%s%s%s\n", tag1, "None", tag2);
else
{
char_to_ascii(tmpstr, nvram_safe_get(strcat_r(prefix, "wpa_psk", tmp)));
retval += websWrite(wp, "%s%s%s\n", tag1, tmpstr, tag2);
}
//9. WPS enable?
if (!strcmp(nvram_safe_get(strcat_r(prefix, "wps_mode", tmp)), "enabled"))
retval += websWrite(wp, "%s%s%s\n", tag1, "None", tag2);
else
retval += websWrite(wp, "%s%s%s\n", tag1, nvram_safe_get("wps_enable"), tag2);
//A. WPS mode
wps_sta_pin = nvram_safe_get("wps_sta_pin");
if (strlen(wps_sta_pin) && strcmp(wps_sta_pin, "00000000"))
retval += websWrite(wp, "%s%s%s\n", tag1, "1", tag2);
else
retval += websWrite(wp, "%s%s%s\n", tag1, "2", tag2);
//B. current auth mode
if (!strlen(nvram_safe_get(strcat_r(prefix, "auth_mode_x", tmp))))
retval += websWrite(wp, "%s%s%s\n", tag1, "None", tag2);
else
retval += websWrite(wp, "%s%s%s\n", tag1, nvram_safe_get(strcat_r(prefix, "auth_mode_x", tmp)), tag2);
//C. WPS band
retval += websWrite(wp, "%s%d%s\n", tag1, u, tag2);
#if defined(RTCONFIG_WPSMULTIBAND)
}
#endif
retval += websWrite(wp, "</wps>");
return retval;
}
int
ej_wps_info(int eid, webs_t wp, int argc, char_t **argv)
{
return wl_wps_info(eid, wp, argc, argv, 1);
}
int
ej_wps_info_2g(int eid, webs_t wp, int argc, char_t **argv)
{
return wl_wps_info(eid, wp, argc, argv, 0);
}
// Wireless Client List /* Start --Alicia, 08.09.23 */
int ej_wl_auth_list(int eid, webs_t wp, int argc, char_t **argv)
{
//only for ath0 & ath1
WLANCONFIG_LIST *result;
#define AUTH_INFO_PATH "/tmp/auth_athX_list"
FILE *fp;
int ret = 0;
char *p, *ifname;
char *wl_ifnames;
char line_buf[300]; // max 14x
char *value;
int firstRow;
result = (WLANCONFIG_LIST *)malloc(sizeof(WLANCONFIG_LIST));
if (!result)
return ret;
memset(result, 0, sizeof(*result)); /* sizeof(result) is just the pointer size */
wl_ifnames = strdup(nvram_safe_get("wl_ifnames"));
if (!wl_ifnames) {
free(result);
return ret;
}
p = wl_ifnames;
firstRow = 1;
while ((ifname = strsep(&p, " ")) != NULL) {
while (*ifname == ' ') ++ifname;
if (*ifname == 0) break;
doSystem("wlanconfig %s list > %s", ifname, AUTH_INFO_PATH);
fp = fopen(AUTH_INFO_PATH, "r");
if (fp) {
//fseek(fp, 131, SEEK_SET); // ignore header
fgets(line_buf, sizeof(line_buf), fp); // ignore header
while ( fgets(line_buf, sizeof(line_buf), fp) ) {
sscanf(line_buf, "%s%u%u%s%s%u%u%u%u%s%s%s%s%s%s%s%s%s",
result->addr,
&result->aid,
&result->chan,
result->txrate,
result->rxrate,
&result->rssi,
&result->idle,
&result->txseq,
&result->rcseq,
result->caps,
result->acaps,
result->erp,
result->state_maxrate,
result->wps,
result->conn_time,
result->rsn,
result->wme,
result->mode);
if (firstRow == 1)
firstRow = 0;
else
websWrite(wp, ", ");
websWrite(wp, "[");
websWrite(wp, "\"%s\"", result->addr);
value = "YES";
websWrite(wp, ", \"%s\"", value);
value = "";
websWrite(wp, ", \"%s\"", value);
websWrite(wp, "]");
}
fclose(fp);
unlink(AUTH_INFO_PATH);
}
}
free(result);
free(wl_ifnames);
return ret;
}
#if 0
static void convertToUpper(char *str)
{
if(str == NULL)
return;
while(*str)
{
if(*str >= 'a' && *str <= 'z')
{
*str &= (unsigned char)~0x20;
}
str++;
}
}
#endif
#if 1
#define target 7
char str[target][40]={"Address:","ESSID:","Frequency:","Quality=","Encryption key:","IE:","Authentication Suites"};
static int wl_scan(int eid, webs_t wp, int argc, char_t **argv, int unit)
{
int apCount=0,retval=0;
char header[128];
char tmp[128], prefix[] = "wlXXXXXXXXXX_";
char cmd[300];
FILE *fp;
char buf[target][200];
int i,fp_len;
char *pt1,*pt2;
char a1[10],a2[10];
char ssid_str[256];
char ch[4],ssid[33],address[18],enc[9],auth[16],sig[9],wmode[8];
int lock;
dbg("Please wait...");
lock = file_lock("nvramcommit");
system("rm -f /tmp/wlist");
snprintf(prefix, sizeof(prefix), "wl%d_", unit);
sprintf(cmd,"iwlist %s scanning >> /tmp/wlist",nvram_safe_get(strcat_r(prefix, "ifname", tmp)));
system(cmd);
file_unlock(lock);
if((fp= fopen("/tmp/wlist", "r"))==NULL)
return -1;
memset(header, 0, sizeof(header));
sprintf(header, "%-4s%-33s%-18s%-9s%-16s%-9s%-8s\n", "Ch", "SSID", "BSSID", "Enc", "Auth", "Signal(%)", "W-Mode");
dbg("\n%s", header);
retval += websWrite(wp, "[");
while(1)
{
memset(buf,0,sizeof(buf));
fp_len=0;
for(i=0;i<target;i++)
{
while(fgets(buf[i], sizeof(buf[i]), fp))
{
fp_len += strlen(buf[i]);
if(i!=0 && strstr(buf[i],"Cell") && strstr(buf[i],"Address"))
{
fseek(fp,-fp_len, SEEK_CUR);
fp_len=0;
break;
}
else
{
if(strstr(buf[i],str[i]))
{
fp_len =0;
break;
}
else
memset(buf[i],0,sizeof(buf[i]));
}
}
//dbg("buf[%d]=%s\n",i,buf[i]);
}
if(feof(fp))
break;
apCount++;
dbg("\napCount=%d\n",apCount);
//ch
pt1 = strstr(buf[2], "Channel ");
if(pt1)
{
pt2 = strstr(pt1,")");
memset(ch,0,sizeof(ch));
strncpy(ch,pt1+strlen("Channel "),pt2-pt1-strlen("Channel "));
}
//ssid
pt1 = strstr(buf[1], "ESSID:");
if(pt1)
{
memset(ssid,0,sizeof(ssid));
strncpy(ssid,pt1+strlen("ESSID:")+1,strlen(buf[1])-2-(pt1+strlen("ESSID:")+1-buf[1]));
}
//bssid
pt1 = strstr(buf[0], "Address: ");
if(pt1)
{
memset(address,0,sizeof(address));
strncpy(address,pt1+strlen("Address: "),strlen(buf[0])-(pt1+strlen("Address: ")-buf[0])-1);
}
//enc
pt1=strstr(buf[4],"Encryption key:");
if(pt1)
{
if(strstr(pt1+strlen("Encryption key:"),"on"))
{
sprintf(enc,"ENC");
}
else
sprintf(enc,"NONE");
}
//auth
memset(auth,0,sizeof(auth));
sprintf(auth,"N/A");
//sig
pt1 = strstr(buf[3], "Quality=");
pt2 = pt1 ? strstr(pt1, "/") : NULL; /* don't pass a NULL pt1 to strstr() */
if (pt1 && pt2)
{
memset(sig,0,sizeof(sig));
memset(a1,0,sizeof(a1));
memset(a2,0,sizeof(a2));
strncpy(a1,pt1+strlen("Quality="),pt2-pt1-strlen("Quality="));
strncpy(a2,pt2+1,strstr(pt2," ")-(pt2+1));
sprintf(sig,"%d", atoi(a2) ? (atoi(a1) * 100 / atoi(a2)) : 0); /* quality as a percentage; plain a1/a2 would truncate to 0 */
}
//wmode
memset(wmode,0,sizeof(wmode));
sprintf(wmode,"11b/g/n");
#if 1
dbg("%-4s%-33s%-18s%-9s%-16s%-9s%-8s\n",ch,ssid,address,enc,auth,sig,wmode);
#endif
memset(ssid_str, 0, sizeof(ssid_str));
char_to_ascii(ssid_str, trim_r(ssid));
if (apCount==1)
retval += websWrite(wp, "[\"%s\", \"%s\"]", ssid_str, address);
else
retval += websWrite(wp, ", [\"%s\", \"%s\"]", ssid_str, address);
}
retval += websWrite(wp, "]");
fclose(fp);
return 0;
}
#else
static int wl_scan(int eid, webs_t wp, int argc, char_t **argv, int unit)
{
int retval = 0, i = 0, apCount = 0;
char data[8192];
char ssid_str[256];
char header[128];
struct iwreq wrq;
SSA *ssap;
char tmp[128], prefix[] = "wlXXXXXXXXXX_";
int lock;
snprintf(prefix, sizeof(prefix), "wl%d_", unit);
memset(data, 0x00, 255);
strcpy(data, "SiteSurvey=1");
wrq.u.data.length = strlen(data)+1;
wrq.u.data.pointer = data;
wrq.u.data.flags = 0;
lock = file_lock("nvramcommit");
if (wl_ioctl(nvram_safe_get(strcat_r(prefix, "ifname", tmp)), RTPRIV_IOCTL_SET, &wrq) < 0)
{
file_unlock(lock);
dbg("Site Survey fails\n");
return 0;
}
file_unlock(lock);
dbg("Please wait");
sleep(1);
dbg(".");
sleep(1);
dbg(".");
sleep(1);
dbg(".");
sleep(1);
dbg(".\n\n");
memset(data, 0, 8192);
strcpy(data, "");
wrq.u.data.length = 8192;
wrq.u.data.pointer = data;
wrq.u.data.flags = 0;
if (wl_ioctl(nvram_safe_get(strcat_r(prefix, "ifname", tmp)), RTPRIV_IOCTL_GSITESURVEY, &wrq) < 0)
{
dbg("errors in getting site survey result\n");
return 0;
}
memset(header, 0, sizeof(header));
//sprintf(header, "%-3s%-33s%-18s%-8s%-15s%-9s%-8s%-2s\n", "Ch", "SSID", "BSSID", "Enc", "Auth", "Signal(%)", "W-Mode", "NT");
#if 0// defined(RTN14U)
sprintf(header, "%-4s%-33s%-18s%-9s%-16s%-9s%-8s%-4s%-5s\n", "Ch", "SSID", "BSSID", "Enc", "Auth", "Signal(%)", "W-Mode"," WPS", " DPID");
#else
sprintf(header, "%-4s%-33s%-18s%-9s%-16s%-9s%-8s\n", "Ch", "SSID", "BSSID", "Enc", "Auth", "Signal(%)", "W-Mode");
#endif
dbg("\n%s", header);
if (wrq.u.data.length > 0)
{
#if defined(RTN65U)
if (unit == 0 && get_model() == MODEL_RTN65U)
{
char *encryption;
SITE_SURVEY_RT3352_iNIC *pSsap, *ssAP;
pSsap = ssAP = (SITE_SURVEY_RT3352_iNIC *) (1 /* '\n' */ + wrq.u.data.pointer + sizeof(SITE_SURVEY_RT3352_iNIC) /* header */);
while(((unsigned int)wrq.u.data.pointer + wrq.u.data.length) > (unsigned int) ssAP)
{
ssAP->channel [sizeof(ssAP->channel) -1] = '\0';
ssAP->ssid [32 ] = '\0';
ssAP->bssid [17 ] = '\0';
ssAP->encryption[sizeof(ssAP->encryption) -1] = '\0';
if((encryption = strchr(ssAP->authmode, '/')) != NULL)
{
memmove(ssAP->encryption, encryption +1, sizeof(ssAP->encryption) -1);
memset(encryption, ' ', sizeof(ssAP->authmode) - (encryption - ssAP->authmode));
*encryption = '\0';
}
ssAP->authmode [sizeof(ssAP->authmode) -1] = '\0';
ssAP->signal [sizeof(ssAP->signal) -1] = '\0';
ssAP->wmode [sizeof(ssAP->wmode) -1] = '\0';
ssAP->extch [sizeof(ssAP->extch) -1] = '\0';
ssAP->nt [sizeof(ssAP->nt) -1] = '\0';
ssAP->wps [sizeof(ssAP->wps) -1] = '\0';
ssAP->dpid [sizeof(ssAP->dpid) -1] = '\0';
convertToUpper(ssAP->bssid);
ssAP++;
apCount++;
}
if (apCount)
{
retval += websWrite(wp, "[");
for (i = 0; i < apCount; i++)
{
dbg("%-4s%-33s%-18s%-9s%-16s%-9s%-8s\n",
pSsap[i].channel,
pSsap[i].ssid,
pSsap[i].bssid,
pSsap[i].encryption,
pSsap[i].authmode,
pSsap[i].signal,
pSsap[i].wmode
);
memset(ssid_str, 0, sizeof(ssid_str));
char_to_ascii(ssid_str, trim_r(pSsap[i].ssid));
if (!i)
retval += websWrite(wp, "[\"%s\", \"%s\"]", ssid_str, pSsap[i].bssid);
else
retval += websWrite(wp, ", [\"%s\", \"%s\"]", ssid_str, pSsap[i].bssid);
}
retval += websWrite(wp, "]");
dbg("\n");
}
else
retval += websWrite(wp, "[]");
return retval;
}
#endif
ssap=(SSA *)(wrq.u.data.pointer+strlen(header)+1);
int len = strlen(wrq.u.data.pointer+strlen(header))-1;
char *sp, *op;
op = sp = wrq.u.data.pointer+strlen(header)+1;
while (*sp && ((len - (sp-op)) >= 0))
{
ssap->SiteSurvey[i].channel[3] = '\0';
ssap->SiteSurvey[i].ssid[32] = '\0';
ssap->SiteSurvey[i].bssid[17] = '\0';
ssap->SiteSurvey[i].encryption[8] = '\0';
ssap->SiteSurvey[i].authmode[15] = '\0';
ssap->SiteSurvey[i].signal[8] = '\0';
ssap->SiteSurvey[i].wmode[7] = '\0';
#if 0//defined(RTN14U)
ssap->SiteSurvey[i].wps[3] = '\0';
ssap->SiteSurvey[i].dpid[4] = '\0';
#endif
sp+=strlen(header);
apCount=++i;
}
if (apCount)
{
retval += websWrite(wp, "[");
for (i = 0; i < apCount; i++)
{
dbg("\napCount=%d\n",i);
dbg(
#if 0//defined(RTN14U)
"%-4s%-33s%-18s%-9s%-16s%-9s%-8s%-4s%-5s\n",
#else
"%-4s%-33s%-18s%-9s%-16s%-9s%-8s\n",
#endif
ssap->SiteSurvey[i].channel,
(char*)ssap->SiteSurvey[i].ssid,
ssap->SiteSurvey[i].bssid,
ssap->SiteSurvey[i].encryption,
ssap->SiteSurvey[i].authmode,
ssap->SiteSurvey[i].signal,
ssap->SiteSurvey[i].wmode
#if 0//defined(RTN14U)
, ssap->SiteSurvey[i].wps
, ssap->SiteSurvey[i].dpid
#endif
);
memset(ssid_str, 0, sizeof(ssid_str));
char_to_ascii(ssid_str, trim_r(ssap->SiteSurvey[i].ssid));
if (!i)
// retval += websWrite(wp, "\"%s\"", ssap->SiteSurvey[i].bssid);
retval += websWrite(wp, "[\"%s\", \"%s\"]", ssid_str, ssap->SiteSurvey[i].bssid);
else
// retval += websWrite(wp, ", \"%s\"", ssap->SiteSurvey[i].bssid);
retval += websWrite(wp, ", [\"%s\", \"%s\"]", ssid_str, ssap->SiteSurvey[i].bssid);
}
retval += websWrite(wp, "]");
dbg("\n");
}
else
retval += websWrite(wp, "[]");
}
return retval;
}
#endif
int
ej_wl_scan(int eid, webs_t wp, int argc, char_t **argv)
{
return wl_scan(eid, wp, argc, argv, 0);
}
int
ej_wl_scan_2g(int eid, webs_t wp, int argc, char_t **argv)
{
return wl_scan(eid, wp, argc, argv, 0);
}
int
ej_wl_scan_5g(int eid, webs_t wp, int argc, char_t **argv)
{
return wl_scan(eid, wp, argc, argv, 1);
}
static int ej_wl_channel_list(int eid, webs_t wp, int argc, char_t **argv, int unit)
{
int retval = 0;
char tmp[128], prefix[] = "wlXXXXXXXXXX_";
char *country_code;
char chList[256];
int band;
snprintf(prefix, sizeof(prefix), "wl%d_", unit);
country_code = nvram_get(strcat_r(prefix, "country_code", tmp));
band = unit;
if (country_code == NULL || strlen(country_code) != 2) return retval;
if (band != 0 && band != 1) return retval;
//try getting channel list via wifi driver first
if(get_channel_list_via_driver(unit, chList, sizeof(chList)) > 0)
{
retval += websWrite(wp, "[%s]", chList);
}
else if(get_channel_list_via_country(unit, country_code, chList, sizeof(chList)) > 0)
{
retval += websWrite(wp, "[%s]", chList);
}
return retval;
}
int
ej_wl_channel_list_2g(int eid, webs_t wp, int argc, char_t **argv)
{
return ej_wl_channel_list(eid, wp, argc, argv, 0);
}
int
ej_wl_channel_list_5g(int eid, webs_t wp, int argc, char_t **argv)
{
return ej_wl_channel_list(eid, wp, argc, argv, 1);
}
static int ej_wl_rate(int eid, webs_t wp, int argc, char_t **argv, int unit)
{
#define ASUS_IOCTL_GET_STA_DATARATE (SIOCDEVPRIVATE+15) /* from qca-wifi/os/linux/include/ieee80211_ioctl.h */
struct iwreq wrq;
int retval = 0;
char tmp[256], prefix[] = "wlXXXXXXXXXX_";
char *name;
char word[256], *next;
int unit_max = 0;
unsigned int rate[2];
char rate_buf[32];
int sw_mode = nvram_get_int("sw_mode");
int wlc_band = nvram_get_int("wlc_band");
sprintf(rate_buf, "0 Mbps");
if (!nvram_match("wlc_state", "2"))
goto ERROR;
foreach (word, nvram_safe_get("wl_ifnames"), next)
unit_max++;
if (unit > (unit_max - 1))
goto ERROR;
if (wlc_band == unit && (sw_mode == SW_MODE_REPEATER || sw_mode == SW_MODE_HOTSPOT))
snprintf(prefix, sizeof(prefix), "wl%d.1_", unit);
else
goto ERROR;
name = nvram_safe_get(strcat_r(prefix, "ifname", tmp));
wrq.u.data.pointer = rate;
wrq.u.data.length = sizeof(rate);
if (wl_ioctl(name, ASUS_IOCTL_GET_STA_DATARATE, &wrq) < 0)
{
dbg("%s: errors in getting %s ASUS_IOCTL_GET_STA_DATARATE result\n", __func__, name);
goto ERROR;
}
if (rate[0] > rate[1])
sprintf(rate_buf, "%d Mbps", rate[0]);
else
sprintf(rate_buf, "%d Mbps", rate[1]);
ERROR:
retval += websWrite(wp, "%s", rate_buf);
return retval;
}
int
ej_wl_rate_2g(int eid, webs_t wp, int argc, char_t **argv)
{
if(nvram_match("sw_mode", "2"))
return ej_wl_rate(eid, wp, argc, argv, 0);
else
return 0;
}
int
ej_wl_rate_5g(int eid, webs_t wp, int argc, char_t **argv)
{
if(nvram_match("sw_mode", "2"))
return ej_wl_rate(eid, wp, argc, argv, 1);
else
return 0;
}
#ifdef RTCONFIG_PROXYSTA
int
ej_wl_auth_psta(int eid, webs_t wp, int argc, char_t **argv)
{
int retval = 0;
if(nvram_match("wlc_state", "2")) //connected
retval += websWrite(wp, "wlc_state=1;wlc_state_auth=0;");
//else if(?) //authorization failed
// retval += websWrite(wp, "wlc_state=2;wlc_state_auth=1;");
else //disconnected
retval += websWrite(wp, "wlc_state=0;wlc_state_auth=0;");
return retval;
}
#endif
| gpl-2.0 |
fullstory-morgue/lirc | tools/xmode2.c | 5 | 8940 | /* $Id: xmode2.c,v 5.18 2005/10/16 19:17:13 lirc Exp $ */
/****************************************************************************
** xmode2.c ****************************************************************
****************************************************************************
*
* xmode2 - shows the ir waveform of an IR signal
*
* patched together on Feb. 18th 1999 by
* Heinrich Langos <heinrich@mad.scientist.com>
*
* This program is based on the smode2.c file by Sinkovics Zoltan
* <sinko@szarvas.hu> which is a part of the LIRC distribution. It is
* just a conversion from svga to X with some basic support for resizing.
* I copied most of this comment.
*
* This program is based on the mode2.c file which is a part of the
* LIRC distribution. The main purpose of this program is to check
* operation of LIRC receiver hardware, and to see the IR waveform of
* the remote controller without an expensive oscilloscope. The time
* division is variable from 1 ms/div to extremely high values (integer
* type) but there is no point increasing this value above 20 ms/div,
* because one pulse is about 1 ms. I think this kind of presentation
* is much more exciting than the simple pulse&space output showed by
* mode2.
*
* Usage: xmode2 [-t (ms/div)] , default division is 5 ms/div
*
*
* compile: gcc -o xmode2 xmode2.c -L/usr/X11R6/lib -lX11
*
* version 0.01 Feb 18 1999
* initial release
*
* version 0.02 Aug 24 1999
* using select() to make the whole thing more responsive
* */
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <getopt.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/cursorfont.h>
#include "drivers/lirc.h"
Display *d1;
Window w0,w1; /*w0 = root*/
char w1_wname[]="xmode2";
char w1_iname[]="xmode2";
char font1_name[]="-*-Courier-medium-r-*-*-8-*-*-m-*-iso8859-1";
unsigned int w1_x=0,w1_y=0,w1_w=640,w1_h=480,w1_border=0;
XFontStruct *f1_str;
XColor xc1,xc2;
Colormap cm1;
XGCValues gcval1;
GC gc1,gc2;
XSetWindowAttributes winatt1;
long event_mask1;
XEvent event_return1;
void initscreen(char *geometry)
{
d1=XOpenDisplay(0);
if (d1==NULL)
{
printf("Can't open display.\n");
exit(EXIT_FAILURE);
}
if(geometry != NULL)
{
XParseGeometry(geometry, &w1_x, &w1_y, &w1_w, &w1_h);
}
/* set up the XSetWindowAttributes structure */
w0 = DefaultRootWindow(d1);
winatt1.background_pixel = BlackPixel(d1,0);
winatt1.backing_store = WhenMapped;
winatt1.event_mask = KeyPressMask|StructureNotifyMask|ExposureMask;
w1 = XCreateWindow(d1,w0,w1_x,w1_y,w1_w,w1_h,w1_border,CopyFromParent,InputOutput, CopyFromParent,CWBackPixel|CWBackingStore|CWEventMask,&winatt1);
XStoreName(d1,w1,w1_wname);
XSetIconName(d1,w1,w1_iname);
XMapWindow(d1,w1);
cm1=DefaultColormap(d1,0);
if (!XAllocNamedColor(d1,cm1,"blue",&xc1,&xc2)) printf("couldn't allocate blue color\n");
f1_str=XLoadQueryFont(d1,font1_name);
if (f1_str==NULL)
{
printf("could't load font\n");
exit(EXIT_FAILURE);
}
gcval1.foreground = xc1.pixel;
gcval1.font = f1_str -> fid;
gcval1.line_style = LineSolid;
gc1 = XCreateGC(d1,w1, GCForeground|GCLineStyle, &gcval1);
gcval1.foreground = WhitePixel(d1,0);
gc2 = XCreateGC(d1,w1, GCForeground|GCLineStyle|GCFont, &gcval1);
}
void closescreen(void)
{
XUnmapWindow(d1,w1);
XCloseDisplay(d1);
}
int main(int argc, char **argv)
{
fd_set rfds;
int retval;
int xfd, maxfd;
int fd;
unsigned long mode;
lirc_t data;
lirc_t x1,y1,x2,y2;
int result;
char textbuffer[80];
int div=5;
int dmode=0;
struct stat s;
int use_stdin = 0;
char *device=LIRC_DRIVER_DEVICE;
char *progname;
char *geometry = NULL;
progname="xmode2";
while(1)
{
int c;
static struct option long_options[] = {
{"help",no_argument,NULL,'h'},
{"version",no_argument,NULL,'v'},
{"device",required_argument,NULL,'d'},
{"geometry",required_argument,NULL,'g'},
{"timediv",required_argument,NULL,'t'},
{"mode",required_argument,NULL,'m'},
{0, 0, 0, 0}
};
c = getopt_long(argc,argv,"hvd:g:t:m",long_options,NULL);
if(c==-1)
break;
switch (c)
{
case 'h':
printf("Usage: %s [options]\n",progname);
printf("\t -h --help\t\tdisplay usage summary\n");
printf("\t -v --version\t\tdisplay version\n");
printf("\t -d --device=device\tread from given device\n");
printf("\t -g --geometry=geometry\twindow geometry\n");
printf("\t -t --timediv=value\tms per unit\n");
printf("\t -m --mode\t\tenable alternative display mode\n");
return(EXIT_SUCCESS);
case 'v':
printf("%s %s\n",progname, VERSION);
return(EXIT_SUCCESS);
case 'd':
device=optarg;
break;
case 'g':
geometry = optarg;
break;
case 't': /* timediv */
div = strtol(optarg,NULL,10);
break;
case 'm':
dmode=1;
break;
default:
printf("Usage: %s [options]\n",progname);
return(EXIT_FAILURE);
}
}
if(optind < argc)
{
fprintf(stderr,"%s: too many arguments\n",progname);
return(EXIT_FAILURE);
}
if(!isatty(STDIN_FILENO))
{
use_stdin = 1;
fd = STDIN_FILENO;
}
else
{
fd = open(device, O_RDONLY);
if(fd == -1)
{
perror(progname);
fprintf(stderr, "%s: error opening %s\n",
progname, device);
exit(EXIT_FAILURE);
}
if((fstat(fd,&s)!=-1) && (S_ISFIFO(s.st_mode)))
{
/* can't do ioctls on a pipe */
}
else if(ioctl(fd, LIRC_GET_REC_MODE, &mode) == -1 ||
mode != LIRC_MODE_MODE2)
{
printf("This program is only intended for receivers "
"supporting the pulse/space layer.\n");
printf("Note that this is no error, but this "
"program simply makes no sense for your\n"
"receiver.\n");
printf("In order to test your setup run lircd "
"with the --nodaemon option and \n"
"then check if the remote works with the irw "
"tool.\n");
close(fd);
exit(EXIT_FAILURE);
}
}
initscreen(geometry);
xfd=XConnectionNumber(d1);
maxfd = fd>xfd ? fd:xfd;
y1=20;
x1=x2=0;
sprintf(textbuffer,"%d ms/unit",div);
for (y2=0;y2<w1_w;y2+=10) XDrawLine(d1,w1,gc1,y2,0,y2,w1_h);
XDrawString(d1,w1,gc2,w1_w-100,10,textbuffer,strlen(textbuffer));
XFlush(d1);
while(1)
{
while (XPending(d1)>0)
{
XNextEvent(d1, &event_return1);
switch(event_return1.type)
{
case KeyPress:
if (event_return1.xkey.keycode==XKeysymToKeycode(d1,XStringToKeysym("q")))
{
closescreen();
exit(1);
}
break;
case Expose:
case ConfigureNotify:
switch(event_return1.type)
{
case Expose:
break;
case ConfigureNotify:
if(w1_w==event_return1.xconfigure.width &&
w1_h==event_return1.xconfigure.height)
{
continue;
}
w1_w=event_return1.xconfigure.width;
w1_h=event_return1.xconfigure.height;
break;
}
XClearWindow(d1,w1);
for (y2=0;y2<w1_w;y2+=10) XDrawLine(d1,w1,gc1,y2,0,y2,w1_h);
XDrawString(d1,w1,gc2,w1_w-100,10,textbuffer,strlen(textbuffer));
XFlush(d1);
// printf("resize \n");
break;
default:
;
}
}
FD_ZERO(&rfds);
FD_SET(fd, &rfds);
FD_SET(xfd, &rfds);
retval = select(maxfd+1, &rfds, NULL, NULL, NULL);
if (FD_ISSET(fd,&rfds)) {
if(use_stdin)
{
static int space=1;
unsigned long scan;
if(space)
{
result = fscanf(stdin,"space %ld\n",&scan);
}
else
{
result = fscanf(stdin,"pulse %ld\n",&scan);
}
if(result == 1)
{
data=(lirc_t) scan;
if(!space) data|=PULSE_BIT;
}
else
{
fd = STDOUT_FILENO;
}
space = !space;
}
else
{
result=read(fd,&data,sizeof(data));
}
if (result!=0)
{
// printf("%.8x\t",data);
x2=(data&PULSE_MASK)/(div*50);
if(x2>400)
{
if(!dmode) { y1+=15; } else { y1++; }
x1=0;
}
else
{
if (x1==0)
{
if(!dmode) XDrawLine(d1,w1,gc2,x1, y1+10, x1+10, y1+10);
x1+=10;
if(!dmode) XDrawLine(d1,w1,gc2,x1, y1+10, x1, y1);
}
if (x1<w1_w)
{
if(dmode)
{
if(data&PULSE_BIT) XDrawLine(d1,w1,gc2,x1,y1,x1+x2,y1);
x1+=x2;
}
else
{
XDrawLine(d1,w1,gc2,x1,
((data&PULSE_BIT) ? y1:y1+10),x1+x2,
((data&PULSE_BIT) ? y1:y1+10));
x1+=x2;
XDrawLine(d1,w1,gc2,x1,
((data&PULSE_BIT) ? y1:y1+10),x1,
((data&PULSE_BIT) ? y1+10:y1));
}
}
}
if (y1>w1_h)
{
y1=20;
XClearWindow(d1,w1);
for (y2=0;y2<w1_w;y2+=10) XDrawLine(d1,w1,gc1,y2,0,y2,w1_h);
XDrawString(d1,w1,gc2,w1_w-100,10,textbuffer,strlen(textbuffer));
}
}
XFlush(d1);
}
}
exit(EXIT_SUCCESS);
}
| gpl-2.0 |
garrikus/o3_linux | fs/ubifs/super.c | 5 | 61358 | /*
* This file is part of UBIFS.
*
* Copyright (C) 2006-2008 Nokia Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 51
* Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* Authors: Artem Bityutskiy (Битюцкий Артём)
* Adrian Hunter
*/
/*
* This file implements UBIFS initialization and VFS superblock operations. Some
* initialization stuff which is rather large and complex is placed at
* corresponding subsystems, but most of it is here.
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include "ubifs.h"
/*
* Maximum amount of memory we may 'kmalloc()' without worrying that we are
* allocating too much.
*/
#define UBIFS_KMALLOC_OK (128*1024)
/* Slab cache for UBIFS inodes */
struct kmem_cache *ubifs_inode_slab;
/* UBIFS TNC shrinker description */
static struct shrinker ubifs_shrinker_info = {
.shrink = ubifs_shrinker,
.seeks = DEFAULT_SEEKS,
};
/**
* validate_inode - validate inode.
* @c: UBIFS file-system description object
* @inode: the inode to validate
*
* This is a helper function for 'ubifs_iget()' which validates various fields
* of a newly built inode to make sure they contain sane values and prevent
* possible vulnerabilities. Returns zero if the inode is all right and
* a non-zero error code if not.
*/
static int validate_inode(struct ubifs_info *c, const struct inode *inode)
{
int err;
const struct ubifs_inode *ui = ubifs_inode(inode);
if (inode->i_size > c->max_inode_sz) {
ubifs_err("inode is too large (%lld)",
(long long)inode->i_size);
return 1;
}
if (ui->compr_type < 0 || ui->compr_type >= UBIFS_COMPR_TYPES_CNT) {
ubifs_err("unknown compression type %d", ui->compr_type);
return 2;
}
if (ui->xattr_names + ui->xattr_cnt > XATTR_LIST_MAX)
return 3;
if (ui->data_len < 0 || ui->data_len > UBIFS_MAX_INO_DATA)
return 4;
if (ui->xattr && !S_ISREG(inode->i_mode))
return 5;
if (!ubifs_compr_present(ui->compr_type)) {
ubifs_warn("inode %lu uses '%s' compression, but it was not "
"compiled in", inode->i_ino,
ubifs_compr_name(ui->compr_type));
}
err = dbg_check_dir(c, inode);
return err;
}
struct inode *ubifs_iget(struct super_block *sb, unsigned long inum)
{
int err;
union ubifs_key key;
struct ubifs_ino_node *ino;
struct ubifs_info *c = sb->s_fs_info;
struct inode *inode;
struct ubifs_inode *ui;
dbg_gen("inode %lu", inum);
inode = iget_locked(sb, inum);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
ui = ubifs_inode(inode);
ino = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS);
if (!ino) {
err = -ENOMEM;
goto out;
}
ino_key_init(c, &key, inode->i_ino);
err = ubifs_tnc_lookup(c, &key, ino);
if (err)
goto out_ino;
inode->i_flags |= (S_NOCMTIME | S_NOATIME);
inode->i_nlink = le32_to_cpu(ino->nlink);
inode->i_uid = le32_to_cpu(ino->uid);
inode->i_gid = le32_to_cpu(ino->gid);
inode->i_atime.tv_sec = (int64_t)le64_to_cpu(ino->atime_sec);
inode->i_atime.tv_nsec = le32_to_cpu(ino->atime_nsec);
inode->i_mtime.tv_sec = (int64_t)le64_to_cpu(ino->mtime_sec);
inode->i_mtime.tv_nsec = le32_to_cpu(ino->mtime_nsec);
inode->i_ctime.tv_sec = (int64_t)le64_to_cpu(ino->ctime_sec);
inode->i_ctime.tv_nsec = le32_to_cpu(ino->ctime_nsec);
inode->i_mode = le32_to_cpu(ino->mode);
inode->i_size = le64_to_cpu(ino->size);
ui->data_len = le32_to_cpu(ino->data_len);
ui->flags = le32_to_cpu(ino->flags);
ui->compr_type = le16_to_cpu(ino->compr_type);
ui->creat_sqnum = le64_to_cpu(ino->creat_sqnum);
ui->xattr_cnt = le32_to_cpu(ino->xattr_cnt);
ui->xattr_size = le32_to_cpu(ino->xattr_size);
ui->xattr_names = le32_to_cpu(ino->xattr_names);
ui->synced_i_size = ui->ui_size = inode->i_size;
ui->xattr = (ui->flags & UBIFS_XATTR_FL) ? 1 : 0;
err = validate_inode(c, inode);
if (err)
goto out_invalid;
/* Disable read-ahead */
inode->i_mapping->backing_dev_info = &c->bdi;
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
inode->i_mapping->a_ops = &ubifs_file_address_operations;
inode->i_op = &ubifs_file_inode_operations;
inode->i_fop = &ubifs_file_operations;
if (ui->xattr) {
ui->data = kmalloc(ui->data_len + 1, GFP_NOFS);
if (!ui->data) {
err = -ENOMEM;
goto out_ino;
}
memcpy(ui->data, ino->data, ui->data_len);
((char *)ui->data)[ui->data_len] = '\0';
} else if (ui->data_len != 0) {
err = 10;
goto out_invalid;
}
break;
case S_IFDIR:
inode->i_op = &ubifs_dir_inode_operations;
inode->i_fop = &ubifs_dir_operations;
if (ui->data_len != 0) {
err = 11;
goto out_invalid;
}
break;
case S_IFLNK:
inode->i_op = &ubifs_symlink_inode_operations;
if (ui->data_len <= 0 || ui->data_len > UBIFS_MAX_INO_DATA) {
err = 12;
goto out_invalid;
}
ui->data = kmalloc(ui->data_len + 1, GFP_NOFS);
if (!ui->data) {
err = -ENOMEM;
goto out_ino;
}
memcpy(ui->data, ino->data, ui->data_len);
((char *)ui->data)[ui->data_len] = '\0';
break;
case S_IFBLK:
case S_IFCHR:
{
dev_t rdev;
union ubifs_dev_desc *dev;
ui->data = kmalloc(sizeof(union ubifs_dev_desc), GFP_NOFS);
if (!ui->data) {
err = -ENOMEM;
goto out_ino;
}
dev = (union ubifs_dev_desc *)ino->data;
if (ui->data_len == sizeof(dev->new))
rdev = new_decode_dev(le32_to_cpu(dev->new));
else if (ui->data_len == sizeof(dev->huge))
rdev = huge_decode_dev(le64_to_cpu(dev->huge));
else {
err = 13;
goto out_invalid;
}
memcpy(ui->data, ino->data, ui->data_len);
inode->i_op = &ubifs_file_inode_operations;
init_special_inode(inode, inode->i_mode, rdev);
break;
}
case S_IFSOCK:
case S_IFIFO:
inode->i_op = &ubifs_file_inode_operations;
init_special_inode(inode, inode->i_mode, 0);
if (ui->data_len != 0) {
err = 14;
goto out_invalid;
}
break;
default:
err = 15;
goto out_invalid;
}
kfree(ino);
ubifs_set_inode_flags(inode);
unlock_new_inode(inode);
return inode;
out_invalid:
ubifs_err("inode %lu validation failed, error %d", inode->i_ino, err);
dbg_dump_node(c, ino);
dbg_dump_inode(c, inode);
err = -EINVAL;
out_ino:
kfree(ino);
out:
ubifs_err("failed to read inode %lu, error %d", inode->i_ino, err);
iget_failed(inode);
return ERR_PTR(err);
}
static struct inode *ubifs_alloc_inode(struct super_block *sb)
{
struct ubifs_inode *ui;
ui = kmem_cache_alloc(ubifs_inode_slab, GFP_NOFS);
if (!ui)
return NULL;
memset((void *)ui + sizeof(struct inode), 0,
sizeof(struct ubifs_inode) - sizeof(struct inode));
mutex_init(&ui->ui_mutex);
spin_lock_init(&ui->ui_lock);
return &ui->vfs_inode;
};
static void ubifs_destroy_inode(struct inode *inode)
{
struct ubifs_inode *ui = ubifs_inode(inode);
kfree(ui->data);
kmem_cache_free(ubifs_inode_slab, inode);
}
/*
* Note, Linux write-back code calls this without 'i_mutex'.
*/
static int ubifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int err = 0;
struct ubifs_info *c = inode->i_sb->s_fs_info;
struct ubifs_inode *ui = ubifs_inode(inode);
ubifs_assert(!ui->xattr);
if (is_bad_inode(inode))
return 0;
mutex_lock(&ui->ui_mutex);
/*
* Due to races between write-back forced by budgeting
* (see 'sync_some_inodes()') and pdflush write-back, the inode may
* have already been synchronized, do not do this again. This might
* also happen if it was synchronized in an VFS operation, e.g.
* 'ubifs_link()'.
*/
if (!ui->dirty) {
mutex_unlock(&ui->ui_mutex);
return 0;
}
/*
* As an optimization, do not write orphan inodes to the media just
* because this is not needed.
*/
dbg_gen("inode %lu, mode %#x, nlink %u",
inode->i_ino, (int)inode->i_mode, inode->i_nlink);
if (inode->i_nlink) {
err = ubifs_jnl_write_inode(c, inode);
if (err)
ubifs_err("can't write inode %lu, error %d",
inode->i_ino, err);
else
err = dbg_check_inode_size(c, inode, ui->ui_size);
}
ui->dirty = 0;
mutex_unlock(&ui->ui_mutex);
ubifs_release_dirty_inode_budget(c, ui);
return err;
}
static void ubifs_evict_inode(struct inode *inode)
{
int err;
struct ubifs_info *c = inode->i_sb->s_fs_info;
struct ubifs_inode *ui = ubifs_inode(inode);
if (ui->xattr)
/*
* Extended attribute inode deletions are fully handled in
* 'ubifs_removexattr()'. These inodes are special and have
* limited usage, so there is nothing to do here.
*/
goto out;
dbg_gen("inode %lu, mode %#x", inode->i_ino, (int)inode->i_mode);
ubifs_assert(!atomic_read(&inode->i_count));
truncate_inode_pages(&inode->i_data, 0);
if (inode->i_nlink)
goto done;
if (is_bad_inode(inode))
goto out;
ui->ui_size = inode->i_size = 0;
err = ubifs_jnl_delete_inode(c, inode);
if (err)
/*
* Worst case we have a lost orphan inode wasting space, so a
* simple error message is OK here.
*/
ubifs_err("can't delete inode %lu, error %d",
inode->i_ino, err);
out:
if (ui->dirty)
ubifs_release_dirty_inode_budget(c, ui);
else {
/* We've deleted something - clean the "no space" flags */
c->bi.nospace = c->bi.nospace_rp = 0;
smp_wmb();
}
done:
end_writeback(inode);
}
static void ubifs_dirty_inode(struct inode *inode)
{
struct ubifs_inode *ui = ubifs_inode(inode);
ubifs_assert(mutex_is_locked(&ui->ui_mutex));
if (!ui->dirty) {
ui->dirty = 1;
dbg_gen("inode %lu", inode->i_ino);
}
}
static int ubifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct ubifs_info *c = dentry->d_sb->s_fs_info;
unsigned long long free;
__le32 *uuid = (__le32 *)c->uuid;
free = ubifs_get_free_space(c);
dbg_gen("free space %lld bytes (%lld blocks)",
free, free >> UBIFS_BLOCK_SHIFT);
buf->f_type = UBIFS_SUPER_MAGIC;
buf->f_bsize = UBIFS_BLOCK_SIZE;
buf->f_blocks = c->block_cnt;
buf->f_bfree = free >> UBIFS_BLOCK_SHIFT;
if (free > c->report_rp_size)
buf->f_bavail = (free - c->report_rp_size) >> UBIFS_BLOCK_SHIFT;
else
buf->f_bavail = 0;
buf->f_files = 0;
buf->f_ffree = 0;
buf->f_namelen = UBIFS_MAX_NLEN;
buf->f_fsid.val[0] = le32_to_cpu(uuid[0]) ^ le32_to_cpu(uuid[2]);
buf->f_fsid.val[1] = le32_to_cpu(uuid[1]) ^ le32_to_cpu(uuid[3]);
ubifs_assert(buf->f_bfree <= c->block_cnt);
return 0;
}
static int ubifs_show_options(struct seq_file *s, struct vfsmount *mnt)
{
struct ubifs_info *c = mnt->mnt_sb->s_fs_info;
if (c->mount_opts.unmount_mode == 2)
seq_printf(s, ",fast_unmount");
else if (c->mount_opts.unmount_mode == 1)
seq_printf(s, ",norm_unmount");
if (c->mount_opts.bulk_read == 2)
seq_printf(s, ",bulk_read");
else if (c->mount_opts.bulk_read == 1)
seq_printf(s, ",no_bulk_read");
if (c->mount_opts.chk_data_crc == 2)
seq_printf(s, ",chk_data_crc");
else if (c->mount_opts.chk_data_crc == 1)
seq_printf(s, ",no_chk_data_crc");
if (c->mount_opts.override_compr) {
seq_printf(s, ",compr=%s",
ubifs_compr_name(c->mount_opts.compr_type));
}
return 0;
}
static int ubifs_sync_fs(struct super_block *sb, int wait)
{
int i, err;
struct ubifs_info *c = sb->s_fs_info;
/*
* Zero @wait is just an advisory thing to help the file system shove
* lots of data into the queues, and there will be the second
* '->sync_fs()' call, with non-zero @wait.
*/
if (!wait)
return 0;
/*
* Synchronize write buffers, because 'ubifs_run_commit()' does not
* do this if it waits for an already running commit.
*/
for (i = 0; i < c->jhead_cnt; i++) {
err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
if (err)
return err;
}
/*
* Strictly speaking, it is not necessary to commit the journal here,
* synchronizing write-buffers would be enough. But committing makes
* UBIFS free space predictions much more accurate, so we want to let
* the user be able to get more accurate results of 'statfs()' after
* they synchronize the file system.
*/
err = ubifs_run_commit(c);
if (err)
return err;
return ubi_sync(c->vi.ubi_num);
}
/**
* init_constants_early - initialize UBIFS constants.
* @c: UBIFS file-system description object
*
* This function initializes UBIFS constants which do not need the superblock to
* be read. It also checks that the UBI volume satisfies basic UBIFS
* requirements. Returns zero in case of success and a negative error code in
* case of failure.
*/
static int init_constants_early(struct ubifs_info *c)
{
if (c->vi.corrupted) {
ubifs_warn("UBI volume is corrupted - read-only mode");
c->ro_media = 1;
}
if (c->di.ro_mode) {
ubifs_msg("read-only UBI device");
c->ro_media = 1;
}
if (c->vi.vol_type == UBI_STATIC_VOLUME) {
ubifs_msg("static UBI volume - read-only mode");
c->ro_media = 1;
}
c->leb_cnt = c->vi.size;
c->leb_size = c->vi.usable_leb_size;
c->leb_start = c->di.leb_start;
c->half_leb_size = c->leb_size / 2;
c->min_io_size = c->di.min_io_size;
c->min_io_shift = fls(c->min_io_size) - 1;
c->max_write_size = c->di.max_write_size;
c->max_write_shift = fls(c->max_write_size) - 1;
if (c->leb_size < UBIFS_MIN_LEB_SZ) {
ubifs_err("too small LEBs (%d bytes), min. is %d bytes",
c->leb_size, UBIFS_MIN_LEB_SZ);
return -EINVAL;
}
if (c->leb_cnt < UBIFS_MIN_LEB_CNT) {
ubifs_err("too few LEBs (%d), min. is %d",
c->leb_cnt, UBIFS_MIN_LEB_CNT);
return -EINVAL;
}
if (!is_power_of_2(c->min_io_size)) {
ubifs_err("bad min. I/O size %d", c->min_io_size);
return -EINVAL;
}
/*
* Maximum write size has to be greater than or equal to the min. I/O
* size, and be a multiple of the min. I/O size.
*/
if (c->max_write_size < c->min_io_size ||
c->max_write_size % c->min_io_size ||
!is_power_of_2(c->max_write_size)) {
ubifs_err("bad write buffer size %d for %d min. I/O unit",
c->max_write_size, c->min_io_size);
return -EINVAL;
}
/*
* UBIFS aligns all nodes to an 8-byte boundary, so to make the functions in
* io.c simpler, assume the minimum I/O unit size to be 8 bytes if it is
* less than 8.
*/
if (c->min_io_size < 8) {
c->min_io_size = 8;
c->min_io_shift = 3;
if (c->max_write_size < c->min_io_size) {
c->max_write_size = c->min_io_size;
c->max_write_shift = c->min_io_shift;
}
}
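/*
 * Example (a sketch, not from the original sources): a NAND flash with
 * 2048-byte pages reports min_io_size == 2048, so min_io_shift becomes
 * fls(2048) - 1 == 11 and the node sizes below are aligned up to that
 * 2048-byte boundary.
 */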
c->ref_node_alsz = ALIGN(UBIFS_REF_NODE_SZ, c->min_io_size);
c->mst_node_alsz = ALIGN(UBIFS_MST_NODE_SZ, c->min_io_size);
/*
* Initialize node length ranges which are mostly needed for node
* length validation.
*/
c->ranges[UBIFS_PAD_NODE].len = UBIFS_PAD_NODE_SZ;
c->ranges[UBIFS_SB_NODE].len = UBIFS_SB_NODE_SZ;
c->ranges[UBIFS_MST_NODE].len = UBIFS_MST_NODE_SZ;
c->ranges[UBIFS_REF_NODE].len = UBIFS_REF_NODE_SZ;
c->ranges[UBIFS_TRUN_NODE].len = UBIFS_TRUN_NODE_SZ;
c->ranges[UBIFS_CS_NODE].len = UBIFS_CS_NODE_SZ;
c->ranges[UBIFS_INO_NODE].min_len = UBIFS_INO_NODE_SZ;
c->ranges[UBIFS_INO_NODE].max_len = UBIFS_MAX_INO_NODE_SZ;
c->ranges[UBIFS_ORPH_NODE].min_len =
UBIFS_ORPH_NODE_SZ + sizeof(__le64);
c->ranges[UBIFS_ORPH_NODE].max_len = c->leb_size;
c->ranges[UBIFS_DENT_NODE].min_len = UBIFS_DENT_NODE_SZ;
c->ranges[UBIFS_DENT_NODE].max_len = UBIFS_MAX_DENT_NODE_SZ;
c->ranges[UBIFS_XENT_NODE].min_len = UBIFS_XENT_NODE_SZ;
c->ranges[UBIFS_XENT_NODE].max_len = UBIFS_MAX_XENT_NODE_SZ;
c->ranges[UBIFS_DATA_NODE].min_len = UBIFS_DATA_NODE_SZ;
c->ranges[UBIFS_DATA_NODE].max_len = UBIFS_MAX_DATA_NODE_SZ;
/*
* Minimum indexing node size is amended later when superblock is
* read and the key length is known.
*/
c->ranges[UBIFS_IDX_NODE].min_len = UBIFS_IDX_NODE_SZ + UBIFS_BRANCH_SZ;
/*
* Maximum indexing node size is amended later when superblock is
* read and the fanout is known.
*/
c->ranges[UBIFS_IDX_NODE].max_len = INT_MAX;
/*
* Initialize dead and dark LEB space watermarks. See gc.c for comments
* about these values.
*/
c->dead_wm = ALIGN(MIN_WRITE_SZ, c->min_io_size);
c->dark_wm = ALIGN(UBIFS_MAX_NODE_SZ, c->min_io_size);
/*
* Calculate how many bytes would be wasted at the end of LEB if it was
* fully filled with data nodes of maximum size. This is used in
* calculations when reporting free space.
*/
c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ;
/* Buffer size for bulk-reads */
c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
if (c->max_bu_buf_len > c->leb_size)
c->max_bu_buf_len = c->leb_size;
return 0;
}
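/*
* Illustrative, self-contained userspace sketch (not part of the driver):
* it mirrors the shift and overhead arithmetic computed above so the
* constants can be checked by hand. All sizes below are hypothetical
* examples, and fls() is re-implemented because the kernel helper is not
* available in userspace.
*/
#if 0
#include <stdio.h>
static int demo_fls(unsigned int x)
{
int r = 0;
/* 1-based position of the highest set bit */
while (x) {
x >>= 1;
r++;
}
return r;
}
int main(void)
{
unsigned int min_io_size = 2048; /* hypothetical NAND page size */
unsigned int leb_size = 126976; /* hypothetical LEB size */
unsigned int max_data_node_sz = 4144; /* hypothetical: a data-node header plus one 4 KiB block */
/* min_io_shift = fls(min_io_size) - 1, i.e. log2 for powers of 2 */
printf("min_io_shift = %d\n", demo_fls(min_io_size) - 1);
/* bytes wasted per LEB when filled with max-size data nodes */
printf("leb_overhead = %u\n", leb_size % max_data_node_sz);
return 0;
}
#endif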
/**
* bud_wbuf_callback - bud LEB write-buffer synchronization call-back.
* @c: UBIFS file-system description object
* @lnum: LEB the write-buffer was synchronized to
* @free: how many free bytes left in this LEB
* @pad: how many bytes were padded
*
* This is a callback function which is called by the I/O unit when the
* write-buffer is synchronized. We need this to correctly maintain space
* accounting in bud logical eraseblocks. This function returns zero in case of
* success and a negative error code in case of failure.
*
* This function actually belongs to the journal, but we keep it here because
* we want to keep it static.
*/
static int bud_wbuf_callback(struct ubifs_info *c, int lnum, int free, int pad)
{
return ubifs_update_one_lp(c, lnum, free, pad, 0, 0);
}
/**
* init_constants_sb - initialize UBIFS constants.
* @c: UBIFS file-system description object
*
* This is a helper function which initializes various UBIFS constants after
* the superblock has been read. It also checks various UBIFS parameters and
* makes sure they are all right. Returns zero in case of success and a
* negative error code in case of failure.
*/
static int init_constants_sb(struct ubifs_info *c)
{
int tmp, err;
long long tmp64;
c->main_bytes = (long long)c->main_lebs * c->leb_size;
c->max_znode_sz = sizeof(struct ubifs_znode) +
c->fanout * sizeof(struct ubifs_zbranch);
tmp = ubifs_idx_node_sz(c, 1);
c->ranges[UBIFS_IDX_NODE].min_len = tmp;
c->min_idx_node_sz = ALIGN(tmp, 8);
tmp = ubifs_idx_node_sz(c, c->fanout);
c->ranges[UBIFS_IDX_NODE].max_len = tmp;
c->max_idx_node_sz = ALIGN(tmp, 8);
/* Make sure LEB size is large enough to fit full commit */
tmp = UBIFS_CS_NODE_SZ + UBIFS_REF_NODE_SZ * c->jhead_cnt;
tmp = ALIGN(tmp, c->min_io_size);
if (tmp > c->leb_size) {
dbg_err("too small LEB size %d, at least %d needed",
c->leb_size, tmp);
return -EINVAL;
}
/*
* Make sure that the log is large enough to fit reference nodes for
* all buds plus one reserved LEB.
*/
tmp64 = c->max_bud_bytes + c->leb_size - 1;
c->max_bud_cnt = div_u64(tmp64, c->leb_size);
tmp = (c->ref_node_alsz * c->max_bud_cnt + c->leb_size - 1);
tmp /= c->leb_size;
tmp += 1;
if (c->log_lebs < tmp) {
dbg_err("too small log %d LEBs, required min. %d LEBs",
c->log_lebs, tmp);
return -EINVAL;
}
/*
* When budgeting we assume worst-case scenarios in which the pages are
* not compressed and direntries are of the maximum size.
*
* Note, data which may be stored in inodes is budgeted separately, so
* it is not included in 'c->bi.inode_budget'.
*/
c->bi.page_budget = UBIFS_MAX_DATA_NODE_SZ * UBIFS_BLOCKS_PER_PAGE;
c->bi.inode_budget = UBIFS_INO_NODE_SZ;
c->bi.dent_budget = UBIFS_MAX_DENT_NODE_SZ;
/*
* When the amount of flash space used by buds becomes
* 'c->max_bud_bytes', UBIFS just blocks all writers and starts commit.
* The writers are unblocked when the commit is finished. To avoid
* writers being blocked, UBIFS initiates a background commit in advance,
* when the number of bud bytes rises above the limit defined below
* (13/16, i.e. roughly 81%, of 'c->max_bud_bytes').
*/
c->bg_bud_bytes = (c->max_bud_bytes * 13) >> 4;
/*
* Ensure minimum journal size. All the bytes in the journal heads are
* considered to be used when calculating the current journal usage.
* Consequently, if the journal is too small, UBIFS will treat it as
* always full.
*/
tmp64 = (long long)(c->jhead_cnt + 1) * c->leb_size + 1;
if (c->bg_bud_bytes < tmp64)
c->bg_bud_bytes = tmp64;
if (c->max_bud_bytes < tmp64 + c->leb_size)
c->max_bud_bytes = tmp64 + c->leb_size;
err = ubifs_calc_lpt_geom(c);
if (err)
return err;
/* Initialize effective LEB size used in budgeting calculations */
c->idx_leb_size = c->leb_size - c->max_idx_node_sz;
return 0;
}
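/*
* Illustrative, self-contained userspace sketch (not part of the driver)
* of the minimum-log-size check above, written as plain ceil-divisions.
* All geometry values are hypothetical; the real ones come from the
* superblock.
*/
#if 0
#include <stdio.h>
int main(void)
{
long long max_bud_bytes = 8 * 1024 * 1024; /* hypothetical */
int leb_size = 126976; /* hypothetical */
int ref_node_alsz = 64; /* hypothetical aligned ref node size */
long long max_bud_cnt;
long long min_log_lebs;
/* ceil(max_bud_bytes / leb_size) */
max_bud_cnt = (max_bud_bytes + leb_size - 1) / leb_size;
/* reference nodes for all buds, rounded up, plus one reserved LEB */
min_log_lebs = (ref_node_alsz * max_bud_cnt + leb_size - 1) / leb_size + 1;
printf("max_bud_cnt = %lld, min. log LEBs = %lld\n", max_bud_cnt, min_log_lebs);
return 0;
}
#endif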
/**
* init_constants_master - initialize UBIFS constants.
* @c: UBIFS file-system description object
*
* This is a helper function which initializes various UBIFS constants after
* the master node has been read. It also checks various UBIFS parameters and
* makes sure they are all right.
*/
static void init_constants_master(struct ubifs_info *c)
{
long long tmp64;
c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
c->report_rp_size = ubifs_reported_space(c, c->rp_size);
/*
* Calculate total amount of FS blocks. This number is not used
* internally because it does not make much sense for UBIFS, but it is
* necessary to report something for the 'statfs()' call.
*
* Subtract the LEB reserved for GC, the LEB which is reserved for
* deletions, minimum LEBs for the index, and assume only one journal
* head is available.
*/
tmp64 = c->main_lebs - 1 - 1 - MIN_INDEX_LEBS - c->jhead_cnt + 1;
tmp64 *= (long long)c->leb_size - c->leb_overhead;
tmp64 = ubifs_reported_space(c, tmp64);
c->block_cnt = tmp64 >> UBIFS_BLOCK_SHIFT;
}
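/*
* Illustrative sketch of the statfs block-count arithmetic above, with
* hypothetical geometry and with ubifs_reported_space() replaced by an
* identity stub, since the real helper also subtracts per-LEB overheads
* that depend on the full geometry.
*/
#if 0
#include <stdio.h>
#define DEMO_BLOCK_SHIFT 12 /* UBIFS uses 4 KiB FS blocks */
static long long reported_space_stub(long long bytes)
{
return bytes; /* placeholder for ubifs_reported_space() */
}
int main(void)
{
int main_lebs = 1000, jhead_cnt = 3; /* hypothetical */
int leb_size = 126976, leb_overhead = 2656; /* hypothetical */
int min_index_lebs = 2; /* stands in for MIN_INDEX_LEBS */
long long tmp64;
/* GC LEB + deletion LEB + index LEBs + all but one journal head */
tmp64 = main_lebs - 1 - 1 - min_index_lebs - jhead_cnt + 1;
tmp64 *= (long long)leb_size - leb_overhead;
tmp64 = reported_space_stub(tmp64);
printf("block_cnt = %lld\n", tmp64 >> DEMO_BLOCK_SHIFT);
return 0;
}
#endif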
/**
* take_gc_lnum - reserve GC LEB.
* @c: UBIFS file-system description object
*
* This function ensures that the LEB reserved for garbage collection is marked
* as "taken" in lprops. We also have to set free space to LEB size and dirty
* space to zero, because lprops may contain out-of-date information if the
* file-system was un-mounted before it has been committed. This function
* returns zero in case of success and a negative error code in case of
* failure.
*/
static int take_gc_lnum(struct ubifs_info *c)
{
int err;
if (c->gc_lnum == -1) {
ubifs_err("no LEB for GC");
return -EINVAL;
}
/* And we have to tell lprops that this LEB is taken */
err = ubifs_change_one_lp(c, c->gc_lnum, c->leb_size, 0,
LPROPS_TAKEN, 0, 0);
return err;
}
/**
* alloc_wbufs - allocate write-buffers.
* @c: UBIFS file-system description object
*
* This helper function allocates and initializes UBIFS write-buffers. Returns
* zero in case of success and %-ENOMEM in case of failure.
*/
static int alloc_wbufs(struct ubifs_info *c)
{
int i, err;
c->jheads = kzalloc(c->jhead_cnt * sizeof(struct ubifs_jhead),
GFP_KERNEL);
if (!c->jheads)
return -ENOMEM;
/* Initialize journal heads */
for (i = 0; i < c->jhead_cnt; i++) {
INIT_LIST_HEAD(&c->jheads[i].buds_list);
err = ubifs_wbuf_init(c, &c->jheads[i].wbuf);
if (err)
return err;
c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback;
c->jheads[i].wbuf.jhead = i;
c->jheads[i].grouped = 1;
}
c->jheads[BASEHD].wbuf.dtype = UBI_SHORTTERM;
/*
* Garbage Collector head likely contains long-term data and
* does not need to be synchronized by timer. Also GC head nodes are
* not grouped.
*/
c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM;
c->jheads[GCHD].wbuf.no_timer = 1;
c->jheads[GCHD].grouped = 0;
return 0;
}
/**
* free_wbufs - free write-buffers.
* @c: UBIFS file-system description object
*/
static void free_wbufs(struct ubifs_info *c)
{
int i;
if (c->jheads) {
for (i = 0; i < c->jhead_cnt; i++) {
kfree(c->jheads[i].wbuf.buf);
kfree(c->jheads[i].wbuf.inodes);
}
kfree(c->jheads);
c->jheads = NULL;
}
}
/**
* free_orphans - free orphans.
* @c: UBIFS file-system description object
*/
static void free_orphans(struct ubifs_info *c)
{
struct ubifs_orphan *orph;
while (c->orph_dnext) {
orph = c->orph_dnext;
c->orph_dnext = orph->dnext;
list_del(&orph->list);
kfree(orph);
}
while (!list_empty(&c->orph_list)) {
orph = list_entry(c->orph_list.next, struct ubifs_orphan, list);
list_del(&orph->list);
kfree(orph);
dbg_err("orphan list not empty at unmount");
}
vfree(c->orph_buf);
c->orph_buf = NULL;
}
/**
* free_buds - free per-bud objects.
* @c: UBIFS file-system description object
*/
static void free_buds(struct ubifs_info *c)
{
struct rb_node *this = c->buds.rb_node;
struct ubifs_bud *bud;
while (this) {
if (this->rb_left)
this = this->rb_left;
else if (this->rb_right)
this = this->rb_right;
else {
bud = rb_entry(this, struct ubifs_bud, rb);
this = rb_parent(this);
if (this) {
if (this->rb_left == &bud->rb)
this->rb_left = NULL;
else
this->rb_right = NULL;
}
kfree(bud);
}
}
}
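/*
* Illustrative, self-contained sketch (not part of the driver) of the
* pattern free_buds() uses above: tear a binary tree down iteratively and
* bottom-up by always descending to a leaf, freeing it, and clearing the
* parent's pointer to it. No recursion and no auxiliary memory are needed,
* which is convenient in error paths. A minimal hypothetical node type
* stands in for rb_node here.
*/
#if 0
#include <stdlib.h>
struct demo_node {
struct demo_node *left, *right, *parent;
};
static void demo_free_tree(struct demo_node *this)
{
while (this) {
if (this->left)
this = this->left;
else if (this->right)
this = this->right;
else {
struct demo_node *parent = this->parent;
if (parent) {
if (parent->left == this)
parent->left = NULL;
else
parent->right = NULL;
}
free(this);
this = parent;
}
}
}
int main(void)
{
struct demo_node *root = calloc(1, sizeof(*root));
root->left = calloc(1, sizeof(*root));
root->left->parent = root;
demo_free_tree(root);
return 0;
}
#endif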
/**
* check_volume_empty - check if the UBI volume is empty.
* @c: UBIFS file-system description object
*
* This function checks if the UBIFS volume is empty by testing whether its
* LEBs are mapped or not. The result of the check is stored in the @c->empty variable.
* Returns zero in case of success and a negative error code in case of
* failure.
*/
static int check_volume_empty(struct ubifs_info *c)
{
int lnum, err;
c->empty = 1;
for (lnum = 0; lnum < c->leb_cnt; lnum++) {
err = ubifs_is_mapped(c, lnum);
if (unlikely(err < 0))
return err;
if (err == 1) {
c->empty = 0;
break;
}
cond_resched();
}
return 0;
}
/*
* UBIFS mount options.
*
* Opt_fast_unmount: do not run a journal commit before un-mounting
* Opt_norm_unmount: run a journal commit before un-mounting
* Opt_bulk_read: enable bulk-reads
* Opt_no_bulk_read: disable bulk-reads
* Opt_chk_data_crc: check CRCs when reading data nodes
* Opt_no_chk_data_crc: do not check CRCs when reading data nodes
* Opt_override_compr: override default compressor
* Opt_err: just end of array marker
*/
enum {
Opt_fast_unmount,
Opt_norm_unmount,
Opt_bulk_read,
Opt_no_bulk_read,
Opt_chk_data_crc,
Opt_no_chk_data_crc,
Opt_override_compr,
Opt_err,
};
static const match_table_t tokens = {
{Opt_fast_unmount, "fast_unmount"},
{Opt_norm_unmount, "norm_unmount"},
{Opt_bulk_read, "bulk_read"},
{Opt_no_bulk_read, "no_bulk_read"},
{Opt_chk_data_crc, "chk_data_crc"},
{Opt_no_chk_data_crc, "no_chk_data_crc"},
{Opt_override_compr, "compr=%s"},
{Opt_err, NULL},
};
/**
* parse_standard_option - parse a standard mount option.
* @option: the option to parse
*
* Normally, standard mount options like "sync" are passed to file-systems as
* flags. However, when a "rootflags=" kernel boot parameter is used, they may
* be present in the options string. This function tries to deal with this
* situation and parse standard options. Returns 0 if the option was not
* recognized, and the corresponding integer flag if it was.
*
* UBIFS is only interested in the "sync" option, so do not check for anything
* else.
*/
static int parse_standard_option(const char *option)
{
ubifs_msg("parse %s", option);
if (!strcmp(option, "sync"))
return MS_SYNCHRONOUS;
return 0;
}
/**
* ubifs_parse_options - parse mount parameters.
* @c: UBIFS file-system description object
* @options: parameters to parse
* @is_remount: non-zero if this is FS re-mount
*
* This function parses UBIFS mount options and returns zero in case of success
* and a negative error code in case of failure.
*/
static int ubifs_parse_options(struct ubifs_info *c, char *options,
int is_remount)
{
char *p;
substring_t args[MAX_OPT_ARGS];
if (!options)
return 0;
while ((p = strsep(&options, ","))) {
int token;
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
/*
* %Opt_fast_unmount and %Opt_norm_unmount options are ignored.
* We accept them in order to be backward-compatible. But this
* should be removed at some point.
*/
case Opt_fast_unmount:
c->mount_opts.unmount_mode = 2;
break;
case Opt_norm_unmount:
c->mount_opts.unmount_mode = 1;
break;
case Opt_bulk_read:
c->mount_opts.bulk_read = 2;
c->bulk_read = 1;
break;
case Opt_no_bulk_read:
c->mount_opts.bulk_read = 1;
c->bulk_read = 0;
break;
case Opt_chk_data_crc:
c->mount_opts.chk_data_crc = 2;
c->no_chk_data_crc = 0;
break;
case Opt_no_chk_data_crc:
c->mount_opts.chk_data_crc = 1;
c->no_chk_data_crc = 1;
break;
case Opt_override_compr:
{
char *name = match_strdup(&args[0]);
if (!name)
return -ENOMEM;
if (!strcmp(name, "none"))
c->mount_opts.compr_type = UBIFS_COMPR_NONE;
else if (!strcmp(name, "lzo"))
c->mount_opts.compr_type = UBIFS_COMPR_LZO;
else if (!strcmp(name, "zlib"))
c->mount_opts.compr_type = UBIFS_COMPR_ZLIB;
else {
ubifs_err("unknown compressor \"%s\"", name);
kfree(name);
return -EINVAL;
}
kfree(name);
c->mount_opts.override_compr = 1;
c->default_compr = c->mount_opts.compr_type;
break;
}
default:
{
unsigned long flag;
struct super_block *sb = c->vfs_sb;
flag = parse_standard_option(p);
if (!flag) {
ubifs_err("unrecognized mount option \"%s\" "
"or missing value", p);
return -EINVAL;
}
sb->s_flags |= flag;
break;
}
}
}
return 0;
}
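/*
* Example (illustrative, not from this source): with the token table above,
* a command like
*
* mount -t ubifs -o bulk_read,compr=lzo ubi0:rootfs /mnt
*
* reaches this parser as the string "bulk_read,compr=lzo"; each
* comma-separated token is matched against @tokens and the corresponding
* @c->mount_opts fields are set. The "ubi0:rootfs" device name is one of
* the forms accepted by open_ubi() further below.
*/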
/**
* destroy_journal - destroy journal data structures.
* @c: UBIFS file-system description object
*
* This function destroys journal data structures including those that may have
* been created by recovery functions.
*/
static void destroy_journal(struct ubifs_info *c)
{
while (!list_empty(&c->unclean_leb_list)) {
struct ubifs_unclean_leb *ucleb;
ucleb = list_entry(c->unclean_leb_list.next,
struct ubifs_unclean_leb, list);
list_del(&ucleb->list);
kfree(ucleb);
}
while (!list_empty(&c->old_buds)) {
struct ubifs_bud *bud;
bud = list_entry(c->old_buds.next, struct ubifs_bud, list);
list_del(&bud->list);
kfree(bud);
}
ubifs_destroy_idx_gc(c);
ubifs_destroy_size_tree(c);
ubifs_tnc_close(c);
free_buds(c);
}
/**
* bu_init - initialize bulk-read information.
* @c: UBIFS file-system description object
*/
static void bu_init(struct ubifs_info *c)
{
ubifs_assert(c->bulk_read == 1);
if (c->bu.buf)
return; /* Already initialized */
again:
c->bu.buf = kmalloc(c->max_bu_buf_len, GFP_KERNEL | __GFP_NOWARN);
if (!c->bu.buf) {
if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) {
c->max_bu_buf_len = UBIFS_KMALLOC_OK;
goto again;
}
/* Just disable bulk-read */
ubifs_warn("Cannot allocate %d bytes of memory for bulk-read, "
"disabling it", c->max_bu_buf_len);
c->mount_opts.bulk_read = 1;
c->bulk_read = 0;
return;
}
}
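/*
* Illustrative, self-contained sketch (not part of the driver) of the
* fallback pattern bu_init() uses above: try the preferred size quietly,
* retry once with a smaller known-good size, and disable the feature if
* even that fails. Userspace malloc() stands in for
* kmalloc(GFP_KERNEL | __GFP_NOWARN); both sizes are hypothetical.
*/
#if 0
#include <stdio.h>
#include <stdlib.h>
#define DEMO_PREFERRED_LEN (128 * 1024) /* hypothetical */
#define DEMO_FALLBACK_LEN (64 * 1024) /* hypothetical */
static void *alloc_with_fallback(size_t *len)
{
void *buf = malloc(*len);
if (!buf && *len > DEMO_FALLBACK_LEN) {
*len = DEMO_FALLBACK_LEN;
buf = malloc(*len);
}
return buf; /* NULL means: disable the feature */
}
int main(void)
{
size_t len = DEMO_PREFERRED_LEN;
void *buf = alloc_with_fallback(&len);
if (buf)
printf("bulk-read buffer: %zu bytes\n", len);
else
printf("bulk-read disabled\n");
free(buf);
return 0;
}
#endif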
/**
* check_free_space - check if there is enough free space to mount.
* @c: UBIFS file-system description object
*
* This function makes sure UBIFS has enough free space to be mounted in
* read/write mode. UBIFS must always have some free space to allow deletions.
*/
static int check_free_space(struct ubifs_info *c)
{
ubifs_assert(c->dark_wm > 0);
if (c->lst.total_free + c->lst.total_dirty < c->dark_wm) {
ubifs_err("insufficient free space to mount in R/W mode");
dbg_dump_budg(c, &c->bi);
dbg_dump_lprops(c);
return -ENOSPC;
}
return 0;
}
/**
* mount_ubifs - mount UBIFS file-system.
* @c: UBIFS file-system description object
*
* This function mounts UBIFS file system. Returns zero in case of success and
* a negative error code in case of failure.
*
* Note, the function does not de-allocate resources if it fails halfway
* through, and the caller has to do this instead.
*/
static int mount_ubifs(struct ubifs_info *c)
{
int err;
long long x;
size_t sz;
c->ro_mount = !!(c->vfs_sb->s_flags & MS_RDONLY);
err = init_constants_early(c);
if (err)
return err;
err = ubifs_debugging_init(c);
if (err)
return err;
err = check_volume_empty(c);
if (err)
goto out_free;
if (c->empty && (c->ro_mount || c->ro_media)) {
/*
* This UBI volume is empty, and read-only, or the file system
* is mounted read-only - we cannot format it.
*/
ubifs_err("can't format empty UBI volume: read-only %s",
c->ro_media ? "UBI volume" : "mount");
err = -EROFS;
goto out_free;
}
if (c->ro_media && !c->ro_mount) {
ubifs_err("cannot mount read-write - read-only media");
err = -EROFS;
goto out_free;
}
/*
* The requirement for the buffer is that it should fit the indexing B-tree
* height's worth of integers. We assume the height of the TNC tree will
* never exceed 64.
*/
err = -ENOMEM;
c->bottom_up_buf = kmalloc(BOTTOM_UP_HEIGHT * sizeof(int), GFP_KERNEL);
if (!c->bottom_up_buf)
goto out_free;
c->sbuf = vmalloc(c->leb_size);
if (!c->sbuf)
goto out_free;
if (!c->ro_mount) {
c->ileb_buf = vmalloc(c->leb_size);
if (!c->ileb_buf)
goto out_free;
}
if (c->bulk_read == 1)
bu_init(c);
if (!c->ro_mount) {
c->write_reserve_buf = kmalloc(COMPRESSED_DATA_NODE_BUF_SZ,
GFP_KERNEL);
if (!c->write_reserve_buf)
goto out_free;
}
c->mounting = 1;
err = ubifs_read_superblock(c);
if (err)
goto out_free;
/*
* Make sure the compressor which is set as default in the superblock
* or overridden by mount options is actually compiled in.
*/
if (!ubifs_compr_present(c->default_compr)) {
ubifs_err("'compressor \"%s\" is not compiled in",
ubifs_compr_name(c->default_compr));
err = -ENOTSUPP;
goto out_free;
}
err = init_constants_sb(c);
if (err)
goto out_free;
sz = ALIGN(c->max_idx_node_sz, c->min_io_size);
sz = ALIGN(sz + c->max_idx_node_sz, c->min_io_size);
c->cbuf = kmalloc(sz, GFP_NOFS);
if (!c->cbuf) {
err = -ENOMEM;
goto out_free;
}
err = alloc_wbufs(c);
if (err)
goto out_cbuf;
sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id);
if (!c->ro_mount) {
/* Create background thread */
c->bgt = kthread_create(ubifs_bg_thread, c, "%s", c->bgt_name);
if (IS_ERR(c->bgt)) {
err = PTR_ERR(c->bgt);
c->bgt = NULL;
ubifs_err("cannot spawn \"%s\", error %d",
c->bgt_name, err);
goto out_wbufs;
}
wake_up_process(c->bgt);
}
err = ubifs_read_master(c);
if (err)
goto out_master;
init_constants_master(c);
if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) {
ubifs_msg("recovery needed");
c->need_recovery = 1;
}
if (c->need_recovery && !c->ro_mount) {
err = ubifs_recover_inl_heads(c, c->sbuf);
if (err)
goto out_master;
}
err = ubifs_lpt_init(c, 1, !c->ro_mount);
if (err)
goto out_master;
if (!c->ro_mount && c->space_fixup) {
err = ubifs_fixup_free_space(c);
if (err)
goto out_master;
}
if (!c->ro_mount) {
/*
* Set the "dirty" flag so that if we reboot uncleanly we
* will notice this immediately on the next mount.
*/
c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
err = ubifs_write_master(c);
if (err)
goto out_lpt;
}
err = dbg_check_idx_size(c, c->bi.old_idx_sz);
if (err)
goto out_lpt;
err = ubifs_replay_journal(c);
if (err)
goto out_journal;
/* Calculate 'min_idx_lebs' after journal replay */
c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
err = ubifs_mount_orphans(c, c->need_recovery, c->ro_mount);
if (err)
goto out_orphans;
if (!c->ro_mount) {
int lnum;
err = check_free_space(c);
if (err)
goto out_orphans;
/* Check for enough log space */
lnum = c->lhead_lnum + 1;
if (lnum >= UBIFS_LOG_LNUM + c->log_lebs)
lnum = UBIFS_LOG_LNUM;
if (lnum == c->ltail_lnum) {
err = ubifs_consolidate_log(c);
if (err)
goto out_orphans;
}
if (c->need_recovery) {
err = ubifs_recover_size(c);
if (err)
goto out_orphans;
err = ubifs_rcvry_gc_commit(c);
if (err)
goto out_orphans;
} else {
err = take_gc_lnum(c);
if (err)
goto out_orphans;
/*
* GC LEB may contain garbage if there was an unclean
* reboot, and it should be un-mapped.
*/
err = ubifs_leb_unmap(c, c->gc_lnum);
if (err)
goto out_orphans;
}
err = dbg_check_lprops(c);
if (err)
goto out_orphans;
} else if (c->need_recovery) {
err = ubifs_recover_size(c);
if (err)
goto out_orphans;
} else {
/*
* Even if we mount read-only, we have to set space in GC LEB
* to proper value because this affects UBIFS free space
* reporting. We do not want to have a situation when
* re-mounting from R/O to R/W changes amount of free space.
*/
err = take_gc_lnum(c);
if (err)
goto out_orphans;
}
spin_lock(&ubifs_infos_lock);
list_add_tail(&c->infos_list, &ubifs_infos);
spin_unlock(&ubifs_infos_lock);
if (c->need_recovery) {
if (c->ro_mount)
ubifs_msg("recovery deferred");
else {
c->need_recovery = 0;
ubifs_msg("recovery completed");
/*
* GC LEB has to be empty and taken at this point. But
* the journal head LEBs may also be accounted as
* "empty taken" if they are empty.
*/
ubifs_assert(c->lst.taken_empty_lebs > 0);
}
} else
ubifs_assert(c->lst.taken_empty_lebs > 0);
err = dbg_check_filesystem(c);
if (err)
goto out_infos;
err = dbg_debugfs_init_fs(c);
if (err)
goto out_infos;
c->mounting = 0;
ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"",
c->vi.ubi_num, c->vi.vol_id, c->vi.name);
if (c->ro_mount)
ubifs_msg("mounted read-only");
x = (long long)c->main_lebs * c->leb_size;
ubifs_msg("file system size: %lld bytes (%lld KiB, %lld MiB, %d "
"LEBs)", x, x >> 10, x >> 20, c->main_lebs);
x = (long long)c->log_lebs * c->leb_size + c->max_bud_bytes;
ubifs_msg("journal size: %lld bytes (%lld KiB, %lld MiB, %d "
"LEBs)", x, x >> 10, x >> 20, c->log_lebs + c->max_bud_cnt);
ubifs_msg("media format: w%d/r%d (latest is w%d/r%d)",
c->fmt_version, c->ro_compat_version,
UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION);
ubifs_msg("default compressor: %s", ubifs_compr_name(c->default_compr));
ubifs_msg("reserved for root: %llu bytes (%llu KiB)",
c->report_rp_size, c->report_rp_size >> 10);
dbg_msg("compiled on: " __DATE__ " at " __TIME__);
dbg_msg("min. I/O unit size: %d bytes", c->min_io_size);
dbg_msg("max. write size: %d bytes", c->max_write_size);
dbg_msg("LEB size: %d bytes (%d KiB)",
c->leb_size, c->leb_size >> 10);
dbg_msg("data journal heads: %d",
c->jhead_cnt - NONDATA_JHEADS_CNT);
dbg_msg("UUID: %pUB", c->uuid);
dbg_msg("big_lpt %d", c->big_lpt);
dbg_msg("log LEBs: %d (%d - %d)",
c->log_lebs, UBIFS_LOG_LNUM, c->log_last);
dbg_msg("LPT area LEBs: %d (%d - %d)",
c->lpt_lebs, c->lpt_first, c->lpt_last);
dbg_msg("orphan area LEBs: %d (%d - %d)",
c->orph_lebs, c->orph_first, c->orph_last);
dbg_msg("main area LEBs: %d (%d - %d)",
c->main_lebs, c->main_first, c->leb_cnt - 1);
dbg_msg("index LEBs: %d", c->lst.idx_lebs);
dbg_msg("total index bytes: %lld (%lld KiB, %lld MiB)",
c->bi.old_idx_sz, c->bi.old_idx_sz >> 10,
c->bi.old_idx_sz >> 20);
dbg_msg("key hash type: %d", c->key_hash_type);
dbg_msg("tree fanout: %d", c->fanout);
dbg_msg("reserved GC LEB: %d", c->gc_lnum);
dbg_msg("first main LEB: %d", c->main_first);
dbg_msg("max. znode size %d", c->max_znode_sz);
dbg_msg("max. index node size %d", c->max_idx_node_sz);
dbg_msg("node sizes: data %zu, inode %zu, dentry %zu",
UBIFS_DATA_NODE_SZ, UBIFS_INO_NODE_SZ, UBIFS_DENT_NODE_SZ);
dbg_msg("node sizes: trun %zu, sb %zu, master %zu",
UBIFS_TRUN_NODE_SZ, UBIFS_SB_NODE_SZ, UBIFS_MST_NODE_SZ);
dbg_msg("node sizes: ref %zu, cmt. start %zu, orph %zu",
UBIFS_REF_NODE_SZ, UBIFS_CS_NODE_SZ, UBIFS_ORPH_NODE_SZ);
dbg_msg("max. node sizes: data %zu, inode %zu dentry %zu, idx %d",
UBIFS_MAX_DATA_NODE_SZ, UBIFS_MAX_INO_NODE_SZ,
UBIFS_MAX_DENT_NODE_SZ, ubifs_idx_node_sz(c, c->fanout));
dbg_msg("dead watermark: %d", c->dead_wm);
dbg_msg("dark watermark: %d", c->dark_wm);
dbg_msg("LEB overhead: %d", c->leb_overhead);
x = (long long)c->main_lebs * c->dark_wm;
dbg_msg("max. dark space: %lld (%lld KiB, %lld MiB)",
x, x >> 10, x >> 20);
dbg_msg("maximum bud bytes: %lld (%lld KiB, %lld MiB)",
c->max_bud_bytes, c->max_bud_bytes >> 10,
c->max_bud_bytes >> 20);
dbg_msg("BG commit bud bytes: %lld (%lld KiB, %lld MiB)",
c->bg_bud_bytes, c->bg_bud_bytes >> 10,
c->bg_bud_bytes >> 20);
dbg_msg("current bud bytes %lld (%lld KiB, %lld MiB)",
c->bud_bytes, c->bud_bytes >> 10, c->bud_bytes >> 20);
dbg_msg("max. seq. number: %llu", c->max_sqnum);
dbg_msg("commit number: %llu", c->cmt_no);
return 0;
out_infos:
spin_lock(&ubifs_infos_lock);
list_del(&c->infos_list);
spin_unlock(&ubifs_infos_lock);
out_orphans:
free_orphans(c);
out_journal:
destroy_journal(c);
out_lpt:
ubifs_lpt_free(c, 0);
out_master:
kfree(c->mst_node);
kfree(c->rcvrd_mst_node);
if (c->bgt)
kthread_stop(c->bgt);
out_wbufs:
free_wbufs(c);
out_cbuf:
kfree(c->cbuf);
out_free:
kfree(c->write_reserve_buf);
kfree(c->bu.buf);
vfree(c->ileb_buf);
vfree(c->sbuf);
kfree(c->bottom_up_buf);
ubifs_debugging_exit(c);
return err;
}
/**
* ubifs_umount - un-mount UBIFS file-system.
* @c: UBIFS file-system description object
*
* Note, this function is called to free allocated resources when un-mounting,
* as well as to free resources when an error occurred while we were halfway
* through mounting (error path cleanup function). So it has to make sure the
* resource was actually allocated before freeing it.
*/
static void ubifs_umount(struct ubifs_info *c)
{
dbg_gen("un-mounting UBI device %d, volume %d", c->vi.ubi_num,
c->vi.vol_id);
dbg_debugfs_exit_fs(c);
spin_lock(&ubifs_infos_lock);
list_del(&c->infos_list);
spin_unlock(&ubifs_infos_lock);
if (c->bgt)
kthread_stop(c->bgt);
destroy_journal(c);
free_wbufs(c);
free_orphans(c);
ubifs_lpt_free(c, 0);
kfree(c->cbuf);
kfree(c->rcvrd_mst_node);
kfree(c->mst_node);
kfree(c->write_reserve_buf);
kfree(c->bu.buf);
vfree(c->ileb_buf);
vfree(c->sbuf);
kfree(c->bottom_up_buf);
ubifs_debugging_exit(c);
}
/**
* ubifs_remount_rw - re-mount in read-write mode.
* @c: UBIFS file-system description object
*
* UBIFS avoids allocating many unnecessary resources when mounted in read-only
* mode. This function allocates the needed resources and re-mounts UBIFS in
* read-write mode.
*/
static int ubifs_remount_rw(struct ubifs_info *c)
{
int err, lnum;
if (c->rw_incompat) {
ubifs_err("the file-system is not R/W-compatible");
ubifs_msg("on-flash format version is w%d/r%d, but software "
"only supports up to version w%d/r%d", c->fmt_version,
c->ro_compat_version, UBIFS_FORMAT_VERSION,
UBIFS_RO_COMPAT_VERSION);
return -EROFS;
}
mutex_lock(&c->umount_mutex);
dbg_save_space_info(c);
c->remounting_rw = 1;
c->ro_mount = 0;
err = check_free_space(c);
if (err)
goto out;
if (c->old_leb_cnt != c->leb_cnt) {
struct ubifs_sb_node *sup;
sup = ubifs_read_sb_node(c);
if (IS_ERR(sup)) {
err = PTR_ERR(sup);
goto out;
}
sup->leb_cnt = cpu_to_le32(c->leb_cnt);
err = ubifs_write_sb_node(c, sup);
kfree(sup);
if (err)
goto out;
}
if (c->need_recovery) {
ubifs_msg("completing deferred recovery");
err = ubifs_write_rcvrd_mst_node(c);
if (err)
goto out;
err = ubifs_recover_size(c);
if (err)
goto out;
err = ubifs_clean_lebs(c, c->sbuf);
if (err)
goto out;
err = ubifs_recover_inl_heads(c, c->sbuf);
if (err)
goto out;
} else {
/* A read-only mount is not allowed to have orphans */
ubifs_assert(c->tot_orphans == 0);
err = ubifs_clear_orphans(c);
if (err)
goto out;
}
if (!(c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY))) {
c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
err = ubifs_write_master(c);
if (err)
goto out;
}
c->ileb_buf = vmalloc(c->leb_size);
if (!c->ileb_buf) {
err = -ENOMEM;
goto out;
}
c->write_reserve_buf = kmalloc(COMPRESSED_DATA_NODE_BUF_SZ, GFP_KERNEL);
if (!c->write_reserve_buf) {
err = -ENOMEM;
goto out;
}
err = ubifs_lpt_init(c, 0, 1);
if (err)
goto out;
/* Create background thread */
c->bgt = kthread_create(ubifs_bg_thread, c, "%s", c->bgt_name);
if (IS_ERR(c->bgt)) {
err = PTR_ERR(c->bgt);
c->bgt = NULL;
ubifs_err("cannot spawn \"%s\", error %d",
c->bgt_name, err);
goto out;
}
wake_up_process(c->bgt);
c->orph_buf = vmalloc(c->leb_size);
if (!c->orph_buf) {
err = -ENOMEM;
goto out;
}
/* Check for enough log space */
lnum = c->lhead_lnum + 1;
if (lnum >= UBIFS_LOG_LNUM + c->log_lebs)
lnum = UBIFS_LOG_LNUM;
if (lnum == c->ltail_lnum) {
err = ubifs_consolidate_log(c);
if (err)
goto out;
}
if (c->need_recovery)
err = ubifs_rcvry_gc_commit(c);
else
err = ubifs_leb_unmap(c, c->gc_lnum);
if (err)
goto out;
dbg_gen("re-mounted read-write");
c->remounting_rw = 0;
if (c->need_recovery) {
c->need_recovery = 0;
ubifs_msg("deferred recovery completed");
} else {
/*
* Do not run the debugging space check if we were doing
* recovery, because when we saved the information we had the
* file-system in a state where the TNC and lprops had been
* modified in memory, but all the I/O operations (including a
* commit) were deferred. So the file-system was in
* "non-committed" state. Now the file-system is in committed
* state, and of course the amount of free space will change
* because, for example, the old index size was imprecise.
*/
err = dbg_check_space_info(c);
}
if (c->space_fixup) {
err = ubifs_fixup_free_space(c);
if (err)
goto out;
}
mutex_unlock(&c->umount_mutex);
return err;
out:
c->ro_mount = 1;
vfree(c->orph_buf);
c->orph_buf = NULL;
if (c->bgt) {
kthread_stop(c->bgt);
c->bgt = NULL;
}
free_wbufs(c);
kfree(c->write_reserve_buf);
c->write_reserve_buf = NULL;
vfree(c->ileb_buf);
c->ileb_buf = NULL;
ubifs_lpt_free(c, 1);
c->remounting_rw = 0;
mutex_unlock(&c->umount_mutex);
return err;
}
/**
* ubifs_remount_ro - re-mount in read-only mode.
* @c: UBIFS file-system description object
*
* We assume VFS has stopped writing. Possibly the background thread could be
* running a commit, however kthread_stop will wait in that case.
*/
static void ubifs_remount_ro(struct ubifs_info *c)
{
int i, err;
ubifs_assert(!c->need_recovery);
ubifs_assert(!c->ro_mount);
mutex_lock(&c->umount_mutex);
if (c->bgt) {
kthread_stop(c->bgt);
c->bgt = NULL;
}
dbg_save_space_info(c);
for (i = 0; i < c->jhead_cnt; i++)
ubifs_wbuf_sync(&c->jheads[i].wbuf);
c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum);
err = ubifs_write_master(c);
if (err)
ubifs_ro_mode(c, err);
vfree(c->orph_buf);
c->orph_buf = NULL;
kfree(c->write_reserve_buf);
c->write_reserve_buf = NULL;
vfree(c->ileb_buf);
c->ileb_buf = NULL;
ubifs_lpt_free(c, 1);
c->ro_mount = 1;
err = dbg_check_space_info(c);
if (err)
ubifs_ro_mode(c, err);
mutex_unlock(&c->umount_mutex);
}
static void ubifs_put_super(struct super_block *sb)
{
int i;
struct ubifs_info *c = sb->s_fs_info;
ubifs_msg("un-mount UBI device %d, volume %d", c->vi.ubi_num,
c->vi.vol_id);
/*
* The following asserts are only valid if there has not been a failure
* of the media. For example, there will be dirty inodes if we failed
* to write them back because of I/O errors.
*/
if (!c->ro_error) {
ubifs_assert(c->bi.idx_growth == 0);
ubifs_assert(c->bi.dd_growth == 0);
ubifs_assert(c->bi.data_growth == 0);
}
/*
* The 'c->umount_lock' prevents races between UBIFS memory shrinker
* and file system un-mount. Namely, it prevents the shrinker from
* picking this superblock for shrinking - it will be just skipped if
* the mutex is locked.
*/
mutex_lock(&c->umount_mutex);
if (!c->ro_mount) {
/*
* First of all kill the background thread to make sure it does
* not interfere with un-mounting and freeing resources.
*/
if (c->bgt) {
kthread_stop(c->bgt);
c->bgt = NULL;
}
/*
* On fatal errors c->ro_error is set to 1, in which case we do
* not write the master node.
*/
if (!c->ro_error) {
int err;
/* Synchronize write-buffers */
for (i = 0; i < c->jhead_cnt; i++)
ubifs_wbuf_sync(&c->jheads[i].wbuf);
/*
* We are being cleanly unmounted which means the
* orphans were killed - indicate this in the master
* node. Also save the reserved GC LEB number.
*/
c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum);
err = ubifs_write_master(c);
if (err)
/*
* Recovery will attempt to fix the master area
* next mount, so we just print a message and
* continue to unmount normally.
*/
ubifs_err("failed to write master node, "
"error %d", err);
} else {
for (i = 0; i < c->jhead_cnt; i++)
/* Make sure write-buffer timers are canceled */
hrtimer_cancel(&c->jheads[i].wbuf.timer);
}
}
ubifs_umount(c);
bdi_destroy(&c->bdi);
ubi_close_volume(c->ubi);
mutex_unlock(&c->umount_mutex);
kfree(c);
}
static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
{
int err;
struct ubifs_info *c = sb->s_fs_info;
dbg_gen("old flags %#lx, new flags %#x", sb->s_flags, *flags);
err = ubifs_parse_options(c, data, 1);
if (err) {
ubifs_err("invalid or unknown remount parameter");
return err;
}
if (c->ro_mount && !(*flags & MS_RDONLY)) {
if (c->ro_error) {
ubifs_msg("cannot re-mount R/W due to prior errors");
return -EROFS;
}
if (c->ro_media) {
ubifs_msg("cannot re-mount R/W - UBI volume is R/O");
return -EROFS;
}
err = ubifs_remount_rw(c);
if (err)
return err;
} else if (!c->ro_mount && (*flags & MS_RDONLY)) {
if (c->ro_error) {
ubifs_msg("cannot re-mount R/O due to prior errors");
return -EROFS;
}
ubifs_remount_ro(c);
}
if (c->bulk_read == 1)
bu_init(c);
else {
dbg_gen("disable bulk-read");
kfree(c->bu.buf);
c->bu.buf = NULL;
}
ubifs_assert(c->lst.taken_empty_lebs > 0);
return 0;
}
const struct super_operations ubifs_super_operations = {
.alloc_inode = ubifs_alloc_inode,
.destroy_inode = ubifs_destroy_inode,
.put_super = ubifs_put_super,
.write_inode = ubifs_write_inode,
.evict_inode = ubifs_evict_inode,
.statfs = ubifs_statfs,
.dirty_inode = ubifs_dirty_inode,
.remount_fs = ubifs_remount_fs,
.show_options = ubifs_show_options,
.sync_fs = ubifs_sync_fs,
};
/**
* open_ubi - parse UBI device name string and open the UBI device.
* @name: UBI volume name
* @mode: UBI volume open mode
*
* The primary method of mounting UBIFS is by specifying the UBI volume
* character device node path. However, UBIFS may also be mounted without any
* character device node using one of the following methods:
*
* o ubiX_Y - mount UBI device number X, volume Y;
* o ubiY - mount UBI device number 0, volume Y;
* o ubiX:NAME - mount UBI device X, volume with name NAME;
* o ubi:NAME - mount UBI device 0, volume with name NAME.
*
* An alternative '!' separator may be used instead of ':' (because some shells
* like busybox may interpret ':' as an NFS host name separator). This function
* returns UBI volume description object in case of success and a negative
* error code in case of failure.
*/
static struct ubi_volume_desc *open_ubi(const char *name, int mode)
{
struct ubi_volume_desc *ubi;
int dev, vol;
char *endptr;
/* First, try to open using the device node path method */
ubi = ubi_open_volume_path(name, mode);
if (!IS_ERR(ubi))
return ubi;
/* Try the "nodev" method */
if (name[0] != 'u' || name[1] != 'b' || name[2] != 'i')
return ERR_PTR(-EINVAL);
/* ubi:NAME method */
if ((name[3] == ':' || name[3] == '!') && name[4] != '\0')
return ubi_open_volume_nm(0, name + 4, mode);
if (!isdigit(name[3]))
return ERR_PTR(-EINVAL);
dev = simple_strtoul(name + 3, &endptr, 0);
/* ubiY method */
if (*endptr == '\0')
return ubi_open_volume(0, dev, mode);
/* ubiX_Y method */
if (*endptr == '_' && isdigit(endptr[1])) {
vol = simple_strtoul(endptr + 1, &endptr, 0);
if (*endptr != '\0')
return ERR_PTR(-EINVAL);
return ubi_open_volume(dev, vol, mode);
}
/* ubiX:NAME method */
if ((*endptr == ':' || *endptr == '!') && endptr[1] != '\0')
return ubi_open_volume_nm(dev, ++endptr, mode);
return ERR_PTR(-EINVAL);
}
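/*
* Illustrative, self-contained userspace sketch (not part of the driver)
* of the "nodev" name forms accepted above. It only classifies the string;
* the real function opens the volume. strtoul() stands in for
* simple_strtoul().
*/
#if 0
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
static void classify(const char *name)
{
char *endptr;
long dev;
if (strncmp(name, "ubi", 3) != 0) {
printf("%s: not a ubi name\n", name);
return;
}
if ((name[3] == ':' || name[3] == '!') && name[4] != '\0') {
printf("%s: device 0, volume name \"%s\"\n", name, name + 4);
return;
}
if (!isdigit((unsigned char)name[3])) {
printf("%s: invalid\n", name);
return;
}
dev = strtoul(name + 3, &endptr, 0);
if (*endptr == '\0')
printf("%s: device 0, volume %ld\n", name, dev);
else if (*endptr == '_' && isdigit((unsigned char)endptr[1]))
printf("%s: device %ld, volume %s\n", name, dev, endptr + 1);
else if ((*endptr == ':' || *endptr == '!') && endptr[1] != '\0')
printf("%s: device %ld, volume name \"%s\"\n", name, dev, endptr + 1);
else
printf("%s: invalid\n", name);
}
int main(void)
{
classify("ubi0_0");
classify("ubi1");
classify("ubi0:rootfs");
classify("ubi!rootfs");
return 0;
}
#endif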
static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
{
struct ubi_volume_desc *ubi = sb->s_fs_info;
struct ubifs_info *c;
struct inode *root;
int err;
c = kzalloc(sizeof(struct ubifs_info), GFP_KERNEL);
if (!c)
return -ENOMEM;
spin_lock_init(&c->cnt_lock);
spin_lock_init(&c->cs_lock);
spin_lock_init(&c->buds_lock);
spin_lock_init(&c->space_lock);
spin_lock_init(&c->orphan_lock);
init_rwsem(&c->commit_sem);
mutex_init(&c->lp_mutex);
mutex_init(&c->tnc_mutex);
mutex_init(&c->log_mutex);
mutex_init(&c->mst_mutex);
mutex_init(&c->umount_mutex);
mutex_init(&c->bu_mutex);
mutex_init(&c->write_reserve_mutex);
init_waitqueue_head(&c->cmt_wq);
c->buds = RB_ROOT;
c->old_idx = RB_ROOT;
c->size_tree = RB_ROOT;
c->orph_tree = RB_ROOT;
INIT_LIST_HEAD(&c->infos_list);
INIT_LIST_HEAD(&c->idx_gc);
INIT_LIST_HEAD(&c->replay_list);
INIT_LIST_HEAD(&c->replay_buds);
INIT_LIST_HEAD(&c->uncat_list);
INIT_LIST_HEAD(&c->empty_list);
INIT_LIST_HEAD(&c->freeable_list);
INIT_LIST_HEAD(&c->frdi_idx_list);
INIT_LIST_HEAD(&c->unclean_leb_list);
INIT_LIST_HEAD(&c->old_buds);
INIT_LIST_HEAD(&c->orph_list);
INIT_LIST_HEAD(&c->orph_new);
c->no_chk_data_crc = 1;
c->vfs_sb = sb;
c->highest_inum = UBIFS_FIRST_INO;
c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM;
ubi_get_volume_info(ubi, &c->vi);
ubi_get_device_info(c->vi.ubi_num, &c->di);
/* Re-open the UBI device in read-write mode */
c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READWRITE);
if (IS_ERR(c->ubi)) {
err = PTR_ERR(c->ubi);
goto out_free;
}
/*
* UBIFS provides 'backing_dev_info' in order to disable read-ahead. For
* UBIFS, I/O is not deferred, it is done immediately in readpage,
* which means the user would have to wait not just for their own I/O
* but for the read-ahead I/O as well, i.e. it would be completely pointless.
*
* Read-ahead will be disabled because @c->bdi.ra_pages is 0.
*/
c->bdi.name = "ubifs";
c->bdi.capabilities = BDI_CAP_MAP_COPY;
c->bdi.unplug_io_fn = default_unplug_io_fn;
err = bdi_init(&c->bdi);
if (err)
goto out_close;
err = bdi_register(&c->bdi, NULL, "ubifs_%d_%d",
c->vi.ubi_num, c->vi.vol_id);
if (err)
goto out_bdi;
err = ubifs_parse_options(c, data, 0);
if (err)
goto out_bdi;
sb->s_bdi = &c->bdi;
sb->s_fs_info = c;
sb->s_magic = UBIFS_SUPER_MAGIC;
sb->s_blocksize = UBIFS_BLOCK_SIZE;
sb->s_blocksize_bits = UBIFS_BLOCK_SHIFT;
sb->s_maxbytes = c->max_inode_sz = key_max_inode_size(c);
if (c->max_inode_sz > MAX_LFS_FILESIZE)
sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE;
sb->s_op = &ubifs_super_operations;
mutex_lock(&c->umount_mutex);
err = mount_ubifs(c);
if (err) {
ubifs_assert(err < 0);
goto out_unlock;
}
/* Read the root inode */
root = ubifs_iget(sb, UBIFS_ROOT_INO);
if (IS_ERR(root)) {
err = PTR_ERR(root);
goto out_umount;
}
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
err = -ENOMEM;
goto out_iput;
}
mutex_unlock(&c->umount_mutex);
return 0;
out_iput:
iput(root);
out_umount:
ubifs_umount(c);
out_unlock:
mutex_unlock(&c->umount_mutex);
out_bdi:
bdi_destroy(&c->bdi);
out_close:
ubi_close_volume(c->ubi);
out_free:
kfree(c);
return err;
}
static int sb_test(struct super_block *sb, void *data)
{
dev_t *dev = data;
struct ubifs_info *c = sb->s_fs_info;
return c->vi.cdev == *dev;
}
static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
const char *name, void *data)
{
struct ubi_volume_desc *ubi;
struct ubi_volume_info vi;
struct super_block *sb;
int err;
dbg_gen("name %s, flags %#x", name, flags);
/*
* Get UBI device number and volume ID. Mount it read-only so far
* because this might be a new mount point, and UBI allows only one
* read-write user at a time.
*/
ubi = open_ubi(name, UBI_READONLY);
if (IS_ERR(ubi)) {
dbg_err("cannot open \"%s\", error %d",
name, (int)PTR_ERR(ubi));
return ERR_CAST(ubi);
}
ubi_get_volume_info(ubi, &vi);
dbg_gen("opened ubi%d_%d", vi.ubi_num, vi.vol_id);
sb = sget(fs_type, &sb_test, &set_anon_super, &vi.cdev);
if (IS_ERR(sb)) {
err = PTR_ERR(sb);
goto out_close;
}
if (sb->s_root) {
struct ubifs_info *c1 = sb->s_fs_info;
/* A new mount point for already mounted UBIFS */
dbg_gen("this ubi volume is already mounted");
if (!!(flags & MS_RDONLY) != c1->ro_mount) {
err = -EBUSY;
goto out_deact;
}
} else {
sb->s_flags = flags;
/*
* Pass 'ubi' to 'fill_super()' in sb->s_fs_info where it is
* replaced by 'c'.
*/
sb->s_fs_info = ubi;
err = ubifs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
if (err)
goto out_deact;
/* We do not support atime */
sb->s_flags |= MS_ACTIVE | MS_NOATIME;
}
/* 'fill_super()' opens ubi again so we must close it here */
ubi_close_volume(ubi);
return dget(sb->s_root);
out_deact:
deactivate_locked_super(sb);
out_close:
ubi_close_volume(ubi);
return ERR_PTR(err);
}
static struct file_system_type ubifs_fs_type = {
.name = "ubifs",
.owner = THIS_MODULE,
.mount = ubifs_mount,
.kill_sb = kill_anon_super,
};
/*
* Inode slab cache constructor.
*/
static void inode_slab_ctor(void *obj)
{
struct ubifs_inode *ui = obj;
inode_init_once(&ui->vfs_inode);
}
static int __init ubifs_init(void)
{
int err;
BUILD_BUG_ON(sizeof(struct ubifs_ch) != 24);
/* Make sure node sizes are 8-byte aligned */
BUILD_BUG_ON(UBIFS_CH_SZ & 7);
BUILD_BUG_ON(UBIFS_INO_NODE_SZ & 7);
BUILD_BUG_ON(UBIFS_DENT_NODE_SZ & 7);
BUILD_BUG_ON(UBIFS_XENT_NODE_SZ & 7);
BUILD_BUG_ON(UBIFS_DATA_NODE_SZ & 7);
BUILD_BUG_ON(UBIFS_TRUN_NODE_SZ & 7);
BUILD_BUG_ON(UBIFS_SB_NODE_SZ & 7);
BUILD_BUG_ON(UBIFS_MST_NODE_SZ & 7);
BUILD_BUG_ON(UBIFS_REF_NODE_SZ & 7);
BUILD_BUG_ON(UBIFS_CS_NODE_SZ & 7);
BUILD_BUG_ON(UBIFS_ORPH_NODE_SZ & 7);
BUILD_BUG_ON(UBIFS_MAX_DENT_NODE_SZ & 7);
BUILD_BUG_ON(UBIFS_MAX_XENT_NODE_SZ & 7);
BUILD_BUG_ON(UBIFS_MAX_DATA_NODE_SZ & 7);
BUILD_BUG_ON(UBIFS_MAX_INO_NODE_SZ & 7);
BUILD_BUG_ON(UBIFS_MAX_NODE_SZ & 7);
BUILD_BUG_ON(MIN_WRITE_SZ & 7);
/* Check min. node size */
BUILD_BUG_ON(UBIFS_INO_NODE_SZ < MIN_WRITE_SZ);
BUILD_BUG_ON(UBIFS_DENT_NODE_SZ < MIN_WRITE_SZ);
BUILD_BUG_ON(UBIFS_XENT_NODE_SZ < MIN_WRITE_SZ);
BUILD_BUG_ON(UBIFS_TRUN_NODE_SZ < MIN_WRITE_SZ);
BUILD_BUG_ON(UBIFS_MAX_DENT_NODE_SZ > UBIFS_MAX_NODE_SZ);
BUILD_BUG_ON(UBIFS_MAX_XENT_NODE_SZ > UBIFS_MAX_NODE_SZ);
BUILD_BUG_ON(UBIFS_MAX_DATA_NODE_SZ > UBIFS_MAX_NODE_SZ);
BUILD_BUG_ON(UBIFS_MAX_INO_NODE_SZ > UBIFS_MAX_NODE_SZ);
/* Defined node sizes */
BUILD_BUG_ON(UBIFS_SB_NODE_SZ != 4096);
BUILD_BUG_ON(UBIFS_MST_NODE_SZ != 512);
BUILD_BUG_ON(UBIFS_INO_NODE_SZ != 160);
BUILD_BUG_ON(UBIFS_REF_NODE_SZ != 64);
/*
* We use 2 bit wide bit-fields to store compression type, which should
* be amended if more compressors are added. The bit-fields are:
* @compr_type in 'struct ubifs_inode', @default_compr in
* 'struct ubifs_info' and @compr_type in 'struct ubifs_mount_opts'.
*/
BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4);
/*
* We require that PAGE_CACHE_SIZE is greater-than-or-equal-to
* UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2.
*/
if (PAGE_CACHE_SIZE < UBIFS_BLOCK_SIZE) {
ubifs_err("VFS page cache size is %u bytes, but UBIFS requires"
" at least 4096 bytes",
(unsigned int)PAGE_CACHE_SIZE);
return -EINVAL;
}
err = register_filesystem(&ubifs_fs_type);
if (err) {
ubifs_err("cannot register file system, error %d", err);
return err;
}
err = -ENOMEM;
ubifs_inode_slab = kmem_cache_create("ubifs_inode_slab",
sizeof(struct ubifs_inode), 0,
SLAB_MEM_SPREAD | SLAB_RECLAIM_ACCOUNT,
&inode_slab_ctor);
if (!ubifs_inode_slab)
goto out_reg;
register_shrinker(&ubifs_shrinker_info);
err = ubifs_compressors_init();
if (err)
goto out_shrinker;
err = dbg_debugfs_init();
if (err)
goto out_compr;
return 0;
out_compr:
ubifs_compressors_exit();
out_shrinker:
unregister_shrinker(&ubifs_shrinker_info);
kmem_cache_destroy(ubifs_inode_slab);
out_reg:
unregister_filesystem(&ubifs_fs_type);
return err;
}
/* late_initcall to let compressors initialize first */
late_initcall(ubifs_init);
static void __exit ubifs_exit(void)
{
ubifs_assert(list_empty(&ubifs_infos));
ubifs_assert(atomic_long_read(&ubifs_clean_zn_cnt) == 0);
dbg_debugfs_exit();
ubifs_compressors_exit();
unregister_shrinker(&ubifs_shrinker_info);
kmem_cache_destroy(ubifs_inode_slab);
unregister_filesystem(&ubifs_fs_type);
}
module_exit(ubifs_exit);
MODULE_LICENSE("GPL");
MODULE_VERSION(__stringify(UBIFS_VERSION));
MODULE_AUTHOR("Artem Bityutskiy, Adrian Hunter");
MODULE_DESCRIPTION("UBIFS - UBI File System");
| gpl-2.0 |
huahbo/src | system/main/disfil.c | 5 | 4768 | /* Print out data values.
Alternatively, use sfdd and convert to ASCII form.
*/
/*
Copyright (C) 2004 University of Texas at Austin
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <rsf.h>
static void head(int i, int cols, bool number);
int main (int argc, char* argv[])
{
int cols, *ibuf, esize, i;
short *sbuf;
size_t bufsiz, nbuf, j;
off_t size;
const char* format;
char *buf, *header, *trailer;
float *fbuf;
sf_complex *cbuf;
unsigned char *ubuf;
sf_file in;
sf_datatype type;
bool number;
sf_init (argc,argv);
in = sf_input("in");
type = sf_gettype(in);
if (!sf_getbool("number",&number)) number=true;
/* If set, number the elements */
if (!sf_getint("col",&cols)) cols=0;
/* Number of columns.
The default depends on the data type:
10 for int and char,
5 for float,
3 for complex */
format = sf_getstring("format");
/* Format for numbers (printf-style).
The default depends on the data type:
"%4d " for int and char,
"%13.4g" for float,
"%10.4g,%10.4gi" for complex */
header = sf_getstring("header");
/* Optional header string to output before data */
trailer = sf_getstring("trailer");
/* Optional trailer string to output after data */
esize = sf_esize(in);
bufsiz = sf_bufsiz(in);
if (0 != esize) bufsiz /= esize;
size = sf_filesize(in);
if (header != NULL) printf ("%s\n",header); /* print header string */
switch (type) {
case SF_UCHAR:
if (0==cols) cols=10;
if (NULL==format) format = "%4d ";
ubuf = sf_ucharalloc (bufsiz);
for (i=0; size > 0; size -= nbuf) {
nbuf = (bufsiz < size)? bufsiz: size;
sf_ucharread (ubuf,nbuf,in);
for (j=0; j < nbuf; j++, i++) {
head(i,cols,number);
printf(format,ubuf[j]);
}
}
printf("\n");
break;
case SF_CHAR:
if (0==cols) cols=10;
if (NULL==format) format = "%4d ";
buf = sf_charalloc (bufsiz);
for (i=0; size > 0; size -= nbuf) {
nbuf = (bufsiz < size)? bufsiz: size;
sf_charread (buf,nbuf,in);
for (j=0; j < nbuf; j++, i++) {
head(i,cols,number);
printf(format,buf[j]);
}
}
printf("\n");
break;
case SF_SHORT:
if (0==cols) cols=10;
if (NULL==format) format = "%4d ";
sbuf = sf_shortalloc (bufsiz);
for (i=0; size > 0; size -= nbuf) {
nbuf = (bufsiz < size)? bufsiz: size;
sf_shortread (sbuf,nbuf,in);
for (j=0; j < nbuf; j++, i++) {
head(i,cols,number);
printf(format,sbuf[j]);
}
}
printf("\n");
break;
case SF_INT:
if (0==cols) cols=10;
if (NULL==format) format = "%4d ";
ibuf = sf_intalloc (bufsiz);
for (i=0; size > 0; size -= nbuf) {
nbuf = (bufsiz < size)? bufsiz: size;
sf_intread (ibuf,nbuf,in);
for (j=0; j < nbuf; j++, i++) {
head(i,cols,number);
printf(format,ibuf[j]);
}
}
printf("\n");
break;
case SF_FLOAT:
if (0==cols) cols=5;
if (NULL==format) format = "%13.4g";
fbuf = sf_floatalloc (bufsiz);
for (i=0; size > 0; size -= nbuf) {
nbuf = (bufsiz < size)? bufsiz: size;
sf_floatread (fbuf,nbuf,in);
for (j=0; j < nbuf; j++, i++) {
head(i,cols,number);
printf(format,fbuf[j]);
}
}
printf("\n");
break;
case SF_COMPLEX:
if (0==cols) cols=3;
if (NULL==format) format = "%10.4g,%10.4gi";
cbuf = sf_complexalloc (bufsiz);
for (i=0; size > 0; size -= nbuf) {
nbuf = (bufsiz < size)? bufsiz: size;
sf_complexread (cbuf,nbuf,in);
for (j=0; j < nbuf; j++, i++) {
head(i,cols,number);
printf(format,crealf(cbuf[j]),cimagf(cbuf[j]));
}
}
printf("\n");
break;
default:
sf_error("Unknown data type");
break;
}
if (trailer != NULL) printf ("%s\n",trailer); /* print trailer string */
exit (0);
}
static void head(int i, int cols, bool number)
{
if (0 == i%cols) {
if (number) {
printf(i? "\n%4d: ":"%4d: ",i);
} else if (i) {
printf("\n");
}
}
}
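/*
* Example usage (illustrative; assumes the Madagascar build installs this
* program under the usual "sf" prefix as sfdisfil):
*
* sfdisfil < data.rsf col=5 number=y
*
* prints the file's values five per row, each row prefixed with the index
* of its first element.
*/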
/* $Id$ */
| gpl-2.0 |
Dr-Shadow/android_kernel_acer_c10 | fs/fat/namei_vfat.c | 5 | 32607 | /*
* linux/fs/vfat/namei.c
*
* Written 1992,1993 by Werner Almesberger
*
* Windows95/Windows NT compatible extended MSDOS filesystem
* by Gordon Chaffee Copyright (C) 1995. Send bug reports for the
* VFAT filesystem to <chaffee@cs.berkeley.edu>. Specify
* what file operation caused you trouble and if you can duplicate
* the problem, send a script that demonstrates it.
*
* Short name translation 1999, 2001 by Wolfram Pienkoss <wp@bszh.de>
*
* Support Multibyte characters and cleanup by
* OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
*/
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/namei.h>
#include "fat.h"
/*
* If a new entry was created in the parent, it could create the 8.3
* alias (the short name of the long name). So, the parent may have a
* negative dentry which matches the created 8.3 alias.
*
* If it happened, the negative dentry isn't actually negative
* anymore. So, drop it.
*/
static int vfat_revalidate_shortname(struct dentry *dentry)
{
int ret = 1;
spin_lock(&dentry->d_lock);
if (dentry->d_time != dentry->d_parent->d_inode->i_version) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG,
"%s() : dentry->d_time(%d) != dentry->d_parent->d_inode->i_version(%d), current process is \"(PID=%d)%s\"\n",
__func__,
dentry->d_time,
dentry->d_parent->d_inode->i_version,
current->pid,
current->comm
) ;
ret = 0;
}
spin_unlock(&dentry->d_lock);
return ret;
}
static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd)
{
if (nd && nd->flags & LOOKUP_RCU)
return -ECHILD;
/* This is not negative dentry. Always valid. */
if (dentry->d_inode)
return 1;
return vfat_revalidate_shortname(dentry);
}
static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd)
{
if (nd && nd->flags & LOOKUP_RCU)
return -ECHILD;
/*
* This is not negative dentry. Always valid.
*
* Note, rename() to existing directory entry will have ->d_inode,
* and will use existing name which isn't specified name by user.
*
* We may be able to drop this positive dentry here. But dropping
* a positive dentry isn't a good idea. So it's unsupported like
* rename("filename", "FILENAME") for now.
*/
if (dentry->d_inode)
return 1;
/*
* This may be nfsd (or something), anyway, we can't see the
* intent of this. So, since this can be for creation, drop it.
*/
if (!nd) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG,
"%s() : !nd, current process is \"(PID=%d)%s\"\n",
__func__,
current->pid,
current->comm
) ;
return 0;
}
/*
* Drop the negative dentry, in order to make sure to use the
* case sensitive name which is specified by user if this is
* for creation.
*/
if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG,
"%s() : nd->flags = %x, current process is \"(PID=%d)%s\"\n",
__func__,
nd->flags,
current->pid,
current->comm
) ;
return 0;
}
return vfat_revalidate_shortname(dentry);
}
/* returns the length of a struct qstr, ignoring trailing dots */
static unsigned int __vfat_striptail_len(unsigned int len, const char *name)
{
while (len && name[len - 1] == '.')
len--;
return len;
}
static unsigned int vfat_striptail_len(const struct qstr *qstr)
{
return __vfat_striptail_len(qstr->len, qstr->name);
}
/*
* Compute the hash for the vfat name corresponding to the dentry.
* Note: if the name is invalid, we leave the hash code unchanged so
* that the existing dentry can be used. The vfat fs routines will
* return ENOENT or EINVAL as appropriate.
*/
static int vfat_hash(const struct dentry *dentry, const struct inode *inode,
struct qstr *qstr)
{
qstr->hash = full_name_hash(qstr->name, vfat_striptail_len(qstr));
return 0;
}
/*
* Compute the hash for the vfat name corresponding to the dentry.
* Note: if the name is invalid, we leave the hash code unchanged so
* that the existing dentry can be used. The vfat fs routines will
* return ENOENT or EINVAL as appropriate.
*/
static int vfat_hashi(const struct dentry *dentry, const struct inode *inode,
struct qstr *qstr)
{
struct nls_table *t = MSDOS_SB(dentry->d_sb)->nls_io;
const unsigned char *name;
unsigned int len;
unsigned long hash;
name = qstr->name;
len = vfat_striptail_len(qstr);
hash = init_name_hash();
while (len--)
hash = partial_name_hash(nls_tolower(t, *name++), hash);
qstr->hash = end_name_hash(hash);
return 0;
}
/*
* Case insensitive compare of two vfat names.
*/
static int vfat_cmpi(const struct dentry *parent, const struct inode *pinode,
const struct dentry *dentry, const struct inode *inode,
unsigned int len, const char *str, const struct qstr *name)
{
struct nls_table *t = MSDOS_SB(parent->d_sb)->nls_io;
unsigned int alen, blen;
/* A filename cannot end in '.' or we treat it like it has none */
alen = vfat_striptail_len(name);
blen = __vfat_striptail_len(len, str);
if (alen == blen) {
if (nls_strnicmp(t, name->name, str, alen) == 0)
return 0;
}
return 1;
}
/*
* Case sensitive compare of two vfat names.
*/
static int vfat_cmp(const struct dentry *parent, const struct inode *pinode,
const struct dentry *dentry, const struct inode *inode,
unsigned int len, const char *str, const struct qstr *name)
{
unsigned int alen, blen;
/* A filename cannot end in '.' or we treat it like it has none */
alen = vfat_striptail_len(name);
blen = __vfat_striptail_len(len, str);
if (alen == blen) {
if (strncmp(name->name, str, alen) == 0)
return 0;
}
return 1;
}
static const struct dentry_operations vfat_ci_dentry_ops = {
.d_revalidate = vfat_revalidate_ci,
.d_hash = vfat_hashi,
.d_compare = vfat_cmpi,
};
static const struct dentry_operations vfat_dentry_ops = {
.d_revalidate = vfat_revalidate,
.d_hash = vfat_hash,
.d_compare = vfat_cmp,
};
/* Characters that are undesirable in an MS-DOS file name */
static inline wchar_t vfat_bad_char(wchar_t w)
{
return (w < 0x0020)
|| (w == '*') || (w == '?') || (w == '<') || (w == '>')
|| (w == '|') || (w == '"') || (w == ':') || (w == '/')
|| (w == '\\');
}
static inline wchar_t vfat_replace_char(wchar_t w)
{
return (w == '[') || (w == ']') || (w == ';') || (w == ',')
|| (w == '+') || (w == '=');
}
static wchar_t vfat_skip_char(wchar_t w)
{
return (w == '.') || (w == ' ');
}
static inline int vfat_is_used_badchars(const wchar_t *s, int len)
{
int i;
for (i = 0; i < len; i++)
if (vfat_bad_char(s[i]))
return -EINVAL;
if (s[i - 1] == ' ') /* last character cannot be space */
return -EINVAL;
return 0;
}
static int vfat_find_form(struct inode *dir, unsigned char *name)
{
struct fat_slot_info sinfo;
int err = fat_scan(dir, name, &sinfo);
if (err)
return -ENOENT;
brelse(sinfo.bh);
return 0;
}
/*
* 1) Valid characters for the 8.3 format alias are any combination of
* uppercase letters and digits, plus any of the
* following special characters:
* $ % ' ` - @ { } ~ ! # ( ) & _ ^
* In this case the long filename is not stored on disk.
*
* WinNT's extension:
* The file name and extension contain only uppercase or only lowercase
* characters, which is expressed by CASE_LOWER_BASE and CASE_LOWER_EXT.
*
* 2) The file name is in 8.3 format, but it contains mixed uppercase and
* lowercase characters, multi-byte characters, etc. In this case a numeric
* tail is not added, but the long filename is stored.
*
* 3) In any case other than the above, or when any of the following special
* characters is contained:
* . [ ] ; , + =
* a numeric tail is added, and the long filename must be stored on disk.
*/
struct shortname_info {
unsigned char lower:1,
upper:1,
valid:1;
};
#define INIT_SHORTNAME_INFO(x) do { \
(x)->lower = 1; \
(x)->upper = 1; \
(x)->valid = 1; \
} while (0)
static inline int to_shortname_char(struct nls_table *nls,
unsigned char *buf, int buf_size,
wchar_t *src, struct shortname_info *info)
{
int len;
if (vfat_skip_char(*src)) {
info->valid = 0;
return 0;
}
if (vfat_replace_char(*src)) {
info->valid = 0;
buf[0] = '_';
return 1;
}
len = nls->uni2char(*src, buf, buf_size);
if (len <= 0) {
info->valid = 0;
buf[0] = '_';
len = 1;
} else if (len == 1) {
unsigned char prev = buf[0];
if (buf[0] >= 0x7F) {
info->lower = 0;
info->upper = 0;
}
buf[0] = nls_toupper(nls, buf[0]);
if (isalpha(buf[0])) {
if (buf[0] == prev)
info->lower = 0;
else
info->upper = 0;
}
} else {
info->lower = 0;
info->upper = 0;
}
return len;
}
/*
* Given a valid longname, create a unique shortname. Make sure the
* shortname does not exist
* Returns a negative number on error, 0 for a normal
* return, and 1 for valid shortname
*/
static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
wchar_t *uname, int ulen,
unsigned char *name_res, unsigned char *lcase)
{
struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options;
wchar_t *ip, *ext_start, *end, *name_start;
unsigned char base[9], ext[4], buf[5], *p;
unsigned char charbuf[NLS_MAX_CHARSET_SIZE];
int chl, chi;
int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen;
int is_shortname;
struct shortname_info base_info, ext_info;
is_shortname = 1;
INIT_SHORTNAME_INFO(&base_info);
INIT_SHORTNAME_INFO(&ext_info);
/* Now, we need to create a shortname from the long name */
ext_start = end = &uname[ulen];
while (--ext_start >= uname) {
if (*ext_start == 0x002E) { /* is `.' */
if (ext_start == end - 1) {
sz = ulen;
ext_start = NULL;
}
break;
}
}
if (ext_start == uname - 1) {
sz = ulen;
ext_start = NULL;
} else if (ext_start) {
/*
* Names which start with a dot could be just
* an extension, e.g. "...test". In this case Win95
* uses the extension as the name and sets no extension.
*/
name_start = &uname[0];
while (name_start < ext_start) {
if (!vfat_skip_char(*name_start))
break;
name_start++;
}
if (name_start != ext_start) {
sz = ext_start - uname;
ext_start++;
} else {
sz = ulen;
ext_start = NULL;
}
}
numtail_baselen = 6;
numtail2_baselen = 2;
for (baselen = i = 0, p = base, ip = uname; i < sz; i++, ip++) {
chl = to_shortname_char(nls, charbuf, sizeof(charbuf),
ip, &base_info);
if (chl == 0)
continue;
if (baselen < 2 && (baselen + chl) > 2)
numtail2_baselen = baselen;
if (baselen < 6 && (baselen + chl) > 6)
numtail_baselen = baselen;
for (chi = 0; chi < chl; chi++) {
*p++ = charbuf[chi];
baselen++;
if (baselen >= 8)
break;
}
if (baselen >= 8) {
if ((chi < chl - 1) || (ip + 1) - uname < sz)
is_shortname = 0;
break;
}
}
if (baselen == 0) {
return -EINVAL;
}
extlen = 0;
if (ext_start) {
for (p = ext, ip = ext_start; extlen < 3 && ip < end; ip++) {
chl = to_shortname_char(nls, charbuf, sizeof(charbuf),
ip, &ext_info);
if (chl == 0)
continue;
if ((extlen + chl) > 3) {
is_shortname = 0;
break;
}
for (chi = 0; chi < chl; chi++) {
*p++ = charbuf[chi];
extlen++;
}
if (extlen >= 3) {
if (ip + 1 != end)
is_shortname = 0;
break;
}
}
}
ext[extlen] = '\0';
base[baselen] = '\0';
/* Yes, it can happen. ".\xe5" would do it. */
if (base[0] == DELETED_FLAG)
base[0] = 0x05;
/* OK, at this point we know that base is no longer than 8 symbols,
* ext is no longer than 3, base is nonempty, and neither contains
* any bad symbols (lowercase has been transformed to uppercase).
*/
memset(name_res, ' ', MSDOS_NAME);
memcpy(name_res, base, baselen);
memcpy(name_res + 8, ext, extlen);
*lcase = 0;
if (is_shortname && base_info.valid && ext_info.valid) {
if (vfat_find_form(dir, name_res) == 0)
return -EEXIST;
if (opts->shortname & VFAT_SFN_CREATE_WIN95) {
return (base_info.upper && ext_info.upper);
} else if (opts->shortname & VFAT_SFN_CREATE_WINNT) {
if ((base_info.upper || base_info.lower) &&
(ext_info.upper || ext_info.lower)) {
if (!base_info.upper && base_info.lower)
*lcase |= CASE_LOWER_BASE;
if (!ext_info.upper && ext_info.lower)
*lcase |= CASE_LOWER_EXT;
return 1;
}
return 0;
} else {
BUG();
}
}
if (opts->numtail == 0)
if (vfat_find_form(dir, name_res) < 0)
return 0;
/*
* Try to find a unique extension. This used to
* iterate through all possibilities sequentially,
* but that gave extremely bad performance. Windows
* only tries a few cases before using random
* values for part of the base.
*/
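/*
 * Editorial illustration: "~1" through "~9" are appended to the
 * (possibly shortened) base first; if all nine are taken, the code
 * below falls back to two base characters plus a jiffies-seeded
 * four-hex-digit infix, e.g. something like "REA1F2~3.TXT".
 */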
if (baselen > 6) {
baselen = numtail_baselen;
name_res[7] = ' ';
}
name_res[baselen] = '~';
for (i = 1; i < 10; i++) {
name_res[baselen + 1] = i + '0';
if (vfat_find_form(dir, name_res) < 0)
return 0;
}
i = jiffies;
sz = (jiffies >> 16) & 0x7;
if (baselen > 2) {
baselen = numtail2_baselen;
name_res[7] = ' ';
}
name_res[baselen + 4] = '~';
name_res[baselen + 5] = '1' + sz;
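/* Probe pseudo-randomly: i is seeded from jiffies and stepped by 11,
 * which is coprime to 2^16, so successive probes walk the whole
 * four-hex-digit space without repeating early. */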
while (1) {
snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
memcpy(&name_res[baselen], buf, 4);
if (vfat_find_form(dir, name_res) < 0)
break;
i -= 11;
}
return 0;
}
/* Translate a string, including coded sequences into Unicode */
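/* With the "uni_xlate" mount option, a ':' introduces an escape of
 * four hex digits naming a UTF-16 code unit (e.g. ":266b"); the
 * decoder below accepts both lowercase and uppercase hex digits. */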
static int
xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int *longlen, int *outlen, int escape, int utf8,
struct nls_table *nls)
{
const unsigned char *ip;
unsigned char nc;
unsigned char *op;
unsigned int ec;
int i, k, fill;
int charlen;
if (utf8) {
*outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
(wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN)
return -ENAMETOOLONG;
op = &outname[*outlen * sizeof(wchar_t)];
} else {
for (i = 0, ip = name, op = outname, *outlen = 0;
i < len && *outlen < FAT_LFN_LEN;
*outlen += 1) {
if (escape && (*ip == ':')) {
if (i > len - 5)
return -EINVAL;
ec = 0;
for (k = 1; k < 5; k++) {
nc = ip[k];
ec <<= 4;
if (nc >= '0' && nc <= '9') {
ec |= nc - '0';
continue;
}
if (nc >= 'a' && nc <= 'f') {
ec |= nc - ('a' - 10);
continue;
}
if (nc >= 'A' && nc <= 'F') {
ec |= nc - ('A' - 10);
continue;
}
return -EINVAL;
}
*op++ = ec & 0xFF;
*op++ = ec >> 8;
ip += 5;
i += 5;
} else {
charlen = nls->char2uni(ip, len - i,
(wchar_t *)op);
if (charlen < 0)
return -EINVAL;
ip += charlen;
i += charlen;
op += 2;
}
}
if (i < len)
return -ENAMETOOLONG;
}
*longlen = *outlen;
if (*outlen % 13) {
*op++ = 0;
*op++ = 0;
*outlen += 1;
if (*outlen % 13) {
fill = 13 - (*outlen % 13);
for (i = 0; i < fill; i++) {
*op++ = 0xff;
*op++ = 0xff;
}
*outlen += fill;
}
}
return 0;
}
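/*
 * Editorial note: each on-disk LFN slot carries 13 UTF-16 code units,
 * which is why the tail above is padded to a multiple of 13: one
 * 0x0000 terminator, then 0xFFFF fill (e.g. a 5-character name ends
 * up as 13 units: 5 name + 1 NUL + 7 fill).
 */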
static int vfat_build_slots(struct inode *dir, const unsigned char *name,
int len, int is_dir, int cluster,
struct timespec *ts,
struct msdos_dir_slot *slots, int *nr_slots)
{
struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb);
struct fat_mount_options *opts = &sbi->options;
struct msdos_dir_slot *ps;
struct msdos_dir_entry *de;
unsigned char cksum, lcase;
unsigned char msdos_name[MSDOS_NAME];
wchar_t *uname;
__le16 time, date;
u8 time_cs;
int err, ulen, usize, i;
loff_t offset;
*nr_slots = 0;
uname = __getname();
if (!uname)
return -ENOMEM;
err = xlate_to_uni(name, len, (unsigned char *)uname, &ulen, &usize,
opts->unicode_xlate, opts->utf8, sbi->nls_io);
if (err)
goto out_free;
err = vfat_is_used_badchars(uname, ulen);
if (err)
goto out_free;
err = vfat_create_shortname(dir, sbi->nls_disk, uname, ulen,
msdos_name, &lcase);
if (err < 0)
goto out_free;
else if (err == 1) {
de = (struct msdos_dir_entry *)slots;
err = 0;
goto shortname;
}
/* build the long file name (LFN) slot entries */
cksum = fat_checksum(msdos_name);
*nr_slots = usize / 13;
for (ps = slots, i = *nr_slots; i > 0; i--, ps++) {
ps->id = i;
ps->attr = ATTR_EXT;
ps->reserved = 0;
ps->alias_checksum = cksum;
ps->start = 0;
offset = (i - 1) * 13;
fatwchar_to16(ps->name0_4, uname + offset, 5);
fatwchar_to16(ps->name5_10, uname + offset + 5, 6);
fatwchar_to16(ps->name11_12, uname + offset + 11, 2);
}
slots[0].id |= 0x40;
de = (struct msdos_dir_entry *)ps;
shortname:
/* build the 8.3 alias directory entry */
(*nr_slots)++;
memcpy(de->name, msdos_name, MSDOS_NAME);
de->attr = is_dir ? ATTR_DIR : ATTR_ARCH;
de->lcase = lcase;
fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
de->time = de->ctime = time;
de->date = de->cdate = de->adate = date;
de->ctime_cs = time_cs;
de->start = cpu_to_le16(cluster);
de->starthi = cpu_to_le16(cluster >> 16);
de->size = 0;
out_free:
__putname(uname);
return err;
}
static int vfat_add_entry(struct inode *dir, struct qstr *qname, int is_dir,
int cluster, struct timespec *ts,
struct fat_slot_info *sinfo)
{
struct msdos_dir_slot *slots;
unsigned int len;
int err, nr_slots;
len = vfat_striptail_len(qname);
if (len == 0) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG,
"%s() : vfat_striptail_len(%s) failed, current process is \"(PID=%d)%s\"\n",
__func__,
qname->name,
current->pid,
current->comm
) ;
return -ENOENT;
}
slots = kmalloc(sizeof(*slots) * MSDOS_SLOTS, GFP_NOFS);
if (slots == NULL) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG,
"%s() : allocate MSDOS_SLOTS(%d) failed, current process is \"(PID=%d)%s\"\n",
__func__,
MSDOS_SLOTS,
current->pid,
current->comm
) ;
return -ENOMEM;
}
err = vfat_build_slots(dir, qname->name, len, is_dir, cluster, ts,
slots, &nr_slots);
if (err) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG,
"%s() : calls vfat_build_slots() failed, current process is \"(PID=%d)%s\"\n",
__func__,
current->pid,
current->comm
) ;
goto cleanup;
}
err = fat_add_entries(dir, slots, nr_slots, sinfo);
if (err) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG,
"%s() : calls fat_add_entries() failed, current process is \"(PID=%d)%s\"\n",
__func__,
current->pid,
current->comm
) ;
goto cleanup;
}
/* update timestamp */
dir->i_ctime = dir->i_mtime = dir->i_atime = *ts;
if (IS_DIRSYNC(dir))
(void)fat_sync_inode(dir);
else
mark_inode_dirty(dir);
cleanup:
kfree(slots);
xlog_printk(ANDROID_LOG_INFO, FAT_TAG,
"%s() clearup, err = %d, current process is \"(PID=%d)%s\"\n",
__func__,
err,
current->pid,
current->comm
) ;
return err;
}
static int vfat_find(struct inode *dir, struct qstr *qname,
struct fat_slot_info *sinfo)
{
unsigned int len = vfat_striptail_len(qname);
if (len == 0) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG,
"%s() : vfat_striptail_len(%s) failed, current process is \"(PID=%d)%s\"\n",
__func__,
qname->name,
current->pid,
current->comm
) ;
return -ENOENT;
}
return fat_search_long(dir, qname->name, len, sinfo);
}
/*
* (nfsd's) anonymous disconnected dentry?
* NOTE: !IS_ROOT() is not anonymous (i.e. d_splice_alias() did the job).
*/
static int vfat_d_anon_disconn(struct dentry *dentry)
{
return IS_ROOT(dentry) && (dentry->d_flags & DCACHE_DISCONNECTED);
}
static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
struct inode *inode;
struct dentry *alias;
int err;
lock_super(sb);
err = vfat_find(dir, &dentry->d_name, &sinfo);
if (err) {
if (err == -ENOENT) {
inode = NULL;
goto out;
}
goto error;
}
inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
brelse(sinfo.bh);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto error;
}
alias = d_find_alias(inode);
if (alias && !vfat_d_anon_disconn(alias)) {
/*
* This inode has a non-anonymous DCACHE_DISCONNECTED
* dentry. This means the user did ->lookup() under
* another name (the long name vs. its 8.3 alias) in the
* past.
*
* Switch to the new one for locality, if possible.
*/
BUG_ON(d_unhashed(alias));
if (!S_ISDIR(inode->i_mode))
d_move(alias, dentry);
iput(inode);
unlock_super(sb);
return alias;
} else
dput(alias);
out:
unlock_super(sb);
dentry->d_time = dentry->d_parent->d_inode->i_version;
dentry = d_splice_alias(inode, dentry);
if (dentry)
dentry->d_time = dentry->d_parent->d_inode->i_version;
return dentry;
error:
unlock_super(sb);
return ERR_PTR(err);
}
static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
struct fat_slot_info sinfo;
struct timespec ts;
int err;
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() dentry->d_name = %s, current process is \"(PID=%d)%s\"\n",
__func__,
(dentry->d_name).name,
current->pid,
current->comm
) ;
lock_super(sb);
ts = CURRENT_TIME_SEC;
err = vfat_add_entry(dir, &dentry->d_name, 0, 0, &ts, &sinfo);
if (err) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG,
"%s() calls vfat_add_entry(%s) failed, err=%d, current process is \"(PID=%d)%s\"\n",
__func__,
(dentry->d_name).name,
err,
current->pid,
current->comm
) ;
goto out;
}
dir->i_version++;
inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
brelse(sinfo.bh);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
xlog_printk(ANDROID_LOG_INFO, FAT_TAG,
"%s() calls fat_build_inode failed, err=%d, current process is \"(PID=%d)%s\"\n",
__func__,
err,
current->pid,
current->comm
) ;
goto out;
}
inode->i_version++;
inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
dentry->d_time = dentry->d_parent->d_inode->i_version;
d_instantiate(dentry, inode);
out:
unlock_super(sb);
return err;
}
static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
int err;
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() dentry->d_name = %s, current process is \"(PID=%d)%s\"\n",
__func__,
(dentry->d_name).name,
current->pid,
current->comm
) ;
lock_super(sb);
err = fat_dir_empty(inode);
if (err) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() calls fat_dir_empty() failed, err = %d, current process is \"(PID=%d)%s\"\n",
__func__,
err,
current->pid,
current->comm
) ;
goto out;
}
err = vfat_find(dir, &dentry->d_name, &sinfo);
if (err) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() calls vfat_find(%s) failed, err = %d, current process is \"(PID=%d)%s\"\n",
__func__,
(dentry->d_name).name,
err,
current->pid,
current->comm
) ;
goto out;
}
err = fat_remove_entries(dir, &sinfo); /* and releases bh */
if (err) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() calls fat_remove_entries(%s) failed, err = %d, current process is \"(PID=%d)%s\"\n",
__func__,
(sinfo.de)->name,
err,
current->pid,
current->comm
) ;
goto out;
}
drop_nlink(dir);
clear_nlink(inode);
inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
fat_detach(inode);
out:
unlock_super(sb);
return err;
}
static int vfat_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
int err;
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() dentry->d_name = %s, current process is \"(PID=%d)%s\"\n",
__func__,
(dentry->d_name).name,
current->pid,
current->comm
) ;
lock_super(sb);
err = vfat_find(dir, &dentry->d_name, &sinfo);
if (err) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() calls vfat_find(%s) failed, current process is \"(PID=%d)%s\"\n",
__func__,
(dentry->d_name).name,
current->pid,
current->comm
) ;
goto out;
}
err = fat_remove_entries(dir, &sinfo); /* and releases bh */
if (err) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() calls fat_remove_entries(%s) failed, current process is \"(PID=%d)%s\"\n",
__func__,
(sinfo.de)->name,
current->pid,
current->comm
) ;
goto out;
}
clear_nlink(inode);
inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
fat_detach(inode);
out:
unlock_super(sb);
return err;
}
static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
struct fat_slot_info sinfo;
struct timespec ts;
int err, cluster;
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() dentry->d_name = %s, current process is \"(PID=%d)%s\"\n",
__func__,
(dentry->d_name).name,
current->pid,
current->comm
) ;
lock_super(sb);
ts = CURRENT_TIME_SEC;
cluster = fat_alloc_new_dir(dir, &ts);
if (cluster < 0) {
err = cluster;
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() calls fat_alloc_new_dir() failed,err = %d,current process is \"(PID=%d)%s\"\n",
__func__,
err,
current->pid,
current->comm
) ;
goto out;
}
err = vfat_add_entry(dir, &dentry->d_name, 1, cluster, &ts, &sinfo);
if (err) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() calls vfat_add_entry(%s) failed,err = %d,current process is \"(PID=%d)%s\"\n",
__func__,
(dentry->d_name).name,
err,
current->pid,
current->comm
) ;
goto out_free;
}
dir->i_version++;
inc_nlink(dir);
inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
brelse(sinfo.bh);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() line#%d,err = %d,current process is \"(PID=%d)%s\"\n",
__func__,
__LINE__,
err,
current->pid,
current->comm
) ;
/* the directory was completed, just return an error */
goto out;
}
inode->i_version++;
set_nlink(inode, 2);
inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
dentry->d_time = dentry->d_parent->d_inode->i_version;
d_instantiate(dentry, inode);
unlock_super(sb);
return 0;
out_free:
fat_free_clusters(dir, cluster);
out:
unlock_super(sb);
return err;
}
static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct buffer_head *dotdot_bh;
struct msdos_dir_entry *dotdot_de;
struct inode *old_inode, *new_inode;
struct fat_slot_info old_sinfo, sinfo;
struct timespec ts;
loff_t dotdot_i_pos, new_i_pos;
int err, is_dir, update_dotdot, corrupt = 0;
struct super_block *sb = old_dir->i_sb;
xlog_printk(ANDROID_LOG_INFO, FAT_TAG,
"%s() old_dentry->d_name = %s, new_dentry->d_name = %s, current process is \"(PID=%d)%s\"\n",
__func__,
(old_dentry->d_name).name,
(new_dentry->d_name).name,
current->pid,
current->comm
) ;
old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
old_inode = old_dentry->d_inode;
new_inode = new_dentry->d_inode;
lock_super(sb);
err = vfat_find(old_dir, &old_dentry->d_name, &old_sinfo);
if (err) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() vfat_find(old_dentry=%s) failed,err = %d,current process is \"(PID=%d)%s\"\n",
__func__,
(old_dentry->d_name).name,
err,
current->pid,
current->comm
) ;
goto out;
}
is_dir = S_ISDIR(old_inode->i_mode);
update_dotdot = (is_dir && old_dir != new_dir);
if (update_dotdot) {
if (fat_get_dotdot_entry(old_inode, &dotdot_bh, &dotdot_de,
&dotdot_i_pos) < 0) {
err = -EIO;
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() fat_get_dotdot_entry() failed,err = %d,current process is \"(PID=%d)%s\"\n",
__func__,
err,
current->pid,
current->comm
) ;
goto out;
}
}
ts = CURRENT_TIME_SEC;
if (new_inode) {
if (is_dir) {
err = fat_dir_empty(new_inode);
if (err) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() fat_dir_empty() failed,err = %d,current process is \"(PID=%d)%s\"\n",
__func__,
err,
current->pid,
current->comm
) ;
goto out;
}
}
new_i_pos = MSDOS_I(new_inode)->i_pos;
fat_detach(new_inode);
} else {
err = vfat_add_entry(new_dir, &new_dentry->d_name, is_dir, 0,
&ts, &sinfo);
if (err) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() vfat_add_entry(new_dentry = %s) failed,err = %d,current process is \"(PID=%d)%s\"\n",
__func__,
(new_dentry->d_name).name,
err,
current->pid,
current->comm
) ;
goto out;
}
new_i_pos = sinfo.i_pos;
}
new_dir->i_version++;
fat_detach(old_inode);
fat_attach(old_inode, new_i_pos);
if (IS_DIRSYNC(new_dir)) {
err = fat_sync_inode(old_inode);
if (err) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() fat_sync_inode() failed,err = %d,current process is \"(PID=%d)%s\"\n",
__func__,
err,
current->pid,
current->comm
) ;
goto error_inode;
}
} else
mark_inode_dirty(old_inode);
if (update_dotdot) {
int start = MSDOS_I(new_dir)->i_logstart;
dotdot_de->start = cpu_to_le16(start);
dotdot_de->starthi = cpu_to_le16(start >> 16);
mark_buffer_dirty_inode(dotdot_bh, old_inode);
if (IS_DIRSYNC(new_dir)) {
err = sync_dirty_buffer(dotdot_bh);
if (err) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() sync_dirty_buffer() failed,err = %d,current process is \"(PID=%d)%s\"\n",
__func__,
err,
current->pid,
current->comm
) ;
goto error_dotdot;
}
}
drop_nlink(old_dir);
if (!new_inode)
inc_nlink(new_dir);
}
err = fat_remove_entries(old_dir, &old_sinfo); /* and releases bh */
old_sinfo.bh = NULL;
if (err) {
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() fat_remove_entries() failed,err = %d,current process is \"(PID=%d)%s\"\n",
__func__,
err,
current->pid,
current->comm
) ;
goto error_dotdot;
}
old_dir->i_version++;
old_dir->i_ctime = old_dir->i_mtime = ts;
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
else
mark_inode_dirty(old_dir);
if (new_inode) {
drop_nlink(new_inode);
if (is_dir)
drop_nlink(new_inode);
new_inode->i_ctime = ts;
}
out:
brelse(sinfo.bh);
brelse(dotdot_bh);
brelse(old_sinfo.bh);
unlock_super(sb);
return err;
error_dotdot:
/* data cluster is shared, serious corruption */
corrupt = 1;
if (update_dotdot) {
int start = MSDOS_I(old_dir)->i_logstart;
dotdot_de->start = cpu_to_le16(start);
dotdot_de->starthi = cpu_to_le16(start >> 16);
mark_buffer_dirty_inode(dotdot_bh, old_inode);
corrupt |= sync_dirty_buffer(dotdot_bh);
}
error_inode:
fat_detach(old_inode);
fat_attach(old_inode, old_sinfo.i_pos);
if (new_inode) {
fat_attach(new_inode, new_i_pos);
if (corrupt)
corrupt |= fat_sync_inode(new_inode);
} else {
/*
* If new entry was not sharing the data cluster, it
* shouldn't be serious corruption.
*/
int err2 = fat_remove_entries(new_dir, &sinfo);
if (corrupt)
corrupt |= err2;
sinfo.bh = NULL;
}
if (corrupt < 0) {
fat_fs_error(new_dir->i_sb,
"%s: Filesystem corrupted (i_pos %lld)",
__func__, sinfo.i_pos);
xlog_printk(ANDROID_LOG_INFO, FAT_TAG, "%s() Filesystem corrupted (i_pos %lld),current process is \"(PID=%d)%s\"\n",
__func__,
sinfo.i_pos,
current->pid,
current->comm
) ;
}
goto out;
}
static const struct inode_operations vfat_dir_inode_operations = {
.create = vfat_create,
.lookup = vfat_lookup,
.unlink = vfat_unlink,
.mkdir = vfat_mkdir,
.rmdir = vfat_rmdir,
.rename = vfat_rename,
.setattr = fat_setattr,
.getattr = fat_getattr,
};
static void setup(struct super_block *sb)
{
MSDOS_SB(sb)->dir_ops = &vfat_dir_inode_operations;
if (MSDOS_SB(sb)->options.name_check != 's')
sb->s_d_op = &vfat_ci_dentry_ops;
else
sb->s_d_op = &vfat_dentry_ops;
}
static int vfat_fill_super(struct super_block *sb, void *data, int silent)
{
return fat_fill_super(sb, data, silent, 1, setup);
}
static struct dentry *vfat_mount(struct file_system_type *fs_type,
int flags, const char *dev_name,
void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, vfat_fill_super);
}
static struct file_system_type vfat_fs_type = {
.owner = THIS_MODULE,
.name = "vfat",
.mount = vfat_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
static int __init init_vfat_fs(void)
{
return register_filesystem(&vfat_fs_type);
}
static void __exit exit_vfat_fs(void)
{
unregister_filesystem(&vfat_fs_type);
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VFAT filesystem support");
MODULE_AUTHOR("Gordon Chaffee");
module_init(init_vfat_fs)
module_exit(exit_vfat_fs)
| gpl-2.0 |
sogis/Quantum-GIS | src/gui/editorwidgets/qgstexteditwrapper.cpp | 5 | 5027 | /***************************************************************************
qgstexteditwrapper.cpp
--------------------------------------
Date : 5.1.2014
Copyright : (C) 2014 Matthias Kuhn
Email : matthias at opengis dot ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
#include "qgstexteditwrapper.h"
#include "qgsfield.h"
#include "qgsfieldvalidator.h"
#include "qgsfilterlineedit.h"
#include <QSettings>
QgsTextEditWrapper::QgsTextEditWrapper( QgsVectorLayer* vl, int fieldIdx, QWidget* editor, QWidget* parent )
: QgsEditorWidgetWrapper( vl, fieldIdx, editor, parent )
, mTextEdit( NULL )
, mPlainTextEdit( NULL )
, mLineEdit( NULL )
{
}
QVariant QgsTextEditWrapper::value()
{
QString v;
if ( mTextEdit )
{
if ( config( "UseHtml" ).toBool() )
{
v = mTextEdit->toHtml();
}
else
{
v = mTextEdit->toPlainText();
}
}
if ( mPlainTextEdit )
{
v = mPlainTextEdit->toPlainText();
}
if ( mLineEdit )
{
v = mLineEdit->text();
}
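// An empty string on a numeric or date field, or the configured QGIS
// "qgis/nullValue" marker, maps back to a null QVariant of the field
// type rather than to an empty-string value.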
if (( v.isEmpty() && ( field().type() == QVariant::Int || field().type() == QVariant::Double || field().type() == QVariant::LongLong || field().type() == QVariant::Date ) ) ||
v == QSettings().value( "qgis/nullValue", "NULL" ).toString() )
return QVariant( field().type() );
if ( v == defaultValue().toString() )
{
return defaultValue();
}
QVariant res( v );
if ( field().convertCompatible( res ) )
return res;
else
return QVariant( field().type() );
}
QWidget* QgsTextEditWrapper::createWidget( QWidget* parent )
{
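// The widget type is driven by the editor config: multiline + HTML
// yields a QTextEdit, multiline alone a QPlainTextEdit, and the
// single-line case a QgsFilterLineEdit with null-value support.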
if ( config( "IsMultiline" ).toBool() )
{
if ( config( "UseHtml" ).toBool() )
{
return new QTextEdit( parent );
}
else
{
return new QPlainTextEdit( parent );
}
}
else
{
return new QgsFilterLineEdit( parent );
}
}
void QgsTextEditWrapper::initWidget( QWidget* editor )
{
mTextEdit = qobject_cast<QTextEdit*>( editor );
mPlainTextEdit = qobject_cast<QPlainTextEdit*>( editor );
mLineEdit = qobject_cast<QLineEdit*>( editor );
if ( mTextEdit )
connect( mTextEdit, SIGNAL( textChanged() ), this, SLOT( valueChanged() ) );
if ( mPlainTextEdit )
connect( mPlainTextEdit, SIGNAL( textChanged() ), this, SLOT( valueChanged() ) );
if ( mLineEdit )
{
mLineEdit->setValidator( new QgsFieldValidator( mLineEdit, field(), defaultValue().toString() ) );
QVariant defVal = defaultValue();
if ( defVal.isNull() )
{
defVal = QSettings().value( "qgis/nullValue", "NULL" );
}
QgsFilterLineEdit *fle = qobject_cast<QgsFilterLineEdit*>( mLineEdit );
if ( field().type() == QVariant::Int || field().type() == QVariant::Double || field().type() == QVariant::LongLong || field().type() == QVariant::Date )
{
mLineEdit->setPlaceholderText( defVal.toString() );
}
else if ( fle )
{
fle->setNullValue( defVal.toString() );
}
connect( mLineEdit, SIGNAL( textChanged( QString ) ), this, SLOT( valueChanged( QString ) ) );
mWritablePalette = mLineEdit->palette();
mReadOnlyPalette = mLineEdit->palette();
mReadOnlyPalette.setColor( QPalette::Text, mWritablePalette.color( QPalette::Disabled, QPalette::Text ) );
}
}
bool QgsTextEditWrapper::valid()
{
return mLineEdit || mTextEdit || mPlainTextEdit;
}
void QgsTextEditWrapper::setValue( const QVariant& val )
{
QString v;
if ( val.isNull() )
{
if ( !( field().type() == QVariant::Int || field().type() == QVariant::Double || field().type() == QVariant::LongLong || field().type() == QVariant::Date ) )
v = QSettings().value( "qgis/nullValue", "NULL" ).toString();
}
else
v = val.toString();
if ( mTextEdit )
{
if ( val != value() )
{
if ( config( "UseHtml" ).toBool() )
mTextEdit->setHtml( v );
else
mTextEdit->setPlainText( v );
}
}
if ( mPlainTextEdit )
{
if ( val != value() )
mPlainTextEdit->setPlainText( v );
}
if ( mLineEdit )
mLineEdit->setText( v );
}
void QgsTextEditWrapper::setEnabled( bool enabled )
{
if ( mTextEdit )
mTextEdit->setReadOnly( !enabled );
if ( mPlainTextEdit )
mPlainTextEdit->setReadOnly( !enabled );
if ( mLineEdit )
{
mLineEdit->setReadOnly( !enabled );
if ( enabled )
mLineEdit->setPalette( mWritablePalette );
else
mLineEdit->setPalette( mReadOnlyPalette );
}
}
| gpl-2.0 |
utkanos/android_htc_mecha_kernel | arch/sparc/kernel/of_device_32.c | 517 | 10401 | #include <linux/string.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <asm/leon.h>
#include <asm/leon_amba.h>
#include "of_device_common.h"
/*
* PCI bus specific translator
*/
static int of_bus_pci_match(struct device_node *np)
{
if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) {
/* Do not do PCI specific frobbing if the
* PCI bridge lacks a ranges property. We
* want to pass it through up to the next
* parent as-is, not with the PCI translate
* method which chops off the top address cell.
*/
if (!of_find_property(np, "ranges", NULL))
return 0;
return 1;
}
return 0;
}
static void of_bus_pci_count_cells(struct device_node *np,
int *addrc, int *sizec)
{
if (addrc)
*addrc = 3;
if (sizec)
*sizec = 2;
}
static int of_bus_pci_map(u32 *addr, const u32 *range,
int na, int ns, int pna)
{
u32 result[OF_MAX_ADDR_CELLS];
int i;
/* Check address type match: bits 25:24 of the high cell encode the
 * PCI address space (config/IO/mem32/mem64). */
if ((addr[0] ^ range[0]) & 0x03000000)
return -EINVAL;
if (of_out_of_range(addr + 1, range + 1, range + na + pna,
na - 1, ns))
return -EINVAL;
/* Start with the parent range base. */
memcpy(result, range + na, pna * 4);
/* Add in the child address offset, skipping high cell. */
for (i = 0; i < na - 1; i++)
result[pna - 1 - i] +=
(addr[na - 1 - i] -
range[na - 1 - i]);
memcpy(addr, result, pna * 4);
return 0;
}
static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
{
u32 w = addr[0];
/* For PCI, we override whatever child busses may have used. */
flags = 0;
switch((w >> 24) & 0x03) {
case 0x01:
flags |= IORESOURCE_IO;
break;
case 0x02: /* 32 bits */
case 0x03: /* 64 bits */
flags |= IORESOURCE_MEM;
break;
}
if (w & 0x40000000)
flags |= IORESOURCE_PREFETCH;
return flags;
}
static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
{
return IORESOURCE_MEM;
}
/*
* AMBAPP bus specific translator
*/
static int of_bus_ambapp_match(struct device_node *np)
{
return !strcmp(np->name, "ambapp");
}
static void of_bus_ambapp_count_cells(struct device_node *child,
int *addrc, int *sizec)
{
if (addrc)
*addrc = 1;
if (sizec)
*sizec = 1;
}
static int of_bus_ambapp_map(u32 *addr, const u32 *range,
int na, int ns, int pna)
{
return of_bus_default_map(addr, range, na, ns, pna);
}
static unsigned long of_bus_ambapp_get_flags(const u32 *addr,
unsigned long flags)
{
return IORESOURCE_MEM;
}
/*
* Array of bus specific translators
*/
static struct of_bus of_busses[] = {
/* PCI */
{
.name = "pci",
.addr_prop_name = "assigned-addresses",
.match = of_bus_pci_match,
.count_cells = of_bus_pci_count_cells,
.map = of_bus_pci_map,
.get_flags = of_bus_pci_get_flags,
},
/* SBUS */
{
.name = "sbus",
.addr_prop_name = "reg",
.match = of_bus_sbus_match,
.count_cells = of_bus_sbus_count_cells,
.map = of_bus_default_map,
.get_flags = of_bus_sbus_get_flags,
},
/* AMBA */
{
.name = "ambapp",
.addr_prop_name = "reg",
.match = of_bus_ambapp_match,
.count_cells = of_bus_ambapp_count_cells,
.map = of_bus_ambapp_map,
.get_flags = of_bus_ambapp_get_flags,
},
/* Default */
{
.name = "default",
.addr_prop_name = "reg",
.match = NULL,
.count_cells = of_bus_default_count_cells,
.map = of_bus_default_map,
.get_flags = of_bus_default_get_flags,
},
};
static struct of_bus *of_match_bus(struct device_node *np)
{
int i;
for (i = 0; i < ARRAY_SIZE(of_busses); i ++)
if (!of_busses[i].match || of_busses[i].match(np))
return &of_busses[i];
BUG();
return NULL;
}
static int __init build_one_resource(struct device_node *parent,
struct of_bus *bus,
struct of_bus *pbus,
u32 *addr,
int na, int ns, int pna)
{
const u32 *ranges;
unsigned int rlen;
int rone;
ranges = of_get_property(parent, "ranges", &rlen);
if (ranges == NULL || rlen == 0) {
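/* No usable ranges property: fall back to a 1:1 mapping by widening
 * the child address to the parent's cell count, zero-filling the
 * high cells. */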
u32 result[OF_MAX_ADDR_CELLS];
int i;
memset(result, 0, pna * 4);
for (i = 0; i < na; i++)
result[pna - 1 - i] =
addr[na - 1 - i];
memcpy(addr, result, pna * 4);
return 0;
}
/* Now walk through the ranges; each entry is na + pna + ns cells long. */
rlen /= 4;
rone = na + pna + ns;
for (; rlen >= rone; rlen -= rone, ranges += rone) {
if (!bus->map(addr, ranges, na, ns, pna))
return 0;
}
return 1;
}
static int __init use_1to1_mapping(struct device_node *pp)
{
/* If we have a ranges property in the parent, use it. */
if (of_find_property(pp, "ranges", NULL) != NULL)
return 0;
/* Some SBUS devices use intermediate nodes to express
* hierarchy within the device itself. These aren't
* real bus nodes, and don't have a 'ranges' property.
* But, we should still pass the translation work up
* to the SBUS itself.
*/
if (!strcmp(pp->name, "dma") ||
!strcmp(pp->name, "espdma") ||
!strcmp(pp->name, "ledma") ||
!strcmp(pp->name, "lebuffer"))
return 0;
return 1;
}
static int of_resource_verbose;
static void __init build_device_resources(struct of_device *op,
struct device *parent)
{
struct of_device *p_op;
struct of_bus *bus;
int na, ns;
int index, num_reg;
const void *preg;
if (!parent)
return;
p_op = to_of_device(parent);
bus = of_match_bus(p_op->node);
bus->count_cells(op->node, &na, &ns);
preg = of_get_property(op->node, bus->addr_prop_name, &num_reg);
if (!preg || num_reg == 0)
return;
/* Convert to num-cells. */
num_reg /= 4;
/* Convert to num-entries. */
num_reg /= na + ns;
for (index = 0; index < num_reg; index++) {
struct resource *r = &op->resource[index];
u32 addr[OF_MAX_ADDR_CELLS];
const u32 *reg = (preg + (index * ((na + ns) * 4)));
struct device_node *dp = op->node;
struct device_node *pp = p_op->node;
struct of_bus *pbus, *dbus;
u64 size, result = OF_BAD_ADDR;
unsigned long flags;
int dna, dns;
int pna, pns;
size = of_read_addr(reg + na, ns);
memcpy(addr, reg, na * 4);
flags = bus->get_flags(reg, 0);
if (use_1to1_mapping(pp)) {
result = of_read_addr(addr, na);
goto build_res;
}
dna = na;
dns = ns;
dbus = bus;
while (1) {
dp = pp;
pp = dp->parent;
if (!pp) {
result = of_read_addr(addr, dna);
break;
}
pbus = of_match_bus(pp);
pbus->count_cells(dp, &pna, &pns);
if (build_one_resource(dp, dbus, pbus, addr,
dna, dns, pna))
break;
flags = pbus->get_flags(addr, flags);
dna = pna;
dns = pns;
dbus = pbus;
}
build_res:
memset(r, 0, sizeof(*r));
if (of_resource_verbose)
printk("%s reg[%d] -> %llx\n",
op->node->full_name, index,
result);
if (result != OF_BAD_ADDR) {
r->start = result & 0xffffffff;
r->end = result + size - 1;
r->flags = flags | ((result >> 32ULL) & 0xffUL);
}
r->name = op->node->name;
}
}
static struct of_device * __init scan_one_device(struct device_node *dp,
struct device *parent)
{
struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL);
const struct linux_prom_irqs *intr;
struct dev_archdata *sd;
int len, i;
if (!op)
return NULL;
sd = &op->dev.archdata;
sd->prom_node = dp;
sd->op = op;
op->node = dp;
op->clock_freq = of_getintprop_default(dp, "clock-frequency",
(25*1000*1000));
op->portid = of_getintprop_default(dp, "upa-portid", -1);
if (op->portid == -1)
op->portid = of_getintprop_default(dp, "portid", -1);
intr = of_get_property(dp, "intr", &len);
if (intr) {
op->num_irqs = len / sizeof(struct linux_prom_irqs);
for (i = 0; i < op->num_irqs; i++)
op->irqs[i] = intr[i].pri;
} else {
const unsigned int *irq =
of_get_property(dp, "interrupts", &len);
if (irq) {
op->num_irqs = len / sizeof(unsigned int);
for (i = 0; i < op->num_irqs; i++)
op->irqs[i] = irq[i];
} else {
op->num_irqs = 0;
}
}
if (sparc_cpu_model == sun4d) {
static int pil_to_sbus[] = {
0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
};
struct device_node *io_unit, *sbi = dp->parent;
const struct linux_prom_registers *regs;
int board, slot;
while (sbi) {
if (!strcmp(sbi->name, "sbi"))
break;
sbi = sbi->parent;
}
if (!sbi)
goto build_resources;
regs = of_get_property(dp, "reg", NULL);
if (!regs)
goto build_resources;
slot = regs->which_io;
/* If SBI's parent is not io-unit or the io-unit lacks
* a "board#" property, something is very wrong.
*/
if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) {
printk("%s: Error, parent is not io-unit.\n",
sbi->full_name);
goto build_resources;
}
io_unit = sbi->parent;
board = of_getintprop_default(io_unit, "board#", -1);
if (board == -1) {
printk("%s: Error, lacks board# property.\n",
io_unit->full_name);
goto build_resources;
}
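/* Pack board, SBUS interrupt level and slot into a single IRQ
 * number: ((board + 1) << 5) | (level << 2) | slot. */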
for (i = 0; i < op->num_irqs; i++) {
int this_irq = op->irqs[i];
int sbusl = pil_to_sbus[this_irq];
if (sbusl)
this_irq = (((board + 1) << 5) +
(sbusl << 2) +
slot);
op->irqs[i] = this_irq;
}
}
build_resources:
build_device_resources(op, parent);
op->dev.parent = parent;
op->dev.bus = &of_platform_bus_type;
if (!parent)
dev_set_name(&op->dev, "root");
else
dev_set_name(&op->dev, "%08x", dp->node);
if (of_device_register(op)) {
printk("%s: Could not register of device.\n",
dp->full_name);
kfree(op);
op = NULL;
}
return op;
}
static void __init scan_tree(struct device_node *dp, struct device *parent)
{
while (dp) {
struct of_device *op = scan_one_device(dp, parent);
if (op)
scan_tree(dp->child, &op->dev);
dp = dp->sibling;
}
}
static void __init scan_of_devices(void)
{
struct device_node *root = of_find_node_by_path("/");
struct of_device *parent;
parent = scan_one_device(root, NULL);
if (!parent)
return;
scan_tree(root->child, &parent->dev);
}
static int __init of_bus_driver_init(void)
{
int err;
err = of_bus_type_init(&of_platform_bus_type, "of");
if (!err)
scan_of_devices();
return err;
}
postcore_initcall(of_bus_driver_init);
static int __init of_debug(char *str)
{
int val = 0;
get_option(&str, &val);
if (val & 1)
of_resource_verbose = 1;
return 1;
}
__setup("of_debug=", of_debug);
| gpl-2.0 |
guoyuqing/linux-drover | drivers/usb/host/xhci-dbg.c | 517 | 16214 | /*
* xHCI host controller driver
*
* Copyright (C) 2008 Intel Corp.
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "xhci.h"
#define XHCI_INIT_VALUE 0x0
/* Add verbose debugging later, just print everything for now */
void xhci_dbg_regs(struct xhci_hcd *xhci)
{
u32 temp;
xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
xhci->cap_regs);
temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
&xhci->cap_regs->hc_capbase, temp);
xhci_dbg(xhci, "// CAPLENGTH: 0x%x\n",
(unsigned int) HC_LENGTH(temp));
#if 0
xhci_dbg(xhci, "// HCIVERSION: 0x%x\n",
(unsigned int) HC_VERSION(temp));
#endif
xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);
temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
&xhci->cap_regs->run_regs_off,
(unsigned int) temp & RTSOFF_MASK);
xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);
temp = xhci_readl(xhci, &xhci->cap_regs->db_off);
xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
}
static void xhci_print_cap_regs(struct xhci_hcd *xhci)
{
u32 temp;
xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);
temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n",
(unsigned int) temp);
xhci_dbg(xhci, "CAPLENGTH: 0x%x\n",
(unsigned int) HC_LENGTH(temp));
xhci_dbg(xhci, "HCIVERSION: 0x%x\n",
(unsigned int) HC_VERSION(temp));
temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
xhci_dbg(xhci, "HCSPARAMS 1: 0x%x\n",
(unsigned int) temp);
xhci_dbg(xhci, " Max device slots: %u\n",
(unsigned int) HCS_MAX_SLOTS(temp));
xhci_dbg(xhci, " Max interrupters: %u\n",
(unsigned int) HCS_MAX_INTRS(temp));
xhci_dbg(xhci, " Max ports: %u\n",
(unsigned int) HCS_MAX_PORTS(temp));
temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
xhci_dbg(xhci, "HCSPARAMS 2: 0x%x\n",
(unsigned int) temp);
xhci_dbg(xhci, " Isoc scheduling threshold: %u\n",
(unsigned int) HCS_IST(temp));
xhci_dbg(xhci, " Maximum allowed segments in event ring: %u\n",
(unsigned int) HCS_ERST_MAX(temp));
temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
xhci_dbg(xhci, "HCSPARAMS 3 0x%x:\n",
(unsigned int) temp);
xhci_dbg(xhci, " Worst case U1 device exit latency: %u\n",
(unsigned int) HCS_U1_LATENCY(temp));
xhci_dbg(xhci, " Worst case U2 device exit latency: %u\n",
(unsigned int) HCS_U2_LATENCY(temp));
temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
xhci_dbg(xhci, " HC generates %s bit addresses\n",
HCC_64BIT_ADDR(temp) ? "64" : "32");
/* FIXME */
xhci_dbg(xhci, " FIXME: more HCCPARAMS debugging\n");
temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
}
static void xhci_print_command_reg(struct xhci_hcd *xhci)
{
u32 temp;
temp = xhci_readl(xhci, &xhci->op_regs->command);
xhci_dbg(xhci, "USBCMD 0x%x:\n", temp);
xhci_dbg(xhci, " HC is %s\n",
(temp & CMD_RUN) ? "running" : "being stopped");
xhci_dbg(xhci, " HC has %sfinished hard reset\n",
(temp & CMD_RESET) ? "not " : "");
xhci_dbg(xhci, " Event Interrupts %s\n",
(temp & CMD_EIE) ? "enabled " : "disabled");
xhci_dbg(xhci, " Host System Error Interrupts %s\n",
(temp & CMD_EIE) ? "enabled " : "disabled");
xhci_dbg(xhci, " HC has %sfinished light reset\n",
(temp & CMD_LRESET) ? "not " : "");
}
static void xhci_print_status(struct xhci_hcd *xhci)
{
u32 temp;
temp = xhci_readl(xhci, &xhci->op_regs->status);
xhci_dbg(xhci, "USBSTS 0x%x:\n", temp);
xhci_dbg(xhci, " Event ring is %sempty\n",
(temp & STS_EINT) ? "not " : "");
xhci_dbg(xhci, " %sHost System Error\n",
(temp & STS_FATAL) ? "WARNING: " : "No ");
xhci_dbg(xhci, " HC is %s\n",
(temp & STS_HALT) ? "halted" : "running");
}
static void xhci_print_op_regs(struct xhci_hcd *xhci)
{
xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
xhci_print_command_reg(xhci);
xhci_print_status(xhci);
}
static void xhci_print_ports(struct xhci_hcd *xhci)
{
u32 __iomem *addr;
int i, j;
int ports;
char *names[NUM_PORT_REGS] = {
"status",
"power",
"link",
"reserved",
};
ports = HCS_MAX_PORTS(xhci->hcs_params1);
addr = &xhci->op_regs->port_status_base;
for (i = 0; i < ports; i++) {
for (j = 0; j < NUM_PORT_REGS; ++j) {
xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
addr, names[j],
(unsigned int) xhci_readl(xhci, addr));
addr++;
}
}
}
void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
{
void *addr;
u32 temp;
u64 temp_64;
addr = &ir_set->irq_pending;
temp = xhci_readl(xhci, addr);
if (temp == XHCI_INIT_VALUE)
return;
xhci_dbg(xhci, " %p: ir_set[%i]\n", ir_set, set_num);
xhci_dbg(xhci, " %p: ir_set.pending = 0x%x\n", addr,
(unsigned int)temp);
addr = &ir_set->irq_control;
temp = xhci_readl(xhci, addr);
xhci_dbg(xhci, " %p: ir_set.control = 0x%x\n", addr,
(unsigned int)temp);
addr = &ir_set->erst_size;
temp = xhci_readl(xhci, addr);
xhci_dbg(xhci, " %p: ir_set.erst_size = 0x%x\n", addr,
(unsigned int)temp);
addr = &ir_set->rsvd;
temp = xhci_readl(xhci, addr);
if (temp != XHCI_INIT_VALUE)
xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n",
addr, (unsigned int)temp);
addr = &ir_set->erst_base;
temp_64 = xhci_read_64(xhci, addr);
xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n",
addr, temp_64);
addr = &ir_set->erst_dequeue;
temp_64 = xhci_read_64(xhci, addr);
xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n",
addr, temp_64);
}
void xhci_print_run_regs(struct xhci_hcd *xhci)
{
u32 temp;
int i;
xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
temp = xhci_readl(xhci, &xhci->run_regs->microframe_index);
xhci_dbg(xhci, " %p: Microframe index = 0x%x\n",
&xhci->run_regs->microframe_index,
(unsigned int) temp);
for (i = 0; i < 7; ++i) {
temp = xhci_readl(xhci, &xhci->run_regs->rsvd[i]);
if (temp != XHCI_INIT_VALUE)
xhci_dbg(xhci, " WARN: %p: Rsvd[%i] = 0x%x\n",
&xhci->run_regs->rsvd[i],
i, (unsigned int) temp);
}
}
void xhci_print_registers(struct xhci_hcd *xhci)
{
xhci_print_cap_regs(xhci);
xhci_print_op_regs(xhci);
xhci_print_ports(xhci);
}
void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
{
int i;
for (i = 0; i < 4; ++i)
xhci_dbg(xhci, "Offset 0x%x = 0x%x\n",
i*4, trb->generic.field[i]);
}
/**
* Debug a transfer request block (TRB).
*/
void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
{
u64 address;
u32 type = xhci_readl(xhci, &trb->link.control) & TRB_TYPE_BITMASK;
switch (type) {
case TRB_TYPE(TRB_LINK):
xhci_dbg(xhci, "Link TRB:\n");
xhci_print_trb_offsets(xhci, trb);
address = trb->link.segment_ptr;
xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
xhci_dbg(xhci, "Interrupter target = 0x%x\n",
GET_INTR_TARGET(trb->link.intr_target));
xhci_dbg(xhci, "Cycle bit = %u\n",
(unsigned int) (trb->link.control & TRB_CYCLE));
xhci_dbg(xhci, "Toggle cycle bit = %u\n",
(unsigned int) (trb->link.control & LINK_TOGGLE));
xhci_dbg(xhci, "No Snoop bit = %u\n",
(unsigned int) (trb->link.control & TRB_NO_SNOOP));
break;
case TRB_TYPE(TRB_TRANSFER):
address = trb->trans_event.buffer;
/*
* FIXME: look at flags to figure out if it's an address or if
* the data is directly in the buffer field.
*/
xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
break;
case TRB_TYPE(TRB_COMPLETION):
address = trb->event_cmd.cmd_trb;
xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
xhci_dbg(xhci, "Completion status = %u\n",
(unsigned int) GET_COMP_CODE(trb->event_cmd.status));
xhci_dbg(xhci, "Flags = 0x%x\n", (unsigned int) trb->event_cmd.flags);
break;
default:
xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
(unsigned int) type>>10);
xhci_print_trb_offsets(xhci, trb);
break;
}
}
/**
* Debug a segment within an xHCI ring.
*
* @return The Link TRB of the segment, or NULL if there is no Link TRB
* (which is a bug, since all segments must have a Link TRB).
*
* Prints out all TRBs in the segment, even those after the Link TRB.
*
* XXX: should we print out TRBs that the HC owns? As long as we don't
* write, that should be fine... We shouldn't expect that the memory pointed to
* by the TRB is valid at all. Do we care about ones the HC owns? Probably,
* for HC debugging.
*/
void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
int i;
u32 addr = (u32) seg->dma;
union xhci_trb *trb = seg->trbs;
for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
trb = &seg->trbs[i];
xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
lower_32_bits(trb->link.segment_ptr),
upper_32_bits(trb->link.segment_ptr),
(unsigned int) trb->link.intr_target,
(unsigned int) trb->link.control);
addr += sizeof(*trb);
}
}
void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
ring->dequeue,
(unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg,
ring->dequeue));
xhci_dbg(xhci, "Ring deq updated %u times\n",
ring->deq_updates);
xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
ring->enqueue,
(unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg,
ring->enqueue));
xhci_dbg(xhci, "Ring enq updated %u times\n",
ring->enq_updates);
}
/**
* Debugging for an xHCI ring, which is a queue broken into multiple segments.
*
* Print out each segment in the ring. Check that the DMA address in
* each link segment actually matches the segment's stored DMA address.
* Check that the link end bit is only set at the end of the ring.
* Check that the dequeue and enqueue pointers point to real data in this ring
* (not some other ring).
*/
void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
/* FIXME: Throw an error if any segment doesn't have a Link TRB */
struct xhci_segment *seg;
struct xhci_segment *first_seg = ring->first_seg;
xhci_debug_segment(xhci, first_seg);
if (!ring->enq_updates && !ring->deq_updates) {
xhci_dbg(xhci, " Ring has not been updated\n");
return;
}
for (seg = first_seg->next; seg != first_seg; seg = seg->next)
xhci_debug_segment(xhci, seg);
}
void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
{
u32 addr = (u32) erst->erst_dma_addr;
int i;
struct xhci_erst_entry *entry;
for (i = 0; i < erst->num_entries; ++i) {
entry = &erst->entries[i];
xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
(unsigned int) addr,
lower_32_bits(entry->seg_addr),
upper_32_bits(entry->seg_addr),
(unsigned int) entry->seg_size,
(unsigned int) entry->rsvd);
addr += sizeof(*entry);
}
}
void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
{
u64 val;
val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
lower_32_bits(val));
xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
upper_32_bits(val));
}
/* Print the last 32 bytes for 64-byte contexts */
static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
{
int i;
for (i = 0; i < 4; ++i) {
xhci_dbg(xhci, "@%p (virt) @%08llx "
"(dma) %#08llx - rsvd64[%d]\n",
&ctx[4 + i], (unsigned long long)dma,
ctx[4 + i], i);
dma += 8;
}
}
void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
{
/* Fields are 32 bits wide, DMA addresses are in bytes */
int field_size = 32 / 8;
int i;
struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
dma_addr_t dma = ctx->dma +
((unsigned long)slot_ctx - (unsigned long)ctx->bytes);
int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
xhci_dbg(xhci, "Slot Context:\n");
xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
&slot_ctx->dev_info,
(unsigned long long)dma, slot_ctx->dev_info);
dma += field_size;
xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
&slot_ctx->dev_info2,
(unsigned long long)dma, slot_ctx->dev_info2);
dma += field_size;
xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
&slot_ctx->tt_info,
(unsigned long long)dma, slot_ctx->tt_info);
dma += field_size;
xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
&slot_ctx->dev_state,
(unsigned long long)dma, slot_ctx->dev_state);
dma += field_size;
for (i = 0; i < 4; ++i) {
xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
&slot_ctx->reserved[i], (unsigned long long)dma,
slot_ctx->reserved[i], i);
dma += field_size;
}
if (csz)
dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
}
void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx,
unsigned int last_ep)
{
int i, j;
int last_ep_ctx = 31;
/* Fields are 32 bits wide, DMA addresses are in bytes */
int field_size = 32 / 8;
int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
if (last_ep < 31)
last_ep_ctx = last_ep + 1;
for (i = 0; i < last_ep_ctx; ++i) {
struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
dma_addr_t dma = ctx->dma +
((unsigned long)ep_ctx - (unsigned long)ctx->bytes);
xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
&ep_ctx->ep_info,
(unsigned long long)dma, ep_ctx->ep_info);
dma += field_size;
xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
&ep_ctx->ep_info2,
(unsigned long long)dma, ep_ctx->ep_info2);
dma += field_size;
xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
&ep_ctx->deq,
(unsigned long long)dma, ep_ctx->deq);
dma += 2*field_size;
xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
&ep_ctx->tx_info,
(unsigned long long)dma, ep_ctx->tx_info);
dma += field_size;
for (j = 0; j < 3; ++j) {
xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
&ep_ctx->reserved[j],
(unsigned long long)dma,
ep_ctx->reserved[j], j);
dma += field_size;
}
if (csz)
dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
}
}
void xhci_dbg_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx,
unsigned int last_ep)
{
int i;
/* Fields are 32 bits wide, DMA addresses are in bytes */
int field_size = 32 / 8;
struct xhci_slot_ctx *slot_ctx;
dma_addr_t dma = ctx->dma;
int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
if (ctx->type == XHCI_CTX_TYPE_INPUT) {
struct xhci_input_control_ctx *ctrl_ctx =
xhci_get_input_control_ctx(xhci, ctx);
xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
&ctrl_ctx->drop_flags, (unsigned long long)dma,
ctrl_ctx->drop_flags);
dma += field_size;
xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
&ctrl_ctx->add_flags, (unsigned long long)dma,
ctrl_ctx->add_flags);
dma += field_size;
for (i = 0; i < 6; ++i) {
xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
&ctrl_ctx->rsvd2[i], (unsigned long long)dma,
ctrl_ctx->rsvd2[i], i);
dma += field_size;
}
if (csz)
dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
}
slot_ctx = xhci_get_slot_ctx(xhci, ctx);
xhci_dbg_slot_ctx(xhci, ctx);
xhci_dbg_ep_ctx(xhci, ctx, last_ep);
}
| gpl-2.0 |
smihir/wireless-testing | sound/soc/au1x/dma.c | 773 | 8954 | /*
* Au1000/Au1500/Au1100 Audio DMA support.
*
* (c) 2011 Manuel Lauss <manuel.lauss@googlemail.com>
*
* copied almost verbatim from the old ALSA driver, written by
* Charles Eidsness <charles@cooper-street.com>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1000_dma.h>
#include "psc.h"
struct pcm_period {
u32 start;
u32 relative_end; /* relative to start of buffer */
struct pcm_period *next;
};
struct audio_stream {
struct snd_pcm_substream *substream;
int dma;
struct pcm_period *buffer;
unsigned int period_size;
unsigned int periods;
};
struct alchemy_pcm_ctx {
struct audio_stream stream[2]; /* playback & capture */
};
static void au1000_release_dma_link(struct audio_stream *stream)
{
struct pcm_period *pointer;
struct pcm_period *pointer_next;
stream->period_size = 0;
stream->periods = 0;
pointer = stream->buffer;
if (!pointer)
return;
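/* The period descriptors form a circular singly-linked list; walk it
 * exactly once from the anchor node, freeing each element. */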
do {
pointer_next = pointer->next;
kfree(pointer);
pointer = pointer_next;
} while (pointer != stream->buffer);
stream->buffer = NULL;
}
static int au1000_setup_dma_link(struct audio_stream *stream,
unsigned int period_bytes,
unsigned int periods)
{
struct snd_pcm_substream *substream = stream->substream;
struct snd_pcm_runtime *runtime = substream->runtime;
struct pcm_period *pointer;
unsigned long dma_start;
int i;
dma_start = virt_to_phys(runtime->dma_area);
if (stream->period_size == period_bytes &&
stream->periods == periods)
return 0; /* not changed */
au1000_release_dma_link(stream);
stream->period_size = period_bytes;
stream->periods = periods;
stream->buffer = kmalloc(sizeof(struct pcm_period), GFP_KERNEL);
if (!stream->buffer)
return -ENOMEM;
pointer = stream->buffer;
for (i = 0; i < periods; i++) {
pointer->start = (u32)(dma_start + (i * period_bytes));
pointer->relative_end = (u32) (((i+1) * period_bytes) - 0x1);
if (i < periods - 1) {
pointer->next = kmalloc(sizeof(struct pcm_period),
GFP_KERNEL);
if (!pointer->next) {
au1000_release_dma_link(stream);
return -ENOMEM;
}
pointer = pointer->next;
}
}
pointer->next = stream->buffer;
return 0;
}
static void au1000_dma_stop(struct audio_stream *stream)
{
if (stream->buffer)
disable_dma(stream->dma);
}
static void au1000_dma_start(struct audio_stream *stream)
{
if (!stream->buffer)
return;
init_dma(stream->dma);
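/*
 * The Au1000 DMA engine is double-buffered: both descriptors are
 * primed with the first two periods (ordered by whichever buffer is
 * currently active), and the DMA completion interrupt handler refills
 * whichever half just drained (ping-pong scheme).
 */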
if (get_dma_active_buffer(stream->dma) == 0) {
clear_dma_done0(stream->dma);
set_dma_addr0(stream->dma, stream->buffer->start);
set_dma_count0(stream->dma, stream->period_size >> 1);
set_dma_addr1(stream->dma, stream->buffer->next->start);
set_dma_count1(stream->dma, stream->period_size >> 1);
} else {
clear_dma_done1(stream->dma);
set_dma_addr1(stream->dma, stream->buffer->start);
set_dma_count1(stream->dma, stream->period_size >> 1);
set_dma_addr0(stream->dma, stream->buffer->next->start);
set_dma_count0(stream->dma, stream->period_size >> 1);
}
enable_dma_buffers(stream->dma);
start_dma(stream->dma);
}
static irqreturn_t au1000_dma_interrupt(int irq, void *ptr)
{
struct audio_stream *stream = (struct audio_stream *)ptr;
struct snd_pcm_substream *substream = stream->substream;
switch (get_dma_buffer_done(stream->dma)) {
case DMA_D0:
stream->buffer = stream->buffer->next;
clear_dma_done0(stream->dma);
set_dma_addr0(stream->dma, stream->buffer->next->start);
set_dma_count0(stream->dma, stream->period_size >> 1);
enable_dma_buffer0(stream->dma);
break;
case DMA_D1:
stream->buffer = stream->buffer->next;
clear_dma_done1(stream->dma);
set_dma_addr1(stream->dma, stream->buffer->next->start);
set_dma_count1(stream->dma, stream->period_size >> 1);
enable_dma_buffer1(stream->dma);
break;
case (DMA_D0 | DMA_D1):
pr_debug("DMA %d missed interrupt.\n", stream->dma);
au1000_dma_stop(stream);
au1000_dma_start(stream);
break;
case (~DMA_D0 & ~DMA_D1):
pr_debug("DMA %d empty irq.\n", stream->dma);
}
snd_pcm_period_elapsed(substream);
return IRQ_HANDLED;
}
static const struct snd_pcm_hardware alchemy_pcm_hardware = {
.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BATCH,
.period_bytes_min = 1024,
.period_bytes_max = 16 * 1024 - 1,
.periods_min = 4,
.periods_max = 255,
.buffer_bytes_max = 128 * 1024,
.fifo_size = 16,
};
static inline struct alchemy_pcm_ctx *ss_to_ctx(struct snd_pcm_substream *ss)
{
struct snd_soc_pcm_runtime *rtd = ss->private_data;
return snd_soc_platform_get_drvdata(rtd->platform);
}
static inline struct audio_stream *ss_to_as(struct snd_pcm_substream *ss)
{
struct alchemy_pcm_ctx *ctx = ss_to_ctx(ss);
return &(ctx->stream[ss->stream]);
}
static int alchemy_pcm_open(struct snd_pcm_substream *substream)
{
struct alchemy_pcm_ctx *ctx = ss_to_ctx(substream);
struct snd_soc_pcm_runtime *rtd = substream->private_data;
int *dmaids, s = substream->stream;
char *name;
dmaids = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
if (!dmaids)
return -ENODEV; /* whoa, has ordering changed? */
/* DMA setup */
name = (s == SNDRV_PCM_STREAM_PLAYBACK) ? "audio-tx" : "audio-rx";
ctx->stream[s].dma = request_au1000_dma(dmaids[s], name,
au1000_dma_interrupt, 0,
&ctx->stream[s]);
set_dma_mode(ctx->stream[s].dma,
get_dma_mode(ctx->stream[s].dma) & ~DMA_NC);
ctx->stream[s].substream = substream;
ctx->stream[s].buffer = NULL;
snd_soc_set_runtime_hwparams(substream, &alchemy_pcm_hardware);
return 0;
}
static int alchemy_pcm_close(struct snd_pcm_substream *substream)
{
struct alchemy_pcm_ctx *ctx = ss_to_ctx(substream);
int stype = substream->stream;
ctx->stream[stype].substream = NULL;
free_au1000_dma(ctx->stream[stype].dma);
return 0;
}
static int alchemy_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct audio_stream *stream = ss_to_as(substream);
int err;
err = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
if (err < 0)
return err;
err = au1000_setup_dma_link(stream,
params_period_bytes(hw_params),
params_periods(hw_params));
if (err)
snd_pcm_lib_free_pages(substream);
return err;
}
static int alchemy_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct audio_stream *stream = ss_to_as(substream);
au1000_release_dma_link(stream);
return snd_pcm_lib_free_pages(substream);
}
static int alchemy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct audio_stream *stream = ss_to_as(substream);
int err = 0;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
au1000_dma_start(stream);
break;
case SNDRV_PCM_TRIGGER_STOP:
au1000_dma_stop(stream);
break;
default:
err = -EINVAL;
break;
}
return err;
}
static snd_pcm_uframes_t alchemy_pcm_pointer(struct snd_pcm_substream *ss)
{
struct audio_stream *stream = ss_to_as(ss);
long location;
location = get_dma_residue(stream->dma);
location = stream->buffer->relative_end - location;
if (location == -1)
location = 0;
return bytes_to_frames(ss->runtime, location);
}
static struct snd_pcm_ops alchemy_pcm_ops = {
.open = alchemy_pcm_open,
.close = alchemy_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = alchemy_pcm_hw_params,
.hw_free = alchemy_pcm_hw_free,
.trigger = alchemy_pcm_trigger,
.pointer = alchemy_pcm_pointer,
};
static void alchemy_pcm_free_dma_buffers(struct snd_pcm *pcm)
{
snd_pcm_lib_preallocate_free_for_all(pcm);
}
static int alchemy_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
snd_dma_continuous_data(GFP_KERNEL), 65536, (4096 * 1024) - 1);
return 0;
}
static struct snd_soc_platform_driver alchemy_pcm_soc_platform = {
.ops = &alchemy_pcm_ops,
.pcm_new = alchemy_pcm_new,
.pcm_free = alchemy_pcm_free_dma_buffers,
};
static int alchemy_pcm_drvprobe(struct platform_device *pdev)
{
struct alchemy_pcm_ctx *ctx;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
platform_set_drvdata(pdev, ctx);
return snd_soc_register_platform(&pdev->dev, &alchemy_pcm_soc_platform);
}
static int alchemy_pcm_drvremove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
}
static struct platform_driver alchemy_pcmdma_driver = {
.driver = {
.name = "alchemy-pcm-dma",
.owner = THIS_MODULE,
},
.probe = alchemy_pcm_drvprobe,
.remove = alchemy_pcm_drvremove,
};
module_platform_driver(alchemy_pcmdma_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Au1000/Au1500/Au1100 Audio DMA driver");
MODULE_AUTHOR("Manuel Lauss");
| gpl-2.0 |
compulab/trimslice-android-kernel | drivers/net/wireless/p54/p54spi.c | 773 | 17371 | /*
* Copyright (C) 2008 Christian Lamparter <chunkeey@web.de>
* Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
*
* This driver is a port from stlc45xx:
* Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/spi/spi.h>
#include <linux/etherdevice.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include "p54spi.h"
#include "p54.h"
#include "lmac.h"
#ifdef CONFIG_P54_SPI_DEFAULT_EEPROM
#include "p54spi_eeprom.h"
#endif /* CONFIG_P54_SPI_DEFAULT_EEPROM */
MODULE_FIRMWARE("3826.arm");
MODULE_ALIAS("stlc45xx");
/*
* gpios should be handled in board files and provided via platform data,
* but because it's currently impossible for p54spi to have a header file
* in include/linux, let's use module parameters for now
*/
static int p54spi_gpio_power = 97;
module_param(p54spi_gpio_power, int, 0444);
MODULE_PARM_DESC(p54spi_gpio_power, "gpio number for power line");
static int p54spi_gpio_irq = 87;
module_param(p54spi_gpio_irq, int, 0444);
MODULE_PARM_DESC(p54spi_gpio_irq, "gpio number for irq line");
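/*
 * Illustrative usage (editor's addition, hypothetical gpio numbers):
 *
 *	modprobe p54spi p54spi_gpio_power=19 p54spi_gpio_irq=42
 *
 * Both parameters are read-only after load (perm 0444 above).
 */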
static void p54spi_spi_read(struct p54s_priv *priv, u8 address,
void *buf, size_t len)
{
struct spi_transfer t[2];
struct spi_message m;
__le16 addr;
/* We first push the address */
addr = cpu_to_le16(address << 8 | SPI_ADRS_READ_BIT_15);
spi_message_init(&m);
memset(t, 0, sizeof(t));
t[0].tx_buf = &addr;
t[0].len = sizeof(addr);
spi_message_add_tail(&t[0], &m);
t[1].rx_buf = buf;
t[1].len = len;
spi_message_add_tail(&t[1], &m);
spi_sync(priv->spi, &m);
}
static void p54spi_spi_write(struct p54s_priv *priv, u8 address,
const void *buf, size_t len)
{
struct spi_transfer t[3];
struct spi_message m;
__le16 addr;
/* We first push the address */
addr = cpu_to_le16(address << 8);
spi_message_init(&m);
memset(t, 0, sizeof(t));
t[0].tx_buf = &addr;
t[0].len = sizeof(addr);
spi_message_add_tail(&t[0], &m);
t[1].tx_buf = buf;
t[1].len = len & ~1;
spi_message_add_tail(&t[1], &m);
if (len % 2) {
__le16 last_word;
last_word = cpu_to_le16(((u8 *)buf)[len - 1]);
t[2].tx_buf = &last_word;
t[2].len = sizeof(last_word);
spi_message_add_tail(&t[2], &m);
}
spi_sync(priv->spi, &m);
}
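/*
 * Worked example (editor's addition): writing a 5-byte buffer sends
 * three transfers -- the 16-bit address word, a 4-byte data transfer
 * (len & ~1 == 4), and a final 16-bit word holding the odd trailing
 * byte widened via cpu_to_le16() -- so the bus only ever carries
 * whole 16-bit units.
 */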
static u32 p54spi_read32(struct p54s_priv *priv, u8 addr)
{
__le32 val;
p54spi_spi_read(priv, addr, &val, sizeof(val));
return le32_to_cpu(val);
}
static inline void p54spi_write16(struct p54s_priv *priv, u8 addr, __le16 val)
{
p54spi_spi_write(priv, addr, &val, sizeof(val));
}
static inline void p54spi_write32(struct p54s_priv *priv, u8 addr, __le32 val)
{
p54spi_spi_write(priv, addr, &val, sizeof(val));
}
static int p54spi_wait_bit(struct p54s_priv *priv, u16 reg, u32 bits)
{
int i;
for (i = 0; i < 2000; i++) {
u32 buffer = p54spi_read32(priv, reg);
if ((buffer & bits) == bits)
return 1;
}
return 0;
}
static int p54spi_spi_write_dma(struct p54s_priv *priv, __le32 base,
const void *buf, size_t len)
{
if (!p54spi_wait_bit(priv, SPI_ADRS_DMA_WRITE_CTRL, HOST_ALLOWED)) {
dev_err(&priv->spi->dev, "spi_write_dma not allowed "
"to DMA write.\n");
return -EAGAIN;
}
p54spi_write16(priv, SPI_ADRS_DMA_WRITE_CTRL,
cpu_to_le16(SPI_DMA_WRITE_CTRL_ENABLE));
p54spi_write16(priv, SPI_ADRS_DMA_WRITE_LEN, cpu_to_le16(len));
p54spi_write32(priv, SPI_ADRS_DMA_WRITE_BASE, base);
p54spi_spi_write(priv, SPI_ADRS_DMA_DATA, buf, len);
return 0;
}
static int p54spi_request_firmware(struct ieee80211_hw *dev)
{
struct p54s_priv *priv = dev->priv;
int ret;
/* FIXME: should the driver use its own struct device? */
ret = request_firmware(&priv->firmware, "3826.arm", &priv->spi->dev);
if (ret < 0) {
dev_err(&priv->spi->dev, "request_firmware() failed: %d", ret);
return ret;
}
ret = p54_parse_firmware(dev, priv->firmware);
if (ret) {
release_firmware(priv->firmware);
return ret;
}
return 0;
}
static int p54spi_request_eeprom(struct ieee80211_hw *dev)
{
struct p54s_priv *priv = dev->priv;
const struct firmware *eeprom;
int ret;
/*
* allow users to customize their eeprom.
*/
ret = request_firmware(&eeprom, "3826.eeprom", &priv->spi->dev);
if (ret < 0) {
#ifdef CONFIG_P54_SPI_DEFAULT_EEPROM
dev_info(&priv->spi->dev, "loading default eeprom...\n");
ret = p54_parse_eeprom(dev, (void *) p54spi_eeprom,
sizeof(p54spi_eeprom));
#else
dev_err(&priv->spi->dev, "Failed to request user eeprom\n");
#endif /* CONFIG_P54_SPI_DEFAULT_EEPROM */
} else {
dev_info(&priv->spi->dev, "loading user eeprom...\n");
ret = p54_parse_eeprom(dev, (void *) eeprom->data,
(int)eeprom->size);
release_firmware(eeprom);
}
return ret;
}
static int p54spi_upload_firmware(struct ieee80211_hw *dev)
{
struct p54s_priv *priv = dev->priv;
unsigned long fw_len, _fw_len;
unsigned int offset = 0;
int err = 0;
u8 *fw;
fw_len = priv->firmware->size;
fw = kmemdup(priv->firmware->data, fw_len, GFP_KERNEL);
if (!fw)
return -ENOMEM;
/* stop the device */
p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16(
SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_HOST_RESET |
SPI_CTRL_STAT_START_HALTED));
msleep(TARGET_BOOT_SLEEP);
p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16(
SPI_CTRL_STAT_HOST_OVERRIDE |
SPI_CTRL_STAT_START_HALTED));
msleep(TARGET_BOOT_SLEEP);
while (fw_len > 0) {
_fw_len = min_t(long, fw_len, SPI_MAX_PACKET_SIZE);
err = p54spi_spi_write_dma(priv, cpu_to_le32(
ISL38XX_DEV_FIRMWARE_ADDR + offset),
(fw + offset), _fw_len);
if (err < 0)
goto out;
fw_len -= _fw_len;
offset += _fw_len;
}
BUG_ON(fw_len != 0);
/* enable host interrupts */
p54spi_write32(priv, SPI_ADRS_HOST_INT_EN,
cpu_to_le32(SPI_HOST_INTS_DEFAULT));
/* boot the device */
p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16(
SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_HOST_RESET |
SPI_CTRL_STAT_RAM_BOOT));
msleep(TARGET_BOOT_SLEEP);
p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16(
SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_RAM_BOOT));
msleep(TARGET_BOOT_SLEEP);
out:
kfree(fw);
return err;
}
static void p54spi_power_off(struct p54s_priv *priv)
{
disable_irq(gpio_to_irq(p54spi_gpio_irq));
gpio_set_value(p54spi_gpio_power, 0);
}
static void p54spi_power_on(struct p54s_priv *priv)
{
gpio_set_value(p54spi_gpio_power, 1);
enable_irq(gpio_to_irq(p54spi_gpio_irq));
/*
* need to wait a while before the device can be accessed; the
* length of the delay is just a guess
*/
msleep(10);
}
static inline void p54spi_int_ack(struct p54s_priv *priv, u32 val)
{
p54spi_write32(priv, SPI_ADRS_HOST_INT_ACK, cpu_to_le32(val));
}
static int p54spi_wakeup(struct p54s_priv *priv)
{
/* wake the chip */
p54spi_write32(priv, SPI_ADRS_ARM_INTERRUPTS,
cpu_to_le32(SPI_TARGET_INT_WAKEUP));
/* And wait for the READY interrupt */
if (!p54spi_wait_bit(priv, SPI_ADRS_HOST_INTERRUPTS,
SPI_HOST_INT_READY)) {
dev_err(&priv->spi->dev, "INT_READY timeout\n");
return -EBUSY;
}
p54spi_int_ack(priv, SPI_HOST_INT_READY);
return 0;
}
static inline void p54spi_sleep(struct p54s_priv *priv)
{
p54spi_write32(priv, SPI_ADRS_ARM_INTERRUPTS,
cpu_to_le32(SPI_TARGET_INT_SLEEP));
}
static void p54spi_int_ready(struct p54s_priv *priv)
{
p54spi_write32(priv, SPI_ADRS_HOST_INT_EN, cpu_to_le32(
SPI_HOST_INT_UPDATE | SPI_HOST_INT_SW_UPDATE));
switch (priv->fw_state) {
case FW_STATE_BOOTING:
priv->fw_state = FW_STATE_READY;
complete(&priv->fw_comp);
break;
case FW_STATE_RESETTING:
priv->fw_state = FW_STATE_READY;
/* TODO: reinitialize state */
break;
default:
break;
}
}
static int p54spi_rx(struct p54s_priv *priv)
{
struct sk_buff *skb;
u16 len;
u16 rx_head[2];
#define READAHEAD_SZ (sizeof(rx_head)-sizeof(u16))
if (p54spi_wakeup(priv) < 0)
return -EBUSY;
/* Read the data size and the first data word in one SPI transaction.
* This is a workaround for a firmware/DMA bug where the first data
* word gets lost under high load.
*/
p54spi_spi_read(priv, SPI_ADRS_DMA_DATA, rx_head, sizeof(rx_head));
len = rx_head[0];
if (len == 0) {
p54spi_sleep(priv);
dev_err(&priv->spi->dev, "rx request of zero bytes\n");
return 0;
}
/* The firmware may insert up to 4 padding bytes after the lmac header,
* but it does not amend the size of the SPI data transfer.
* Such packets have the correct data size in the header and thus
* reference past the end of the allocated skb. Reserve 4 extra bytes
* for this case. */
skb = dev_alloc_skb(len + 4);
if (!skb) {
p54spi_sleep(priv);
dev_err(&priv->spi->dev, "could not alloc skb");
return -ENOMEM;
}
if (len <= READAHEAD_SZ) {
memcpy(skb_put(skb, len), rx_head + 1, len);
} else {
memcpy(skb_put(skb, READAHEAD_SZ), rx_head + 1, READAHEAD_SZ);
p54spi_spi_read(priv, SPI_ADRS_DMA_DATA,
skb_put(skb, len - READAHEAD_SZ),
len - READAHEAD_SZ);
}
p54spi_sleep(priv);
/* Put additional bytes to compensate for the possible
* alignment-caused truncation */
skb_put(skb, 4);
if (p54_rx(priv->hw, skb) == 0)
dev_kfree_skb(skb);
return 0;
}
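/*
 * Editor's note: with rx_head[] being two u16s, READAHEAD_SZ works
 * out to sizeof(rx_head) - sizeof(u16) == 2 bytes, i.e. rx_head[0]
 * carries the length and rx_head[1] the first payload word fetched
 * in the same transaction -- the workaround for the lost-first-word
 * bug described above.
 */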
static irqreturn_t p54spi_interrupt(int irq, void *config)
{
struct spi_device *spi = config;
struct p54s_priv *priv = dev_get_drvdata(&spi->dev);
ieee80211_queue_work(priv->hw, &priv->work);
return IRQ_HANDLED;
}
static int p54spi_tx_frame(struct p54s_priv *priv, struct sk_buff *skb)
{
struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
int ret = 0;
if (p54spi_wakeup(priv) < 0)
return -EBUSY;
ret = p54spi_spi_write_dma(priv, hdr->req_id, skb->data, skb->len);
if (ret < 0)
goto out;
if (!p54spi_wait_bit(priv, SPI_ADRS_HOST_INTERRUPTS,
SPI_HOST_INT_WR_READY)) {
dev_err(&priv->spi->dev, "WR_READY timeout\n");
ret = -EAGAIN;
goto out;
}
p54spi_int_ack(priv, SPI_HOST_INT_WR_READY);
if (FREE_AFTER_TX(skb))
p54_free_skb(priv->hw, skb);
out:
p54spi_sleep(priv);
return ret;
}
static int p54spi_wq_tx(struct p54s_priv *priv)
{
struct p54s_tx_info *entry;
struct sk_buff *skb;
struct ieee80211_tx_info *info;
struct p54_tx_info *minfo;
struct p54s_tx_info *dinfo;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&priv->tx_lock, flags);
while (!list_empty(&priv->tx_pending)) {
entry = list_entry(priv->tx_pending.next,
struct p54s_tx_info, tx_list);
list_del_init(&entry->tx_list);
spin_unlock_irqrestore(&priv->tx_lock, flags);
dinfo = container_of((void *) entry, struct p54s_tx_info,
tx_list);
minfo = container_of((void *) dinfo, struct p54_tx_info,
data);
info = container_of((void *) minfo, struct ieee80211_tx_info,
rate_driver_data);
skb = container_of((void *) info, struct sk_buff, cb);
ret = p54spi_tx_frame(priv, skb);
if (ret < 0) {
p54_free_skb(priv->hw, skb);
return ret;
}
spin_lock_irqsave(&priv->tx_lock, flags);
}
spin_unlock_irqrestore(&priv->tx_lock, flags);
return ret;
}
static void p54spi_op_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
struct p54s_priv *priv = dev->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct p54_tx_info *mi = (struct p54_tx_info *) info->rate_driver_data;
struct p54s_tx_info *di = (struct p54s_tx_info *) mi->data;
unsigned long flags;
BUILD_BUG_ON(sizeof(*di) > sizeof((mi->data)));
spin_lock_irqsave(&priv->tx_lock, flags);
list_add_tail(&di->tx_list, &priv->tx_pending);
spin_unlock_irqrestore(&priv->tx_lock, flags);
ieee80211_queue_work(priv->hw, &priv->work);
}
static void p54spi_work(struct work_struct *work)
{
struct p54s_priv *priv = container_of(work, struct p54s_priv, work);
u32 ints;
int ret;
mutex_lock(&priv->mutex);
if (priv->fw_state == FW_STATE_OFF)
goto out;
ints = p54spi_read32(priv, SPI_ADRS_HOST_INTERRUPTS);
if (ints & SPI_HOST_INT_READY) {
p54spi_int_ready(priv);
p54spi_int_ack(priv, SPI_HOST_INT_READY);
}
if (priv->fw_state != FW_STATE_READY)
goto out;
if (ints & SPI_HOST_INT_UPDATE) {
p54spi_int_ack(priv, SPI_HOST_INT_UPDATE);
ret = p54spi_rx(priv);
if (ret < 0)
goto out;
}
if (ints & SPI_HOST_INT_SW_UPDATE) {
p54spi_int_ack(priv, SPI_HOST_INT_SW_UPDATE);
ret = p54spi_rx(priv);
if (ret < 0)
goto out;
}
ret = p54spi_wq_tx(priv);
out:
mutex_unlock(&priv->mutex);
}
static int p54spi_op_start(struct ieee80211_hw *dev)
{
struct p54s_priv *priv = dev->priv;
unsigned long timeout;
int ret = 0;
if (mutex_lock_interruptible(&priv->mutex)) {
ret = -EINTR;
goto out;
}
priv->fw_state = FW_STATE_BOOTING;
p54spi_power_on(priv);
ret = p54spi_upload_firmware(dev);
if (ret < 0) {
p54spi_power_off(priv);
goto out_unlock;
}
mutex_unlock(&priv->mutex);
timeout = msecs_to_jiffies(2000);
timeout = wait_for_completion_interruptible_timeout(&priv->fw_comp,
timeout);
if (!timeout) {
dev_err(&priv->spi->dev, "firmware boot failed");
p54spi_power_off(priv);
ret = -1;
goto out;
}
if (mutex_lock_interruptible(&priv->mutex)) {
ret = -EINTR;
p54spi_power_off(priv);
goto out;
}
WARN_ON(priv->fw_state != FW_STATE_READY);
out_unlock:
mutex_unlock(&priv->mutex);
out:
return ret;
}
static void p54spi_op_stop(struct ieee80211_hw *dev)
{
struct p54s_priv *priv = dev->priv;
unsigned long flags;
if (mutex_lock_interruptible(&priv->mutex)) {
/* FIXME: how to handle this error? */
return;
}
WARN_ON(priv->fw_state != FW_STATE_READY);
cancel_work_sync(&priv->work);
p54spi_power_off(priv);
spin_lock_irqsave(&priv->tx_lock, flags);
INIT_LIST_HEAD(&priv->tx_pending);
spin_unlock_irqrestore(&priv->tx_lock, flags);
priv->fw_state = FW_STATE_OFF;
mutex_unlock(&priv->mutex);
}
static int __devinit p54spi_probe(struct spi_device *spi)
{
struct p54s_priv *priv = NULL;
struct ieee80211_hw *hw;
int ret = -EINVAL;
hw = p54_init_common(sizeof(*priv));
if (!hw) {
dev_err(&spi->dev, "could not alloc ieee80211_hw");
return -ENOMEM;
}
priv = hw->priv;
priv->hw = hw;
dev_set_drvdata(&spi->dev, priv);
priv->spi = spi;
spi->bits_per_word = 16;
spi->max_speed_hz = 24000000;
ret = spi_setup(spi);
if (ret < 0) {
dev_err(&priv->spi->dev, "spi_setup failed");
goto err_free_common;
}
ret = gpio_request(p54spi_gpio_power, "p54spi power");
if (ret < 0) {
dev_err(&priv->spi->dev, "power GPIO request failed: %d", ret);
goto err_free_common;
}
ret = gpio_request(p54spi_gpio_irq, "p54spi irq");
if (ret < 0) {
dev_err(&priv->spi->dev, "irq GPIO request failed: %d", ret);
goto err_free_common;
}
gpio_direction_output(p54spi_gpio_power, 0);
gpio_direction_input(p54spi_gpio_irq);
ret = request_irq(gpio_to_irq(p54spi_gpio_irq),
p54spi_interrupt, IRQF_DISABLED, "p54spi",
priv->spi);
if (ret < 0) {
dev_err(&priv->spi->dev, "request_irq() failed");
goto err_free_common;
}
irq_set_irq_type(gpio_to_irq(p54spi_gpio_irq), IRQ_TYPE_EDGE_RISING);
disable_irq(gpio_to_irq(p54spi_gpio_irq));
INIT_WORK(&priv->work, p54spi_work);
init_completion(&priv->fw_comp);
INIT_LIST_HEAD(&priv->tx_pending);
mutex_init(&priv->mutex);
SET_IEEE80211_DEV(hw, &spi->dev);
priv->common.open = p54spi_op_start;
priv->common.stop = p54spi_op_stop;
priv->common.tx = p54spi_op_tx;
ret = p54spi_request_firmware(hw);
if (ret < 0)
goto err_free_common;
ret = p54spi_request_eeprom(hw);
if (ret)
goto err_free_common;
ret = p54_register_common(hw, &priv->spi->dev);
if (ret)
goto err_free_common;
return 0;
err_free_common:
p54_free_common(priv->hw);
return ret;
}
static int __devexit p54spi_remove(struct spi_device *spi)
{
struct p54s_priv *priv = dev_get_drvdata(&spi->dev);
p54_unregister_common(priv->hw);
free_irq(gpio_to_irq(p54spi_gpio_irq), spi);
gpio_free(p54spi_gpio_power);
gpio_free(p54spi_gpio_irq);
release_firmware(priv->firmware);
mutex_destroy(&priv->mutex);
p54_free_common(priv->hw);
return 0;
}
static struct spi_driver p54spi_driver = {
.driver = {
.name = "p54spi",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = p54spi_probe,
.remove = __devexit_p(p54spi_remove),
};
static int __init p54spi_init(void)
{
int ret;
ret = spi_register_driver(&p54spi_driver);
if (ret < 0) {
printk(KERN_ERR "failed to register SPI driver: %d\n", ret);
goto out;
}
out:
return ret;
}
static void __exit p54spi_exit(void)
{
spi_unregister_driver(&p54spi_driver);
}
module_init(p54spi_init);
module_exit(p54spi_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
MODULE_ALIAS("spi:cx3110x");
MODULE_ALIAS("spi:p54spi");
| gpl-2.0 |
jeboo/kernel_KK_i337_ATT_NB1 | arch/arm/mach-msm/msm_bus/msm_bus_core.c | 1285 | 2933 | /* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/clk.h>
#include <mach/msm_bus_board.h>
#include <mach/msm_bus.h>
#include "msm_bus_core.h"
static atomic_t num_fab = ATOMIC_INIT(0);
int msm_bus_get_num_fab(void)
{
return atomic_read(&num_fab);
}
int msm_bus_device_match(struct device *dev, void* id)
{
struct msm_bus_fabric_device *fabdev = to_msm_bus_fabric_device(dev);
if (!fabdev) {
MSM_BUS_WARN("Fabric %p returning 0\n", fabdev);
return 0;
}
return (fabdev->id == (int)id);
}
struct bus_type msm_bus_type = {
.name = "msm-bus-type",
};
EXPORT_SYMBOL(msm_bus_type);
/**
* msm_bus_get_fabric_device() - This function is used to search for
* the fabric device on the bus
* @fabid: Fabric id
* Function returns: Pointer to the fabric device
*/
struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid)
{
struct device *dev;
struct msm_bus_fabric_device *fabric;
dev = bus_find_device(&msm_bus_type, NULL, (void *)fabid,
msm_bus_device_match);
fabric = to_msm_bus_fabric_device(dev);
return fabric;
}
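/*
 * Editor's sketch (not part of the original file): a hypothetical
 * caller resolving a fabric id to its registered device.
 * MSM_BUS_FAB_APPSS is used only as an example id from
 * mach/msm_bus_board.h.
 */
static inline struct msm_bus_fabric_device *msm_bus_example_lookup(void)
{
/* the bus_find_device() walk happens inside the helper above */
return msm_bus_get_fabric_device(MSM_BUS_FAB_APPSS);
}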
/**
* msm_bus_fabric_device_register() - Registers a fabric on msm bus
* @fabdev: Fabric device to be registered
*/
int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabdev)
{
int ret = 0;
fabdev->dev.bus = &msm_bus_type;
ret = dev_set_name(&fabdev->dev, "%s", fabdev->name);
if (ret) {
MSM_BUS_ERR("error setting dev name\n");
goto err;
}
ret = device_register(&fabdev->dev);
if (ret < 0) {
MSM_BUS_ERR("error registering device%d %s\n",
ret, fabdev->name);
goto err;
}
atomic_inc(&num_fab);
err:
return ret;
}
/**
* msm_bus_fabric_device_unregister() - Unregisters the fabric
* devices from the msm bus
*/
void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabdev)
{
device_unregister(&fabdev->dev);
atomic_dec(&num_fab);
}
static void __exit msm_bus_exit(void)
{
bus_unregister(&msm_bus_type);
}
static int __init msm_bus_init(void)
{
int retval = 0;
retval = bus_register(&msm_bus_type);
if (retval)
MSM_BUS_ERR("bus_register error! %d\n",
retval);
return retval;
}
postcore_initcall(msm_bus_init);
module_exit(msm_bus_exit);
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.2");
MODULE_ALIAS("platform:msm_bus");
| gpl-2.0 |
acheron1502/android_kernel_BLU_BLU_PURE_XL | tools/perf/tests/evsel-tp-sched.c | 2309 | 1899 | #include "evsel.h"
#include "tests.h"
#include "event-parse.h"
static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
int size, bool should_be_signed)
{
struct format_field *field = perf_evsel__field(evsel, name);
int is_signed;
int ret = 0;
if (field == NULL) {
pr_debug("%s: \"%s\" field not found!\n", evsel->name, name);
return -1;
}
/* test the flag bit with &; OR-ing in a non-zero flag is always true */
is_signed = !!(field->flags & FIELD_IS_SIGNED);
if (should_be_signed && !is_signed) {
pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
evsel->name, name, is_signed, should_be_signed);
ret = -1;
}
if (field->size != size) {
pr_debug("%s: \"%s\" size (%d) should be %d!\n",
evsel->name, name, field->size, size);
ret = -1;
}
return ret;
}
int test__perf_evsel__tp_sched_test(void)
{
struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0);
int ret = 0;
if (evsel == NULL) {
pr_debug("perf_evsel__new\n");
return -1;
}
if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
ret = -1;
if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
ret = -1;
if (perf_evsel__test_field(evsel, "prev_prio", 4, true))
ret = -1;
if (perf_evsel__test_field(evsel, "prev_state", 8, true))
ret = -1;
if (perf_evsel__test_field(evsel, "next_comm", 16, true))
ret = -1;
if (perf_evsel__test_field(evsel, "next_pid", 4, true))
ret = -1;
if (perf_evsel__test_field(evsel, "next_prio", 4, true))
ret = -1;
perf_evsel__delete(evsel);
evsel = perf_evsel__newtp("sched", "sched_wakeup", 0);
if (evsel == NULL) {
pr_debug("perf_evsel__new\n");
return -1;
}
if (perf_evsel__test_field(evsel, "comm", 16, true))
ret = -1;
if (perf_evsel__test_field(evsel, "pid", 4, true))
ret = -1;
if (perf_evsel__test_field(evsel, "prio", 4, true))
ret = -1;
if (perf_evsel__test_field(evsel, "success", 4, true))
ret = -1;
if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
ret = -1;
perf_evsel__delete(evsel);
return ret;
}
| gpl-2.0 |
yu-aosp-staging/android_kernel_yu_msm8916 | drivers/net/can/sja1000/ems_pcmcia.c | 2565 | 8248 | /*
* Copyright (C) 2008 Sebastian Haas (initial chardev implementation)
* Copyright (C) 2010 Markus Plessing <plessing@ems-wuensche.com>
* Rework for mainline by Oliver Hartkopp <socketcan@hartkopp.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the version 2 of the GNU General Public License
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include "sja1000.h"
#define DRV_NAME "ems_pcmcia"
MODULE_AUTHOR("Markus Plessing <plessing@ems-wuensche.com>");
MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-CARD cards");
MODULE_SUPPORTED_DEVICE("EMS CPC-CARD CAN card");
MODULE_LICENSE("GPL v2");
#define EMS_PCMCIA_MAX_CHAN 2
struct ems_pcmcia_card {
int channels;
struct pcmcia_device *pcmcia_dev;
struct net_device *net_dev[EMS_PCMCIA_MAX_CHAN];
void __iomem *base_addr;
};
#define EMS_PCMCIA_CAN_CLOCK (16000000 / 2)
/*
* The board configuration is probably following:
* RX1 is connected to ground.
* TX1 is not connected.
* CLKO is not connected.
* Setting the OCR register to 0xDA is a good idea.
* This means normal output mode, push-pull and the correct polarity.
*/
#define EMS_PCMCIA_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
/*
* In the CDR register, you should set CBP to 1.
* You will probably also want to set the clock divider value to 7
* (meaning direct oscillator output) because the second SJA1000 chip
* is driven by the first one's CLKOUT output.
*/
#define EMS_PCMCIA_CDR (CDR_CBP | CDR_CLKOUT_MASK)
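/*
 * Editor's note (assuming the stock sja1000.h values CDR_CBP == 0x40
 * and CDR_CLKOUT_MASK == 0x07): EMS_PCMCIA_CDR evaluates to 0x47,
 * i.e. CBP set and clock divider 7 (direct oscillator output),
 * matching the register description above.
 */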
#define EMS_PCMCIA_MEM_SIZE 4096 /* Size of the remapped io-memory */
#define EMS_PCMCIA_CAN_BASE_OFFSET 0x100 /* Offset where controllers starts */
#define EMS_PCMCIA_CAN_CTRL_SIZE 0x80 /* Memory size for each controller */
#define EMS_CMD_RESET 0x00 /* Perform a reset of the card */
#define EMS_CMD_MAP 0x03 /* Map CAN controllers into card' memory */
#define EMS_CMD_UMAP 0x02 /* Unmap CAN controllers from card' memory */
static struct pcmcia_device_id ems_pcmcia_tbl[] = {
PCMCIA_DEVICE_PROD_ID123("EMS_T_W", "CPC-Card", "V2.0", 0xeab1ea23,
0xa338573f, 0xe4575800),
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, ems_pcmcia_tbl);
static u8 ems_pcmcia_read_reg(const struct sja1000_priv *priv, int port)
{
return readb(priv->reg_base + port);
}
static void ems_pcmcia_write_reg(const struct sja1000_priv *priv, int port,
u8 val)
{
writeb(val, priv->reg_base + port);
}
static irqreturn_t ems_pcmcia_interrupt(int irq, void *dev_id)
{
struct ems_pcmcia_card *card = dev_id;
struct net_device *dev;
irqreturn_t retval = IRQ_NONE;
int i, again;
/* Card not present */
if (readw(card->base_addr) != 0xAA55)
return IRQ_HANDLED;
do {
again = 0;
/* Check interrupt for each channel */
for (i = 0; i < card->channels; i++) {
dev = card->net_dev[i];
if (!dev)
continue;
if (sja1000_interrupt(irq, dev) == IRQ_HANDLED)
again = 1;
}
/* At least one channel handled the interrupt */
if (again)
retval = IRQ_HANDLED;
} while (again);
return retval;
}
/*
* Check if a CAN controller is present at the specified location
* by trying to switch it into PeliCAN mode
*/
static inline int ems_pcmcia_check_chan(struct sja1000_priv *priv)
{
/* Make sure SJA1000 is in reset mode */
ems_pcmcia_write_reg(priv, SJA1000_MOD, 1);
ems_pcmcia_write_reg(priv, SJA1000_CDR, CDR_PELICAN);
/* read reset-values */
if (ems_pcmcia_read_reg(priv, SJA1000_CDR) == CDR_PELICAN)
return 1;
return 0;
}
static void ems_pcmcia_del_card(struct pcmcia_device *pdev)
{
struct ems_pcmcia_card *card = pdev->priv;
struct net_device *dev;
int i;
free_irq(pdev->irq, card);
for (i = 0; i < card->channels; i++) {
dev = card->net_dev[i];
if (!dev)
continue;
printk(KERN_INFO "%s: removing %s on channel #%d\n",
DRV_NAME, dev->name, i);
unregister_sja1000dev(dev);
free_sja1000dev(dev);
}
writeb(EMS_CMD_UMAP, card->base_addr);
iounmap(card->base_addr);
kfree(card);
pdev->priv = NULL;
}
/*
* Probe PCI device for EMS CAN signature and register each available
* CAN channel to SJA1000 Socket-CAN subsystem.
*/
static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base)
{
struct sja1000_priv *priv;
struct net_device *dev;
struct ems_pcmcia_card *card;
int err, i;
/* Allocating card structures to hold addresses, ... */
card = kzalloc(sizeof(struct ems_pcmcia_card), GFP_KERNEL);
if (!card)
return -ENOMEM;
pdev->priv = card;
card->channels = 0;
card->base_addr = ioremap(base, EMS_PCMCIA_MEM_SIZE);
if (!card->base_addr) {
err = -ENOMEM;
goto failure_cleanup;
}
/* Check for unique EMS CAN signature */
if (readw(card->base_addr) != 0xAA55) {
err = -ENODEV;
goto failure_cleanup;
}
/* Request board reset */
writeb(EMS_CMD_RESET, card->base_addr);
/* Make sure CAN controllers are mapped into card's memory space */
writeb(EMS_CMD_MAP, card->base_addr);
/* Detect available channels */
for (i = 0; i < EMS_PCMCIA_MAX_CHAN; i++) {
dev = alloc_sja1000dev(0);
if (!dev) {
err = -ENOMEM;
goto failure_cleanup;
}
card->net_dev[i] = dev;
priv = netdev_priv(dev);
priv->priv = card;
SET_NETDEV_DEV(dev, &pdev->dev);
priv->irq_flags = IRQF_SHARED;
dev->irq = pdev->irq;
priv->reg_base = card->base_addr + EMS_PCMCIA_CAN_BASE_OFFSET +
(i * EMS_PCMCIA_CAN_CTRL_SIZE);
/* Check if channel is present */
if (ems_pcmcia_check_chan(priv)) {
priv->read_reg = ems_pcmcia_read_reg;
priv->write_reg = ems_pcmcia_write_reg;
priv->can.clock.freq = EMS_PCMCIA_CAN_CLOCK;
priv->ocr = EMS_PCMCIA_OCR;
priv->cdr = EMS_PCMCIA_CDR;
priv->flags |= SJA1000_CUSTOM_IRQ_HANDLER;
/* Register SJA1000 device */
err = register_sja1000dev(dev);
if (err) {
free_sja1000dev(dev);
goto failure_cleanup;
}
card->channels++;
printk(KERN_INFO "%s: registered %s on channel "
"#%d at 0x%p, irq %d\n", DRV_NAME, dev->name,
i, priv->reg_base, dev->irq);
} else
free_sja1000dev(dev);
}
err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
DRV_NAME, card);
if (!err)
return 0;
failure_cleanup:
ems_pcmcia_del_card(pdev);
return err;
}
/*
* Setup PCMCIA socket and probe for EMS CPC-CARD
*/
static int ems_pcmcia_probe(struct pcmcia_device *dev)
{
int csval;
/* General socket configuration */
dev->config_flags |= CONF_ENABLE_IRQ;
dev->config_index = 1;
dev->config_regs = PRESENT_OPTION;
/* The io structure describes IO port mapping */
dev->resource[0]->end = 16;
dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
dev->resource[1]->end = 16;
dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_16;
dev->io_lines = 5;
/* Allocate a memory window */
dev->resource[2]->flags =
(WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_CM | WIN_ENABLE);
dev->resource[2]->start = dev->resource[2]->end = 0;
csval = pcmcia_request_window(dev, dev->resource[2], 0);
if (csval) {
dev_err(&dev->dev, "pcmcia_request_window failed (err=%d)\n",
csval);
return 0;
}
csval = pcmcia_map_mem_page(dev, dev->resource[2], dev->config_base);
if (csval) {
dev_err(&dev->dev, "pcmcia_map_mem_page failed (err=%d)\n",
csval);
return 0;
}
csval = pcmcia_enable_device(dev);
if (csval) {
dev_err(&dev->dev, "pcmcia_enable_device failed (err=%d)\n",
csval);
return 0;
}
ems_pcmcia_add_card(dev, dev->resource[2]->start);
return 0;
}
/*
* Release claimed resources
*/
static void ems_pcmcia_remove(struct pcmcia_device *dev)
{
ems_pcmcia_del_card(dev);
pcmcia_disable_device(dev);
}
static struct pcmcia_driver ems_pcmcia_driver = {
.name = DRV_NAME,
.probe = ems_pcmcia_probe,
.remove = ems_pcmcia_remove,
.id_table = ems_pcmcia_tbl,
};
module_pcmcia_driver(ems_pcmcia_driver);
| gpl-2.0 |
ProjectOpenCannibal/android_kernel_lg_geehrc4g | fs/xfs/xfs_dir2_sf.c | 2821 | 37492 | /*
* Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_error.h"
#include "xfs_dir2_data.h"
#include "xfs_dir2_leaf.h"
#include "xfs_dir2_block.h"
#include "xfs_trace.h"
/*
* Prototypes for internal functions.
*/
static void xfs_dir2_sf_addname_easy(xfs_da_args_t *args,
xfs_dir2_sf_entry_t *sfep,
xfs_dir2_data_aoff_t offset,
int new_isize);
static void xfs_dir2_sf_addname_hard(xfs_da_args_t *args, int objchange,
int new_isize);
static int xfs_dir2_sf_addname_pick(xfs_da_args_t *args, int objchange,
xfs_dir2_sf_entry_t **sfepp,
xfs_dir2_data_aoff_t *offsetp);
#ifdef DEBUG
static void xfs_dir2_sf_check(xfs_da_args_t *args);
#else
#define xfs_dir2_sf_check(args)
#endif /* DEBUG */
#if XFS_BIG_INUMS
static void xfs_dir2_sf_toino4(xfs_da_args_t *args);
static void xfs_dir2_sf_toino8(xfs_da_args_t *args);
#endif /* XFS_BIG_INUMS */
/*
* Given a block directory (dp/block), calculate its size as a shortform (sf)
* directory and a header for the sf directory, if it will fit it the
* space currently present in the inode. If it won't fit, the output
* size is too big (but not accurate).
*/
int /* size for sf form */
xfs_dir2_block_sfsize(
xfs_inode_t *dp, /* incore inode pointer */
xfs_dir2_block_t *block, /* block directory data */
xfs_dir2_sf_hdr_t *sfhp) /* output: header for sf form */
{
xfs_dir2_dataptr_t addr; /* data entry address */
xfs_dir2_leaf_entry_t *blp; /* leaf area of the block */
xfs_dir2_block_tail_t *btp; /* tail area of the block */
int count; /* shortform entry count */
xfs_dir2_data_entry_t *dep; /* data entry in the block */
int i; /* block entry index */
int i8count; /* count of big-inode entries */
int isdot; /* entry is "." */
int isdotdot; /* entry is ".." */
xfs_mount_t *mp; /* mount structure pointer */
int namelen; /* total name bytes */
xfs_ino_t parent = 0; /* parent inode number */
int size=0; /* total computed size */
mp = dp->i_mount;
count = i8count = namelen = 0;
btp = xfs_dir2_block_tail_p(mp, block);
blp = xfs_dir2_block_leaf_p(btp);
/*
* Iterate over the block's data entries by using the leaf pointers.
*/
for (i = 0; i < be32_to_cpu(btp->count); i++) {
if ((addr = be32_to_cpu(blp[i].address)) == XFS_DIR2_NULL_DATAPTR)
continue;
/*
* Calculate the pointer to the entry at hand.
*/
dep = (xfs_dir2_data_entry_t *)
((char *)block + xfs_dir2_dataptr_to_off(mp, addr));
/*
* Detect . and .., so we can special-case them.
* . is not included in sf directories.
* .. is included by just the parent inode number.
*/
isdot = dep->namelen == 1 && dep->name[0] == '.';
isdotdot =
dep->namelen == 2 &&
dep->name[0] == '.' && dep->name[1] == '.';
#if XFS_BIG_INUMS
if (!isdot)
i8count += be64_to_cpu(dep->inumber) > XFS_DIR2_MAX_SHORT_INUM;
#endif
if (!isdot && !isdotdot) {
count++;
namelen += dep->namelen;
} else if (isdotdot)
parent = be64_to_cpu(dep->inumber);
/*
* Calculate the new size, see if we should give up yet.
*/
size = xfs_dir2_sf_hdr_size(i8count) + /* header */
count + /* namelen */
count * (uint)sizeof(xfs_dir2_sf_off_t) + /* offset */
namelen + /* name */
(i8count ? /* inumber */
(uint)sizeof(xfs_dir2_ino8_t) * count :
(uint)sizeof(xfs_dir2_ino4_t) * count);
if (size > XFS_IFORK_DSIZE(dp))
return size; /* size value is a failure */
}
/*
* Create the output header, if it worked.
*/
sfhp->count = count;
sfhp->i8count = i8count;
xfs_dir2_sf_put_inumber((xfs_dir2_sf_t *)sfhp, &parent, &sfhp->parent);
return size;
}
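/*
 * Worked example (editor's addition, assuming the usual on-disk
 * sizes: 6-byte header when i8count == 0, 2-byte xfs_dir2_sf_off_t,
 * 4-byte xfs_dir2_ino4_t): two plain entries with 4- and 6-byte
 * names need 6 + 2 + 2*2 + 10 + 2*4 = 30 bytes in shortform, so the
 * conversion proceeds only if 30 <= XFS_IFORK_DSIZE(dp).
 */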
/*
* Convert a block format directory to shortform.
* Caller has already checked that it will fit, and built us a header.
*/
int /* error */
xfs_dir2_block_to_sf(
xfs_da_args_t *args, /* operation arguments */
xfs_dabuf_t *bp, /* block buffer */
int size, /* shortform directory size */
xfs_dir2_sf_hdr_t *sfhp) /* shortform directory hdr */
{
xfs_dir2_block_t *block; /* block structure */
xfs_dir2_block_tail_t *btp; /* block tail pointer */
xfs_dir2_data_entry_t *dep; /* data entry pointer */
xfs_inode_t *dp; /* incore directory inode */
xfs_dir2_data_unused_t *dup; /* unused data pointer */
char *endptr; /* end of data entries */
int error; /* error return value */
int logflags; /* inode logging flags */
xfs_mount_t *mp; /* filesystem mount point */
char *ptr; /* current data pointer */
xfs_dir2_sf_entry_t *sfep; /* shortform entry */
xfs_dir2_sf_t *sfp; /* shortform structure */
xfs_ino_t temp;
trace_xfs_dir2_block_to_sf(args);
dp = args->dp;
mp = dp->i_mount;
/*
* Make a copy of the block data, so we can shrink the inode
* and add local data.
*/
block = kmem_alloc(mp->m_dirblksize, KM_SLEEP);
memcpy(block, bp->data, mp->m_dirblksize);
logflags = XFS_ILOG_CORE;
if ((error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp))) {
ASSERT(error != ENOSPC);
goto out;
}
/*
* The buffer is now unconditionally gone, whether
* xfs_dir2_shrink_inode worked or not.
*
* Convert the inode to local format.
*/
dp->i_df.if_flags &= ~XFS_IFEXTENTS;
dp->i_df.if_flags |= XFS_IFINLINE;
dp->i_d.di_format = XFS_DINODE_FMT_LOCAL;
ASSERT(dp->i_df.if_bytes == 0);
xfs_idata_realloc(dp, size, XFS_DATA_FORK);
logflags |= XFS_ILOG_DDATA;
/*
* Copy the header into the newly allocated local space.
*/
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
memcpy(sfp, sfhp, xfs_dir2_sf_hdr_size(sfhp->i8count));
dp->i_d.di_size = size;
/*
* Set up to loop over the block's entries.
*/
btp = xfs_dir2_block_tail_p(mp, block);
ptr = (char *)block->u;
endptr = (char *)xfs_dir2_block_leaf_p(btp);
sfep = xfs_dir2_sf_firstentry(sfp);
/*
* Loop over the active and unused entries.
* Stop when we reach the leaf/tail portion of the block.
*/
while (ptr < endptr) {
/*
* If it's unused, just skip over it.
*/
dup = (xfs_dir2_data_unused_t *)ptr;
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
ptr += be16_to_cpu(dup->length);
continue;
}
dep = (xfs_dir2_data_entry_t *)ptr;
/*
* Skip .
*/
if (dep->namelen == 1 && dep->name[0] == '.')
ASSERT(be64_to_cpu(dep->inumber) == dp->i_ino);
/*
* Skip .., but make sure the inode number is right.
*/
else if (dep->namelen == 2 &&
dep->name[0] == '.' && dep->name[1] == '.')
ASSERT(be64_to_cpu(dep->inumber) ==
xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent));
/*
* Normal entry, copy it into shortform.
*/
else {
sfep->namelen = dep->namelen;
xfs_dir2_sf_put_offset(sfep,
(xfs_dir2_data_aoff_t)
((char *)dep - (char *)block));
memcpy(sfep->name, dep->name, dep->namelen);
temp = be64_to_cpu(dep->inumber);
xfs_dir2_sf_put_inumber(sfp, &temp,
xfs_dir2_sf_inumberp(sfep));
sfep = xfs_dir2_sf_nextentry(sfp, sfep);
}
ptr += xfs_dir2_data_entsize(dep->namelen);
}
ASSERT((char *)sfep - (char *)sfp == size);
xfs_dir2_sf_check(args);
out:
xfs_trans_log_inode(args->trans, dp, logflags);
kmem_free(block);
return error;
}
/*
* Add a name to a shortform directory.
* There are two algorithms, "easy" and "hard" which we decide on
* before changing anything.
* Convert to block form if necessary, if the new entry won't fit.
*/
int /* error */
xfs_dir2_sf_addname(
xfs_da_args_t *args) /* operation arguments */
{
int add_entsize; /* size of the new entry */
xfs_inode_t *dp; /* incore directory inode */
int error; /* error return value */
int incr_isize; /* total change in size */
int new_isize; /* di_size after adding name */
int objchange; /* changing to 8-byte inodes */
xfs_dir2_data_aoff_t offset = 0; /* offset for new entry */
int old_isize; /* di_size before adding name */
int pick; /* which algorithm to use */
xfs_dir2_sf_t *sfp; /* shortform structure */
xfs_dir2_sf_entry_t *sfep = NULL; /* shortform entry */
trace_xfs_dir2_sf_addname(args);
ASSERT(xfs_dir2_sf_lookup(args) == ENOENT);
dp = args->dp;
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
/*
* Make sure the shortform value has some of its header.
*/
if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
return XFS_ERROR(EIO);
}
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
ASSERT(dp->i_df.if_u1.if_data != NULL);
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
/*
* Compute entry (and change in) size.
*/
add_entsize = xfs_dir2_sf_entsize_byname(sfp, args->namelen);
incr_isize = add_entsize;
objchange = 0;
#if XFS_BIG_INUMS
/*
* Do we have to change to 8 byte inodes?
*/
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->hdr.i8count == 0) {
/*
* Yes, adjust the entry size and the total size.
*/
add_entsize +=
(uint)sizeof(xfs_dir2_ino8_t) -
(uint)sizeof(xfs_dir2_ino4_t);
incr_isize +=
(sfp->hdr.count + 2) *
((uint)sizeof(xfs_dir2_ino8_t) -
(uint)sizeof(xfs_dir2_ino4_t));
objchange = 1;
}
#endif
old_isize = (int)dp->i_d.di_size;
new_isize = old_isize + incr_isize;
/*
* Won't fit as shortform any more (due to size),
* or the pick routine says it won't (due to offset values).
*/
if (new_isize > XFS_IFORK_DSIZE(dp) ||
(pick =
xfs_dir2_sf_addname_pick(args, objchange, &sfep, &offset)) == 0) {
/*
* Just checking or no space reservation, it doesn't fit.
*/
if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || args->total == 0)
return XFS_ERROR(ENOSPC);
/*
* Convert to block form then add the name.
*/
error = xfs_dir2_sf_to_block(args);
if (error)
return error;
return xfs_dir2_block_addname(args);
}
/*
* Just checking, it fits.
*/
if (args->op_flags & XFS_DA_OP_JUSTCHECK)
return 0;
/*
* Do it the easy way - just add it at the end.
*/
if (pick == 1)
xfs_dir2_sf_addname_easy(args, sfep, offset, new_isize);
/*
* Do it the hard way - look for a place to insert the new entry.
* Convert to 8 byte inode numbers first if necessary.
*/
else {
ASSERT(pick == 2);
#if XFS_BIG_INUMS
if (objchange)
xfs_dir2_sf_toino8(args);
#endif
xfs_dir2_sf_addname_hard(args, objchange, new_isize);
}
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
return 0;
}
/*
* Add the new entry the "easy" way.
* This is copying the old directory and adding the new entry at the end.
* Since it's sorted by "offset" we need room after the last offset
* that's already there, and then room to convert to a block directory.
* This is already checked by the pick routine.
*/
static void
xfs_dir2_sf_addname_easy(
xfs_da_args_t *args, /* operation arguments */
xfs_dir2_sf_entry_t *sfep, /* pointer to new entry */
xfs_dir2_data_aoff_t offset, /* offset to use for new ent */
int new_isize) /* new directory size */
{
int byteoff; /* byte offset in sf dir */
xfs_inode_t *dp; /* incore directory inode */
xfs_dir2_sf_t *sfp; /* shortform structure */
dp = args->dp;
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
byteoff = (int)((char *)sfep - (char *)sfp);
/*
* Grow the in-inode space.
*/
xfs_idata_realloc(dp, xfs_dir2_sf_entsize_byname(sfp, args->namelen),
XFS_DATA_FORK);
/*
* Need to set up again due to realloc of the inode data.
*/
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
sfep = (xfs_dir2_sf_entry_t *)((char *)sfp + byteoff);
/*
* Fill in the new entry.
*/
sfep->namelen = args->namelen;
xfs_dir2_sf_put_offset(sfep, offset);
memcpy(sfep->name, args->name, sfep->namelen);
xfs_dir2_sf_put_inumber(sfp, &args->inumber,
xfs_dir2_sf_inumberp(sfep));
/*
* Update the header and inode.
*/
sfp->hdr.count++;
#if XFS_BIG_INUMS
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM)
sfp->hdr.i8count++;
#endif
dp->i_d.di_size = new_isize;
xfs_dir2_sf_check(args);
}
/*
* Add the new entry the "hard" way.
* The caller has already converted to 8 byte inode numbers if necessary,
* in which case we need to leave the i8count at 1.
* Find a hole that the new entry will fit into, and copy
* the first part of the entries, the new entry, and the last part of
* the entries.
*/
/* ARGSUSED */
static void
xfs_dir2_sf_addname_hard(
xfs_da_args_t *args, /* operation arguments */
int objchange, /* changing inode number size */
int new_isize) /* new directory size */
{
int add_datasize; /* data size need for new ent */
char *buf; /* buffer for old */
xfs_inode_t *dp; /* incore directory inode */
int eof; /* reached end of old dir */
int nbytes; /* temp for byte copies */
xfs_dir2_data_aoff_t new_offset; /* next offset value */
xfs_dir2_data_aoff_t offset; /* current offset value */
int old_isize; /* previous di_size */
xfs_dir2_sf_entry_t *oldsfep; /* entry in original dir */
xfs_dir2_sf_t *oldsfp; /* original shortform dir */
xfs_dir2_sf_entry_t *sfep; /* entry in new dir */
xfs_dir2_sf_t *sfp; /* new shortform dir */
/*
* Copy the old directory to the stack buffer.
*/
dp = args->dp;
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
old_isize = (int)dp->i_d.di_size;
buf = kmem_alloc(old_isize, KM_SLEEP);
oldsfp = (xfs_dir2_sf_t *)buf;
memcpy(oldsfp, sfp, old_isize);
/*
* Loop over the old directory finding the place we're going
* to insert the new entry.
* If it's going to end up at the end then oldsfep will point there.
*/
for (offset = XFS_DIR2_DATA_FIRST_OFFSET,
oldsfep = xfs_dir2_sf_firstentry(oldsfp),
add_datasize = xfs_dir2_data_entsize(args->namelen),
eof = (char *)oldsfep == &buf[old_isize];
!eof;
offset = new_offset + xfs_dir2_data_entsize(oldsfep->namelen),
oldsfep = xfs_dir2_sf_nextentry(oldsfp, oldsfep),
eof = (char *)oldsfep == &buf[old_isize]) {
new_offset = xfs_dir2_sf_get_offset(oldsfep);
if (offset + add_datasize <= new_offset)
break;
}
/*
* Get rid of the old directory, then allocate space for
* the new one. We do this so xfs_idata_realloc won't copy
* the data.
*/
xfs_idata_realloc(dp, -old_isize, XFS_DATA_FORK);
xfs_idata_realloc(dp, new_isize, XFS_DATA_FORK);
/*
* Reset the pointer since the buffer was reallocated.
*/
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
/*
* Copy the first part of the directory, including the header.
*/
nbytes = (int)((char *)oldsfep - (char *)oldsfp);
memcpy(sfp, oldsfp, nbytes);
sfep = (xfs_dir2_sf_entry_t *)((char *)sfp + nbytes);
/*
* Fill in the new entry, and update the header counts.
*/
sfep->namelen = args->namelen;
xfs_dir2_sf_put_offset(sfep, offset);
memcpy(sfep->name, args->name, sfep->namelen);
xfs_dir2_sf_put_inumber(sfp, &args->inumber,
xfs_dir2_sf_inumberp(sfep));
sfp->hdr.count++;
#if XFS_BIG_INUMS
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && !objchange)
sfp->hdr.i8count++;
#endif
/*
* If there's more left to copy, do that.
*/
if (!eof) {
sfep = xfs_dir2_sf_nextentry(sfp, sfep);
memcpy(sfep, oldsfep, old_isize - nbytes);
}
kmem_free(buf);
dp->i_d.di_size = new_isize;
xfs_dir2_sf_check(args);
}
/*
* Decide if the new entry will fit at all.
* If it will fit, pick between adding the new entry to the end (easy)
* or somewhere else (hard).
* Return 0 (won't fit), 1 (easy), 2 (hard).
*/
/*ARGSUSED*/
static int /* pick result */
xfs_dir2_sf_addname_pick(
xfs_da_args_t *args, /* operation arguments */
int objchange, /* inode # size changes */
xfs_dir2_sf_entry_t **sfepp, /* out(1): new entry ptr */
xfs_dir2_data_aoff_t *offsetp) /* out(1): new offset */
{
xfs_inode_t *dp; /* incore directory inode */
int holefit; /* found hole it will fit in */
int i; /* entry number */
xfs_mount_t *mp; /* filesystem mount point */
xfs_dir2_data_aoff_t offset; /* data block offset */
xfs_dir2_sf_entry_t *sfep; /* shortform entry */
xfs_dir2_sf_t *sfp; /* shortform structure */
int size; /* entry's data size */
int used; /* data bytes used */
dp = args->dp;
mp = dp->i_mount;
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
size = xfs_dir2_data_entsize(args->namelen);
offset = XFS_DIR2_DATA_FIRST_OFFSET;
sfep = xfs_dir2_sf_firstentry(sfp);
holefit = 0;
/*
* Loop over sf entries.
* Keep track of data offset and whether we've seen a place
* to insert the new entry.
*/
for (i = 0; i < sfp->hdr.count; i++) {
if (!holefit)
holefit = offset + size <= xfs_dir2_sf_get_offset(sfep);
offset = xfs_dir2_sf_get_offset(sfep) +
xfs_dir2_data_entsize(sfep->namelen);
sfep = xfs_dir2_sf_nextentry(sfp, sfep);
}
/*
* Calculate data bytes used excluding the new entry, if this
* was a data block (block form directory).
*/
used = offset +
(sfp->hdr.count + 3) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
(uint)sizeof(xfs_dir2_block_tail_t);
/*
* If it won't fit in a block form then we can't insert it,
* we'll go back, convert to block, then try the insert and convert
* to leaf.
*/
if (used + (holefit ? 0 : size) > mp->m_dirblksize)
return 0;
/*
* If changing the inode number size, do it the hard way.
*/
#if XFS_BIG_INUMS
if (objchange) {
return 2;
}
#else
ASSERT(objchange == 0);
#endif
/*
* If it won't fit at the end then do it the hard way (use the hole).
*/
if (used + size > mp->m_dirblksize)
return 2;
/*
* Do it the easy way.
*/
*sfepp = sfep;
*offsetp = offset;
return 1;
}
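/*
 * Editor's note on the "used" computation above: count + 3 leaf
 * entries cover the existing entries plus the new one plus the "."
 * and ".." entries that exist only in block form. The total is
 * checked against m_dirblksize twice -- once allowing the new entry
 * to drop into a hole (holefit) and once for an append at the end.
 */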
#ifdef DEBUG
/*
* Check consistency of shortform directory, assert if bad.
*/
static void
xfs_dir2_sf_check(
xfs_da_args_t *args) /* operation arguments */
{
xfs_inode_t *dp; /* incore directory inode */
int i; /* entry number */
int i8count; /* number of big inode#s */
xfs_ino_t ino; /* entry inode number */
int offset; /* data offset */
xfs_dir2_sf_entry_t *sfep; /* shortform dir entry */
xfs_dir2_sf_t *sfp; /* shortform structure */
dp = args->dp;
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
offset = XFS_DIR2_DATA_FIRST_OFFSET;
ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp);
i < sfp->hdr.count;
i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
ASSERT(xfs_dir2_sf_get_offset(sfep) >= offset);
ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
offset =
xfs_dir2_sf_get_offset(sfep) +
xfs_dir2_data_entsize(sfep->namelen);
}
ASSERT(i8count == sfp->hdr.i8count);
ASSERT(XFS_BIG_INUMS || i8count == 0);
ASSERT((char *)sfep - (char *)sfp == dp->i_d.di_size);
ASSERT(offset +
(sfp->hdr.count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
(uint)sizeof(xfs_dir2_block_tail_t) <=
dp->i_mount->m_dirblksize);
}
#endif /* DEBUG */
/*
* Create a new (shortform) directory.
*/
int /* error, always 0 */
xfs_dir2_sf_create(
xfs_da_args_t *args, /* operation arguments */
xfs_ino_t pino) /* parent inode number */
{
xfs_inode_t *dp; /* incore directory inode */
int i8count; /* parent inode is an 8-byte number */
xfs_dir2_sf_t *sfp; /* shortform structure */
int size; /* directory size */
trace_xfs_dir2_sf_create(args);
dp = args->dp;
ASSERT(dp != NULL);
ASSERT(dp->i_d.di_size == 0);
/*
* If it's currently a zero-length extent file,
* convert it to local format.
*/
if (dp->i_d.di_format == XFS_DINODE_FMT_EXTENTS) {
dp->i_df.if_flags &= ~XFS_IFEXTENTS; /* just in case */
dp->i_d.di_format = XFS_DINODE_FMT_LOCAL;
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE);
dp->i_df.if_flags |= XFS_IFINLINE;
}
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
ASSERT(dp->i_df.if_bytes == 0);
i8count = pino > XFS_DIR2_MAX_SHORT_INUM;
size = xfs_dir2_sf_hdr_size(i8count);
/*
* Make a buffer for the data.
*/
xfs_idata_realloc(dp, size, XFS_DATA_FORK);
/*
* Fill in the header.
*/
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
sfp->hdr.i8count = i8count;
/*
* Now we can put in the inode number, since i8count is set.
*/
xfs_dir2_sf_put_inumber(sfp, &pino, &sfp->hdr.parent);
sfp->hdr.count = 0;
dp->i_d.di_size = size;
xfs_dir2_sf_check(args);
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
return 0;
}
int /* error */
xfs_dir2_sf_getdents(
xfs_inode_t *dp, /* incore directory inode */
void *dirent,
xfs_off_t *offset,
filldir_t filldir)
{
int i; /* shortform entry number */
xfs_mount_t *mp; /* filesystem mount point */
xfs_dir2_dataptr_t off; /* current entry's offset */
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
xfs_dir2_sf_t *sfp; /* shortform structure */
xfs_dir2_dataptr_t dot_offset;
xfs_dir2_dataptr_t dotdot_offset;
xfs_ino_t ino;
mp = dp->i_mount;
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
/*
* Give up if the directory is way too short.
*/
if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
ASSERT(XFS_FORCED_SHUTDOWN(mp));
return XFS_ERROR(EIO);
}
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
ASSERT(dp->i_df.if_u1.if_data != NULL);
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
/*
* If the block number in the offset is out of range, we're done.
*/
if (xfs_dir2_dataptr_to_db(mp, *offset) > mp->m_dirdatablk)
return 0;
/*
* Precalculate offsets for . and .. as we will always need them.
*
* XXX(hch): the second argument is sometimes 0 and sometimes
* mp->m_dirdatablk.
*/
dot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
XFS_DIR2_DATA_DOT_OFFSET);
dotdot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
XFS_DIR2_DATA_DOTDOT_OFFSET);
/*
* Put . entry unless we're starting past it.
*/
if (*offset <= dot_offset) {
if (filldir(dirent, ".", 1, dot_offset & 0x7fffffff, dp->i_ino, DT_DIR)) {
*offset = dot_offset & 0x7fffffff;
return 0;
}
}
/*
* Put .. entry unless we're starting past it.
*/
if (*offset <= dotdot_offset) {
ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
if (filldir(dirent, "..", 2, dotdot_offset & 0x7fffffff, ino, DT_DIR)) {
*offset = dotdot_offset & 0x7fffffff;
return 0;
}
}
/*
* Loop while there are more entries and filldir keeps accepting them.
*/
sfep = xfs_dir2_sf_firstentry(sfp);
for (i = 0; i < sfp->hdr.count; i++) {
off = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
xfs_dir2_sf_get_offset(sfep));
if (*offset > off) {
sfep = xfs_dir2_sf_nextentry(sfp, sfep);
continue;
}
ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
if (filldir(dirent, (char *)sfep->name, sfep->namelen,
off & 0x7fffffff, ino, DT_UNKNOWN)) {
*offset = off & 0x7fffffff;
return 0;
}
sfep = xfs_dir2_sf_nextentry(sfp, sfep);
}
*offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
0x7fffffff;
return 0;
}
/*
* Lookup an entry in a shortform directory.
* Returns EEXIST if found, ENOENT if not found.
*/
int /* error */
xfs_dir2_sf_lookup(
xfs_da_args_t *args) /* operation arguments */
{
xfs_inode_t *dp; /* incore directory inode */
int i; /* entry index */
int error;
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
xfs_dir2_sf_t *sfp; /* shortform structure */
enum xfs_dacmp cmp; /* comparison result */
xfs_dir2_sf_entry_t *ci_sfep; /* case-insens. entry */
trace_xfs_dir2_sf_lookup(args);
xfs_dir2_sf_check(args);
dp = args->dp;
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
/*
* Bail out if the directory is way too short.
*/
if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
return XFS_ERROR(EIO);
}
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
ASSERT(dp->i_df.if_u1.if_data != NULL);
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
/*
* Special case for .
*/
if (args->namelen == 1 && args->name[0] == '.') {
args->inumber = dp->i_ino;
args->cmpresult = XFS_CMP_EXACT;
return XFS_ERROR(EEXIST);
}
/*
* Special case for ..
*/
if (args->namelen == 2 &&
args->name[0] == '.' && args->name[1] == '.') {
args->inumber = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
args->cmpresult = XFS_CMP_EXACT;
return XFS_ERROR(EEXIST);
}
/*
* Loop over all the entries trying to match ours.
*/
ci_sfep = NULL;
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->hdr.count;
i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
/*
* Compare name and if it's an exact match, return the inode
* number. If it's the first case-insensitive match, store the
* inode number and continue looking for an exact match.
*/
cmp = dp->i_mount->m_dirnameops->compname(args, sfep->name,
sfep->namelen);
if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) {
args->cmpresult = cmp;
args->inumber = xfs_dir2_sf_get_inumber(sfp,
xfs_dir2_sf_inumberp(sfep));
if (cmp == XFS_CMP_EXACT)
return XFS_ERROR(EEXIST);
ci_sfep = sfep;
}
}
ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
/*
* Here, we can only be doing a lookup (not a rename or replace).
* If a case-insensitive match was not found, return ENOENT.
*/
if (!ci_sfep)
return XFS_ERROR(ENOENT);
/* otherwise process the CI match as required by the caller */
error = xfs_dir_cilookup_result(args, ci_sfep->name, ci_sfep->namelen);
return XFS_ERROR(error);
}
/*
* Remove an entry from a shortform directory.
*/
int /* error */
xfs_dir2_sf_removename(
xfs_da_args_t *args)
{
int byteoff; /* offset of removed entry */
xfs_inode_t *dp; /* incore directory inode */
int entsize; /* this entry's size */
int i; /* shortform entry index */
int newsize; /* new inode size */
int oldsize; /* old inode size */
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
xfs_dir2_sf_t *sfp; /* shortform structure */
trace_xfs_dir2_sf_removename(args);
dp = args->dp;
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
oldsize = (int)dp->i_d.di_size;
/*
* Bail out if the directory is way too short.
*/
if (oldsize < offsetof(xfs_dir2_sf_hdr_t, parent)) {
ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
return XFS_ERROR(EIO);
}
ASSERT(dp->i_df.if_bytes == oldsize);
ASSERT(dp->i_df.if_u1.if_data != NULL);
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
ASSERT(oldsize >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
/*
* Loop over the old directory entries.
* Find the one we're deleting.
*/
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->hdr.count;
i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
if (xfs_da_compname(args, sfep->name, sfep->namelen) ==
XFS_CMP_EXACT) {
ASSERT(xfs_dir2_sf_get_inumber(sfp,
xfs_dir2_sf_inumberp(sfep)) ==
args->inumber);
break;
}
}
/*
* Didn't find it.
*/
if (i == sfp->hdr.count)
return XFS_ERROR(ENOENT);
/*
* Calculate sizes.
*/
byteoff = (int)((char *)sfep - (char *)sfp);
entsize = xfs_dir2_sf_entsize_byname(sfp, args->namelen);
newsize = oldsize - entsize;
/*
 * Copy the part, if any, after the removed entry, sliding it down.
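 * For example, removing the second of three entries slides the
 * third entry down by entsize bytes so the fork stays contiguous.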
*/
if (byteoff + entsize < oldsize)
memmove((char *)sfp + byteoff, (char *)sfp + byteoff + entsize,
oldsize - (byteoff + entsize));
/*
* Fix up the header and file size.
*/
sfp->hdr.count--;
dp->i_d.di_size = newsize;
/*
* Reallocate, making it smaller.
*/
xfs_idata_realloc(dp, newsize - oldsize, XFS_DATA_FORK);
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
#if XFS_BIG_INUMS
/*
* Are we changing inode number size?
*/
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM) {
if (sfp->hdr.i8count == 1)
xfs_dir2_sf_toino4(args);
else
sfp->hdr.i8count--;
}
#endif
xfs_dir2_sf_check(args);
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
return 0;
}
/*
* Replace the inode number of an entry in a shortform directory.
*/
int /* error */
xfs_dir2_sf_replace(
xfs_da_args_t *args) /* operation arguments */
{
xfs_inode_t *dp; /* incore directory inode */
int i; /* entry index */
#if XFS_BIG_INUMS || defined(DEBUG)
xfs_ino_t ino=0; /* entry old inode number */
#endif
#if XFS_BIG_INUMS
int i8elevated; /* sf_toino8 set i8count=1 */
#endif
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
xfs_dir2_sf_t *sfp; /* shortform structure */
trace_xfs_dir2_sf_replace(args);
dp = args->dp;
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
/*
* Bail out if the shortform directory is way too small.
*/
if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
return XFS_ERROR(EIO);
}
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
ASSERT(dp->i_df.if_u1.if_data != NULL);
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
#if XFS_BIG_INUMS
/*
 * The new inode number is large, so we need to convert to 8-byte inodes.
*/
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->hdr.i8count == 0) {
int error; /* error return value */
int newsize; /* new inode size */
newsize =
dp->i_df.if_bytes +
(sfp->hdr.count + 1) *
((uint)sizeof(xfs_dir2_ino8_t) -
(uint)sizeof(xfs_dir2_ino4_t));
/*
* Won't fit as shortform, convert to block then do replace.
*/
if (newsize > XFS_IFORK_DSIZE(dp)) {
error = xfs_dir2_sf_to_block(args);
if (error) {
return error;
}
return xfs_dir2_block_replace(args);
}
/*
* Still fits, convert to 8-byte now.
*/
xfs_dir2_sf_toino8(args);
i8elevated = 1;
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
} else
i8elevated = 0;
#endif
ASSERT(args->namelen != 1 || args->name[0] != '.');
/*
* Replace ..'s entry.
*/
if (args->namelen == 2 &&
args->name[0] == '.' && args->name[1] == '.') {
#if XFS_BIG_INUMS || defined(DEBUG)
ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
ASSERT(args->inumber != ino);
#endif
xfs_dir2_sf_put_inumber(sfp, &args->inumber, &sfp->hdr.parent);
}
/*
* Normal entry, look for the name.
*/
else {
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp);
i < sfp->hdr.count;
i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
if (xfs_da_compname(args, sfep->name, sfep->namelen) ==
XFS_CMP_EXACT) {
#if XFS_BIG_INUMS || defined(DEBUG)
ino = xfs_dir2_sf_get_inumber(sfp,
xfs_dir2_sf_inumberp(sfep));
ASSERT(args->inumber != ino);
#endif
xfs_dir2_sf_put_inumber(sfp, &args->inumber,
xfs_dir2_sf_inumberp(sfep));
break;
}
}
/*
* Didn't find it.
*/
if (i == sfp->hdr.count) {
ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
#if XFS_BIG_INUMS
if (i8elevated)
xfs_dir2_sf_toino4(args);
#endif
return XFS_ERROR(ENOENT);
}
}
#if XFS_BIG_INUMS
/*
* See if the old number was large, the new number is small.
*/
if (ino > XFS_DIR2_MAX_SHORT_INUM &&
args->inumber <= XFS_DIR2_MAX_SHORT_INUM) {
/*
 * And the old i8count was one, so we need to convert back to 4-byte inodes.
*/
if (sfp->hdr.i8count == 1)
xfs_dir2_sf_toino4(args);
else
sfp->hdr.i8count--;
}
/*
* See if the old number was small, the new number is large.
*/
if (ino <= XFS_DIR2_MAX_SHORT_INUM &&
args->inumber > XFS_DIR2_MAX_SHORT_INUM) {
/*
* add to the i8count unless we just converted to 8-byte
* inodes (which does an implied i8count = 1)
*/
ASSERT(sfp->hdr.i8count != 0);
if (!i8elevated)
sfp->hdr.i8count++;
}
#endif
xfs_dir2_sf_check(args);
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA);
return 0;
}
#if XFS_BIG_INUMS
/*
* Convert from 8-byte inode numbers to 4-byte inode numbers.
* The last 8-byte inode number is gone, but the count is still 1.
*/
static void
xfs_dir2_sf_toino4(
xfs_da_args_t *args) /* operation arguments */
{
char *buf; /* old dir's buffer */
xfs_inode_t *dp; /* incore directory inode */
int i; /* entry index */
xfs_ino_t ino; /* entry inode number */
int newsize; /* new inode size */
xfs_dir2_sf_entry_t *oldsfep; /* old sf entry */
xfs_dir2_sf_t *oldsfp; /* old sf directory */
int oldsize; /* old inode size */
xfs_dir2_sf_entry_t *sfep; /* new sf entry */
xfs_dir2_sf_t *sfp; /* new sf directory */
trace_xfs_dir2_sf_toino4(args);
dp = args->dp;
/*
* Copy the old directory to the buffer.
* Then nuke it from the inode, and add the new buffer to the inode.
* Don't want xfs_idata_realloc copying the data here.
*/
oldsize = dp->i_df.if_bytes;
buf = kmem_alloc(oldsize, KM_SLEEP);
oldsfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
ASSERT(oldsfp->hdr.i8count == 1);
memcpy(buf, oldsfp, oldsize);
/*
* Compute the new inode size.
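 * Every inode number (one per entry, plus the parent) shrinks by
 * sizeof(xfs_dir2_ino8_t) - sizeof(xfs_dir2_ino4_t) = 4 bytes,
 * which is where the (count + 1) factor below comes from.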
*/
newsize =
oldsize -
(oldsfp->hdr.count + 1) *
((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t));
xfs_idata_realloc(dp, -oldsize, XFS_DATA_FORK);
xfs_idata_realloc(dp, newsize, XFS_DATA_FORK);
/*
* Reset our pointers, the data has moved.
*/
oldsfp = (xfs_dir2_sf_t *)buf;
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
/*
* Fill in the new header.
*/
sfp->hdr.count = oldsfp->hdr.count;
sfp->hdr.i8count = 0;
ino = xfs_dir2_sf_get_inumber(oldsfp, &oldsfp->hdr.parent);
xfs_dir2_sf_put_inumber(sfp, &ino, &sfp->hdr.parent);
/*
* Copy the entries field by field.
*/
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp),
oldsfep = xfs_dir2_sf_firstentry(oldsfp);
i < sfp->hdr.count;
i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep),
oldsfep = xfs_dir2_sf_nextentry(oldsfp, oldsfep)) {
sfep->namelen = oldsfep->namelen;
sfep->offset = oldsfep->offset;
memcpy(sfep->name, oldsfep->name, sfep->namelen);
ino = xfs_dir2_sf_get_inumber(oldsfp,
xfs_dir2_sf_inumberp(oldsfep));
xfs_dir2_sf_put_inumber(sfp, &ino, xfs_dir2_sf_inumberp(sfep));
}
/*
* Clean up the inode.
*/
kmem_free(buf);
dp->i_d.di_size = newsize;
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
}
/*
* Convert from 4-byte inode numbers to 8-byte inode numbers.
 * The new 8-byte inode number is not there yet; we leave with
 * i8count set to 1 but no corresponding 8-byte entry.
*/
static void
xfs_dir2_sf_toino8(
xfs_da_args_t *args) /* operation arguments */
{
char *buf; /* old dir's buffer */
xfs_inode_t *dp; /* incore directory inode */
int i; /* entry index */
xfs_ino_t ino; /* entry inode number */
int newsize; /* new inode size */
xfs_dir2_sf_entry_t *oldsfep; /* old sf entry */
xfs_dir2_sf_t *oldsfp; /* old sf directory */
int oldsize; /* old inode size */
xfs_dir2_sf_entry_t *sfep; /* new sf entry */
xfs_dir2_sf_t *sfp; /* new sf directory */
trace_xfs_dir2_sf_toino8(args);
dp = args->dp;
/*
* Copy the old directory to the buffer.
* Then nuke it from the inode, and add the new buffer to the inode.
* Don't want xfs_idata_realloc copying the data here.
*/
oldsize = dp->i_df.if_bytes;
buf = kmem_alloc(oldsize, KM_SLEEP);
oldsfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
ASSERT(oldsfp->hdr.i8count == 0);
memcpy(buf, oldsfp, oldsize);
/*
* Compute the new inode size.
*/
newsize =
oldsize +
(oldsfp->hdr.count + 1) *
((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t));
xfs_idata_realloc(dp, -oldsize, XFS_DATA_FORK);
xfs_idata_realloc(dp, newsize, XFS_DATA_FORK);
/*
* Reset our pointers, the data has moved.
*/
oldsfp = (xfs_dir2_sf_t *)buf;
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
/*
* Fill in the new header.
*/
sfp->hdr.count = oldsfp->hdr.count;
sfp->hdr.i8count = 1;
ino = xfs_dir2_sf_get_inumber(oldsfp, &oldsfp->hdr.parent);
xfs_dir2_sf_put_inumber(sfp, &ino, &sfp->hdr.parent);
/*
* Copy the entries field by field.
*/
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp),
oldsfep = xfs_dir2_sf_firstentry(oldsfp);
i < sfp->hdr.count;
i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep),
oldsfep = xfs_dir2_sf_nextentry(oldsfp, oldsfep)) {
sfep->namelen = oldsfep->namelen;
sfep->offset = oldsfep->offset;
memcpy(sfep->name, oldsfep->name, sfep->namelen);
ino = xfs_dir2_sf_get_inumber(oldsfp,
xfs_dir2_sf_inumberp(oldsfep));
xfs_dir2_sf_put_inumber(sfp, &ino, xfs_dir2_sf_inumberp(sfep));
}
/*
* Clean up the inode.
*/
kmem_free(buf);
dp->i_d.di_size = newsize;
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
}
#endif /* XFS_BIG_INUMS */
| gpl-2.0 |
digsig-ng/linux-digsig | drivers/staging/wlan-ng/p80211conv.c | 2821 | 19789 | /* src/p80211/p80211conv.c
*
* Ether/802.11 conversions and packet buffer routines
*
* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
* --------------------------------------------------------------------
*
* linux-wlan
*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* Alternatively, the contents of this file may be used under the
* terms of the GNU Public License version 2 (the "GPL"), in which
* case the provisions of the GPL are applicable instead of the
* above. If you wish to allow the use of your version of this file
* only under the terms of the GPL and not to allow others to use
* your version of this file under the MPL, indicate your decision
* by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL. If you do not delete
* the provisions above, a recipient may use your version of this
* file under either the MPL or the GPL.
*
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
* made directly to:
*
* AbsoluteValue Systems Inc.
* info@linux-wlan.com
* http://www.linux-wlan.com
*
* --------------------------------------------------------------------
*
* Portions of the development of this software were funded by
* Intersil Corporation as part of PRISM(R) chipset product development.
*
* --------------------------------------------------------------------
*
* This file defines the functions that perform Ethernet to/from
* 802.11 frame conversions.
*
* --------------------------------------------------------------------
*
*================================================================ */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/wireless.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/byteorder/generic.h>
#include <asm/byteorder.h>
#include "p80211types.h"
#include "p80211hdr.h"
#include "p80211conv.h"
#include "p80211mgmt.h"
#include "p80211msg.h"
#include "p80211netdev.h"
#include "p80211ioctl.h"
#include "p80211req.h"
static u8 oui_rfc1042[] = { 0x00, 0x00, 0x00 };
static u8 oui_8021h[] = { 0x00, 0x00, 0xf8 };
/*----------------------------------------------------------------
* p80211pb_ether_to_80211
*
* Uses the contents of the ether frame and the etherconv setting
* to build the elements of the 802.11 frame.
*
* We don't actually set
* up the frame header here. That's the MAC's job. We're only handling
* conversion of DIXII or 802.3+LLC frames to something that works
* with 802.11.
*
* Note -- 802.11 header is NOT part of the skb. Likewise, the 802.11
* FCS is also not present and will need to be added elsewhere.
*
* Arguments:
* ethconv Conversion type to perform
* skb skbuff containing the ether frame
* p80211_hdr 802.11 header
*
* Returns:
* 0 on success, non-zero otherwise
*
* Call context:
* May be called in interrupt or non-interrupt context
----------------------------------------------------------------*/
int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv,
struct sk_buff *skb, union p80211_hdr *p80211_hdr,
struct p80211_metawep *p80211_wep)
{
u16 fc;
u16 proto;
struct wlan_ethhdr e_hdr;
struct wlan_llc *e_llc;
struct wlan_snap *e_snap;
int foo;
memcpy(&e_hdr, skb->data, sizeof(e_hdr));
if (skb->len <= 0) {
pr_debug("zero-length skb!\n");
return 1;
}
if (ethconv == WLAN_ETHCONV_ENCAP) { /* simplest case */
pr_debug("ENCAP len: %d\n", skb->len);
/* here, we don't care what kind of ether frm. Just stick it */
/* in the 80211 payload */
/* which is to say, leave the skb alone. */
} else {
/* step 1: classify ether frame, DIX or 802.3? */
proto = ntohs(e_hdr.type);
if (proto <= 1500) {
pr_debug("802.3 len: %d\n", skb->len);
/* codes <= 1500 reserved for 802.3 lengths */
/* it's 802.3, pass ether payload unchanged, */
/* trim off ethernet header */
skb_pull(skb, WLAN_ETHHDR_LEN);
/* leave off any PAD octets. */
skb_trim(skb, proto);
} else {
pr_debug("DIXII len: %d\n", skb->len);
/* it's DIXII, time for some conversion */
/* trim off ethernet header */
skb_pull(skb, WLAN_ETHHDR_LEN);
/* tack on SNAP */
e_snap =
(struct wlan_snap *) skb_push(skb,
sizeof(struct wlan_snap));
e_snap->type = htons(proto);
if (ethconv == WLAN_ETHCONV_8021h
&& p80211_stt_findproto(proto)) {
memcpy(e_snap->oui, oui_8021h,
WLAN_IEEE_OUI_LEN);
} else {
memcpy(e_snap->oui, oui_rfc1042,
WLAN_IEEE_OUI_LEN);
}
/* tack on llc */
e_llc =
(struct wlan_llc *) skb_push(skb,
sizeof(struct wlan_llc));
e_llc->dsap = 0xAA; /* SNAP, see IEEE 802 */
e_llc->ssap = 0xAA;
e_llc->ctl = 0x03;
}
}
/* Set up the 802.11 header */
/* It's a data frame */
fc = cpu_to_le16(WLAN_SET_FC_FTYPE(WLAN_FTYPE_DATA) |
WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_DATAONLY));
switch (wlandev->macmode) {
case WLAN_MACMODE_IBSS_STA:
memcpy(p80211_hdr->a3.a1, &e_hdr.daddr, ETH_ALEN);
memcpy(p80211_hdr->a3.a2, wlandev->netdev->dev_addr, ETH_ALEN);
memcpy(p80211_hdr->a3.a3, wlandev->bssid, ETH_ALEN);
break;
case WLAN_MACMODE_ESS_STA:
fc |= cpu_to_le16(WLAN_SET_FC_TODS(1));
memcpy(p80211_hdr->a3.a1, wlandev->bssid, ETH_ALEN);
memcpy(p80211_hdr->a3.a2, wlandev->netdev->dev_addr, ETH_ALEN);
memcpy(p80211_hdr->a3.a3, &e_hdr.daddr, ETH_ALEN);
break;
case WLAN_MACMODE_ESS_AP:
fc |= cpu_to_le16(WLAN_SET_FC_FROMDS(1));
memcpy(p80211_hdr->a3.a1, &e_hdr.daddr, ETH_ALEN);
memcpy(p80211_hdr->a3.a2, wlandev->bssid, ETH_ALEN);
memcpy(p80211_hdr->a3.a3, &e_hdr.saddr, ETH_ALEN);
break;
default:
printk(KERN_ERR
"Error: Converting eth to wlan in unknown mode.\n");
return 1;
break;
}
p80211_wep->data = NULL;
if ((wlandev->hostwep & HOSTWEP_PRIVACYINVOKED)
&& (wlandev->hostwep & HOSTWEP_ENCRYPT)) {
/* XXXX need to pick keynum other than default? */
p80211_wep->data = kmalloc(skb->len, GFP_ATOMIC);
foo = wep_encrypt(wlandev, skb->data, p80211_wep->data,
skb->len,
(wlandev->hostwep & HOSTWEP_DEFAULTKEY_MASK),
p80211_wep->iv, p80211_wep->icv);
if (foo) {
printk(KERN_WARNING
"Host en-WEP failed, dropping frame (%d).\n",
foo);
return 2;
}
fc |= cpu_to_le16(WLAN_SET_FC_ISWEP(1));
}
/* skb->nh.raw = skb->data; */
p80211_hdr->a3.fc = fc;
p80211_hdr->a3.dur = 0;
p80211_hdr->a3.seq = 0;
return 0;
}
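/*
 * Illustrative example (not from the original sources): a DIX frame
 * carrying IPv4 (type 0x0800) has its 14-byte Ethernet header
 * stripped and gains an 8-byte LLC/SNAP prefix, so the resulting
 * 802.11 payload starts with AA AA 03 00 00 00 08 00 followed by
 * the untouched IP packet.
 */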
/* jkriegl: from orinoco, modified */
static void orinoco_spy_gather(wlandevice_t *wlandev, char *mac,
struct p80211_rxmeta *rxmeta)
{
int i;
/* Gather wireless spy statistics: for each packet, compare the
 * source address with our list, and on a match, record the stats... */
for (i = 0; i < wlandev->spy_number; i++) {
if (!memcmp(wlandev->spy_address[i], mac, ETH_ALEN)) {
memcpy(wlandev->spy_address[i], mac, ETH_ALEN);
wlandev->spy_stat[i].level = rxmeta->signal;
wlandev->spy_stat[i].noise = rxmeta->noise;
wlandev->spy_stat[i].qual =
(rxmeta->signal >
rxmeta->noise) ? (rxmeta->signal -
rxmeta->noise) : 0;
wlandev->spy_stat[i].updated = 0x7;
}
}
}
/*----------------------------------------------------------------
* p80211pb_80211_to_ether
*
* Uses the contents of a received 802.11 frame and the etherconv
* setting to build an ether frame.
*
* This function extracts the src and dest address from the 802.11
* frame to use in the construction of the eth frame.
*
* Arguments:
* ethconv Conversion type to perform
* skb Packet buffer containing the 802.11 frame
*
* Returns:
* 0 on success, non-zero otherwise
*
* Call context:
* May be called in interrupt or non-interrupt context
----------------------------------------------------------------*/
int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
struct sk_buff *skb)
{
netdevice_t *netdev = wlandev->netdev;
u16 fc;
unsigned int payload_length;
unsigned int payload_offset;
u8 daddr[WLAN_ETHADDR_LEN];
u8 saddr[WLAN_ETHADDR_LEN];
union p80211_hdr *w_hdr;
struct wlan_ethhdr *e_hdr;
struct wlan_llc *e_llc;
struct wlan_snap *e_snap;
int foo;
payload_length = skb->len - WLAN_HDR_A3_LEN - WLAN_CRC_LEN;
payload_offset = WLAN_HDR_A3_LEN;
w_hdr = (union p80211_hdr *) skb->data;
/* setup some vars for convenience */
fc = le16_to_cpu(w_hdr->a3.fc);
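	/*
	 * Address fields by ToDS/FromDS (3-address format unless noted):
	 *   ToDS=0, FromDS=0 (IBSS):           a1 = DA, a2 = SA
	 *   ToDS=0, FromDS=1 (from the AP):    a1 = DA, a3 = SA
	 *   ToDS=1, FromDS=0 (to the AP):      a3 = DA, a2 = SA
	 *   ToDS=1, FromDS=1 (WDS, 4-address): a3 = DA, a4 = SA
	 */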
if ((WLAN_GET_FC_TODS(fc) == 0) && (WLAN_GET_FC_FROMDS(fc) == 0)) {
memcpy(daddr, w_hdr->a3.a1, WLAN_ETHADDR_LEN);
memcpy(saddr, w_hdr->a3.a2, WLAN_ETHADDR_LEN);
} else if ((WLAN_GET_FC_TODS(fc) == 0)
&& (WLAN_GET_FC_FROMDS(fc) == 1)) {
memcpy(daddr, w_hdr->a3.a1, WLAN_ETHADDR_LEN);
memcpy(saddr, w_hdr->a3.a3, WLAN_ETHADDR_LEN);
} else if ((WLAN_GET_FC_TODS(fc) == 1)
&& (WLAN_GET_FC_FROMDS(fc) == 0)) {
memcpy(daddr, w_hdr->a3.a3, WLAN_ETHADDR_LEN);
memcpy(saddr, w_hdr->a3.a2, WLAN_ETHADDR_LEN);
} else {
payload_offset = WLAN_HDR_A4_LEN;
if (payload_length < WLAN_HDR_A4_LEN - WLAN_HDR_A3_LEN) {
printk(KERN_ERR "A4 frame too short!\n");
return 1;
}
payload_length -= (WLAN_HDR_A4_LEN - WLAN_HDR_A3_LEN);
memcpy(daddr, w_hdr->a4.a3, WLAN_ETHADDR_LEN);
memcpy(saddr, w_hdr->a4.a4, WLAN_ETHADDR_LEN);
}
/* perform de-wep if necessary.. */
if ((wlandev->hostwep & HOSTWEP_PRIVACYINVOKED) && WLAN_GET_FC_ISWEP(fc)
&& (wlandev->hostwep & HOSTWEP_DECRYPT)) {
if (payload_length <= 8) {
printk(KERN_ERR "WEP frame too short (%u).\n",
skb->len);
return 1;
}
foo = wep_decrypt(wlandev, skb->data + payload_offset + 4,
payload_length - 8, -1,
skb->data + payload_offset,
skb->data + payload_offset +
payload_length - 4);
if (foo) {
/* de-wep failed, drop skb. */
pr_debug("Host de-WEP failed, dropping frame (%d).\n",
foo);
wlandev->rx.decrypt_err++;
return 2;
}
/* subtract the IV+ICV length off the payload */
payload_length -= 8;
/* chop off the IV */
skb_pull(skb, 4);
/* chop off the ICV. */
skb_trim(skb, skb->len - 4);
wlandev->rx.decrypt++;
}
e_hdr = (struct wlan_ethhdr *) (skb->data + payload_offset);
e_llc = (struct wlan_llc *) (skb->data + payload_offset);
e_snap =
(struct wlan_snap *) (skb->data + payload_offset +
sizeof(struct wlan_llc));
/* Test for the various encodings */
if ((payload_length >= sizeof(struct wlan_ethhdr)) &&
(e_llc->dsap != 0xaa || e_llc->ssap != 0xaa) &&
((memcmp(daddr, e_hdr->daddr, WLAN_ETHADDR_LEN) == 0) ||
(memcmp(saddr, e_hdr->saddr, WLAN_ETHADDR_LEN) == 0))) {
pr_debug("802.3 ENCAP len: %d\n", payload_length);
/* 802.3 Encapsulated */
/* Test for an overlength frame */
if (payload_length > (netdev->mtu + WLAN_ETHHDR_LEN)) {
/* A bogus length ethfrm has been encap'd. */
/* Is someone trying an oflow attack? */
printk(KERN_ERR "ENCAP frame too large (%d > %d)\n",
payload_length, netdev->mtu + WLAN_ETHHDR_LEN);
return 1;
}
/* Chop off the 802.11 header. it's already sane. */
skb_pull(skb, payload_offset);
/* chop off the 802.11 CRC */
skb_trim(skb, skb->len - WLAN_CRC_LEN);
} else if ((payload_length >= sizeof(struct wlan_llc) +
sizeof(struct wlan_snap))
&& (e_llc->dsap == 0xaa)
&& (e_llc->ssap == 0xaa)
&& (e_llc->ctl == 0x03)
&&
(((memcmp(e_snap->oui, oui_rfc1042, WLAN_IEEE_OUI_LEN) == 0)
&& (ethconv == WLAN_ETHCONV_8021h)
&& (p80211_stt_findproto(le16_to_cpu(e_snap->type))))
|| (memcmp(e_snap->oui, oui_rfc1042, WLAN_IEEE_OUI_LEN) !=
0))) {
pr_debug("SNAP+RFC1042 len: %d\n", payload_length);
/* it's a SNAP + RFC1042 frame && protocol is in STT */
/* build 802.3 + RFC1042 */
/* Test for an overlength frame */
if (payload_length > netdev->mtu) {
/* A bogus length ethfrm has been sent. */
/* Is someone trying an oflow attack? */
printk(KERN_ERR "SNAP frame too large (%d > %d)\n",
payload_length, netdev->mtu);
return 1;
}
/* chop 802.11 header from skb. */
skb_pull(skb, payload_offset);
/* create 802.3 header at beginning of skb. */
e_hdr = (struct wlan_ethhdr *) skb_push(skb, WLAN_ETHHDR_LEN);
memcpy(e_hdr->daddr, daddr, WLAN_ETHADDR_LEN);
memcpy(e_hdr->saddr, saddr, WLAN_ETHADDR_LEN);
e_hdr->type = htons(payload_length);
/* chop off the 802.11 CRC */
skb_trim(skb, skb->len - WLAN_CRC_LEN);
} else if ((payload_length >= sizeof(struct wlan_llc) +
sizeof(struct wlan_snap))
&& (e_llc->dsap == 0xaa)
&& (e_llc->ssap == 0xaa)
&& (e_llc->ctl == 0x03)) {
pr_debug("802.1h/RFC1042 len: %d\n", payload_length);
/* it's an 802.1h frame || (an RFC1042 && protocol not in STT)
build a DIXII + RFC894 */
/* Test for an overlength frame */
if ((payload_length - sizeof(struct wlan_llc) -
sizeof(struct wlan_snap))
> netdev->mtu) {
/* A bogus length ethfrm has been sent. */
/* Is someone trying an oflow attack? */
printk(KERN_ERR "DIXII frame too large (%ld > %d)\n",
(long int)(payload_length -
sizeof(struct wlan_llc) -
sizeof(struct wlan_snap)), netdev->mtu);
return 1;
}
/* chop 802.11 header from skb. */
skb_pull(skb, payload_offset);
/* chop llc header from skb. */
skb_pull(skb, sizeof(struct wlan_llc));
/* chop snap header from skb. */
skb_pull(skb, sizeof(struct wlan_snap));
/* create 802.3 header at beginning of skb. */
e_hdr = (struct wlan_ethhdr *) skb_push(skb, WLAN_ETHHDR_LEN);
e_hdr->type = e_snap->type;
memcpy(e_hdr->daddr, daddr, WLAN_ETHADDR_LEN);
memcpy(e_hdr->saddr, saddr, WLAN_ETHADDR_LEN);
/* chop off the 802.11 CRC */
skb_trim(skb, skb->len - WLAN_CRC_LEN);
} else {
pr_debug("NON-ENCAP len: %d\n", payload_length);
/* any NON-ENCAP */
/* it's a generic 80211+LLC or IPX 'Raw 802.3' */
/* build an 802.3 frame */
/* allocate space and setup hostbuf */
/* Test for an overlength frame */
if (payload_length > netdev->mtu) {
/* A bogus length ethfrm has been sent. */
/* Is someone trying an oflow attack? */
printk(KERN_ERR "OTHER frame too large (%d > %d)\n",
payload_length, netdev->mtu);
return 1;
}
/* Chop off the 802.11 header. */
skb_pull(skb, payload_offset);
/* create 802.3 header at beginning of skb. */
e_hdr = (struct wlan_ethhdr *) skb_push(skb, WLAN_ETHHDR_LEN);
memcpy(e_hdr->daddr, daddr, WLAN_ETHADDR_LEN);
memcpy(e_hdr->saddr, saddr, WLAN_ETHADDR_LEN);
e_hdr->type = htons(payload_length);
/* chop off the 802.11 CRC */
skb_trim(skb, skb->len - WLAN_CRC_LEN);
}
/*
* Note that eth_type_trans() expects an skb w/ skb->data pointing
* at the MAC header, it then sets the following skb members:
* skb->mac_header,
* skb->data, and
* skb->pkt_type.
* It then _returns_ the value that _we're_ supposed to stuff in
* skb->protocol. This is nuts.
*/
skb->protocol = eth_type_trans(skb, netdev);
/* jkriegl: process signal and noise as set in hfa384x_int_rx() */
/* jkriegl: only process signal/noise if requested by iwspy */
if (wlandev->spy_number)
orinoco_spy_gather(wlandev, eth_hdr(skb)->h_source,
P80211SKB_RXMETA(skb));
/* Free the metadata */
p80211skb_rxmeta_detach(skb);
return 0;
}
/*----------------------------------------------------------------
* p80211_stt_findproto
*
* Searches the 802.1h Selective Translation Table for a given
* protocol.
*
* Arguments:
 * proto		protocol number (in host order) to search for.
*
* Returns:
* 1 - if the table is empty or a match is found.
* 0 - if the table is non-empty and a match is not found.
*
* Call context:
* May be called in interrupt or non-interrupt context
----------------------------------------------------------------*/
int p80211_stt_findproto(u16 proto)
{
/* Always return found for now. This is the behavior used by the */
/* Zoom Win95 driver when 802.1h mode is selected */
	/* TODO: If necessary, add an actual search; we'll probably
	   need this to match the CMAC's way of doing things.
	   Some testing is needed to confirm.
*/
if (proto == 0x80f3) /* APPLETALK */
return 1;
return 0;
}
/*----------------------------------------------------------------
* p80211skb_rxmeta_detach
*
* Disconnects the frmmeta and rxmeta from an skb.
*
* Arguments:
* wlandev The wlandev this skb belongs to.
 * skb		The skb we're detaching the metadata from.
*
* Returns:
 * nothing
*
* Call context:
* May be called in interrupt or non-interrupt context
----------------------------------------------------------------*/
void p80211skb_rxmeta_detach(struct sk_buff *skb)
{
struct p80211_rxmeta *rxmeta;
struct p80211_frmmeta *frmmeta;
/* Sanity checks */
if (skb == NULL) { /* bad skb */
pr_debug("Called w/ null skb.\n");
return;
}
frmmeta = P80211SKB_FRMMETA(skb);
if (frmmeta == NULL) { /* no magic */
pr_debug("Called w/ bad frmmeta magic.\n");
return;
}
rxmeta = frmmeta->rx;
if (rxmeta == NULL) { /* bad meta ptr */
pr_debug("Called w/ bad rxmeta ptr.\n");
return;
}
/* Free rxmeta */
kfree(rxmeta);
/* Clear skb->cb */
memset(skb->cb, 0, sizeof(skb->cb));
}
/*----------------------------------------------------------------
* p80211skb_rxmeta_attach
*
* Allocates a p80211rxmeta structure, initializes it, and attaches
* it to an skb.
*
* Arguments:
* wlandev The wlandev this skb belongs to.
* skb The skb we're attaching to.
*
* Returns:
* 0 on success, non-zero otherwise
*
* Call context:
* May be called in interrupt or non-interrupt context
----------------------------------------------------------------*/
int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
{
int result = 0;
struct p80211_rxmeta *rxmeta;
struct p80211_frmmeta *frmmeta;
/* If these already have metadata, we error out! */
if (P80211SKB_RXMETA(skb) != NULL) {
printk(KERN_ERR "%s: RXmeta already attached!\n",
wlandev->name);
result = 0;
goto exit;
}
/* Allocate the rxmeta */
rxmeta = kzalloc(sizeof(struct p80211_rxmeta), GFP_ATOMIC);
if (rxmeta == NULL) {
printk(KERN_ERR "%s: Failed to allocate rxmeta.\n",
wlandev->name);
result = 1;
goto exit;
}
/* Initialize the rxmeta */
rxmeta->wlandev = wlandev;
rxmeta->hosttime = jiffies;
/* Overlay a frmmeta_t onto skb->cb */
memset(skb->cb, 0, sizeof(struct p80211_frmmeta));
frmmeta = (struct p80211_frmmeta *) (skb->cb);
frmmeta->magic = P80211_FRMMETA_MAGIC;
frmmeta->rx = rxmeta;
exit:
return result;
}
/*----------------------------------------------------------------
* p80211skb_free
*
* Frees an entire p80211skb by checking and freeing the meta struct
* and then freeing the skb.
*
* Arguments:
* wlandev The wlandev this skb belongs to.
 * skb		The skb to free.
*
* Returns:
 * nothing
*
* Call context:
* May be called in interrupt or non-interrupt context
----------------------------------------------------------------*/
void p80211skb_free(struct wlandevice *wlandev, struct sk_buff *skb)
{
struct p80211_frmmeta *meta;
meta = P80211SKB_FRMMETA(skb);
if (meta && meta->rx)
p80211skb_rxmeta_detach(skb);
else
printk(KERN_ERR "Freeing an skb (%p) w/ no frmmeta.\n", skb);
dev_kfree_skb(skb);
}
| gpl-2.0 |
dwengen/linux | drivers/hwspinlock/omap_hwspinlock.c | 2821 | 4927 | /*
* OMAP hardware spinlock driver
*
* Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
*
* Contact: Simon Que <sque@ti.com>
* Hari Kanigeri <h-kanigeri2@ti.com>
* Ohad Ben-Cohen <ohad@wizery.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hwspinlock.h>
#include <linux/platform_device.h>
#include "hwspinlock_internal.h"
/* Spinlock register offsets */
#define SYSSTATUS_OFFSET 0x0014
#define LOCK_BASE_OFFSET 0x0800
#define SPINLOCK_NUMLOCKS_BIT_OFFSET (24)
/* Possible values of SPINLOCK_LOCK_REG */
#define SPINLOCK_NOTTAKEN (0) /* free */
#define SPINLOCK_TAKEN (1) /* locked */
static int omap_hwspinlock_trylock(struct hwspinlock *lock)
{
void __iomem *lock_addr = lock->priv;
/* attempt to acquire the lock by reading its value */
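	/*
	 * The read is itself the atomic test-and-set: the spinlock
	 * module hands back SPINLOCK_NOTTAKEN (0) and marks the lock
	 * taken, or returns SPINLOCK_TAKEN (1) if it is already held.
	 */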
return (SPINLOCK_NOTTAKEN == readl(lock_addr));
}
static void omap_hwspinlock_unlock(struct hwspinlock *lock)
{
void __iomem *lock_addr = lock->priv;
/* release the lock by writing 0 to it */
writel(SPINLOCK_NOTTAKEN, lock_addr);
}
/*
* relax the OMAP interconnect while spinning on it.
*
 * The specs recommend that the retry delay be just over half
 * of the time that a requester is expected to hold the lock.
 *
 * The number below is taken from a hardware specs example;
 * it is obviously somewhat arbitrary.
*/
static void omap_hwspinlock_relax(struct hwspinlock *lock)
{
ndelay(50);
}
static const struct hwspinlock_ops omap_hwspinlock_ops = {
.trylock = omap_hwspinlock_trylock,
.unlock = omap_hwspinlock_unlock,
.relax = omap_hwspinlock_relax,
};
static int omap_hwspinlock_probe(struct platform_device *pdev)
{
struct hwspinlock_pdata *pdata = pdev->dev.platform_data;
struct hwspinlock_device *bank;
struct hwspinlock *hwlock;
struct resource *res;
void __iomem *io_base;
int num_locks, i, ret;
if (!pdata)
return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
io_base = ioremap(res->start, resource_size(res));
if (!io_base)
return -ENOMEM;
/* Determine number of locks */
i = readl(io_base + SYSSTATUS_OFFSET);
i >>= SPINLOCK_NUMLOCKS_BIT_OFFSET;
/* one of the four lsb's must be set, and nothing else */
if (hweight_long(i & 0xf) != 1 || i > 8) {
ret = -EINVAL;
goto iounmap_base;
}
num_locks = i * 32; /* actual number of locks in this device */
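	/* e.g. a SYSSTATUS field value of 0x2 describes 2 * 32 = 64 locks */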
bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL);
if (!bank) {
ret = -ENOMEM;
goto iounmap_base;
}
platform_set_drvdata(pdev, bank);
for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++)
hwlock->priv = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
/*
* runtime PM will make sure the clock of this module is
* enabled iff at least one lock is requested
*/
pm_runtime_enable(&pdev->dev);
ret = hwspin_lock_register(bank, &pdev->dev, &omap_hwspinlock_ops,
pdata->base_id, num_locks);
if (ret)
goto reg_fail;
return 0;
reg_fail:
pm_runtime_disable(&pdev->dev);
kfree(bank);
iounmap_base:
iounmap(io_base);
return ret;
}
static int omap_hwspinlock_remove(struct platform_device *pdev)
{
struct hwspinlock_device *bank = platform_get_drvdata(pdev);
void __iomem *io_base = bank->lock[0].priv - LOCK_BASE_OFFSET;
int ret;
ret = hwspin_lock_unregister(bank);
if (ret) {
dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
return ret;
}
pm_runtime_disable(&pdev->dev);
iounmap(io_base);
kfree(bank);
return 0;
}
static struct platform_driver omap_hwspinlock_driver = {
.probe = omap_hwspinlock_probe,
.remove = omap_hwspinlock_remove,
.driver = {
.name = "omap_hwspinlock",
.owner = THIS_MODULE,
},
};
static int __init omap_hwspinlock_init(void)
{
return platform_driver_register(&omap_hwspinlock_driver);
}
/* board init code might need to reserve hwspinlocks for predefined purposes */
postcore_initcall(omap_hwspinlock_init);
static void __exit omap_hwspinlock_exit(void)
{
platform_driver_unregister(&omap_hwspinlock_driver);
}
module_exit(omap_hwspinlock_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock driver for OMAP");
MODULE_AUTHOR("Simon Que <sque@ti.com>");
MODULE_AUTHOR("Hari Kanigeri <h-kanigeri2@ti.com>");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
| gpl-2.0 |
superr/android_kernel_n9510_n9520 | arch/arm/mach-omap2/emu.c | 4869 | 1306 | /*
* emu.c
*
* ETM and ETB CoreSight components' resources as found in OMAP3xxx.
*
* Copyright (C) 2009 Nokia Corporation.
* Alexander Shishkin
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <mach/hardware.h>
#include "iomap.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Shishkin");
/* Cortex CoreSight components within omap3xxx EMU */
#define ETM_BASE (L4_EMU_34XX_PHYS + 0x10000)
#define DBG_BASE (L4_EMU_34XX_PHYS + 0x11000)
#define ETB_BASE (L4_EMU_34XX_PHYS + 0x1b000)
#define DAPCTL (L4_EMU_34XX_PHYS + 0x1d000)
static AMBA_APB_DEVICE(omap3_etb, "etb", 0x000bb907, ETB_BASE, { }, NULL);
static AMBA_APB_DEVICE(omap3_etm, "etm", 0x102bb921, ETM_BASE, { }, NULL);
static int __init emu_init(void)
{
if (!cpu_is_omap34xx())
return -ENODEV;
amba_device_register(&omap3_etb_device, &iomem_resource);
amba_device_register(&omap3_etm_device, &iomem_resource);
return 0;
}
subsys_initcall(emu_init);
| gpl-2.0 |
jgcaaprom/f2fs | drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 5637 | 42686 | /*
* This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
* driver for Linux.
*
* Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/pci.h>
#include "t4vf_common.h"
#include "t4vf_defs.h"
#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4fw_api.h"
/*
* Wait for the device to become ready (signified by our "who am I" register
* returning a value other than all 1's). Return an error if it doesn't
* become ready ...
*/
int __devinit t4vf_wait_dev_ready(struct adapter *adapter)
{
const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
const u32 notready1 = 0xffffffff;
const u32 notready2 = 0xeeeeeeee;
u32 val;
val = t4_read_reg(adapter, whoami);
if (val != notready1 && val != notready2)
return 0;
msleep(500);
val = t4_read_reg(adapter, whoami);
if (val != notready1 && val != notready2)
return 0;
else
return -EIO;
}
/*
* Get the reply to a mailbox command and store it in @rpl in big-endian order
* (since the firmware data structures are specified in a big-endian layout).
*/
static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
u32 mbox_data)
{
for ( ; size; size -= 8, mbox_data += 8)
*rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
}
/*
* Dump contents of mailbox with a leading tag.
*/
static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
{
dev_err(adapter->pdev_dev,
"mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag,
(unsigned long long)t4_read_reg64(adapter, mbox_data + 0),
(unsigned long long)t4_read_reg64(adapter, mbox_data + 8),
(unsigned long long)t4_read_reg64(adapter, mbox_data + 16),
(unsigned long long)t4_read_reg64(adapter, mbox_data + 24),
(unsigned long long)t4_read_reg64(adapter, mbox_data + 32),
(unsigned long long)t4_read_reg64(adapter, mbox_data + 40),
(unsigned long long)t4_read_reg64(adapter, mbox_data + 48),
(unsigned long long)t4_read_reg64(adapter, mbox_data + 56));
}
/**
* t4vf_wr_mbox_core - send a command to FW through the mailbox
* @adapter: the adapter
* @cmd: the command to write
* @size: command length in bytes
* @rpl: where to optionally store the reply
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Sends the given command to FW through the mailbox and waits for the
* FW to execute the command. If @rpl is not %NULL it is used to store
* the FW's reply to the command. The command and its optional reply
* are of the same length. FW can take up to 500 ms to respond.
* @sleep_ok determines whether we may sleep while awaiting the response.
* If sleeping is allowed we use progressive backoff otherwise we spin.
*
* The return value is 0 on success or a negative errno on failure. A
* failure can happen either because we are not able to execute the
* command or FW executes it but signals an error. In the latter case
* the return value is the error code indicated by FW (negated).
*/
int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
void *rpl, bool sleep_ok)
{
static const int delay[] = {
1, 1, 3, 5, 10, 10, 20, 50, 100
};
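	/*
	 * With this table a sleeping caller backs off progressively:
	 * 1, 1, 3, 5, 10, 10, 20 and 50 ms, then 100 ms per retry
	 * until FW_CMD_MAX_TIMEOUT expires.
	 */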
u32 v;
int i, ms, delay_idx;
const __be64 *p;
u32 mbox_data = T4VF_MBDATA_BASE_ADDR;
u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
/*
* Commands must be multiples of 16 bytes in length and may not be
* larger than the size of the Mailbox Data register array.
*/
if ((size % 16) != 0 ||
size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
return -EINVAL;
/*
* Loop trying to get ownership of the mailbox. Return an error
* if we can't gain ownership.
*/
v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
if (v != MBOX_OWNER_DRV)
return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
/*
* Write the command array into the Mailbox Data register array and
* transfer ownership of the mailbox to the firmware.
*
* For the VFs, the Mailbox Data "registers" are actually backed by
* T4's "MA" interface rather than PL Registers (as is the case for
* the PFs). Because these are in different coherency domains, the
* write to the VF's PL-register-backed Mailbox Control can race in
* front of the writes to the MA-backed VF Mailbox Data "registers".
* So we need to do a read-back on at least one byte of the VF Mailbox
* Data registers before doing the write to the VF Mailbox Control
* register.
*/
for (i = 0, p = cmd; i < size; i += 8)
t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
t4_read_reg(adapter, mbox_data); /* flush write */
t4_write_reg(adapter, mbox_ctl,
MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
t4_read_reg(adapter, mbox_ctl); /* flush write */
/*
* Spin waiting for firmware to acknowledge processing our command.
*/
delay_idx = 0;
ms = delay[0];
for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
if (sleep_ok) {
ms = delay[delay_idx];
if (delay_idx < ARRAY_SIZE(delay) - 1)
delay_idx++;
msleep(ms);
} else
mdelay(ms);
/*
* If we're the owner, see if this is the reply we wanted.
*/
v = t4_read_reg(adapter, mbox_ctl);
if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
/*
* If the Message Valid bit isn't on, revoke ownership
* of the mailbox and continue waiting for our reply.
*/
if ((v & MBMSGVALID) == 0) {
t4_write_reg(adapter, mbox_ctl,
MBOWNER(MBOX_OWNER_NONE));
continue;
}
/*
* We now have our reply. Extract the command return
* value, copy the reply back to our caller's buffer
* (if specified) and revoke ownership of the mailbox.
* We return the (negated) firmware command return
* code (this depends on FW_SUCCESS == 0).
*/
/* return value in low-order little-endian word */
v = t4_read_reg(adapter, mbox_data);
if (FW_CMD_RETVAL_GET(v))
dump_mbox(adapter, "FW Error", mbox_data);
if (rpl) {
/* request bit in high-order BE word */
WARN_ON((be32_to_cpu(*(const u32 *)cmd)
& FW_CMD_REQUEST) == 0);
get_mbox_rpl(adapter, rpl, size, mbox_data);
WARN_ON((be32_to_cpu(*(u32 *)rpl)
& FW_CMD_REQUEST) != 0);
}
t4_write_reg(adapter, mbox_ctl,
MBOWNER(MBOX_OWNER_NONE));
return -FW_CMD_RETVAL_GET(v);
}
}
/*
* We timed out. Return the error ...
*/
dump_mbox(adapter, "FW Timeout", mbox_data);
return -ETIMEDOUT;
}
/**
* hash_mac_addr - return the hash value of a MAC address
* @addr: the 48-bit Ethernet MAC address
*
* Hashes a MAC address according to the hash function used by hardware
* inexact (hash) address matching.
*/
static int hash_mac_addr(const u8 *addr)
{
u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
a ^= b;
a ^= (a >> 12);
a ^= (a >> 6);
return a & 0x3f;
}
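/*
 * Worked example for hash_mac_addr() above (hand-computed, purely
 * illustrative): for 00:11:22:33:44:55, a = 0x001122 and b = 0x334455;
 * a ^= b yields 0x335577, folding in a >> 12 yields 0x335642,
 * folding in a >> 6 yields 0x339b1b, and the low 6 bits select
 * hash bucket 0x1b.
 */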
/**
* init_link_config - initialize a link's SW state
* @lc: structure holding the link state
* @caps: link capabilities
*
* Initializes the SW state maintained for each link, including the link's
* capabilities and default speed/flow-control/autonegotiation settings.
*/
static void __devinit init_link_config(struct link_config *lc,
unsigned int caps)
{
lc->supported = caps;
lc->requested_speed = 0;
lc->speed = 0;
lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
if (lc->supported & SUPPORTED_Autoneg) {
lc->advertising = lc->supported;
lc->autoneg = AUTONEG_ENABLE;
lc->requested_fc |= PAUSE_AUTONEG;
} else {
lc->advertising = 0;
lc->autoneg = AUTONEG_DISABLE;
}
}
/**
* t4vf_port_init - initialize port hardware/software state
* @adapter: the adapter
* @pidx: the adapter port index
*/
int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
{
struct port_info *pi = adap2pinfo(adapter, pidx);
struct fw_vi_cmd vi_cmd, vi_rpl;
struct fw_port_cmd port_cmd, port_rpl;
int v;
u32 word;
/*
* Execute a VI Read command to get our Virtual Interface information
* like MAC address, etc.
*/
memset(&vi_cmd, 0, sizeof(vi_cmd));
vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
FW_CMD_REQUEST |
FW_CMD_READ);
vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(pi->viid));
v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
if (v)
return v;
BUG_ON(pi->port_id != FW_VI_CMD_PORTID_GET(vi_rpl.portid_pkd));
pi->rss_size = FW_VI_CMD_RSSSIZE_GET(be16_to_cpu(vi_rpl.rsssize_pkd));
t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);
/*
* If we don't have read access to our port information, we're done
* now. Otherwise, execute a PORT Read command to get it ...
*/
if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
return 0;
memset(&port_cmd, 0, sizeof(port_cmd));
port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) |
FW_CMD_REQUEST |
FW_CMD_READ |
FW_PORT_CMD_PORTID(pi->port_id));
port_cmd.action_to_len16 =
cpu_to_be32(FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
FW_LEN16(port_cmd));
v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
if (v)
return v;
v = 0;
word = be16_to_cpu(port_rpl.u.info.pcap);
if (word & FW_PORT_CAP_SPEED_100M)
v |= SUPPORTED_100baseT_Full;
if (word & FW_PORT_CAP_SPEED_1G)
v |= SUPPORTED_1000baseT_Full;
if (word & FW_PORT_CAP_SPEED_10G)
v |= SUPPORTED_10000baseT_Full;
if (word & FW_PORT_CAP_ANEG)
v |= SUPPORTED_Autoneg;
init_link_config(&pi->link_cfg, v);
return 0;
}
/**
* t4vf_fw_reset - issue a reset to FW
* @adapter: the adapter
*
* Issues a reset command to FW. For a Physical Function this would
 * result in the Firmware resetting all of its state. For a Virtual
* Function this just resets the state associated with the VF.
*/
int t4vf_fw_reset(struct adapter *adapter)
{
struct fw_reset_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
FW_CMD_WRITE);
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
/**
* t4vf_query_params - query FW or device parameters
* @adapter: the adapter
* @nparams: the number of parameters
* @params: the parameter names
* @vals: the parameter values
*
* Reads the values of firmware or device parameters. Up to 7 parameters
* can be queried at once.
*/
int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
const u32 *params, u32 *vals)
{
int i, ret;
struct fw_params_cmd cmd, rpl;
struct fw_params_param *p;
size_t len16;
if (nparams > 7)
return -EINVAL;
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
FW_CMD_REQUEST |
FW_CMD_READ);
len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
param[nparams].mnem), 16);
cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
p->mnem = htonl(*params++);
ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
if (ret == 0)
for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
*vals++ = be32_to_cpu(p->val);
return ret;
}
/**
* t4vf_set_params - sets FW or device parameters
* @adapter: the adapter
* @nparams: the number of parameters
* @params: the parameter names
* @vals: the parameter values
*
* Sets the values of firmware or device parameters. Up to 7 parameters
* can be specified at once.
*/
int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
const u32 *params, const u32 *vals)
{
int i;
struct fw_params_cmd cmd;
struct fw_params_param *p;
size_t len16;
if (nparams > 7)
return -EINVAL;
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
FW_CMD_REQUEST |
FW_CMD_WRITE);
len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
param[nparams]), 16);
cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
p->mnem = cpu_to_be32(*params++);
p->val = cpu_to_be32(*vals++);
}
return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
/**
 * t4vf_get_sge_params - retrieve adapter Scatter Gather Engine parameters
* @adapter: the adapter
*
* Retrieves various core SGE parameters in the form of hardware SGE
* register values. The caller is responsible for decoding these as
* needed. The SGE parameters are stored in @adapter->params.sge.
*/
int t4vf_get_sge_params(struct adapter *adapter)
{
struct sge_params *sge_params = &adapter->params.sge;
u32 params[7], vals[7];
int v;
params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ(SGE_CONTROL));
params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ(SGE_HOST_PAGE_SIZE));
params[2] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE0));
params[3] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE1));
params[4] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_0_AND_1));
params[5] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_2_AND_3));
params[6] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_4_AND_5));
v = t4vf_query_params(adapter, 7, params, vals);
if (v)
return v;
sge_params->sge_control = vals[0];
sge_params->sge_host_page_size = vals[1];
sge_params->sge_fl_buffer_size[0] = vals[2];
sge_params->sge_fl_buffer_size[1] = vals[3];
sge_params->sge_timer_value_0_and_1 = vals[4];
sge_params->sge_timer_value_2_and_3 = vals[5];
sge_params->sge_timer_value_4_and_5 = vals[6];
params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD));
v = t4vf_query_params(adapter, 1, params, vals);
if (v)
return v;
sge_params->sge_ingress_rx_threshold = vals[0];
return 0;
}
/**
 * t4vf_get_vpd_params - retrieve device VPD parameters
* @adapter: the adapter
*
 * Retrieves various device Vital Product Data parameters. The parameters
* are stored in @adapter->params.vpd.
*/
int t4vf_get_vpd_params(struct adapter *adapter)
{
struct vpd_params *vpd_params = &adapter->params.vpd;
u32 params[7], vals[7];
int v;
params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
v = t4vf_query_params(adapter, 1, params, vals);
if (v)
return v;
vpd_params->cclk = vals[0];
return 0;
}
/**
 * t4vf_get_dev_params - retrieve device parameters
* @adapter: the adapter
*
 * Retrieves various device parameters. The parameters are stored in
* @adapter->params.dev.
*/
int t4vf_get_dev_params(struct adapter *adapter)
{
struct dev_params *dev_params = &adapter->params.dev;
u32 params[7], vals[7];
int v;
params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV));
params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV));
v = t4vf_query_params(adapter, 2, params, vals);
if (v)
return v;
dev_params->fwrev = vals[0];
dev_params->tprev = vals[1];
return 0;
}
/**
* t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
* @adapter: the adapter
*
* Retrieves global RSS mode and parameters with which we have to live
* and stores them in the @adapter's RSS parameters.
*/
int t4vf_get_rss_glb_config(struct adapter *adapter)
{
struct rss_params *rss = &adapter->params.rss;
struct fw_rss_glb_config_cmd cmd, rpl;
int v;
/*
* Execute an RSS Global Configuration read command to retrieve
* our RSS configuration.
*/
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_READ);
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
if (v)
return v;
/*
 * Translate the big-endian RSS Global Configuration into our
* cpu-endian format based on the RSS mode. We also do first level
* filtering at this point to weed out modes which don't support
* VF Drivers ...
*/
rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_GET(
be32_to_cpu(rpl.u.manual.mode_pkd));
switch (rss->mode) {
case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
u32 word = be32_to_cpu(
rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
rss->u.basicvirtual.synmapen =
((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0);
rss->u.basicvirtual.syn4tupenipv6 =
((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0);
rss->u.basicvirtual.syn2tupenipv6 =
((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0);
rss->u.basicvirtual.syn4tupenipv4 =
((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0);
rss->u.basicvirtual.syn2tupenipv4 =
((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0);
rss->u.basicvirtual.ofdmapen =
((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0);
rss->u.basicvirtual.tnlmapen =
((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0);
rss->u.basicvirtual.tnlalllookup =
((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0);
rss->u.basicvirtual.hashtoeplitz =
((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0);
/* we need at least Tunnel Map Enable to be set */
if (!rss->u.basicvirtual.tnlmapen)
return -EINVAL;
break;
}
default:
/* all unknown/unsupported RSS modes result in an error */
return -EINVAL;
}
return 0;
}
/**
* t4vf_get_vfres - retrieve VF resource limits
* @adapter: the adapter
*
* Retrieves configured resource limits and capabilities for a virtual
* function. The results are stored in @adapter->vfres.
*/
int t4vf_get_vfres(struct adapter *adapter)
{
struct vf_resources *vfres = &adapter->params.vfres;
struct fw_pfvf_cmd cmd, rpl;
int v;
u32 word;
/*
* Execute PFVF Read command to get VF resource limits; bail out early
* with error on command failure.
*/
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PFVF_CMD) |
FW_CMD_REQUEST |
FW_CMD_READ);
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
if (v)
return v;
/*
* Extract VF resource limits and return success.
*/
word = be32_to_cpu(rpl.niqflint_niq);
vfres->niqflint = FW_PFVF_CMD_NIQFLINT_GET(word);
vfres->niq = FW_PFVF_CMD_NIQ_GET(word);
word = be32_to_cpu(rpl.type_to_neq);
vfres->neq = FW_PFVF_CMD_NEQ_GET(word);
vfres->pmask = FW_PFVF_CMD_PMASK_GET(word);
word = be32_to_cpu(rpl.tc_to_nexactf);
vfres->tc = FW_PFVF_CMD_TC_GET(word);
vfres->nvi = FW_PFVF_CMD_NVI_GET(word);
vfres->nexactf = FW_PFVF_CMD_NEXACTF_GET(word);
word = be32_to_cpu(rpl.r_caps_to_nethctrl);
vfres->r_caps = FW_PFVF_CMD_R_CAPS_GET(word);
vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_GET(word);
vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_GET(word);
return 0;
}
/**
* t4vf_read_rss_vi_config - read a VI's RSS configuration
* @adapter: the adapter
* @viid: Virtual Interface ID
* @config: pointer to host-native VI RSS Configuration buffer
*
* Reads the Virtual Interface's RSS configuration information and
* translates it into CPU-native format.
*/
int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
union rss_vi_config *config)
{
struct fw_rss_vi_config_cmd cmd, rpl;
int v;
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_READ |
FW_RSS_VI_CONFIG_CMD_VIID(viid));
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
if (v)
return v;
switch (adapter->params.rss.mode) {
case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen);
config->basicvirtual.ip6fourtupen =
((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) != 0);
config->basicvirtual.ip6twotupen =
((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) != 0);
config->basicvirtual.ip4fourtupen =
((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) != 0);
config->basicvirtual.ip4twotupen =
((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) != 0);
config->basicvirtual.udpen =
((word & FW_RSS_VI_CONFIG_CMD_UDPEN) != 0);
config->basicvirtual.defaultq =
FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(word);
break;
}
default:
return -EINVAL;
}
return 0;
}
/**
* t4vf_write_rss_vi_config - write a VI's RSS configuration
* @adapter: the adapter
* @viid: Virtual Interface ID
* @config: pointer to host-native VI RSS Configuration buffer
*
* Write the Virtual Interface's RSS configuration information
* (translating it into firmware-native format before writing).
*/
int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
union rss_vi_config *config)
{
struct fw_rss_vi_config_cmd cmd, rpl;
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_WRITE |
FW_RSS_VI_CONFIG_CMD_VIID(viid));
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
switch (adapter->params.rss.mode) {
case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
u32 word = 0;
if (config->basicvirtual.ip6fourtupen)
word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
if (config->basicvirtual.ip6twotupen)
word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
if (config->basicvirtual.ip4fourtupen)
word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
if (config->basicvirtual.ip4twotupen)
word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
if (config->basicvirtual.udpen)
word |= FW_RSS_VI_CONFIG_CMD_UDPEN;
word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ(
config->basicvirtual.defaultq);
cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
break;
}
default:
return -EINVAL;
}
return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
}
/**
* t4vf_config_rss_range - configure a portion of the RSS mapping table
* @adapter: the adapter
* @viid: Virtual Interface of RSS Table Slice
* @start: starting entry in the table to write
* @n: how many table entries to write
* @rspq: values for the "Response Queue" (Ingress Queue) lookup table
* @nrspq: number of values in @rspq
*
* Programs the selected part of the VI's RSS mapping table with the
* provided values. If @nrspq < @n the supplied values are used repeatedly
* until the full table range is populated.
*
* The caller must ensure the values in @rspq are in the range 0..1023.
*/
int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
int start, int n, const u16 *rspq, int nrspq)
{
const u16 *rsp = rspq;
const u16 *rsp_end = rspq+nrspq;
struct fw_rss_ind_tbl_cmd cmd;
/*
* Initialize firmware command template to write the RSS table.
*/
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
FW_CMD_REQUEST |
FW_CMD_WRITE |
FW_RSS_IND_TBL_CMD_VIID(viid));
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
/*
* Each firmware RSS command can accommodate up to 32 RSS Ingress
* Queue Identifiers. These Ingress Queue IDs are packed three to
* a 32-bit word as 10-bit values, with the remaining upper two bits
* reserved.
*/
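/*
 * Illustrative layout of one such packed word (a sketch; the
 * authoritative bit positions come from the FW_RSS_IND_TBL_CMD_IQ*
 * macros):
 *
 * bits 31:30 reserved
 * bits 29:20 IQ0
 * bits 19:10 IQ1
 * bits 9:0 IQ2
 */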
while (n > 0) {
__be32 *qp = &cmd.iq0_to_iq2;
int nq = min(n, 32);
int ret;
/*
* Set up the firmware RSS command header to send the next
* "nq" Ingress Queue IDs to the firmware.
*/
cmd.niqid = cpu_to_be16(nq);
cmd.startidx = cpu_to_be16(start);
/*
 * Advance the start index and remaining count by "nq" for the
 * next trip through the loop.
 */
start += nq;
n -= nq;
/*
* While there are still Ingress Queue IDs to stuff into the
* current firmware RSS command, retrieve them from the
* Ingress Queue ID array and insert them into the command.
*/
while (nq > 0) {
/*
* Grab up to the next 3 Ingress Queue IDs (wrapping
* around the Ingress Queue ID array if necessary) and
* insert them into the firmware RSS command at the
* current 3-tuple position within the command.
*/
u16 qbuf[3];
u16 *qbp = qbuf;
int nqbuf = min(3, nq);
nq -= nqbuf;
qbuf[0] = qbuf[1] = qbuf[2] = 0;
while (nqbuf) {
nqbuf--;
*qbp++ = *rsp++;
if (rsp >= rsp_end)
rsp = rspq;
}
*qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
}
/*
* Send this portion of the RSS table update to the firmware;
* bail out on any errors.
*/
ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
if (ret)
return ret;
}
return 0;
}
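/*
 * Example usage (a sketch; the queue IDs are illustrative): replicate
 * four Response Queue IDs across a 64-entry slice of the table:
 *
 *	static const u16 rspq[4] = { 64, 65, 66, 67 };
 *	err = t4vf_config_rss_range(adapter, viid, 0, 64, rspq, 4);
 */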
/**
* t4vf_alloc_vi - allocate a virtual interface on a port
* @adapter: the adapter
* @port_id: physical port associated with the VI
*
* Allocate a new Virtual Interface and bind it to the indicated
* physical port. Return the new Virtual Interface Identifier on
* success, or a [negative] error number on failure.
*/
int t4vf_alloc_vi(struct adapter *adapter, int port_id)
{
struct fw_vi_cmd cmd, rpl;
int v;
/*
* Execute a VI command to allocate a Virtual Interface and return its
* VIID.
*/
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
FW_CMD_REQUEST |
FW_CMD_WRITE |
FW_CMD_EXEC);
cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
FW_VI_CMD_ALLOC);
cmd.portid_pkd = FW_VI_CMD_PORTID(port_id);
v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
if (v)
return v;
return FW_VI_CMD_VIID_GET(be16_to_cpu(rpl.type_viid));
}
/**
* t4vf_free_vi -- free a virtual interface
* @adapter: the adapter
* @viid: the virtual interface identifier
*
* Free a previously allocated Virtual Interface. Return an error on
* failure.
*/
int t4vf_free_vi(struct adapter *adapter, int viid)
{
struct fw_vi_cmd cmd;
/*
* Execute a VI command to free the Virtual Interface.
*/
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
FW_CMD_REQUEST |
FW_CMD_EXEC);
cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
FW_VI_CMD_FREE);
cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(viid));
return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
/**
* t4vf_enable_vi - enable/disable a virtual interface
* @adapter: the adapter
* @viid: the Virtual Interface ID
* @rx_en: 1=enable Rx, 0=disable Rx
* @tx_en: 1=enable Tx, 0=disable Tx
*
* Enables/disables a virtual interface.
*/
int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
bool rx_en, bool tx_en)
{
struct fw_vi_enable_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) |
FW_CMD_REQUEST |
FW_CMD_EXEC |
FW_VI_ENABLE_CMD_VIID(viid));
cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN(rx_en) |
FW_VI_ENABLE_CMD_EEN(tx_en) |
FW_LEN16(cmd));
return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
/**
* t4vf_identify_port - identify a VI's port by blinking its LED
* @adapter: the adapter
* @viid: the Virtual Interface ID
* @nblinks: how many times to blink LED at 2.5 Hz
*
* Identifies a VI's port by blinking its LED.
*/
int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
unsigned int nblinks)
{
struct fw_vi_enable_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) |
FW_CMD_REQUEST |
FW_CMD_EXEC |
FW_VI_ENABLE_CMD_VIID(viid));
cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED |
FW_LEN16(cmd));
cmd.blinkdur = cpu_to_be16(nblinks);
return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
/**
* t4vf_set_rxmode - set Rx properties of a virtual interface
* @adapter: the adapter
* @viid: the VI id
* @mtu: the new MTU or -1 for no change
* @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
* @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
* @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
* @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
* -1 no change
*
* Sets Rx properties of a virtual interface.
*/
int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid,
int mtu, int promisc, int all_multi, int bcast, int vlanex,
bool sleep_ok)
{
struct fw_vi_rxmode_cmd cmd;
/* convert to FW values */
if (mtu < 0)
mtu = FW_VI_RXMODE_CMD_MTU_MASK;
if (promisc < 0)
promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
if (all_multi < 0)
all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
if (bcast < 0)
bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
if (vlanex < 0)
vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_RXMODE_CMD) |
FW_CMD_REQUEST |
FW_CMD_WRITE |
FW_VI_RXMODE_CMD_VIID(viid));
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
cmd.mtu_to_vlanexen =
cpu_to_be32(FW_VI_RXMODE_CMD_MTU(mtu) |
FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
}
/**
* t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses
* @adapter: the adapter
* @viid: the Virtual Interface Identifier
* @free: if true any existing filters for this VI id are first removed
* @naddr: the number of MAC addresses to allocate filters for (up to 7)
* @addr: the MAC address(es)
* @idx: where to store the index of each allocated filter
* @hash: pointer to hash address filter bitmap
* @sleep_ok: call is allowed to sleep
*
* Allocates an exact-match filter for each of the supplied addresses and
* sets it to the corresponding address. If @idx is not %NULL it should
* have at least @naddr entries, each of which will be set to the index of
* the filter allocated for the corresponding MAC address. If a filter
* could not be allocated for an address, its index is set to 0xffff.
* If @hash is not %NULL, addresses that fail to allocate an exact
* filter are hashed and used to update the hash filter bitmap pointed
* at by @hash.
*
* Returns a negative error number or the number of filters allocated.
*/
int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
unsigned int naddr, const u8 **addr, u16 *idx,
u64 *hash, bool sleep_ok)
{
int offset, ret = 0;
unsigned nfilters = 0;
unsigned int rem = naddr;
struct fw_vi_mac_cmd cmd, rpl;
if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
return -EINVAL;
for (offset = 0; offset < naddr; /**/) {
unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
? rem
: ARRAY_SIZE(cmd.u.exact));
size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
u.exact[fw_naddr]), 16);
struct fw_vi_mac_exact *p;
int i;
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
FW_CMD_REQUEST |
FW_CMD_WRITE |
(free ? FW_CMD_EXEC : 0) |
FW_VI_MAC_CMD_VIID(viid));
cmd.freemacs_to_len16 =
cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
FW_CMD_LEN16(len16));
for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
p->valid_to_idx = cpu_to_be16(
FW_VI_MAC_CMD_VALID |
FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
}
ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
sleep_ok);
if (ret && ret != -ENOMEM)
break;
for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
u16 index = FW_VI_MAC_CMD_IDX_GET(
be16_to_cpu(p->valid_to_idx));
if (idx)
idx[offset+i] =
(index >= FW_CLS_TCAM_NUM_ENTRIES
? 0xffff
: index);
if (index < FW_CLS_TCAM_NUM_ENTRIES)
nfilters++;
else if (hash)
*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
}
free = false;
offset += fw_naddr;
rem -= fw_naddr;
}
/*
* If there were no errors or we merely ran out of room in our MAC
* address arena, return the number of filters actually written.
*/
if (ret == 0 || ret == -ENOMEM)
ret = nfilters;
return ret;
}
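/*
 * Example usage (a sketch; variable names are illustrative): install
 * two unicast addresses, dropping whatever the VI had before, and fall
 * back to the inexact hash for any address that did not get an exact
 * filter:
 *
 *	const u8 *macs[2] = { addr0, addr1 };
 *	u64 hash = 0;
 *	int n = t4vf_alloc_mac_filt(adapter, viid, true, 2, macs,
 *				    NULL, &hash, true);
 *	if (n >= 0 && hash)
 *		t4vf_set_addr_hash(adapter, viid, false, hash, true);
 */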
/**
* t4vf_change_mac - modifies the exact-match filter for a MAC address
* @adapter: the adapter
* @viid: the Virtual Interface ID
* @idx: index of existing filter for old value of MAC address, or -1
* @addr: the new MAC address value
* @persist: if idx < 0, the new MAC allocation should be persistent
*
* Modifies an exact-match filter and sets it to the new MAC address.
* Note that in general it is not possible to modify the value of a given
* filter so the generic way to modify an address filter is to free the
* one being used by the old address value and allocate a new filter for
* the new address value. @idx can be -1 if the address is a new
* addition.
*
* Returns a negative error number or the index of the filter with the new
* MAC value.
*/
int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
int idx, const u8 *addr, bool persist)
{
int ret;
struct fw_vi_mac_cmd cmd, rpl;
struct fw_vi_mac_exact *p = &cmd.u.exact[0];
size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
u.exact[1]), 16);
/*
* If this is a new allocation, determine whether it should be
* persistent (across a "freemacs" operation) or not.
*/
if (idx < 0)
idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
FW_CMD_REQUEST |
FW_CMD_WRITE |
FW_VI_MAC_CMD_VIID(viid));
cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID |
FW_VI_MAC_CMD_IDX(idx));
memcpy(p->macaddr, addr, sizeof(p->macaddr));
ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
if (ret == 0) {
p = &rpl.u.exact[0];
ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
if (ret >= FW_CLS_TCAM_NUM_ENTRIES)
ret = -ENOMEM;
}
return ret;
}
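/*
 * Typical usage (a sketch; "cached_idx" is illustrative): callers keep
 * the returned index and pass it back on the next address change so
 * the old filter slot is reused:
 *
 *	ret = t4vf_change_mac(adapter, viid, cached_idx, newaddr, true);
 *	if (ret >= 0)
 *		cached_idx = ret;
 */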
/**
* t4vf_set_addr_hash - program the MAC inexact-match hash filter
* @adapter: the adapter
* @viid: the Virtual Interface Identifier
* @ucast: whether the hash filter should also match unicast addresses
* @vec: the value to be written to the hash filter
* @sleep_ok: call is allowed to sleep
*
* Sets the 64-bit inexact-match hash filter for a virtual interface.
*/
int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid,
bool ucast, u64 vec, bool sleep_ok)
{
struct fw_vi_mac_cmd cmd;
size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
u.exact[0]), 16);
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
FW_CMD_REQUEST |
FW_CMD_WRITE |
FW_VI_MAC_CMD_VIID(viid));
cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN |
FW_VI_MAC_CMD_HASHUNIEN(ucast) |
FW_CMD_LEN16(len16));
cmd.u.hash.hashvec = cpu_to_be64(vec);
return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
}
/**
* t4vf_get_port_stats - collect "port" statistics
* @adapter: the adapter
* @pidx: the port index
* @s: the stats structure to fill
*
* Collect statistics for the "port"'s Virtual Interface.
*/
int t4vf_get_port_stats(struct adapter *adapter, int pidx,
struct t4vf_port_stats *s)
{
struct port_info *pi = adap2pinfo(adapter, pidx);
struct fw_vi_stats_vf fwstats;
unsigned int rem = VI_VF_NUM_STATS;
__be64 *fwsp = (__be64 *)&fwstats;
/*
* Grab the Virtual Interface statistics a chunk at a time via mailbox
* commands. We could use a Work Request and get all of them at once
* but that's an asynchronous interface which is awkward to use.
*/
while (rem) {
unsigned int ix = VI_VF_NUM_STATS - rem;
unsigned int nstats = min(6U, rem);
struct fw_vi_stats_cmd cmd, rpl;
size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
sizeof(struct fw_vi_stats_ctl));
size_t len16 = DIV_ROUND_UP(len, 16);
int ret;
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_STATS_CMD) |
FW_VI_STATS_CMD_VIID(pi->viid) |
FW_CMD_REQUEST |
FW_CMD_READ);
cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
cmd.u.ctl.nstats_ix =
cpu_to_be16(FW_VI_STATS_CMD_IX(ix) |
FW_VI_STATS_CMD_NSTATS(nstats));
ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
if (ret)
return ret;
memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);
rem -= nstats;
fwsp += nstats;
}
/*
* Translate firmware statistics into host native statistics.
*/
s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes);
s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes);
s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes);
s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames);
s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes);
s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames);
s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes);
s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes);
s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes);
s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);
s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames);
return 0;
}
/**
* t4vf_iq_free - free an ingress queue and its free lists
* @adapter: the adapter
* @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
* @iqid: ingress queue ID
* @fl0id: FL0 queue ID or 0xffff if no attached FL0
* @fl1id: FL1 queue ID or 0xffff if no attached FL1
*
* Frees an ingress queue and its associated free lists, if any.
*/
int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype,
unsigned int iqid, unsigned int fl0id, unsigned int fl1id)
{
struct fw_iq_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) |
FW_CMD_REQUEST |
FW_CMD_EXEC);
cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE |
FW_LEN16(cmd));
cmd.type_to_iqandstindex =
cpu_to_be32(FW_IQ_CMD_TYPE(iqtype));
cmd.iqid = cpu_to_be16(iqid);
cmd.fl0id = cpu_to_be16(fl0id);
cmd.fl1id = cpu_to_be16(fl1id);
return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
/**
* t4vf_eth_eq_free - free an Ethernet egress queue
* @adapter: the adapter
* @eqid: egress queue ID
*
* Frees an Ethernet egress queue.
*/
int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
{
struct fw_eq_eth_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) |
FW_CMD_REQUEST |
FW_CMD_EXEC);
cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE |
FW_LEN16(cmd));
cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID(eqid));
return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
/**
* t4vf_handle_fw_rpl - process a firmware reply message
* @adapter: the adapter
* @rpl: start of the firmware message
*
* Processes a firmware message, such as link state change messages.
*/
int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
{
const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi));
switch (opcode) {
case FW_PORT_CMD: {
/*
* Link/module state change message.
*/
const struct fw_port_cmd *port_cmd =
(const struct fw_port_cmd *)rpl;
u32 word;
int action, port_id, link_ok, speed, fc, pidx;
/*
* Extract various fields from port status change message.
*/
action = FW_PORT_CMD_ACTION_GET(
be32_to_cpu(port_cmd->action_to_len16));
if (action != FW_PORT_ACTION_GET_PORT_INFO) {
dev_err(adapter->pdev_dev,
"Unknown firmware PORT reply action %x\n",
action);
break;
}
port_id = FW_PORT_CMD_PORTID_GET(
be32_to_cpu(port_cmd->op_to_portid));
word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
link_ok = (word & FW_PORT_CMD_LSTATUS) != 0;
speed = 0;
fc = 0;
if (word & FW_PORT_CMD_RXPAUSE)
fc |= PAUSE_RX;
if (word & FW_PORT_CMD_TXPAUSE)
fc |= PAUSE_TX;
if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
speed = SPEED_100;
else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
speed = SPEED_1000;
else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
speed = SPEED_10000;
/*
* Scan all of our "ports" (Virtual Interfaces) looking for
* those bound to the physical port which has changed. If
* our recorded state doesn't match the current state,
* signal that change to the OS code.
*/
for_each_port(adapter, pidx) {
struct port_info *pi = adap2pinfo(adapter, pidx);
struct link_config *lc;
if (pi->port_id != port_id)
continue;
lc = &pi->link_cfg;
if (link_ok != lc->link_ok || speed != lc->speed ||
fc != lc->fc) {
/* something changed */
lc->link_ok = link_ok;
lc->speed = speed;
lc->fc = fc;
t4vf_os_link_changed(adapter, pidx, link_ok);
}
}
break;
}
default:
dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n",
opcode);
}
return 0;
}
| gpl-2.0 |
brymaster5000/m7_4.1 | tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c | 5637 | 9961 | /*
* (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
*
* Licensed under the terms of the GNU GPL License version 2.
*
* Output format inspired by Len Brown's <lenb@kernel.org> turbostat tool.
*
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <libgen.h>
#include "idle_monitor/cpupower-monitor.h"
#include "idle_monitor/idle_monitors.h"
#include "helpers/helpers.h"
/* Define pointers to all monitors. */
#define DEF(x) & x ## _monitor ,
struct cpuidle_monitor *all_monitors[] = {
#include "idle_monitors.def"
0
};
static struct cpuidle_monitor *monitors[MONITORS_MAX];
static unsigned int avail_monitors;
static char *progname;
enum operation_mode_e { list = 1, show, show_all };
static int mode;
static int interval = 1;
static char *show_monitors_param;
static struct cpupower_topology cpu_top;
/* ToDo: Document this in the manpage */
static char range_abbr[RANGE_MAX] = { 'T', 'C', 'P', 'M', };
static void print_wrong_arg_exit(void)
{
printf(_("invalid or unknown argument\n"));
exit(EXIT_FAILURE);
}
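/*
 * Worked example for timespec_diff_us() below: start = 1.9 s and
 * end = 3.1 s gives a negative nanosecond delta, so one second is
 * borrowed: temp = 1 s + 200000000 ns, and the function returns
 * 1200000 us.
 */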
long long timespec_diff_us(struct timespec start, struct timespec end)
{
struct timespec temp;
if ((end.tv_nsec - start.tv_nsec) < 0) {
temp.tv_sec = end.tv_sec - start.tv_sec - 1;
temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
} else {
temp.tv_sec = end.tv_sec - start.tv_sec;
temp.tv_nsec = end.tv_nsec - start.tv_nsec;
}
return (temp.tv_sec * 1000000) + (temp.tv_nsec / 1000);
}
void print_n_spaces(int n)
{
int x;
for (x = 0; x < n; x++)
printf(" ");
}
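/*
 * For example, fill_string_with_spaces(buf, 6) turns "C1" into
 * "C1    " (four trailing spaces) and NUL-terminates it.
 */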
/* size of s must be at least n + 1 */
int fill_string_with_spaces(char *s, int n)
{
int len = strlen(s);
if (len > n)
return -1;
for (; len < n; len++)
s[len] = ' ';
s[len] = '\0';
return 0;
}
void print_header(int topology_depth)
{
unsigned int mon;
int state, need_len, pr_mon_len;
cstate_t s;
char buf[128] = "";
int percent_width = 4;
fill_string_with_spaces(buf, topology_depth * 5 - 1);
printf("%s|", buf);
for (mon = 0; mon < avail_monitors; mon++) {
pr_mon_len = 0;
need_len = monitors[mon]->hw_states_num * (percent_width + 3)
- 1;
if (mon != 0) {
printf("|| ");
need_len--;
}
sprintf(buf, "%s", monitors[mon]->name);
fill_string_with_spaces(buf, need_len);
printf("%s", buf);
}
printf("\n");
if (topology_depth > 2)
printf("PKG |");
if (topology_depth > 1)
printf("CORE|");
if (topology_depth > 0)
printf("CPU |");
for (mon = 0; mon < avail_monitors; mon++) {
if (mon != 0)
printf("|| ");
else
printf(" ");
for (state = 0; state < monitors[mon]->hw_states_num; state++) {
if (state != 0)
printf(" | ");
s = monitors[mon]->hw_states[state];
sprintf(buf, "%s", s.name);
fill_string_with_spaces(buf, percent_width);
printf("%s", buf);
}
printf(" ");
}
printf("\n");
}
void print_results(int topology_depth, int cpu)
{
unsigned int mon;
int state, ret;
double percent;
unsigned long long result;
cstate_t s;
/* Be careful: CPUs may have been re-sorted for the pkg value; do not just use cpu */
if (!bitmask_isbitset(cpus_chosen, cpu_top.core_info[cpu].cpu))
return;
if (topology_depth > 2)
printf("%4d|", cpu_top.core_info[cpu].pkg);
if (topology_depth > 1)
printf("%4d|", cpu_top.core_info[cpu].core);
if (topology_depth > 0)
printf("%4d|", cpu_top.core_info[cpu].cpu);
for (mon = 0; mon < avail_monitors; mon++) {
if (mon != 0)
printf("||");
for (state = 0; state < monitors[mon]->hw_states_num; state++) {
if (state != 0)
printf("|");
s = monitors[mon]->hw_states[state];
if (s.get_count_percent) {
ret = s.get_count_percent(s.id, &percent,
cpu_top.core_info[cpu].cpu);
if (ret)
printf("******");
else if (percent >= 100.0)
printf("%6.1f", percent);
else
printf("%6.2f", percent);
} else if (s.get_count) {
ret = s.get_count(s.id, &result,
cpu_top.core_info[cpu].cpu);
if (ret)
printf("******");
else
printf("%6llu", result);
} else {
printf(_("Monitor %s, Counter %s has no count "
"function. Implementation error\n"),
monitors[mon]->name, s.name);
exit(EXIT_FAILURE);
}
}
}
/*
* The monitor could still provide useful data, for example
* AMD HW counters partly sit in PCI config space.
* It's up to the monitor plug-in to check .is_online, this one
* is just for additional info.
*/
if (!cpu_top.core_info[cpu].is_online) {
printf(_(" *is offline\n"));
return;
} else
printf("\n");
}
/* param: string passed by -m param (The list of monitors to show)
*
* Monitors must have been registered already; matching monitors
* are picked out and the available monitors array is overridden
* with the matching ones.
*
* Monitors end up sorted in the same order the user passes them.
*/
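/*
 * For example (monitor names are illustrative; "cpupower monitor -l"
 * prints the real list): "-m Mperf,Idle_Stats" keeps only those two
 * monitors, in that order.
 */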
static void parse_monitor_param(char *param)
{
unsigned int num;
int mon, hits = 0;
char *tmp = param, *token;
struct cpuidle_monitor *tmp_mons[MONITORS_MAX];
for (mon = 0; mon < MONITORS_MAX; mon++, tmp = NULL) {
token = strtok(tmp, ",");
if (token == NULL)
break;
if (strlen(token) >= MONITOR_NAME_LEN) {
printf(_("%s: max monitor name length"
" (%d) exceeded\n"), token, MONITOR_NAME_LEN);
continue;
}
for (num = 0; num < avail_monitors; num++) {
if (!strcmp(monitors[num]->name, token)) {
dprint("Found requested monitor: %s\n", token);
tmp_mons[hits] = monitors[num];
hits++;
}
}
}
if (hits == 0) {
printf(_("No matching monitor found in %s, "
"try -l option\n"), param);
exit(EXIT_FAILURE);
}
/* Override detected/registered monitors array with requested one */
memcpy(monitors, tmp_mons,
sizeof(struct cpuidle_monitor *) * MONITORS_MAX);
avail_monitors = hits;
}
void list_monitors(void)
{
unsigned int mon;
int state;
cstate_t s;
for (mon = 0; mon < avail_monitors; mon++) {
printf(_("Monitor \"%s\" (%d states) - Might overflow after %u "
"s\n"),
monitors[mon]->name, monitors[mon]->hw_states_num,
monitors[mon]->overflow_s);
for (state = 0; state < monitors[mon]->hw_states_num; state++) {
s = monitors[mon]->hw_states[state];
/*
* ToDo show more state capabilities:
* percent, time (granularity)
*/
printf("%s\t[%c] -> %s\n", s.name, range_abbr[s.range],
gettext(s.desc));
}
}
}
int fork_it(char **argv)
{
int status;
unsigned int num;
unsigned long long timediff;
pid_t child_pid;
struct timespec start, end;
child_pid = fork();
clock_gettime(CLOCK_REALTIME, &start);
for (num = 0; num < avail_monitors; num++)
monitors[num]->start();
if (!child_pid) {
/* child */
execvp(argv[0], argv);
/* only reached if exec failed */
perror("exec");
exit(127);
} else {
/* parent */
if (child_pid == -1) {
perror("fork");
exit(1);
}
signal(SIGINT, SIG_IGN);
signal(SIGQUIT, SIG_IGN);
if (waitpid(child_pid, &status, 0) == -1) {
perror("wait");
exit(1);
}
}
clock_gettime(CLOCK_REALTIME, &end);
for (num = 0; num < avail_monitors; num++)
monitors[num]->stop();
timediff = timespec_diff_us(start, end);
if (WIFEXITED(status))
printf(_("%s took %.5f seconds and exited with status %d\n"),
argv[0], timediff / (1000.0 * 1000),
WEXITSTATUS(status));
return 0;
}
int do_interval_measure(int i)
{
unsigned int num;
for (num = 0; num < avail_monitors; num++) {
dprint("HW C-state residency monitor: %s - States: %d\n",
monitors[num]->name, monitors[num]->hw_states_num);
monitors[num]->start();
}
sleep(i);
for (num = 0; num < avail_monitors; num++)
monitors[num]->stop();
return 0;
}
static void cmdline(int argc, char *argv[])
{
int opt;
progname = basename(argv[0]);
while ((opt = getopt(argc, argv, "+li:m:")) != -1) {
switch (opt) {
case 'l':
if (mode)
print_wrong_arg_exit();
mode = list;
break;
case 'i':
/* only allow -i with -m or no option */
if (mode && mode != show)
print_wrong_arg_exit();
interval = atoi(optarg);
break;
case 'm':
if (mode)
print_wrong_arg_exit();
mode = show;
show_monitors_param = optarg;
break;
default:
print_wrong_arg_exit();
}
}
if (!mode)
mode = show_all;
}
int cmd_monitor(int argc, char **argv)
{
unsigned int num;
struct cpuidle_monitor *test_mon;
int cpu;
cmdline(argc, argv);
cpu_count = get_cpu_topology(&cpu_top);
if (cpu_count < 0) {
printf(_("Cannot read number of available processors\n"));
return EXIT_FAILURE;
}
/* Default is: monitor all CPUs */
if (bitmask_isallclear(cpus_chosen))
bitmask_setall(cpus_chosen);
dprint("System has up to %d CPU cores\n", cpu_count);
for (num = 0; all_monitors[num]; num++) {
dprint("Try to register: %s\n", all_monitors[num]->name);
test_mon = all_monitors[num]->do_register();
if (test_mon) {
if (test_mon->needs_root && !run_as_root) {
fprintf(stderr, _("Available monitor %s needs "
"root access\n"), test_mon->name);
continue;
}
monitors[avail_monitors] = test_mon;
dprint("%s registered\n", all_monitors[num]->name);
avail_monitors++;
}
}
if (avail_monitors == 0) {
printf(_("No HW Cstate monitors found\n"));
return 1;
}
if (mode == list) {
list_monitors();
exit(EXIT_SUCCESS);
}
if (mode == show)
parse_monitor_param(show_monitors_param);
dprint("Packages: %d - Cores: %d - CPUs: %d\n",
cpu_top.pkgs, cpu_top.cores, cpu_count);
/*
* if any params left, it must be a command to fork
*/
if (argc - optind)
fork_it(argv + optind);
else
do_interval_measure(interval);
/*
 * ToDo: Topology parsing needs fixing first to do
 * this more generically
 */
if (cpu_top.pkgs > 1)
print_header(3);
else
print_header(1);
for (cpu = 0; cpu < cpu_count; cpu++) {
if (cpu_top.pkgs > 1)
print_results(3, cpu);
else
print_results(1, cpu);
}
for (num = 0; num < avail_monitors; num++)
monitors[num]->unregister();
cpu_topology_release(cpu_top);
return 0;
}
| gpl-2.0 |
OUDhs/android_kernel_motorola_asanti | fs/yaffs2/yaffs_checkptrw.c | 7941 | 10663 | /*
* YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
*
* Copyright (C) 2002-2010 Aleph One Ltd.
* for Toby Churchill Ltd and Brightstar Engineering
*
* Created by Charles Manning <charles@aleph1.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include "yaffs_checkptrw.h"
#include "yaffs_getblockinfo.h"
static int yaffs2_checkpt_space_ok(struct yaffs_dev *dev)
{
int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
yaffs_trace(YAFFS_TRACE_CHECKPOINT,
"checkpt blocks_avail = %d", blocks_avail);
return (blocks_avail <= 0) ? 0 : 1;
}
static int yaffs_checkpt_erase(struct yaffs_dev *dev)
{
int i;
if (!dev->param.erase_fn)
return 0;
yaffs_trace(YAFFS_TRACE_CHECKPOINT,
"checking blocks %d to %d",
dev->internal_start_block, dev->internal_end_block);
for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
yaffs_trace(YAFFS_TRACE_CHECKPOINT,
"erasing checkpt block %d", i);
dev->n_erasures++;
if (dev->param.
erase_fn(dev,
i - dev->block_offset /* realign */ )) {
bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
dev->n_erased_blocks++;
dev->n_free_chunks +=
dev->param.chunks_per_block;
} else {
dev->param.bad_block_fn(dev, i);
bi->block_state = YAFFS_BLOCK_STATE_DEAD;
}
}
}
dev->blocks_in_checkpt = 0;
return 1;
}
static void yaffs2_checkpt_find_erased_block(struct yaffs_dev *dev)
{
int i;
int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
yaffs_trace(YAFFS_TRACE_CHECKPOINT,
"allocating checkpt block: erased %d reserved %d avail %d next %d ",
dev->n_erased_blocks, dev->param.n_reserved_blocks,
blocks_avail, dev->checkpt_next_block);
if (dev->checkpt_next_block >= 0 &&
dev->checkpt_next_block <= dev->internal_end_block &&
blocks_avail > 0) {
for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
i++) {
struct yaffs_block_info *bi =
yaffs_get_block_info(dev, i);
if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
dev->checkpt_next_block = i + 1;
dev->checkpt_cur_block = i;
yaffs_trace(YAFFS_TRACE_CHECKPOINT,
"allocating checkpt block %d", i);
return;
}
}
}
yaffs_trace(YAFFS_TRACE_CHECKPOINT, "out of checkpt blocks");
dev->checkpt_next_block = -1;
dev->checkpt_cur_block = -1;
}
static void yaffs2_checkpt_find_block(struct yaffs_dev *dev)
{
int i;
struct yaffs_ext_tags tags;
yaffs_trace(YAFFS_TRACE_CHECKPOINT,
"find next checkpt block: start: blocks %d next %d",
dev->blocks_in_checkpt, dev->checkpt_next_block);
if (dev->blocks_in_checkpt < dev->checkpt_max_blocks)
for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
i++) {
int chunk = i * dev->param.chunks_per_block;
int realigned_chunk = chunk - dev->chunk_offset;
dev->param.read_chunk_tags_fn(dev, realigned_chunk,
NULL, &tags);
yaffs_trace(YAFFS_TRACE_CHECKPOINT,
"find next checkpt block: search: block %d oid %d seq %d eccr %d",
i, tags.obj_id, tags.seq_number,
tags.ecc_result);
if (tags.seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA) {
/* Right kind of block */
dev->checkpt_next_block = tags.obj_id;
dev->checkpt_cur_block = i;
dev->checkpt_block_list[dev->
blocks_in_checkpt] = i;
dev->blocks_in_checkpt++;
yaffs_trace(YAFFS_TRACE_CHECKPOINT,
"found checkpt block %d", i);
return;
}
}
yaffs_trace(YAFFS_TRACE_CHECKPOINT, "found no more checkpt blocks");
dev->checkpt_next_block = -1;
dev->checkpt_cur_block = -1;
}
int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing)
{
dev->checkpt_open_write = writing;
/* Got the functions we need? */
if (!dev->param.write_chunk_tags_fn ||
!dev->param.read_chunk_tags_fn ||
!dev->param.erase_fn || !dev->param.bad_block_fn)
return 0;
if (writing && !yaffs2_checkpt_space_ok(dev))
return 0;
if (!dev->checkpt_buffer)
dev->checkpt_buffer =
kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
if (!dev->checkpt_buffer)
return 0;
dev->checkpt_page_seq = 0;
dev->checkpt_byte_count = 0;
dev->checkpt_sum = 0;
dev->checkpt_xor = 0;
dev->checkpt_cur_block = -1;
dev->checkpt_cur_chunk = -1;
dev->checkpt_next_block = dev->internal_start_block;
/* Erase all the blocks in the checkpoint area */
if (writing) {
memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
dev->checkpt_byte_offs = 0;
return yaffs_checkpt_erase(dev);
} else {
int i;
/* Set to a value that will kick off a read */
dev->checkpt_byte_offs = dev->data_bytes_per_chunk;
/* A checkpoint block list of 1 checkpoint block per 16 blocks is
 * (hopefully) going to be way more than we need */
dev->blocks_in_checkpt = 0;
dev->checkpt_max_blocks =
(dev->internal_end_block - dev->internal_start_block) / 16 +
2;
dev->checkpt_block_list =
kmalloc(sizeof(int) * dev->checkpt_max_blocks, GFP_NOFS);
if (!dev->checkpt_block_list)
return 0;
for (i = 0; i < dev->checkpt_max_blocks; i++)
dev->checkpt_block_list[i] = -1;
}
return 1;
}
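/*
 * Typical call sequence (a sketch of how the rest of yaffs2 drives
 * this layer):
 *
 *	yaffs2_checkpt_open(dev, 1);		/+ open for writing +/
 *	yaffs2_checkpt_wr(dev, &data, n_bytes);	/+ repeat as needed +/
 *	yaffs_checkpt_close(dev);
 *
 * Reading mirrors this with yaffs2_checkpt_open(dev, 0) and
 * yaffs2_checkpt_rd().
 */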
int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 * sum)
{
u32 composite_sum;
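/*
 * Combine the additive byte sum (shifted up 8 bits) with the running
 * XOR in the low byte to form a single 32-bit check value, so a
 * mismatch in either component shows up in the comparison.
 */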
composite_sum = (dev->checkpt_sum << 8) | (dev->checkpt_xor & 0xFF);
*sum = composite_sum;
return 1;
}
static int yaffs2_checkpt_flush_buffer(struct yaffs_dev *dev)
{
int chunk;
int realigned_chunk;
struct yaffs_ext_tags tags;
if (dev->checkpt_cur_block < 0) {
yaffs2_checkpt_find_erased_block(dev);
dev->checkpt_cur_chunk = 0;
}
if (dev->checkpt_cur_block < 0)
return 0;
tags.is_deleted = 0;
tags.obj_id = dev->checkpt_next_block; /* Hint to next place to look */
tags.chunk_id = dev->checkpt_page_seq + 1;
tags.seq_number = YAFFS_SEQUENCE_CHECKPOINT_DATA;
tags.n_bytes = dev->data_bytes_per_chunk;
if (dev->checkpt_cur_chunk == 0) {
/* First chunk we write for the block? Set block state to
checkpoint */
struct yaffs_block_info *bi =
yaffs_get_block_info(dev, dev->checkpt_cur_block);
bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
dev->blocks_in_checkpt++;
}
chunk =
dev->checkpt_cur_block * dev->param.chunks_per_block +
dev->checkpt_cur_chunk;
yaffs_trace(YAFFS_TRACE_CHECKPOINT,
"checkpoint wite buffer nand %d(%d:%d) objid %d chId %d",
chunk, dev->checkpt_cur_block, dev->checkpt_cur_chunk,
tags.obj_id, tags.chunk_id);
realigned_chunk = chunk - dev->chunk_offset;
dev->n_page_writes++;
dev->param.write_chunk_tags_fn(dev, realigned_chunk,
dev->checkpt_buffer, &tags);
dev->checkpt_byte_offs = 0;
dev->checkpt_page_seq++;
dev->checkpt_cur_chunk++;
if (dev->checkpt_cur_chunk >= dev->param.chunks_per_block) {
dev->checkpt_cur_chunk = 0;
dev->checkpt_cur_block = -1;
}
memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
return 1;
}
int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes)
{
int i = 0;
int ok = 1;
u8 *data_bytes = (u8 *) data;
if (!dev->checkpt_buffer)
return 0;
if (!dev->checkpt_open_write)
return -1;
while (i < n_bytes && ok) {
dev->checkpt_buffer[dev->checkpt_byte_offs] = *data_bytes;
dev->checkpt_sum += *data_bytes;
dev->checkpt_xor ^= *data_bytes;
dev->checkpt_byte_offs++;
i++;
data_bytes++;
dev->checkpt_byte_count++;
if (dev->checkpt_byte_offs < 0 ||
dev->checkpt_byte_offs >= dev->data_bytes_per_chunk)
ok = yaffs2_checkpt_flush_buffer(dev);
}
return i;
}
int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes)
{
int i = 0;
int ok = 1;
struct yaffs_ext_tags tags;
int chunk;
int realigned_chunk;
u8 *data_bytes = (u8 *) data;
if (!dev->checkpt_buffer)
return 0;
if (dev->checkpt_open_write)
return -1;
while (i < n_bytes && ok) {
if (dev->checkpt_byte_offs < 0 ||
dev->checkpt_byte_offs >= dev->data_bytes_per_chunk) {
if (dev->checkpt_cur_block < 0) {
yaffs2_checkpt_find_block(dev);
dev->checkpt_cur_chunk = 0;
}
if (dev->checkpt_cur_block < 0)
ok = 0;
else {
chunk = dev->checkpt_cur_block *
dev->param.chunks_per_block +
dev->checkpt_cur_chunk;
realigned_chunk = chunk - dev->chunk_offset;
dev->n_page_reads++;
/* read in the next chunk */
dev->param.read_chunk_tags_fn(dev,
realigned_chunk,
dev->
checkpt_buffer,
&tags);
if (tags.chunk_id != (dev->checkpt_page_seq + 1)
|| tags.ecc_result > YAFFS_ECC_RESULT_FIXED
|| tags.seq_number !=
YAFFS_SEQUENCE_CHECKPOINT_DATA)
ok = 0;
dev->checkpt_byte_offs = 0;
dev->checkpt_page_seq++;
dev->checkpt_cur_chunk++;
if (dev->checkpt_cur_chunk >=
dev->param.chunks_per_block)
dev->checkpt_cur_block = -1;
}
}
if (ok) {
*data_bytes =
dev->checkpt_buffer[dev->checkpt_byte_offs];
dev->checkpt_sum += *data_bytes;
dev->checkpt_xor ^= *data_bytes;
dev->checkpt_byte_offs++;
i++;
data_bytes++;
dev->checkpt_byte_count++;
}
}
return i;
}
int yaffs_checkpt_close(struct yaffs_dev *dev)
{
if (dev->checkpt_open_write) {
if (dev->checkpt_byte_offs != 0)
yaffs2_checkpt_flush_buffer(dev);
} else if (dev->checkpt_block_list) {
int i;
for (i = 0;
i < dev->blocks_in_checkpt
&& dev->checkpt_block_list[i] >= 0; i++) {
int blk = dev->checkpt_block_list[i];
struct yaffs_block_info *bi = NULL;
if (dev->internal_start_block <= blk
&& blk <= dev->internal_end_block)
bi = yaffs_get_block_info(dev, blk);
if (bi && bi->block_state == YAFFS_BLOCK_STATE_EMPTY)
bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
else {
/* Todo this looks odd... */
}
}
kfree(dev->checkpt_block_list);
dev->checkpt_block_list = NULL;
}
dev->n_free_chunks -=
dev->blocks_in_checkpt * dev->param.chunks_per_block;
dev->n_erased_blocks -= dev->blocks_in_checkpt;
yaffs_trace(YAFFS_TRACE_CHECKPOINT,"checkpoint byte count %d",
dev->checkpt_byte_count);
if (dev->checkpt_buffer) {
/* free the buffer */
kfree(dev->checkpt_buffer);
dev->checkpt_buffer = NULL;
return 1;
} else {
return 0;
}
}
int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev)
{
/* Erase the checkpoint data */
yaffs_trace(YAFFS_TRACE_CHECKPOINT,
"checkpoint invalidate of %d blocks",
dev->blocks_in_checkpt);
return yaffs_checkpt_erase(dev);
}
| gpl-2.0 |
TimofeyFox/GT-S7270_kernel | drivers/char/agp/efficeon-agp.c | 8453 | 12657 | /*
* Transmeta's Efficeon AGPGART driver.
*
* Based upon a diff by Linus around November '02.
*
* Ported to the 2.6 kernel by Carlos Puchol <cpglinux@puchol.com>
* and H. Peter Anvin <hpa@transmeta.com>.
*/
/*
* NOTE-cpg-040217:
*
* - when compiled as a module, after loading the module,
* it will refuse to unload, indicating it is in use,
* when it is not.
* - no s3 (suspend to ram) testing.
* - tested on the efficeon integrated northbridge for tens
* of iterations of starting x and glxgears.
* - tested with radeon 9000 and radeon mobility m9 cards
* - tested with c3/c4 enabled (with the mobility m9 card)
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/gfp.h>
#include <linux/page-flags.h>
#include <linux/mm.h>
#include "agp.h"
#include "intel-agp.h"
/*
* The real differences to the generic AGP code is
* in the GART mappings - a two-level setup with the
* first level being an on-chip 64-entry table.
*
* The page array is filled through the ATTPAGE register
* (Aperture Translation Table Page Register) at 0xB8. Bits:
* 31:20: physical page address
* 11:9: Page Attribute Table Index (PATI)
* must match the PAT index for the
* mapped pages (the 2nd level page table pages
* themselves should be just regular WB-cacheable,
* so this is normally zero.)
* 8: Present
* 7:6: reserved, write as zero
* 5:0: GATT directory index: which 1st-level entry
*
* The Efficeon AGP spec requires pages to be WB-cacheable
* but to be explicitly CLFLUSH'd after any changes.
*/
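/*
 * For example (illustrative values): a 2nd-level page table page at
 * physical address 0x12345000 installed as 1st-level entry 3 would be
 * written to ATTPAGE as
 *
 *	0x12345000 | EFFICEON_PATI | EFFICEON_PRESENT | 3
 *
 * which matches how efficeon_create_gatt_table() composes "value".
 */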
#define EFFICEON_ATTPAGE 0xb8
#define EFFICEON_L1_SIZE 64 /* Number of PDE pages */
#define EFFICEON_PATI (0 << 9)
#define EFFICEON_PRESENT (1 << 8)
static struct _efficeon_private {
unsigned long l1_table[EFFICEON_L1_SIZE];
} efficeon_private;
static const struct gatt_mask efficeon_generic_masks[] =
{
{.mask = 0x00000001, .type = 0}
};
/* This function does the same thing as mask_memory() for this chipset... */
static inline unsigned long efficeon_mask_memory(struct page *page)
{
unsigned long addr = page_to_phys(page);
return addr | 0x00000001;
}
static const struct aper_size_info_lvl2 efficeon_generic_sizes[4] =
{
{256, 65536, 0},
{128, 32768, 32},
{64, 16384, 48},
{32, 8192, 56}
};
/*
* Control interfaces are largely identical to
* those of the legacy Intel 440BX.
*/
static int efficeon_fetch_size(void)
{
int i;
u16 temp;
struct aper_size_info_lvl2 *values;
pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp);
values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
if (temp == values[i].size_value) {
agp_bridge->previous_size =
agp_bridge->current_size = (void *) (values + i);
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
}
return 0;
}
static void efficeon_tlbflush(struct agp_memory * mem)
{
printk(KERN_DEBUG PFX "efficeon_tlbflush()\n");
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200);
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
}
static void efficeon_cleanup(void)
{
u16 temp;
struct aper_size_info_lvl2 *previous_size;
printk(KERN_DEBUG PFX "efficeon_cleanup()\n");
previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
pci_write_config_word(agp_bridge->dev, INTEL_APSIZE,
previous_size->size_value);
}
static int efficeon_configure(void)
{
u32 temp;
u16 temp2;
struct aper_size_info_lvl2 *current_size;
printk(KERN_DEBUG PFX "efficeon_configure()\n");
current_size = A_SIZE_LVL2(agp_bridge->current_size);
/* aperture size */
pci_write_config_word(agp_bridge->dev, INTEL_APSIZE,
current_size->size_value);
/* address to map to */
pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
/* agpctrl */
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
/* paccfg/nbxcfg */
pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG,
(temp2 & ~(1 << 10)) | (1 << 9) | (1 << 11));
/* clear any possible error conditions */
pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7);
return 0;
}
static int efficeon_free_gatt_table(struct agp_bridge_data *bridge)
{
int index, freed = 0;
for (index = 0; index < EFFICEON_L1_SIZE; index++) {
unsigned long page = efficeon_private.l1_table[index];
if (page) {
efficeon_private.l1_table[index] = 0;
ClearPageReserved(virt_to_page((char *)page));
free_page(page);
freed++;
}
printk(KERN_DEBUG PFX "efficeon_free_gatt_table(%p, %02x, %08x)\n",
agp_bridge->dev, EFFICEON_ATTPAGE, index);
pci_write_config_dword(agp_bridge->dev,
EFFICEON_ATTPAGE, index);
}
printk(KERN_DEBUG PFX "efficeon_free_gatt_table() freed %d pages\n", freed);
return 0;
}
/*
* Since we don't need contiguous memory we just try
* to get the GATT table once.
*/
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#undef GET_GATT
#define GET_GATT(addr) (efficeon_private.gatt_pages[\
GET_PAGE_DIR_IDX(addr)]->remapped)
static int efficeon_create_gatt_table(struct agp_bridge_data *bridge)
{
int index;
const int pati = EFFICEON_PATI;
const int present = EFFICEON_PRESENT;
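/*
 * CPUID leaf 1 reports the CLFLUSH line size in EBX bits 15:8, in
 * 8-byte units; the shift by 3 converts that to bytes.
 */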
const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3;
int num_entries, l1_pages;
num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
printk(KERN_DEBUG PFX "efficeon_create_gatt_table(%d)\n", num_entries);
/* There are 2^10 PTE pages per PDE page */
BUG_ON(num_entries & 0x3ff);
l1_pages = num_entries >> 10;
for (index = 0 ; index < l1_pages ; index++) {
int offset;
unsigned long page;
unsigned long value;
page = efficeon_private.l1_table[index];
BUG_ON(page);
page = get_zeroed_page(GFP_KERNEL);
if (!page) {
efficeon_free_gatt_table(agp_bridge);
return -ENOMEM;
}
SetPageReserved(virt_to_page((char *)page));
for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk)
clflush((char *)page+offset);
efficeon_private.l1_table[index] = page;
value = virt_to_phys((unsigned long *)page) | pati | present | index;
pci_write_config_dword(agp_bridge->dev,
EFFICEON_ATTPAGE, value);
}
return 0;
}
static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
{
int i, count = mem->page_count, num_entries;
unsigned int *page, *last_page;
const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3;
const unsigned long clflush_mask = ~(clflush_chunk-1);
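/*
 * Two PTE pointers need separate flushes only when they differ in a
 * bit covered by clflush_mask, i.e. when they fall in different
 * CLFLUSH lines; the loop below batches flushes on that basis.
 */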
printk(KERN_DEBUG PFX "efficeon_insert_memory(%lx, %d)\n", pg_start, count);
num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
if ((pg_start + mem->page_count) > num_entries)
return -EINVAL;
if (type != 0 || mem->type != 0)
return -EINVAL;
if (!mem->is_flushed) {
global_cache_flush();
mem->is_flushed = true;
}
last_page = NULL;
for (i = 0; i < count; i++) {
int index = pg_start + i;
unsigned long insert = efficeon_mask_memory(mem->pages[i]);
page = (unsigned int *) efficeon_private.l1_table[index >> 10];
if (!page)
continue;
page += (index & 0x3ff);
*page = insert;
/* clflush is slow, so don't clflush until we have to */
if (last_page &&
(((unsigned long)page^(unsigned long)last_page) &
clflush_mask))
clflush(last_page);
last_page = page;
}
if ( last_page )
clflush(last_page);
agp_bridge->driver->tlb_flush(mem);
return 0;
}
static int efficeon_remove_memory(struct agp_memory * mem, off_t pg_start, int type)
{
int i, count = mem->page_count, num_entries;
printk(KERN_DEBUG PFX "efficeon_remove_memory(%lx, %d)\n", pg_start, count);
num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
if ((pg_start + mem->page_count) > num_entries)
return -EINVAL;
if (type != 0 || mem->type != 0)
return -EINVAL;
for (i = 0; i < count; i++) {
int index = pg_start + i;
unsigned int *page = (unsigned int *) efficeon_private.l1_table[index >> 10];
if (!page)
continue;
page += (index & 0x3ff);
*page = 0;
}
agp_bridge->driver->tlb_flush(mem);
return 0;
}
static const struct agp_bridge_driver efficeon_driver = {
.owner = THIS_MODULE,
.aperture_sizes = efficeon_generic_sizes,
.size_type = LVL2_APER_SIZE,
.num_aperture_sizes = 4,
.configure = efficeon_configure,
.fetch_size = efficeon_fetch_size,
.cleanup = efficeon_cleanup,
.tlb_flush = efficeon_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = efficeon_generic_masks,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
// Efficeon-specific GATT table setup / populate / teardown
.create_gatt_table = efficeon_create_gatt_table,
.free_gatt_table = efficeon_free_gatt_table,
.insert_memory = efficeon_insert_memory,
.remove_memory = efficeon_remove_memory,
.cant_use_aperture = false, // true might be faster?
// Generic
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static int __devinit agp_efficeon_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
struct resource *r;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return -ENODEV;
/* Probe for Efficeon controller */
if (pdev->device != PCI_DEVICE_ID_EFFICEON) {
printk(KERN_ERR PFX "Unsupported Efficeon chipset (device id: %04x)\n",
pdev->device);
return -ENODEV;
}
printk(KERN_INFO PFX "Detected Transmeta Efficeon TM8000 series chipset\n");
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
bridge->driver = &efficeon_driver;
bridge->dev = pdev;
bridge->capndx = cap_ptr;
/*
* If the device has not been properly setup, the following will catch
* the problem and should stop the system from crashing.
* 20030610 - hamish@zot.org
*/
if (pci_enable_device(pdev)) {
printk(KERN_ERR PFX "Unable to Enable PCI device\n");
agp_put_bridge(bridge);
return -ENODEV;
}
/*
* The following fixes the case where the BIOS has "forgotten" to
* provide an address range for the GART.
* 20030610 - hamish@zot.org
*/
r = &pdev->resource[0];
if (!r->start && r->end) {
if (pci_assign_resource(pdev, 0)) {
printk(KERN_ERR PFX "could not assign resource 0\n");
agp_put_bridge(bridge);
return -ENODEV;
}
}
/* Fill in the mode register */
if (cap_ptr) {
pci_read_config_dword(pdev,
bridge->capndx+PCI_AGP_STATUS,
&bridge->mode);
}
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
}
static void __devexit agp_efficeon_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
}
#ifdef CONFIG_PM
static int agp_efficeon_suspend(struct pci_dev *dev, pm_message_t state)
{
return 0;
}
static int agp_efficeon_resume(struct pci_dev *pdev)
{
printk(KERN_DEBUG PFX "agp_efficeon_resume()\n");
return efficeon_configure();
}
#endif
static struct pci_device_id agp_efficeon_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_TRANSMETA,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ }
};
MODULE_DEVICE_TABLE(pci, agp_efficeon_pci_table);
static struct pci_driver agp_efficeon_pci_driver = {
.name = "agpgart-efficeon",
.id_table = agp_efficeon_pci_table,
.probe = agp_efficeon_probe,
.remove = agp_efficeon_remove,
#ifdef CONFIG_PM
.suspend = agp_efficeon_suspend,
.resume = agp_efficeon_resume,
#endif
};
static int __init agp_efficeon_init(void)
{
static int agp_initialised=0;
if (agp_off)
return -EINVAL;
if (agp_initialised == 1)
return 0;
agp_initialised=1;
return pci_register_driver(&agp_efficeon_pci_driver);
}
static void __exit agp_efficeon_cleanup(void)
{
pci_unregister_driver(&agp_efficeon_pci_driver);
}
module_init(agp_efficeon_init);
module_exit(agp_efficeon_cleanup);
MODULE_AUTHOR("Carlos Puchol <cpglinux@puchol.com>");
MODULE_LICENSE("GPL and additional rights");
| gpl-2.0 |
tsiktsiris/falcon | drivers/ps3/ps3av_cmd.c | 14597 | 24361 | /*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006, 2007 Sony Corporation
*
* AV backend support for PS3
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published
* by the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/ps3av.h>
#include <asm/ps3.h>
#include <asm/ps3gpu.h>
#include "vuart.h"
static const struct video_fmt {
u32 format;
u32 order;
} ps3av_video_fmt_table[] = {
{ PS3AV_CMD_VIDEO_FORMAT_ARGB_8BIT, PS3AV_CMD_VIDEO_ORDER_RGB },
{ PS3AV_CMD_VIDEO_FORMAT_ARGB_8BIT, PS3AV_CMD_VIDEO_ORDER_BGR },
};
static const struct {
int cs;
u32 av;
u32 bl;
} ps3av_cs_video2av_table[] = {
{
.cs = PS3AV_CMD_VIDEO_CS_RGB_8,
.av = PS3AV_CMD_AV_CS_RGB_8,
.bl = PS3AV_CMD_AV_CS_8
}, {
.cs = PS3AV_CMD_VIDEO_CS_RGB_10,
.av = PS3AV_CMD_AV_CS_RGB_8,
.bl = PS3AV_CMD_AV_CS_8
}, {
.cs = PS3AV_CMD_VIDEO_CS_RGB_12,
.av = PS3AV_CMD_AV_CS_RGB_8,
.bl = PS3AV_CMD_AV_CS_8
}, {
.cs = PS3AV_CMD_VIDEO_CS_YUV444_8,
.av = PS3AV_CMD_AV_CS_YUV444_8,
.bl = PS3AV_CMD_AV_CS_8
}, {
.cs = PS3AV_CMD_VIDEO_CS_YUV444_10,
.av = PS3AV_CMD_AV_CS_YUV444_8,
.bl = PS3AV_CMD_AV_CS_10
}, {
.cs = PS3AV_CMD_VIDEO_CS_YUV444_12,
.av = PS3AV_CMD_AV_CS_YUV444_8,
.bl = PS3AV_CMD_AV_CS_10
}, {
.cs = PS3AV_CMD_VIDEO_CS_YUV422_8,
.av = PS3AV_CMD_AV_CS_YUV422_8,
.bl = PS3AV_CMD_AV_CS_10
}, {
.cs = PS3AV_CMD_VIDEO_CS_YUV422_10,
.av = PS3AV_CMD_AV_CS_YUV422_8,
.bl = PS3AV_CMD_AV_CS_10
}, {
.cs = PS3AV_CMD_VIDEO_CS_YUV422_12,
.av = PS3AV_CMD_AV_CS_YUV422_8,
.bl = PS3AV_CMD_AV_CS_12
}, {
.cs = PS3AV_CMD_VIDEO_CS_XVYCC_8,
.av = PS3AV_CMD_AV_CS_XVYCC_8,
.bl = PS3AV_CMD_AV_CS_12
}, {
.cs = PS3AV_CMD_VIDEO_CS_XVYCC_10,
.av = PS3AV_CMD_AV_CS_XVYCC_8,
.bl = PS3AV_CMD_AV_CS_12
}, {
.cs = PS3AV_CMD_VIDEO_CS_XVYCC_12,
.av = PS3AV_CMD_AV_CS_XVYCC_8,
.bl = PS3AV_CMD_AV_CS_12
}
};
static u32 ps3av_cs_video2av(int cs)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ps3av_cs_video2av_table); i++)
if (ps3av_cs_video2av_table[i].cs == cs)
return ps3av_cs_video2av_table[i].av;
return PS3AV_CMD_AV_CS_RGB_8;
}
static u32 ps3av_cs_video2av_bitlen(int cs)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ps3av_cs_video2av_table); i++)
if (ps3av_cs_video2av_table[i].cs == cs)
return ps3av_cs_video2av_table[i].bl;
return PS3AV_CMD_AV_CS_8;
}
static const struct {
int vid;
u32 av;
} ps3av_vid_video2av_table[] = {
{ PS3AV_CMD_VIDEO_VID_480I, PS3AV_CMD_AV_VID_480I },
{ PS3AV_CMD_VIDEO_VID_480P, PS3AV_CMD_AV_VID_480P },
{ PS3AV_CMD_VIDEO_VID_576I, PS3AV_CMD_AV_VID_576I },
{ PS3AV_CMD_VIDEO_VID_576P, PS3AV_CMD_AV_VID_576P },
{ PS3AV_CMD_VIDEO_VID_1080I_60HZ, PS3AV_CMD_AV_VID_1080I_60HZ },
{ PS3AV_CMD_VIDEO_VID_720P_60HZ, PS3AV_CMD_AV_VID_720P_60HZ },
{ PS3AV_CMD_VIDEO_VID_1080P_60HZ, PS3AV_CMD_AV_VID_1080P_60HZ },
{ PS3AV_CMD_VIDEO_VID_1080I_50HZ, PS3AV_CMD_AV_VID_1080I_50HZ },
{ PS3AV_CMD_VIDEO_VID_720P_50HZ, PS3AV_CMD_AV_VID_720P_50HZ },
{ PS3AV_CMD_VIDEO_VID_1080P_50HZ, PS3AV_CMD_AV_VID_1080P_50HZ },
{ PS3AV_CMD_VIDEO_VID_WXGA, PS3AV_CMD_AV_VID_WXGA },
{ PS3AV_CMD_VIDEO_VID_SXGA, PS3AV_CMD_AV_VID_SXGA },
{ PS3AV_CMD_VIDEO_VID_WUXGA, PS3AV_CMD_AV_VID_WUXGA }
};
static u32 ps3av_vid_video2av(int vid)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ps3av_vid_video2av_table); i++)
if (ps3av_vid_video2av_table[i].vid == vid)
return ps3av_vid_video2av_table[i].av;
return PS3AV_CMD_AV_VID_480P;
}
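/*
 * HDMI range controls (the super-white and full-range colour options
 * used below) need firmware 1.8.0 or later.
 */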
static int ps3av_hdmi_range(void)
{
if (ps3_compare_firmware_version(1, 8, 0) < 0)
return 0;
else
return 1; /* supported */
}
int ps3av_cmd_init(void)
{
int res;
struct ps3av_pkt_av_init av_init;
struct ps3av_pkt_video_init video_init;
struct ps3av_pkt_audio_init audio_init;
/* video init */
memset(&video_init, 0, sizeof(video_init));
res = ps3av_do_pkt(PS3AV_CID_VIDEO_INIT, sizeof(video_init.send_hdr),
sizeof(video_init), &video_init.send_hdr);
if (res < 0)
return res;
res = get_status(&video_init);
if (res) {
printk(KERN_ERR "PS3AV_CID_VIDEO_INIT: failed %x\n", res);
return res;
}
/* audio init */
memset(&audio_init, 0, sizeof(audio_init));
res = ps3av_do_pkt(PS3AV_CID_AUDIO_INIT, sizeof(audio_init.send_hdr),
sizeof(audio_init), &audio_init.send_hdr);
if (res < 0)
return res;
res = get_status(&audio_init);
if (res) {
printk(KERN_ERR "PS3AV_CID_AUDIO_INIT: failed %x\n", res);
return res;
}
/* av init */
memset(&av_init, 0, sizeof(av_init));
av_init.event_bit = 0;
res = ps3av_do_pkt(PS3AV_CID_AV_INIT, sizeof(av_init), sizeof(av_init),
&av_init.send_hdr);
if (res < 0)
return res;
res = get_status(&av_init);
if (res)
printk(KERN_ERR "PS3AV_CID_AV_INIT: failed %x\n", res);
return res;
}
int ps3av_cmd_fin(void)
{
int res;
struct ps3av_pkt_av_fin av_fin;
memset(&av_fin, 0, sizeof(av_fin));
res = ps3av_do_pkt(PS3AV_CID_AV_FIN, sizeof(av_fin.send_hdr),
sizeof(av_fin), &av_fin.send_hdr);
if (res < 0)
return res;
res = get_status(&av_fin);
if (res)
printk(KERN_ERR "PS3AV_CID_AV_FIN: failed %x\n", res);
return res;
}
int ps3av_cmd_av_video_mute(int num_of_port, u32 *port, u32 mute)
{
int i, send_len, res;
struct ps3av_pkt_av_video_mute av_video_mute;
if (num_of_port > PS3AV_MUTE_PORT_MAX)
return -EINVAL;
memset(&av_video_mute, 0, sizeof(av_video_mute));
for (i = 0; i < num_of_port; i++) {
av_video_mute.mute[i].avport = port[i];
av_video_mute.mute[i].mute = mute;
}
send_len = sizeof(av_video_mute.send_hdr) +
sizeof(struct ps3av_av_mute) * num_of_port;
res = ps3av_do_pkt(PS3AV_CID_AV_VIDEO_MUTE, send_len,
sizeof(av_video_mute), &av_video_mute.send_hdr);
if (res < 0)
return res;
res = get_status(&av_video_mute);
if (res)
printk(KERN_ERR "PS3AV_CID_AV_VIDEO_MUTE: failed %x\n", res);
return res;
}
int ps3av_cmd_av_video_disable_sig(u32 port)
{
int res;
struct ps3av_pkt_av_video_disable_sig av_video_sig;
memset(&av_video_sig, 0, sizeof(av_video_sig));
av_video_sig.avport = port;
res = ps3av_do_pkt(PS3AV_CID_AV_VIDEO_DISABLE_SIG,
sizeof(av_video_sig), sizeof(av_video_sig),
&av_video_sig.send_hdr);
if (res < 0)
return res;
res = get_status(&av_video_sig);
if (res)
printk(KERN_ERR
"PS3AV_CID_AV_VIDEO_DISABLE_SIG: failed %x port:%x\n",
res, port);
return res;
}
int ps3av_cmd_av_tv_mute(u32 avport, u32 mute)
{
int res;
struct ps3av_pkt_av_tv_mute tv_mute;
memset(&tv_mute, 0, sizeof(tv_mute));
tv_mute.avport = avport;
tv_mute.mute = mute;
res = ps3av_do_pkt(PS3AV_CID_AV_TV_MUTE, sizeof(tv_mute),
sizeof(tv_mute), &tv_mute.send_hdr);
if (res < 0)
return res;
res = get_status(&tv_mute);
if (res)
printk(KERN_ERR "PS3AV_CID_AV_TV_MUTE: failed %x port:%x\n",
res, avport);
return res;
}
int ps3av_cmd_enable_event(void)
{
int res;
struct ps3av_pkt_av_event av_event;
memset(&av_event, 0, sizeof(av_event));
av_event.event_bit = PS3AV_CMD_EVENT_BIT_UNPLUGGED |
PS3AV_CMD_EVENT_BIT_PLUGGED | PS3AV_CMD_EVENT_BIT_HDCP_DONE;
res = ps3av_do_pkt(PS3AV_CID_AV_ENABLE_EVENT, sizeof(av_event),
sizeof(av_event), &av_event.send_hdr);
if (res < 0)
return res;
res = get_status(&av_event);
if (res)
printk(KERN_ERR "PS3AV_CID_AV_ENABLE_EVENT: failed %x\n", res);
return res;
}
int ps3av_cmd_av_hdmi_mode(u8 mode)
{
int res;
struct ps3av_pkt_av_hdmi_mode hdmi_mode;
memset(&hdmi_mode, 0, sizeof(hdmi_mode));
hdmi_mode.mode = mode;
res = ps3av_do_pkt(PS3AV_CID_AV_HDMI_MODE, sizeof(hdmi_mode),
sizeof(hdmi_mode), &hdmi_mode.send_hdr);
if (res < 0)
return res;
res = get_status(&hdmi_mode);
if (res && res != PS3AV_STATUS_UNSUPPORTED_HDMI_MODE)
printk(KERN_ERR "PS3AV_CID_AV_HDMI_MODE: failed %x\n", res);
return res;
}
u32 ps3av_cmd_set_av_video_cs(void *p, u32 avport, int video_vid, int cs_out,
int aspect, u32 id)
{
struct ps3av_pkt_av_video_cs *av_video_cs;
av_video_cs = (struct ps3av_pkt_av_video_cs *)p;
if (video_vid == -1)
video_vid = PS3AV_CMD_VIDEO_VID_720P_60HZ;
if (cs_out == -1)
cs_out = PS3AV_CMD_VIDEO_CS_YUV444_8;
if (aspect == -1)
aspect = 0;
memset(av_video_cs, 0, sizeof(*av_video_cs));
ps3av_set_hdr(PS3AV_CID_AV_VIDEO_CS, sizeof(*av_video_cs),
&av_video_cs->send_hdr);
av_video_cs->avport = avport;
/* should be same as video_mode.resolution */
av_video_cs->av_vid = ps3av_vid_video2av(video_vid);
av_video_cs->av_cs_out = ps3av_cs_video2av(cs_out);
/* should be same as video_mode.video_cs_out */
av_video_cs->av_cs_in = ps3av_cs_video2av(PS3AV_CMD_VIDEO_CS_RGB_8);
av_video_cs->bitlen_out = ps3av_cs_video2av_bitlen(cs_out);
if ((id & PS3AV_MODE_WHITE) && ps3av_hdmi_range())
av_video_cs->super_white = PS3AV_CMD_AV_SUPER_WHITE_ON;
else /* default off */
av_video_cs->super_white = PS3AV_CMD_AV_SUPER_WHITE_OFF;
av_video_cs->aspect = aspect;
if (id & PS3AV_MODE_DITHER) {
av_video_cs->dither = PS3AV_CMD_AV_DITHER_ON
| PS3AV_CMD_AV_DITHER_8BIT;
} else {
/* default off */
av_video_cs->dither = PS3AV_CMD_AV_DITHER_OFF;
}
return sizeof(*av_video_cs);
}
u32 ps3av_cmd_set_video_mode(void *p, u32 head, int video_vid, int video_fmt,
u32 id)
{
struct ps3av_pkt_video_mode *video_mode;
u32 x, y;
video_mode = (struct ps3av_pkt_video_mode *)p;
if (video_vid == -1)
video_vid = PS3AV_CMD_VIDEO_VID_720P_60HZ;
if (video_fmt == -1)
video_fmt = PS3AV_CMD_VIDEO_FMT_X8R8G8B8;
if (ps3av_video_mode2res(id, &x, &y))
return 0;
/* video mode */
memset(video_mode, 0, sizeof(*video_mode));
ps3av_set_hdr(PS3AV_CID_VIDEO_MODE, sizeof(*video_mode),
&video_mode->send_hdr);
video_mode->video_head = head;
if (video_vid == PS3AV_CMD_VIDEO_VID_480I
&& head == PS3AV_CMD_VIDEO_HEAD_B)
video_mode->video_vid = PS3AV_CMD_VIDEO_VID_480I_A;
else
video_mode->video_vid = video_vid;
video_mode->width = (u16) x;
video_mode->height = (u16) y;
video_mode->pitch = video_mode->width * 4; /* line_length */
video_mode->video_out_format = PS3AV_CMD_VIDEO_OUT_FORMAT_RGB_12BIT;
video_mode->video_format = ps3av_video_fmt_table[video_fmt].format;
if ((id & PS3AV_MODE_COLOR) && ps3av_hdmi_range())
video_mode->video_cl_cnv = PS3AV_CMD_VIDEO_CL_CNV_DISABLE_LUT;
else /* default enable */
video_mode->video_cl_cnv = PS3AV_CMD_VIDEO_CL_CNV_ENABLE_LUT;
video_mode->video_order = ps3av_video_fmt_table[video_fmt].order;
pr_debug("%s: video_mode:vid:%x width:%d height:%d pitch:%d out_format:%d format:%x order:%x\n",
__func__, video_vid, video_mode->width, video_mode->height,
video_mode->pitch, video_mode->video_out_format,
video_mode->video_format, video_mode->video_order);
return sizeof(*video_mode);
}
int ps3av_cmd_video_format_black(u32 head, u32 video_fmt, u32 mute)
{
int res;
struct ps3av_pkt_video_format video_format;
memset(&video_format, 0, sizeof(video_format));
video_format.video_head = head;
if (mute != PS3AV_CMD_MUTE_OFF)
video_format.video_format = PS3AV_CMD_VIDEO_FORMAT_BLACK;
else
video_format.video_format =
ps3av_video_fmt_table[video_fmt].format;
video_format.video_order = ps3av_video_fmt_table[video_fmt].order;
res = ps3av_do_pkt(PS3AV_CID_VIDEO_FORMAT, sizeof(video_format),
sizeof(video_format), &video_format.send_hdr);
if (res < 0)
return res;
res = get_status(&video_format);
if (res)
printk(KERN_ERR "PS3AV_CID_VIDEO_FORMAT: failed %x\n", res);
return res;
}
int ps3av_cmd_av_audio_mute(int num_of_port, u32 *port, u32 mute)
{
int i, res;
struct ps3av_pkt_av_audio_mute av_audio_mute;
if (num_of_port > PS3AV_MUTE_PORT_MAX)
return -EINVAL;
/* audio mute */
memset(&av_audio_mute, 0, sizeof(av_audio_mute));
for (i = 0; i < num_of_port; i++) {
av_audio_mute.mute[i].avport = port[i];
av_audio_mute.mute[i].mute = mute;
}
res = ps3av_do_pkt(PS3AV_CID_AV_AUDIO_MUTE,
sizeof(av_audio_mute.send_hdr) +
sizeof(struct ps3av_av_mute) * num_of_port,
sizeof(av_audio_mute), &av_audio_mute.send_hdr);
if (res < 0)
return res;
res = get_status(&av_audio_mute);
if (res)
printk(KERN_ERR "PS3AV_CID_AV_AUDIO_MUTE: failed %x\n", res);
return res;
}
static const struct {
u32 fs;
u8 mclk;
} ps3av_cnv_mclk_table[] = {
{ PS3AV_CMD_AUDIO_FS_44K, PS3AV_CMD_AV_MCLK_512 },
{ PS3AV_CMD_AUDIO_FS_48K, PS3AV_CMD_AV_MCLK_512 },
{ PS3AV_CMD_AUDIO_FS_88K, PS3AV_CMD_AV_MCLK_256 },
{ PS3AV_CMD_AUDIO_FS_96K, PS3AV_CMD_AV_MCLK_256 },
{ PS3AV_CMD_AUDIO_FS_176K, PS3AV_CMD_AV_MCLK_128 },
{ PS3AV_CMD_AUDIO_FS_192K, PS3AV_CMD_AV_MCLK_128 }
};
static u8 ps3av_cnv_mclk(u32 fs)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ps3av_cnv_mclk_table); i++)
if (ps3av_cnv_mclk_table[i].fs == fs)
return ps3av_cnv_mclk_table[i].mclk;
printk(KERN_ERR "%s failed, fs:%x\n", __func__, fs);
return 0;
}
#define BASE PS3AV_CMD_AUDIO_FS_44K
static const u32 ps3av_ns_table[][5] = {
/* D1, D2, D3, D4, D5 */
[PS3AV_CMD_AUDIO_FS_44K-BASE] = { 6272, 6272, 17836, 17836, 8918 },
[PS3AV_CMD_AUDIO_FS_48K-BASE] = { 6144, 6144, 11648, 11648, 5824 },
[PS3AV_CMD_AUDIO_FS_88K-BASE] = { 12544, 12544, 35672, 35672, 17836 },
[PS3AV_CMD_AUDIO_FS_96K-BASE] = { 12288, 12288, 23296, 23296, 11648 },
[PS3AV_CMD_AUDIO_FS_176K-BASE] = { 25088, 25088, 71344, 71344, 35672 },
[PS3AV_CMD_AUDIO_FS_192K-BASE] = { 24576, 24576, 46592, 46592, 23296 }
};
static void ps3av_cnv_ns(u8 *ns, u32 fs, u32 video_vid)
{
u32 av_vid, ns_val;
int d;
d = ns_val = 0;
av_vid = ps3av_vid_video2av(video_vid);
switch (av_vid) {
case PS3AV_CMD_AV_VID_480I:
case PS3AV_CMD_AV_VID_576I:
d = 0;
break;
case PS3AV_CMD_AV_VID_480P:
case PS3AV_CMD_AV_VID_576P:
d = 1;
break;
case PS3AV_CMD_AV_VID_1080I_60HZ:
case PS3AV_CMD_AV_VID_1080I_50HZ:
d = 2;
break;
case PS3AV_CMD_AV_VID_720P_60HZ:
case PS3AV_CMD_AV_VID_720P_50HZ:
d = 3;
break;
case PS3AV_CMD_AV_VID_1080P_60HZ:
case PS3AV_CMD_AV_VID_1080P_50HZ:
case PS3AV_CMD_AV_VID_WXGA:
case PS3AV_CMD_AV_VID_SXGA:
case PS3AV_CMD_AV_VID_WUXGA:
d = 4;
break;
default:
printk(KERN_ERR "%s failed, vid:%x\n", __func__, video_vid);
break;
}
if (fs < PS3AV_CMD_AUDIO_FS_44K || fs > PS3AV_CMD_AUDIO_FS_192K)
printk(KERN_ERR "%s failed, fs:%x\n", __func__, fs);
else
ns_val = ps3av_ns_table[fs - BASE][d];
*ns++ = ns_val & 0x000000FF;
*ns++ = (ns_val & 0x0000FF00) >> 8;
*ns = (ns_val & 0x00FF0000) >> 16;
}
#undef BASE
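/*
 * Sketch of the inverse of the byte packing done in ps3av_cnv_ns()
 * above (a hypothetical helper, for illustration only): the 24-bit
 * HDMI audio N value is stored LSB first, so e.g. N = 6144 (48 kHz on
 * a 480p head, row D2 of the table) packs to { 0x00, 0x18, 0x00 }.
 */
static inline u32 ps3av_ns_unpack(const u8 *ns)
{
return ns[0] | (ns[1] << 8) | ((u32)ns[2] << 16);
}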
static u8 ps3av_cnv_enable(u32 source, const u8 *enable)
{
u8 ret = 0;
if (source == PS3AV_CMD_AUDIO_SOURCE_SPDIF) {
ret = 0x03;
} else if (source == PS3AV_CMD_AUDIO_SOURCE_SERIAL) {
ret = ((enable[0] << 4) + (enable[1] << 5) + (enable[2] << 6) +
(enable[3] << 7)) | 0x01;
} else
printk(KERN_ERR "%s failed, source:%x\n", __func__, source);
return ret;
}
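/*
 * Worked example of the enable encoding above (values derived from the
 * code, not quoted from any spec): a serial source with only the first
 * two of the four enable flags set yields (1 << 4) | (1 << 5) | 0x01 =
 * 0x31, while an S/PDIF source always yields 0x03.
 */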
static u8 ps3av_cnv_fifomap(const u8 *map)
{
u8 ret = 0;
ret = map[0] + (map[1] << 2) + (map[2] << 4) + (map[3] << 6);
return ret;
}
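/*
 * Each of the four FIFO map entries occupies two bits, so the identity
 * mapping { 0, 1, 2, 3 } packs to 0 | (1 << 2) | (2 << 4) | (3 << 6) =
 * 0xe4 (a worked example, not a value from the source).
 */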
static u8 ps3av_cnv_inputlen(u32 word_bits)
{
u8 ret = 0;
switch (word_bits) {
case PS3AV_CMD_AUDIO_WORD_BITS_16:
ret = PS3AV_CMD_AV_INPUTLEN_16;
break;
case PS3AV_CMD_AUDIO_WORD_BITS_20:
ret = PS3AV_CMD_AV_INPUTLEN_20;
break;
case PS3AV_CMD_AUDIO_WORD_BITS_24:
ret = PS3AV_CMD_AV_INPUTLEN_24;
break;
default:
printk(KERN_ERR "%s failed, word_bits:%x\n", __func__,
word_bits);
break;
}
return ret;
}
static u8 ps3av_cnv_layout(u32 num_of_ch)
{
if (num_of_ch > PS3AV_CMD_AUDIO_NUM_OF_CH_8) {
printk(KERN_ERR "%s failed, num_of_ch:%x\n", __func__,
num_of_ch);
return 0;
}
return num_of_ch == PS3AV_CMD_AUDIO_NUM_OF_CH_2 ? 0x0 : 0x1;
}
static void ps3av_cnv_info(struct ps3av_audio_info_frame *info,
const struct ps3av_pkt_audio_mode *mode)
{
info->pb1.cc = mode->audio_num_of_ch + 1; /* CH2:0x01 --- CH8:0x07 */
info->pb1.ct = 0;
info->pb2.sf = 0;
info->pb2.ss = 0;
info->pb3 = 0; /* check mode->audio_format ?? */
info->pb4 = mode->audio_layout;
info->pb5.dm = mode->audio_downmix;
info->pb5.lsv = mode->audio_downmix_level;
}
static void ps3av_cnv_chstat(u8 *chstat, const u8 *cs_info)
{
memcpy(chstat, cs_info, 5);
}
u32 ps3av_cmd_set_av_audio_param(void *p, u32 port,
const struct ps3av_pkt_audio_mode *audio_mode,
u32 video_vid)
{
struct ps3av_pkt_av_audio_param *param;
param = (struct ps3av_pkt_av_audio_param *)p;
memset(param, 0, sizeof(*param));
ps3av_set_hdr(PS3AV_CID_AV_AUDIO_PARAM, sizeof(*param),
&param->send_hdr);
param->avport = port;
param->mclk = ps3av_cnv_mclk(audio_mode->audio_fs) | 0x80;
ps3av_cnv_ns(param->ns, audio_mode->audio_fs, video_vid);
param->enable = ps3av_cnv_enable(audio_mode->audio_source,
audio_mode->audio_enable);
param->swaplr = 0x09;
param->fifomap = ps3av_cnv_fifomap(audio_mode->audio_map);
param->inputctrl = 0x49;
param->inputlen = ps3av_cnv_inputlen(audio_mode->audio_word_bits);
param->layout = ps3av_cnv_layout(audio_mode->audio_num_of_ch);
ps3av_cnv_info(&param->info, audio_mode);
ps3av_cnv_chstat(param->chstat, audio_mode->audio_cs_info);
return sizeof(*param);
}
/* default cs val */
u8 ps3av_mode_cs_info[] = {
0x00, 0x09, 0x00, 0x02, 0x01, 0x00, 0x00, 0x00
};
EXPORT_SYMBOL_GPL(ps3av_mode_cs_info);
#define CS_44 0x00
#define CS_48 0x02
#define CS_88 0x08
#define CS_96 0x0a
#define CS_176 0x0c
#define CS_192 0x0e
#define CS_MASK 0x0f
#define CS_BIT 0x40
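/*
 * A worked example of the channel-status update performed below: the
 * default cs_info[3] above is 0x02 (CS_48, 48 kHz); switching to
 * 96 kHz clears the low nibble and ORs in the new rate, giving
 * (0x02 & ~CS_MASK) | CS_96 = 0x0a.
 */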
void ps3av_cmd_set_audio_mode(struct ps3av_pkt_audio_mode *audio, u32 avport,
u32 ch, u32 fs, u32 word_bits, u32 format,
u32 source)
{
int spdif_through;
int i;
if (!(ch | fs | format | word_bits | source)) {
ch = PS3AV_CMD_AUDIO_NUM_OF_CH_2;
fs = PS3AV_CMD_AUDIO_FS_48K;
word_bits = PS3AV_CMD_AUDIO_WORD_BITS_16;
format = PS3AV_CMD_AUDIO_FORMAT_PCM;
source = PS3AV_CMD_AUDIO_SOURCE_SERIAL;
}
/* audio mode */
memset(audio, 0, sizeof(*audio));
ps3av_set_hdr(PS3AV_CID_AUDIO_MODE, sizeof(*audio), &audio->send_hdr);
audio->avport = (u8) avport;
audio->mask = 0x0FFF; /* XXX set all */
audio->audio_num_of_ch = ch;
audio->audio_fs = fs;
audio->audio_word_bits = word_bits;
audio->audio_format = format;
audio->audio_source = source;
switch (ch) {
case PS3AV_CMD_AUDIO_NUM_OF_CH_8:
audio->audio_enable[3] = 1;
/* fall through */
case PS3AV_CMD_AUDIO_NUM_OF_CH_6:
audio->audio_enable[2] = 1;
audio->audio_enable[1] = 1;
/* fall through */
case PS3AV_CMD_AUDIO_NUM_OF_CH_2:
default:
audio->audio_enable[0] = 1;
}
/* audio swap L/R */
for (i = 0; i < 4; i++)
audio->audio_swap[i] = PS3AV_CMD_AUDIO_SWAP_0; /* no swap */
/* audio serial input mapping */
audio->audio_map[0] = PS3AV_CMD_AUDIO_MAP_OUTPUT_0;
audio->audio_map[1] = PS3AV_CMD_AUDIO_MAP_OUTPUT_1;
audio->audio_map[2] = PS3AV_CMD_AUDIO_MAP_OUTPUT_2;
audio->audio_map[3] = PS3AV_CMD_AUDIO_MAP_OUTPUT_3;
/* audio speaker layout */
if (avport == PS3AV_CMD_AVPORT_HDMI_0 ||
avport == PS3AV_CMD_AVPORT_HDMI_1) {
switch (ch) {
case PS3AV_CMD_AUDIO_NUM_OF_CH_8:
audio->audio_layout = PS3AV_CMD_AUDIO_LAYOUT_8CH;
break;
case PS3AV_CMD_AUDIO_NUM_OF_CH_6:
audio->audio_layout = PS3AV_CMD_AUDIO_LAYOUT_6CH;
break;
case PS3AV_CMD_AUDIO_NUM_OF_CH_2:
default:
audio->audio_layout = PS3AV_CMD_AUDIO_LAYOUT_2CH;
break;
}
} else {
audio->audio_layout = PS3AV_CMD_AUDIO_LAYOUT_2CH;
}
/* audio downmix permission */
audio->audio_downmix = PS3AV_CMD_AUDIO_DOWNMIX_PERMITTED;
/* audio downmix level shift (0:0dB to 15:15dB) */
audio->audio_downmix_level = 0; /* 0dB */
/* set ch status */
for (i = 0; i < 8; i++)
audio->audio_cs_info[i] = ps3av_mode_cs_info[i];
switch (fs) {
case PS3AV_CMD_AUDIO_FS_44K:
audio->audio_cs_info[3] &= ~CS_MASK;
audio->audio_cs_info[3] |= CS_44;
break;
case PS3AV_CMD_AUDIO_FS_88K:
audio->audio_cs_info[3] &= ~CS_MASK;
audio->audio_cs_info[3] |= CS_88;
break;
case PS3AV_CMD_AUDIO_FS_96K:
audio->audio_cs_info[3] &= ~CS_MASK;
audio->audio_cs_info[3] |= CS_96;
break;
case PS3AV_CMD_AUDIO_FS_176K:
audio->audio_cs_info[3] &= ~CS_MASK;
audio->audio_cs_info[3] |= CS_176;
break;
case PS3AV_CMD_AUDIO_FS_192K:
audio->audio_cs_info[3] &= ~CS_MASK;
audio->audio_cs_info[3] |= CS_192;
break;
default:
break;
}
/* non-audio bit */
spdif_through = audio->audio_cs_info[0] & 0x02;
/* pass through setting */
if (spdif_through &&
(avport == PS3AV_CMD_AVPORT_SPDIF_0 ||
avport == PS3AV_CMD_AVPORT_SPDIF_1 ||
avport == PS3AV_CMD_AVPORT_HDMI_0 ||
avport == PS3AV_CMD_AVPORT_HDMI_1)) {
audio->audio_word_bits = PS3AV_CMD_AUDIO_WORD_BITS_16;
audio->audio_format = PS3AV_CMD_AUDIO_FORMAT_BITSTREAM;
}
}
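/*
 * Typical usage sketch (assumed calling sequence, following how these
 * helpers pair up in this file): fill in a mode packet, then send it.
 *
 *	struct ps3av_pkt_audio_mode mode;
 *
 *	ps3av_cmd_set_audio_mode(&mode, PS3AV_CMD_AVPORT_HDMI_0,
 *				 PS3AV_CMD_AUDIO_NUM_OF_CH_2,
 *				 PS3AV_CMD_AUDIO_FS_48K,
 *				 PS3AV_CMD_AUDIO_WORD_BITS_16,
 *				 PS3AV_CMD_AUDIO_FORMAT_PCM,
 *				 PS3AV_CMD_AUDIO_SOURCE_SERIAL);
 *	ps3av_cmd_audio_mode(&mode);
 */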
int ps3av_cmd_audio_mode(struct ps3av_pkt_audio_mode *audio_mode)
{
int res;
res = ps3av_do_pkt(PS3AV_CID_AUDIO_MODE, sizeof(*audio_mode),
sizeof(*audio_mode), &audio_mode->send_hdr);
if (res < 0)
return res;
res = get_status(audio_mode);
if (res)
printk(KERN_ERR "PS3AV_CID_AUDIO_MODE: failed %x\n", res);
return res;
}
int ps3av_cmd_audio_mute(int num_of_port, u32 *port, u32 mute)
{
int i, res;
struct ps3av_pkt_audio_mute audio_mute;
if (num_of_port > PS3AV_OPT_PORT_MAX)
return -EINVAL;
/* audio mute */
memset(&audio_mute, 0, sizeof(audio_mute));
for (i = 0; i < num_of_port; i++) {
audio_mute.mute[i].avport = port[i];
audio_mute.mute[i].mute = mute;
}
res = ps3av_do_pkt(PS3AV_CID_AUDIO_MUTE,
sizeof(audio_mute.send_hdr) +
sizeof(struct ps3av_audio_mute) * num_of_port,
sizeof(audio_mute), &audio_mute.send_hdr);
if (res < 0)
return res;
res = get_status(&audio_mute);
if (res)
printk(KERN_ERR "PS3AV_CID_AUDIO_MUTE: failed %x\n", res);
return res;
}
int ps3av_cmd_audio_active(int active, u32 port)
{
int res;
struct ps3av_pkt_audio_active audio_active;
u32 cid;
/* audio active */
memset(&audio_active, 0, sizeof(audio_active));
audio_active.audio_port = port;
cid = active ? PS3AV_CID_AUDIO_ACTIVE : PS3AV_CID_AUDIO_INACTIVE;
res = ps3av_do_pkt(cid, sizeof(audio_active), sizeof(audio_active),
&audio_active.send_hdr);
if (res < 0)
return res;
res = get_status(&audio_active);
if (res)
printk(KERN_ERR "PS3AV_CID_AUDIO_ACTIVE:%x failed %x\n", cid,
res);
return res;
}
int ps3av_cmd_avb_param(struct ps3av_pkt_avb_param *avb, u32 send_len)
{
int res;
mutex_lock(&ps3_gpu_mutex);
/* avb packet */
res = ps3av_do_pkt(PS3AV_CID_AVB_PARAM, send_len, sizeof(*avb),
&avb->send_hdr);
if (res < 0)
goto out;
res = get_status(avb);
if (res)
pr_debug("%s: PS3AV_CID_AVB_PARAM: failed %x\n", __func__,
res);
out:
mutex_unlock(&ps3_gpu_mutex);
return res;
}
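/*
 * Hedged usage sketch: the ps3av_cmd_set_*() helpers above return the
 * number of bytes they packed, so a caller is assumed to chain several
 * sub-packets into the avb buffer and send the accumulated length (the
 * "buf" member and this length arithmetic are assumptions about the
 * packet layout, not shown in this file):
 *
 *	u32 len = 0;
 *
 *	len += ps3av_cmd_set_video_mode(avb.buf + len, head, vid, fmt, id);
 *	len += ps3av_cmd_set_av_video_cs(avb.buf + len, port, vid, cs,
 *					 aspect, id);
 *	ps3av_cmd_avb_param(&avb, sizeof(avb) - sizeof(avb.buf) + len);
 */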
int ps3av_cmd_av_get_hw_conf(struct ps3av_pkt_av_get_hw_conf *hw_conf)
{
int res;
memset(hw_conf, 0, sizeof(*hw_conf));
res = ps3av_do_pkt(PS3AV_CID_AV_GET_HW_CONF, sizeof(hw_conf->send_hdr),
sizeof(*hw_conf), &hw_conf->send_hdr);
if (res < 0)
return res;
res = get_status(hw_conf);
if (res)
printk(KERN_ERR "PS3AV_CID_AV_GET_HW_CONF: failed %x\n", res);
return res;
}
int ps3av_cmd_video_get_monitor_info(struct ps3av_pkt_av_get_monitor_info *info,
u32 avport)
{
int res;
memset(info, 0, sizeof(*info));
info->avport = avport;
res = ps3av_do_pkt(PS3AV_CID_AV_GET_MONITOR_INFO,
sizeof(info->send_hdr) + sizeof(info->avport) +
sizeof(info->reserved),
sizeof(*info), &info->send_hdr);
if (res < 0)
return res;
res = get_status(info);
if (res)
printk(KERN_ERR "PS3AV_CID_AV_GET_MONITOR_INFO: failed %x\n",
res);
return res;
}
#define PS3AV_AV_LAYOUT_0 (PS3AV_CMD_AV_LAYOUT_32 \
| PS3AV_CMD_AV_LAYOUT_44 \
| PS3AV_CMD_AV_LAYOUT_48)
#define PS3AV_AV_LAYOUT_1 (PS3AV_AV_LAYOUT_0 \
| PS3AV_CMD_AV_LAYOUT_88 \
| PS3AV_CMD_AV_LAYOUT_96 \
| PS3AV_CMD_AV_LAYOUT_176 \
| PS3AV_CMD_AV_LAYOUT_192)
| gpl-2.0 |
daschuer/nemo | libnemo-extension/nemo-name-and-desc-provider.c | 6 | 1413 | /*
* nemo-name-and-desc-provider.c - Interface for Nemo extensions that
* returns the extension's proper name and description for the plugin
* manager only - it is not necessary for extension functionality.
*
*/
#include <config.h>
#include "nemo-name-and-desc-provider.h"
#include <glib-object.h>
static void
nemo_name_and_desc_provider_base_init (gpointer g_class)
{
}
GType
nemo_name_and_desc_provider_get_type (void)
{
static GType type = 0;
if (!type) {
const GTypeInfo info = {
sizeof (NemoNameAndDescProviderIface),
nemo_name_and_desc_provider_base_init,
NULL,
NULL,
NULL,
NULL,
0,
0,
NULL
};
type = g_type_register_static (G_TYPE_INTERFACE,
"NemoNameAndDescProvider",
&info, 0);
g_type_interface_add_prerequisite (type, G_TYPE_OBJECT);
}
return type;
}
/**
* nemo_name_and_desc_provider_get_name_and_desc:
* @provider: a #NemoNameAndDescProvider
*
* Returns: (element-type gchar) (transfer full): a list of name:::desc
* strings.
*/
GList *
nemo_name_and_desc_provider_get_name_and_desc (NemoNameAndDescProvider *provider)
{
g_return_val_if_fail (NEMO_IS_NAME_AND_DESC_PROVIDER (provider), NULL);
g_return_val_if_fail (NEMO_NAME_AND_DESC_PROVIDER_GET_IFACE (provider)->get_name_and_desc != NULL, NULL);
return NEMO_NAME_AND_DESC_PROVIDER_GET_IFACE (provider)->get_name_and_desc (provider);
}
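/*
 * A minimal implementer sketch (hypothetical extension code, not part
 * of this file): an extension satisfies the interface by returning a
 * list of newly allocated "name:::desc" strings, e.g.:
 *
 * static GList *
 * my_ext_get_name_and_desc (NemoNameAndDescProvider *provider)
 * {
 *     return g_list_append (NULL,
 *                           g_strdup ("MyExtension:::Does one thing well"));
 * }
 */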
| gpl-2.0 |
davidgraeff/linux | drivers/net/ethernet/qlogic/qlge/qlge_main.c | 6 | 137209 | /*
* QLogic qlge NIC HBA Driver
* Copyright (c) 2003-2008 QLogic Corporation
* See LICENSE.qlge for copyright and licensing details.
* Author: Linux qlge network device driver by
* Ron Mercer <ron.mercer@qlogic.com>
*/
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include "qlge.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;
MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER | */
NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP |
NETIF_MSG_RX_ERR |
NETIF_MSG_TX_ERR |
/* NETIF_MSG_TX_QUEUED | */
/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
NETIF_MSG_HW | NETIF_MSG_WOL | 0;
static int debug = -1; /* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
"Option to enable MPI firmware dump. "
"Default is OFF - Do Not allocate memory. ");
static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
"Option to allow force of firmware core dump. "
"Default is OFF - Do not allow.");
static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
/* required last entry */
{0,}
};
MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
static int ql_wol(struct ql_adapter *);
static void qlge_set_multicast_list(struct net_device *);
static int ql_adapter_down(struct ql_adapter *);
static int ql_adapter_up(struct ql_adapter *);
/* This hardware semaphore causes exclusive access to
* resources shared between the NIC driver, MPI firmware,
* FCOE firmware and the FC driver.
*/
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
u32 sem_bits = 0;
switch (sem_mask) {
case SEM_XGMAC0_MASK:
sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
break;
case SEM_XGMAC1_MASK:
sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
break;
case SEM_ICB_MASK:
sem_bits = SEM_SET << SEM_ICB_SHIFT;
break;
case SEM_MAC_ADDR_MASK:
sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
break;
case SEM_FLASH_MASK:
sem_bits = SEM_SET << SEM_FLASH_SHIFT;
break;
case SEM_PROBE_MASK:
sem_bits = SEM_SET << SEM_PROBE_SHIFT;
break;
case SEM_RT_IDX_MASK:
sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
break;
case SEM_PROC_REG_MASK:
sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
break;
default:
netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
return -EINVAL;
}
ql_write32(qdev, SEM, sem_bits | sem_mask);
return !(ql_read32(qdev, SEM) & sem_bits);
}
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
unsigned int wait_count = 30;
do {
if (!ql_sem_trylock(qdev, sem_mask))
return 0;
udelay(100);
} while (--wait_count);
return -ETIMEDOUT;
}
void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
ql_write32(qdev, SEM, sem_mask);
ql_read32(qdev, SEM); /* flush */
}
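/*
 * The usual pairing, as used throughout this file (sketch only):
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...touch the resource guarded by the semaphore...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */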
/* This function waits for a specific bit to come ready
* in a given register. It is used mostly by the initialization
* process, but is also used by kernel thread APIs such as
* netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
*/
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
u32 temp;
int count = UDELAY_COUNT;
while (count) {
temp = ql_read32(qdev, reg);
/* check for errors */
if (temp & err_bit) {
netif_alert(qdev, probe, qdev->ndev,
"register 0x%.08x access error, value = 0x%.08x!.\n",
reg, temp);
return -EIO;
} else if (temp & bit)
return 0;
udelay(UDELAY_DELAY);
count--;
}
netif_alert(qdev, probe, qdev->ndev,
"Timed out waiting for reg %x to come ready.\n", reg);
return -ETIMEDOUT;
}
/* The CFG register is used to download TX and RX control blocks
* to the chip. This function waits for an operation to complete.
*/
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
int count = UDELAY_COUNT;
u32 temp;
while (count) {
temp = ql_read32(qdev, CFG);
if (temp & CFG_LE)
return -EIO;
if (!(temp & bit))
return 0;
udelay(UDELAY_DELAY);
count--;
}
return -ETIMEDOUT;
}
/* Used to issue init control blocks to hw. Maps control block,
* sets address, triggers download, waits for completion.
*/
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
u16 q_id)
{
u64 map;
int status = 0;
int direction;
u32 mask;
u32 value;
direction =
(bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
PCI_DMA_FROMDEVICE;
map = pci_map_single(qdev->pdev, ptr, size, direction);
if (pci_dma_mapping_error(qdev->pdev, map)) {
netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
return -ENOMEM;
}
status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
if (status)
return status;
status = ql_wait_cfg(qdev, bit);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Timed out waiting for CFG to come ready.\n");
goto exit;
}
ql_write32(qdev, ICB_L, (u32) map);
ql_write32(qdev, ICB_H, (u32) (map >> 32));
mask = CFG_Q_MASK | (bit << 16);
value = bit | (q_id << CFG_Q_SHIFT);
ql_write32(qdev, CFG, (mask | value));
/*
* Wait for the bit to clear after signaling hw.
*/
status = ql_wait_cfg(qdev, bit);
exit:
ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
pci_unmap_single(qdev->pdev, map, size, direction);
return status;
}
/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
u32 *value)
{
u32 offset = 0;
int status;
switch (type) {
case MAC_ADDR_TYPE_MULTI_MAC:
case MAC_ADDR_TYPE_CAM_MAC:
{
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MR, 0);
if (status)
goto exit;
*value++ = ql_read32(qdev, MAC_ADDR_DATA);
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MR, 0);
if (status)
goto exit;
*value++ = ql_read32(qdev, MAC_ADDR_DATA);
if (type == MAC_ADDR_TYPE_CAM_MAC) {
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
status =
ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
MAC_ADDR_MR, 0);
if (status)
goto exit;
*value++ = ql_read32(qdev, MAC_ADDR_DATA);
}
break;
}
case MAC_ADDR_TYPE_VLAN:
case MAC_ADDR_TYPE_MULTI_FLTR:
default:
netif_crit(qdev, ifup, qdev->ndev,
"Address type %d not yet supported.\n", type);
status = -EPERM;
}
exit:
return status;
}
/* Set up a MAC, multicast or VLAN address for the
* inbound frame matching.
*/
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
u16 index)
{
u32 offset = 0;
int status = 0;
switch (type) {
case MAC_ADDR_TYPE_MULTI_MAC:
{
u32 upper = (addr[0] << 8) | addr[1];
u32 lower = (addr[2] << 24) | (addr[3] << 16) |
(addr[4] << 8) | (addr[5]);
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
(index << MAC_ADDR_IDX_SHIFT) |
type | MAC_ADDR_E);
ql_write32(qdev, MAC_ADDR_DATA, lower);
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
(index << MAC_ADDR_IDX_SHIFT) |
type | MAC_ADDR_E);
ql_write32(qdev, MAC_ADDR_DATA, upper);
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
break;
}
case MAC_ADDR_TYPE_CAM_MAC:
{
u32 cam_output;
u32 upper = (addr[0] << 8) | addr[1];
u32 lower =
(addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
(addr[5]);
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
type); /* type */
ql_write32(qdev, MAC_ADDR_DATA, lower);
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
type); /* type */
ql_write32(qdev, MAC_ADDR_DATA, upper);
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
type); /* type */
/* This field should also include the queue id
and possibly the function id. Right now we hardcode
the route field to NIC core.
*/
cam_output = (CAM_OUT_ROUTE_NIC |
(qdev->
func << CAM_OUT_FUNC_SHIFT) |
(0 << CAM_OUT_CQ_ID_SHIFT));
if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
cam_output |= CAM_OUT_RV;
/* route to NIC core */
ql_write32(qdev, MAC_ADDR_DATA, cam_output);
break;
}
case MAC_ADDR_TYPE_VLAN:
{
u32 enable_bit = *((u32 *) &addr[0]);
/* For VLAN, the addr actually holds a bit that
* either enables or disables the vlan id we are
* addressing. It's either MAC_ADDR_E on or off.
* That's bit-27 we're talking about.
*/
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
type | /* type */
enable_bit); /* enable/disable */
break;
}
case MAC_ADDR_TYPE_MULTI_FLTR:
default:
netif_crit(qdev, ifup, qdev->ndev,
"Address type %d not yet supported.\n", type);
status = -EPERM;
}
exit:
return status;
}
/* Set or clear MAC address in hardware. We sometimes
* have to clear it to prevent wrong frame routing
* especially in a bonding environment.
*/
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
int status;
char zero_mac_addr[ETH_ALEN];
char *addr;
if (set) {
addr = &qdev->current_mac_addr[0];
netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
"Set Mac addr %pM\n", addr);
} else {
memset(zero_mac_addr, 0, ETH_ALEN);
addr = &zero_mac_addr[0];
netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
"Clearing MAC address\n");
}
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return status;
status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
netif_err(qdev, ifup, qdev->ndev,
"Failed to init mac address.\n");
return status;
}
void ql_link_on(struct ql_adapter *qdev)
{
netif_err(qdev, link, qdev->ndev, "Link is up.\n");
netif_carrier_on(qdev->ndev);
ql_set_mac_addr(qdev, 1);
}
void ql_link_off(struct ql_adapter *qdev)
{
netif_err(qdev, link, qdev->ndev, "Link is down.\n");
netif_carrier_off(qdev->ndev);
ql_set_mac_addr(qdev, 0);
}
/* Get a specific frame routing value from the CAM.
* Used for debug and reg dump.
*/
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
int status = 0;
status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
if (status)
goto exit;
ql_write32(qdev, RT_IDX,
RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
if (status)
goto exit;
*value = ql_read32(qdev, RT_DATA);
exit:
return status;
}
/* The NIC function for this chip has 16 routing indexes. Each one can be used
* to route different frame types to various inbound queues. We send broadcast/
* multicast/error frames to the default queue for slow handling,
* and CAM hit/RSS frames to the fast handling queues.
*/
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
int enable)
{
int status = -EINVAL; /* Return error if no mask match. */
u32 value = 0;
switch (mask) {
case RT_IDX_CAM_HIT:
{
value = RT_IDX_DST_CAM_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case RT_IDX_VALID: /* Promiscuous Mode frames. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_IP_CSUM_ERR_SLOT <<
RT_IDX_IDX_SHIFT); /* index */
break;
}
case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
RT_IDX_IDX_SHIFT); /* index */
break;
}
case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case RT_IDX_MCAST: /* Pass up All Multicast frames. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
{
value = RT_IDX_DST_RSS | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case 0: /* Clear the E-bit on an entry. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(index << RT_IDX_IDX_SHIFT);/* index */
break;
}
default:
netif_err(qdev, ifup, qdev->ndev,
"Mask type %d not yet supported.\n", mask);
status = -EPERM;
goto exit;
}
if (value) {
status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
if (status)
goto exit;
value |= (enable ? RT_IDX_E : 0);
ql_write32(qdev, RT_IDX, value);
ql_write32(qdev, RT_DATA, enable ? mask : 0);
}
exit:
return status;
}
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}
static void ql_disable_interrupts(struct ql_adapter *qdev)
{
ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
/* If we're running with multiple MSI-X vectors then we enable on the fly.
* Otherwise, we may have multiple outstanding workers and don't want to
* enable until the last one finishes. In this case, the irq_cnt gets
* incremented every time we queue a worker and decremented every time
* a worker finishes. Once it hits zero we enable the interrupt.
*/
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
u32 var = 0;
unsigned long hw_flags = 0;
struct intr_context *ctx = qdev->intr_context + intr;
if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
/* Always enable if we're MSIX multi interrupts and
* it's not the default (zeroeth) interrupt.
*/
ql_write32(qdev, INTR_EN,
ctx->intr_en_mask);
var = ql_read32(qdev, STS);
return var;
}
spin_lock_irqsave(&qdev->hw_lock, hw_flags);
if (atomic_dec_and_test(&ctx->irq_cnt)) {
ql_write32(qdev, INTR_EN,
ctx->intr_en_mask);
var = ql_read32(qdev, STS);
}
spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
return var;
}
static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
u32 var = 0;
struct intr_context *ctx;
/* HW disables for us if we're MSIX multi interrupts and
* it's not the default (zeroeth) interrupt.
*/
if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
return 0;
ctx = qdev->intr_context + intr;
spin_lock(&qdev->hw_lock);
if (!atomic_read(&ctx->irq_cnt)) {
ql_write32(qdev, INTR_EN,
ctx->intr_dis_mask);
var = ql_read32(qdev, STS);
}
atomic_inc(&ctx->irq_cnt);
spin_unlock(&qdev->hw_lock);
return var;
}
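/*
 * Assumed pairing, based on the counting scheme described above: an
 * interrupt handler calls ql_disable_completion_interrupt(), which
 * bumps irq_cnt, and the matching ql_enable_completion_interrupt()
 * only re-arms the vector once irq_cnt drops back to zero, e.g.:
 *
 *	ql_disable_completion_interrupt(qdev, intr);
 *	...run the napi poll / worker...
 *	ql_enable_completion_interrupt(qdev, intr);
 */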
static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
int i;
for (i = 0; i < qdev->intr_count; i++) {
/* The enable call does an atomic_dec_and_test
* and enables only if the result is zero.
* So we precharge it here.
*/
if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
i == 0))
atomic_set(&qdev->intr_context[i].irq_cnt, 1);
ql_enable_completion_interrupt(qdev, i);
}
}
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
int status, i;
u16 csum = 0;
__le16 *flash = (__le16 *)&qdev->flash;
status = strncmp((char *)&qdev->flash, str, 4);
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
return status;
}
for (i = 0; i < size; i++)
csum += le16_to_cpu(*flash++);
if (csum)
netif_err(qdev, ifup, qdev->ndev,
"Invalid flash checksum, csum = 0x%.04x.\n", csum);
return csum;
}
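/*
 * The flash image is assumed to carry a 16-bit checksum field chosen
 * so that summing every little-endian 16-bit word of the region wraps
 * to zero; e.g. if all other words sum to 0xbeef, the stored checksum
 * word must be 0x4111, since 0xbeef + 0x4111 == 0x10000 == 0 (mod 2^16).
 */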
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
int status = 0;
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev,
FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
if (status)
goto exit;
/* set up for reg read */
ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev,
FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
if (status)
goto exit;
/* This data is stored on flash as an array of
* __le32. Since ql_read32() returns cpu endian
* we need to swap it back.
*/
*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
return status;
}
static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
u32 i, size;
int status;
__le32 *p = (__le32 *)&qdev->flash;
u32 offset;
u8 mac_addr[6];
/* Get flash offset for function and adjust
* for dword access.
*/
if (!qdev->port)
offset = FUNC0_FLASH_OFFSET / sizeof(u32);
else
offset = FUNC1_FLASH_OFFSET / sizeof(u32);
if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
return -ETIMEDOUT;
size = sizeof(struct flash_params_8000) / sizeof(u32);
for (i = 0; i < size; i++, p++) {
status = ql_read_flash_word(qdev, i+offset, p);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Error reading flash.\n");
goto exit;
}
}
status = ql_validate_flash(qdev,
sizeof(struct flash_params_8000) / sizeof(u16),
"8000");
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
status = -EINVAL;
goto exit;
}
/* Extract either manufacturer or BOFM modified
* MAC address.
*/
if (qdev->flash.flash_params_8000.data_type1 == 2)
memcpy(mac_addr,
qdev->flash.flash_params_8000.mac_addr1,
qdev->ndev->addr_len);
else
memcpy(mac_addr,
qdev->flash.flash_params_8000.mac_addr,
qdev->ndev->addr_len);
if (!is_valid_ether_addr(mac_addr)) {
netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
status = -EINVAL;
goto exit;
}
memcpy(qdev->ndev->dev_addr,
mac_addr,
qdev->ndev->addr_len);
exit:
ql_sem_unlock(qdev, SEM_FLASH_MASK);
return status;
}
static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
int i;
int status;
__le32 *p = (__le32 *)&qdev->flash;
u32 offset = 0;
u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
/* Second function's parameters follow the first
* function's.
*/
if (qdev->port)
offset = size;
if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
return -ETIMEDOUT;
for (i = 0; i < size; i++, p++) {
status = ql_read_flash_word(qdev, i+offset, p);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Error reading flash.\n");
goto exit;
}
}
status = ql_validate_flash(qdev,
sizeof(struct flash_params_8012) / sizeof(u16),
"8012");
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
status = -EINVAL;
goto exit;
}
if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
status = -EINVAL;
goto exit;
}
memcpy(qdev->ndev->dev_addr,
qdev->flash.flash_params_8012.mac_addr,
qdev->ndev->addr_len);
exit:
ql_sem_unlock(qdev, SEM_FLASH_MASK);
return status;
}
/* xgmac registers are located behind the xgmac_addr and xgmac_data
* register pair. Each read/write requires us to wait for the ready
* bit before reading/writing the data.
*/
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
int status;
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev,
XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
return status;
/* write the data to the data reg */
ql_write32(qdev, XGMAC_DATA, data);
/* trigger the write */
ql_write32(qdev, XGMAC_ADDR, reg);
return status;
}
/* xgmac registers are located behind the xgmac_addr and xgmac_data
* register pair. Each read/write requires us to wait for the ready
* bit before reading/writing the data.
*/
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
int status = 0;
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev,
XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
goto exit;
/* set up for reg read */
ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev,
XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
goto exit;
/* get the data */
*data = ql_read32(qdev, XGMAC_DATA);
exit:
return status;
}
/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
int status = 0;
u32 hi = 0;
u32 lo = 0;
status = ql_read_xgmac_reg(qdev, reg, &lo);
if (status)
goto exit;
status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
if (status)
goto exit;
*data = (u64) lo | ((u64) hi << 32);
exit:
return status;
}
static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
int status;
/*
* Get MPI firmware version for driver banner
* and ethtool info.
*/
status = ql_mb_about_fw(qdev);
if (status)
goto exit;
status = ql_mb_get_fw_state(qdev);
if (status)
goto exit;
/* Wake up a worker to get/set the TX/RX frame sizes. */
queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
return status;
}
/* Take the MAC Core out of reset.
* Enable statistics counting.
* Take the transmitter/receiver out of reset.
* This functionality may be done in the MPI firmware at a
* later date.
*/
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
int status = 0;
u32 data;
if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
/* Another function has the semaphore, so
* wait for the port init bit to come ready.
*/
netif_info(qdev, link, qdev->ndev,
"Another function has the semaphore, so wait for the port init bit to come ready.\n");
status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
if (status) {
netif_crit(qdev, link, qdev->ndev,
"Port initialize timed out.\n");
}
return status;
}
netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
/* Set the core reset. */
status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
if (status)
goto end;
data |= GLOBAL_CFG_RESET;
status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
if (status)
goto end;
/* Clear the core reset and turn on jumbo for receiver. */
data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
data |= GLOBAL_CFG_TX_STAT_EN;
data |= GLOBAL_CFG_RX_STAT_EN;
status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
if (status)
goto end;
/* Enable the transmitter and clear its reset. */
status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
if (status)
goto end;
data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
data |= TX_CFG_EN; /* Enable the transmitter. */
status = ql_write_xgmac_reg(qdev, TX_CFG, data);
if (status)
goto end;
/* Enable the receiver and clear its reset. */
status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
if (status)
goto end;
data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
data |= RX_CFG_EN; /* Enable the receiver. */
status = ql_write_xgmac_reg(qdev, RX_CFG, data);
if (status)
goto end;
/* Turn on jumbo. */
status =
ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
if (status)
goto end;
status =
ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
if (status)
goto end;
/* Signal to the world that the port is enabled. */
ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
ql_sem_unlock(qdev, qdev->xg_sem_mask);
return status;
}
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
return PAGE_SIZE << qdev->lbq_buf_order;
}
/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
rx_ring->lbq_curr_idx++;
if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
rx_ring->lbq_curr_idx = 0;
rx_ring->lbq_free_cnt++;
return lbq_desc;
}
static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
struct rx_ring *rx_ring)
{
struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
pci_dma_sync_single_for_cpu(qdev->pdev,
dma_unmap_addr(lbq_desc, mapaddr),
rx_ring->lbq_buf_size,
PCI_DMA_FROMDEVICE);
/* If it's the last chunk of our master page then
* we unmap it.
*/
if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
== ql_lbq_block_size(qdev))
pci_unmap_page(qdev->pdev,
lbq_desc->p.pg_chunk.map,
ql_lbq_block_size(qdev),
PCI_DMA_FROMDEVICE);
return lbq_desc;
}
/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
rx_ring->sbq_curr_idx++;
if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
rx_ring->sbq_curr_idx = 0;
rx_ring->sbq_free_cnt++;
return sbq_desc;
}
/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
rx_ring->cnsmr_idx++;
rx_ring->curr_entry++;
if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
rx_ring->cnsmr_idx = 0;
rx_ring->curr_entry = rx_ring->cq_base;
}
}
static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
struct bq_desc *lbq_desc)
{
if (!rx_ring->pg_chunk.page) {
u64 map;
rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
GFP_ATOMIC,
qdev->lbq_buf_order);
if (unlikely(!rx_ring->pg_chunk.page)) {
netif_err(qdev, drv, qdev->ndev,
"page allocation failed.\n");
return -ENOMEM;
}
rx_ring->pg_chunk.offset = 0;
map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
0, ql_lbq_block_size(qdev),
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(qdev->pdev, map)) {
__free_pages(rx_ring->pg_chunk.page,
qdev->lbq_buf_order);
rx_ring->pg_chunk.page = NULL;
netif_err(qdev, drv, qdev->ndev,
"PCI mapping failed.\n");
return -ENOMEM;
}
rx_ring->pg_chunk.map = map;
rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
}
/* Copy the current master pg_chunk info
* to the current descriptor.
*/
lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
/* Adjust the master page chunk for next
* buffer get.
*/
rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
rx_ring->pg_chunk.page = NULL;
lbq_desc->p.pg_chunk.last_flag = 1;
} else {
rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
get_page(rx_ring->pg_chunk.page);
lbq_desc->p.pg_chunk.last_flag = 0;
}
return 0;
}
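/*
 * A worked example of the master-page chunking above, assuming 4 KiB
 * pages, lbq_buf_order == 1 (an 8 KiB block) and a 2 KiB lbq_buf_size:
 * one allocation yields four chunks at offsets 0k, 2k, 4k and 6k.
 * get_page() is taken for every chunk except the last, whose
 * last_flag marks it so ql_get_curr_lchunk() knows to unmap the block.
 */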
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
u32 clean_idx = rx_ring->lbq_clean_idx;
u32 start_idx = clean_idx;
struct bq_desc *lbq_desc;
u64 map;
int i;
while (rx_ring->lbq_free_cnt > 32) {
for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"lbq: try cleaning clean_idx = %d.\n",
clean_idx);
lbq_desc = &rx_ring->lbq[clean_idx];
if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
rx_ring->lbq_clean_idx = clean_idx;
netif_err(qdev, ifup, qdev->ndev,
"Could not get a page chunk, i=%d, clean_idx =%d .\n",
i, clean_idx);
return;
}
map = lbq_desc->p.pg_chunk.map +
lbq_desc->p.pg_chunk.offset;
dma_unmap_addr_set(lbq_desc, mapaddr, map);
dma_unmap_len_set(lbq_desc, maplen,
rx_ring->lbq_buf_size);
*lbq_desc->addr = cpu_to_le64(map);
pci_dma_sync_single_for_device(qdev->pdev, map,
rx_ring->lbq_buf_size,
PCI_DMA_FROMDEVICE);
clean_idx++;
if (clean_idx == rx_ring->lbq_len)
clean_idx = 0;
}
rx_ring->lbq_clean_idx = clean_idx;
rx_ring->lbq_prod_idx += 16;
if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
rx_ring->lbq_prod_idx = 0;
rx_ring->lbq_free_cnt -= 16;
}
if (start_idx != clean_idx) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"lbq: updating prod idx = %d.\n",
rx_ring->lbq_prod_idx);
ql_write_db_reg(rx_ring->lbq_prod_idx,
rx_ring->lbq_prod_idx_db_reg);
}
}
/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
u32 clean_idx = rx_ring->sbq_clean_idx;
u32 start_idx = clean_idx;
struct bq_desc *sbq_desc;
u64 map;
int i;
while (rx_ring->sbq_free_cnt > 16) {
for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
sbq_desc = &rx_ring->sbq[clean_idx];
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"sbq: try cleaning clean_idx = %d.\n",
clean_idx);
if (sbq_desc->p.skb == NULL) {
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
"sbq: getting new skb for index %d.\n",
sbq_desc->index);
sbq_desc->p.skb =
netdev_alloc_skb(qdev->ndev,
SMALL_BUFFER_SIZE);
if (sbq_desc->p.skb == NULL) {
rx_ring->sbq_clean_idx = clean_idx;
return;
}
skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
map = pci_map_single(qdev->pdev,
sbq_desc->p.skb->data,
rx_ring->sbq_buf_size,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(qdev->pdev, map)) {
netif_err(qdev, ifup, qdev->ndev,
"PCI mapping failed.\n");
rx_ring->sbq_clean_idx = clean_idx;
dev_kfree_skb_any(sbq_desc->p.skb);
sbq_desc->p.skb = NULL;
return;
}
dma_unmap_addr_set(sbq_desc, mapaddr, map);
dma_unmap_len_set(sbq_desc, maplen,
rx_ring->sbq_buf_size);
*sbq_desc->addr = cpu_to_le64(map);
}
clean_idx++;
if (clean_idx == rx_ring->sbq_len)
clean_idx = 0;
}
rx_ring->sbq_clean_idx = clean_idx;
rx_ring->sbq_prod_idx += 16;
if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
rx_ring->sbq_prod_idx = 0;
rx_ring->sbq_free_cnt -= 16;
}
if (start_idx != clean_idx) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"sbq: updating prod idx = %d.\n",
rx_ring->sbq_prod_idx);
ql_write_db_reg(rx_ring->sbq_prod_idx,
rx_ring->sbq_prod_idx_db_reg);
}
}
static void ql_update_buffer_queues(struct ql_adapter *qdev,
struct rx_ring *rx_ring)
{
ql_update_sbq(qdev, rx_ring);
ql_update_lbq(qdev, rx_ring);
}
/* Unmaps tx buffers. Can be called from send() if a pci mapping
* fails at some stage, or from the interrupt when a tx completes.
*/
static void ql_unmap_send(struct ql_adapter *qdev,
struct tx_ring_desc *tx_ring_desc, int mapped)
{
int i;
for (i = 0; i < mapped; i++) {
if (i == 0 || (i == 7 && mapped > 7)) {
/*
* Unmap the skb->data area, or the
* external sglist (AKA the Outbound
* Address List (OAL)).
* If it's the zeroeth element, then it's
* the skb->data area. If it's the 7th
* element and there are more than 6 frags,
* then it's an OAL.
*/
if (i == 7) {
netif_printk(qdev, tx_done, KERN_DEBUG,
qdev->ndev,
"unmapping OAL area.\n");
}
pci_unmap_single(qdev->pdev,
dma_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
dma_unmap_len(&tx_ring_desc->map[i],
maplen),
PCI_DMA_TODEVICE);
} else {
netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
"unmapping frag %d.\n", i);
pci_unmap_page(qdev->pdev,
dma_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
dma_unmap_len(&tx_ring_desc->map[i],
maplen), PCI_DMA_TODEVICE);
}
}
}
/* Map the buffers for this transmit. This will return
* NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
*/
static int ql_map_send(struct ql_adapter *qdev,
struct ob_mac_iocb_req *mac_iocb_ptr,
struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
int len = skb_headlen(skb);
dma_addr_t map;
int frag_idx, err, map_idx = 0;
struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
int frag_cnt = skb_shinfo(skb)->nr_frags;
if (frag_cnt) {
netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
"frag_cnt = %d.\n", frag_cnt);
}
/*
* Map the skb buffer first.
*/
map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
err = pci_dma_mapping_error(qdev->pdev, map);
if (err) {
netif_err(qdev, tx_queued, qdev->ndev,
"PCI mapping failed with error: %d\n", err);
return NETDEV_TX_BUSY;
}
tbd->len = cpu_to_le32(len);
tbd->addr = cpu_to_le64(map);
dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
map_idx++;
/*
* This loop fills the remainder of the 8 address descriptors
* in the IOCB. If there are more than 7 fragments, then the
* eighth address desc will point to an external list (OAL).
* When this happens, the remainder of the frags will be stored
* in this list.
*/
for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
tbd++;
if (frag_idx == 6 && frag_cnt > 7) {
/* Let's tack on an sglist.
* Our control block will now
* look like this:
* iocb->seg[0] = skb->data
* iocb->seg[1] = frag[0]
* iocb->seg[2] = frag[1]
* iocb->seg[3] = frag[2]
* iocb->seg[4] = frag[3]
* iocb->seg[5] = frag[4]
* iocb->seg[6] = frag[5]
* iocb->seg[7] = ptr to OAL (external sglist)
* oal->seg[0] = frag[6]
* oal->seg[1] = frag[7]
* oal->seg[2] = frag[8]
* oal->seg[3] = frag[9]
* oal->seg[4] = frag[10]
* etc...
*/
/* Tack on the OAL in the eighth segment of IOCB. */
map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
sizeof(struct oal),
PCI_DMA_TODEVICE);
err = pci_dma_mapping_error(qdev->pdev, map);
if (err) {
netif_err(qdev, tx_queued, qdev->ndev,
"PCI mapping outbound address list with error: %d\n",
err);
goto map_error;
}
tbd->addr = cpu_to_le64(map);
/*
* The length is the number of fragments
* that remain to be mapped times the length
* of our sglist (OAL).
*/
tbd->len =
cpu_to_le32((sizeof(struct tx_buf_desc) *
(frag_cnt - frag_idx)) | TX_DESC_C);
dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
map);
dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
sizeof(struct oal));
tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
map_idx++;
}
map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netif_err(qdev, tx_queued, qdev->ndev,
"PCI mapping frags failed with error: %d.\n",
err);
goto map_error;
}
tbd->addr = cpu_to_le64(map);
tbd->len = cpu_to_le32(skb_frag_size(frag));
dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
skb_frag_size(frag));
}
/* Save the number of segments we've mapped. */
tx_ring_desc->map_cnt = map_idx;
/* Terminate the last segment. */
tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
return NETDEV_TX_OK;
map_error:
/*
* If the first frag mapping failed, then i will be zero.
* This causes the unmap of the skb->data area. Otherwise
* we pass in the number of frags that mapped successfully
* so they can be unmapped.
*/
ql_unmap_send(qdev, tx_ring_desc, map_idx);
return NETDEV_TX_BUSY;
}
/* Categorizing receive firmware frame errors */
static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
struct rx_ring *rx_ring)
{
struct nic_stats *stats = &qdev->nic_stats;
stats->rx_err_count++;
rx_ring->rx_errors++;
switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
stats->rx_code_err++;
break;
case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
stats->rx_oversize_err++;
break;
case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
stats->rx_undersize_err++;
break;
case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
stats->rx_preamble_err++;
break;
case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
stats->rx_frame_len_err++;
break;
case IB_MAC_IOCB_RSP_ERR_CRC:
stats->rx_crc_err++;
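/* fall through: default only breaks */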
default:
break;
}
}
/**
* ql_update_mac_hdr_len - helper routine to update the mac header length
* based on vlan tags if present
*/
static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
struct ib_mac_iocb_rsp *ib_mac_rsp,
void *page, size_t *len)
{
u16 *tags;
if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
return;
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
tags = (u16 *)page;
/* Look for stacked vlan tags in ethertype field */
if (tags[6] == ETH_P_8021Q &&
tags[8] == ETH_P_8021Q)
*len += 2 * VLAN_HLEN;
else
*len += VLAN_HLEN;
}
}
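/*
 * Worked example for the tag scan above: the u16 view of the frame
 * puts the outer ethertype at tags[6] (byte offset 12). A single
 * 802.1Q tag grows the 14-byte MAC header by VLAN_HLEN (4 bytes) to
 * 18, and a stacked (QinQ) pair, detected by a second ETH_P_8021Q at
 * tags[8], grows it by 8 bytes to 22.
 */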
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
struct ib_mac_iocb_rsp *ib_mac_rsp,
u32 length,
u16 vlan_id)
{
struct sk_buff *skb;
struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
/* Frame error, so drop the packet. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
put_page(lbq_desc->p.pg_chunk.page);
return;
}
napi->dev = qdev->ndev;
skb = napi_get_frags(napi);
if (!skb) {
netif_err(qdev, drv, qdev->ndev,
"Couldn't get an skb, exiting.\n");
rx_ring->rx_dropped++;
put_page(lbq_desc->p.pg_chunk.page);
return;
}
prefetch(lbq_desc->p.pg_chunk.va);
__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
lbq_desc->p.pg_chunk.page,
lbq_desc->p.pg_chunk.offset,
length);
skb->len += length;
skb->data_len += length;
skb->truesize += length;
skb_shinfo(skb)->nr_frags++;
rx_ring->rx_packets++;
rx_ring->rx_bytes += length;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rx_ring->cq_id);
if (vlan_id != 0xffff)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
napi_gro_frags(napi);
}
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
struct ib_mac_iocb_rsp *ib_mac_rsp,
u32 length,
u16 vlan_id)
{
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
void *addr;
struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
size_t hlen = ETH_HLEN;
skb = netdev_alloc_skb(ndev, length);
if (!skb) {
rx_ring->rx_dropped++;
put_page(lbq_desc->p.pg_chunk.page);
return;
}
addr = lbq_desc->p.pg_chunk.va;
prefetch(addr);
/* Frame error, so drop the packet. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
goto err_out;
}
/* Update the MAC header length*/
ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
if (skb->len > ndev->mtu + hlen) {
netif_err(qdev, drv, qdev->ndev,
"Segment too small, dropping.\n");
rx_ring->rx_dropped++;
goto err_out;
}
memcpy(skb_put(skb, hlen), addr, hlen);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
length);
skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
lbq_desc->p.pg_chunk.offset + hlen,
length - hlen);
skb->len += length - hlen;
skb->data_len += length - hlen;
skb->truesize += length - hlen;
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
skb->protocol = eth_type_trans(skb, ndev);
skb_checksum_none_assert(skb);
if ((ndev->features & NETIF_F_RXCSUM) &&
!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
/* TCP frame. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"TCP checksum done!\n");
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph =
(struct iphdr *)((u8 *)addr + hlen);
if (!(iph->frag_off &
htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
"UDP checksum done!\n");
}
}
}
skb_record_rx_queue(skb, rx_ring->cq_id);
if (vlan_id != 0xffff)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(napi, skb);
else
netif_receive_skb(skb);
return;
err_out:
dev_kfree_skb_any(skb);
put_page(lbq_desc->p.pg_chunk.page);
}
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
struct ib_mac_iocb_rsp *ib_mac_rsp,
u32 length,
u16 vlan_id)
{
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
struct sk_buff *new_skb = NULL;
struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
skb = sbq_desc->p.skb;
/* Allocate new_skb and copy */
new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
if (new_skb == NULL) {
rx_ring->rx_dropped++;
return;
}
skb_reserve(new_skb, NET_IP_ALIGN);
memcpy(skb_put(new_skb, length), skb->data, length);
skb = new_skb;
/* Frame error, so drop the packet. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
dev_kfree_skb_any(skb);
return;
}
/* loopback self test for ethtool */
if (test_bit(QL_SELFTEST, &qdev->flags)) {
ql_check_lb_frame(qdev, skb);
dev_kfree_skb_any(skb);
return;
}
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
if (skb->len > ndev->mtu + ETH_HLEN) {
dev_kfree_skb_any(skb);
rx_ring->rx_dropped++;
return;
}
prefetch(skb->data);
if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%s Multicast.\n",
(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
IB_MAC_IOCB_RSP_M_REG ? "Registered" :
(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
}
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"Promiscuous Packet.\n");
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
skb->protocol = eth_type_trans(skb, ndev);
skb_checksum_none_assert(skb);
/* If rx checksum is on, and there are no
* csum or frame errors.
*/
if ((ndev->features & NETIF_F_RXCSUM) &&
!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
/* TCP frame. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"TCP checksum done!\n");
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph = (struct iphdr *) skb->data;
if (!(iph->frag_off &
htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
"UDP checksum done!\n");
}
}
}
skb_record_rx_queue(skb, rx_ring->cq_id);
if (vlan_id != 0xffff)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(&rx_ring->napi, skb);
else
netif_receive_skb(skb);
}
static void ql_realign_skb(struct sk_buff *skb, int len)
{
void *temp_addr = skb->data;
/* Undo the skb_reserve(skb,32) we did before
* giving to hardware, and realign data on
* a 2-byte boundary.
*/
skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
skb_copy_to_linear_data(skb, temp_addr,
(unsigned int)len);
}
/*
* This function builds an skb for the given inbound
* completion. It will be rewritten for readability in the near
* future, but for now it works well.
*/
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
struct ib_mac_iocb_rsp *ib_mac_rsp)
{
struct bq_desc *lbq_desc;
struct bq_desc *sbq_desc;
struct sk_buff *skb = NULL;
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
size_t hlen = ETH_HLEN;
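/* The chip describes the packet layout in the IOCB flags. Four
* layouts are handled below: a header split into a small buffer,
* data in a small buffer, data in a single large buffer, and data
* in a chain of large buffers listed in a small buffer.
*/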
/*
* Handle the header buffer if present.
*/
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"Header of %d bytes in small buffer.\n", hdr_len);
/*
* Headers fit nicely into a small buffer.
*/
sbq_desc = ql_get_curr_sbuf(rx_ring);
pci_unmap_single(qdev->pdev,
dma_unmap_addr(sbq_desc, mapaddr),
dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
skb = sbq_desc->p.skb;
ql_realign_skb(skb, hdr_len);
skb_put(skb, hdr_len);
sbq_desc->p.skb = NULL;
}
/*
* Handle the data buffer(s).
*/
if (unlikely(!length)) { /* Is there data too? */
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"No Data buffer in this packet.\n");
return skb;
}
if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"Headers in small, data of %d bytes in small, combine them.\n",
length);
/*
* Data is less than small buffer size so it's
* stuffed in a small buffer.
* For this case we append the data
* from the "data" small buffer to the "header" small
* buffer.
*/
sbq_desc = ql_get_curr_sbuf(rx_ring);
pci_dma_sync_single_for_cpu(qdev->pdev,
dma_unmap_addr(sbq_desc, mapaddr),
dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
memcpy(skb_put(skb, length),
sbq_desc->p.skb->data, length);
pci_dma_sync_single_for_device(qdev->pdev,
dma_unmap_addr(sbq_desc, mapaddr),
dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
} else {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes in a single small buffer.\n",
length);
sbq_desc = ql_get_curr_sbuf(rx_ring);
skb = sbq_desc->p.skb;
ql_realign_skb(skb, length);
skb_put(skb, length);
pci_unmap_single(qdev->pdev,
dma_unmap_addr(sbq_desc, mapaddr),
dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
sbq_desc->p.skb = NULL;
}
} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"Header in small, %d bytes in large. Chain large to small!\n",
length);
/*
* The data is in a single large buffer. We
* chain it to the header buffer's skb and let
* it rip.
*/
lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"Chaining page at offset = %d, for %d bytes to skb.\n",
lbq_desc->p.pg_chunk.offset, length);
skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
lbq_desc->p.pg_chunk.offset,
length);
skb->len += length;
skb->data_len += length;
skb->truesize += length;
} else {
/*
* The headers and data are in a single large buffer. We
* copy it to a new skb and let it go. This can happen with
* jumbo mtu on a non-TCP/UDP frame.
*/
lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
skb = netdev_alloc_skb(qdev->ndev, length);
if (skb == NULL) {
netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
"No skb available, drop the packet.\n");
return NULL;
}
pci_unmap_page(qdev->pdev,
dma_unmap_addr(lbq_desc, mapaddr),
dma_unmap_len(lbq_desc, maplen),
PCI_DMA_FROMDEVICE);
skb_reserve(skb, NET_IP_ALIGN);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
length);
skb_fill_page_desc(skb, 0,
lbq_desc->p.pg_chunk.page,
lbq_desc->p.pg_chunk.offset,
length);
skb->len += length;
skb->data_len += length;
skb->truesize += length;
length -= length;
ql_update_mac_hdr_len(qdev, ib_mac_rsp,
lbq_desc->p.pg_chunk.va,
&hlen);
__pskb_pull_tail(skb, hlen);
}
} else {
/*
* The data is in a chain of large buffers
* pointed to by a small buffer. We loop
* through and chain them to our small header
* buffer's skb.
* frags: There are at most 18 frags, and our small
* buffer will hold 32 of them. The thing is,
* we'll use at most 3 for our 9000 byte jumbo
* frames. If the MTU goes up we could
* eventually be in trouble.
*/
int size, i = 0;
sbq_desc = ql_get_curr_sbuf(rx_ring);
pci_unmap_single(qdev->pdev,
dma_unmap_addr(sbq_desc, mapaddr),
dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
/*
* This is a non-TCP/UDP IP frame, so
* the headers aren't split into a small
* buffer. We have to use the small buffer
* that contains our sg list as our skb to
* send upstairs. Copy the sg list here to
* a local buffer and use it to find the
* pages to chain.
*/
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes of headers & data in chain of large.\n",
length);
skb = sbq_desc->p.skb;
sbq_desc->p.skb = NULL;
skb_reserve(skb, NET_IP_ALIGN);
}
while (length > 0) {
lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
size = (length < rx_ring->lbq_buf_size) ? length :
rx_ring->lbq_buf_size;
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"Adding page %d to skb for %d bytes.\n",
i, size);
skb_fill_page_desc(skb, i,
lbq_desc->p.pg_chunk.page,
lbq_desc->p.pg_chunk.offset,
size);
skb->len += size;
skb->data_len += size;
skb->truesize += size;
length -= size;
i++;
}
ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
&hlen);
__pskb_pull_tail(skb, hlen);
}
return skb;
}
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
struct ib_mac_iocb_rsp *ib_mac_rsp,
u16 vlan_id)
{
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
if (unlikely(!skb)) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"No skb available, drop packet.\n");
rx_ring->rx_dropped++;
return;
}
/* Frame error, so drop the packet. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
dev_kfree_skb_any(skb);
return;
}
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
if (skb->len > ndev->mtu + ETH_HLEN) {
dev_kfree_skb_any(skb);
rx_ring->rx_dropped++;
return;
}
/* loopback self test for ethtool */
if (test_bit(QL_SELFTEST, &qdev->flags)) {
ql_check_lb_frame(qdev, skb);
dev_kfree_skb_any(skb);
return;
}
prefetch(skb->data);
if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
IB_MAC_IOCB_RSP_M_REG ? "Registered" :
(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
rx_ring->rx_multicast++;
}
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"Promiscuous Packet.\n");
}
skb->protocol = eth_type_trans(skb, ndev);
skb_checksum_none_assert(skb);
/* If rx checksum is on, and there are no
* csum or frame errors.
*/
if ((ndev->features & NETIF_F_RXCSUM) &&
!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
/* TCP frame. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"TCP checksum done!\n");
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph = (struct iphdr *) skb->data;
if (!(iph->frag_off &
htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"TCP checksum done!\n");
}
}
}
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
skb_record_rx_queue(skb, rx_ring->cq_id);
if (vlan_id != 0xffff)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(&rx_ring->napi, skb);
else
netif_receive_skb(skb);
}
/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
struct ib_mac_iocb_rsp *ib_mac_rsp)
{
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
(qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
((le16_to_cpu(ib_mac_rsp->vlan_id) &
IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
/* The data and headers are split into
* separate buffers.
*/
ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
vlan_id);
} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
/* The data fit in a single small buffer.
* Allocate a new skb, copy the data and
* return the buffer to the free pool.
*/
ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
length, vlan_id);
} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
/* TCP packet in a page chunk that's been checksummed.
* Tack it on to our GRO skb and let it go.
*/
ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
length, vlan_id);
} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
/* Non-TCP packet in a page chunk. Allocate an
* skb, tack it on frags, and send it up.
*/
ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
length, vlan_id);
} else {
/* Non-TCP/UDP large frames that span multiple buffers
* can be processed correctly by the split frame logic.
*/
ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
vlan_id);
}
return (unsigned long)length;
}
/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
struct ob_mac_iocb_rsp *mac_rsp)
{
struct tx_ring *tx_ring;
struct tx_ring_desc *tx_ring_desc;
QL_DUMP_OB_MAC_RSP(mac_rsp);
tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
tx_ring_desc = &tx_ring->q[mac_rsp->tid];
ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
tx_ring->tx_packets++;
dev_kfree_skb(tx_ring_desc->skb);
tx_ring_desc->skb = NULL;
if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
OB_MAC_IOCB_RSP_S |
OB_MAC_IOCB_RSP_L |
OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
netif_warn(qdev, tx_done, qdev->ndev,
"Total descriptor length did not match transfer length.\n");
}
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
netif_warn(qdev, tx_done, qdev->ndev,
"Frame too short to be valid, not sent.\n");
}
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
netif_warn(qdev, tx_done, qdev->ndev,
"Frame too long, but sent anyway.\n");
}
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
netif_warn(qdev, tx_done, qdev->ndev,
"PCI backplane error. Frame not sent.\n");
}
}
atomic_inc(&tx_ring->tx_count);
}
/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
ql_link_off(qdev);
queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}
void ql_queue_asic_error(struct ql_adapter *qdev)
{
ql_link_off(qdev);
ql_disable_interrupts(qdev);
/* Clear adapter up bit to signal the recovery
* process that it shouldn't kill the reset worker
* thread
*/
clear_bit(QL_ADAPTER_UP, &qdev->flags);
/* Set asic recovery bit to indicate reset process that we are
* in fatal error recovery process rather than normal close
*/
set_bit(QL_ASIC_RECOVERY, &qdev->flags);
queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}
static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
struct ib_ae_iocb_rsp *ib_ae_rsp)
{
switch (ib_ae_rsp->event) {
case MGMT_ERR_EVENT:
netif_err(qdev, rx_err, qdev->ndev,
"Management Processor Fatal Error.\n");
ql_queue_fw_error(qdev);
return;
case CAM_LOOKUP_ERR_EVENT:
netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
netdev_err(qdev->ndev, "This event shouldn't occur.\n");
ql_queue_asic_error(qdev);
return;
case SOFT_ECC_ERROR_EVENT:
netdev_err(qdev->ndev, "Soft ECC error detected.\n");
ql_queue_asic_error(qdev);
break;
case PCI_ERR_ANON_BUF_RD:
netdev_err(qdev->ndev, "PCI error occurred when reading "
"anonymous buffers from rx_ring %d.\n",
ib_ae_rsp->q_id);
ql_queue_asic_error(qdev);
break;
default:
netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
ib_ae_rsp->event);
ql_queue_asic_error(qdev);
break;
}
}
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
struct ql_adapter *qdev = rx_ring->qdev;
u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
struct ob_mac_iocb_rsp *net_rsp = NULL;
int count = 0;
struct tx_ring *tx_ring;
/* While there are entries in the completion queue. */
while (prod != rx_ring->cnsmr_idx) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"cq_id = %d, prod = %d, cnsmr = %d.\n.",
rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
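/* Read the IOCB contents only after the producer index update
* has been observed.
*/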
rmb();
switch (net_rsp->opcode) {
case OPCODE_OB_MAC_TSO_IOCB:
case OPCODE_OB_MAC_IOCB:
ql_process_mac_tx_intr(qdev, net_rsp);
break;
default:
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"Hit default case, not handled! dropping the packet, opcode = %x.\n",
net_rsp->opcode);
}
count++;
ql_update_cq(rx_ring);
prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
}
if (!net_rsp)
return 0;
ql_write_cq_idx(rx_ring);
tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
/*
* The queue got stopped because the tx_ring was full.
* Wake it up, because it's now at least 25% empty.
*/
netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
}
return count;
}
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
struct ql_adapter *qdev = rx_ring->qdev;
u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
struct ql_net_rsp_iocb *net_rsp;
int count = 0;
/* While there are entries in the completion queue. */
while (prod != rx_ring->cnsmr_idx) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"cq_id = %d, prod = %d, cnsmr = %d.\n.",
rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
net_rsp = rx_ring->curr_entry;
rmb();
switch (net_rsp->opcode) {
case OPCODE_IB_MAC_IOCB:
ql_process_mac_rx_intr(qdev, rx_ring,
(struct ib_mac_iocb_rsp *)
net_rsp);
break;
case OPCODE_IB_AE_IOCB:
ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
net_rsp);
break;
default:
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"Hit default case, not handled! dropping the packet, opcode = %x.\n",
net_rsp->opcode);
break;
}
count++;
ql_update_cq(rx_ring);
prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
if (count == budget)
break;
}
ql_update_buffer_queues(qdev, rx_ring);
ql_write_cq_idx(rx_ring);
return count;
}
static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
struct ql_adapter *qdev = rx_ring->qdev;
struct rx_ring *trx_ring;
int i, work_done = 0;
struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
/* Service the TX rings first. They start
* right after the RSS rings. */
for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
trx_ring = &qdev->rx_ring[i];
/* If this TX completion ring belongs to this vector and
* it's not empty then service it.
*/
if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
(ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
trx_ring->cnsmr_idx)) {
netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
"%s: Servicing TX completion ring %d.\n",
__func__, trx_ring->cq_id);
ql_clean_outbound_rx_ring(trx_ring);
}
}
/*
* Now service the RSS ring if it's active.
*/
if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
rx_ring->cnsmr_idx) {
netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
"%s: Servicing RX completion ring %d.\n",
__func__, rx_ring->cq_id);
work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
}
if (work_done < budget) {
napi_complete(napi);
ql_enable_completion_interrupt(qdev, rx_ring->irq);
}
return work_done;
}
static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
struct ql_adapter *qdev = netdev_priv(ndev);
if (features & NETIF_F_HW_VLAN_CTAG_RX) {
ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
NIC_RCV_CFG_VLAN_MATCH_AND_NON);
} else {
ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
}
}
/**
* qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
* based on the features, to enable/disable hardware vlan acceleration
*/
static int qlge_update_hw_vlan_features(struct net_device *ndev,
netdev_features_t features)
{
struct ql_adapter *qdev = netdev_priv(ndev);
int status = 0;
status = ql_adapter_down(qdev);
if (status) {
netif_err(qdev, link, qdev->ndev,
"Failed to bring down the adapter\n");
return status;
}
/* Update the features with the recent change */
ndev->features = features;
status = ql_adapter_up(qdev);
if (status) {
netif_err(qdev, link, qdev->ndev,
"Failed to bring up the adapter\n");
return status;
}
return status;
}
static netdev_features_t qlge_fix_features(struct net_device *ndev,
netdev_features_t features)
{
int err;
/* Update the behavior of vlan accel in the adapter */
err = qlge_update_hw_vlan_features(ndev, features);
if (err)
return err;
return features;
}
static int qlge_set_features(struct net_device *ndev,
netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
qlge_vlan_mode(ndev, features);
return 0;
}
static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
u32 enable_bit = MAC_ADDR_E;
int err;
err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
MAC_ADDR_TYPE_VLAN, vid);
if (err)
netif_err(qdev, ifup, qdev->ndev,
"Failed to init vlan address.\n");
return err;
}
static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct ql_adapter *qdev = netdev_priv(ndev);
int status;
int err;
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return status;
err = __qlge_vlan_rx_add_vid(qdev, vid);
set_bit(vid, qdev->active_vlans);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
return err;
}
static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
u32 enable_bit = 0;
int err;
err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
MAC_ADDR_TYPE_VLAN, vid);
if (err)
netif_err(qdev, ifup, qdev->ndev,
"Failed to clear vlan address.\n");
return err;
}
static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct ql_adapter *qdev = netdev_priv(ndev);
int status;
int err;
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return status;
err = __qlge_vlan_rx_kill_vid(qdev, vid);
clear_bit(vid, qdev->active_vlans);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
return err;
}
static void qlge_restore_vlan(struct ql_adapter *qdev)
{
int status;
u16 vid;
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return;
for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
__qlge_vlan_rx_add_vid(qdev, vid);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
struct rx_ring *rx_ring = dev_id;
napi_schedule(&rx_ring->napi);
return IRQ_HANDLED;
}
/* This handles a fatal error, MPI activity, and the default
* rx_ring in an MSI-X multiple vector environment.
* In an MSI/legacy environment it also processes the rest of
* the rx_rings.
*/
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
struct rx_ring *rx_ring = dev_id;
struct ql_adapter *qdev = rx_ring->qdev;
struct intr_context *intr_context = &qdev->intr_context[0];
u32 var;
int work_done = 0;
spin_lock(&qdev->hw_lock);
if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
"Shared Interrupt, Not ours!\n");
spin_unlock(&qdev->hw_lock);
return IRQ_NONE;
}
spin_unlock(&qdev->hw_lock);
var = ql_disable_completion_interrupt(qdev, intr_context->intr);
/*
* Check for fatal error.
*/
if (var & STS_FE) {
ql_queue_asic_error(qdev);
netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
var = ql_read32(qdev, ERR_STS);
netdev_err(qdev->ndev, "Resetting chip. "
"Error Status Register = 0x%x\n", var);
return IRQ_HANDLED;
}
/*
* Check MPI processor activity.
*/
if ((var & STS_PI) &&
(ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
/*
* We've got an async event or mailbox completion.
* Handle it and clear the source of the interrupt.
*/
netif_err(qdev, intr, qdev->ndev,
"Got MPI processor interrupt.\n");
ql_disable_completion_interrupt(qdev, intr_context->intr);
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
queue_delayed_work_on(smp_processor_id(),
qdev->workqueue, &qdev->mpi_work, 0);
work_done++;
}
/*
* Get the bit-mask that shows the active queues for this
* pass. Compare it to the queues that this irq services
* and call napi if there's a match.
*/
var = ql_read32(qdev, ISR1);
if (var & intr_context->irq_mask) {
netif_info(qdev, intr, qdev->ndev,
"Waking handler for rx_ring[0].\n");
ql_disable_completion_interrupt(qdev, intr_context->intr);
napi_schedule(&rx_ring->napi);
work_done++;
}
ql_enable_completion_interrupt(qdev, intr_context->intr);
return work_done ? IRQ_HANDLED : IRQ_NONE;
}
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
if (skb_is_gso(skb)) {
int err;
err = skb_cow_head(skb, 0);
if (err < 0)
return err;
mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
mac_iocb_ptr->total_hdrs_len =
cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
mac_iocb_ptr->net_trans_offset =
cpu_to_le16(skb_network_offset(skb) |
skb_transport_offset(skb)
<< OB_MAC_TRANSPORT_HDR_SHIFT);
mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
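/* Seed the TCP checksum with the pseudo-header sum so the chip
* only has to fold in the payload of each segment.
*/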
if (likely(skb->protocol == htons(ETH_P_IP))) {
struct iphdr *iph = ip_hdr(skb);
iph->check = 0;
mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
iph->daddr, 0,
IPPROTO_TCP,
0);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
}
return 1;
}
return 0;
}
static void ql_hw_csum_setup(struct sk_buff *skb,
struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
int len;
struct iphdr *iph = ip_hdr(skb);
__sum16 *check;
mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
mac_iocb_ptr->net_trans_offset =
cpu_to_le16(skb_network_offset(skb) |
skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
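/* The L4 length is the total IP length minus the IP header; it
* seeds the pseudo-header checksum computed below.
*/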
len = (ntohs(iph->tot_len) - (iph->ihl << 2));
if (likely(iph->protocol == IPPROTO_TCP)) {
check = &(tcp_hdr(skb)->check);
mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
mac_iocb_ptr->total_hdrs_len =
cpu_to_le16(skb_transport_offset(skb) +
(tcp_hdr(skb)->doff << 2));
} else {
check = &(udp_hdr(skb)->check);
mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
mac_iocb_ptr->total_hdrs_len =
cpu_to_le16(skb_transport_offset(skb) +
sizeof(struct udphdr));
}
*check = ~csum_tcpudp_magic(iph->saddr,
iph->daddr, len, iph->protocol, 0);
}
static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
struct tx_ring_desc *tx_ring_desc;
struct ob_mac_iocb_req *mac_iocb_ptr;
struct ql_adapter *qdev = netdev_priv(ndev);
int tso;
struct tx_ring *tx_ring;
u32 tx_ring_idx = (u32) skb->queue_mapping;
tx_ring = &qdev->tx_ring[tx_ring_idx];
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
netif_info(qdev, tx_queued, qdev->ndev,
"%s: BUG! shutting down tx queue %d due to lack of resources.\n",
__func__, tx_ring_idx);
netif_stop_subqueue(ndev, tx_ring->wq_id);
tx_ring->tx_errors++;
return NETDEV_TX_BUSY;
}
tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
mac_iocb_ptr = tx_ring_desc->queue_entry;
memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
mac_iocb_ptr->tid = tx_ring_desc->index;
/* We use the upper 32-bits to store the tx queue for this IO.
* When we get the completion we can use it to establish the context.
*/
mac_iocb_ptr->txq_idx = tx_ring_idx;
tx_ring_desc->skb = skb;
mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
if (vlan_tx_tag_present(skb)) {
netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
"Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
}
tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
ql_hw_csum_setup(skb,
(struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
}
if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
NETDEV_TX_OK) {
netif_err(qdev, tx_queued, qdev->ndev,
"Could not map the segments.\n");
tx_ring->tx_errors++;
return NETDEV_TX_BUSY;
}
QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
tx_ring->prod_idx++;
if (tx_ring->prod_idx == tx_ring->wq_len)
tx_ring->prod_idx = 0;
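/* Ensure the IOCB is fully written to memory before ringing the
* producer index doorbell.
*/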
wmb();
ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
"tx queued, slot %d, len %d\n",
tx_ring->prod_idx, skb->len);
atomic_dec(&tx_ring->tx_count);
if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
netif_stop_subqueue(ndev, tx_ring->wq_id);
if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
/*
* The queue got stopped because the tx_ring was full.
* Wake it up, because it's now at least 25% empty.
*/
netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
}
return NETDEV_TX_OK;
}
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
if (qdev->rx_ring_shadow_reg_area) {
pci_free_consistent(qdev->pdev,
PAGE_SIZE,
qdev->rx_ring_shadow_reg_area,
qdev->rx_ring_shadow_reg_dma);
qdev->rx_ring_shadow_reg_area = NULL;
}
if (qdev->tx_ring_shadow_reg_area) {
pci_free_consistent(qdev->pdev,
PAGE_SIZE,
qdev->tx_ring_shadow_reg_area,
qdev->tx_ring_shadow_reg_dma);
qdev->tx_ring_shadow_reg_area = NULL;
}
}
static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
qdev->rx_ring_shadow_reg_area =
pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
&qdev->rx_ring_shadow_reg_dma);
if (qdev->rx_ring_shadow_reg_area == NULL) {
netif_err(qdev, ifup, qdev->ndev,
"Allocation of RX shadow space failed.\n");
return -ENOMEM;
}
qdev->tx_ring_shadow_reg_area =
pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
&qdev->tx_ring_shadow_reg_dma);
if (qdev->tx_ring_shadow_reg_area == NULL) {
netif_err(qdev, ifup, qdev->ndev,
"Allocation of TX shadow space failed.\n");
goto err_wqp_sh_area;
}
return 0;
err_wqp_sh_area:
pci_free_consistent(qdev->pdev,
PAGE_SIZE,
qdev->rx_ring_shadow_reg_area,
qdev->rx_ring_shadow_reg_dma);
return -ENOMEM;
}
static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
struct tx_ring_desc *tx_ring_desc;
int i;
struct ob_mac_iocb_req *mac_iocb_ptr;
mac_iocb_ptr = tx_ring->wq_base;
tx_ring_desc = tx_ring->q;
for (i = 0; i < tx_ring->wq_len; i++) {
tx_ring_desc->index = i;
tx_ring_desc->skb = NULL;
tx_ring_desc->queue_entry = mac_iocb_ptr;
mac_iocb_ptr++;
tx_ring_desc++;
}
atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
}
static void ql_free_tx_resources(struct ql_adapter *qdev,
struct tx_ring *tx_ring)
{
if (tx_ring->wq_base) {
pci_free_consistent(qdev->pdev, tx_ring->wq_size,
tx_ring->wq_base, tx_ring->wq_base_dma);
tx_ring->wq_base = NULL;
}
kfree(tx_ring->q);
tx_ring->q = NULL;
}
static int ql_alloc_tx_resources(struct ql_adapter *qdev,
struct tx_ring *tx_ring)
{
tx_ring->wq_base =
pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
&tx_ring->wq_base_dma);
if ((tx_ring->wq_base == NULL) ||
tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
goto pci_alloc_err;
tx_ring->q =
kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
if (tx_ring->q == NULL)
goto err;
return 0;
err:
pci_free_consistent(qdev->pdev, tx_ring->wq_size,
tx_ring->wq_base, tx_ring->wq_base_dma);
tx_ring->wq_base = NULL;
pci_alloc_err:
netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
return -ENOMEM;
}
static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
struct bq_desc *lbq_desc;
uint32_t curr_idx, clean_idx;
curr_idx = rx_ring->lbq_curr_idx;
clean_idx = rx_ring->lbq_clean_idx;
while (curr_idx != clean_idx) {
lbq_desc = &rx_ring->lbq[curr_idx];
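/* Only the descriptor holding the last chunk of a page block
* owns the DMA mapping, so unmap just once per block.
*/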
if (lbq_desc->p.pg_chunk.last_flag) {
pci_unmap_page(qdev->pdev,
lbq_desc->p.pg_chunk.map,
ql_lbq_block_size(qdev),
PCI_DMA_FROMDEVICE);
lbq_desc->p.pg_chunk.last_flag = 0;
}
put_page(lbq_desc->p.pg_chunk.page);
lbq_desc->p.pg_chunk.page = NULL;
if (++curr_idx == rx_ring->lbq_len)
curr_idx = 0;
}
if (rx_ring->pg_chunk.page) {
pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
put_page(rx_ring->pg_chunk.page);
rx_ring->pg_chunk.page = NULL;
}
}
static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
int i;
struct bq_desc *sbq_desc;
for (i = 0; i < rx_ring->sbq_len; i++) {
sbq_desc = &rx_ring->sbq[i];
if (sbq_desc == NULL) {
netif_err(qdev, ifup, qdev->ndev,
"sbq_desc %d is NULL.\n", i);
return;
}
if (sbq_desc->p.skb) {
pci_unmap_single(qdev->pdev,
dma_unmap_addr(sbq_desc, mapaddr),
dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
dev_kfree_skb(sbq_desc->p.skb);
sbq_desc->p.skb = NULL;
}
}
}
/* Free all large and small rx buffers associated
* with the completion queues for this device.
*/
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
int i;
struct rx_ring *rx_ring;
for (i = 0; i < qdev->rx_ring_count; i++) {
rx_ring = &qdev->rx_ring[i];
if (rx_ring->lbq)
ql_free_lbq_buffers(qdev, rx_ring);
if (rx_ring->sbq)
ql_free_sbq_buffers(qdev, rx_ring);
}
}
static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
struct rx_ring *rx_ring;
int i;
for (i = 0; i < qdev->rx_ring_count; i++) {
rx_ring = &qdev->rx_ring[i];
if (rx_ring->type != TX_Q)
ql_update_buffer_queues(qdev, rx_ring);
}
}
static void ql_init_lbq_ring(struct ql_adapter *qdev,
struct rx_ring *rx_ring)
{
int i;
struct bq_desc *lbq_desc;
__le64 *bq = rx_ring->lbq_base;
memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
for (i = 0; i < rx_ring->lbq_len; i++) {
lbq_desc = &rx_ring->lbq[i];
memset(lbq_desc, 0, sizeof(*lbq_desc));
lbq_desc->index = i;
lbq_desc->addr = bq;
bq++;
}
}
static void ql_init_sbq_ring(struct ql_adapter *qdev,
struct rx_ring *rx_ring)
{
int i;
struct bq_desc *sbq_desc;
__le64 *bq = rx_ring->sbq_base;
memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
for (i = 0; i < rx_ring->sbq_len; i++) {
sbq_desc = &rx_ring->sbq[i];
memset(sbq_desc, 0, sizeof(*sbq_desc));
sbq_desc->index = i;
sbq_desc->addr = bq;
bq++;
}
}
static void ql_free_rx_resources(struct ql_adapter *qdev,
struct rx_ring *rx_ring)
{
/* Free the small buffer queue. */
if (rx_ring->sbq_base) {
pci_free_consistent(qdev->pdev,
rx_ring->sbq_size,
rx_ring->sbq_base, rx_ring->sbq_base_dma);
rx_ring->sbq_base = NULL;
}
/* Free the small buffer queue control blocks. */
kfree(rx_ring->sbq);
rx_ring->sbq = NULL;
/* Free the large buffer queue. */
if (rx_ring->lbq_base) {
pci_free_consistent(qdev->pdev,
rx_ring->lbq_size,
rx_ring->lbq_base, rx_ring->lbq_base_dma);
rx_ring->lbq_base = NULL;
}
/* Free the large buffer queue control blocks. */
kfree(rx_ring->lbq);
rx_ring->lbq = NULL;
/* Free the rx queue. */
if (rx_ring->cq_base) {
pci_free_consistent(qdev->pdev,
rx_ring->cq_size,
rx_ring->cq_base, rx_ring->cq_base_dma);
rx_ring->cq_base = NULL;
}
}
/* Allocate queues and buffers for this completion queue based
* on the values in the parameter structure. */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
struct rx_ring *rx_ring)
{
/*
* Allocate the completion queue for this rx_ring.
*/
rx_ring->cq_base =
pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
&rx_ring->cq_base_dma);
if (rx_ring->cq_base == NULL) {
netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
return -ENOMEM;
}
if (rx_ring->sbq_len) {
/*
* Allocate small buffer queue.
*/
rx_ring->sbq_base =
pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
&rx_ring->sbq_base_dma);
if (rx_ring->sbq_base == NULL) {
netif_err(qdev, ifup, qdev->ndev,
"Small buffer queue allocation failed.\n");
goto err_mem;
}
/*
* Allocate small buffer queue control blocks.
*/
rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
sizeof(struct bq_desc),
GFP_KERNEL);
if (rx_ring->sbq == NULL)
goto err_mem;
ql_init_sbq_ring(qdev, rx_ring);
}
if (rx_ring->lbq_len) {
/*
* Allocate large buffer queue.
*/
rx_ring->lbq_base =
pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
&rx_ring->lbq_base_dma);
if (rx_ring->lbq_base == NULL) {
netif_err(qdev, ifup, qdev->ndev,
"Large buffer queue allocation failed.\n");
goto err_mem;
}
/*
* Allocate large buffer queue control blocks.
*/
rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
sizeof(struct bq_desc),
GFP_KERNEL);
if (rx_ring->lbq == NULL)
goto err_mem;
ql_init_lbq_ring(qdev, rx_ring);
}
return 0;
err_mem:
ql_free_rx_resources(qdev, rx_ring);
return -ENOMEM;
}
static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
struct tx_ring *tx_ring;
struct tx_ring_desc *tx_ring_desc;
int i, j;
/*
* Loop through all queues and free
* any resources.
*/
for (j = 0; j < qdev->tx_ring_count; j++) {
tx_ring = &qdev->tx_ring[j];
for (i = 0; i < tx_ring->wq_len; i++) {
tx_ring_desc = &tx_ring->q[i];
if (tx_ring_desc && tx_ring_desc->skb) {
netif_err(qdev, ifdown, qdev->ndev,
"Freeing lost SKB %p, from queue %d, index %d.\n",
tx_ring_desc->skb, j,
tx_ring_desc->index);
ql_unmap_send(qdev, tx_ring_desc,
tx_ring_desc->map_cnt);
dev_kfree_skb(tx_ring_desc->skb);
tx_ring_desc->skb = NULL;
}
}
}
}
static void ql_free_mem_resources(struct ql_adapter *qdev)
{
int i;
for (i = 0; i < qdev->tx_ring_count; i++)
ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
for (i = 0; i < qdev->rx_ring_count; i++)
ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
ql_free_shadow_space(qdev);
}
static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
int i;
/* Allocate space for our shadow registers and such. */
if (ql_alloc_shadow_space(qdev))
return -ENOMEM;
for (i = 0; i < qdev->rx_ring_count; i++) {
if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
netif_err(qdev, ifup, qdev->ndev,
"RX resource allocation failed.\n");
goto err_mem;
}
}
/* Allocate tx queue resources */
for (i = 0; i < qdev->tx_ring_count; i++) {
if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
netif_err(qdev, ifup, qdev->ndev,
"TX resource allocation failed.\n");
goto err_mem;
}
}
return 0;
err_mem:
ql_free_mem_resources(qdev);
return -ENOMEM;
}
/* Set up the rx ring control block and pass it to the chip.
* The control block is defined as
* "Completion Queue Initialization Control Block", or cqicb.
*/
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
struct cqicb *cqicb = &rx_ring->cqicb;
void *shadow_reg = qdev->rx_ring_shadow_reg_area +
(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
void __iomem *doorbell_area =
qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
int err = 0;
u16 bq_len;
u64 tmp;
__le64 *base_indirect_ptr;
int page_entries;
/* Set up the shadow registers for this ring. */
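/* The per-ring shadow area holds, in order: an 8-byte producer
* index, the lbq base-address indirection list, and the sbq
* base-address indirection list.
*/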
rx_ring->prod_idx_sh_reg = shadow_reg;
rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
*rx_ring->prod_idx_sh_reg = 0;
shadow_reg += sizeof(u64);
shadow_reg_dma += sizeof(u64);
rx_ring->lbq_base_indirect = shadow_reg;
rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
rx_ring->sbq_base_indirect = shadow_reg;
rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
/* PCI doorbell mem area + 0x00 for consumer index register */
rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
rx_ring->cnsmr_idx = 0;
rx_ring->curr_entry = rx_ring->cq_base;
/* PCI doorbell mem area + 0x04 for valid register */
rx_ring->valid_db_reg = doorbell_area + 0x04;
/* PCI doorbell mem area + 0x18 for large buffer consumer */
rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
/* PCI doorbell mem area + 0x1c */
rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
memset((void *)cqicb, 0, sizeof(struct cqicb));
cqicb->msix_vect = rx_ring->irq;
bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
/*
* Set up the control block load flags.
*/
cqicb->flags = FLAGS_LC | /* Load queue base address */
FLAGS_LV | /* Load MSI-X vector */
FLAGS_LI; /* Load irq delay values */
if (rx_ring->lbq_len) {
cqicb->flags |= FLAGS_LL; /* Load lbq values */
tmp = (u64)rx_ring->lbq_base_dma;
base_indirect_ptr = rx_ring->lbq_base_indirect;
page_entries = 0;
do {
*base_indirect_ptr = cpu_to_le64(tmp);
tmp += DB_PAGE_SIZE;
base_indirect_ptr++;
page_entries++;
} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
cqicb->lbq_addr =
cpu_to_le64(rx_ring->lbq_base_indirect_dma);
bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
(u16) rx_ring->lbq_buf_size;
cqicb->lbq_buf_size = cpu_to_le16(bq_len);
bq_len = (rx_ring->lbq_len == 65536) ? 0 :
(u16) rx_ring->lbq_len;
cqicb->lbq_len = cpu_to_le16(bq_len);
rx_ring->lbq_prod_idx = 0;
rx_ring->lbq_curr_idx = 0;
rx_ring->lbq_clean_idx = 0;
rx_ring->lbq_free_cnt = rx_ring->lbq_len;
}
if (rx_ring->sbq_len) {
cqicb->flags |= FLAGS_LS; /* Load sbq values */
tmp = (u64)rx_ring->sbq_base_dma;
base_indirect_ptr = rx_ring->sbq_base_indirect;
page_entries = 0;
do {
*base_indirect_ptr = cpu_to_le64(tmp);
tmp += DB_PAGE_SIZE;
base_indirect_ptr++;
page_entries++;
} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
cqicb->sbq_addr =
cpu_to_le64(rx_ring->sbq_base_indirect_dma);
cqicb->sbq_buf_size =
cpu_to_le16((u16)(rx_ring->sbq_buf_size));
bq_len = (rx_ring->sbq_len == 65536) ? 0 :
(u16) rx_ring->sbq_len;
cqicb->sbq_len = cpu_to_le16(bq_len);
rx_ring->sbq_prod_idx = 0;
rx_ring->sbq_curr_idx = 0;
rx_ring->sbq_clean_idx = 0;
rx_ring->sbq_free_cnt = rx_ring->sbq_len;
}
switch (rx_ring->type) {
case TX_Q:
cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
break;
case RX_Q:
/* Inbound completion handling rx_rings run in
* separate NAPI contexts.
*/
netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
64);
cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
break;
default:
netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
"Invalid rx_ring->type = %d.\n", rx_ring->type);
}
err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
CFG_LCQ, rx_ring->cq_id);
if (err) {
netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
return err;
}
return err;
}
static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
struct wqicb *wqicb = (struct wqicb *)tx_ring;
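/* This cast assumes the wqicb is the first member of struct
* tx_ring, so the ring structure doubles as its init control block.
*/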
void __iomem *doorbell_area =
qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
void *shadow_reg = qdev->tx_ring_shadow_reg_area +
(tx_ring->wq_id * sizeof(u64));
u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
(tx_ring->wq_id * sizeof(u64));
int err = 0;
/*
* Assign doorbell registers for this tx_ring.
*/
/* TX PCI doorbell mem area for tx producer index */
tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
tx_ring->prod_idx = 0;
/* TX PCI doorbell mem area + 0x04 */
tx_ring->valid_db_reg = doorbell_area + 0x04;
/*
* Assign shadow registers for this tx_ring.
*/
tx_ring->cnsmr_idx_sh_reg = shadow_reg;
tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
wqicb->rid = 0;
wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
ql_init_tx_ring(qdev, tx_ring);
err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
(u16) tx_ring->wq_id);
if (err) {
netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
return err;
}
return err;
}
static void ql_disable_msix(struct ql_adapter *qdev)
{
if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
pci_disable_msix(qdev->pdev);
clear_bit(QL_MSIX_ENABLED, &qdev->flags);
kfree(qdev->msi_x_entry);
qdev->msi_x_entry = NULL;
} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
pci_disable_msi(qdev->pdev);
clear_bit(QL_MSI_ENABLED, &qdev->flags);
}
}
/* We start by trying to get the number of vectors
* stored in qdev->intr_count. If fewer are available,
* pci_enable_msix_range() returns the reduced count and we use that.
*/
static void ql_enable_msix(struct ql_adapter *qdev)
{
int i, err;
/* Get the MSIX vectors. */
if (qlge_irq_type == MSIX_IRQ) {
/* Try to alloc space for the msix struct,
* if it fails then go to MSI/legacy.
*/
qdev->msi_x_entry = kcalloc(qdev->intr_count,
sizeof(struct msix_entry),
GFP_KERNEL);
if (!qdev->msi_x_entry) {
qlge_irq_type = MSI_IRQ;
goto msi;
}
for (i = 0; i < qdev->intr_count; i++)
qdev->msi_x_entry[i].entry = i;
err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
1, qdev->intr_count);
if (err < 0) {
kfree(qdev->msi_x_entry);
qdev->msi_x_entry = NULL;
netif_warn(qdev, ifup, qdev->ndev,
"MSI-X Enable failed, trying MSI.\n");
qlge_irq_type = MSI_IRQ;
} else {
qdev->intr_count = err;
set_bit(QL_MSIX_ENABLED, &qdev->flags);
netif_info(qdev, ifup, qdev->ndev,
"MSI-X Enabled, got %d vectors.\n",
qdev->intr_count);
return;
}
}
msi:
qdev->intr_count = 1;
if (qlge_irq_type == MSI_IRQ) {
if (!pci_enable_msi(qdev->pdev)) {
set_bit(QL_MSI_ENABLED, &qdev->flags);
netif_info(qdev, ifup, qdev->ndev,
"Running with MSI interrupts.\n");
return;
}
}
qlge_irq_type = LEG_IRQ;
netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
"Running with legacy interrupts.\n");
}
/* Each vector services 1 RSS ring and 1 or more
* TX completion rings. This function loops through
* the TX completion rings and assigns the vector that
* will service it. An example would be if there are
* 2 vectors (so 2 RSS rings) and 8 TX completion rings.
* This would mean that vector 0 would service RSS ring 0
* and TX completion rings 0,1,2 and 3. Vector 1 would
* service RSS ring 1 and TX completion rings 4,5,6 and 7.
*/
static void ql_set_tx_vect(struct ql_adapter *qdev)
{
int i, j, vect;
u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
/* Assign irq vectors to TX rx_rings.*/
for (vect = 0, j = 0, i = qdev->rss_ring_count;
i < qdev->rx_ring_count; i++) {
if (j == tx_rings_per_vector) {
vect++;
j = 0;
}
qdev->rx_ring[i].irq = vect;
j++;
}
} else {
/* For single vector all rings have an irq
* of zero.
*/
for (i = 0; i < qdev->rx_ring_count; i++)
qdev->rx_ring[i].irq = 0;
}
}
/* Set the interrupt mask for this vector. Each vector
* will service 1 RSS ring and 1 or more TX completion
* rings. This function sets up a bit mask per vector
* that indicates which rings it services.
*/
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
int j, vect = ctx->intr;
u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
/* Add the RSS ring serviced by this vector
* to the mask.
*/
ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
/* Add the TX ring(s) serviced by this vector
* to the mask. */
for (j = 0; j < tx_rings_per_vector; j++) {
ctx->irq_mask |=
(1 << qdev->rx_ring[qdev->rss_ring_count +
(vect * tx_rings_per_vector) + j].cq_id);
}
} else {
/* For single vector we just shift each queue's
* ID into the mask.
*/
for (j = 0; j < qdev->rx_ring_count; j++)
ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
}
}
/*
* Here we build the intr_context structures based on
* our rx_ring count and intr vector count.
* The intr_context structure is used to hook each vector
* to possibly different handlers.
*/
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
int i = 0;
struct intr_context *intr_context = &qdev->intr_context[0];
if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
/* Each rx_ring has its
* own intr_context since we have separate
* vectors for each queue.
*/
for (i = 0; i < qdev->intr_count; i++, intr_context++) {
qdev->rx_ring[i].irq = i;
intr_context->intr = i;
intr_context->qdev = qdev;
/* Set up this vector's bit-mask that indicates
* which queues it services.
*/
ql_set_irq_mask(qdev, intr_context);
/*
* We set up each vector's enable/disable/read bits so
* there are no bit/mask calculations in the critical path.
*/
intr_context->intr_en_mask =
INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
| i;
intr_context->intr_dis_mask =
INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
INTR_EN_IHD | i;
intr_context->intr_read_mask =
INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
i;
if (i == 0) {
/* The first vector/queue handles
* broadcast/multicast, fatal errors,
* and firmware events. This in addition
* to normal inbound NAPI processing.
*/
intr_context->handler = qlge_isr;
sprintf(intr_context->name, "%s-rx-%d",
qdev->ndev->name, i);
} else {
/*
* Inbound queues handle unicast frames only.
*/
intr_context->handler = qlge_msix_rx_isr;
sprintf(intr_context->name, "%s-rx-%d",
qdev->ndev->name, i);
}
}
} else {
/*
* All rx_rings use the same intr_context since
* there is only one vector.
*/
intr_context->intr = 0;
intr_context->qdev = qdev;
/*
* We set up each vector's enable/disable/read bits so
* there are no bit/mask calculations in the critical path.
*/
intr_context->intr_en_mask =
INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
intr_context->intr_dis_mask =
INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
INTR_EN_TYPE_DISABLE;
intr_context->intr_read_mask =
INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
/*
* Single interrupt means one handler for all rings.
*/
intr_context->handler = qlge_isr;
sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
/* Set up this vector's bit-mask that indicates
* which queues it services. In this case there is
* a single vector so it will service all RSS and
* TX completion rings.
*/
ql_set_irq_mask(qdev, intr_context);
}
/* Tell the TX completion rings which MSIx vector
* they will be using.
*/
ql_set_tx_vect(qdev);
}
static void ql_free_irq(struct ql_adapter *qdev)
{
int i;
struct intr_context *intr_context = &qdev->intr_context[0];
for (i = 0; i < qdev->intr_count; i++, intr_context++) {
if (intr_context->hooked) {
if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
free_irq(qdev->msi_x_entry[i].vector,
&qdev->rx_ring[i]);
} else {
free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
}
}
}
ql_disable_msix(qdev);
}
static int ql_request_irq(struct ql_adapter *qdev)
{
int i;
int status = 0;
struct pci_dev *pdev = qdev->pdev;
struct intr_context *intr_context = &qdev->intr_context[0];
ql_resolve_queues_to_irqs(qdev);
for (i = 0; i < qdev->intr_count; i++, intr_context++) {
atomic_set(&intr_context->irq_cnt, 0);
if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
status = request_irq(qdev->msi_x_entry[i].vector,
intr_context->handler,
0,
intr_context->name,
&qdev->rx_ring[i]);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed request for MSIX interrupt %d.\n",
i);
goto err_irq;
}
} else {
netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
"trying msi or legacy interrupts.\n");
netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
"%s: irq = %d.\n", __func__, pdev->irq);
netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
"%s: context->name = %s.\n", __func__,
intr_context->name);
netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
"%s: dev_id = 0x%p.\n", __func__,
&qdev->rx_ring[0]);
status =
request_irq(pdev->irq, qlge_isr,
test_bit(QL_MSI_ENABLED, &qdev->flags) ?
0 : IRQF_SHARED,
intr_context->name, &qdev->rx_ring[0]);
if (status)
goto err_irq;
netif_err(qdev, ifup, qdev->ndev,
"Hooked intr %d, queue type %s, with name %s.\n",
i,
qdev->rx_ring[0].type == DEFAULT_Q ?
"DEFAULT_Q" :
qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
intr_context->name);
}
intr_context->hooked = 1;
}
return status;
err_irq:
netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
ql_free_irq(qdev);
return status;
}
static int ql_start_rss(struct ql_adapter *qdev)
{
static const u8 init_hash_seed[] = {
0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};
struct ricb *ricb = &qdev->ricb;
int status = 0;
int i;
u8 *hash_id = (u8 *) ricb->hash_cq_id;
memset((void *)ricb, 0, sizeof(*ricb));
ricb->base_cq = RSS_L4K;
ricb->flags =
(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
ricb->mask = cpu_to_le16((u16)(0x3ff));
/*
* Fill out the Indirection Table.
*/
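/* Each of the 1024 table entries maps a hash bucket to an inbound
* queue. The mask i & (rss_ring_count - 1) spreads buckets evenly
* and requires rss_ring_count to be a power of two.
*/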
for (i = 0; i < 1024; i++)
hash_id[i] = (i & (qdev->rss_ring_count - 1));
memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
return status;
}
return status;
}
static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
int i, status = 0;
status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
if (status)
return status;
/* Clear all the entries in the routing table. */
for (i = 0; i < 16; i++) {
status = ql_set_routing_reg(qdev, i, 0, 0);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init routing register for CAM packets.\n");
break;
}
}
ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
return status;
}
/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
int status = 0;
/* Clear all the entries in the routing table. */
status = ql_clear_routing_entries(qdev);
if (status)
return status;
status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
if (status)
return status;
status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
RT_IDX_IP_CSUM_ERR, 1);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init routing register "
"for IP CSUM error packets.\n");
goto exit;
}
status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
RT_IDX_TU_CSUM_ERR, 1);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init routing register "
"for TCP/UDP CSUM error packets.\n");
goto exit;
}
status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init routing register for broadcast packets.\n");
goto exit;
}
/* If we have more than one inbound queue, then turn on RSS in the
* routing block.
*/
if (qdev->rss_ring_count > 1) {
status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
RT_IDX_RSS_MATCH, 1);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init routing register for MATCH RSS packets.\n");
goto exit;
}
}
status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
RT_IDX_CAM_HIT, 1);
if (status)
netif_err(qdev, ifup, qdev->ndev,
"Failed to init routing register for CAM packets.\n");
exit:
ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
return status;
}
int ql_cam_route_initialize(struct ql_adapter *qdev)
{
int status, set;
/* Check if the link is up, and use that to
* determine if we are setting or clearing
* the MAC address in the CAM.
*/
set = ql_read32(qdev, STS);
set &= qdev->port_link_up;
status = ql_set_mac_addr(qdev, set);
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
return status;
}
status = ql_route_initialize(qdev);
if (status)
netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
return status;
}
static int ql_adapter_initialize(struct ql_adapter *qdev)
{
u32 value, mask;
int i;
int status = 0;
/*
* Set up the System register to halt on errors.
*/
value = SYS_EFE | SYS_FAE;
mask = value << 16;
ql_write32(qdev, SYS, mask | value);
/* Set the default queue, and VLAN behavior. */
value = NIC_RCV_CFG_DFQ;
mask = NIC_RCV_CFG_DFQ_MASK;
if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
value |= NIC_RCV_CFG_RV;
mask |= (NIC_RCV_CFG_RV << 16);
}
ql_write32(qdev, NIC_RCV_CFG, (mask | value));
/* Set the MPI interrupt to enabled. */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
/* Enable the function, set pagesize, enable error checking. */
value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
FSC_EC | FSC_VM_PAGE_4K;
value |= SPLT_SETTING;
/* Set/clear header splitting. */
mask = FSC_VM_PAGESIZE_MASK |
FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
ql_write32(qdev, FSC, mask | value);
ql_write32(qdev, SPLT_HDR, SPLT_LEN);
	/* Set RX packet routing to use the port/PCI function on which
	 * the packet arrived, in addition to the usual frame routing.
	 * This is helpful when bonding, where both interfaces can have
	 * the same MAC address.
	 */
ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
	/* Reroute all packets to our interface.
	 * They may have been routed to MPI firmware
	 * due to WOL.
	 */
value = ql_read32(qdev, MGMT_RCV_CFG);
value &= ~MGMT_RCV_CFG_RM;
mask = 0xffff0000;
/* Sticky reg needs clearing due to WOL. */
ql_write32(qdev, MGMT_RCV_CFG, mask);
ql_write32(qdev, MGMT_RCV_CFG, mask | value);
	/* Default WOL is enabled on Mezz cards */
if (qdev->pdev->subsystem_device == 0x0068 ||
qdev->pdev->subsystem_device == 0x0180)
qdev->wol = WAKE_MAGIC;
/* Start up the rx queues. */
for (i = 0; i < qdev->rx_ring_count; i++) {
status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to start rx ring[%d].\n", i);
return status;
}
}
/* If there is more than one inbound completion queue
* then download a RICB to configure RSS.
*/
if (qdev->rss_ring_count > 1) {
status = ql_start_rss(qdev);
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
return status;
}
}
/* Start up the tx queues. */
for (i = 0; i < qdev->tx_ring_count; i++) {
status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to start tx ring[%d].\n", i);
return status;
}
}
/* Initialize the port and set the max framesize. */
status = qdev->nic_ops->port_initialize(qdev);
if (status)
netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
/* Set up the MAC address and frame routing filter. */
status = ql_cam_route_initialize(qdev);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init CAM/Routing tables.\n");
return status;
}
/* Start NAPI for the RSS queues. */
for (i = 0; i < qdev->rss_ring_count; i++)
napi_enable(&qdev->rx_ring[i].napi);
return status;
}
/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
u32 value;
int status = 0;
unsigned long end_jiffies;
/* Clear all the entries in the routing table. */
status = ql_clear_routing_entries(qdev);
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
return status;
}
end_jiffies = jiffies +
max((unsigned long)1, usecs_to_jiffies(30));
	/* If the recovery bit is set, skip the mailbox commands and just
	 * clear the bit; otherwise go through the normal reset handshake
	 * with the firmware.
	 */
if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
/* Stop management traffic. */
ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
/* Wait for the NIC and MGMNT FIFOs to empty. */
ql_wait_fifo_empty(qdev);
	} else {
		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
	}
ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
do {
value = ql_read32(qdev, RST_FO);
if ((value & RST_FO_FR) == 0)
break;
cpu_relax();
} while (time_before(jiffies, end_jiffies));
if (value & RST_FO_FR) {
netif_err(qdev, ifdown, qdev->ndev,
"ETIMEDOUT!!! errored out of resetting the chip!\n");
status = -ETIMEDOUT;
}
/* Resume management traffic. */
ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
return status;
}
static void ql_display_dev_info(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
netif_info(qdev, probe, qdev->ndev,
"Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
"XG Roll = %d, XG Rev = %d.\n",
qdev->func,
qdev->port,
qdev->chip_rev_id & 0x0000000f,
qdev->chip_rev_id >> 4 & 0x0000000f,
qdev->chip_rev_id >> 8 & 0x0000000f,
qdev->chip_rev_id >> 12 & 0x0000000f);
netif_info(qdev, probe, qdev->ndev,
"MAC address %pM\n", ndev->dev_addr);
}
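/* Editor's worked example (not from the original driver): given
 * chip_rev_id == 0x3421, the nibble decode above reports NIC Roll 1,
 * NIC Rev 2, XG Roll 4 and XG Rev 3, one nibble per field starting
 * from the least significant nibble.
 */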
static int ql_wol(struct ql_adapter *qdev)
{
int status = 0;
u32 wol = MB_WOL_DISABLE;
/* The CAM is still intact after a reset, but if we
* are doing WOL, then we may need to program the
* routing regs. We would also need to issue the mailbox
* commands to instruct the MPI what to do per the ethtool
* settings.
*/
if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
WAKE_MCAST | WAKE_BCAST)) {
netif_err(qdev, ifdown, qdev->ndev,
"Unsupported WOL parameter. qdev->wol = 0x%x.\n",
qdev->wol);
return -EINVAL;
}
if (qdev->wol & WAKE_MAGIC) {
status = ql_mb_wol_set_magic(qdev, 1);
if (status) {
netif_err(qdev, ifdown, qdev->ndev,
"Failed to set magic packet on %s.\n",
qdev->ndev->name);
return status;
		} else {
			netif_info(qdev, drv, qdev->ndev,
				   "Enabled magic packet successfully on %s.\n",
				   qdev->ndev->name);
		}
wol |= MB_WOL_MAGIC_PKT;
}
if (qdev->wol) {
wol |= MB_WOL_MODE_ON;
status = ql_mb_wol_mode(qdev, wol);
netif_err(qdev, drv, qdev->ndev,
"WOL %s (wol code 0x%x) on %s\n",
(status == 0) ? "Successfully set" : "Failed",
wol, qdev->ndev->name);
}
return status;
}
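#if 0
/* Editor's sketch, not part of this driver: how an ethtool .set_wol
 * hook could populate the qdev->wol value that ql_wol() consumes on
 * suspend. "example_set_wol" is a hypothetical name; only WAKE_MAGIC
 * is accepted here because that is the only mode ql_wol() programs.
 */
static int example_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	qdev->wol = wol->wolopts;	/* picked up by ql_wol() on suspend */
	return 0;
}
#endif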
static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
{
/* Don't kill the reset worker thread if we
* are in the process of recovery.
*/
if (test_bit(QL_ADAPTER_UP, &qdev->flags))
cancel_delayed_work_sync(&qdev->asic_reset_work);
cancel_delayed_work_sync(&qdev->mpi_reset_work);
cancel_delayed_work_sync(&qdev->mpi_work);
cancel_delayed_work_sync(&qdev->mpi_idc_work);
cancel_delayed_work_sync(&qdev->mpi_core_to_log);
cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
}
static int ql_adapter_down(struct ql_adapter *qdev)
{
int i, status = 0;
ql_link_off(qdev);
ql_cancel_all_work_sync(qdev);
for (i = 0; i < qdev->rss_ring_count; i++)
napi_disable(&qdev->rx_ring[i].napi);
clear_bit(QL_ADAPTER_UP, &qdev->flags);
ql_disable_interrupts(qdev);
ql_tx_ring_clean(qdev);
	/* Call netif_napi_del() from a common point. */
for (i = 0; i < qdev->rss_ring_count; i++)
netif_napi_del(&qdev->rx_ring[i].napi);
status = ql_adapter_reset(qdev);
if (status)
netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
qdev->func);
ql_free_rx_buffers(qdev);
return status;
}
static int ql_adapter_up(struct ql_adapter *qdev)
{
int err = 0;
err = ql_adapter_initialize(qdev);
if (err) {
netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
goto err_init;
}
set_bit(QL_ADAPTER_UP, &qdev->flags);
ql_alloc_rx_buffers(qdev);
	/* If the port is initialized and the
	 * link is up, then turn on the carrier.
	 */
if ((ql_read32(qdev, STS) & qdev->port_init) &&
(ql_read32(qdev, STS) & qdev->port_link_up))
ql_link_on(qdev);
/* Restore rx mode. */
clear_bit(QL_ALLMULTI, &qdev->flags);
clear_bit(QL_PROMISCUOUS, &qdev->flags);
qlge_set_multicast_list(qdev->ndev);
/* Restore vlan setting. */
qlge_restore_vlan(qdev);
ql_enable_interrupts(qdev);
ql_enable_all_completion_interrupts(qdev);
netif_tx_start_all_queues(qdev->ndev);
return 0;
err_init:
ql_adapter_reset(qdev);
return err;
}
static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
ql_free_mem_resources(qdev);
ql_free_irq(qdev);
}
static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
int status = 0;
if (ql_alloc_mem_resources(qdev)) {
netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
return -ENOMEM;
}
status = ql_request_irq(qdev);
return status;
}
static int qlge_close(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
	/* If we hit the pci_channel_io_perm_failure
	 * condition, then we already
	 * brought the adapter down.
	 */
if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
clear_bit(QL_EEH_FATAL, &qdev->flags);
return 0;
}
/*
* Wait for device to recover from a reset.
* (Rarely happens, but possible.)
*/
while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
msleep(1);
ql_adapter_down(qdev);
ql_release_adapter_resources(qdev);
return 0;
}
static int ql_configure_rings(struct ql_adapter *qdev)
{
int i;
struct rx_ring *rx_ring;
struct tx_ring *tx_ring;
int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
qdev->lbq_buf_order = get_order(lbq_buf_len);
/* In a perfect world we have one RSS ring for each CPU
	 * and each has its own vector. To do that we ask for
* cpu_cnt vectors. ql_enable_msix() will adjust the
* vector count to what we actually get. We then
* allocate an RSS ring for each.
* Essentially, we are doing min(cpu_count, msix_vector_count).
*/
qdev->intr_count = cpu_cnt;
ql_enable_msix(qdev);
/* Adjust the RSS ring count to the actual vector count. */
qdev->rss_ring_count = qdev->intr_count;
qdev->tx_ring_count = cpu_cnt;
qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
for (i = 0; i < qdev->tx_ring_count; i++) {
tx_ring = &qdev->tx_ring[i];
memset((void *)tx_ring, 0, sizeof(*tx_ring));
tx_ring->qdev = qdev;
tx_ring->wq_id = i;
tx_ring->wq_len = qdev->tx_ring_size;
tx_ring->wq_size =
tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
		/*
		 * The completion queue IDs for the tx rings start
		 * immediately after the rss rings.
		 */
tx_ring->cq_id = qdev->rss_ring_count + i;
}
for (i = 0; i < qdev->rx_ring_count; i++) {
rx_ring = &qdev->rx_ring[i];
memset((void *)rx_ring, 0, sizeof(*rx_ring));
rx_ring->qdev = qdev;
rx_ring->cq_id = i;
rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
if (i < qdev->rss_ring_count) {
/*
* Inbound (RSS) queues.
*/
rx_ring->cq_len = qdev->rx_ring_size;
rx_ring->cq_size =
rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
rx_ring->lbq_len = NUM_LARGE_BUFFERS;
rx_ring->lbq_size =
rx_ring->lbq_len * sizeof(__le64);
rx_ring->lbq_buf_size = (u16)lbq_buf_len;
rx_ring->sbq_len = NUM_SMALL_BUFFERS;
rx_ring->sbq_size =
rx_ring->sbq_len * sizeof(__le64);
rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
rx_ring->type = RX_Q;
} else {
			/*
			 * An outbound queue handles outbound completions only,
			 * so its cq is the same size as the tx_ring it services.
			 */
rx_ring->cq_len = qdev->tx_ring_size;
rx_ring->cq_size =
rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
rx_ring->lbq_len = 0;
rx_ring->lbq_size = 0;
rx_ring->lbq_buf_size = 0;
rx_ring->sbq_len = 0;
rx_ring->sbq_size = 0;
rx_ring->sbq_buf_size = 0;
rx_ring->type = TX_Q;
}
}
return 0;
}
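/* Editor's illustration (not in the original source): on a hypothetical
 * 4-CPU box that is granted all 4 MSI-X vectors, the code above yields
 * rss_ring_count = 4, tx_ring_count = 4 and rx_ring_count = 8.
 * Completion queues 0-3 carry inbound (RSS) traffic and queues 4-7
 * carry TX completions, i.e. tx_ring[i].cq_id == 4 + i.
 */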
static int qlge_open(struct net_device *ndev)
{
int err = 0;
struct ql_adapter *qdev = netdev_priv(ndev);
err = ql_adapter_reset(qdev);
if (err)
return err;
err = ql_configure_rings(qdev);
if (err)
return err;
err = ql_get_adapter_resources(qdev);
if (err)
goto error_up;
err = ql_adapter_up(qdev);
if (err)
goto error_up;
return err;
error_up:
ql_release_adapter_resources(qdev);
return err;
}
static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
struct rx_ring *rx_ring;
int i, status;
u32 lbq_buf_len;
/* Wait for an outstanding reset to complete. */
	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
		int i = 3;
		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Waiting for adapter UP...\n");
			ssleep(1);
		}
		/* Re-test the flag itself; the loop counter alone cannot
		 * distinguish a late success from a timeout.
		 */
		if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Timed out waiting for adapter UP\n");
			return -ETIMEDOUT;
		}
	}
status = ql_adapter_down(qdev);
if (status)
goto error;
/* Get the new rx buffer size. */
lbq_buf_len = (qdev->ndev->mtu > 1500) ?
LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
qdev->lbq_buf_order = get_order(lbq_buf_len);
for (i = 0; i < qdev->rss_ring_count; i++) {
rx_ring = &qdev->rx_ring[i];
/* Set the new size. */
rx_ring->lbq_buf_size = lbq_buf_len;
}
status = ql_adapter_up(qdev);
if (status)
goto error;
return status;
error:
netif_alert(qdev, ifup, qdev->ndev,
"Driver up/down cycle failed, closing device.\n");
set_bit(QL_ADAPTER_UP, &qdev->flags);
dev_close(qdev->ndev);
return status;
}
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
struct ql_adapter *qdev = netdev_priv(ndev);
int status;
if (ndev->mtu == 1500 && new_mtu == 9000) {
netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
} else if (ndev->mtu == 9000 && new_mtu == 1500) {
netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	} else {
		return -EINVAL;
	}
queue_delayed_work(qdev->workqueue,
&qdev->mpi_port_cfg_work, 3*HZ);
ndev->mtu = new_mtu;
	if (!netif_running(qdev->ndev))
		return 0;
status = ql_change_rx_buffers(qdev);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Changing MTU failed.\n");
}
return status;
}
static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
struct rx_ring *rx_ring = &qdev->rx_ring[0];
struct tx_ring *tx_ring = &qdev->tx_ring[0];
unsigned long pkts, mcast, dropped, errors, bytes;
int i;
/* Get RX stats. */
pkts = mcast = dropped = errors = bytes = 0;
for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
pkts += rx_ring->rx_packets;
bytes += rx_ring->rx_bytes;
dropped += rx_ring->rx_dropped;
errors += rx_ring->rx_errors;
mcast += rx_ring->rx_multicast;
}
ndev->stats.rx_packets = pkts;
ndev->stats.rx_bytes = bytes;
ndev->stats.rx_dropped = dropped;
ndev->stats.rx_errors = errors;
ndev->stats.multicast = mcast;
/* Get TX stats. */
pkts = errors = bytes = 0;
for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
pkts += tx_ring->tx_packets;
bytes += tx_ring->tx_bytes;
errors += tx_ring->tx_errors;
}
ndev->stats.tx_packets = pkts;
ndev->stats.tx_bytes = bytes;
ndev->stats.tx_errors = errors;
return &ndev->stats;
}
static void qlge_set_multicast_list(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
struct netdev_hw_addr *ha;
int i, status;
status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
if (status)
return;
/*
* Set or clear promiscuous mode if a
* transition is taking place.
*/
if (ndev->flags & IFF_PROMISC) {
if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to set promiscuous mode.\n");
} else {
set_bit(QL_PROMISCUOUS, &qdev->flags);
}
}
} else {
if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to clear promiscuous mode.\n");
} else {
clear_bit(QL_PROMISCUOUS, &qdev->flags);
}
}
}
/*
* Set or clear all multicast mode if a
* transition is taking place.
*/
if ((ndev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to set all-multi mode.\n");
} else {
set_bit(QL_ALLMULTI, &qdev->flags);
}
}
} else {
if (test_bit(QL_ALLMULTI, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to clear all-multi mode.\n");
} else {
clear_bit(QL_ALLMULTI, &qdev->flags);
}
}
}
if (!netdev_mc_empty(ndev)) {
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
goto exit;
i = 0;
netdev_for_each_mc_addr(ha, ndev) {
if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
MAC_ADDR_TYPE_MULTI_MAC, i)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
goto exit;
}
i++;
}
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
if (ql_set_routing_reg
(qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to set multicast match mode.\n");
} else {
set_bit(QL_ALLMULTI, &qdev->flags);
}
}
exit:
ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
struct ql_adapter *qdev = netdev_priv(ndev);
struct sockaddr *addr = p;
int status;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
/* Update local copy of current mac address. */
memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return status;
status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
if (status)
netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
return status;
}
static void qlge_tx_timeout(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
ql_queue_asic_error(qdev);
}
static void ql_asic_reset_work(struct work_struct *work)
{
struct ql_adapter *qdev =
container_of(work, struct ql_adapter, asic_reset_work.work);
int status;
rtnl_lock();
status = ql_adapter_down(qdev);
if (status)
goto error;
status = ql_adapter_up(qdev);
if (status)
goto error;
/* Restore rx mode. */
clear_bit(QL_ALLMULTI, &qdev->flags);
clear_bit(QL_PROMISCUOUS, &qdev->flags);
qlge_set_multicast_list(qdev->ndev);
rtnl_unlock();
return;
error:
netif_alert(qdev, ifup, qdev->ndev,
"Driver up/down cycle failed, closing device\n");
set_bit(QL_ADAPTER_UP, &qdev->flags);
dev_close(qdev->ndev);
rtnl_unlock();
}
static const struct nic_operations qla8012_nic_ops = {
.get_flash = ql_get_8012_flash_params,
.port_initialize = ql_8012_port_initialize,
};
static const struct nic_operations qla8000_nic_ops = {
.get_flash = ql_get_8000_flash_params,
.port_initialize = ql_8000_port_initialize,
};
/* Find the pcie function number for the other NIC
* on this chip. Since both NIC functions share a
* common firmware we have the lowest enabled function
* do any common work. Examples would be resetting
* after a fatal firmware error, or doing a firmware
* coredump.
*/
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
int status = 0;
u32 temp;
u32 nic_func1, nic_func2;
status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
&temp);
if (status)
return status;
nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
MPI_TEST_NIC_FUNC_MASK);
nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
MPI_TEST_NIC_FUNC_MASK);
if (qdev->func == nic_func1)
qdev->alt_func = nic_func2;
else if (qdev->func == nic_func2)
qdev->alt_func = nic_func1;
else
status = -EIO;
return status;
}
static int ql_get_board_info(struct ql_adapter *qdev)
{
int status;
qdev->func =
(ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
if (qdev->func > 3)
return -EIO;
status = ql_get_alt_pcie_func(qdev);
if (status)
return status;
qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
if (qdev->port) {
qdev->xg_sem_mask = SEM_XGMAC1_MASK;
qdev->port_link_up = STS_PL1;
qdev->port_init = STS_PI1;
qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
} else {
qdev->xg_sem_mask = SEM_XGMAC0_MASK;
qdev->port_link_up = STS_PL0;
qdev->port_init = STS_PI0;
qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
}
qdev->chip_rev_id = ql_read32(qdev, REV_ID);
qdev->device_id = qdev->pdev->device;
if (qdev->device_id == QLGE_DEVICE_ID_8012)
qdev->nic_ops = &qla8012_nic_ops;
else if (qdev->device_id == QLGE_DEVICE_ID_8000)
qdev->nic_ops = &qla8000_nic_ops;
return status;
}
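/* Editor's illustration (not in the original source): if the MPI test
 * register reports nic_func1 = 1 and nic_func2 = 2, a device probed as
 * function 2 derives alt_func = 1 and port = 1 (it is not the lowest
 * enabled function), which selects the XGMAC1 semaphore and the FUNC2
 * mailbox addresses above.
 */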
static void ql_release_all(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct ql_adapter *qdev = netdev_priv(ndev);
if (qdev->workqueue) {
destroy_workqueue(qdev->workqueue);
qdev->workqueue = NULL;
}
if (qdev->reg_base)
iounmap(qdev->reg_base);
if (qdev->doorbell_area)
iounmap(qdev->doorbell_area);
vfree(qdev->mpi_coredump);
pci_release_regions(pdev);
}
static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
int cards_found)
{
struct ql_adapter *qdev = netdev_priv(ndev);
int err = 0;
memset((void *)qdev, 0, sizeof(*qdev));
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "PCI device enable failed.\n");
return err;
}
qdev->ndev = ndev;
qdev->pdev = pdev;
pci_set_drvdata(pdev, ndev);
/* Set PCIe read request size */
err = pcie_set_readrq(pdev, 4096);
if (err) {
dev_err(&pdev->dev, "Set readrq failed.\n");
goto err_out1;
}
err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out1;
	}
pci_set_master(pdev);
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
set_bit(QL_DMA64, &qdev->flags);
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
} else {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (!err)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
}
if (err) {
dev_err(&pdev->dev, "No usable DMA configuration.\n");
goto err_out2;
}
/* Set PCIe reset type for EEH to fundamental. */
pdev->needs_freset = 1;
pci_save_state(pdev);
qdev->reg_base =
ioremap_nocache(pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1));
if (!qdev->reg_base) {
dev_err(&pdev->dev, "Register mapping failed.\n");
err = -ENOMEM;
goto err_out2;
}
qdev->doorbell_area_size = pci_resource_len(pdev, 3);
qdev->doorbell_area =
ioremap_nocache(pci_resource_start(pdev, 3),
pci_resource_len(pdev, 3));
if (!qdev->doorbell_area) {
dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
err = -ENOMEM;
goto err_out2;
}
err = ql_get_board_info(qdev);
if (err) {
dev_err(&pdev->dev, "Register access failed.\n");
err = -EIO;
goto err_out2;
}
qdev->msg_enable = netif_msg_init(debug, default_msg);
spin_lock_init(&qdev->hw_lock);
spin_lock_init(&qdev->stats_lock);
if (qlge_mpi_coredump) {
qdev->mpi_coredump =
vmalloc(sizeof(struct ql_mpi_coredump));
if (qdev->mpi_coredump == NULL) {
err = -ENOMEM;
goto err_out2;
}
if (qlge_force_coredump)
set_bit(QL_FRC_COREDUMP, &qdev->flags);
}
/* make sure the EEPROM is good */
err = qdev->nic_ops->get_flash(qdev);
if (err) {
dev_err(&pdev->dev, "Invalid FLASH.\n");
goto err_out2;
}
/* Keep local copy of current mac address. */
memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
/* Set up the default ring sizes. */
qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
/* Set up the coalescing parameters. */
qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
/*
* Set up the operating parameters.
*/
qdev->workqueue = create_singlethread_workqueue(ndev->name);
INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
init_completion(&qdev->ide_completion);
mutex_init(&qdev->mpi_mutex);
if (!cards_found) {
dev_info(&pdev->dev, "%s\n", DRV_STRING);
dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
DRV_NAME, DRV_VERSION);
}
return 0;
err_out2:
ql_release_all(pdev);
err_out1:
pci_disable_device(pdev);
return err;
}
static const struct net_device_ops qlge_netdev_ops = {
.ndo_open = qlge_open,
.ndo_stop = qlge_close,
.ndo_start_xmit = qlge_send,
.ndo_change_mtu = qlge_change_mtu,
.ndo_get_stats = qlge_get_stats,
.ndo_set_rx_mode = qlge_set_multicast_list,
.ndo_set_mac_address = qlge_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = qlge_tx_timeout,
.ndo_fix_features = qlge_fix_features,
.ndo_set_features = qlge_set_features,
.ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
};
static void ql_timer(unsigned long data)
{
struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = ql_read32(qdev, STS);
if (pci_channel_offline(qdev->pdev)) {
netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
return;
}
mod_timer(&qdev->timer, jiffies + (5*HZ));
}
static int qlge_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_entry)
{
struct net_device *ndev = NULL;
struct ql_adapter *qdev = NULL;
	static int cards_found;
int err = 0;
ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
min(MAX_CPUS, netif_get_num_default_rss_queues()));
if (!ndev)
return -ENOMEM;
err = ql_init_device(pdev, ndev, cards_found);
if (err < 0) {
free_netdev(ndev);
return err;
}
qdev = netdev_priv(ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->hw_features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_TSO |
NETIF_F_TSO_ECN |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_RXCSUM;
ndev->features = ndev->hw_features;
ndev->vlan_features = ndev->hw_features;
	/* vlan gets same features (except the vlan offloads themselves) */
ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX);
if (test_bit(QL_DMA64, &qdev->flags))
ndev->features |= NETIF_F_HIGHDMA;
/*
* Set up net_device structure.
*/
ndev->tx_queue_len = qdev->tx_ring_size;
ndev->irq = pdev->irq;
ndev->netdev_ops = &qlge_netdev_ops;
ndev->ethtool_ops = &qlge_ethtool_ops;
ndev->watchdog_timeo = 10 * HZ;
err = register_netdev(ndev);
if (err) {
dev_err(&pdev->dev, "net device registration failed.\n");
ql_release_all(pdev);
pci_disable_device(pdev);
free_netdev(ndev);
return err;
}
/* Start up the timer to trigger EEH if
* the bus goes dead
*/
init_timer_deferrable(&qdev->timer);
qdev->timer.data = (unsigned long)qdev;
qdev->timer.function = ql_timer;
qdev->timer.expires = jiffies + (5*HZ);
add_timer(&qdev->timer);
ql_link_off(qdev);
ql_display_dev_info(ndev);
atomic_set(&qdev->lb_count, 0);
cards_found++;
return 0;
}
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
return qlge_send(skb, ndev);
}
int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
return ql_clean_inbound_rx_ring(rx_ring, budget);
}
static void qlge_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct ql_adapter *qdev = netdev_priv(ndev);
del_timer_sync(&qdev->timer);
ql_cancel_all_work_sync(qdev);
unregister_netdev(ndev);
ql_release_all(pdev);
pci_disable_device(pdev);
free_netdev(ndev);
}
/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
int i;
struct ql_adapter *qdev = netdev_priv(ndev);
if (netif_carrier_ok(ndev)) {
netif_carrier_off(ndev);
netif_stop_queue(ndev);
}
	/* Disable the timer. */
del_timer_sync(&qdev->timer);
ql_cancel_all_work_sync(qdev);
for (i = 0; i < qdev->rss_ring_count; i++)
netif_napi_del(&qdev->rx_ring[i].napi);
clear_bit(QL_ADAPTER_UP, &qdev->flags);
ql_tx_ring_clean(qdev);
ql_free_rx_buffers(qdev);
ql_release_adapter_resources(qdev);
}
/*
* This callback is called by the PCI subsystem whenever
* a PCI bus error is detected.
*/
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
enum pci_channel_state state)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct ql_adapter *qdev = netdev_priv(ndev);
switch (state) {
case pci_channel_io_normal:
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
netif_device_detach(ndev);
if (netif_running(ndev))
ql_eeh_close(ndev);
pci_disable_device(pdev);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
dev_err(&pdev->dev,
"%s: pci_channel_io_perm_failure.\n", __func__);
ql_eeh_close(ndev);
set_bit(QL_EEH_FATAL, &qdev->flags);
return PCI_ERS_RESULT_DISCONNECT;
}
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the () routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct ql_adapter *qdev = netdev_priv(ndev);
pdev->error_state = pci_channel_io_normal;
pci_restore_state(pdev);
if (pci_enable_device(pdev)) {
netif_err(qdev, ifup, qdev->ndev,
"Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
if (ql_adapter_reset(qdev)) {
netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
set_bit(QL_EEH_FATAL, &qdev->flags);
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_RECOVERED;
}
static void qlge_io_resume(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct ql_adapter *qdev = netdev_priv(ndev);
int err = 0;
if (netif_running(ndev)) {
err = qlge_open(ndev);
if (err) {
netif_err(qdev, ifup, qdev->ndev,
"Device initialization failed after reset.\n");
return;
}
} else {
netif_err(qdev, ifup, qdev->ndev,
"Device was not running prior to EEH.\n");
}
mod_timer(&qdev->timer, jiffies + (5*HZ));
netif_device_attach(ndev);
}
static const struct pci_error_handlers qlge_err_handler = {
.error_detected = qlge_io_error_detected,
.slot_reset = qlge_io_slot_reset,
.resume = qlge_io_resume,
};
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct ql_adapter *qdev = netdev_priv(ndev);
int err;
netif_device_detach(ndev);
del_timer_sync(&qdev->timer);
	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		/* Bail out only if taking the adapter down failed. */
		if (err)
			return err;
	}
ql_wol(qdev);
err = pci_save_state(pdev);
if (err)
return err;
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct ql_adapter *qdev = netdev_priv(ndev);
int err;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
err = pci_enable_device(pdev);
if (err) {
netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
return err;
}
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
if (netif_running(ndev)) {
err = ql_adapter_up(qdev);
if (err)
return err;
}
mod_timer(&qdev->timer, jiffies + (5*HZ));
netif_device_attach(ndev);
return 0;
}
#endif /* CONFIG_PM */
static void qlge_shutdown(struct pci_dev *pdev)
{
qlge_suspend(pdev, PMSG_SUSPEND);
}
static struct pci_driver qlge_driver = {
.name = DRV_NAME,
.id_table = qlge_pci_tbl,
.probe = qlge_probe,
.remove = qlge_remove,
#ifdef CONFIG_PM
.suspend = qlge_suspend,
.resume = qlge_resume,
#endif
.shutdown = qlge_shutdown,
.err_handler = &qlge_err_handler
};
module_pci_driver(qlge_driver);
| gpl-2.0 |
fintler/vbox | src/VBox/HostDrivers/VBoxUSB/USBFilter.cpp | 6 | 61000 | /* $Id$ */
/** @file
* VirtualBox USB filter abstraction.
*/
/*
* Copyright (C) 2007-2010 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#include <VBox/usbfilter.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/string.h>
#include <iprt/assert.h>
#include <iprt/ctype.h>
/** @todo split this up for the sake of device drivers and such. */
/**
* Initializes an USBFILTER structure.
*
* @param pFilter The filter to initialize.
* @param enmType The filter type. If not valid, the filter will not
* be properly initialized and all other calls will fail.
*/
USBLIB_DECL(void) USBFilterInit(PUSBFILTER pFilter, USBFILTERTYPE enmType)
{
memset(pFilter, 0, sizeof(*pFilter));
AssertReturnVoid(enmType > USBFILTERTYPE_INVALID && enmType < USBFILTERTYPE_END);
pFilter->u32Magic = USBFILTER_MAGIC;
pFilter->enmType = enmType;
for (unsigned i = 0; i < RT_ELEMENTS(pFilter->aFields); i++)
pFilter->aFields[i].enmMatch = USBFILTERMATCH_IGNORE;
}
/**
* Make a clone of the specified filter.
*
* @param pFilter The target filter.
* @param pToClone The source filter.
*/
USBLIB_DECL(void) USBFilterClone(PUSBFILTER pFilter, PCUSBFILTER pToClone)
{
memcpy(pFilter, pToClone, sizeof(*pToClone));
}
/**
* Deletes (invalidates) an USBFILTER structure.
*
* @param pFilter The filter to delete.
*/
USBLIB_DECL(void) USBFilterDelete(PUSBFILTER pFilter)
{
pFilter->u32Magic = ~USBFILTER_MAGIC;
pFilter->enmType = USBFILTERTYPE_INVALID;
pFilter->offCurEnd = 0xfffff;
}
/**
* Skips blanks.
*
* @returns Next non-blank char in the string.
* @param psz The string.
*/
DECLINLINE(const char *) usbfilterSkipBlanks(const char *psz)
{
while (RT_C_IS_BLANK(*psz))
psz++;
return psz;
}
/**
* Worker for usbfilterReadNumber that parses a hexadecimal number.
*
* @returns Same as usbfilterReadNumber, except for VERR_NO_DIGITS.
* @param pszExpr Where to start converting, first char is a valid digit.
* @param ppszExpr See usbfilterReadNumber.
* @param pu16Val See usbfilterReadNumber.
*/
static int usbfilterReadNumberHex(const char *pszExpr, const char **ppszExpr, uint16_t *pu16Val)
{
int rc = VINF_SUCCESS;
uint32_t u32 = 0;
do
{
unsigned uDigit = *pszExpr >= 'a' && *pszExpr <= 'f'
? *pszExpr - 'a' + 10
: *pszExpr >= 'A' && *pszExpr <= 'F'
? *pszExpr - 'A' + 10
: *pszExpr - '0';
if (uDigit >= 16)
break;
u32 *= 16;
u32 += uDigit;
if (u32 > UINT16_MAX)
rc = VWRN_NUMBER_TOO_BIG;
} while (*++pszExpr);
*ppszExpr = usbfilterSkipBlanks(pszExpr);
*pu16Val = rc == VINF_SUCCESS ? u32 : UINT16_MAX;
    return rc;
}
/**
 * Worker for usbfilterReadNumber that parses a decimal or octal number.
 *
 * @returns Same as usbfilterReadNumber, except for VERR_NO_DIGITS.
 * @param pszExpr Where to start converting, first char is a valid digit.
 * @param uBase The base - 8 or 10.
* @param ppszExpr See usbfilterReadNumber.
* @param pu16Val See usbfilterReadNumber.
*/
static int usbfilterReadNumberDecimal(const char *pszExpr, unsigned uBase, const char **ppszExpr, uint16_t *pu16Val)
{
int rc = VINF_SUCCESS;
uint32_t u32 = 0;
do
{
unsigned uDigit = *pszExpr - '0';
if (uDigit >= uBase)
break;
u32 *= uBase;
u32 += uDigit;
if (u32 > UINT16_MAX)
rc = VWRN_NUMBER_TOO_BIG;
} while (*++pszExpr);
*ppszExpr = usbfilterSkipBlanks(pszExpr);
*pu16Val = rc == VINF_SUCCESS ? u32 : UINT16_MAX;
return rc;
}
/**
* Reads a number from a numeric expression.
*
* @returns IPRT status code.
* @retval VINF_SUCCESS if all is fine. *ppszExpr and *pu16Val are updated.
* @retval VWRN_NUMBER_TOO_BIG if the number exceeds unsigned 16-bit, both *ppszExpr and *pu16Val are updated.
* @retval VERR_NO_DIGITS if there aren't any digits.
*
* @param ppszExpr Pointer to the current expression pointer.
* This is advanced past the expression and trailing blanks on success.
* @param pu16Val Where to store the value on success.
*/
static int usbfilterReadNumber(const char **ppszExpr, uint16_t *pu16Val)
{
const char *pszExpr = usbfilterSkipBlanks(*ppszExpr);
if (!RT_C_IS_DIGIT(*pszExpr))
return VERR_NO_DIGITS;
if (*pszExpr == '0')
{
if (pszExpr[1] == 'x' || pszExpr[1] == 'X')
{
if (!RT_C_IS_XDIGIT(pszExpr[2]))
return VERR_NO_DIGITS;
return usbfilterReadNumberHex(pszExpr + 2, ppszExpr, pu16Val);
}
if (RT_C_IS_ODIGIT(pszExpr[1]))
return usbfilterReadNumberDecimal(pszExpr + 1, 8, ppszExpr, pu16Val);
/* Solitary 0! */
if (RT_C_IS_DIGIT(pszExpr[1]))
return VERR_NO_DIGITS;
}
return usbfilterReadNumberDecimal(pszExpr, 10, ppszExpr, pu16Val);
}
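/* Editor's examples, not in the original source: inputs accepted by
 * usbfilterReadNumber() and the results they produce:
 *      "42"     -> VINF_SUCCESS,        *pu16Val = 42
 *      "0x1a2B" -> VINF_SUCCESS,        *pu16Val = 0x1a2b
 *      "0177"   -> VINF_SUCCESS,        *pu16Val = 127 (octal)
 *      "70000"  -> VWRN_NUMBER_TOO_BIG, *pu16Val = UINT16_MAX
 *      "abc"    -> VERR_NO_DIGITS
 */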
/**
* Validates a numeric expression.
*
* @returns VBox status code.
* @retval VINF_SUCCESS if valid.
* @retval VERR_INVALID_PARAMETER if invalid.
* @retval VERR_NO_DIGITS if some expression is short of digits.
*
* @param pszExpr The numeric expression.
*/
static int usbfilterValidateNumExpression(const char *pszExpr)
{
/*
* An empty expression is fine.
*/
if (!*pszExpr)
return VINF_SUCCESS;
/*
* The string format is: "(<m>|([<m>]-[<n>]))|(<m>|([<m>]-[<n>]))+"
* where <m> and <n> are numbers in the decimal, hex (0xNNN) or octal (0NNN)
* form. Spaces are allowed around <m> and <n>.
*/
unsigned cSubExpressions = 0;
while (*pszExpr)
{
/*
* Skip remnants of the previous expression and any empty expressions.
* ('|' is the expression separator.)
*/
while (*pszExpr == '|' || RT_C_IS_BLANK(*pszExpr))
pszExpr++;
if (!*pszExpr)
break;
/*
* Parse the expression.
*/
int rc;
uint16_t u16First = 0;
uint16_t u16Last = 0;
if (*pszExpr == '-')
{
/* -N */
pszExpr++;
rc = usbfilterReadNumber(&pszExpr, &u16Last);
}
else
{
/* M or M-N */
rc = usbfilterReadNumber(&pszExpr, &u16First);
if (RT_SUCCESS(rc))
{
if (*pszExpr == '-')
{
/* M-N */
pszExpr++;
rc = usbfilterReadNumber(&pszExpr, &u16Last);
}
else
{
/* M */
u16Last = u16First;
}
}
}
if (RT_FAILURE(rc))
return rc;
/*
* We should either be at the end of the string or at
* an expression separator (|).
*/
if (*pszExpr && *pszExpr != '|' )
return VERR_INVALID_PARAMETER;
cSubExpressions++;
}
return cSubExpressions ? VINF_SUCCESS : VERR_INVALID_PARAMETER;
}
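/* Editor's examples, not in the original source: expressions checked
 * against the grammar above:
 *      "0x8086"             valid, single value
 *      "1-3|9| 0x10 -0x20"  valid, two ranges and a single value
 *      "-5"                 valid, open-ended range 0..5
 *      "1..3"               invalid, '-' is the only range separator
 *      "|| |"               invalid, no sub-expression at all
 */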
/**
* Validates a string pattern.
*
* @returns VBox status code.
* @retval VINF_SUCCESS if valid.
* @retval VERR_INVALID_PARAMETER if invalid.
*
* @param psz The string pattern.
*/
static int usbfilterValidateStringPattern(const char *psz)
{
/*
     * This only becomes important if we start doing
* sets ([0-9]) and such like.
*/
return VINF_SUCCESS;
}
/**
* Thoroughly validates the USB Filter.
*
* @returns Appropriate VBox status code.
* @param pFilter The filter to validate.
*/
USBLIB_DECL(int) USBFilterValidate(PCUSBFILTER pFilter)
{
if (!VALID_PTR(pFilter))
return VERR_INVALID_POINTER;
if (pFilter->u32Magic != USBFILTER_MAGIC)
return VERR_INVALID_MAGIC;
if ( pFilter->enmType <= USBFILTERTYPE_INVALID
|| pFilter->enmType >= USBFILTERTYPE_END)
{
Log(("USBFilter: %p - enmType=%d!\n", pFilter, pFilter->enmType));
return VERR_INVALID_PARAMETER;
}
if (pFilter->offCurEnd >= sizeof(pFilter->achStrTab))
{
Log(("USBFilter: %p - offCurEnd=%#x!\n", pFilter, pFilter->offCurEnd));
return VERR_INVALID_PARAMETER;
}
/*
* Validate the string table.
*/
if (pFilter->achStrTab[0])
{
Log(("USBFilter: %p - bad null string\n", pFilter));
return VERR_INVALID_PARAMETER;
}
const char *psz = &pFilter->achStrTab[1];
while (psz < &pFilter->achStrTab[pFilter->offCurEnd])
{
const char *pszEnd = RTStrEnd(psz, &pFilter->achStrTab[sizeof(pFilter->achStrTab)] - psz);
if (!pszEnd)
{
Log(("USBFilter: %p - string at %#x isn't terminated!\n",
pFilter, psz - &pFilter->achStrTab[0]));
return VERR_INVALID_PARAMETER;
}
uint16_t off = (uint16_t)(uintptr_t)(psz - &pFilter->achStrTab[0]);
unsigned i;
for (i = 0; i < RT_ELEMENTS(pFilter->aFields); i++)
if ( USBFilterIsMethodString((USBFILTERMATCH)pFilter->aFields[i].enmMatch)
&& pFilter->aFields[i].u16Value == off)
break;
if (i >= RT_ELEMENTS(pFilter->aFields))
{
Log(("USBFilter: %p - string at %#x isn't used by anyone! (%s)\n",
pFilter, psz - &pFilter->achStrTab[0], psz));
return VERR_INVALID_PARAMETER;
}
psz = pszEnd + 1;
}
if ((uintptr_t)(psz - &pFilter->achStrTab[0] - 1) != pFilter->offCurEnd)
{
Log(("USBFilter: %p - offCurEnd=%#x currently at %#x\n",
pFilter, pFilter->offCurEnd, psz - &pFilter->achStrTab[0] - 1));
return VERR_INVALID_PARAMETER;
}
while (psz < &pFilter->achStrTab[sizeof(pFilter->achStrTab)])
{
if (*psz)
{
Log(("USBFilter: %p - str tab isn't zero padded! %#x: %c\n",
pFilter, psz - &pFilter->achStrTab[0], *psz));
return VERR_INVALID_PARAMETER;
}
psz++;
}
/*
* Validate the fields.
*/
int rc;
for (unsigned i = 0; i < RT_ELEMENTS(pFilter->aFields); i++)
{
switch (pFilter->aFields[i].enmMatch)
{
case USBFILTERMATCH_IGNORE:
case USBFILTERMATCH_PRESENT:
if (pFilter->aFields[i].u16Value)
{
Log(("USBFilter: %p - #%d/%d u16Value=%d expected 0!\n",
pFilter, i, pFilter->aFields[i].enmMatch, pFilter->aFields[i].u16Value));
return VERR_INVALID_PARAMETER;
}
break;
case USBFILTERMATCH_NUM_EXACT:
case USBFILTERMATCH_NUM_EXACT_NP:
if (!USBFilterIsNumericField((USBFILTERIDX)i))
{
Log(("USBFilter: %p - #%d / %d - not numeric field\n",
pFilter, i, pFilter->aFields[i].enmMatch));
return VERR_INVALID_PARAMETER;
}
break;
case USBFILTERMATCH_NUM_EXPRESSION:
case USBFILTERMATCH_NUM_EXPRESSION_NP:
if (!USBFilterIsNumericField((USBFILTERIDX)i))
{
Log(("USBFilter: %p - #%d / %d - not numeric field\n",
pFilter, i, pFilter->aFields[i].enmMatch));
return VERR_INVALID_PARAMETER;
}
if ( pFilter->aFields[i].u16Value >= pFilter->offCurEnd
&& pFilter->offCurEnd)
{
Log(("USBFilter: %p - #%d / %d - off=%#x max=%#x\n",
pFilter, i, pFilter->aFields[i].enmMatch, pFilter->aFields[i].u16Value, pFilter->offCurEnd));
return VERR_INVALID_PARAMETER;
}
psz = &pFilter->achStrTab[pFilter->aFields[i].u16Value];
rc = usbfilterValidateNumExpression(psz);
if (RT_FAILURE(rc))
{
Log(("USBFilter: %p - #%d / %d - bad num expr: %s (rc=%Rrc)\n",
pFilter, i, pFilter->aFields[i].enmMatch, psz, rc));
return rc;
}
break;
case USBFILTERMATCH_STR_EXACT:
case USBFILTERMATCH_STR_EXACT_NP:
if (!USBFilterIsStringField((USBFILTERIDX)i))
{
Log(("USBFilter: %p - #%d / %d - not string field\n",
pFilter, i, pFilter->aFields[i].enmMatch));
return VERR_INVALID_PARAMETER;
}
if ( pFilter->aFields[i].u16Value >= pFilter->offCurEnd
&& pFilter->offCurEnd)
{
Log(("USBFilter: %p - #%d / %d - off=%#x max=%#x\n",
pFilter, i, pFilter->aFields[i].enmMatch, pFilter->aFields[i].u16Value, pFilter->offCurEnd));
return VERR_INVALID_PARAMETER;
}
break;
case USBFILTERMATCH_STR_PATTERN:
case USBFILTERMATCH_STR_PATTERN_NP:
if (!USBFilterIsStringField((USBFILTERIDX)i))
{
Log(("USBFilter: %p - #%d / %d - not string field\n",
pFilter, i, pFilter->aFields[i].enmMatch));
return VERR_INVALID_PARAMETER;
}
if ( pFilter->aFields[i].u16Value >= pFilter->offCurEnd
&& pFilter->offCurEnd)
{
Log(("USBFilter: %p - #%d / %d - off=%#x max=%#x\n",
pFilter, i, pFilter->aFields[i].enmMatch, pFilter->aFields[i].u16Value, pFilter->offCurEnd));
return VERR_INVALID_PARAMETER;
}
psz = &pFilter->achStrTab[pFilter->aFields[i].u16Value];
rc = usbfilterValidateStringPattern(psz);
if (RT_FAILURE(rc))
{
Log(("USBFilter: %p - #%d / %d - bad string pattern: %s (rc=%Rrc)\n",
pFilter, i, pFilter->aFields[i].enmMatch, psz, rc));
return rc;
}
break;
default:
Log(("USBFilter: %p - #%d enmMatch=%d!\n", pFilter, i, pFilter->aFields[i].enmMatch));
return VERR_INVALID_PARAMETER;
}
}
return VINF_SUCCESS;
}
/**
* Find the specified field in the string table.
*
* @returns Pointer to the string in the string table on success.
* NULL if the field is invalid or it doesn't have a string value.
* @param pFilter The filter.
* @param enmFieldIdx The field index.
*/
DECLINLINE(const char *) usbfilterGetString(PCUSBFILTER pFilter, USBFILTERIDX enmFieldIdx)
{
if ((unsigned)enmFieldIdx < (unsigned)USBFILTERIDX_END)
{
switch (pFilter->aFields[enmFieldIdx].enmMatch)
{
case USBFILTERMATCH_NUM_EXPRESSION:
case USBFILTERMATCH_NUM_EXPRESSION_NP:
case USBFILTERMATCH_STR_EXACT:
case USBFILTERMATCH_STR_EXACT_NP:
case USBFILTERMATCH_STR_PATTERN:
case USBFILTERMATCH_STR_PATTERN_NP:
Assert(pFilter->aFields[enmFieldIdx].u16Value < sizeof(pFilter->achStrTab));
return &pFilter->achStrTab[pFilter->aFields[enmFieldIdx].u16Value];
default:
AssertMsgFailed(("%d\n", pFilter->aFields[enmFieldIdx].enmMatch));
case USBFILTERMATCH_IGNORE:
case USBFILTERMATCH_PRESENT:
case USBFILTERMATCH_NUM_EXACT:
case USBFILTERMATCH_NUM_EXACT_NP:
break;
}
}
return NULL;
}
/**
* Gets a number value of a field.
*
* The field must contain a numeric value.
*
* @returns The field value on success, -1 on failure (invalid input / not numeric).
* @param pFilter The filter.
* @param enmFieldIdx The field index.
*/
DECLINLINE(int) usbfilterGetNum(PCUSBFILTER pFilter, USBFILTERIDX enmFieldIdx)
{
if ((unsigned)enmFieldIdx < (unsigned)USBFILTERIDX_END)
{
switch (pFilter->aFields[enmFieldIdx].enmMatch)
{
case USBFILTERMATCH_NUM_EXACT:
case USBFILTERMATCH_NUM_EXACT_NP:
return pFilter->aFields[enmFieldIdx].u16Value;
default:
AssertMsgFailed(("%d\n", pFilter->aFields[enmFieldIdx].enmMatch));
case USBFILTERMATCH_IGNORE:
case USBFILTERMATCH_PRESENT:
case USBFILTERMATCH_NUM_EXPRESSION:
case USBFILTERMATCH_NUM_EXPRESSION_NP:
case USBFILTERMATCH_STR_EXACT:
case USBFILTERMATCH_STR_EXACT_NP:
case USBFILTERMATCH_STR_PATTERN:
case USBFILTERMATCH_STR_PATTERN_NP:
break;
}
}
return -1;
}
/**
 * Matches a value against a numeric expression.
*
* @returns true on match and false on mismatch.
* @param pszExpr The numeric expression.
* @param u16Value The value to match.
*/
static bool usbfilterMatchNumExpression(const char *pszExpr, uint16_t u16Value)
{
/*
* The string format is: "(<m>|([<m>]-[<n>]))|(<m>|([<m>]-[<n>]))+"
* where <m> and <n> are numbers in the decimal, hex (0xNNN) or octal (0NNN)
* form. Spaces are allowed around <m> and <n>.
*/
while (*pszExpr)
{
/*
* Skip remnants of the previous expression and any empty expressions.
* ('|' is the expression separator.)
*/
while (*pszExpr == '|' || RT_C_IS_BLANK(*pszExpr))
pszExpr++;
if (!*pszExpr)
break;
/*
* Parse the expression.
*/
int rc;
uint16_t u16First = 0;
uint16_t u16Last = 0;
if (*pszExpr == '-')
{
/* -N */
pszExpr++;
rc = usbfilterReadNumber(&pszExpr, &u16Last);
}
else
{
/* M or M-N */
rc = usbfilterReadNumber(&pszExpr, &u16First);
if (RT_SUCCESS(rc))
{
pszExpr = usbfilterSkipBlanks(pszExpr);
if (*pszExpr == '-')
{
/* M-N */
pszExpr++;
rc = usbfilterReadNumber(&pszExpr, &u16Last);
}
else
{
/* M */
u16Last = u16First;
}
}
}
/* On success, we should either be at the end of the string or
at an expression separator (|). */
if (RT_SUCCESS(rc) && *pszExpr && *pszExpr != '|' )
rc = VERR_INVALID_PARAMETER;
if (RT_SUCCESS(rc))
{
/*
* Swap the values if the order is mixed up.
*/
if (u16First > u16Last)
{
uint16_t u16Tmp = u16First;
u16First = u16Last;
u16Last = u16Tmp;
}
/*
* Perform the compare.
*/
if ( u16Value >= u16First
&& u16Value <= u16Last)
return true;
}
else
{
/*
* Skip the bad expression.
* ('|' is the expression separator.)
*/
while (*pszExpr && *pszExpr != '|')
pszExpr++;
}
}
return false;
}
/**
* Performs simple pattern matching.
*
* @returns true on match and false on mismatch.
* @param pszPattern The pattern to match against.
* @param psz The string to match.
*/
static bool usbfilterMatchStringPattern(const char *pszPattern, const char *psz)
{
char ch;
while ((ch = *pszPattern++))
{
if (ch == '?')
{
/*
* Matches one char or end of string.
*/
if (*psz)
psz++;
}
else if (ch == '*')
{
/*
* Matches zero or more characters.
*/
/* skip subsequent wildcards */
while ( (ch = *pszPattern) == '*'
|| ch == '?')
pszPattern++;
if (!ch)
/* Pattern ends with a '*' and thus matches the rest of psz. */
return true;
/* Find the length of the following exact pattern sequence. */
ssize_t cchMatch = 1;
while ( (ch = pszPattern[cchMatch]) != '\0'
&& ch != '*'
&& ch != '?')
cchMatch++;
/* Check if the exact pattern sequence is too long. */
ssize_t cch = strlen(psz);
cch -= cchMatch;
if (cch < 0)
return false;
/* Is the rest an exact match? */
if (!ch)
return memcmp(psz + cch, pszPattern, cchMatch) == 0;
/*
             * This is where things normally start to get recursive or ugly.
*
* Just to make life simple, we'll skip the nasty stuff and say
* that we will do a maximal wildcard match and forget about any
* alternative matches.
*
* If somebody is bored out of their mind one day, feel free to
* implement correct matching without using recursion.
*/
ch = *pszPattern;
const char *pszMatch = NULL;
while ( cch-- >= 0
&& *psz)
{
if ( *psz == ch
&& !strncmp(psz, pszPattern, cchMatch))
pszMatch = psz;
psz++;
}
if (!pszMatch)
return false;
/* advance */
psz = pszMatch + cchMatch;
pszPattern += cchMatch;
}
else
{
/* exact match */
if (ch != *psz)
return false;
psz++;
}
}
return *psz == '\0';
}
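/* Editor's examples, not in the original source, of the matching rules
 * implemented above:
 *      pattern "USB*"   matches "USB" and "USB Disk 2.0"
 *      pattern "US?"    matches "US" and "USx" ('?' is one char or end)
 *      pattern "*disk"  matches "ramdisk" (tail compare after '*')
 * The '*' match is maximal and never backtracks, so "*ab*b" does not
 * match "abab" even though a backtracking matcher would accept it.
 */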
/**
* Match a filter against a device.
*
* @returns true if they match, false if not.
*
* @param pFilter The filter to match with.
* @param pDevice The device data. This is a filter (type ignored) that
* contains 'exact' values for all present fields and 'ignore'
* values for the non-present fields.
*
* @remark Both the filter and the device are ASSUMED to be valid because
* we don't wish to waste any time in this function.
*/
USBLIB_DECL(bool) USBFilterMatch(PCUSBFILTER pFilter, PCUSBFILTER pDevice)
{
return USBFilterMatchRated(pFilter, pDevice) > 0;
}
#if 0 /*def IN_RING0*/ /** @todo convert to proper logging. */
extern "C" int printf(const char *format, ...);
# define dprintf(a) printf a
#else
# define dprintf(a) do {} while (0)
#endif
/**
* Match a filter against a device and rate the result.
*
* @returns -1 if no match, matching rate between 1 and 100 (inclusive) if matched.
*
* @param pFilter The filter to match with.
* @param pDevice The device data. This is a filter (type ignored) that
* contains 'exact' values for all present fields and 'ignore'
* values for the non-present fields.
*
* @remark Both the filter and the device are ASSUMED to be valid because
* we don't wish to waste any time in this function.
*/
USBLIB_DECL(int) USBFilterMatchRated(PCUSBFILTER pFilter, PCUSBFILTER pDevice)
{
unsigned iRate = 0;
dprintf(("USBFilterMatchRated: %p %p\n", pFilter, pDevice));
for (unsigned i = 0; i < RT_ELEMENTS(pFilter->aFields); i++)
{
switch (pFilter->aFields[i].enmMatch)
{
case USBFILTERMATCH_IGNORE:
iRate += 2;
break;
case USBFILTERMATCH_PRESENT:
if (pDevice->aFields[i].enmMatch == USBFILTERMATCH_IGNORE)
{
dprintf(("filter match[%d]: !present\n", i));
return -1;
}
iRate += 2;
break;
case USBFILTERMATCH_NUM_EXACT:
if ( pDevice->aFields[i].enmMatch == USBFILTERMATCH_IGNORE
|| pFilter->aFields[i].u16Value != pDevice->aFields[i].u16Value)
{
if (pDevice->aFields[i].enmMatch == USBFILTERMATCH_IGNORE)
dprintf(("filter match[%d]: !num_exact device=ignore\n", i));
else
dprintf(("filter match[%d]: !num_exact %#x (filter) != %#x (device)\n", i, pFilter->aFields[i].u16Value, pDevice->aFields[i].u16Value));
return -1;
}
iRate += 2;
break;
case USBFILTERMATCH_NUM_EXACT_NP:
if ( pDevice->aFields[i].enmMatch != USBFILTERMATCH_IGNORE
&& pFilter->aFields[i].u16Value != pDevice->aFields[i].u16Value)
{
dprintf(("filter match[%d]: !num_exact_np %#x (filter) != %#x (device)\n", i, pFilter->aFields[i].u16Value, pDevice->aFields[i].u16Value));
return -1;
}
iRate += 2;
break;
case USBFILTERMATCH_NUM_EXPRESSION:
if ( pDevice->aFields[i].enmMatch == USBFILTERMATCH_IGNORE
|| !usbfilterMatchNumExpression(usbfilterGetString(pFilter, (USBFILTERIDX)i),
pDevice->aFields[i].u16Value))
{
dprintf(("filter match[%d]: !num_expression\n", i));
return -1;
}
iRate += 1;
break;
case USBFILTERMATCH_NUM_EXPRESSION_NP:
if ( pDevice->aFields[i].enmMatch != USBFILTERMATCH_IGNORE
&& !usbfilterMatchNumExpression(usbfilterGetString(pFilter, (USBFILTERIDX)i),
pDevice->aFields[i].u16Value))
{
dprintf(("filter match[%d]: !num_expression_no\n", i));
return -1;
}
iRate += 1;
break;
case USBFILTERMATCH_STR_EXACT:
if ( pDevice->aFields[i].enmMatch == USBFILTERMATCH_IGNORE
|| strcmp(usbfilterGetString(pFilter, (USBFILTERIDX)i),
usbfilterGetString(pDevice, (USBFILTERIDX)i)))
{
dprintf(("filter match[%d]: !str_exact\n", i));
return -1;
}
iRate += 2;
break;
case USBFILTERMATCH_STR_EXACT_NP:
if ( pDevice->aFields[i].enmMatch != USBFILTERMATCH_IGNORE
&& strcmp(usbfilterGetString(pFilter, (USBFILTERIDX)i),
usbfilterGetString(pDevice, (USBFILTERIDX)i)))
{
dprintf(("filter match[%d]: !str_exact_np\n", i));
return -1;
}
iRate += 2;
break;
case USBFILTERMATCH_STR_PATTERN:
if ( pDevice->aFields[i].enmMatch == USBFILTERMATCH_IGNORE
|| !usbfilterMatchStringPattern(usbfilterGetString(pFilter, (USBFILTERIDX)i),
usbfilterGetString(pDevice, (USBFILTERIDX)i)))
{
dprintf(("filter match[%d]: !str_pattern\n", i));
return -1;
}
iRate += 1;
break;
case USBFILTERMATCH_STR_PATTERN_NP:
if ( pDevice->aFields[i].enmMatch != USBFILTERMATCH_IGNORE
&& !usbfilterMatchStringPattern(usbfilterGetString(pFilter, (USBFILTERIDX)i),
usbfilterGetString(pDevice, (USBFILTERIDX)i)))
{
dprintf(("filter match[%d]: !str_pattern_np\n", i));
return -1;
}
iRate += 1;
break;
default:
AssertMsgFailed(("#%d: %d\n", i, pFilter->aFields[i].enmMatch));
return -1;
}
}
/* iRate is the range 0..2*cFields - recalc to percent. */
dprintf(("filter match: iRate=%d", iRate));
return iRate == 2 * RT_ELEMENTS(pFilter->aFields)
? 100
: (iRate * 100) / (2 * RT_ELEMENTS(pFilter->aFields));
}
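/* Editor's worked example, not in the original source: ignore, present
 * and exact matches add 2 rating points each, expression and pattern
 * matches add 1, so N fields rate at most 2N -> 100. For a hypothetical
 * N = 11, ten exact matches plus one string pattern give
 * iRate = 21 -> (21 * 100) / 22 = 95; exact filters therefore outrank
 * wildcard filters when several of them claim the same device.
 */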
/**
* Match a filter against a USBDEVICE.
*
* @returns true if they match, false if not.
*
* @param pFilter The filter to match with.
* @param pDevice The device to match.
*
* @remark Both the filter and the device are ASSUMED to be valid because
* we don't wish to waste any time in this function.
*/
USBLIB_DECL(bool) USBFilterMatchDevice(PCUSBFILTER pFilter, PUSBDEVICE pDevice)
{
for (unsigned i = 0; i < RT_ELEMENTS(pFilter->aFields); i++)
{
switch (pFilter->aFields[i].enmMatch)
{
case USBFILTERMATCH_IGNORE:
break;
case USBFILTERMATCH_PRESENT:
{
const char *psz;
switch (i)
{
case USBFILTERIDX_MANUFACTURER_STR: psz = pDevice->pszManufacturer; break;
case USBFILTERIDX_PRODUCT_STR: psz = pDevice->pszProduct; break;
case USBFILTERIDX_SERIAL_NUMBER_STR: psz = pDevice->pszSerialNumber; break;
default: psz = ""; break;
}
if (!psz)
return false;
break;
}
case USBFILTERMATCH_NUM_EXACT:
case USBFILTERMATCH_NUM_EXACT_NP:
case USBFILTERMATCH_NUM_EXPRESSION:
case USBFILTERMATCH_NUM_EXPRESSION_NP:
{
uint16_t u16Value;
switch (i)
{
case USBFILTERIDX_VENDOR_ID: u16Value = pDevice->idVendor; break;
case USBFILTERIDX_PRODUCT_ID: u16Value = pDevice->idProduct; break;
case USBFILTERIDX_DEVICE: u16Value = pDevice->bcdDevice; break;
case USBFILTERIDX_DEVICE_CLASS: u16Value = pDevice->bDeviceClass; break;
case USBFILTERIDX_DEVICE_SUB_CLASS: u16Value = pDevice->bDeviceSubClass; break;
case USBFILTERIDX_DEVICE_PROTOCOL: u16Value = pDevice->bDeviceProtocol; break;
case USBFILTERIDX_BUS: u16Value = pDevice->bBus; break;
case USBFILTERIDX_PORT: u16Value = pDevice->bPort; break;
default: u16Value = ~0; break;
}
switch (pFilter->aFields[i].enmMatch)
{
case USBFILTERMATCH_NUM_EXACT:
case USBFILTERMATCH_NUM_EXACT_NP:
if (pFilter->aFields[i].u16Value != u16Value)
return false;
break;
case USBFILTERMATCH_NUM_EXPRESSION:
case USBFILTERMATCH_NUM_EXPRESSION_NP:
if (!usbfilterMatchNumExpression(usbfilterGetString(pFilter, (USBFILTERIDX)i), u16Value))
return false;
break;
}
break;
}
case USBFILTERMATCH_STR_EXACT:
case USBFILTERMATCH_STR_EXACT_NP:
case USBFILTERMATCH_STR_PATTERN:
case USBFILTERMATCH_STR_PATTERN_NP:
{
const char *psz;
switch (i)
{
case USBFILTERIDX_MANUFACTURER_STR: psz = pDevice->pszManufacturer; break;
case USBFILTERIDX_PRODUCT_STR: psz = pDevice->pszProduct; break;
case USBFILTERIDX_SERIAL_NUMBER_STR: psz = pDevice->pszSerialNumber; break;
default: psz = NULL; break;
}
switch (pFilter->aFields[i].enmMatch)
{
case USBFILTERMATCH_STR_EXACT:
if ( !psz
|| strcmp(usbfilterGetString(pFilter, (USBFILTERIDX)i), psz))
return false;
break;
case USBFILTERMATCH_STR_EXACT_NP:
if ( psz
&& strcmp(usbfilterGetString(pFilter, (USBFILTERIDX)i), psz))
return false;
break;
case USBFILTERMATCH_STR_PATTERN:
if ( !psz
|| !usbfilterMatchStringPattern(usbfilterGetString(pFilter, (USBFILTERIDX)i), psz))
return false;
break;
case USBFILTERMATCH_STR_PATTERN_NP:
if ( psz
&& !usbfilterMatchStringPattern(usbfilterGetString(pFilter, (USBFILTERIDX)i), psz))
return false;
break;
}
break;
}
default:
AssertMsgFailed(("#%d: %d\n", i, pFilter->aFields[i].enmMatch));
return false;
}
}
return true;
}
/**
* Checks if the two filters are identical.
*
 * @returns true if they are identical, false if they aren't.
* @param pFilter The first filter.
* @param pFilter2 The second filter.
*/
USBLIB_DECL(bool) USBFilterIsIdentical(PCUSBFILTER pFilter, PCUSBFILTER pFilter2)
{
/* Lazy works here because we're darn strict with zero padding and such elsewhere. */
return memcmp(pFilter, pFilter2, sizeof(*pFilter)) == 0;
}
/**
* Sets the filter type.
*
* @returns VBox status code.
* @retval VERR_INVALID_PARAMETER if the filter type is invalid.
* @retval VERR_INVALID_MAGIC if pFilter is invalid.
*
* @param pFilter The filter.
* @param enmType The new filter type.
*/
USBLIB_DECL(int) USBFilterSetFilterType(PUSBFILTER pFilter, USBFILTERTYPE enmType)
{
AssertReturn(pFilter->u32Magic == USBFILTER_MAGIC, VERR_INVALID_MAGIC);
AssertReturn(enmType > USBFILTERTYPE_INVALID && enmType < USBFILTERTYPE_END, VERR_INVALID_PARAMETER);
pFilter->enmType = enmType;
return VINF_SUCCESS;
}
/**
* Replaces the string value of a field.
*
 * This will remove any existing string value currently held by the field from the
* string table and then attempt to add the new value. This function can be used
* to delete any assigned string before changing the type to numeric by passing
* in an empty string. This works because the first byte in the string table is
* reserved for the empty (NULL) string.
*
* @returns VBox status code.
* @retval VINF_SUCCESS on success.
* @retval VERR_BUFFER_OVERFLOW if the string table is full.
* @retval VERR_INVALID_PARAMETER if the enmFieldIdx isn't valid.
* @retval VERR_INVALID_POINTER if pszString isn't valid.
* @retval VERR_INVALID_MAGIC if pFilter is invalid.
*
* @param pFilter The filter.
* @param enmFieldIdx The field index.
* @param pszString The string to add.
*/
static int usbfilterSetString(PUSBFILTER pFilter, USBFILTERIDX enmFieldIdx, const char *pszString)
{
/*
* Validate input.
*/
AssertReturn(pFilter->u32Magic == USBFILTER_MAGIC, VERR_INVALID_MAGIC);
AssertReturn((unsigned)enmFieldIdx < (unsigned)USBFILTERIDX_END, VERR_INVALID_PARAMETER);
AssertPtrReturn(pszString, VERR_INVALID_POINTER);
Assert(pFilter->offCurEnd < sizeof(pFilter->achStrTab));
Assert(pFilter->achStrTab[pFilter->offCurEnd] == '\0');
/*
* Remove old string value if any.
*/
if ( USBFilterIsMethodUsingStringValue((USBFILTERMATCH)pFilter->aFields[enmFieldIdx].enmMatch)
&& pFilter->aFields[enmFieldIdx].u16Value != 0)
{
uint32_t off = pFilter->aFields[enmFieldIdx].u16Value;
pFilter->aFields[enmFieldIdx].u16Value = 0; /* Assign it to the NULL string. */
unsigned cchShift = (unsigned)strlen(&pFilter->achStrTab[off]) + 1;
ssize_t cchToMove = (pFilter->offCurEnd + 1) - (off + cchShift);
Assert(cchToMove >= 0);
if (cchToMove > 0)
{
/* We're not last - must shift the strings. */
memmove(&pFilter->achStrTab[off], &pFilter->achStrTab[off + cchShift], cchToMove);
for (unsigned i = 0; i < RT_ELEMENTS(pFilter->aFields); i++)
if ( pFilter->aFields[i].u16Value >= off
&& USBFilterIsMethodUsingStringValue((USBFILTERMATCH)pFilter->aFields[i].enmMatch))
pFilter->aFields[i].u16Value -= cchShift;
}
pFilter->offCurEnd -= cchShift;
Assert(pFilter->offCurEnd < sizeof(pFilter->achStrTab));
Assert(pFilter->offCurEnd + cchShift <= sizeof(pFilter->achStrTab));
/* Zero the unused part of the string table (to allow laziness/strictness elsewhere). */
memset(&pFilter->achStrTab[pFilter->offCurEnd], '\0', cchShift);
}
/*
* Make a special case for the empty string.
* (This also makes the delete logic above work correctly for the last string.)
*/
if (!*pszString)
pFilter->aFields[enmFieldIdx].u16Value = 0;
else
{
const size_t cch = strlen(pszString);
if (pFilter->offCurEnd + cch + 2 > sizeof(pFilter->achStrTab))
return VERR_BUFFER_OVERFLOW;
pFilter->aFields[enmFieldIdx].u16Value = pFilter->offCurEnd + 1;
memcpy(&pFilter->achStrTab[pFilter->offCurEnd + 1], pszString, cch + 1);
pFilter->offCurEnd += (uint32_t)cch + 1;
}
return VINF_SUCCESS;
}
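/*
 * Illustration (not code from this file): removing the middle string
 * from a table holding "Logitech", "USB Mouse" and "1234".  Fields store
 * offsets into achStrTab; offset 0 is the shared empty string.
 *
 *   before:  \0 L o g i t e c h \0 U S B   M o u s e \0 1 2 3 4 \0
 *            offA=1              offB=10               offC=20, offCurEnd=24
 *
 *   after:   \0 L o g i t e c h \0 1 2 3 4 \0 . . (zeroed tail) . .
 *            offA=1              offB=0 (empty)  offC=10, offCurEnd=14
 *
 * The tail is memmove'd down by cchShift = strlen("USB Mouse") + 1 = 10,
 * every string offset >= 10 is rebased by -10, and the freed bytes are
 * zeroed so that USBFilterIsIdentical() can keep comparing with memcmp.
 */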
/**
* Wrapper around usbfilterSetString() that deletes any string value
* currently assigned to a field.
*
* Upon successful return the field contains an empty string, nothing, or a number.
*
* This function only validates the field index itself when there is no string
* value to delete, thus avoiding redundant validation of the index.
*
* @returns VBox status code. See usbfilterSetString.
* @param pFilter The filter.
* @param enmFieldIdx The index of the field which string value should be deleted.
*/
static int usbfilterDeleteAnyStringValue(PUSBFILTER pFilter, USBFILTERIDX enmFieldIdx)
{
int rc = VINF_SUCCESS;
if ( USBFilterIsMethodUsingStringValue((USBFILTERMATCH)pFilter->aFields[enmFieldIdx].enmMatch)
&& pFilter->aFields[enmFieldIdx].u16Value != 0)
rc = usbfilterSetString(pFilter, enmFieldIdx, "");
else if ((unsigned)enmFieldIdx >= (unsigned)USBFILTERIDX_END)
rc = VERR_INVALID_PARAMETER;
return rc;
}
/**
* Sets a field to always match (ignore whatever is thrown at it).
*
* @returns VBox status code.
* @retval VINF_SUCCESS on success.
* @retval VERR_INVALID_PARAMETER if the enmFieldIdx isn't valid.
* @retval VERR_INVALID_MAGIC if pFilter is invalid.
*
* @param pFilter The filter.
* @param enmFieldIdx The field index.
*/
USBLIB_DECL(int) USBFilterSetIgnore(PUSBFILTER pFilter, USBFILTERIDX enmFieldIdx)
{
int rc = usbfilterDeleteAnyStringValue(pFilter, enmFieldIdx);
if (RT_SUCCESS(rc))
{
pFilter->aFields[enmFieldIdx].enmMatch = USBFILTERMATCH_IGNORE;
pFilter->aFields[enmFieldIdx].u16Value = 0;
}
return rc;
}
/**
* Sets a field to match on device field present only.
*
* @returns VBox status code.
* @retval VINF_SUCCESS on success.
* @retval VERR_INVALID_PARAMETER if the enmFieldIdx isn't valid.
* @retval VERR_INVALID_MAGIC if pFilter is invalid.
*
* @param pFilter The filter.
* @param enmFieldIdx The field index.
*/
USBLIB_DECL(int) USBFilterSetPresentOnly(PUSBFILTER pFilter, USBFILTERIDX enmFieldIdx)
{
int rc = usbfilterDeleteAnyStringValue(pFilter, enmFieldIdx);
if (RT_SUCCESS(rc))
{
pFilter->aFields[enmFieldIdx].enmMatch = USBFILTERMATCH_PRESENT;
pFilter->aFields[enmFieldIdx].u16Value = 0;
}
return rc;
}
/**
* Sets a field to exactly match a number.
*
* @returns VBox status code.
* @retval VINF_SUCCESS on success.
* @retval VERR_INVALID_PARAMETER if the enmFieldIdx isn't valid.
* @retval VERR_INVALID_MAGIC if pFilter is invalid.
*
* @param pFilter The filter.
* @param enmFieldIdx The field index. This must be a numeric field.
* @param u16Value The number to match exactly.
* @param fMustBePresent If set, a non-present field on the device will result in a mismatch.
* If clear, a non-present field on the device will match.
*/
USBLIB_DECL(int) USBFilterSetNumExact(PUSBFILTER pFilter, USBFILTERIDX enmFieldIdx, uint16_t u16Value, bool fMustBePresent)
{
int rc = USBFilterIsNumericField(enmFieldIdx) ? VINF_SUCCESS : VERR_INVALID_PARAMETER;
if (RT_SUCCESS(rc))
{
rc = usbfilterDeleteAnyStringValue(pFilter, enmFieldIdx);
if (RT_SUCCESS(rc))
{
pFilter->aFields[enmFieldIdx].u16Value = u16Value;
pFilter->aFields[enmFieldIdx].enmMatch = fMustBePresent ? USBFILTERMATCH_NUM_EXACT : USBFILTERMATCH_NUM_EXACT_NP;
}
}
return rc;
}
/**
* Sets a field to match a numeric expression.
*
* @returns VBox status code.
* @retval VINF_SUCCESS on success.
* @retval VERR_BUFFER_OVERFLOW if the string table is full.
* @retval VERR_INVALID_PARAMETER if the enmFieldIdx or the numeric expression aren't valid.
* @retval VERR_INVALID_POINTER if pszExpression isn't a valid pointer.
* @retval VERR_INVALID_MAGIC if pFilter is invalid.
*
* @param pFilter The filter.
* @param enmFieldIdx The field index. This must be a numeric field.
* @param pszExpression The numeric expression.
* @param fMustBePresent If set, a non-present field on the device will result in a mismatch.
* If clear, a non-present field on the device will match.
*/
USBLIB_DECL(int) USBFilterSetNumExpression(PUSBFILTER pFilter, USBFILTERIDX enmFieldIdx, const char *pszExpression, bool fMustBePresent)
{
int rc = USBFilterIsNumericField(enmFieldIdx) ? VINF_SUCCESS : VERR_INVALID_PARAMETER;
if (RT_SUCCESS(rc))
{
/* Strip leading spaces and empty sub expressions (||). */
while (*pszExpression && (RT_C_IS_BLANK(*pszExpression) || *pszExpression == '|'))
pszExpression++;
rc = usbfilterValidateNumExpression(pszExpression);
if (RT_SUCCESS(rc))
{
/* We could optimize the expression further (stripping spaces, convert numbers),
but it's more work than what it's worth and it could upset some users. */
rc = usbfilterSetString(pFilter, enmFieldIdx, pszExpression);
if (RT_SUCCESS(rc))
pFilter->aFields[enmFieldIdx].enmMatch = fMustBePresent ? USBFILTERMATCH_NUM_EXPRESSION : USBFILTERMATCH_NUM_EXPRESSION_NP;
else if (rc == VERR_NO_DIGITS)
rc = VERR_INVALID_PARAMETER;
}
}
return rc;
}
/**
* Sets a field to exactly match a string.
*
* @returns VBox status code.
* @retval VINF_SUCCESS on success.
* @retval VERR_BUFFER_OVERFLOW if the string table is full.
* @retval VERR_INVALID_PARAMETER if the enmFieldIdx isn't valid.
* @retval VERR_INVALID_POINTER if pszValue isn't a valid pointer.
* @retval VERR_INVALID_MAGIC if pFilter is invalid.
*
* @param pFilter The filter.
* @param enmFieldIdx The field index. This must be a string field.
* @param pszValue The string value.
* @param fMustBePresent If set, a non-present field on the device will result in a mismatch.
* If clear, a non-present field on the device will match.
*/
USBLIB_DECL(int) USBFilterSetStringExact(PUSBFILTER pFilter, USBFILTERIDX enmFieldIdx, const char *pszValue, bool fMustBePresent)
{
int rc = USBFilterIsStringField(enmFieldIdx) ? VINF_SUCCESS : VERR_INVALID_PARAMETER;
if (RT_SUCCESS(rc))
{
rc = usbfilterSetString(pFilter, enmFieldIdx, pszValue);
if (RT_SUCCESS(rc))
pFilter->aFields[enmFieldIdx].enmMatch = fMustBePresent ? USBFILTERMATCH_STR_EXACT : USBFILTERMATCH_STR_EXACT_NP;
}
return rc;
}
/**
* Sets a field to match a string pattern.
*
* @returns VBox status code.
* @retval VINF_SUCCESS on success.
* @retval VERR_BUFFER_OVERFLOW if the string table is full.
* @retval VERR_INVALID_PARAMETER if the enmFieldIdx or pattern aren't valid.
* @retval VERR_INVALID_POINTER if pszPattern isn't a valid pointer.
* @retval VERR_INVALID_MAGIC if pFilter is invalid.
*
* @param pFilter The filter.
* @param enmFieldIdx The field index. This must be a string field.
* @param pszPattern The string pattern.
* @param fMustBePresent If set, a non-present field on the device will result in a mismatch.
* If clear, a non-present field on the device will match.
*/
USBLIB_DECL(int) USBFilterSetStringPattern(PUSBFILTER pFilter, USBFILTERIDX enmFieldIdx, const char *pszPattern, bool fMustBePresent)
{
int rc = USBFilterIsStringField(enmFieldIdx) ? VINF_SUCCESS : VERR_INVALID_PARAMETER;
if (RT_SUCCESS(rc))
{
rc = usbfilterValidateStringPattern(pszPattern);
if (RT_SUCCESS(rc))
{
rc = usbfilterSetString(pFilter, enmFieldIdx, pszPattern);
if (RT_SUCCESS(rc))
pFilter->aFields[enmFieldIdx].enmMatch = fMustBePresent ? USBFILTERMATCH_STR_PATTERN : USBFILTERMATCH_STR_PATTERN_NP;
}
}
return rc;
}
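/*
 * Usage sketch (illustrative only, not part of this file): build a
 * capture filter for one specific device.  Assumes USBFilterInit() and
 * USBFILTERTYPE_CAPTURE from the public header; the VID/PID values and
 * the pattern are made up.
 */
static int usbfilterExampleBuild(PUSBFILTER pFilter)
{
    USBFilterInit(pFilter, USBFILTERTYPE_CAPTURE);
    int rc = USBFilterSetNumExact(pFilter, USBFILTERIDX_VENDOR_ID, 0x046d, true /*fMustBePresent*/);
    if (RT_SUCCESS(rc))
        rc = USBFilterSetNumExact(pFilter, USBFILTERIDX_PRODUCT_ID, 0xc52b, true);
    if (RT_SUCCESS(rc))
        rc = USBFilterSetStringPattern(pFilter, USBFILTERIDX_PRODUCT_STR, "*Receiver*",
                                       false /* also match when no product string is present */);
    return rc;
}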
/**
* Sets the must-be-present part of a field.
*
* This only works on fields which already have matching criteria. This means
* that a field marked 'ignore' will not be processed and will result in a
* warning status code.
*
* @returns VBox status code.
* @retval VINF_SUCCESS on success.
* @retval VWRN_INVALID_PARAMETER if the field is marked 'ignore'. No assertions.
* @retval VERR_INVALID_PARAMETER if the enmFieldIdx or pattern aren't valid.
* @retval VERR_INVALID_POINTER if pszPattern isn't a valid pointer.
* @retval VERR_INVALID_MAGIC if pFilter is invalid.
*
* @param pFilter The filter.
* @param enmFieldIdx The field index.
* @param fMustBePresent If set, a non-present field on the device will result in a mismatch.
* If clear, a non-present field on the device will match.
*/
USBLIB_DECL(int) USBFilterSetMustBePresent(PUSBFILTER pFilter, USBFILTERIDX enmFieldIdx, bool fMustBePresent)
{
AssertPtrReturn(pFilter, VERR_INVALID_POINTER);
AssertReturn(pFilter->u32Magic == USBFILTER_MAGIC, VERR_INVALID_MAGIC);
AssertReturn((unsigned)enmFieldIdx < (unsigned)USBFILTERIDX_END, VERR_INVALID_PARAMETER);
USBFILTERMATCH enmMatch = (USBFILTERMATCH)pFilter->aFields[enmFieldIdx].enmMatch;
if (fMustBePresent)
{
switch (enmMatch)
{
case USBFILTERMATCH_IGNORE:
return VWRN_INVALID_PARAMETER;
case USBFILTERMATCH_PRESENT:
case USBFILTERMATCH_NUM_EXACT:
case USBFILTERMATCH_NUM_EXPRESSION:
case USBFILTERMATCH_STR_EXACT:
case USBFILTERMATCH_STR_PATTERN:
break;
case USBFILTERMATCH_NUM_EXACT_NP:
enmMatch = USBFILTERMATCH_NUM_EXACT;
break;
case USBFILTERMATCH_NUM_EXPRESSION_NP:
enmMatch = USBFILTERMATCH_NUM_EXPRESSION;
break;
case USBFILTERMATCH_STR_EXACT_NP:
enmMatch = USBFILTERMATCH_STR_EXACT;
break;
case USBFILTERMATCH_STR_PATTERN_NP:
enmMatch = USBFILTERMATCH_STR_PATTERN;
break;
default:
AssertMsgFailedReturn(("%p: enmFieldIdx=%d enmMatch=%d\n", pFilter, enmFieldIdx, enmMatch), VERR_INVALID_MAGIC);
}
}
else
{
switch (enmMatch)
{
case USBFILTERMATCH_IGNORE:
return VWRN_INVALID_PARAMETER;
case USBFILTERMATCH_NUM_EXACT_NP:
case USBFILTERMATCH_STR_PATTERN_NP:
case USBFILTERMATCH_STR_EXACT_NP:
case USBFILTERMATCH_NUM_EXPRESSION_NP:
break;
case USBFILTERMATCH_PRESENT:
enmMatch = USBFILTERMATCH_IGNORE;
break;
case USBFILTERMATCH_NUM_EXACT:
enmMatch = USBFILTERMATCH_NUM_EXACT_NP;
break;
case USBFILTERMATCH_NUM_EXPRESSION:
enmMatch = USBFILTERMATCH_NUM_EXPRESSION_NP;
break;
case USBFILTERMATCH_STR_EXACT:
enmMatch = USBFILTERMATCH_STR_EXACT_NP;
break;
case USBFILTERMATCH_STR_PATTERN:
enmMatch = USBFILTERMATCH_STR_PATTERN_NP;
break;
default:
AssertMsgFailedReturn(("%p: enmFieldIdx=%d enmMatch=%d\n", pFilter, enmFieldIdx, enmMatch), VERR_INVALID_MAGIC);
}
}
pFilter->aFields[enmFieldIdx].enmMatch = enmMatch;
return VINF_SUCCESS;
}
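/*
 * Sketch (illustrative only): relax a previously built filter so that a
 * device lacking a serial number string still matches.  Continues the
 * hypothetical usbfilterExampleBuild() above.
 */
static int usbfilterExampleRelax(PUSBFILTER pFilter)
{
    /* STR_EXACT becomes STR_EXACT_NP (and so on); a field currently set
       to 'ignore' yields VWRN_INVALID_PARAMETER and is left untouched. */
    return USBFilterSetMustBePresent(pFilter, USBFILTERIDX_SERIAL_NUMBER_STR, false /*fMustBePresent*/);
}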
/**
* Gets the filter type.
*
* @returns The filter type.
* USBFILTERTYPE_INVALID if the filter is invalid.
* @param pFilter The filter.
*/
USBLIB_DECL(USBFILTERTYPE) USBFilterGetFilterType(PCUSBFILTER pFilter)
{
AssertReturn(pFilter->u32Magic == USBFILTER_MAGIC, USBFILTERTYPE_INVALID);
return pFilter->enmType;
}
/**
* Gets the matching method for a field.
*
* @returns The matching method on success, USBFILTERMATCH_INVALID on invalid field index.
* @param pFilter The filter.
* @param enmFieldIdx The field index.
*/
USBLIB_DECL(USBFILTERMATCH) USBFilterGetMatchingMethod(PCUSBFILTER pFilter, USBFILTERIDX enmFieldIdx)
{
if ( pFilter->u32Magic == USBFILTER_MAGIC
&& (unsigned)enmFieldIdx < (unsigned)USBFILTERIDX_END)
return (USBFILTERMATCH)pFilter->aFields[enmFieldIdx].enmMatch;
return USBFILTERMATCH_INVALID;
}
/**
* Gets the numeric value of a field.
*
* The field must contain a number; we're not doing any conversions for you.
*
* @returns VBox status code.
* @retval VINF_SUCCESS on success.
* @retval VERR_INVALID_PARAMETER if the enmFieldIdx isn't valid or if the field doesn't contain a number.
* @retval VERR_INVALID_MAGIC if pFilter is invalid.
*
* @param pFilter The filter.
* @param enmFieldIdx The field index.
* @param pu16Value Where to store the value.
*/
USBLIB_DECL(int) USBFilterQueryNum(PUSBFILTER pFilter, USBFILTERIDX enmFieldIdx, uint16_t *pu16Value)
{
AssertReturn(pFilter->u32Magic == USBFILTER_MAGIC, VERR_INVALID_MAGIC);
int iValue = usbfilterGetNum(pFilter, enmFieldIdx);
if (iValue == -1)
return VERR_INVALID_PARAMETER;
*pu16Value = (uint16_t)iValue;
return VINF_SUCCESS;
}
/**
* Gets the numeric value of a field.
*
* The field must contain a number; we're not doing any conversions for you.
*
* @returns The field value on success, -1 on failure (invalid input / not numeric).
*
* @param pFilter The filter.
* @param enmFieldIdx The field index.
*/
USBLIB_DECL(int) USBFilterGetNum(PCUSBFILTER pFilter, USBFILTERIDX enmFieldIdx)
{
AssertReturn(pFilter->u32Magic == USBFILTER_MAGIC, -1);
return usbfilterGetNum(pFilter, enmFieldIdx);
}
/**
* Gets the string value of a field.
*
* The field must contain a string; we're not doing any conversions for you.
*
* @returns VBox status code.
* @retval VINF_SUCCESS on success.
* @retval VERR_BUFFER_OVERFLOW if the buffer isn't sufficient to hold the string. The buffer
* will be filled with as much of the string that'll fit.
* @retval VERR_INVALID_PARAMETER if the enmFieldIdx isn't valid or if the field doesn't contain a string.
* @retval VERR_INVALID_MAGIC if pFilter is invalid.
*
* @param pFilter The filter.
* @param enmFieldIdx The field index.
* @param pszBuf Where to store the string.
* @param cchBuf The size of the buffer.
*/
USBLIB_DECL(int) USBFilterQueryString(PUSBFILTER pFilter, USBFILTERIDX enmFieldIdx, char *pszBuf, size_t cchBuf)
{
AssertReturn(pFilter->u32Magic == USBFILTER_MAGIC, VERR_INVALID_MAGIC);
const char *psz = usbfilterGetString(pFilter, enmFieldIdx);
if (RT_UNLIKELY(!psz))
return VERR_INVALID_PARAMETER;
int rc = VINF_SUCCESS;
size_t cch = strlen(psz);
if (cch < cchBuf)
memcpy(pszBuf, psz, cch + 1);
else
{
rc = VERR_BUFFER_OVERFLOW;
if (cchBuf)
{
memcpy(pszBuf, psz, cchBuf - 1);
pszBuf[cchBuf - 1] = '\0';
}
}
return rc;
}
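/*
 * Usage sketch (illustrative only): fetch the product string, accepting
 * a truncated copy on overflow.
 */
static bool usbfilterExampleQueryProduct(PUSBFILTER pFilter, char *pszBuf, size_t cchBuf)
{
    int rc = USBFilterQueryString(pFilter, USBFILTERIDX_PRODUCT_STR, pszBuf, cchBuf);
    /* On VERR_BUFFER_OVERFLOW the buffer still holds a zero terminated,
       truncated copy, provided cchBuf > 0. */
    return RT_SUCCESS(rc) || rc == VERR_BUFFER_OVERFLOW;
}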
/**
* Gets the string table entry for a field.
*
* @returns Pointer to the string. (readonly!)
*
* @param pFilter The filter.
* @param enmFieldIdx The field index.
*/
USBLIB_DECL(const char *) USBFilterGetString(PCUSBFILTER pFilter, USBFILTERIDX enmFieldIdx)
{
AssertReturn(pFilter->u32Magic == USBFILTER_MAGIC, NULL);
const char *psz = usbfilterGetString(pFilter, enmFieldIdx);
if (RT_UNLIKELY(!psz))
return NULL;
return psz;
}
/**
* Gets the string length of a field containing a string.
*
* @returns String length on success, -1 on failure (not a string, bad filter).
* @param pFilter The filter.
* @param enmFieldIdx The field index.
*/
USBLIB_DECL(ssize_t) USBFilterGetStringLen(PCUSBFILTER pFilter, USBFILTERIDX enmFieldIdx)
{
if (RT_LIKELY(pFilter->u32Magic == USBFILTER_MAGIC))
{
const char *psz = usbfilterGetString(pFilter, enmFieldIdx);
if (RT_LIKELY(psz))
return strlen(psz);
}
return -1;
}
/**
* Check if any of the fields are set to something substantial.
*
* Consider the filter a wildcard if this returns false.
*
* @returns true / false.
* @param pFilter The filter.
*/
USBLIB_DECL(bool) USBFilterHasAnySubstatialCriteria(PCUSBFILTER pFilter)
{
AssertReturn(pFilter->u32Magic == USBFILTER_MAGIC, false);
for (unsigned i = 0; i < RT_ELEMENTS(pFilter->aFields); i++)
{
switch (pFilter->aFields[i].enmMatch)
{
case USBFILTERMATCH_IGNORE:
case USBFILTERMATCH_PRESENT:
break;
case USBFILTERMATCH_NUM_EXACT:
case USBFILTERMATCH_NUM_EXACT_NP:
case USBFILTERMATCH_STR_EXACT:
case USBFILTERMATCH_STR_EXACT_NP:
return true;
case USBFILTERMATCH_NUM_EXPRESSION:
case USBFILTERMATCH_NUM_EXPRESSION_NP:
{
const char *psz = usbfilterGetString(pFilter, (USBFILTERIDX)i);
if (psz)
{
while (*psz && (*psz == '|' || RT_C_IS_BLANK(*psz)))
psz++;
if (*psz)
return true;
}
break;
}
case USBFILTERMATCH_STR_PATTERN:
case USBFILTERMATCH_STR_PATTERN_NP:
{
const char *psz = usbfilterGetString(pFilter, (USBFILTERIDX)i);
if (psz)
{
while (*psz && (*psz == '*' || *psz == '?'))
psz++;
if (*psz)
return true;
}
break;
}
}
}
return false;
}
/**
* Checks whether the specified field is a numeric field or not.
*
* @returns true / false.
* @param enmFieldIdx The field index.
*/
USBLIB_DECL(bool) USBFilterIsNumericField(USBFILTERIDX enmFieldIdx)
{
switch (enmFieldIdx)
{
case USBFILTERIDX_VENDOR_ID:
case USBFILTERIDX_PRODUCT_ID:
case USBFILTERIDX_DEVICE:
case USBFILTERIDX_DEVICE_CLASS:
case USBFILTERIDX_DEVICE_SUB_CLASS:
case USBFILTERIDX_DEVICE_PROTOCOL:
case USBFILTERIDX_BUS:
case USBFILTERIDX_PORT:
return true;
default:
AssertMsgFailed(("%d\n", enmFieldIdx));
case USBFILTERIDX_MANUFACTURER_STR:
case USBFILTERIDX_PRODUCT_STR:
case USBFILTERIDX_SERIAL_NUMBER_STR:
return false;
}
}
/**
* Checks whether the specified field is a string field or not.
*
* @returns true / false.
* @param enmFieldIdx The field index.
*/
USBLIB_DECL(bool) USBFilterIsStringField(USBFILTERIDX enmFieldIdx)
{
switch (enmFieldIdx)
{
default:
AssertMsgFailed(("%d\n", enmFieldIdx));
case USBFILTERIDX_VENDOR_ID:
case USBFILTERIDX_PRODUCT_ID:
case USBFILTERIDX_DEVICE:
case USBFILTERIDX_DEVICE_CLASS:
case USBFILTERIDX_DEVICE_SUB_CLASS:
case USBFILTERIDX_DEVICE_PROTOCOL:
case USBFILTERIDX_BUS:
case USBFILTERIDX_PORT:
return false;
case USBFILTERIDX_MANUFACTURER_STR:
case USBFILTERIDX_PRODUCT_STR:
case USBFILTERIDX_SERIAL_NUMBER_STR:
return true;
}
}
/**
* Checks whether the specified matching method uses a numeric value or not.
*
* @returns true / false.
* @param enmMatchingMethod The matching method.
*/
USBLIB_DECL(bool) USBFilterIsMethodUsingNumericValue(USBFILTERMATCH enmMatchingMethod)
{
switch (enmMatchingMethod)
{
default:
AssertMsgFailed(("%d\n", enmMatchingMethod));
case USBFILTERMATCH_IGNORE:
case USBFILTERMATCH_PRESENT:
case USBFILTERMATCH_NUM_EXPRESSION:
case USBFILTERMATCH_NUM_EXPRESSION_NP:
case USBFILTERMATCH_STR_EXACT:
case USBFILTERMATCH_STR_EXACT_NP:
case USBFILTERMATCH_STR_PATTERN:
case USBFILTERMATCH_STR_PATTERN_NP:
return false;
case USBFILTERMATCH_NUM_EXACT:
case USBFILTERMATCH_NUM_EXACT_NP:
return true;
}
}
/**
* Checks whether the specified matching method uses a string value or not.
*
* @returns true / false.
* @param enmMatchingMethod The matching method.
*/
USBLIB_DECL(bool) USBFilterIsMethodUsingStringValue(USBFILTERMATCH enmMatchingMethod)
{
switch (enmMatchingMethod)
{
default:
AssertMsgFailed(("%d\n", enmMatchingMethod));
case USBFILTERMATCH_IGNORE:
case USBFILTERMATCH_PRESENT:
case USBFILTERMATCH_NUM_EXACT:
case USBFILTERMATCH_NUM_EXACT_NP:
return false;
case USBFILTERMATCH_NUM_EXPRESSION:
case USBFILTERMATCH_NUM_EXPRESSION_NP:
case USBFILTERMATCH_STR_EXACT:
case USBFILTERMATCH_STR_EXACT_NP:
case USBFILTERMATCH_STR_PATTERN:
case USBFILTERMATCH_STR_PATTERN_NP:
return true;
}
}
/**
* Checks if a matching method is for string fields or not.
*
* @returns true / false.
* @param enmMatchingMethod The matching method.
*/
USBLIB_DECL(bool) USBFilterIsMethodNumeric(USBFILTERMATCH enmMatchingMethod)
{
return enmMatchingMethod >= USBFILTERMATCH_NUM_FIRST
&& enmMatchingMethod <= USBFILTERMATCH_NUM_LAST;
}
/**
* Checks if a matching method is for string fields or not.
*
* @returns true / false.
* @param enmMatchingMethod The matching method.
*/
USBLIB_DECL(bool) USBFilterIsMethodString(USBFILTERMATCH enmMatchingMethod)
{
return enmMatchingMethod >= USBFILTERMATCH_STR_FIRST
&& enmMatchingMethod <= USBFILTERMATCH_STR_LAST;
}
| gpl-2.0 |
rockly703/linux-2.6.18 | sound/oss/forte.c | 6 | 48529 | /*
* forte.c - ForteMedia FM801 OSS Driver
*
* Written by Martin K. Petersen <mkp@mkp.net>
* Copyright (C) 2002 Hewlett-Packard Company
* Portions Copyright (C) 2003 Martin K. Petersen
*
* Latest version: http://mkp.net/forte/
*
* Based upon the ALSA FM801 driver by Jaroslav Kysela and OSS drivers
* by Thomas Sailer, Alan Cox, Zach Brown, and Jeff Garzik. Thanks
* guys!
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/poll.h>
#include <linux/sound.h>
#include <linux/ac97_codec.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#define DRIVER_NAME "forte"
#define DRIVER_VERSION "$Id: forte.c,v 1.63 2003/03/01 05:32:42 mkp Exp $"
#define PFX DRIVER_NAME ": "
#undef M_DEBUG
#ifdef M_DEBUG
#define DPRINTK(args...) printk(KERN_WARNING args)
#else
#define DPRINTK(args...)
#endif
/* Card capabilities */
#define FORTE_CAPS (DSP_CAP_MMAP | DSP_CAP_TRIGGER)
/* Supported audio formats */
#define FORTE_FMTS (AFMT_U8 | AFMT_S16_LE)
/* Buffers */
#define FORTE_MIN_FRAG_SIZE 256
#define FORTE_MAX_FRAG_SIZE PAGE_SIZE
#define FORTE_DEF_FRAG_SIZE 256
#define FORTE_MIN_FRAGMENTS 2
#define FORTE_MAX_FRAGMENTS 256
#define FORTE_DEF_FRAGMENTS 2
#define FORTE_MIN_BUF_MSECS 500
#define FORTE_MAX_BUF_MSECS 1000
/* PCI BARs */
#define FORTE_PCM_VOL 0x00 /* PCM Output Volume */
#define FORTE_FM_VOL 0x02 /* FM Output Volume */
#define FORTE_I2S_VOL 0x04 /* I2S Volume */
#define FORTE_REC_SRC 0x06 /* Record Source */
#define FORTE_PLY_CTRL 0x08 /* Playback Control */
#define FORTE_PLY_COUNT 0x0a /* Playback Count */
#define FORTE_PLY_BUF1 0x0c /* Playback Buffer I */
#define FORTE_PLY_BUF2 0x10 /* Playback Buffer II */
#define FORTE_CAP_CTRL 0x14 /* Capture Control */
#define FORTE_CAP_COUNT 0x16 /* Capture Count */
#define FORTE_CAP_BUF1 0x18 /* Capture Buffer I */
#define FORTE_CAP_BUF2 0x1c /* Capture Buffer II */
#define FORTE_CODEC_CTRL 0x22 /* Codec Control */
#define FORTE_I2S_MODE 0x24 /* I2S Mode Control */
#define FORTE_VOLUME 0x26 /* Volume Up/Down/Mute Status */
#define FORTE_I2C_CTRL 0x29 /* I2C Control */
#define FORTE_AC97_CMD 0x2a /* AC'97 Command */
#define FORTE_AC97_DATA 0x2c /* AC'97 Data */
#define FORTE_MPU401_DATA 0x30 /* MPU401 Data */
#define FORTE_MPU401_CMD 0x31 /* MPU401 Command */
#define FORTE_GPIO_CTRL 0x52 /* General Purpose I/O Control */
#define FORTE_GEN_CTRL 0x54 /* General Control */
#define FORTE_IRQ_MASK 0x56 /* Interrupt Mask */
#define FORTE_IRQ_STATUS 0x5a /* Interrupt Status */
#define FORTE_OPL3_BANK0 0x68 /* OPL3 Status Read / Bank 0 Write */
#define FORTE_OPL3_DATA0 0x69 /* OPL3 Data 0 Write */
#define FORTE_OPL3_BANK1 0x6a /* OPL3 Bank 1 Write */
#define FORTE_OPL3_DATA1 0x6b /* OPL3 Bank 1 Write */
#define FORTE_POWERDOWN 0x70 /* Blocks Power Down Control */
#define FORTE_CAP_OFFSET (FORTE_CAP_CTRL - FORTE_PLY_CTRL)
#define FORTE_AC97_ADDR_SHIFT 10
/* Playback and record control register bits */
#define FORTE_BUF1_LAST (1<<1)
#define FORTE_BUF2_LAST (1<<2)
#define FORTE_START (1<<5)
#define FORTE_PAUSE (1<<6)
#define FORTE_IMMED_STOP (1<<7)
#define FORTE_RATE_SHIFT 8
#define FORTE_RATE_MASK (15 << FORTE_RATE_SHIFT)
#define FORTE_CHANNELS_4 (1<<12) /* Playback only */
#define FORTE_CHANNELS_6 (2<<12) /* Playback only */
#define FORTE_CHANNELS_6MS (3<<12) /* Playback only */
#define FORTE_CHANNELS_MASK (3<<12)
#define FORTE_16BIT (1<<14)
#define FORTE_STEREO (1<<15)
/* IRQ status bits */
#define FORTE_IRQ_PLAYBACK (1<<8)
#define FORTE_IRQ_CAPTURE (1<<9)
#define FORTE_IRQ_VOLUME (1<<14)
#define FORTE_IRQ_MPU (1<<15)
/* CODEC control */
#define FORTE_CC_CODEC_RESET (1<<5)
#define FORTE_CC_AC97_RESET (1<<6)
/* AC97 cmd */
#define FORTE_AC97_WRITE (0<<7)
#define FORTE_AC97_READ (1<<7)
#define FORTE_AC97_DP_INVALID (0<<8)
#define FORTE_AC97_DP_VALID (1<<8)
#define FORTE_AC97_PORT_RDY (0<<9)
#define FORTE_AC97_PORT_BSY (1<<9)
struct forte_channel {
const char *name;
unsigned short ctrl; /* Ctrl BAR contents */
unsigned long iobase; /* Ctrl BAR address */
wait_queue_head_t wait;
void *buf; /* Buffer */
dma_addr_t buf_handle; /* Buffer handle */
unsigned int record;
unsigned int format;
unsigned int rate;
unsigned int stereo;
unsigned int frag_sz; /* Current fragment size */
unsigned int frag_num; /* Current # of fragments */
unsigned int frag_msecs; /* Milliseconds per frag */
unsigned int buf_sz; /* Current buffer size */
unsigned int hwptr; /* Tail */
unsigned int swptr; /* Head */
unsigned int filled_frags; /* Fragments currently full */
unsigned int next_buf; /* Index of next buffer */
unsigned int active; /* Channel currently in use */
unsigned int mapped; /* mmap */
unsigned int buf_pages; /* Real size of buffer */
unsigned int nr_irqs; /* Number of interrupts */
unsigned int bytes; /* Total bytes */
unsigned int residue; /* Partial fragment */
};
struct forte_chip {
struct pci_dev *pci_dev;
unsigned long iobase;
int irq;
struct mutex open_mutex; /* Device access */
spinlock_t lock; /* State */
spinlock_t ac97_lock;
struct ac97_codec *ac97;
int multichannel;
int dsp; /* OSS handle */
int trigger; /* mmap I/O trigger */
struct forte_channel play;
struct forte_channel rec;
};
static int channels[] = { 2, 4, 6, };
static int rates[] = { 5500, 8000, 9600, 11025, 16000, 19200,
22050, 32000, 38400, 44100, 48000, };
static struct forte_chip *forte;
static int found;
/* AC97 Codec -------------------------------------------------------------- */
/**
* forte_ac97_wait:
* @chip: fm801 instance whose AC97 codec to wait on
*
* FIXME:
* Stop busy-waiting
*/
static inline int
forte_ac97_wait (struct forte_chip *chip)
{
int i = 10000;
while ( (inw (chip->iobase + FORTE_AC97_CMD) & FORTE_AC97_PORT_BSY)
&& i-- )
cpu_relax();
return i < 0; /* true only if we timed out while the port stayed busy */
}
/**
* forte_ac97_read:
* @codec: AC97 codec to read from
* @reg: register to read
*/
static u16
forte_ac97_read (struct ac97_codec *codec, u8 reg)
{
u16 ret = 0;
struct forte_chip *chip = codec->private_data;
spin_lock (&chip->ac97_lock);
/* Knock, knock */
if (forte_ac97_wait (chip)) {
printk (KERN_ERR PFX "ac97_read: Serial bus busy\n");
goto out;
}
/* Send read command */
outw (reg | (1<<7), chip->iobase + FORTE_AC97_CMD);
if (forte_ac97_wait (chip)) {
printk (KERN_ERR PFX "ac97_read: Bus busy reading reg 0x%x\n",
reg);
goto out;
}
/* Sanity checking */
if (inw (chip->iobase + FORTE_AC97_CMD) & FORTE_AC97_DP_INVALID) {
printk (KERN_ERR PFX "ac97_read: Invalid data port");
goto out;
}
/* Fetch result */
ret = inw (chip->iobase + FORTE_AC97_DATA);
out:
spin_unlock (&chip->ac97_lock);
return ret;
}
/**
* forte_ac97_write:
* @codec: AC97 codec to send command to
* @reg: register to write
* @val: value to write
*/
static void
forte_ac97_write (struct ac97_codec *codec, u8 reg, u16 val)
{
struct forte_chip *chip = codec->private_data;
spin_lock (&chip->ac97_lock);
/* Knock, knock */
if (forte_ac97_wait (chip)) {
printk (KERN_ERR PFX "ac97_write: Serial bus busy\n");
goto out;
}
outw (val, chip->iobase + FORTE_AC97_DATA);
outb (reg | FORTE_AC97_WRITE, chip->iobase + FORTE_AC97_CMD);
/* Wait for completion */
if (forte_ac97_wait (chip)) {
printk (KERN_ERR PFX "ac97_write: Bus busy after write\n");
goto out;
}
out:
spin_unlock (&chip->ac97_lock);
}
/* Mixer ------------------------------------------------------------------- */
/**
* forte_mixer_open:
* @inode:
* @file:
*/
static int
forte_mixer_open (struct inode *inode, struct file *file)
{
struct forte_chip *chip = forte;
file->private_data = chip->ac97;
return 0;
}
/**
* forte_mixer_release:
* @inode:
* @file:
*/
static int
forte_mixer_release (struct inode *inode, struct file *file)
{
/* We will welease Wodewick */
return 0;
}
/**
* forte_mixer_ioctl:
* @inode:
* @file:
*/
static int
forte_mixer_ioctl (struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct ac97_codec *codec = (struct ac97_codec *) file->private_data;
return codec->mixer_ioctl (codec, cmd, arg);
}
static struct file_operations forte_mixer_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.ioctl = forte_mixer_ioctl,
.open = forte_mixer_open,
.release = forte_mixer_release,
};
/* Channel ----------------------------------------------------------------- */
/**
* forte_channel_reset:
* @channel: Channel to reset
*
* Locking: Must be called with lock held.
*/
static void
forte_channel_reset (struct forte_channel *channel)
{
if (!channel || !channel->iobase)
return;
DPRINTK ("%s: channel = %s\n", __FUNCTION__, channel->name);
channel->ctrl &= ~FORTE_START;
outw (channel->ctrl, channel->iobase + FORTE_PLY_CTRL);
/* We always play at least two fragments, hence these defaults */
channel->hwptr = channel->frag_sz;
channel->next_buf = 1;
channel->swptr = 0;
channel->filled_frags = 0;
channel->active = 0;
channel->bytes = 0;
channel->nr_irqs = 0;
channel->mapped = 0;
channel->residue = 0;
}
/**
* forte_channel_start:
* @channel: Channel to start (record/playback)
*
* Locking: Must be called with lock held.
*/
static inline void
forte_channel_start (struct forte_channel *channel)
{
if (!channel || !channel->iobase || channel->active)
return;
channel->ctrl &= ~(FORTE_PAUSE | FORTE_BUF1_LAST | FORTE_BUF2_LAST
| FORTE_IMMED_STOP);
channel->ctrl |= FORTE_START;
channel->active = 1;
outw (channel->ctrl, channel->iobase + FORTE_PLY_CTRL);
}
/**
* forte_channel_stop:
* @channel: Channel to stop
*
* Locking: Must be called with lock held.
*/
static inline void
forte_channel_stop (struct forte_channel *channel)
{
if (!channel || !channel->iobase)
return;
channel->ctrl &= ~(FORTE_START | FORTE_PAUSE);
channel->ctrl |= FORTE_IMMED_STOP;
channel->active = 0;
outw (channel->ctrl, channel->iobase + FORTE_PLY_CTRL);
}
/**
* forte_channel_pause:
* @channel: Channel to pause
*
* Locking: Must be called with lock held.
*/
static inline void
forte_channel_pause (struct forte_channel *channel)
{
if (!channel || !channel->iobase)
return;
channel->ctrl |= FORTE_PAUSE;
channel->active = 0;
outw (channel->ctrl, channel->iobase + FORTE_PLY_CTRL);
}
/**
* forte_channel_rate:
* @channel: Channel whose rate to set. Playback and record are
* independent.
* @rate: Channel rate in Hz
*
* Locking: Must be called with lock held.
*/
static int
forte_channel_rate (struct forte_channel *channel, unsigned int rate)
{
int new_rate;
if (!channel || !channel->iobase)
return -EINVAL;
/* The FM801 only supports a handful of fixed frequencies.
* We find the value closest to what userland requested.
*/
if (rate <= 6250) { rate = 5500; new_rate = 0; }
else if (rate <= 8800) { rate = 8000; new_rate = 1; }
else if (rate <= 10312) { rate = 9600; new_rate = 2; }
else if (rate <= 13512) { rate = 11025; new_rate = 3; }
else if (rate <= 17600) { rate = 16000; new_rate = 4; }
else if (rate <= 20625) { rate = 19200; new_rate = 5; }
else if (rate <= 27025) { rate = 22050; new_rate = 6; }
else if (rate <= 35200) { rate = 32000; new_rate = 7; }
else if (rate <= 41250) { rate = 38400; new_rate = 8; }
else if (rate <= 46050) { rate = 44100; new_rate = 9; }
else { rate = 48000; new_rate = 10; }
channel->ctrl &= ~FORTE_RATE_MASK;
channel->ctrl |= new_rate << FORTE_RATE_SHIFT;
channel->rate = rate;
DPRINTK ("%s: %s rate = %d\n", __FUNCTION__, channel->name, rate);
return rate;
}
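/*
 * Worked example (requested values made up): how the bucketing above maps
 * a few userland requests to supported FM801 rates:
 *
 *   requested 30000 Hz -> rate 32000, new_rate 7
 *   requested  7000 Hz -> rate  8000, new_rate 1
 *   requested 96000 Hz -> rate 48000, new_rate 10 (clamped to maximum)
 */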
/**
* forte_channel_format:
* @channel: Channel whose audio format to set
* @format: OSS format ID
*
* Locking: Must be called with lock held.
*/
static int
forte_channel_format (struct forte_channel *channel, int format)
{
if (!channel || !channel->iobase)
return -EINVAL;
switch (format) {
case AFMT_QUERY:
break;
case AFMT_U8:
channel->ctrl &= ~FORTE_16BIT;
channel->format = AFMT_U8;
break;
case AFMT_S16_LE:
default:
channel->ctrl |= FORTE_16BIT;
channel->format = AFMT_S16_LE;
break;
}
DPRINTK ("%s: %s want %d format, got %d\n", __FUNCTION__, channel->name,
format, channel->format);
return channel->format;
}
/**
* forte_channel_stereo:
* @channel: Channel to toggle
* @stereo: 0 for Mono, 1 for Stereo
*
* Locking: Must be called with lock held.
*/
static int
forte_channel_stereo (struct forte_channel *channel, unsigned int stereo)
{
int ret;
if (!channel || !channel->iobase)
return -EINVAL;
DPRINTK ("%s: %s stereo = %d\n", __FUNCTION__, channel->name, stereo);
switch (stereo) {
case 0:
channel->ctrl &= ~(FORTE_STEREO | FORTE_CHANNELS_MASK);
channel->stereo = stereo;
ret = stereo;
break;
case 1:
channel->ctrl &= ~FORTE_CHANNELS_MASK;
channel->ctrl |= FORTE_STEREO;
channel->stereo = stereo;
ret = stereo;
break;
default:
DPRINTK ("Unsupported channel format");
ret = -EINVAL;
break;
}
return ret;
}
/**
* forte_channel_buffer:
* @channel: Channel whose buffer to set up
*
* Locking: Must be called with lock held.
*/
static void
forte_channel_buffer (struct forte_channel *channel, int sz, int num)
{
unsigned int msecs, shift;
/* Go away, I'm busy */
if (channel->filled_frags || channel->bytes)
return;
/* Fragment size must be a power of 2 */
shift = 0; sz++;
while (sz >>= 1)
shift++;
channel->frag_sz = 1 << shift;
/* Round fragment size to something reasonable */
if (channel->frag_sz < FORTE_MIN_FRAG_SIZE)
channel->frag_sz = FORTE_MIN_FRAG_SIZE;
if (channel->frag_sz > FORTE_MAX_FRAG_SIZE)
channel->frag_sz = FORTE_MAX_FRAG_SIZE;
/* Find fragment length in milliseconds */
msecs = channel->frag_sz /
(channel->format == AFMT_S16_LE ? 2 : 1) /
(channel->stereo ? 2 : 1) /
(channel->rate / 1000);
channel->frag_msecs = msecs;
/* Pick a suitable number of fragments */
if (msecs * num < FORTE_MIN_BUF_MSECS)
num = FORTE_MIN_BUF_MSECS / msecs;
if (msecs * num > FORTE_MAX_BUF_MSECS)
num = FORTE_MAX_BUF_MSECS / msecs;
/* Fragment number must be a power of 2 */
shift = 0;
while (num >>= 1)
shift++;
channel->frag_num = 1 << (shift + 1);
/* Round fragment number to something reasonable */
if (channel->frag_num < FORTE_MIN_FRAGMENTS)
channel->frag_num = FORTE_MIN_FRAGMENTS;
if (channel->frag_num > FORTE_MAX_FRAGMENTS)
channel->frag_num = FORTE_MAX_FRAGMENTS;
channel->buf_sz = channel->frag_sz * channel->frag_num;
DPRINTK ("%s: %s frag_sz = %d, frag_num = %d, buf_sz = %d\n",
__FUNCTION__, channel->name, channel->frag_sz,
channel->frag_num, channel->buf_sz);
}
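/*
 * Worked example (hypothetical request): frag_sz = 256 bytes at the
 * 48 kHz / 16-bit / stereo default gives
 *
 *   msecs = 256 / 2 / 2 / (48000 / 1000) = 1 ms per fragment,
 *
 * so num is raised to FORTE_MIN_BUF_MSECS / 1 = 500, rounded up to 512
 * and finally clamped to FORTE_MAX_FRAGMENTS = 256, for a 64 KiB buffer.
 */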
/**
* forte_channel_prep:
* @channel: Channel whose buffer to prepare
*
* Locking: Lock held.
*/
static void
forte_channel_prep (struct forte_channel *channel)
{
struct page *page;
int i;
if (channel->buf)
return;
forte_channel_buffer (channel, channel->frag_sz, channel->frag_num);
channel->buf_pages = channel->buf_sz >> PAGE_SHIFT;
if (channel->buf_sz % PAGE_SIZE)
channel->buf_pages++;
DPRINTK ("%s: %s frag_sz = %d, frag_num = %d, buf_sz = %d, pg = %d\n",
__FUNCTION__, channel->name, channel->frag_sz,
channel->frag_num, channel->buf_sz, channel->buf_pages);
/* DMA buffer */
channel->buf = pci_alloc_consistent (forte->pci_dev,
channel->buf_pages * PAGE_SIZE,
&channel->buf_handle);
if (!channel->buf || !channel->buf_handle)
BUG();
page = virt_to_page (channel->buf);
/* FIXME: can this go away ? */
for (i = 0 ; i < channel->buf_pages ; i++)
SetPageReserved(page++);
/* Prep buffer registers */
outw (channel->frag_sz - 1, channel->iobase + FORTE_PLY_COUNT);
outl (channel->buf_handle, channel->iobase + FORTE_PLY_BUF1);
outl (channel->buf_handle + channel->frag_sz,
channel->iobase + FORTE_PLY_BUF2);
/* Reset hwptr */
channel->hwptr = channel->frag_sz;
channel->next_buf = 1;
DPRINTK ("%s: %s buffer @ %p (%p)\n", __FUNCTION__, channel->name,
channel->buf, channel->buf_handle);
}
/**
* forte_channel_drain:
* @chip:
* @channel:
*
* Locking: Don't hold the lock.
*/
static inline int
forte_channel_drain (struct forte_channel *channel)
{
DECLARE_WAITQUEUE (wait, current);
unsigned long flags;
DPRINTK ("%s\n", __FUNCTION__);
if (channel->mapped) {
spin_lock_irqsave (&forte->lock, flags);
forte_channel_stop (channel);
spin_unlock_irqrestore (&forte->lock, flags);
return 0;
}
spin_lock_irqsave (&forte->lock, flags);
add_wait_queue (&channel->wait, &wait);
for (;;) {
if (channel->active == 0 || channel->filled_frags == 1)
break;
spin_unlock_irqrestore (&forte->lock, flags);
__set_current_state (TASK_INTERRUPTIBLE);
schedule();
spin_lock_irqsave (&forte->lock, flags);
}
forte_channel_stop (channel);
forte_channel_reset (channel);
set_current_state (TASK_RUNNING);
remove_wait_queue (&channel->wait, &wait);
spin_unlock_irqrestore (&forte->lock, flags);
return 0;
}
/**
* forte_channel_init:
* @chip: Forte chip instance the channel hangs off
* @channel: Channel to initialize
*
* Description:
* Initializes a channel, sets defaults, and allocates
* buffers.
*
* Locking: No lock held.
*/
static int
forte_channel_init (struct forte_chip *chip, struct forte_channel *channel)
{
DPRINTK ("%s: chip iobase @ %p\n", __FUNCTION__, (void *)chip->iobase);
spin_lock_irq (&chip->lock);
memset (channel, 0x0, sizeof (*channel));
if (channel == &chip->play) {
channel->name = "PCM_OUT";
channel->iobase = chip->iobase;
DPRINTK ("%s: PCM-OUT iobase @ %p\n", __FUNCTION__,
(void *) channel->iobase);
}
else if (channel == &chip->rec) {
channel->name = "PCM_IN";
channel->iobase = chip->iobase + FORTE_CAP_OFFSET;
channel->record = 1;
DPRINTK ("%s: PCM-IN iobase @ %p\n", __FUNCTION__,
(void *) channel->iobase);
}
else
BUG();
init_waitqueue_head (&channel->wait);
/* Defaults: 48kHz, 16-bit, stereo */
channel->ctrl = inw (channel->iobase + FORTE_PLY_CTRL);
forte_channel_reset (channel);
forte_channel_stereo (channel, 1);
forte_channel_format (channel, AFMT_S16_LE);
forte_channel_rate (channel, 48000);
channel->frag_sz = FORTE_DEF_FRAG_SIZE;
channel->frag_num = FORTE_DEF_FRAGMENTS;
chip->trigger = 0;
spin_unlock_irq (&chip->lock);
return 0;
}
/**
* forte_channel_free:
* @chip: Chip this channel hangs off
* @channel: Channel to nuke
*
* Description:
* Resets channel and frees buffers.
*
* Locking: Hold your horses.
*/
static void
forte_channel_free (struct forte_chip *chip, struct forte_channel *channel)
{
DPRINTK ("%s: %s\n", __FUNCTION__, channel->name);
if (!channel->buf_handle)
return;
pci_free_consistent (chip->pci_dev, channel->buf_pages * PAGE_SIZE,
channel->buf, channel->buf_handle);
memset (channel, 0x0, sizeof (*channel));
}
/* DSP --------------------------------------------------------------------- */
/**
* forte_dsp_ioctl:
*/
static int
forte_dsp_ioctl (struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg)
{
int ival=0, rval=0, rd, wr, count;
struct forte_chip *chip;
struct audio_buf_info abi;
struct count_info cinfo;
void __user *argp = (void __user *)arg;
int __user *p = argp;
chip = file->private_data;
if (file->f_mode & FMODE_WRITE)
wr = 1;
else
wr = 0;
if (file->f_mode & FMODE_READ)
rd = 1;
else
rd = 0;
switch (cmd) {
case OSS_GETVERSION:
return put_user (SOUND_VERSION, p);
case SNDCTL_DSP_GETCAPS:
DPRINTK ("%s: GETCAPS\n", __FUNCTION__);
ival = FORTE_CAPS; /* DUPLEX */
return put_user (ival, p);
case SNDCTL_DSP_GETFMTS:
DPRINTK ("%s: GETFMTS\n", __FUNCTION__);
ival = FORTE_FMTS; /* U8, 16LE */
return put_user (ival, p);
case SNDCTL_DSP_SETFMT: /* U8, 16LE */
DPRINTK ("%s: SETFMT\n", __FUNCTION__);
if (get_user (ival, p))
return -EFAULT;
spin_lock_irq (&chip->lock);
if (rd) {
forte_channel_stop (&chip->rec);
rval = forte_channel_format (&chip->rec, ival);
}
if (wr) {
forte_channel_stop (&chip->play);
rval = forte_channel_format (&chip->play, ival);
}
spin_unlock_irq (&chip->lock);
return put_user (rval, p);
case SNDCTL_DSP_STEREO: /* 0 - mono, 1 - stereo */
DPRINTK ("%s: STEREO\n", __FUNCTION__);
if (get_user (ival, p))
return -EFAULT;
spin_lock_irq (&chip->lock);
if (rd) {
forte_channel_stop (&chip->rec);
rval = forte_channel_stereo (&chip->rec, ival);
}
if (wr) {
forte_channel_stop (&chip->play);
rval = forte_channel_stereo (&chip->play, ival);
}
spin_unlock_irq (&chip->lock);
return put_user (rval, p);
case SNDCTL_DSP_CHANNELS: /* 1 - mono, 2 - stereo */
DPRINTK ("%s: CHANNELS\n", __FUNCTION__);
if (get_user (ival, p))
return -EFAULT;
spin_lock_irq (&chip->lock);
if (rd) {
forte_channel_stop (&chip->rec);
rval = forte_channel_stereo (&chip->rec, ival-1) + 1;
}
if (wr) {
forte_channel_stop (&chip->play);
rval = forte_channel_stereo (&chip->play, ival-1) + 1;
}
spin_unlock_irq (&chip->lock);
return put_user (rval, p);
case SNDCTL_DSP_SPEED:
DPRINTK ("%s: SPEED\n", __FUNCTION__);
if (get_user (ival, p))
return -EFAULT;
spin_lock_irq (&chip->lock);
if (rd) {
forte_channel_stop (&chip->rec);
rval = forte_channel_rate (&chip->rec, ival);
}
if (wr) {
forte_channel_stop (&chip->play);
rval = forte_channel_rate (&chip->play, ival);
}
spin_unlock_irq (&chip->lock);
return put_user(rval, p);
case SNDCTL_DSP_GETBLKSIZE:
DPRINTK ("%s: GETBLKSIZE\n", __FUNCTION__);
spin_lock_irq (&chip->lock);
if (rd)
ival = chip->rec.frag_sz;
if (wr)
ival = chip->play.frag_sz;
spin_unlock_irq (&chip->lock);
return put_user (ival, p);
case SNDCTL_DSP_RESET:
DPRINTK ("%s: RESET\n", __FUNCTION__);
spin_lock_irq (&chip->lock);
if (rd)
forte_channel_reset (&chip->rec);
if (wr)
forte_channel_reset (&chip->play);
spin_unlock_irq (&chip->lock);
return 0;
case SNDCTL_DSP_SYNC:
DPRINTK ("%s: SYNC\n", __FUNCTION__);
if (wr)
return forte_channel_drain (&chip->play);
return 0;
case SNDCTL_DSP_POST:
DPRINTK ("%s: POST\n", __FUNCTION__);
if (wr) {
spin_lock_irq (&chip->lock);
if (chip->play.filled_frags)
forte_channel_start (&chip->play);
spin_unlock_irq (&chip->lock);
}
return 0;
case SNDCTL_DSP_SETFRAGMENT:
DPRINTK ("%s: SETFRAGMENT\n", __FUNCTION__);
if (get_user (ival, p))
return -EFAULT;
spin_lock_irq (&chip->lock);
if (rd) {
forte_channel_buffer (&chip->rec, ival & 0xffff,
(ival >> 16) & 0xffff);
ival = (chip->rec.frag_num << 16) + chip->rec.frag_sz;
}
if (wr) {
forte_channel_buffer (&chip->play, ival & 0xffff,
(ival >> 16) & 0xffff);
ival = (chip->play.frag_num << 16) +chip->play.frag_sz;
}
spin_unlock_irq (&chip->lock);
return put_user (ival, p);
case SNDCTL_DSP_GETISPACE:
DPRINTK ("%s: GETISPACE\n", __FUNCTION__);
if (!rd)
return -EINVAL;
spin_lock_irq (&chip->lock);
abi.fragstotal = chip->rec.frag_num;
abi.fragsize = chip->rec.frag_sz;
if (chip->rec.mapped) {
abi.fragments = chip->rec.frag_num - 2;
abi.bytes = abi.fragments * abi.fragsize;
}
else {
abi.fragments = chip->rec.filled_frags;
abi.bytes = abi.fragments * abi.fragsize;
}
spin_unlock_irq (&chip->lock);
return copy_to_user (argp, &abi, sizeof (abi)) ? -EFAULT : 0;
case SNDCTL_DSP_GETIPTR:
DPRINTK ("%s: GETIPTR\n", __FUNCTION__);
if (!rd)
return -EINVAL;
spin_lock_irq (&chip->lock);
if (chip->rec.active)
cinfo.ptr = chip->rec.hwptr;
else
cinfo.ptr = 0;
cinfo.bytes = chip->rec.bytes;
cinfo.blocks = chip->rec.nr_irqs;
chip->rec.nr_irqs = 0;
spin_unlock_irq (&chip->lock);
return copy_to_user (argp, &cinfo, sizeof (cinfo)) ? -EFAULT : 0;
case SNDCTL_DSP_GETOSPACE:
if (!wr)
return -EINVAL;
spin_lock_irq (&chip->lock);
abi.fragstotal = chip->play.frag_num;
abi.fragsize = chip->play.frag_sz;
if (chip->play.mapped) {
abi.fragments = chip->play.frag_num - 2;
abi.bytes = chip->play.buf_sz;
}
else {
abi.fragments = chip->play.frag_num -
chip->play.filled_frags;
if (chip->play.residue)
abi.fragments--;
abi.bytes = abi.fragments * abi.fragsize +
chip->play.residue;
}
spin_unlock_irq (&chip->lock);
return copy_to_user (argp, &abi, sizeof (abi)) ? -EFAULT : 0;
case SNDCTL_DSP_GETOPTR:
if (!wr)
return -EINVAL;
spin_lock_irq (&chip->lock);
if (chip->play.active)
cinfo.ptr = chip->play.hwptr;
else
cinfo.ptr = 0;
cinfo.bytes = chip->play.bytes;
cinfo.blocks = chip->play.nr_irqs;
chip->play.nr_irqs = 0;
spin_unlock_irq (&chip->lock);
return copy_to_user (argp, &cinfo, sizeof (cinfo)) ? -EFAULT : 0;
case SNDCTL_DSP_GETODELAY:
if (!wr)
return -EINVAL;
spin_lock_irq (&chip->lock);
if (!chip->play.active) {
ival = 0;
}
else if (chip->play.mapped) {
count = inw (chip->play.iobase + FORTE_PLY_COUNT) + 1;
ival = chip->play.frag_sz - count;
}
else {
ival = chip->play.filled_frags * chip->play.frag_sz;
if (chip->play.residue)
ival += chip->play.frag_sz - chip->play.residue;
}
spin_unlock_irq (&chip->lock);
return put_user (ival, p);
case SNDCTL_DSP_SETDUPLEX:
DPRINTK ("%s: SETDUPLEX\n", __FUNCTION__);
return -EINVAL;
case SNDCTL_DSP_GETTRIGGER:
DPRINTK ("%s: GETTRIGGER\n", __FUNCTION__);
return put_user (chip->trigger, p);
case SNDCTL_DSP_SETTRIGGER:
if (get_user (ival, p))
return -EFAULT;
DPRINTK ("%s: SETTRIGGER %d\n", __FUNCTION__, ival);
if (wr) {
spin_lock_irq (&chip->lock);
if (ival & PCM_ENABLE_OUTPUT)
forte_channel_start (&chip->play);
else {
chip->trigger = 1;
forte_channel_prep (&chip->play);
forte_channel_stop (&chip->play);
}
spin_unlock_irq (&chip->lock);
}
else if (rd) {
spin_lock_irq (&chip->lock);
if (ival & PCM_ENABLE_INPUT)
forte_channel_start (&chip->rec);
else {
chip->trigger = 1;
forte_channel_prep (&chip->rec);
forte_channel_stop (&chip->rec);
}
spin_unlock_irq (&chip->lock);
}
return 0;
case SOUND_PCM_READ_RATE:
DPRINTK ("%s: PCM_READ_RATE\n", __FUNCTION__);
return put_user (chip->play.rate, p);
case SOUND_PCM_READ_CHANNELS:
DPRINTK ("%s: PCM_READ_CHANNELS\n", __FUNCTION__);
return put_user (chip->play.stereo, p);
case SOUND_PCM_READ_BITS:
DPRINTK ("%s: PCM_READ_BITS\n", __FUNCTION__);
return put_user (chip->play.format, p);
case SNDCTL_DSP_NONBLOCK:
DPRINTK ("%s: DSP_NONBLOCK\n", __FUNCTION__);
file->f_flags |= O_NONBLOCK;
return 0;
default:
DPRINTK ("Unsupported ioctl: %x (%p)\n", cmd, argp);
break;
}
return -EINVAL;
}
/**
* forte_dsp_open:
*/
static int
forte_dsp_open (struct inode *inode, struct file *file)
{
struct forte_chip *chip = forte; /* FIXME: HACK FROM HELL! */
if (file->f_flags & O_NONBLOCK) {
if (!mutex_trylock(&chip->open_mutex)) {
DPRINTK ("%s: returning -EAGAIN\n", __FUNCTION__);
return -EAGAIN;
}
}
else {
if (mutex_lock_interruptible(&chip->open_mutex)) {
DPRINTK ("%s: returning -ERESTARTSYS\n", __FUNCTION__);
return -ERESTARTSYS;
}
}
file->private_data = forte;
DPRINTK ("%s: dsp opened by %d\n", __FUNCTION__, current->pid);
if (file->f_mode & FMODE_WRITE)
forte_channel_init (forte, &forte->play);
if (file->f_mode & FMODE_READ)
forte_channel_init (forte, &forte->rec);
return nonseekable_open(inode, file);
}
/**
* forte_dsp_release:
*/
static int
forte_dsp_release (struct inode *inode, struct file *file)
{
struct forte_chip *chip = file->private_data;
int ret = 0;
DPRINTK ("%s: chip @ %p\n", __FUNCTION__, chip);
if (file->f_mode & FMODE_WRITE) {
forte_channel_drain (&chip->play);
spin_lock_irq (&chip->lock);
forte_channel_free (chip, &chip->play);
spin_unlock_irq (&chip->lock);
}
if (file->f_mode & FMODE_READ) {
while (chip->rec.filled_frags > 0)
interruptible_sleep_on (&chip->rec.wait);
spin_lock_irq (&chip->lock);
forte_channel_stop (&chip->rec);
forte_channel_free (chip, &chip->rec);
spin_unlock_irq (&chip->lock);
}
mutex_unlock(&chip->open_mutex);
return ret;
}
/**
* forte_dsp_poll:
*
*/
static unsigned int
forte_dsp_poll (struct file *file, struct poll_table_struct *wait)
{
struct forte_chip *chip;
struct forte_channel *channel;
unsigned int mask = 0;
chip = file->private_data;
if (file->f_mode & FMODE_WRITE) {
channel = &chip->play;
if (channel->active)
poll_wait (file, &channel->wait, wait);
spin_lock_irq (&chip->lock);
if (channel->frag_num - channel->filled_frags > 0)
mask |= POLLOUT | POLLWRNORM;
spin_unlock_irq (&chip->lock);
}
if (file->f_mode & FMODE_READ) {
channel = &chip->rec;
if (channel->active)
poll_wait (file, &channel->wait, wait);
spin_lock_irq (&chip->lock);
if (channel->filled_frags > 0)
mask |= POLLIN | POLLRDNORM;
spin_unlock_irq (&chip->lock);
}
return mask;
}
/**
* forte_dsp_mmap:
*/
static int
forte_dsp_mmap (struct file *file, struct vm_area_struct *vma)
{
struct forte_chip *chip;
struct forte_channel *channel;
unsigned long size;
int ret;
chip = file->private_data;
DPRINTK ("%s: start %lXh, size %ld, pgoff %ld\n", __FUNCTION__,
vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_pgoff);
spin_lock_irq (&chip->lock);
if (vma->vm_flags & VM_WRITE && chip->play.active) {
ret = -EBUSY;
goto out;
}
if (vma->vm_flags & VM_READ && chip->rec.active) {
ret = -EBUSY;
goto out;
}
if (file->f_mode & FMODE_WRITE)
channel = &chip->play;
else if (file->f_mode & FMODE_READ)
channel = &chip->rec;
else {
ret = -EINVAL;
goto out;
}
forte_channel_prep (channel);
channel->mapped = 1;
if (vma->vm_pgoff != 0) {
ret = -EINVAL;
goto out;
}
size = vma->vm_end - vma->vm_start;
if (size > channel->buf_pages * PAGE_SIZE) {
DPRINTK ("%s: size (%ld) > buf_sz (%d) \n", __FUNCTION__,
size, channel->buf_sz);
ret = -EINVAL;
goto out;
}
if (remap_pfn_range(vma, vma->vm_start,
virt_to_phys(channel->buf) >> PAGE_SHIFT,
size, vma->vm_page_prot)) {
DPRINTK ("%s: remap el a no worko\n", __FUNCTION__);
ret = -EAGAIN;
goto out;
}
ret = 0;
out:
spin_unlock_irq (&chip->lock);
return ret;
}
/**
* forte_dsp_write:
*/
static ssize_t
forte_dsp_write (struct file *file, const char __user *buffer, size_t bytes,
loff_t *ppos)
{
struct forte_chip *chip;
struct forte_channel *channel;
unsigned int i = bytes, sz = 0;
unsigned long flags;
if (!access_ok (VERIFY_READ, buffer, bytes))
return -EFAULT;
chip = (struct forte_chip *) file->private_data;
if (!chip)
BUG();
channel = &chip->play;
if (!channel)
BUG();
spin_lock_irqsave (&chip->lock, flags);
/* Set up buffers with the right fragment size */
forte_channel_prep (channel);
while (i) {
/* All fragment buffers in use -> wait */
if (channel->frag_num - channel->filled_frags == 0) {
DECLARE_WAITQUEUE (wait, current);
/* For trigger or non-blocking operation, get out */
if (chip->trigger || file->f_flags & O_NONBLOCK) {
spin_unlock_irqrestore (&chip->lock, flags);
return -EAGAIN;
}
/* Otherwise wait for buffers */
add_wait_queue (&channel->wait, &wait);
for (;;) {
spin_unlock_irqrestore (&chip->lock, flags);
set_current_state (TASK_INTERRUPTIBLE);
schedule();
spin_lock_irqsave (&chip->lock, flags);
if (channel->frag_num - channel->filled_frags)
break;
}
remove_wait_queue (&channel->wait, &wait);
set_current_state (TASK_RUNNING);
if (signal_pending (current)) {
spin_unlock_irqrestore (&chip->lock, flags);
return -ERESTARTSYS;
}
}
if (channel->residue)
sz = channel->residue;
else if (i > channel->frag_sz)
sz = channel->frag_sz;
else
sz = i;
spin_unlock_irqrestore (&chip->lock, flags);
if (copy_from_user ((void *) channel->buf + channel->swptr, buffer, sz))
return -EFAULT;
spin_lock_irqsave (&chip->lock, flags);
/* Advance software pointer */
buffer += sz;
channel->swptr += sz;
channel->swptr %= channel->buf_sz;
i -= sz;
/* Only bump filled_frags if a full fragment has been written */
if (channel->swptr % channel->frag_sz == 0) {
channel->filled_frags++;
channel->residue = 0;
}
else
channel->residue = channel->frag_sz - sz;
/* If playback isn't active, start it */
if (channel->active == 0 && chip->trigger == 0)
forte_channel_start (channel);
}
spin_unlock_irqrestore (&chip->lock, flags);
return bytes - i;
}
/**
* forte_dsp_read:
*/
static ssize_t
forte_dsp_read (struct file *file, char __user *buffer, size_t bytes,
loff_t *ppos)
{
struct forte_chip *chip;
struct forte_channel *channel;
unsigned int i = bytes, sz;
unsigned long flags;
if (!access_ok (VERIFY_WRITE, buffer, bytes))
return -EFAULT;
chip = (struct forte_chip *) file->private_data;
if (!chip)
BUG();
channel = &chip->rec;
if (!channel)
BUG();
spin_lock_irqsave (&chip->lock, flags);
/* Set up buffers with the right fragment size */
forte_channel_prep (channel);
/* Start recording */
if (!chip->trigger)
forte_channel_start (channel);
while (i) {
/* No fragment buffers in use -> wait */
if (channel->filled_frags == 0) {
DECLARE_WAITQUEUE (wait, current);
/* For trigger mode operation, get out */
if (chip->trigger) {
spin_unlock_irqrestore (&chip->lock, flags);
return -EAGAIN;
}
add_wait_queue (&channel->wait, &wait);
for (;;) {
if (channel->active == 0)
break;
if (channel->filled_frags)
break;
spin_unlock_irqrestore (&chip->lock, flags);
set_current_state (TASK_INTERRUPTIBLE);
schedule();
spin_lock_irqsave (&chip->lock, flags);
}
set_current_state (TASK_RUNNING);
remove_wait_queue (&channel->wait, &wait);
}
if (i > channel->frag_sz)
sz = channel->frag_sz;
else
sz = i;
spin_unlock_irqrestore (&chip->lock, flags);
if (copy_to_user (buffer, (void *)channel->buf+channel->swptr, sz)) {
DPRINTK ("%s: copy_to_user failed\n", __FUNCTION__);
return -EFAULT;
}
spin_lock_irqsave (&chip->lock, flags);
/* Advance software pointer */
buffer += sz;
if (channel->filled_frags > 0)
channel->filled_frags--;
channel->swptr += channel->frag_sz;
channel->swptr %= channel->buf_sz;
i -= sz;
}
spin_unlock_irqrestore (&chip->lock, flags);
return bytes - i;
}
static struct file_operations forte_dsp_fops = {
.owner = THIS_MODULE,
.llseek = &no_llseek,
.read = &forte_dsp_read,
.write = &forte_dsp_write,
.poll = &forte_dsp_poll,
.ioctl = &forte_dsp_ioctl,
.open = &forte_dsp_open,
.release = &forte_dsp_release,
.mmap = &forte_dsp_mmap,
};
/* Common ------------------------------------------------------------------ */
/**
* forte_interrupt:
*/
static irqreturn_t
forte_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{
struct forte_chip *chip = dev_id;
struct forte_channel *channel = NULL;
u16 status, count;
status = inw (chip->iobase + FORTE_IRQ_STATUS);
/* If this is not for us, get outta here ASAP */
if ((status & (FORTE_IRQ_PLAYBACK | FORTE_IRQ_CAPTURE)) == 0)
return IRQ_NONE;
if (status & FORTE_IRQ_PLAYBACK) {
channel = &chip->play;
spin_lock (&chip->lock);
if (channel->frag_sz == 0)
goto pack;
/* Declare a fragment done */
if (channel->filled_frags > 0)
channel->filled_frags--;
channel->bytes += channel->frag_sz;
channel->nr_irqs++;
/* Flip-flop between buffer I and II */
channel->next_buf ^= 1;
/* Advance hardware pointer by fragment size and wrap around */
channel->hwptr += channel->frag_sz;
channel->hwptr %= channel->buf_sz;
/* Buffer I or buffer II BAR */
outl (channel->buf_handle + channel->hwptr,
channel->next_buf == 0 ?
channel->iobase + FORTE_PLY_BUF1 :
channel->iobase + FORTE_PLY_BUF2);
/* If the currently playing fragment is last, schedule pause */
if (channel->filled_frags == 1)
forte_channel_pause (channel);
pack:
/* Acknowledge interrupt */
outw (FORTE_IRQ_PLAYBACK, chip->iobase + FORTE_IRQ_STATUS);
if (waitqueue_active (&channel->wait))
wake_up_all (&channel->wait);
spin_unlock (&chip->lock);
}
if (status & FORTE_IRQ_CAPTURE) {
channel = &chip->rec;
spin_lock (&chip->lock);
/* One fragment filled */
channel->filled_frags++;
/* Get # of completed bytes */
count = inw (channel->iobase + FORTE_PLY_COUNT) + 1;
if (count == 0) {
DPRINTK ("%s: last, filled_frags = %d\n", __FUNCTION__,
channel->filled_frags);
channel->filled_frags = 0;
goto rack;
}
/* Buffer I or buffer II BAR */
outl (channel->buf_handle + channel->hwptr,
channel->next_buf == 0 ?
channel->iobase + FORTE_PLY_BUF1 :
channel->iobase + FORTE_PLY_BUF2);
/* Flip-flop between buffer I and II */
channel->next_buf ^= 1;
/* Advance hardware pointer by fragment size and wrap around */
channel->hwptr += channel->frag_sz;
channel->hwptr %= channel->buf_sz;
/* Out of buffers */
if (channel->filled_frags == channel->frag_num - 1)
forte_channel_stop (channel);
rack:
/* Acknowledge interrupt */
outw (FORTE_IRQ_CAPTURE, chip->iobase + FORTE_IRQ_STATUS);
spin_unlock (&chip->lock);
if (waitqueue_active (&channel->wait))
wake_up_all (&channel->wait);
}
return IRQ_HANDLED;
}
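/*
 * Double-buffer timeline (illustration, not from the sources): BUF1 and
 * BUF2 play alternately; on each playback IRQ the buffer that has just
 * finished is reprogrammed two fragments ahead while the other plays:
 *
 *   IRQ (BUF1 done, BUF2 playing): hwptr += frag_sz, BUF1 <- buf_handle + hwptr
 *   IRQ (BUF2 done, BUF1 playing): hwptr += frag_sz, BUF2 <- buf_handle + hwptr
 */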
/**
* forte_proc_read:
*/
static int
forte_proc_read (char *page, char **start, off_t off, int count,
int *eof, void *data)
{
int i = 0, p_rate, p_chan, r_rate;
unsigned short p_reg, r_reg;
i += sprintf (page, "ForteMedia FM801 OSS Lite driver\n%s\n \n",
DRIVER_VERSION);
if (!forte->iobase)
return i;
p_rate = p_chan = -1;
p_reg = inw (forte->iobase + FORTE_PLY_CTRL);
p_rate = (p_reg >> 8) & 15;
p_chan = (p_reg >> 12) & 3;
if (p_rate >= 0 && p_rate <= 10)
p_rate = rates[p_rate];
if (p_chan >= 0 && p_chan <= 2)
p_chan = channels[p_chan];
r_rate = -1;
r_reg = inw (forte->iobase + FORTE_CAP_CTRL);
r_rate = (r_reg >> 8) & 15;
if (r_rate >= 0 && r_rate <= 10)
r_rate = rates[r_rate];
i += sprintf (page + i,
" Playback Capture\n"
"FIFO empty : %-3s %-3s\n"
"Buf1 Last : %-3s %-3s\n"
"Buf2 Last : %-3s %-3s\n"
"Started : %-3s %-3s\n"
"Paused : %-3s %-3s\n"
"Immed Stop : %-3s %-3s\n"
"Rate : %-5d %-5d\n"
"Channels : %-5d -\n"
"16-bit : %-3s %-3s\n"
"Stereo : %-3s %-3s\n"
" \n"
"Buffer Sz : %-6d %-6d\n"
"Frag Sz : %-6d %-6d\n"
"Frag Num : %-6d %-6d\n"
"Frag msecs : %-6d %-6d\n"
"Used Frags : %-6d %-6d\n"
"Mapped : %-3s %-3s\n",
p_reg & 1<<0 ? "yes" : "no",
r_reg & 1<<0 ? "yes" : "no",
p_reg & 1<<1 ? "yes" : "no",
r_reg & 1<<1 ? "yes" : "no",
p_reg & 1<<2 ? "yes" : "no",
r_reg & 1<<2 ? "yes" : "no",
p_reg & 1<<5 ? "yes" : "no",
r_reg & 1<<5 ? "yes" : "no",
p_reg & 1<<6 ? "yes" : "no",
r_reg & 1<<6 ? "yes" : "no",
p_reg & 1<<7 ? "yes" : "no",
r_reg & 1<<7 ? "yes" : "no",
p_rate, r_rate,
p_chan,
p_reg & 1<<14 ? "yes" : "no",
r_reg & 1<<14 ? "yes" : "no",
p_reg & 1<<15 ? "yes" : "no",
r_reg & 1<<15 ? "yes" : "no",
forte->play.buf_sz, forte->rec.buf_sz,
forte->play.frag_sz, forte->rec.frag_sz,
forte->play.frag_num, forte->rec.frag_num,
forte->play.frag_msecs, forte->rec.frag_msecs,
forte->play.filled_frags, forte->rec.filled_frags,
forte->play.mapped ? "yes" : "no",
forte->rec.mapped ? "yes" : "no"
);
return i;
}
/**
* forte_proc_init:
*
* Creates driver info entries in /proc
*/
static int __init
forte_proc_init (void)
{
if (!proc_mkdir ("driver/forte", NULL))
return -EIO;
if (!create_proc_read_entry ("driver/forte/chip", 0, NULL, forte_proc_read, forte)) {
remove_proc_entry ("driver/forte", NULL);
return -EIO;
}
if (!create_proc_read_entry("driver/forte/ac97", 0, NULL, ac97_read_proc, forte->ac97)) {
remove_proc_entry ("driver/forte/chip", NULL);
remove_proc_entry ("driver/forte", NULL);
return -EIO;
}
return 0;
}
/**
* forte_proc_remove:
*
* Removes driver info entries in /proc
*/
static void
forte_proc_remove (void)
{
remove_proc_entry ("driver/forte/ac97", NULL);
remove_proc_entry ("driver/forte/chip", NULL);
remove_proc_entry ("driver/forte", NULL);
}
/**
* forte_chip_init:
* @chip: Chip instance to initialize
*
* Description:
* Resets chip, configures codec and registers the driver with
* the sound subsystem.
*
* Press and hold Start for 8 secs, then switch on Run
* and hold for 4 seconds. Let go of Start. Numbers
* assume a properly oiled TWG.
*/
static int __devinit
forte_chip_init (struct forte_chip *chip)
{
u8 revision;
u16 cmdw;
struct ac97_codec *codec;
pci_read_config_byte (chip->pci_dev, PCI_REVISION_ID, &revision);
if (revision >= 0xB1) {
chip->multichannel = 1;
printk (KERN_INFO PFX "Multi-channel device detected.\n");
}
/* Reset chip */
outw (FORTE_CC_CODEC_RESET | FORTE_CC_AC97_RESET,
chip->iobase + FORTE_CODEC_CTRL);
udelay(100);
outw (0, chip->iobase + FORTE_CODEC_CTRL);
/* Request read from AC97 */
outw (FORTE_AC97_READ | (0 << FORTE_AC97_ADDR_SHIFT),
chip->iobase + FORTE_AC97_CMD);
mdelay(750);
if ((inw (chip->iobase + FORTE_AC97_CMD) & (3<<8)) != (1<<8)) {
printk (KERN_INFO PFX "AC97 codec not responding\n");
return -EIO;
}
/* Init volume */
outw (0x0808, chip->iobase + FORTE_PCM_VOL);
outw (0x9f1f, chip->iobase + FORTE_FM_VOL);
outw (0x8808, chip->iobase + FORTE_I2S_VOL);
/* I2S control - I2S mode */
outw (0x0003, chip->iobase + FORTE_I2S_MODE);
/* Interrupt setup - unmask PLAYBACK & CAPTURE */
cmdw = inw (chip->iobase + FORTE_IRQ_MASK);
cmdw &= ~0x0003;
outw (cmdw, chip->iobase + FORTE_IRQ_MASK);
/* Interrupt clear */
outw (FORTE_IRQ_PLAYBACK|FORTE_IRQ_CAPTURE,
chip->iobase + FORTE_IRQ_STATUS);
/* Set up the AC97 codec */
if ((codec = ac97_alloc_codec()) == NULL)
return -ENOMEM;
codec->private_data = chip;
codec->codec_read = forte_ac97_read;
codec->codec_write = forte_ac97_write;
codec->id = 0;
if (ac97_probe_codec (codec) == 0) {
printk (KERN_ERR PFX "codec probe failed\n");
ac97_release_codec(codec);
return -1;
}
/* Register mixer */
if ((codec->dev_mixer =
register_sound_mixer (&forte_mixer_fops, -1)) < 0) {
printk (KERN_ERR PFX "couldn't register mixer!\n");
ac97_release_codec(codec);
return -1;
}
chip->ac97 = codec;
/* Register DSP */
if ((chip->dsp = register_sound_dsp (&forte_dsp_fops, -1) ) < 0) {
printk (KERN_ERR PFX "couldn't register dsp!\n");
return -1;
}
/* Register with /proc */
if (forte_proc_init()) {
printk (KERN_ERR PFX "couldn't add entries to /proc!\n");
return -1;
}
return 0;
}
/**
* forte_probe:
* @pci_dev: PCI struct for probed device
* @pci_id:
*
* Description:
* Allocates chip instance, I/O region, and IRQ
*/
static int __init
forte_probe (struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
{
struct forte_chip *chip;
int ret = 0;
/* FIXME: Support more than one chip */
if (found++)
return -EIO;
/* Ignition */
if (pci_enable_device (pci_dev))
return -EIO;
pci_set_master (pci_dev);
/* Allocate chip instance and configure */
forte = kzalloc (sizeof (struct forte_chip), GFP_KERNEL);
chip = forte;
if (chip == NULL) {
printk (KERN_WARNING PFX "Out of memory\n");
return -ENOMEM;
}
chip->pci_dev = pci_dev;
mutex_init(&chip->open_mutex);
spin_lock_init (&chip->lock);
spin_lock_init (&chip->ac97_lock);
if (! request_region (pci_resource_start (pci_dev, 0),
pci_resource_len (pci_dev, 0), DRIVER_NAME)) {
printk (KERN_WARNING PFX "Unable to reserve I/O space\n");
ret = -ENOMEM;
goto error;
}
chip->iobase = pci_resource_start (pci_dev, 0);
chip->irq = pci_dev->irq;
if (request_irq (chip->irq, forte_interrupt, IRQF_SHARED, DRIVER_NAME,
chip)) {
printk (KERN_WARNING PFX "Unable to reserve IRQ\n");
ret = -EIO;
goto error;
}
pci_set_drvdata (pci_dev, chip);
printk (KERN_INFO PFX "FM801 chip found at 0x%04lX-0x%016llX IRQ %u\n",
chip->iobase, (unsigned long long)pci_resource_end (pci_dev, 0),
chip->irq);
/* Power it up */
if ((ret = forte_chip_init (chip)) == 0)
return 0;
error:
if (chip->irq)
free_irq (chip->irq, chip);
if (chip->iobase)
release_region (pci_resource_start (pci_dev, 0),
pci_resource_len (pci_dev, 0));
kfree (chip);
return ret;
}
/**
* forte_remove:
* @pci_dev: PCI device to unclaim
*
*/
static void
forte_remove (struct pci_dev *pci_dev)
{
struct forte_chip *chip = pci_get_drvdata (pci_dev);
if (chip == NULL)
return;
/* Turn volume down to avoid popping */
outw (0x1f1f, chip->iobase + FORTE_PCM_VOL);
outw (0x1f1f, chip->iobase + FORTE_FM_VOL);
outw (0x1f1f, chip->iobase + FORTE_I2S_VOL);
forte_proc_remove();
free_irq (chip->irq, chip);
release_region (chip->iobase, pci_resource_len (pci_dev, 0));
unregister_sound_dsp (chip->dsp);
unregister_sound_mixer (chip->ac97->dev_mixer);
ac97_release_codec(chip->ac97);
kfree (chip);
printk (KERN_INFO PFX "driver released\n");
}
static struct pci_device_id forte_pci_ids[] = {
{ 0x1319, 0x0801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
{ 0, }
};
static struct pci_driver forte_pci_driver = {
.name = DRIVER_NAME,
.id_table = forte_pci_ids,
.probe = forte_probe,
.remove = forte_remove,
};
/**
* forte_init_module:
*
*/
static int __init
forte_init_module (void)
{
printk (KERN_INFO PFX DRIVER_VERSION "\n");
return pci_register_driver (&forte_pci_driver);
}
/**
* forte_cleanup_module:
*
*/
static void __exit
forte_cleanup_module (void)
{
pci_unregister_driver (&forte_pci_driver);
}
module_init(forte_init_module);
module_exit(forte_cleanup_module);
MODULE_AUTHOR("Martin K. Petersen <mkp@mkp.net>");
MODULE_DESCRIPTION("ForteMedia FM801 OSS Driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE (pci, forte_pci_ids);
| gpl-2.0 |
FelixMDenis/doxygen | src/dia.cpp | 6 | 2469 | /******************************************************************************
*
*
*
* Copyright (C) 1997-2014 by Dimitri van Heesch.
*
* Permission to use, copy, modify, and distribute this software and its
* documentation under the terms of the GNU General Public License is hereby
* granted. No representations are made about the suitability of this software
* for any purpose. It is provided "as is" without express or implied warranty.
* See the GNU General Public License for more details.
*
* Documents produced by Doxygen are derivative works derived from the
* input used in their production; they are not affected by this license.
*
*/
#include "dia.h"
#include "portable.h"
#include "config.h"
#include "message.h"
#include "util.h"
#include <qdir.h>
static const int maxCmdLine = 40960;
void writeDiaGraphFromFile(const char *inFile,const char *outDir,
const char *outFile,DiaOutputFormat format)
{
QCString absOutFile = outDir;
absOutFile+=portable_pathSeparator();
absOutFile+=outFile;
// chdir to the output dir, so dia can find the font file.
QCString oldDir = QDir::currentDirPath().utf8();
// go to the html output directory (i.e. path)
QDir::setCurrent(outDir);
//printf("Going to dir %s\n",QDir::currentDirPath().data());
QCString diaExe = Config_getString("DIA_PATH")+"dia"+portable_commandExtension();
QCString diaArgs;
QCString extension;
diaArgs+="-n ";
if (format==DIA_BITMAP)
{
diaArgs+="-t png-libart";
extension=".png";
}
else if (format==DIA_EPS)
{
diaArgs+="-t eps";
extension=".eps";
}
diaArgs+=" -e \"";
diaArgs+=outFile;
diaArgs+=extension+"\"";
diaArgs+=" \"";
diaArgs+=inFile;
diaArgs+="\"";
int exitCode;
//printf("*** running: %s %s outDir:%s %s\n",diaExe.data(),diaArgs.data(),outDir,outFile);
portable_sysTimerStart();
if ((exitCode=portable_system(diaExe,diaArgs,FALSE))!=0)
{
portable_sysTimerStop();
goto error;
}
portable_sysTimerStop();
if ( (format==DIA_EPS) && (Config_getBool("USE_PDFLATEX")) )
{
QCString epstopdfArgs(maxCmdLine);
epstopdfArgs.sprintf("\"%s.eps\" --outfile=\"%s.pdf\"",
outFile,outFile);
portable_sysTimerStart();
if (portable_system("epstopdf",epstopdfArgs)!=0)
{
err("Problems running epstopdf. Check your TeX installation!\n");
}
portable_sysTimerStop();
}
error:
QDir::setCurrent(oldDir);
}
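/*
 * Illustrative sketch (assumption; not from doxygen): for the bitmap
 * case the function above effectively composes
 *   dia -n -t png-libart -e "<outFile>.png" "<inFile>"
 * A stand-alone C rendering of that string building, with hypothetical
 * file names:
 */
#include <stdio.h>

int main(void)
{
	char cmd[256];
	const char *inFile = "class.dia", *outFile = "class";
	snprintf(cmd, sizeof cmd,
	         "dia -n -t png-libart -e \"%s.png\" \"%s\"", outFile, inFile);
	puts(cmd);	/* dia -n -t png-libart -e "class.png" "class.dia" */
	return 0;
}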
| gpl-2.0 |
andr7e/android_kernel_elephone_p6000 | kernel/drivers/staging/android/ion/compat_ion.c | 6 | 4801 | /*
* drivers/gpu/ion/compat_ion.c
*
* Copyright (C) 2013 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/ion.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include "compat_ion.h"
/* See include/linux/ion.h for the definition of these structs */
struct compat_ion_allocation_data {
compat_size_t len;
compat_size_t align;
compat_uint_t heap_id_mask;
compat_uint_t flags;
compat_int_t handle;
};
struct compat_ion_custom_data {
compat_uint_t cmd;
compat_ulong_t arg;
};
struct compat_ion_handle_data {
compat_int_t handle;
};
#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
struct compat_ion_allocation_data)
#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \
struct compat_ion_handle_data)
#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \
struct compat_ion_custom_data)
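/*
 * Illustrative sketch (assumption; not part of this file): _IOWR()
 * encodes sizeof(type) into the command number, so the same magic and
 * sequence number yield different values for the compat and native
 * structs. That is why a 32-bit caller arrives with
 * COMPAT_ION_IOC_ALLOC and must be translated by compat_ion_ioctl()
 * below. A small userspace demonstration:
 */
#include <stdio.h>
#include <linux/ioctl.h>

struct small_arg { int handle; };		/* stand-in compat struct */
struct large_arg { long handle; long pad; };	/* stand-in native struct */

int main(void)
{
	/* Same magic ('X') and number (0), but different encoded sizes. */
	printf("small=%#lx large=%#lx\n",
	       (unsigned long)_IOWR('X', 0, struct small_arg),
	       (unsigned long)_IOWR('X', 0, struct large_arg));
	return 0;
}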
static int compat_get_ion_allocation_data(
struct compat_ion_allocation_data __user *data32,
struct ion_allocation_data __user *data)
{
compat_size_t s;
compat_uint_t u;
compat_int_t i;
int err;
err = get_user(s, &data32->len);
err |= put_user(s, &data->len);
err |= get_user(s, &data32->align);
err |= put_user(s, &data->align);
err |= get_user(u, &data32->heap_id_mask);
err |= put_user(u, &data->heap_id_mask);
err |= get_user(u, &data32->flags);
err |= put_user(u, &data->flags);
err |= get_user(i, &data32->handle);
err |= put_user(i, &data->handle);
return err;
}
static int compat_get_ion_handle_data(
struct compat_ion_handle_data __user *data32,
struct ion_handle_data __user *data)
{
compat_int_t i;
int err;
err = get_user(i, &data32->handle);
err |= put_user(i, &data->handle);
return err;
}
static int compat_put_ion_allocation_data(
struct compat_ion_allocation_data __user *data32,
struct ion_allocation_data __user *data)
{
compat_size_t s;
compat_uint_t u;
compat_int_t i;
int err;
err = get_user(s, &data->len);
err |= put_user(s, &data32->len);
err |= get_user(s, &data->align);
err |= put_user(s, &data32->align);
err |= get_user(u, &data->heap_id_mask);
err |= put_user(u, &data32->heap_id_mask);
err |= get_user(u, &data->flags);
err |= put_user(u, &data32->flags);
err |= get_user(i, &data->handle);
err |= put_user(i, &data32->handle);
return err;
}
static int compat_get_ion_custom_data(
struct compat_ion_custom_data __user *data32,
struct ion_custom_data __user *data)
{
compat_uint_t cmd;
compat_ulong_t arg;
int err;
err = get_user(cmd, &data32->cmd);
err |= put_user(cmd, &data->cmd);
err |= get_user(arg, &data32->arg);
err |= put_user(arg, &data->arg);
return err;
}
long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
long ret;
if (!filp->f_op || !filp->f_op->unlocked_ioctl)
return -ENOTTY;
switch (cmd) {
case COMPAT_ION_IOC_ALLOC:
{
struct compat_ion_allocation_data __user *data32;
struct ion_allocation_data __user *data;
int err;
data32 = compat_ptr(arg);
data = compat_alloc_user_space(sizeof(*data));
if (data == NULL)
return -EFAULT;
err = compat_get_ion_allocation_data(data32, data);
if (err)
return err;
ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC,
(unsigned long)data);
err = compat_put_ion_allocation_data(data32, data);
return ret ? ret : err;
}
case COMPAT_ION_IOC_FREE:
{
struct compat_ion_handle_data __user *data32;
struct ion_handle_data __user *data;
int err;
data32 = compat_ptr(arg);
data = compat_alloc_user_space(sizeof(*data));
if (data == NULL)
return -EFAULT;
err = compat_get_ion_handle_data(data32, data);
if (err)
return err;
return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE,
(unsigned long)data);
}
case COMPAT_ION_IOC_CUSTOM: {
struct compat_ion_custom_data __user *data32;
struct ion_custom_data __user *data;
int err;
data32 = compat_ptr(arg);
data = compat_alloc_user_space(sizeof(*data));
if (data == NULL)
return -EFAULT;
err = compat_get_ion_custom_data(data32, data);
if (err)
return err;
return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM,
(unsigned long)data);
}
case ION_IOC_SHARE:
case ION_IOC_MAP:
case ION_IOC_IMPORT:
case ION_IOC_SYNC:
return filp->f_op->unlocked_ioctl(filp, cmd,
(unsigned long)compat_ptr(arg));
default:
return -ENOIOCTLCMD;
}
}
| gpl-2.0 |
rsalveti/xbmc-eden-flattened | xbmc/dialogs/GUIDialogSeekBar.cpp | 6 | 5167 | /*
* Copyright (C) 2005-2008 Team XBMC
* http://www.xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
* http://www.gnu.org/copyleft/gpl.html
*
*/
#include "GUIDialogSeekBar.h"
#include "guilib/GUISliderControl.h"
#include "GUIUserMessages.h"
#include "Application.h"
#include "GUIInfoManager.h"
#include "utils/TimeUtils.h"
#include "utils/StringUtils.h"
#define SEEK_BAR_DISPLAY_TIME 2000L
#define SEEK_BAR_SEEK_TIME 500L
#define POPUP_SEEK_SLIDER 401
#define POPUP_SEEK_LABEL 402
CGUIDialogSeekBar::CGUIDialogSeekBar(void)
: CGUIDialog(WINDOW_DIALOG_SEEK_BAR, "DialogSeekBar.xml")
{
m_fSeekPercentage = 0.0f;
m_bRequireSeek = false;
m_loadOnDemand = false; // the application class handles our resources
}
CGUIDialogSeekBar::~CGUIDialogSeekBar(void)
{
}
bool CGUIDialogSeekBar::OnAction(const CAction &action)
{
if (action.GetID() == ACTION_ANALOG_SEEK_FORWARD || action.GetID() == ACTION_ANALOG_SEEK_BACK)
{
if (!m_bRequireSeek)
{ // start of seeking
if (g_infoManager.GetTotalPlayTime())
m_fSeekPercentage = (float)g_infoManager.GetPlayTime() / g_infoManager.GetTotalPlayTime() * 0.1f;
else
m_fSeekPercentage = 0.0f;
// tell info manager that we have started a seekbar operation
m_bRequireSeek = true;
g_infoManager.SetSeeking(true);
}
// calculate our seek amount
if (g_application.m_pPlayer && !g_infoManager.m_performingSeek)
{
//100% over 1 second.
float speed = 100.0f;
if( action.GetRepeat() )
speed *= action.GetRepeat();
else
speed /= g_infoManager.GetFPS();
if (action.GetID() == ACTION_ANALOG_SEEK_FORWARD)
m_fSeekPercentage += action.GetAmount() * action.GetAmount() * speed;
else
m_fSeekPercentage -= action.GetAmount() * action.GetAmount() * speed;
if (m_fSeekPercentage > 100.0f) m_fSeekPercentage = 100.0f;
if (m_fSeekPercentage < 0.0f) m_fSeekPercentage = 0.0f;
CGUISliderControl *pSlider = (CGUISliderControl*)GetControl(POPUP_SEEK_SLIDER);
if (pSlider) pSlider->SetPercentage((int)m_fSeekPercentage); // Update our seek bar accordingly
}
ResetTimer();
return true;
}
return CGUIDialog::OnAction(action);
}
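/*
 * Illustrative sketch in C (assumption; not from XBMC): the analog-seek
 * handling above accumulates amount^2 * speed percent per event, where
 * speed starts at 100%/s and is multiplied by the key-repeat interval
 * when repeating or divided by the frame rate otherwise, then the
 * percentage is clamped to [0, 100].
 */
#include <stdio.h>

static float step_seek(float pct, float amount, float repeat, float fps)
{
	float speed = 100.0f;		/* 100% over one second */
	if (repeat != 0.0f)
		speed *= repeat;	/* scaled by repeat interval */
	else
		speed /= fps;		/* one step per rendered frame */
	pct += amount * amount * speed;
	if (pct > 100.0f)
		pct = 100.0f;
	if (pct < 0.0f)
		pct = 0.0f;
	return pct;
}

int main(void)
{
	/* one frame at 60 fps with a fully deflected stick */
	printf("%.3f\n", step_seek(50.0f, 1.0f, 0.0f, 60.0f)); /* ~51.667 */
	return 0;
}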
bool CGUIDialogSeekBar::OnMessage(CGUIMessage& message)
{
switch ( message.GetMessage() )
{
case GUI_MSG_WINDOW_INIT:
case GUI_MSG_WINDOW_DEINIT:
return CGUIDialog::OnMessage(message);
case GUI_MSG_LABEL_SET:
{
if (message.GetSenderId() == GetID() && message.GetControlId() == POPUP_SEEK_LABEL)
CGUIDialog::OnMessage(message);
}
break;
case GUI_MSG_PLAYBACK_STARTED:
{ // new song started while our window is up - update our details
m_bRequireSeek = false;
m_fSeekPercentage = 0.0f;
}
break;
}
return false; // don't process anything other than what we need!
}
void CGUIDialogSeekBar::ResetTimer()
{
m_timer = CTimeUtils::GetFrameTime();
}
void CGUIDialogSeekBar::FrameMove()
{
if (!g_application.m_pPlayer)
{
Close(true);
return;
}
// check if we should seek or exit
if (!g_infoManager.m_performingSeek && CTimeUtils::GetFrameTime() - m_timer > SEEK_BAR_DISPLAY_TIME)
g_infoManager.SetSeeking(false);
// render our controls
if (!m_bRequireSeek && !g_infoManager.m_performingSeek)
{ // position the bar at our current time
CGUISliderControl *pSlider = (CGUISliderControl*)GetControl(POPUP_SEEK_SLIDER);
if (pSlider && g_infoManager.GetTotalPlayTime())
pSlider->SetPercentage((int)((float)g_infoManager.GetPlayTime()/g_infoManager.GetTotalPlayTime() * 0.1f));
CGUIMessage msg(GUI_MSG_LABEL_SET, GetID(), POPUP_SEEK_LABEL);
msg.SetLabel(g_infoManager.GetCurrentPlayTime());
OnMessage(msg);
}
else
{
CGUIMessage msg(GUI_MSG_LABEL_SET, GetID(), POPUP_SEEK_LABEL);
msg.SetLabel(GetSeekTimeLabel());
OnMessage(msg);
}
// Check for seek timeout, and perform the seek
if (m_bRequireSeek && CTimeUtils::GetFrameTime() - m_timer > SEEK_BAR_SEEK_TIME)
{
g_infoManager.m_performingSeek = true;
double time = g_infoManager.GetTotalPlayTime() * m_fSeekPercentage * 0.01;
g_application.SeekTime(time);
m_bRequireSeek = false;
}
CGUIDialog::FrameMove();
}
CStdString CGUIDialogSeekBar::GetSeekTimeLabel(TIME_FORMAT format)
{
int time = (int)(g_infoManager.GetTotalPlayTime() * m_fSeekPercentage * 0.01f);
return StringUtils::SecondsToTimeString(time, format);
}
| gpl-2.0 |
Vegaviet-Dev/android_kernel_pantech_ef63-common | drivers/usb/gadget/u_bam.c | 6 | 40482 | /* Copyright (c) 2011-2013, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <mach/msm_smd.h>
#include <linux/netdevice.h>
#include <mach/bam_dmux.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>
#include <linux/termios.h>
#include <mach/usb_gadget_xport.h>
#include <linux/usb/msm_hsusb.h>
#include <mach/usb_bam.h>
#include "u_rmnet.h"
#define BAM_N_PORTS 1
#define BAM2BAM_N_PORTS 3
static struct workqueue_struct *gbam_wq;
static int n_bam_ports;
static int n_bam2bam_ports;
static unsigned n_tx_req_queued;
static unsigned bam_ch_ids[] = { 8 };
static const char *bam_ch_names[] = { "bam_dmux_ch_8" };
#define BAM_PENDING_LIMIT 220
#define BAM_MUX_TX_PKT_DROP_THRESHOLD 1000
#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD 500
#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD 300
#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT 1
#define BAM_MUX_HDR 8
#define BAM_MUX_RX_Q_SIZE 16
#define BAM_MUX_TX_Q_SIZE 200
#define BAM_MUX_RX_REQ_SIZE 2048 /* Must be 1KB aligned */
#define DL_INTR_THRESHOLD 20
#define FEATURE_PANTECH_ANDROID_USB_ZLP_WORKAROUND
#ifdef FEATURE_PANTECH_ANDROID_USB_ZLP_WORKAROUND
//#define USE_ZERO_FLAG //if zero_flag should be used, define this feature.
#endif
static unsigned int bam_pending_limit = BAM_PENDING_LIMIT;
module_param(bam_pending_limit, uint, S_IRUGO | S_IWUSR);
static unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);
static unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
static unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);
static unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
static unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR);
static unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR);
static unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR);
static unsigned int dl_intr_threshold = DL_INTR_THRESHOLD;
module_param(dl_intr_threshold, uint, S_IRUGO | S_IWUSR);
#define BAM_CH_OPENED BIT(0)
#define BAM_CH_READY BIT(1)
struct bam_ch_info {
unsigned long flags;
unsigned id;
struct list_head tx_idle;
struct sk_buff_head tx_skb_q;
struct list_head rx_idle;
struct sk_buff_head rx_skb_q;
struct gbam_port *port;
struct work_struct write_tobam_w;
struct work_struct write_tohost_w;
struct usb_request *rx_req;
struct usb_request *tx_req;
u32 src_pipe_idx;
u32 dst_pipe_idx;
u8 src_connection_idx;
u8 dst_connection_idx;
enum transport_type trans;
struct usb_bam_connect_ipa_params ipa_params;
/* stats */
unsigned int pending_with_bam;
unsigned int tohost_drp_cnt;
unsigned int tomodem_drp_cnt;
unsigned int tx_len;
unsigned int rx_len;
unsigned long to_modem;
unsigned long to_host;
unsigned int rx_flow_control_disable;
unsigned int rx_flow_control_enable;
unsigned int rx_flow_control_triggered;
unsigned int max_num_pkts_pending_with_bam;
};
struct gbam_port {
unsigned port_num;
spinlock_t port_lock_ul;
spinlock_t port_lock_dl;
struct grmnet *port_usb;
struct grmnet *gr;
struct bam_ch_info data_ch;
struct work_struct connect_w;
struct work_struct disconnect_w;
struct work_struct suspend_w;
struct work_struct resume_w;
};
static struct bam_portmaster {
struct gbam_port *port;
struct platform_driver pdrv;
} bam_ports[BAM_N_PORTS];
struct gbam_port *bam2bam_ports[BAM2BAM_N_PORTS];
static void gbam_start_rx(struct gbam_port *port);
static void gbam_start_endless_rx(struct gbam_port *port);
static void gbam_start_endless_tx(struct gbam_port *port);
static int gbam_peer_reset_cb(void *param);
/*---------------misc functions---------------- */
static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
{
struct usb_request *req;
while (!list_empty(head)) {
req = list_entry(head->next, struct usb_request, list);
list_del(&req->list);
usb_ep_free_request(ep, req);
}
}
static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
int num,
void (*cb)(struct usb_ep *ep, struct usb_request *),
gfp_t flags)
{
int i;
struct usb_request *req;
pr_debug("%s: ep:%p head:%p num:%d cb:%p", __func__,
ep, head, num, cb);
for (i = 0; i < num; i++) {
req = usb_ep_alloc_request(ep, flags);
if (!req) {
pr_debug("%s: req allocated:%d\n", __func__, i);
return list_empty(head) ? -ENOMEM : 0;
}
req->complete = cb;
list_add(&req->list, head);
}
return 0;
}
/*--------------------------------------------- */
/*------------data_path----------------------------*/
static void gbam_write_data_tohost(struct gbam_port *port)
{
unsigned long flags;
struct bam_ch_info *d = &port->data_ch;
struct sk_buff *skb;
int ret;
struct usb_request *req;
struct usb_ep *ep;
#if defined(FEATURE_PANTECH_ANDROID_USB_ZLP_WORKAROUND) && !defined(USE_ZERO_FLAG)
static bool useZLP = false;
#endif
spin_lock_irqsave(&port->port_lock_dl, flags);
if (!port->port_usb) {
spin_unlock_irqrestore(&port->port_lock_dl, flags);
return;
}
ep = port->port_usb->in;
while (!list_empty(&d->tx_idle)) {
#if defined(FEATURE_PANTECH_ANDROID_USB_ZLP_WORKAROUND) && !defined(USE_ZERO_FLAG)
if (useZLP) {
useZLP = false;
req = list_first_entry(&d->tx_idle,
struct usb_request,
list);
req->context = NULL;
req->buf = NULL;
req->length = 0;
} else {
skb = __skb_dequeue(&d->tx_skb_q);
if (!skb) {
spin_unlock_irqrestore(&port->port_lock_dl, flags);
return;
}
req = list_first_entry(&d->tx_idle,
struct usb_request,
list);
req->context = skb;
req->buf = skb->data;
req->length = skb->len;
}
#else
skb = __skb_dequeue(&d->tx_skb_q);
if (!skb) {
spin_unlock_irqrestore(&port->port_lock_dl, flags);
return;
}
req = list_first_entry(&d->tx_idle,
struct usb_request,
list);
req->context = skb;
req->buf = skb->data;
req->length = skb->len;
#endif
n_tx_req_queued++;
if (n_tx_req_queued == dl_intr_threshold) {
req->no_interrupt = 0;
n_tx_req_queued = 0;
} else {
req->no_interrupt = 1;
}
// LS2_USB 20130723 pooyi send zero length packet
if ((req->length % ep->maxpacket) == 0)
req->zero = 1;
else
req->zero = 0;
#if defined(FEATURE_PANTECH_ANDROID_USB_ZLP_WORKAROUND)
#ifdef USE_ZERO_FLAG
// LS2_USB 20130723 pooyi send zero length packet
if (req->length && ((req->length % ep->maxpacket) == 0))
req->zero = 1;
else
req->zero = 0;
#else
if (req->length && ((req->length % ep->maxpacket) == 0))
useZLP = true;
else
useZLP = false;
#endif
#else
/* Send ZLP in case packet length is multiple of maxpacksize */
req->zero = 1;
#endif
list_del(&req->list);
spin_unlock(&port->port_lock_dl);
ret = usb_ep_queue(ep, req, GFP_ATOMIC);
spin_lock(&port->port_lock_dl);
if (ret) {
pr_err("%s: usb epIn failed with %d\n", __func__, ret);
list_add(&req->list, &d->tx_idle);
dev_kfree_skb_any(skb);
break;
}
d->to_host++;
}
spin_unlock_irqrestore(&port->port_lock_dl, flags);
}
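/*
 * Illustrative sketch (assumption; not part of the driver): a USB IN
 * transfer whose length is an exact multiple of the endpoint's
 * wMaxPacketSize needs a trailing zero-length packet (ZLP) so the host
 * can tell where the transfer ends -- which is what the req->zero and
 * useZLP logic above arranges. The predicate in isolation:
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical helper: true when 'len' bytes must be followed by a ZLP. */
static bool needs_zlp(size_t len, size_t maxpacket)
{
	return len != 0 && (len % maxpacket) == 0;
}

int main(void)
{
	assert(needs_zlp(512, 512));	/* exact multiple: ZLP required */
	assert(!needs_zlp(500, 512));	/* short packet already terminates */
	assert(!needs_zlp(0, 512));	/* nothing sent: no ZLP */
	return 0;
}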
static void gbam_write_data_tohost_w(struct work_struct *w)
{
struct bam_ch_info *d;
struct gbam_port *port;
d = container_of(w, struct bam_ch_info, write_tohost_w);
port = d->port;
gbam_write_data_tohost(port);
}
void gbam_data_recv_cb(void *p, struct sk_buff *skb)
{
struct gbam_port *port = p;
struct bam_ch_info *d = &port->data_ch;
unsigned long flags;
if (!skb)
return;
pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
port, port->port_num, d, skb->len);
spin_lock_irqsave(&port->port_lock_dl, flags);
if (!port->port_usb) {
spin_unlock_irqrestore(&port->port_lock_dl, flags);
dev_kfree_skb_any(skb);
return;
}
if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
d->tohost_drp_cnt++;
if (printk_ratelimit())
pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
__func__, d->tohost_drp_cnt);
spin_unlock_irqrestore(&port->port_lock_dl, flags);
dev_kfree_skb_any(skb);
return;
}
__skb_queue_tail(&d->tx_skb_q, skb);
spin_unlock_irqrestore(&port->port_lock_dl, flags);
gbam_write_data_tohost(port);
}
void gbam_data_write_done(void *p, struct sk_buff *skb)
{
struct gbam_port *port = p;
struct bam_ch_info *d = &port->data_ch;
unsigned long flags;
if (!skb)
return;
dev_kfree_skb_any(skb);
spin_lock_irqsave(&port->port_lock_ul, flags);
d->pending_with_bam--;
pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
port, d, d->to_modem,
d->pending_with_bam, port->port_num);
spin_unlock_irqrestore(&port->port_lock_ul, flags);
queue_work(gbam_wq, &d->write_tobam_w);
}
static void gbam_data_write_tobam(struct work_struct *w)
{
struct gbam_port *port;
struct bam_ch_info *d;
struct sk_buff *skb;
unsigned long flags;
int ret;
int qlen;
d = container_of(w, struct bam_ch_info, write_tobam_w);
port = d->port;
spin_lock_irqsave(&port->port_lock_ul, flags);
if (!port->port_usb) {
spin_unlock_irqrestore(&port->port_lock_ul, flags);
return;
}
while (d->pending_with_bam < bam_pending_limit) {
skb = __skb_dequeue(&d->rx_skb_q);
if (!skb)
break;
d->pending_with_bam++;
d->to_modem++;
pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
port, d, d->to_modem, d->pending_with_bam,
port->port_num);
spin_unlock_irqrestore(&port->port_lock_ul, flags);
ret = msm_bam_dmux_write(d->id, skb);
spin_lock_irqsave(&port->port_lock_ul, flags);
if (ret) {
pr_debug("%s: write error:%d\n", __func__, ret);
d->pending_with_bam--;
d->to_modem--;
d->tomodem_drp_cnt++;
dev_kfree_skb_any(skb);
break;
}
if (d->pending_with_bam > d->max_num_pkts_pending_with_bam)
d->max_num_pkts_pending_with_bam = d->pending_with_bam;
}
qlen = d->rx_skb_q.qlen;
spin_unlock_irqrestore(&port->port_lock_ul, flags);
if (qlen < bam_mux_rx_fctrl_dis_thld) {
if (d->rx_flow_control_triggered) {
d->rx_flow_control_disable++;
d->rx_flow_control_triggered = 0;
}
gbam_start_rx(port);
}
}
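/*
 * Illustrative sketch (assumption; not from the driver): the enable
 * (bam_mux_rx_fctrl_en_thld) and disable (bam_mux_rx_fctrl_dis_thld)
 * watermarks used above form a hysteresis band -- RX queuing stops once
 * the backlog reaches the high mark and is restarted only after it
 * drains below the low one, so the path does not oscillate around a
 * single limit. The state machine in isolation:
 */
#include <stdio.h>

static int stopped;	/* hypothetical flow-control state */

static void update_flow(unsigned int qlen, unsigned int en, unsigned int dis)
{
	if (!stopped && qlen >= en)
		stopped = 1;	/* high watermark: throttle RX */
	else if (stopped && qlen < dis)
		stopped = 0;	/* drained below low watermark: resume */
}

int main(void)
{
	unsigned int samples[] = { 100, 500, 400, 299 };
	int i;

	for (i = 0; i < 4; i++) {
		update_flow(samples[i], 500, 300);
		printf("qlen=%u stopped=%d\n", samples[i], stopped);
	}
	return 0;
}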
/*-------------------------------------------------------------*/
static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
{
struct gbam_port *port = ep->driver_data;
struct bam_ch_info *d;
struct sk_buff *skb = req->context;
int status = req->status;
switch (status) {
case 0:
/* successful completion */
break;
case -ECONNRESET:
case -ESHUTDOWN:
/* connection gone */
dev_kfree_skb_any(skb);
usb_ep_free_request(ep, req);
return;
default:
pr_err("%s: data tx ep error %d\n",
__func__, status);
break;
}
#if defined(FEATURE_PANTECH_ANDROID_USB_ZLP_WORKAROUND) && !defined(USE_ZERO_FLAG)
if (skb)
dev_kfree_skb_any(skb);
#else
dev_kfree_skb_any(skb);
#endif
if (!port)
return;
spin_lock(&port->port_lock_dl);
d = &port->data_ch;
list_add_tail(&req->list, &d->tx_idle);
spin_unlock(&port->port_lock_dl);
queue_work(gbam_wq, &d->write_tohost_w);
}
static void
gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
struct gbam_port *port = ep->driver_data;
struct bam_ch_info *d = &port->data_ch;
struct sk_buff *skb = req->context;
int status = req->status;
int queue = 0;
switch (status) {
case 0:
skb_put(skb, req->actual);
queue = 1;
break;
case -ECONNRESET:
case -ESHUTDOWN:
/* cable disconnection */
dev_kfree_skb_any(skb);
req->buf = 0;
usb_ep_free_request(ep, req);
return;
default:
if (printk_ratelimit())
pr_err("%s: %s response error %d, %d/%d\n",
__func__, ep->name, status,
req->actual, req->length);
dev_kfree_skb_any(skb);
break;
}
spin_lock(&port->port_lock_ul);
if (queue) {
__skb_queue_tail(&d->rx_skb_q, skb);
queue_work(gbam_wq, &d->write_tobam_w);
}
/* TODO: Handle flow control gracefully by having
* a callback mechanism from the bam driver
*/
if (bam_mux_rx_fctrl_support &&
d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) {
if (!d->rx_flow_control_triggered) {
d->rx_flow_control_triggered = 1;
d->rx_flow_control_enable++;
}
list_add_tail(&req->list, &d->rx_idle);
spin_unlock(&port->port_lock_ul);
return;
}
spin_unlock(&port->port_lock_ul);
skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
if (!skb) {
spin_lock(&port->port_lock_ul);
list_add_tail(&req->list, &d->rx_idle);
spin_unlock(&port->port_lock_ul);
return;
}
skb_reserve(skb, BAM_MUX_HDR);
req->buf = skb->data;
req->length = bam_mux_rx_req_size;
req->context = skb;
status = usb_ep_queue(ep, req, GFP_ATOMIC);
if (status) {
dev_kfree_skb_any(skb);
if (printk_ratelimit())
pr_err("%s: data rx enqueue err %d\n",
__func__, status);
spin_lock(&port->port_lock_ul);
list_add_tail(&req->list, &d->rx_idle);
spin_unlock(&port->port_lock_ul);
}
}
static void gbam_endless_rx_complete(struct usb_ep *ep, struct usb_request *req)
{
int status = req->status;
pr_debug("%s status: %d\n", __func__, status);
}
static void gbam_endless_tx_complete(struct usb_ep *ep, struct usb_request *req)
{
int status = req->status;
pr_debug("%s status: %d\n", __func__, status);
}
static void gbam_start_rx(struct gbam_port *port)
{
struct usb_request *req;
struct bam_ch_info *d;
struct usb_ep *ep;
unsigned long flags;
int ret;
struct sk_buff *skb;
spin_lock_irqsave(&port->port_lock_ul, flags);
if (!port->port_usb) {
spin_unlock_irqrestore(&port->port_lock_ul, flags);
return;
}
d = &port->data_ch;
ep = port->port_usb->out;
while (port->port_usb && !list_empty(&d->rx_idle)) {
if (bam_mux_rx_fctrl_support &&
d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld)
break;
req = list_first_entry(&d->rx_idle, struct usb_request, list);
skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
if (!skb)
break;
skb_reserve(skb, BAM_MUX_HDR);
list_del(&req->list);
req->buf = skb->data;
req->length = bam_mux_rx_req_size;
req->context = skb;
spin_unlock_irqrestore(&port->port_lock_ul, flags);
ret = usb_ep_queue(ep, req, GFP_ATOMIC);
spin_lock_irqsave(&port->port_lock_ul, flags);
if (ret) {
dev_kfree_skb_any(skb);
if (printk_ratelimit())
pr_err("%s: rx queue failed %d\n",
__func__, ret);
if (port->port_usb)
list_add(&req->list, &d->rx_idle);
else
usb_ep_free_request(ep, req);
break;
}
}
spin_unlock_irqrestore(&port->port_lock_ul, flags);
}
static void gbam_start_endless_rx(struct gbam_port *port)
{
struct bam_ch_info *d = &port->data_ch;
int status;
spin_lock(&port->port_lock_ul);
if (!port->port_usb) {
spin_unlock(&port->port_lock_ul);
pr_err("%s: port->port_usb is NULL\n", __func__);
return;
}
pr_debug("%s: enqueue\n", __func__);
status = usb_ep_queue(port->port_usb->out, d->rx_req, GFP_ATOMIC);
if (status)
pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
spin_unlock(&port->port_lock_ul);
}
static void gbam_start_endless_tx(struct gbam_port *port)
{
struct bam_ch_info *d = &port->data_ch;
int status;
spin_lock(&port->port_lock_dl);
if (!port->port_usb) {
spin_unlock(&port->port_lock_dl);
pr_err("%s: port->port_usb is NULL\n", __func__);
return;
}
pr_debug("%s: enqueue\n", __func__);
status = usb_ep_queue(port->port_usb->in, d->tx_req, GFP_ATOMIC);
if (status)
pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
spin_unlock(&port->port_lock_dl);
}
static void gbam_stop_endless_rx(struct gbam_port *port)
{
struct bam_ch_info *d = &port->data_ch;
int status;
spin_lock(&port->port_lock_ul);
if (!port->port_usb) {
spin_unlock(&port->port_lock_ul);
pr_err("%s: port->port_usb is NULL\n", __func__);
return;
}
pr_debug("%s: dequeue\n", __func__);
status = usb_ep_dequeue(port->port_usb->out, d->rx_req);
if (status)
pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
spin_unlock(&port->port_lock_ul);
}
static void gbam_stop_endless_tx(struct gbam_port *port)
{
struct bam_ch_info *d = &port->data_ch;
int status;
spin_lock(&port->port_lock_dl);
if (!port->port_usb) {
spin_unlock(&port->port_lock_dl);
pr_err("%s: port->port_usb is NULL\n", __func__);
return;
}
pr_debug("%s: dequeue\n", __func__);
status = usb_ep_dequeue(port->port_usb->in, d->tx_req);
if (status)
pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
spin_unlock(&port->port_lock_dl);
}
static void gbam_start(void *param, enum usb_bam_pipe_dir dir)
{
struct gbam_port *port = param;
if (dir == USB_TO_PEER_PERIPHERAL)
gbam_start_endless_rx(port);
else
gbam_start_endless_tx(port);
}
static void gbam_stop(void *param, enum usb_bam_pipe_dir dir)
{
struct gbam_port *port = param;
if (dir == USB_TO_PEER_PERIPHERAL)
gbam_stop_endless_rx(port);
else
gbam_stop_endless_tx(port);
}
static void gbam_start_io(struct gbam_port *port)
{
unsigned long flags;
struct usb_ep *ep;
int ret;
struct bam_ch_info *d;
pr_debug("%s: port:%p\n", __func__, port);
spin_lock_irqsave(&port->port_lock_ul, flags);
if (!port->port_usb) {
spin_unlock_irqrestore(&port->port_lock_ul, flags);
return;
}
d = &port->data_ch;
ep = port->port_usb->out;
ret = gbam_alloc_requests(ep, &d->rx_idle, bam_mux_rx_q_size,
gbam_epout_complete, GFP_ATOMIC);
if (ret) {
pr_err("%s: rx req allocation failed\n", __func__);
spin_unlock_irqrestore(&port->port_lock_ul, flags);
return;
}
spin_unlock_irqrestore(&port->port_lock_ul, flags);
spin_lock_irqsave(&port->port_lock_dl, flags);
if (!port->port_usb) {
gbam_free_requests(ep, &d->rx_idle);
spin_unlock_irqrestore(&port->port_lock_dl, flags);
return;
}
ep = port->port_usb->in;
ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size,
gbam_epin_complete, GFP_ATOMIC);
if (ret) {
pr_err("%s: tx req allocation failed\n", __func__);
gbam_free_requests(ep, &d->rx_idle);
spin_unlock_irqrestore(&port->port_lock_dl, flags);
return;
}
spin_unlock_irqrestore(&port->port_lock_dl, flags);
/* queue out requests */
gbam_start_rx(port);
}
static void gbam_notify(void *p, int event, unsigned long data)
{
switch (event) {
case BAM_DMUX_RECEIVE:
gbam_data_recv_cb(p, (struct sk_buff *)(data));
break;
case BAM_DMUX_WRITE_DONE:
gbam_data_write_done(p, (struct sk_buff *)(data));
break;
}
}
static void gbam_free_buffers(struct gbam_port *port)
{
struct sk_buff *skb;
unsigned long flags;
struct bam_ch_info *d;
spin_lock_irqsave(&port->port_lock_ul, flags);
spin_lock(&port->port_lock_dl);
if (!port->port_usb)
goto free_buf_out;
d = &port->data_ch;
gbam_free_requests(port->port_usb->in, &d->tx_idle);
gbam_free_requests(port->port_usb->out, &d->rx_idle);
while ((skb = __skb_dequeue(&d->tx_skb_q)))
dev_kfree_skb_any(skb);
while ((skb = __skb_dequeue(&d->rx_skb_q)))
dev_kfree_skb_any(skb);
free_buf_out:
spin_unlock(&port->port_lock_dl);
spin_unlock_irqrestore(&port->port_lock_ul, flags);
}
static void gbam_disconnect_work(struct work_struct *w)
{
struct gbam_port *port =
container_of(w, struct gbam_port, disconnect_w);
struct bam_ch_info *d = &port->data_ch;
if (!test_bit(BAM_CH_OPENED, &d->flags))
return;
msm_bam_dmux_close(d->id);
clear_bit(BAM_CH_OPENED, &d->flags);
}
static void gbam2bam_disconnect_work(struct work_struct *w)
{
struct gbam_port *port =
container_of(w, struct gbam_port, disconnect_w);
struct bam_ch_info *d = &port->data_ch;
int ret;
if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
ret = usb_bam_disconnect_ipa(&d->ipa_params);
if (ret)
pr_err("%s: usb_bam_disconnect_ipa failed: err:%d\n",
__func__, ret);
teth_bridge_disconnect();
}
}
static void gbam_connect_work(struct work_struct *w)
{
struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
struct bam_ch_info *d = &port->data_ch;
int ret;
unsigned long flags;
spin_lock_irqsave(&port->port_lock_ul, flags);
spin_lock(&port->port_lock_dl);
if (!port->port_usb) {
spin_unlock(&port->port_lock_dl);
spin_unlock_irqrestore(&port->port_lock_ul, flags);
return;
}
spin_unlock(&port->port_lock_dl);
spin_unlock_irqrestore(&port->port_lock_ul, flags);
if (!test_bit(BAM_CH_READY, &d->flags))
return;
ret = msm_bam_dmux_open(d->id, port, gbam_notify);
if (ret) {
pr_err("%s: unable to open bam ch:%d err:%d\n",
__func__, d->id, ret);
return;
}
set_bit(BAM_CH_OPENED, &d->flags);
gbam_start_io(port);
pr_debug("%s: done\n", __func__);
}
static void gbam2bam_connect_work(struct work_struct *w)
{
struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
struct teth_bridge_connect_params connect_params;
struct bam_ch_info *d = &port->data_ch;
u32 sps_params;
ipa_notify_cb usb_notify_cb;
void *priv;
int ret;
unsigned long flags;
if (d->trans == USB_GADGET_XPORT_BAM2BAM) {
usb_bam_reset_complete();
ret = usb_bam_connect(d->src_connection_idx, &d->src_pipe_idx);
if (ret) {
pr_err("%s: usb_bam_connect (src) failed: err:%d\n",
__func__, ret);
return;
}
ret = usb_bam_connect(d->dst_connection_idx, &d->dst_pipe_idx);
if (ret) {
pr_err("%s: usb_bam_connect (dst) failed: err:%d\n",
__func__, ret);
return;
}
} else if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
ret = teth_bridge_init(&usb_notify_cb, &priv);
if (ret) {
pr_err("%s:teth_bridge_init() failed\n", __func__);
return;
}
d->ipa_params.notify = usb_notify_cb;
d->ipa_params.priv = priv;
d->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
d->ipa_params.client = IPA_CLIENT_USB_PROD;
d->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
ret = usb_bam_connect_ipa(&d->ipa_params);
if (ret) {
pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
__func__, ret);
return;
}
d->ipa_params.client = IPA_CLIENT_USB_CONS;
d->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
ret = usb_bam_connect_ipa(&d->ipa_params);
if (ret) {
pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
__func__, ret);
return;
}
connect_params.ipa_usb_pipe_hdl = d->ipa_params.prod_clnt_hdl;
connect_params.usb_ipa_pipe_hdl = d->ipa_params.cons_clnt_hdl;
connect_params.tethering_mode = TETH_TETHERING_MODE_RMNET;
ret = teth_bridge_connect(&connect_params);
if (ret) {
pr_err("%s:teth_bridge_connect() failed\n", __func__);
return;
}
}
spin_lock_irqsave(&port->port_lock_ul, flags);
spin_lock(&port->port_lock_dl);
if (!port->port_usb) {
pr_debug("%s: usb cable is disconnected, exiting\n", __func__);
spin_unlock(&port->port_lock_dl);
spin_unlock_irqrestore(&port->port_lock_ul, flags);
return;
}
d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_ATOMIC);
if (!d->rx_req) {
spin_unlock(&port->port_lock_dl);
spin_unlock_irqrestore(&port->port_lock_ul, flags);
pr_err("%s: out of memory\n", __func__);
return;
}
d->rx_req->context = port;
d->rx_req->complete = gbam_endless_rx_complete;
d->rx_req->length = 0;
d->rx_req->no_interrupt = 1;
sps_params = (MSM_SPS_MODE | d->src_pipe_idx |
MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
d->rx_req->udc_priv = sps_params;
d->tx_req = usb_ep_alloc_request(port->port_usb->in, GFP_ATOMIC);
spin_unlock(&port->port_lock_dl);
spin_unlock_irqrestore(&port->port_lock_ul, flags);
if (!d->tx_req) {
pr_err("%s: out of memory\n", __func__);
return;
}
d->tx_req->context = port;
d->tx_req->complete = gbam_endless_tx_complete;
d->tx_req->length = 0;
d->tx_req->no_interrupt = 1;
sps_params = (MSM_SPS_MODE | d->dst_pipe_idx |
MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
d->tx_req->udc_priv = sps_params;
/* queue in & out requests */
gbam_start_endless_rx(port);
gbam_start_endless_tx(port);
if (d->trans == USB_GADGET_XPORT_BAM2BAM && port->port_num == 0) {
/* Register for peer reset callback */
usb_bam_register_peer_reset_cb(gbam_peer_reset_cb, port);
ret = usb_bam_client_ready(true);
if (ret) {
pr_err("%s: usb_bam_client_ready failed: err:%d\n",
__func__, ret);
return;
}
}
pr_debug("%s: done\n", __func__);
}
static int gbam_wake_cb(void *param)
{
struct gbam_port *port = (struct gbam_port *)param;
struct bam_ch_info *d;
struct f_rmnet *dev;
dev = port_to_rmnet(port->gr);
d = &port->data_ch;
pr_debug("%s: woken up by peer\n", __func__);
return usb_gadget_wakeup(dev->cdev->gadget);
}
static void gbam2bam_suspend_work(struct work_struct *w)
{
struct gbam_port *port = container_of(w, struct gbam_port, suspend_w);
struct bam_ch_info *d = &port->data_ch;
pr_debug("%s: suspend work started\n", __func__);
usb_bam_register_wake_cb(d->dst_connection_idx, gbam_wake_cb, port);
if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
usb_bam_register_start_stop_cbs(gbam_start, gbam_stop, port);
usb_bam_suspend(&d->ipa_params);
}
}
static void gbam2bam_resume_work(struct work_struct *w)
{
struct gbam_port *port = container_of(w, struct gbam_port, resume_w);
struct bam_ch_info *d = &port->data_ch;
pr_debug("%s: resume work started\n", __func__);
usb_bam_register_wake_cb(d->dst_connection_idx, NULL, NULL);
if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA)
usb_bam_resume(&d->ipa_params);
}
static int gbam_peer_reset_cb(void *param)
{
struct gbam_port *port = (struct gbam_port *)param;
struct bam_ch_info *d;
struct f_rmnet *dev;
struct usb_gadget *gadget;
int ret;
bool reenable_eps = false;
dev = port_to_rmnet(port->gr);
d = &port->data_ch;
gadget = dev->cdev->gadget;
pr_debug("%s: reset by peer\n", __func__);
/* Disable the relevant EPs if currently EPs are enabled */
if (port->port_usb && port->port_usb->in &&
port->port_usb->in->driver_data) {
usb_ep_disable(port->port_usb->out);
usb_ep_disable(port->port_usb->in);
port->port_usb->in->driver_data = NULL;
port->port_usb->out->driver_data = NULL;
reenable_eps = true;
}
/* Disable BAM */
msm_hw_bam_disable(1);
/* Reset BAM */
ret = usb_bam_a2_reset(0);
if (ret) {
pr_err("%s: BAM reset failed %d\n", __func__, ret);
goto reenable_eps;
}
/* Enable BAM */
msm_hw_bam_disable(0);
reenable_eps:
/* Re-Enable the relevant EPs, if EPs were originally enabled */
if (reenable_eps) {
ret = usb_ep_enable(port->port_usb->in);
if (ret) {
pr_err("%s: usb_ep_enable failed eptype:IN ep:%p\n",
__func__, port->port_usb->in);
return ret;
}
port->port_usb->in->driver_data = port;
ret = usb_ep_enable(port->port_usb->out);
if (ret) {
pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p\n",
__func__, port->port_usb->out);
port->port_usb->in->driver_data = 0;
return ret;
}
port->port_usb->out->driver_data = port;
gbam_start_endless_rx(port);
gbam_start_endless_tx(port);
}
/* Unregister the peer reset callback */
if (d->trans == USB_GADGET_XPORT_BAM2BAM && port->port_num == 0)
usb_bam_register_peer_reset_cb(NULL, NULL);
return 0;
}
/* BAM data channel ready, allow attempt to open */
static int gbam_data_ch_probe(struct platform_device *pdev)
{
struct gbam_port *port;
struct bam_ch_info *d;
int i;
unsigned long flags;
pr_debug("%s: name:%s\n", __func__, pdev->name);
for (i = 0; i < n_bam_ports; i++) {
port = bam_ports[i].port;
d = &port->data_ch;
if (!strncmp(bam_ch_names[i], pdev->name,
BAM_DMUX_CH_NAME_MAX_LEN)) {
set_bit(BAM_CH_READY, &d->flags);
/* if usb is online, try opening bam_ch */
spin_lock_irqsave(&port->port_lock_ul, flags);
spin_lock(&port->port_lock_dl);
if (port->port_usb)
queue_work(gbam_wq, &port->connect_w);
spin_unlock(&port->port_lock_dl);
spin_unlock_irqrestore(&port->port_lock_ul, flags);
break;
}
}
return 0;
}
/* BAM data channel went inactive, so close it */
static int gbam_data_ch_remove(struct platform_device *pdev)
{
struct gbam_port *port;
struct bam_ch_info *d;
struct usb_ep *ep_in = NULL;
struct usb_ep *ep_out = NULL;
unsigned long flags;
int i;
pr_debug("%s: name:%s\n", __func__, pdev->name);
for (i = 0; i < n_bam_ports; i++) {
if (!strncmp(bam_ch_names[i], pdev->name,
BAM_DMUX_CH_NAME_MAX_LEN)) {
port = bam_ports[i].port;
d = &port->data_ch;
spin_lock_irqsave(&port->port_lock_ul, flags);
spin_lock(&port->port_lock_dl);
if (port->port_usb) {
ep_in = port->port_usb->in;
ep_out = port->port_usb->out;
}
spin_unlock(&port->port_lock_dl);
spin_unlock_irqrestore(&port->port_lock_ul, flags);
if (ep_in)
usb_ep_fifo_flush(ep_in);
if (ep_out)
usb_ep_fifo_flush(ep_out);
gbam_free_buffers(port);
msm_bam_dmux_close(d->id);
/* bam dmux will free all pending skbs */
d->pending_with_bam = 0;
clear_bit(BAM_CH_READY, &d->flags);
clear_bit(BAM_CH_OPENED, &d->flags);
}
}
return 0;
}
static void gbam_port_free(int portno)
{
struct gbam_port *port = bam_ports[portno].port;
struct platform_driver *pdrv = &bam_ports[portno].pdrv;
if (port) {
kfree(port);
platform_driver_unregister(pdrv);
}
}
static void gbam2bam_port_free(int portno)
{
struct gbam_port *port = bam2bam_ports[portno];
kfree(port);
}
static int gbam_port_alloc(int portno)
{
struct gbam_port *port;
struct bam_ch_info *d;
struct platform_driver *pdrv;
port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
if (!port)
return -ENOMEM;
port->port_num = portno;
/* port initialization */
spin_lock_init(&port->port_lock_ul);
spin_lock_init(&port->port_lock_dl);
INIT_WORK(&port->connect_w, gbam_connect_work);
INIT_WORK(&port->disconnect_w, gbam_disconnect_work);
/* data ch */
d = &port->data_ch;
d->port = port;
INIT_LIST_HEAD(&d->tx_idle);
INIT_LIST_HEAD(&d->rx_idle);
INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
INIT_WORK(&d->write_tohost_w, gbam_write_data_tohost_w);
skb_queue_head_init(&d->tx_skb_q);
skb_queue_head_init(&d->rx_skb_q);
d->id = bam_ch_ids[portno];
bam_ports[portno].port = port;
pdrv = &bam_ports[portno].pdrv;
pdrv->probe = gbam_data_ch_probe;
pdrv->remove = gbam_data_ch_remove;
pdrv->driver.name = bam_ch_names[portno];
pdrv->driver.owner = THIS_MODULE;
platform_driver_register(pdrv);
pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
return 0;
}
static int gbam2bam_port_alloc(int portno)
{
struct gbam_port *port;
struct bam_ch_info *d;
port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
if (!port)
return -ENOMEM;
port->port_num = portno;
/* port initialization */
spin_lock_init(&port->port_lock_ul);
spin_lock_init(&port->port_lock_dl);
INIT_WORK(&port->connect_w, gbam2bam_connect_work);
INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);
INIT_WORK(&port->suspend_w, gbam2bam_suspend_work);
INIT_WORK(&port->resume_w, gbam2bam_resume_work);
/* data ch */
d = &port->data_ch;
d->port = port;
bam2bam_ports[portno] = port;
pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
return 0;
}
#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE 1024
static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
struct gbam_port *port;
struct bam_ch_info *d;
char *buf;
unsigned long flags;
int ret;
int i;
int temp = 0;
buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
for (i = 0; i < n_bam_ports; i++) {
port = bam_ports[i].port;
if (!port)
continue;
spin_lock_irqsave(&port->port_lock_ul, flags);
spin_lock(&port->port_lock_dl);
d = &port->data_ch;
temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
"#PORT:%d port:%p data_ch:%p#\n"
"dpkts_to_usbhost: %lu\n"
"dpkts_to_modem: %lu\n"
"dpkts_pwith_bam: %u\n"
"to_usbhost_dcnt: %u\n"
"tomodem__dcnt: %u\n"
"rx_flow_control_disable_count: %u\n"
"rx_flow_control_enable_count: %u\n"
"rx_flow_control_triggered: %u\n"
"max_num_pkts_pending_with_bam: %u\n"
"tx_buf_len: %u\n"
"rx_buf_len: %u\n"
"data_ch_open: %d\n"
"data_ch_ready: %d\n",
i, port, &port->data_ch,
d->to_host, d->to_modem,
d->pending_with_bam,
d->tohost_drp_cnt, d->tomodem_drp_cnt,
d->rx_flow_control_disable,
d->rx_flow_control_enable,
d->rx_flow_control_triggered,
d->max_num_pkts_pending_with_bam,
d->tx_skb_q.qlen, d->rx_skb_q.qlen,
test_bit(BAM_CH_OPENED, &d->flags),
test_bit(BAM_CH_READY, &d->flags));
spin_unlock(&port->port_lock_dl);
spin_unlock_irqrestore(&port->port_lock_ul, flags);
}
ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
kfree(buf);
return ret;
}
static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct gbam_port *port;
struct bam_ch_info *d;
int i;
unsigned long flags;
for (i = 0; i < n_bam_ports; i++) {
port = bam_ports[i].port;
if (!port)
continue;
spin_lock_irqsave(&port->port_lock_ul, flags);
spin_lock(&port->port_lock_dl);
d = &port->data_ch;
d->to_host = 0;
d->to_modem = 0;
d->pending_with_bam = 0;
d->tohost_drp_cnt = 0;
d->tomodem_drp_cnt = 0;
d->rx_flow_control_disable = 0;
d->rx_flow_control_enable = 0;
d->rx_flow_control_triggered = 0;
d->max_num_pkts_pending_with_bam = 0;
spin_unlock(&port->port_lock_dl);
spin_unlock_irqrestore(&port->port_lock_ul, flags);
}
return count;
}
const struct file_operations gbam_stats_ops = {
.read = gbam_read_stats,
.write = gbam_reset_stats,
};
struct dentry *gbam_dent;
static void gbam_debugfs_init(void)
{
struct dentry *dfile;
gbam_dent = debugfs_create_dir("usb_rmnet", 0);
if (!gbam_dent || IS_ERR(gbam_dent))
return;
dfile = debugfs_create_file("status", 0444, gbam_dent, 0,
&gbam_stats_ops);
if (!dfile || IS_ERR(dfile)) {
debugfs_remove(gbam_dent);
gbam_dent = NULL;
return;
}
}
static void gbam_debugfs_remove(void)
{
debugfs_remove_recursive(gbam_dent);
}
#else
static inline void gbam_debugfs_init(void) {}
static inline void gbam_debugfs_remove(void) {}
#endif
void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans)
{
struct gbam_port *port;
unsigned long flags;
struct bam_ch_info *d;
pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
if (trans == USB_GADGET_XPORT_BAM &&
port_num >= n_bam_ports) {
pr_err("%s: invalid bam portno#%d\n",
__func__, port_num);
return;
}
if ((trans == USB_GADGET_XPORT_BAM2BAM ||
trans == USB_GADGET_XPORT_BAM2BAM_IPA) &&
port_num >= n_bam2bam_ports) {
pr_err("%s: invalid bam2bam portno#%d\n",
__func__, port_num);
return;
}
if (!gr) {
pr_err("%s: grmnet port is null\n", __func__);
return;
}
if (trans == USB_GADGET_XPORT_BAM)
port = bam_ports[port_num].port;
else
port = bam2bam_ports[port_num];
d = &port->data_ch;
port->gr = gr;
if (trans == USB_GADGET_XPORT_BAM)
gbam_free_buffers(port);
spin_lock_irqsave(&port->port_lock_ul, flags);
spin_lock(&port->port_lock_dl);
port->port_usb = 0;
n_tx_req_queued = 0;
spin_unlock(&port->port_lock_dl);
spin_unlock_irqrestore(&port->port_lock_ul, flags);
/* disable endpoints */
usb_ep_disable(gr->out);
usb_ep_disable(gr->in);
gr->in->driver_data = NULL;
gr->out->driver_data = NULL;
if (trans == USB_GADGET_XPORT_BAM ||
trans == USB_GADGET_XPORT_BAM2BAM_IPA)
queue_work(gbam_wq, &port->disconnect_w);
else if (trans == USB_GADGET_XPORT_BAM2BAM) {
if (port_num == 0) {
if (usb_bam_client_ready(false)) {
pr_err("%s: usb_bam_client_ready failed\n",
__func__);
}
}
}
}
int gbam_connect(struct grmnet *gr, u8 port_num,
enum transport_type trans, u8 src_connection_idx,
u8 dst_connection_idx)
{
struct gbam_port *port;
struct bam_ch_info *d;
int ret;
unsigned long flags;
pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
if (trans == USB_GADGET_XPORT_BAM && port_num >= n_bam_ports) {
pr_err("%s: invalid portno#%d\n", __func__, port_num);
return -ENODEV;
}
if ((trans == USB_GADGET_XPORT_BAM2BAM ||
trans == USB_GADGET_XPORT_BAM2BAM_IPA)
&& port_num >= n_bam2bam_ports) {
pr_err("%s: invalid portno#%d\n", __func__, port_num);
return -ENODEV;
}
if (!gr) {
pr_err("%s: grmnet port is null\n", __func__);
return -ENODEV;
}
if (trans == USB_GADGET_XPORT_BAM)
port = bam_ports[port_num].port;
else
port = bam2bam_ports[port_num];
d = &port->data_ch;
ret = usb_ep_enable(gr->in);
if (ret) {
pr_err("%s: usb_ep_enable failed eptype:IN ep:%p\n",
__func__, gr->in);
return ret;
}
gr->in->driver_data = port;
ret = usb_ep_enable(gr->out);
if (ret) {
pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p\n",
__func__, gr->out);
gr->in->driver_data = 0;
return ret;
}
gr->out->driver_data = port;
spin_lock_irqsave(&port->port_lock_ul, flags);
spin_lock(&port->port_lock_dl);
port->port_usb = gr;
if (trans == USB_GADGET_XPORT_BAM) {
d->to_host = 0;
d->to_modem = 0;
d->pending_with_bam = 0;
d->tohost_drp_cnt = 0;
d->tomodem_drp_cnt = 0;
d->rx_flow_control_disable = 0;
d->rx_flow_control_enable = 0;
d->rx_flow_control_triggered = 0;
d->max_num_pkts_pending_with_bam = 0;
}
spin_unlock(&port->port_lock_dl);
spin_unlock_irqrestore(&port->port_lock_ul, flags);
if (trans == USB_GADGET_XPORT_BAM2BAM) {
port->gr = gr;
d->src_connection_idx = src_connection_idx;
d->dst_connection_idx = dst_connection_idx;
} else if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
port->gr = gr;
d->ipa_params.src_pipe = &(d->src_pipe_idx);
d->ipa_params.dst_pipe = &(d->dst_pipe_idx);
d->ipa_params.src_idx = src_connection_idx;
d->ipa_params.dst_idx = dst_connection_idx;
}
d->trans = trans;
queue_work(gbam_wq, &port->connect_w);
return 0;
}
int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port)
{
int i;
int ret;
pr_debug("%s: requested BAM ports:%d and BAM2BAM ports:%d\n",
__func__, no_bam_port, no_bam2bam_port);
if ((!no_bam_port && !no_bam2bam_port) || no_bam_port > BAM_N_PORTS
|| no_bam2bam_port > BAM2BAM_N_PORTS) {
pr_err("%s: Invalid num of ports count:%d,%d\n",
__func__, no_bam_port, no_bam2bam_port);
return -EINVAL;
}
gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
if (!gbam_wq) {
pr_err("%s: Unable to create workqueue gbam_wq\n",
__func__);
return -ENOMEM;
}
for (i = 0; i < no_bam_port; i++) {
n_bam_ports++;
ret = gbam_port_alloc(i);
if (ret) {
n_bam_ports--;
pr_err("%s: Unable to alloc port:%d\n", __func__, i);
goto free_bam_ports;
}
}
for (i = 0; i < no_bam2bam_port; i++) {
n_bam2bam_ports++;
ret = gbam2bam_port_alloc(i);
if (ret) {
n_bam2bam_ports--;
pr_err("%s: Unable to alloc port:%d\n", __func__, i);
goto free_bam_ports;
}
}
gbam_debugfs_init();
return 0;
free_bam_ports:
for (i = 0; i < n_bam_ports; i++)
gbam_port_free(i);
for (i = 0; i < n_bam2bam_ports; i++)
gbam2bam_port_free(i);
destroy_workqueue(gbam_wq);
return ret;
}
void gbam_cleanup(void)
{
gbam_debugfs_remove();
}
void gbam_suspend(struct grmnet *gr, u8 port_num, enum transport_type trans)
{
struct gbam_port *port;
struct bam_ch_info *d;
if (trans != USB_GADGET_XPORT_BAM2BAM &&
trans != USB_GADGET_XPORT_BAM2BAM_IPA)
return;
port = bam2bam_ports[port_num];
d = &port->data_ch;
pr_debug("%s: suspended port %d\n", __func__, port_num);
queue_work(gbam_wq, &port->suspend_w);
}
void gbam_resume(struct grmnet *gr, u8 port_num, enum transport_type trans)
{
struct gbam_port *port;
if (trans != USB_GADGET_XPORT_BAM2BAM &&
trans != USB_GADGET_XPORT_BAM2BAM_IPA)
return;
port = bam2bam_ports[port_num];
pr_debug("%s: resumed port %d\n", __func__, port_num);
queue_work(gbam_wq, &port->resume_w);
}
| gpl-2.0 |
fortunaFiWn/android_kernel_samsung_fortunave3g | drivers/staging/prima/CORE/MAC/src/pe/lim/limPropExtsUtils.c | 518 | 9427 | /*
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
*
* Airgo Networks, Inc proprietary. All rights reserved.
* This file limPropExtsUtils.cc contains the utility functions
* to populate, parse proprietary extensions required to
* support ANI feature set.
*
* Author: Chandra Modumudi
* Date: 11/27/02
* History:-
* Date Modified by Modification Information
* --------------------------------------------------------------------
*
*/
#include "aniGlobal.h"
#include "wniCfgSta.h"
#include "sirCommon.h"
#include "sirDebug.h"
#include "utilsApi.h"
#include "cfgApi.h"
#include "limApi.h"
#include "limTypes.h"
#include "limUtils.h"
#include "limAssocUtils.h"
#include "limPropExtsUtils.h"
#include "limSerDesUtils.h"
#include "limTrace.h"
#include "limSession.h"
#define LIM_GET_NOISE_MAX_TRY 5
/**
* limExtractApCapability()
*
*FUNCTION:
* This function is called to extract AP's HCF/WME/WSM capability
* from the IEs received from it in Beacon/Probe Response frames
*
*LOGIC:
*
*ASSUMPTIONS:
* NA
*
*NOTE:
*
* @param pMac Pointer to Global MAC structure
* @param pIE Pointer to starting IE in Beacon/Probe Response
* @param ieLen Length of all IEs combined
* @param qosCap Bits are set according to capabilities
* @param propCap Proprietary capability bits returned to the caller
* @param uapsd U-APSD flag extracted from the WMM parameter element
* @param localConstraint Local power constraint derived from the received IEs
* @param psessionEntry PE session on which the IEs were received
* @return None
*/
void
limExtractApCapability(tpAniSirGlobal pMac, tANI_U8 *pIE, tANI_U16 ieLen,
tANI_U8 *qosCap, tANI_U16 *propCap, tANI_U8 *uapsd,
tPowerdBm *localConstraint,
tpPESession psessionEntry
)
{
tSirProbeRespBeacon *pBeaconStruct;
#if !defined WLAN_FEATURE_VOWIFI
tANI_U32 localPowerConstraints = 0;
#endif
pBeaconStruct = vos_mem_malloc(sizeof(tSirProbeRespBeacon));
if ( NULL == pBeaconStruct )
{
limLog(pMac, LOGE, FL("Unable to allocate memory in limExtractApCapability") );
return;
}
vos_mem_set( (tANI_U8 *) pBeaconStruct, sizeof(tSirProbeRespBeacon), 0);
*qosCap = 0;
*propCap = 0;
*uapsd = 0;
PELOG3(limLog( pMac, LOG3,
FL("In limExtractApCapability: The IE's being received are:"));
sirDumpBuf( pMac, SIR_LIM_MODULE_ID, LOG3, pIE, ieLen );)
if (sirParseBeaconIE(pMac, pBeaconStruct, pIE, (tANI_U32)ieLen) == eSIR_SUCCESS)
{
if (pBeaconStruct->wmeInfoPresent || pBeaconStruct->wmeEdcaPresent)
LIM_BSS_CAPS_SET(WME, *qosCap);
if (LIM_BSS_CAPS_GET(WME, *qosCap) && pBeaconStruct->wsmCapablePresent)
LIM_BSS_CAPS_SET(WSM, *qosCap);
if (pBeaconStruct->propIEinfo.aniIndicator &&
pBeaconStruct->propIEinfo.capabilityPresent)
*propCap = pBeaconStruct->propIEinfo.capability;
if (pBeaconStruct->HTCaps.present)
pMac->lim.htCapabilityPresentInBeacon = 1;
else
pMac->lim.htCapabilityPresentInBeacon = 0;
#ifdef WLAN_FEATURE_11AC
VOS_TRACE(VOS_MODULE_ID_PE, VOS_TRACE_LEVEL_INFO_MED,
"***beacon.VHTCaps.present*****=%d",pBeaconStruct->VHTCaps.present);
VOS_TRACE(VOS_MODULE_ID_PE, VOS_TRACE_LEVEL_INFO_MED,
"***beacon.SU Beamformer Capable*****=%d",pBeaconStruct->VHTCaps.suBeamFormerCap);
if ( pBeaconStruct->VHTCaps.present && pBeaconStruct->VHTOperation.present)
{
psessionEntry->vhtCapabilityPresentInBeacon = 1;
psessionEntry->apCenterChan = pBeaconStruct->VHTOperation.chanCenterFreqSeg1;
psessionEntry->apChanWidth = pBeaconStruct->VHTOperation.chanWidth;
}
else
{
psessionEntry->vhtCapabilityPresentInBeacon = 0;
}
#endif
// Extract the UAPSD flag from WMM Parameter element
if (pBeaconStruct->wmeEdcaPresent)
*uapsd = pBeaconStruct->edcaParams.qosInfo.uapsd;
#if defined FEATURE_WLAN_CCX
/* If the CCX-specific TX power element is present, adapt to
* it; the generic power-constraint handling below then applies
* on top of that value.
*/
if ( pBeaconStruct->ccxTxPwr.present)
{
*localConstraint = pBeaconStruct->ccxTxPwr.power_limit;
}
#endif
if (pBeaconStruct->powerConstraintPresent)
#if 0
//Remove this check. This function is expected to return localPowerConstraints
//and it should just do that. The check for 11h enabled or not can be done at the caller.
#if defined WLAN_FEATURE_VOWIFI
&& ( pMac->lim.gLim11hEnable
|| pMac->rrm.rrmPEContext.rrmEnable
#endif
#endif
{
#if defined WLAN_FEATURE_VOWIFI
*localConstraint -= pBeaconStruct->localPowerConstraint.localPowerConstraints;
#else
localPowerConstraints = (tANI_U32)pBeaconStruct->localPowerConstraint.localPowerConstraints;
#endif
}
#if !defined WLAN_FEATURE_VOWIFI
if (cfgSetInt(pMac, WNI_CFG_LOCAL_POWER_CONSTRAINT, localPowerConstraints) != eSIR_SUCCESS)
{
limLog(pMac, LOGP, FL("Could not update local power constraint to cfg."));
}
#endif
}
vos_mem_free(pBeaconStruct);
return;
} /****** end limExtractApCapability() ******/
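/*
 * Illustrative caller sketch (hypothetical values): all results are
 * returned through the pointer arguments, so the caller owns the
 * storage, and localConstraint must hold a sensible starting value
 * because the VOWIFI path above decrements it in place.
 */
#if 0
static void exampleExtractCaps(tpAniSirGlobal pMac, tANI_U8 *pIeStart,
                               tANI_U16 ieLen, tpPESession psessionEntry)
{
    tANI_U8 qosCap, uapsd;
    tANI_U16 propCap;
    tPowerdBm localConstraint = 20; /* assumed regulatory maximum */

    limExtractApCapability(pMac, pIeStart, ieLen, &qosCap, &propCap,
                           &uapsd, &localConstraint, psessionEntry);
}
#endif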
/**
* limGetHTCBState
*
*FUNCTION:
* This routine provides the translation of the Airgo enum to the HT enum for
* determining the secondary channel offset.
* The Airgo enum is required for backward compatibility purposes.
*
*
*NOTE:
*
* @param pMac - Pointer to Global MAC structure
* @return The corresponding HT enumeration
*/
ePhyChanBondState limGetHTCBState(ePhyChanBondState aniCBMode)
{
switch ( aniCBMode )
{
#ifdef WLAN_FEATURE_11AC
case PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW:
case PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_CENTERED:
case PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH:
#endif
case PHY_DOUBLE_CHANNEL_HIGH_PRIMARY:
return PHY_DOUBLE_CHANNEL_HIGH_PRIMARY;
#ifdef WLAN_FEATURE_11AC
case PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW:
case PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_CENTERED:
case PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH:
#endif
case PHY_DOUBLE_CHANNEL_LOW_PRIMARY:
return PHY_DOUBLE_CHANNEL_LOW_PRIMARY;
#ifdef WLAN_FEATURE_11AC
case PHY_QUADRUPLE_CHANNEL_20MHZ_CENTERED_40MHZ_CENTERED:
return PHY_SINGLE_CHANNEL_CENTERED;
#endif
default :
return PHY_SINGLE_CHANNEL_CENTERED;
}
}
/*
* limGetStaPeerType
*
*FUNCTION:
* Based on a combination of the following -
* 1) tDphHashNode.aniPeer
* 2) tDphHashNode.propCapability
* this API determines if a given STA is an ANI peer or not
*
*LOGIC:
*
*ASSUMPTIONS:
*
*NOTE:
*
* @param pMac - Pointer to Global MAC structure
* @param pStaDs - Pointer to the tpDphHashNode of the STA
* under consideration
* @return tStaRateMode
*/
tStaRateMode limGetStaPeerType( tpAniSirGlobal pMac,
tpDphHashNode pStaDs,
tpPESession psessionEntry)
{
tStaRateMode staPeerType = eSTA_11b;
// Determine the peer-STA type
if( pStaDs->aniPeer )
{
if(PROP_CAPABILITY_GET( TAURUS, pStaDs->propCapability ))
staPeerType = eSTA_TAURUS;
else if( PROP_CAPABILITY_GET( TITAN, pStaDs->propCapability ))
staPeerType = eSTA_TITAN;
else
staPeerType = eSTA_POLARIS;
}
#ifdef WLAN_FEATURE_11AC
else if(pStaDs->mlmStaContext.vhtCapability)
staPeerType = eSTA_11ac;
#endif
else if(pStaDs->mlmStaContext.htCapability)
staPeerType = eSTA_11n;
else if(pStaDs->erpEnabled)
staPeerType = eSTA_11bg;
else if(psessionEntry->limRFBand == SIR_BAND_5_GHZ)
staPeerType = eSTA_11a;
return staPeerType;
}
| gpl-2.0 |
paladin74/linux | arch/tile/lib/atomic_32.c | 1542 | 5510 | /*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <arch/chip.h>
/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;
int *__atomic_hashed_lock(volatile void *v)
{
/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
/*
* Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
* Using mm works here because atomic_locks is page aligned.
*/
unsigned long ptr = __insn_mm((unsigned long)v >> 1,
(unsigned long)atomic_locks,
2, (ATOMIC_HASH_SHIFT + 2) - 1);
return (int *)ptr;
}
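/*
 * For illustration only: the same mapping could be open-coded in
 * portable C as below. The tile "mm" instruction merges bits
 * [3, 3 + ATOMIC_HASH_SHIFT) of the address into the table base in a
 * single instruction; the explicit form shows the index computation.
 */
#if 0
static int *atomic_hashed_lock_portable(volatile void *v)
{
	unsigned long idx;

	/* Drop the low 3 bits, then hash into the lock table. */
	idx = ((unsigned long)v >> 3) & (ATOMIC_HASH_SIZE - 1);
	return &atomic_locks[idx];
}
#endif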
#ifdef CONFIG_SMP
/* Return whether the passed pointer is a valid atomic lock pointer. */
static int is_atomic_lock(int *p)
{
return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
}
void __atomic_fault_unlock(int *irqlock_word)
{
BUG_ON(!is_atomic_lock(irqlock_word));
BUG_ON(*irqlock_word != 1);
*irqlock_word = 0;
}
#endif /* CONFIG_SMP */
static inline int *__atomic_setup(volatile void *v)
{
/* Issue a load to the target to bring it into cache. */
*(volatile int *)v;
return __atomic_hashed_lock(v);
}
int _atomic_xchg(int *v, int n)
{
return __atomic_xchg(v, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);
int _atomic_xchg_add(int *v, int i)
{
return __atomic_xchg_add(v, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);
int _atomic_xchg_add_unless(int *v, int a, int u)
{
/*
* Note: argument order is switched here since it is easier
* to use the first argument consistently as the "old value"
* in the assembly, as is done for _atomic_cmpxchg().
*/
return __atomic_xchg_add_unless(v, __atomic_setup(v), u, a).val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);
int _atomic_cmpxchg(int *v, int o, int n)
{
return __atomic_cmpxchg(v, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);
unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
{
return __atomic_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_or);
unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
{
return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_andn);
unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
{
return __atomic_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_xor);
long long _atomic64_xchg(long long *v, long long n)
{
return __atomic64_xchg(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);
long long _atomic64_xchg_add(long long *v, long long i)
{
return __atomic64_xchg_add(v, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);
long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
{
/*
* Note: argument order is switched here since it is easier
* to use the first argument consistently as the "old value"
* in the assembly, as is done for _atomic_cmpxchg().
*/
return __atomic64_xchg_add_unless(v, __atomic_setup(v), u, a);
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);
long long _atomic64_cmpxchg(long long *v, long long o, long long n)
{
return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
}
EXPORT_SYMBOL(_atomic64_cmpxchg);
/*
* If any of the atomic or futex routines hit a bad address (not in
* the page tables at kernel PL) this routine is called. The futex
* routines are never used on kernel space, and the normal atomics and
* bitops are never used on user space. So a fault on kernel space
* must be fatal, but a fault on userspace is a futex fault and we
* need to return -EFAULT. Note that the context this routine is
* invoked in is the context of the "_atomic_xxx()" routines called
* by the functions in this file.
*/
struct __get_user __atomic_bad_address(int __user *addr)
{
if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
panic("Bad address used for kernel atomic op: %p\n", addr);
return (struct __get_user) { .err = -EFAULT };
}
void __init __init_atomic_per_cpu(void)
{
/* Validate power-of-two and "bigger than cpus" assumption */
BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);
/*
* On TILEPro we prefer to use a single hash-for-home
* page, since this means atomic operations are less
* likely to encounter a TLB fault and thus should
* in general perform faster. You may wish to disable
* this in situations where few hash-for-home tiles
* are configured.
*/
BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);
/* The locks must all fit on one page. */
BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);
/*
* We use the page offset of the atomic value's address as
* an index into atomic_locks, excluding the low 3 bits.
* That should not produce more indices than ATOMIC_HASH_SIZE.
*/
BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
}
| gpl-2.0 |
drgreenth/UBER-L | arch/arm/mach-at91/at91sam9rl_devices.c | 1542 | 31346 | /*
* Copyright (C) 2007 Atmel Corporation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/i2c-gpio.h>
#include <linux/fb.h>
#include <video/atmel_lcdc.h>
#include <mach/board.h>
#include <mach/at91sam9rl.h>
#include <mach/at91sam9rl_matrix.h>
#include <mach/at91_matrix.h>
#include <mach/at91sam9_smc.h>
#include <mach/at_hdmac.h>
#include "generic.h"
/* --------------------------------------------------------------------
* HDMAC - AHB DMA Controller
* -------------------------------------------------------------------- */
#if defined(CONFIG_AT_HDMAC) || defined(CONFIG_AT_HDMAC_MODULE)
static u64 hdmac_dmamask = DMA_BIT_MASK(32);
static struct resource hdmac_resources[] = {
[0] = {
.start = AT91SAM9RL_BASE_DMA,
.end = AT91SAM9RL_BASE_DMA + SZ_512 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9RL_ID_DMA,
.end = AT91SAM9RL_ID_DMA,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device at_hdmac_device = {
.name = "at91sam9rl_dma",
.id = -1,
.dev = {
.dma_mask = &hdmac_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
.resource = hdmac_resources,
.num_resources = ARRAY_SIZE(hdmac_resources),
};
void __init at91_add_device_hdmac(void)
{
platform_device_register(&at_hdmac_device);
}
#else
void __init at91_add_device_hdmac(void) {}
#endif
/* --------------------------------------------------------------------
* USB HS Device (Gadget)
* -------------------------------------------------------------------- */
#if defined(CONFIG_USB_ATMEL_USBA) || defined(CONFIG_USB_ATMEL_USBA_MODULE)
static struct resource usba_udc_resources[] = {
[0] = {
.start = AT91SAM9RL_UDPHS_FIFO,
.end = AT91SAM9RL_UDPHS_FIFO + SZ_512K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9RL_BASE_UDPHS,
.end = AT91SAM9RL_BASE_UDPHS + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
[2] = {
.start = AT91SAM9RL_ID_UDPHS,
.end = AT91SAM9RL_ID_UDPHS,
.flags = IORESOURCE_IRQ,
},
};
#define EP(nam, idx, maxpkt, maxbk, dma, isoc) \
[idx] = { \
.name = nam, \
.index = idx, \
.fifo_size = maxpkt, \
.nr_banks = maxbk, \
.can_dma = dma, \
.can_isoc = isoc, \
}
static struct usba_ep_data usba_udc_ep[] __initdata = {
EP("ep0", 0, 64, 1, 0, 0),
EP("ep1", 1, 1024, 2, 1, 1),
EP("ep2", 2, 1024, 2, 1, 1),
EP("ep3", 3, 1024, 3, 1, 0),
EP("ep4", 4, 1024, 3, 1, 0),
EP("ep5", 5, 1024, 3, 1, 1),
EP("ep6", 6, 1024, 3, 1, 1),
};
#undef EP
/*
* pdata doesn't have room for any endpoints, so we need to
* append room for the ones we need right after it.
*/
static struct {
struct usba_platform_data pdata;
struct usba_ep_data ep[7];
} usba_udc_data;
static struct platform_device at91_usba_udc_device = {
.name = "atmel_usba_udc",
.id = -1,
.dev = {
.platform_data = &usba_udc_data.pdata,
},
.resource = usba_udc_resources,
.num_resources = ARRAY_SIZE(usba_udc_resources),
};
void __init at91_add_device_usba(struct usba_platform_data *data)
{
/*
* Invalid pins are 0 on AT91, but the usba driver is shared
* with AVR32, which uses negative values instead. Once/if
* gpio_is_valid() is ported to AT91, revisit this code.
*/
usba_udc_data.pdata.vbus_pin = -EINVAL;
usba_udc_data.pdata.num_ep = ARRAY_SIZE(usba_udc_ep);
memcpy(usba_udc_data.ep, usba_udc_ep, sizeof(usba_udc_ep));
if (data && gpio_is_valid(data->vbus_pin)) {
at91_set_gpio_input(data->vbus_pin, 0);
at91_set_deglitch(data->vbus_pin, 1);
usba_udc_data.pdata.vbus_pin = data->vbus_pin;
}
/* Pullup pin is handled internally by USB device peripheral */
platform_device_register(&at91_usba_udc_device);
}
#else
void __init at91_add_device_usba(struct usba_platform_data *data) {}
#endif
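/*
 * Illustrative board-file usage (the VBUS pin below is an assumed
 * choice): boards that can sense VBUS pass the GPIO via the platform
 * data; everything else comes from the static endpoint table above.
 */
#if 0
static struct usba_platform_data __initdata ek_usba_udc_data = {
	.vbus_pin	= AT91_PIN_PA8,	/* assumed VBUS sense GPIO */
};

static void __init ek_add_usba_example(void)
{
	at91_add_device_usba(&ek_usba_udc_data);
}
#endif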
/* --------------------------------------------------------------------
* MMC / SD
* -------------------------------------------------------------------- */
#if defined(CONFIG_MMC_AT91) || defined(CONFIG_MMC_AT91_MODULE)
static u64 mmc_dmamask = DMA_BIT_MASK(32);
static struct at91_mmc_data mmc_data;
static struct resource mmc_resources[] = {
[0] = {
.start = AT91SAM9RL_BASE_MCI,
.end = AT91SAM9RL_BASE_MCI + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9RL_ID_MCI,
.end = AT91SAM9RL_ID_MCI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device at91sam9rl_mmc_device = {
.name = "at91_mci",
.id = -1,
.dev = {
.dma_mask = &mmc_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &mmc_data,
},
.resource = mmc_resources,
.num_resources = ARRAY_SIZE(mmc_resources),
};
void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
{
if (!data)
return;
/* input/irq */
if (gpio_is_valid(data->det_pin)) {
at91_set_gpio_input(data->det_pin, 1);
at91_set_deglitch(data->det_pin, 1);
}
if (gpio_is_valid(data->wp_pin))
at91_set_gpio_input(data->wp_pin, 1);
if (gpio_is_valid(data->vcc_pin))
at91_set_gpio_output(data->vcc_pin, 0);
/* CLK */
at91_set_A_periph(AT91_PIN_PA2, 0);
/* CMD */
at91_set_A_periph(AT91_PIN_PA1, 1);
/* DAT0, maybe DAT1..DAT3 */
at91_set_A_periph(AT91_PIN_PA0, 1);
if (data->wire4) {
at91_set_A_periph(AT91_PIN_PA3, 1);
at91_set_A_periph(AT91_PIN_PA4, 1);
at91_set_A_periph(AT91_PIN_PA5, 1);
}
mmc_data = *data;
platform_device_register(&at91sam9rl_mmc_device);
}
#else
void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data) {}
#endif
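/*
 * Illustrative board-file usage (GPIO numbers are assumptions):
 * card-detect and write-protect pins are optional and are validated
 * with gpio_is_valid() above; wire4 selects the 4-bit bus width.
 */
#if 0
static struct at91_mmc_data __initdata ek_mmc_data = {
	.det_pin	= AT91_PIN_PA15,	/* assumed card-detect GPIO */
	.wp_pin		= AT91_PIN_PA16,	/* assumed write-protect GPIO */
	.vcc_pin	= -EINVAL,		/* no VCC switch on this board */
	.wire4		= 1,
};

static void __init ek_add_mmc_example(void)
{
	at91_add_device_mmc(0, &ek_mmc_data);
}
#endif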
/* --------------------------------------------------------------------
* NAND / SmartMedia
* -------------------------------------------------------------------- */
#if defined(CONFIG_MTD_NAND_ATMEL) || defined(CONFIG_MTD_NAND_ATMEL_MODULE)
static struct atmel_nand_data nand_data;
#define NAND_BASE AT91_CHIPSELECT_3
static struct resource nand_resources[] = {
[0] = {
.start = NAND_BASE,
.end = NAND_BASE + SZ_256M - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9RL_BASE_ECC,
.end = AT91SAM9RL_BASE_ECC + SZ_512 - 1,
.flags = IORESOURCE_MEM,
}
};
static struct platform_device atmel_nand_device = {
.name = "atmel_nand",
.id = -1,
.dev = {
.platform_data = &nand_data,
},
.resource = nand_resources,
.num_resources = ARRAY_SIZE(nand_resources),
};
void __init at91_add_device_nand(struct atmel_nand_data *data)
{
unsigned long csa;
if (!data)
return;
csa = at91_matrix_read(AT91_MATRIX_EBICSA);
at91_matrix_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_CS3A_SMC_SMARTMEDIA);
/* enable pin */
if (gpio_is_valid(data->enable_pin))
at91_set_gpio_output(data->enable_pin, 1);
/* ready/busy pin */
if (gpio_is_valid(data->rdy_pin))
at91_set_gpio_input(data->rdy_pin, 1);
/* card detect pin */
if (gpio_is_valid(data->det_pin))
at91_set_gpio_input(data->det_pin, 1);
at91_set_A_periph(AT91_PIN_PB4, 0); /* NANDOE */
at91_set_A_periph(AT91_PIN_PB5, 0); /* NANDWE */
nand_data = *data;
platform_device_register(&atmel_nand_device);
}
#else
void __init at91_add_device_nand(struct atmel_nand_data *data) {}
#endif
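/*
 * Illustrative board-file usage (pins are assumptions): the enable,
 * ready/busy and detect pins are all optional; passing an invalid GPIO
 * simply skips the corresponding setup above.
 */
#if 0
static struct atmel_nand_data __initdata ek_nand_data = {
	.enable_pin	= AT91_PIN_PB6,		/* assumed chip-enable GPIO */
	.rdy_pin	= AT91_PIN_PD17,	/* assumed ready/busy GPIO */
	.det_pin	= -EINVAL,		/* no card detect */
};

static void __init ek_add_nand_example(void)
{
	at91_add_device_nand(&ek_nand_data);
}
#endif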
/* --------------------------------------------------------------------
* TWI (i2c)
* -------------------------------------------------------------------- */
/*
* Prefer the GPIO code since the TWI controller isn't robust
* (gets overruns and underruns under load) and can only issue
* repeated STARTs in one scenario (the driver doesn't yet handle them).
*/
#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
static struct i2c_gpio_platform_data pdata = {
.sda_pin = AT91_PIN_PA23,
.sda_is_open_drain = 1,
.scl_pin = AT91_PIN_PA24,
.scl_is_open_drain = 1,
.udelay = 2, /* ~100 kHz */
};
static struct platform_device at91sam9rl_twi_device = {
.name = "i2c-gpio",
.id = 0,
.dev.platform_data = &pdata,
};
void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices)
{
at91_set_GPIO_periph(AT91_PIN_PA23, 1); /* TWD (SDA) */
at91_set_multi_drive(AT91_PIN_PA23, 1);
at91_set_GPIO_periph(AT91_PIN_PA24, 1); /* TWCK (SCL) */
at91_set_multi_drive(AT91_PIN_PA24, 1);
i2c_register_board_info(0, devices, nr_devices);
platform_device_register(&at91sam9rl_twi_device);
}
#elif defined(CONFIG_I2C_AT91) || defined(CONFIG_I2C_AT91_MODULE)
static struct resource twi_resources[] = {
[0] = {
.start = AT91SAM9RL_BASE_TWI0,
.end = AT91SAM9RL_BASE_TWI0 + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9RL_ID_TWI0,
.end = AT91SAM9RL_ID_TWI0,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device at91sam9rl_twi_device = {
.name = "at91_i2c",
.id = -1,
.resource = twi_resources,
.num_resources = ARRAY_SIZE(twi_resources),
};
void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices)
{
/* pins used for TWI interface */
at91_set_A_periph(AT91_PIN_PA23, 0); /* TWD */
at91_set_multi_drive(AT91_PIN_PA23, 1);
at91_set_A_periph(AT91_PIN_PA24, 0); /* TWCK */
at91_set_multi_drive(AT91_PIN_PA24, 1);
i2c_register_board_info(0, devices, nr_devices);
platform_device_register(&at91sam9rl_twi_device);
}
#else
void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices) {}
#endif
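/*
 * Illustrative usage (device and address are assumptions): whichever
 * backend is compiled in, board code passes an i2c_board_info array and
 * both variants register it on bus 0.
 */
#if 0
static struct i2c_board_info __initdata ek_i2c_devices[] = {
	{
		I2C_BOARD_INFO("24c512", 0x50),	/* assumed EEPROM at 0x50 */
	},
};

static void __init ek_add_i2c_example(void)
{
	at91_add_device_i2c(ek_i2c_devices, ARRAY_SIZE(ek_i2c_devices));
}
#endif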
/* --------------------------------------------------------------------
* SPI
* -------------------------------------------------------------------- */
#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
static u64 spi_dmamask = DMA_BIT_MASK(32);
static struct resource spi_resources[] = {
[0] = {
.start = AT91SAM9RL_BASE_SPI,
.end = AT91SAM9RL_BASE_SPI + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9RL_ID_SPI,
.end = AT91SAM9RL_ID_SPI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device at91sam9rl_spi_device = {
.name = "atmel_spi",
.id = 0,
.dev = {
.dma_mask = &spi_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
.resource = spi_resources,
.num_resources = ARRAY_SIZE(spi_resources),
};
static const unsigned spi_standard_cs[4] = { AT91_PIN_PA28, AT91_PIN_PB7, AT91_PIN_PD8, AT91_PIN_PD9 };
void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
{
int i;
unsigned long cs_pin;
at91_set_A_periph(AT91_PIN_PA25, 0); /* MISO */
at91_set_A_periph(AT91_PIN_PA26, 0); /* MOSI */
at91_set_A_periph(AT91_PIN_PA27, 0); /* SPCK */
/* Enable SPI chip-selects */
for (i = 0; i < nr_devices; i++) {
if (devices[i].controller_data)
cs_pin = (unsigned long) devices[i].controller_data;
else
cs_pin = spi_standard_cs[devices[i].chip_select];
if (!gpio_is_valid(cs_pin))
continue;
/* enable chip-select pin */
at91_set_gpio_output(cs_pin, 1);
/* pass chip-select pin to driver */
devices[i].controller_data = (void *) cs_pin;
}
spi_register_board_info(devices, nr_devices);
platform_device_register(&at91sam9rl_spi_device);
}
#else
void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) {}
#endif
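/*
 * Illustrative usage (the slave device is an assumption): leaving
 * controller_data NULL selects the default chip-select from
 * spi_standard_cs[]; a board may instead pass its own GPIO there, as
 * handled in the loop above.
 */
#if 0
static struct spi_board_info ek_spi_devices[] __initdata = {
	{
		.modalias	= "mtd_dataflash",	/* assumed slave */
		.chip_select	= 0,		/* uses spi_standard_cs[0] */
		.max_speed_hz	= 15 * 1000 * 1000,
		.bus_num	= 0,
	},
};

static void __init ek_add_spi_example(void)
{
	at91_add_device_spi(ek_spi_devices, ARRAY_SIZE(ek_spi_devices));
}
#endif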
/* --------------------------------------------------------------------
* AC97
* -------------------------------------------------------------------- */
#if defined(CONFIG_SND_ATMEL_AC97C) || defined(CONFIG_SND_ATMEL_AC97C_MODULE)
static u64 ac97_dmamask = DMA_BIT_MASK(32);
static struct ac97c_platform_data ac97_data;
static struct resource ac97_resources[] = {
[0] = {
.start = AT91SAM9RL_BASE_AC97C,
.end = AT91SAM9RL_BASE_AC97C + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9RL_ID_AC97C,
.end = AT91SAM9RL_ID_AC97C,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device at91sam9rl_ac97_device = {
.name = "atmel_ac97c",
.id = 0,
.dev = {
.dma_mask = &ac97_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &ac97_data,
},
.resource = ac97_resources,
.num_resources = ARRAY_SIZE(ac97_resources),
};
void __init at91_add_device_ac97(struct ac97c_platform_data *data)
{
if (!data)
return;
at91_set_A_periph(AT91_PIN_PD1, 0); /* AC97FS */
at91_set_A_periph(AT91_PIN_PD2, 0); /* AC97CK */
at91_set_A_periph(AT91_PIN_PD3, 0); /* AC97TX */
at91_set_A_periph(AT91_PIN_PD4, 0); /* AC97RX */
/* reset */
if (gpio_is_valid(data->reset_pin))
at91_set_gpio_output(data->reset_pin, 0);
ac97_data = *data;
platform_device_register(&at91sam9rl_ac97_device);
}
#else
void __init at91_add_device_ac97(struct ac97c_platform_data *data) {}
#endif
/* --------------------------------------------------------------------
* LCD Controller
* -------------------------------------------------------------------- */
#if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
static u64 lcdc_dmamask = DMA_BIT_MASK(32);
static struct atmel_lcdfb_info lcdc_data;
static struct resource lcdc_resources[] = {
[0] = {
.start = AT91SAM9RL_LCDC_BASE,
.end = AT91SAM9RL_LCDC_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9RL_ID_LCDC,
.end = AT91SAM9RL_ID_LCDC,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device at91_lcdc_device = {
.name = "atmel_lcdfb",
.id = 0,
.dev = {
.dma_mask = &lcdc_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &lcdc_data,
},
.resource = lcdc_resources,
.num_resources = ARRAY_SIZE(lcdc_resources),
};
void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
{
if (!data) {
return;
}
at91_set_B_periph(AT91_PIN_PC1, 0); /* LCDPWR */
at91_set_A_periph(AT91_PIN_PC5, 0); /* LCDHSYNC */
at91_set_A_periph(AT91_PIN_PC6, 0); /* LCDDOTCK */
at91_set_A_periph(AT91_PIN_PC7, 0); /* LCDDEN */
at91_set_A_periph(AT91_PIN_PC3, 0); /* LCDCC */
at91_set_B_periph(AT91_PIN_PC9, 0); /* LCDD3 */
at91_set_B_periph(AT91_PIN_PC10, 0); /* LCDD4 */
at91_set_B_periph(AT91_PIN_PC11, 0); /* LCDD5 */
at91_set_B_periph(AT91_PIN_PC12, 0); /* LCDD6 */
at91_set_B_periph(AT91_PIN_PC13, 0); /* LCDD7 */
at91_set_B_periph(AT91_PIN_PC15, 0); /* LCDD11 */
at91_set_B_periph(AT91_PIN_PC16, 0); /* LCDD12 */
at91_set_B_periph(AT91_PIN_PC17, 0); /* LCDD13 */
at91_set_B_periph(AT91_PIN_PC18, 0); /* LCDD14 */
at91_set_B_periph(AT91_PIN_PC19, 0); /* LCDD15 */
at91_set_B_periph(AT91_PIN_PC20, 0); /* LCDD18 */
at91_set_B_periph(AT91_PIN_PC21, 0); /* LCDD19 */
at91_set_B_periph(AT91_PIN_PC22, 0); /* LCDD20 */
at91_set_B_periph(AT91_PIN_PC23, 0); /* LCDD21 */
at91_set_B_periph(AT91_PIN_PC24, 0); /* LCDD22 */
at91_set_B_periph(AT91_PIN_PC25, 0); /* LCDD23 */
lcdc_data = *data;
platform_device_register(&at91_lcdc_device);
}
#else
void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {}
#endif
/* --------------------------------------------------------------------
* Timer/Counter block
* -------------------------------------------------------------------- */
#ifdef CONFIG_ATMEL_TCLIB
static struct resource tcb_resources[] = {
[0] = {
.start = AT91SAM9RL_BASE_TCB0,
.end = AT91SAM9RL_BASE_TCB0 + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9RL_ID_TC0,
.end = AT91SAM9RL_ID_TC0,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = AT91SAM9RL_ID_TC1,
.end = AT91SAM9RL_ID_TC1,
.flags = IORESOURCE_IRQ,
},
[3] = {
.start = AT91SAM9RL_ID_TC2,
.end = AT91SAM9RL_ID_TC2,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device at91sam9rl_tcb_device = {
.name = "atmel_tcb",
.id = 0,
.resource = tcb_resources,
.num_resources = ARRAY_SIZE(tcb_resources),
};
static void __init at91_add_device_tc(void)
{
platform_device_register(&at91sam9rl_tcb_device);
}
#else
static void __init at91_add_device_tc(void) { }
#endif
/* --------------------------------------------------------------------
* Touchscreen
* -------------------------------------------------------------------- */
#if defined(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) || defined(CONFIG_TOUCHSCREEN_ATMEL_TSADCC_MODULE)
static u64 tsadcc_dmamask = DMA_BIT_MASK(32);
static struct at91_tsadcc_data tsadcc_data;
static struct resource tsadcc_resources[] = {
[0] = {
.start = AT91SAM9RL_BASE_TSC,
.end = AT91SAM9RL_BASE_TSC + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9RL_ID_TSC,
.end = AT91SAM9RL_ID_TSC,
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device at91sam9rl_tsadcc_device = {
.name = "atmel_tsadcc",
.id = -1,
.dev = {
.dma_mask = &tsadcc_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &tsadcc_data,
},
.resource = tsadcc_resources,
.num_resources = ARRAY_SIZE(tsadcc_resources),
};
void __init at91_add_device_tsadcc(struct at91_tsadcc_data *data)
{
if (!data)
return;
at91_set_A_periph(AT91_PIN_PA17, 0); /* AD0_XR */
at91_set_A_periph(AT91_PIN_PA18, 0); /* AD1_XL */
at91_set_A_periph(AT91_PIN_PA19, 0); /* AD2_YT */
at91_set_A_periph(AT91_PIN_PA20, 0); /* AD3_TB */
tsadcc_data = *data;
platform_device_register(&at91sam9rl_tsadcc_device);
}
#else
void __init at91_add_device_tsadcc(struct at91_tsadcc_data *data) {}
#endif
/* --------------------------------------------------------------------
* RTC
* -------------------------------------------------------------------- */
#if defined(CONFIG_RTC_DRV_AT91RM9200) || defined(CONFIG_RTC_DRV_AT91RM9200_MODULE)
static struct platform_device at91sam9rl_rtc_device = {
.name = "at91_rtc",
.id = -1,
.num_resources = 0,
};
static void __init at91_add_device_rtc(void)
{
platform_device_register(&at91sam9rl_rtc_device);
}
#else
static void __init at91_add_device_rtc(void) {}
#endif
/* --------------------------------------------------------------------
* RTT
* -------------------------------------------------------------------- */
static struct resource rtt_resources[] = {
{
.start = AT91SAM9RL_BASE_RTT,
.end = AT91SAM9RL_BASE_RTT + SZ_16 - 1,
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
}
};
static struct platform_device at91sam9rl_rtt_device = {
.name = "at91_rtt",
.id = 0,
.resource = rtt_resources,
};
#if IS_ENABLED(CONFIG_RTC_DRV_AT91SAM9)
static void __init at91_add_device_rtt_rtc(void)
{
at91sam9rl_rtt_device.name = "rtc-at91sam9";
/*
* The second resource is needed:
* GPBR will serve as the storage for RTC time offset
*/
at91sam9rl_rtt_device.num_resources = 2;
rtt_resources[1].start = AT91SAM9RL_BASE_GPBR +
4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
rtt_resources[1].end = rtt_resources[1].start + 3;
}
#else
static void __init at91_add_device_rtt_rtc(void)
{
/* Only one resource is needed: RTT not used as RTC */
at91sam9rl_rtt_device.num_resources = 1;
}
#endif
static void __init at91_add_device_rtt(void)
{
at91_add_device_rtt_rtc();
platform_device_register(&at91sam9rl_rtt_device);
}
/* --------------------------------------------------------------------
* Watchdog
* -------------------------------------------------------------------- */
#if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE)
static struct resource wdt_resources[] = {
{
.start = AT91SAM9RL_BASE_WDT,
.end = AT91SAM9RL_BASE_WDT + SZ_16 - 1,
.flags = IORESOURCE_MEM,
}
};
static struct platform_device at91sam9rl_wdt_device = {
.name = "at91_wdt",
.id = -1,
.resource = wdt_resources,
.num_resources = ARRAY_SIZE(wdt_resources),
};
static void __init at91_add_device_watchdog(void)
{
platform_device_register(&at91sam9rl_wdt_device);
}
#else
static void __init at91_add_device_watchdog(void) {}
#endif
/* --------------------------------------------------------------------
* PWM
* --------------------------------------------------------------------*/
#if defined(CONFIG_ATMEL_PWM)
static u32 pwm_mask;
static struct resource pwm_resources[] = {
[0] = {
.start = AT91SAM9RL_BASE_PWMC,
.end = AT91SAM9RL_BASE_PWMC + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9RL_ID_PWMC,
.end = AT91SAM9RL_ID_PWMC,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device at91sam9rl_pwm0_device = {
.name = "atmel_pwm",
.id = -1,
.dev = {
.platform_data = &pwm_mask,
},
.resource = pwm_resources,
.num_resources = ARRAY_SIZE(pwm_resources),
};
void __init at91_add_device_pwm(u32 mask)
{
if (mask & (1 << AT91_PWM0))
at91_set_B_periph(AT91_PIN_PB8, 1); /* enable PWM0 */
if (mask & (1 << AT91_PWM1))
at91_set_B_periph(AT91_PIN_PB9, 1); /* enable PWM1 */
if (mask & (1 << AT91_PWM2))
at91_set_B_periph(AT91_PIN_PD5, 1); /* enable PWM2 */
if (mask & (1 << AT91_PWM3))
at91_set_B_periph(AT91_PIN_PD8, 1); /* enable PWM3 */
pwm_mask = mask;
platform_device_register(&at91sam9rl_pwm0_device);
}
#else
void __init at91_add_device_pwm(u32 mask) {}
#endif
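/*
 * Illustrative usage (channel choice is an assumption): the mask
 * selects which PWM outputs get their pins configured above.
 */
#if 0
static void __init ek_add_pwm_example(void)
{
	at91_add_device_pwm((1 << AT91_PWM0) | (1 << AT91_PWM1));
}
#endif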
/* --------------------------------------------------------------------
* SSC -- Synchronous Serial Controller
* -------------------------------------------------------------------- */
#if defined(CONFIG_ATMEL_SSC) || defined(CONFIG_ATMEL_SSC_MODULE)
static u64 ssc0_dmamask = DMA_BIT_MASK(32);
static struct resource ssc0_resources[] = {
[0] = {
.start = AT91SAM9RL_BASE_SSC0,
.end = AT91SAM9RL_BASE_SSC0 + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9RL_ID_SSC0,
.end = AT91SAM9RL_ID_SSC0,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device at91sam9rl_ssc0_device = {
.name = "ssc",
.id = 0,
.dev = {
.dma_mask = &ssc0_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
.resource = ssc0_resources,
.num_resources = ARRAY_SIZE(ssc0_resources),
};
static inline void configure_ssc0_pins(unsigned pins)
{
if (pins & ATMEL_SSC_TF)
at91_set_A_periph(AT91_PIN_PC0, 1);
if (pins & ATMEL_SSC_TK)
at91_set_A_periph(AT91_PIN_PC1, 1);
if (pins & ATMEL_SSC_TD)
at91_set_A_periph(AT91_PIN_PA15, 1);
if (pins & ATMEL_SSC_RD)
at91_set_A_periph(AT91_PIN_PA16, 1);
if (pins & ATMEL_SSC_RK)
at91_set_B_periph(AT91_PIN_PA10, 1);
if (pins & ATMEL_SSC_RF)
at91_set_B_periph(AT91_PIN_PA22, 1);
}
static u64 ssc1_dmamask = DMA_BIT_MASK(32);
static struct resource ssc1_resources[] = {
[0] = {
.start = AT91SAM9RL_BASE_SSC1,
.end = AT91SAM9RL_BASE_SSC1 + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9RL_ID_SSC1,
.end = AT91SAM9RL_ID_SSC1,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device at91sam9rl_ssc1_device = {
.name = "ssc",
.id = 1,
.dev = {
.dma_mask = &ssc1_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
.resource = ssc1_resources,
.num_resources = ARRAY_SIZE(ssc1_resources),
};
static inline void configure_ssc1_pins(unsigned pins)
{
if (pins & ATMEL_SSC_TF)
at91_set_B_periph(AT91_PIN_PA29, 1);
if (pins & ATMEL_SSC_TK)
at91_set_B_periph(AT91_PIN_PA30, 1);
if (pins & ATMEL_SSC_TD)
at91_set_B_periph(AT91_PIN_PA13, 1);
if (pins & ATMEL_SSC_RD)
at91_set_B_periph(AT91_PIN_PA14, 1);
if (pins & ATMEL_SSC_RK)
at91_set_B_periph(AT91_PIN_PA9, 1);
if (pins & ATMEL_SSC_RF)
at91_set_B_periph(AT91_PIN_PA8, 1);
}
/*
* SSC controllers are accessed through library code, instead of any
* kind of all-singing/all-dancing driver. For example one could be
* used by a particular I2S audio codec's driver, while another one
* on the same system might be used by a custom data capture driver.
*/
void __init at91_add_device_ssc(unsigned id, unsigned pins)
{
struct platform_device *pdev;
/*
* NOTE: caller is responsible for passing information matching
* "pins" to whatever will be using each particular controller.
*/
switch (id) {
case AT91SAM9RL_ID_SSC0:
pdev = &at91sam9rl_ssc0_device;
configure_ssc0_pins(pins);
break;
case AT91SAM9RL_ID_SSC1:
pdev = &at91sam9rl_ssc1_device;
configure_ssc1_pins(pins);
break;
default:
return;
}
platform_device_register(pdev);
}
#else
void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
#endif
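/*
 * Illustrative usage (transmit-only wiring is an assumption): a board
 * connecting SSC0 to an I2S codec as transmitter only requests just the
 * transmit pins; the receive pins then remain available as GPIOs.
 */
#if 0
static void __init ek_add_ssc_example(void)
{
	at91_add_device_ssc(AT91SAM9RL_ID_SSC0,
			    ATMEL_SSC_TF | ATMEL_SSC_TK | ATMEL_SSC_TD);
}
#endif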
/* --------------------------------------------------------------------
* UART
* -------------------------------------------------------------------- */
#if defined(CONFIG_SERIAL_ATMEL)
static struct resource dbgu_resources[] = {
[0] = {
.start = AT91SAM9RL_BASE_DBGU,
.end = AT91SAM9RL_BASE_DBGU + SZ_512 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91_ID_SYS,
.end = AT91_ID_SYS,
.flags = IORESOURCE_IRQ,
},
};
static struct atmel_uart_data dbgu_data = {
.use_dma_tx = 0,
.use_dma_rx = 0, /* DBGU not capable of receive DMA */
};
static u64 dbgu_dmamask = DMA_BIT_MASK(32);
static struct platform_device at91sam9rl_dbgu_device = {
.name = "atmel_usart",
.id = 0,
.dev = {
.dma_mask = &dbgu_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &dbgu_data,
},
.resource = dbgu_resources,
.num_resources = ARRAY_SIZE(dbgu_resources),
};
static inline void configure_dbgu_pins(void)
{
at91_set_A_periph(AT91_PIN_PA21, 0); /* DRXD */
at91_set_A_periph(AT91_PIN_PA22, 1); /* DTXD */
}
static struct resource uart0_resources[] = {
[0] = {
.start = AT91SAM9RL_BASE_US0,
.end = AT91SAM9RL_BASE_US0 + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9RL_ID_US0,
.end = AT91SAM9RL_ID_US0,
.flags = IORESOURCE_IRQ,
},
};
static struct atmel_uart_data uart0_data = {
.use_dma_tx = 1,
.use_dma_rx = 1,
};
static u64 uart0_dmamask = DMA_BIT_MASK(32);
static struct platform_device at91sam9rl_uart0_device = {
.name = "atmel_usart",
.id = 1,
.dev = {
.dma_mask = &uart0_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &uart0_data,
},
.resource = uart0_resources,
.num_resources = ARRAY_SIZE(uart0_resources),
};
static inline void configure_usart0_pins(unsigned pins)
{
at91_set_A_periph(AT91_PIN_PA6, 1); /* TXD0 */
at91_set_A_periph(AT91_PIN_PA7, 0); /* RXD0 */
if (pins & ATMEL_UART_RTS)
at91_set_A_periph(AT91_PIN_PA9, 0); /* RTS0 */
if (pins & ATMEL_UART_CTS)
at91_set_A_periph(AT91_PIN_PA10, 0); /* CTS0 */
if (pins & ATMEL_UART_DSR)
at91_set_A_periph(AT91_PIN_PD14, 0); /* DSR0 */
if (pins & ATMEL_UART_DTR)
at91_set_A_periph(AT91_PIN_PD15, 0); /* DTR0 */
if (pins & ATMEL_UART_DCD)
at91_set_A_periph(AT91_PIN_PD16, 0); /* DCD0 */
if (pins & ATMEL_UART_RI)
at91_set_A_periph(AT91_PIN_PD17, 0); /* RI0 */
}
static struct resource uart1_resources[] = {
[0] = {
.start = AT91SAM9RL_BASE_US1,
.end = AT91SAM9RL_BASE_US1 + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9RL_ID_US1,
.end = AT91SAM9RL_ID_US1,
.flags = IORESOURCE_IRQ,
},
};
static struct atmel_uart_data uart1_data = {
.use_dma_tx = 1,
.use_dma_rx = 1,
};
static u64 uart1_dmamask = DMA_BIT_MASK(32);
static struct platform_device at91sam9rl_uart1_device = {
.name = "atmel_usart",
.id = 2,
.dev = {
.dma_mask = &uart1_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &uart1_data,
},
.resource = uart1_resources,
.num_resources = ARRAY_SIZE(uart1_resources),
};
static inline void configure_usart1_pins(unsigned pins)
{
at91_set_A_periph(AT91_PIN_PA11, 1); /* TXD1 */
at91_set_A_periph(AT91_PIN_PA12, 0); /* RXD1 */
if (pins & ATMEL_UART_RTS)
at91_set_B_periph(AT91_PIN_PA18, 0); /* RTS1 */
if (pins & ATMEL_UART_CTS)
at91_set_B_periph(AT91_PIN_PA19, 0); /* CTS1 */
}
static struct resource uart2_resources[] = {
[0] = {
.start = AT91SAM9RL_BASE_US2,
.end = AT91SAM9RL_BASE_US2 + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9RL_ID_US2,
.end = AT91SAM9RL_ID_US2,
.flags = IORESOURCE_IRQ,
},
};
static struct atmel_uart_data uart2_data = {
.use_dma_tx = 1,
.use_dma_rx = 1,
};
static u64 uart2_dmamask = DMA_BIT_MASK(32);
static struct platform_device at91sam9rl_uart2_device = {
.name = "atmel_usart",
.id = 3,
.dev = {
.dma_mask = &uart2_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &uart2_data,
},
.resource = uart2_resources,
.num_resources = ARRAY_SIZE(uart2_resources),
};
static inline void configure_usart2_pins(unsigned pins)
{
at91_set_A_periph(AT91_PIN_PA13, 1); /* TXD2 */
at91_set_A_periph(AT91_PIN_PA14, 0); /* RXD2 */
if (pins & ATMEL_UART_RTS)
at91_set_A_periph(AT91_PIN_PA29, 0); /* RTS2 */
if (pins & ATMEL_UART_CTS)
at91_set_A_periph(AT91_PIN_PA30, 0); /* CTS2 */
}
static struct resource uart3_resources[] = {
[0] = {
.start = AT91SAM9RL_BASE_US3,
.end = AT91SAM9RL_BASE_US3 + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9RL_ID_US3,
.end = AT91SAM9RL_ID_US3,
.flags = IORESOURCE_IRQ,
},
};
static struct atmel_uart_data uart3_data = {
.use_dma_tx = 1,
.use_dma_rx = 1,
};
static u64 uart3_dmamask = DMA_BIT_MASK(32);
static struct platform_device at91sam9rl_uart3_device = {
.name = "atmel_usart",
.id = 4,
.dev = {
.dma_mask = &uart3_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &uart3_data,
},
.resource = uart3_resources,
.num_resources = ARRAY_SIZE(uart3_resources),
};
static inline void configure_usart3_pins(unsigned pins)
{
at91_set_A_periph(AT91_PIN_PB0, 1); /* TXD3 */
at91_set_A_periph(AT91_PIN_PB1, 0); /* RXD3 */
if (pins & ATMEL_UART_RTS)
at91_set_B_periph(AT91_PIN_PD4, 0); /* RTS3 */
if (pins & ATMEL_UART_CTS)
at91_set_B_periph(AT91_PIN_PD3, 0); /* CTS3 */
}
static struct platform_device *__initdata at91_uarts[ATMEL_MAX_UART]; /* the UARTs to use */
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
struct platform_device *pdev;
struct atmel_uart_data *pdata;
switch (id) {
case 0: /* DBGU */
pdev = &at91sam9rl_dbgu_device;
configure_dbgu_pins();
break;
case AT91SAM9RL_ID_US0:
pdev = &at91sam9rl_uart0_device;
configure_usart0_pins(pins);
break;
case AT91SAM9RL_ID_US1:
pdev = &at91sam9rl_uart1_device;
configure_usart1_pins(pins);
break;
case AT91SAM9RL_ID_US2:
pdev = &at91sam9rl_uart2_device;
configure_usart2_pins(pins);
break;
case AT91SAM9RL_ID_US3:
pdev = &at91sam9rl_uart3_device;
configure_usart3_pins(pins);
break;
default:
return;
}
pdata = pdev->dev.platform_data;
pdata->num = portnr; /* update to mapped ID */
if (portnr < ATMEL_MAX_UART)
at91_uarts[portnr] = pdev;
}
void __init at91_set_serial_console(unsigned portnr)
{
/* Guard against a port number that was never registered. */
if (portnr < ATMEL_MAX_UART && at91_uarts[portnr]) {
atmel_default_console_device = at91_uarts[portnr];
at91sam9rl_set_console_clock(at91_uarts[portnr]->id);
}
}
void __init at91_add_device_serial(void)
{
int i;
for (i = 0; i < ATMEL_MAX_UART; i++) {
if (at91_uarts[i])
platform_device_register(at91_uarts[i]);
}
if (!atmel_default_console_device)
printk(KERN_INFO "AT91: No default serial console defined.\n");
}
#else
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {}
void __init at91_set_serial_console(unsigned portnr) {}
void __init at91_add_device_serial(void) {}
#endif
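/*
 * Illustrative board init sequence (using the DBGU as console is an
 * assumption): UARTs are first mapped to port numbers, one is nominated
 * as console, and at91_add_device_serial() finally registers them all.
 */
#if 0
static void __init ek_init_serial_example(void)
{
	/* DBGU on ttyS0 (id 0 selects the DBGU above). */
	at91_register_uart(0, 0, 0);

	/* USART0 on ttyS1, with RTS/CTS handshaking. */
	at91_register_uart(AT91SAM9RL_ID_US0, 1,
			   ATMEL_UART_CTS | ATMEL_UART_RTS);

	at91_set_serial_console(0);
	at91_add_device_serial();
}
#endif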
/* -------------------------------------------------------------------- */
/*
* These devices are always present and don't need any board-specific
* setup.
*/
static int __init at91_add_standard_devices(void)
{
at91_add_device_hdmac();
at91_add_device_rtc();
at91_add_device_rtt();
at91_add_device_watchdog();
at91_add_device_tc();
return 0;
}
arch_initcall(at91_add_standard_devices);
| gpl-2.0 |
dperezde/little-penguin | linux/drivers/ata/pata_sc1200.c | 2054 | 7293 | /*
* New ATA layer SC1200 driver Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* TODO: Mode selection filtering
* TODO: Needs custom DMA cleanup code
*
* Based very heavily on
*
* linux/drivers/ide/pci/sc1200.c Version 0.91 28-Jan-2003
*
* Copyright (C) 2000-2002 Mark Lord <mlord@pobox.com>
* May be copied or modified under the terms of the GNU General Public License
*
* Development of this chipset driver was funded
* by the nice folks at National Semiconductor.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_sc1200"
#define DRV_VERSION "0.2.6"
#define SC1200_REV_A 0x00
#define SC1200_REV_B1 0x01
#define SC1200_REV_B3 0x02
#define SC1200_REV_C1 0x03
#define SC1200_REV_D1 0x04
/**
* sc1200_clock - PCI clock
*
* Return the PCI bus clocking for the SC1200 chipset configuration
* in use. We return 0 for 33MHz, 1 for 48MHz and 2 for 66MHz.
*/
static int sc1200_clock(void)
{
/* Magic registers that give us the chipset data */
u8 chip_id = inb(0x903C);
u8 silicon_rev = inb(0x903D);
u16 pci_clock;
if (chip_id == 0x04 && silicon_rev < SC1200_REV_B1)
return 0; /* 33 MHz mode */
/* Clock generator configuration register 0x901E: bits 8/9 give the PCI
clocking; 0 or 3 is 33MHz, 1 is 48MHz, 2 is 66MHz */
pci_clock = inw(0x901E);
pci_clock >>= 8;
pci_clock &= 0x03;
if (pci_clock == 3)
pci_clock = 0;
return pci_clock;
}
/**
* sc1200_set_piomode - PIO setup
* @ap: ATA interface
* @adev: device on the interface
*
* Set our PIO requirements. This is fairly simple on the SC1200.
*/
static void sc1200_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
static const u32 pio_timings[4][5] = {
/* format0, 33Mhz */
{ 0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010 },
/* format1, 33Mhz */
{ 0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010 },
/* format1, 48Mhz */
{ 0xfaa3f4f3, 0xc23232b2, 0x513101c1, 0x31213121, 0x10211021 },
/* format1, 66Mhz */
{ 0xfff4fff4, 0xf35353d3, 0x814102f1, 0x42314231, 0x11311131 }
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 format;
unsigned int reg = 0x40 + 0x10 * ap->port_no;
int mode = adev->pio_mode - XFER_PIO_0;
pci_read_config_dword(pdev, reg + 4, &format);
format >>= 31;
format += sc1200_clock();
pci_write_config_dword(pdev, reg + 8 * adev->devno,
pio_timings[format][mode]);
}
/**
* sc1200_set_dmamode - DMA timing setup
* @ap: ATA interface
* @adev: Device being configured
*
* We cannot mix MWDMA and UDMA without reloading the timings each time
* we switch between the master and the slave device.
*/
static void sc1200_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static const u32 udma_timing[3][3] = {
{ 0x00921250, 0x00911140, 0x00911030 },
{ 0x00932470, 0x00922260, 0x00922140 },
{ 0x009436A1, 0x00933481, 0x00923261 }
};
static const u32 mwdma_timing[3][3] = {
{ 0x00077771, 0x00012121, 0x00002020 },
{ 0x000BBBB2, 0x00024241, 0x00013131 },
{ 0x000FFFF3, 0x00035352, 0x00015151 }
};
int clock = sc1200_clock();
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned int reg = 0x40 + 0x10 * ap->port_no;
int mode = adev->dma_mode;
u32 format;
if (mode >= XFER_UDMA_0)
format = udma_timing[clock][mode - XFER_UDMA_0];
else
format = mwdma_timing[clock][mode - XFER_MW_DMA_0];
if (adev->devno == 0) {
u32 timings;
pci_read_config_dword(pdev, reg + 4, &timings);
timings &= 0x80000000UL;
timings |= format;
pci_write_config_dword(pdev, reg + 4, timings);
} else
pci_write_config_dword(pdev, reg + 12, format);
}
/**
* sc1200_qc_issue - command issue
* @qc: command pending
*
* Called when the libata layer is about to issue a command. We wrap
* this interface so that we can load the correct ATA timings if
* necessary. Specifically we have a problem that there is only
* one MWDMA/UDMA bit.
*/
static unsigned int sc1200_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct ata_device *prev = ap->private_data;
/* See if the DMA settings could be wrong */
if (ata_dma_enabled(adev) && adev != prev && prev != NULL) {
/* Maybe, but do the channels match MWDMA/UDMA ? */
if ((ata_using_udma(adev) && !ata_using_udma(prev)) ||
(ata_using_udma(prev) && !ata_using_udma(adev)))
/* Switch the mode bits */
sc1200_set_dmamode(ap, adev);
}
return ata_bmdma_qc_issue(qc);
}
/**
* sc1200_qc_defer - implement serialization
* @qc: command
*
* Serialize command issue on this controller.
*/
static int sc1200_qc_defer(struct ata_queued_cmd *qc)
{
struct ata_host *host = qc->ap->host;
struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
int rc;
/* First apply the usual rules */
rc = ata_std_qc_defer(qc);
if (rc != 0)
return rc;
/* Now apply serialization rules. Only allow a command if the
other channel state machine is idle */
if (alt && alt->qc_active)
return ATA_DEFER_PORT;
return 0;
}
static struct scsi_host_template sc1200_sht = {
ATA_BMDMA_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
};
static struct ata_port_operations sc1200_port_ops = {
.inherits = &ata_bmdma_port_ops,
.qc_prep = ata_bmdma_dumb_qc_prep,
.qc_issue = sc1200_qc_issue,
.qc_defer = sc1200_qc_defer,
.cable_detect = ata_cable_40wire,
.set_piomode = sc1200_set_piomode,
.set_dmamode = sc1200_set_dmamode,
};
/**
* sc1200_init_one - Initialise an SC1200
* @dev: PCI device
* @id: Entry in match table
*
* Just throw the needed data at the libata helper and it does all
* our work.
*/
static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
.port_ops = &sc1200_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
return ata_pci_bmdma_init_one(dev, ppi, &sc1200_sht, NULL, 0);
}
static const struct pci_device_id sc1200[] = {
{ PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SCx200_IDE), },
{ },
};
static struct pci_driver sc1200_pci_driver = {
.name = DRV_NAME,
.id_table = sc1200,
.probe = sc1200_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(sc1200_pci_driver);
MODULE_AUTHOR("Alan Cox, Mark Lord");
MODULE_DESCRIPTION("low-level driver for the NS/AMD SC1200");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sc1200);
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
kalltkaffe/galaxy-s3-kernel | arch/arm/mach-sa1100/neponset.c | 3078 | 7659 | /*
* linux/arch/arm/mach-sa1100/neponset.c
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/mach/serial_sa1100.h>
#include <mach/assabet.h>
#include <mach/neponset.h>
#include <asm/hardware/sa1111.h>
#include <asm/sizes.h>
/*
* Install handler for Neponset IRQ. Note that we have to loop here
* since the ETHERNET and USAR IRQs are level based, and we need to
* ensure that the IRQ signal is deasserted before returning. This
* is rather unfortunate.
*/
static void
neponset_irq_handler(unsigned int irq, struct irq_desc *desc)
{
unsigned int irr;
while (1) {
/*
* Acknowledge the parent IRQ.
*/
desc->irq_data.chip->irq_ack(&desc->irq_data);
/*
* Read the interrupt reason register. Let's have all
* active IRQ bits high. Note: there is a typo in the
* Neponset user's guide for the SA1111 IRR level.
*/
irr = IRR ^ (IRR_ETHERNET | IRR_USAR);
if ((irr & (IRR_ETHERNET | IRR_USAR | IRR_SA1111)) == 0)
break;
/*
* Since there is no individual mask, we have to
* mask the parent IRQ. This is safe, since we'll
* recheck the register for any pending IRQs.
*/
if (irr & (IRR_ETHERNET | IRR_USAR)) {
desc->irq_data.chip->irq_mask(&desc->irq_data);
/*
* Ack the interrupt now to prevent re-entering
* this neponset handler. Again, this is safe
* since we'll check the IRR register prior to
* leaving.
*/
desc->irq_data.chip->irq_ack(&desc->irq_data);
if (irr & IRR_ETHERNET) {
generic_handle_irq(IRQ_NEPONSET_SMC9196);
}
if (irr & IRR_USAR) {
generic_handle_irq(IRQ_NEPONSET_USAR);
}
desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
if (irr & IRR_SA1111) {
generic_handle_irq(IRQ_NEPONSET_SA1111);
}
}
}
static void neponset_set_mctrl(struct uart_port *port, u_int mctrl)
{
u_int mdm_ctl0 = MDM_CTL_0;
if (port->mapbase == _Ser1UTCR0) {
if (mctrl & TIOCM_RTS)
mdm_ctl0 &= ~MDM_CTL0_RTS2;
else
mdm_ctl0 |= MDM_CTL0_RTS2;
if (mctrl & TIOCM_DTR)
mdm_ctl0 &= ~MDM_CTL0_DTR2;
else
mdm_ctl0 |= MDM_CTL0_DTR2;
} else if (port->mapbase == _Ser3UTCR0) {
if (mctrl & TIOCM_RTS)
mdm_ctl0 &= ~MDM_CTL0_RTS1;
else
mdm_ctl0 |= MDM_CTL0_RTS1;
if (mctrl & TIOCM_DTR)
mdm_ctl0 &= ~MDM_CTL0_DTR1;
else
mdm_ctl0 |= MDM_CTL0_DTR1;
}
MDM_CTL_0 = mdm_ctl0;
}
static u_int neponset_get_mctrl(struct uart_port *port)
{
u_int ret = TIOCM_CD | TIOCM_CTS | TIOCM_DSR;
u_int mdm_ctl1 = MDM_CTL_1;
if (port->mapbase == _Ser1UTCR0) {
if (mdm_ctl1 & MDM_CTL1_DCD2)
ret &= ~TIOCM_CD;
if (mdm_ctl1 & MDM_CTL1_CTS2)
ret &= ~TIOCM_CTS;
if (mdm_ctl1 & MDM_CTL1_DSR2)
ret &= ~TIOCM_DSR;
} else if (port->mapbase == _Ser3UTCR0) {
if (mdm_ctl1 & MDM_CTL1_DCD1)
ret &= ~TIOCM_CD;
if (mdm_ctl1 & MDM_CTL1_CTS1)
ret &= ~TIOCM_CTS;
if (mdm_ctl1 & MDM_CTL1_DSR1)
ret &= ~TIOCM_DSR;
}
return ret;
}
static struct sa1100_port_fns neponset_port_fns __devinitdata = {
.set_mctrl = neponset_set_mctrl,
.get_mctrl = neponset_get_mctrl,
};
static int __devinit neponset_probe(struct platform_device *dev)
{
sa1100_register_uart_fns(&neponset_port_fns);
/*
* Install handler for GPIO25.
*/
irq_set_irq_type(IRQ_GPIO25, IRQ_TYPE_EDGE_RISING);
irq_set_chained_handler(IRQ_GPIO25, neponset_irq_handler);
/*
* We would set IRQ_GPIO25 to be a wake-up IRQ, but
* unfortunately something on the Neponset activates
* this IRQ on sleep (ethernet?)
*/
#if 0
enable_irq_wake(IRQ_GPIO25);
#endif
/*
* Setup other Neponset IRQs. SA1111 will be done by the
* generic SA1111 code.
*/
irq_set_handler(IRQ_NEPONSET_SMC9196, handle_simple_irq);
set_irq_flags(IRQ_NEPONSET_SMC9196, IRQF_VALID | IRQF_PROBE);
irq_set_handler(IRQ_NEPONSET_USAR, handle_simple_irq);
set_irq_flags(IRQ_NEPONSET_USAR, IRQF_VALID | IRQF_PROBE);
/*
* Disable GPIO 0/1 drivers so the buttons work on the module.
*/
NCR_0 = NCR_GP01_OFF;
return 0;
}
#ifdef CONFIG_PM
/*
* LDM power management.
*/
static unsigned int neponset_saved_state;
static int neponset_suspend(struct platform_device *dev, pm_message_t state)
{
/*
* Save state.
*/
neponset_saved_state = NCR_0;
return 0;
}
static int neponset_resume(struct platform_device *dev)
{
NCR_0 = neponset_saved_state;
return 0;
}
#else
#define neponset_suspend NULL
#define neponset_resume NULL
#endif
static struct platform_driver neponset_device_driver = {
.probe = neponset_probe,
.suspend = neponset_suspend,
.resume = neponset_resume,
.driver = {
.name = "neponset",
},
};
static struct resource neponset_resources[] = {
[0] = {
.start = 0x10000000,
.end = 0x17ffffff,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device neponset_device = {
.name = "neponset",
.id = 0,
.num_resources = ARRAY_SIZE(neponset_resources),
.resource = neponset_resources,
};
static struct resource sa1111_resources[] = {
[0] = {
.start = 0x40000000,
.end = 0x40001fff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_NEPONSET_SA1111,
.end = IRQ_NEPONSET_SA1111,
.flags = IORESOURCE_IRQ,
},
};
static struct sa1111_platform_data sa1111_info = {
.irq_base = IRQ_BOARD_END,
};
static u64 sa1111_dmamask = 0xffffffffUL;
static struct platform_device sa1111_device = {
.name = "sa1111",
.id = 0,
.dev = {
.dma_mask = &sa1111_dmamask,
.coherent_dma_mask = 0xffffffff,
.platform_data = &sa1111_info,
},
.num_resources = ARRAY_SIZE(sa1111_resources),
.resource = sa1111_resources,
};
static struct resource smc91x_resources[] = {
[0] = {
.name = "smc91x-regs",
.start = SA1100_CS3_PHYS,
.end = SA1100_CS3_PHYS + 0x01ffffff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_NEPONSET_SMC9196,
.end = IRQ_NEPONSET_SMC9196,
.flags = IORESOURCE_IRQ,
},
[2] = {
.name = "smc91x-attrib",
.start = SA1100_CS3_PHYS + 0x02000000,
.end = SA1100_CS3_PHYS + 0x03ffffff,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
};
static struct platform_device *devices[] __initdata = {
&neponset_device,
&sa1111_device,
&smc91x_device,
};
extern void sa1110_mb_disable(void);
static int __init neponset_init(void)
{
platform_driver_register(&neponset_device_driver);
/*
* The Neponset is only present on the Assabet machine type.
*/
if (!machine_is_assabet())
return -ENODEV;
/*
* Ensure that the memory bus request/grant signals are setup,
* and the grant is held in its inactive state, whether or not
* we actually have a Neponset attached.
*/
sa1110_mb_disable();
if (!machine_has_neponset()) {
printk(KERN_DEBUG "Neponset expansion board not present\n");
return -ENODEV;
}
if (WHOAMI != 0x11) {
printk(KERN_WARNING "Neponset board detected, but "
"wrong ID: %02x\n", WHOAMI);
return -ENODEV;
}
return platform_add_devices(devices, ARRAY_SIZE(devices));
}
subsys_initcall(neponset_init);
static struct map_desc neponset_io_desc[] __initdata = {
{ /* System Registers */
.virtual = 0xf3000000,
.pfn = __phys_to_pfn(0x10000000),
.length = SZ_1M,
.type = MT_DEVICE
}, { /* SA-1111 */
.virtual = 0xf4000000,
.pfn = __phys_to_pfn(0x40000000),
.length = SZ_1M,
.type = MT_DEVICE
}
};
void __init neponset_map_io(void)
{
iotable_init(neponset_io_desc, ARRAY_SIZE(neponset_io_desc));
}
| gpl-2.0 |
NoelMacwan/SXDNickiDS | drivers/usb/serial/garmin_gps.c | 3334 | 40002 | /*
* Garmin GPS driver
*
* Copyright (C) 2006-2011 Hermann Kneissel herkne@gmx.de
*
* The latest version of the driver can be found at
* http://sourceforge.net/projects/garmin-gps/
*
* This driver has been derived from v2.1 of the visor driver.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111 USA
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
/* the mode to be set when the port is opened */
static int initial_mode = 1;
/* debug flag */
static bool debug;
#define GARMIN_VENDOR_ID 0x091E
/*
* Version Information
*/
#define VERSION_MAJOR 0
#define VERSION_MINOR 36
#define _STR(s) #s
#define _DRIVER_VERSION(a, b) "v" _STR(a) "." _STR(b)
#define DRIVER_VERSION _DRIVER_VERSION(VERSION_MAJOR, VERSION_MINOR)
#define DRIVER_AUTHOR "hermann kneissel"
#define DRIVER_DESC "garmin gps driver"
/* error codes returned by the driver */
#define EINVPKT 1000 /* invalid packet structure */
/* size of the header of a packet using the usb protocol */
#define GARMIN_PKTHDR_LENGTH 12
/* max. possible size of a packet using the serial protocol */
#define MAX_SERIAL_PKT_SIZ (3 + 255 + 3)
/* max. possible size of a packet with worst case stuffing */
#define MAX_SERIAL_PKT_SIZ_STUFFED (MAX_SERIAL_PKT_SIZ + 256)
/* size of a buffer able to hold a complete (no stuffing) packet
* (the documented protocol does not contain packets with a larger
* size, but in theory a packet may be 64k+12 bytes - if larger
* packet sizes occur in later protocol versions, this value
* should be increased accordingly, so the input buffer is always
* large enough to store a complete packet including the header) */
#define GPS_IN_BUFSIZ (GARMIN_PKTHDR_LENGTH+MAX_SERIAL_PKT_SIZ)
/* size of a buffer able to hold a complete (incl. stuffing) packet */
#define GPS_OUT_BUFSIZ (GARMIN_PKTHDR_LENGTH+MAX_SERIAL_PKT_SIZ_STUFFED)
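/* Illustrative expansion of the sizes above (values computed here, not
* taken from a spec): MAX_SERIAL_PKT_SIZ is 3 + 255 + 3 = 261 bytes, so
* GPS_IN_BUFSIZ is 12 + 261 = 273 bytes and GPS_OUT_BUFSIZ is
* 12 + 261 + 256 = 529 bytes. A hypothetical compile-time check: */
#if 0
static char gps_in_bufsiz_check[(GPS_IN_BUFSIZ == 273) ? 1 : -1];
static char gps_out_bufsiz_check[(GPS_OUT_BUFSIZ == 529) ? 1 : -1];
#endif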
/* where to place the packet id of a serial packet, so we can
* prepend the usb-packet header without the need to move the
* packets data */
#define GSP_INITIAL_OFFSET (GARMIN_PKTHDR_LENGTH-2)
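/* Illustrative view of the offset trick above: the serial packet
* (pkt-id, length, data..., cksum) is collected at inbuffer + 10, so
* when the 12-byte usb header is later written over offsets 0..11 it
* consumes the 2-byte serial header in place and the payload already
* sits at offset 12, exactly where the usb protocol expects it. */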
/* max. size of incoming private packets (header+1 param) */
#define PRIVPKTSIZ (GARMIN_PKTHDR_LENGTH+4)
#define GARMIN_LAYERID_TRANSPORT 0
#define GARMIN_LAYERID_APPL 20
/* our own layer-id to use for some control mechanisms */
#define GARMIN_LAYERID_PRIVATE 0x01106E4B
#define GARMIN_PKTID_PVT_DATA 51
#define GARMIN_PKTID_L001_COMMAND_DATA 10
#define CMND_ABORT_TRANSFER 0
/* packet ids used in private layer */
#define PRIV_PKTID_SET_DEBUG 1
#define PRIV_PKTID_SET_MODE 2
#define PRIV_PKTID_INFO_REQ 3
#define PRIV_PKTID_INFO_RESP 4
#define PRIV_PKTID_RESET_REQ 5
#define PRIV_PKTID_SET_DEF_MODE 6
#define ETX 0x03
#define DLE 0x10
#define ACK 0x06
#define NAK 0x15
/* structure used to queue incoming packets */
struct garmin_packet {
struct list_head list;
int seq;
/* the real size of the data array, always > 0 */
int size;
__u8 data[1];
};
/* structure used to keep the current state of the driver */
struct garmin_data {
__u8 state;
__u16 flags;
__u8 mode;
__u8 count;
__u8 pkt_id;
__u32 serial_num;
struct timer_list timer;
struct usb_serial_port *port;
int seq_counter;
int insize;
int outsize;
__u8 inbuffer [GPS_IN_BUFSIZ]; /* tty -> usb */
__u8 outbuffer[GPS_OUT_BUFSIZ]; /* usb -> tty */
__u8 privpkt[4*6];
spinlock_t lock;
struct list_head pktlist;
};
#define STATE_NEW 0
#define STATE_INITIAL_DELAY 1
#define STATE_TIMEOUT 2
#define STATE_SESSION_REQ1 3
#define STATE_SESSION_REQ2 4
#define STATE_ACTIVE 5
#define STATE_RESET 8
#define STATE_DISCONNECTED 9
#define STATE_WAIT_TTY_ACK 10
#define STATE_GSP_WAIT_DATA 11
#define MODE_NATIVE 0
#define MODE_GARMIN_SERIAL 1
/* Flags used in garmin_data.flags: */
#define FLAGS_SESSION_REPLY_MASK 0x00C0
#define FLAGS_SESSION_REPLY1_SEEN 0x0080
#define FLAGS_SESSION_REPLY2_SEEN 0x0040
#define FLAGS_BULK_IN_ACTIVE 0x0020
#define FLAGS_BULK_IN_RESTART 0x0010
#define FLAGS_THROTTLED 0x0008
#define APP_REQ_SEEN 0x0004
#define APP_RESP_SEEN 0x0002
#define CLEAR_HALT_REQUIRED 0x0001
#define FLAGS_QUEUING 0x0100
#define FLAGS_DROP_DATA 0x0800
#define FLAGS_GSP_SKIP 0x1000
#define FLAGS_GSP_DLESEEN 0x2000
/* function prototypes */
static int gsp_next_packet(struct garmin_data *garmin_data_p);
static int garmin_write_bulk(struct usb_serial_port *port,
const unsigned char *buf, int count,
int dismiss_ack);
/* some special packets to be sent or received */
static unsigned char const GARMIN_START_SESSION_REQ[]
= { 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0 };
static unsigned char const GARMIN_START_SESSION_REPLY[]
= { 0, 0, 0, 0, 6, 0, 0, 0, 4, 0, 0, 0 };
static unsigned char const GARMIN_BULK_IN_AVAIL_REPLY[]
= { 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 };
static unsigned char const GARMIN_APP_LAYER_REPLY[]
= { 0x14, 0, 0, 0 };
static unsigned char const GARMIN_START_PVT_REQ[]
= { 20, 0, 0, 0, 10, 0, 0, 0, 2, 0, 0, 0, 49, 0 };
static unsigned char const GARMIN_STOP_PVT_REQ[]
= { 20, 0, 0, 0, 10, 0, 0, 0, 2, 0, 0, 0, 50, 0 };
static unsigned char const GARMIN_STOP_TRANSFER_REQ[]
= { 20, 0, 0, 0, 10, 0, 0, 0, 2, 0, 0, 0, 0, 0 };
static unsigned char const GARMIN_STOP_TRANSFER_REQ_V2[]
= { 20, 0, 0, 0, 10, 0, 0, 0, 1, 0, 0, 0, 0 };
static unsigned char const PRIVATE_REQ[]
= { 0x4B, 0x6E, 0x10, 0x01, 0xFF, 0, 0, 0, 0xFF, 0, 0, 0 };
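/* Note (illustrative): the leading bytes 0x4B 0x6E 0x10 0x01 above are
* GARMIN_LAYERID_PRIVATE (0x01106E4B) in little-endian byte order; the
* meaning of the 0xFF fields is not spelled out here, so they are left
* uninterpreted. */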
static const struct usb_device_id id_table[] = {
/* the same device id seems to be used by all
usb enabled GPS devices */
{ USB_DEVICE(GARMIN_VENDOR_ID, 3) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_driver garmin_driver = {
.name = "garmin_gps",
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table,
};
static inline int getLayerId(const __u8 *usbPacket)
{
return __le32_to_cpup((__le32 *)(usbPacket));
}
static inline int getPacketId(const __u8 *usbPacket)
{
return __le32_to_cpup((__le32 *)(usbPacket+4));
}
static inline int getDataLength(const __u8 *usbPacket)
{
return __le32_to_cpup((__le32 *)(usbPacket+8));
}
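/* Sketch (not part of the original driver) of the 12-byte usb packet
* header decoded by the three accessors above; all fields are stored
* little-endian:
*
*   offset 0..3   layer id (0 = transport, 20 = application)
*   offset 4..7   packet id
*   offset 8..11  data length, followed by that many payload bytes
*/
#if 0
static void dump_usb_pkt_hdr(const __u8 *usbPacket)
{
/* print the three decoded header fields for debugging */
printk(KERN_DEBUG "layer=%d id=%d len=%d\n",
getLayerId(usbPacket), getPacketId(usbPacket),
getDataLength(usbPacket));
}
#endif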
/*
* check if the usb-packet in buf contains an abort-transfer command.
* (if yes, all queued data will be dropped)
*/
static inline int isAbortTrfCmnd(const unsigned char *buf)
{
if (0 == memcmp(buf, GARMIN_STOP_TRANSFER_REQ,
sizeof(GARMIN_STOP_TRANSFER_REQ)) ||
0 == memcmp(buf, GARMIN_STOP_TRANSFER_REQ_V2,
sizeof(GARMIN_STOP_TRANSFER_REQ_V2)))
return 1;
else
return 0;
}
static void send_to_tty(struct usb_serial_port *port,
char *data, unsigned int actual_length)
{
struct tty_struct *tty = tty_port_tty_get(&port->port);
if (tty && actual_length) {
usb_serial_debug_data(debug, &port->dev,
__func__, actual_length, data);
tty_insert_flip_string(tty, data, actual_length);
tty_flip_buffer_push(tty);
}
tty_kref_put(tty);
}
/******************************************************************************
* packet queue handling
******************************************************************************/
/*
* queue a received (usb-)packet for later processing
*/
static int pkt_add(struct garmin_data *garmin_data_p,
unsigned char *data, unsigned int data_length)
{
int state = 0;
int result = 0;
unsigned long flags;
struct garmin_packet *pkt;
/* process only packets containing data ... */
if (data_length) {
pkt = kmalloc(sizeof(struct garmin_packet)+data_length,
GFP_ATOMIC);
if (pkt == NULL) {
dev_err(&garmin_data_p->port->dev, "out of memory\n");
return 0;
}
pkt->size = data_length;
memcpy(pkt->data, data, data_length);
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= FLAGS_QUEUING;
result = list_empty(&garmin_data_p->pktlist);
pkt->seq = garmin_data_p->seq_counter++;
list_add_tail(&pkt->list, &garmin_data_p->pktlist);
state = garmin_data_p->state;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
dbg("%s - added: pkt: %d - %d bytes",
__func__, pkt->seq, data_length);
/* in serial mode, if someone is waiting for data from
the device, convert and send the next packet to tty. */
if (result && (state == STATE_GSP_WAIT_DATA))
gsp_next_packet(garmin_data_p);
}
return result;
}
/* get the next pending packet */
static struct garmin_packet *pkt_pop(struct garmin_data *garmin_data_p)
{
unsigned long flags;
struct garmin_packet *result = NULL;
spin_lock_irqsave(&garmin_data_p->lock, flags);
if (!list_empty(&garmin_data_p->pktlist)) {
result = (struct garmin_packet *)garmin_data_p->pktlist.next;
list_del(&result->list);
}
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
return result;
}
/* free up all queued data */
static void pkt_clear(struct garmin_data *garmin_data_p)
{
unsigned long flags;
struct garmin_packet *result = NULL;
dbg("%s", __func__);
spin_lock_irqsave(&garmin_data_p->lock, flags);
while (!list_empty(&garmin_data_p->pktlist)) {
result = (struct garmin_packet *)garmin_data_p->pktlist.next;
list_del(&result->list);
kfree(result);
}
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
}
/******************************************************************************
* garmin serial protocol handling
******************************************************************************/
/* send an ack packet back to the tty */
static int gsp_send_ack(struct garmin_data *garmin_data_p, __u8 pkt_id)
{
__u8 pkt[10];
__u8 cksum = 0;
__u8 *ptr = pkt;
unsigned l = 0;
dbg("%s - pkt-id: 0x%X.", __func__, 0xFF & pkt_id);
*ptr++ = DLE;
*ptr++ = ACK;
cksum += ACK;
*ptr++ = 2;
cksum += 2;
*ptr++ = pkt_id;
cksum += pkt_id;
if (pkt_id == DLE)
*ptr++ = DLE;
*ptr++ = 0;
*ptr++ = 0xFF & (-cksum);
*ptr++ = DLE;
*ptr++ = ETX;
l = ptr-pkt;
send_to_tty(garmin_data_p->port, pkt, l);
return 0;
}
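/* Worked example (illustrative, derived from the code above): an ack
* for pkt_id 0x33 accumulates cksum = 0x06 + 0x02 + 0x33 = 0x3B, so the
* checksum byte is 0xFF & -0x3B = 0xC5 and the frame sent to the tty
* would be: */
#if 0
static const __u8 example_ack_frame[] = {
DLE, ACK, 0x02, 0x33, 0x00, 0xC5, DLE, ETX
};
#endif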
/*
* called for a complete packet received from tty layer
*
* the complete packet (pktid ... cksum) is in garmin_data_p->inbuf starting
* at GSP_INITIAL_OFFSET.
*
* count - number of bytes in the input buffer including space reserved for
* the usb header: GSP_INITIAL_OFFSET + number of bytes in packet
* (including pkt-id, data-length and cksum)
*/
static int gsp_rec_packet(struct garmin_data *garmin_data_p, int count)
{
unsigned long flags;
const __u8 *recpkt = garmin_data_p->inbuffer+GSP_INITIAL_OFFSET;
__le32 *usbdata = (__le32 *) garmin_data_p->inbuffer;
int cksum = 0;
int n = 0;
int pktid = recpkt[0];
int size = recpkt[1];
usb_serial_debug_data(debug, &garmin_data_p->port->dev,
__func__, count-GSP_INITIAL_OFFSET, recpkt);
if (size != (count-GSP_INITIAL_OFFSET-3)) {
dbg("%s - invalid size, expected %d bytes, got %d",
__func__, size, (count-GSP_INITIAL_OFFSET-3));
return -EINVPKT;
}
cksum += *recpkt++;
cksum += *recpkt++;
/* sanity check, remove after test ... */
if ((__u8 *)&(usbdata[3]) != recpkt) {
dbg("%s - ptr mismatch %p - %p",
__func__, &(usbdata[4]), recpkt);
return -EINVPKT;
}
while (n < size) {
cksum += *recpkt++;
n++;
}
if ((0xff & (cksum + *recpkt)) != 0) {
dbg("%s - invalid checksum, expected %02x, got %02x",
__func__, 0xff & -cksum, 0xff & *recpkt);
return -EINVPKT;
}
usbdata[0] = __cpu_to_le32(GARMIN_LAYERID_APPL);
usbdata[1] = __cpu_to_le32(pktid);
usbdata[2] = __cpu_to_le32(size);
garmin_write_bulk(garmin_data_p->port, garmin_data_p->inbuffer,
GARMIN_PKTHDR_LENGTH+size, 0);
/* if this was an abort-transfer command, flush all
queued data. */
if (isAbortTrfCmnd(garmin_data_p->inbuffer)) {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= FLAGS_DROP_DATA;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
pkt_clear(garmin_data_p);
}
return count;
}
/*
* Called for data received from tty
*
* buf contains the data read; it may span more than one packet or
* even contain incomplete packets
*
* input record should be a serial-record, but it may not be complete.
* Copy it into our local buffer, until an etx is seen (or an error
* occurs).
* Once the record is complete, convert it into a usb packet, send it
* to the bulk pipe and send an ack back to the tty.
*
* If the input is an ack, just send the last queued packet to the
* tty layer.
*
* if the input is an abort command, drop all queued data.
*/
static int gsp_receive(struct garmin_data *garmin_data_p,
const unsigned char *buf, int count)
{
unsigned long flags;
int offs = 0;
int ack_or_nak_seen = 0;
__u8 *dest;
int size;
/* dleSeen: set if last byte read was a DLE */
int dleSeen;
/* skip: if set, skip incoming data until possible start of
* new packet
*/
int skip;
__u8 data;
spin_lock_irqsave(&garmin_data_p->lock, flags);
dest = garmin_data_p->inbuffer;
size = garmin_data_p->insize;
dleSeen = garmin_data_p->flags & FLAGS_GSP_DLESEEN;
skip = garmin_data_p->flags & FLAGS_GSP_SKIP;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
/* dbg("%s - dle=%d skip=%d size=%d count=%d",
__func__, dleSeen, skip, size, count); */
if (size == 0)
size = GSP_INITIAL_OFFSET;
while (offs < count) {
data = *(buf+offs);
offs++;
if (data == DLE) {
if (skip) { /* start of a new pkt */
skip = 0;
size = GSP_INITIAL_OFFSET;
dleSeen = 1;
} else if (dleSeen) {
dest[size++] = data;
dleSeen = 0;
} else {
dleSeen = 1;
}
} else if (data == ETX) {
if (dleSeen) {
/* packet complete */
data = dest[GSP_INITIAL_OFFSET];
if (data == ACK) {
ack_or_nak_seen = ACK;
dbg("ACK packet complete.");
} else if (data == NAK) {
ack_or_nak_seen = NAK;
dbg("NAK packet complete.");
} else {
dbg("packet complete - id=0x%X.",
0xFF & data);
gsp_rec_packet(garmin_data_p, size);
}
skip = 1;
size = GSP_INITIAL_OFFSET;
dleSeen = 0;
} else {
dest[size++] = data;
}
} else if (!skip) {
if (dleSeen) {
size = GSP_INITIAL_OFFSET;
dleSeen = 0;
}
dest[size++] = data;
}
if (size >= GPS_IN_BUFSIZ) {
dbg("%s - packet too large.", __func__);
skip = 1;
size = GSP_INITIAL_OFFSET;
dleSeen = 0;
}
}
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->insize = size;
/* copy flags back to structure */
if (skip)
garmin_data_p->flags |= FLAGS_GSP_SKIP;
else
garmin_data_p->flags &= ~FLAGS_GSP_SKIP;
if (dleSeen)
garmin_data_p->flags |= FLAGS_GSP_DLESEEN;
else
garmin_data_p->flags &= ~FLAGS_GSP_DLESEEN;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
if (ack_or_nak_seen) {
if (gsp_next_packet(garmin_data_p) > 0)
garmin_data_p->state = STATE_ACTIVE;
else
garmin_data_p->state = STATE_GSP_WAIT_DATA;
}
return count;
}
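/* Illustrative frame for the state machine above (bytes invented for
* the example): pkt-id 0x33, length 2, data { 0xAA, 0x10 }; the 0x10
* data byte is DLE-stuffed on the wire and the checksum is
* 0xFF & -(0x33 + 0x02 + 0xAA + 0x10) = 0x11: */
#if 0
static const __u8 example_gsp_frame[] = {
DLE, 0x33, 0x02, 0xAA, DLE, DLE, 0x11, DLE, ETX
};
#endif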
/*
* Sends a usb packet to the tty
*
* Assumes that all packets end at a usb-packet boundary.
*
* return <0 on error, 0 if packet is incomplete or > 0 if packet was sent
*/
static int gsp_send(struct garmin_data *garmin_data_p,
const unsigned char *buf, int count)
{
const unsigned char *src;
unsigned char *dst;
int pktid = 0;
int datalen = 0;
int cksum = 0;
int i = 0;
int k;
dbg("%s - state %d - %d bytes.", __func__,
garmin_data_p->state, count);
k = garmin_data_p->outsize;
if ((k+count) > GPS_OUT_BUFSIZ) {
dbg("packet too large");
garmin_data_p->outsize = 0;
return -4;
}
memcpy(garmin_data_p->outbuffer+k, buf, count);
k += count;
garmin_data_p->outsize = k;
if (k >= GARMIN_PKTHDR_LENGTH) {
pktid = getPacketId(garmin_data_p->outbuffer);
datalen = getDataLength(garmin_data_p->outbuffer);
i = GARMIN_PKTHDR_LENGTH + datalen;
if (k < i)
return 0;
} else {
return 0;
}
dbg("%s - %d bytes in buffer, %d bytes in pkt.", __func__, k, i);
/* garmin_data_p->outbuffer now contains a complete packet */
usb_serial_debug_data(debug, &garmin_data_p->port->dev,
__func__, k, garmin_data_p->outbuffer);
garmin_data_p->outsize = 0;
if (GARMIN_LAYERID_APPL != getLayerId(garmin_data_p->outbuffer)) {
dbg("not an application packet (%d)",
getLayerId(garmin_data_p->outbuffer));
return -1;
}
if (pktid > 255) {
dbg("packet-id %d too large", pktid);
return -2;
}
if (datalen > 255) {
dbg("packet-size %d too large", datalen);
return -3;
}
/* the serial protocol should be able to handle this packet */
k = 0;
src = garmin_data_p->outbuffer+GARMIN_PKTHDR_LENGTH;
for (i = 0; i < datalen; i++) {
if (*src++ == DLE)
k++;
}
src = garmin_data_p->outbuffer+GARMIN_PKTHDR_LENGTH;
if (k > (GARMIN_PKTHDR_LENGTH-2)) {
/* can't add stuffing DLEs in place, move data to end
of buffer ... */
dst = garmin_data_p->outbuffer+GPS_OUT_BUFSIZ-datalen;
memcpy(dst, src, datalen);
src = dst;
}
dst = garmin_data_p->outbuffer;
*dst++ = DLE;
*dst++ = pktid;
cksum += pktid;
*dst++ = datalen;
cksum += datalen;
if (datalen == DLE)
*dst++ = DLE;
for (i = 0; i < datalen; i++) {
__u8 c = *src++;
*dst++ = c;
cksum += c;
if (c == DLE)
*dst++ = DLE;
}
cksum = 0xFF & -cksum;
*dst++ = cksum;
if (cksum == DLE)
*dst++ = DLE;
*dst++ = DLE;
*dst++ = ETX;
i = dst-garmin_data_p->outbuffer;
send_to_tty(garmin_data_p->port, garmin_data_p->outbuffer, i);
garmin_data_p->pkt_id = pktid;
garmin_data_p->state = STATE_WAIT_TTY_ACK;
return i;
}
/*
* Process the next pending data packet - if there is one
*/
static int gsp_next_packet(struct garmin_data *garmin_data_p)
{
int result = 0;
struct garmin_packet *pkt = NULL;
while ((pkt = pkt_pop(garmin_data_p)) != NULL) {
dbg("%s - next pkt: %d", __func__, pkt->seq);
result = gsp_send(garmin_data_p, pkt->data, pkt->size);
if (result > 0) {
kfree(pkt);
return result;
}
kfree(pkt);
}
return result;
}
/******************************************************************************
* garmin native mode
******************************************************************************/
/*
* Called for data received from tty
*
* The input data is expected to be in garmin usb-packet format.
*
* buf contains the data read; it may span more than one packet
* or even contain incomplete packets
*/
static int nat_receive(struct garmin_data *garmin_data_p,
const unsigned char *buf, int count)
{
unsigned long flags;
__u8 *dest;
int offs = 0;
int result = count;
int len;
while (offs < count) {
/* if buffer contains header, copy rest of data */
if (garmin_data_p->insize >= GARMIN_PKTHDR_LENGTH)
len = GARMIN_PKTHDR_LENGTH
+getDataLength(garmin_data_p->inbuffer);
else
len = GARMIN_PKTHDR_LENGTH;
if (len >= GPS_IN_BUFSIZ) {
/* seems to be an invalid packet, ignore rest
of input */
dbg("%s - packet size too large: %d", __func__, len);
garmin_data_p->insize = 0;
count = 0;
result = -EINVPKT;
} else {
len -= garmin_data_p->insize;
if (len > (count-offs))
len = (count-offs);
if (len > 0) {
dest = garmin_data_p->inbuffer
+ garmin_data_p->insize;
memcpy(dest, buf+offs, len);
garmin_data_p->insize += len;
offs += len;
}
}
/* do we have a complete packet? */
if (garmin_data_p->insize >= GARMIN_PKTHDR_LENGTH) {
len = GARMIN_PKTHDR_LENGTH+
getDataLength(garmin_data_p->inbuffer);
if (garmin_data_p->insize >= len) {
garmin_write_bulk(garmin_data_p->port,
garmin_data_p->inbuffer,
len, 0);
garmin_data_p->insize = 0;
/* if this was an abort-transfer command,
flush all queued data. */
if (isAbortTrfCmnd(garmin_data_p->inbuffer)) {
spin_lock_irqsave(&garmin_data_p->lock,
flags);
garmin_data_p->flags |= FLAGS_DROP_DATA;
spin_unlock_irqrestore(
&garmin_data_p->lock, flags);
pkt_clear(garmin_data_p);
}
}
}
}
return result;
}
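/* Illustrative invariant of the loop above: until the 12-byte header
* has been collected, at most GARMIN_PKTHDR_LENGTH bytes are copied
* into inbuffer; once the header is present, a packet is complete when
* insize >= GARMIN_PKTHDR_LENGTH + getDataLength(inbuffer). */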
/******************************************************************************
* private packets
******************************************************************************/
static void priv_status_resp(struct usb_serial_port *port)
{
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
__le32 *pkt = (__le32 *)garmin_data_p->privpkt;
pkt[0] = __cpu_to_le32(GARMIN_LAYERID_PRIVATE);
pkt[1] = __cpu_to_le32(PRIV_PKTID_INFO_RESP);
pkt[2] = __cpu_to_le32(12);
pkt[3] = __cpu_to_le32(VERSION_MAJOR << 16 | VERSION_MINOR);
pkt[4] = __cpu_to_le32(garmin_data_p->mode);
pkt[5] = __cpu_to_le32(garmin_data_p->serial_num);
send_to_tty(port, (__u8 *)pkt, 6 * 4);
}
/******************************************************************************
* Garmin specific driver functions
******************************************************************************/
static int process_resetdev_request(struct usb_serial_port *port)
{
unsigned long flags;
int status;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags &= ~(CLEAR_HALT_REQUIRED);
garmin_data_p->state = STATE_RESET;
garmin_data_p->serial_num = 0;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
usb_kill_urb(port->interrupt_in_urb);
dbg("%s - usb_reset_device", __func__);
status = usb_reset_device(port->serial->dev);
if (status)
dbg("%s - usb_reset_device failed: %d",
__func__, status);
return status;
}
/*
* clear all cached data
*/
static int garmin_clear(struct garmin_data *garmin_data_p)
{
unsigned long flags;
int status = 0;
/* flush all queued data */
pkt_clear(garmin_data_p);
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->insize = 0;
garmin_data_p->outsize = 0;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
return status;
}
static int garmin_init_session(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
int status = 0;
int i = 0;
if (status == 0) {
usb_kill_urb(port->interrupt_in_urb);
dbg("%s - adding interrupt input", __func__);
status = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (status)
dev_err(&serial->dev->dev,
"%s - failed submitting interrupt urb, error %d\n",
__func__, status);
}
/*
* using the initialization method from gpsbabel. See comments in
* gpsbabel/jeeps/gpslibusb.c gusb_reset_toggles()
*/
if (status == 0) {
dbg("%s - starting session ...", __func__);
garmin_data_p->state = STATE_ACTIVE;
for (i = 0; i < 3; i++) {
status = garmin_write_bulk(port,
GARMIN_START_SESSION_REQ,
sizeof(GARMIN_START_SESSION_REQ), 0);
if (status < 0)
break;
}
if (status > 0)
status = 0;
}
return status;
}
static int garmin_open(struct tty_struct *tty, struct usb_serial_port *port)
{
unsigned long flags;
int status = 0;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
dbg("%s - port %d", __func__, port->number);
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->mode = initial_mode;
garmin_data_p->count = 0;
garmin_data_p->flags &= FLAGS_SESSION_REPLY1_SEEN;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
/* shutdown any bulk reads that might be going on */
usb_kill_urb(port->write_urb);
usb_kill_urb(port->read_urb);
if (garmin_data_p->state == STATE_RESET)
status = garmin_init_session(port);
garmin_data_p->state = STATE_ACTIVE;
return status;
}
static void garmin_close(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
dbg("%s - port %d - mode=%d state=%d flags=0x%X", __func__,
port->number, garmin_data_p->mode,
garmin_data_p->state, garmin_data_p->flags);
if (!serial)
return;
mutex_lock(&port->serial->disc_mutex);
if (!port->serial->disconnected)
garmin_clear(garmin_data_p);
/* shutdown our urbs */
usb_kill_urb(port->read_urb);
usb_kill_urb(port->write_urb);
/* keep reset state so we know that we must start a new session */
if (garmin_data_p->state != STATE_RESET)
garmin_data_p->state = STATE_DISCONNECTED;
mutex_unlock(&port->serial->disc_mutex);
}
static void garmin_write_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
if (port) {
struct garmin_data *garmin_data_p =
usb_get_serial_port_data(port);
dbg("%s - port %d", __func__, port->number);
if (GARMIN_LAYERID_APPL == getLayerId(urb->transfer_buffer)) {
if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
gsp_send_ack(garmin_data_p,
((__u8 *)urb->transfer_buffer)[4]);
}
}
usb_serial_port_softint(port);
}
/* Ignore errors that resulted from garmin_write_bulk with
dismiss_ack = 1 */
/* free up the transfer buffer, as usb_free_urb() does not do this */
kfree(urb->transfer_buffer);
}
static int garmin_write_bulk(struct usb_serial_port *port,
const unsigned char *buf, int count,
int dismiss_ack)
{
unsigned long flags;
struct usb_serial *serial = port->serial;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
struct urb *urb;
unsigned char *buffer;
int status;
dbg("%s - port %d, state %d", __func__, port->number,
garmin_data_p->state);
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags &= ~FLAGS_DROP_DATA;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
buffer = kmalloc(count, GFP_ATOMIC);
if (!buffer) {
dev_err(&port->dev, "out of memory\n");
return -ENOMEM;
}
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
dev_err(&port->dev, "no more free urbs\n");
kfree(buffer);
return -ENOMEM;
}
memcpy(buffer, buf, count);
usb_serial_debug_data(debug, &port->dev, __func__, count, buffer);
usb_fill_bulk_urb(urb, serial->dev,
usb_sndbulkpipe(serial->dev,
port->bulk_out_endpointAddress),
buffer, count,
garmin_write_bulk_callback,
dismiss_ack ? NULL : port);
urb->transfer_flags |= URB_ZERO_PACKET;
if (GARMIN_LAYERID_APPL == getLayerId(buffer)) {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= APP_REQ_SEEN;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
pkt_clear(garmin_data_p);
garmin_data_p->state = STATE_GSP_WAIT_DATA;
}
}
/* send it down the pipe */
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
dev_err(&port->dev,
"%s - usb_submit_urb(write bulk) failed with status = %d\n",
__func__, status);
count = status;
}
/* we are done with this urb, so let the host driver
* really free it when it is finished with it */
usb_free_urb(urb);
return count;
}
static int garmin_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
int pktid, pktsiz, len;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
__le32 *privpkt = (__le32 *)garmin_data_p->privpkt;
usb_serial_debug_data(debug, &port->dev, __func__, count, buf);
if (garmin_data_p->state == STATE_RESET)
return -EIO;
/* check for our private packets */
if (count >= GARMIN_PKTHDR_LENGTH) {
len = PRIVPKTSIZ;
if (count < len)
len = count;
memcpy(garmin_data_p->privpkt, buf, len);
pktsiz = getDataLength(garmin_data_p->privpkt);
pktid = getPacketId(garmin_data_p->privpkt);
if (count == (GARMIN_PKTHDR_LENGTH+pktsiz)
&& GARMIN_LAYERID_PRIVATE ==
getLayerId(garmin_data_p->privpkt)) {
dbg("%s - processing private request %d",
__func__, pktid);
/* drop all unfinished transfers */
garmin_clear(garmin_data_p);
switch (pktid) {
case PRIV_PKTID_SET_DEBUG:
if (pktsiz != 4)
return -EINVPKT;
debug = __le32_to_cpu(privpkt[3]);
dbg("%s - debug level set to 0x%X",
__func__, debug);
break;
case PRIV_PKTID_SET_MODE:
if (pktsiz != 4)
return -EINVPKT;
garmin_data_p->mode = __le32_to_cpu(privpkt[3]);
dbg("%s - mode set to %d",
__func__, garmin_data_p->mode);
break;
case PRIV_PKTID_INFO_REQ:
priv_status_resp(port);
break;
case PRIV_PKTID_RESET_REQ:
process_resetdev_request(port);
break;
case PRIV_PKTID_SET_DEF_MODE:
if (pktsiz != 4)
return -EINVPKT;
initial_mode = __le32_to_cpu(privpkt[3]);
dbg("%s - initial_mode set to %d",
__func__,
garmin_data_p->mode);
break;
}
return count;
}
}
if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
return gsp_receive(garmin_data_p, buf, count);
} else { /* MODE_NATIVE */
return nat_receive(garmin_data_p, buf, count);
}
}
static int garmin_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
/*
* Report back the bytes currently available in the output buffer.
*/
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
return GPS_OUT_BUFSIZ-garmin_data_p->outsize;
}
static void garmin_read_process(struct garmin_data *garmin_data_p,
unsigned char *data, unsigned data_length,
int bulk_data)
{
unsigned long flags;
if (garmin_data_p->flags & FLAGS_DROP_DATA) {
/* abort-transfer cmd is active */
dbg("%s - pkt dropped", __func__);
} else if (garmin_data_p->state != STATE_DISCONNECTED &&
garmin_data_p->state != STATE_RESET) {
/* if throttling is active or postprocessing is required
put the received data in the input queue, otherwise
send it directly to the tty port */
if (garmin_data_p->flags & FLAGS_QUEUING) {
pkt_add(garmin_data_p, data, data_length);
} else if (bulk_data ||
getLayerId(data) == GARMIN_LAYERID_APPL) {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= APP_RESP_SEEN;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
pkt_add(garmin_data_p, data, data_length);
} else {
send_to_tty(garmin_data_p->port, data,
data_length);
}
}
/* ignore system layer packets ... */
}
}
static void garmin_read_bulk_callback(struct urb *urb)
{
unsigned long flags;
struct usb_serial_port *port = urb->context;
struct usb_serial *serial = port->serial;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
int retval;
dbg("%s - port %d", __func__, port->number);
if (!serial) {
dbg("%s - bad serial pointer, exiting", __func__);
return;
}
if (status) {
dbg("%s - nonzero read bulk status received: %d",
__func__, status);
return;
}
usb_serial_debug_data(debug, &port->dev,
__func__, urb->actual_length, data);
garmin_read_process(garmin_data_p, data, urb->actual_length, 1);
if (urb->actual_length == 0 &&
0 != (garmin_data_p->flags & FLAGS_BULK_IN_RESTART)) {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags &= ~FLAGS_BULK_IN_RESTART;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (retval)
dev_err(&port->dev,
"%s - failed resubmitting read urb, error %d\n",
__func__, retval);
} else if (urb->actual_length > 0) {
/* Continue trying to read until nothing more is received */
if (0 == (garmin_data_p->flags & FLAGS_THROTTLED)) {
retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (retval)
dev_err(&port->dev,
"%s - failed resubmitting read urb, "
"error %d\n", __func__, retval);
}
} else {
dbg("%s - end of bulk data", __func__);
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags &= ~FLAGS_BULK_IN_ACTIVE;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
}
}
static void garmin_read_int_callback(struct urb *urb)
{
unsigned long flags;
int retval;
struct usb_serial_port *port = urb->context;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d",
__func__, status);
return;
default:
dbg("%s - nonzero urb status received: %d",
__func__, status);
return;
}
usb_serial_debug_data(debug, &port->dev, __func__,
urb->actual_length, urb->transfer_buffer);
if (urb->actual_length == sizeof(GARMIN_BULK_IN_AVAIL_REPLY) &&
0 == memcmp(data, GARMIN_BULK_IN_AVAIL_REPLY,
sizeof(GARMIN_BULK_IN_AVAIL_REPLY))) {
dbg("%s - bulk data available.", __func__);
if (0 == (garmin_data_p->flags & FLAGS_BULK_IN_ACTIVE)) {
/* bulk data available */
retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (retval) {
dev_err(&port->dev,
"%s - failed submitting read urb, error %d\n",
__func__, retval);
} else {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= FLAGS_BULK_IN_ACTIVE;
spin_unlock_irqrestore(&garmin_data_p->lock,
flags);
}
} else {
/* bulk-in transfer still active */
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= FLAGS_BULK_IN_RESTART;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
}
} else if (urb->actual_length == (4+sizeof(GARMIN_START_SESSION_REPLY))
&& 0 == memcmp(data, GARMIN_START_SESSION_REPLY,
sizeof(GARMIN_START_SESSION_REPLY))) {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= FLAGS_SESSION_REPLY1_SEEN;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
/* save the serial number */
garmin_data_p->serial_num = __le32_to_cpup(
(__le32 *)(data+GARMIN_PKTHDR_LENGTH));
dbg("%s - start-of-session reply seen - serial %u.",
__func__, garmin_data_p->serial_num);
}
garmin_read_process(garmin_data_p, data, urb->actual_length, 0);
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&urb->dev->dev,
"%s - Error %d submitting interrupt urb\n",
__func__, retval);
}
/*
* Sends the next queued packet to the tty port (garmin native mode only)
* and then sets a timer to call itself again until all queued data
* is sent.
*/
static int garmin_flush_queue(struct garmin_data *garmin_data_p)
{
unsigned long flags;
struct garmin_packet *pkt;
if ((garmin_data_p->flags & FLAGS_THROTTLED) == 0) {
pkt = pkt_pop(garmin_data_p);
if (pkt != NULL) {
send_to_tty(garmin_data_p->port, pkt->data, pkt->size);
kfree(pkt);
mod_timer(&garmin_data_p->timer, (1)+jiffies);
} else {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags &= ~FLAGS_QUEUING;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
}
}
return 0;
}
static void garmin_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
dbg("%s - port %d", __func__, port->number);
/* set flag, data received will be put into a queue
for later processing */
spin_lock_irq(&garmin_data_p->lock);
garmin_data_p->flags |= FLAGS_QUEUING|FLAGS_THROTTLED;
spin_unlock_irq(&garmin_data_p->lock);
}
static void garmin_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
int status;
dbg("%s - port %d", __func__, port->number);
spin_lock_irq(&garmin_data_p->lock);
garmin_data_p->flags &= ~FLAGS_THROTTLED;
spin_unlock_irq(&garmin_data_p->lock);
/* in native mode send queued data to tty, in
serial mode nothing needs to be done here */
if (garmin_data_p->mode == MODE_NATIVE)
garmin_flush_queue(garmin_data_p);
if (0 != (garmin_data_p->flags & FLAGS_BULK_IN_ACTIVE)) {
status = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (status)
dev_err(&port->dev,
"%s - failed resubmitting read urb, error %d\n",
__func__, status);
}
}
/*
* The timer is currently only used to send queued packets to
* the tty in cases where the protocol provides no handshaking of
* its own to initiate the transfer.
*/
static void timeout_handler(unsigned long data)
{
struct garmin_data *garmin_data_p = (struct garmin_data *) data;
/* send the next queued packet to the tty port */
if (garmin_data_p->mode == MODE_NATIVE)
if (garmin_data_p->flags & FLAGS_QUEUING)
garmin_flush_queue(garmin_data_p);
}
static int garmin_attach(struct usb_serial *serial)
{
int status = 0;
struct usb_serial_port *port = serial->port[0];
struct garmin_data *garmin_data_p = NULL;
dbg("%s", __func__);
garmin_data_p = kzalloc(sizeof(struct garmin_data), GFP_KERNEL);
if (garmin_data_p == NULL) {
dev_err(&port->dev, "%s - Out of memory\n", __func__);
return -ENOMEM;
}
init_timer(&garmin_data_p->timer);
spin_lock_init(&garmin_data_p->lock);
INIT_LIST_HEAD(&garmin_data_p->pktlist);
/* garmin_data_p->timer.expires = jiffies + session_timeout; */
garmin_data_p->timer.data = (unsigned long)garmin_data_p;
garmin_data_p->timer.function = timeout_handler;
garmin_data_p->port = port;
garmin_data_p->state = 0;
garmin_data_p->flags = 0;
garmin_data_p->count = 0;
usb_set_serial_port_data(port, garmin_data_p);
status = garmin_init_session(port);
return status;
}
static void garmin_disconnect(struct usb_serial *serial)
{
struct usb_serial_port *port = serial->port[0];
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
dbg("%s", __func__);
usb_kill_urb(port->interrupt_in_urb);
del_timer_sync(&garmin_data_p->timer);
}
static void garmin_release(struct usb_serial *serial)
{
struct usb_serial_port *port = serial->port[0];
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
dbg("%s", __func__);
kfree(garmin_data_p);
}
/* All of the device info needed */
static struct usb_serial_driver garmin_device = {
.driver = {
.owner = THIS_MODULE,
.name = "garmin_gps",
},
.description = "Garmin GPS usb/tty",
.id_table = id_table,
.num_ports = 1,
.open = garmin_open,
.close = garmin_close,
.throttle = garmin_throttle,
.unthrottle = garmin_unthrottle,
.attach = garmin_attach,
.disconnect = garmin_disconnect,
.release = garmin_release,
.write = garmin_write,
.write_room = garmin_write_room,
.write_bulk_callback = garmin_write_bulk_callback,
.read_bulk_callback = garmin_read_bulk_callback,
.read_int_callback = garmin_read_int_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
&garmin_device, NULL
};
module_usb_serial_driver(garmin_driver, serial_drivers);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(debug, bool, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "Debug enabled or not");
module_param(initial_mode, int, S_IRUGO);
MODULE_PARM_DESC(initial_mode, "Initial mode");
| gpl-2.0 |
kabata1975/android_kernel_c8690 | drivers/usb/host/ohci-sa1111.c | 3590 | 6699 | /*
* OHCI HCD (Host Controller Driver) for USB.
*
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
* (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
* (C) Copyright 2002 Hewlett-Packard Company
*
* SA1111 Bus Glue
*
* Written by Christopher Hoover <ch@hpl.hp.com>
* Based on fragments of previous driver by Russell King et al.
*
* This file is licenced under the GPL.
*/
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <mach/assabet.h>
#include <mach/badge4.h>
#include <asm/hardware/sa1111.h>
#ifndef CONFIG_SA1111
#error "This file is SA-1111 bus glue. CONFIG_SA1111 must be defined."
#endif
extern int usb_disabled(void);
/*-------------------------------------------------------------------------*/
static void sa1111_start_hc(struct sa1111_dev *dev)
{
unsigned int usb_rst = 0;
printk(KERN_DEBUG "%s: starting SA-1111 OHCI USB Controller\n",
__FILE__);
#ifdef CONFIG_SA1100_BADGE4
if (machine_is_badge4()) {
badge4_set_5V(BADGE4_5V_USB, 1);
}
#endif
if (machine_is_xp860() ||
machine_has_neponset() ||
machine_is_pfs168() ||
machine_is_badge4())
usb_rst = USB_RESET_PWRSENSELOW | USB_RESET_PWRCTRLLOW;
/*
* Configure the power sense and control lines. Place the USB
* host controller in reset.
*/
sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
dev->mapbase + SA1111_USB_RESET);
/*
* Now, carefully enable the USB clock, and take
* the USB host controller out of reset.
*/
sa1111_enable_device(dev);
udelay(11);
sa1111_writel(usb_rst, dev->mapbase + SA1111_USB_RESET);
}
static void sa1111_stop_hc(struct sa1111_dev *dev)
{
unsigned int usb_rst;
printk(KERN_DEBUG "%s: stopping SA-1111 OHCI USB Controller\n",
__FILE__);
/*
* Put the USB host controller into reset.
*/
usb_rst = sa1111_readl(dev->mapbase + SA1111_USB_RESET);
sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
dev->mapbase + SA1111_USB_RESET);
/*
* Stop the USB clock.
*/
sa1111_disable_device(dev);
#ifdef CONFIG_SA1100_BADGE4
if (machine_is_badge4()) {
/* Disable power to the USB bus */
badge4_set_5V(BADGE4_5V_USB, 0);
}
#endif
}
/*-------------------------------------------------------------------------*/
#if 0
static void dump_hci_status(struct usb_hcd *hcd, const char *label)
{
unsigned long status = sa1111_readl(hcd->regs + SA1111_USB_STATUS);
dbg ("%s USB_STATUS = { %s%s%s%s%s}", label,
((status & USB_STATUS_IRQHCIRMTWKUP) ? "IRQHCIRMTWKUP " : ""),
((status & USB_STATUS_IRQHCIBUFFACC) ? "IRQHCIBUFFACC " : ""),
((status & USB_STATUS_NIRQHCIM) ? "" : "IRQHCIM "),
((status & USB_STATUS_NHCIMFCLR) ? "" : "HCIMFCLR "),
((status & USB_STATUS_USBPWRSENSE) ? "USBPWRSENSE " : ""));
}
#endif
/*-------------------------------------------------------------------------*/
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */
/**
* usb_hcd_sa1111_probe - initialize SA-1111-based HCDs
* Context: !in_interrupt()
*
* Allocates basic resources for this USB host controller, and
* then invokes the start() method for the HCD associated with it
* through the hotplug entry's driver_data.
*
* Store this function in the HCD's struct sa1111_driver as probe().
*/
int usb_hcd_sa1111_probe (const struct hc_driver *driver,
struct sa1111_dev *dev)
{
struct usb_hcd *hcd;
int retval;
hcd = usb_create_hcd (driver, &dev->dev, "sa1111");
if (!hcd)
return -ENOMEM;
hcd->rsrc_start = dev->res.start;
hcd->rsrc_len = dev->res.end - dev->res.start + 1;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
dbg("request_mem_region failed");
retval = -EBUSY;
goto err1;
}
hcd->regs = dev->mapbase;
sa1111_start_hc(dev);
ohci_hcd_init(hcd_to_ohci(hcd));
retval = usb_add_hcd(hcd, dev->irq[1], IRQF_DISABLED);
if (retval == 0)
return retval;
sa1111_stop_hc(dev);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err1:
usb_put_hcd(hcd);
return retval;
}
/* may be called without controller electrically present */
/* may be called with controller, bus, and devices active */
/**
* usb_hcd_sa1111_remove - shutdown processing for SA-1111-based HCDs
* @dev: USB Host Controller being removed
* Context: !in_interrupt()
*
* Reverses the effect of usb_hcd_sa1111_probe(), first invoking
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*
*/
void usb_hcd_sa1111_remove (struct usb_hcd *hcd, struct sa1111_dev *dev)
{
usb_remove_hcd(hcd);
sa1111_stop_hc(dev);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
}
/*-------------------------------------------------------------------------*/
static int __devinit
ohci_sa1111_start (struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
int ret;
if ((ret = ohci_init(ohci)) < 0)
return ret;
if ((ret = ohci_run (ohci)) < 0) {
err ("can't start %s", hcd->self.bus_name);
ohci_stop (hcd);
return ret;
}
return 0;
}
/*-------------------------------------------------------------------------*/
static const struct hc_driver ohci_sa1111_hc_driver = {
.description = hcd_name,
.product_desc = "SA-1111 OHCI",
.hcd_priv_size = sizeof(struct ohci_hcd),
/*
* generic hardware linkage
*/
.irq = ohci_irq,
.flags = HCD_USB11 | HCD_MEMORY,
/*
* basic lifecycle operations
*/
.start = ohci_sa1111_start,
.stop = ohci_stop,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = ohci_urb_enqueue,
.urb_dequeue = ohci_urb_dequeue,
.endpoint_disable = ohci_endpoint_disable,
/*
* scheduling support
*/
.get_frame_number = ohci_get_frame,
/*
* root hub support
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
#endif
.start_port_reset = ohci_start_port_reset,
};
/*-------------------------------------------------------------------------*/
static int ohci_hcd_sa1111_drv_probe(struct sa1111_dev *dev)
{
int ret;
if (usb_disabled())
return -ENODEV;
ret = usb_hcd_sa1111_probe(&ohci_sa1111_hc_driver, dev);
return ret;
}
static int ohci_hcd_sa1111_drv_remove(struct sa1111_dev *dev)
{
struct usb_hcd *hcd = sa1111_get_drvdata(dev);
usb_hcd_sa1111_remove(hcd, dev);
return 0;
}
static struct sa1111_driver ohci_hcd_sa1111_driver = {
.drv = {
.name = "sa1111-ohci",
},
.devid = SA1111_DEVID_USB,
.probe = ohci_hcd_sa1111_drv_probe,
.remove = ohci_hcd_sa1111_drv_remove,
};
| gpl-2.0 |
TeamVilleC2/android_kernel_htc_liberty-villec2 | arch/x86/crypto/twofish_glue.c | 4870 | 3316 | /*
* Glue Code for assembler optimized version of TWOFISH
*
* Originally Twofish for GPG
* By Matthew Skala <mskala@ansuz.sooke.bc.ca>, July 26, 1998
* 256-bit key length added March 20, 1999
* Some modifications to reduce the text size by Werner Koch, April, 1998
* Ported to the kerneli patch by Marc Mutz <Marc@Mutz.com>
* Ported to CryptoAPI by Colin Slater <hoho@tacomeat.net>
*
* The original author has disclaimed all copyright interest in this
* code and thus put it in the public domain. The subsequent authors
* have put this under the GNU General Public License.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
* This code is a "clean room" implementation, written from the paper
* _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
* Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available
* through http://www.counterpane.com/twofish.html
*
* For background information on multiplication in finite fields, used for
* the matrix operations in the key schedule, see the book _Contemporary
* Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
* Third Edition.
*/
#include <crypto/twofish.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
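/* Background sketch for the finite-field note in the header comment:
* multiplication in GF(2^8) as used by the key-schedule matrix
* operations. The reduction polynomial 0x169 (x^8+x^6+x^5+x^3+1) is the
* one the Twofish paper gives for the MDS matrix - treat that as an
* assumption and check the paper. This helper is illustrative only; the
* real field math lives in the generic twofish implementation, not in
* this glue file. */
#if 0
static u8 gf256_mul(u8 x, u8 y, unsigned int poly)
{
unsigned int a = x, b = y, r = 0;
while (b) {
if (b & 1)
r ^= a; /* conditional add is xor in GF(2^8) */
b >>= 1;
a <<= 1;
if (a & 0x100)
a ^= poly; /* reduce modulo the field polynomial */
}
return (u8)r;
}
#endif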
asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(twofish_enc_blk);
asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(twofish_dec_blk);
static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
twofish_enc_blk(crypto_tfm_ctx(tfm), dst, src);
}
static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
twofish_dec_blk(crypto_tfm_ctx(tfm), dst, src);
}
static struct crypto_alg alg = {
.cra_name = "twofish",
.cra_driver_name = "twofish-asm",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = TF_MIN_KEY_SIZE,
.cia_max_keysize = TF_MAX_KEY_SIZE,
.cia_setkey = twofish_setkey,
.cia_encrypt = twofish_encrypt,
.cia_decrypt = twofish_decrypt
}
}
};
static int __init init(void)
{
return crypto_register_alg(&alg);
}
static void __exit fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized");
MODULE_ALIAS("twofish");
MODULE_ALIAS("twofish-asm");
| gpl-2.0 |
holyangel/M8-GPE_M | drivers/media/video/zoran/zoran_driver.c | 4870 | 80936 | /*
* Zoran zr36057/zr36067 PCI controller driver, for the
* Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux
* Media Labs LML33/LML33R10.
*
* Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
*
* Changes for BUZ by Wolfgang Scherr <scherr@net4you.net>
*
* Changes for DC10/DC30 by Laurent Pinchart <laurent.pinchart@skynet.be>
*
* Changes for LML33R10 by Maxim Yevtyushkin <max@linuxmedialabs.com>
*
* Changes for videodev2/v4l2 by Ronald Bultje <rbultje@ronald.bitfreak.net>
*
* Based on
*
* Miro DC10 driver
* Copyright (C) 1999 Wolfgang Scherr <scherr@net4you.net>
*
* Iomega Buz driver version 1.0
* Copyright (C) 1999 Rainer Johanni <Rainer@Johanni.de>
*
* buz.0.0.3
* Copyright (C) 1998 Dave Perks <dperks@ibm.net>
*
* bttv - Bt848 frame grabber driver
* Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de)
* & Marcus Metzler (mocm@thp.uni-koeln.de)
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/spinlock.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include "videocodec.h"
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include "zoran.h"
#include "zoran_device.h"
#include "zoran_card.h"
const struct zoran_format zoran_formats[] = {
{
.name = "15-bit RGB LE",
.fourcc = V4L2_PIX_FMT_RGB555,
.colorspace = V4L2_COLORSPACE_SRGB,
.depth = 15,
.flags = ZORAN_FORMAT_CAPTURE |
ZORAN_FORMAT_OVERLAY,
.vfespfr = ZR36057_VFESPFR_RGB555|ZR36057_VFESPFR_ErrDif|
ZR36057_VFESPFR_LittleEndian,
}, {
.name = "15-bit RGB BE",
.fourcc = V4L2_PIX_FMT_RGB555X,
.colorspace = V4L2_COLORSPACE_SRGB,
.depth = 15,
.flags = ZORAN_FORMAT_CAPTURE |
ZORAN_FORMAT_OVERLAY,
.vfespfr = ZR36057_VFESPFR_RGB555|ZR36057_VFESPFR_ErrDif,
}, {
.name = "16-bit RGB LE",
.fourcc = V4L2_PIX_FMT_RGB565,
.colorspace = V4L2_COLORSPACE_SRGB,
.depth = 16,
.flags = ZORAN_FORMAT_CAPTURE |
ZORAN_FORMAT_OVERLAY,
.vfespfr = ZR36057_VFESPFR_RGB565|ZR36057_VFESPFR_ErrDif|
ZR36057_VFESPFR_LittleEndian,
}, {
.name = "16-bit RGB BE",
.fourcc = V4L2_PIX_FMT_RGB565X,
.colorspace = V4L2_COLORSPACE_SRGB,
.depth = 16,
.flags = ZORAN_FORMAT_CAPTURE |
ZORAN_FORMAT_OVERLAY,
.vfespfr = ZR36057_VFESPFR_RGB565|ZR36057_VFESPFR_ErrDif,
}, {
.name = "24-bit RGB",
.fourcc = V4L2_PIX_FMT_BGR24,
.colorspace = V4L2_COLORSPACE_SRGB,
.depth = 24,
.flags = ZORAN_FORMAT_CAPTURE |
ZORAN_FORMAT_OVERLAY,
.vfespfr = ZR36057_VFESPFR_RGB888|ZR36057_VFESPFR_Pack24,
}, {
.name = "32-bit RGB LE",
.fourcc = V4L2_PIX_FMT_BGR32,
.colorspace = V4L2_COLORSPACE_SRGB,
.depth = 32,
.flags = ZORAN_FORMAT_CAPTURE |
ZORAN_FORMAT_OVERLAY,
.vfespfr = ZR36057_VFESPFR_RGB888|ZR36057_VFESPFR_LittleEndian,
}, {
.name = "32-bit RGB BE",
.fourcc = V4L2_PIX_FMT_RGB32,
.colorspace = V4L2_COLORSPACE_SRGB,
.depth = 32,
.flags = ZORAN_FORMAT_CAPTURE |
ZORAN_FORMAT_OVERLAY,
.vfespfr = ZR36057_VFESPFR_RGB888,
}, {
.name = "4:2:2, packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
.colorspace = V4L2_COLORSPACE_SMPTE170M,
.depth = 16,
.flags = ZORAN_FORMAT_CAPTURE |
ZORAN_FORMAT_OVERLAY,
.vfespfr = ZR36057_VFESPFR_YUV422,
}, {
.name = "4:2:2, packed, UYVY",
.fourcc = V4L2_PIX_FMT_UYVY,
.colorspace = V4L2_COLORSPACE_SMPTE170M,
.depth = 16,
.flags = ZORAN_FORMAT_CAPTURE |
ZORAN_FORMAT_OVERLAY,
.vfespfr = ZR36057_VFESPFR_YUV422|ZR36057_VFESPFR_LittleEndian,
}, {
.name = "Hardware-encoded Motion-JPEG",
.fourcc = V4L2_PIX_FMT_MJPEG,
.colorspace = V4L2_COLORSPACE_SMPTE170M,
.depth = 0,
.flags = ZORAN_FORMAT_CAPTURE |
ZORAN_FORMAT_PLAYBACK |
ZORAN_FORMAT_COMPRESSED,
}
};
#define NUM_FORMATS ARRAY_SIZE(zoran_formats)
/* small helper function for calculating buffersizes for v4l2
* we calculate the nearest higher power-of-two, which
* will be the recommended buffersize */
static __u32
zoran_v4l2_calc_bufsize (struct zoran_jpg_settings *settings)
{
__u8 div = settings->VerDcm * settings->HorDcm * settings->TmpDcm;
__u32 num = (1024 * 512) / (div);
__u32 result = 2;
num--;
while (num) {
num >>= 1;
result <<= 1;
}
if (result > jpg_bufsize)
return jpg_bufsize;
if (result < 8192)
return 8192;
return result;
}
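/* Worked example (illustrative): HorDcm = 4, VerDcm = 2, TmpDcm = 1
* gives div = 8 and num = (1024 * 512) / 8 = 65536; after num-- the
* loop doubles 'result' 16 times, so the function returns
* 2 << 16 = 131072, subject to the jpg_bufsize cap and the 8192 floor.
* An equivalent stand-alone form of the rounding: */
#if 0
static __u32 zoran_round_bufsize(__u32 num)
{
__u32 result = 2;
for (num--; num; num >>= 1)
result <<= 1;
return result; /* zoran_round_bufsize(65536) == 131072 */
}
#endif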
/* forward references */
static void v4l_fbuffer_free(struct zoran_fh *fh);
static void jpg_fbuffer_free(struct zoran_fh *fh);
/* Set mapping mode */
static void map_mode_raw(struct zoran_fh *fh)
{
fh->map_mode = ZORAN_MAP_MODE_RAW;
fh->buffers.buffer_size = v4l_bufsize;
fh->buffers.num_buffers = v4l_nbufs;
}
static void map_mode_jpg(struct zoran_fh *fh, int play)
{
fh->map_mode = play ? ZORAN_MAP_MODE_JPG_PLAY : ZORAN_MAP_MODE_JPG_REC;
fh->buffers.buffer_size = jpg_bufsize;
fh->buffers.num_buffers = jpg_nbufs;
}
static inline const char *mode_name(enum zoran_map_mode mode)
{
return mode == ZORAN_MAP_MODE_RAW ? "V4L" : "JPG";
}
/*
* Allocate the V4L grab buffers
*
* These have to be physically contiguous.
*/
static int v4l_fbuffer_alloc(struct zoran_fh *fh)
{
struct zoran *zr = fh->zr;
int i, off;
unsigned char *mem;
for (i = 0; i < fh->buffers.num_buffers; i++) {
if (fh->buffers.buffer[i].v4l.fbuffer)
dprintk(2,
KERN_WARNING
"%s: %s - buffer %d already allocated!?\n",
ZR_DEVNAME(zr), __func__, i);
//udelay(20);
mem = kmalloc(fh->buffers.buffer_size,
GFP_KERNEL | __GFP_NOWARN);
if (!mem) {
dprintk(1,
KERN_ERR
"%s: %s - kmalloc for V4L buf %d failed\n",
ZR_DEVNAME(zr), __func__, i);
v4l_fbuffer_free(fh);
return -ENOBUFS;
}
fh->buffers.buffer[i].v4l.fbuffer = mem;
fh->buffers.buffer[i].v4l.fbuffer_phys = virt_to_phys(mem);
fh->buffers.buffer[i].v4l.fbuffer_bus = virt_to_bus(mem);
for (off = 0; off < fh->buffers.buffer_size;
off += PAGE_SIZE)
SetPageReserved(virt_to_page(mem + off));
dprintk(4,
KERN_INFO
"%s: %s - V4L frame %d mem 0x%lx (bus: 0x%llx)\n",
ZR_DEVNAME(zr), __func__, i, (unsigned long) mem,
(unsigned long long)virt_to_bus(mem));
}
fh->buffers.allocated = 1;
return 0;
}
/* free the V4L grab buffers */
static void v4l_fbuffer_free(struct zoran_fh *fh)
{
struct zoran *zr = fh->zr;
int i, off;
unsigned char *mem;
dprintk(4, KERN_INFO "%s: %s\n", ZR_DEVNAME(zr), __func__);
for (i = 0; i < fh->buffers.num_buffers; i++) {
if (!fh->buffers.buffer[i].v4l.fbuffer)
continue;
mem = fh->buffers.buffer[i].v4l.fbuffer;
for (off = 0; off < fh->buffers.buffer_size;
off += PAGE_SIZE)
ClearPageReserved(virt_to_page(mem + off));
kfree(fh->buffers.buffer[i].v4l.fbuffer);
fh->buffers.buffer[i].v4l.fbuffer = NULL;
}
fh->buffers.allocated = 0;
}
/*
* Allocate the MJPEG grab buffers.
*
* If a Natoma chipset is present and this is a revision 1 zr36057,
* each MJPEG buffer needs to be physically contiguous.
* (RJ: This statement is from Dave Perks' original driver,
* I could never check it because I have a zr36067)
*
* RJ: The contents of the grab buffers need never be accessed in the driver.
* Therefore there is no need to allocate them with vmalloc in order
* to get a contiguous virtual memory space.
* I don't understand why many other drivers first allocate them with
* vmalloc (which internally also uses get_zeroed_page, but delivers
* virtual addresses) and then have to go to a lot of effort
* to get the physical address.
*
* Ben Capper:
* On big-endian architectures (such as ppc) some extra steps
* are needed. When reading and writing to the stat_com array
* and fragment buffers, the device expects to see little-
* endian values. The use of cpu_to_le32() and le32_to_cpu()
* in this function (and one or two others in zoran_device.c)
 * ensures that these values are always stored in little-endian
* form, regardless of architecture. The zr36057 does Very Bad
* Things on big endian architectures if the stat_com array
* and fragment buffers are not little-endian.
*/
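/* Descriptive note (derived from the code below, not from a datasheet):
 * frag_tab is an array of little-endian dword pairs; frag_tab[2 * j]
 * holds the bus address of fragment j and frag_tab[2 * j + 1] its
 * length code, with bit 0 set in the final pair to mark the end of
 * the buffer. */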
static int jpg_fbuffer_alloc(struct zoran_fh *fh)
{
struct zoran *zr = fh->zr;
int i, j, off;
u8 *mem;
for (i = 0; i < fh->buffers.num_buffers; i++) {
if (fh->buffers.buffer[i].jpg.frag_tab)
dprintk(2,
KERN_WARNING
"%s: %s - buffer %d already allocated!?\n",
ZR_DEVNAME(zr), __func__, i);
/* Allocate fragment table for this buffer */
mem = (void *)get_zeroed_page(GFP_KERNEL);
if (!mem) {
dprintk(1,
KERN_ERR
"%s: %s - get_zeroed_page (frag_tab) failed for buffer %d\n",
ZR_DEVNAME(zr), __func__, i);
jpg_fbuffer_free(fh);
return -ENOBUFS;
}
fh->buffers.buffer[i].jpg.frag_tab = (__le32 *)mem;
fh->buffers.buffer[i].jpg.frag_tab_bus = virt_to_bus(mem);
if (fh->buffers.need_contiguous) {
mem = kmalloc(fh->buffers.buffer_size, GFP_KERNEL);
if (mem == NULL) {
dprintk(1,
KERN_ERR
"%s: %s - kmalloc failed for buffer %d\n",
ZR_DEVNAME(zr), __func__, i);
jpg_fbuffer_free(fh);
return -ENOBUFS;
}
fh->buffers.buffer[i].jpg.frag_tab[0] =
cpu_to_le32(virt_to_bus(mem));
fh->buffers.buffer[i].jpg.frag_tab[1] =
cpu_to_le32((fh->buffers.buffer_size >> 1) | 1);
for (off = 0; off < fh->buffers.buffer_size; off += PAGE_SIZE)
SetPageReserved(virt_to_page(mem + off));
} else {
/* jpg_bufsize is already page aligned */
for (j = 0; j < fh->buffers.buffer_size / PAGE_SIZE; j++) {
mem = (void *)get_zeroed_page(GFP_KERNEL);
if (mem == NULL) {
dprintk(1,
KERN_ERR
"%s: %s - get_zeroed_page failed for buffer %d\n",
ZR_DEVNAME(zr), __func__, i);
jpg_fbuffer_free(fh);
return -ENOBUFS;
}
fh->buffers.buffer[i].jpg.frag_tab[2 * j] =
cpu_to_le32(virt_to_bus(mem));
fh->buffers.buffer[i].jpg.frag_tab[2 * j + 1] =
cpu_to_le32((PAGE_SIZE >> 2) << 1);
SetPageReserved(virt_to_page(mem));
}
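/* mark the final fragment as end-of-buffer (bit 0) */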
fh->buffers.buffer[i].jpg.frag_tab[2 * j - 1] |= cpu_to_le32(1);
}
}
dprintk(4,
KERN_DEBUG "%s: %s - %d KB allocated\n",
ZR_DEVNAME(zr), __func__,
(fh->buffers.num_buffers * fh->buffers.buffer_size) >> 10);
fh->buffers.allocated = 1;
return 0;
}
/* free the MJPEG grab buffers */
static void jpg_fbuffer_free(struct zoran_fh *fh)
{
struct zoran *zr = fh->zr;
int i, j, off;
unsigned char *mem;
__le32 frag_tab;
struct zoran_buffer *buffer;
dprintk(4, KERN_DEBUG "%s: %s\n", ZR_DEVNAME(zr), __func__);
for (i = 0, buffer = &fh->buffers.buffer[0];
i < fh->buffers.num_buffers; i++, buffer++) {
if (!buffer->jpg.frag_tab)
continue;
if (fh->buffers.need_contiguous) {
frag_tab = buffer->jpg.frag_tab[0];
if (frag_tab) {
mem = bus_to_virt(le32_to_cpu(frag_tab));
for (off = 0; off < fh->buffers.buffer_size; off += PAGE_SIZE)
ClearPageReserved(virt_to_page(mem + off));
kfree(mem);
buffer->jpg.frag_tab[0] = 0;
buffer->jpg.frag_tab[1] = 0;
}
} else {
for (j = 0; j < fh->buffers.buffer_size / PAGE_SIZE; j++) {
frag_tab = buffer->jpg.frag_tab[2 * j];
if (!frag_tab)
break;
ClearPageReserved(virt_to_page(bus_to_virt(le32_to_cpu(frag_tab))));
free_page((unsigned long)bus_to_virt(le32_to_cpu(frag_tab)));
buffer->jpg.frag_tab[2 * j] = 0;
buffer->jpg.frag_tab[2 * j + 1] = 0;
}
}
free_page((unsigned long)buffer->jpg.frag_tab);
buffer->jpg.frag_tab = NULL;
}
fh->buffers.allocated = 0;
}
/*
* V4L Buffer grabbing
*/
static int
zoran_v4l_set_format (struct zoran_fh *fh,
int width,
int height,
const struct zoran_format *format)
{
struct zoran *zr = fh->zr;
int bpp;
/* Check size and format of the grab wanted */
if (height < BUZ_MIN_HEIGHT || width < BUZ_MIN_WIDTH ||
height > BUZ_MAX_HEIGHT || width > BUZ_MAX_WIDTH) {
dprintk(1,
KERN_ERR
"%s: %s - wrong frame size (%dx%d)\n",
ZR_DEVNAME(zr), __func__, width, height);
return -EINVAL;
}
bpp = (format->depth + 7) / 8;
/* Check against available buffer size */
if (height * width * bpp > fh->buffers.buffer_size) {
dprintk(1,
KERN_ERR
"%s: %s - video buffer size (%d kB) is too small\n",
ZR_DEVNAME(zr), __func__, fh->buffers.buffer_size >> 10);
return -EINVAL;
}
/* The video front end needs 4-byte aligned line sizes */
if ((bpp == 2 && (width & 1)) || (bpp == 3 && (width & 3))) {
dprintk(1,
KERN_ERR
"%s: %s - wrong frame alignment\n",
ZR_DEVNAME(zr), __func__);
return -EINVAL;
}
fh->v4l_settings.width = width;
fh->v4l_settings.height = height;
fh->v4l_settings.format = format;
fh->v4l_settings.bytesperline = bpp * fh->v4l_settings.width;
return 0;
}
static int zoran_v4l_queue_frame(struct zoran_fh *fh, int num)
{
struct zoran *zr = fh->zr;
unsigned long flags;
int res = 0;
if (!fh->buffers.allocated) {
dprintk(1,
KERN_ERR
"%s: %s - buffers not yet allocated\n",
ZR_DEVNAME(zr), __func__);
res = -ENOMEM;
}
/* No grabbing outside the buffer range! */
if (num >= fh->buffers.num_buffers || num < 0) {
dprintk(1,
KERN_ERR
"%s: %s - buffer %d is out of range\n",
ZR_DEVNAME(zr), __func__, num);
res = -EINVAL;
}
spin_lock_irqsave(&zr->spinlock, flags);
if (fh->buffers.active == ZORAN_FREE) {
if (zr->v4l_buffers.active == ZORAN_FREE) {
zr->v4l_buffers = fh->buffers;
fh->buffers.active = ZORAN_ACTIVE;
} else {
dprintk(1,
KERN_ERR
"%s: %s - another session is already capturing\n",
ZR_DEVNAME(zr), __func__);
res = -EBUSY;
}
}
/* make sure a grab isn't going on currently with this buffer */
if (!res) {
switch (zr->v4l_buffers.buffer[num].state) {
default:
case BUZ_STATE_PEND:
if (zr->v4l_buffers.active == ZORAN_FREE) {
fh->buffers.active = ZORAN_FREE;
zr->v4l_buffers.allocated = 0;
}
res = -EBUSY; /* what are you doing? */
break;
case BUZ_STATE_DONE:
dprintk(2,
KERN_WARNING
"%s: %s - queueing buffer %d in state DONE!?\n",
ZR_DEVNAME(zr), __func__, num);
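/* fall through */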
case BUZ_STATE_USER:
/* since there is at least one unused buffer there's room for at least
* one more pend[] entry */
zr->v4l_pend[zr->v4l_pend_head++ & V4L_MASK_FRAME] = num;
zr->v4l_buffers.buffer[num].state = BUZ_STATE_PEND;
zr->v4l_buffers.buffer[num].bs.length =
fh->v4l_settings.bytesperline *
zr->v4l_settings.height;
fh->buffers.buffer[num] = zr->v4l_buffers.buffer[num];
break;
}
}
spin_unlock_irqrestore(&zr->spinlock, flags);
if (!res && zr->v4l_buffers.active == ZORAN_FREE)
zr->v4l_buffers.active = fh->buffers.active;
return res;
}
/*
* Sync on a V4L buffer
*/
static int v4l_sync(struct zoran_fh *fh, int frame)
{
struct zoran *zr = fh->zr;
unsigned long flags;
if (fh->buffers.active == ZORAN_FREE) {
dprintk(1,
KERN_ERR
"%s: %s - no grab active for this session\n",
ZR_DEVNAME(zr), __func__);
return -EINVAL;
}
/* check passed-in frame number */
if (frame >= fh->buffers.num_buffers || frame < 0) {
dprintk(1,
KERN_ERR "%s: %s - frame %d is invalid\n",
ZR_DEVNAME(zr), __func__, frame);
return -EINVAL;
}
/* Check if the buffer was queued at all */
if (zr->v4l_buffers.buffer[frame].state == BUZ_STATE_USER) {
dprintk(1,
KERN_ERR
"%s: %s - attempt to sync on a buffer which was not queued?\n",
ZR_DEVNAME(zr), __func__);
return -EPROTO;
}
/* wait on this buffer to get ready */
if (!wait_event_interruptible_timeout(zr->v4l_capq,
(zr->v4l_buffers.buffer[frame].state != BUZ_STATE_PEND), 10*HZ))
return -ETIME;
if (signal_pending(current))
return -ERESTARTSYS;
/* buffer should now be in BUZ_STATE_DONE */
if (zr->v4l_buffers.buffer[frame].state != BUZ_STATE_DONE)
dprintk(2,
KERN_ERR "%s: %s - internal state error\n",
ZR_DEVNAME(zr), __func__);
zr->v4l_buffers.buffer[frame].state = BUZ_STATE_USER;
fh->buffers.buffer[frame] = zr->v4l_buffers.buffer[frame];
spin_lock_irqsave(&zr->spinlock, flags);
/* Check if streaming capture has finished */
if (zr->v4l_pend_tail == zr->v4l_pend_head) {
zr36057_set_memgrab(zr, 0);
if (zr->v4l_buffers.active == ZORAN_ACTIVE) {
fh->buffers.active = zr->v4l_buffers.active = ZORAN_FREE;
zr->v4l_buffers.allocated = 0;
}
}
spin_unlock_irqrestore(&zr->spinlock, flags);
return 0;
}
/*
* Queue a MJPEG buffer for capture/playback
*/
static int zoran_jpg_queue_frame(struct zoran_fh *fh, int num,
enum zoran_codec_mode mode)
{
struct zoran *zr = fh->zr;
unsigned long flags;
int res = 0;
/* Check if buffers are allocated */
if (!fh->buffers.allocated) {
dprintk(1,
KERN_ERR
"%s: %s - buffers not yet allocated\n",
ZR_DEVNAME(zr), __func__);
return -ENOMEM;
}
/* No grabbing outside the buffer range! */
if (num >= fh->buffers.num_buffers || num < 0) {
dprintk(1,
KERN_ERR
"%s: %s - buffer %d out of range\n",
ZR_DEVNAME(zr), __func__, num);
return -EINVAL;
}
/* what is the codec mode right now? */
if (zr->codec_mode == BUZ_MODE_IDLE) {
zr->jpg_settings = fh->jpg_settings;
} else if (zr->codec_mode != mode) {
/* wrong codec mode active - invalid */
dprintk(1,
KERN_ERR
"%s: %s - codec in wrong mode\n",
ZR_DEVNAME(zr), __func__);
return -EINVAL;
}
if (fh->buffers.active == ZORAN_FREE) {
if (zr->jpg_buffers.active == ZORAN_FREE) {
zr->jpg_buffers = fh->buffers;
fh->buffers.active = ZORAN_ACTIVE;
} else {
dprintk(1,
KERN_ERR
"%s: %s - another session is already capturing\n",
ZR_DEVNAME(zr), __func__);
res = -EBUSY;
}
}
if (!res && zr->codec_mode == BUZ_MODE_IDLE) {
/* Ok load up the jpeg codec */
zr36057_enable_jpg(zr, mode);
}
spin_lock_irqsave(&zr->spinlock, flags);
if (!res) {
switch (zr->jpg_buffers.buffer[num].state) {
case BUZ_STATE_DONE:
dprintk(2,
KERN_WARNING
"%s: %s - queing frame in BUZ_STATE_DONE state!?\n",
ZR_DEVNAME(zr), __func__);
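/* fall through */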
case BUZ_STATE_USER:
/* since there is at least one unused buffer there's room for at
* least one more pend[] entry */
zr->jpg_pend[zr->jpg_que_head++ & BUZ_MASK_FRAME] = num;
zr->jpg_buffers.buffer[num].state = BUZ_STATE_PEND;
fh->buffers.buffer[num] = zr->jpg_buffers.buffer[num];
zoran_feed_stat_com(zr);
break;
default:
case BUZ_STATE_DMA:
case BUZ_STATE_PEND:
if (zr->jpg_buffers.active == ZORAN_FREE) {
fh->buffers.active = ZORAN_FREE;
zr->jpg_buffers.allocated = 0;
}
res = -EBUSY; /* what are you doing? */
break;
}
}
spin_unlock_irqrestore(&zr->spinlock, flags);
if (!res && zr->jpg_buffers.active == ZORAN_FREE)
zr->jpg_buffers.active = fh->buffers.active;
return res;
}
static int jpg_qbuf(struct zoran_fh *fh, int frame, enum zoran_codec_mode mode)
{
struct zoran *zr = fh->zr;
int res = 0;
/* Does the user want to stop streaming? */
if (frame < 0) {
if (zr->codec_mode == mode) {
if (fh->buffers.active == ZORAN_FREE) {
dprintk(1,
KERN_ERR
"%s: %s(-1) - session not active\n",
ZR_DEVNAME(zr), __func__);
return -EINVAL;
}
fh->buffers.active = zr->jpg_buffers.active = ZORAN_FREE;
zr->jpg_buffers.allocated = 0;
zr36057_enable_jpg(zr, BUZ_MODE_IDLE);
return 0;
} else {
dprintk(1,
KERN_ERR
"%s: %s - stop streaming but not in streaming mode\n",
ZR_DEVNAME(zr), __func__);
return -EINVAL;
}
}
if ((res = zoran_jpg_queue_frame(fh, frame, mode)))
return res;
/* Start the jpeg codec when the first frame is queued */
if (!res && zr->jpg_que_head == 1)
jpeg_start(zr);
return res;
}
/*
* Sync on a MJPEG buffer
*/
static int jpg_sync(struct zoran_fh *fh, struct zoran_sync *bs)
{
struct zoran *zr = fh->zr;
unsigned long flags;
int frame;
if (fh->buffers.active == ZORAN_FREE) {
dprintk(1,
KERN_ERR
"%s: %s - capture is not currently active\n",
ZR_DEVNAME(zr), __func__);
return -EINVAL;
}
if (zr->codec_mode != BUZ_MODE_MOTION_DECOMPRESS &&
zr->codec_mode != BUZ_MODE_MOTION_COMPRESS) {
dprintk(1,
KERN_ERR
"%s: %s - codec not in streaming mode\n",
ZR_DEVNAME(zr), __func__);
return -EINVAL;
}
if (!wait_event_interruptible_timeout(zr->jpg_capq,
(zr->jpg_que_tail != zr->jpg_dma_tail ||
zr->jpg_dma_tail == zr->jpg_dma_head),
10*HZ)) {
int isr;
btand(~ZR36057_JMC_Go_en, ZR36057_JMC);
udelay(1);
zr->codec->control(zr->codec, CODEC_G_STATUS,
sizeof(isr), &isr);
dprintk(1,
KERN_ERR
"%s: %s - timeout: codec isr=0x%02x\n",
ZR_DEVNAME(zr), __func__, isr);
return -ETIME;
}
if (signal_pending(current))
return -ERESTARTSYS;
spin_lock_irqsave(&zr->spinlock, flags);
if (zr->jpg_dma_tail != zr->jpg_dma_head)
frame = zr->jpg_pend[zr->jpg_que_tail++ & BUZ_MASK_FRAME];
else
frame = zr->jpg_pend[zr->jpg_que_tail & BUZ_MASK_FRAME];
/* buffer should now be in BUZ_STATE_DONE */
if (zr->jpg_buffers.buffer[frame].state != BUZ_STATE_DONE)
dprintk(2,
KERN_ERR "%s: %s - internal state error\n",
ZR_DEVNAME(zr), __func__);
*bs = zr->jpg_buffers.buffer[frame].bs;
bs->frame = frame;
zr->jpg_buffers.buffer[frame].state = BUZ_STATE_USER;
fh->buffers.buffer[frame] = zr->jpg_buffers.buffer[frame];
spin_unlock_irqrestore(&zr->spinlock, flags);
return 0;
}
static void zoran_open_init_session(struct zoran_fh *fh)
{
int i;
struct zoran *zr = fh->zr;
/* By default, map the V4L buffers */
map_mode_raw(fh);
/* take over the card's current settings */
fh->overlay_settings = zr->overlay_settings;
fh->overlay_settings.is_set = 0;
fh->overlay_settings.format = zr->overlay_settings.format;
fh->overlay_active = ZORAN_FREE;
/* v4l settings */
fh->v4l_settings = zr->v4l_settings;
/* jpg settings */
fh->jpg_settings = zr->jpg_settings;
/* buffers */
memset(&fh->buffers, 0, sizeof(fh->buffers));
for (i = 0; i < MAX_FRAME; i++) {
fh->buffers.buffer[i].state = BUZ_STATE_USER; /* nothing going on */
fh->buffers.buffer[i].bs.frame = i;
}
fh->buffers.allocated = 0;
fh->buffers.active = ZORAN_FREE;
}
static void zoran_close_end_session(struct zoran_fh *fh)
{
struct zoran *zr = fh->zr;
/* overlay */
if (fh->overlay_active != ZORAN_FREE) {
fh->overlay_active = zr->overlay_active = ZORAN_FREE;
zr->v4l_overlay_active = 0;
if (!zr->v4l_memgrab_active)
zr36057_overlay(zr, 0);
zr->overlay_mask = NULL;
}
if (fh->map_mode == ZORAN_MAP_MODE_RAW) {
/* v4l capture */
if (fh->buffers.active != ZORAN_FREE) {
unsigned long flags;
spin_lock_irqsave(&zr->spinlock, flags);
zr36057_set_memgrab(zr, 0);
zr->v4l_buffers.allocated = 0;
zr->v4l_buffers.active = fh->buffers.active = ZORAN_FREE;
spin_unlock_irqrestore(&zr->spinlock, flags);
}
/* v4l buffers */
if (fh->buffers.allocated)
v4l_fbuffer_free(fh);
} else {
/* jpg capture */
if (fh->buffers.active != ZORAN_FREE) {
zr36057_enable_jpg(zr, BUZ_MODE_IDLE);
zr->jpg_buffers.allocated = 0;
zr->jpg_buffers.active = fh->buffers.active = ZORAN_FREE;
}
/* jpg buffers */
if (fh->buffers.allocated)
jpg_fbuffer_free(fh);
}
}
/*
* Open a zoran card. Right now the open flags are ignored (see the TODO below)
*/
static int zoran_open(struct file *file)
{
struct zoran *zr = video_drvdata(file);
struct zoran_fh *fh;
int res, first_open = 0;
dprintk(2, KERN_INFO "%s: %s(%s, pid=[%d]), users(-)=%d\n",
ZR_DEVNAME(zr), __func__, current->comm, task_pid_nr(current), zr->user + 1);
mutex_lock(&zr->other_lock);
if (zr->user >= 2048) {
dprintk(1, KERN_ERR "%s: too many users (%d) on device\n",
ZR_DEVNAME(zr), zr->user);
res = -EBUSY;
goto fail_unlock;
}
/* now, create the open()-specific file_ops struct */
fh = kzalloc(sizeof(struct zoran_fh), GFP_KERNEL);
if (!fh) {
dprintk(1,
KERN_ERR
"%s: %s - allocation of zoran_fh failed\n",
ZR_DEVNAME(zr), __func__);
res = -ENOMEM;
goto fail_unlock;
}
/* used to be BUZ_MAX_WIDTH/HEIGHT, but that gives overflows
* on norm-change! */
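/* i.e. one bit per pixel of a 768x576 clip bitmap: 24 dwords per line
 * times 576 lines times 4 bytes per dword = 55296 bytes */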
fh->overlay_mask =
kmalloc(((768 + 31) / 32) * 576 * 4, GFP_KERNEL);
if (!fh->overlay_mask) {
dprintk(1,
KERN_ERR
"%s: %s - allocation of overlay_mask failed\n",
ZR_DEVNAME(zr), __func__);
res = -ENOMEM;
goto fail_fh;
}
if (zr->user++ == 0)
first_open = 1;
/*mutex_unlock(&zr->resource_lock);*/
/* default setup - TODO: look at flags */
if (first_open) { /* First device open */
zr36057_restart(zr);
zoran_open_init_params(zr);
zoran_init_hardware(zr);
btor(ZR36057_ICR_IntPinEn, ZR36057_ICR);
}
/* set file_ops stuff */
file->private_data = fh;
fh->zr = zr;
zoran_open_init_session(fh);
mutex_unlock(&zr->other_lock);
return 0;
fail_fh:
kfree(fh);
fail_unlock:
mutex_unlock(&zr->other_lock);
dprintk(2, KERN_INFO "%s: open failed (%d), users(-)=%d\n",
ZR_DEVNAME(zr), res, zr->user);
return res;
}
static int
zoran_close(struct file *file)
{
struct zoran_fh *fh = file->private_data;
struct zoran *zr = fh->zr;
dprintk(2, KERN_INFO "%s: %s(%s, pid=[%d]), users(+)=%d\n",
ZR_DEVNAME(zr), __func__, current->comm, task_pid_nr(current), zr->user - 1);
/* kernel locks (fs/device.c), so don't do that ourselves
* (prevents deadlocks) */
mutex_lock(&zr->other_lock);
zoran_close_end_session(fh);
if (zr->user-- == 1) { /* Last process */
/* Clean up JPEG process */
wake_up_interruptible(&zr->jpg_capq);
zr36057_enable_jpg(zr, BUZ_MODE_IDLE);
zr->jpg_buffers.allocated = 0;
zr->jpg_buffers.active = ZORAN_FREE;
/* disable interrupts */
btand(~ZR36057_ICR_IntPinEn, ZR36057_ICR);
if (zr36067_debug > 1)
print_interrupts(zr);
/* Overlay off */
zr->v4l_overlay_active = 0;
zr36057_overlay(zr, 0);
zr->overlay_mask = NULL;
/* capture off */
wake_up_interruptible(&zr->v4l_capq);
zr36057_set_memgrab(zr, 0);
zr->v4l_buffers.allocated = 0;
zr->v4l_buffers.active = ZORAN_FREE;
zoran_set_pci_master(zr, 0);
if (!pass_through) { /* Switch to color bar */
decoder_call(zr, video, s_stream, 0);
encoder_call(zr, video, s_routing, 2, 0, 0);
}
}
mutex_unlock(&zr->other_lock);
file->private_data = NULL;
kfree(fh->overlay_mask);
kfree(fh);
dprintk(4, KERN_INFO "%s: %s done\n", ZR_DEVNAME(zr), __func__);
return 0;
}
static ssize_t
zoran_read (struct file *file,
char __user *data,
size_t count,
loff_t *ppos)
{
/* we simply don't support read() (yet)... */
return -EINVAL;
}
static ssize_t
zoran_write (struct file *file,
const char __user *data,
size_t count,
loff_t *ppos)
{
/* ...and the same goes for write() */
return -EINVAL;
}
static int setup_fbuffer(struct zoran_fh *fh,
void *base,
const struct zoran_format *fmt,
int width,
int height,
int bytesperline)
{
struct zoran *zr = fh->zr;
/* (Ronald) v4l/v4l2 guidelines */
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
return -EPERM;
/* Don't allow frame buffer overlay if PCI or AGP is buggy, or on
the ALi Magik (which needs very low latency, while the card
always needs a higher value) */
if (pci_pci_problems & (PCIPCI_FAIL | PCIAGP_FAIL | PCIPCI_ALIMAGIK))
return -ENXIO;
/* we need a bytesperline value, even if not given */
if (!bytesperline)
bytesperline = width * ((fmt->depth + 7) & ~7) / 8;
#if 0
if (zr->overlay_active) {
/* jeez... stupid users... don't even bother to turn off
* overlay before changing the memory location...
* normally, we would return errors here. However, one of
* the tools that does this is... xawtv! and since xawtv
* is used by +/- 99% of the users, we'd rather be user-
* friendly and silently do as if nothing went wrong */
dprintk(3,
KERN_ERR
"%s: %s - forced overlay turnoff because framebuffer changed\n",
ZR_DEVNAME(zr), __func__);
zr36057_overlay(zr, 0);
}
#endif
if (!(fmt->flags & ZORAN_FORMAT_OVERLAY)) {
dprintk(1,
KERN_ERR
"%s: %s - no valid overlay format given\n",
ZR_DEVNAME(zr), __func__);
return -EINVAL;
}
if (height <= 0 || width <= 0 || bytesperline <= 0) {
dprintk(1,
KERN_ERR
"%s: %s - invalid height/width/bpl value (%d|%d|%d)\n",
ZR_DEVNAME(zr), __func__, width, height, bytesperline);
return -EINVAL;
}
if (bytesperline & 3) {
dprintk(1,
KERN_ERR
"%s: %s - bytesperline (%d) must be 4-byte aligned\n",
ZR_DEVNAME(zr), __func__, bytesperline);
return -EINVAL;
}
zr->vbuf_base = (void *) ((unsigned long) base & ~3);
zr->vbuf_height = height;
zr->vbuf_width = width;
zr->vbuf_depth = fmt->depth;
zr->overlay_settings.format = fmt;
zr->vbuf_bytesperline = bytesperline;
/* The user should set new window parameters */
zr->overlay_settings.is_set = 0;
return 0;
}
static int setup_window(struct zoran_fh *fh, int x, int y, int width, int height,
struct v4l2_clip __user *clips, int clipcount, void __user *bitmap)
{
struct zoran *zr = fh->zr;
struct v4l2_clip *vcp = NULL;
int on, end;
if (!zr->vbuf_base) {
dprintk(1,
KERN_ERR
"%s: %s - frame buffer has to be set first\n",
ZR_DEVNAME(zr), __func__);
return -EINVAL;
}
if (!fh->overlay_settings.format) {
dprintk(1,
KERN_ERR
"%s: %s - no overlay format set\n",
ZR_DEVNAME(zr), __func__);
return -EINVAL;
}
/*
* The video front end needs 4-byte aligned line sizes; we correct that
* silently here if necessary
*/
if (zr->vbuf_depth == 15 || zr->vbuf_depth == 16) {
end = (x + width) & ~1; /* round down */
x = (x + 1) & ~1; /* round up */
width = end - x;
}
if (zr->vbuf_depth == 24) {
end = (x + width) & ~3; /* round down */
x = (x + 3) & ~3; /* round up */
width = end - x;
}
if (width > BUZ_MAX_WIDTH)
width = BUZ_MAX_WIDTH;
if (height > BUZ_MAX_HEIGHT)
height = BUZ_MAX_HEIGHT;
/* Check for invalid parameters */
if (width < BUZ_MIN_WIDTH || height < BUZ_MIN_HEIGHT ||
width > BUZ_MAX_WIDTH || height > BUZ_MAX_HEIGHT) {
dprintk(1,
KERN_ERR
"%s: %s - width = %d or height = %d invalid\n",
ZR_DEVNAME(zr), __func__, width, height);
return -EINVAL;
}
fh->overlay_settings.x = x;
fh->overlay_settings.y = y;
fh->overlay_settings.width = width;
fh->overlay_settings.height = height;
fh->overlay_settings.clipcount = clipcount;
/*
* If an overlay is running, we have to switch it off
* and switch it on again in order to get the new settings in effect.
*
* We also want to avoid that the overlay mask is written
* when an overlay is running.
*/
on = zr->v4l_overlay_active && !zr->v4l_memgrab_active &&
zr->overlay_active != ZORAN_FREE &&
fh->overlay_active != ZORAN_FREE;
if (on)
zr36057_overlay(zr, 0);
/*
* Write the overlay mask if clips are wanted.
* We prefer a bitmap.
*/
if (bitmap) {
/* fake value - it just means we want clips */
fh->overlay_settings.clipcount = 1;
if (copy_from_user(fh->overlay_mask, bitmap,
(width * height + 7) / 8)) {
return -EFAULT;
}
} else if (clipcount > 0) {
/* write our own bitmap from the clips */
vcp = vmalloc(sizeof(struct v4l2_clip) * (clipcount + 4));
if (vcp == NULL) {
dprintk(1,
KERN_ERR
"%s: %s - Alloc of clip mask failed\n",
ZR_DEVNAME(zr), __func__);
return -ENOMEM;
}
if (copy_from_user
(vcp, clips, sizeof(struct v4l2_clip) * clipcount)) {
vfree(vcp);
return -EFAULT;
}
write_overlay_mask(fh, vcp, clipcount);
vfree(vcp);
}
fh->overlay_settings.is_set = 1;
if (fh->overlay_active != ZORAN_FREE &&
zr->overlay_active != ZORAN_FREE)
zr->overlay_settings = fh->overlay_settings;
if (on)
zr36057_overlay(zr, 1);
/* Make sure the changes come into effect */
return wait_grab_pending(zr);
}
static int setup_overlay(struct zoran_fh *fh, int on)
{
struct zoran *zr = fh->zr;
/* If there is nothing to do, return immediately */
if ((on && fh->overlay_active != ZORAN_FREE) ||
(!on && fh->overlay_active == ZORAN_FREE))
return 0;
/* check whether we're touching someone else's overlay */
if (on && zr->overlay_active != ZORAN_FREE &&
fh->overlay_active == ZORAN_FREE) {
dprintk(1,
KERN_ERR
"%s: %s - overlay is already active for another session\n",
ZR_DEVNAME(zr), __func__);
return -EBUSY;
}
if (!on && zr->overlay_active != ZORAN_FREE &&
fh->overlay_active == ZORAN_FREE) {
dprintk(1,
KERN_ERR
"%s: %s - you cannot cancel someone else's session\n",
ZR_DEVNAME(zr), __func__);
return -EPERM;
}
if (on == 0) {
zr->overlay_active = fh->overlay_active = ZORAN_FREE;
zr->v4l_overlay_active = 0;
/* When a grab is running, the video simply
* won't be switched on any more */
if (!zr->v4l_memgrab_active)
zr36057_overlay(zr, 0);
zr->overlay_mask = NULL;
} else {
if (!zr->vbuf_base || !fh->overlay_settings.is_set) {
dprintk(1,
KERN_ERR
"%s: %s - buffer or window not set\n",
ZR_DEVNAME(zr), __func__);
return -EINVAL;
}
if (!fh->overlay_settings.format) {
dprintk(1,
KERN_ERR
"%s: %s - no overlay format set\n",
ZR_DEVNAME(zr), __func__);
return -EINVAL;
}
zr->overlay_active = fh->overlay_active = ZORAN_LOCKED;
zr->v4l_overlay_active = 1;
zr->overlay_mask = fh->overlay_mask;
zr->overlay_settings = fh->overlay_settings;
if (!zr->v4l_memgrab_active)
zr36057_overlay(zr, 1);
/* When a grab is running, the video will be
* switched on when grab is finished */
}
/* Make sure the changes come into effect */
return wait_grab_pending(zr);
}
/* get the status of a buffer in the client's buffer queue */
static int zoran_v4l2_buffer_status(struct zoran_fh *fh,
struct v4l2_buffer *buf, int num)
{
struct zoran *zr = fh->zr;
unsigned long flags;
buf->flags = V4L2_BUF_FLAG_MAPPED;
switch (fh->map_mode) {
case ZORAN_MAP_MODE_RAW:
/* check range */
if (num < 0 || num >= fh->buffers.num_buffers ||
!fh->buffers.allocated) {
dprintk(1,
KERN_ERR
"%s: %s - wrong number or buffers not allocated\n",
ZR_DEVNAME(zr), __func__);
return -EINVAL;
}
spin_lock_irqsave(&zr->spinlock, flags);
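/* note: "FAL" and "UPMD" below are lookup strings indexed by the
 * active flag and buffer state to print one-char codes (F=free,
 * A=active, L=locked; U=user, P=pend, M=dma, D=done) */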
dprintk(3,
KERN_DEBUG
"%s: %s() - raw active=%c, buffer %d: state=%c, map=%c\n",
ZR_DEVNAME(zr), __func__,
"FAL"[fh->buffers.active], num,
"UPMD"[zr->v4l_buffers.buffer[num].state],
fh->buffers.buffer[num].map ? 'Y' : 'N');
spin_unlock_irqrestore(&zr->spinlock, flags);
buf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf->length = fh->buffers.buffer_size;
/* get buffer */
buf->bytesused = fh->buffers.buffer[num].bs.length;
if (fh->buffers.buffer[num].state == BUZ_STATE_DONE ||
fh->buffers.buffer[num].state == BUZ_STATE_USER) {
buf->sequence = fh->buffers.buffer[num].bs.seq;
buf->flags |= V4L2_BUF_FLAG_DONE;
buf->timestamp = fh->buffers.buffer[num].bs.timestamp;
} else {
buf->flags |= V4L2_BUF_FLAG_QUEUED;
}
if (fh->v4l_settings.height <= BUZ_MAX_HEIGHT / 2)
buf->field = V4L2_FIELD_TOP;
else
buf->field = V4L2_FIELD_INTERLACED;
break;
case ZORAN_MAP_MODE_JPG_REC:
case ZORAN_MAP_MODE_JPG_PLAY:
/* check range */
if (num < 0 || num >= fh->buffers.num_buffers ||
!fh->buffers.allocated) {
dprintk(1,
KERN_ERR
"%s: %s - wrong number or buffers not allocated\n",
ZR_DEVNAME(zr), __func__);
return -EINVAL;
}
buf->type = (fh->map_mode == ZORAN_MAP_MODE_JPG_REC) ?
V4L2_BUF_TYPE_VIDEO_CAPTURE :
V4L2_BUF_TYPE_VIDEO_OUTPUT;
buf->length = fh->buffers.buffer_size;
/* these variables are only written after frame has been captured */
if (fh->buffers.buffer[num].state == BUZ_STATE_DONE ||
fh->buffers.buffer[num].state == BUZ_STATE_USER) {
buf->sequence = fh->buffers.buffer[num].bs.seq;
buf->timestamp = fh->buffers.buffer[num].bs.timestamp;
buf->bytesused = fh->buffers.buffer[num].bs.length;
buf->flags |= V4L2_BUF_FLAG_DONE;
} else {
buf->flags |= V4L2_BUF_FLAG_QUEUED;
}
/* which fields are these? */
if (fh->jpg_settings.TmpDcm != 1)
buf->field = fh->jpg_settings.odd_even ?
V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
else
buf->field = fh->jpg_settings.odd_even ?
V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT;
break;
default:
dprintk(5,
KERN_ERR
"%s: %s - invalid buffer type|map_mode (%d|%d)\n",
ZR_DEVNAME(zr), __func__, buf->type, fh->map_mode);
return -EINVAL;
}
buf->memory = V4L2_MEMORY_MMAP;
buf->index = num;
buf->m.offset = buf->length * num;
return 0;
}
static int
zoran_set_norm (struct zoran *zr,
v4l2_std_id norm)
{
int on;
if (zr->v4l_buffers.active != ZORAN_FREE ||
zr->jpg_buffers.active != ZORAN_FREE) {
dprintk(1,
KERN_WARNING
"%s: %s called while in playback/capture mode\n",
ZR_DEVNAME(zr), __func__);
return -EBUSY;
}
if (!(norm & zr->card.norms)) {
dprintk(1,
KERN_ERR "%s: %s - unsupported norm %llx\n",
ZR_DEVNAME(zr), __func__, norm);
return -EINVAL;
}
if (norm == V4L2_STD_ALL) {
unsigned int status = 0;
v4l2_std_id std = 0;
decoder_call(zr, video, querystd, &std);
decoder_call(zr, core, s_std, std);
/* let changes come into effect */
ssleep(2);
decoder_call(zr, video, g_input_status, &status);
if (status & V4L2_IN_ST_NO_SIGNAL) {
dprintk(1,
KERN_ERR
"%s: %s - no norm detected\n",
ZR_DEVNAME(zr), __func__);
/* reset norm */
decoder_call(zr, core, s_std, zr->norm);
return -EIO;
}
norm = std;
}
if (norm & V4L2_STD_SECAM)
zr->timing = zr->card.tvn[2];
else if (norm & V4L2_STD_NTSC)
zr->timing = zr->card.tvn[1];
else
zr->timing = zr->card.tvn[0];
/* We switch overlay off and on since a change in the
* norm needs different VFE settings */
on = zr->overlay_active && !zr->v4l_memgrab_active;
if (on)
zr36057_overlay(zr, 0);
decoder_call(zr, core, s_std, norm);
encoder_call(zr, video, s_std_output, norm);
if (on)
zr36057_overlay(zr, 1);
/* Make sure the changes come into effect */
zr->norm = norm;
return 0;
}
static int
zoran_set_input (struct zoran *zr,
int input)
{
if (input == zr->input) {
return 0;
}
if (zr->v4l_buffers.active != ZORAN_FREE ||
zr->jpg_buffers.active != ZORAN_FREE) {
dprintk(1,
KERN_WARNING
"%s: %s called while in playback/capture mode\n",
ZR_DEVNAME(zr), __func__);
return -EBUSY;
}
if (input < 0 || input >= zr->card.inputs) {
dprintk(1,
KERN_ERR
"%s: %s - unnsupported input %d\n",
ZR_DEVNAME(zr), __func__, input);
return -EINVAL;
}
zr->input = input;
decoder_call(zr, video, s_routing,
zr->card.input[input].muxsel, 0, 0);
return 0;
}
/*
* ioctl routine
*/
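/*
 * Hypothetical userspace sketch (not driver code; the open()ed fd and
 * error handling are assumed) of the mmap-streaming sequence these
 * handlers implement:
 *
 *	struct v4l2_requestbuffers req = {
 *		.count = 4,
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP,
 *	};
 *	ioctl(fd, VIDIOC_REQBUFS, &req);	// zoran_reqbufs()
 *	// per buffer: VIDIOC_QUERYBUF, then mmap() at buf.m.offset
 *	// per buffer: VIDIOC_QBUF		// zoran_qbuf()
 *	ioctl(fd, VIDIOC_STREAMON, &req.type);	// zoran_streamon()
 *	// capture loop: VIDIOC_DQBUF, use the data, VIDIOC_QBUF again
 *	ioctl(fd, VIDIOC_STREAMOFF, &req.type);	// zoran_streamoff()
 */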
static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability *cap)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
memset(cap, 0, sizeof(*cap));
strncpy(cap->card, ZR_DEVNAME(zr), sizeof(cap->card)-1);
strncpy(cap->driver, "zoran", sizeof(cap->driver)-1);
snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
pci_name(zr->pci_dev));
cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OVERLAY;
return 0;
}
static int zoran_enum_fmt(struct zoran *zr, struct v4l2_fmtdesc *fmt, int flag)
{
unsigned int num, i;
for (num = i = 0; i < NUM_FORMATS; i++) {
if (zoran_formats[i].flags & flag && num++ == fmt->index) {
strncpy(fmt->description, zoran_formats[i].name,
sizeof(fmt->description) - 1);
/* fmt struct pre-zeroed, so adding '\0' not needed */
fmt->pixelformat = zoran_formats[i].fourcc;
if (zoran_formats[i].flags & ZORAN_FORMAT_COMPRESSED)
fmt->flags |= V4L2_FMT_FLAG_COMPRESSED;
return 0;
}
}
return -EINVAL;
}
static int zoran_enum_fmt_vid_cap(struct file *file, void *__fh,
struct v4l2_fmtdesc *f)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
return zoran_enum_fmt(zr, f, ZORAN_FORMAT_CAPTURE);
}
static int zoran_enum_fmt_vid_out(struct file *file, void *__fh,
struct v4l2_fmtdesc *f)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
return zoran_enum_fmt(zr, f, ZORAN_FORMAT_PLAYBACK);
}
static int zoran_enum_fmt_vid_overlay(struct file *file, void *__fh,
struct v4l2_fmtdesc *f)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
return zoran_enum_fmt(zr, f, ZORAN_FORMAT_OVERLAY);
}
static int zoran_g_fmt_vid_out(struct file *file, void *__fh,
struct v4l2_format *fmt)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
mutex_lock(&zr->resource_lock);
fmt->fmt.pix.width = fh->jpg_settings.img_width / fh->jpg_settings.HorDcm;
fmt->fmt.pix.height = fh->jpg_settings.img_height * 2 /
(fh->jpg_settings.VerDcm * fh->jpg_settings.TmpDcm);
fmt->fmt.pix.sizeimage = zoran_v4l2_calc_bufsize(&fh->jpg_settings);
fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_MJPEG;
if (fh->jpg_settings.TmpDcm == 1)
fmt->fmt.pix.field = (fh->jpg_settings.odd_even ?
V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT);
else
fmt->fmt.pix.field = (fh->jpg_settings.odd_even ?
V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM);
fmt->fmt.pix.bytesperline = 0;
fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
mutex_unlock(&zr->resource_lock);
return 0;
}
static int zoran_g_fmt_vid_cap(struct file *file, void *__fh,
struct v4l2_format *fmt)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
if (fh->map_mode != ZORAN_MAP_MODE_RAW)
return zoran_g_fmt_vid_out(file, fh, fmt);
mutex_lock(&zr->resource_lock);
fmt->fmt.pix.width = fh->v4l_settings.width;
fmt->fmt.pix.height = fh->v4l_settings.height;
fmt->fmt.pix.sizeimage = fh->v4l_settings.bytesperline *
fh->v4l_settings.height;
fmt->fmt.pix.pixelformat = fh->v4l_settings.format->fourcc;
fmt->fmt.pix.colorspace = fh->v4l_settings.format->colorspace;
fmt->fmt.pix.bytesperline = fh->v4l_settings.bytesperline;
if (BUZ_MAX_HEIGHT < (fh->v4l_settings.height * 2))
fmt->fmt.pix.field = V4L2_FIELD_INTERLACED;
else
fmt->fmt.pix.field = V4L2_FIELD_TOP;
mutex_unlock(&zr->resource_lock);
return 0;
}
static int zoran_g_fmt_vid_overlay(struct file *file, void *__fh,
struct v4l2_format *fmt)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
mutex_lock(&zr->resource_lock);
fmt->fmt.win.w.left = fh->overlay_settings.x;
fmt->fmt.win.w.top = fh->overlay_settings.y;
fmt->fmt.win.w.width = fh->overlay_settings.width;
fmt->fmt.win.w.height = fh->overlay_settings.height;
if (fh->overlay_settings.height * 2 > BUZ_MAX_HEIGHT)
fmt->fmt.win.field = V4L2_FIELD_INTERLACED;
else
fmt->fmt.win.field = V4L2_FIELD_TOP;
mutex_unlock(&zr->resource_lock);
return 0;
}
static int zoran_try_fmt_vid_overlay(struct file *file, void *__fh,
struct v4l2_format *fmt)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
mutex_lock(&zr->resource_lock);
if (fmt->fmt.win.w.width > BUZ_MAX_WIDTH)
fmt->fmt.win.w.width = BUZ_MAX_WIDTH;
if (fmt->fmt.win.w.width < BUZ_MIN_WIDTH)
fmt->fmt.win.w.width = BUZ_MIN_WIDTH;
if (fmt->fmt.win.w.height > BUZ_MAX_HEIGHT)
fmt->fmt.win.w.height = BUZ_MAX_HEIGHT;
if (fmt->fmt.win.w.height < BUZ_MIN_HEIGHT)
fmt->fmt.win.w.height = BUZ_MIN_HEIGHT;
mutex_unlock(&zr->resource_lock);
return 0;
}
static int zoran_try_fmt_vid_out(struct file *file, void *__fh,
struct v4l2_format *fmt)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
struct zoran_jpg_settings settings;
int res = 0;
if (fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG)
return -EINVAL;
mutex_lock(&zr->resource_lock);
settings = fh->jpg_settings;
/* we actually need to set 'real' parameters now */
if ((fmt->fmt.pix.height * 2) > BUZ_MAX_HEIGHT)
settings.TmpDcm = 1;
else
settings.TmpDcm = 2;
settings.decimation = 0;
if (fmt->fmt.pix.height <= fh->jpg_settings.img_height / 2)
settings.VerDcm = 2;
else
settings.VerDcm = 1;
if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 4)
settings.HorDcm = 4;
else if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 2)
settings.HorDcm = 2;
else
settings.HorDcm = 1;
if (settings.TmpDcm == 1)
settings.field_per_buff = 2;
else
settings.field_per_buff = 1;
if (settings.HorDcm > 1) {
settings.img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0;
settings.img_width = (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH;
} else {
settings.img_x = 0;
settings.img_width = BUZ_MAX_WIDTH;
}
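/* Worked example (assuming a PAL source: img_width 720, img_height 288
 * per field, BUZ_MAX_HEIGHT 576): a 352x288 request yields TmpDcm = 2,
 * VerDcm = 1, HorDcm = 2 and img_width = 704, so the size reported back
 * below is 704/2 x 288*2/(2*1) = 352x288. */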
/* check */
res = zoran_check_jpg_settings(zr, &settings, 1);
if (res)
goto tryfmt_unlock_and_return;
/* tell the user what we actually did */
fmt->fmt.pix.width = settings.img_width / settings.HorDcm;
fmt->fmt.pix.height = settings.img_height * 2 /
(settings.TmpDcm * settings.VerDcm);
if (settings.TmpDcm == 1)
fmt->fmt.pix.field = (fh->jpg_settings.odd_even ?
V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT);
else
fmt->fmt.pix.field = (fh->jpg_settings.odd_even ?
V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM);
fmt->fmt.pix.sizeimage = zoran_v4l2_calc_bufsize(&settings);
fmt->fmt.pix.bytesperline = 0;
fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
tryfmt_unlock_and_return:
mutex_unlock(&zr->resource_lock);
return res;
}
static int zoran_try_fmt_vid_cap(struct file *file, void *__fh,
struct v4l2_format *fmt)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
int bpp;
int i;
if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG)
return zoran_try_fmt_vid_out(file, fh, fmt);
mutex_lock(&zr->resource_lock);
for (i = 0; i < NUM_FORMATS; i++)
if (zoran_formats[i].fourcc == fmt->fmt.pix.pixelformat)
break;
if (i == NUM_FORMATS) {
mutex_unlock(&zr->resource_lock);
return -EINVAL;
}
bpp = DIV_ROUND_UP(zoran_formats[i].depth, 8);
v4l_bound_align_image(
&fmt->fmt.pix.width, BUZ_MIN_WIDTH, BUZ_MAX_WIDTH, bpp == 2 ? 1 : 2,
&fmt->fmt.pix.height, BUZ_MIN_HEIGHT, BUZ_MAX_HEIGHT, 0, 0);
mutex_unlock(&zr->resource_lock);
return 0;
}
static int zoran_s_fmt_vid_overlay(struct file *file, void *__fh,
struct v4l2_format *fmt)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
int res;
dprintk(3, "x=%d, y=%d, w=%d, h=%d, cnt=%d, map=0x%p\n",
fmt->fmt.win.w.left, fmt->fmt.win.w.top,
fmt->fmt.win.w.width,
fmt->fmt.win.w.height,
fmt->fmt.win.clipcount,
fmt->fmt.win.bitmap);
mutex_lock(&zr->resource_lock);
res = setup_window(fh, fmt->fmt.win.w.left, fmt->fmt.win.w.top,
fmt->fmt.win.w.width, fmt->fmt.win.w.height,
(struct v4l2_clip __user *)fmt->fmt.win.clips,
fmt->fmt.win.clipcount, fmt->fmt.win.bitmap);
mutex_unlock(&zr->resource_lock);
return res;
}
static int zoran_s_fmt_vid_out(struct file *file, void *__fh,
struct v4l2_format *fmt)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
__le32 printformat = __cpu_to_le32(fmt->fmt.pix.pixelformat);
struct zoran_jpg_settings settings;
int res = 0;
dprintk(3, "size=%dx%d, fmt=0x%x (%4.4s)\n",
fmt->fmt.pix.width, fmt->fmt.pix.height,
fmt->fmt.pix.pixelformat,
(char *) &printformat);
if (fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG)
return -EINVAL;
mutex_lock(&zr->resource_lock);
if (fh->buffers.allocated) {
dprintk(1, KERN_ERR "%s: VIDIOC_S_FMT - cannot change capture mode\n",
ZR_DEVNAME(zr));
res = -EBUSY;
goto sfmtjpg_unlock_and_return;
}
settings = fh->jpg_settings;
/* we actually need to set 'real' parameters now */
if (fmt->fmt.pix.height * 2 > BUZ_MAX_HEIGHT)
settings.TmpDcm = 1;
else
settings.TmpDcm = 2;
settings.decimation = 0;
if (fmt->fmt.pix.height <= fh->jpg_settings.img_height / 2)
settings.VerDcm = 2;
else
settings.VerDcm = 1;
if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 4)
settings.HorDcm = 4;
else if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 2)
settings.HorDcm = 2;
else
settings.HorDcm = 1;
if (settings.TmpDcm == 1)
settings.field_per_buff = 2;
else
settings.field_per_buff = 1;
if (settings.HorDcm > 1) {
settings.img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0;
settings.img_width = (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH;
} else {
settings.img_x = 0;
settings.img_width = BUZ_MAX_WIDTH;
}
/* check */
res = zoran_check_jpg_settings(zr, &settings, 0);
if (res)
goto sfmtjpg_unlock_and_return;
/* it's ok, so set them */
fh->jpg_settings = settings;
map_mode_jpg(fh, fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT);
fh->buffers.buffer_size = zoran_v4l2_calc_bufsize(&fh->jpg_settings);
/* tell the user what we actually did */
fmt->fmt.pix.width = settings.img_width / settings.HorDcm;
fmt->fmt.pix.height = settings.img_height * 2 /
(settings.TmpDcm * settings.VerDcm);
if (settings.TmpDcm == 1)
fmt->fmt.pix.field = (fh->jpg_settings.odd_even ?
V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT);
else
fmt->fmt.pix.field = (fh->jpg_settings.odd_even ?
V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM);
fmt->fmt.pix.bytesperline = 0;
fmt->fmt.pix.sizeimage = fh->buffers.buffer_size;
fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
sfmtjpg_unlock_and_return:
mutex_unlock(&zr->resource_lock);
return res;
}
static int zoran_s_fmt_vid_cap(struct file *file, void *__fh,
struct v4l2_format *fmt)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
int i;
int res = 0;
if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG)
return zoran_s_fmt_vid_out(file, fh, fmt);
for (i = 0; i < NUM_FORMATS; i++)
if (fmt->fmt.pix.pixelformat == zoran_formats[i].fourcc)
break;
if (i == NUM_FORMATS) {
dprintk(1, KERN_ERR "%s: VIDIOC_S_FMT - unknown/unsupported format 0x%x\n",
ZR_DEVNAME(zr), fmt->fmt.pix.pixelformat);
return -EINVAL;
}
mutex_lock(&zr->resource_lock);
if ((fh->map_mode != ZORAN_MAP_MODE_RAW && fh->buffers.allocated) ||
fh->buffers.active != ZORAN_FREE) {
dprintk(1, KERN_ERR "%s: VIDIOC_S_FMT - cannot change capture mode\n",
ZR_DEVNAME(zr));
res = -EBUSY;
goto sfmtv4l_unlock_and_return;
}
if (fmt->fmt.pix.height > BUZ_MAX_HEIGHT)
fmt->fmt.pix.height = BUZ_MAX_HEIGHT;
if (fmt->fmt.pix.width > BUZ_MAX_WIDTH)
fmt->fmt.pix.width = BUZ_MAX_WIDTH;
map_mode_raw(fh);
res = zoran_v4l_set_format(fh, fmt->fmt.pix.width, fmt->fmt.pix.height,
&zoran_formats[i]);
if (res)
goto sfmtv4l_unlock_and_return;
/* tell the user the results/missing stuff */
fmt->fmt.pix.bytesperline = fh->v4l_settings.bytesperline;
fmt->fmt.pix.sizeimage = fh->v4l_settings.height * fh->v4l_settings.bytesperline;
fmt->fmt.pix.colorspace = fh->v4l_settings.format->colorspace;
if (BUZ_MAX_HEIGHT < (fh->v4l_settings.height * 2))
fmt->fmt.pix.field = V4L2_FIELD_INTERLACED;
else
fmt->fmt.pix.field = V4L2_FIELD_TOP;
sfmtv4l_unlock_and_return:
mutex_unlock(&zr->resource_lock);
return res;
}
static int zoran_g_fbuf(struct file *file, void *__fh,
struct v4l2_framebuffer *fb)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
memset(fb, 0, sizeof(*fb));
mutex_lock(&zr->resource_lock);
fb->base = zr->vbuf_base;
fb->fmt.width = zr->vbuf_width;
fb->fmt.height = zr->vbuf_height;
if (zr->overlay_settings.format)
fb->fmt.pixelformat = zr->overlay_settings.format->fourcc;
fb->fmt.bytesperline = zr->vbuf_bytesperline;
mutex_unlock(&zr->resource_lock);
fb->fmt.colorspace = V4L2_COLORSPACE_SRGB;
fb->fmt.field = V4L2_FIELD_INTERLACED;
fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
return 0;
}
static int zoran_s_fbuf(struct file *file, void *__fh,
struct v4l2_framebuffer *fb)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
int i, res = 0;
__le32 printformat = __cpu_to_le32(fb->fmt.pixelformat);
for (i = 0; i < NUM_FORMATS; i++)
if (zoran_formats[i].fourcc == fb->fmt.pixelformat)
break;
if (i == NUM_FORMATS) {
dprintk(1, KERN_ERR "%s: VIDIOC_S_FBUF - format=0x%x (%4.4s) not allowed\n",
ZR_DEVNAME(zr), fb->fmt.pixelformat,
(char *)&printformat);
return -EINVAL;
}
mutex_lock(&zr->resource_lock);
res = setup_fbuffer(fh, fb->base, &zoran_formats[i], fb->fmt.width,
fb->fmt.height, fb->fmt.bytesperline);
mutex_unlock(&zr->resource_lock);
return res;
}
static int zoran_overlay(struct file *file, void *__fh, unsigned int on)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
int res;
mutex_lock(&zr->resource_lock);
res = setup_overlay(fh, on);
mutex_unlock(&zr->resource_lock);
return res;
}
static int zoran_streamoff(struct file *file, void *__fh, enum v4l2_buf_type type);
static int zoran_reqbufs(struct file *file, void *__fh, struct v4l2_requestbuffers *req)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
int res = 0;
if (req->memory != V4L2_MEMORY_MMAP) {
dprintk(2,
KERN_ERR
"%s: only MEMORY_MMAP capture is supported, not %d\n",
ZR_DEVNAME(zr), req->memory);
return -EINVAL;
}
if (req->count == 0)
return zoran_streamoff(file, fh, req->type);
mutex_lock(&zr->resource_lock);
if (fh->buffers.allocated) {
dprintk(2,
KERN_ERR
"%s: VIDIOC_REQBUFS - buffers already allocated\n",
ZR_DEVNAME(zr));
res = -EBUSY;
goto v4l2reqbuf_unlock_and_return;
}
if (fh->map_mode == ZORAN_MAP_MODE_RAW &&
req->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
/* control user input */
if (req->count < 2)
req->count = 2;
if (req->count > v4l_nbufs)
req->count = v4l_nbufs;
/* The next mmap will map the V4L buffers */
map_mode_raw(fh);
fh->buffers.num_buffers = req->count;
if (v4l_fbuffer_alloc(fh)) {
res = -ENOMEM;
goto v4l2reqbuf_unlock_and_return;
}
} else if (fh->map_mode == ZORAN_MAP_MODE_JPG_REC ||
fh->map_mode == ZORAN_MAP_MODE_JPG_PLAY) {
/* we need to calculate size ourselves now */
if (req->count < 4)
req->count = 4;
if (req->count > jpg_nbufs)
req->count = jpg_nbufs;
/* The next mmap will map the MJPEG buffers */
map_mode_jpg(fh, req->type == V4L2_BUF_TYPE_VIDEO_OUTPUT);
fh->buffers.num_buffers = req->count;
fh->buffers.buffer_size = zoran_v4l2_calc_bufsize(&fh->jpg_settings);
if (jpg_fbuffer_alloc(fh)) {
res = -ENOMEM;
goto v4l2reqbuf_unlock_and_return;
}
} else {
dprintk(1,
KERN_ERR
"%s: VIDIOC_REQBUFS - unknown type %d\n",
ZR_DEVNAME(zr), req->type);
res = -EINVAL;
goto v4l2reqbuf_unlock_and_return;
}
v4l2reqbuf_unlock_and_return:
mutex_unlock(&zr->resource_lock);
return res;
}
static int zoran_querybuf(struct file *file, void *__fh, struct v4l2_buffer *buf)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
int res;
mutex_lock(&zr->resource_lock);
res = zoran_v4l2_buffer_status(fh, buf, buf->index);
mutex_unlock(&zr->resource_lock);
return res;
}
static int zoran_qbuf(struct file *file, void *__fh, struct v4l2_buffer *buf)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
int res = 0, codec_mode, buf_type;
mutex_lock(&zr->resource_lock);
switch (fh->map_mode) {
case ZORAN_MAP_MODE_RAW:
if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
dprintk(1, KERN_ERR
"%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n",
ZR_DEVNAME(zr), buf->type, fh->map_mode);
res = -EINVAL;
goto qbuf_unlock_and_return;
}
res = zoran_v4l_queue_frame(fh, buf->index);
if (res)
goto qbuf_unlock_and_return;
if (!zr->v4l_memgrab_active && fh->buffers.active == ZORAN_LOCKED)
zr36057_set_memgrab(zr, 1);
break;
case ZORAN_MAP_MODE_JPG_REC:
case ZORAN_MAP_MODE_JPG_PLAY:
if (fh->map_mode == ZORAN_MAP_MODE_JPG_PLAY) {
buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
codec_mode = BUZ_MODE_MOTION_DECOMPRESS;
} else {
buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
codec_mode = BUZ_MODE_MOTION_COMPRESS;
}
if (buf->type != buf_type) {
dprintk(1, KERN_ERR
"%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n",
ZR_DEVNAME(zr), buf->type, fh->map_mode);
res = -EINVAL;
goto qbuf_unlock_and_return;
}
res = zoran_jpg_queue_frame(fh, buf->index, codec_mode);
if (res != 0)
goto qbuf_unlock_and_return;
if (zr->codec_mode == BUZ_MODE_IDLE &&
fh->buffers.active == ZORAN_LOCKED)
zr36057_enable_jpg(zr, codec_mode);
break;
default:
dprintk(1, KERN_ERR
"%s: VIDIOC_QBUF - unsupported type %d\n",
ZR_DEVNAME(zr), buf->type);
res = -EINVAL;
break;
}
qbuf_unlock_and_return:
mutex_unlock(&zr->resource_lock);
return res;
}
static int zoran_dqbuf(struct file *file, void *__fh, struct v4l2_buffer *buf)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
int res = 0, buf_type, num = -1; /* compiler borks here (?) */
mutex_lock(&zr->resource_lock);
switch (fh->map_mode) {
case ZORAN_MAP_MODE_RAW:
if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
dprintk(1, KERN_ERR
"%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n",
ZR_DEVNAME(zr), buf->type, fh->map_mode);
res = -EINVAL;
goto dqbuf_unlock_and_return;
}
num = zr->v4l_pend[zr->v4l_sync_tail & V4L_MASK_FRAME];
if (file->f_flags & O_NONBLOCK &&
zr->v4l_buffers.buffer[num].state != BUZ_STATE_DONE) {
res = -EAGAIN;
goto dqbuf_unlock_and_return;
}
res = v4l_sync(fh, num);
if (res)
goto dqbuf_unlock_and_return;
zr->v4l_sync_tail++;
res = zoran_v4l2_buffer_status(fh, buf, num);
break;
case ZORAN_MAP_MODE_JPG_REC:
case ZORAN_MAP_MODE_JPG_PLAY:
{
struct zoran_sync bs;
if (fh->map_mode == ZORAN_MAP_MODE_JPG_PLAY)
buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
else
buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (buf->type != buf_type) {
dprintk(1, KERN_ERR
"%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n",
ZR_DEVNAME(zr), buf->type, fh->map_mode);
res = -EINVAL;
goto dqbuf_unlock_and_return;
}
num = zr->jpg_pend[zr->jpg_que_tail & BUZ_MASK_FRAME];
if (file->f_flags & O_NONBLOCK &&
zr->jpg_buffers.buffer[num].state != BUZ_STATE_DONE) {
res = -EAGAIN;
goto dqbuf_unlock_and_return;
}
bs.frame = 0; /* suppress compiler warning */
res = jpg_sync(fh, &bs);
if (res)
goto dqbuf_unlock_and_return;
res = zoran_v4l2_buffer_status(fh, buf, bs.frame);
break;
}
default:
dprintk(1, KERN_ERR
"%s: VIDIOC_DQBUF - unsupported type %d\n",
ZR_DEVNAME(zr), buf->type);
res = -EINVAL;
break;
}
dqbuf_unlock_and_return:
mutex_unlock(&zr->resource_lock);
return res;
}
static int zoran_streamon(struct file *file, void *__fh, enum v4l2_buf_type type)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
int res = 0;
mutex_lock(&zr->resource_lock);
switch (fh->map_mode) {
case ZORAN_MAP_MODE_RAW: /* raw capture */
if (zr->v4l_buffers.active != ZORAN_ACTIVE ||
fh->buffers.active != ZORAN_ACTIVE) {
res = -EBUSY;
goto strmon_unlock_and_return;
}
zr->v4l_buffers.active = fh->buffers.active = ZORAN_LOCKED;
zr->v4l_settings = fh->v4l_settings;
zr->v4l_sync_tail = zr->v4l_pend_tail;
if (!zr->v4l_memgrab_active &&
zr->v4l_pend_head != zr->v4l_pend_tail) {
zr36057_set_memgrab(zr, 1);
}
break;
case ZORAN_MAP_MODE_JPG_REC:
case ZORAN_MAP_MODE_JPG_PLAY:
/* what is the codec mode right now? */
if (zr->jpg_buffers.active != ZORAN_ACTIVE ||
fh->buffers.active != ZORAN_ACTIVE) {
res = -EBUSY;
goto strmon_unlock_and_return;
}
zr->jpg_buffers.active = fh->buffers.active = ZORAN_LOCKED;
if (zr->jpg_que_head != zr->jpg_que_tail) {
/* Start the jpeg codec when the first frame is queued */
jpeg_start(zr);
}
break;
default:
dprintk(1,
KERN_ERR
"%s: VIDIOC_STREAMON - invalid map mode %d\n",
ZR_DEVNAME(zr), fh->map_mode);
res = -EINVAL;
break;
}
strmon_unlock_and_return:
mutex_unlock(&zr->resource_lock);
return res;
}
static int zoran_streamoff(struct file *file, void *__fh, enum v4l2_buf_type type)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
int i, res = 0;
unsigned long flags;
mutex_lock(&zr->resource_lock);
switch (fh->map_mode) {
case ZORAN_MAP_MODE_RAW: /* raw capture */
if (fh->buffers.active == ZORAN_FREE &&
zr->v4l_buffers.active != ZORAN_FREE) {
res = -EPERM; /* stay off other's settings! */
goto strmoff_unlock_and_return;
}
if (zr->v4l_buffers.active == ZORAN_FREE)
goto strmoff_unlock_and_return;
spin_lock_irqsave(&zr->spinlock, flags);
/* unload capture */
if (zr->v4l_memgrab_active) {
zr36057_set_memgrab(zr, 0);
}
for (i = 0; i < fh->buffers.num_buffers; i++)
zr->v4l_buffers.buffer[i].state = BUZ_STATE_USER;
fh->buffers = zr->v4l_buffers;
zr->v4l_buffers.active = fh->buffers.active = ZORAN_FREE;
zr->v4l_grab_seq = 0;
zr->v4l_pend_head = zr->v4l_pend_tail = 0;
zr->v4l_sync_tail = 0;
spin_unlock_irqrestore(&zr->spinlock, flags);
break;
case ZORAN_MAP_MODE_JPG_REC:
case ZORAN_MAP_MODE_JPG_PLAY:
if (fh->buffers.active == ZORAN_FREE &&
zr->jpg_buffers.active != ZORAN_FREE) {
res = -EPERM; /* stay off other's settings! */
goto strmoff_unlock_and_return;
}
if (zr->jpg_buffers.active == ZORAN_FREE)
goto strmoff_unlock_and_return;
res = jpg_qbuf(fh, -1,
(fh->map_mode == ZORAN_MAP_MODE_JPG_REC) ?
BUZ_MODE_MOTION_COMPRESS :
BUZ_MODE_MOTION_DECOMPRESS);
if (res)
goto strmoff_unlock_and_return;
break;
default:
dprintk(1, KERN_ERR
"%s: VIDIOC_STREAMOFF - invalid map mode %d\n",
ZR_DEVNAME(zr), fh->map_mode);
res = -EINVAL;
break;
}
strmoff_unlock_and_return:
mutex_unlock(&zr->resource_lock);
return res;
}
static int zoran_queryctrl(struct file *file, void *__fh,
struct v4l2_queryctrl *ctrl)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
/* we only support hue/saturation/contrast/brightness */
if (ctrl->id < V4L2_CID_BRIGHTNESS ||
ctrl->id > V4L2_CID_HUE)
return -EINVAL;
decoder_call(zr, core, queryctrl, ctrl);
return 0;
}
static int zoran_g_ctrl(struct file *file, void *__fh, struct v4l2_control *ctrl)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
/* we only support hue/saturation/contrast/brightness */
if (ctrl->id < V4L2_CID_BRIGHTNESS ||
ctrl->id > V4L2_CID_HUE)
return -EINVAL;
mutex_lock(&zr->resource_lock);
decoder_call(zr, core, g_ctrl, ctrl);
mutex_unlock(&zr->resource_lock);
return 0;
}
static int zoran_s_ctrl(struct file *file, void *__fh, struct v4l2_control *ctrl)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
/* we only support hue/saturation/contrast/brightness */
if (ctrl->id < V4L2_CID_BRIGHTNESS ||
ctrl->id > V4L2_CID_HUE)
return -EINVAL;
mutex_lock(&zr->resource_lock);
decoder_call(zr, core, s_ctrl, ctrl);
mutex_unlock(&zr->resource_lock);
return 0;
}
static int zoran_g_std(struct file *file, void *__fh, v4l2_std_id *std)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
mutex_lock(&zr->resource_lock);
*std = zr->norm;
mutex_unlock(&zr->resource_lock);
return 0;
}
static int zoran_s_std(struct file *file, void *__fh, v4l2_std_id *std)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
int res = 0;
mutex_lock(&zr->resource_lock);
res = zoran_set_norm(zr, *std);
if (res)
goto sstd_unlock_and_return;
res = wait_grab_pending(zr);
sstd_unlock_and_return:
mutex_unlock(&zr->resource_lock);
return res;
}
static int zoran_enum_input(struct file *file, void *__fh,
struct v4l2_input *inp)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
if (inp->index >= zr->card.inputs)
return -EINVAL;
strncpy(inp->name, zr->card.input[inp->index].name,
sizeof(inp->name) - 1);
inp->type = V4L2_INPUT_TYPE_CAMERA;
inp->std = V4L2_STD_ALL;
/* Get status of video decoder */
mutex_lock(&zr->resource_lock);
decoder_call(zr, video, g_input_status, &inp->status);
mutex_unlock(&zr->resource_lock);
return 0;
}
static int zoran_g_input(struct file *file, void *__fh, unsigned int *input)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
mutex_lock(&zr->resource_lock);
*input = zr->input;
mutex_unlock(&zr->resource_lock);
return 0;
}
static int zoran_s_input(struct file *file, void *__fh, unsigned int input)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
int res;
mutex_lock(&zr->resource_lock);
res = zoran_set_input(zr, input);
if (res)
goto sinput_unlock_and_return;
/* Make sure the changes come into effect */
res = wait_grab_pending(zr);
sinput_unlock_and_return:
mutex_unlock(&zr->resource_lock);
return res;
}
static int zoran_enum_output(struct file *file, void *__fh,
struct v4l2_output *outp)
{
if (outp->index != 0)
return -EINVAL;
outp->index = 0;
outp->type = V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY;
strncpy(outp->name, "Autodetect", sizeof(outp->name)-1);
return 0;
}
static int zoran_g_output(struct file *file, void *__fh, unsigned int *output)
{
*output = 0;
return 0;
}
static int zoran_s_output(struct file *file, void *__fh, unsigned int output)
{
if (output != 0)
return -EINVAL;
return 0;
}
/* cropping (sub-frame capture) */
static int zoran_cropcap(struct file *file, void *__fh,
struct v4l2_cropcap *cropcap)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
int type = cropcap->type, res = 0;
memset(cropcap, 0, sizeof(*cropcap));
cropcap->type = type;
mutex_lock(&zr->resource_lock);
if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
(cropcap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
fh->map_mode == ZORAN_MAP_MODE_RAW)) {
dprintk(1, KERN_ERR
"%s: VIDIOC_CROPCAP - subcapture only supported for compressed capture\n",
ZR_DEVNAME(zr));
res = -EINVAL;
goto cropcap_unlock_and_return;
}
cropcap->bounds.top = cropcap->bounds.left = 0;
cropcap->bounds.width = BUZ_MAX_WIDTH;
cropcap->bounds.height = BUZ_MAX_HEIGHT;
cropcap->defrect.top = cropcap->defrect.left = 0;
cropcap->defrect.width = BUZ_MIN_WIDTH;
cropcap->defrect.height = BUZ_MIN_HEIGHT;
cropcap_unlock_and_return:
mutex_unlock(&zr->resource_lock);
return res;
}
static int zoran_g_crop(struct file *file, void *__fh, struct v4l2_crop *crop)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
int type = crop->type, res = 0;
memset(crop, 0, sizeof(*crop));
crop->type = type;
mutex_lock(&zr->resource_lock);
if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
(crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
fh->map_mode == ZORAN_MAP_MODE_RAW)) {
dprintk(1,
KERN_ERR
"%s: VIDIOC_G_CROP - subcapture only supported for compressed capture\n",
ZR_DEVNAME(zr));
res = -EINVAL;
goto gcrop_unlock_and_return;
}
crop->c.top = fh->jpg_settings.img_y;
crop->c.left = fh->jpg_settings.img_x;
crop->c.width = fh->jpg_settings.img_width;
crop->c.height = fh->jpg_settings.img_height;
gcrop_unlock_and_return:
mutex_unlock(&zr->resource_lock);
return res;
}
static int zoran_s_crop(struct file *file, void *__fh, struct v4l2_crop *crop)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
int res = 0;
struct zoran_jpg_settings settings;
settings = fh->jpg_settings;
mutex_lock(&zr->resource_lock);
if (fh->buffers.allocated) {
dprintk(1, KERN_ERR
"%s: VIDIOC_S_CROP - cannot change settings while active\n",
ZR_DEVNAME(zr));
res = -EBUSY;
goto scrop_unlock_and_return;
}
if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
(crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
fh->map_mode == ZORAN_MAP_MODE_RAW)) {
dprintk(1, KERN_ERR
"%s: VIDIOC_G_CROP - subcapture only supported for compressed capture\n",
ZR_DEVNAME(zr));
res = -EINVAL;
goto scrop_unlock_and_return;
}
/* move into a form that we understand */
settings.img_x = crop->c.left;
settings.img_y = crop->c.top;
settings.img_width = crop->c.width;
settings.img_height = crop->c.height;
/* check validity */
res = zoran_check_jpg_settings(zr, &settings, 0);
if (res)
goto scrop_unlock_and_return;
/* accept */
fh->jpg_settings = settings;
scrop_unlock_and_return:
mutex_unlock(&zr->resource_lock);
return res;
}
static int zoran_g_jpegcomp(struct file *file, void *__fh,
struct v4l2_jpegcompression *params)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
memset(params, 0, sizeof(*params));
mutex_lock(&zr->resource_lock);
params->quality = fh->jpg_settings.jpg_comp.quality;
params->APPn = fh->jpg_settings.jpg_comp.APPn;
memcpy(params->APP_data,
fh->jpg_settings.jpg_comp.APP_data,
fh->jpg_settings.jpg_comp.APP_len);
params->APP_len = fh->jpg_settings.jpg_comp.APP_len;
memcpy(params->COM_data,
fh->jpg_settings.jpg_comp.COM_data,
fh->jpg_settings.jpg_comp.COM_len);
params->COM_len = fh->jpg_settings.jpg_comp.COM_len;
params->jpeg_markers =
fh->jpg_settings.jpg_comp.jpeg_markers;
mutex_unlock(&zr->resource_lock);
return 0;
}
static int zoran_s_jpegcomp(struct file *file, void *__fh,
struct v4l2_jpegcompression *params)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
int res = 0;
struct zoran_jpg_settings settings;
settings = fh->jpg_settings;
settings.jpg_comp = *params;
mutex_lock(&zr->resource_lock);
if (fh->buffers.active != ZORAN_FREE) {
dprintk(1, KERN_WARNING
"%s: VIDIOC_S_JPEGCOMP called while in playback/capture mode\n",
ZR_DEVNAME(zr));
res = -EBUSY;
goto sjpegc_unlock_and_return;
}
res = zoran_check_jpg_settings(zr, &settings, 0);
if (res)
goto sjpegc_unlock_and_return;
if (!fh->buffers.allocated)
fh->buffers.buffer_size =
zoran_v4l2_calc_bufsize(&fh->jpg_settings);
fh->jpg_settings.jpg_comp = *params = settings.jpg_comp;
sjpegc_unlock_and_return:
mutex_unlock(&zr->resource_lock);
return res;
}
static unsigned int
zoran_poll (struct file *file,
poll_table *wait)
{
struct zoran_fh *fh = file->private_data;
struct zoran *zr = fh->zr;
int res = 0, frame;
unsigned long flags;
/* we should check whether buffers are ready to be synced on
* (w/o waits - O_NONBLOCK) here
* if ready for read (sync), return POLLIN|POLLRDNORM,
* if ready for write (sync), return POLLOUT|POLLWRNORM,
* if error, return POLLERR,
* if no buffers queued or so, return POLLNVAL
*/
mutex_lock(&zr->resource_lock);
switch (fh->map_mode) {
case ZORAN_MAP_MODE_RAW:
poll_wait(file, &zr->v4l_capq, wait);
frame = zr->v4l_pend[zr->v4l_sync_tail & V4L_MASK_FRAME];
spin_lock_irqsave(&zr->spinlock, flags);
dprintk(3,
KERN_DEBUG
"%s: %s() raw - active=%c, sync_tail=%lu/%c, pend_tail=%lu, pend_head=%lu\n",
ZR_DEVNAME(zr), __func__,
"FAL"[fh->buffers.active], zr->v4l_sync_tail,
"UPMD"[zr->v4l_buffers.buffer[frame].state],
zr->v4l_pend_tail, zr->v4l_pend_head);
/* Process is the one capturing? */
if (fh->buffers.active != ZORAN_FREE &&
/* Buffer ready to DQBUF? */
zr->v4l_buffers.buffer[frame].state == BUZ_STATE_DONE)
res = POLLIN | POLLRDNORM;
spin_unlock_irqrestore(&zr->spinlock, flags);
break;
case ZORAN_MAP_MODE_JPG_REC:
case ZORAN_MAP_MODE_JPG_PLAY:
poll_wait(file, &zr->jpg_capq, wait);
frame = zr->jpg_pend[zr->jpg_que_tail & BUZ_MASK_FRAME];
spin_lock_irqsave(&zr->spinlock, flags);
dprintk(3,
KERN_DEBUG
"%s: %s() jpg - active=%c, que_tail=%lu/%c, que_head=%lu, dma=%lu/%lu\n",
ZR_DEVNAME(zr), __func__,
"FAL"[fh->buffers.active], zr->jpg_que_tail,
"UPMD"[zr->jpg_buffers.buffer[frame].state],
zr->jpg_que_head, zr->jpg_dma_tail, zr->jpg_dma_head);
if (fh->buffers.active != ZORAN_FREE &&
zr->jpg_buffers.buffer[frame].state == BUZ_STATE_DONE) {
if (fh->map_mode == ZORAN_MAP_MODE_JPG_REC)
res = POLLIN | POLLRDNORM;
else
res = POLLOUT | POLLWRNORM;
}
spin_unlock_irqrestore(&zr->spinlock, flags);
break;
default:
dprintk(1,
KERN_ERR
"%s: %s - internal error, unknown map_mode=%d\n",
ZR_DEVNAME(zr), __func__, fh->map_mode);
res = POLLNVAL;
}
mutex_unlock(&zr->resource_lock);
return res;
}
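/*
 * A minimal userspace sketch (illustration only, not part of the
 * driver) of how the poll contract implemented above is typically
 * consumed: wait until a captured frame is ready to be dequeued.
 * The timeout value is an arbitrary assumption.
 */
#if 0
#include <poll.h>

static int wait_for_frame(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDNORM };

	if (poll(&pfd, 1, 5000) <= 0)
		return -1;	/* timeout or error */
	return (pfd.revents & (POLLIN | POLLRDNORM)) ? 0 : -1;
}
#endif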
/*
* This maps the buffers to user space.
*
* Depending on the state of fh->map_mode
* the V4L or the MJPEG buffers are mapped
* per buffer or all together
*
* Note that we need to connect to some
* unmap signal event to unmap and de-allocate
* the buffer accordingly (zoran_vm_close())
*/
static void
zoran_vm_open (struct vm_area_struct *vma)
{
struct zoran_mapping *map = vma->vm_private_data;
map->count++;
}
static void
zoran_vm_close (struct vm_area_struct *vma)
{
struct zoran_mapping *map = vma->vm_private_data;
struct zoran_fh *fh = map->file->private_data;
struct zoran *zr = fh->zr;
int i;
if (--map->count > 0)
return;
dprintk(3, KERN_INFO "%s: %s - munmap(%s)\n", ZR_DEVNAME(zr),
__func__, mode_name(fh->map_mode));
for (i = 0; i < fh->buffers.num_buffers; i++) {
if (fh->buffers.buffer[i].map == map)
fh->buffers.buffer[i].map = NULL;
}
kfree(map);
/* Any buffers still mapped? */
for (i = 0; i < fh->buffers.num_buffers; i++)
if (fh->buffers.buffer[i].map)
return;
dprintk(3, KERN_INFO "%s: %s - free %s buffers\n", ZR_DEVNAME(zr),
__func__, mode_name(fh->map_mode));
mutex_lock(&zr->resource_lock);
if (fh->map_mode == ZORAN_MAP_MODE_RAW) {
if (fh->buffers.active != ZORAN_FREE) {
unsigned long flags;
spin_lock_irqsave(&zr->spinlock, flags);
zr36057_set_memgrab(zr, 0);
zr->v4l_buffers.allocated = 0;
zr->v4l_buffers.active = fh->buffers.active = ZORAN_FREE;
spin_unlock_irqrestore(&zr->spinlock, flags);
}
v4l_fbuffer_free(fh);
} else {
if (fh->buffers.active != ZORAN_FREE) {
jpg_qbuf(fh, -1, zr->codec_mode);
zr->jpg_buffers.allocated = 0;
zr->jpg_buffers.active = fh->buffers.active = ZORAN_FREE;
}
jpg_fbuffer_free(fh);
}
mutex_unlock(&zr->resource_lock);
}
static const struct vm_operations_struct zoran_vm_ops = {
.open = zoran_vm_open,
.close = zoran_vm_close,
};
static int
zoran_mmap (struct file *file,
struct vm_area_struct *vma)
{
struct zoran_fh *fh = file->private_data;
struct zoran *zr = fh->zr;
unsigned long size = (vma->vm_end - vma->vm_start);
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
int i, j;
unsigned long page, start = vma->vm_start, todo, pos, fraglen;
int first, last;
struct zoran_mapping *map;
int res = 0;
dprintk(3,
KERN_INFO "%s: %s(%s) of 0x%08lx-0x%08lx (size=%lu)\n",
ZR_DEVNAME(zr), __func__,
mode_name(fh->map_mode), vma->vm_start, vma->vm_end, size);
if (!(vma->vm_flags & VM_SHARED) || !(vma->vm_flags & VM_READ) ||
!(vma->vm_flags & VM_WRITE)) {
dprintk(1,
KERN_ERR
"%s: %s - no MAP_SHARED/PROT_{READ,WRITE} given\n",
ZR_DEVNAME(zr), __func__);
return -EINVAL;
}
mutex_lock(&zr->resource_lock);
if (!fh->buffers.allocated) {
dprintk(1,
KERN_ERR
"%s: %s(%s) - buffers not yet allocated\n",
ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode));
res = -ENOMEM;
goto mmap_unlock_and_return;
}
first = offset / fh->buffers.buffer_size;
last = first - 1 + size / fh->buffers.buffer_size;
if (offset % fh->buffers.buffer_size != 0 ||
size % fh->buffers.buffer_size != 0 || first < 0 ||
last < 0 || first >= fh->buffers.num_buffers ||
last >= fh->buffers.num_buffers) {
dprintk(1,
KERN_ERR
"%s: %s(%s) - offset=%lu or size=%lu invalid for bufsize=%d and numbufs=%d\n",
ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode), offset, size,
fh->buffers.buffer_size,
fh->buffers.num_buffers);
res = -EINVAL;
goto mmap_unlock_and_return;
}
/* Check if any buffers are already mapped */
for (i = first; i <= last; i++) {
if (fh->buffers.buffer[i].map) {
dprintk(1,
KERN_ERR
"%s: %s(%s) - buffer %d already mapped\n",
ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode), i);
res = -EBUSY;
goto mmap_unlock_and_return;
}
}
/* map these buffers */
map = kmalloc(sizeof(struct zoran_mapping), GFP_KERNEL);
if (!map) {
res = -ENOMEM;
goto mmap_unlock_and_return;
}
map->file = file;
map->count = 1;
vma->vm_ops = &zoran_vm_ops;
vma->vm_flags |= VM_DONTEXPAND;
vma->vm_private_data = map;
if (fh->map_mode == ZORAN_MAP_MODE_RAW) {
for (i = first; i <= last; i++) {
todo = size;
if (todo > fh->buffers.buffer_size)
todo = fh->buffers.buffer_size;
page = fh->buffers.buffer[i].v4l.fbuffer_phys;
if (remap_pfn_range(vma, start, page >> PAGE_SHIFT,
todo, PAGE_SHARED)) {
dprintk(1,
KERN_ERR
"%s: %s(V4L) - remap_pfn_range failed\n",
ZR_DEVNAME(zr), __func__);
res = -EAGAIN;
goto mmap_unlock_and_return;
}
size -= todo;
start += todo;
fh->buffers.buffer[i].map = map;
if (size == 0)
break;
}
} else {
for (i = first; i <= last; i++) {
for (j = 0;
j < fh->buffers.buffer_size / PAGE_SIZE;
j++) {
fraglen =
(le32_to_cpu(fh->buffers.buffer[i].jpg.
frag_tab[2 * j + 1]) & ~1) << 1;
todo = size;
if (todo > fraglen)
todo = fraglen;
pos =
le32_to_cpu(fh->buffers.
buffer[i].jpg.frag_tab[2 * j]);
/* should just be pos on i386 */
page = virt_to_phys(bus_to_virt(pos))
>> PAGE_SHIFT;
if (remap_pfn_range(vma, start, page,
todo, PAGE_SHARED)) {
dprintk(1,
KERN_ERR
"%s: %s(V4L) - remap_pfn_range failed\n",
ZR_DEVNAME(zr), __func__);
res = -EAGAIN;
goto mmap_unlock_and_return;
}
size -= todo;
start += todo;
if (size == 0)
break;
if (le32_to_cpu(fh->buffers.buffer[i].jpg.
frag_tab[2 * j + 1]) & 1)
break; /* was last fragment */
}
fh->buffers.buffer[i].map = map;
if (size == 0)
break;
}
}
mmap_unlock_and_return:
mutex_unlock(&zr->resource_lock);
return res;
}
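/*
 * Hedged userspace sketch (illustration only, not part of the driver):
 * mapping one buffer the way the checks above require - MAP_SHARED
 * with both PROT_READ and PROT_WRITE, and an offset that is an exact
 * multiple of the buffer size. buf_size and buf_index are assumed
 * sample parameters.
 */
#if 0
#include <sys/mman.h>

static void *map_one_buffer(int fd, unsigned long buf_size,
			    unsigned int buf_index)
{
	off_t off = (off_t)buf_index * buf_size;
	void *p = mmap(NULL, buf_size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, off);

	return (p == MAP_FAILED) ? NULL : p;
}
#endif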
static const struct v4l2_ioctl_ops zoran_ioctl_ops = {
.vidioc_querycap = zoran_querycap,
.vidioc_cropcap = zoran_cropcap,
.vidioc_s_crop = zoran_s_crop,
.vidioc_g_crop = zoran_g_crop,
.vidioc_enum_input = zoran_enum_input,
.vidioc_g_input = zoran_g_input,
.vidioc_s_input = zoran_s_input,
.vidioc_enum_output = zoran_enum_output,
.vidioc_g_output = zoran_g_output,
.vidioc_s_output = zoran_s_output,
.vidioc_g_fbuf = zoran_g_fbuf,
.vidioc_s_fbuf = zoran_s_fbuf,
.vidioc_g_std = zoran_g_std,
.vidioc_s_std = zoran_s_std,
.vidioc_g_jpegcomp = zoran_g_jpegcomp,
.vidioc_s_jpegcomp = zoran_s_jpegcomp,
.vidioc_overlay = zoran_overlay,
.vidioc_reqbufs = zoran_reqbufs,
.vidioc_querybuf = zoran_querybuf,
.vidioc_qbuf = zoran_qbuf,
.vidioc_dqbuf = zoran_dqbuf,
.vidioc_streamon = zoran_streamon,
.vidioc_streamoff = zoran_streamoff,
.vidioc_enum_fmt_vid_cap = zoran_enum_fmt_vid_cap,
.vidioc_enum_fmt_vid_out = zoran_enum_fmt_vid_out,
.vidioc_enum_fmt_vid_overlay = zoran_enum_fmt_vid_overlay,
.vidioc_g_fmt_vid_cap = zoran_g_fmt_vid_cap,
.vidioc_g_fmt_vid_out = zoran_g_fmt_vid_out,
.vidioc_g_fmt_vid_overlay = zoran_g_fmt_vid_overlay,
.vidioc_s_fmt_vid_cap = zoran_s_fmt_vid_cap,
.vidioc_s_fmt_vid_out = zoran_s_fmt_vid_out,
.vidioc_s_fmt_vid_overlay = zoran_s_fmt_vid_overlay,
.vidioc_try_fmt_vid_cap = zoran_try_fmt_vid_cap,
.vidioc_try_fmt_vid_out = zoran_try_fmt_vid_out,
.vidioc_try_fmt_vid_overlay = zoran_try_fmt_vid_overlay,
.vidioc_queryctrl = zoran_queryctrl,
.vidioc_s_ctrl = zoran_s_ctrl,
.vidioc_g_ctrl = zoran_g_ctrl,
};
/* please use zr->resource_lock consistently and kill this wrapper */
static long zoran_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct zoran_fh *fh = file->private_data;
struct zoran *zr = fh->zr;
int ret;
mutex_lock(&zr->other_lock);
ret = video_ioctl2(file, cmd, arg);
mutex_unlock(&zr->other_lock);
return ret;
}
static const struct v4l2_file_operations zoran_fops = {
.owner = THIS_MODULE,
.open = zoran_open,
.release = zoran_close,
.unlocked_ioctl = zoran_ioctl,
.read = zoran_read,
.write = zoran_write,
.mmap = zoran_mmap,
.poll = zoran_poll,
};
struct video_device zoran_template __devinitdata = {
.name = ZORAN_NAME,
.fops = &zoran_fops,
.ioctl_ops = &zoran_ioctl_ops,
.release = &zoran_vdev_release,
.tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
};
| gpl-2.0 |
ReaperXL2/Overkill_v4_extended | arch/alpha/kernel/alpha_ksyms.c | 9222 | 2696 | /*
* linux/arch/alpha/kernel/alpha_ksyms.c
*
* Export the alpha-specific functions that are needed for loadable
* modules.
*/
#include <linux/module.h>
#include <asm/console.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
#include <asm/fpu.h>
#include <asm/machvec.h>
#include <linux/syscalls.h>
/* these are C runtime functions with special calling conventions: */
extern void __divl (void);
extern void __reml (void);
extern void __divq (void);
extern void __remq (void);
extern void __divlu (void);
extern void __remlu (void);
extern void __divqu (void);
extern void __remqu (void);
EXPORT_SYMBOL(alpha_mv);
EXPORT_SYMBOL(callback_getenv);
EXPORT_SYMBOL(callback_setenv);
EXPORT_SYMBOL(callback_save_env);
/* platform dependent support */
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strncat);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(__memsetw);
EXPORT_SYMBOL(__constant_c_memset);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(alpha_read_fp_reg);
EXPORT_SYMBOL(alpha_read_fp_reg_s);
EXPORT_SYMBOL(alpha_write_fp_reg);
EXPORT_SYMBOL(alpha_write_fp_reg_s);
/* entry.S */
EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(kernel_execve);
/* Networking helper routines. */
EXPORT_SYMBOL(csum_tcpudp_magic);
EXPORT_SYMBOL(ip_compute_csum);
EXPORT_SYMBOL(ip_fast_csum);
EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(csum_partial_copy_from_user);
EXPORT_SYMBOL(csum_ipv6_magic);
#ifdef CONFIG_MATHEMU_MODULE
extern long (*alpha_fp_emul_imprecise)(struct pt_regs *, unsigned long);
extern long (*alpha_fp_emul) (unsigned long pc);
EXPORT_SYMBOL(alpha_fp_emul_imprecise);
EXPORT_SYMBOL(alpha_fp_emul);
#endif
/*
* The following are specially called from the uaccess assembly stubs.
*/
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(__do_clear_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);
/*
* SMP-specific symbols.
*/
#ifdef CONFIG_SMP
EXPORT_SYMBOL(_atomic_dec_and_lock);
#endif /* CONFIG_SMP */
/*
* The following are special because they're not called
* explicitly (the C compiler or assembler generates them in
* response to division operations). Fortunately, their
* interface isn't gonna change any time soon now, so it's OK
* to leave it out of version control.
*/
# undef memcpy
# undef memset
EXPORT_SYMBOL(__divl);
EXPORT_SYMBOL(__divlu);
EXPORT_SYMBOL(__divq);
EXPORT_SYMBOL(__divqu);
EXPORT_SYMBOL(__reml);
EXPORT_SYMBOL(__remlu);
EXPORT_SYMBOL(__remq);
EXPORT_SYMBOL(__remqu);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memchr);
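/*
 * Hedged illustration (not part of this file): a plain C division like
 * the one below is what makes the compiler emit calls to the helpers
 * exported above; e.g. a signed 64-bit divide may lower to __divq on
 * Alpha.
 */
#if 0
long div_example(long a, long b)
{
	return a / b;	/* compiler may emit a call to __divq */
}
#endif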
| gpl-2.0 |
androidaosp/kernel-msm | drivers/net/irda/act200l-sir.c | 11526 | 7155 | /*********************************************************************
*
* Filename: act200l.c
* Version: 0.8
* Description: Implementation for the ACTiSYS ACT-IR200L dongle
* Status: Experimental.
* Author: SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>
* Created at: Fri Aug 3 17:35:42 2001
* Modified at: Fri Aug 17 10:22:40 2001
* Modified by: SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>
*
* Copyright (c) 2001 SHIMIZU Takuya, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
********************************************************************/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <net/irda/irda.h>
#include "sir-dev.h"
static int act200l_reset(struct sir_dev *dev);
static int act200l_open(struct sir_dev *dev);
static int act200l_close(struct sir_dev *dev);
static int act200l_change_speed(struct sir_dev *dev, unsigned speed);
/* Register 0: Control register #1 */
#define ACT200L_REG0 0x00
#define ACT200L_TXEN 0x01 /* Enable transmitter */
#define ACT200L_RXEN 0x02 /* Enable receiver */
/* Register 1: Control register #2 */
#define ACT200L_REG1 0x10
#define ACT200L_LODB 0x01 /* Load new baud rate count value */
#define ACT200L_WIDE 0x04 /* Expand the maximum allowable pulse */
/* Register 4: Output Power register */
#define ACT200L_REG4 0x40
#define ACT200L_OP0 0x01 /* Enable LED1C output */
#define ACT200L_OP1 0x02 /* Enable LED2C output */
#define ACT200L_BLKR 0x04
/* Register 5: Receive Mode register */
#define ACT200L_REG5 0x50
#define ACT200L_RWIDL 0x01 /* fixed 1.6us pulse mode */
/* Register 6: Receive Sensitivity register #1 */
#define ACT200L_REG6 0x60
#define ACT200L_RS0 0x01 /* receive threshold bit 0 */
#define ACT200L_RS1 0x02 /* receive threshold bit 1 */
/* Register 7: Receive Sensitivity register #2 */
#define ACT200L_REG7 0x70
#define ACT200L_ENPOS 0x04 /* Ignore the falling edge */
/* Registers 8,9: Baud Rate Divider registers #1 and #2 */
#define ACT200L_REG8 0x80
#define ACT200L_REG9 0x90
#define ACT200L_2400 0x5f
#define ACT200L_9600 0x17
#define ACT200L_19200 0x0b
#define ACT200L_38400 0x05
#define ACT200L_57600 0x03
#define ACT200L_115200 0x01
/* Register 13: Control register #3 */
#define ACT200L_REG13 0xd0
#define ACT200L_SHDW 0x01 /* Enable access to shadow registers */
/* Register 15: Status register */
#define ACT200L_REG15 0xf0
/* Register 21: Control register #4 */
#define ACT200L_REG21 0x50
#define ACT200L_EXCK 0x02 /* Disable clock output driver */
#define ACT200L_OSCL 0x04 /* oscillator in low power, medium accuracy mode */
static struct dongle_driver act200l = {
.owner = THIS_MODULE,
.driver_name = "ACTiSYS ACT-IR200L",
.type = IRDA_ACT200L_DONGLE,
.open = act200l_open,
.close = act200l_close,
.reset = act200l_reset,
.set_speed = act200l_change_speed,
};
static int __init act200l_sir_init(void)
{
return irda_register_dongle(&act200l);
}
static void __exit act200l_sir_cleanup(void)
{
irda_unregister_dongle(&act200l);
}
static int act200l_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
IRDA_DEBUG(2, "%s()\n", __func__ );
/* Power on the dongle */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
/* Set the speeds we can accept */
qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
qos->min_turn_time.bits = 0x03;
irda_qos_bits_to_value(qos);
/* irda thread waits 50 msec for power settling */
return 0;
}
static int act200l_close(struct sir_dev *dev)
{
IRDA_DEBUG(2, "%s()\n", __func__ );
/* Power off the dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
return 0;
}
/*
* Function act200l_change_speed (dev, speed)
*
* Set the speed for the ACTiSYS ACT-IR200L type dongle.
*
*/
static int act200l_change_speed(struct sir_dev *dev, unsigned speed)
{
u8 control[3];
int ret = 0;
IRDA_DEBUG(2, "%s()\n", __func__ );
/* Clear DTR and set RTS to enter command mode */
sirdev_set_dtr_rts(dev, FALSE, TRUE);
switch (speed) {
default:
ret = -EINVAL;
/* fall through */
case 9600:
control[0] = ACT200L_REG8 | (ACT200L_9600 & 0x0f);
control[1] = ACT200L_REG9 | ((ACT200L_9600 >> 4) & 0x0f);
break;
case 19200:
control[0] = ACT200L_REG8 | (ACT200L_19200 & 0x0f);
control[1] = ACT200L_REG9 | ((ACT200L_19200 >> 4) & 0x0f);
break;
case 38400:
control[0] = ACT200L_REG8 | (ACT200L_38400 & 0x0f);
control[1] = ACT200L_REG9 | ((ACT200L_38400 >> 4) & 0x0f);
break;
case 57600:
control[0] = ACT200L_REG8 | (ACT200L_57600 & 0x0f);
control[1] = ACT200L_REG9 | ((ACT200L_57600 >> 4) & 0x0f);
break;
case 115200:
control[0] = ACT200L_REG8 | (ACT200L_115200 & 0x0f);
control[1] = ACT200L_REG9 | ((ACT200L_115200 >> 4) & 0x0f);
break;
}
control[2] = ACT200L_REG1 | ACT200L_LODB | ACT200L_WIDE;
/* Write control bytes */
sirdev_raw_write(dev, control, 3);
msleep(5);
/* Go back to normal mode */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
dev->speed = speed;
return ret;
}
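/*
 * Worked example of the divider encoding used above: the 8-bit divider
 * value is split nibble-wise across registers 8 and 9. For 9600 baud,
 * ACT200L_9600 is 0x17, so:
 *   control[0] = ACT200L_REG8 | (0x17 & 0x0f)        = 0x80 | 0x07 = 0x87
 *   control[1] = ACT200L_REG9 | ((0x17 >> 4) & 0x0f) = 0x90 | 0x01 = 0x91
 */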
/*
* Function act200l_reset (driver)
*
* Reset the ACTiSYS ACT-IR200L type dongle.
*/
#define ACT200L_STATE_WAIT1_RESET (SIRDEV_STATE_DONGLE_RESET+1)
#define ACT200L_STATE_WAIT2_RESET (SIRDEV_STATE_DONGLE_RESET+2)
static int act200l_reset(struct sir_dev *dev)
{
unsigned state = dev->fsm.substate;
unsigned delay = 0;
static const u8 control[9] = {
ACT200L_REG15,
ACT200L_REG13 | ACT200L_SHDW,
ACT200L_REG21 | ACT200L_EXCK | ACT200L_OSCL,
ACT200L_REG13,
ACT200L_REG7 | ACT200L_ENPOS,
ACT200L_REG6 | ACT200L_RS0 | ACT200L_RS1,
ACT200L_REG5 | ACT200L_RWIDL,
ACT200L_REG4 | ACT200L_OP0 | ACT200L_OP1 | ACT200L_BLKR,
ACT200L_REG0 | ACT200L_TXEN | ACT200L_RXEN
};
int ret = 0;
IRDA_DEBUG(2, "%s()\n", __func__ );
switch (state) {
case SIRDEV_STATE_DONGLE_RESET:
/* Reset the dongle : set RTS low for 50 ms */
sirdev_set_dtr_rts(dev, TRUE, FALSE);
state = ACT200L_STATE_WAIT1_RESET;
delay = 50;
break;
case ACT200L_STATE_WAIT1_RESET:
/* Clear DTR and set RTS to enter command mode */
sirdev_set_dtr_rts(dev, FALSE, TRUE);
udelay(25); /* better wait for some short while */
/* Write control bytes */
sirdev_raw_write(dev, control, sizeof(control));
state = ACT200L_STATE_WAIT2_RESET;
delay = 15;
break;
case ACT200L_STATE_WAIT2_RESET:
/* Go back to normal mode */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
dev->speed = 9600;
break;
default:
IRDA_ERROR("%s(), unknown state %d\n", __func__, state);
ret = -1;
break;
}
dev->fsm.substate = state;
return (delay > 0) ? delay : ret;
}
MODULE_AUTHOR("SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>");
MODULE_DESCRIPTION("ACTiSYS ACT-IR200L dongle driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("irda-dongle-10"); /* IRDA_ACT200L_DONGLE */
module_init(act200l_sir_init);
module_exit(act200l_sir_cleanup);
| gpl-2.0 |
SayCV/tools-SourceNavigator-NG | snavigator/db/dbutils.c | 7 | 63914 | /*
Copyright (c) 2000, Red Hat, Inc.
This file is part of Source-Navigator.
Source-Navigator is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2, or (at your option)
any later version.
Source-Navigator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License along
with Source-Navigator; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA.
*/
/*
* dbutils.c
*
* This version has been modified to support blanks and backslashes in
* file names; to do this, the split character has been changed to a
* character other than SPACE.
*/
#ifdef __MSVC__
#include <stdio.h>
#include <stdlib.h>
#endif
#include <config.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdlib.h>
#include "dbutils.h"
#include <setjmp.h>
#include <ctype.h>
#include "longstr.h"
#include "fileutils.h"
#include <tcl.h>
#include <compat.h>
#ifndef MY_DEBUG2
#define MY_DEBUG(x)
#define MY_DEBUG2(x)
#endif
#if !defined (__MSVC__) && ! defined (HAVE_STRTOUL)
#define strtoul(s,p,b) (unsigned long)strtol(s,p,b)
#endif
#ifdef __MSVC__
#define use_STRNCASECMP strnicmp
#else
#define use_STRNCASECMP strncasecmp
#endif
/*
* Don't exceed buffer size
*/
#define MY_STRNCPY(str1, str2, len) do { strncpy (str1, str2, len); (str1)[(len)-1] = 0; } while (0)
#define DEL_SEQ_VALUE 0 /* Search even when deleting sequentially!*/
/*#define DEL_SEQ_VALUE R_CURSOR */
#define BUG_TRACE 0 /* Should always be 0 */
static DB *db_class_tree;
static DB *db_cached_classes;
#define INH_AC_TYPE (PAF_PRIVATE|PAF_PROTECTED|PAF_PUBLIC)
static DB *db_include;
static char **include_array;
int xref_fastupdate = FALSE;
static DB *db_syms[50]; /* Keep enough space */
#define CREATE_ALWAYS_BTREES 1
static char db_project_dir[MAXPATHLEN];
static u_int db_cachesize = 0;
static u_int db_cross_cachesize = 0;
#define DB_NO_CASE_COMPARE 1
int db_case_compare (const DBT *, const DBT *);
int db_no_case_compare(const DBT *a,const DBT *b);
int db_compare_nocase = 0;
int db_action_is_fetching = 0; /* marks if we are in a fetching routine */
static int db_key_in_table(int type, char* key);
char *SN_StrDup(char*);
static DB *db_scopes;
/*
return the db4 version string
it will look something like
"Berkeley DB 4.8.30: (April 9, 2010)"
we also need a forward declaration, because we don't have access
to the "normal" db4 routines here via headers (due to db185-compat
mode), but the final binary has access (due to linkage against db.a)
*/
extern char * db_version(int *, int *, int *);
char * Paf_db_get_version(void)
{
int _unused;
return db_version(&_unused, &_unused, &_unused);
}
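/*
 * A minimal usage sketch, assuming the db185-compat build described
 * above: print the underlying db4 version string at startup.
 */
#if 0
#include <stdio.h>

static void print_db_version(void)
{
	/* e.g. "Berkeley DB 4.8.30: (April 9, 2010)" */
	printf("using %s\n", Paf_db_get_version());
}
#endif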
/*
* get database permission from an environment, set in the gui
*/
int get_db_permission()
{
char *p;
int perm = 0660;
p = getenv ("SN_DB_PERMS");
if (p != NULL && *p != 0)
{
sscanf (p, "%o", &perm);
}
return perm;
}
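/*
 * Hedged illustration of the SN_DB_PERMS parsing above: the value is
 * read with sscanf("%o"), so it is interpreted as octal. The sample
 * value "0644" is an assumption.
 */
#if 0
#include <stdio.h>

static void perms_example(void)
{
	unsigned int perm = 0;

	sscanf("0644", "%o", &perm);
	/* perm is now 0644 (decimal 420); the default is 0660 */
}
#endif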
/*
* We can't use Tcl_SplitList anymore; however, we can use a trick here.
*
*
* WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
*
* This procedure uses a static string variable to store the results.
* If you want to use it more than one time, you have to copy the
* results before calling this function again.
*/
int
my_SplitList (char *str, int *num, char ***argvPtr, char sep)
{
static char *lst=NULL;
char *p, **argv;
int cnt, i;
int have_bracket;
MY_DEBUG ((Output, "ping..<%s>\n", str));
/*
* Use a static string pointer to store the split string
*/
if (lst)
{
ckfree (lst);
lst=NULL;
}
lst = SN_StrDup (str);
/*
* Count how many fields
*/
for (have_bracket = 0, cnt=1, p=lst; *p; p++)
{
if (*p == '{')
{
have_bracket = 1;
}
if (*p == '}')
{
have_bracket = 0;
}
if (! have_bracket && *p == sep)
{
cnt ++;
}
}
argv = (char**)ckalloc (sizeof (char*) * cnt);
/*
* Set pointers and skip leading '{'
*/
for (argv[0] = lst, p=lst, i=0; *p; p++)
{
if (*p == '{')
{
have_bracket = 1;
argv[i] ++; /* skip opening bracket */
*p = 0;
}
if (*p == '}')
{
have_bracket = 0;
*p = 0;
}
if (! have_bracket && *p == sep)
{
*p = 0; /* terminate string */
argv[++i] = p+1;
}
}
*argvPtr = argv;
*num = cnt;
return TCL_OK;
}
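/*
 * A minimal usage sketch for my_SplitList(). Because the result lives
 * in a static buffer (see the WARNING above), any field the caller
 * wants to keep must be copied before the next call. The record string
 * and the '\001' separator are assumptions for illustration.
 */
#if 0
static void split_example(void)
{
	char rec[] = "name\001type\001{payload}";
	char **argv;
	int argc;
	char *name;

	my_SplitList(rec, &argc, &argv, '\001');
	name = SN_StrDup(argv[0]);	/* keep a private copy */
	ckfree((char *)argv);		/* frees only the pointer array */
	/* a second my_SplitList() call would invalidate the old fields */
	ckfree(name);
}
#endif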
/*
* Read the next field in a database record, up to the end
* of the string or until the separator character is reached.
*
* If nothing has been read, the same pointer will be returned.
*
* str: Points to the string with the db-fields.
* It can be a null pointer; then the last stored
* pointer will be used to continue parsing.
* size: buffer size without the terminating null;
* if size is 0, all characters are discarded, which
* is useful when a field must be skipped.
* buf: can be a null pointer
*/
static char *oldPtr=NULL;
char *
read_next_field (char *str, char *buf, int size, char sep)
{
char *p=str, *q=buf;
int i;
if (q)
{
q[0] = 0;
}
/*
* Use stored pointer to continue parsing */
if (p == NULL)
{
if (oldPtr == NULL)
{
return NULL;
}
p = oldPtr;
}
for(i=0; *p && *p != sep; p++)
{
/* skip the rest of the string if the buffer is exceeded */
if (q && i < size)
{
*q++ = *p; i++;
}
}
/*
* terminate constructed buffer
*/
if (q)
{
*q = 0;
}
/*
* Skip the separator
*/
if (*p == sep)
p ++;
/*
* Store last position for subsequent calls */
oldPtr = p;
return p;
}
int
read_next_int_field (char *str, char sep)
{
char tmp[64];
read_next_field (str, tmp, sizeof(tmp), sep);
return atoi (tmp);
}
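/*
 * A short usage sketch for the stateful readers above: the first call
 * passes the record, subsequent calls pass NULL to continue from the
 * stored position. The record content and the '\001' separator are
 * assumptions for illustration.
 */
#if 0
static void parse_example(void)
{
	char rec[] = "group\001" "42" "\001rest";
	char group[16];
	int mtime;

	read_next_field(rec, group, sizeof(group), '\001'); /* -> "group" */
	mtime = read_next_int_field(NULL, '\001');	    /* -> 42 */
	read_next_field(NULL, NULL, 0, '\001');		    /* skip "rest" */
}
#endif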
/*
Create a new database file that reflects a table
Comments:
It is unfortunate that a separate database is created for each
table used in SN; this is to be changed in 5.0.
A feature that is missing compared to SN NG2 (and all prior
SN versions) is the possibility to override malloc() and friends
to use the TCL ckalloc() et al routines.
This was implemented by a dbckalloc.c which is now gone; it was only
usable because the SN guys patched the db-1.85 accordingly.
This feature can be revived by calling approx. the following code
on each freshly opened DB4 handle.
Note that currently the compat-1.85 layer is used; this means
you have to get to the underlying DB4 *, located in the void * internal,
and call the function on that!
#ifdef TCL_MEM_DEBUG
DB4 *db4 = (DB4 *)dbp->internal;
db4->set_alloc(db4, ckalloc, ckrealloc, ckfree);
#endif
*/
static DB *
create_table(int type,int mode,int cachesize)
{
DB *dbp;
char fname[MAXPATHLEN + 1];
const void *db_inf;
int db_type;
HASHINFO db_hash_info;
BTREEINFO db_btree_info;
/* Check if that DB has already been opened */
if (db_syms[type]) {
return db_syms[type];
}
#if CREATE_ALWAYS_BTREES
db_type = DB_BTREE;
memset((char *)&db_btree_info,0,sizeof(db_btree_info));
db_btree_info.cachesize = cachesize;
#if _WINDOWS
/*
* On windows we do support lower/upper case pathnames
*/
if (type == PAF_FILE)
{
db_btree_info.compare = db_no_case_compare;
}
else
{
db_btree_info.compare = db_case_compare;
}
#else
db_btree_info.compare = db_case_compare;
#endif
db_inf = (void *)&db_btree_info;
sprintf(fname,"%s.%s",db_project_dir,SN_GetSymbolType(type));
dbp = dbopen(fname, mode, get_db_permission(),db_type,db_inf);
/*
* Backward compatiblility:
* if file exists it could be a hash table
*/
if (!dbp && access (fname, R_OK) == 0)
{
db_type = DB_HASH;
memset((char *)&db_hash_info,0,sizeof(db_hash_info));
db_hash_info.cachesize = cachesize;
db_inf = (void *)&db_hash_info;
dbp = dbopen (fname, mode, get_db_permission(), db_type, db_inf);
}
#else
if (type == PAF_FILE)
{
db_type = DB_HASH;
memset((char *)&db_hash_info,0,sizeof(db_hash_info));
db_hash_info.cachesize = cachesize;
db_inf = (void *)&db_hash_info;
}
else
{
db_type = DB_BTREE;
memset((char *)&db_btree_info,0,sizeof(db_btree_info));
db_btree_info.cachesize = cachesize;
db_btree_info.compare = db_case_compare;
db_inf = (void *)&db_btree_info;
}
sprintf(fname,"%s.%s",db_project_dir,SN_GetSymbolType(type));
dbp = dbopen (fname, mode, get_db_permission(), db_type, db_inf);
#endif
if (!dbp)
{
/* Display error only if we could not open the
* file and it should not have been created either.
*/
if ((errno != ENOENT || (mode & O_CREAT)))
{
fprintf(stderr,"create table %s: %s\n",fname,strerror(errno));
Paf_panic(PAF_PANIC_SIMPLE);
}
}
db_syms[type] = dbp;
return dbp;
}
#define PUR_DIR_TOO 0
/*
* Adds a file into the file database "<db name>.f"
* Format:
* <file>;<type> <modify time> ?<highlight file>?
*
* <highlight file> is optional
*
* Return values:
* 1 Successful, and the file was not part of the project yet.
* 0 Successful, and the file was already part of the project.
* else error.
*/
static int
put_file_db(char *file_name,char *group,char *highlight_file)
{
Tcl_DString filenameDStr;
DB *dbp = db_syms[PAF_FILE];
DBT data;
DBT key;
int found;
int ret;
char buf[MAXPATHLEN + 200]; /* 200: group + mtime */
#if PUR_DIR_TOO
char *p;
#endif /* PUR_DIR_TOO */
#ifdef __MSVC__
struct _stat stb;
#else
struct stat stb;
#endif /* __MSVC__ */
if (!dbp)
{
dbp = create_table(PAF_FILE,O_RDWR|O_CREAT,db_cachesize);
if (!dbp)
return -1;
}
Tcl_DStringInit(&filenameDStr);
Tcl_UtfToExternalDString(NULL,
file_name, strlen(file_name), &filenameDStr);
#ifdef __MSVC__
if (_stat(Tcl_DStringValue(&filenameDStr), &stb) != 0)
#else
if (stat(Tcl_DStringValue(&filenameDStr), &stb) != 0)
#endif
stb.st_mtime = 0;
Tcl_DStringFree(&filenameDStr);
/*
* Note: No need to add the special separator in this
* file, however to not be confused, the fields are
* separated with the defined separator
*/
sprintf(buf,"%s%c%lu%c%s",
group, DB_FLDSEP_CHR,
(unsigned long)stb.st_mtime, DB_FLDSEP_CHR,
highlight_file ? highlight_file : "");
key.data = file_name;
key.size = strlen(file_name) + 1;
/*
* Is the file already in the table ? */
found = dbp->get(dbp,&key,&data,0);
if (found == -1)
{
return -1;
}
else if (found == 0)
{
char highfile[MAXPATHLEN];
char *pData;
/*
* skip type
*/
pData = read_next_field (data.data, NULL, 0, DB_FLDSEP_CHR);
/*
* skip mtime
*/
pData = read_next_field (pData, NULL, 0, DB_FLDSEP_CHR);
/*
* get the highlight file to remove it
*/
pData = read_next_field (pData, highfile, MAXPATHLEN-1, DB_FLDSEP_CHR);
if (highfile[0])
{
unlink(highfile); /* Old highlighting file. */
}
}
data.data = buf;
data.size = strlen(buf) + 1;
ret = dbp->put(dbp,&key,&data,0);
if (ret != 0)
{
fprintf(stderr,"put_file_db error: %s\n",strerror(errno));
return ret;
}
return found;
}
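/*
 * Hedged illustration of the record put_file_db() writes, with '\001'
 * standing in for DB_FLDSEP_CHR; the group, timestamp and highlight
 * path are made-up sample values:
 *
 *   key:  "src/foo.c"
 *   data: "c" '\001' "1047518400" '\001' ".snprj/foo.c.HIGH"
 *          group         st_mtime        optional highlight file
 */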
void Paf_Close_Include_Dirs()
{
if (include_array)
{
char **ip;
for (ip = include_array; *ip; ip++)
{
ckfree(*ip);
}
ckfree ((char *)include_array);
include_array = NULL;
}
if (db_include)
{
db_include->close(db_include);
db_include = NULL;
}
}
int
Paf_db_close_tables()
{
DB *dbp;
int cou;
int max;
int ret = 0;
int saved_errno = 0;
int fd;
for (cou = 0, max = sizeof(db_syms) / sizeof(*db_syms); cou < max; cou++)
{
dbp = db_syms[cou];
if (dbp)
{
fd = dbp->fd(dbp);
if (dbp->close(dbp) == -1)
{
ret = -1;
saved_errno = errno;
close(fd);
}
db_syms[cou] = NULL;
}
}
if (db_class_tree)
{
char fname[MAXPATHLEN];
fd = db_class_tree->fd(db_class_tree);
if (db_class_tree->close(db_class_tree) == -1)
{
ret = -1;
saved_errno = errno;
close(fd);
}
sprintf(fname,"%s.ctr",db_project_dir);
/* unlink(fname); */
}
if (db_cached_classes)
{
char fname[MAXPATHLEN];
fd = db_cached_classes->fd(db_cached_classes);
if (db_cached_classes->close(db_cached_classes) == -1)
{
ret = -1;
saved_errno = errno;
close(fd);
}
sprintf(fname,"%s.xhs",db_project_dir);
unlink(fname);
}
if (db_scopes)
{
fd = db_scopes->fd(db_scopes);
if (db_scopes->close(db_scopes) == -1)
{
ret = -1;
saved_errno = errno;
close(fd);
}
}
errno = saved_errno;
return ret;
}
static void
db_remove_comment_def(int softdel,char *file)
{
DB *dbp = db_syms[PAF_COMMENT_DEF];
DBT data;
DBT key;
char filename[MAXPATHLEN];
int flag;
unsigned int cmp_len;
if (!dbp)
return;
sprintf (filename, "%s%c",file, DB_FLDSEP_CHR);
cmp_len = strlen(filename);
key.data = filename;
key.size = cmp_len;
for (flag = R_CURSOR; dbp->seq(dbp,&key,&data,flag) == 0; flag = R_NEXT)
{
if ((int)key.size < cmp_len || memcmp(key.data,filename,cmp_len) != 0)
{
break;
}
dbp->del(dbp,&key,DEL_SEQ_VALUE);
}
}
void
db_remove_file_def(int softdel,char *file)
{
DB *dbp = db_syms[PAF_FILE_SYMBOLS];
DB *db_del = NULL;
DBT data;
DBT key;
DBT sc_key;
DBT sc_data;
int flag;
unsigned int cmp_len;
char filename[MAXPATHLEN + 1];
LongString delkey;
LongString pars;
LongString pars_data;
char startpos_buff[11]; /* format "%06d.%03d" */
int scope_val = -1;
int del;
int del_fil;
fprintf(stdout, "Status: Deleting: %s\n",file); /* Informs SN which files is being deleted. */
fflush(stdout);
if (!dbp)
{
dbp = create_table(PAF_FILE_SYMBOLS,O_RDWR,db_cachesize);
if (!dbp)
return;
if (!db_syms[PAF_COMMENT_DEF])
create_table(PAF_COMMENT_DEF,O_RDWR,db_cachesize);
}
db_remove_comment_def(softdel,file);
if (!db_scopes)
{
db_scopes = dbopen (NULL, O_RDWR|O_CREAT, get_db_permission(), DB_HASH, NULL);
data.data = (char *)&scope_val;
data.size = sizeof(scope_val);
for (scope_val = PAF_FILE; scope_val < PAF_SYMBOL_TYPE_MAX; scope_val++)
{
key.data = SN_GetSymbolType(scope_val);
key.size = strlen(key.data);
if (key.size > 0)
{
if (db_scopes->put(db_scopes,&key,&data,R_NOOVERWRITE) == -1)
{
fprintf(stderr,"Write error: %s\n",strerror(errno));
}
}
}
}
LongStringInit(&delkey,0);
LongStringInit(&pars,0);
LongStringInit(&pars_data,0);
/*
* Add a separator at the end of the file name to uniquely identify its records
*/
sprintf(filename,"%s%c", file, DB_FLDSEP_CHR);
cmp_len = strlen(filename);
key.data = filename;
key.size = cmp_len;
data.data = NULL;
data.size = 0;
for (flag = R_CURSOR; dbp->seq(dbp,&key,&data,flag) == 0; flag = R_NEXT)
{
if ((int)key.size < cmp_len || memcmp(key.data,filename,cmp_len) != 0)
{
break;
}
/*
* get scope name (at the end of the key field list)
*/
sc_key.data = strrchr(key.data, DB_FLDSEP_CHR) + 1;
sc_key.size = strlen(sc_key.data);
/*
* verify if this scope exists
*/
if (db_scopes->get(db_scopes,&sc_key,&sc_data,0) != 0)
{
fprintf(stderr,"unknown scope: \"%s\" in \"%s\"\n",
(char *)sc_key.data,(char *)key.data);
continue;
}
/* Key format: file startpos scope symbol symbol_type */
pars.split(&pars, key.data, key.size - 1, 0, DB_FLDSEP_CHR, -1);
/* Data format: endpos high-startpos high-endpos arg-types */
pars_data.split(&pars_data, data.data, data.size - 1, 0, DB_FLDSEP_CHR, -1);
/* Reformat highlight position into long format: 1.18 -> 000001.018 */
{
int line;
int column;
if (pars_data.field_size[1] + 1 > sizeof(startpos_buff)) {
Paf_panic(PAF_PANIC_EMERGENCY);
}
memcpy(startpos_buff,
pars_data.field_value[1],
pars_data.field_size[1]);
startpos_buff[pars_data.field_size[1]] = '\0';
if (sscanf(startpos_buff, "%d.%d", &line, &column) == 2) {
sprintf(startpos_buff, "%06d.%03d", line, column);
} else {
fprintf(stderr,"unable to scan decimal values from \"%s\"\n",
startpos_buff);
}
}
memcpy((char *)&scope_val,sc_data.data,sc_data.size);
switch (scope_val)
{
case PAF_EXCEPTION_DEF:
case PAF_MBR_FUNC_DEF:
case PAF_MBR_FUNC_DCL:
case PAF_FRIEND_DCL:
case PAF_MBR_VAR_DEF:
case PAF_COMMON_MBR_VAR_DEF:
case PAF_CLASS_INHERIT:
case PAF_LOCAL_VAR_DEF:
/* Key format: class member high-startpos filename */
delkey.copy(&delkey,
pars.field_value[2],
pars.field_size[2]);
delkey.append (&delkey, DB_FLDSEP_STR, -1); /* Separator */
delkey.append(&delkey,
pars.field_value[3],
pars.field_size[3]);
delkey.append(&delkey, DB_FLDSEP_STR, -1); /* Separator */
delkey.append(&delkey,
startpos_buff,
strlen(startpos_buff));
delkey.append(&delkey, DB_FLDSEP_STR, -1); /* Separator */
delkey.append(&delkey,
file,
cmp_len -1);
break;
default:
/* Key format: symbol high-startpos filename */
delkey.copy(&delkey,
pars.field_value[3],
pars.field_size[3]);
delkey.append(&delkey, DB_FLDSEP_STR, -1); /* Separator */
delkey.append(&delkey,
startpos_buff,
strlen(startpos_buff));
delkey.append(&delkey, DB_FLDSEP_STR, -1); /* Separator */
delkey.append(&delkey,
file,
cmp_len -1);
break;
}
sc_key.data = (void *)delkey.buf;
sc_key.size = delkey.len + 1;
if (!(db_del = db_syms[scope_val]))
{
db_del = create_table(scope_val,O_RDWR,db_cachesize);
if (!db_del)
{
continue;
}
}
del_fil = dbp->del(dbp,&key,DEL_SEQ_VALUE);
del = db_del->del(db_del,&sc_key,0);
/* Send a message if something went wrong, except when we could
* not delete a local variable from its table. */
if (del_fil != 0 || (del != 0 && scope_val != PAF_LOCAL_VAR_DEF))
{
fprintf(stderr,"DELETING of <%s> with size: %d type <%s> returned: %d,%d\n",
(char *)sc_key.data,
sc_key.size,
SN_GetSymbolType(scope_val),
del_fil,
del);
}
}
delkey.free(&delkey);
pars.free(&pars);
pars_data.free(&pars_data);
}
#define DB_XREF_FLD_SEP_STR DB_FLDSEP_STR
void
db_remove_file_xfer_using_keys(int softdel, char *key_files)
{
FILE *fp;
DB *dbp = db_syms[PAF_CROSS_REF];
DB *dbp_by = db_syms[PAF_CROSS_REF_BY];
DBT key;
DBT by_key;
DBT data;
LongString key_to;
LongString key_by;
LongString file_del_key;
LongString data_buf;
char last_del_fname[MAXPATHLEN];
int flag;
char *pfn;
char *fn;
char *ref_type;
void *key_buf;
int key_len;
int line_cou = 1;
int delete_file = TRUE;
Tcl_DString sys;
LongStringInit(&key_to,0);
LongStringInit(&data_buf,0);
LongStringInit(&key_by,0);
LongStringInit(&file_del_key,0);
/* File name is utf-8 encoded, pass system encoding to fopen. */
Tcl_UtfToExternalDString(NULL, key_files, -1, &sys);
if (!key_files || *key_files == '\0' ||
(fp = fopen(Tcl_DStringValue(&sys), "r")) == NULL) {
goto cleanup; /* Should never happen. */
}
if (!dbp)
{
dbp = create_table(PAF_CROSS_REF,O_RDWR,
db_cross_cachesize); /* Open the table ! */
if (!dbp)
goto cleanup;
}
if (!dbp_by)
{
dbp_by = create_table(PAF_CROSS_REF_BY,O_RDWR,
db_cross_cachesize); /* Open the table ! */
}
for (last_del_fname[0] = '\0', line_cou = 1; file_del_key.fgets(&file_del_key, fp); line_cou++)
{
if (file_del_key.split(&file_del_key,
file_del_key.buf,
file_del_key.len,
TRUE,
(int)';',
-1) != 5)
{
continue;
}
fn = file_del_key.field_value[3];
if (strcmp(last_del_fname, fn) != 0)
{
fprintf(stdout, "Status: Deleting: %s\n", fn);
fflush(stdout);
strcpy(last_del_fname, fn);
}
ref_type = file_del_key.field_value[2];
/*
* Remove references in the bodies of "md" | "mi" | "fu" | "su" !
*/
if ((ref_type[0] == 'm' && (ref_type[1] == 'd' || ref_type[1] == 'i')) ||
((ref_type[0] == 'f' || ref_type[0] == 's') && ref_type[1] == 'u'))
{
key_to.copystrings(&key_to,
file_del_key.field_value[0], DB_FLDSEP_STR,
file_del_key.field_value[1], DB_FLDSEP_STR,
file_del_key.field_value[2], DB_FLDSEP_STR,
NULL);
key.data = (void *)key_to.buf;
key.size = key_to.len;
key_buf = key_to.buf;
key_len = key_to.len;
for (flag = R_CURSOR; dbp->seq(dbp,&key,&data,flag) == 0; flag = R_NEXT)
{
if ((int)key.size < key_len || memcmp(key_buf, key.data, key_len) != 0)
{
break;
}
/* The filename starts after the last separator in the key. */
for (pfn = (char *)key.data + key.size - 1; pfn > (char*)key.data && *pfn != DB_FLDSEP_CHR; pfn--);
if (strcmp(pfn + 1, fn) != 0)
{
continue; /* Another file, don't delete the record! */
}
/* Create the key for the "by" record !*/
data_buf.split(&data_buf,
key.data, key.size - 1, FALSE, DB_FLDSEP_CHR, -1);
ref_type = data_buf.field_value[5];
/*
* There is no xref "by" info for local variables. !
*/
if (dbp_by && (ref_type[0] != 'l' || ref_type[1] != 'v'))
{
key_by.copy(&key_by,
data_buf.field_value[3],
data_buf.field_size[3] + data_buf.field_size[4] + data_buf.field_size[5] + 3);
key_by.append(&key_by,
data_buf.field_value[0],
data_buf.field_size[0] + data_buf.field_size[1] + data_buf.field_size[2] + 3);
key_by.append(&key_by,
data_buf.field_value[6],
data_buf.field_size[6] + data_buf.field_size[7] + data_buf.field_size[8] + 2);
by_key.data = (void *)key_by.buf;
by_key.size = key_by.len + 1;
if (dbp_by->del(dbp_by,&by_key,0) != 0)
{
fprintf(stderr,"Delete (BY) not found <%s>\n",(char *)by_key.data);
}
}
dbp->del(dbp,&key,DEL_SEQ_VALUE);
}
}
}
fclose(fp);
/* Delete the file only if its contents were OK; otherwise we need a
* chance to be able to look at it. */
if (delete_file)
unlink(Tcl_DStringValue(&sys));
cleanup:
Tcl_DStringFree(&sys);
file_del_key.free(&file_del_key);
key_to.free(&key_to);
data_buf.free(&data_buf);
key_by.free(&key_by);
return;
}
void
db_insert_entry(int type,char *key_buf,char *data_buf)
{
register unsigned char *p;
DB *dbp = db_syms[type];
DBT data;
DBT key;
LongString tmp;
LongString xref_data;
LongString xref;
LongString xref_data_fields;
if (type == PAF_FILE)
{
char group[80];
char highfile[MAXPATHLEN];
int state;
group[0] = '\0';
highfile[0] = '\0';
/*
* read group and highlight file */
read_next_field (data_buf, group, 80, DB_FLDSEP_CHR);
read_next_field (NULL, highfile, MAXPATHLEN, DB_FLDSEP_CHR);
if (highfile[0])
p = (unsigned char *)highfile;
else
p = NULL;
state = put_file_db(key_buf,group,p);
if (state == 0)
{
db_remove_file_def(0,key_buf);
}
return;
}
if (!dbp)
{
int csize;
if (type == PAF_CROSS_REF)
{
csize = db_cross_cachesize;
}
else
{
csize = db_cachesize;
}
dbp = create_table(type,O_RDWR|O_CREAT,csize); /* Open the table ! */
if (!dbp)
return;
}
if (type != PAF_CROSS_REF)
{
key.data = key_buf;
key.size = strlen(key.data) + 1;
data.data = data_buf;
for (p = (unsigned char *)data.data; *p; p++)
{
if (*p == 0xff)
*p = '\n';
}
data.size = (char *)p - (char *)data.data + 1;
if(dbp->put(dbp,&key,&data,0) == -1)
{
Paf_panic(PAF_PANIC_EMERGENCY);
}
return;
}
LongStringInit(&xref,0);
LongStringInit(&xref_data,0);
LongStringInit(&xref_data_fields,0);
LongStringInit(&tmp,0);
xref_data_fields.split (&xref_data_fields,
data_buf,
-1,
FALSE,
DB_FLDSEP_CHR,
-1);
xref_data.copy(&xref_data,
"{", -1);
xref_data.append (&xref_data,
xref_data_fields.field_value[0],
xref_data_fields.field_size[0]);
xref_data.appendstrings(&xref_data, "}", DB_FLDSEP_STR, "{", NULL);
xref_data.append (&xref_data,
xref_data_fields.field_value[1],
xref_data_fields.field_size[1]);
xref_data.append (&xref_data, "}", -1);
data.data = (void *)xref_data.buf;
data.size = xref_data.len + 1;
/* If a parser could not figure out a scope of a member because
* for example an inherited class was not known, we can fix it
* here.
*/
if (*key_buf == '?')
{
static int opened = FALSE;
char arg_types[10000] = {0};
char ret_type [1024] = {0};
char this_cls [1024] = {0};
char name [1024] = {0};
char scope_buf[1024] = {0};
if (!opened)
{
open_tables_for_cross_ref();
opened = TRUE;
}
MY_DEBUG((Output, "%s\n", key_buf));
/*
* skip field
*/
read_next_field (key_buf, NULL, 0, DB_FLDSEP_CHR);
/*
* Name */
read_next_field (NULL, name, sizeof(name), DB_FLDSEP_CHR);
/*
* Skip field */
read_next_field (NULL, NULL, 0, DB_FLDSEP_CHR);
/*
* Class */
read_next_field (NULL, this_cls, sizeof (this_cls), DB_FLDSEP_CHR);
get_symbol (this_cls,
NULL,
name,
arg_types,
scope_buf,
ret_type,
NULL,
1);
/* switch "?" with the returned class name */
if (*scope_buf != '\0' && *scope_buf != '?')
{
int len = strlen(scope_buf);
memmove (key_buf+len-1, key_buf, strlen (key_buf) - len);
memcpy(key_buf, scope_buf, len);
}
MY_DEBUG2 ((Output, "split key <%s>\n", key_buf));
/* Format:
* Class Method Type DeclClass Var VarType Line File
* Example:RectShape draw mi ? chr1 iv 000003 RectShape.java
*/
tmp.split(&tmp, key_buf, key.size, FALSE, DB_FLDSEP_CHR, 7);
tmp.append(&tmp,
tmp.field_value[3],
tmp.field_size[3] + tmp.field_size[4] + tmp.field_size[5] + 3);
tmp.append(&tmp,
tmp.field_value[0],
tmp.field_size[0] + 1);
tmp.append(&tmp,
tmp.field_value[1],
tmp.field_size[1] + tmp.field_size[2] + 2);
tmp.append(&tmp,
tmp.field_value[6],
tmp.field_size[6]);
key.data = tmp.buf;
key.size = tmp.len + 1;
}
else
{
key.data = key_buf;
key.size = strlen(key.data) + 1;
}
if(dbp->put(dbp,&key,&data,0) == -1)
{
Paf_panic(PAF_PANIC_EMERGENCY);
}
MY_DEBUG2 ((Output, "split data <%s>\n", key.data));
tmp.split (&tmp,key.data, key.size -1, FALSE, DB_FLDSEP_CHR, -1);
if (tmp.field_value[5][0] != SN_GetSymbolType(PAF_REF_TO_LOCAL_VAR)[0] ||
tmp.field_value[5][1] != SN_GetSymbolType(PAF_REF_TO_LOCAL_VAR)[1])
{
dbp = db_syms[PAF_CROSS_REF_BY];
if (!dbp)
{
dbp = create_table(PAF_CROSS_REF_BY,O_RDWR|O_CREAT,
db_cross_cachesize); /* Open the table ! */
if (!dbp)
{
xref_data.free(&xref_data);
xref.free(&xref);
tmp.free(&tmp);
return;
}
}
xref.copy(&xref,
tmp.field_value[3],
tmp.field_size[3] + tmp.field_size[4] + tmp.field_size[5] + 3);
xref.append(&xref,
tmp.field_value[0],
tmp.field_size[0] + tmp.field_size[1] + tmp.field_size[2] + 3);
xref.append(&xref,
tmp.field_value[6],
tmp.field_size[6] + tmp.field_size[7] + tmp.field_size[8] + 2);
key.data = (void *)xref.buf;
key.size = xref.len + 1;
xref_data.copy(&xref_data,
"{", -1);
xref_data.append(&xref_data,
xref_data_fields.field_value[1],
xref_data_fields.field_size[1]);
xref_data.appendstrings (&xref_data, "}", DB_FLDSEP_STR, "{", NULL);
xref_data.append(&xref_data,
xref_data_fields.field_value[0],
xref_data_fields.field_size[0]);
xref_data.append(&xref_data, "}", -1);
data.data = (void *)xref_data.buf;
data.size = xref_data.len + 1;
if(dbp->put(dbp,&key,&data,0) == -1)
{
Paf_panic(PAF_PANIC_EMERGENCY);
}
}
/* If we are dealing with an xref to a global
* variable and the special "UNDECLARED" marker
* is passed then add a define for the global
* variable if it has not already been defined.
* Use the fake file name "GLOBAL". We can't
* define a global multiple times since the
* IDE would not see them as the same variable
* and we can leave it undefined since the
* variable would not appear in the symbols list.
*/
if ((strncmp(tmp.field_value[5], "gv\001", 3) == 0) &&
(strcmp(xref_data_fields.field_value[1], "UNDECLARED") == 0)) {
char * field, *end;
char * varname;
LongString var_key, data_key;
int result;
end = field = tmp.field_value[4];
while (*end != '\1') {
end++;
}
*end = '\0';
varname = SN_StrDup(field);
*end = '\1';
/* See if this symbol already exists in the special
* file name GLOBAL, and insert it if not.
*/
LongStringInit(&var_key,0);
LongStringInit(&data_key,0);
var_key.copystrings(&var_key,
varname, DB_FLDSEP_STR,
"000000.000", DB_FLDSEP_STR,
"GLOBAL",
NULL);
data_key.copystrings(&data_key,
"0.0", DB_FLDSEP_STR,
"0x0", DB_FLDSEP_STR,
"{}", DB_FLDSEP_STR,
"{}", DB_FLDSEP_STR,
"{}", DB_FLDSEP_STR,
"{}",
NULL);
if (!db_key_in_table(PAF_GLOB_VAR_DEF, var_key.buf)) {
/* Insert special global variable symbol */
db_insert_entry(PAF_GLOB_VAR_DEF,
var_key.buf,
data_key.buf);
var_key.free(&var_key);
data_key.free(&data_key);
LongStringInit(&var_key,0);
LongStringInit(&data_key,0);
var_key.copystrings(&var_key,
"GLOBAL", DB_FLDSEP_STR,
"000000.000", DB_FLDSEP_STR,
"#", DB_FLDSEP_STR,
varname, DB_FLDSEP_STR,
"gv",
NULL);
data_key.copystrings(&data_key,
"0.0", DB_FLDSEP_STR,
"0.0", DB_FLDSEP_STR,
"0.0", DB_FLDSEP_STR,
"{}",
NULL);
db_insert_entry(PAF_FILE_SYMBOLS,
var_key.buf,
data_key.buf);
}
var_key.free(&var_key);
data_key.free(&data_key);
ckfree((char*) varname);
}
xref_data_fields.free(&xref_data_fields);
xref_data.free(&xref_data);
xref.free(&xref);
tmp.free(&tmp);
}
void
Paf_Open_Include_Dirs(char *inf_name,char *db_prefix)
{
FILE *include_fp;
int opt;
char *fname;
char tmp[MAXPATHLEN];
/* inf_name comes from -I option to cbrowser, it
* is in the native system encoding
*/
include_fp = fopen(inf_name,"r");
if (!include_fp)
return;
#define INC_ARR_ICR 50
for (opt = 0; fgets(tmp,sizeof(tmp) -1,include_fp);)
{
if ((fname = strchr(tmp,'\n')))
{
*fname = '\0';
}
if (!*tmp || *tmp == '#')
continue;
if ((opt % INC_ARR_ICR) == 0)
{
if (!include_array)
{
include_array = (char **)ckalloc((INC_ARR_ICR + 1) * sizeof(char *));
}
else
{
include_array = (char **)ckrealloc((char*)include_array,
(opt + INC_ARR_ICR + 1) * sizeof(char *));
}
}
include_array[opt++] = SN_StrDup(tmp);
include_array[opt] = NULL;
}
fclose(include_fp);
if (include_array)
{
BTREEINFO inf;
include_array[opt] = NULL;
memset((char *)&inf,0,sizeof(inf));
inf.cachesize = (u_int)db_cachesize;
inf.compare = db_case_compare;
sprintf(tmp,"%s.icl",db_prefix);
db_include = dbopen (tmp, O_RDWR, get_db_permission(), DB_BTREE, &inf);
}
}
char *
Paf_Search_Include_dir(char *name)
{
char *incl_with_path = NULL;
static char incl_file[MAXPATHLEN];
DBT key;
DBT data;
char *p;
char *bfn; /* Base filename */
char dirn[MAXPATHLEN];
char filename[MAXPATHLEN];
char **ip;
int path_argc = 0;
char **path_argv = NULL;
int cou;
int len;
int flag;
int cpy_len;
sn_internal_convert_path (name, SN_PATH_UNIX);
if (! (bfn = file_lastroot (name)))
{
bfn = name;
}
else
{
bfn ++;
}
if (!db_include)
{
return name;
}
if (bfn == name)
{
dirn[0] = '\0';
}
else
{
len = bfn - name - 1;
memcpy(dirn,name,len);
dirn[len] = '\0';
}
sprintf(filename,"%s%c", bfn, DB_FLDSEP_CHR);
key.data = filename;
key.size = strlen(filename);
len = key.size;
for (flag = R_CURSOR; db_include->seq(db_include,&key,&data,flag) == 0; flag = R_NEXT)
{
if ((int)key.size < len || memcmp(filename,key.data,len) != 0)
{
break;
}
p = strchr((char *)key.data, DB_FLDSEP_CHR);
if (!p)
continue; /* Should never happen !!! */
*p = '\0';
sprintf (incl_file, "%s/%s", p + 1, (char *)key.data);
*p++ = DB_FLDSEP_CHR; /* Restore separator and point to the directory name ! */
/* Does it still exist ? */
if (access(incl_file,0) != 0)
{
db_include->del(db_include,&key,0);
db_include->sync(db_include,0); /* Now, we can even crash. */
continue;
}
if (path_argv)
{
path_argv = (char **)ckrealloc((char*)path_argv,(path_argc + 1) * sizeof(char *));
}
else
{
path_argv = (char **)ckalloc(sizeof(char *));
}
cpy_len = key.size - (p - (char *)key.data);
path_argv[path_argc] = ckalloc(cpy_len);
memcpy(path_argv[path_argc],p,cpy_len);
path_argc++;
}
/*
if (!path_argc)
{
return name;
}
*/
if (path_argc == 1)
{
/* Only one file has been found; we just take it. */
if (path_argv[0][0] == '.')
{
strcpy(incl_file,bfn);
}
else
{
sprintf(incl_file,"%s/%s",path_argv[0],bfn);
}
incl_with_path = incl_file;
}
else if (path_argc > 1)
{
int found = 0;
/*
* More than one file has been found; we have to find the
* right one.
*/
for (ip = include_array; *ip; ip++)
{
for (cou = 0; cou < path_argc; cou++)
{
if (**ip != path_argv[cou][0])
{
continue;
}
if (dirn[0])
{
sprintf(filename,"%s/%s",*ip,dirn);
p = filename;
}
else
{
p = *ip;
}
if (strcmp(p,path_argv[cou]) == 0)
{
if (**ip == '.')
strcpy(incl_file,bfn);
else
sprintf(incl_file,"%s/%s",p,bfn);
incl_with_path = incl_file;
found = 1;
break;
}
}
if (found)
break;
}
/*
* take the first one, if nothing found
*/
if (! found)
{
sprintf (incl_file, "%s/%s", path_argv[0], bfn);
incl_with_path = incl_file;
}
}
else
{
/*
* As a last resort, check whether the file
* exists on disk.
*/
for (ip = include_array; *ip; ip++)
{
sprintf (incl_file, "%s/%s", *ip, bfn);
if (access (incl_file, R_OK) == 0)
{
incl_with_path = incl_file;
break;
}
}
}
if (path_argv)
{
for (cou = 0; cou < path_argc; cou++)
ckfree(path_argv[cou]);
ckfree((char*)path_argv);
}
if (incl_with_path)
{
return incl_with_path;
}
return name;
}
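/*
 * Hedged sketch of the resolution order implemented above: the base
 * name is looked up in the ".icl" btree, which may record several
 * directories for it. With exactly one hit that directory is used;
 * with several, the first include_array directory that matches a hit
 * wins, falling back to the first hit; with none, the include dirs
 * are probed on disk. If all of that fails, the name is returned
 * unchanged.
 */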
#define OPT_CLASS_TREE 0
/*
* This function copies the existing members of the inherited
* class. The newly created members will be known in the new class
* too.
*/
static void
inherit_members(char *class_name,char *inherited_class, char *inh_buf)
{
DB *dbp = db_class_tree;
#if OPT_CLASS_TREE
DB *db_mbr = db_syms[PAF_MBR_FUNC_DCL];
#endif /* OPT_CLASS_TREE */
DBT key;
DBT data;
DBT mbr_key;
DBT mbr_data;
char *inh_cls_name;
char *inh_symbol_type;
char *inh_access;
char *inh_mbr_type;
char *nm;
char *mbr_name;
char *rest;
unsigned int flag;
unsigned int len;
unsigned long inh_ac_tp;
unsigned long output_ac_tp;
LongString tmp_class_name;
LongString data_buf;
LongString key_buf;
LongString key_pars;
LongString inh_pars;
char inh_access_tmp[100];
output_ac_tp = strtoul(inh_buf,NULL,16);
LongStringInit(&tmp_class_name,0);
LongStringInit(&key_buf,0);
LongStringInit(&data_buf,0);
LongStringInit(&key_pars,0);
LongStringInit(&inh_pars,0);
tmp_class_name.appendstrings(&tmp_class_name, inherited_class, DB_FLDSEP_STR,
NULL);
nm = tmp_class_name.buf;
len = tmp_class_name.len;
key.data = (void *)nm;
key.size = len;
for (flag = R_CURSOR; dbp->seq(dbp,&key,&data,flag) == 0; flag = R_NEXT)
{
if ((int)key.size < len || memcmp(nm,key.data,len) != 0)
{
break;
}
inh_pars.split (&inh_pars, (char*)data.data, data.size-1, TRUE,
DB_FLDSEP_CHR, 4);
inh_cls_name = inh_pars.field_value[0];
inh_symbol_type = inh_pars.field_value[1];
inh_access = inh_pars.field_value[2];
inh_mbr_type = inh_pars.field_value[3];
/* Private members cannot be inherited ! */
inh_ac_tp = strtoul(inh_access,NULL,16);
if ((inh_ac_tp & INH_AC_TYPE) == PAF_PRIVATE)
{
continue;
}
switch (output_ac_tp)
{
case PAF_PRIVATE:
inh_ac_tp = PAF_PRIVATE;
break;
case PAF_PROTECTED:
inh_ac_tp = PAF_PROTECTED;
break;
case PAF_PUBLIC: /* Take the read value ! */
break;
}
key_pars.split (&key_pars, key.data, key.size-1, TRUE, DB_FLDSEP_CHR, 3);
mbr_name = key_pars.field_value[1];
rest = key_pars.field_value[2];
key_buf.copystrings(&key_buf,
class_name, DB_FLDSEP_STR,
mbr_name, DB_FLDSEP_STR,
NULL);
#if OPT_CLASS_TREE
/* Don't load members of base classes that exist in the superclass. */
if (inh_symbol_type[0] == SN_GetSymbolType(PAF_MBR_FUNC_DCL)[0]
&& db_mbr) /* Search for method of the superclass. */
{
/* We load (inherit) the member only if the superclass
* does not contain such a member.
*/
char *keyp = key_buf.buf;
int kln = key_buf.len;
mbr_key.data = (void *)keyp;
mbr_key.size = kln;
mbr_data.data = NULL;
mbr_data.size = 0;
if (db_mbr->seq(db_mbr,&mbr_key,&mbr_data,R_CURSOR) == 0 &&
mbr_key.size >= kln && memcmp(mbr_key.data,keyp,kln) == 0)
{
/* The superclass has such a member. */
continue;
}
}
#endif /* OPT_CLASS_TREE */
key_buf.append(&key_buf,rest,-1);
sprintf(inh_access_tmp, "0x%lx%c", inh_ac_tp, DB_FLDSEP_CHR);
data_buf.copystrings (&data_buf,
inh_cls_name, DB_FLDSEP_STR,
inh_symbol_type, DB_FLDSEP_STR,
inh_access_tmp,
inh_mbr_type,
NULL);
mbr_key.data = (void *)key_buf.buf;
mbr_key.size = key_buf.len + 1;
mbr_data.data = (void *)data_buf.buf;
mbr_data.size = data_buf.len + 1;
if (dbp->put(dbp,&mbr_key,&mbr_data,0) == -1)
{
Paf_panic(PAF_PANIC_EMERGENCY);
}
}
key_pars.free(&key_pars);
inh_pars.free(&inh_pars);
key_buf.free(&key_buf);
data_buf.free(&data_buf);
tmp_class_name.free(&tmp_class_name);
}
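/*
 * Hedged illustration of the access filtering above (class names are
 * made up): when class B inherits from class A,
 *   - a PAF_PRIVATE member of A is never copied;
 *   - if B inherits A as "private", every copied member is clamped to
 *     PAF_PRIVATE in B, and likewise for "protected";
 *   - a "public" inheritance keeps each member's original access.
 */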
/*
* This function reads the members of a class from the '.iv' or '.md'
* tables.
*/
static void
load_class_members(int type, char *class_name)
{
DB *dbp = db_syms[type];
DBT key;
DBT data;
DBT mbr_key;
DBT mbr_data;
char *symbol_type = SN_GetSymbolType(type);
char *mbr_type;
int mbr_type_len;
char *mbr_pars;
int mbr_pars_len;
char *inher_access;
int inher_access_len;
LongString tmp_class_name;
LongString key_buf;
LongString data_buf;
char *nm;
u_int flag;
unsigned int len;
char *pend;
int mbr_name_beg,mbr_name_end;
if (!dbp)
return;
LongStringInit(&tmp_class_name,0);
LongStringInit(&key_buf,0);
LongStringInit(&data_buf,0);
tmp_class_name.copystrings(&tmp_class_name,class_name," ",NULL);
nm = tmp_class_name.buf;
len = tmp_class_name.len;
key.data = (void *)nm;
key.size = len;
data.data = NULL;
data.size = 0;
for (flag = R_CURSOR; dbp->seq(dbp,&key,&data,flag) == 0; flag = R_NEXT)
{
if ((int)key.size < len || memcmp(nm,key.data,len) != 0)
{
break;
}
/*
* Skip field */
read_next_field (key.data, NULL, 0, DB_FLDSEP_CHR);
/*
* read integer field */
mbr_name_beg = read_next_int_field (NULL, DB_FLDSEP_CHR);
/*
* Skip field */
read_next_field (NULL, NULL, 0, DB_FLDSEP_CHR);
/*
* read integer field */
mbr_name_end = read_next_int_field (NULL, DB_FLDSEP_CHR);
inher_access = strchr (data.data, DB_FLDSEP_CHR) + 1;
pend = strchr (inher_access, DB_FLDSEP_CHR);
inher_access_len = pend - inher_access;
mbr_type = strchr(pend + 1,'{') + 1;
pend = strchr(mbr_type,'}');
mbr_type_len = pend - mbr_type;
mbr_pars = strchr(pend + 1,'{') + 1;
pend = strchr(mbr_pars,'}');
mbr_pars_len = pend - mbr_pars;
/*
* Key format: class_name member_name parameter_list
* Data format: class_name sybmbol_type (e.g. md or iv) inher_access type (e.g int)
*/
key_buf.copystrings(&key_buf, class_name, DB_FLDSEP_STR, NULL);
key_buf.append(&key_buf,
(char *)key.data + mbr_name_beg, mbr_name_end - mbr_name_beg);
key_buf.append(&key_buf,mbr_pars,mbr_pars_len);
data_buf.copystrings(&data_buf,
class_name, DB_FLDSEP_STR,
symbol_type, DB_FLDSEP_STR,
NULL);
data_buf.append(&data_buf,
inher_access, inher_access_len);
data_buf.append(&data_buf, DB_FLDSEP_STR, -1);
data_buf.append(&data_buf, mbr_type, mbr_type_len);
mbr_key.data = (void *)key_buf.buf;
mbr_key.size = key_buf.len + 1;
mbr_data.data = (void *)data_buf.buf;
mbr_data.size = data_buf.len + 1;
if (db_class_tree->put(db_class_tree,&mbr_key,&mbr_data,0) == -1)
{
Paf_panic(PAF_PANIC_EMERGENCY);
}
}
key_buf.free(&key_buf);
data_buf.free(&data_buf);
tmp_class_name.free(&tmp_class_name);
}
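/*
 * Hedged illustration of the class-tree records built above, with
 * '\001' standing in for DB_FLDSEP_CHR; the names and types are made
 * up:
 *
 *   key:  "Shape" '\001' "draw" '\001' "(int,int)"
 *   data: "Shape" '\001' "md" '\001' "0x1" '\001' "void"
 *          class          type         access       member type
 */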
static int
store_symbol_to_cache(char *name,int sym_type,char *origin)
{
DBT key;
DBT data;
int ret;
LongString buf;
char smt = (char)sym_type;
LongStringInit(&buf,0);
buf.append(&buf,&smt,1); /* A '\0' will be appended too. */
if (origin)
{
buf.append(&buf,origin,-1);
data.size = buf.len + 1; /* Include '\0' too ! */
}
else
{
data.size = 2; /* Include '\0' too! */
}
key.data = (void *)name;
key.size = strlen(name);
data.data = (void *)buf.buf;
ret = db_cached_classes->put(db_cached_classes,&key,&data,
R_NOOVERWRITE);
if (ret == -1)
{
Paf_panic(PAF_PANIC_EMERGENCY);
}
buf.free(&buf);
return ret;
}
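/*
 * Hedged sketch (added for illustration): how a record written by
 * store_symbol_to_cache() is decoded later in get_class_or_typedef():
 * byte 0 holds the symbol type, the rest is the NUL-terminated origin
 * string (possibly empty).
 */
#if 0
static int example_decode_cached_symbol(const DBT *data, char *origin)
{
	const char *p = (const char *)data->data;

	memcpy(origin, p + 1, data->size - 1);	/* copies the '\0' too */
	return (int)p[0];			/* the symbol type */
}
#endif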
/*
* This function loads a class including its base classes.
*/
static void
load_class(char *class_name)
{
DB *dbp = db_cached_classes;
DBT key;
DBT data;
LongString tmp_class_name;
char *nm;
unsigned int len;
key.data = (void *)class_name;
key.size = strlen(class_name);
if (dbp->get(dbp,&key,&data,0) == 0)
{
return; /* It has already been loaded, don't do it again! */
}
dbp = db_syms[PAF_CLASS_DEF];
if (!dbp)
return;
LongStringInit(&tmp_class_name,0);
tmp_class_name.copystrings(&tmp_class_name,class_name," ",NULL);
nm = tmp_class_name.buf;
len = tmp_class_name.len;
key.data = (void *)nm;
key.size = len;
if (dbp->seq(dbp,&key,&data,R_CURSOR) != 0 ||
(int)key.size < len || memcmp(nm,key.data,len) != 0)
{
tmp_class_name.free(&tmp_class_name);
return; /* The class does not exist. */
}
store_symbol_to_cache(class_name,PAF_CLASS_DEF,NULL);
dbp = db_syms[PAF_CLASS_INHERIT];
if (dbp)
{
LongString tmp_inherited_class;
LongString inh_buf;
char *icls;
char *inhb;
unsigned int flag;
LongStringInit(&tmp_inherited_class,0);
LongStringInit(&inh_buf,0);
for (flag = R_CURSOR;
dbp->seq(dbp,&key,&data,flag) == 0 &&
key.size >= len && memcmp(nm,key.data,len) == 0; flag = R_NEXT)
{
tmp_inherited_class.split(&tmp_inherited_class,
key.data,
key.size -1,
TRUE,
DB_FLDSEP_CHR,
-1);
icls = tmp_inherited_class.field_value[1];
inh_buf.split(&inh_buf,
data.data,
data.size -1,
TRUE,
DB_FLDSEP_CHR,
-1);
inhb = inh_buf.field_value[1];
load_class(icls);
inherit_members(class_name,icls,inhb);
dbp->seq(dbp,&key,&data,R_CURSOR); /* Restore the cursor ! */
}
tmp_inherited_class.free(&tmp_inherited_class);
inh_buf.free(&inh_buf);
}
tmp_class_name.free(&tmp_class_name);
load_class_members(PAF_MBR_VAR_DEF, class_name);
load_class_members(PAF_MBR_FUNC_DCL, class_name);
}
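/*
 * Hedged usage sketch: load_class() recurses over PAF_CLASS_INHERIT, so a
 * single call pulls in the whole ancestry. "Derived" is a made-up name.
 */
#if 0
static void example_load_class(void)
{
	load_class("Derived");	/* caches Derived, its base classes, and
				 * all inherited members in db_class_tree */
}
#endif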
void
open_tables_for_cross_ref()
{
char fname[MAXPATHLEN];
BTREEINFO db_inf;
HASHINFO ha_inf;
memset((char *)&db_inf,0,sizeof(db_inf));
db_inf.cachesize = (u_int)db_cross_cachesize;
db_inf.compare = db_case_compare;
sprintf(fname,"%s.ctr",db_project_dir);
db_class_tree = dbopen(fname, O_RDWR|O_CREAT|O_TRUNC, get_db_permission(), DB_BTREE,
&db_inf);
if (!db_class_tree)
{
fprintf(stderr,"dbimp: %s, %s",fname,strerror(errno));
fflush(stderr);
Paf_panic(PAF_PANIC_SIMPLE);
}
memset((char *)&ha_inf,0,sizeof(ha_inf));
ha_inf.cachesize = (u_int)db_cross_cachesize;
ha_inf.nelem = 10000;
sprintf(fname,"%s.xhs",db_project_dir);
db_cached_classes = dbopen(fname,O_RDWR|O_CREAT|O_TRUNC, get_db_permission(), DB_HASH,
&ha_inf);
if (!db_cached_classes)
{
fprintf(stderr,"dbimp: %s, %s\n",fname,strerror(errno));
fflush(stderr);
Paf_panic(PAF_PANIC_SIMPLE);
}
create_table(PAF_CLASS_DEF,O_RDONLY,db_cachesize);
create_table(PAF_TYPE_DEF,O_RDONLY,db_cachesize);
create_table(PAF_MBR_VAR_DEF,O_RDONLY,db_cachesize);
create_table(PAF_CONS_DEF,O_RDONLY,db_cachesize);
create_table(PAF_MACRO_DEF,O_RDONLY,db_cachesize);
create_table(PAF_FUNC_DEF,O_RDONLY,db_cachesize);
create_table(PAF_FUNC_DCL,O_RDONLY,db_cachesize);
create_table(PAF_GLOB_VAR_DEF,O_RDONLY,db_cachesize);
create_table(PAF_CLASS_INHERIT,O_RDONLY,db_cachesize);
create_table(PAF_MBR_FUNC_DCL,O_RDONLY,db_cachesize);
create_table(PAF_ENUM_CONST_DEF,O_RDONLY,db_cachesize);
create_table(PAF_ENUM_DEF,O_RDONLY,db_cachesize);
#if PAF_UNION_DEF != PAF_CLASS_DEF
create_table(PAF_UNION_DEF,O_RDONLY,db_cachesize);
#endif /* PAF_UNION_DEF != PAF_CLASS_DEF */
}
#undef USE_LEVELS
#ifdef USE_LEVELS
#define LEVELS 4
#else
#define LEVELS 2
#endif
struct CIgnored_Words {
char *wrd;
int size;
};
static struct CIgnored_Words CIgnoredWords[] =
{
{"const",5},
{"unsigned",8},
{NULL,0}
};
static int RemoveIgnoredWords (char *target_buf, char *pb)
{
char *target = target_buf;
char *pe;
int len;
char fc;
struct CIgnored_Words *ci;
for (pe = pb; *pe && (len = strcspn(pb, " ,")); pb = pe + 1)
{
pe = pb + len;
fc = *pb;
for (ci = &CIgnoredWords[0]; ci->size; ci++)
{
if (*ci->wrd == fc && ci->size == len &&
memcmp (pb, ci->wrd,len) == 0)
{
goto next;
}
}
memcpy(target,pb,len + 1);
target += len + 1;
next:
;
}
if (target == target_buf)
{
*target = '\0';
return 0;
}
return target - target_buf - 1;
}
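/*
 * Hedged example of the filter above (behavior derived from the code; the
 * input string is made up): "const" and "unsigned" tokens are dropped,
 * each surviving token keeps its trailing separator, and the return value
 * is the length of the rewritten list.
 */
#if 0
static void example_remove_ignored_words(void)
{
	char out[64];
	int n = RemoveIgnoredWords(out, "const char,unsigned int,long");
	/* out == "char,int,long", n == 13 */
}
#endif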
#define PAF_CLASS_TREE -1
#define RETURN_FROM_SEARCH(value) \
{\
buf.free(&buf);\
rounded_arg_types.free(&rounded_arg_types);\
cls_name.free(&cls_name);\
tmp.free(&tmp);\
return (value);\
}
static int
search_for_symbol(char *global_class_name,char *local_class_name,
char *name,char *arg_types,int db_type,char *scope,char *ret_type,
char *macro_value,int exact)
{
DB *dbp;
DBT key;
DBT data;
int length;
char in_access_val[80];
char sym_type[50];
char param_args[2048];
char *bufval;
LongString buf;
LongString rounded_arg_types;
LongString cls_name;
LongString tmp;
int flag;
int cmp = 1;
int fetch;
int arg_types_len;
int num;
char **fields;
#if BUG_TRACE
static FILE *trace_fp;
if (!trace_fp)
{
trace_fp = fopen("/tmp/dbutils.log","w+");
chmod("/tmp/dbutils.log",0666);
}
#endif /* BUG_TRACE */
LongStringInit(&buf,0);
LongStringInit(&rounded_arg_types,0);
LongStringInit(&cls_name,0);
LongStringInit(&tmp,0);
if (db_type == PAF_CLASS_TREE)
{
dbp = db_class_tree;
}
else
{
dbp = db_syms[db_type];
}
if (!dbp)
return FALSE;
if (db_type == PAF_CLASS_TREE)
{
buf.copystrings(&buf,
local_class_name
? local_class_name
: global_class_name, DB_FLDSEP_STR,
name, DB_FLDSEP_STR,
arg_types ? arg_types : "",
NULL);
length = buf.len + 1;
}
else
{
buf.appendstrings (&buf,name, DB_FLDSEP_STR, NULL);
length = buf.len;
}
bufval = buf.buf;
arg_types_len = arg_types ? strlen(arg_types) : 0;
key.data = (void *)bufval;
key.size = (u_int)length;
data.data = NULL;
data.size = 0;
fetch = dbp->seq(dbp,&key,&data,R_CURSOR);
#if BUG_TRACE
fprintf(trace_fp,"Search for: <%s> return: %d ARGS: <%s>\n",
buf.buf,
fetch,
arg_types ? arg_types : "NULL");
#endif /* BUG_TRACE */
if (fetch == -1) /* Error */
{
RETURN_FROM_SEARCH(FALSE);
}
if (fetch == 1 || (int)key.size < length ||
memcmp(buf.buf,key.data,length) != 0)
{
if (db_type == PAF_CLASS_TREE && !exact && arg_types)
{
#if !OPT_CLASS_TREE
DB *db_md = db_syms[PAF_MBR_FUNC_DCL];
#endif /* !OPT_CLASS_TREE */
char *cl_nm_p;
int Round;
/* It might happen that we cannot find a method
 * because of typecasting problems. In this case, we
 * check whether the fetched or the previous
 * record matches the target method name. */
cls_name.copystrings(&cls_name,
local_class_name
? local_class_name
: global_class_name, DB_FLDSEP_STR,
name, DB_FLDSEP_STR,
NULL);
length = cls_name.len;
cl_nm_p = cls_name.buf;
rounded_arg_types.makespace (&rounded_arg_types, arg_types_len);
rounded_arg_types.len = RemoveIgnoredWords(rounded_arg_types.buf,arg_types);
#if !OPT_CLASS_TREE
/* Search for the method in the current class, (not in base classes) */
if (db_md)
{
int function_avail = 0;
for (Round = 1; Round < LEVELS; Round++)
{
key.data = (void *)cl_nm_p;
key.size = (u_int)length;
data.data = NULL;
data.size = 0;
/* search for method in the base class! */
for (flag = R_CURSOR; db_md->seq(db_md, &key, &data, flag)== 0; flag = R_NEXT)
{
/* Still target class and method name ? */
if ((int)key.size < length + 1 || memcmp (cl_nm_p, key.data, length) != 0)
break;
function_avail = 1;
/*
* Format:
*
* <line> <access> <return type> <parameters> <parameter names>
*/
my_SplitList (data.data, &num, &fields, DB_FLDSEP_CHR);
if (num >= 4)
{
MY_STRNCPY (in_access_val, fields[1], sizeof (in_access_val));
MY_STRNCPY (ret_type, fields[2], 1024); /* FIXME. */
MY_STRNCPY (param_args, fields[3], sizeof (param_args));
}
else
{
in_access_val[0] = 0;
ret_type [0] = 0;
param_args [0] = 0;
}
ckfree ((char*)fields);
tmp.makespace (&tmp,data.size);
tmp.len = RemoveIgnoredWords (tmp.buf, param_args);
#if BUG_TRACE
fprintf(trace_fp,"method type: <%s> <%s> <%s>\n",
tmp.buf,cl_nm_p,(char *)data.data);
#endif /* BUG_TRACE */
if (tmp.len == rounded_arg_types.len &&
memcmp(tmp.buf,rounded_arg_types.buf,tmp.len) == 0)
{
/* method found and arguments are similar. */
memcpy(arg_types, param_args, strlen (param_args)+1);
if (scope)
{
sscanf(cl_nm_p,"%s",scope);
MY_DEBUG ((Output, "sscanf used by cl_nm_p <%s> Scope <%s>\n", cl_nm_p, scope));
}
#if BUG_TRACE
fprintf(trace_fp,"1 scope: <%s> access: <%s> ret_type: <%s> arg_types: <%s>\n",
scope ? scope : "NULL",
in_access_val,ret_type,arg_types);
#endif /* BUG_TRACE */
RETURN_FROM_SEARCH(PAF_MBR_FUNC_DEF);
}
}
}
/*
 * If the method is available, we don't search the base classes;
 * we just take the first method of the class.
 */
if (function_avail)
{
key.data = (void *)cl_nm_p;
key.size = (u_int)length;
data.data = NULL;
data.size = 0;
if (db_md->seq (db_md, &key, &data, R_CURSOR) != 0)
{
RETURN_FROM_SEARCH(FALSE);
}
/*
* format:
* <line> <access> <return type> <parameters> <parameter names>
*/
my_SplitList (data.data, &num, &fields, DB_FLDSEP_CHR);
if (num >= 4)
{
MY_STRNCPY (in_access_val, fields[1], sizeof (in_access_val));
MY_STRNCPY (ret_type, fields[2], 1024);
MY_STRNCPY (arg_types, fields[3], 10000);
}
else
{
in_access_val[0] = 0;
ret_type [0] = 0;
arg_types [0] = 0;
}
ckfree ((char*)fields);
if (scope)
{
sscanf(cl_nm_p,"%s",scope);
MY_DEBUG ((Output, "sscanf used 2 by cl_nm_p <%s> scope <%s>\n", cl_nm_p, scope));
}
#if BUG_TRACE
fprintf(trace_fp,"2 scope: <%s> access: <%s> ret_type: <%s> arg_types: <%s>\n",
scope ? scope : "NULL",
in_access_val,ret_type,arg_types);
#endif /* BUG_TRACE */
RETURN_FROM_SEARCH(PAF_MBR_FUNC_DEF);
}
}
#endif /* !OPT_CLASS_TREE */
/* Search for the method in the base classes with similar parameters! */
for (Round = 1; Round < LEVELS; Round++)
{
/* search method exactly in the base classes */
key.data = (void *)cl_nm_p;
key.size = (u_int)length;
data.data = NULL;
data.size = 0;
for (flag = R_CURSOR; dbp->seq(dbp, &key, &data, flag)== 0; flag = R_NEXT)
{
/* Still the method being searched for? */
if ((int)key.size < length + 1 || memcmp (cl_nm_p, key.data, length) != 0)
break;
tmp.makespace(&tmp,key.size);
tmp.len = RemoveIgnoredWords (tmp.buf,(char *)key.data + length);
if (tmp.len == rounded_arg_types.len &&
memcmp(tmp.buf,rounded_arg_types.buf,tmp.len) == 0)
{
/* It will copy the terminating '\0' too. */
memcpy(arg_types, (char*)key.data + length, key.size - length);
/*
* format
*
* <scope> <type> <access> <return type> <parameters> <parameter names>
*/
my_SplitList (data.data, &num, &fields, DB_FLDSEP_CHR);
if (num >= 4)
{
MY_STRNCPY (scope, fields[0], 1024); /* FIXME: scope is a pointer, so sizeof(scope) was just the pointer size. */
MY_STRNCPY (sym_type, fields[1], sizeof(sym_type));
MY_STRNCPY (in_access_val, fields[2], sizeof(in_access_val));
MY_STRNCPY (ret_type, fields[3], 1024);
}
else
{
scope [0] = 0;
sym_type [0] = 0;
in_access_val[0] = 0;
ret_type [0] = 0;
}
ckfree ((char*)fields);
#if BUG_TRACE
fprintf(trace_fp,"3 scope: <%s> access: <%s> ret_type: <%s> arg_types: <%s>\n",
scope ? scope : "NULL",
in_access_val,ret_type,arg_types);
#endif /* BUG_TRACE */
RETURN_FROM_SEARCH(PAF_MBR_FUNC_DEF);
}
}
}
/* No method matches the argument list, thus we just take the
* first one.
*/
key.data = (void *)cl_nm_p;
key.size = (u_int)length;
data.data = NULL;
data.size = 0;
if (dbp->seq(dbp, &key, &data, R_CURSOR) != 0 ||
(int)key.size < length || memcmp(cl_nm_p, key.data, length) != 0)
{
RETURN_FROM_SEARCH(FALSE);
}
/* The terminating '\0' will be copied too. */
memcpy(arg_types,(char*)key.data + length,key.size - length);
}
else
{
RETURN_FROM_SEARCH(FALSE);
}
}
if (db_type == PAF_CLASS_TREE)
{
LongString pars;
LongStringInit(&pars,0);
pars.split (&pars, data.data, data.size-1, FALSE, DB_FLDSEP_CHR, 4);
memcpy(scope, pars.field_value[0], pars.field_size[0]);
scope[pars.field_size[0]] = '\0';
memcpy(sym_type, pars.field_value[1], pars.field_size[1]);
sym_type[pars.field_size[1]] = '\0';
memcpy (in_access_val, pars.field_value[2], pars.field_size[2]);
in_access_val[pars.field_size[2]] = '\0';
memcpy(ret_type,
pars.field_value[3],
pars.field_size[3]);
ret_type[pars.field_size[3]] = '\0';
pars.free(&pars);
if (sym_type[0] == SN_GetSymbolType(PAF_MBR_VAR_DEF)[0]) /* "iv" ? */
{
RETURN_FROM_SEARCH(PAF_MBR_VAR_DEF);
}
#if BUG_TRACE
fprintf(trace_fp,"4 name: <%s> scope: <%s> access: <%s> ret_type: <%s> arg_types: <%s>\n",
buf.buf,
scope ? scope : "NULL",
in_access_val,
ret_type,
arg_types ? arg_types : "NULL");
#endif /* BUG_TRACE */
/* At this point we might return a method with the wrong
 * argument list, but that is still better than saying that
 * the method is not known.
 */
RETURN_FROM_SEARCH(PAF_MBR_FUNC_DEF);
}
/* If we are searching for a function we have to check its
* input arguments' types. (ANSI).
*/
fetch = 0;
do
{
/* Parse for example: line.col attr {struct fp *} {unsigned char *,const int} */
*in_access_val = '\0';
*ret_type = '\0';
/*
 * Field has the format, separated with DB_FLDSEP_CHR:
 *
 * <line> <access> <return type> <parameter list> <parameter variables>
 *
 */
my_SplitList (data.data, &num, &fields, DB_FLDSEP_CHR);
if (num >= 4)
{
MY_STRNCPY (in_access_val, fields[1], sizeof (in_access_val));
MY_STRNCPY (ret_type, fields[2], 1024);
MY_STRNCPY (param_args, fields[3], sizeof (param_args));
if (arg_types && arg_types_len == (int)strlen(param_args) &&
memcmp (param_args, arg_types, arg_types_len) == 0)
{
ckfree ((char*)fields);
cmp = 0;
break;
}
else
cmp = 1;
}
ckfree ((char*)fields);
} while(arg_types &&
(fetch = dbp->seq(dbp,&key,&data,R_NEXT)) == 0 &&
(int)key.size >= length &&
(cmp = memcmp(bufval,key.data,length)) == 0);
#define MACRO_XREF 1
#if MACRO_XREF
/* If the macro name and its contents are identical, we don't
 * return the contents because that could cause an endless loop.
 */
if (db_type == PAF_MACRO_DEF && macro_value)
{
strcpy(macro_value, ret_type);
}
#endif /* MACRO_XREF */
if (arg_types)
{
/*
* Here we take the last found function with its arguments.
*/
strcpy (arg_types, param_args);
}
#if BUG_TRACE
fprintf(trace_fp,"5 func: <%s> access: <%s> ret_type: <%s> arg_types: <%s>\n",
buf.buf,in_access_val,ret_type,arg_types ? arg_types : "NULL");
fflush(trace_fp);
#endif /* BUG_TRACE */
if (exact && arg_types && (fetch != 0 || cmp != 0))
{
RETURN_FROM_SEARCH(FALSE);
}
/* At this point we might return a function with the wrong
 * argument list, but that is still better than saying that
 * the function is not known.
 */
RETURN_FROM_SEARCH(db_type);
}
static struct check_symbol_types {
int db_type;
int needs_class;
int needs_args;
} symbol_types_seq[] = {
{PAF_CLASS_TREE, TRUE, TRUE}, /* Methods. */
{PAF_CLASS_TREE, TRUE, FALSE}, /* Inst. variables. */
{PAF_FUNC_DEF, FALSE, TRUE},
{PAF_FUNC_DCL, FALSE, TRUE},
{PAF_MACRO_DEF, FALSE, TRUE}, /* Macros such as DEF() */
{PAF_MACRO_DEF, FALSE, FALSE}, /* Macros such as BUFSIZE */
{PAF_GLOB_VAR_DEF, FALSE, FALSE},
{PAF_CONS_DEF, FALSE, FALSE},
{PAF_ENUM_CONST_DEF, FALSE, FALSE}
};
static int check_class_typedef_enum_union[] = {
PAF_CLASS_DEF,PAF_ENUM_DEF,PAF_UNION_DEF,PAF_TYPE_DEF,0
};
int
get_class_or_typedef(char *name, char *origin)
{
int sym_type;
char dummy[1000];
DBT key;
DBT data;
int *tp;
/* Check it in the hash table. */
key.data = (void *)name;
key.size = strlen(name);
if (db_cached_classes->get(db_cached_classes,&key,&data,0) == 0)
{
char *p = (char *)data.data;
sym_type = (int)p[0];
memcpy(origin,&p[1],data.size - 1); /* That will copy a '\0' too. */
return sym_type;
}
*origin = '\0';
*dummy = '\0';
for (sym_type = 0, tp = check_class_typedef_enum_union;
!sym_type && *tp; tp++)
{
sym_type = search_for_symbol(
NULL,
NULL,
name,
NULL,
*tp,
dummy,
origin,
NULL,
TRUE);
}
if (sym_type == PAF_CLASS_DEF)
{
load_class(name);
}
return sym_type;
}
int
get_symbol(char *global_class_name,char *local_class_name,char *name,
char *arg_types,char *scope,char *ret_type,char *macro_value,int exact)
{
int cou;
int sym_type;
struct check_symbol_types *sym;
*scope = '\0';
*ret_type = '\0';
if (local_class_name)
{
if (*local_class_name == '\0')
local_class_name = NULL;
else
load_class(local_class_name);
}
if (global_class_name)
{
if (*global_class_name == '\0')
global_class_name = NULL;
else
load_class(global_class_name);
}
cou = sizeof(symbol_types_seq) / sizeof(struct check_symbol_types);
for (sym_type = FALSE, sym = &symbol_types_seq[0]; !sym_type && cou-- > 0; sym++)
{
if ((sym->needs_class && !global_class_name && !local_class_name) ||
(sym->needs_args && !arg_types) || (local_class_name && !sym->needs_class))
{
continue;
}
sym_type = search_for_symbol(
sym->needs_class ? global_class_name : NULL,
sym->needs_class ? local_class_name : NULL,
name,
arg_types,
sym->db_type,
scope,
ret_type,
macro_value,
exact);
}
/* It still can be a constructor. */
if (!sym_type && arg_types)
{
sym_type = search_for_symbol(
NULL,
name,
name,
arg_types,
PAF_CLASS_TREE,
scope,
ret_type,
macro_value,
exact);
}
return sym_type;
}
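/*
 * Hedged usage sketch (the names and buffer sizes are made up): resolving
 * a plain function reference walks symbol_types_seq[] until something
 * matches; the PAF_* type of the match (or FALSE) comes back.
 */
#if 0
static void example_get_symbol(void)
{
	char scope[512], ret_type[1024], macro_value[1024];
	char args[1024] = "char *,int";
	int t = get_symbol(NULL, NULL, "my_func", args,
			scope, ret_type, macro_value, FALSE);
	/* t == PAF_FUNC_DEF (etc.) on success, FALSE otherwise */
}
#endif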
void
Paf_db_init_tables(const char *proj_dir,const char *cache,const char *cross_cache)
{
if (proj_dir)
{
strcpy(db_project_dir,proj_dir);
}
if (cache)
{
db_cachesize = (u_int)(atoi(cache) * 1024);
}
if (cross_cache)
{
db_cross_cachesize = (u_int)(atoi(cross_cache) * 1024);
}
}
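/*
 * Hedged usage sketch: the cache arguments are given in kilobytes and
 * scaled to bytes above; the path and values here are made up.
 */
#if 0
static void example_init_tables(void)
{
	Paf_db_init_tables("/home/user/proj/.snprj", "256", "1024");
	/* db_cachesize == 256 KB, db_cross_cachesize == 1024 KB, in bytes */
}
#endif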
#if DB_NO_CASE_COMPARE
/*
* Database (without case) comparison routine.
*
* Parameters:
* a: DBT #1
* b: DBT #2
*
* Returns:
* < 0 if a is < b
* = 0 if a is = b
* > 0 if a is > b
*/
int
db_no_case_compare(const DBT *a,const DBT *b)
{
register int cmp;
cmp = use_STRNCASECMP (a->data, b->data, a->size < b->size ? a->size : b->size);
if (cmp == 0)
{
/*
 * This is somewhat tricky: if "Foo" == "foo" and we are in a
 * fetching routine, "foo" must replace "Foo", so the original
 * text must be returned.
 */
if (db_action_is_fetching &&
a->size == b->size &&
memcmp(a->data, b->data, a->size < b->size ? a->size : b->size) != 0)
{
memcpy (a->data, b->data, b->size);
}
return ((int)a->size - (int)b->size); /* return only if it is equal to a string */
}
if (cmp != 0)
{
return cmp;
}
return ((int)a->size - (int)b->size);
}
#endif /* DB_NO_CASE_COMPARE */
/*
* Database comparison routine.
*
* Parameters:
* a: DBT #1
* b: DBT #2
*
* Returns:
* < 0 if a is < b
* = 0 if a is = b
* > 0 if a is > b
*/
int
db_case_compare(register const DBT *a,register const DBT *b)
{
register int cmp;
if (db_compare_nocase && a->size == b->size)
{
cmp = use_STRNCASECMP (a->data, b->data, a->size < b->size ? a->size : b->size);
if (cmp == 0)
{
return ((int)a->size - (int)b->size); /* return only if it is equal to a string */
}
}
cmp = memcmp(a->data,b->data,a->size < b->size ? a->size : b->size);
if (cmp != 0)
{
return cmp;
}
return ((int)a->size - (int)b->size);
}
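/*
 * Hedged illustration of the ordering these comparators produce: when one
 * key is a prefix of the other, the byte comparison ties over the common
 * length and the size difference decides, so shorter keys sort first.
 */
#if 0
static void example_compare_order(void)
{
	DBT a = { (void *)"foo",  3 };
	DBT b = { (void *)"foo2", 4 };
	/* memcmp over 3 bytes ties, so db_case_compare(&a,&b) == 3 - 4 < 0 */
}
#endif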
/*
* Return true if the given key exists in the table.
*/
static
int
db_key_in_table(int type, char* key) {
DB *dbp = db_syms[type];
DBT dbkey, dbres;
int csize;
int result;
if (type == PAF_CROSS_REF) {
csize = db_cross_cachesize;
} else {
csize = db_cachesize;
}
if (!dbp)
dbp = create_table(type, O_RDWR|O_CREAT, csize);
if (dbp == NULL)
return 0;
dbkey.data = key;
dbkey.size = strlen(dbkey.data) + 1;
result = dbp->get(dbp, &dbkey, &dbres, 0);
if (result < 0) {
Paf_panic(PAF_PANIC_EMERGENCY);
} else if (result == 0) {
return 1; /* key in table */
} else {
return 0; /* key not in table */
}
}
/* very bad solution for error handling */
jmp_buf BAD_IMPL_jmp_buf;
void
Paf_panic(int level)
{
if (level <= 0)
level = 1;
longjmp(BAD_IMPL_jmp_buf,level);
}
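/*
 * Hedged sketch: callers are expected to arm BAD_IMPL_jmp_buf with
 * setjmp() before running code that may call Paf_panic(); the panic
 * level reappears as the setjmp() return value. Assumes <setjmp.h>.
 */
#if 0
static void example_panic_guard(void)
{
	int level = setjmp(BAD_IMPL_jmp_buf);
	if (level != 0) {
		/* a Paf_panic(level) unwound to here */
		return;
	}
	/* ... run code that may panic ... */
}
#endif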
#if defined(__MSVC__) || defined(__MINGW32__)
/* This function checks for a process (with pid - proc_id).
Returns:
-1 if not there
1 if there
NB: this won't kill the process.
[irox:3.3.98]
*/
int kill(pid_t pid, int dummy) /*sn_win32_ping*/
{
HANDLE hProcess;
char debug_str[200];
DWORD proc_id = pid;
hProcess = OpenProcess(PROCESS_ALL_ACCESS, TRUE, proc_id);
if (hProcess != NULL)
{
sprintf(debug_str,"Opened process %d.", (int)proc_id);
/* MessageBox(NULL,debug_str,"debug InFo",MB_OK);*/
CloseHandle(hProcess); /* Don't leak the handle; we only probed for liveness. */
/* It's alive! */
return 1;
} else {
sprintf(debug_str,"Couldn't open process %d.", (int)proc_id);
/* MessageBox(NULL,debug_str,"debug InFo",MB_OK);*/
/* I think it's dead, Jim. */
return -1;
}
}
#endif
| gpl-2.0 |
DevriesL/HeroQLTE_ImageBreaker | mm/memory.c | 7 | 104610 | /*
* linux/mm/memory.c
*
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*/
/*
* demand-loading started 01.12.91 - seems it is high on the list of
* things wanted, and it should be easy to implement. - Linus
*/
/*
* Ok, demand-loading was easy, shared pages a little bit trickier. Shared
* pages started 02.12.91, seems to work. - Linus.
*
* Tested sharing by executing about 30 /bin/sh: under the old kernel it
* would have taken more than the 6M I have free, but it worked well as
* far as I could see.
*
* Also corrected some "invalidate()"s - I wasn't doing enough of them.
*/
/*
* Real VM (paging to/from disk) started 18.12.91. Much more work and
* thought has to go into this. Oh, well..
* 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
* Found it. Everything seems to work now.
* 20.12.91 - Ok, making the swap-device changeable like the root.
*/
/*
* 05.04.94 - Multi-page memory management added for v1.1.
* Idea by Alex Bligh (alex@cconcepts.co.uk)
*
* 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
* (Gerhard.Wichert@pdb.siemens.de)
*
* Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
*/
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/kallsyms.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/dma-debug.h>
#include <linux/debugfs.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include "internal.h"
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif
#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;
EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif
/*
* A number of key systems in x86 including ioremap() rely on the assumption
* that high_memory defines the upper bound on direct map memory, the end
* of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
* highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
* and ZONE_HIGHMEM.
*/
void * high_memory;
EXPORT_SYMBOL(high_memory);
/*
* Randomize the address space (stacks, mmaps, brk, etc.).
*
* ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
* as ancient (libc5 based) binaries can segfault. )
*/
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
1;
#else
2;
#endif
static int __init disable_randmaps(char *s)
{
randomize_va_space = 0;
return 1;
}
__setup("norandmaps", disable_randmaps);
unsigned long zero_pfn __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn);
/*
* CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
*/
static int __init init_zero_pfn(void)
{
zero_pfn = page_to_pfn(ZERO_PAGE(0));
return 0;
}
core_initcall(init_zero_pfn);
#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm)
{
int i;
for (i = 0; i < NR_MM_COUNTERS; i++) {
if (current->rss_stat.count[i]) {
add_mm_counter(mm, i, current->rss_stat.count[i]);
current->rss_stat.count[i] = 0;
}
}
current->rss_stat.events = 0;
}
static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
struct task_struct *task = current;
if (likely(task->mm == mm))
task->rss_stat.count[member] += val;
else
add_mm_counter(mm, member, val);
}
#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
/* sync counter once per 64 page faults */
#define TASK_RSS_EVENTS_THRESH (64)
static void check_sync_rss_stat(struct task_struct *task)
{
if (unlikely(task != current))
return;
if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
sync_mm_rss(task->mm);
}
#else /* SPLIT_RSS_COUNTING */
#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
static void check_sync_rss_stat(struct task_struct *task)
{
}
#endif /* SPLIT_RSS_COUNTING */
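/*
 * Illustrative fragment (not upstream code): how the split-RSS scheme is
 * meant to be driven from a fault path - bump the cheap per-task counter,
 * then let the threshold logic fold it into the shared mm counters at
 * most once per TASK_RSS_EVENTS_THRESH events.
 */
#if 0
inc_mm_counter_fast(mm, MM_ANONPAGES);
check_sync_rss_stat(current);
#endif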
#ifdef HAVE_GENERIC_MMU_GATHER
static int tlb_next_batch(struct mmu_gather *tlb)
{
struct mmu_gather_batch *batch;
batch = tlb->active;
if (batch->next) {
tlb->active = batch->next;
return 1;
}
if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
return 0;
batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
if (!batch)
return 0;
tlb->batch_count++;
batch->next = NULL;
batch->nr = 0;
batch->max = MAX_GATHER_BATCH;
tlb->active->next = batch;
tlb->active = batch;
return 1;
}
/* tlb_gather_mmu
* Called to initialize an (on-stack) mmu_gather structure for page-table
* tear-down from @mm. The @fullmm argument is used when @mm is without
* users and we're going to destroy the full address space (exit/execve).
*/
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
/* Is it from 0 to ~0? */
tlb->fullmm = !(start | (end+1));
tlb->need_flush_all = 0;
tlb->local.next = NULL;
tlb->local.nr = 0;
tlb->local.max = ARRAY_SIZE(tlb->__pages);
tlb->active = &tlb->local;
tlb->batch_count = 0;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
tlb->batch = NULL;
#endif
__tlb_reset_range(tlb);
}
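/*
 * Sketch of the usual mmu_gather lifecycle (mirrors zap_page_range()
 * later in this file):
 */
#if 0
struct mmu_gather tlb;

tlb_gather_mmu(&tlb, mm, start, end);
unmap_vmas(&tlb, vma, start, end);
tlb_finish_mmu(&tlb, start, end);
#endif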
static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
if (!tlb->end)
return;
tlb_flush(tlb);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
tlb_table_flush(tlb);
#endif
__tlb_reset_range(tlb);
}
static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
struct mmu_gather_batch *batch;
for (batch = &tlb->local; batch; batch = batch->next) {
free_pages_and_swap_cache(batch->pages, batch->nr);
batch->nr = 0;
}
tlb->active = &tlb->local;
}
void tlb_flush_mmu(struct mmu_gather *tlb)
{
tlb_flush_mmu_tlbonly(tlb);
tlb_flush_mmu_free(tlb);
}
/* tlb_finish_mmu
* Called at the end of the shootdown operation to free up any resources
* that were required.
*/
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
struct mmu_gather_batch *batch, *next;
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
check_pgt_cache();
for (batch = tlb->local.next; batch; batch = next) {
next = batch->next;
free_pages((unsigned long)batch, 0);
}
tlb->local.next = NULL;
}
/* __tlb_remove_page
* Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
* handling the additional races in SMP caused by other CPUs caching valid
* mappings in their TLBs. Returns the number of free page slots left.
* When out of page slots we must call tlb_flush_mmu().
*/
int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
struct mmu_gather_batch *batch;
VM_BUG_ON(!tlb->end);
batch = tlb->active;
batch->pages[batch->nr++] = page;
if (batch->nr == batch->max) {
if (!tlb_next_batch(tlb))
return 0;
batch = tlb->active;
}
VM_BUG_ON_PAGE(batch->nr > batch->max, page);
return batch->max - batch->nr;
}
#endif /* HAVE_GENERIC_MMU_GATHER */
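/*
 * Caller contract sketch for __tlb_remove_page() above (compare
 * zap_pte_range() below): the page is always queued, and a zero return
 * means the batch is now full and must be flushed before gathering more.
 */
#if 0
if (!__tlb_remove_page(tlb, page))	/* page queued, batch full */
	tlb_flush_mmu(tlb);		/* flush before queueing more */
#endif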
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
* See the comment near struct mmu_table_batch.
*/
static void tlb_remove_table_smp_sync(void *arg)
{
/* Simply deliver the interrupt */
}
static void tlb_remove_table_one(void *table)
{
/*
* This isn't an RCU grace period and hence the page-tables cannot be
* assumed to be actually RCU-freed.
*
* It is however sufficient for software page-table walkers that rely on
* IRQ disabling. See the comment near struct mmu_table_batch.
*/
smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
__tlb_remove_table(table);
}
static void tlb_remove_table_rcu(struct rcu_head *head)
{
struct mmu_table_batch *batch;
int i;
batch = container_of(head, struct mmu_table_batch, rcu);
for (i = 0; i < batch->nr; i++)
__tlb_remove_table(batch->tables[i]);
free_page((unsigned long)batch);
}
void tlb_table_flush(struct mmu_gather *tlb)
{
struct mmu_table_batch **batch = &tlb->batch;
if (*batch) {
call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
*batch = NULL;
}
}
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
struct mmu_table_batch **batch = &tlb->batch;
/*
* When there are fewer than two users of this mm there cannot be a
* concurrent page-table walk.
*/
if (atomic_read(&tlb->mm->mm_users) < 2) {
__tlb_remove_table(table);
return;
}
if (*batch == NULL) {
*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
if (*batch == NULL) {
tlb_remove_table_one(table);
return;
}
(*batch)->nr = 0;
}
(*batch)->tables[(*batch)->nr++] = table;
if ((*batch)->nr == MAX_TABLE_BATCH)
tlb_table_flush(tlb);
}
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
/*
* Note: this doesn't free the actual pages themselves. That
* has been handled earlier when unmapping all the memory regions.
*/
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long addr)
{
pgtable_t token = pmd_pgtable(*pmd);
pmd_clear(pmd);
pte_free_tlb(tlb, token, addr);
atomic_long_dec(&tlb->mm->nr_ptes);
}
static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
pmd_t *pmd;
unsigned long next;
unsigned long start;
start = addr;
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
if (pmd_none_or_clear_bad(pmd))
continue;
free_pte_range(tlb, pmd, addr);
} while (pmd++, addr = next, addr != end);
start &= PUD_MASK;
if (start < floor)
return;
if (ceiling) {
ceiling &= PUD_MASK;
if (!ceiling)
return;
}
if (end - 1 > ceiling - 1)
return;
pmd = pmd_offset(pud, start);
pud_clear(pud);
pmd_free_tlb(tlb, pmd, start);
}
static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
pud_t *pud;
unsigned long next;
unsigned long start;
start = addr;
pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
free_pmd_range(tlb, pud, addr, next, floor, ceiling);
} while (pud++, addr = next, addr != end);
start &= PGDIR_MASK;
if (start < floor)
return;
if (ceiling) {
ceiling &= PGDIR_MASK;
if (!ceiling)
return;
}
if (end - 1 > ceiling - 1)
return;
pud = pud_offset(pgd, start);
pgd_clear(pgd);
pud_free_tlb(tlb, pud, start);
}
/*
* This function frees user-level page tables of a process.
*/
void free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
pgd_t *pgd;
unsigned long next;
/*
* The next few lines have given us lots of grief...
*
* Why are we testing PMD* at this top level? Because often
* there will be no work to do at all, and we'd prefer not to
* go all the way down to the bottom just to discover that.
*
* Why all these "- 1"s? Because 0 represents both the bottom
* of the address space and the top of it (using -1 for the
* top wouldn't help much: the masks would do the wrong thing).
* The rule is that addr 0 and floor 0 refer to the bottom of
* the address space, but end 0 and ceiling 0 refer to the top.
* Comparisons need to use "end - 1" and "ceiling - 1" (though
* that end 0 case should be mythical).
*
* Wherever addr is brought up or ceiling brought down, we must
* be careful to reject "the opposite 0" before it confuses the
* subsequent tests. But what about where end is brought down
* by PMD_SIZE below? no, end can't go down to 0 there.
*
* Whereas we round start (addr) and ceiling down, by different
* masks at different levels, in order to test whether a table
* now has no other vmas using it, so can be freed, we don't
* bother to round floor or end up - the tests don't need that.
*/
addr &= PMD_MASK;
if (addr < floor) {
addr += PMD_SIZE;
if (!addr)
return;
}
if (ceiling) {
ceiling &= PMD_MASK;
if (!ceiling)
return;
}
if (end - 1 > ceiling - 1)
end -= PMD_SIZE;
if (addr > end - 1)
return;
pgd = pgd_offset(tlb->mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
free_pud_range(tlb, pgd, addr, next, floor, ceiling);
} while (pgd++, addr = next, addr != end);
}
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
unsigned long floor, unsigned long ceiling)
{
while (vma) {
struct vm_area_struct *next = vma->vm_next;
unsigned long addr = vma->vm_start;
/*
* Hide vma from rmap and truncate_pagecache before freeing
* pgtables
*/
unlink_anon_vmas(vma);
unlink_file_vma(vma);
if (is_vm_hugetlb_page(vma)) {
hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
floor, next? next->vm_start: ceiling);
} else {
/*
* Optimization: gather nearby vmas into one call down
*/
while (next && next->vm_start <= vma->vm_end + PMD_SIZE
&& !is_vm_hugetlb_page(next)) {
vma = next;
next = vma->vm_next;
unlink_anon_vmas(vma);
unlink_file_vma(vma);
}
free_pgd_range(tlb, addr, vma->vm_end,
floor, next? next->vm_start: ceiling);
}
vma = next;
}
}
int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long address)
{
spinlock_t *ptl;
pgtable_t new = pte_alloc_one(mm, address);
int wait_split_huge_page;
if (!new)
return -ENOMEM;
/*
* Ensure all pte setup (eg. pte page lock and page clearing) are
* visible before the pte is made visible to other CPUs by being
* put into page tables.
*
* The other side of the story is the pointer chasing in the page
* table walking code (when walking the page table without locking;
* ie. most of the time). Fortunately, these data accesses consist
* of a chain of data-dependent loads, meaning most CPUs (alpha
* being the notable exception) will already guarantee loads are
* seen in-order. See the alpha page table accessors for the
* smp_read_barrier_depends() barriers in page table walking code.
*/
smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
ptl = pmd_lock(mm, pmd);
wait_split_huge_page = 0;
if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
atomic_long_inc(&mm->nr_ptes);
pmd_populate(mm, pmd, new);
new = NULL;
} else if (unlikely(pmd_trans_splitting(*pmd)))
wait_split_huge_page = 1;
spin_unlock(ptl);
if (new)
pte_free(mm, new);
if (wait_split_huge_page)
wait_split_huge_page(vma->anon_vma, pmd);
return 0;
}
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
pte_t *new = pte_alloc_one_kernel(&init_mm, address);
if (!new)
return -ENOMEM;
smp_wmb(); /* See comment in __pte_alloc */
spin_lock(&init_mm.page_table_lock);
if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
pmd_populate_kernel(&init_mm, pmd, new);
new = NULL;
} else
VM_BUG_ON(pmd_trans_splitting(*pmd));
spin_unlock(&init_mm.page_table_lock);
if (new)
pte_free_kernel(&init_mm, new);
return 0;
}
static inline void init_rss_vec(int *rss)
{
memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}
static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
int i;
if (current->mm == mm)
sync_mm_rss(mm);
for (i = 0; i < NR_MM_COUNTERS; i++)
if (rss[i])
add_mm_counter(mm, i, rss[i]);
}
/*
* This function is called to print an error when a bad pte
* is found. For example, we might have a PFN-mapped pte in
* a region that doesn't allow it.
*
* The calling function must still handle the error.
*/
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
pte_t pte, struct page *page)
{
pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
pud_t *pud = pud_offset(pgd, addr);
pmd_t *pmd = pmd_offset(pud, addr);
struct address_space *mapping;
pgoff_t index;
static unsigned long resume;
static unsigned long nr_shown;
static unsigned long nr_unshown;
/*
* Allow a burst of 60 reports, then keep quiet for that minute;
* or allow a steady drip of one report per second.
*/
if (nr_shown == 60) {
if (time_before(jiffies, resume)) {
nr_unshown++;
return;
}
if (nr_unshown) {
printk(KERN_ALERT
"BUG: Bad page map: %lu messages suppressed\n",
nr_unshown);
nr_unshown = 0;
}
nr_shown = 0;
}
if (nr_shown++ == 0)
resume = jiffies + 60 * HZ;
mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
index = linear_page_index(vma, addr);
printk(KERN_ALERT
"BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n",
current->comm,
(long long)pte_val(pte), (long long)pmd_val(*pmd));
if (page)
dump_page(page, "bad pte");
printk(KERN_ALERT
"addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
(void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
/*
* Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
*/
if (vma->vm_ops)
printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
vma->vm_ops->fault);
if (vma->vm_file)
printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
vma->vm_file->f_op->mmap);
dump_stack();
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
/*
* vm_normal_page -- This function gets the "struct page" associated with a pte.
*
* "Special" mappings do not wish to be associated with a "struct page" (either
* it doesn't exist, or it exists but they don't want to touch it). In this
* case, NULL is returned here. "Normal" mappings do have a struct page.
*
* There are 2 broad cases. Firstly, an architecture may define a pte_special()
* pte bit, in which case this function is trivial. Secondly, an architecture
* may not have a spare pte bit, which requires a more complicated scheme,
* described below.
*
* A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
* special mapping (even if there are underlying and valid "struct pages").
* COWed pages of a VM_PFNMAP are always normal.
*
* The way we recognize COWed pages within VM_PFNMAP mappings is through the
* rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
* set, and the vm_pgoff will point to the first PFN mapped: thus every special
* mapping will always honor the rule
*
* pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
*
* And for normal mappings this is false.
*
* This restricts such mappings to be a linear translation from virtual address
* to pfn. To get around this restriction, we allow arbitrary mappings so long
* as the vma is not a COW mapping; in that case, we know that all ptes are
* special (because none can have been COWed).
*
*
* In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
*
* VM_MIXEDMAP mappings can likewise contain memory with or without "struct
* page" backing, however the difference is that _all_ pages with a struct
* page (that is, those where pfn_valid is true) are refcounted and considered
* normal pages by the VM. The disadvantage is that pages are refcounted
* (which can be slower and simply not an option for some PFNMAP users). The
* advantage is that we don't have to follow the strict linearity rule of
* PFNMAP mappings in order to support COWable mappings.
*
*/
#ifdef __HAVE_ARCH_PTE_SPECIAL
# define HAVE_PTE_SPECIAL 1
#else
# define HAVE_PTE_SPECIAL 0
#endif
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
if (HAVE_PTE_SPECIAL) {
if (likely(!pte_special(pte)))
goto check_pfn;
if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
return NULL;
if (!is_zero_pfn(pfn))
print_bad_pte(vma, addr, pte, NULL);
return NULL;
}
/* !HAVE_PTE_SPECIAL case follows: */
if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
if (vma->vm_flags & VM_MIXEDMAP) {
if (!pfn_valid(pfn))
return NULL;
goto out;
} else {
unsigned long off;
off = (addr - vma->vm_start) >> PAGE_SHIFT;
if (pfn == vma->vm_pgoff + off)
return NULL;
if (!is_cow_mapping(vma->vm_flags))
return NULL;
}
}
if (is_zero_pfn(pfn))
return NULL;
check_pfn:
if (unlikely(pfn > highest_memmap_pfn)) {
print_bad_pte(vma, addr, pte, NULL);
return NULL;
}
/*
* NOTE! We still have PageReserved() pages in the page tables.
* eg. VDSO mappings can cause them to exist.
*/
out:
return pfn_to_page(pfn);
}
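/*
 * Illustration of the VM_PFNMAP linearity rule documented above: for a
 * raw remap_pfn_range() mapping, every pte satisfies
 *
 *	pte_pfn(pte) == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * so in a COW mapping, a pte whose pfn has drifted off that line must be
 * a COWed (normal) page. Fragment of the test, as used in the
 * !HAVE_PTE_SPECIAL path:
 */
#if 0
unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
bool still_special = (pte_pfn(pte) == vma->vm_pgoff + off);
#endif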
/*
* copy one vm_area from one task to the other. Assumes the page tables
* already present in the new task to be cleared in the whole range
* covered by this vma.
*/
static inline unsigned long
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
unsigned long addr, int *rss)
{
unsigned long vm_flags = vma->vm_flags;
pte_t pte = *src_pte;
struct page *page;
/* pte contains position in swap or file, so copy. */
if (unlikely(!pte_present(pte))) {
if (!pte_file(pte)) {
swp_entry_t entry = pte_to_swp_entry(pte);
if (likely(!non_swap_entry(entry))) {
if (swap_duplicate(entry) < 0)
return entry.val;
/* make sure dst_mm is on swapoff's mmlist. */
if (unlikely(list_empty(&dst_mm->mmlist))) {
spin_lock(&mmlist_lock);
if (list_empty(&dst_mm->mmlist))
list_add(&dst_mm->mmlist,
&src_mm->mmlist);
spin_unlock(&mmlist_lock);
}
rss[MM_SWAPENTS]++;
} else if (is_migration_entry(entry)) {
page = migration_entry_to_page(entry);
if (PageAnon(page))
rss[MM_ANONPAGES]++;
else
rss[MM_FILEPAGES]++;
if (is_write_migration_entry(entry) &&
is_cow_mapping(vm_flags)) {
/*
* COW mappings require pages in both
* parent and child to be set to read.
*/
make_migration_entry_read(&entry);
pte = swp_entry_to_pte(entry);
if (pte_swp_soft_dirty(*src_pte))
pte = pte_swp_mksoft_dirty(pte);
set_pte_at(src_mm, addr, src_pte, pte);
}
}
}
goto out_set_pte;
}
/*
* If it's a COW mapping, write protect it both
* in the parent and the child
*/
if (is_cow_mapping(vm_flags)) {
ptep_set_wrprotect(src_mm, addr, src_pte);
pte = pte_wrprotect(pte);
}
/*
* If it's a shared mapping, mark it clean in
* the child
*/
if (vm_flags & VM_SHARED)
pte = pte_mkclean(pte);
pte = pte_mkold(pte);
page = vm_normal_page(vma, addr, pte);
if (page) {
get_page(page);
page_dup_rmap(page);
if (PageAnon(page))
rss[MM_ANONPAGES]++;
else
rss[MM_FILEPAGES]++;
}
out_set_pte:
set_pte_at(dst_mm, addr, dst_pte, pte);
return 0;
}
static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
pte_t *orig_src_pte, *orig_dst_pte;
pte_t *src_pte, *dst_pte;
spinlock_t *src_ptl, *dst_ptl;
int progress = 0;
int rss[NR_MM_COUNTERS];
swp_entry_t entry = (swp_entry_t){0};
again:
init_rss_vec(rss);
dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
if (!dst_pte)
return -ENOMEM;
src_pte = pte_offset_map(src_pmd, addr);
src_ptl = pte_lockptr(src_mm, src_pmd);
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
orig_src_pte = src_pte;
orig_dst_pte = dst_pte;
arch_enter_lazy_mmu_mode();
do {
/*
* We are holding two locks at this point - either of them
* could generate latencies in another task on another CPU.
*/
if (progress >= 32) {
progress = 0;
if (need_resched() ||
spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
break;
}
if (pte_none(*src_pte)) {
progress++;
continue;
}
entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
vma, addr, rss);
if (entry.val)
break;
progress += 8;
} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
spin_unlock(src_ptl);
pte_unmap(orig_src_pte);
add_mm_rss_vec(dst_mm, rss);
pte_unmap_unlock(orig_dst_pte, dst_ptl);
cond_resched();
if (entry.val) {
if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
return -ENOMEM;
progress = 0;
}
if (addr != end)
goto again;
return 0;
}
static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
pmd_t *src_pmd, *dst_pmd;
unsigned long next;
dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
if (!dst_pmd)
return -ENOMEM;
src_pmd = pmd_offset(src_pud, addr);
do {
next = pmd_addr_end(addr, end);
if (pmd_trans_huge(*src_pmd)) {
int err;
VM_BUG_ON(next-addr != HPAGE_PMD_SIZE);
err = copy_huge_pmd(dst_mm, src_mm,
dst_pmd, src_pmd, addr, vma);
if (err == -ENOMEM)
return -ENOMEM;
if (!err)
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(src_pmd))
continue;
if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
vma, addr, next))
return -ENOMEM;
} while (dst_pmd++, src_pmd++, addr = next, addr != end);
return 0;
}
static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
pud_t *src_pud, *dst_pud;
unsigned long next;
dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
if (!dst_pud)
return -ENOMEM;
src_pud = pud_offset(src_pgd, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(src_pud))
continue;
if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
vma, addr, next))
return -ENOMEM;
} while (dst_pud++, src_pud++, addr = next, addr != end);
return 0;
}
int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
struct vm_area_struct *vma)
{
pgd_t *src_pgd, *dst_pgd;
unsigned long next;
unsigned long addr = vma->vm_start;
unsigned long end = vma->vm_end;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
bool is_cow;
int ret;
/*
* Don't copy ptes where a page fault will fill them correctly.
* Fork becomes much lighter when there are big shared or private
* readonly mappings. The tradeoff is that copy_page_range is more
* efficient than faulting.
*/
if (!(vma->vm_flags & (VM_HUGETLB | VM_NONLINEAR |
VM_PFNMAP | VM_MIXEDMAP))) {
if (!vma->anon_vma)
return 0;
}
if (is_vm_hugetlb_page(vma))
return copy_hugetlb_page_range(dst_mm, src_mm, vma);
if (unlikely(vma->vm_flags & VM_PFNMAP)) {
/*
* We do not free on error cases below as remove_vma
* gets called on error from higher level routine
*/
ret = track_pfn_copy(vma);
if (ret)
return ret;
}
/*
* We need to invalidate the secondary MMU mappings only when
* there could be a permission downgrade on the ptes of the
* parent mm. And a permission downgrade will only happen if
* is_cow_mapping() returns true.
*/
is_cow = is_cow_mapping(vma->vm_flags);
mmun_start = addr;
mmun_end = end;
if (is_cow)
mmu_notifier_invalidate_range_start(src_mm, mmun_start,
mmun_end);
ret = 0;
dst_pgd = pgd_offset(dst_mm, addr);
src_pgd = pgd_offset(src_mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(src_pgd))
continue;
if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
vma, addr, next))) {
ret = -ENOMEM;
break;
}
} while (dst_pgd++, src_pgd++, addr = next, addr != end);
if (is_cow)
mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end);
return ret;
}
static unsigned long zap_pte_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
struct mm_struct *mm = tlb->mm;
int force_flush = 0;
int rss[NR_MM_COUNTERS];
spinlock_t *ptl;
pte_t *start_pte;
pte_t *pte;
again:
init_rss_vec(rss);
start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pte = start_pte;
arch_enter_lazy_mmu_mode();
do {
pte_t ptent = *pte;
if (pte_none(ptent)) {
continue;
}
if (pte_present(ptent)) {
struct page *page;
page = vm_normal_page(vma, addr, ptent);
if (unlikely(details) && page) {
/*
* unmap_shared_mapping_pages() wants to
* invalidate cache without truncating:
* unmap shared but keep private pages.
*/
if (details->check_mapping &&
details->check_mapping != page->mapping)
continue;
/*
* Each page->index must be checked when
* invalidating or truncating nonlinear.
*/
if (details->nonlinear_vma &&
(page->index < details->first_index ||
page->index > details->last_index))
continue;
}
ptent = ptep_get_and_clear_full(mm, addr, pte,
tlb->fullmm);
tlb_remove_tlb_entry(tlb, pte, addr);
if (unlikely(!page))
continue;
if (unlikely(details) && details->nonlinear_vma
&& linear_page_index(details->nonlinear_vma,
addr) != page->index) {
pte_t ptfile = pgoff_to_pte(page->index);
if (pte_soft_dirty(ptent))
ptfile = pte_file_mksoft_dirty(ptfile);
set_pte_at(mm, addr, pte, ptfile);
}
if (PageAnon(page))
rss[MM_ANONPAGES]--;
else {
if (pte_dirty(ptent)) {
force_flush = 1;
set_page_dirty(page);
}
if (pte_young(ptent) &&
likely(!(vma->vm_flags & VM_SEQ_READ)))
mark_page_accessed(page);
rss[MM_FILEPAGES]--;
}
page_remove_rmap(page);
if (unlikely(page_mapcount(page) < 0))
print_bad_pte(vma, addr, ptent, page);
if (unlikely(!__tlb_remove_page(tlb, page))) {
force_flush = 1;
addr += PAGE_SIZE;
break;
}
continue;
}
/*
* If details->check_mapping, we leave swap entries;
* if details->nonlinear_vma, we leave file entries.
*/
if (unlikely(details))
continue;
if (pte_file(ptent)) {
if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
print_bad_pte(vma, addr, ptent, NULL);
} else {
swp_entry_t entry = pte_to_swp_entry(ptent);
if (!non_swap_entry(entry))
rss[MM_SWAPENTS]--;
else if (is_migration_entry(entry)) {
struct page *page;
page = migration_entry_to_page(entry);
if (PageAnon(page))
rss[MM_ANONPAGES]--;
else
rss[MM_FILEPAGES]--;
}
if (unlikely(!free_swap_and_cache(entry)))
print_bad_pte(vma, addr, ptent, NULL);
}
pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
} while (pte++, addr += PAGE_SIZE, addr != end);
add_mm_rss_vec(mm, rss);
arch_leave_lazy_mmu_mode();
/* Do the actual TLB flush before dropping ptl */
if (force_flush)
tlb_flush_mmu_tlbonly(tlb);
pte_unmap_unlock(start_pte, ptl);
/*
* If we forced a TLB flush (either due to running out of
* batch buffers or because we needed to flush dirty TLB
* entries before releasing the ptl), free the batched
* memory too. Restart if we didn't do everything.
*/
if (force_flush) {
force_flush = 0;
tlb_flush_mmu_free(tlb);
if (addr != end)
goto again;
}
return addr;
}
static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pud_t *pud,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
pmd_t *pmd;
unsigned long next;
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
if (pmd_trans_huge(*pmd)) {
if (next - addr != HPAGE_PMD_SIZE) {
#ifdef CONFIG_DEBUG_VM
if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
__func__, addr, end,
vma->vm_start,
vma->vm_end);
BUG();
}
#endif
split_huge_page_pmd(vma, addr, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
goto next;
/* fall through */
}
/*
* Here there can be other concurrent MADV_DONTNEED or
* trans huge page faults running, and if the pmd is
* none or trans huge it can change under us. This is
* because MADV_DONTNEED holds the mmap_sem in read
* mode.
*/
if (pmd_none_or_trans_huge_or_clear_bad(pmd))
goto next;
next = zap_pte_range(tlb, vma, pmd, addr, next, details);
next:
cond_resched();
} while (pmd++, addr = next, addr != end);
return addr;
}
static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pgd_t *pgd,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
pud_t *pud;
unsigned long next;
pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
next = zap_pmd_range(tlb, vma, pud, addr, next, details);
} while (pud++, addr = next, addr != end);
return addr;
}
static void unmap_page_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
pgd_t *pgd;
unsigned long next;
if (details && !details->check_mapping && !details->nonlinear_vma)
details = NULL;
BUG_ON(addr >= end);
tlb_start_vma(tlb, vma);
pgd = pgd_offset(vma->vm_mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
next = zap_pud_range(tlb, vma, pgd, addr, next, details);
} while (pgd++, addr = next, addr != end);
tlb_end_vma(tlb, vma);
}
static void unmap_single_vma(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start_addr,
unsigned long end_addr,
struct zap_details *details)
{
unsigned long start = max(vma->vm_start, start_addr);
unsigned long end;
if (start >= vma->vm_end)
return;
end = min(vma->vm_end, end_addr);
if (end <= vma->vm_start)
return;
if (vma->vm_file)
uprobe_munmap(vma, start, end);
if (unlikely(vma->vm_flags & VM_PFNMAP))
untrack_pfn(vma, 0, 0);
if (start != end) {
if (unlikely(is_vm_hugetlb_page(vma))) {
/*
* It is undesirable to test vma->vm_file as it
* should be non-null for valid hugetlb area.
* However, vm_file will be NULL in the error
* cleanup path of mmap_region. When
* hugetlbfs ->mmap method fails,
* mmap_region() nullifies vma->vm_file
* before calling this function to clean up.
* Since no pte has actually been setup, it is
* safe to do nothing in this case.
*/
if (vma->vm_file) {
mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
}
} else
unmap_page_range(tlb, vma, start, end, details);
}
}
/**
* unmap_vmas - unmap a range of memory covered by a list of vma's
* @tlb: address of the caller's struct mmu_gather
* @vma: the starting vma
* @start_addr: virtual address at which to start unmapping
* @end_addr: virtual address at which to end unmapping
*
* Unmap all pages in the vma list.
*
* Only addresses between `start' and `end' will be unmapped.
*
* The VMA list must be sorted in ascending virtual address order.
*
* unmap_vmas() assumes that the caller will flush the whole unmapped address
* range after unmap_vmas() returns. So the only responsibility here is to
* ensure that any thus-far unmapped pages are flushed before unmap_vmas()
* drops the lock and schedules.
*/
void unmap_vmas(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start_addr,
unsigned long end_addr)
{
struct mm_struct *mm = vma->vm_mm;
mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
}
/**
* zap_page_range - remove user pages in a given range
* @vma: vm_area_struct holding the applicable pages
* @start: starting address of pages to zap
* @size: number of bytes to zap
* @details: details of nonlinear truncation or shared cache invalidation
*
* Caller must protect the VMA list
*/
void zap_page_range(struct vm_area_struct *vma, unsigned long start,
unsigned long size, struct zap_details *details)
{
struct mm_struct *mm = vma->vm_mm;
struct mmu_gather tlb;
unsigned long end = start + size;
lru_add_drain();
tlb_gather_mmu(&tlb, mm, start, end);
update_hiwater_rss(mm);
mmu_notifier_invalidate_range_start(mm, start, end);
for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
unmap_single_vma(&tlb, vma, start, end, details);
mmu_notifier_invalidate_range_end(mm, start, end);
tlb_finish_mmu(&tlb, start, end);
}
/**
* zap_page_range_single - remove user pages in a given range
* @vma: vm_area_struct holding the applicable pages
* @address: starting address of pages to zap
* @size: number of bytes to zap
* @details: details of nonlinear truncation or shared cache invalidation
*
* The range must fit into one VMA.
*/
static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *details)
{
struct mm_struct *mm = vma->vm_mm;
struct mmu_gather tlb;
unsigned long end = address + size;
lru_add_drain();
tlb_gather_mmu(&tlb, mm, address, end);
update_hiwater_rss(mm);
mmu_notifier_invalidate_range_start(mm, address, end);
unmap_single_vma(&tlb, vma, address, end, details);
mmu_notifier_invalidate_range_end(mm, address, end);
tlb_finish_mmu(&tlb, address, end);
}
/**
* zap_vma_ptes - remove ptes mapping the vma
* @vma: vm_area_struct holding ptes to be zapped
* @address: starting address of pages to zap
* @size: number of bytes to zap
*
* This function only unmaps ptes assigned to VM_PFNMAP vmas.
*
* The entire address range must be fully contained within the vma.
*
* Returns 0 if successful.
*/
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size)
{
if (address < vma->vm_start || address + size > vma->vm_end ||
!(vma->vm_flags & VM_PFNMAP))
return -1;
zap_page_range_single(vma, address, size, NULL);
return 0;
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);
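/*
 * Hedged usage fragment: a driver tearing down a VM_PFNMAP mapping it
 * established earlier would revoke the whole range like this.
 */
#if 0
err = zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
#endif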
pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl)
{
pgd_t * pgd = pgd_offset(mm, addr);
pud_t * pud = pud_alloc(mm, pgd, addr);
if (pud) {
pmd_t * pmd = pmd_alloc(mm, pud, addr);
if (pmd) {
VM_BUG_ON(pmd_trans_huge(*pmd));
return pte_alloc_map_lock(mm, pmd, addr, ptl);
}
}
return NULL;
}
/*
* This is the old fallback for page remapping.
*
* For historical reasons, it only allows reserved pages. Only
* old drivers should use this, and they needed to mark their
* pages reserved for the old functions anyway.
*/
static int insert_page(struct vm_area_struct *vma, unsigned long addr,
struct page *page, pgprot_t prot)
{
struct mm_struct *mm = vma->vm_mm;
int retval;
pte_t *pte;
spinlock_t *ptl;
retval = -EINVAL;
if (PageAnon(page))
goto out;
retval = -ENOMEM;
flush_dcache_page(page);
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
goto out;
retval = -EBUSY;
if (!pte_none(*pte))
goto out_unlock;
/* Ok, finally just insert the thing.. */
get_page(page);
inc_mm_counter_fast(mm, MM_FILEPAGES);
page_add_file_rmap(page);
set_pte_at(mm, addr, pte, mk_pte(page, prot));
retval = 0;
pte_unmap_unlock(pte, ptl);
return retval;
out_unlock:
pte_unmap_unlock(pte, ptl);
out:
return retval;
}
/**
* vm_insert_page - insert single page into user vma
* @vma: user vma to map to
* @addr: target user address of this page
* @page: source kernel page
*
* This allows drivers to insert individual pages they've allocated
* into a user vma.
*
* The page has to be a nice clean _individual_ kernel allocation.
* If you allocate a compound page, you need to have marked it as
* such (__GFP_COMP), or manually just split the page up yourself
* (see split_page()).
*
* NOTE! Traditionally this was done with "remap_pfn_range()" which
* took an arbitrary page protection parameter. This doesn't allow
* that. Your vma protection will have to be set up correctly, which
* means that if you want a shared writable mapping, you'd better
* ask for a shared writable mapping!
*
* The page does not need to be reserved.
*
* Usually this function is called from f_op->mmap() handler
* under mm->mmap_sem write-lock, so it can change vma->vm_flags.
* Caller must set VM_MIXEDMAP on vma if it wants to call this
* function from other places, for example from page-fault handler.
*/
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
struct page *page)
{
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
if (!page_count(page))
return -EINVAL;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
BUG_ON(vma->vm_flags & VM_PFNMAP);
vma->vm_flags |= VM_MIXEDMAP;
}
return insert_page(vma, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);
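/*
 * Usage sketch (hypothetical driver; "struct mydrv" and its "page"
 * member are assumed to exist in the driver): a typical f_op->mmap()
 * handler handing a single kernel-allocated page to userspace.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *drv = file->private_data;
 *
 *		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
 *			return -EINVAL;
 *		// Called under the mmap_sem write-lock from ->mmap(), so
 *		// vm_insert_page() may set VM_MIXEDMAP for us.
 *		return vm_insert_page(vma, vma->vm_start, drv->page);
 *	}
 */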
static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t prot)
{
struct mm_struct *mm = vma->vm_mm;
int retval;
pte_t *pte, entry;
spinlock_t *ptl;
retval = -ENOMEM;
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
goto out;
retval = -EBUSY;
if (!pte_none(*pte))
goto out_unlock;
/* Ok, finally just insert the thing.. */
entry = pte_mkspecial(pfn_pte(pfn, prot));
set_pte_at(mm, addr, pte, entry);
update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
retval = 0;
out_unlock:
pte_unmap_unlock(pte, ptl);
out:
return retval;
}
/**
* vm_insert_pfn - insert single pfn into user vma
* @vma: user vma to map to
* @addr: target user address of this page
* @pfn: source kernel pfn
*
* Similar to vm_insert_page, this allows drivers to insert individual pages
* they've allocated into a user vma. Same comments apply.
*
 * This function should only be called from a vm_ops->fault handler, and
 * in that case the handler should return VM_FAULT_NOPAGE, since the pte
 * has already been installed here.
*
* vma cannot be a COW mapping.
*
* As this is called only for pages that do not currently exist, we
* do not need to flush old virtual caches or the TLB.
*/
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
int ret;
pgprot_t pgprot = vma->vm_page_prot;
/*
* Technically, architectures with pte_special can avoid all these
* restrictions (same for remap_pfn_range). However we would like
* consistency in testing and feature parity among all, so we should
* try to keep these invariants in place for everybody.
*/
BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
(VM_PFNMAP|VM_MIXEDMAP));
BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
if (track_pfn_insert(vma, &pgprot, pfn))
return -EINVAL;
ret = insert_pfn(vma, addr, pfn, pgprot);
return ret;
}
EXPORT_SYMBOL(vm_insert_pfn);
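/*
 * Usage sketch (hypothetical; mydrv_addr_to_pfn() is an assumed helper):
 * a vm_ops->fault handler that installs a raw pfn and reports the pte
 * as already set up by returning VM_FAULT_NOPAGE.
 *
 *	static int mydrv_fault(struct vm_area_struct *vma,
 *			       struct vm_fault *vmf)
 *	{
 *		unsigned long addr = (unsigned long)vmf->virtual_address;
 *		unsigned long pfn = mydrv_addr_to_pfn(vma, addr);
 *
 *		if (vm_insert_pfn(vma, addr, pfn))
 *			return VM_FAULT_SIGBUS;
 *		return VM_FAULT_NOPAGE;	// pte installed, no struct page
 *	}
 */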
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
/*
* If we don't have pte special, then we have to use the pfn_valid()
* based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
* refcount the page if pfn_valid is true (hence insert_page rather
 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
 * mapping without pte special, it would then be refcounted as a normal
 * page.
*/
if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
struct page *page;
page = pfn_to_page(pfn);
return insert_page(vma, addr, page, vma->vm_page_prot);
}
return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_mixed);
/*
* maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages result
* in null mappings (currently treated as "copy-on-access")
*/
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
pte_t *pte;
spinlock_t *ptl;
pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return -ENOMEM;
arch_enter_lazy_mmu_mode();
do {
BUG_ON(!pte_none(*pte));
set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(pte - 1, ptl);
return 0;
}
static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
unsigned long addr, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
pmd_t *pmd;
unsigned long next;
pfn -= addr >> PAGE_SHIFT;
pmd = pmd_alloc(mm, pud, addr);
if (!pmd)
return -ENOMEM;
VM_BUG_ON(pmd_trans_huge(*pmd));
do {
next = pmd_addr_end(addr, end);
if (remap_pte_range(mm, pmd, addr, next,
pfn + (addr >> PAGE_SHIFT), prot))
return -ENOMEM;
} while (pmd++, addr = next, addr != end);
return 0;
}
static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long addr, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
pud_t *pud;
unsigned long next;
pfn -= addr >> PAGE_SHIFT;
pud = pud_alloc(mm, pgd, addr);
if (!pud)
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
if (remap_pmd_range(mm, pud, addr, next,
pfn + (addr >> PAGE_SHIFT), prot))
return -ENOMEM;
} while (pud++, addr = next, addr != end);
return 0;
}
/**
* remap_pfn_range - remap kernel memory to userspace
* @vma: user vma to map to
* @addr: target user address to start at
* @pfn: physical address of kernel memory
* @size: size of map area
* @prot: page protection flags for this mapping
*
* Note: this is only safe if the mm semaphore is held when called.
*/
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
pgd_t *pgd;
unsigned long next;
unsigned long end = addr + PAGE_ALIGN(size);
struct mm_struct *mm = vma->vm_mm;
int err;
/*
* Physically remapped pages are special. Tell the
* rest of the world about it:
* VM_IO tells people not to look at these pages
* (accesses can have side effects).
* VM_PFNMAP tells the core MM that the base pages are just
* raw PFN mappings, and do not have a "struct page" associated
* with them.
* VM_DONTEXPAND
* Disable vma merging and expanding with mremap().
* VM_DONTDUMP
 * Omit vma from core dump, even when VM_IO is turned off.
*
* There's a horrible special case to handle copy-on-write
* behaviour that some programs depend on. We mark the "original"
* un-COW'ed pages by matching them up with "vma->vm_pgoff".
* See vm_normal_page() for details.
*/
if (is_cow_mapping(vma->vm_flags)) {
if (addr != vma->vm_start || end != vma->vm_end)
return -EINVAL;
vma->vm_pgoff = pfn;
}
err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
if (err)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
BUG_ON(addr >= end);
pfn -= addr >> PAGE_SHIFT;
pgd = pgd_offset(mm, addr);
flush_cache_range(vma, addr, end);
do {
next = pgd_addr_end(addr, end);
err = remap_pud_range(mm, pgd, addr, next,
pfn + (addr >> PAGE_SHIFT), prot);
if (err)
break;
} while (pgd++, addr = next, addr != end);
if (err)
untrack_pfn(vma, pfn, PAGE_ALIGN(size));
return err;
}
EXPORT_SYMBOL(remap_pfn_range);
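/*
 * Usage sketch (hypothetical driver mmap handler; mydrv_phys_base is an
 * assumed device address, and range validation is left to the caller):
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = (mydrv_phys_base >> PAGE_SHIFT)
 *					+ vma->vm_pgoff;
 *
 *		return remap_pfn_range(vma, vma->vm_start, pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */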
/**
* vm_iomap_memory - remap memory to userspace
* @vma: user vma to map to
* @start: start of area
* @len: size of area
*
* This is a simplified io_remap_pfn_range() for common driver use. The
* driver just needs to give us the physical memory range to be mapped,
* we'll figure out the rest from the vma information.
*
 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to set
 * up write-combining or similar before calling this.
*/
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
unsigned long vm_len, pfn, pages;
/* Check that the physical memory area passed in looks valid */
if (start + len < start)
return -EINVAL;
/*
* You *really* shouldn't map things that aren't page-aligned,
* but we've historically allowed it because IO memory might
* just have smaller alignment.
*/
len += start & ~PAGE_MASK;
pfn = start >> PAGE_SHIFT;
pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
if (pfn + pages < pfn)
return -EINVAL;
/* We start the mapping 'vm_pgoff' pages into the area */
if (vma->vm_pgoff > pages)
return -EINVAL;
pfn += vma->vm_pgoff;
pages -= vma->vm_pgoff;
/* Can we fit all of the mapping? */
vm_len = vma->vm_end - vma->vm_start;
if (vm_len >> PAGE_SHIFT > pages)
return -EINVAL;
/* Ok, let it rip */
return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_iomap_memory);
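/*
 * Usage sketch (hypothetical driver with one MMIO region; the
 * "drv->mmio_*" fields are assumed): compare with the open-coded
 * remap_pfn_range() call above; here the pgoff/size sanity checks are
 * done for us.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *drv = file->private_data;
 *
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return vm_iomap_memory(vma, drv->mmio_start, drv->mmio_len);
 *	}
 */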
static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data)
{
pte_t *pte;
int err;
pgtable_t token;
spinlock_t *uninitialized_var(ptl);
pte = (mm == &init_mm) ?
pte_alloc_kernel(pmd, addr) :
pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return -ENOMEM;
BUG_ON(pmd_huge(*pmd));
arch_enter_lazy_mmu_mode();
token = pmd_pgtable(*pmd);
do {
err = fn(pte++, token, addr, data);
if (err)
break;
} while (addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
if (mm != &init_mm)
pte_unmap_unlock(pte-1, ptl);
return err;
}
static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data)
{
pmd_t *pmd;
unsigned long next;
int err;
BUG_ON(pud_huge(*pud));
pmd = pmd_alloc(mm, pud, addr);
if (!pmd)
return -ENOMEM;
do {
next = pmd_addr_end(addr, end);
err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
if (err)
break;
} while (pmd++, addr = next, addr != end);
return err;
}
static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data)
{
pud_t *pud;
unsigned long next;
int err;
pud = pud_alloc(mm, pgd, addr);
if (!pud)
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
if (err)
break;
} while (pud++, addr = next, addr != end);
return err;
}
/*
* Scan a region of virtual memory, filling in page tables as necessary
* and calling a provided function on each leaf page table.
*/
int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
unsigned long size, pte_fn_t fn, void *data)
{
pgd_t *pgd;
unsigned long next;
unsigned long end = addr + size;
int err;
BUG_ON(addr >= end);
pgd = pgd_offset(mm, addr);
do {
next = pgd_addr_end(addr, end);
err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
if (err)
break;
} while (pgd++, addr = next, addr != end);
return err;
}
EXPORT_SYMBOL_GPL(apply_to_page_range);
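/*
 * Sketch of a pte_fn_t callback (illustrative only): count the present
 * ptes in a range. Note that apply_to_page_range() allocates page
 * tables for any holes it walks, so this is best used on ranges that
 * are already populated.
 *
 *	static int count_present_pte(pte_t *pte, pgtable_t token,
 *				     unsigned long addr, void *data)
 *	{
 *		if (pte_present(*pte))
 *			(*(unsigned long *)data)++;
 *		return 0;	// non-zero would abort the walk
 *	}
 *
 *	unsigned long n = 0;
 *	apply_to_page_range(mm, start, len, count_present_pte, &n);
 */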
/*
* handle_pte_fault chooses page fault handler according to an entry
* which was read non-atomically. Before making any commitment, on
* those architectures or configurations (e.g. i386 with PAE) which
* might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault
* must check under lock before unmapping the pte and proceeding
* (but do_wp_page is only called after already making such a check;
* and do_anonymous_page can safely check later on).
*/
static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
pte_t *page_table, pte_t orig_pte)
{
int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
if (sizeof(pte_t) > sizeof(unsigned long)) {
spinlock_t *ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
same = pte_same(*page_table, orig_pte);
spin_unlock(ptl);
}
#endif
pte_unmap(page_table);
return same;
}
static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
{
debug_dma_assert_idle(src);
/*
* If the source page was a PFN mapping, we don't have
* a "struct page" for it. We do a best-effort copy by
* just copying from the original user address. If that
* fails, we just zero-fill it. Live with it.
*/
if (unlikely(!src)) {
void *kaddr = kmap_atomic(dst);
void __user *uaddr = (void __user *)(va & PAGE_MASK);
/*
* This really shouldn't fail, because the page is there
* in the page tables. But it might just be unreadable,
* in which case we just give up and fill the result with
* zeroes.
*/
if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
clear_page(kaddr);
kunmap_atomic(kaddr);
flush_dcache_page(dst);
} else
copy_user_highpage(dst, src, va, vma);
}
/*
* Notify the address space that the page is about to become writable so that
* it can prohibit this or wait for the page to get into an appropriate state.
*
* We do this without the lock held, so that it can sleep if it needs to.
*/
static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
unsigned long address)
{
struct vm_fault vmf;
int ret;
vmf.virtual_address = (void __user *)(address & PAGE_MASK);
vmf.pgoff = page->index;
vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
vmf.page = page;
ret = vma->vm_ops->page_mkwrite(vma, &vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
return ret;
if (unlikely(!(ret & VM_FAULT_LOCKED))) {
lock_page(page);
if (!page->mapping) {
unlock_page(page);
return 0; /* retry */
}
ret |= VM_FAULT_LOCKED;
} else
VM_BUG_ON_PAGE(!PageLocked(page), page);
return ret;
}
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
* and decrementing the shared-page counter for the old page.
*
* Note that this routine assumes that the protection checks have been
* done by the caller (the low-level page fault routine in most cases).
* Thus we can safely just mark it writable once we've done any necessary
* COW.
*
* We also mark the page dirty at this point even though the page will
* change only once the write actually happens. This avoids a few races,
* and potentially makes it more efficient.
*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), with pte both mapped and locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
*/
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
spinlock_t *ptl, pte_t orig_pte)
__releases(ptl)
{
struct page *old_page, *new_page = NULL;
pte_t entry;
int ret = 0;
int page_mkwrite = 0;
struct page *dirty_page = NULL;
unsigned long mmun_start = 0; /* For mmu_notifiers */
unsigned long mmun_end = 0; /* For mmu_notifiers */
struct mem_cgroup *memcg;
old_page = vm_normal_page(vma, address, orig_pte);
if (!old_page) {
/*
* VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
* VM_PFNMAP VMA.
*
* We should not cow pages in a shared writeable mapping.
* Just mark the pages writable as we can't do any dirty
* accounting on raw pfn maps.
*/
if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
(VM_WRITE|VM_SHARED))
goto reuse;
goto gotten;
}
/*
* Take out anonymous pages first, anonymous shared vmas are
* not dirty accountable.
*/
if (PageAnon(old_page) && !PageKsm(old_page)) {
if (!trylock_page(old_page)) {
page_cache_get(old_page);
pte_unmap_unlock(page_table, ptl);
lock_page(old_page);
page_table = pte_offset_map_lock(mm, pmd, address,
&ptl);
if (!pte_same(*page_table, orig_pte)) {
unlock_page(old_page);
goto unlock;
}
page_cache_release(old_page);
}
if (reuse_swap_page(old_page)) {
/*
* The page is all ours. Move it to our anon_vma so
* the rmap code will not search our parent or siblings.
* Protected against the rmap code by the page lock.
*/
page_move_anon_rmap(old_page, vma, address);
unlock_page(old_page);
goto reuse;
}
unlock_page(old_page);
} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
(VM_WRITE|VM_SHARED))) {
/*
* Only catch write-faults on shared writable pages,
* read-only shared pages can get COWed by
* get_user_pages(.write=1, .force=1).
*/
if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
int tmp;
page_cache_get(old_page);
pte_unmap_unlock(page_table, ptl);
tmp = do_page_mkwrite(vma, old_page, address);
if (unlikely(!tmp || (tmp &
(VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
page_cache_release(old_page);
return tmp;
}
/*
* Since we dropped the lock we need to revalidate
* the PTE as someone else may have changed it. If
* they did, we just return, as we can count on the
* MMU to tell us if they didn't also make it writable.
*/
page_table = pte_offset_map_lock(mm, pmd, address,
&ptl);
if (!pte_same(*page_table, orig_pte)) {
unlock_page(old_page);
goto unlock;
}
page_mkwrite = 1;
}
dirty_page = old_page;
get_page(dirty_page);
reuse:
/*
 * Clear the page's cpupid information as the existing
* information potentially belongs to a now completely
* unrelated process.
*/
if (old_page)
page_cpupid_xchg_last(old_page, (1 << LAST_CPUPID_SHIFT) - 1);
flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = pte_mkyoung(orig_pte);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
if (ptep_set_access_flags(vma, address, page_table, entry,1))
update_mmu_cache(vma, address, page_table);
pte_unmap_unlock(page_table, ptl);
ret |= VM_FAULT_WRITE;
if (!dirty_page)
return ret;
if (!page_mkwrite) {
struct address_space *mapping;
int dirtied;
lock_page(dirty_page);
dirtied = set_page_dirty(dirty_page);
VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page);
mapping = dirty_page->mapping;
unlock_page(dirty_page);
if (dirtied && mapping) {
/*
* Some device drivers do not set page.mapping
* but still dirty their pages
*/
balance_dirty_pages_ratelimited(mapping);
}
/* file_update_time outside page_lock */
if (vma->vm_file)
file_update_time(vma->vm_file);
}
put_page(dirty_page);
if (page_mkwrite) {
struct address_space *mapping = dirty_page->mapping;
set_page_dirty(dirty_page);
unlock_page(dirty_page);
page_cache_release(dirty_page);
if (mapping) {
/*
* Some device drivers do not set page.mapping
* but still dirty their pages
*/
balance_dirty_pages_ratelimited(mapping);
}
}
return ret;
}
/*
* Ok, we need to copy. Oh, well..
*/
page_cache_get(old_page);
gotten:
pte_unmap_unlock(page_table, ptl);
if (unlikely(anon_vma_prepare(vma)))
goto oom;
if (is_zero_pfn(pte_pfn(orig_pte))) {
new_page = alloc_zeroed_user_highpage_movable(vma, address);
if (!new_page)
goto oom;
} else {
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
if (!new_page)
goto oom;
cow_user_page(new_page, old_page, address, vma);
}
__SetPageUptodate(new_page);
if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg))
goto oom_free_new;
mmun_start = address & PAGE_MASK;
mmun_end = mmun_start + PAGE_SIZE;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
/*
* Re-check the pte - we dropped the lock
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter_fast(mm, MM_FILEPAGES);
inc_mm_counter_fast(mm, MM_ANONPAGES);
}
} else
inc_mm_counter_fast(mm, MM_ANONPAGES);
flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = mk_pte(new_page, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
/*
* Clear the pte entry and flush it first, before updating the
* pte with the new entry. This will avoid a race condition
* seen in the presence of one thread doing SMC and another
* thread doing COW.
*/
ptep_clear_flush(vma, address, page_table);
page_add_new_anon_rmap(new_page, vma, address);
mem_cgroup_commit_charge(new_page, memcg, false);
lru_cache_add_active_or_unevictable(new_page, vma);
/*
* We call the notify macro here because, when using secondary
* mmu page tables (such as kvm shadow page tables), we want the
* new page to be mapped directly into the secondary page table.
*/
set_pte_at_notify(mm, address, page_table, entry);
update_mmu_cache(vma, address, page_table);
if (old_page) {
/*
* Only after switching the pte to the new page may
* we remove the mapcount here. Otherwise another
* process may come and find the rmap count decremented
* before the pte is switched to the new page, and
* "reuse" the old page writing into it while our pte
* here still points into it and can be read by other
* threads.
*
 * The critical issue is to order this
 * page_remove_rmap with the ptep_clear_flush above.
* Those stores are ordered by (if nothing else,)
* the barrier present in the atomic_add_negative
* in page_remove_rmap.
*
* Then the TLB flush in ptep_clear_flush ensures that
* no process can access the old page before the
* decremented mapcount is visible. And the old page
* cannot be reused until after the decremented
* mapcount is visible. So transitively, TLBs to
* old page will be flushed before it can be reused.
*/
page_remove_rmap(old_page);
}
/* Free the old page.. */
new_page = old_page;
ret |= VM_FAULT_WRITE;
} else
mem_cgroup_cancel_charge(new_page, memcg);
if (new_page)
page_cache_release(new_page);
unlock:
pte_unmap_unlock(page_table, ptl);
if (mmun_end > mmun_start)
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
if (old_page) {
/*
* Don't let another task, with possibly unlocked vma,
* keep the mlocked page.
*/
if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) {
lock_page(old_page); /* LRU manipulation */
munlock_vma_page(old_page);
unlock_page(old_page);
}
page_cache_release(old_page);
}
return ret;
oom_free_new:
page_cache_release(new_page);
oom:
if (old_page)
page_cache_release(old_page);
return VM_FAULT_OOM;
}
static void unmap_mapping_range_vma(struct vm_area_struct *vma,
unsigned long start_addr, unsigned long end_addr,
struct zap_details *details)
{
zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
}
static inline void unmap_mapping_range_tree(struct rb_root *root,
struct zap_details *details)
{
struct vm_area_struct *vma;
pgoff_t vba, vea, zba, zea;
vma_interval_tree_foreach(vma, root,
details->first_index, details->last_index) {
vba = vma->vm_pgoff;
vea = vba + vma_pages(vma) - 1;
/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
zba = details->first_index;
if (zba < vba)
zba = vba;
zea = details->last_index;
if (zea > vea)
zea = vea;
unmap_mapping_range_vma(vma,
((zba - vba) << PAGE_SHIFT) + vma->vm_start,
((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
details);
}
}
static inline void unmap_mapping_range_list(struct list_head *head,
struct zap_details *details)
{
struct vm_area_struct *vma;
/*
* In nonlinear VMAs there is no correspondence between virtual address
* offset and file offset. So we must perform an exhaustive search
* across *all* the pages in each nonlinear VMA, not just the pages
* whose virtual address lies outside the file truncation point.
*/
list_for_each_entry(vma, head, shared.nonlinear) {
details->nonlinear_vma = vma;
unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details);
}
}
/**
* unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
* @mapping: the address space containing mmaps to be unmapped.
* @holebegin: byte in first page to unmap, relative to the start of
* the underlying file. This will be rounded down to a PAGE_SIZE
* boundary. Note that this is different from truncate_pagecache(), which
* must keep the partial page. In contrast, we must get rid of
* partial pages.
* @holelen: size of prospective hole in bytes. This will be rounded
* up to a PAGE_SIZE boundary. A holelen of zero truncates to the
* end of the file.
 * @even_cows: 1 when truncating a file, in which case even private COWed
 * pages are unmapped; 0 when invalidating pagecache, in which case
 * private data is preserved.
*/
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows)
{
struct zap_details details;
pgoff_t hba = holebegin >> PAGE_SHIFT;
pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* Check for overflow. */
if (sizeof(holelen) > sizeof(hlen)) {
long long holeend =
(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (holeend & ~(long long)ULONG_MAX)
hlen = ULONG_MAX - hba + 1;
}
	details.check_mapping = even_cows ? NULL : mapping;
details.nonlinear_vma = NULL;
details.first_index = hba;
details.last_index = hba + hlen - 1;
if (details.last_index < details.first_index)
details.last_index = ULONG_MAX;
mutex_lock(&mapping->i_mmap_mutex);
if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
unmap_mapping_range_tree(&mapping->i_mmap, &details);
if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
mutex_unlock(&mapping->i_mmap_mutex);
}
EXPORT_SYMBOL(unmap_mapping_range);
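/*
 * Truncate-style usage sketch (this mirrors what truncate_pagecache()
 * does; "inode" and "newsize" are the obvious assumptions): zap every
 * mapping of the file from newsize onwards. The partial last page must
 * be kept, hence the round_up(); even_cows=1 drops private COW copies
 * as well.
 *
 *	unmap_mapping_range(inode->i_mapping,
 *			    round_up(newsize, PAGE_SIZE), 0, 1);
 */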
/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with pte unmapped and unlocked.
*
* We return with the mmap_sem locked or unlocked in the same cases
* as does filemap_fault().
*/
static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags, pte_t orig_pte)
{
spinlock_t *ptl;
struct page *page, *swapcache;
struct mem_cgroup *memcg;
swp_entry_t entry;
pte_t pte;
int locked;
int exclusive = 0;
int ret = 0;
if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
goto out;
entry = pte_to_swp_entry(orig_pte);
if (unlikely(non_swap_entry(entry))) {
if (is_migration_entry(entry)) {
migration_entry_wait(mm, pmd, address);
} else if (is_hwpoison_entry(entry)) {
ret = VM_FAULT_HWPOISON;
} else {
print_bad_pte(vma, address, orig_pte, NULL);
ret = VM_FAULT_SIGBUS;
}
goto out;
}
delayacct_set_flag(DELAYACCT_PF_SWAPIN);
page = lookup_swap_cache(entry);
if (!page) {
page = swapin_readahead(entry,
GFP_HIGHUSER_MOVABLE, vma, address);
if (!page) {
/*
* Back out if somebody else faulted in this pte
* while we released the pte lock.
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte)))
ret = VM_FAULT_OOM;
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
goto unlock;
}
/* Had to read the page from swap area: Major fault */
ret = VM_FAULT_MAJOR;
count_vm_event(PGMAJFAULT);
mem_cgroup_count_vm_event(mm, PGMAJFAULT);
} else if (PageHWPoison(page)) {
/*
* hwpoisoned dirty swapcache pages are kept for killing
* owner processes (which may be unknown at hwpoison time)
*/
ret = VM_FAULT_HWPOISON;
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
swapcache = page;
goto out_release;
}
swapcache = page;
locked = lock_page_or_retry(page, mm, flags);
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
if (!locked) {
ret |= VM_FAULT_RETRY;
goto out_release;
}
/*
* Make sure try_to_free_swap or reuse_swap_page or swapoff did not
* release the swapcache from under us. The page pin, and pte_same
* test below, are not enough to exclude that. Even if it is still
* swapcache, we need to check that the page's swap has not changed.
*/
if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
goto out_page;
page = ksm_might_need_to_copy(page, vma, address);
if (unlikely(!page)) {
ret = VM_FAULT_OOM;
page = swapcache;
goto out_page;
}
if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg)) {
ret = VM_FAULT_OOM;
goto out_page;
}
/*
* Back out if somebody else already faulted in this pte.
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (unlikely(!pte_same(*page_table, orig_pte)))
goto out_nomap;
if (unlikely(!PageUptodate(page))) {
ret = VM_FAULT_SIGBUS;
goto out_nomap;
}
/*
* The page isn't present yet, go ahead with the fault.
*
* Be careful about the sequence of operations here.
* To get its accounting right, reuse_swap_page() must be called
* while the page is counted on swap but not yet in mapcount i.e.
* before page_add_anon_rmap() and swap_free(); try_to_free_swap()
* must be called after the swap_free(), or it will never succeed.
*/
inc_mm_counter_fast(mm, MM_ANONPAGES);
dec_mm_counter_fast(mm, MM_SWAPENTS);
pte = mk_pte(page, vma->vm_page_prot);
if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
flags &= ~FAULT_FLAG_WRITE;
ret |= VM_FAULT_WRITE;
exclusive = 1;
}
flush_icache_page(vma, page);
if (pte_swp_soft_dirty(orig_pte))
pte = pte_mksoft_dirty(pte);
set_pte_at(mm, address, page_table, pte);
if (page == swapcache) {
do_page_add_anon_rmap(page, vma, address, exclusive);
mem_cgroup_commit_charge(page, memcg, true);
} else { /* ksm created a completely new copy */
page_add_new_anon_rmap(page, vma, address);
mem_cgroup_commit_charge(page, memcg, false);
lru_cache_add_active_or_unevictable(page, vma);
}
swap_free(entry);
if ((PageSwapCache(page) && vm_swap_full(page_swap_info(page))) ||
(vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
unlock_page(page);
if (page != swapcache) {
/*
		 * Hold the lock to prevent the swap entry from being reused
* until we take the PT lock for the pte_same() check
* (to avoid false positives from pte_same). For
* further safety release the lock after the swap_free
* so that the swap count won't change under a
* parallel locked swapcache.
*/
unlock_page(swapcache);
page_cache_release(swapcache);
}
if (flags & FAULT_FLAG_WRITE) {
ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
if (ret & VM_FAULT_ERROR)
ret &= VM_FAULT_ERROR;
goto out;
}
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
unlock:
pte_unmap_unlock(page_table, ptl);
out:
return ret;
out_nomap:
mem_cgroup_cancel_charge(page, memcg);
pte_unmap_unlock(page_table, ptl);
out_page:
unlock_page(page);
out_release:
page_cache_release(page);
if (page != swapcache) {
unlock_page(swapcache);
page_cache_release(swapcache);
}
return ret;
}
/*
* This is like a special single-page "expand_{down|up}wards()",
* except we must first make sure that 'address{-|+}PAGE_SIZE'
* doesn't hit another vma.
*/
static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
{
address &= PAGE_MASK;
if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
struct vm_area_struct *prev = vma->vm_prev;
/*
* Is there a mapping abutting this one below?
*
* That's only ok if it's the same stack mapping
* that has gotten split..
*/
if (prev && prev->vm_end == address)
return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
return expand_downwards(vma, address - PAGE_SIZE);
}
if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
struct vm_area_struct *next = vma->vm_next;
/* As VM_GROWSDOWN but s/below/above/ */
if (next && next->vm_start == address + PAGE_SIZE)
return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
return expand_upwards(vma, address + PAGE_SIZE);
}
return 0;
}
/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
*/
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags)
{
struct mem_cgroup *memcg;
struct page *page;
spinlock_t *ptl;
pte_t entry;
pte_unmap(page_table);
/* Check if we need to add a guard page to the stack */
if (check_stack_guard_page(vma, address) < 0)
return VM_FAULT_SIGSEGV;
/* Use the zero-page for reads */
if (!(flags & FAULT_FLAG_WRITE)) {
entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
vma->vm_page_prot));
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (!pte_none(*page_table))
goto unlock;
goto setpte;
}
/* Allocate our own private page. */
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, address);
if (!page)
goto oom;
/*
* The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
* the set_pte_at() write.
*/
__SetPageUptodate(page);
if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
goto oom_free_page;
entry = mk_pte(page, vma->vm_page_prot);
if (vma->vm_flags & VM_WRITE)
entry = pte_mkwrite(pte_mkdirty(entry));
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (!pte_none(*page_table))
goto release;
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
mem_cgroup_commit_charge(page, memcg, false);
lru_cache_add_active_or_unevictable(page, vma);
setpte:
set_pte_at(mm, address, page_table, entry);
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
release:
mem_cgroup_cancel_charge(page, memcg);
page_cache_release(page);
goto unlock;
oom_free_page:
page_cache_release(page);
oom:
return VM_FAULT_OOM;
}
/*
* The mmap_sem must have been held on entry, and may have been
* released depending on flags and vma->vm_ops->fault() return value.
 * See filemap_fault() and __lock_page_or_retry().
*/
static int __do_fault(struct vm_area_struct *vma, unsigned long address,
pgoff_t pgoff, unsigned int flags, struct page **page)
{
struct vm_fault vmf;
int ret;
vmf.virtual_address = (void __user *)(address & PAGE_MASK);
vmf.pgoff = pgoff;
vmf.flags = flags;
vmf.page = NULL;
ret = vma->vm_ops->fault(vma, &vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
if (unlikely(PageHWPoison(vmf.page))) {
if (ret & VM_FAULT_LOCKED)
unlock_page(vmf.page);
page_cache_release(vmf.page);
return VM_FAULT_HWPOISON;
}
if (unlikely(!(ret & VM_FAULT_LOCKED)))
lock_page(vmf.page);
else
VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
*page = vmf.page;
return ret;
}
/**
* do_set_pte - setup new PTE entry for given page and add reverse page mapping.
*
* @vma: virtual memory area
* @address: user virtual address
* @page: page to map
* @pte: pointer to target page table entry
* @write: true, if new entry is writable
* @anon: true, if it's anonymous page
*
* Caller must hold page table lock relevant for @pte.
*
* Target users are page handler itself and implementations of
* vm_ops->map_pages.
*/
void do_set_pte(struct vm_area_struct *vma, unsigned long address,
struct page *page, pte_t *pte, bool write, bool anon)
{
pte_t entry;
flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);
if (write)
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
else if (pte_file(*pte) && pte_file_soft_dirty(*pte))
entry = pte_mksoft_dirty(entry);
if (anon) {
inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
} else {
inc_mm_counter_fast(vma->vm_mm, MM_FILEPAGES);
page_add_file_rmap(page);
}
set_pte_at(vma->vm_mm, address, pte, entry);
/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache(vma, address, pte);
}
static unsigned long fault_around_bytes __read_mostly =
rounddown_pow_of_two(65536);
#ifdef CONFIG_DEBUG_FS
static int fault_around_bytes_get(void *data, u64 *val)
{
*val = fault_around_bytes;
return 0;
}
/*
 * fault_around_pages() and fault_around_mask() expect fault_around_bytes
 * rounded down to the nearest page order. It's what do_fault_around()
 * expects to see.
*/
static int fault_around_bytes_set(void *data, u64 val)
{
if (val / PAGE_SIZE > PTRS_PER_PTE)
return -EINVAL;
if (val > PAGE_SIZE)
fault_around_bytes = rounddown_pow_of_two(val);
else
fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops,
fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
static int __init fault_around_debugfs(void)
{
void *ret;
ret = debugfs_create_file("fault_around_bytes", 0644, NULL, NULL,
&fault_around_bytes_fops);
if (!ret)
pr_warn("Failed to create fault_around_bytes in debugfs");
return 0;
}
late_initcall(fault_around_debugfs);
#endif
/*
 * do_fault_around() tries to map a few pages around the fault address. The hope
* is that the pages will be needed soon and this will lower the number of
* faults to handle.
*
* It uses vm_ops->map_pages() to map the pages, which skips the page if it's
* not ready to be mapped: not up-to-date, locked, etc.
*
* This function is called with the page table lock taken. In the split ptlock
 * case the page table lock protects only those entries which belong to
* the page table corresponding to the fault address.
*
* This function doesn't cross the VMA boundaries, in order to call map_pages()
* only once.
*
* fault_around_pages() defines how many pages we'll try to map.
* do_fault_around() expects it to return a power of two less than or equal to
* PTRS_PER_PTE.
*
* The virtual address of the area that we map is naturally aligned to the
* fault_around_pages() value (and therefore to page order). This way it's
* easier to guarantee that we don't cross page table boundaries.
*/
static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
pte_t *pte, pgoff_t pgoff, unsigned int flags)
{
unsigned long start_addr, nr_pages, mask;
pgoff_t max_pgoff;
struct vm_fault vmf;
int off;
nr_pages = ACCESS_ONCE(fault_around_bytes) >> PAGE_SHIFT;
mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
start_addr = max(address & mask, vma->vm_start);
off = ((address - start_addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
pte -= off;
pgoff -= off;
/*
	 * max_pgoff is either the end of the page table or the end of the
	 * vma, or fault_around_pages() pages past pgoff, depending on what
	 * is nearest.
*/
max_pgoff = pgoff - ((start_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
PTRS_PER_PTE - 1;
max_pgoff = min3(max_pgoff, vma_pages(vma) + vma->vm_pgoff - 1,
pgoff + nr_pages - 1);
/* Check if it makes any sense to call ->map_pages */
while (!pte_none(*pte)) {
if (++pgoff > max_pgoff)
return;
start_addr += PAGE_SIZE;
if (start_addr >= vma->vm_end)
return;
pte++;
}
vmf.virtual_address = (void __user *) start_addr;
vmf.pte = pte;
vmf.pgoff = pgoff;
vmf.max_pgoff = max_pgoff;
vmf.flags = flags;
vma->vm_ops->map_pages(vma, &vmf);
}
static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd,
pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
{
struct page *fault_page;
spinlock_t *ptl;
pte_t *pte;
int ret = 0;
/*
* Let's call ->map_pages() first and use ->fault() as fallback
	 * if the page at the offset is not ready to be mapped (cold page
	 * cache or something).
*/
if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
fault_around_bytes >> PAGE_SHIFT > 1) {
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
do_fault_around(vma, address, pte, pgoff, flags);
if (!pte_same(*pte, orig_pte))
goto unlock_out;
pte_unmap_unlock(pte, ptl);
}
ret = __do_fault(vma, address, pgoff, flags, &fault_page);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
if (unlikely(!pte_same(*pte, orig_pte))) {
pte_unmap_unlock(pte, ptl);
unlock_page(fault_page);
page_cache_release(fault_page);
return ret;
}
do_set_pte(vma, address, fault_page, pte, false, false);
unlock_page(fault_page);
unlock_out:
pte_unmap_unlock(pte, ptl);
return ret;
}
static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd,
pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
{
struct page *fault_page, *new_page;
struct mem_cgroup *memcg;
spinlock_t *ptl;
pte_t *pte;
int ret;
if (unlikely(anon_vma_prepare(vma)))
return VM_FAULT_OOM;
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
if (!new_page)
return VM_FAULT_OOM;
if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg)) {
page_cache_release(new_page);
return VM_FAULT_OOM;
}
ret = __do_fault(vma, address, pgoff, flags, &fault_page);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
goto uncharge_out;
copy_user_highpage(new_page, fault_page, address, vma);
__SetPageUptodate(new_page);
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
if (unlikely(!pte_same(*pte, orig_pte))) {
pte_unmap_unlock(pte, ptl);
unlock_page(fault_page);
page_cache_release(fault_page);
goto uncharge_out;
}
do_set_pte(vma, address, new_page, pte, true, true);
mem_cgroup_commit_charge(new_page, memcg, false);
lru_cache_add_active_or_unevictable(new_page, vma);
pte_unmap_unlock(pte, ptl);
unlock_page(fault_page);
page_cache_release(fault_page);
return ret;
uncharge_out:
mem_cgroup_cancel_charge(new_page, memcg);
page_cache_release(new_page);
return ret;
}
static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd,
pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
{
struct page *fault_page;
struct address_space *mapping;
spinlock_t *ptl;
pte_t *pte;
int dirtied = 0;
int ret, tmp;
ret = __do_fault(vma, address, pgoff, flags, &fault_page);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
/*
* Check if the backing address space wants to know that the page is
* about to become writable
*/
if (vma->vm_ops->page_mkwrite) {
unlock_page(fault_page);
tmp = do_page_mkwrite(vma, fault_page, address);
if (unlikely(!tmp ||
(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
page_cache_release(fault_page);
return tmp;
}
}
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
if (unlikely(!pte_same(*pte, orig_pte))) {
pte_unmap_unlock(pte, ptl);
unlock_page(fault_page);
page_cache_release(fault_page);
return ret;
}
do_set_pte(vma, address, fault_page, pte, true, false);
pte_unmap_unlock(pte, ptl);
if (set_page_dirty(fault_page))
dirtied = 1;
mapping = fault_page->mapping;
unlock_page(fault_page);
if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
/*
* Some device drivers do not set page.mapping but still
* dirty their pages
*/
balance_dirty_pages_ratelimited(mapping);
}
/* file_update_time outside page_lock */
if (vma->vm_file && !vma->vm_ops->page_mkwrite)
file_update_time(vma->vm_file);
return ret;
}
/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults).
* The mmap_sem may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
*/
static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags, pte_t orig_pte)
{
pgoff_t pgoff = (((address & PAGE_MASK)
- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
pte_unmap(page_table);
if (!(flags & FAULT_FLAG_WRITE))
return do_read_fault(mm, vma, address, pmd, pgoff, flags,
orig_pte);
if (!(vma->vm_flags & VM_SHARED))
return do_cow_fault(mm, vma, address, pmd, pgoff, flags,
orig_pte);
return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
/*
* Fault of a previously existing named mapping. Repopulate the pte
* from the encoded file_pte if possible. This enables swappable
* nonlinear vmas.
*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with pte unmapped and unlocked.
* The mmap_sem may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
*/
static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags, pte_t orig_pte)
{
pgoff_t pgoff;
flags |= FAULT_FLAG_NONLINEAR;
if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
return 0;
if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
/*
* Page table corrupted: show pte and kill process.
*/
print_bad_pte(vma, address, orig_pte, NULL);
return VM_FAULT_SIGBUS;
}
pgoff = pte_to_pgoff(orig_pte);
if (!(flags & FAULT_FLAG_WRITE))
return do_read_fault(mm, vma, address, pmd, pgoff, flags,
orig_pte);
if (!(vma->vm_flags & VM_SHARED))
return do_cow_fault(mm, vma, address, pmd, pgoff, flags,
orig_pte);
return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
unsigned long addr, int page_nid,
int *flags)
{
get_page(page);
count_vm_numa_event(NUMA_HINT_FAULTS);
if (page_nid == numa_node_id()) {
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
*flags |= TNF_FAULT_LOCAL;
}
return mpol_misplaced(page, vma, addr);
}
static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd)
{
struct page *page = NULL;
spinlock_t *ptl;
int page_nid = -1;
int last_cpupid;
int target_nid;
bool migrated = false;
int flags = 0;
/*
* The "pte" at this point cannot be used safely without
	 * validation through pte_unmap_same(). It's of NUMA type but
	 * the pfn may be garbage if the read is non-atomic.
*
* ptep_modify_prot_start is not called as this is clearing
* the _PAGE_NUMA bit and it is not really expected that there
* would be concurrent hardware modifications to the PTE.
*/
ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
if (unlikely(!pte_same(*ptep, pte))) {
pte_unmap_unlock(ptep, ptl);
goto out;
}
pte = pte_mknonnuma(pte);
set_pte_at(mm, addr, ptep, pte);
update_mmu_cache(vma, addr, ptep);
page = vm_normal_page(vma, addr, pte);
if (!page) {
pte_unmap_unlock(ptep, ptl);
return 0;
}
BUG_ON(is_zero_pfn(page_to_pfn(page)));
/*
	 * Avoid grouping on DSO/COW pages specifically, and RO pages
	 * in general. RO pages shouldn't hurt as much anyway since
* they can be in shared cache state.
*/
if (!pte_write(pte))
flags |= TNF_NO_GROUP;
/*
* Flag if the page is shared between multiple address spaces. This
* is later used when determining whether to group tasks together
*/
if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
flags |= TNF_SHARED;
last_cpupid = page_cpupid_last(page);
page_nid = page_to_nid(page);
target_nid = numa_migrate_prep(page, vma, addr, page_nid, &flags);
pte_unmap_unlock(ptep, ptl);
if (target_nid == -1) {
put_page(page);
goto out;
}
/* Migrate to the requested node */
migrated = migrate_misplaced_page(page, vma, target_nid);
if (migrated) {
page_nid = target_nid;
flags |= TNF_MIGRATED;
}
out:
if (page_nid != -1)
task_numa_fault(last_cpupid, page_nid, 1, flags);
return 0;
}
/*
* These routines also need to handle stuff like marking pages dirty
* and/or accessed for architectures that don't do it in hardware (most
* RISC architectures). The early dirtying is also good on the i386.
*
* There is also a hook called "update_mmu_cache()" that architectures
* with external mmu caches can use to update those (ie the Sparc or
* PowerPC hashed page tables that act as extended TLBs).
*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with pte unmapped and unlocked.
*
* The mmap_sem may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
*/
static int handle_pte_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
pte_t *pte, pmd_t *pmd, unsigned int flags)
{
pte_t entry;
spinlock_t *ptl;
entry = ACCESS_ONCE(*pte);
if (!pte_present(entry)) {
if (pte_none(entry)) {
if (vma->vm_ops) {
if (likely(vma->vm_ops->fault))
return do_linear_fault(mm, vma, address,
pte, pmd, flags, entry);
}
return do_anonymous_page(mm, vma, address,
pte, pmd, flags);
}
if (pte_file(entry))
return do_nonlinear_fault(mm, vma, address,
pte, pmd, flags, entry);
return do_swap_page(mm, vma, address,
pte, pmd, flags, entry);
}
if (pte_numa(entry))
return do_numa_page(mm, vma, address, entry, pte, pmd);
ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
if (unlikely(!pte_same(*pte, entry)))
goto unlock;
if (flags & FAULT_FLAG_WRITE) {
if (!pte_write(entry))
return do_wp_page(mm, vma, address,
pte, pmd, ptl, entry);
entry = pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
update_mmu_cache(vma, address, pte);
} else {
/*
* This is needed only for protection faults but the arch code
* is not yet telling us if this is a protection fault or not.
* This still avoids useless tlb flushes for .text page faults
* with threads.
*/
if (flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vma, address);
}
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
}
/*
* By the time we get here, we already hold the mm semaphore
*
* The mmap_sem may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
*/
static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, flags);
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
return VM_FAULT_OOM;
pmd = pmd_alloc(mm, pud, address);
if (!pmd)
return VM_FAULT_OOM;
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
int ret = VM_FAULT_FALLBACK;
if (!vma->vm_ops)
ret = do_huge_pmd_anonymous_page(mm, vma, address,
pmd, flags);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
pmd_t orig_pmd = *pmd;
int ret;
barrier();
if (pmd_trans_huge(orig_pmd)) {
unsigned int dirty = flags & FAULT_FLAG_WRITE;
/*
			 * If the pmd is splitting, return and retry
			 * the fault. Alternative: wait until the split
* is done, and goto retry.
*/
if (pmd_trans_splitting(orig_pmd))
return 0;
if (pmd_numa(orig_pmd))
return do_huge_pmd_numa_page(mm, vma, address,
orig_pmd, pmd);
if (dirty && !pmd_write(orig_pmd)) {
ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
orig_pmd);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
huge_pmd_set_accessed(mm, vma, address, pmd,
orig_pmd, dirty);
return 0;
}
}
}
/*
	 * Use __pte_alloc instead of pte_alloc_map, because we can't
	 * run pte_offset_map on the pmd if a huge pmd could
	 * materialize under us from a different thread.
*/
if (unlikely(pmd_none(*pmd)) &&
unlikely(__pte_alloc(mm, vma, pmd, address)))
return VM_FAULT_OOM;
	/* if a huge pmd materialized from under us, just retry later */
if (unlikely(pmd_trans_huge(*pmd)))
return 0;
/*
* A regular pmd is established and it can't morph into a huge pmd
* from under us anymore at this point because we hold the mmap_sem
* read mode and khugepaged takes it in write mode. So now it's
* safe to run pte_offset_map().
*/
pte = pte_offset_map(pmd, address);
return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}
/*
* By the time we get here, we already hold the mm semaphore
*
* The mmap_sem may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
*/
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
{
int ret;
__set_current_state(TASK_RUNNING);
count_vm_event(PGFAULT);
mem_cgroup_count_vm_event(mm, PGFAULT);
/* do counter updates before entering really critical section. */
check_sync_rss_stat(current);
/*
* Enable the memcg OOM handling for faults triggered in user
* space. Kernel faults are handled more gracefully.
*/
if (flags & FAULT_FLAG_USER)
mem_cgroup_oom_enable();
ret = __handle_mm_fault(mm, vma, address, flags);
if (flags & FAULT_FLAG_USER) {
mem_cgroup_oom_disable();
/*
* The task may have entered a memcg OOM situation but
* if the allocation error was handled gracefully (no
* VM_FAULT_OOM), there is no need to kill anything.
* Just clean up the OOM state peacefully.
*/
if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
mem_cgroup_oom_synchronize(false);
}
return ret;
}
#ifndef __PAGETABLE_PUD_FOLDED
/*
* Allocate page upper directory.
* We've already handled the fast-path in-line.
*/
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
pud_t *new = pud_alloc_one(mm, address);
if (!new)
return -ENOMEM;
smp_wmb(); /* See comment in __pte_alloc */
spin_lock(&mm->page_table_lock);
if (pgd_present(*pgd)) /* Another has populated it */
pud_free(mm, new);
else
pgd_populate(mm, pgd, new);
spin_unlock(&mm->page_table_lock);
return 0;
}
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
/*
* Allocate page middle directory.
* We've already handled the fast-path in-line.
*/
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
pmd_t *new = pmd_alloc_one(mm, address);
if (!new)
return -ENOMEM;
smp_wmb(); /* See comment in __pte_alloc */
spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_4LEVEL_HACK
if (pud_present(*pud)) /* Another has populated it */
pmd_free(mm, new);
else
pud_populate(mm, pud, new);
#else
if (pgd_present(*pud)) /* Another has populated it */
pmd_free(mm, new);
else
pgd_populate(mm, pud, new);
#endif /* __ARCH_HAS_4LEVEL_HACK */
spin_unlock(&mm->page_table_lock);
return 0;
}
#endif /* __PAGETABLE_PMD_FOLDED */
static int __follow_pte(struct mm_struct *mm, unsigned long address,
pte_t **ptepp, spinlock_t **ptlp)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *ptep;
pgd = pgd_offset(mm, address);
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
goto out;
pud = pud_offset(pgd, address);
if (pud_none(*pud) || unlikely(pud_bad(*pud)))
goto out;
pmd = pmd_offset(pud, address);
VM_BUG_ON(pmd_trans_huge(*pmd));
if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
goto out;
/* We cannot handle huge page PFN maps. Luckily they don't exist. */
if (pmd_huge(*pmd))
goto out;
ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
if (!ptep)
goto out;
if (!pte_present(*ptep))
goto unlock;
*ptepp = ptep;
return 0;
unlock:
pte_unmap_unlock(ptep, *ptlp);
out:
return -EINVAL;
}
static inline int follow_pte(struct mm_struct *mm, unsigned long address,
pte_t **ptepp, spinlock_t **ptlp)
{
int res;
/* (void) is needed to make gcc happy */
(void) __cond_lock(*ptlp,
!(res = __follow_pte(mm, address, ptepp, ptlp)));
return res;
}
/**
* follow_pfn - look up PFN at a user virtual address
* @vma: memory mapping
* @address: user virtual address
* @pfn: location to store found PFN
*
* Only IO mappings and raw PFN mappings are allowed.
*
 * Returns zero and the pfn at @pfn on success, a negative value otherwise.
*/
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn)
{
int ret = -EINVAL;
spinlock_t *ptl;
pte_t *ptep;
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
return ret;
ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
if (ret)
return ret;
*pfn = pte_pfn(*ptep);
pte_unmap_unlock(ptep, ptl);
return 0;
}
EXPORT_SYMBOL(follow_pfn);
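/*
 * Usage sketch (hypothetical caller; this is the pattern used by code
 * that must resolve userspace addresses in VM_PFNMAP vmas):
 *
 *	unsigned long pfn;
 *	struct vm_area_struct *vma;
 *
 *	down_read(&mm->mmap_sem);	// keep the vma stable
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr &&
 *	    follow_pfn(vma, addr, &pfn) == 0)
 *		pr_info("addr %lx maps pfn %lx\n", addr, pfn);
 *	up_read(&mm->mmap_sem);
 */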
#ifdef CONFIG_HAVE_IOREMAP_PROT
int follow_phys(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
unsigned long *prot, resource_size_t *phys)
{
int ret = -EINVAL;
pte_t *ptep, pte;
spinlock_t *ptl;
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
goto out;
if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
goto out;
pte = *ptep;
if ((flags & FOLL_WRITE) && !pte_write(pte))
goto unlock;
*prot = pgprot_val(pte_pgprot(pte));
*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
ret = 0;
unlock:
pte_unmap_unlock(ptep, ptl);
out:
return ret;
}
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write)
{
resource_size_t phys_addr;
unsigned long prot = 0;
void __iomem *maddr;
int offset = addr & (PAGE_SIZE-1);
if (follow_phys(vma, addr, write, &prot, &phys_addr))
return -EINVAL;
maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
if (write)
memcpy_toio(maddr + offset, buf, len);
else
memcpy_fromio(buf, maddr + offset, len);
iounmap(maddr);
return len;
}
EXPORT_SYMBOL_GPL(generic_access_phys);
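/*
 * Wiring sketch (hypothetical driver; /dev/mem uses the same pattern):
 * expose a VM_IO mapping to ptrace and /proc/pid/mem readers by
 * installing this helper as the vma's ->access method.
 *
 *	static const struct vm_operations_struct mydrv_vm_ops = {
 *		.access = generic_access_phys,
 *	};
 *
 *	// in the driver's mmap():
 *	vma->vm_ops = &mydrv_vm_ops;
 */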
#endif
/*
* Access another process' address space as given in mm. If non-NULL, use the
* given task for page fault accounting.
*/
static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
unsigned long addr, void *buf, int len, int write)
{
struct vm_area_struct *vma;
void *old_buf = buf;
down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
while (len) {
int bytes, ret, offset;
void *maddr;
struct page *page = NULL;
ret = get_user_pages(tsk, mm, addr, 1,
write, 1, &page, &vma);
if (ret <= 0) {
#ifndef CONFIG_HAVE_IOREMAP_PROT
break;
#else
/*
* Check if this is a VM_IO | VM_PFNMAP VMA, which
* we can access using slightly different code.
*/
vma = find_vma(mm, addr);
if (!vma || vma->vm_start > addr)
break;
if (vma->vm_ops && vma->vm_ops->access)
ret = vma->vm_ops->access(vma, addr, buf,
len, write);
if (ret <= 0)
break;
bytes = ret;
#endif
} else {
bytes = len;
offset = addr & (PAGE_SIZE-1);
if (bytes > PAGE_SIZE-offset)
bytes = PAGE_SIZE-offset;
maddr = kmap(page);
if (write) {
copy_to_user_page(vma, page, addr,
maddr + offset, buf, bytes);
set_page_dirty_lock(page);
} else {
copy_from_user_page(vma, page, addr,
buf, maddr + offset, bytes);
}
kunmap(page);
page_cache_release(page);
}
len -= bytes;
buf += bytes;
addr += bytes;
}
up_read(&mm->mmap_sem);
return buf - old_buf;
}
/**
* access_remote_vm - access another process' address space
* @mm: the mm_struct of the target address space
* @addr: start address to access
* @buf: source or destination buffer
* @len: number of bytes to transfer
* @write: whether the access is a write
*
* The caller must hold a reference on @mm.
*/
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, int write)
{
return __access_remote_vm(NULL, mm, addr, buf, len, write);
}
/*
* Access another process' address space.
* The source/target buffer must be in kernel space.
* Do not walk the page table directly; use get_user_pages().
*/
int access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, int write)
{
struct mm_struct *mm;
int ret;
mm = get_task_mm(tsk);
if (!mm)
return 0;
ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
mmput(mm);
return ret;
}
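/*
 * Illustration only: a ptrace-style read of one word from another task,
 * built on access_process_vm(). The helper name and error policy are
 * assumptions for this sketch.
 */
#if 0
static int example_peek_word(struct task_struct *child, unsigned long addr,
			     unsigned long *dest)
{
	int copied;

	/* write == 0: read from the child's address space. */
	copied = access_process_vm(child, addr, dest, sizeof(*dest), 0);
	return (copied == sizeof(*dest)) ? 0 : -EIO;
}
#endif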
/*
* Print the name of a VMA.
*/
void print_vma_addr(char *prefix, unsigned long ip)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
/*
* Do not print if we are in atomic
* contexts (in exception stacks, etc.):
*/
if (preempt_count())
return;
down_read(&mm->mmap_sem);
vma = find_vma(mm, ip);
if (vma && vma->vm_file) {
struct file *f = vma->vm_file;
char *buf = (char *)__get_free_page(GFP_KERNEL);
if (buf) {
char *p;
p = d_path(&f->f_path, buf, PAGE_SIZE);
if (IS_ERR(p))
p = "?";
printk("%s%s[%lx+%lx]", prefix, kbasename(p),
vma->vm_start,
vma->vm_end - vma->vm_start);
free_page((unsigned long)buf);
}
}
up_read(&mm->mmap_sem);
}
#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void might_fault(void)
{
/*
* Some code (nfs/sunrpc) uses socket ops on kernel memory while
* holding the mmap_sem. This is safe because kernel memory doesn't
* get paged out, so we'll never actually fault; the annotations
* below would only generate false positives.
*/
if (segment_eq(get_fs(), KERNEL_DS))
return;
/*
* It would be nicer to annotate only the paths that are not under
* pagefault_disable(); however, that requires a larger audit and
* helpers like get_user_atomic.
*/
if (in_atomic())
return;
__might_sleep(__FILE__, __LINE__, 0);
if (current->mm)
might_lock_read(&current->mm->mmap_sem);
}
EXPORT_SYMBOL(might_fault);
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
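/*
 * Added note: the *_gigantic_page() variants below step through the pages
 * with mem_map_next() because a gigantic page spans more than
 * MAX_ORDER_NR_PAGES, and its struct pages are not guaranteed to be
 * virtually contiguous in the mem_map.
 */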
static void clear_gigantic_page(struct page *page,
unsigned long addr,
unsigned int pages_per_huge_page)
{
int i;
struct page *p = page;
might_sleep();
for (i = 0; i < pages_per_huge_page;
i++, p = mem_map_next(p, page, i)) {
cond_resched();
clear_user_highpage(p, addr + i * PAGE_SIZE);
}
}
void clear_huge_page(struct page *page,
unsigned long addr, unsigned int pages_per_huge_page)
{
int i;
if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
clear_gigantic_page(page, addr, pages_per_huge_page);
return;
}
might_sleep();
for (i = 0; i < pages_per_huge_page; i++) {
cond_resched();
clear_user_highpage(page + i, addr + i * PAGE_SIZE);
}
}
static void copy_user_gigantic_page(struct page *dst, struct page *src,
unsigned long addr,
struct vm_area_struct *vma,
unsigned int pages_per_huge_page)
{
int i;
struct page *dst_base = dst;
struct page *src_base = src;
for (i = 0; i < pages_per_huge_page; ) {
cond_resched();
copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
i++;
dst = mem_map_next(dst, dst_base, i);
src = mem_map_next(src, src_base, i);
}
}
void copy_user_huge_page(struct page *dst, struct page *src,
unsigned long addr, struct vm_area_struct *vma,
unsigned int pages_per_huge_page)
{
int i;
if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
copy_user_gigantic_page(dst, src, addr, vma,
pages_per_huge_page);
return;
}
might_sleep();
for (i = 0; i < pages_per_huge_page; i++) {
cond_resched();
copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
static struct kmem_cache *page_ptl_cachep;
void __init ptlock_cache_init(void)
{
page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
SLAB_PANIC, NULL);
}
bool ptlock_alloc(struct page *page)
{
spinlock_t *ptl;
ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
if (!ptl)
return false;
page->ptl = ptl;
return true;
}
void ptlock_free(struct page *page)
{
kmem_cache_free(page_ptl_cachep, page->ptl);
}
#endif
| gpl-2.0 |
BhargavaRamM/bluez-custom | gdbus/object.c | 7 | 45184 | /*
*
* D-Bus helper library
*
* Copyright (C) 2004-2011 Marcel Holtmann <marcel@holtmann.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdio.h>
#include <string.h>
#include <glib.h>
#include <dbus/dbus.h>
#include "gdbus.h"
#define info(fmt...)
#define error(fmt...)
#define debug(fmt...)
#define DBUS_INTERFACE_OBJECT_MANAGER "org.freedesktop.DBus.ObjectManager"
#ifndef DBUS_ERROR_UNKNOWN_PROPERTY
#define DBUS_ERROR_UNKNOWN_PROPERTY "org.freedesktop.DBus.Error.UnknownProperty"
#endif
#ifndef DBUS_ERROR_PROPERTY_READ_ONLY
#define DBUS_ERROR_PROPERTY_READ_ONLY "org.freedesktop.DBus.Error.PropertyReadOnly"
#endif
struct generic_data {
unsigned int refcount;
DBusConnection *conn;
char *path;
GSList *interfaces;
GSList *objects;
GSList *added;
GSList *removed;
guint process_id;
gboolean pending_prop;
char *introspect;
struct generic_data *parent;
};
struct interface_data {
char *name;
const GDBusMethodTable *methods;
const GDBusSignalTable *signals;
const GDBusPropertyTable *properties;
GSList *pending_prop;
void *user_data;
GDBusDestroyFunction destroy;
};
struct security_data {
GDBusPendingReply pending;
DBusMessage *message;
const GDBusMethodTable *method;
void *iface_user_data;
};
struct property_data {
DBusConnection *conn;
GDBusPendingPropertySet id;
DBusMessage *message;
};
static int global_flags = 0;
static struct generic_data *root;
static GSList *pending = NULL;
static gboolean process_changes(gpointer user_data);
static void process_properties_from_interface(struct generic_data *data,
struct interface_data *iface);
static void process_property_changes(struct generic_data *data);
static void print_arguments(GString *gstr, const GDBusArgInfo *args,
const char *direction)
{
for (; args && args->name; args++) {
g_string_append_printf(gstr,
"<arg name=\"%s\" type=\"%s\"",
args->name, args->signature);
if (direction)
g_string_append_printf(gstr,
" direction=\"%s\"/>\n", direction);
else
g_string_append_printf(gstr, "/>\n");
}
}
#define G_DBUS_ANNOTATE(name_, value_) \
"<annotation name=\"org.freedesktop.DBus." name_ "\" " \
"value=\"" value_ "\"/>"
#define G_DBUS_ANNOTATE_DEPRECATED \
G_DBUS_ANNOTATE("Deprecated", "true")
#define G_DBUS_ANNOTATE_NOREPLY \
G_DBUS_ANNOTATE("Method.NoReply", "true")
static gboolean check_experimental(int flags, int flag)
{
if (!(flags & flag))
return FALSE;
return !(global_flags & G_DBUS_FLAG_ENABLE_EXPERIMENTAL);
}
static void generate_interface_xml(GString *gstr, struct interface_data *iface)
{
const GDBusMethodTable *method;
const GDBusSignalTable *signal;
const GDBusPropertyTable *property;
for (method = iface->methods; method && method->name; method++) {
if (check_experimental(method->flags,
G_DBUS_METHOD_FLAG_EXPERIMENTAL))
continue;
g_string_append_printf(gstr, "<method name=\"%s\">",
method->name);
print_arguments(gstr, method->in_args, "in");
print_arguments(gstr, method->out_args, "out");
if (method->flags & G_DBUS_METHOD_FLAG_DEPRECATED)
g_string_append_printf(gstr,
G_DBUS_ANNOTATE_DEPRECATED);
if (method->flags & G_DBUS_METHOD_FLAG_NOREPLY)
g_string_append_printf(gstr, G_DBUS_ANNOTATE_NOREPLY);
g_string_append_printf(gstr, "</method>");
}
for (signal = iface->signals; signal && signal->name; signal++) {
if (check_experimental(signal->flags,
G_DBUS_SIGNAL_FLAG_EXPERIMENTAL))
continue;
g_string_append_printf(gstr, "<signal name=\"%s\">",
signal->name);
print_arguments(gstr, signal->args, NULL);
if (signal->flags & G_DBUS_SIGNAL_FLAG_DEPRECATED)
g_string_append_printf(gstr,
G_DBUS_ANNOTATE_DEPRECATED);
g_string_append_printf(gstr, "</signal>\n");
}
for (property = iface->properties; property && property->name;
property++) {
if (check_experimental(property->flags,
G_DBUS_PROPERTY_FLAG_EXPERIMENTAL))
continue;
g_string_append_printf(gstr, "<property name=\"%s\""
" type=\"%s\" access=\"%s%s\">",
property->name, property->type,
property->get ? "read" : "",
property->set ? "write" : "");
if (property->flags & G_DBUS_PROPERTY_FLAG_DEPRECATED)
g_string_append_printf(gstr,
G_DBUS_ANNOTATE_DEPRECATED);
g_string_append_printf(gstr, "</property>");
}
}
static void generate_introspection_xml(DBusConnection *conn,
struct generic_data *data, const char *path)
{
GSList *list;
GString *gstr;
char **children;
int i;
g_free(data->introspect);
gstr = g_string_new(DBUS_INTROSPECT_1_0_XML_DOCTYPE_DECL_NODE);
g_string_append_printf(gstr, "<node>");
for (list = data->interfaces; list; list = list->next) {
struct interface_data *iface = list->data;
g_string_append_printf(gstr, "<interface name=\"%s\">",
iface->name);
generate_interface_xml(gstr, iface);
g_string_append_printf(gstr, "</interface>");
}
if (!dbus_connection_list_registered(conn, path, &children))
goto done;
for (i = 0; children[i]; i++)
g_string_append_printf(gstr, "<node name=\"%s\"/>",
children[i]);
dbus_free_string_array(children);
done:
g_string_append_printf(gstr, "</node>");
data->introspect = g_string_free(gstr, FALSE);
}
static DBusMessage *introspect(DBusConnection *connection,
DBusMessage *message, void *user_data)
{
struct generic_data *data = user_data;
DBusMessage *reply;
if (data->introspect == NULL)
generate_introspection_xml(connection, data,
dbus_message_get_path(message));
reply = dbus_message_new_method_return(message);
if (reply == NULL)
return NULL;
dbus_message_append_args(reply, DBUS_TYPE_STRING, &data->introspect,
DBUS_TYPE_INVALID);
return reply;
}
static DBusHandlerResult process_message(DBusConnection *connection,
DBusMessage *message, const GDBusMethodTable *method,
void *iface_user_data)
{
DBusMessage *reply;
reply = method->function(connection, message, iface_user_data);
if (method->flags & G_DBUS_METHOD_FLAG_NOREPLY ||
dbus_message_get_no_reply(message)) {
if (reply != NULL)
dbus_message_unref(reply);
return DBUS_HANDLER_RESULT_HANDLED;
}
if (method->flags & G_DBUS_METHOD_FLAG_ASYNC) {
if (reply == NULL)
return DBUS_HANDLER_RESULT_HANDLED;
}
if (reply == NULL)
return DBUS_HANDLER_RESULT_NEED_MEMORY;
g_dbus_send_message(connection, reply);
return DBUS_HANDLER_RESULT_HANDLED;
}
static GDBusPendingReply next_pending = 1;
static GSList *pending_security = NULL;
static const GDBusSecurityTable *security_table = NULL;
void g_dbus_pending_success(DBusConnection *connection,
GDBusPendingReply pending)
{
GSList *list;
for (list = pending_security; list; list = list->next) {
struct security_data *secdata = list->data;
if (secdata->pending != pending)
continue;
pending_security = g_slist_remove(pending_security, secdata);
process_message(connection, secdata->message,
secdata->method, secdata->iface_user_data);
dbus_message_unref(secdata->message);
g_free(secdata);
return;
}
}
void g_dbus_pending_error_valist(DBusConnection *connection,
GDBusPendingReply pending, const char *name,
const char *format, va_list args)
{
GSList *list;
for (list = pending_security; list; list = list->next) {
struct security_data *secdata = list->data;
if (secdata->pending != pending)
continue;
pending_security = g_slist_remove(pending_security, secdata);
g_dbus_send_error_valist(connection, secdata->message,
name, format, args);
dbus_message_unref(secdata->message);
g_free(secdata);
return;
}
}
void g_dbus_pending_error(DBusConnection *connection,
GDBusPendingReply pending,
const char *name, const char *format, ...)
{
va_list args;
va_start(args, format);
g_dbus_pending_error_valist(connection, pending, name, format, args);
va_end(args);
}
int polkit_check_authorization(DBusConnection *conn,
const char *action, gboolean interaction,
void (*function) (dbus_bool_t authorized,
void *user_data),
void *user_data, int timeout);
struct builtin_security_data {
DBusConnection *conn;
GDBusPendingReply pending;
};
static void builtin_security_result(dbus_bool_t authorized, void *user_data)
{
struct builtin_security_data *data = user_data;
if (authorized == TRUE)
g_dbus_pending_success(data->conn, data->pending);
else
g_dbus_pending_error(data->conn, data->pending,
DBUS_ERROR_AUTH_FAILED, NULL);
g_free(data);
}
static void builtin_security_function(DBusConnection *conn,
const char *action,
gboolean interaction,
GDBusPendingReply pending)
{
struct builtin_security_data *data;
data = g_new0(struct builtin_security_data, 1);
data->conn = conn;
data->pending = pending;
if (polkit_check_authorization(conn, action, interaction,
builtin_security_result, data, 30000) < 0)
g_dbus_pending_error(conn, pending, NULL, NULL);
}
static gboolean check_privilege(DBusConnection *conn, DBusMessage *msg,
const GDBusMethodTable *method, void *iface_user_data)
{
const GDBusSecurityTable *security;
for (security = security_table; security && security->privilege;
security++) {
struct security_data *secdata;
gboolean interaction;
if (security->privilege != method->privilege)
continue;
secdata = g_new(struct security_data, 1);
secdata->pending = next_pending++;
secdata->message = dbus_message_ref(msg);
secdata->method = method;
secdata->iface_user_data = iface_user_data;
pending_security = g_slist_prepend(pending_security, secdata);
if (security->flags & G_DBUS_SECURITY_FLAG_ALLOW_INTERACTION)
interaction = TRUE;
else
interaction = FALSE;
if (!(security->flags & G_DBUS_SECURITY_FLAG_BUILTIN) &&
security->function)
security->function(conn, security->action,
interaction, secdata->pending);
else
builtin_security_function(conn, security->action,
interaction, secdata->pending);
return TRUE;
}
return FALSE;
}
static GDBusPendingPropertySet next_pending_property = 1;
static GSList *pending_property_set;
static struct property_data *remove_pending_property_data(
GDBusPendingPropertySet id)
{
struct property_data *propdata;
GSList *l;
for (l = pending_property_set; l != NULL; l = l->next) {
propdata = l->data;
if (propdata->id != id)
continue;
break;
}
if (l == NULL)
return NULL;
pending_property_set = g_slist_delete_link(pending_property_set, l);
return propdata;
}
void g_dbus_pending_property_success(GDBusPendingPropertySet id)
{
struct property_data *propdata;
propdata = remove_pending_property_data(id);
if (propdata == NULL)
return;
g_dbus_send_reply(propdata->conn, propdata->message,
DBUS_TYPE_INVALID);
dbus_message_unref(propdata->message);
g_free(propdata);
}
void g_dbus_pending_property_error_valist(GDBusPendingReply id,
const char *name, const char *format,
va_list args)
{
struct property_data *propdata;
propdata = remove_pending_property_data(id);
if (propdata == NULL)
return;
g_dbus_send_error_valist(propdata->conn, propdata->message, name,
format, args);
dbus_message_unref(propdata->message);
g_free(propdata);
}
void g_dbus_pending_property_error(GDBusPendingReply id, const char *name,
const char *format, ...)
{
va_list args;
va_start(args, format);
g_dbus_pending_property_error_valist(id, name, format, args);
va_end(args);
}
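/*
 * Illustration only: a property setter completing its pending id, matching
 * the set() callback invoked from properties_set() below. The names and the
 * storage step are made up for this sketch.
 */
#if 0
static void example_set_name(const GDBusPropertyTable *property,
			DBusMessageIter *iter, GDBusPendingPropertySet id,
			void *user_data)
{
	const char *value;

	if (dbus_message_iter_get_arg_type(iter) != DBUS_TYPE_STRING) {
		g_dbus_pending_property_error(id,
				DBUS_ERROR_INVALID_SIGNATURE,
				"Expected string value");
		return;
	}

	dbus_message_iter_get_basic(iter, &value);
	/* ... store value somewhere ... */
	g_dbus_pending_property_success(id);
}
#endif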
static void reset_parent(gpointer data, gpointer user_data)
{
struct generic_data *child = data;
struct generic_data *parent = user_data;
child->parent = parent;
}
static void append_property(struct interface_data *iface,
const GDBusPropertyTable *p, DBusMessageIter *dict)
{
DBusMessageIter entry, value;
dbus_message_iter_open_container(dict, DBUS_TYPE_DICT_ENTRY, NULL,
&entry);
dbus_message_iter_append_basic(&entry, DBUS_TYPE_STRING, &p->name);
dbus_message_iter_open_container(&entry, DBUS_TYPE_VARIANT, p->type,
&value);
p->get(p, &value, iface->user_data);
dbus_message_iter_close_container(&entry, &value);
dbus_message_iter_close_container(dict, &entry);
}
static void append_properties(struct interface_data *data,
DBusMessageIter *iter)
{
DBusMessageIter dict;
const GDBusPropertyTable *p;
dbus_message_iter_open_container(iter, DBUS_TYPE_ARRAY,
DBUS_DICT_ENTRY_BEGIN_CHAR_AS_STRING
DBUS_TYPE_STRING_AS_STRING
DBUS_TYPE_VARIANT_AS_STRING
DBUS_DICT_ENTRY_END_CHAR_AS_STRING, &dict);
for (p = data->properties; p && p->name; p++) {
if (check_experimental(p->flags,
G_DBUS_PROPERTY_FLAG_EXPERIMENTAL))
continue;
if (p->get == NULL)
continue;
if (p->exists != NULL && !p->exists(p, data->user_data))
continue;
append_property(data, p, &dict);
}
dbus_message_iter_close_container(iter, &dict);
}
static void append_interface(gpointer data, gpointer user_data)
{
struct interface_data *iface = data;
DBusMessageIter *array = user_data;
DBusMessageIter entry;
dbus_message_iter_open_container(array, DBUS_TYPE_DICT_ENTRY, NULL,
&entry);
dbus_message_iter_append_basic(&entry, DBUS_TYPE_STRING, &iface->name);
append_properties(data, &entry);
dbus_message_iter_close_container(array, &entry);
}
static void emit_interfaces_added(struct generic_data *data)
{
DBusMessage *signal;
DBusMessageIter iter, array;
if (root == NULL || data == root)
return;
signal = dbus_message_new_signal(root->path,
DBUS_INTERFACE_OBJECT_MANAGER,
"InterfacesAdded");
if (signal == NULL)
return;
dbus_message_iter_init_append(signal, &iter);
dbus_message_iter_append_basic(&iter, DBUS_TYPE_OBJECT_PATH,
&data->path);
dbus_message_iter_open_container(&iter, DBUS_TYPE_ARRAY,
DBUS_DICT_ENTRY_BEGIN_CHAR_AS_STRING
DBUS_TYPE_STRING_AS_STRING
DBUS_TYPE_ARRAY_AS_STRING
DBUS_DICT_ENTRY_BEGIN_CHAR_AS_STRING
DBUS_TYPE_STRING_AS_STRING
DBUS_TYPE_VARIANT_AS_STRING
DBUS_DICT_ENTRY_END_CHAR_AS_STRING
DBUS_DICT_ENTRY_END_CHAR_AS_STRING, &array);
g_slist_foreach(data->added, append_interface, &array);
g_slist_free(data->added);
data->added = NULL;
dbus_message_iter_close_container(&iter, &array);
/* Use dbus_connection_send to avoid recursive calls to g_dbus_flush */
dbus_connection_send(data->conn, signal, NULL);
dbus_message_unref(signal);
}
static struct interface_data *find_interface(GSList *interfaces,
const char *name)
{
GSList *list;
if (name == NULL)
return NULL;
for (list = interfaces; list; list = list->next) {
struct interface_data *iface = list->data;
if (!strcmp(name, iface->name))
return iface;
}
return NULL;
}
static gboolean g_dbus_args_have_signature(const GDBusArgInfo *args,
DBusMessage *message)
{
const char *sig = dbus_message_get_signature(message);
const char *p = NULL;
for (; args && args->signature && *sig; args++) {
p = args->signature;
for (; *sig && *p; sig++, p++) {
if (*p != *sig)
return FALSE;
}
}
if (*sig || (p && *p) || (args && args->signature))
return FALSE;
return TRUE;
}
static void add_pending(struct generic_data *data)
{
guint old_id = data->process_id;
data->process_id = g_idle_add(process_changes, data);
if (old_id > 0) {
/*
* If the element already had an old idler, remove the old one,
* no need to re-add it to the pending list.
*/
g_source_remove(old_id);
return;
}
pending = g_slist_append(pending, data);
}
static gboolean remove_interface(struct generic_data *data, const char *name)
{
struct interface_data *iface;
iface = find_interface(data->interfaces, name);
if (iface == NULL)
return FALSE;
process_properties_from_interface(data, iface);
data->interfaces = g_slist_remove(data->interfaces, iface);
if (iface->destroy) {
iface->destroy(iface->user_data);
iface->user_data = NULL;
}
/*
* If the interface being removed was only just added on this same
* mainloop iteration, don't send any signal.
*/
if (g_slist_find(data->added, iface)) {
data->added = g_slist_remove(data->added, iface);
g_free(iface->name);
g_free(iface);
return TRUE;
}
if (data->parent == NULL) {
g_free(iface->name);
g_free(iface);
return TRUE;
}
data->removed = g_slist_prepend(data->removed, iface->name);
g_free(iface);
add_pending(data);
return TRUE;
}
static struct generic_data *invalidate_parent_data(DBusConnection *conn,
const char *child_path)
{
struct generic_data *data = NULL, *child = NULL, *parent = NULL;
char *parent_path, *slash;
parent_path = g_strdup(child_path);
slash = strrchr(parent_path, '/');
if (slash == NULL)
goto done;
if (slash == parent_path && parent_path[1] != '\0')
parent_path[1] = '\0';
else
*slash = '\0';
if (!strlen(parent_path))
goto done;
if (dbus_connection_get_object_path_data(conn, parent_path,
(void *) &data) == FALSE) {
goto done;
}
parent = invalidate_parent_data(conn, parent_path);
if (data == NULL) {
data = parent;
if (data == NULL)
goto done;
}
g_free(data->introspect);
data->introspect = NULL;
if (!dbus_connection_get_object_path_data(conn, child_path,
(void *) &child))
goto done;
if (child == NULL || g_slist_find(data->objects, child) != NULL)
goto done;
data->objects = g_slist_prepend(data->objects, child);
child->parent = data;
done:
g_free(parent_path);
return data;
}
static inline const GDBusPropertyTable *find_property(const GDBusPropertyTable *properties,
const char *name)
{
const GDBusPropertyTable *p;
for (p = properties; p && p->name; p++) {
if (strcmp(name, p->name) != 0)
continue;
if (check_experimental(p->flags,
G_DBUS_PROPERTY_FLAG_EXPERIMENTAL))
break;
return p;
}
return NULL;
}
static DBusMessage *properties_get(DBusConnection *connection,
DBusMessage *message, void *user_data)
{
struct generic_data *data = user_data;
struct interface_data *iface;
const GDBusPropertyTable *property;
const char *interface, *name;
DBusMessageIter iter, value;
DBusMessage *reply;
if (!dbus_message_get_args(message, NULL,
DBUS_TYPE_STRING, &interface,
DBUS_TYPE_STRING, &name,
DBUS_TYPE_INVALID))
return NULL;
iface = find_interface(data->interfaces, interface);
if (iface == NULL)
return g_dbus_create_error(message, DBUS_ERROR_INVALID_ARGS,
"No such interface '%s'", interface);
property = find_property(iface->properties, name);
if (property == NULL)
return g_dbus_create_error(message, DBUS_ERROR_INVALID_ARGS,
"No such property '%s'", name);
if (property->exists != NULL &&
!property->exists(property, iface->user_data))
return g_dbus_create_error(message, DBUS_ERROR_INVALID_ARGS,
"No such property '%s'", name);
if (property->get == NULL)
return g_dbus_create_error(message, DBUS_ERROR_INVALID_ARGS,
"Property '%s' is not readable", name);
reply = dbus_message_new_method_return(message);
if (reply == NULL)
return NULL;
dbus_message_iter_init_append(reply, &iter);
dbus_message_iter_open_container(&iter, DBUS_TYPE_VARIANT,
property->type, &value);
if (!property->get(property, &value, iface->user_data)) {
dbus_message_unref(reply);
return NULL;
}
dbus_message_iter_close_container(&iter, &value);
return reply;
}
static DBusMessage *properties_get_all(DBusConnection *connection,
DBusMessage *message, void *user_data)
{
struct generic_data *data = user_data;
struct interface_data *iface;
const char *interface;
DBusMessageIter iter;
DBusMessage *reply;
if (!dbus_message_get_args(message, NULL,
DBUS_TYPE_STRING, &interface,
DBUS_TYPE_INVALID))
return NULL;
iface = find_interface(data->interfaces, interface);
if (iface == NULL)
return g_dbus_create_error(message, DBUS_ERROR_INVALID_ARGS,
"No such interface '%s'", interface);
reply = dbus_message_new_method_return(message);
if (reply == NULL)
return NULL;
dbus_message_iter_init_append(reply, &iter);
append_properties(iface, &iter);
return reply;
}
static DBusMessage *properties_set(DBusConnection *connection,
DBusMessage *message, void *user_data)
{
struct generic_data *data = user_data;
DBusMessageIter iter, sub;
struct interface_data *iface;
const GDBusPropertyTable *property;
const char *name, *interface;
struct property_data *propdata;
gboolean valid_signature;
char *signature;
if (!dbus_message_iter_init(message, &iter))
return g_dbus_create_error(message, DBUS_ERROR_INVALID_ARGS,
"No arguments given");
if (dbus_message_iter_get_arg_type(&iter) != DBUS_TYPE_STRING)
return g_dbus_create_error(message, DBUS_ERROR_INVALID_ARGS,
"Invalid argument type: '%c'",
dbus_message_iter_get_arg_type(&iter));
dbus_message_iter_get_basic(&iter, &interface);
dbus_message_iter_next(&iter);
if (dbus_message_iter_get_arg_type(&iter) != DBUS_TYPE_STRING)
return g_dbus_create_error(message, DBUS_ERROR_INVALID_ARGS,
"Invalid argument type: '%c'",
dbus_message_iter_get_arg_type(&iter));
dbus_message_iter_get_basic(&iter, &name);
dbus_message_iter_next(&iter);
if (dbus_message_iter_get_arg_type(&iter) != DBUS_TYPE_VARIANT)
return g_dbus_create_error(message, DBUS_ERROR_INVALID_ARGS,
"Invalid argument type: '%c'",
dbus_message_iter_get_arg_type(&iter));
dbus_message_iter_recurse(&iter, &sub);
iface = find_interface(data->interfaces, interface);
if (iface == NULL)
return g_dbus_create_error(message, DBUS_ERROR_INVALID_ARGS,
"No such interface '%s'", interface);
property = find_property(iface->properties, name);
if (property == NULL)
return g_dbus_create_error(message,
DBUS_ERROR_UNKNOWN_PROPERTY,
"No such property '%s'", name);
if (property->set == NULL)
return g_dbus_create_error(message,
DBUS_ERROR_PROPERTY_READ_ONLY,
"Property '%s' is not writable", name);
if (property->exists != NULL &&
!property->exists(property, iface->user_data))
return g_dbus_create_error(message,
DBUS_ERROR_UNKNOWN_PROPERTY,
"No such property '%s'", name);
signature = dbus_message_iter_get_signature(&sub);
valid_signature = strcmp(signature, property->type) ? FALSE : TRUE;
dbus_free(signature);
if (!valid_signature)
return g_dbus_create_error(message,
DBUS_ERROR_INVALID_SIGNATURE,
"Invalid signature for '%s'", name);
propdata = g_new(struct property_data, 1);
propdata->id = next_pending_property++;
propdata->message = dbus_message_ref(message);
propdata->conn = connection;
pending_property_set = g_slist_prepend(pending_property_set, propdata);
property->set(property, &sub, propdata->id, iface->user_data);
return NULL;
}
static const GDBusMethodTable properties_methods[] = {
{ GDBUS_METHOD("Get",
GDBUS_ARGS({ "interface", "s" }, { "name", "s" }),
GDBUS_ARGS({ "value", "v" }),
properties_get) },
{ GDBUS_ASYNC_METHOD("Set",
GDBUS_ARGS({ "interface", "s" }, { "name", "s" },
{ "value", "v" }),
NULL,
properties_set) },
{ GDBUS_METHOD("GetAll",
GDBUS_ARGS({ "interface", "s" }),
GDBUS_ARGS({ "properties", "a{sv}" }),
properties_get_all) },
{ }
};
static const GDBusSignalTable properties_signals[] = {
{ GDBUS_SIGNAL("PropertiesChanged",
GDBUS_ARGS({ "interface", "s" },
{ "changed_properties", "a{sv}" },
{ "invalidated_properties", "as"})) },
{ }
};
static void append_name(gpointer data, gpointer user_data)
{
char *name = data;
DBusMessageIter *iter = user_data;
dbus_message_iter_append_basic(iter, DBUS_TYPE_STRING, &name);
}
static void emit_interfaces_removed(struct generic_data *data)
{
DBusMessage *signal;
DBusMessageIter iter, array;
if (root == NULL || data == root)
return;
signal = dbus_message_new_signal(root->path,
DBUS_INTERFACE_OBJECT_MANAGER,
"InterfacesRemoved");
if (signal == NULL)
return;
dbus_message_iter_init_append(signal, &iter);
dbus_message_iter_append_basic(&iter, DBUS_TYPE_OBJECT_PATH,
&data->path);
dbus_message_iter_open_container(&iter, DBUS_TYPE_ARRAY,
DBUS_TYPE_STRING_AS_STRING, &array);
g_slist_foreach(data->removed, append_name, &array);
g_slist_free_full(data->removed, g_free);
data->removed = NULL;
dbus_message_iter_close_container(&iter, &array);
/* Use dbus_connection_send to avoid recursive calls to g_dbus_flush */
dbus_connection_send(data->conn, signal, NULL);
dbus_message_unref(signal);
}
static void remove_pending(struct generic_data *data)
{
if (data->process_id > 0) {
g_source_remove(data->process_id);
data->process_id = 0;
}
pending = g_slist_remove(pending, data);
}
static gboolean process_changes(gpointer user_data)
{
struct generic_data *data = user_data;
remove_pending(data);
if (data->added != NULL)
emit_interfaces_added(data);
/* Flush pending properties */
if (data->pending_prop == TRUE)
process_property_changes(data);
if (data->removed != NULL)
emit_interfaces_removed(data);
data->process_id = 0;
return FALSE;
}
static void generic_unregister(DBusConnection *connection, void *user_data)
{
struct generic_data *data = user_data;
struct generic_data *parent = data->parent;
if (parent != NULL)
parent->objects = g_slist_remove(parent->objects, data);
if (data->process_id > 0) {
g_source_remove(data->process_id);
data->process_id = 0;
process_changes(data);
}
g_slist_foreach(data->objects, reset_parent, data->parent);
g_slist_free(data->objects);
dbus_connection_unref(data->conn);
g_free(data->introspect);
g_free(data->path);
g_free(data);
}
static DBusHandlerResult generic_message(DBusConnection *connection,
DBusMessage *message, void *user_data)
{
struct generic_data *data = user_data;
struct interface_data *iface;
const GDBusMethodTable *method;
const char *interface;
interface = dbus_message_get_interface(message);
iface = find_interface(data->interfaces, interface);
if (iface == NULL)
return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
for (method = iface->methods; method &&
method->name && method->function; method++) {
if (dbus_message_is_method_call(message, iface->name,
method->name) == FALSE)
continue;
if (check_experimental(method->flags,
G_DBUS_METHOD_FLAG_EXPERIMENTAL))
return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
if (g_dbus_args_have_signature(method->in_args,
message) == FALSE)
continue;
if (check_privilege(connection, message, method,
iface->user_data) == TRUE)
return DBUS_HANDLER_RESULT_HANDLED;
return process_message(connection, message, method,
iface->user_data);
}
return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
}
static DBusObjectPathVTable generic_table = {
.unregister_function = generic_unregister,
.message_function = generic_message,
};
static const GDBusMethodTable introspect_methods[] = {
{ GDBUS_METHOD("Introspect", NULL,
GDBUS_ARGS({ "xml", "s" }), introspect) },
{ }
};
static void append_interfaces(struct generic_data *data, DBusMessageIter *iter)
{
DBusMessageIter array;
dbus_message_iter_open_container(iter, DBUS_TYPE_ARRAY,
DBUS_DICT_ENTRY_BEGIN_CHAR_AS_STRING
DBUS_TYPE_STRING_AS_STRING
DBUS_TYPE_ARRAY_AS_STRING
DBUS_DICT_ENTRY_BEGIN_CHAR_AS_STRING
DBUS_TYPE_STRING_AS_STRING
DBUS_TYPE_VARIANT_AS_STRING
DBUS_DICT_ENTRY_END_CHAR_AS_STRING
DBUS_DICT_ENTRY_END_CHAR_AS_STRING, &array);
g_slist_foreach(data->interfaces, append_interface, &array);
dbus_message_iter_close_container(iter, &array);
}
static void append_object(gpointer data, gpointer user_data)
{
struct generic_data *child = data;
DBusMessageIter *array = user_data;
DBusMessageIter entry;
dbus_message_iter_open_container(array, DBUS_TYPE_DICT_ENTRY, NULL,
&entry);
dbus_message_iter_append_basic(&entry, DBUS_TYPE_OBJECT_PATH,
&child->path);
append_interfaces(child, &entry);
dbus_message_iter_close_container(array, &entry);
g_slist_foreach(child->objects, append_object, user_data);
}
static DBusMessage *get_objects(DBusConnection *connection,
DBusMessage *message, void *user_data)
{
struct generic_data *data = user_data;
DBusMessage *reply;
DBusMessageIter iter;
DBusMessageIter array;
reply = dbus_message_new_method_return(message);
if (reply == NULL)
return NULL;
dbus_message_iter_init_append(reply, &iter);
dbus_message_iter_open_container(&iter, DBUS_TYPE_ARRAY,
DBUS_DICT_ENTRY_BEGIN_CHAR_AS_STRING
DBUS_TYPE_OBJECT_PATH_AS_STRING
DBUS_TYPE_ARRAY_AS_STRING
DBUS_DICT_ENTRY_BEGIN_CHAR_AS_STRING
DBUS_TYPE_STRING_AS_STRING
DBUS_TYPE_ARRAY_AS_STRING
DBUS_DICT_ENTRY_BEGIN_CHAR_AS_STRING
DBUS_TYPE_STRING_AS_STRING
DBUS_TYPE_VARIANT_AS_STRING
DBUS_DICT_ENTRY_END_CHAR_AS_STRING
DBUS_DICT_ENTRY_END_CHAR_AS_STRING
DBUS_DICT_ENTRY_END_CHAR_AS_STRING,
&array);
g_slist_foreach(data->objects, append_object, &array);
dbus_message_iter_close_container(&iter, &array);
return reply;
}
static const GDBusMethodTable manager_methods[] = {
{ GDBUS_METHOD("GetManagedObjects", NULL,
GDBUS_ARGS({ "objects", "a{oa{sa{sv}}}" }), get_objects) },
{ }
};
static const GDBusSignalTable manager_signals[] = {
{ GDBUS_SIGNAL("InterfacesAdded",
GDBUS_ARGS({ "object", "o" },
{ "interfaces", "a{sa{sv}}" })) },
{ GDBUS_SIGNAL("InterfacesRemoved",
GDBUS_ARGS({ "object", "o" }, { "interfaces", "as" })) },
{ }
};
static gboolean add_interface(struct generic_data *data,
const char *name,
const GDBusMethodTable *methods,
const GDBusSignalTable *signals,
const GDBusPropertyTable *properties,
void *user_data,
GDBusDestroyFunction destroy)
{
struct interface_data *iface;
const GDBusMethodTable *method;
const GDBusSignalTable *signal;
const GDBusPropertyTable *property;
for (method = methods; method && method->name; method++) {
if (!check_experimental(method->flags,
G_DBUS_METHOD_FLAG_EXPERIMENTAL))
goto done;
}
for (signal = signals; signal && signal->name; signal++) {
if (!check_experimental(signal->flags,
G_DBUS_SIGNAL_FLAG_EXPERIMENTAL))
goto done;
}
for (property = properties; property && property->name; property++) {
if (!check_experimental(property->flags,
G_DBUS_PROPERTY_FLAG_EXPERIMENTAL))
goto done;
}
/* Nothing to register */
return FALSE;
done:
iface = g_new0(struct interface_data, 1);
iface->name = g_strdup(name);
iface->methods = methods;
iface->signals = signals;
iface->properties = properties;
iface->user_data = user_data;
iface->destroy = destroy;
data->interfaces = g_slist_append(data->interfaces, iface);
if (data->parent == NULL)
return TRUE;
data->added = g_slist_append(data->added, iface);
add_pending(data);
return TRUE;
}
static struct generic_data *object_path_ref(DBusConnection *connection,
const char *path)
{
struct generic_data *data;
if (dbus_connection_get_object_path_data(connection, path,
(void *) &data) == TRUE) {
if (data != NULL) {
data->refcount++;
return data;
}
}
data = g_new0(struct generic_data, 1);
data->conn = dbus_connection_ref(connection);
data->path = g_strdup(path);
data->refcount = 1;
data->introspect = g_strdup(DBUS_INTROSPECT_1_0_XML_DOCTYPE_DECL_NODE "<node></node>");
if (!dbus_connection_register_object_path(connection, path,
&generic_table, data)) {
dbus_connection_unref(data->conn);
g_free(data->path);
g_free(data->introspect);
g_free(data);
return NULL;
}
invalidate_parent_data(connection, path);
add_interface(data, DBUS_INTERFACE_INTROSPECTABLE, introspect_methods,
NULL, NULL, data, NULL);
return data;
}
static void object_path_unref(DBusConnection *connection, const char *path)
{
struct generic_data *data = NULL;
if (dbus_connection_get_object_path_data(connection, path,
(void *) &data) == FALSE)
return;
if (data == NULL)
return;
data->refcount--;
if (data->refcount > 0)
return;
remove_interface(data, DBUS_INTERFACE_INTROSPECTABLE);
remove_interface(data, DBUS_INTERFACE_PROPERTIES);
invalidate_parent_data(data->conn, data->path);
dbus_connection_unregister_object_path(data->conn, data->path);
}
static gboolean check_signal(DBusConnection *conn, const char *path,
const char *interface, const char *name,
const GDBusArgInfo **args)
{
struct generic_data *data = NULL;
struct interface_data *iface;
const GDBusSignalTable *signal;
*args = NULL;
if (!dbus_connection_get_object_path_data(conn, path,
(void *) &data) || data == NULL) {
error("dbus_connection_emit_signal: path %s isn't registered",
path);
return FALSE;
}
iface = find_interface(data->interfaces, interface);
if (iface == NULL) {
error("dbus_connection_emit_signal: %s does not implement %s",
path, interface);
return FALSE;
}
for (signal = iface->signals; signal && signal->name; signal++) {
if (strcmp(signal->name, name) != 0)
continue;
if (signal->flags & G_DBUS_SIGNAL_FLAG_EXPERIMENTAL) {
const char *env = g_getenv("GDBUS_EXPERIMENTAL");
if (g_strcmp0(env, "1") != 0)
break;
}
*args = signal->args;
return TRUE;
}
error("No signal named %s on interface %s", name, interface);
return FALSE;
}
gboolean g_dbus_register_interface(DBusConnection *connection,
const char *path, const char *name,
const GDBusMethodTable *methods,
const GDBusSignalTable *signals,
const GDBusPropertyTable *properties,
void *user_data,
GDBusDestroyFunction destroy)
{
struct generic_data *data;
data = object_path_ref(connection, path);
if (data == NULL)
return FALSE;
if (find_interface(data->interfaces, name)) {
object_path_unref(connection, path);
return FALSE;
}
if (!add_interface(data, name, methods, signals, properties, user_data,
destroy)) {
object_path_unref(connection, path);
return FALSE;
}
if (properties != NULL && !find_interface(data->interfaces,
DBUS_INTERFACE_PROPERTIES))
add_interface(data, DBUS_INTERFACE_PROPERTIES,
properties_methods, properties_signals, NULL,
data, NULL);
g_free(data->introspect);
data->introspect = NULL;
return TRUE;
}
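/*
 * Illustration only: registering a trivial interface with one synchronous
 * method. The object path, interface name and handler are made up; compare
 * the properties_methods[] table above for the macro conventions.
 */
#if 0
static DBusMessage *example_hello(DBusConnection *conn, DBusMessage *msg,
							void *user_data)
{
	return g_dbus_create_reply(msg, DBUS_TYPE_INVALID);
}

static const GDBusMethodTable example_methods[] = {
	{ GDBUS_METHOD("Hello", NULL, NULL, example_hello) },
	{ }
};

static gboolean example_register(DBusConnection *conn)
{
	return g_dbus_register_interface(conn, "/org/example",
					"org.example.Demo", example_methods,
					NULL, NULL, NULL, NULL);
}
#endif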
gboolean g_dbus_unregister_interface(DBusConnection *connection,
const char *path, const char *name)
{
struct generic_data *data = NULL;
if (path == NULL)
return FALSE;
if (dbus_connection_get_object_path_data(connection, path,
(void *) &data) == FALSE)
return FALSE;
if (data == NULL)
return FALSE;
if (remove_interface(data, name) == FALSE)
return FALSE;
g_free(data->introspect);
data->introspect = NULL;
object_path_unref(connection, data->path);
return TRUE;
}
gboolean g_dbus_register_security(const GDBusSecurityTable *security)
{
if (security_table != NULL)
return FALSE;
security_table = security;
return TRUE;
}
gboolean g_dbus_unregister_security(const GDBusSecurityTable *security)
{
security_table = NULL;
return TRUE;
}
DBusMessage *g_dbus_create_error_valist(DBusMessage *message, const char *name,
const char *format, va_list args)
{
char str[1024];
if (format)
vsnprintf(str, sizeof(str), format, args);
else
str[0] = '\0';
return dbus_message_new_error(message, name, str);
}
DBusMessage *g_dbus_create_error(DBusMessage *message, const char *name,
const char *format, ...)
{
va_list args;
DBusMessage *reply;
va_start(args, format);
reply = g_dbus_create_error_valist(message, name, format, args);
va_end(args);
return reply;
}
DBusMessage *g_dbus_create_reply_valist(DBusMessage *message,
int type, va_list args)
{
DBusMessage *reply;
reply = dbus_message_new_method_return(message);
if (reply == NULL)
return NULL;
if (dbus_message_append_args_valist(reply, type, args) == FALSE) {
dbus_message_unref(reply);
return NULL;
}
return reply;
}
DBusMessage *g_dbus_create_reply(DBusMessage *message, int type, ...)
{
va_list args;
DBusMessage *reply;
va_start(args, type);
reply = g_dbus_create_reply_valist(message, type, args);
va_end(args);
return reply;
}
static void g_dbus_flush(DBusConnection *connection)
{
GSList *l;
for (l = pending; l;) {
struct generic_data *data = l->data;
l = l->next;
if (data->conn != connection)
continue;
process_changes(data);
}
}
gboolean g_dbus_send_message(DBusConnection *connection, DBusMessage *message)
{
dbus_bool_t result = FALSE;
if (dbus_message_get_type(message) == DBUS_MESSAGE_TYPE_METHOD_CALL)
dbus_message_set_no_reply(message, TRUE);
else if (dbus_message_get_type(message) == DBUS_MESSAGE_TYPE_SIGNAL) {
const char *path = dbus_message_get_path(message);
const char *interface = dbus_message_get_interface(message);
const char *name = dbus_message_get_member(message);
const GDBusArgInfo *args;
if (!check_signal(connection, path, interface, name, &args))
goto out;
}
/* Flush pending signal to guarantee message order */
g_dbus_flush(connection);
result = dbus_connection_send(connection, message, NULL);
out:
dbus_message_unref(message);
return result;
}
gboolean g_dbus_send_message_with_reply(DBusConnection *connection,
DBusMessage *message,
DBusPendingCall **call, int timeout)
{
dbus_bool_t ret;
/* Flush pending signal to guarantee message order */
g_dbus_flush(connection);
ret = dbus_connection_send_with_reply(connection, message, call,
timeout);
if (ret == TRUE && call != NULL && *call == NULL) {
error("Unable to send message (passing fd blocked?)");
return FALSE;
}
return ret;
}
gboolean g_dbus_send_error_valist(DBusConnection *connection,
DBusMessage *message, const char *name,
const char *format, va_list args)
{
DBusMessage *error;
error = g_dbus_create_error_valist(message, name, format, args);
if (error == NULL)
return FALSE;
return g_dbus_send_message(connection, error);
}
gboolean g_dbus_send_error(DBusConnection *connection, DBusMessage *message,
const char *name, const char *format, ...)
{
va_list args;
gboolean result;
va_start(args, format);
result = g_dbus_send_error_valist(connection, message, name,
format, args);
va_end(args);
return result;
}
gboolean g_dbus_send_reply_valist(DBusConnection *connection,
DBusMessage *message, int type, va_list args)
{
DBusMessage *reply;
reply = dbus_message_new_method_return(message);
if (reply == NULL)
return FALSE;
if (dbus_message_append_args_valist(reply, type, args) == FALSE) {
dbus_message_unref(reply);
return FALSE;
}
return g_dbus_send_message(connection, reply);
}
gboolean g_dbus_send_reply(DBusConnection *connection,
DBusMessage *message, int type, ...)
{
va_list args;
gboolean result;
va_start(args, type);
result = g_dbus_send_reply_valist(connection, message, type, args);
va_end(args);
return result;
}
gboolean g_dbus_emit_signal(DBusConnection *connection,
const char *path, const char *interface,
const char *name, int type, ...)
{
va_list args;
gboolean result;
va_start(args, type);
result = g_dbus_emit_signal_valist(connection, path, interface,
name, type, args);
va_end(args);
return result;
}
gboolean g_dbus_emit_signal_valist(DBusConnection *connection,
const char *path, const char *interface,
const char *name, int type, va_list args)
{
DBusMessage *signal;
dbus_bool_t ret;
const GDBusArgInfo *args_info;
if (!check_signal(connection, path, interface, name, &args_info))
return FALSE;
signal = dbus_message_new_signal(path, interface, name);
if (signal == NULL) {
error("Unable to allocate new %s.%s signal", interface, name);
return FALSE;
}
ret = dbus_message_append_args_valist(signal, type, args);
if (!ret)
goto fail;
if (g_dbus_args_have_signature(args_info, signal) == FALSE) {
error("%s.%s: got unexpected signature '%s'", interface, name,
dbus_message_get_signature(signal));
ret = FALSE;
goto fail;
}
return g_dbus_send_message(connection, signal);
fail:
dbus_message_unref(signal);
return ret;
}
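/*
 * Illustration only: emitting a signal that was declared in the interface's
 * GDBusSignalTable (check_signal() above rejects undeclared ones). The path,
 * interface and member names are made up.
 */
#if 0
static void example_emit_state_changed(DBusConnection *conn)
{
	const char *state = "ready";

	g_dbus_emit_signal(conn, "/org/example", "org.example.Demo",
				"StateChanged",
				DBUS_TYPE_STRING, &state,
				DBUS_TYPE_INVALID);
}
#endif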
static void process_properties_from_interface(struct generic_data *data,
struct interface_data *iface)
{
GSList *l;
DBusMessage *signal;
DBusMessageIter iter, dict, array;
GSList *invalidated;
data->pending_prop = FALSE;
if (iface->pending_prop == NULL)
return;
signal = dbus_message_new_signal(data->path,
DBUS_INTERFACE_PROPERTIES, "PropertiesChanged");
if (signal == NULL) {
error("Unable to allocate new " DBUS_INTERFACE_PROPERTIES
".PropertiesChanged signal");
return;
}
iface->pending_prop = g_slist_reverse(iface->pending_prop);
dbus_message_iter_init_append(signal, &iter);
dbus_message_iter_append_basic(&iter, DBUS_TYPE_STRING, &iface->name);
dbus_message_iter_open_container(&iter, DBUS_TYPE_ARRAY,
DBUS_DICT_ENTRY_BEGIN_CHAR_AS_STRING
DBUS_TYPE_STRING_AS_STRING DBUS_TYPE_VARIANT_AS_STRING
DBUS_DICT_ENTRY_END_CHAR_AS_STRING, &dict);
invalidated = NULL;
for (l = iface->pending_prop; l != NULL; l = l->next) {
GDBusPropertyTable *p = l->data;
if (p->get == NULL)
continue;
if (p->exists != NULL && !p->exists(p, iface->user_data)) {
invalidated = g_slist_prepend(invalidated, p);
continue;
}
append_property(iface, p, &dict);
}
dbus_message_iter_close_container(&iter, &dict);
dbus_message_iter_open_container(&iter, DBUS_TYPE_ARRAY,
DBUS_TYPE_STRING_AS_STRING, &array);
for (l = invalidated; l != NULL; l = g_slist_next(l)) {
GDBusPropertyTable *p = l->data;
dbus_message_iter_append_basic(&array, DBUS_TYPE_STRING,
&p->name);
}
g_slist_free(invalidated);
dbus_message_iter_close_container(&iter, &array);
g_slist_free(iface->pending_prop);
iface->pending_prop = NULL;
/* Use dbus_connection_send to avoid recursive calls to g_dbus_flush */
dbus_connection_send(data->conn, signal, NULL);
dbus_message_unref(signal);
}
static void process_property_changes(struct generic_data *data)
{
GSList *l;
for (l = data->interfaces; l != NULL; l = l->next) {
struct interface_data *iface = l->data;
process_properties_from_interface(data, iface);
}
}
void g_dbus_emit_property_changed_full(DBusConnection *connection,
const char *path, const char *interface,
const char *name,
GDbusPropertyChangedFlags flags)
{
const GDBusPropertyTable *property;
struct generic_data *data;
struct interface_data *iface;
if (path == NULL)
return;
if (!dbus_connection_get_object_path_data(connection, path,
(void **) &data) || data == NULL)
return;
iface = find_interface(data->interfaces, interface);
if (iface == NULL)
return;
/*
* If the ObjectManager is attached, don't emit PropertiesChanged
* for an interface that has not been published yet.
*/
if (root && g_slist_find(data->added, iface))
return;
property = find_property(iface->properties, name);
if (property == NULL) {
error("Could not find property %s in %p", name,
iface->properties);
return;
}
if (g_slist_find(iface->pending_prop, (void *) property) != NULL)
return;
data->pending_prop = TRUE;
iface->pending_prop = g_slist_prepend(iface->pending_prop,
(void *) property);
if (flags & G_DBUS_PROPERTY_CHANGED_FLAG_FLUSH)
process_property_changes(data);
else
add_pending(data);
}
void g_dbus_emit_property_changed(DBusConnection *connection, const char *path,
const char *interface, const char *name)
{
g_dbus_emit_property_changed_full(connection, path, interface, name, 0);
}
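/*
 * Illustration only: after changing the backing value of a registered
 * property, notify clients; the emission is coalesced through an idle
 * handler unless the _FLUSH flag is used. The names are made up.
 */
#if 0
static void example_notify_state(DBusConnection *conn)
{
	g_dbus_emit_property_changed(conn, "/org/example",
					"org.example.Demo", "State");
}
#endif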
gboolean g_dbus_get_properties(DBusConnection *connection, const char *path,
const char *interface, DBusMessageIter *iter)
{
struct generic_data *data;
struct interface_data *iface;
if (path == NULL)
return FALSE;
if (!dbus_connection_get_object_path_data(connection, path,
(void **) &data) || data == NULL)
return FALSE;
iface = find_interface(data->interfaces, interface);
if (iface == NULL)
return FALSE;
append_properties(iface, iter);
return TRUE;
}
gboolean g_dbus_attach_object_manager(DBusConnection *connection)
{
struct generic_data *data;
data = object_path_ref(connection, "/");
if (data == NULL)
return FALSE;
add_interface(data, DBUS_INTERFACE_OBJECT_MANAGER,
manager_methods, manager_signals,
NULL, data, NULL);
root = data;
return TRUE;
}
gboolean g_dbus_detach_object_manager(DBusConnection *connection)
{
if (!g_dbus_unregister_interface(connection, "/",
DBUS_INTERFACE_OBJECT_MANAGER))
return FALSE;
root = NULL;
return TRUE;
}
void g_dbus_set_flags(int flags)
{
global_flags = flags;
}
int g_dbus_get_flags(void)
{
return global_flags;
}
| gpl-2.0 |
mmoiozo/pprz_vg | sw/airborne/subsystems/ins/ins_gps_passthrough_utm.c | 7 | 2430 | /*
* Copyright (C) 2004-2012 The Paparazzi Team
*
* This file is part of paparazzi.
*
* paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with paparazzi; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
/**
* @file subsystems/ins/ins_gps_passthrough_utm.c
*
* Simply passes GPS UTM position and velocity through to the state interface.
* For fixedwing firmware since it sets UTM pos only.
*/
#include "subsystems/ins.h"
#include <inttypes.h>
#include <math.h>
#include "state.h"
#include "subsystems/gps.h"
#include "firmwares/fixedwing/nav.h"
void ins_init(void)
{
struct UtmCoor_f utm0 = { nav_utm_north0, nav_utm_east0, 0., nav_utm_zone0 };
stateSetLocalUtmOrigin_f(&utm0);
stateSetPositionUtm_f(&utm0);
ins.status = INS_RUNNING;
}
void ins_reset_local_origin(void)
{
struct UtmCoor_f utm;
#ifdef GPS_USE_LATLONG
/* Recompute UTM coordinates in this zone */
struct LlaCoor_f lla;
LLA_FLOAT_OF_BFP(lla, gps.lla_pos);
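/* Added note: lla_pos.lon is in 1e-7 deg, and UTM zones are 6 deg wide
 * starting at 180W, so e.g. lon = 44.5E gives (44.5 + 180) / 6 + 1,
 * which truncates to zone 38. */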
utm.zone = (gps.lla_pos.lon / 1e7 + 180) / 6 + 1;
utm_of_lla_f(&utm, &lla);
#else
utm.zone = gps.utm_pos.zone;
utm.east = gps.utm_pos.east / 100.0f;
utm.north = gps.utm_pos.north / 100.0f;
#endif
// ground_alt: GPS height above mean sea level, mm -> m
utm.alt = gps.hmsl / 1000.0f;
// reset state UTM ref
stateSetLocalUtmOrigin_f(&utm);
}
void ins_reset_altitude_ref(void)
{
struct UtmCoor_f utm = state.utm_origin_f;
utm.alt = gps.hmsl / 1000.0f;
stateSetLocalUtmOrigin_f(&utm);
}
void ins_update_gps(void)
{
struct UtmCoor_f utm;
utm.east = gps.utm_pos.east / 100.0f;
utm.north = gps.utm_pos.north / 100.0f;
utm.zone = nav_utm_zone0;
utm.alt = gps.hmsl / 1000.0f;
// set position
stateSetPositionUtm_f(&utm);
struct NedCoor_f ned_vel = {
gps.ned_vel.x / 100.0f,
gps.ned_vel.y / 100.0f,
gps.ned_vel.z / 100.0f
};
// set velocity
stateSetSpeedNed_f(&ned_vel);
}
| gpl-2.0 |
moonman/linux-stable | drivers/gpio/gpio-zevio.c | 7 | 6411 | /*
* GPIO controller in LSI ZEVIO SoCs.
*
* Author: Fabian Vogt <fabian@ritter-vogt.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/gpio.h>
/*
* Memory layout:
* This chip has four gpio sections, each controls 8 GPIOs.
* Bit 0 in section 0 is GPIO 0, bit 2 in section 1 is GPIO 10.
* Disclaimer: Reverse engineered!
* For more information refer to:
* http://hackspire.unsads.com/wiki/index.php/Memory-mapped_I/O_ports#90000000_-_General_Purpose_I.2FO_.28GPIO.29
*
* 0x00-0x3F: Section 0
* +0x00: Masked interrupt status (read-only)
* +0x04: R: Interrupt status W: Reset interrupt status
* +0x08: R: Interrupt mask W: Mask interrupt
* +0x0C: W: Unmask interrupt (write-only)
* +0x10: Direction: I/O=1/0
* +0x14: Output
* +0x18: Input (read-only)
* +0x20: R: Level interrupt W: Set as level interrupt
* 0x40-0x7F: Section 1
* 0x80-0xBF: Section 2
* 0xC0-0xFF: Section 3
*/
#define ZEVIO_GPIO_SECTION_SIZE 0x40
/* Offsets to various registers */
#define ZEVIO_GPIO_INT_MASKED_STATUS 0x00
#define ZEVIO_GPIO_INT_STATUS 0x04
#define ZEVIO_GPIO_INT_UNMASK 0x08
#define ZEVIO_GPIO_INT_MASK 0x0C
#define ZEVIO_GPIO_DIRECTION 0x10
#define ZEVIO_GPIO_OUTPUT 0x14
#define ZEVIO_GPIO_INPUT 0x18
#define ZEVIO_GPIO_INT_STICKY 0x20
#define to_zevio_gpio(chip) container_of(to_of_mm_gpio_chip(chip), \
struct zevio_gpio, chip)
/* Bit number of GPIO in its section */
#define ZEVIO_GPIO_BIT(gpio) (gpio&7)
struct zevio_gpio {
spinlock_t lock;
struct of_mm_gpio_chip chip;
};
static inline u32 zevio_gpio_port_get(struct zevio_gpio *c, unsigned pin,
unsigned port_offset)
{
unsigned section_offset = ((pin >> 3) & 3)*ZEVIO_GPIO_SECTION_SIZE;
return readl(IOMEM(c->chip.regs + section_offset + port_offset));
}
static inline void zevio_gpio_port_set(struct zevio_gpio *c, unsigned pin,
unsigned port_offset, u32 val)
{
unsigned section_offset = ((pin >> 3) & 3)*ZEVIO_GPIO_SECTION_SIZE;
writel(val, IOMEM(c->chip.regs + section_offset + port_offset));
}
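/*
 * Worked example (added note): for pin 10, (pin >> 3) & 3 == 1 selects
 * section 1 at chip.regs + 0x40, and ZEVIO_GPIO_BIT(10) == 2 selects bit 2
 * of that section's registers, matching the "bit 2 in section 1 is GPIO 10"
 * note in the layout comment above.
 */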
/* Functions for struct gpio_chip */
static int zevio_gpio_get(struct gpio_chip *chip, unsigned pin)
{
struct zevio_gpio *controller = to_zevio_gpio(chip);
u32 val, dir;
spin_lock(&controller->lock);
dir = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_DIRECTION);
if (dir & BIT(ZEVIO_GPIO_BIT(pin)))
val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_INPUT);
else
val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_OUTPUT);
spin_unlock(&controller->lock);
return (val >> ZEVIO_GPIO_BIT(pin)) & 0x1;
}
static void zevio_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
{
struct zevio_gpio *controller = to_zevio_gpio(chip);
u32 val;
spin_lock(&controller->lock);
val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_OUTPUT);
if (value)
val |= BIT(ZEVIO_GPIO_BIT(pin));
else
val &= ~BIT(ZEVIO_GPIO_BIT(pin));
zevio_gpio_port_set(controller, pin, ZEVIO_GPIO_OUTPUT, val);
spin_unlock(&controller->lock);
}
static int zevio_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
{
struct zevio_gpio *controller = to_zevio_gpio(chip);
u32 val;
spin_lock(&controller->lock);
val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_DIRECTION);
val |= BIT(ZEVIO_GPIO_BIT(pin));
zevio_gpio_port_set(controller, pin, ZEVIO_GPIO_DIRECTION, val);
spin_unlock(&controller->lock);
return 0;
}
static int zevio_gpio_direction_output(struct gpio_chip *chip,
unsigned pin, int value)
{
struct zevio_gpio *controller = to_zevio_gpio(chip);
u32 val;
spin_lock(&controller->lock);
val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_OUTPUT);
if (value)
val |= BIT(ZEVIO_GPIO_BIT(pin));
else
val &= ~BIT(ZEVIO_GPIO_BIT(pin));
zevio_gpio_port_set(controller, pin, ZEVIO_GPIO_OUTPUT, val);
val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_DIRECTION);
val &= ~BIT(ZEVIO_GPIO_BIT(pin));
zevio_gpio_port_set(controller, pin, ZEVIO_GPIO_DIRECTION, val);
spin_unlock(&controller->lock);
return 0;
}
static int zevio_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
{
/*
* TODO: Implement IRQs.
* Not implemented yet due to weird lockups
*/
return -ENXIO;
}
static struct gpio_chip zevio_gpio_chip = {
.direction_input = zevio_gpio_direction_input,
.direction_output = zevio_gpio_direction_output,
.set = zevio_gpio_set,
.get = zevio_gpio_get,
.to_irq = zevio_gpio_to_irq,
.base = 0,
.owner = THIS_MODULE,
.ngpio = 32,
.of_gpio_n_cells = 2,
};
/* Initialization */
static int zevio_gpio_probe(struct platform_device *pdev)
{
struct zevio_gpio *controller;
int status, i;
controller = devm_kzalloc(&pdev->dev, sizeof(*controller), GFP_KERNEL);
if (!controller)
return -ENOMEM;
platform_set_drvdata(pdev, controller);
/* Copy the gpio_chip template and bind it to this device */
controller->chip.gc = zevio_gpio_chip;
controller->chip.gc.parent = &pdev->dev;
status = of_mm_gpiochip_add(pdev->dev.of_node, &(controller->chip));
if (status) {
dev_err(&pdev->dev, "failed to add gpiochip: %d\n", status);
return status;
}
spin_lock_init(&controller->lock);
/* Disable interrupts, they only cause errors */
for (i = 0; i < controller->chip.gc.ngpio; i += 8)
zevio_gpio_port_set(controller, i, ZEVIO_GPIO_INT_MASK, 0xFF);
dev_dbg(controller->chip.gc.parent, "ZEVIO GPIO controller set up!\n");
return 0;
}
static int zevio_gpio_remove(struct platform_device *pdev)
{
struct zevio_gpio *controller = platform_get_drvdata(pdev);
of_mm_gpiochip_remove(&controller->chip);
return 0;
}
static const struct of_device_id zevio_gpio_of_match[] = {
{ .compatible = "lsi,zevio-gpio", },
{ },
};
MODULE_DEVICE_TABLE(of, zevio_gpio_of_match);
static struct platform_driver zevio_gpio_driver = {
.driver = {
.name = "gpio-zevio",
.of_match_table = zevio_gpio_of_match,
},
.probe = zevio_gpio_probe,
.remove = zevio_gpio_remove,
};
module_platform_driver(zevio_gpio_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fabian Vogt <fabian@ritter-vogt.de>");
MODULE_DESCRIPTION("LSI ZEVIO SoC GPIO driver");
| gpl-2.0 |
sangwook236/general-development-and-testing | sw_dev/cpp/rnd/test/motion_analysis/klt_lib/writeFeatures.c | 7 | 20509 | /*********************************************************************
* writeFeatures.c
*
*********************************************************************
*/
/* Standard includes */
#include <assert.h>
#include <ctype.h> /* isdigit() */
#include <stdio.h> /* sprintf(), fprintf(), sscanf(), fscanf() */
#include <stdlib.h> /* malloc() */
#include <string.h> /* memcpy(), strcmp() */
/* Our includes */
#include "base.h"
#include "error.h"
#include "pnmio.h" /* ppmWriteFileRGB() */
#include "klt.h"
#define BINHEADERLENGTH 6
extern int KLT_verbose;
typedef enum {FEATURE_LIST, FEATURE_HISTORY, FEATURE_TABLE} structureType;
static char warning_line[] = "!!! Warning: This is a KLT data file. "
"Do not modify below this line !!!\n";
static char binheader_fl[BINHEADERLENGTH+1] = "KLTFL1";
static char binheader_fh[BINHEADERLENGTH+1] = "KLTFH1";
static char binheader_ft[BINHEADERLENGTH+1] = "KLTFT1";
/*********************************************************************
* KLTWriteFeatureListToPPM
*/
void KLTWriteFeatureListToPPM(
KLT_FeatureList featurelist,
KLT_PixelType *greyimg,
int ncols,
int nrows,
char *filename)
{
int nbytes = ncols * nrows * sizeof(char);
uchar *redimg, *grnimg, *bluimg;
int offset;
int x, y, xx, yy;
int i;
if (KLT_verbose >= 1)
fprintf(stderr, "(KLT) Writing %d features to PPM file: '%s'\n",
KLTCountRemainingFeatures(featurelist), filename);
/* Allocate memory for component images */
redimg = (uchar *) malloc(nbytes);
grnimg = (uchar *) malloc(nbytes);
bluimg = (uchar *) malloc(nbytes);
if (redimg == NULL || grnimg == NULL || bluimg == NULL)
KLTError("(KLTWriteFeaturesToPPM) Out of memory\n");
/* Copy grey image to component images */
if (sizeof(KLT_PixelType) != 1)
KLTWarning("(KLTWriteFeaturesToPPM) KLT_PixelType is not uchar");
memcpy(redimg, greyimg, nbytes);
memcpy(grnimg, greyimg, nbytes);
memcpy(bluimg, greyimg, nbytes);
/* Overlay features in red */
for (i = 0 ; i < featurelist->nFeatures ; i++)
if (featurelist->feature[i]->val >= 0) {
x = (int) (featurelist->feature[i]->x + 0.5);
y = (int) (featurelist->feature[i]->y + 0.5);
for (yy = y - 1 ; yy <= y + 1 ; yy++)
for (xx = x - 1 ; xx <= x + 1 ; xx++)
if (xx >= 0 && yy >= 0 && xx < ncols && yy < nrows) {
offset = yy * ncols + xx;
*(redimg + offset) = 255;
*(grnimg + offset) = 0;
*(bluimg + offset) = 0;
}
}
/* Write to PPM file */
ppmWriteFileRGB(filename, redimg, grnimg, bluimg, ncols, nrows);
/* Free memory */
free(redimg);
free(grnimg);
free(bluimg);
}
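/*
 * Usage sketch (hypothetical call; "img", "ncols", "nrows" and "fl" are
 * assumed to come from the usual KLT pipeline, e.g. pgmReadFile() and
 * KLTSelectGoodFeatures()):
 *
 *	KLTWriteFeatureListToPPM(fl, img, ncols, nrows, "features.ppm");
 */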
static FILE* _printSetupTxt(
char *fname, /* Input: filename, or NULL for stderr */
char *fmt, /* Input: format (e.g., %5.1f or %3d) */
char *format, /* Output: format (e.g., (%5.1f,%5.1f)=%3d) */
char *type) /* Output: either 'f' or 'd', based on input format */
{
FILE *fp;
const int val_width = 5;
int i;
/* Either open file or use stderr */
if (fname == NULL) fp = stderr;
else fp = fopen(fname, "wb");
if (fp == NULL)
KLTError("(KLTWriteFeatures) "
"Can't open file '%s' for writing\n", fname);
/* Parse format */
if (fmt[0] != '%')
KLTError("(KLTWriteFeatures) Bad Format: %s\n", fmt);
	i = 0;
	while (fmt[i] != '\0')
		i++;
	*type = fmt[i-1];
if (*type != 'f' && *type != 'd')
KLTError("(KLTWriteFeatures) Format must end in 'f' or 'd'.");
/* Construct feature format */
sprintf(format, "(%s,%s)=%%%dd ", fmt, fmt, val_width);
return fp;
}
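/*
 * Worked expansion (not a stored string): with fmt = "%5.1f" and
 * val_width = 5, the constructed per-feature format is
 * "(%5.1f,%5.1f)=%5d ".
 */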
static FILE* _printSetupBin(
char *fname) /* Input: filename */
{
FILE *fp;
if (fname == NULL)
KLTError("(KLTWriteFeatures) Can't write binary data to stderr");
fp = fopen(fname, "wb");
if (fp == NULL)
KLTError("(KLTWriteFeatures) "
"Can't open file '%s' for writing", fname);
return fp;
}
static void _printNhyphens(
FILE *fp,
int n)
{
int i;
for (i = 0 ; i < n ; i++)
fprintf(fp, "-");
}
static void _printInteger(
FILE *fp,
int integer,
int width)
{
char fmt[80];
sprintf(fmt, "%%%dd", width);
fprintf(fp, fmt, integer);
}
static KLT_BOOL _isCharInString(
char c,
char *str)
{
int width = strlen(str);
int i;
for (i = 0 ; i < width ; i++)
if (c == str[i]) return TRUE;
return FALSE;
}
/*********************************************************************
* _findStringWidth
*
* Calculates the length of a string after expansion. E.g., the
* length of "(%6.1f)" is eight -- six for the floating-point number,
* and two for the parentheses.
*/
static int _findStringWidth(
char *str)
{
int width = 0;
int add;
int maxi = strlen(str) - 1;
int i = 0;
while (str[i] != '\0') {
if (str[i] == '%') {
if (isdigit(str[i+1])) {
sscanf(str+i+1, "%d", &add);
width += add;
i += 2;
while (!_isCharInString(str[i], "diouxefgn")) {
i++;
if (i > maxi)
KLTError("(_findStringWidth) Can't determine length "
"of string '%s'", str);
}
i++;
} else if (str[i+1] == 'c') {
width++;
i += 2;
} else
KLTError("(_findStringWidth) Can't determine length "
"of string '%s'", str);
} else {
i++;
width++;
}
}
return width;
}
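/*
 * Worked examples, consistent with the algorithm above (these calls do
 * not appear elsewhere in the library):
 *
 *	_findStringWidth("(%6.1f)");           // -> 8  (6 + "()")
 *	_findStringWidth("(%5.1f,%5.1f)=%3d"); // -> 17 (5+5+3 + "(,)=")
 */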
static void _printHeader(
FILE *fp,
char *format,
structureType id,
int nFrames,
int nFeatures)
{
int width = _findStringWidth(format);
int i;
assert(id == FEATURE_LIST || id == FEATURE_HISTORY || id == FEATURE_TABLE);
if (fp != stderr) {
fprintf(fp, "Feel free to place comments here.\n\n\n");
fprintf(fp, "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
fprintf(fp, warning_line);
fprintf(fp, "\n");
}
fprintf(fp, "------------------------------\n");
switch (id) {
case FEATURE_LIST: fprintf(fp, "KLT Feature List\n"); break;
case FEATURE_HISTORY: fprintf(fp, "KLT Feature History\n"); break;
case FEATURE_TABLE: fprintf(fp, "KLT Feature Table\n"); break;
}
fprintf(fp, "------------------------------\n\n");
switch (id) {
case FEATURE_LIST: fprintf(fp, "nFeatures = %d\n\n", nFeatures); break;
case FEATURE_HISTORY: fprintf(fp, "nFrames = %d\n\n", nFrames); break;
case FEATURE_TABLE: fprintf(fp, "nFrames = %d, nFeatures = %d\n\n",
nFrames, nFeatures); break;
}
switch (id) {
case FEATURE_LIST: fprintf(fp, "feature | (x,y)=val\n");
fprintf(fp, "--------+-");
_printNhyphens(fp, width);
fprintf(fp, "\n");
break;
case FEATURE_HISTORY: fprintf(fp, "frame | (x,y)=val\n");
fprintf(fp, "------+-");
_printNhyphens(fp, width);
fprintf(fp, "\n");
break;
case FEATURE_TABLE: fprintf(fp, "feature | frame\n");
fprintf(fp, " |");
for (i = 0 ; i < nFrames ; i++) _printInteger(fp, i, width);
fprintf(fp, "\n--------+-");
for (i = 0 ; i < nFrames ; i++) _printNhyphens(fp, width);
fprintf(fp, "\n");
break;
}
}
static void _printFeatureTxt(
FILE *fp,
KLT_Feature feat,
char *format,
char type)
{
assert(type == 'f' || type == 'd');
if (type == 'f')
fprintf(fp, format, (float) feat->x, (float) feat->y, feat->val);
else if (type == 'd') {
/* Round x & y to nearest integer, unless negative */
float x = feat->x;
float y = feat->y;
if (x >= 0.0) x += 0.5;
if (y >= 0.0) y += 0.5;
fprintf(fp, format,
(int) x, (int) y, feat->val);
}
}
static void _printFeatureBin(
FILE *fp,
KLT_Feature feat)
{
fwrite(&(feat->x), sizeof(KLT_locType), 1, fp);
fwrite(&(feat->y), sizeof(KLT_locType), 1, fp);
fwrite(&(feat->val), sizeof(int), 1, fp);
}
static void _printShutdown(
FILE *fp)
{
/* Close file, if necessary */
if (fp != stderr)
fclose(fp);
}
/*********************************************************************
* KLTWriteFeatureList()
* KLTWriteFeatureHistory()
* KLTWriteFeatureTable()
*
* Writes features to file or to screen.
*
* INPUTS
* fname: name of file to write data; if NULL, then print to stderr
* fmt: format for printing (e.g., "%5.1f" or "%3d");
* if NULL, and if fname is not NULL, then write to binary file.
*/
void KLTWriteFeatureList(
KLT_FeatureList fl,
char *fname,
char *fmt)
{
FILE *fp;
char format[100];
char type;
int i;
if (KLT_verbose >= 1 && fname != NULL) {
fprintf(stderr,
"(KLT) Writing feature list to %s file: '%s'\n",
(fmt == NULL ? "binary" : "text"), fname);
}
if (fmt != NULL) { /* text file or stderr */
fp = _printSetupTxt(fname, fmt, format, &type);
_printHeader(fp, format, FEATURE_LIST, 0, fl->nFeatures);
for (i = 0 ; i < fl->nFeatures ; i++) {
fprintf(fp, "%7d | ", i);
_printFeatureTxt(fp, fl->feature[i], format, type);
fprintf(fp, "\n");
}
_printShutdown(fp);
} else { /* binary file */
fp = _printSetupBin(fname);
fwrite(binheader_fl, sizeof(char), BINHEADERLENGTH, fp);
fwrite(&(fl->nFeatures), sizeof(int), 1, fp);
for (i = 0 ; i < fl->nFeatures ; i++) {
_printFeatureBin(fp, fl->feature[i]);
}
fclose(fp);
}
}
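/*
 * Usage sketch (hypothetical calls; "fl" is assumed to hold tracked
 * features). The fname/fmt combinations follow the INPUTS note above:
 *
 *	KLTWriteFeatureList(fl, "feat.txt", "%5.1f"); // text file
 *	KLTWriteFeatureList(fl, "feat.fl", NULL);     // binary file
 *	KLTWriteFeatureList(fl, NULL, "%3d");         // text to stderr
 */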
void KLTWriteFeatureHistory(
KLT_FeatureHistory fh,
char *fname,
char *fmt)
{
FILE *fp;
char format[100];
char type;
int i;
if (KLT_verbose >= 1 && fname != NULL) {
fprintf(stderr,
"(KLT) Writing feature history to %s file: '%s'\n",
(fmt == NULL ? "binary" : "text"), fname);
}
if (fmt != NULL) { /* text file or stderr */
fp = _printSetupTxt(fname, fmt, format, &type);
_printHeader(fp, format, FEATURE_HISTORY, fh->nFrames, 0);
for (i = 0 ; i < fh->nFrames ; i++) {
fprintf(fp, "%5d | ", i);
_printFeatureTxt(fp, fh->feature[i], format, type);
fprintf(fp, "\n");
}
_printShutdown(fp);
} else { /* binary file */
fp = _printSetupBin(fname);
fwrite(binheader_fh, sizeof(char), BINHEADERLENGTH, fp);
fwrite(&(fh->nFrames), sizeof(int), 1, fp);
for (i = 0 ; i < fh->nFrames ; i++) {
_printFeatureBin(fp, fh->feature[i]);
}
fclose(fp);
}
}
void KLTWriteFeatureTable(
KLT_FeatureTable ft,
char *fname,
char *fmt)
{
FILE *fp;
char format[100];
char type;
int i, j;
if (KLT_verbose >= 1 && fname != NULL) {
fprintf(stderr,
"(KLT) Writing feature table to %s file: '%s'\n",
(fmt == NULL ? "binary" : "text"), fname);
}
if (fmt != NULL) { /* text file or stderr */
fp = _printSetupTxt(fname, fmt, format, &type);
_printHeader(fp, format, FEATURE_TABLE, ft->nFrames, ft->nFeatures);
for (j = 0 ; j < ft->nFeatures ; j++) {
fprintf(fp, "%7d | ", j);
for (i = 0 ; i < ft->nFrames ; i++)
_printFeatureTxt(fp, ft->feature[j][i], format, type);
fprintf(fp, "\n");
}
_printShutdown(fp);
} else { /* binary file */
fp = _printSetupBin(fname);
fwrite(binheader_ft, sizeof(char), BINHEADERLENGTH, fp);
fwrite(&(ft->nFrames), sizeof(int), 1, fp);
fwrite(&(ft->nFeatures), sizeof(int), 1, fp);
for (j = 0 ; j < ft->nFeatures ; j++) {
for (i = 0 ; i < ft->nFrames ; i++) {
_printFeatureBin(fp, ft->feature[j][i]);
}
}
fclose(fp);
}
}
static structureType _readHeader(
FILE *fp,
int *nFrames,
int *nFeatures,
KLT_BOOL *binary)
{
#define LINELENGTH 100
char line[LINELENGTH];
structureType id;
/* If file is binary, then read data and return */
fread(line, sizeof(char), BINHEADERLENGTH, fp);
line[BINHEADERLENGTH] = 0;
if (strcmp(line, binheader_fl) == 0) {
assert(nFeatures != NULL);
fread(nFeatures, sizeof(int), 1, fp);
*binary = TRUE;
return FEATURE_LIST;
} else if (strcmp(line, binheader_fh) == 0) {
assert(nFrames != NULL);
fread(nFrames, sizeof(int), 1, fp);
*binary = TRUE;
return FEATURE_HISTORY;
} else if (strcmp(line, binheader_ft) == 0) {
assert(nFrames != NULL);
assert(nFeatures != NULL);
fread(nFrames, sizeof(int), 1, fp);
fread(nFeatures, sizeof(int), 1, fp);
*binary = TRUE;
return FEATURE_TABLE;
/* If file is NOT binary, then continue.*/
} else {
rewind(fp);
*binary = FALSE;
}
/* Skip comments until warning line */
while (strcmp(line, warning_line) != 0) {
fgets(line, LINELENGTH, fp);
if (feof(fp))
KLTError("(_readFeatures) File is corrupted -- Couldn't find line:\n"
"\t%s\n", warning_line);
}
/* Read 'Feature List', 'Feature History', or 'Feature Table' */
while (fgetc(fp) != '-');
while (fgetc(fp) != '\n');
fgets(line, LINELENGTH, fp);
if (strcmp(line, "KLT Feature List\n") == 0) id = FEATURE_LIST;
else if (strcmp(line, "KLT Feature History\n") == 0) id = FEATURE_HISTORY;
else if (strcmp(line, "KLT Feature Table\n") == 0) id = FEATURE_TABLE;
else
KLTError("(_readFeatures) File is corrupted -- (Not 'KLT Feature List', "
"'KLT Feature History', or 'KLT Feature Table')");
/* If there's an incompatibility between the type of file */
/* and the parameters passed, exit now before we attempt */
	/* to write to non-allocated memory. A higher-level routine */
	/* should detect and handle this error. */
if ((id == FEATURE_LIST && nFeatures == NULL) ||
(id == FEATURE_HISTORY && nFrames == NULL) ||
(id == FEATURE_TABLE && (nFeatures == NULL || nFrames == NULL)))
return id;
/* Read nFeatures and nFrames */
while (fgetc(fp) != '-');
while (fgetc(fp) != '\n');
fscanf(fp, "%s", line);
if (id == FEATURE_LIST) {
if (strcmp(line, "nFeatures") != 0)
KLTError("(_readFeatures) File is corrupted -- "
"(Expected 'nFeatures', found '%s' instead)", line);
} else if (strcmp(line, "nFrames") != 0)
KLTError("(_readFeatures) File is corrupted -- "
"(Expected 'nFrames', found '%s' instead)", line);
fscanf(fp, "%s", line);
if (strcmp(line, "=") != 0)
KLTError("(_readFeatures) File is corrupted -- "
"(Expected '=', found '%s' instead)", line);
if (id == FEATURE_LIST) fscanf(fp, "%d", nFeatures);
else fscanf(fp, "%d", nFrames);
/* If 'Feature Table', then also get nFeatures */
if (id == FEATURE_TABLE) {
fscanf(fp, "%s", line);
		if (strcmp(line, ",") != 0)
			KLTError("(_readFeatures) File is corrupted -- "
				"(Expected ',', found '%s' instead)", line);
fscanf(fp, "%s", line);
		if (strcmp(line, "nFeatures") != 0)
			KLTError("(_readFeatures) File is corrupted -- "
				"(Expected 'nFeatures', found '%s' instead)", line);
fscanf(fp, "%s", line);
		if (strcmp(line, "=") != 0)
			KLTError("(_readFeatures) File is corrupted -- "
				"(Expected '=', found '%s' instead)", line);
fscanf(fp, "%d", nFeatures);
}
/* Skip junk before data */
while (fgetc(fp) != '-');
while (fgetc(fp) != '\n');
return id;
#undef LINELENGTH
}
static void _readFeatureTxt(
FILE *fp,
KLT_Feature feat)
{
while (fgetc(fp) != '(');
fscanf(fp, "%f,%f)=%d", &(feat->x), &(feat->y), &(feat->val));
}
static void _readFeatureBin(
FILE *fp,
KLT_Feature feat)
{
fread(&(feat->x), sizeof(KLT_locType), 1, fp);
fread(&(feat->y), sizeof(KLT_locType), 1, fp);
fread(&(feat->val), sizeof(int), 1, fp);
}
/*********************************************************************
* KLTReadFeatureList
* KLTReadFeatureHistory
* KLTReadFeatureTable
*
* If the first parameter (fl, fh, or ft) is NULL, then the
* corresponding structure is created.
*/
KLT_FeatureList KLTReadFeatureList(
KLT_FeatureList fl_in,
char *fname)
{
FILE *fp;
KLT_FeatureList fl;
int nFeatures;
structureType id;
int indx;
KLT_BOOL binary; /* whether file is binary or text */
int i;
fp = fopen(fname, "rb");
if (fp == NULL) KLTError("(KLTReadFeatureList) Can't open file '%s' "
"for reading", fname);
if (KLT_verbose >= 1)
fprintf(stderr, "(KLT) Reading feature list from '%s'\n", fname);
id = _readHeader(fp, NULL, &nFeatures, &binary);
if (id != FEATURE_LIST)
KLTError("(KLTReadFeatureList) File '%s' does not contain "
"a FeatureList", fname);
if (fl_in == NULL) {
fl = KLTCreateFeatureList(nFeatures);
fl->nFeatures = nFeatures;
}
else {
fl = fl_in;
if (fl->nFeatures != nFeatures)
KLTError("(KLTReadFeatureList) The feature list passed "
"does not contain the same number of features as "
"the feature list in file '%s' ", fname);
}
if (!binary) { /* text file */
for (i = 0 ; i < fl->nFeatures ; i++) {
fscanf(fp, "%d |", &indx);
if (indx != i) KLTError("(KLTReadFeatureList) Bad index at i = %d"
"-- %d", i, indx);
_readFeatureTxt(fp, fl->feature[i]);
}
} else { /* binary file */
for (i = 0 ; i < fl->nFeatures ; i++) {
_readFeatureBin(fp, fl->feature[i]);
}
}
fclose(fp);
return fl;
}
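/*
 * Round-trip sketch (hypothetical; pairs with the write example above).
 * Passing NULL as the first argument lets the reader allocate the list
 * from the count stored in the file; KLTFreeFeatureList() is assumed to
 * be the matching destructor from klt.h.
 *
 *	KLT_FeatureList fl2 = KLTReadFeatureList(NULL, "feat.fl");
 *	...
 *	KLTFreeFeatureList(fl2);
 */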
KLT_FeatureHistory KLTReadFeatureHistory(
KLT_FeatureHistory fh_in,
char *fname)
{
FILE *fp;
KLT_FeatureHistory fh;
int nFrames;
structureType id;
int indx;
KLT_BOOL binary; /* whether file is binary or text */
int i;
fp = fopen(fname, "rb");
if (fp == NULL) KLTError("(KLTReadFeatureHistory) Can't open file '%s' "
"for reading", fname);
if (KLT_verbose >= 1) fprintf(stderr, "(KLT) Reading feature history from '%s'\n", fname);
id = _readHeader(fp, &nFrames, NULL, &binary);
if (id != FEATURE_HISTORY) KLTError("(KLTReadFeatureHistory) File '%s' does not contain "
"a FeatureHistory", fname);
if (fh_in == NULL) {
fh = KLTCreateFeatureHistory(nFrames);
fh->nFrames = nFrames;
}
else {
fh = fh_in;
if (fh->nFrames != nFrames)
KLTError("(KLTReadFeatureHistory) The feature history passed "
"does not contain the same number of frames as "
"the feature history in file '%s' ", fname);
}
if (!binary) { /* text file */
for (i = 0 ; i < fh->nFrames ; i++) {
fscanf(fp, "%d |", &indx);
if (indx != i)
KLTError("(KLTReadFeatureHistory) Bad index at i = %d"
"-- %d", i, indx);
_readFeatureTxt(fp, fh->feature[i]);
}
} else { /* binary file */
for (i = 0 ; i < fh->nFrames ; i++) {
_readFeatureBin(fp, fh->feature[i]);
}
}
fclose(fp);
return fh;
}
KLT_FeatureTable KLTReadFeatureTable(
KLT_FeatureTable ft_in,
char *fname)
{
FILE *fp;
KLT_FeatureTable ft;
int nFrames;
int nFeatures;
structureType id;
int indx;
KLT_BOOL binary; /* whether file is binary or text */
int i, j;
fp = fopen(fname, "rb");
if (fp == NULL) KLTError("(KLTReadFeatureTable) Can't open file '%s' "
"for reading", fname);
if (KLT_verbose >= 1) fprintf(stderr, "(KLT) Reading feature table from '%s'\n", fname);
id = _readHeader(fp, &nFrames, &nFeatures, &binary);
if (id != FEATURE_TABLE) KLTError("(KLTReadFeatureTable) File '%s' does not contain "
"a FeatureTable", fname);
if (ft_in == NULL) {
ft = KLTCreateFeatureTable(nFrames, nFeatures);
ft->nFrames = nFrames;
ft->nFeatures = nFeatures;
}
else {
ft = ft_in;
if (ft->nFrames != nFrames || ft->nFeatures != nFeatures)
KLTError("(KLTReadFeatureTable) The feature table passed "
"does not contain the same number of frames and "
"features as the feature table in file '%s' ", fname);
}
if (!binary) { /* text file */
for (j = 0 ; j < ft->nFeatures ; j++) {
fscanf(fp, "%d |", &indx);
if (indx != j)
KLTError("(KLTReadFeatureTable) Bad index at j = %d"
"-- %d", j, indx);
for (i = 0 ; i < ft->nFrames ; i++)
_readFeatureTxt(fp, ft->feature[j][i]);
}
} else { /* binary file */
for (j = 0 ; j < ft->nFeatures ; j++) {
for (i = 0 ; i < ft->nFrames ; i++)
_readFeatureBin(fp, ft->feature[j][i]);
}
}
fclose(fp);
return ft;
}
| gpl-2.0 |
gpandcb/pkernel | fs/ext4/inode.c | 7 | 172003 | /*
* linux/fs/ext4/inode.c
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/inode.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* 64-bit file support on 64-bit platforms by Jakub Jelinek
* (jj@sunsite.ms.mff.cuni.cz)
*
* Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
*/
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"
#include <trace/events/ext4.h>
#define MPAGE_DA_EXTENT_TAIL 0x01
static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
struct ext4_inode_info *ei)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
__u32 csum;
__u16 dummy_csum = 0;
int offset = offsetof(struct ext4_inode, i_checksum_lo);
unsigned int csum_size = sizeof(dummy_csum);
csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
offset += csum_size;
csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
EXT4_GOOD_OLD_INODE_SIZE - offset);
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
offset = offsetof(struct ext4_inode, i_checksum_hi);
csum = ext4_chksum(sbi, csum, (__u8 *)raw +
EXT4_GOOD_OLD_INODE_SIZE,
offset - EXT4_GOOD_OLD_INODE_SIZE);
if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
csum_size);
offset += csum_size;
csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
EXT4_INODE_SIZE(inode->i_sb) -
offset);
}
}
return csum;
}
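/*
 * In short: the checksum covers the whole on-disk inode with the
 * i_checksum_lo/i_checksum_hi fields themselves replaced by zeroes, so
 * that verification can recompute the value from the raw inode exactly
 * as it was read.
 */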
static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
struct ext4_inode_info *ei)
{
__u32 provided, calculated;
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
cpu_to_le32(EXT4_OS_LINUX) ||
!ext4_has_metadata_csum(inode->i_sb))
return 1;
provided = le16_to_cpu(raw->i_checksum_lo);
calculated = ext4_inode_csum(inode, raw, ei);
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
else
calculated &= 0xFFFF;
return provided == calculated;
}
static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
struct ext4_inode_info *ei)
{
__u32 csum;
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
cpu_to_le32(EXT4_OS_LINUX) ||
!ext4_has_metadata_csum(inode->i_sb))
return;
csum = ext4_inode_csum(inode, raw, ei);
raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}
static inline int ext4_begin_ordered_truncate(struct inode *inode,
loff_t new_size)
{
trace_ext4_begin_ordered_truncate(inode, new_size);
/*
* If jinode is zero, then we never opened the file for
* writing, so there's no need to call
* jbd2_journal_begin_ordered_truncate() since there's no
* outstanding writes we need to flush.
*/
if (!EXT4_I(inode)->jinode)
return 0;
return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
EXT4_I(inode)->jinode,
new_size);
}
static void ext4_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
int pextents);
/*
* Test whether an inode is a fast symlink.
*/
int ext4_inode_is_fast_symlink(struct inode *inode)
{
int ea_blocks = EXT4_I(inode)->i_file_acl ?
EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
if (ext4_has_inline_data(inode))
return 0;
return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}
/*
* Restart the transaction associated with *handle. This does a commit,
* so before we call here everything must be consistently dirtied against
* this transaction.
*/
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
int nblocks)
{
int ret;
/*
* Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
* moment, get_block can be called only for blocks inside i_size since
* page cache has been already dropped and writes are blocked by
* i_mutex. So we can safely drop the i_data_sem here.
*/
BUG_ON(EXT4_JOURNAL(inode) == NULL);
jbd_debug(2, "restarting handle %p\n", handle);
up_write(&EXT4_I(inode)->i_data_sem);
ret = ext4_journal_restart(handle, nblocks);
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
return ret;
}
/*
* Called at the last iput() if i_nlink is zero.
*/
void ext4_evict_inode(struct inode *inode)
{
handle_t *handle;
int err;
trace_ext4_evict_inode(inode);
if (inode->i_nlink) {
/*
* When journalling data dirty buffers are tracked only in the
* journal. So although mm thinks everything is clean and
* ready for reaping the inode might still have some pages to
* write in the running transaction or waiting to be
* checkpointed. Thus calling jbd2_journal_invalidatepage()
* (via truncate_inode_pages()) to discard these buffers can
* cause data loss. Also even if we did not discard these
* buffers, we would have no way to find them after the inode
* is reaped and thus user could see stale data if he tries to
* read them before the transaction is checkpointed. So be
* careful and force everything to disk here... We use
* ei->i_datasync_tid to store the newest transaction
* containing inode's data.
*
* Note that directories do not have this problem because they
* don't use page cache.
*/
if (inode->i_ino != EXT4_JOURNAL_INO &&
ext4_should_journal_data(inode) &&
(S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
jbd2_complete_transaction(journal, commit_tid);
filemap_write_and_wait(&inode->i_data);
}
truncate_inode_pages_final(&inode->i_data);
goto no_delete;
}
if (is_bad_inode(inode))
goto no_delete;
dquot_initialize(inode);
if (ext4_should_order_data(inode))
ext4_begin_ordered_truncate(inode, 0);
truncate_inode_pages_final(&inode->i_data);
/*
* Protect us against freezing - iput() caller didn't have to have any
* protection against it
*/
sb_start_intwrite(inode->i_sb);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
ext4_blocks_for_truncate(inode)+3);
if (IS_ERR(handle)) {
ext4_std_error(inode->i_sb, PTR_ERR(handle));
/*
* If we're going to skip the normal cleanup, we still need to
* make sure that the in-core orphan linked list is properly
* cleaned up.
*/
ext4_orphan_del(NULL, inode);
sb_end_intwrite(inode->i_sb);
goto no_delete;
}
if (IS_SYNC(inode))
ext4_handle_sync(handle);
inode->i_size = 0;
err = ext4_mark_inode_dirty(handle, inode);
if (err) {
ext4_warning(inode->i_sb,
"couldn't mark inode dirty (err %d)", err);
goto stop_handle;
}
if (inode->i_blocks)
ext4_truncate(inode);
/*
* ext4_ext_truncate() doesn't reserve any slop when it
* restarts journal transactions; therefore there may not be
* enough credits left in the handle to remove the inode from
* the orphan list and set the dtime field.
*/
if (!ext4_handle_has_enough_credits(handle, 3)) {
err = ext4_journal_extend(handle, 3);
if (err > 0)
err = ext4_journal_restart(handle, 3);
if (err != 0) {
ext4_warning(inode->i_sb,
"couldn't extend journal (err %d)", err);
stop_handle:
ext4_journal_stop(handle);
ext4_orphan_del(NULL, inode);
sb_end_intwrite(inode->i_sb);
goto no_delete;
}
}
/*
* Kill off the orphan record which ext4_truncate created.
* AKPM: I think this can be inside the above `if'.
* Note that ext4_orphan_del() has to be able to cope with the
* deletion of a non-existent orphan - this is because we don't
* know if ext4_truncate() actually created an orphan record.
* (Well, we could do this if we need to, but heck - it works)
*/
ext4_orphan_del(handle, inode);
EXT4_I(inode)->i_dtime = get_seconds();
/*
* One subtle ordering requirement: if anything has gone wrong
* (transaction abort, IO errors, whatever), then we can still
* do these next steps (the fs will already have been marked as
* having errors), but we can't free the inode if the mark_dirty
* fails.
*/
if (ext4_mark_inode_dirty(handle, inode))
/* If that failed, just do the required in-core inode clear. */
ext4_clear_inode(inode);
else
ext4_free_inode(handle, inode);
ext4_journal_stop(handle);
sb_end_intwrite(inode->i_sb);
return;
no_delete:
ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
}
#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
return &EXT4_I(inode)->i_reserved_quota;
}
#endif
/*
* Called with i_data_sem down, which is important since we can call
* ext4_discard_preallocations() from here.
*/
void ext4_da_update_reserve_space(struct inode *inode,
int used, int quota_claim)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
spin_lock(&ei->i_block_reservation_lock);
trace_ext4_da_update_reserve_space(inode, used, quota_claim);
if (unlikely(used > ei->i_reserved_data_blocks)) {
ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
"with only %d reserved data blocks",
__func__, inode->i_ino, used,
ei->i_reserved_data_blocks);
WARN_ON(1);
used = ei->i_reserved_data_blocks;
}
/* Update per-inode reservations */
ei->i_reserved_data_blocks -= used;
percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
/* Update quota subsystem for data blocks */
if (quota_claim)
dquot_claim_block(inode, EXT4_C2B(sbi, used));
else {
/*
* We did fallocate with an offset that is already delayed
* allocated. So on delayed allocated writeback we should
* not re-claim the quota for fallocated blocks.
*/
dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
}
/*
* If we have done all the pending block allocations and if
* there aren't any writers on the inode, we can discard the
* inode's preallocations.
*/
if ((ei->i_reserved_data_blocks == 0) &&
(atomic_read(&inode->i_writecount) == 0))
ext4_discard_preallocations(inode);
}
static int __check_block_validity(struct inode *inode, const char *func,
unsigned int line,
struct ext4_map_blocks *map)
{
if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
map->m_len)) {
ext4_error_inode(inode, func, line, map->m_pblk,
"lblock %lu mapped to illegal pblock "
"(length %d)", (unsigned long) map->m_lblk,
map->m_len);
return -EFSCORRUPTED;
}
return 0;
}
int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
ext4_lblk_t len)
{
int ret;
if (ext4_encrypted_inode(inode))
return fscrypt_zeroout_range(inode, lblk, pblk, len);
ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
if (ret > 0)
ret = 0;
return ret;
}
#define check_block_validity(inode, map) \
__check_block_validity((inode), __func__, __LINE__, (map))
#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
struct inode *inode,
struct ext4_map_blocks *es_map,
struct ext4_map_blocks *map,
int flags)
{
int retval;
map->m_flags = 0;
/*
 * There is a race window in which the result is not the same,
 * e.g. xfstests #223 when dioread_nolock is enabled. The reason
 * is that we look up a block mapping in the extent status tree
 * without taking i_data_sem, so in the meantime the unwritten
 * extent could be converted.
*/
down_read(&EXT4_I(inode)->i_data_sem);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
retval = ext4_ext_map_blocks(handle, inode, map, flags &
EXT4_GET_BLOCKS_KEEP_SIZE);
} else {
retval = ext4_ind_map_blocks(handle, inode, map, flags &
EXT4_GET_BLOCKS_KEEP_SIZE);
}
up_read((&EXT4_I(inode)->i_data_sem));
/*
 * We don't check m_len because the extent will be collapsed in the
 * status tree, so the m_len values might not be equal.
*/
if (es_map->m_lblk != map->m_lblk ||
es_map->m_flags != map->m_flags ||
es_map->m_pblk != map->m_pblk) {
printk("ES cache assertion failed for inode: %lu "
"es_cached ex [%d/%d/%llu/%x] != "
"found ex [%d/%d/%llu/%x] retval %d flags %x\n",
inode->i_ino, es_map->m_lblk, es_map->m_len,
es_map->m_pblk, es_map->m_flags, map->m_lblk,
map->m_len, map->m_pblk, map->m_flags,
retval, flags);
}
}
#endif /* ES_AGGRESSIVE_TEST */
/*
* The ext4_map_blocks() function tries to look up the requested blocks,
* and returns if the blocks are already mapped.
*
* Otherwise it takes the write lock of the i_data_sem and allocate blocks
* and store the allocated blocks in the result buffer head and mark it
* mapped.
*
 * If the file is extent-based, it calls ext4_ext_map_blocks();
 * otherwise, it calls ext4_ind_map_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create==0 and the blocks are pre-allocated and unwritten, the
 * resulting @map is marked as unwritten. If create==1, it will mark @map
 * as mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been allocated);
 * in that case, @map is returned as unmapped but we still fill map->m_len
 * to indicate the length of a hole starting at map->m_lblk.
*
* It returns the error in case of allocation failure.
*/
int ext4_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags)
{
struct extent_status es;
int retval;
int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
struct ext4_map_blocks orig_map;
memcpy(&orig_map, map, sizeof(*map));
#endif
map->m_flags = 0;
ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
"logical block %lu\n", inode->i_ino, flags, map->m_len,
(unsigned long) map->m_lblk);
/*
* ext4_map_blocks returns an int, and m_len is an unsigned int
*/
if (unlikely(map->m_len > INT_MAX))
map->m_len = INT_MAX;
	/* We can only handle block numbers less than EXT_MAX_BLOCKS */
if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
return -EFSCORRUPTED;
/* Lookup extent status tree firstly */
if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
map->m_pblk = ext4_es_pblock(&es) +
map->m_lblk - es.es_lblk;
map->m_flags |= ext4_es_is_written(&es) ?
EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
retval = es.es_len - (map->m_lblk - es.es_lblk);
if (retval > map->m_len)
retval = map->m_len;
map->m_len = retval;
} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
map->m_pblk = 0;
retval = es.es_len - (map->m_lblk - es.es_lblk);
if (retval > map->m_len)
retval = map->m_len;
map->m_len = retval;
retval = 0;
} else {
BUG_ON(1);
}
#ifdef ES_AGGRESSIVE_TEST
ext4_map_blocks_es_recheck(handle, inode, map,
&orig_map, flags);
#endif
goto found;
}
/*
* Try to see if we can get the block without requesting a new
* file system block.
*/
down_read(&EXT4_I(inode)->i_data_sem);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
retval = ext4_ext_map_blocks(handle, inode, map, flags &
EXT4_GET_BLOCKS_KEEP_SIZE);
} else {
retval = ext4_ind_map_blocks(handle, inode, map, flags &
EXT4_GET_BLOCKS_KEEP_SIZE);
}
if (retval > 0) {
unsigned int status;
if (unlikely(retval != map->m_len)) {
ext4_warning(inode->i_sb,
"ES len assertion failed for inode "
"%lu: retval %d != map->m_len %d",
inode->i_ino, retval, map->m_len);
WARN_ON(1);
}
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
!(status & EXTENT_STATUS_WRITTEN) &&
ext4_find_delalloc_range(inode, map->m_lblk,
map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
ret = ext4_es_insert_extent(inode, map->m_lblk,
map->m_len, map->m_pblk, status);
if (ret < 0)
retval = ret;
}
up_read((&EXT4_I(inode)->i_data_sem));
found:
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
ret = check_block_validity(inode, map);
if (ret != 0)
return ret;
}
/* If it is only a block(s) look up */
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
return retval;
/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create == 0
	 * and the buffer head unmapped.
*/
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
/*
* If we need to convert extent to unwritten
* we continue and do the actual work in
* ext4_ext_map_blocks()
*/
if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
return retval;
/*
	 * Here we clear m_flags because after allocating a new extent,
* it will be set again.
*/
map->m_flags &= ~EXT4_MAP_FLAGS;
/*
	 * Allocating new blocks and/or writing to an unwritten extent
* will possibly result in updating i_data, so we take
* the write lock of i_data_sem, and call get_block()
* with create == 1 flag.
*/
down_write(&EXT4_I(inode)->i_data_sem);
/*
* We need to check for EXT4 here because migrate
* could have changed the inode type in between
*/
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
retval = ext4_ext_map_blocks(handle, inode, map, flags);
} else {
retval = ext4_ind_map_blocks(handle, inode, map, flags);
if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
/*
* We allocated new blocks which will result in
* i_data's format changing. Force the migrate
* to fail by clearing migrate flags
*/
ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
}
/*
* Update reserved blocks/metadata blocks after successful
* block allocation which had been deferred till now. We don't
		 * support fallocate for non-extent files. So we can update
* reserve space here.
*/
if ((retval > 0) &&
(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
ext4_da_update_reserve_space(inode, retval, 1);
}
if (retval > 0) {
unsigned int status;
if (unlikely(retval != map->m_len)) {
ext4_warning(inode->i_sb,
"ES len assertion failed for inode "
"%lu: retval %d != map->m_len %d",
inode->i_ino, retval, map->m_len);
WARN_ON(1);
}
/*
* We have to zeroout blocks before inserting them into extent
* status tree. Otherwise someone could look them up there and
* use them before they are really zeroed. We also have to
* unmap metadata before zeroing as otherwise writeback can
* overwrite zeros with stale data from block device.
*/
if (flags & EXT4_GET_BLOCKS_ZERO &&
map->m_flags & EXT4_MAP_MAPPED &&
map->m_flags & EXT4_MAP_NEW) {
ext4_lblk_t i;
for (i = 0; i < map->m_len; i++) {
unmap_underlying_metadata(inode->i_sb->s_bdev,
map->m_pblk + i);
}
ret = ext4_issue_zeroout(inode, map->m_lblk,
map->m_pblk, map->m_len);
if (ret) {
retval = ret;
goto out_sem;
}
}
/*
* If the extent has been zeroed out, we don't need to update
* extent status tree.
*/
if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
if (ext4_es_is_written(&es))
goto out_sem;
}
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
!(status & EXTENT_STATUS_WRITTEN) &&
ext4_find_delalloc_range(inode, map->m_lblk,
map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
map->m_pblk, status);
if (ret < 0) {
retval = ret;
goto out_sem;
}
}
out_sem:
up_write((&EXT4_I(inode)->i_data_sem));
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
ret = check_block_validity(inode, map);
if (ret != 0)
return ret;
/*
* Inodes with freshly allocated blocks where contents will be
* visible after transaction commit must be on transaction's
* ordered data list.
*/
if (map->m_flags & EXT4_MAP_NEW &&
!(map->m_flags & EXT4_MAP_UNWRITTEN) &&
!(flags & EXT4_GET_BLOCKS_ZERO) &&
!IS_NOQUOTA(inode) &&
ext4_should_order_data(inode)) {
if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
ret = ext4_jbd2_inode_add_wait(handle, inode);
else
ret = ext4_jbd2_inode_add_write(handle, inode);
if (ret)
return ret;
}
}
return retval;
}
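/*
 * Usage sketch (illustrative only, not a call site in this file): a
 * plain lookup of a single logical block, with no allocation and hence
 * no handle:
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *	int ret = ext4_map_blocks(NULL, inode, &map, 0);
 *	if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
 *		; // mapped at map.m_pblk
 *	else if (ret == 0)
 *		; // hole of map.m_len blocks starting at map.m_lblk
 */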
/*
* Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
* we have to be careful as someone else may be manipulating b_state as well.
*/
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
unsigned long old_state;
unsigned long new_state;
flags &= EXT4_MAP_FLAGS;
/* Dummy buffer_head? Set non-atomically. */
if (!bh->b_page) {
bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
return;
}
/*
* Someone else may be modifying b_state. Be careful! This is ugly but
* once we get rid of using bh as a container for mapping information
* to pass to / from get_block functions, this can go away.
*/
do {
old_state = READ_ONCE(bh->b_state);
new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
} while (unlikely(
cmpxchg(&bh->b_state, old_state, new_state) != old_state));
}
static int _ext4_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int flags)
{
struct ext4_map_blocks map;
int ret = 0;
if (ext4_has_inline_data(inode))
return -ERANGE;
map.m_lblk = iblock;
map.m_len = bh->b_size >> inode->i_blkbits;
ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
flags);
if (ret > 0) {
map_bh(bh, inode->i_sb, map.m_pblk);
ext4_update_bh_state(bh, map.m_flags);
bh->b_size = inode->i_sb->s_blocksize * map.m_len;
ret = 0;
}
return ret;
}
int ext4_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
return _ext4_get_block(inode, iblock, bh,
create ? EXT4_GET_BLOCKS_CREATE : 0);
}
/*
 * Get block function used when preparing for a buffered write if we require
 * creating an unwritten extent for blocks that haven't been allocated. The extent
* will be converted to written after the IO is complete.
*/
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
inode->i_ino, create);
return _ext4_get_block(inode, iblock, bh_result,
EXT4_GET_BLOCKS_IO_CREATE_EXT);
}
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096
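/*
 * For example, with a 4 KiB block size this caps a single DIO mapping
 * request at 4096 * 4 KiB = 16 MiB.
 */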
/*
* Get blocks function for the cases that need to start a transaction -
 * generally different cases of direct IO and DAX IO. It also handles retries
* in case of ENOSPC.
*/
static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int flags)
{
int dio_credits;
handle_t *handle;
int retries = 0;
int ret;
/* Trim mapping request to maximum we can map at once for DIO */
if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
dio_credits = ext4_chunk_trans_blocks(inode,
bh_result->b_size >> inode->i_blkbits);
retry:
handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = _ext4_get_block(inode, iblock, bh_result, flags);
ext4_journal_stop(handle);
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
return ret;
}
/* Get block function for DIO reads and writes to inodes without extents */
int ext4_dio_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
/* We don't expect handle for direct IO */
WARN_ON_ONCE(ext4_journal_current_handle());
if (!create)
return _ext4_get_block(inode, iblock, bh, 0);
return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
}
/*
 * Get block function for AIO DIO writes when we create an unwritten extent
 * if blocks are not allocated yet. The extent will be converted to written
* after IO is complete.
*/
static int ext4_dio_get_block_unwritten_async(struct inode *inode,
sector_t iblock, struct buffer_head *bh_result, int create)
{
int ret;
/* We don't expect handle for direct IO */
WARN_ON_ONCE(ext4_journal_current_handle());
ret = ext4_get_block_trans(inode, iblock, bh_result,
EXT4_GET_BLOCKS_IO_CREATE_EXT);
/*
* When doing DIO using unwritten extents, we need io_end to convert
* unwritten extents to written on IO completion. We allocate io_end
* once we spot unwritten extent and store it in b_private. Generic
* DIO code keeps b_private set and furthermore passes the value to
* our completion callback in 'private' argument.
*/
if (!ret && buffer_unwritten(bh_result)) {
if (!bh_result->b_private) {
ext4_io_end_t *io_end;
io_end = ext4_init_io_end(inode, GFP_KERNEL);
if (!io_end)
return -ENOMEM;
bh_result->b_private = io_end;
ext4_set_io_unwritten_flag(inode, io_end);
}
set_buffer_defer_completion(bh_result);
}
return ret;
}
/*
 * Get block function for non-AIO DIO writes when we create an unwritten
 * extent if blocks are not allocated yet. The extent will be converted to written
* after IO is complete from ext4_ext_direct_IO() function.
*/
static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
sector_t iblock, struct buffer_head *bh_result, int create)
{
int ret;
/* We don't expect handle for direct IO */
WARN_ON_ONCE(ext4_journal_current_handle());
ret = ext4_get_block_trans(inode, iblock, bh_result,
EXT4_GET_BLOCKS_IO_CREATE_EXT);
/*
* Mark inode as having pending DIO writes to unwritten extents.
* ext4_ext_direct_IO() checks this flag and converts extents to
* written.
*/
if (!ret && buffer_unwritten(bh_result))
ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
return ret;
}
static int ext4_dio_get_block_overwrite(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
int ret;
ext4_debug("ext4_dio_get_block_overwrite: inode %lu, create flag %d\n",
inode->i_ino, create);
/* We don't expect handle for direct IO */
WARN_ON_ONCE(ext4_journal_current_handle());
ret = _ext4_get_block(inode, iblock, bh_result, 0);
/*
* Blocks should have been preallocated! ext4_file_write_iter() checks
* that.
*/
WARN_ON_ONCE(!buffer_mapped(bh_result) || buffer_unwritten(bh_result));
return ret;
}
/*
* `handle' can be NULL if create is zero
*/
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
ext4_lblk_t block, int map_flags)
{
struct ext4_map_blocks map;
struct buffer_head *bh;
int create = map_flags & EXT4_GET_BLOCKS_CREATE;
int err;
J_ASSERT(handle != NULL || create == 0);
map.m_lblk = block;
map.m_len = 1;
err = ext4_map_blocks(handle, inode, &map, map_flags);
if (err == 0)
return create ? ERR_PTR(-ENOSPC) : NULL;
if (err < 0)
return ERR_PTR(err);
bh = sb_getblk(inode->i_sb, map.m_pblk);
if (unlikely(!bh))
return ERR_PTR(-ENOMEM);
if (map.m_flags & EXT4_MAP_NEW) {
J_ASSERT(create != 0);
J_ASSERT(handle != NULL);
/*
* Now that we do not always journal data, we should
* keep in mind whether this should always journal the
* new buffer as metadata. For now, regular file
* writes use ext4_get_block instead, so it's not a
* problem.
*/
lock_buffer(bh);
BUFFER_TRACE(bh, "call get_create_access");
err = ext4_journal_get_create_access(handle, bh);
if (unlikely(err)) {
unlock_buffer(bh);
goto errout;
}
if (!buffer_uptodate(bh)) {
memset(bh->b_data, 0, inode->i_sb->s_blocksize);
set_buffer_uptodate(bh);
}
unlock_buffer(bh);
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_metadata(handle, inode, bh);
if (unlikely(err))
goto errout;
} else
BUFFER_TRACE(bh, "not a new buffer");
return bh;
errout:
brelse(bh);
return ERR_PTR(err);
}
struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
ext4_lblk_t block, int map_flags)
{
struct buffer_head *bh;
bh = ext4_getblk(handle, inode, block, map_flags);
if (IS_ERR(bh))
return bh;
if (!bh || buffer_uptodate(bh))
return bh;
ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return bh;
put_bh(bh);
return ERR_PTR(-EIO);
}
int ext4_walk_page_buffers(handle_t *handle,
struct buffer_head *head,
unsigned from,
unsigned to,
int *partial,
int (*fn)(handle_t *handle,
struct buffer_head *bh))
{
struct buffer_head *bh;
unsigned block_start, block_end;
unsigned blocksize = head->b_size;
int err, ret = 0;
struct buffer_head *next;
for (bh = head, block_start = 0;
ret == 0 && (bh != head || !block_start);
block_start = block_end, bh = next) {
next = bh->b_this_page;
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
if (partial && !buffer_uptodate(bh))
*partial = 1;
continue;
}
err = (*fn)(handle, bh);
if (!ret)
ret = err;
}
return ret;
}
/*
* To preserve ordering, it is essential that the hole instantiation and
* the data write be encapsulated in a single transaction. We cannot
* close off a transaction and start a new one between the ext4_get_block()
* and the commit_write(). So doing the jbd2_journal_start at the start of
* prepare_write() is the right place.
*
* Also, this function can nest inside ext4_writepage(). In that case, we
* *know* that ext4_writepage() has generated enough buffer credits to do the
* whole page. So we won't block on the journal in that case, which is good,
* because the caller may be PF_MEMALLOC.
*
* By accident, ext4 can be reentered when a transaction is open via
* quota file writes. If we were to commit the transaction while thus
* reentered, there can be a deadlock - we would be holding a quota
* lock, and the commit would never complete if another thread had a
* transaction open and was blocking on the quota lock - a ranking
* violation.
*
* So what we do is to rely on the fact that jbd2_journal_stop/journal_start
* will _not_ run commit under these circumstances because handle->h_ref
* is elevated. We'll still have enough credits for the tiny quotafile
* write.
*/
int do_journal_get_write_access(handle_t *handle,
struct buffer_head *bh)
{
int dirty = buffer_dirty(bh);
int ret;
if (!buffer_mapped(bh) || buffer_freed(bh))
return 0;
/*
* __block_write_begin() could have dirtied some buffers. Clean
* the dirty bit as jbd2_journal_get_write_access() could complain
* otherwise about fs integrity issues. Setting of the dirty bit
* by __block_write_begin() isn't a real problem here as we clear
* the bit before releasing a page lock and thus writeback cannot
* ever write the buffer.
*/
if (dirty)
clear_buffer_dirty(bh);
BUFFER_TRACE(bh, "get write access");
ret = ext4_journal_get_write_access(handle, bh);
if (!ret && dirty)
ret = ext4_handle_dirty_metadata(handle, NULL, bh);
return ret;
}
#ifdef CONFIG_EXT4_FS_ENCRYPTION
static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block)
{
unsigned from = pos & (PAGE_SIZE - 1);
unsigned to = from + len;
struct inode *inode = page->mapping->host;
unsigned block_start, block_end;
sector_t block;
int err = 0;
unsigned blocksize = inode->i_sb->s_blocksize;
unsigned bbits;
struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
bool decrypt = false;
BUG_ON(!PageLocked(page));
BUG_ON(from > PAGE_SIZE);
BUG_ON(to > PAGE_SIZE);
BUG_ON(from > to);
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
head = page_buffers(page);
bbits = ilog2(blocksize);
block = (sector_t)page->index << (PAGE_SHIFT - bbits);
for (bh = head, block_start = 0; bh != head || !block_start;
block++, block_start = block_end, bh = bh->b_this_page) {
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
if (PageUptodate(page)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
}
continue;
}
if (buffer_new(bh))
clear_buffer_new(bh);
if (!buffer_mapped(bh)) {
WARN_ON(bh->b_size != blocksize);
err = get_block(inode, block, bh, 1);
if (err)
break;
if (buffer_new(bh)) {
unmap_underlying_metadata(bh->b_bdev,
bh->b_blocknr);
if (PageUptodate(page)) {
clear_buffer_new(bh);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
continue;
}
if (block_end > to || block_start < from)
zero_user_segments(page, to, block_end,
block_start, from);
continue;
}
}
if (PageUptodate(page)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
continue;
}
if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
!buffer_unwritten(bh) &&
(block_start < from || block_end > to)) {
ll_rw_block(REQ_OP_READ, 0, 1, &bh);
*wait_bh++ = bh;
decrypt = ext4_encrypted_inode(inode) &&
S_ISREG(inode->i_mode);
}
}
/*
* If we issued read requests, let them complete.
*/
while (wait_bh > wait) {
wait_on_buffer(*--wait_bh);
if (!buffer_uptodate(*wait_bh))
err = -EIO;
}
if (unlikely(err))
page_zero_new_buffers(page, from, to);
else if (decrypt)
err = fscrypt_decrypt_page(page);
return err;
}
#endif
static int ext4_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
struct inode *inode = mapping->host;
int ret, needed_blocks;
handle_t *handle;
int retries = 0;
struct page *page;
pgoff_t index;
unsigned from, to;
trace_ext4_write_begin(inode, pos, len, flags);
/*
* Reserve one block more for addition to orphan list in case
* we allocate blocks but write fails for some reason
*/
needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
index = pos >> PAGE_SHIFT;
from = pos & (PAGE_SIZE - 1);
to = from + len;
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
flags, pagep);
if (ret < 0)
return ret;
if (ret == 1)
return 0;
}
/*
* grab_cache_page_write_begin() can take a long time if the
* system is thrashing due to memory pressure, or if the page
* is being written back. So grab it first before we start
* the transaction handle. This also allows us to allocate
* the page (if needed) without using GFP_NOFS.
*/
retry_grab:
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
return -ENOMEM;
unlock_page(page);
retry_journal:
handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
if (IS_ERR(handle)) {
put_page(page);
return PTR_ERR(handle);
}
lock_page(page);
if (page->mapping != mapping) {
/* The page got truncated from under us */
unlock_page(page);
put_page(page);
ext4_journal_stop(handle);
goto retry_grab;
}
/* In case writeback began while the page was unlocked */
wait_for_stable_page(page);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
if (ext4_should_dioread_nolock(inode))
ret = ext4_block_write_begin(page, pos, len,
ext4_get_block_unwritten);
else
ret = ext4_block_write_begin(page, pos, len,
ext4_get_block);
#else
if (ext4_should_dioread_nolock(inode))
ret = __block_write_begin(page, pos, len,
ext4_get_block_unwritten);
else
ret = __block_write_begin(page, pos, len, ext4_get_block);
#endif
if (!ret && ext4_should_journal_data(inode)) {
ret = ext4_walk_page_buffers(handle, page_buffers(page),
from, to, NULL,
do_journal_get_write_access);
}
if (ret) {
unlock_page(page);
/*
* __block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
* i_size_read because we hold i_mutex.
*
* Add inode to orphan list in case we crash before
* truncate finishes
*/
if (pos + len > inode->i_size && ext4_can_truncate(inode))
ext4_orphan_add(handle, inode);
ext4_journal_stop(handle);
if (pos + len > inode->i_size) {
ext4_truncate_failed_write(inode);
/*
* If truncate failed early the inode might
* still be on the orphan list; we need to
* make sure the inode is removed from the
* orphan list in that case.
*/
if (inode->i_nlink)
ext4_orphan_del(NULL, inode);
}
if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry_journal;
put_page(page);
return ret;
}
*pagep = page;
return ret;
}
/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
int ret;
if (!buffer_mapped(bh) || buffer_freed(bh))
return 0;
set_buffer_uptodate(bh);
ret = ext4_handle_dirty_metadata(handle, NULL, bh);
clear_buffer_meta(bh);
clear_buffer_prio(bh);
return ret;
}
/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list. Metadata
 * buffers are managed internally.
*/
static int ext4_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
handle_t *handle = ext4_journal_current_handle();
struct inode *inode = mapping->host;
loff_t old_size = inode->i_size;
int ret = 0, ret2;
int i_size_changed = 0;
trace_ext4_write_end(inode, pos, len, copied);
if (ext4_has_inline_data(inode)) {
ret = ext4_write_inline_data_end(inode, pos, len,
copied, page);
if (ret < 0)
goto errout;
copied = ret;
} else
copied = block_write_end(file, mapping, pos,
len, copied, page, fsdata);
/*
* it's important to update i_size while still holding page lock:
* page writeout could otherwise come in and zero beyond i_size.
*/
i_size_changed = ext4_update_inode_size(inode, pos + copied);
unlock_page(page);
put_page(page);
if (old_size < pos)
pagecache_isize_extended(inode, old_size, pos);
/*
* Don't mark the inode dirty under page lock. First, it unnecessarily
* makes the holding time of page lock longer. Second, it forces lock
* ordering of page lock and transaction start for journaling
* filesystems.
*/
if (i_size_changed)
ext4_mark_inode_dirty(handle, inode);
if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* If we have allocated more blocks than we copied,
		 * we will have blocks allocated outside
		 * inode->i_size, so truncate them.
		 */
ext4_orphan_add(handle, inode);
errout:
ret2 = ext4_journal_stop(handle);
if (!ret)
ret = ret2;
if (pos + len > inode->i_size) {
ext4_truncate_failed_write(inode);
/*
* If truncate failed early the inode might still be
* on the orphan list; we need to make sure the inode
* is removed from the orphan list in that case.
*/
if (inode->i_nlink)
ext4_orphan_del(NULL, inode);
}
return ret ? ret : copied;
}
/*
* This is a private version of page_zero_new_buffers() which doesn't
* set the buffer to be dirty, since in data=journalled mode we need
* to call ext4_handle_dirty_metadata() instead.
*/
static void zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
unsigned int block_start = 0, block_end;
struct buffer_head *head, *bh;
bh = head = page_buffers(page);
do {
block_end = block_start + bh->b_size;
if (buffer_new(bh)) {
if (block_end > from && block_start < to) {
if (!PageUptodate(page)) {
unsigned start, size;
start = max(from, block_start);
size = min(to, block_end) - start;
zero_user(page, start, size);
set_buffer_uptodate(bh);
}
clear_buffer_new(bh);
}
}
block_start = block_end;
bh = bh->b_this_page;
} while (bh != head);
}
static int ext4_journalled_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
handle_t *handle = ext4_journal_current_handle();
struct inode *inode = mapping->host;
loff_t old_size = inode->i_size;
int ret = 0, ret2;
int partial = 0;
unsigned from, to;
int size_changed = 0;
trace_ext4_journalled_write_end(inode, pos, len, copied);
from = pos & (PAGE_SIZE - 1);
to = from + len;
BUG_ON(!ext4_handle_valid(handle));
if (ext4_has_inline_data(inode))
copied = ext4_write_inline_data_end(inode, pos, len,
copied, page);
else {
if (copied < len) {
if (!PageUptodate(page))
copied = 0;
zero_new_buffers(page, from+copied, to);
}
ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
to, &partial, write_end_fn);
if (!partial)
SetPageUptodate(page);
}
size_changed = ext4_update_inode_size(inode, pos + copied);
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
unlock_page(page);
put_page(page);
if (old_size < pos)
pagecache_isize_extended(inode, old_size, pos);
if (size_changed) {
ret2 = ext4_mark_inode_dirty(handle, inode);
if (!ret)
ret = ret2;
}
if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* If we have allocated more blocks than we copied,
		 * we will have blocks allocated outside
		 * inode->i_size, so truncate them.
		 */
ext4_orphan_add(handle, inode);
ret2 = ext4_journal_stop(handle);
if (!ret)
ret = ret2;
if (pos + len > inode->i_size) {
ext4_truncate_failed_write(inode);
/*
* If truncate failed early the inode might still be
* on the orphan list; we need to make sure the inode
* is removed from the orphan list in that case.
*/
if (inode->i_nlink)
ext4_orphan_del(NULL, inode);
}
return ret ? ret : copied;
}
/*
* Reserve space for a single cluster
*/
static int ext4_da_reserve_space(struct inode *inode)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
int ret;
/*
* We will charge metadata quota at writeout time; this saves
* us from metadata over-estimation, though we may go over by
* a small amount in the end. Here we just reserve for data.
*/
ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
if (ret)
return ret;
spin_lock(&ei->i_block_reservation_lock);
if (ext4_claim_free_clusters(sbi, 1, 0)) {
spin_unlock(&ei->i_block_reservation_lock);
dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
return -ENOSPC;
}
ei->i_reserved_data_blocks++;
trace_ext4_da_reserve_space(inode);
spin_unlock(&ei->i_block_reservation_lock);
return 0; /* success */
}
static void ext4_da_release_space(struct inode *inode, int to_free)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
if (!to_free)
return; /* Nothing to release, exit */
spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
trace_ext4_da_release_space(inode, to_free);
if (unlikely(to_free > ei->i_reserved_data_blocks)) {
/*
* If there aren't enough reserved blocks, then the
* counter is messed up somewhere. Since this
* function is called from invalidatepage, it's
* harmless to return without any action.
*/
ext4_warning(inode->i_sb, "ext4_da_release_space: "
"ino %lu, to_free %d with only %d reserved "
"data blocks", inode->i_ino, to_free,
ei->i_reserved_data_blocks);
WARN_ON(1);
to_free = ei->i_reserved_data_blocks;
}
ei->i_reserved_data_blocks -= to_free;
/* update fs dirty data blocks counter */
percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}
static void ext4_da_page_release_reservation(struct page *page,
unsigned int offset,
unsigned int length)
{
int to_release = 0, contiguous_blks = 0;
struct buffer_head *head, *bh;
unsigned int curr_off = 0;
struct inode *inode = page->mapping->host;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
unsigned int stop = offset + length;
int num_clusters;
ext4_fsblk_t lblk;
BUG_ON(stop > PAGE_SIZE || stop < length);
head = page_buffers(page);
bh = head;
do {
unsigned int next_off = curr_off + bh->b_size;
if (next_off > stop)
break;
if ((offset <= curr_off) && (buffer_delay(bh))) {
to_release++;
contiguous_blks++;
clear_buffer_delay(bh);
} else if (contiguous_blks) {
lblk = page->index <<
(PAGE_SHIFT - inode->i_blkbits);
lblk += (curr_off >> inode->i_blkbits) -
contiguous_blks;
ext4_es_remove_extent(inode, lblk, contiguous_blks);
contiguous_blks = 0;
}
curr_off = next_off;
} while ((bh = bh->b_this_page) != head);
if (contiguous_blks) {
lblk = page->index << (PAGE_SHIFT - inode->i_blkbits);
lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
ext4_es_remove_extent(inode, lblk, contiguous_blks);
}
/* If we have released all the blocks belonging to a cluster, then we
* need to release the reserved space for that cluster. */
num_clusters = EXT4_NUM_B2C(sbi, to_release);
while (num_clusters > 0) {
lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) +
((num_clusters - 1) << sbi->s_cluster_bits);
if (sbi->s_cluster_ratio == 1 ||
!ext4_find_delalloc_cluster(inode, lblk))
ext4_da_release_space(inode, 1);
num_clusters--;
}
}
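/*
 * Illustrative sketch (disabled; not used by the code): the cluster <->
 * block conversions used by the reservation code above. EXT4_C2B() is a
 * left shift by s_cluster_bits and EXT4_NUM_B2C() the matching ceiling
 * division; e.g. with a cluster ratio of 4 (cluster_bits = 2), 5 blocks
 * convert to (5 + 3) >> 2 == 2 clusters, and 2 clusters back to 8
 * blocks. The helper names are hypothetical.
 */
#if 0
static unsigned int example_num_b2c(unsigned int blocks,
				    unsigned int cluster_bits)
{
	unsigned int ratio = 1U << cluster_bits;

	return (blocks + ratio - 1) >> cluster_bits; /* ceil(blocks/ratio) */
}

static unsigned int example_c2b(unsigned int clusters,
				unsigned int cluster_bits)
{
	return clusters << cluster_bits;
}
#endif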
/*
* Delayed allocation stuff
*/
struct mpage_da_data {
struct inode *inode;
struct writeback_control *wbc;
pgoff_t first_page; /* The first page to write */
pgoff_t next_page; /* Current page to examine */
pgoff_t last_page; /* Last page to examine */
/*
* Extent to map - this can be after first_page because that can be
* fully mapped. We somewhat abuse m_flags to store whether the extent
* is delalloc or unwritten.
*/
struct ext4_map_blocks map;
struct ext4_io_submit io_submit; /* IO submission data */
};
static void mpage_release_unused_pages(struct mpage_da_data *mpd,
bool invalidate)
{
int nr_pages, i;
pgoff_t index, end;
struct pagevec pvec;
struct inode *inode = mpd->inode;
struct address_space *mapping = inode->i_mapping;
/* This is necessary when next_page == 0. */
if (mpd->first_page >= mpd->next_page)
return;
index = mpd->first_page;
end = mpd->next_page - 1;
if (invalidate) {
ext4_lblk_t start, last;
start = index << (PAGE_SHIFT - inode->i_blkbits);
last = end << (PAGE_SHIFT - inode->i_blkbits);
ext4_es_remove_extent(inode, start, last - start + 1);
}
pagevec_init(&pvec, 0);
while (index <= end) {
nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
if (nr_pages == 0)
break;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
if (page->index > end)
break;
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
if (invalidate) {
if (page_mapped(page))
clear_page_dirty_for_io(page);
block_invalidatepage(page, 0, PAGE_SIZE);
ClearPageUptodate(page);
}
unlock_page(page);
}
index = pvec.pages[nr_pages - 1]->index + 1;
pagevec_release(&pvec);
}
}
static void ext4_print_free_blocks(struct inode *inode)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct super_block *sb = inode->i_sb;
struct ext4_inode_info *ei = EXT4_I(inode);
ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
EXT4_C2B(EXT4_SB(inode->i_sb),
ext4_count_free_clusters(sb)));
ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
(long long) EXT4_C2B(EXT4_SB(sb),
percpu_counter_sum(&sbi->s_freeclusters_counter)));
ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
(long long) EXT4_C2B(EXT4_SB(sb),
percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
ext4_msg(sb, KERN_CRIT, "Block reservation details");
ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
ei->i_reserved_data_blocks);
return;
}
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}
/*
* This function grabs code from the very beginning of
* ext4_map_blocks, but assumes that the caller is in the delayed
* write path. It looks up the requested blocks and sets the
* buffer delay bit under the protection of i_data_sem.
*/
static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
struct ext4_map_blocks *map,
struct buffer_head *bh)
{
struct extent_status es;
int retval;
sector_t invalid_block = ~((sector_t) 0xffff);
#ifdef ES_AGGRESSIVE_TEST
struct ext4_map_blocks orig_map;
memcpy(&orig_map, map, sizeof(*map));
#endif
if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
invalid_block = ~0;
map->m_flags = 0;
ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
"logical block %lu\n", inode->i_ino, map->m_len,
(unsigned long) map->m_lblk);
/* Lookup extent status tree firstly */
if (ext4_es_lookup_extent(inode, iblock, &es)) {
if (ext4_es_is_hole(&es)) {
retval = 0;
down_read(&EXT4_I(inode)->i_data_sem);
goto add_delayed;
}
/*
* Delayed extent could be allocated by fallocate.
* So we need to check it.
*/
if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
map_bh(bh, inode->i_sb, invalid_block);
set_buffer_new(bh);
set_buffer_delay(bh);
return 0;
}
map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
retval = es.es_len - (iblock - es.es_lblk);
if (retval > map->m_len)
retval = map->m_len;
map->m_len = retval;
if (ext4_es_is_written(&es))
map->m_flags |= EXT4_MAP_MAPPED;
else if (ext4_es_is_unwritten(&es))
map->m_flags |= EXT4_MAP_UNWRITTEN;
else
BUG_ON(1);
#ifdef ES_AGGRESSIVE_TEST
ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
#endif
return retval;
}
/*
* Try to see if we can get the block without requesting a new
* file system block.
*/
down_read(&EXT4_I(inode)->i_data_sem);
if (ext4_has_inline_data(inode))
retval = 0;
else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
retval = ext4_ext_map_blocks(NULL, inode, map, 0);
else
retval = ext4_ind_map_blocks(NULL, inode, map, 0);
add_delayed:
if (retval == 0) {
int ret;
/*
* XXX: __block_prepare_write() unmaps passed block,
* is it OK?
*/
/*
* If the block was allocated from a previously allocated cluster,
* then we don't need to reserve it again. However, we still need
* to reserve metadata for every block we're going to write.
*/
if (EXT4_SB(inode->i_sb)->s_cluster_ratio == 1 ||
!ext4_find_delalloc_cluster(inode, map->m_lblk)) {
ret = ext4_da_reserve_space(inode);
if (ret) {
/* not enough space to reserve */
retval = ret;
goto out_unlock;
}
}
ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
~0, EXTENT_STATUS_DELAYED);
if (ret) {
retval = ret;
goto out_unlock;
}
map_bh(bh, inode->i_sb, invalid_block);
set_buffer_new(bh);
set_buffer_delay(bh);
} else if (retval > 0) {
int ret;
unsigned int status;
if (unlikely(retval != map->m_len)) {
ext4_warning(inode->i_sb,
"ES len assertion failed for inode "
"%lu: retval %d != map->m_len %d",
inode->i_ino, retval, map->m_len);
WARN_ON(1);
}
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
map->m_pblk, status);
if (ret != 0)
retval = ret;
}
out_unlock:
up_read((&EXT4_I(inode)->i_data_sem));
return retval;
}
/*
* This is a special get_block_t callback which is used by
* ext4_da_write_begin(). It will either return a mapped block or
* reserve space for a single block.
*
* For a delayed buffer_head we have BH_Mapped, BH_New and BH_Delay set.
* We also have b_blocknr = -1 and b_bdev initialized properly.
*
* For an unwritten buffer_head we have BH_Mapped, BH_New and BH_Unwritten set.
* We also have b_blocknr set to the physical block backing the unwritten
* extent, and b_bdev initialized properly.
*/
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
struct ext4_map_blocks map;
int ret = 0;
BUG_ON(create == 0);
BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
map.m_lblk = iblock;
map.m_len = 1;
/*
* First, we need to know whether the block is already allocated;
* preallocated blocks are unmapped but should be treated
* the same as allocated blocks.
*/
ret = ext4_da_map_blocks(inode, iblock, &map, bh);
if (ret <= 0)
return ret;
map_bh(bh, inode->i_sb, map.m_pblk);
ext4_update_bh_state(bh, map.m_flags);
if (buffer_unwritten(bh)) {
/* A delayed write to an unwritten bh should be marked
* new and mapped. Mapped ensures that we don't do
* get_block multiple times when we write to the same
* offset, and new ensures that we do a proper zero-out
* for a partial write.
*/
set_buffer_new(bh);
set_buffer_mapped(bh);
}
return 0;
}
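/*
 * Illustrative sketch (disabled; not used by the code): telling apart
 * the two buffer_head flavours that ext4_da_get_block_prep() can hand
 * back, as described in the comment above it. buffer_mapped(),
 * buffer_new(), buffer_delay() and buffer_unwritten() are the standard
 * buffer_head state tests; the helper name is hypothetical.
 */
#if 0
static const char *example_bh_kind(struct buffer_head *bh)
{
	if (buffer_mapped(bh) && buffer_new(bh) && buffer_delay(bh))
		return "delayed";	/* b_blocknr is a sentinel value */
	if (buffer_mapped(bh) && buffer_new(bh) && buffer_unwritten(bh))
		return "unwritten";	/* b_blocknr is a real physical block */
	return "ordinary";
}
#endif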
static int bget_one(handle_t *handle, struct buffer_head *bh)
{
get_bh(bh);
return 0;
}
static int bput_one(handle_t *handle, struct buffer_head *bh)
{
put_bh(bh);
return 0;
}
static int __ext4_journalled_writepage(struct page *page,
unsigned int len)
{
struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
struct buffer_head *page_bufs = NULL;
handle_t *handle = NULL;
int ret = 0, err = 0;
int inline_data = ext4_has_inline_data(inode);
struct buffer_head *inode_bh = NULL;
ClearPageChecked(page);
if (inline_data) {
BUG_ON(page->index != 0);
BUG_ON(len > ext4_get_max_inline_size(inode));
inode_bh = ext4_journalled_write_inline_data(inode, len, page);
if (inode_bh == NULL)
goto out;
} else {
page_bufs = page_buffers(page);
if (!page_bufs) {
BUG();
goto out;
}
ext4_walk_page_buffers(handle, page_bufs, 0, len,
NULL, bget_one);
}
/*
* We need to release the page lock before we start the
* journal, so grab a reference so the page won't disappear
* out from under us.
*/
get_page(page);
unlock_page(page);
handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
ext4_writepage_trans_blocks(inode));
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
put_page(page);
goto out_no_pagelock;
}
BUG_ON(!ext4_handle_valid(handle));
lock_page(page);
put_page(page);
if (page->mapping != mapping) {
/* The page got truncated from under us */
ext4_journal_stop(handle);
ret = 0;
goto out;
}
if (inline_data) {
BUFFER_TRACE(inode_bh, "get write access");
ret = ext4_journal_get_write_access(handle, inode_bh);
err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
} else {
ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
do_journal_get_write_access);
err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
write_end_fn);
}
if (ret == 0)
ret = err;
EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
if (!ext4_has_inline_data(inode))
ext4_walk_page_buffers(NULL, page_bufs, 0, len,
NULL, bput_one);
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
unlock_page(page);
out_no_pagelock:
brelse(inode_bh);
return ret;
}
/*
* Note that we don't need to start a transaction unless we're journaling data
* because we should have holes filled from ext4_page_mkwrite(). We don't even
* need to add the inode to the transaction's list in ordered mode because if
* we are writing back data added by write(), the inode is already there and if
* we are writing back data modified via mmap(), no one guarantees in which
* transaction the data will hit the disk. In case we are journaling data, we
* cannot start transaction directly because transaction start ranks above page
* lock so we have to do some magic.
*
* This function can get called via...
* - ext4_writepages after taking page lock (have journal handle)
* - journal_submit_inode_data_buffers (no journal handle)
* - shrink_page_list via the kswapd/direct reclaim (no journal handle)
* - grab_page_cache when doing write_begin (have journal handle)
*
* We don't do any block allocation in this function. If we have a page with
* multiple blocks we need to write those buffer_heads that are mapped. This
* is important for mmap-based writes. So if, with blocksize 1K, we do
* truncate(f, 1024);
* a = mmap(f, 0, 4096);
* a[0] = 'a';
* truncate(f, 4096);
* we have in the page first buffer_head mapped via page_mkwrite call back
* but other buffer_heads would be unmapped but dirty (dirty done via the
* do_wp_page). So writepage should write the first block. If we modify
* the mmap area beyond 1024 we will again get a page_fault and the
* page_mkwrite callback will do the block allocation and mark the
* buffer_heads mapped.
*
* We redirty the page if it has any buffer_heads that are either delayed or
* unwritten.
*
* We can get recursively called as shown below.
*
* ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
* ext4_writepage()
*
* But since we don't do any block allocation we should not deadlock.
* The page also has its dirty flag cleared, so we don't get a recursive page_lock.
*/
static int ext4_writepage(struct page *page,
struct writeback_control *wbc)
{
int ret = 0;
loff_t size;
unsigned int len;
struct buffer_head *page_bufs = NULL;
struct inode *inode = page->mapping->host;
struct ext4_io_submit io_submit;
bool keep_towrite = false;
trace_ext4_writepage(page);
size = i_size_read(inode);
if (page->index == size >> PAGE_SHIFT)
len = size & ~PAGE_MASK;
else
len = PAGE_SIZE;
page_bufs = page_buffers(page);
/*
* We cannot do block allocation or other extent handling in this
* function. If there are buffers needing that, we have to redirty
* the page. But we may reach here when we do a journal commit via
* journal_submit_inode_data_buffers() and in that case we must write
* allocated buffers to achieve data=ordered mode guarantees.
*
* Also, if there is only one buffer per page (the fs block
* size == the page size), if one buffer needs block
* allocation or needs to modify the extent tree to clear the
* unwritten flag, we know that the page can't be written at
* all, so we might as well refuse the write immediately.
* Unfortunately if the block size != page size, we can't as
* easily detect this case using ext4_walk_page_buffers(), but
* for the extremely common case, this is an optimization that
* skips a useless round trip through ext4_bio_write_page().
*/
if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
ext4_bh_delay_or_unwritten)) {
redirty_page_for_writepage(wbc, page);
if ((current->flags & PF_MEMALLOC) ||
(inode->i_sb->s_blocksize == PAGE_SIZE)) {
/*
* For memory cleaning there's no point in writing only
* some buffers. So just bail out. Warn if we came here
* from direct reclaim.
*/
WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
== PF_MEMALLOC);
unlock_page(page);
return 0;
}
keep_towrite = true;
}
if (PageChecked(page) && ext4_should_journal_data(inode))
/*
* It's mmapped pagecache. Add buffers and journal it. There
* doesn't seem much point in redirtying the page here.
*/
return __ext4_journalled_writepage(page, len);
ext4_io_submit_init(&io_submit, wbc);
io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
if (!io_submit.io_end) {
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return -ENOMEM;
}
ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
ext4_io_submit(&io_submit);
/* Drop io_end reference we got from init */
ext4_put_io_end_defer(io_submit.io_end);
return ret;
}
static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
{
int len;
loff_t size = i_size_read(mpd->inode);
int err;
BUG_ON(page->index != mpd->first_page);
if (page->index == size >> PAGE_SHIFT)
len = size & ~PAGE_MASK;
else
len = PAGE_SIZE;
clear_page_dirty_for_io(page);
err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
if (!err)
mpd->wbc->nr_to_write--;
mpd->first_page++;
return err;
}
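/*
 * Illustrative sketch (disabled; not used by the code): the partial
 * last-page length computation shared by ext4_writepage() and
 * mpage_submit_page() above. With i_size = 10000 and 4K pages, page
 * index 2 gets len = 10000 & ~PAGE_MASK = 1808 bytes, while earlier
 * pages get the full PAGE_SIZE. The helper name is hypothetical.
 */
#if 0
static unsigned int example_page_write_len(loff_t size, pgoff_t index)
{
	if (index == size >> PAGE_SHIFT)
		return size & ~PAGE_MASK;	/* bytes used in last page */
	return PAGE_SIZE;
}
#endif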
#define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
/*
* mballoc gives us at most this number of blocks...
* XXX: That seems to be only a limitation of ext4_mb_normalize_request().
* The rest of mballoc seems to handle chunks up to full group size.
*/
#define MAX_WRITEPAGES_EXTENT_LEN 2048
/*
* mpage_add_bh_to_extent - try to add bh to extent of blocks to map
*
* @mpd - extent of blocks
* @lblk - logical number of the block in the file
* @bh - buffer head we want to add to the extent
*
* The function is used to collect contiguous blocks in the same state. If the
* buffer doesn't require mapping for writeback and we haven't started the
* extent of buffers to map yet, the function returns 'true' immediately - the
* caller can write the buffer right away. Otherwise the function returns true
* if the block has been added to the extent, false if the block couldn't be
* added.
*/
static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
struct buffer_head *bh)
{
struct ext4_map_blocks *map = &mpd->map;
/* Buffer that doesn't need mapping for writeback? */
if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
(!buffer_delay(bh) && !buffer_unwritten(bh))) {
/* So far no extent to map => we write the buffer right away */
if (map->m_len == 0)
return true;
return false;
}
/* First block in the extent? */
if (map->m_len == 0) {
map->m_lblk = lblk;
map->m_len = 1;
map->m_flags = bh->b_state & BH_FLAGS;
return true;
}
/* Don't go larger than mballoc is willing to allocate */
if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
return false;
/* Can we merge the block to our big extent? */
if (lblk == map->m_lblk + map->m_len &&
(bh->b_state & BH_FLAGS) == map->m_flags) {
map->m_len++;
return true;
}
return false;
}
/*
* mpage_process_page_bufs - submit page buffers for IO or add them to extent
*
* @mpd - extent of blocks for mapping
* @head - the first buffer in the page
* @bh - buffer we should start processing from
* @lblk - logical number of the block in the file corresponding to @bh
*
* Walk through page buffers from @bh up to @head (exclusive) and either submit
* the page for IO if all buffers in this page were mapped and there's no
* accumulated extent of buffers to map, or add buffers in the page to the
* extent of buffers to map. The function returns 1 if the caller can continue
* by processing the next page, 0 if it should stop adding buffers to the
* extent to map because we cannot extend it anymore. It can also return a
* value < 0 in case of an error during IO submission.
*/
static int mpage_process_page_bufs(struct mpage_da_data *mpd,
struct buffer_head *head,
struct buffer_head *bh,
ext4_lblk_t lblk)
{
struct inode *inode = mpd->inode;
int err;
ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
>> inode->i_blkbits;
do {
BUG_ON(buffer_locked(bh));
if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
/* Found extent to map? */
if (mpd->map.m_len)
return 0;
/* Everything mapped so far and we hit EOF */
break;
}
} while (lblk++, (bh = bh->b_this_page) != head);
/* So far everything mapped? Submit the page for IO. */
if (mpd->map.m_len == 0) {
err = mpage_submit_page(mpd, head->b_page);
if (err < 0)
return err;
}
return lblk < blocks;
}
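/*
 * Illustrative sketch (disabled; not used by the code): how a caller is
 * expected to react to the three return classes documented above. The
 * helper name is hypothetical.
 */
#if 0
static int example_process_page(struct mpage_da_data *mpd,
				struct buffer_head *head, ext4_lblk_t lblk)
{
	int err = mpage_process_page_bufs(mpd, head, head, lblk);

	if (err < 0)
		return err;	/* IO submission failed */
	if (err == 0)
		return 0;	/* extent full - map what we gathered */
	return 1;		/* keep scanning the next page */
}
#endif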
/*
* mpage_map_and_submit_buffers - update buffers corresponding to changed extent and
* submit fully mapped pages for IO
*
* @mpd - description of extent to map, on return next extent to map
*
* Scan buffers corresponding to changed extent (we expect corresponding pages
* to be already locked) and update buffer state according to new extent state.
* We map delalloc buffers to their physical location, clear unwritten bits,
* and mark buffers as uninit when we perform writes to unwritten extents
* and do extent conversion after IO is finished. If the last page is not fully
* mapped, we update @map to the next extent in the last page that needs
* mapping. Otherwise we submit the page for IO.
*/
static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
{
struct pagevec pvec;
int nr_pages, i;
struct inode *inode = mpd->inode;
struct buffer_head *head, *bh;
int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
pgoff_t start, end;
ext4_lblk_t lblk;
sector_t pblock;
int err;
start = mpd->map.m_lblk >> bpp_bits;
end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
lblk = start << bpp_bits;
pblock = mpd->map.m_pblk;
pagevec_init(&pvec, 0);
while (start <= end) {
nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start,
PAGEVEC_SIZE);
if (nr_pages == 0)
break;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
if (page->index > end)
break;
/* Up to 'end' pages must be contiguous */
BUG_ON(page->index != start);
bh = head = page_buffers(page);
do {
if (lblk < mpd->map.m_lblk)
continue;
if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
/*
* Buffer after end of mapped extent.
* Find next buffer in the page to map.
*/
mpd->map.m_len = 0;
mpd->map.m_flags = 0;
/*
* FIXME: If dioread_nolock supports
* blocksize < pagesize, we need to make
* sure we add size mapped so far to
* io_end->size as the following call
* can submit the page for IO.
*/
err = mpage_process_page_bufs(mpd, head,
bh, lblk);
pagevec_release(&pvec);
if (err > 0)
err = 0;
return err;
}
if (buffer_delay(bh)) {
clear_buffer_delay(bh);
bh->b_blocknr = pblock++;
}
clear_buffer_unwritten(bh);
} while (lblk++, (bh = bh->b_this_page) != head);
/*
* FIXME: This is going to break if dioread_nolock
* supports blocksize < pagesize as we will try to
* convert potentially unmapped parts of inode.
*/
mpd->io_submit.io_end->size += PAGE_SIZE;
/* Page fully mapped - let IO run! */
err = mpage_submit_page(mpd, page);
if (err < 0) {
pagevec_release(&pvec);
return err;
}
start++;
}
pagevec_release(&pvec);
}
/* Extent fully mapped and matches with page boundary. We are done. */
mpd->map.m_len = 0;
mpd->map.m_flags = 0;
return 0;
}
static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
{
struct inode *inode = mpd->inode;
struct ext4_map_blocks *map = &mpd->map;
int get_blocks_flags;
int err, dioread_nolock;
trace_ext4_da_write_pages_extent(inode, map);
/*
* Call ext4_map_blocks() to allocate any delayed allocation blocks, or
* to convert an unwritten extent to be initialized (in the case
* where we have written into one or more preallocated blocks). It is
* possible that we're going to need more metadata blocks than
* previously reserved. However we must not fail because we're in
* writeback and there is nothing we can do about it so it might result
* in data loss. So use reserved blocks to allocate metadata if
* possible.
*
* We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
* the blocks in question are delalloc blocks. This indicates
* that the blocks and quotas has already been checked when
* the data was copied into the page cache.
*/
get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
EXT4_GET_BLOCKS_METADATA_NOFAIL |
EXT4_GET_BLOCKS_IO_SUBMIT;
dioread_nolock = ext4_should_dioread_nolock(inode);
if (dioread_nolock)
get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
if (map->m_flags & (1 << BH_Delay))
get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
if (err < 0)
return err;
if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
if (!mpd->io_submit.io_end->handle &&
ext4_handle_valid(handle)) {
mpd->io_submit.io_end->handle = handle->h_rsv_handle;
handle->h_rsv_handle = NULL;
}
ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
}
BUG_ON(map->m_len == 0);
if (map->m_flags & EXT4_MAP_NEW) {
struct block_device *bdev = inode->i_sb->s_bdev;
int i;
for (i = 0; i < map->m_len; i++)
unmap_underlying_metadata(bdev, map->m_pblk + i);
}
return 0;
}
/*
* mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
* mpd->len and submit pages underlying it for IO
*
* @handle - handle for journal operations
* @mpd - extent to map
* @give_up_on_write - we set this to true iff there is a fatal error and there
* is no hope of writing the data. The caller should discard
* dirty pages to avoid infinite loops.
*
* The function maps extent starting at mpd->lblk of length mpd->len. If it is
* delayed, blocks are allocated, if it is unwritten, we may need to convert
* them to initialized or split the described range from larger unwritten
* extent. Note that we need not map all of the described range since allocation
* can return fewer blocks, or the range may be covered by more unwritten extents. We
* cannot map more because we are limited by reserved transaction credits. On
* the other hand we always make sure that the last touched page is fully
* mapped so that it can be written out (and thus forward progress is
* guaranteed). After mapping we submit all mapped pages for IO.
*/
static int mpage_map_and_submit_extent(handle_t *handle,
struct mpage_da_data *mpd,
bool *give_up_on_write)
{
struct inode *inode = mpd->inode;
struct ext4_map_blocks *map = &mpd->map;
int err;
loff_t disksize;
int progress = 0;
mpd->io_submit.io_end->offset =
((loff_t)map->m_lblk) << inode->i_blkbits;
do {
err = mpage_map_one_extent(handle, mpd);
if (err < 0) {
struct super_block *sb = inode->i_sb;
if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
goto invalidate_dirty_pages;
/*
* Let the upper layers retry transient errors.
* In the case of ENOSPC, if ext4_count_free_clusters()
* is non-zero, a commit should free up blocks.
*/
if ((err == -ENOMEM) ||
(err == -ENOSPC && ext4_count_free_clusters(sb))) {
if (progress)
goto update_disksize;
return err;
}
ext4_msg(sb, KERN_CRIT,
"Delayed block allocation failed for "
"inode %lu at logical offset %llu with"
" max blocks %u with error %d",
inode->i_ino,
(unsigned long long)map->m_lblk,
(unsigned)map->m_len, -err);
ext4_msg(sb, KERN_CRIT,
"This should not happen!! Data will "
"be lost\n");
if (err == -ENOSPC)
ext4_print_free_blocks(inode);
invalidate_dirty_pages:
*give_up_on_write = true;
return err;
}
progress = 1;
/*
* Update buffer state, submit mapped pages, and get us new
* extent to map
*/
err = mpage_map_and_submit_buffers(mpd);
if (err < 0)
goto update_disksize;
} while (map->m_len);
update_disksize:
/*
* Update on-disk size after IO is submitted. Races with
* truncate are avoided by checking i_size under i_data_sem.
*/
disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
if (disksize > EXT4_I(inode)->i_disksize) {
int err2;
loff_t i_size;
down_write(&EXT4_I(inode)->i_data_sem);
i_size = i_size_read(inode);
if (disksize > i_size)
disksize = i_size;
if (disksize > EXT4_I(inode)->i_disksize)
EXT4_I(inode)->i_disksize = disksize;
err2 = ext4_mark_inode_dirty(handle, inode);
up_write(&EXT4_I(inode)->i_data_sem);
if (err2)
ext4_error(inode->i_sb,
"Failed to mark inode %lu dirty",
inode->i_ino);
if (!err)
err = err2;
}
return err;
}
/*
* Calculate the total number of credits to reserve for one writepages
* iteration. This is called from ext4_writepages(). We map an extent of
* up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
* the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
* bpp - 1 blocks in bpp different extents.
*/
static int ext4_da_writepages_trans_blocks(struct inode *inode)
{
int bpp = ext4_journal_blocks_per_page(inode);
return ext4_meta_trans_blocks(inode,
MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
}
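/*
 * Worked example (disabled sketch; not used by the code): with 4K pages
 * and 1K blocks, ext4_journal_blocks_per_page() returns bpp = 4, so one
 * iteration budgets for MAX_WRITEPAGES_EXTENT_LEN + bpp - 1 =
 * 2048 + 3 = 2051 blocks spread over at most bpp different extents.
 * The helper name is hypothetical.
 */
#if 0
static int example_writepages_block_budget(int bpp)
{
	return MAX_WRITEPAGES_EXTENT_LEN + bpp - 1;
}
#endif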
/*
* mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
* and underlying extent to map
*
* @mpd - where to look for pages
*
* Walk dirty pages in the mapping. If they are fully mapped, submit them for
* IO immediately. When we find a page which isn't mapped we start accumulating
* extent of buffers underlying these pages that needs mapping (formed by
* either delayed or unwritten buffers). We also lock the pages containing
* these buffers. The extent found is returned in @mpd structure (starting at
* mpd->lblk with length mpd->len blocks).
*
* Note that this function can attach bios to one io_end structure which are
* neither logically nor physically contiguous. Although it may seem an
* unnecessary complication, it is actually inevitable in blocksize < pagesize
* case as we need to track IO to all buffers underlying a page in one io_end.
*/
static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
{
struct address_space *mapping = mpd->inode->i_mapping;
struct pagevec pvec;
unsigned int nr_pages;
long left = mpd->wbc->nr_to_write;
pgoff_t index = mpd->first_page;
pgoff_t end = mpd->last_page;
int tag;
int i, err = 0;
int blkbits = mpd->inode->i_blkbits;
ext4_lblk_t lblk;
struct buffer_head *head;
if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
tag = PAGECACHE_TAG_TOWRITE;
else
tag = PAGECACHE_TAG_DIRTY;
pagevec_init(&pvec, 0);
mpd->map.m_len = 0;
mpd->next_page = index;
while (index <= end) {
nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
if (nr_pages == 0)
goto out;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
/*
* At this point, the page may be truncated or
* invalidated (changing page->mapping to NULL), or
* even swizzled back from swapper_space to tmpfs file
* mapping. However, page->index will not change
* because we have a reference on the page.
*/
if (page->index > end)
goto out;
/*
* Accumulated enough dirty pages? This doesn't apply
* to WB_SYNC_ALL mode. For integrity sync we have to
* keep going because someone may be concurrently
* dirtying pages, and we might have synced a lot of
* newly appeared dirty pages, but have not synced all
* of the old dirty pages.
*/
if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
goto out;
/* If we can't merge this page, we are done. */
if (mpd->map.m_len > 0 && mpd->next_page != page->index)
goto out;
lock_page(page);
/*
* If the page is no longer dirty, or its mapping no
* longer corresponds to inode we are writing (which
* means it has been truncated or invalidated), or the
* page is already under writeback and we are not doing
* a data integrity writeback, skip the page
*/
if (!PageDirty(page) ||
(PageWriteback(page) &&
(mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
unlikely(page->mapping != mapping)) {
unlock_page(page);
continue;
}
wait_on_page_writeback(page);
BUG_ON(PageWriteback(page));
if (mpd->map.m_len == 0)
mpd->first_page = page->index;
mpd->next_page = page->index + 1;
/* Add all dirty buffers to mpd */
lblk = ((ext4_lblk_t)page->index) <<
(PAGE_SHIFT - blkbits);
head = page_buffers(page);
err = mpage_process_page_bufs(mpd, head, head, lblk);
if (err <= 0)
goto out;
err = 0;
left--;
}
pagevec_release(&pvec);
cond_resched();
}
return 0;
out:
pagevec_release(&pvec);
return err;
}
static int __writepage(struct page *page, struct writeback_control *wbc,
void *data)
{
struct address_space *mapping = data;
int ret = ext4_writepage(page, wbc);
mapping_set_error(mapping, ret);
return ret;
}
static int ext4_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
pgoff_t writeback_index = 0;
long nr_to_write = wbc->nr_to_write;
int range_whole = 0;
int cycled = 1;
handle_t *handle = NULL;
struct mpage_da_data mpd;
struct inode *inode = mapping->host;
int needed_blocks, rsv_blocks = 0, ret = 0;
struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
bool done;
struct blk_plug plug;
bool give_up_on_write = false;
percpu_down_read(&sbi->s_journal_flag_rwsem);
trace_ext4_writepages(inode, wbc);
if (dax_mapping(mapping)) {
ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev,
wbc);
goto out_writepages;
}
/*
* No pages to write? This is mainly a kludge to avoid starting
* a transaction for special inodes like the journal inode on last iput(),
* because that could violate lock ordering on umount.
*/
if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
goto out_writepages;
if (ext4_should_journal_data(inode)) {
struct blk_plug plug;
blk_start_plug(&plug);
ret = write_cache_pages(mapping, wbc, __writepage, mapping);
blk_finish_plug(&plug);
goto out_writepages;
}
/*
* If the filesystem has aborted, it is read-only, so return
* right away instead of dumping stack traces later on that
* will obscure the real source of the problem. We test
* EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
* the latter could be true if the filesystem is mounted
* read-only, and in that case, ext4_writepages should
* *never* be called, so if that ever happens, we would want
* the stack trace.
*/
if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) {
ret = -EROFS;
goto out_writepages;
}
if (ext4_should_dioread_nolock(inode)) {
/*
* We may need to convert up to one extent per block in
* the page and we may dirty the inode.
*/
rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits);
}
/*
* If we have inline data and arrive here, it means that
* we will soon create the block for the 1st page, so
* we'd better clear the inline data here.
*/
if (ext4_has_inline_data(inode)) {
/* Just inode will be modified... */
handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out_writepages;
}
BUG_ON(ext4_test_inode_state(inode,
EXT4_STATE_MAY_INLINE_DATA));
ext4_destroy_inline_data(handle, inode);
ext4_journal_stop(handle);
}
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
if (wbc->range_cyclic) {
writeback_index = mapping->writeback_index;
if (writeback_index)
cycled = 0;
mpd.first_page = writeback_index;
mpd.last_page = -1;
} else {
mpd.first_page = wbc->range_start >> PAGE_SHIFT;
mpd.last_page = wbc->range_end >> PAGE_SHIFT;
}
mpd.inode = inode;
mpd.wbc = wbc;
ext4_io_submit_init(&mpd.io_submit, wbc);
retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
done = false;
blk_start_plug(&plug);
while (!done && mpd.first_page <= mpd.last_page) {
/* For each extent of pages we use new io_end */
mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
if (!mpd.io_submit.io_end) {
ret = -ENOMEM;
break;
}
/*
* We have two constraints: we find one extent to map and we
* must always write out the whole page (makes a difference when
* blocksize < pagesize) so that we don't block on IO when we
* try to write out the rest of the page. Journalled mode is
* not supported by delalloc.
*/
BUG_ON(ext4_should_journal_data(inode));
needed_blocks = ext4_da_writepages_trans_blocks(inode);
/* start a new transaction */
handle = ext4_journal_start_with_reserve(inode,
EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
"%ld pages, ino %lu; err %d", __func__,
wbc->nr_to_write, inode->i_ino, ret);
/* Release allocated io_end */
ext4_put_io_end(mpd.io_submit.io_end);
break;
}
trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
ret = mpage_prepare_extent_to_map(&mpd);
if (!ret) {
if (mpd.map.m_len)
ret = mpage_map_and_submit_extent(handle, &mpd,
&give_up_on_write);
else {
/*
* We scanned the whole range (or exhausted
* nr_to_write), submitted what was mapped and
* didn't find anything needing mapping. We are
* done.
*/
done = true;
}
}
/*
* Caution: If the handle is synchronous,
* ext4_journal_stop() can wait for transaction commit
* to finish which may depend on writeback of pages to
* complete or on page lock to be released. In that
* case, we have to wait until after we have
* submitted all the IO, released page locks we hold,
* and dropped io_end reference (for extent conversion
* to be able to complete) before stopping the handle.
*/
if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
ext4_journal_stop(handle);
handle = NULL;
}
/* Submit prepared bio */
ext4_io_submit(&mpd.io_submit);
/* Unlock pages we didn't use */
mpage_release_unused_pages(&mpd, give_up_on_write);
/*
* Drop our io_end reference we got from init. We have
* to be careful and use deferred io_end finishing if
* we are still holding the transaction as we can
* release the last reference to io_end which may end
* up doing unwritten extent conversion.
*/
if (handle) {
ext4_put_io_end_defer(mpd.io_submit.io_end);
ext4_journal_stop(handle);
} else
ext4_put_io_end(mpd.io_submit.io_end);
if (ret == -ENOSPC && sbi->s_journal) {
/*
* Commit the transaction which would
* free blocks released in the transaction
* and try again
*/
jbd2_journal_force_commit_nested(sbi->s_journal);
ret = 0;
continue;
}
/* Fatal error - ENOMEM, EIO... */
if (ret)
break;
}
blk_finish_plug(&plug);
if (!ret && !cycled && wbc->nr_to_write > 0) {
cycled = 1;
mpd.last_page = writeback_index - 1;
mpd.first_page = 0;
goto retry;
}
/* Update index */
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
/*
* Set the writeback_index so that range_cyclic
* mode will write it back later
*/
mapping->writeback_index = mpd.first_page;
out_writepages:
trace_ext4_writepages_result(inode, wbc, ret,
nr_to_write - wbc->nr_to_write);
percpu_up_read(&sbi->s_journal_flag_rwsem);
return ret;
}
static int ext4_nonda_switch(struct super_block *sb)
{
s64 free_clusters, dirty_clusters;
struct ext4_sb_info *sbi = EXT4_SB(sb);
/*
* Switch to non-delalloc mode if we are running low
* on free blocks. The free block accounting via percpu
* counters can get slightly wrong with percpu_counter_batch getting
* accumulated on each CPU without updating global counters.
* Delalloc needs an accurate free block accounting, so switch
* to non-delalloc when we are near the error range.
*/
free_clusters =
percpu_counter_read_positive(&sbi->s_freeclusters_counter);
dirty_clusters =
percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
/*
* Start pushing delalloc when 1/2 of free blocks are dirty.
*/
if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
if (2 * free_clusters < 3 * dirty_clusters ||
free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
/*
* free block count is less than 150% of dirty blocks,
* or the free block count is less than the watermark
*/
return 1;
}
return 0;
}
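/*
 * Illustrative sketch (disabled; not used by the code): the two
 * thresholds tested in ext4_nonda_switch() above. Background writeback
 * is kicked once dirty clusters exceed half of the free ones; the
 * switch to non-delalloc happens once free < 1.5 * dirty or free drops
 * under the watermark. Names are hypothetical.
 */
#if 0
static int example_nonda_switch(long long free, long long dirty,
				long long watermark)
{
	return 2 * free < 3 * dirty || free < dirty + watermark;
}
#endif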
/* We always reserve for an inode update; the superblock could be there too */
static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
{
if (likely(ext4_has_feature_large_file(inode->i_sb)))
return 1;
if (pos + len <= 0x7fffffffULL)
return 1;
/* We might need to update the superblock to set LARGE_FILE */
return 2;
}
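/*
 * Illustrative sketch (disabled; not used by the code): why the extra
 * credit above. Without the LARGE_FILE feature, a write that crosses
 * the 2^31 - 1 byte boundary may have to set the feature flag in the
 * superblock, so two buffers (sb + inode) can be dirtied. The helper
 * name is hypothetical.
 */
#if 0
static int example_needs_sb_credit(unsigned long long pos, unsigned int len,
				   int has_large_file)
{
	return !has_large_file && pos + len > 0x7fffffffULL;
}
#endif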
static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
int ret, retries = 0;
struct page *page;
pgoff_t index;
struct inode *inode = mapping->host;
handle_t *handle;
index = pos >> PAGE_SHIFT;
if (ext4_nonda_switch(inode->i_sb)) {
*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
return ext4_write_begin(file, mapping, pos,
len, flags, pagep, fsdata);
}
*fsdata = (void *)0;
trace_ext4_da_write_begin(inode, pos, len, flags);
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
ret = ext4_da_write_inline_data_begin(mapping, inode,
pos, len, flags,
pagep, fsdata);
if (ret < 0)
return ret;
if (ret == 1)
return 0;
}
/*
* grab_cache_page_write_begin() can take a long time if the
* system is thrashing due to memory pressure, or if the page
* is being written back. So grab it first before we start
* the transaction handle. This also allows us to allocate
* the page (if needed) without using GFP_NOFS.
*/
retry_grab:
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
return -ENOMEM;
unlock_page(page);
/*
* With delayed allocation, we don't log the i_disksize update
* if there is delayed block allocation. But we still need
* to journal the i_disksize update for a write to the end
* of file that hits an already-mapped buffer.
*/
retry_journal:
handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
ext4_da_write_credits(inode, pos, len));
if (IS_ERR(handle)) {
put_page(page);
return PTR_ERR(handle);
}
lock_page(page);
if (page->mapping != mapping) {
/* The page got truncated from under us */
unlock_page(page);
put_page(page);
ext4_journal_stop(handle);
goto retry_grab;
}
/* In case writeback began while the page was unlocked */
wait_for_stable_page(page);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
ret = ext4_block_write_begin(page, pos, len,
ext4_da_get_block_prep);
#else
ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
#endif
if (ret < 0) {
unlock_page(page);
ext4_journal_stop(handle);
/*
* block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. We don't need
* i_size_read because we hold i_mutex.
*/
if (pos + len > inode->i_size)
ext4_truncate_failed_write(inode);
if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry_journal;
put_page(page);
return ret;
}
*pagep = page;
return ret;
}
/*
* Check if we should update i_disksize
* when write to the end of file but not require block allocation
*/
static int ext4_da_should_update_i_disksize(struct page *page,
unsigned long offset)
{
struct buffer_head *bh;
struct inode *inode = page->mapping->host;
unsigned int idx;
int i;
bh = page_buffers(page);
idx = offset >> inode->i_blkbits;
for (i = 0; i < idx; i++)
bh = bh->b_this_page;
if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
return 0;
return 1;
}
static int ext4_da_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
struct inode *inode = mapping->host;
int ret = 0, ret2;
handle_t *handle = ext4_journal_current_handle();
loff_t new_i_size;
unsigned long start, end;
int write_mode = (int)(unsigned long)fsdata;
if (write_mode == FALL_BACK_TO_NONDELALLOC)
return ext4_write_end(file, mapping, pos,
len, copied, page, fsdata);
trace_ext4_da_write_end(inode, pos, len, copied);
start = pos & (PAGE_SIZE - 1);
end = start + copied - 1;
/*
* generic_write_end() will run mark_inode_dirty() if i_size
* changes. So let's piggyback the i_disksize mark_inode_dirty
* into that.
*/
new_i_size = pos + copied;
if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
if (ext4_has_inline_data(inode) ||
ext4_da_should_update_i_disksize(page, end)) {
ext4_update_i_disksize(inode, new_i_size);
/* We need to mark the inode dirty even if
* new_i_size is less than inode->i_size
* but greater than i_disksize (hint: delalloc).
*/
ext4_mark_inode_dirty(handle, inode);
}
}
if (write_mode != CONVERT_INLINE_DATA &&
ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
ext4_has_inline_data(inode))
ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
page);
else
ret2 = generic_write_end(file, mapping, pos, len, copied,
page, fsdata);
copied = ret2;
if (ret2 < 0)
ret = ret2;
ret2 = ext4_journal_stop(handle);
if (!ret)
ret = ret2;
return ret ? ret : copied;
}
static void ext4_da_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
/*
* Drop reserved blocks
*/
BUG_ON(!PageLocked(page));
if (!page_has_buffers(page))
goto out;
ext4_da_page_release_reservation(page, offset, length);
out:
ext4_invalidatepage(page, offset, length);
return;
}
/*
* Force all delayed allocation blocks to be allocated for a given inode.
*/
int ext4_alloc_da_blocks(struct inode *inode)
{
trace_ext4_alloc_da_blocks(inode);
if (!EXT4_I(inode)->i_reserved_data_blocks)
return 0;
/*
* We do something simple for now. The filemap_flush() will
* also start triggering a write of the data blocks, which is
* not strictly speaking necessary (and for users of
* laptop_mode, not even desirable). However, to do otherwise
* would require replicating code paths in:
*
* ext4_writepages() ->
* write_cache_pages() ---> (via passed in callback function)
* __mpage_da_writepage() -->
* mpage_add_bh_to_extent()
* mpage_da_map_blocks()
*
* The problem is that write_cache_pages(), located in
* mm/page-writeback.c, marks pages clean in preparation for
* doing I/O, which is not desirable if we're not planning on
* doing I/O at all.
*
* We could call write_cache_pages(), and then redirty all of
* the pages by calling redirty_page_for_writepage() but that
* would be ugly in the extreme. So instead we would need to
* replicate parts of the code in the above functions,
* simplifying them because we wouldn't actually intend to
* write out the pages, but rather only collect contiguous
* logical block extents, call the multi-block allocator, and
* then update the buffer heads with the block allocations.
*
* For now, though, we'll cheat by calling filemap_flush(),
* which will map the blocks, and start the I/O, but not
* actually wait for the I/O to complete.
*/
return filemap_flush(inode->i_mapping);
}
/*
* bmap() is special. It gets used by applications such as lilo and by
* the swapper to find the on-disk block of a specific piece of data.
*
* Naturally, this is dangerous if the block concerned is still in the
* journal. If somebody makes a swapfile on an ext4 data-journaling
* filesystem and enables swap, then they may get a nasty shock when the
* data getting swapped to that swapfile suddenly gets overwritten by
* the original zeros written out previously to the journal and
* awaiting writeback in the kernel's buffer cache.
*
* So, if we see any bmap calls here on a modified, data-journaled file,
* take extra steps to flush any blocks which might be in the cache.
*/
static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
{
struct inode *inode = mapping->host;
journal_t *journal;
int err;
/*
* We can get here for an inline file via the FIBMAP ioctl
*/
if (ext4_has_inline_data(inode))
return 0;
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
test_opt(inode->i_sb, DELALLOC)) {
/*
* With delalloc we want to sync the file
* so that we can make sure we allocate
* blocks for the file
*/
filemap_write_and_wait(mapping);
}
if (EXT4_JOURNAL(inode) &&
ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
/*
* This is a REALLY heavyweight approach, but the use of
* bmap on dirty files is expected to be extremely rare:
* only if we run lilo or swapon on a freshly made file
* do we expect this to happen.
*
* (bmap requires CAP_SYS_RAWIO so this does not
* represent an unprivileged user DOS attack --- we'd be
* in trouble if mortal users could trigger this path at
* will.)
*
* NB. EXT4_STATE_JDATA is not set on files other than
* regular files. If somebody wants to bmap a directory
* or symlink and gets confused because the buffer
* hasn't yet been flushed to disk, they deserve
* everything they get.
*/
ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
journal = EXT4_JOURNAL(inode);
jbd2_journal_lock_updates(journal);
err = jbd2_journal_flush(journal);
jbd2_journal_unlock_updates(journal);
if (err)
return 0;
}
return generic_block_bmap(mapping, block, ext4_get_block);
}
static int ext4_readpage(struct file *file, struct page *page)
{
int ret = -EAGAIN;
struct inode *inode = page->mapping->host;
trace_ext4_readpage(page);
if (ext4_has_inline_data(inode))
ret = ext4_readpage_inline(inode, page);
if (ret == -EAGAIN)
return ext4_mpage_readpages(page->mapping, NULL, page, 1);
return ret;
}
static int
ext4_readpages(struct file *file, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
struct inode *inode = mapping->host;
/* If the file has inline data, no need to do readpages. */
if (ext4_has_inline_data(inode))
return 0;
return ext4_mpage_readpages(mapping, pages, NULL, nr_pages);
}
static void ext4_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
trace_ext4_invalidatepage(page, offset, length);
/* No journalling happens on data buffers when this function is used */
WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
block_invalidatepage(page, offset, length);
}
static int __ext4_journalled_invalidatepage(struct page *page,
unsigned int offset,
unsigned int length)
{
journal_t *journal = EXT4_JOURNAL(page->mapping->host);
trace_ext4_journalled_invalidatepage(page, offset, length);
/*
* If it's a full truncate we just forget about the pending dirtying
*/
if (offset == 0 && length == PAGE_SIZE)
ClearPageChecked(page);
return jbd2_journal_invalidatepage(journal, page, offset, length);
}
/* Wrapper for aops... */
static void ext4_journalled_invalidatepage(struct page *page,
unsigned int offset,
unsigned int length)
{
WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
}
static int ext4_releasepage(struct page *page, gfp_t wait)
{
journal_t *journal = EXT4_JOURNAL(page->mapping->host);
trace_ext4_releasepage(page);
/* Page has dirty journalled data -> cannot release */
if (PageChecked(page))
return 0;
if (journal)
return jbd2_journal_try_to_free_buffers(journal, page, wait);
else
return try_to_free_buffers(page);
}
#ifdef CONFIG_FS_DAX
/*
* Get block function for DAX IO and mmap faults. It takes care of converting
* unwritten extents to written ones and initializes new / converted blocks
* to zeros.
*/
int ext4_dax_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
int ret;
ext4_debug("inode %lu, create flag %d\n", inode->i_ino, create);
if (!create)
return _ext4_get_block(inode, iblock, bh_result, 0);
ret = ext4_get_block_trans(inode, iblock, bh_result,
EXT4_GET_BLOCKS_PRE_IO |
EXT4_GET_BLOCKS_CREATE_ZERO);
if (ret < 0)
return ret;
if (buffer_unwritten(bh_result)) {
/*
* We are protected by i_mmap_sem or i_mutex, so we know the block
* cannot go away from under us even though we dropped
* i_data_sem. Convert the extent to written and write zeros there.
*/
ret = ext4_get_block_trans(inode, iblock, bh_result,
EXT4_GET_BLOCKS_CONVERT |
EXT4_GET_BLOCKS_CREATE_ZERO);
if (ret < 0)
return ret;
}
/*
* At least for now we have to clear BH_New so that DAX code
* doesn't attempt to zero blocks again in a racy way.
*/
clear_buffer_new(bh_result);
return 0;
}
#else
/* Just define empty function, it will never get called. */
int ext4_dax_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
BUG();
return 0;
}
#endif
static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
ssize_t size, void *private)
{
ext4_io_end_t *io_end = private;
/* if not async direct IO just return */
if (!io_end)
return 0;
ext_debug("ext4_end_io_dio(): io_end 0x%p "
"for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
io_end, io_end->inode->i_ino, iocb, offset, size);
/*
* Error during AIO DIO. We cannot convert unwritten extents as the
* data was not written. Just clear the unwritten flag and drop io_end.
*/
if (size <= 0) {
ext4_clear_io_unwritten_flag(io_end);
size = 0;
}
io_end->offset = offset;
io_end->size = size;
ext4_put_io_end(io_end);
return 0;
}
/*
* Handling of direct IO writes.
*
* For ext4 extent files, ext4 will do direct-IO writes even to holes,
* preallocated extents, and writes that extend the file; there is no need
* to fall back to buffered IO.
*
* For holes, we fallocate those blocks and mark them as unwritten.
* If those blocks were preallocated, we make sure they are split, but
* still keep the range to write as unwritten.
*
* The unwritten extents will be converted to written when DIO is completed.
* For async direct IO, since the IO may still be pending on return, we
* set up an end_io callback function, which will do the conversion
* when the async direct IO has completed.
*
* If the O_DIRECT write will extend the file then add this inode to the
* orphan list, so recovery will truncate it back to the original size
* if the machine crashes during the write.
*
*/
static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
struct ext4_inode_info *ei = EXT4_I(inode);
ssize_t ret;
loff_t offset = iocb->ki_pos;
size_t count = iov_iter_count(iter);
int overwrite = 0;
get_block_t *get_block_func = NULL;
int dio_flags = 0;
loff_t final_size = offset + count;
int orphan = 0;
handle_t *handle;
if (final_size > inode->i_size) {
/* Credits for sb + inode write */
handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out;
}
ret = ext4_orphan_add(handle, inode);
if (ret) {
ext4_journal_stop(handle);
goto out;
}
orphan = 1;
ei->i_disksize = inode->i_size;
ext4_journal_stop(handle);
}
BUG_ON(iocb->private == NULL);
/*
* Make all waiters for direct IO properly wait also for extent
* conversion. This also disallows a race between truncate() and
* overwrite DIO, as i_dio_count needs to be incremented under i_mutex.
*/
inode_dio_begin(inode);
/* If we do an overwrite DIO, i_mutex locking can be released */
overwrite = *((int *)iocb->private);
if (overwrite)
inode_unlock(inode);
/*
* For extent-mapped files we can do direct writes to holes and to
* fallocated extents.
*
* Blocks allocated to fill a hole are marked as unwritten to prevent a
* parallel buffered read from exposing stale data before the DIO
* completes.
*
* As to previously fallocated extents, ext4 get_block will simply
* mark the buffer mapped but still keep the extents unwritten.
*
* For the non-AIO case, we convert those unwritten extents to written
* after returning from blockdev_direct_IO. That saves us from
* allocating an io_end structure and also the overhead of offloading
* the extent conversion to a workqueue.
*
* For async DIO, the conversion needs to be deferred when the
* IO is completed. The ext4 end_io callback function will be
* called to take care of the conversion work. Here for async
* case, we allocate an io_end structure to hook to the iocb.
*/
iocb->private = NULL;
if (overwrite)
get_block_func = ext4_dio_get_block_overwrite;
else if (IS_DAX(inode)) {
/*
* We can avoid zeroing for aligned DAX writes beyond EOF. Other
* writes need zeroing either because they can race with page
* faults or because they use partial blocks.
*/
if (round_down(offset, 1<<inode->i_blkbits) >= inode->i_size &&
ext4_aligned_io(inode, offset, count))
get_block_func = ext4_dio_get_block;
else
get_block_func = ext4_dax_get_block;
dio_flags = DIO_LOCKING;
} else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
round_down(offset, 1 << inode->i_blkbits) >= inode->i_size) {
get_block_func = ext4_dio_get_block;
dio_flags = DIO_LOCKING | DIO_SKIP_HOLES;
} else if (is_sync_kiocb(iocb)) {
get_block_func = ext4_dio_get_block_unwritten_sync;
dio_flags = DIO_LOCKING;
} else {
get_block_func = ext4_dio_get_block_unwritten_async;
dio_flags = DIO_LOCKING;
}
#ifdef CONFIG_EXT4_FS_ENCRYPTION
BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
#endif
if (IS_DAX(inode)) {
ret = dax_do_io(iocb, inode, iter, get_block_func,
ext4_end_io_dio, dio_flags);
} else
ret = __blockdev_direct_IO(iocb, inode,
inode->i_sb->s_bdev, iter,
get_block_func,
ext4_end_io_dio, NULL, dio_flags);
if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
EXT4_STATE_DIO_UNWRITTEN)) {
int err;
/*
* For the non-AIO case, since the IO has already
* completed, we can do the conversion right here.
*/
err = ext4_convert_unwritten_extents(NULL, inode,
offset, ret);
if (err < 0)
ret = err;
ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
}
inode_dio_end(inode);
/* Take i_mutex locking again if we did an overwrite DIO */
if (overwrite)
inode_lock(inode);
if (ret < 0 && final_size > inode->i_size)
ext4_truncate_failed_write(inode);
/* Handle extending of i_size after direct IO write */
if (orphan) {
int err;
/* Credits for sb + inode write */
handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
if (IS_ERR(handle)) {
/* This is really bad luck. We've written the data
* but cannot extend i_size. Bail out and pretend
* the write failed... */
ret = PTR_ERR(handle);
if (inode->i_nlink)
ext4_orphan_del(NULL, inode);
goto out;
}
if (inode->i_nlink)
ext4_orphan_del(handle, inode);
if (ret > 0) {
loff_t end = offset + ret;
if (end > inode->i_size) {
ei->i_disksize = end;
i_size_write(inode, end);
/*
* We're going to return a positive `ret'
* here due to non-zero-length I/O, so there's
* no way of reporting error returns from
* ext4_mark_inode_dirty() to userspace. So
* ignore it.
*/
ext4_mark_inode_dirty(handle, inode);
}
}
err = ext4_journal_stop(handle);
if (ret == 0)
ret = err;
}
out:
return ret;
}
static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
{
int unlocked = 0;
struct inode *inode = iocb->ki_filp->f_mapping->host;
ssize_t ret;
if (ext4_should_dioread_nolock(inode)) {
/*
* Nolock dioread optimization may be dynamically disabled
* via ext4_inode_block_unlocked_dio(). Check inode's state
* while holding extra i_dio_count ref.
*/
inode_dio_begin(inode);
smp_mb();
if (unlikely(ext4_test_inode_state(inode,
EXT4_STATE_DIOREAD_LOCK)))
inode_dio_end(inode);
else
unlocked = 1;
}
if (IS_DAX(inode)) {
ret = dax_do_io(iocb, inode, iter, ext4_dio_get_block,
NULL, unlocked ? 0 : DIO_LOCKING);
} else {
ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
iter, ext4_dio_get_block,
NULL, NULL,
unlocked ? 0 : DIO_LOCKING);
}
if (unlocked)
inode_dio_end(inode);
return ret;
}
static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
size_t count = iov_iter_count(iter);
loff_t offset = iocb->ki_pos;
ssize_t ret;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
return 0;
#endif
/*
* If we are doing data journalling we don't support O_DIRECT
*/
if (ext4_should_journal_data(inode))
return 0;
/* Let buffer I/O handle the inline data case. */
if (ext4_has_inline_data(inode))
return 0;
trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
if (iov_iter_rw(iter) == READ)
ret = ext4_direct_IO_read(iocb, iter);
else
ret = ext4_direct_IO_write(iocb, iter);
trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
return ret;
}
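/*
* Illustrative sketch (not compiled; the function name and the 4096-byte
* alignment are assumptions for the example): a minimal userspace program
* exercising the O_DIRECT write path dispatched through ->direct_IO above.
* O_DIRECT generally requires the buffer, file offset and length to be
* aligned to the device's logical block size.
*/
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int direct_write_example(const char *path)
{
        void *buf;
        ssize_t n;
        int fd;

        if (posix_memalign(&buf, 4096, 4096))
                return -1;
        memset(buf, 0xab, 4096);
        fd = open(path, O_WRONLY | O_CREAT | O_DIRECT, 0644);
        if (fd < 0) {
                free(buf);
                return -1;
        }
        /* Aligned write at an aligned offset; served by ->direct_IO. */
        n = pwrite(fd, buf, 4096, 0);
        close(fd);
        free(buf);
        return n == 4096 ? 0 : -1;
}
#endif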
/*
* Pages can be marked dirty completely asynchronously from ext4's journalling
* activity, by filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
* much here because ->set_page_dirty is called under VFS locks. The page is
* not necessarily locked.
*
* We cannot just dirty the page and leave attached buffers clean, because the
* buffers' dirty state is "definitive". We cannot just set the buffers dirty
* or jbddirty because all the journalling code will explode.
*
* So what we do is to mark the page "pending dirty" and next time writepage
* is called, propagate that into the buffers appropriately.
*/
static int ext4_journalled_set_page_dirty(struct page *page)
{
SetPageChecked(page);
return __set_page_dirty_nobuffers(page);
}
static const struct address_space_operations ext4_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
.writepages = ext4_writepages,
.write_begin = ext4_write_begin,
.write_end = ext4_write_end,
.bmap = ext4_bmap,
.invalidatepage = ext4_invalidatepage,
.releasepage = ext4_releasepage,
.direct_IO = ext4_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations ext4_journalled_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
.writepages = ext4_writepages,
.write_begin = ext4_write_begin,
.write_end = ext4_journalled_write_end,
.set_page_dirty = ext4_journalled_set_page_dirty,
.bmap = ext4_bmap,
.invalidatepage = ext4_journalled_invalidatepage,
.releasepage = ext4_releasepage,
.direct_IO = ext4_direct_IO,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations ext4_da_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
.writepages = ext4_writepages,
.write_begin = ext4_da_write_begin,
.write_end = ext4_da_write_end,
.bmap = ext4_bmap,
.invalidatepage = ext4_da_invalidatepage,
.releasepage = ext4_releasepage,
.direct_IO = ext4_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
void ext4_set_aops(struct inode *inode)
{
switch (ext4_inode_journal_mode(inode)) {
case EXT4_INODE_ORDERED_DATA_MODE:
case EXT4_INODE_WRITEBACK_DATA_MODE:
break;
case EXT4_INODE_JOURNAL_DATA_MODE:
inode->i_mapping->a_ops = &ext4_journalled_aops;
return;
default:
BUG();
}
if (test_opt(inode->i_sb, DELALLOC))
inode->i_mapping->a_ops = &ext4_da_aops;
else
inode->i_mapping->a_ops = &ext4_aops;
}
static int __ext4_block_zero_page_range(handle_t *handle,
struct address_space *mapping, loff_t from, loff_t length)
{
ext4_fsblk_t index = from >> PAGE_SHIFT;
unsigned offset = from & (PAGE_SIZE-1);
unsigned blocksize, pos;
ext4_lblk_t iblock;
struct inode *inode = mapping->host;
struct buffer_head *bh;
struct page *page;
int err = 0;
page = find_or_create_page(mapping, from >> PAGE_SHIFT,
mapping_gfp_constraint(mapping, ~__GFP_FS));
if (!page)
return -ENOMEM;
blocksize = inode->i_sb->s_blocksize;
iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
/* Find the buffer that contains "offset" */
bh = page_buffers(page);
pos = blocksize;
while (offset >= pos) {
bh = bh->b_this_page;
iblock++;
pos += blocksize;
}
if (buffer_freed(bh)) {
BUFFER_TRACE(bh, "freed: skip");
goto unlock;
}
if (!buffer_mapped(bh)) {
BUFFER_TRACE(bh, "unmapped");
ext4_get_block(inode, iblock, bh, 0);
/* unmapped? It's a hole - nothing to do */
if (!buffer_mapped(bh)) {
BUFFER_TRACE(bh, "still unmapped");
goto unlock;
}
}
/* Ok, it's mapped. Make sure it's up-to-date */
if (PageUptodate(page))
set_buffer_uptodate(bh);
if (!buffer_uptodate(bh)) {
err = -EIO;
ll_rw_block(REQ_OP_READ, 0, 1, &bh);
wait_on_buffer(bh);
/* Uhhuh. Read error. Complain and punt. */
if (!buffer_uptodate(bh))
goto unlock;
if (S_ISREG(inode->i_mode) &&
ext4_encrypted_inode(inode)) {
/* We expect the key to be set. */
BUG_ON(!fscrypt_has_encryption_key(inode));
BUG_ON(blocksize != PAGE_SIZE);
WARN_ON_ONCE(fscrypt_decrypt_page(page));
}
}
if (ext4_should_journal_data(inode)) {
BUFFER_TRACE(bh, "get write access");
err = ext4_journal_get_write_access(handle, bh);
if (err)
goto unlock;
}
zero_user(page, offset, length);
BUFFER_TRACE(bh, "zeroed end of block");
if (ext4_should_journal_data(inode)) {
err = ext4_handle_dirty_metadata(handle, inode, bh);
} else {
err = 0;
mark_buffer_dirty(bh);
if (ext4_should_order_data(inode))
err = ext4_jbd2_inode_add_write(handle, inode);
}
unlock:
unlock_page(page);
put_page(page);
return err;
}
/*
* ext4_block_zero_page_range() zeros out a mapping of length 'length'
* starting from file offset 'from'. The range to be zeroed must be
* contained within one block. If the specified range exceeds the end of
* the block it will be shortened to the end of the block that
* corresponds to 'from'.
*/
static int ext4_block_zero_page_range(handle_t *handle,
struct address_space *mapping, loff_t from, loff_t length)
{
struct inode *inode = mapping->host;
unsigned offset = from & (PAGE_SIZE-1);
unsigned blocksize = inode->i_sb->s_blocksize;
unsigned max = blocksize - (offset & (blocksize - 1));
/*
* Shorten the length if the range would extend beyond the end
* of the block.
*/
if (length > max || length < 0)
length = max;
if (IS_DAX(inode))
return dax_zero_page_range(inode, from, length, ext4_get_block);
return __ext4_block_zero_page_range(handle, mapping, from, length);
}
/*
* ext4_block_truncate_page() zeroes out a mapping from file offset `from'
* up to the end of the block which corresponds to `from'.
* This is required during truncate. We need to physically zero the tail
* end of that block so it doesn't yield old data if the file is later grown.
*/
static int ext4_block_truncate_page(handle_t *handle,
struct address_space *mapping, loff_t from)
{
unsigned offset = from & (PAGE_SIZE-1);
unsigned length;
unsigned blocksize;
struct inode *inode = mapping->host;
blocksize = inode->i_sb->s_blocksize;
length = blocksize - (offset & (blocksize - 1));
return ext4_block_zero_page_range(handle, mapping, from, length);
}
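/*
* Worked example (illustrative numbers): with a 1 KiB block size and 4 KiB
* pages, truncating to i_size 3000 gives offset = 3000 & 4095 = 3000 and
* length = 1024 - (3000 & 1023) = 1024 - 952 = 72, so bytes 3000..3071,
* the tail of the 1 KiB block containing offset 3000, are zeroed.
*/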
int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
loff_t lstart, loff_t length)
{
struct super_block *sb = inode->i_sb;
struct address_space *mapping = inode->i_mapping;
unsigned partial_start, partial_end;
ext4_fsblk_t start, end;
loff_t byte_end = (lstart + length - 1);
int err = 0;
partial_start = lstart & (sb->s_blocksize - 1);
partial_end = byte_end & (sb->s_blocksize - 1);
start = lstart >> sb->s_blocksize_bits;
end = byte_end >> sb->s_blocksize_bits;
/* Handle partial zero within the single block */
if (start == end &&
(partial_start || (partial_end != sb->s_blocksize - 1))) {
err = ext4_block_zero_page_range(handle, mapping,
lstart, length);
return err;
}
/* Handle partial zero out on the start of the range */
if (partial_start) {
err = ext4_block_zero_page_range(handle, mapping,
lstart, sb->s_blocksize);
if (err)
return err;
}
/* Handle partial zero out on the end of the range */
if (partial_end != sb->s_blocksize - 1)
err = ext4_block_zero_page_range(handle, mapping,
byte_end - partial_end,
partial_end + 1);
return err;
}
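/*
* Worked example (illustrative numbers): blocksize 4096, lstart = 5000,
* length = 10000, so byte_end = 14999, partial_start = 904,
* partial_end = 2711, start = 1, end = 3. Since start != end, the head
* call zeroes bytes 5000..8191 (the range is clamped to the end of
* block 1) and the tail call zeroes bytes 12288..14999; the fully
* covered block 2 is left for the block-freeing code in the caller.
*/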
int ext4_can_truncate(struct inode *inode)
{
if (S_ISREG(inode->i_mode))
return 1;
if (S_ISDIR(inode->i_mode))
return 1;
if (S_ISLNK(inode->i_mode))
return !ext4_inode_is_fast_symlink(inode);
return 0;
}
/*
* We have to make sure i_disksize gets properly updated before we truncate
* page cache due to hole punching or zero range. Otherwise i_disksize update
* can get lost as it may have been postponed to submission of writeback but
* that will never happen after we truncate page cache.
*/
int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
loff_t len)
{
handle_t *handle;
loff_t size = i_size_read(inode);
WARN_ON(!inode_is_locked(inode));
if (offset > size || offset + len < size)
return 0;
if (EXT4_I(inode)->i_disksize >= size)
return 0;
handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
if (IS_ERR(handle))
return PTR_ERR(handle);
ext4_update_i_disksize(inode, size);
ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
return 0;
}
/*
* ext4_punch_hole: punches a hole in a file by releasing the blocks
* associated with the given offset and length
*
* @inode: File inode
* @offset: The offset where the hole will begin
* @len: The length of the hole
*
* Returns: 0 on success or negative on failure
*/
int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
{
struct super_block *sb = inode->i_sb;
ext4_lblk_t first_block, stop_block;
struct address_space *mapping = inode->i_mapping;
loff_t first_block_offset, last_block_offset;
handle_t *handle;
unsigned int credits;
int ret = 0;
if (!S_ISREG(inode->i_mode))
return -EOPNOTSUPP;
trace_ext4_punch_hole(inode, offset, length, 0);
/*
* Write out all dirty pages to avoid race conditions.
* Then release them.
*/
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
ret = filemap_write_and_wait_range(mapping, offset,
offset + length - 1);
if (ret)
return ret;
}
inode_lock(inode);
/* No need to punch hole beyond i_size */
if (offset >= inode->i_size)
goto out_mutex;
/*
* If the hole extends beyond i_size, set the hole
* to end after the page that contains i_size
*/
if (offset + length > inode->i_size) {
length = inode->i_size +
PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
offset;
}
if (offset & (sb->s_blocksize - 1) ||
(offset + length) & (sb->s_blocksize - 1)) {
/*
* Attach jinode to inode for jbd2 if we do any zeroing of a
* partial block
*/
ret = ext4_inode_attach_jinode(inode);
if (ret < 0)
goto out_mutex;
}
/* Wait for all existing dio workers; newcomers will block on i_mutex */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
/*
* Prevent page faults from reinstantiating pages we have released from
* page cache.
*/
down_write(&EXT4_I(inode)->i_mmap_sem);
first_block_offset = round_up(offset, sb->s_blocksize);
last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
/* Now release the pages and zero block aligned part of pages */
if (last_block_offset > first_block_offset) {
ret = ext4_update_disksize_before_punch(inode, offset, length);
if (ret)
goto out_dio;
truncate_pagecache_range(inode, first_block_offset,
last_block_offset);
}
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
credits = ext4_writepage_trans_blocks(inode);
else
credits = ext4_blocks_for_truncate(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_std_error(sb, ret);
goto out_dio;
}
ret = ext4_zero_partial_blocks(handle, inode, offset,
length);
if (ret)
goto out_stop;
first_block = (offset + sb->s_blocksize - 1) >>
EXT4_BLOCK_SIZE_BITS(sb);
stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
/* If there are no blocks to remove, return now */
if (first_block >= stop_block)
goto out_stop;
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
ret = ext4_es_remove_extent(inode, first_block,
stop_block - first_block);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ret = ext4_ext_remove_space(inode, first_block,
stop_block - 1);
else
ret = ext4_ind_remove_space(handle, inode, first_block,
stop_block);
up_write(&EXT4_I(inode)->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
out_stop:
ext4_journal_stop(handle);
out_dio:
up_write(&EXT4_I(inode)->i_mmap_sem);
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
inode_unlock(inode);
return ret;
}
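/*
* Illustrative sketch (not compiled): how ext4_punch_hole() is reached
* from userspace. Hole punching is requested via fallocate(2) with
* FALLOC_FL_PUNCH_HOLE, which must be combined with FALLOC_FL_KEEP_SIZE.
*/
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

static int punch_hole_example(int fd)
{
        /* Deallocate 1 MiB starting at offset 4096; i_size is unchanged. */
        return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                         4096, 1024 * 1024);
}
#endif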
int ext4_inode_attach_jinode(struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
struct jbd2_inode *jinode;
if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
return 0;
jinode = jbd2_alloc_inode(GFP_KERNEL);
spin_lock(&inode->i_lock);
if (!ei->jinode) {
if (!jinode) {
spin_unlock(&inode->i_lock);
return -ENOMEM;
}
ei->jinode = jinode;
jbd2_journal_init_jbd_inode(ei->jinode, inode);
jinode = NULL;
}
spin_unlock(&inode->i_lock);
if (unlikely(jinode != NULL))
jbd2_free_inode(jinode);
return 0;
}
/*
* ext4_truncate()
*
* We block out ext4_get_block() block instantiations across the entire
* transaction, and VFS/VM ensures that ext4_truncate() cannot run
* simultaneously on behalf of the same inode.
*
* As we work through the truncate and commit bits of it to the journal there
* is one core, guiding principle: the file's tree must always be consistent on
* disk. We must be able to restart the truncate after a crash.
*
* The file's tree may be transiently inconsistent in memory (although it
* probably isn't), but whenever we close off and commit a journal transaction,
* the contents of (the filesystem + the journal) must be consistent and
* restartable. It's pretty simple, really: bottom up, right to left (although
* left-to-right works OK too).
*
* Note that at recovery time, journal replay occurs *before* the restart of
* truncate against the orphan inode list.
*
* The committed inode has the new, desired i_size (which is the same as
* i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
* that this inode's truncate did not complete and it will again call
* ext4_truncate() to have another go. So there will be instantiated blocks
* to the right of the truncation point in a crashed ext4 filesystem. But
* that's fine - as long as they are linked from the inode, the post-crash
* ext4_truncate() run will find them and release them.
*/
void ext4_truncate(struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned int credits;
handle_t *handle;
struct address_space *mapping = inode->i_mapping;
/*
* There is a possibility that we're either freeing the inode
* or it's a completely new inode. In those cases we might not
* have i_mutex locked because it's not necessary.
*/
if (!(inode->i_state & (I_NEW|I_FREEING)))
WARN_ON(!inode_is_locked(inode));
trace_ext4_truncate_enter(inode);
if (!ext4_can_truncate(inode))
return;
ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
if (ext4_has_inline_data(inode)) {
int has_inline = 1;
ext4_inline_data_truncate(inode, &has_inline);
if (has_inline)
return;
}
/* If we zero out the tail of the page, we have to create a jinode for jbd2 */
if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
if (ext4_inode_attach_jinode(inode) < 0)
return;
}
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
credits = ext4_writepage_trans_blocks(inode);
else
credits = ext4_blocks_for_truncate(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
if (IS_ERR(handle)) {
ext4_std_error(inode->i_sb, PTR_ERR(handle));
return;
}
if (inode->i_size & (inode->i_sb->s_blocksize - 1))
ext4_block_truncate_page(handle, mapping, inode->i_size);
/*
* We add the inode to the orphan list, so that if this
* truncate spans multiple transactions, and we crash, we will
* resume the truncate when the filesystem recovers. It also
* marks the inode dirty, to catch the new size.
*
* Implication: the file must always be in a sane, consistent
* truncatable state while each transaction commits.
*/
if (ext4_orphan_add(handle, inode))
goto out_stop;
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ext4_ext_truncate(handle, inode);
else
ext4_ind_truncate(handle, inode);
up_write(&ei->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
out_stop:
/*
* If this was a simple ftruncate() and the file will remain alive,
* then we need to clear up the orphan record which we created above.
* However, if this was a real unlink then we were called by
* ext4_evict_inode(), and we allow that function to clean up the
* orphan info for us.
*/
if (inode->i_nlink)
ext4_orphan_del(handle, inode);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
trace_ext4_truncate_exit(inode);
}
/*
* ext4_get_inode_loc returns with an extra refcount against the inode's
* underlying buffer_head on success. If 'in_mem' is true, we have all
* data in memory that is needed to recreate the on-disk version of this
* inode.
*/
static int __ext4_get_inode_loc(struct inode *inode,
struct ext4_iloc *iloc, int in_mem)
{
struct ext4_group_desc *gdp;
struct buffer_head *bh;
struct super_block *sb = inode->i_sb;
ext4_fsblk_t block;
int inodes_per_block, inode_offset;
iloc->bh = NULL;
if (!ext4_valid_inum(sb, inode->i_ino))
return -EFSCORRUPTED;
iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
if (!gdp)
return -EIO;
/*
* Figure out the offset within the block group inode table
*/
inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
inode_offset = ((inode->i_ino - 1) %
EXT4_INODES_PER_GROUP(sb));
block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
bh = sb_getblk(sb, block);
if (unlikely(!bh))
return -ENOMEM;
if (!buffer_uptodate(bh)) {
lock_buffer(bh);
/*
* If the buffer has the write error flag, we have failed
* to write out another inode in the same block. In this
* case, we don't have to read the block because we may
* read the old inode data successfully.
*/
if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
set_buffer_uptodate(bh);
if (buffer_uptodate(bh)) {
/* someone brought it uptodate while we waited */
unlock_buffer(bh);
goto has_buffer;
}
/*
* If we have all information of the inode in memory and this
* is the only valid inode in the block, we need not read the
* block.
*/
if (in_mem) {
struct buffer_head *bitmap_bh;
int i, start;
start = inode_offset & ~(inodes_per_block - 1);
/* Is the inode bitmap in cache? */
bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
if (unlikely(!bitmap_bh))
goto make_io;
/*
* If the inode bitmap isn't in cache then the
* optimisation may end up performing two reads instead
* of one, so skip it.
*/
if (!buffer_uptodate(bitmap_bh)) {
brelse(bitmap_bh);
goto make_io;
}
for (i = start; i < start + inodes_per_block; i++) {
if (i == inode_offset)
continue;
if (ext4_test_bit(i, bitmap_bh->b_data))
break;
}
brelse(bitmap_bh);
if (i == start + inodes_per_block) {
/* all other inodes are free, so skip I/O */
memset(bh->b_data, 0, bh->b_size);
set_buffer_uptodate(bh);
unlock_buffer(bh);
goto has_buffer;
}
}
make_io:
/*
* If we need to do any I/O, try to pre-readahead extra
* blocks from the inode table.
*/
if (EXT4_SB(sb)->s_inode_readahead_blks) {
ext4_fsblk_t b, end, table;
unsigned num;
__u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;
table = ext4_inode_table(sb, gdp);
/* s_inode_readahead_blks is always a power of 2 */
b = block & ~((ext4_fsblk_t) ra_blks - 1);
if (table > b)
b = table;
end = b + ra_blks;
num = EXT4_INODES_PER_GROUP(sb);
if (ext4_has_group_desc_csum(sb))
num -= ext4_itable_unused_count(sb, gdp);
table += num / inodes_per_block;
if (end > table)
end = table;
while (b <= end)
sb_breadahead(sb, b++);
}
/*
* There are other valid inodes in the buffer, this inode
* has in-inode xattrs, or we don't have this inode in memory.
* Read the block from disk.
*/
trace_ext4_load_inode(inode);
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
EXT4_ERROR_INODE_BLOCK(inode, block,
"unable to read itable block");
brelse(bh);
return -EIO;
}
}
has_buffer:
iloc->bh = bh;
return 0;
}
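/*
* Worked example (illustrative numbers): 4096-byte blocks, 256-byte inodes
* (16 per itable block) and 8192 inodes per group. For ino 17:
* block_group = (17 - 1) / 8192 = 0, inode_offset = (17 - 1) % 8192 = 16,
* block = inode_table + 16 / 16 = inode_table + 1, and
* iloc->offset = (16 % 16) * 256 = 0, i.e. the first slot of the second
* inode table block of group 0.
*/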
int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
/* We have all inode data except xattrs in memory here. */
return __ext4_get_inode_loc(inode, iloc,
!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
}
void ext4_set_inode_flags(struct inode *inode)
{
unsigned int flags = EXT4_I(inode)->i_flags;
unsigned int new_fl = 0;
if (flags & EXT4_SYNC_FL)
new_fl |= S_SYNC;
if (flags & EXT4_APPEND_FL)
new_fl |= S_APPEND;
if (flags & EXT4_IMMUTABLE_FL)
new_fl |= S_IMMUTABLE;
if (flags & EXT4_NOATIME_FL)
new_fl |= S_NOATIME;
if (flags & EXT4_DIRSYNC_FL)
new_fl |= S_DIRSYNC;
if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
new_fl |= S_DAX;
inode_set_flags(inode, new_fl,
S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX);
}
/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
void ext4_get_inode_flags(struct ext4_inode_info *ei)
{
unsigned int vfs_fl;
unsigned long old_fl, new_fl;
do {
vfs_fl = ei->vfs_inode.i_flags;
old_fl = ei->i_flags;
new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
EXT4_DIRSYNC_FL);
if (vfs_fl & S_SYNC)
new_fl |= EXT4_SYNC_FL;
if (vfs_fl & S_APPEND)
new_fl |= EXT4_APPEND_FL;
if (vfs_fl & S_IMMUTABLE)
new_fl |= EXT4_IMMUTABLE_FL;
if (vfs_fl & S_NOATIME)
new_fl |= EXT4_NOATIME_FL;
if (vfs_fl & S_DIRSYNC)
new_fl |= EXT4_DIRSYNC_FL;
} while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
}
static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
struct ext4_inode_info *ei)
{
blkcnt_t i_blocks;
struct inode *inode = &(ei->vfs_inode);
struct super_block *sb = inode->i_sb;
if (ext4_has_feature_huge_file(sb)) {
/* we are using combined 48 bit field */
i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
le32_to_cpu(raw_inode->i_blocks_lo);
if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
/* i_blocks is stored in file system block size units */
return i_blocks << (inode->i_blkbits - 9);
} else {
return i_blocks;
}
} else {
return le32_to_cpu(raw_inode->i_blocks_lo);
}
}
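/*
* Worked example: i_blocks_high = 1 and i_blocks_lo = 0 combine to
* i_blocks = 1 << 32. Without EXT4_INODE_HUGE_FILE that is 2^32 512-byte
* sectors (2 TiB); with the flag set, the value counts file system blocks
* instead, so for a 4 KiB block size (i_blkbits = 12) it is converted
* back to sectors by shifting left by 12 - 9 = 3.
*/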
static inline void ext4_iget_extra_inode(struct inode *inode,
struct ext4_inode *raw_inode,
struct ext4_inode_info *ei)
{
__le32 *magic = (void *)raw_inode +
EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
ext4_set_inode_state(inode, EXT4_STATE_XATTR);
ext4_find_inline_data_nolock(inode);
} else
EXT4_I(inode)->i_inline_off = 0;
}
int ext4_get_projid(struct inode *inode, kprojid_t *projid)
{
if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_PROJECT))
return -EOPNOTSUPP;
*projid = EXT4_I(inode)->i_projid;
return 0;
}
struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
{
struct ext4_iloc iloc;
struct ext4_inode *raw_inode;
struct ext4_inode_info *ei;
struct inode *inode;
journal_t *journal = EXT4_SB(sb)->s_journal;
long ret;
int block;
uid_t i_uid;
gid_t i_gid;
projid_t i_projid;
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
ei = EXT4_I(inode);
iloc.bh = NULL;
ret = __ext4_get_inode_loc(inode, &iloc, 0);
if (ret < 0)
goto bad_inode;
raw_inode = ext4_raw_inode(&iloc);
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
EXT4_INODE_SIZE(inode->i_sb)) {
EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
EXT4_INODE_SIZE(inode->i_sb));
ret = -EFSCORRUPTED;
goto bad_inode;
}
} else
ei->i_extra_isize = 0;
/* Precompute checksum seed for inode metadata */
if (ext4_has_metadata_csum(sb)) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
__u32 csum;
__le32 inum = cpu_to_le32(inode->i_ino);
__le32 gen = raw_inode->i_generation;
csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
sizeof(inum));
ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
sizeof(gen));
}
if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
EXT4_ERROR_INODE(inode, "checksum invalid");
ret = -EFSBADCRC;
goto bad_inode;
}
inode->i_mode = le16_to_cpu(raw_inode->i_mode);
i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_PROJECT) &&
EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
else
i_projid = EXT4_DEF_PROJID;
if (!(test_opt(inode->i_sb, NO_UID32))) {
i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
}
i_uid_write(inode, i_uid);
i_gid_write(inode, i_gid);
ei->i_projid = make_kprojid(&init_user_ns, i_projid);
set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
ei->i_inline_off = 0;
ei->i_dir_start_lookup = 0;
ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
/* We now have enough fields to check if the inode was active or not.
* This is needed because nfsd might try to access dead inodes.
* The test is the same one that e2fsck uses.
* NeilBrown 1999oct15
*/
if (inode->i_nlink == 0) {
if ((inode->i_mode == 0 ||
!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
ino != EXT4_BOOT_LOADER_INO) {
/* this inode is deleted */
ret = -ESTALE;
goto bad_inode;
}
/* The only unlinked inodes we let through here have
* valid i_mode and are being read by the orphan
* recovery code: that's fine, we're about to complete
* the process of deleting those.
* OR it is the EXT4_BOOT_LOADER_INO which is
* not initialized on a new filesystem. */
}
ei->i_flags = le32_to_cpu(raw_inode->i_flags);
inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
if (ext4_has_feature_64bit(sb))
ei->i_file_acl |=
((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
inode->i_size = ext4_isize(raw_inode);
ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
ei->i_reserved_quota = 0;
#endif
inode->i_generation = le32_to_cpu(raw_inode->i_generation);
ei->i_block_group = iloc.block_group;
ei->i_last_alloc_group = ~0;
/*
* NOTE! The in-memory inode i_data array is in little-endian order
* even on big-endian machines: we do NOT byteswap the block numbers!
*/
for (block = 0; block < EXT4_N_BLOCKS; block++)
ei->i_data[block] = raw_inode->i_block[block];
INIT_LIST_HEAD(&ei->i_orphan);
/*
* Set transaction id's of transactions that have to be committed
* to finish f[data]sync. We set them to currently running transaction
* as we cannot be sure that the inode or some of its metadata isn't
* part of the transaction - the inode could have been reclaimed and
* now it is reread from disk.
*/
if (journal) {
transaction_t *transaction;
tid_t tid;
read_lock(&journal->j_state_lock);
if (journal->j_running_transaction)
transaction = journal->j_running_transaction;
else
transaction = journal->j_committing_transaction;
if (transaction)
tid = transaction->t_tid;
else
tid = journal->j_commit_sequence;
read_unlock(&journal->j_state_lock);
ei->i_sync_tid = tid;
ei->i_datasync_tid = tid;
}
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
if (ei->i_extra_isize == 0) {
/* The extra space is currently unused. Use it. */
ei->i_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
} else {
ext4_iget_extra_inode(inode, raw_inode, ei);
}
}
EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
inode->i_version |=
(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
}
}
ret = 0;
if (ei->i_file_acl &&
!ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
ei->i_file_acl);
ret = -EFSCORRUPTED;
goto bad_inode;
} else if (!ext4_has_inline_data(inode)) {
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
(S_ISLNK(inode->i_mode) &&
!ext4_inode_is_fast_symlink(inode))))
/* Validate extent which is part of inode */
ret = ext4_ext_check_inode(inode);
} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
(S_ISLNK(inode->i_mode) &&
!ext4_inode_is_fast_symlink(inode))) {
/* Validate block references which are part of inode */
ret = ext4_ind_check_inode(inode);
}
}
if (ret)
goto bad_inode;
if (S_ISREG(inode->i_mode)) {
inode->i_op = &ext4_file_inode_operations;
inode->i_fop = &ext4_file_operations;
ext4_set_aops(inode);
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op = &ext4_dir_inode_operations;
inode->i_fop = &ext4_dir_operations;
} else if (S_ISLNK(inode->i_mode)) {
if (ext4_encrypted_inode(inode)) {
inode->i_op = &ext4_encrypted_symlink_inode_operations;
ext4_set_aops(inode);
} else if (ext4_inode_is_fast_symlink(inode)) {
inode->i_link = (char *)ei->i_data;
inode->i_op = &ext4_fast_symlink_inode_operations;
nd_terminate_link(ei->i_data, inode->i_size,
sizeof(ei->i_data) - 1);
} else {
inode->i_op = &ext4_symlink_inode_operations;
ext4_set_aops(inode);
}
inode_nohighmem(inode);
} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
inode->i_op = &ext4_special_inode_operations;
if (raw_inode->i_block[0])
init_special_inode(inode, inode->i_mode,
old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
else
init_special_inode(inode, inode->i_mode,
new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
} else if (ino == EXT4_BOOT_LOADER_INO) {
make_bad_inode(inode);
} else {
ret = -EFSCORRUPTED;
EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
goto bad_inode;
}
brelse(iloc.bh);
ext4_set_inode_flags(inode);
unlock_new_inode(inode);
return inode;
bad_inode:
brelse(iloc.bh);
iget_failed(inode);
return ERR_PTR(ret);
}
struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
{
if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
return ERR_PTR(-EFSCORRUPTED);
return ext4_iget(sb, ino);
}
static int ext4_inode_blocks_set(handle_t *handle,
struct ext4_inode *raw_inode,
struct ext4_inode_info *ei)
{
struct inode *inode = &(ei->vfs_inode);
u64 i_blocks = inode->i_blocks;
struct super_block *sb = inode->i_sb;
if (i_blocks <= ~0U) {
/*
* i_blocks can be represented in a 32 bit variable
* as a multiple of 512 bytes
*/
raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
raw_inode->i_blocks_high = 0;
ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
return 0;
}
if (!ext4_has_feature_huge_file(sb))
return -EFBIG;
if (i_blocks <= 0xffffffffffffULL) {
/*
* i_blocks can be represented in a 48 bit variable
* as a multiple of 512 bytes
*/
raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
} else {
ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
/* i_blocks is stored in file system block size units */
i_blocks = i_blocks >> (inode->i_blkbits - 9);
raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
}
return 0;
}
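/*
* The three cases above match the on-disk limits: up to 2^32 - 1 sectors
* fits in i_blocks_lo alone, up to 2^48 - 1 sectors fits in the combined
* lo/high fields, and anything larger is stored in file system block
* units with EXT4_INODE_HUGE_FILE set, stretching the limit by a further
* factor of 2^(i_blkbits - 9) (8x for 4 KiB blocks).
*/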
struct other_inode {
unsigned long orig_ino;
struct ext4_inode *raw_inode;
};
static int other_inode_match(struct inode *inode, unsigned long ino,
void *data)
{
struct other_inode *oi = (struct other_inode *) data;
if ((inode->i_ino != ino) ||
(inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
((inode->i_state & I_DIRTY_TIME) == 0))
return 0;
spin_lock(&inode->i_lock);
if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
I_DIRTY_SYNC | I_DIRTY_DATASYNC)) == 0) &&
(inode->i_state & I_DIRTY_TIME)) {
struct ext4_inode_info *ei = EXT4_I(inode);
inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED);
spin_unlock(&inode->i_lock);
spin_lock(&ei->i_raw_lock);
EXT4_INODE_SET_XTIME(i_ctime, inode, oi->raw_inode);
EXT4_INODE_SET_XTIME(i_mtime, inode, oi->raw_inode);
EXT4_INODE_SET_XTIME(i_atime, inode, oi->raw_inode);
ext4_inode_csum_set(inode, oi->raw_inode, ei);
spin_unlock(&ei->i_raw_lock);
trace_ext4_other_inode_update_time(inode, oi->orig_ino);
return -1;
}
spin_unlock(&inode->i_lock);
return -1;
}
/*
* Opportunistically update the other time fields for other inodes in
* the same inode table block.
*/
static void ext4_update_other_inodes_time(struct super_block *sb,
unsigned long orig_ino, char *buf)
{
struct other_inode oi;
unsigned long ino;
int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
int inode_size = EXT4_INODE_SIZE(sb);
oi.orig_ino = orig_ino;
/*
* Calculate the first inode in the inode table block. Inode
* numbers are one-based. That is, the first inode in a block
* (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
*/
ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
if (ino == orig_ino)
continue;
oi.raw_inode = (struct ext4_inode *) buf;
(void) find_inode_nowait(sb, ino, other_inode_match, &oi);
}
}
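/*
* Worked example: with 16 inodes per itable block, orig_ino = 19 gives
* ino = ((19 - 1) & ~15) + 1 = 17, so the loop visits inodes 17..32,
* skipping 19 itself, with buf advancing by inode_size per iteration.
*/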
/*
* Post the struct inode info into an on-disk inode location in the
* buffer-cache. This gobbles the caller's reference to the
* buffer_head in the inode location struct.
*
* The caller must have write access to iloc->bh.
*/
static int ext4_do_update_inode(handle_t *handle,
struct inode *inode,
struct ext4_iloc *iloc)
{
struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
struct ext4_inode_info *ei = EXT4_I(inode);
struct buffer_head *bh = iloc->bh;
struct super_block *sb = inode->i_sb;
int err = 0, rc, block;
int need_datasync = 0, set_large_file = 0;
uid_t i_uid;
gid_t i_gid;
projid_t i_projid;
spin_lock(&ei->i_raw_lock);
/* For fields not tracked in the in-memory inode,
* initialise them to zero for new inodes. */
if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
ext4_get_inode_flags(ei);
raw_inode->i_mode = cpu_to_le16(inode->i_mode);
i_uid = i_uid_read(inode);
i_gid = i_gid_read(inode);
i_projid = from_kprojid(&init_user_ns, ei->i_projid);
if (!(test_opt(inode->i_sb, NO_UID32))) {
raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
/*
* Fix up interoperability with old kernels. Otherwise, old inodes get
* re-used with the upper 16 bits of the uid/gid intact
*/
if (ei->i_dtime && list_empty(&ei->i_orphan)) {
raw_inode->i_uid_high = 0;
raw_inode->i_gid_high = 0;
} else {
raw_inode->i_uid_high =
cpu_to_le16(high_16_bits(i_uid));
raw_inode->i_gid_high =
cpu_to_le16(high_16_bits(i_gid));
}
} else {
raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
raw_inode->i_uid_high = 0;
raw_inode->i_gid_high = 0;
}
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
err = ext4_inode_blocks_set(handle, raw_inode, ei);
if (err) {
spin_unlock(&ei->i_raw_lock);
goto out_brelse;
}
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
raw_inode->i_file_acl_high =
cpu_to_le16(ei->i_file_acl >> 32);
raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
if (ei->i_disksize != ext4_isize(raw_inode)) {
ext4_isize_set(raw_inode, ei->i_disksize);
need_datasync = 1;
}
if (ei->i_disksize > 0x7fffffffULL) {
if (!ext4_has_feature_large_file(sb) ||
EXT4_SB(sb)->s_es->s_rev_level ==
cpu_to_le32(EXT4_GOOD_OLD_REV))
set_large_file = 1;
}
raw_inode->i_generation = cpu_to_le32(inode->i_generation);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
if (old_valid_dev(inode->i_rdev)) {
raw_inode->i_block[0] =
cpu_to_le32(old_encode_dev(inode->i_rdev));
raw_inode->i_block[1] = 0;
} else {
raw_inode->i_block[0] = 0;
raw_inode->i_block[1] =
cpu_to_le32(new_encode_dev(inode->i_rdev));
raw_inode->i_block[2] = 0;
}
} else if (!ext4_has_inline_data(inode)) {
for (block = 0; block < EXT4_N_BLOCKS; block++)
raw_inode->i_block[block] = ei->i_data[block];
}
if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
if (ei->i_extra_isize) {
if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
raw_inode->i_version_hi =
cpu_to_le32(inode->i_version >> 32);
raw_inode->i_extra_isize =
cpu_to_le16(ei->i_extra_isize);
}
}
BUG_ON(!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_RO_COMPAT_PROJECT) &&
i_projid != EXT4_DEF_PROJID);
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
raw_inode->i_projid = cpu_to_le32(i_projid);
ext4_inode_csum_set(inode, raw_inode, ei);
spin_unlock(&ei->i_raw_lock);
if (inode->i_sb->s_flags & MS_LAZYTIME)
ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
bh->b_data);
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
rc = ext4_handle_dirty_metadata(handle, NULL, bh);
if (!err)
err = rc;
ext4_clear_inode_state(inode, EXT4_STATE_NEW);
if (set_large_file) {
BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
if (err)
goto out_brelse;
ext4_update_dynamic_rev(sb);
ext4_set_feature_large_file(sb);
ext4_handle_sync(handle);
err = ext4_handle_dirty_super(handle, sb);
}
ext4_update_inode_fsync_trans(handle, inode, need_datasync);
out_brelse:
brelse(bh);
ext4_std_error(inode->i_sb, err);
return err;
}
/*
* ext4_write_inode()
*
* We are called from a few places:
*
* - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
* Here, there will be no transaction running. We wait for any running
* transaction to commit.
*
* - Within flush work (sys_sync(), kupdate and such).
* We wait on commit, if told to.
*
* - Within iput_final() -> write_inode_now()
* We wait on commit, if told to.
*
* In all cases it is actually safe for us to return without doing anything,
* because the inode has been copied into a raw inode buffer in
* ext4_mark_inode_dirty(). This is a correctness thing for WB_SYNC_ALL
* writeback.
*
* Note that we are absolutely dependent upon all inode dirtiers doing the
* right thing: they *must* call mark_inode_dirty() after dirtying info in
* which we are interested.
*
* It would be a bug for them to not do this. The code:
*
* mark_inode_dirty(inode)
* stuff();
* inode->i_size = expr;
*
* is in error because write_inode() could occur while `stuff()' is running,
* and the new i_size will be lost. Plus the inode will no longer be on the
* superblock's dirty inode list.
*/
int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int err;
if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
return 0;
if (EXT4_SB(inode->i_sb)->s_journal) {
if (ext4_journal_current_handle()) {
jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
dump_stack();
return -EIO;
}
/*
* No need to force transaction in WB_SYNC_NONE mode. Also
* ext4_sync_fs() will force the commit after everything is
* written.
*/
if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
return 0;
err = ext4_force_commit(inode->i_sb);
} else {
struct ext4_iloc iloc;
err = __ext4_get_inode_loc(inode, &iloc, 0);
if (err)
return err;
/*
* sync(2) will flush the whole buffer cache. No need to do
* it here separately for each inode.
*/
if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
sync_dirty_buffer(iloc.bh);
if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
"IO error syncing inode");
err = -EIO;
}
brelse(iloc.bh);
}
return err;
}
/*
* In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
* buffers that are attached to a page straddling i_size and are undergoing
* commit. In that case we have to wait for the commit to finish and try again.
*/
static void ext4_wait_for_tail_page_commit(struct inode *inode)
{
struct page *page;
unsigned offset;
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
tid_t commit_tid = 0;
int ret;
offset = inode->i_size & (PAGE_SIZE - 1);
/*
* All buffers in the last page remain valid? Then there's nothing to
* do. We do the check mainly to optimize the common PAGE_SIZE ==
* blocksize case
*/
if (offset > PAGE_SIZE - (1 << inode->i_blkbits))
return;
while (1) {
page = find_lock_page(inode->i_mapping,
inode->i_size >> PAGE_SHIFT);
if (!page)
return;
ret = __ext4_journalled_invalidatepage(page, offset,
PAGE_SIZE - offset);
unlock_page(page);
put_page(page);
if (ret != -EBUSY)
return;
commit_tid = 0;
read_lock(&journal->j_state_lock);
if (journal->j_committing_transaction)
commit_tid = journal->j_committing_transaction->t_tid;
read_unlock(&journal->j_state_lock);
if (commit_tid)
jbd2_log_wait_commit(journal, commit_tid);
}
}
/*
* ext4_setattr()
*
* Called from notify_change.
*
* We want to trap VFS attempts to truncate the file as soon as
* possible. In particular, we want to make sure that when the VFS
* shrinks i_size, we put the inode on the orphan list and modify
* i_disksize immediately, so that during the subsequent flushing of
* dirty pages and freeing of disk blocks, we can guarantee that any
* commit will leave the blocks being flushed in an unused state on
* disk. (On recovery, the inode will get truncated and the blocks will
* be freed, so we have a strong guarantee that no future commit will
* leave these blocks visible to the user.)
*
* Another thing we have to ensure is that if we are in ordered mode and
* the inode is still attached to the committing transaction, we must
* start writeout of all the dirty pages which are being truncated. This
* way we are sure that all the data written in the previous transaction
* is already on disk (truncate waits for pages under writeback).
*
* Called with inode->i_mutex down.
*/
int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
int error, rc = 0;
int orphan = 0;
const unsigned int ia_valid = attr->ia_valid;
error = inode_change_ok(inode, attr);
if (error)
return error;
if (is_quota_modification(inode, attr)) {
error = dquot_initialize(inode);
if (error)
return error;
}
if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
(ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
handle_t *handle;
/* (user+group)*(old+new) structure, inode write (sb,
* inode block, ? - but truncate inode update has it) */
handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
if (IS_ERR(handle)) {
error = PTR_ERR(handle);
goto err_out;
}
error = dquot_transfer(inode, attr);
if (error) {
ext4_journal_stop(handle);
return error;
}
/* Update corresponding info in inode so that everything is in
* one transaction */
if (attr->ia_valid & ATTR_UID)
inode->i_uid = attr->ia_uid;
if (attr->ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
error = ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
}
if (attr->ia_valid & ATTR_SIZE) {
handle_t *handle;
loff_t oldsize = inode->i_size;
int shrink = (attr->ia_size <= inode->i_size);
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
if (attr->ia_size > sbi->s_bitmap_maxbytes)
return -EFBIG;
}
if (!S_ISREG(inode->i_mode))
return -EINVAL;
if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
inode_inc_iversion(inode);
if (ext4_should_order_data(inode) &&
(attr->ia_size < inode->i_size)) {
error = ext4_begin_ordered_truncate(inode,
attr->ia_size);
if (error)
goto err_out;
}
if (attr->ia_size != inode->i_size) {
handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
if (IS_ERR(handle)) {
error = PTR_ERR(handle);
goto err_out;
}
if (ext4_handle_valid(handle) && shrink) {
error = ext4_orphan_add(handle, inode);
orphan = 1;
}
/*
* Update c/mtime on truncate up, ext4_truncate() will
* update c/mtime in shrink case below
*/
if (!shrink) {
inode->i_mtime = ext4_current_time(inode);
inode->i_ctime = inode->i_mtime;
}
down_write(&EXT4_I(inode)->i_data_sem);
EXT4_I(inode)->i_disksize = attr->ia_size;
rc = ext4_mark_inode_dirty(handle, inode);
if (!error)
error = rc;
/*
* We have to update i_size under i_data_sem together
* with i_disksize to avoid races with writeback code
* running ext4_wb_update_i_disksize().
*/
if (!error)
i_size_write(inode, attr->ia_size);
up_write(&EXT4_I(inode)->i_data_sem);
ext4_journal_stop(handle);
if (error) {
if (orphan)
ext4_orphan_del(NULL, inode);
goto err_out;
}
}
if (!shrink)
pagecache_isize_extended(inode, oldsize, inode->i_size);
/*
* Blocks are going to be removed from the inode. Wait
* for dio in flight. Temporarily disable
* dioread_nolock to prevent livelock.
*/
if (orphan) {
if (!ext4_should_journal_data(inode)) {
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
ext4_inode_resume_unlocked_dio(inode);
} else
ext4_wait_for_tail_page_commit(inode);
}
down_write(&EXT4_I(inode)->i_mmap_sem);
/*
* Truncate pagecache after we've waited for commit
* in data=journal mode to make pages freeable.
*/
truncate_pagecache(inode, inode->i_size);
if (shrink)
ext4_truncate(inode);
up_write(&EXT4_I(inode)->i_mmap_sem);
}
if (!rc) {
setattr_copy(inode, attr);
mark_inode_dirty(inode);
}
/*
* If the call to ext4_truncate failed to get a transaction handle at
* all, we need to clean up the in-core orphan list manually.
*/
if (orphan && inode->i_nlink)
ext4_orphan_del(NULL, inode);
if (!rc && (ia_valid & ATTR_MODE))
rc = posix_acl_chmod(inode, inode->i_mode);
err_out:
ext4_std_error(inode->i_sb, error);
if (!error)
error = rc;
return error;
}
int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
struct inode *inode;
unsigned long long delalloc_blocks;
inode = d_inode(dentry);
generic_fillattr(inode, stat);
/*
* If there is inline data in the inode, the inode will normally not
* have data blocks allocated (it may have an external xattr block).
* Report at least one sector for such files, so tools like tar and
* rsync don't incorrectly think the file is completely sparse.
*/
if (unlikely(ext4_has_inline_data(inode)))
stat->blocks += (stat->size + 511) >> 9;
/*
* We can't update i_blocks if the block allocation is delayed,
* otherwise in the case of a system crash before the real block
* allocation is done, we would have i_blocks inconsistent with
* on-disk file blocks.
* We always keep i_blocks updated together with the real
* allocation. But to avoid confusing userspace, stat
* returns blocks that include the delayed allocation
* blocks for this file.
*/
delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
EXT4_I(inode)->i_reserved_data_blocks);
stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
return 0;
}
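/*
* Worked example: an inline-data file of size 100 reports
* (100 + 511) >> 9 = 1 extra sector in stat->blocks even though no data
* block is allocated, so st_blocks is never 0 for a file that has data.
*/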
static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
int pextents)
{
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
return ext4_ind_trans_blocks(inode, lblocks);
return ext4_ext_index_trans_blocks(inode, pextents);
}
/*
* Account for index blocks, block group bitmaps and block group
* descriptor blocks when modifying data blocks and index blocks. In the
* worst case, the index blocks spread over different block groups.
*
* If data blocks are discontiguous, they may spread over different
* block groups too. Even if they are contiguous, with flexbg they could
* still cross a block group boundary.
*
* Also account for superblock, inode, quota and xattr blocks.
*/
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
int pextents)
{
ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
int gdpblocks;
int idxblocks;
int ret = 0;
/*
* How many index blocks do we need to touch to map @lblocks logical
* blocks to @pextents physical extents?
*/
idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
ret = idxblocks;
/*
* Now let's see how many group bitmaps and group descriptors need
* to be accounted for.
*/
groups = idxblocks + pextents;
gdpblocks = groups;
if (groups > ngroups)
groups = ngroups;
if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
/* bitmaps and block group descriptor blocks */
ret += groups + gdpblocks;
/* Blocks for super block, inode, quota and xattr blocks */
ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
return ret;
}
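/*
* Worked example (the idxblocks value is assumed for illustration):
* suppose mapping lblocks = 4 to pextents = 4 costs idxblocks = 3. Then
* groups = gdpblocks = 3 + 4 = 7; with ngroups = 100 the group count
* stays 7, and with s_gdb_count = 2 gdpblocks is clamped to 2, giving
* ret = 3 + 7 + 2 + EXT4_META_TRANS_BLOCKS(sb).
*/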
/*
* Calculate the total number of credits to reserve so that the
* modification of a single page fits into a single transaction,
* which may include multiple chunks of block allocations.
*
* This could be called via ext4_write_begin()
*
* We need to consider the worst case, when
* one new block is allocated per extent.
*/
int ext4_writepage_trans_blocks(struct inode *inode)
{
int bpp = ext4_journal_blocks_per_page(inode);
int ret;
ret = ext4_meta_trans_blocks(inode, bpp, bpp);
/* Account for data blocks for journalled mode */
if (ext4_should_journal_data(inode))
ret += bpp;
return ret;
}
/*
* Calculate the journal credits for a chunk of data modification.
*
* This is called from DIO, fallocate or whoever calls
* ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
*
* Journal buffers for data blocks are not included here, as DIO
* and fallocate do not need to journal data buffers.
*/
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
return ext4_meta_trans_blocks(inode, nrblocks, 1);
}
/*
* The caller must have previously called ext4_reserve_inode_write().
* Given this, we know that the caller already has write access to iloc->bh.
*/
int ext4_mark_iloc_dirty(handle_t *handle,
struct inode *inode, struct ext4_iloc *iloc)
{
int err = 0;
if (IS_I_VERSION(inode))
inode_inc_iversion(inode);
/* ext4_do_update_inode() consumes one bh->b_count reference */
get_bh(iloc->bh);
/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
err = ext4_do_update_inode(handle, inode, iloc);
put_bh(iloc->bh);
return err;
}
/*
* On success, we end up with an outstanding reference count against
* iloc->bh. This _must_ be cleaned up later.
*/
int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
struct ext4_iloc *iloc)
{
int err;
err = ext4_get_inode_loc(inode, iloc);
if (!err) {
BUFFER_TRACE(iloc->bh, "get_write_access");
err = ext4_journal_get_write_access(handle, iloc->bh);
if (err) {
brelse(iloc->bh);
iloc->bh = NULL;
}
}
ext4_std_error(inode->i_sb, err);
return err;
}
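/*
* Illustrative sketch (not compiled; the function name is an assumption):
* the canonical calling pattern for the two helpers above.
* ext4_reserve_inode_write() takes the buffer reference and journal write
* access; ext4_mark_iloc_dirty() consumes the reference when done.
*/
#if 0
static int update_inode_example(handle_t *handle, struct inode *inode)
{
        struct ext4_iloc iloc;
        int err;

        err = ext4_reserve_inode_write(handle, inode, &iloc);
        if (err)
                return err;
        /* ... modify in-core inode fields under the handle here ... */
        return ext4_mark_iloc_dirty(handle, inode, &iloc);
}
#endif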
/*
* Expand an inode by new_extra_isize bytes.
* Returns 0 on success or negative error number on failure.
*/
static int ext4_expand_extra_isize(struct inode *inode,
unsigned int new_extra_isize,
struct ext4_iloc iloc,
handle_t *handle)
{
struct ext4_inode *raw_inode;
struct ext4_xattr_ibody_header *header;
if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
return 0;
raw_inode = ext4_raw_inode(&iloc);
header = IHDR(inode, raw_inode);
/* No extended attributes present */
if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
new_extra_isize);
EXT4_I(inode)->i_extra_isize = new_extra_isize;
return 0;
}
/* try to expand with EAs present */
return ext4_expand_extra_isize_ea(inode, new_extra_isize,
raw_inode, handle);
}
/*
* What we do here is to mark the in-core inode as clean with respect to inode
* dirtiness (it may still be data-dirty).
* This means that the in-core inode may be reaped by prune_icache
* without having to perform any I/O. This is a very good thing,
* because *any* task may call prune_icache - even ones which
* have a transaction open against a different journal.
*
* Is this cheating? Not really. Sure, we haven't written the
* inode out, but prune_icache isn't a user-visible syncing function.
* Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
* we start and wait on commits.
*/
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
struct ext4_iloc iloc;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
static unsigned int mnt_count;
int err, ret;
might_sleep();
trace_ext4_mark_inode_dirty(inode, _RET_IP_);
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err)
return err;
if (ext4_handle_valid(handle) &&
EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
!ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
/*
* We need extra buffer credits since we may write into EA block
* with this same handle. If journal_extend fails, then it will
* only result in a minor loss of functionality for that inode.
* If this is felt to be critical, then e2fsck should be run to
* force a large enough s_min_extra_isize.
*/
if ((jbd2_journal_extend(handle,
EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
ret = ext4_expand_extra_isize(inode,
sbi->s_want_extra_isize,
iloc, handle);
if (ret) {
if (mnt_count !=
le16_to_cpu(sbi->s_es->s_mnt_count)) {
ext4_warning(inode->i_sb,
"Unable to expand inode %lu. Delete"
" some EAs or run e2fsck.",
inode->i_ino);
mnt_count =
le16_to_cpu(sbi->s_es->s_mnt_count);
}
}
}
}
return ext4_mark_iloc_dirty(handle, inode, &iloc);
}
/*
* ext4_dirty_inode() is called from __mark_inode_dirty()
*
* We're really interested in the case where a file is being extended.
* i_size has been changed by generic_commit_write() and we thus need
* to include the updated inode in the current transaction.
*
* Also, dquot_alloc_block() will always dirty the inode when blocks
* are allocated to the file.
*
* If the inode is marked synchronous, we don't honour that here - doing
* so would cause a commit on atime updates, which we don't bother doing.
* We handle synchronous inodes at the highest possible level.
*
* If only the I_DIRTY_TIME flag is set, we can skip everything. If
* I_DIRTY_TIME and I_DIRTY_SYNC are set, the only inode fields we need
* to copy into the on-disk inode structure are the timestamp fields.
*/
void ext4_dirty_inode(struct inode *inode, int flags)
{
handle_t *handle;
if (flags == I_DIRTY_TIME)
return;
handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
if (IS_ERR(handle))
goto out;
ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
out:
return;
}
#if 0
/*
* Bind an inode's backing buffer_head into this transaction, to prevent
* it from being flushed to disk early. Unlike
* ext4_reserve_inode_write, this leaves behind no bh reference and
* returns no iloc structure, so the caller needs to repeat the iloc
* lookup to mark the inode dirty later.
*/
static int ext4_pin_inode(handle_t *handle, struct inode *inode)
{
struct ext4_iloc iloc;
int err = 0;
if (handle) {
err = ext4_get_inode_loc(inode, &iloc);
if (!err) {
BUFFER_TRACE(iloc.bh, "get_write_access");
err = jbd2_journal_get_write_access(handle, iloc.bh);
if (!err)
err = ext4_handle_dirty_metadata(handle,
NULL,
iloc.bh);
brelse(iloc.bh);
}
}
ext4_std_error(inode->i_sb, err);
return err;
}
#endif
int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
journal_t *journal;
handle_t *handle;
int err;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
/*
* We have to be very careful here: changing a data block's
* journaling status dynamically is dangerous. If we write a
* data block to the journal, change the status and then delete
* that block, we risk forgetting to revoke the old log record
* from the journal and so a subsequent replay can corrupt data.
* So, first we make sure that the journal is empty and that
* nobody is changing anything.
*/
journal = EXT4_JOURNAL(inode);
if (!journal)
return 0;
if (is_journal_aborted(journal))
return -EROFS;
/* Wait for all existing dio workers */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
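/*
 * At this point no new unlocked DIO can start and all in-flight direct
 * I/O has drained, so the journal flush and the aops/flag switch below
 * cannot race with direct I/O against this inode.
 */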
/*
* Before flushing the journal and switching inode's aops, we have
* to flush all dirty data the inode has. There can be outstanding
* delayed allocations, there can be unwritten extents created by
* fallocate or buffered writes in dioread_nolock mode covered by
* dirty data which can be converted only after flushing the dirty
* data (and journalled aops don't know how to handle these cases).
*/
if (val) {
down_write(&EXT4_I(inode)->i_mmap_sem);
err = filemap_write_and_wait(inode->i_mapping);
if (err < 0) {
up_write(&EXT4_I(inode)->i_mmap_sem);
ext4_inode_resume_unlocked_dio(inode);
return err;
}
}
percpu_down_write(&sbi->s_journal_flag_rwsem);
jbd2_journal_lock_updates(journal);
/*
* OK, there are no updates running now, and all cached data is
* synced to disk. We are now in a completely consistent state
* which doesn't have anything in the journal, and we know that
* no filesystem updates are running, so it is safe to modify
* the inode's in-core data-journaling state flag now.
*/
if (val)
ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
else {
err = jbd2_journal_flush(journal);
if (err < 0) {
jbd2_journal_unlock_updates(journal);
percpu_up_write(&sbi->s_journal_flag_rwsem);
ext4_inode_resume_unlocked_dio(inode);
return err;
}
ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
}
ext4_set_aops(inode);
jbd2_journal_unlock_updates(journal);
percpu_up_write(&sbi->s_journal_flag_rwsem);
if (val)
up_write(&EXT4_I(inode)->i_mmap_sem);
ext4_inode_resume_unlocked_dio(inode);
/* Finally we can mark the inode as dirty. */
handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
if (IS_ERR(handle))
return PTR_ERR(handle);
err = ext4_mark_inode_dirty(handle, inode);
ext4_handle_sync(handle);
ext4_journal_stop(handle);
ext4_std_error(inode->i_sb, err);
return err;
}
static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
return !buffer_mapped(bh);
}
int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
loff_t size;
unsigned long len;
int ret;
struct file *file = vma->vm_file;
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
handle_t *handle;
get_block_t *get_block;
int retries = 0;
sb_start_pagefault(inode->i_sb);
file_update_time(vma->vm_file);
down_read(&EXT4_I(inode)->i_mmap_sem);
/* Delalloc case is easy... */
if (test_opt(inode->i_sb, DELALLOC) &&
!ext4_should_journal_data(inode) &&
!ext4_nonda_switch(inode->i_sb)) {
do {
ret = block_page_mkwrite(vma, vmf,
ext4_da_get_block_prep);
} while (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries));
goto out_ret;
}
lock_page(page);
size = i_size_read(inode);
/* Page got truncated from under us? */
if (page->mapping != mapping || page_offset(page) > size) {
unlock_page(page);
ret = VM_FAULT_NOPAGE;
goto out;
}
if (page->index == size >> PAGE_SHIFT)
len = size & ~PAGE_MASK;
else
len = PAGE_SIZE;
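/*
 * Worked example (illustrative only): with 4K pages and i_size = 10000,
 * the page at index 2 (= 10000 >> PAGE_SHIFT) covers file bytes
 * 8192..12287, so len = 10000 & ~PAGE_MASK = 1808 -- only the bytes
 * inside i_size get mapped and journalled below.
 */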
/*
* Return if we have all the buffers mapped. This avoids the need to do
* journal_start/journal_stop which can block and take a long time
*/
if (page_has_buffers(page)) {
if (!ext4_walk_page_buffers(NULL, page_buffers(page),
0, len, NULL,
ext4_bh_unmapped)) {
/* Wait so that we don't change page under IO */
wait_for_stable_page(page);
ret = VM_FAULT_LOCKED;
goto out;
}
}
unlock_page(page);
/* OK, we need to fill the hole... */
if (ext4_should_dioread_nolock(inode))
get_block = ext4_get_block_unwritten;
else
get_block = ext4_get_block;
retry_alloc:
handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
ext4_writepage_trans_blocks(inode));
if (IS_ERR(handle)) {
ret = VM_FAULT_SIGBUS;
goto out;
}
ret = block_page_mkwrite(vma, vmf, get_block);
if (!ret && ext4_should_journal_data(inode)) {
if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
PAGE_SIZE, NULL, do_journal_get_write_access)) {
unlock_page(page);
ret = VM_FAULT_SIGBUS;
ext4_journal_stop(handle);
goto out;
}
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
}
ext4_journal_stop(handle);
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry_alloc;
out_ret:
ret = block_page_mkwrite_return(ret);
out:
up_read(&EXT4_I(inode)->i_mmap_sem);
sb_end_pagefault(inode->i_sb);
return ret;
}
int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct inode *inode = file_inode(vma->vm_file);
int err;
down_read(&EXT4_I(inode)->i_mmap_sem);
err = filemap_fault(vma, vmf);
up_read(&EXT4_I(inode)->i_mmap_sem);
return err;
}
/*
* Find the first non-hole extent in an inode at or after @lblk.
* Search for @map_len blocks at most. The extent is returned in @result.
*
* The function returns 1 if we found an extent. The function returns 0 in
* case there is no extent at or after @lblk and in that case also sets
* @result->es_len to 0. In case of error, the error code is returned.
*/
int ext4_get_next_extent(struct inode *inode, ext4_lblk_t lblk,
unsigned int map_len, struct extent_status *result)
{
struct ext4_map_blocks map;
struct extent_status es = {};
int ret;
map.m_lblk = lblk;
map.m_len = map_len;
/*
* For non-extent based files this loop may iterate several times since
* we do not determine full hole size.
*/
while (map.m_len > 0) {
ret = ext4_map_blocks(NULL, inode, &map, 0);
if (ret < 0)
return ret;
/* There's extent covering m_lblk? Just return it. */
if (ret > 0) {
int status;
ext4_es_store_pblock(result, map.m_pblk);
result->es_lblk = map.m_lblk;
result->es_len = map.m_len;
if (map.m_flags & EXT4_MAP_UNWRITTEN)
status = EXTENT_STATUS_UNWRITTEN;
else
status = EXTENT_STATUS_WRITTEN;
ext4_es_store_status(result, status);
return 1;
}
ext4_es_find_delayed_extent_range(inode, map.m_lblk,
map.m_lblk + map.m_len - 1,
&es);
/* Is delalloc data before next block in extent tree? */
if (es.es_len && es.es_lblk < map.m_lblk + map.m_len) {
ext4_lblk_t offset = 0;
if (es.es_lblk < lblk)
offset = lblk - es.es_lblk;
result->es_lblk = es.es_lblk + offset;
ext4_es_store_pblock(result,
ext4_es_pblock(&es) + offset);
result->es_len = es.es_len - offset;
ext4_es_store_status(result, ext4_es_status(&es));
return 1;
}
/* There's a hole at m_lblk, advance us after it */
map.m_lblk += map.m_len;
map_len -= map.m_len;
map.m_len = map_len;
cond_resched();
}
result->es_len = 0;
return 0;
}
| gpl-2.0 |
fizista/Dorimanx-SG2-I9100-Kernel | drivers/clocksource/arm_arch_timer.c | 7 | 9714 | /*
* linux/drivers/clocksource/arm_arch_timer.c
*
* Copyright (C) 2011 ARM Ltd.
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/io.h>
#include <asm/arch_timer.h>
#include <asm/virt.h>
#include <clocksource/arm_arch_timer.h>
static u32 arch_timer_rate;
enum ppi_nr {
PHYS_SECURE_PPI,
PHYS_NONSECURE_PPI,
VIRT_PPI,
HYP_PPI,
MAX_TIMER_PPI
};
static int arch_timer_ppi[MAX_TIMER_PPI];
static struct clock_event_device __percpu *arch_timer_evt;
static bool arch_timer_use_virtual = true;
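/*
 * We default to the virtual timer; arch_timer_init() clears this and
 * falls back to the physical timer when the kernel booted in HYP mode
 * or when the device tree provides no virtual-timer PPI.
 */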
/*
* Architected system timer support.
*/
static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
struct clock_event_device *clk)
{
arch_timer_reg_write_cp15(access, reg, val);
}
static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
struct clock_event_device *clk)
{
return arch_timer_reg_read_cp15(access, reg);
}
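/*
 * Shared interrupt body for the virtual and physical timers: IT_STAT is
 * set once the downcounter has expired, so we mask the level-triggered
 * output via IT_MASK and let the clockevent handler re-arm the timer
 * through set_next_event().
 */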
static __always_inline irqreturn_t timer_handler(const int access,
struct clock_event_device *evt)
{
unsigned long ctrl;
ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
ctrl |= ARCH_TIMER_CTRL_IT_MASK;
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
evt->event_handler(evt);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}
static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}
static __always_inline void timer_set_mode(const int access, int mode,
struct clock_event_device *clk)
{
unsigned long ctrl;
switch (mode) {
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
break;
default:
break;
}
}
static void arch_timer_set_mode_virt(enum clock_event_mode mode,
struct clock_event_device *clk)
{
timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode, clk);
}
static void arch_timer_set_mode_phys(enum clock_event_mode mode,
struct clock_event_device *clk)
{
timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode, clk);
}
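/*
 * TVAL is a downcounter relative to the current system count: writing
 * 'evt' schedules an interrupt roughly 'evt' timer ticks from now. The
 * subsequent control write enables the timer and unmasks its output.
 */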
static __always_inline void set_next_event(const int access, unsigned long evt,
struct clock_event_device *clk)
{
unsigned long ctrl;
ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
static int arch_timer_set_next_event_virt(unsigned long evt,
struct clock_event_device *clk)
{
set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
return 0;
}
static int arch_timer_set_next_event_phys(unsigned long evt,
struct clock_event_device *clk)
{
set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
return 0;
}
static int arch_timer_setup(struct clock_event_device *clk)
{
clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
clk->name = "arch_sys_timer";
clk->rating = 450;
if (arch_timer_use_virtual) {
clk->irq = arch_timer_ppi[VIRT_PPI];
clk->set_mode = arch_timer_set_mode_virt;
clk->set_next_event = arch_timer_set_next_event_virt;
} else {
clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
clk->set_mode = arch_timer_set_mode_phys;
clk->set_next_event = arch_timer_set_next_event_phys;
}
clk->cpumask = cpumask_of(smp_processor_id());
clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk);
clockevents_config_and_register(clk, arch_timer_rate,
0xf, 0x7fffffff);
if (arch_timer_use_virtual)
enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
else {
enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
if (arch_timer_ppi[PHYS_NONSECURE_PPI])
enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
}
arch_counter_set_user_access();
return 0;
}
static int arch_timer_available(void)
{
u32 freq;
if (arch_timer_rate == 0) {
freq = arch_timer_get_cntfrq();
/* Check the timer frequency. */
if (freq == 0) {
pr_warn("Architected timer frequency not available\n");
return -EINVAL;
}
arch_timer_rate = freq;
}
pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
(unsigned long)arch_timer_rate / 1000000,
(unsigned long)(arch_timer_rate / 10000) % 100,
arch_timer_use_virtual ? "virt" : "phys");
return 0;
}
u32 arch_timer_get_rate(void)
{
return arch_timer_rate;
}
u64 arch_timer_read_counter(void)
{
return arch_counter_get_cntvct();
}
static cycle_t arch_counter_read(struct clocksource *cs)
{
return arch_counter_get_cntvct();
}
static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
return arch_counter_get_cntvct();
}
static struct clocksource clocksource_counter = {
.name = "arch_sys_counter",
.rating = 400,
.read = arch_counter_read,
.mask = CLOCKSOURCE_MASK(56),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct cyclecounter cyclecounter = {
.read = arch_counter_read_cc,
.mask = CLOCKSOURCE_MASK(56),
};
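/*
 * The cyclecounter/timecounter pair exposes the same 56-bit counter to
 * in-kernel consumers that want raw cycles converted to nanoseconds;
 * its mult/shift are copied from the registered clocksource in
 * arch_timer_register() below.
 */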
static struct timecounter timecounter;
struct timecounter *arch_timer_get_timecounter(void)
{
return &timecounter;
}
static void arch_timer_stop(struct clock_event_device *clk)
{
pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
clk->irq, smp_processor_id());
if (arch_timer_use_virtual)
disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
else {
disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
if (arch_timer_ppi[PHYS_NONSECURE_PPI])
disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
}
clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}
static int arch_timer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
/*
* Grab cpu pointer in each case to avoid spurious
* preemptible warnings
*/
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_STARTING:
arch_timer_setup(this_cpu_ptr(arch_timer_evt));
break;
case CPU_DYING:
arch_timer_stop(this_cpu_ptr(arch_timer_evt));
break;
}
return NOTIFY_OK;
}
static struct notifier_block arch_timer_cpu_nb = {
.notifier_call = arch_timer_cpu_notify,
};
static int __init arch_timer_register(void)
{
int err;
int ppi;
err = arch_timer_available();
if (err)
goto out;
arch_timer_evt = alloc_percpu(struct clock_event_device);
if (!arch_timer_evt) {
err = -ENOMEM;
goto out;
}
clocksource_register_hz(&clocksource_counter, arch_timer_rate);
cyclecounter.mult = clocksource_counter.mult;
cyclecounter.shift = clocksource_counter.shift;
timecounter_init(&timecounter, &cyclecounter,
arch_counter_get_cntvct());
if (arch_timer_use_virtual) {
ppi = arch_timer_ppi[VIRT_PPI];
err = request_percpu_irq(ppi, arch_timer_handler_virt,
"arch_timer", arch_timer_evt);
} else {
ppi = arch_timer_ppi[PHYS_SECURE_PPI];
err = request_percpu_irq(ppi, arch_timer_handler_phys,
"arch_timer", arch_timer_evt);
if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
err = request_percpu_irq(ppi, arch_timer_handler_phys,
"arch_timer", arch_timer_evt);
if (err)
free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
arch_timer_evt);
}
}
if (err) {
pr_err("arch_timer: can't register interrupt %d (%d)\n",
ppi, err);
goto out_free;
}
err = register_cpu_notifier(&arch_timer_cpu_nb);
if (err)
goto out_free_irq;
/* Immediately configure the timer on the boot CPU */
arch_timer_setup(this_cpu_ptr(arch_timer_evt));
return 0;
out_free_irq:
if (arch_timer_use_virtual)
free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
else {
free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
arch_timer_evt);
if (arch_timer_ppi[PHYS_NONSECURE_PPI])
free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
arch_timer_evt);
}
out_free:
free_percpu(arch_timer_evt);
out:
return err;
}
static void __init arch_timer_init(struct device_node *np)
{
u32 freq;
int i;
if (arch_timer_get_rate()) {
pr_warn("arch_timer: multiple nodes in dt, skipping\n");
return;
}
/* Try to determine the frequency from the device tree or CNTFRQ */
if (!of_property_read_u32(np, "clock-frequency", &freq))
arch_timer_rate = freq;
for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
of_node_put(np);
/*
* If HYP mode is available, we know that the physical timer
* has been configured to be accessible from PL1. Use it, so
* that a guest can use the virtual timer instead.
*
* If no interrupt provided for virtual timer, we'll have to
* stick to the physical timer. It'd better be accessible...
*/
if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
arch_timer_use_virtual = false;
if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
!arch_timer_ppi[PHYS_NONSECURE_PPI]) {
pr_warn("arch_timer: No interrupt available, giving up\n");
return;
}
}
arch_timer_register();
arch_timer_arch_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init);
| gpl-2.0 |
pdebuyl/lammps | lib/kokkos/core/src/eti/Serial/Kokkos_Serial_ViewCopyETIInst_int64_t_float_LayoutRight_Rank8.cpp | 7 | 2458 | //@HEADER
// ************************************************************************
//
// Kokkos v. 2.0
// Copyright (2014) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Kokkos is licensed under 3-clause BSD terms of use:
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
//
// ************************************************************************
//@HEADER
#define KOKKOS_IMPL_COMPILING_LIBRARY true
#include<Kokkos_Core.hpp>
namespace Kokkos {
namespace Impl {
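// The macros below expand to explicit template instantiations (ETI) of the
// rank-8 float view copy/fill kernels for the Serial backend, so client code
// links against these precompiled definitions instead of re-instantiating
// them in every translation unit.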
KOKKOS_IMPL_VIEWCOPY_ETI_INST(float********,LayoutRight,LayoutRight, Serial,int64_t)
KOKKOS_IMPL_VIEWCOPY_ETI_INST(float********,LayoutRight,LayoutLeft, Serial,int64_t)
KOKKOS_IMPL_VIEWCOPY_ETI_INST(float********,LayoutRight,LayoutStride,Serial,int64_t)
KOKKOS_IMPL_VIEWFILL_ETI_INST(float********,LayoutRight,Serial,int64_t)
}
}
| gpl-2.0 |
hermeli/amx_kernel | drivers/net/arm/ks8695_ether.c | 7 | 26977 | /*
* Ethernet driver for the Kendin/Micrel KS8695.
*
* Copyright (C) 2006 Andrew Victor
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/mach/irq.h>
#include <asm/uaccess.h>
#include <mach/regs-wan.h>
#include <mach/regs-lan.h>
#include <mach/regs-hpna.h>
#include <mach/regs-switch.h>
#include <mach/regs-misc.h>
#include <mach/regs-irq.h>
#include "ks8695_ether.h"
#define DRV_NAME "ks8695_ether"
#define DRV_VERSION "0.01"
#warning "Convert driver to use struct net_device_ops"
/* ..................................................................... */
static inline unsigned long ks8695_read(struct net_device *dev, unsigned int reg)
{
return __raw_readl(dev->base_addr + reg);
}
static inline void ks8695_write(struct net_device *dev, unsigned int reg, unsigned long value)
{
__raw_writel(value, dev->base_addr + reg);
}
/* ......................... ADDRESS MANAGEMENT ........................ */
#define KS8695_NR_ADDRESSES 16
/*
* Add the specified multicast addresses to the Additional Station
* Address registers.
*/
static void ks8695_set_mcast_address(struct net_device *dev, struct dev_mc_list *addr, int nr_addr)
{
unsigned long low, high;
int i;
/* Set multicast addresses in Additional Station Address registers */
for (i = 0; i < nr_addr; i++, addr = addr->next) {
if (!addr) break; /* unexpected end of list */
else if (i == KS8695_NR_ADDRESSES) break; /* too many addresses */
low = (addr->dmi_addr[2] << 24) | (addr->dmi_addr[3] << 16) | (addr->dmi_addr[4] << 8) | (addr->dmi_addr[5]);
high = (addr->dmi_addr[0] << 8) | (addr->dmi_addr[1]);
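/*
 * Example: for the address 00:11:22:33:44:55 this packs
 * low = 0x22334455 and high = 0x0011; WMAAH_E marks the entry valid.
 */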
ks8695_write(dev, KS8695_WMAAL(i), low);
ks8695_write(dev, KS8695_WMAAH(i), WMAAH_E | high);
}
/* Clear the remaining Additional Station Addresses */
for (; i < KS8695_NR_ADDRESSES; i++) {
ks8695_write(dev, KS8695_WMAAL(i), 0);
ks8695_write(dev, KS8695_WMAAH(i), 0);
}
}
/*
* Enable/Disable promiscuous and multicast modes.
*/
static void ks8695eth_set_multi(struct net_device *dev)
{
unsigned long ctrl;
ctrl = ks8695_read(dev, KS8695_WMDRXC);
if (dev->flags & IFF_PROMISC) /* enable promiscuous mode */
ctrl |= WMDRXC_WMRA;
else /* disable promiscuous mode */
ctrl &= ~WMDRXC_WMRA;
if (dev->flags & IFF_ALLMULTI) /* enable all multicast mode */
ctrl |= WMDRXC_WMRM;
else if (dev->mc_count > KS8695_NR_ADDRESSES) /* more specific multicast addresses than can be handled in hardware */
ctrl |= WMDRXC_WMRM;
else if (dev->mc_count > 0) { /* enable specific multicasts */
ctrl &= ~WMDRXC_WMRM;
ks8695_set_mcast_address(dev, dev->mc_list, dev->mc_count);
}
else { /* disable multicast mode */
ctrl &= ~WMDRXC_WMRM;
ks8695_set_mcast_address(dev, NULL, 0);
}
ks8695_write(dev, KS8695_WMDRXC, ctrl);
}
/*
* Program the hardware MAC address from dev->dev_addr.
*/
static void update_mac_address(struct net_device *dev)
{
unsigned long low, high;
low = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) | (dev->dev_addr[4] << 8) | (dev->dev_addr[5]);
high = (dev->dev_addr[0] << 8) | (dev->dev_addr[1]);
ks8695_write(dev, KS8695_WMAL, low);
ks8695_write(dev, KS8695_WMAH, high);
}
/*
* Store the new hardware address in dev->dev_addr, and update the MAC.
*/
static int ks8695eth_set_mac(struct net_device *dev, void* addr)
{
struct sockaddr *address = addr;
DECLARE_MAC_BUF(mac);
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
update_mac_address(dev);
printk("%s: Setting MAC address to %s\n", dev->name, print_mac(mac, dev->dev_addr));
return 0;
}
/*
* Retrieve the MAC address set by the bootloader.
*/
static void __init get_mac_address(struct net_device *dev)
{
unsigned char addr[6];
unsigned long low, high;
low = ks8695_read(dev, KS8695_WMAL);
high = ks8695_read(dev, KS8695_WMAH);
addr[0] = (high & 0xff00) >> 8;
addr[1] = (high & 0xff);
addr[2] = (low & 0xff000000) >> 24;
addr[3] = (low & 0xff0000) >> 16;
addr[4] = (low & 0xff00) >> 8;
addr[5] = (low & 0xff);
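/*
 * Example: WMAH = 0x0011, WMAL = 0x22334455 decodes to the MAC address
 * 00:11:22:33:44:55 -- the exact inverse of update_mac_address().
 */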
if (is_valid_ether_addr(addr))
memcpy(dev->dev_addr, &addr, 6);
}
/* ......................... ETHTOOL SUPPORT ........................... */
/*
* Get device-specific settings.
*/
static int ks8695eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
unsigned long ctrl;
/* the defaults for all ports */
cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
| SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full
| SUPPORTED_TP | SUPPORTED_MII;
cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
cmd->port = PORT_MII;
cmd->transceiver = XCVR_INTERNAL;
if (dev->base_addr == KS8695_HPNA_VA) {
cmd->phy_address = 0;
cmd->autoneg = AUTONEG_DISABLE; /* not supported for HPNA */
ctrl = __raw_readl(KS8695_MISC_VA + KS8695_HMC);
cmd->speed = (ctrl & HMC_HSS) ? SPEED_100 : SPEED_10;
cmd->duplex = (ctrl & HMC_HDS) ? DUPLEX_FULL : DUPLEX_HALF;
}
else if (dev->base_addr == KS8695_WAN_VA) {
cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
cmd->phy_address = 0;
ctrl = __raw_readl(KS8695_MISC_VA + KS8695_WMC);
if ((ctrl & WMC_WAND) == 0) { /* auto-negotiation is enabled */
cmd->advertising |= ADVERTISED_Autoneg;
if (ctrl & WMC_WANA100F)
cmd->advertising |= ADVERTISED_100baseT_Full;
if (ctrl & WMC_WANA100H)
cmd->advertising |= ADVERTISED_100baseT_Half;
if (ctrl & WMC_WANA10F)
cmd->advertising |= ADVERTISED_10baseT_Full;
if (ctrl & WMC_WANA10H)
cmd->advertising |= ADVERTISED_10baseT_Half;
if (ctrl & WMC_WANAP)
cmd->advertising |= ADVERTISED_Pause;
cmd->autoneg = AUTONEG_ENABLE;
cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
cmd->duplex = (ctrl & WMC_WDS) ? DUPLEX_FULL : DUPLEX_HALF;
}
else { /* auto-negotiation is disabled */
cmd->autoneg = AUTONEG_DISABLE;
cmd->speed = (ctrl & WMC_WANF100) ? SPEED_100 : SPEED_10;
cmd->duplex = (ctrl & WMC_WANFF) ? DUPLEX_FULL : DUPLEX_HALF;
}
}
else if (dev->base_addr == KS8695_LAN_VA) {
// TODO: Implement for Switch ports
}
return 0;
}
/*
* Set device-specific settings.
*/
static int ks8695eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
unsigned long ctrl;
if ((cmd->speed != SPEED_10) && (cmd->speed != SPEED_100))
return -EINVAL;
if ((cmd->duplex != DUPLEX_HALF) && (cmd->duplex != DUPLEX_FULL))
return -EINVAL;
if (cmd->port != PORT_MII)
return -EINVAL;
if (cmd->transceiver != XCVR_INTERNAL)
return -EINVAL;
if ((cmd->autoneg != AUTONEG_DISABLE) && (cmd->autoneg != AUTONEG_ENABLE))
return -EINVAL;
if (cmd->autoneg == AUTONEG_ENABLE) {
if ((cmd->advertising & (ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full)) == 0)
return -EINVAL;
if (dev->base_addr == KS8695_HPNA_VA)
return -EINVAL; /* HPNA does not support auto-negotiation. */
else if (dev->base_addr == KS8695_WAN_VA) {
ctrl = __raw_readl(KS8695_MISC_VA + KS8695_WMC);
ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H | WMC_WANA10F | WMC_WANA10H);
if (cmd->advertising & ADVERTISED_100baseT_Full)
ctrl |= WMC_WANA100F;
if (cmd->advertising & ADVERTISED_100baseT_Half)
ctrl |= WMC_WANA100H;
if (cmd->advertising & ADVERTISED_10baseT_Full)
ctrl |= WMC_WANA10F;
if (cmd->advertising & ADVERTISED_10baseT_Half)
ctrl |= WMC_WANA10H;
ctrl |= WMC_WANR; /* force a re-negotiation */
__raw_writel(ctrl, KS8695_MISC_VA + KS8695_WMC);
}
else if (dev->base_addr == KS8695_LAN_VA) {
// TODO: Implement for Switch ports
}
}
else {
if (dev->base_addr == KS8695_HPNA_VA) {
ctrl = __raw_readl(KS8695_MISC_VA + KS8695_HMC);
ctrl &= ~(HMC_HSS | HMC_HDS);
if (cmd->speed == SPEED_100)
ctrl |= HMC_HSS;
if (cmd->duplex == DUPLEX_FULL)
ctrl |= HMC_HDS;
__raw_writel(ctrl, KS8695_MISC_VA + KS8695_HMC);
}
else if (dev->base_addr == KS8695_WAN_VA) {
ctrl = __raw_readl(KS8695_MISC_VA + KS8695_WMC);
ctrl |= WMC_WAND; /* disable auto-negotiation */
ctrl &= ~(WMC_WANF100 | WMC_WANFF);
if (cmd->speed == SPEED_100)
ctrl |= WMC_WANF100;
if (cmd->duplex == DUPLEX_FULL)
ctrl |= WMC_WANFF;
__raw_writel(ctrl, KS8695_MISC_VA + KS8695_WMC);
}
else if (dev->base_addr == KS8695_LAN_VA) {
// TODO: Implement for Switch ports
}
}
return 0;
}
/*
* Restart the auto-negotiation.
*/
static int ks8695eth_nwayreset(struct net_device *dev)
{
unsigned long ctrl;
if (dev->base_addr == KS8695_HPNA_VA) /* HPNA has no auto-negotiation */
return -EINVAL;
else if (dev->base_addr == KS8695_WAN_VA) {
ctrl = __raw_readl(KS8695_MISC_VA + KS8695_WMC);
if ((ctrl & WMC_WAND) == 0)
__raw_writel(ctrl | WMC_WANR, KS8695_MISC_VA + KS8695_WMC);
else
return -EINVAL; /* auto-negotiation not enabled */
}
else if (dev->base_addr == KS8695_LAN_VA) {
// TODO: Implement for Switch ports
}
return 0;
}
static void ks8695eth_get_pause(struct net_device *dev, struct ethtool_pauseparam *param)
{
unsigned long ctrl;
if (dev->base_addr == KS8695_HPNA_VA)
return;
else if (dev->base_addr == KS8695_WAN_VA) {
ctrl = __raw_readl(KS8695_MISC_VA + KS8695_WMC); /* advertise Pause */
param->autoneg = (ctrl & WMC_WANAP);
ctrl = ks8695_read(dev, KS8695_WMDRXC); /* current Rx Flow-control */
param->rx_pause = (ctrl & WMDRXC_WMRFCE);
ctrl = ks8695_read(dev, KS8695_WMDTXC); /* current Tx Flow-control */
param->tx_pause = (ctrl & WMDTXC_WMTFCE);
}
else if (dev->base_addr == KS8695_LAN_VA) {
// TODO: Implement for Switch ports
}
}
static int ks8695eth_set_pause(struct net_device *dev, struct ethtool_pauseparam *param)
{
// TODO.
return 0;
}
static u32 ks8695eth_get_link(struct net_device *dev)
{
unsigned long ctrl;
if (dev->base_addr == KS8695_HPNA_VA)
return 1; /* HPNA always has link */
else if (dev->base_addr == KS8695_WAN_VA) {
ctrl = __raw_readl(KS8695_MISC_VA + KS8695_WMC);
return (ctrl & WMC_WLS);
}
else if (dev->base_addr == KS8695_LAN_VA) {
// TODO: Implement for Switch ports
}
return 0;
}
/*
* Report driver information.
*/
static void ks8695eth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
}
static struct ethtool_ops ks8695eth_ethtool_ops = {
.get_settings = ks8695eth_get_settings,
.set_settings = ks8695eth_set_settings,
.get_drvinfo = ks8695eth_get_drvinfo,
.nway_reset = ks8695eth_nwayreset,
.get_pauseparam = ks8695eth_get_pause,
.set_pauseparam = ks8695eth_set_pause,
.get_link = ks8695eth_get_link,
};
/* ................................ MAC ................................ */
/*
* Setup the RX DMA descriptors, and enable and start the DMA receiver.
*/
static void ks8695eth_start_rx(struct net_device *dev)
{
struct ks8695eth_priv *lp = (struct ks8695eth_priv *) dev->priv;
unsigned long ctrl;
int i;
/* Setup the DMA descriptors */
for (i = 0; i < MAX_RX_DESC; i++) {
lp->rxdma[i].length = MAX_RXBUF_SIZE;
lp->rxdma[i].addr = (unsigned long) lp->rxSkb[i].dma;
lp->rxdma[i].next = (unsigned long) lp->rxdma_phys + (sizeof(struct rx_descriptor) * (i+1));
lp->rxdma[i].status = RDES_OWN;
}
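/*
 * Setting RDES_OWN hands each descriptor to the DMA engine;
 * ks8695eth_rx_interrupt() only reclaims a descriptor once the hardware
 * has cleared the bit again.
 */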
/* Create ring of DMA descriptors */
lp->rxdma[MAX_RX_DESC-1].next = (unsigned long) lp->rxdma_phys; /* phys address of 1st descriptor */
/* Reset receive index (since hardware was reset) */
lp->rx_idx = 0;
/* Program address of 1st descriptor in KS8695 */
ks8695_write(dev, KS8695_WRDLB, (unsigned long) lp->rxdma_phys);
/* Enable and start the DMA Receiver */
ctrl = ks8695_read(dev, KS8695_WMDRXC);
ks8695_write(dev, KS8695_WMDRXC, ctrl | WMDRXC_WMRE);
ks8695_write(dev, KS8695_WMDRSC, 0);
}
/*
* Stop the DMA receiver.
*/
static void ks8695eth_stop_rx(struct net_device *dev)
{
unsigned long ctrl;
/* Disable receive DMA */
ctrl = ks8695_read(dev, KS8695_WMDRXC);
ks8695_write(dev, KS8695_WMDRXC, ctrl & ~WMDRXC_WMRE);
}
/*
* Setup the TX DMA descriptors, and enable DMA transmitter.
*/
static void ks8695eth_start_tx(struct net_device *dev)
{
struct ks8695eth_priv *lp = (struct ks8695eth_priv *) dev->priv;
unsigned long ctrl;
int i;
/* Setup the DMA descriptors */
for (i = 0; i < MAX_TX_DESC; i++) {
lp->txdma[i].ownership = 0;
lp->txdma[i].status = 0;
lp->txdma[i].addr = 0;
lp->txdma[i].next = (unsigned long) lp->txdma_phys + (sizeof(struct tx_descriptor) * (i+1));
}
/* Create ring of DMA descriptors */
lp->txdma[MAX_TX_DESC-1].next = (unsigned long) lp->txdma_phys; /* phys address of 1st desc */
/* Reset transmit indexes (since hardware was reset) */
lp->tx_head = 0;
lp->tx_tail = 0;
/* Program address of 1st descriptor in KS8695 */
ks8695_write(dev, KS8695_WTDLB, (unsigned long) lp->txdma_phys);
/* Enable the DMA transmitter (will be started on first packet) */
ctrl = ks8695_read(dev, KS8695_WMDTXC);
ks8695_write(dev, KS8695_WMDTXC, ctrl | WMDTXC_WMTE);
}
/*
* Stop the DMA transmitter.
*/
static void ks8695eth_stop_tx(struct net_device *dev)
{
struct ks8695eth_priv *lp = (struct ks8695eth_priv *) dev->priv;
unsigned long ctrl;
int i;
/* Disable transmit DMA */
ctrl = ks8695_read(dev, KS8695_WMDTXC);
ks8695_write(dev, KS8695_WMDTXC, ctrl & ~WMDTXC_WMTE);
/* Clear any pending skb's still on transmit queue */
for (i = 0; i < MAX_TX_DESC; i++) {
lp->txdma[i].ownership = 0;
lp->txdma[i].status = 0;
lp->txdma[i].addr = 0;
if (lp->txSkb[i].skb) {
dma_unmap_single(lp->dev, lp->txSkb[i].dma, lp->txSkb[i].length, DMA_TO_DEVICE);
dev_kfree_skb_irq(lp->txSkb[i].skb);
lp->txSkb[i].skb = NULL;
}
}
}
/*
* Reset the MAC hardware.
*/
static void ks8695eth_hw_reset(struct net_device *dev)
{
/* Perform hardware reset */
ks8695_write(dev, KS8695_WMDTXC, WMDTXC_WMTRST);
while (ks8695_read(dev, KS8695_WMDTXC) & WMDTXC_WMTRST) { barrier(); }
/* Initialize the hardware */
ks8695_write(dev, KS8695_WMDRXC, WMDRXC_WMRU | WMDRXC_WMRB); /* RX: receive Unicast & Broadcast */
ks8695_write(dev, KS8695_WMDTXC, WMDTXC_WMTEP | WMDTXC_WMTAC); /* TX: add Padding & CRC */
// TODO: Can set Rx/Tx PBL: (Micrel using 8)
// TODO: Enable hardware checksumming.
// TODO: Enable Rx/Tx flow-control
}
/*
* Enable or Disable the IRQs associated with a network interface.
*/
static void ks8695eth_set_irq(struct net_device *dev, short enable)
{
struct ks8695eth_priv *lp = (struct ks8695eth_priv *) dev->priv;
int i;
for (i = 0; i < NR_IRQS; i++) {
if (lp->irqs & (1 << i)) {
if (enable)
enable_irq(i);
else
disable_irq(i);
}
}
}
/*
* Open the ethernet interface.
*/
static int ks8695eth_open(struct net_device *dev)
{
if (!is_valid_ether_addr(dev->dev_addr))
return -EADDRNOTAVAIL;
/* MUST reset hardware in _open() */
ks8695eth_hw_reset(dev);
/* Update the MAC address (in case the user has changed it) */
update_mac_address(dev);
/* Start DMA */
ks8695eth_start_tx(dev);
ks8695eth_start_rx(dev);
/* Enable interrupts */
ks8695eth_set_irq(dev, 1);
netif_start_queue(dev);
return 0;
}
/*
* Close the ethernet interface.
*/
static int ks8695eth_close(struct net_device *dev)
{
/* Stop DMA */
ks8695eth_stop_rx(dev);
ks8695eth_stop_tx(dev);
/* Disable interrupts */
ks8695eth_set_irq(dev, 0);
netif_stop_queue(dev);
return 0;
}
/*
* Return the current statistics.
*/
static struct net_device_stats *ks8695eth_stats(struct net_device *dev)
{
return &dev->stats;
}
/*
* Queue a packet for transmission in next TX DMA descriptor.
*/
static int ks8695eth_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
struct ks8695eth_priv *lp = (struct ks8695eth_priv *) dev->priv;
int i;
/* Packets are added to head of array */
i = lp->tx_head;
/* Store packet information */
lp->txSkb[i].skb = skb;
lp->txSkb[i].length = skb->len;
lp->txSkb[i].dma = dma_map_single(lp->dev, skb->data, skb->len, DMA_TO_DEVICE);
spin_lock_irq(&lp->tx_lock);
/* Set Tx descriptor information */
lp->txdma[i].addr = lp->txSkb[i].dma;
lp->txdma[i].status = TDES_IC | TDES_FS | TDES_LS | (lp->txSkb[i].length & TDES_TBS);
lp->txdma[i].ownership = TDES_OWN;
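/*
 * Ordering matters here: addr and status are filled in before TDES_OWN
 * is set, so the DMA engine never sees a half-initialised descriptor;
 * the WMDTSC write below kicks the transmitter if it had gone idle.
 */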
/* Start the DMA transmitter (if necessary) */
ks8695_write(dev, KS8695_WMDTSC, 0);
lp->tx_head = (lp->tx_head + 1) % MAX_TX_DESC;
if (lp->tx_head == lp->tx_tail) /* no more descriptors */
netif_stop_queue(dev);
spin_unlock_irq(&lp->tx_lock);
dev->trans_start = jiffies;
return 0;
}
/* ..................................................................... */
/*
* The link state of the WAN port has changed.
* (Called from interrupt context)
*/
static void ks8695eth_wan_link(struct net_device *dev)
{
unsigned long ctrl;
ctrl = __raw_readl(KS8695_MISC_VA + KS8695_WMC);
if (ctrl & WMC_WLS) {
netif_carrier_on(dev);
printk(KERN_INFO "%s: Link is now %s-%s\n", dev->name,
(ctrl & WMC_WSS) ? "100" : "10",
(ctrl & WMC_WDS) ? "FullDuplex" : "HalfDuplex");
}
else {
netif_carrier_off(dev);
printk(KERN_INFO "%s: Link down.\n", dev->name);
}
}
/* ..................................................................... */
/*
* A frame has been received. Extract it from the buffer descriptor and
* deliver it to the upper layers.
* (Called from interrupt context)
*/
static void ks8695eth_rx_interrupt(struct net_device *dev)
{
struct ks8695eth_priv *lp = (struct ks8695eth_priv *) dev->priv;
struct sk_buff *skb;
unsigned long flags;
unsigned int pktlen;
while (!(lp->rxdma[lp->rx_idx].status & RDES_OWN)) {
flags = lp->rxdma[lp->rx_idx].status;
if ((flags & (RDES_FS | RDES_LS)) != (RDES_FS | RDES_LS)) {
printk(KERN_ERR "%s: Spanning packet detected\n", dev->name);
goto rx_complete;
}
/* handle errors */
if (flags & (RDES_ES | RDES_RE)) {
dev->stats.rx_errors++;
if (flags & RDES_TL) /* Frame too long */
dev->stats.rx_length_errors++;
else if (flags & RDES_RF) /* Runt frame */
dev->stats.rx_length_errors++;
else if (flags & RDES_CE) /* CRC error */
dev->stats.rx_crc_errors++;
else if (flags & RDES_RE) /* MII error */
dev->stats.rx_missed_errors++;
// TODO: If hardware checksumming, then check IP/TCP/UDP errors.
goto rx_complete;
}
pktlen = flags & RDES_FLEN;
pktlen = pktlen - 4; /* remove CRC */
// OLD CALL: consistent_sync(lp->rxSkb[lp->rx_idx].skb->data, MAX_RXBUF_SIZE, DMA_FROM_DEVICE);
dma_sync_single_for_cpu(lp->dev, lp->rxSkb[lp->rx_idx].dma, MAX_RXBUF_SIZE, DMA_FROM_DEVICE);
skb = dev_alloc_skb(pktlen+2); /* +2 to align IP header */
if (!skb) {
dev->stats.rx_dropped++;
printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
goto rx_complete;
}
skb_reserve(skb, 2); /* align IP header */
memcpy(skb_put(skb, pktlen), lp->rxSkb[lp->rx_idx].skb->data, pktlen);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
/* update statistics */
dev->stats.rx_packets++;
dev->stats.rx_bytes += pktlen;
if (flags & RDES_MF)
dev->stats.multicast++;
dev->last_rx = jiffies;
rx_complete:
lp->rxdma[lp->rx_idx].status = RDES_OWN; /* reset ownership bit */
lp->rx_idx = (lp->rx_idx + 1) % MAX_RX_DESC; /* next descriptor */
}
/* restart the DMA receiver in case it was suspended */
ks8695_write(dev, KS8695_WMDRSC, 0);
}
/*
* A packet has been transmitted.
* (Called from interrupt context)
*/
static void ks8695eth_tx_interrupt(struct net_device *dev)
{
struct ks8695eth_priv *lp = (struct ks8695eth_priv *) dev->priv;
int i;
/* Packets are removed from tail of array */
i = lp->tx_tail;
// TODO: Loop through multiple times?
if (lp->txSkb[i].skb) {
/* update statistics */
dev->stats.tx_packets++;
dev->stats.tx_bytes += lp->txSkb[i].length;
/* free packet */
dma_unmap_single(lp->dev, lp->txSkb[i].dma, lp->txSkb[i].length, DMA_TO_DEVICE);
dev_kfree_skb_irq(lp->txSkb[i].skb);
lp->txSkb[i].skb = NULL;
/* Not necessary to clear descriptor since we still own it */
}
lp->tx_tail = (lp->tx_tail + 1) % MAX_TX_DESC;
netif_wake_queue(dev);
}
/*
* MAC interrupt handler
*/
static irqreturn_t ks8695eth_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *) dev_id;
switch (irq) {
case KS8695_IRQ_LAN_RX_STATUS:
case KS8695_IRQ_HPNA_RX_STATUS:
case KS8695_IRQ_WAN_RX_STATUS:
ks8695eth_rx_interrupt(dev);
return IRQ_HANDLED;
case KS8695_IRQ_LAN_TX_STATUS:
case KS8695_IRQ_HPNA_TX_STATUS:
case KS8695_IRQ_WAN_TX_STATUS:
ks8695eth_tx_interrupt(dev);
return IRQ_HANDLED;
case KS8695_IRQ_WAN_LINK:
ks8695eth_wan_link(dev);
return IRQ_HANDLED;
default:
return IRQ_NONE;
}
}
/* ..................................................................... */
/*
* Initialize the WAN hardware to known defaults.
*/
static void __init ks8695eth_init_wan(void)
{
unsigned long ctrl;
/* Support auto-negotiation */
ctrl = WMC_WANAP | WMC_WANA100F | WMC_WANA100H | WMC_WANA10F | WMC_WANA10H;
/* LED0 = Activity , LED1 = Link */
ctrl |= (WLED0S_ACTIVITY | WLED1S_LINK);
/* Restart Auto-negotiation */
ctrl |= WMC_WANR;
__raw_writel(ctrl, KS8695_MISC_VA + KS8695_WMC);
__raw_writel(0, KS8695_MISC_VA + KS8695_WPPM);
__raw_writel(0, KS8695_MISC_VA + KS8695_PPS);
}
/*
* Initialize the LAN Switch hardware to known defaults.
*/
static void __init ks8695eth_init_switch(void)
{
unsigned long ctrl;
ctrl = 0x40819e00; /* default */
/* LED0 = Speed LED1 = Link/Activity */
ctrl &= ~(SEC0_LLED1S | SEC0_LLED0S);
ctrl |= (LLED0S_LINK | LLED1S_LINK_ACTIVITY);
/* Enable Switch */
ctrl |= SEC0_ENABLE;
__raw_writel(ctrl, KS8695_SWITCH_VA + KS8695_SEC0);
__raw_writel(0x9400100, KS8695_SWITCH_VA + KS8695_SEC1); /* reset defaults */
}
static int ks8695eth_hook_irqs(struct platform_device *pdev, struct net_device *dev, unsigned long *irqset)
{
struct resource *res;
int i = 0, ret;
while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, i))) {
set_irq_flags(res->start, IRQF_VALID | IRQF_NOAUTOEN);
ret = request_irq(res->start, ks8695eth_interrupt, IRQF_DISABLED | IRQF_SHARED, res->name, dev);
if (ret) {
printk(KERN_ERR "%s: return_irq %u failed\n", dev->name, res->start);
return -EBUSY;
}
*irqset |= (1 << res->start);
// TODO: Can set different priorities for interrupts [0x BB AA FF].
i++;
}
return 0;
}
static int __init ks8695eth_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct ks8695eth_priv *lp;
struct resource *res;
int i = 0, ret, size;
DECLARE_MAC_BUF(mac);
/* Create ethernet device */
dev = alloc_etherdev(sizeof(struct ks8695eth_priv));
if (!dev)
return -ENOMEM;
/* Get I/O base address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
free_netdev(dev);
return -ENODEV;
}
dev->base_addr = res->start;
lp = (struct ks8695eth_priv *) dev->priv;
/* Retrieve MAC address before the MAC registers are reset */
get_mac_address(dev);
/* Reset the hardware */
ks8695_write(dev, KS8695_WMDTXC, WMDTXC_WMTRST);
while (ks8695_read(dev, KS8695_WMDTXC) & WMDTXC_WMTRST) { barrier(); }
/* Get IRQ's */
dev->irq = platform_get_irq(pdev, 0);
ret = ks8695eth_hook_irqs(pdev, dev, &lp->irqs);
if (ret) {
// Cleanup.
}
/* Allocate DMA-able memory for Tx descriptor */
size = sizeof(struct tx_descriptor) * MAX_TX_DESC;
lp->txdma = dma_alloc_coherent(&pdev->dev, size, &lp->txdma_phys, GFP_KERNEL);
if (lp->txdma == NULL) {
// free IRQs
free_netdev(dev);
return -ENOMEM;
}
memset(lp->txdma, 0, size);
lp->tx_head = 0;
lp->tx_tail = 0;
/* Allocate DMA-able memory for Rx descriptor */
size = sizeof(struct rx_descriptor) * MAX_RX_DESC;
lp->rxdma = dma_alloc_coherent(&pdev->dev, size, &lp->rxdma_phys, GFP_KERNEL);
if (lp->rxdma == NULL) {
// free IRQs
// Free TX descriptor memory.
free_netdev(dev);
return -ENOMEM;
}
memset(lp->rxdma, 0, size);
lp->rx_idx = 0;
/* Allocate DMA-able memory for Rx Data */
for (i = 0; i < MAX_RX_DESC; i++) {
lp->rxSkb[i].skb = alloc_skb(MAX_RXBUF_SIZE, GFP_KERNEL);
if (lp->rxSkb[i].skb == NULL) {
// Cleanup
return -ENOMEM;
}
lp->rxSkb[i].length = MAX_RXBUF_SIZE;
lp->rxSkb[i].dma = dma_map_single(&pdev->dev, lp->rxSkb[i].skb->data, MAX_RXBUF_SIZE, DMA_FROM_DEVICE);
}
spin_lock_init(&lp->tx_lock);
platform_set_drvdata(pdev, dev);
ether_setup(dev);
dev->open = ks8695eth_open;
dev->stop = ks8695eth_close;
dev->hard_start_xmit = ks8695eth_xmit_frame;
dev->get_stats = ks8695eth_stats;
dev->set_multicast_list = ks8695eth_set_multi;
dev->set_mac_address = ks8695eth_set_mac;
dev->ethtool_ops = &ks8695eth_ethtool_ops;
SET_NETDEV_DEV(dev, &pdev->dev);
lp->dev = &pdev->dev;
if (dev->base_addr == KS8695_WAN_VA)
ks8695eth_init_wan();
else if (dev->base_addr == KS8695_LAN_VA)
ks8695eth_init_switch();
/* Register the network interface */
ret = register_netdev(dev);
if (ret) {
// free IRQs
free_netdev(dev);
// dma_free_coherent(&pdev->dev, sizeof(struct ks8695_tx_dma), lp->txdma, lp->txdma_phys);
// dma_free_coherent(&pdev->dev, sizeof(struct ks8695_rx_dma), lp->rxdma, lp->rxdma_phys);
return ret;
}
printk(KERN_INFO "%s: KS8695 ethernet (%s)\n", dev->name, print_mac(mac, dev->dev_addr));
return 0;
}
static int __devexit ks8695eth_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
// struct ks8695eth_priv *lp = (struct ks8695eth_priv *) dev->priv;
unregister_netdev(dev);
// Free IRQ
// dma_free_coherent(&pdev->dev, sizeof(struct ks8695_tx_dma), lp->txdma, lp->txdma_phys);
// dma_free_coherent(&pdev->dev, sizeof(struct ks8695_rx_dma), lp->rxdma, lp->rxdma_phys);
platform_set_drvdata(pdev, NULL);
free_netdev(dev);
return 0;
}
static struct platform_driver ks8695ether_driver = {
.probe = ks8695eth_probe,
.remove = __devexit_p(ks8695eth_remove),
// .suspend =
// .resume =
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
};
static int __init ks8695eth_init(void)
{
return platform_driver_register(&ks8695ether_driver);
}
static void __exit ks8695eth_exit(void)
{
platform_driver_unregister(&ks8695ether_driver);
}
module_init(ks8695eth_init);
module_exit(ks8695eth_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("KS8695 Ethernet driver");
MODULE_AUTHOR("Andrew Victor");
MODULE_ALIAS("platform:" DRV_NAME);
| gpl-2.0 |
anshumang/lammps-analytics | src/USER-CUDA/pair_gran_hooke_cuda.cpp | 7 | 8521 | /* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
Original Version:
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
See the README file in the top-level LAMMPS directory.
-----------------------------------------------------------------------
USER-CUDA Package and associated modifications:
https://sourceforge.net/projects/lammpscuda/
Christian Trott, christian.trott@tu-ilmenau.de
Lars Winterfeld, lars.winterfeld@tu-ilmenau.de
Theoretical Physics II, University of Technology Ilmenau, Germany
See the README file in the USER-CUDA directory.
This software is distributed under the GNU General Public License.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Paul Crozier (SNL)
------------------------------------------------------------------------- */
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "pair_gran_hooke_cuda.h"
#include "pair_gran_hooke_cuda_cu.h"
#include "cuda_data.h"
#include "atom.h"
#include "comm.h"
#include "force.h"
#include "neighbor.h"
#include "neigh_list.h"
#include "neigh_request.h"
#include "modify.h"
#include "fix_pour.h"
#include "cuda_neigh_list.h"
#include "update.h"
#include "integrate.h"
#include "respa.h"
#include "memory.h"
#include "error.h"
#include "cuda.h"
using namespace LAMMPS_NS;
/* ---------------------------------------------------------------------- */
PairGranHookeCuda::PairGranHookeCuda(LAMMPS *lmp) : PairGranHooke(lmp)
{
cuda = lmp->cuda;
if(cuda == NULL)
error->all(FLERR,"You cannot use a /cuda class, without activating 'cuda' acceleration. Provide '-c on' as command-line argument to LAMMPS..");
allocated2 = false;
cuda->shared_data.pair.cudable_force = 1;
cuda->setSystemParams();
}
/* ----------------------------------------------------------------------
remember pointer to arrays in cuda shared data
------------------------------------------------------------------------- */
void PairGranHookeCuda::allocate()
{
if(! allocated) PairGranHooke::allocate();
if(! allocated2)
{
allocated2 = true;
int n = atom->ntypes;
cuda->shared_data.pair.cutsq = cutsq;
memory->create(cuda->shared_data.pair.coeff1,n+1,n+1,
"pair:cuda_coeff1");
memory->create(cuda->shared_data.pair.coeff2,
n+1,n+1,"pair:cuda_coeff2");
cuda->shared_data.pair.coeff1[0][0]=kn;
cuda->shared_data.pair.coeff1[0][1]=kt;
cuda->shared_data.pair.coeff1[1][0]=gamman;
cuda->shared_data.pair.coeff1[1][1]=gammat;
cuda->shared_data.pair.coeff2[0][0]=xmu;
cuda->shared_data.pair.coeff2[0][1]=dampflag;
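// Note (our assumption; the device code is not shown here): the scalar
// Hooke parameters kn/kt/gamman/gammat and xmu/dampflag are stashed in the
// first entries of the per-type coefficient tables, from which the CUDA
// kernel presumably reads them back at fixed indices.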
}
}
/* ---------------------------------------------------------------------- */
void PairGranHookeCuda::compute(int eflag, int vflag)
{
cuda->shared_data.pair.use_block_per_atom = 0;
//cuda->cu_debugdata->memset_device(0);
if (eflag || vflag) ev_setup(eflag,vflag);
if(eflag) cuda->cu_eng_vdwl->upload();
if(vflag) cuda->cu_virial->upload();
Cuda_PairGranHookeCuda(& cuda->shared_data, & cuda_neigh_list->sneighlist, eflag, vflag, eflag_atom, vflag_atom);
if(not cuda->shared_data.pair.collect_forces_later)
{
if(eflag) cuda->cu_eng_vdwl->download();
if(vflag) cuda->cu_virial->download();
}
//cuda->cu_debugdata->download();
//printf("%lf %lf %lf %lf %lf %lf\n",1.0e-6*cuda->debugdata[0],1.0e-6*cuda->debugdata[1],1.0e-6*cuda->debugdata[2],1.0e-6*cuda->debugdata[3],1.0e-6*cuda->debugdata[4],1.0e-6*cuda->debugdata[5]);
}
/* ---------------------------------------------------------------------- */
void PairGranHookeCuda::settings(int narg, char **arg)
{
PairGranHooke::settings(narg, arg);
}
/* ---------------------------------------------------------------------- */
void PairGranHookeCuda::coeff(int narg, char **arg)
{
PairGranHooke::coeff(narg, arg);
allocate();
}
void PairGranHookeCuda::init_style()
{
int i;
MYDBG(printf("# CUDA PairGranHookeCuda::init_style start\n"); )
// request regular or rRESPA neighbor lists
int irequest;
if (update->whichflag == 0 && strstr(update->integrate_style,"respa")) {
}
else
{
irequest = neighbor->request(this);
neighbor->requests[irequest]->full = 1;
neighbor->requests[irequest]->half = 0;
neighbor->requests[irequest]->gran = 1;
neighbor->requests[irequest]->cudable = 1;
//neighbor->style=0; //0=NSQ neighboring
}
if (!atom->radius_flag || !atom->omega_flag || !atom->torque_flag)
error->all(FLERR,"Pair granular requires atom attributes radius, omega, torque");
if (comm->ghost_velocity == 0)
error->all(FLERR,"Pair granular requires ghost atoms store velocity");
// need a half neigh list and optionally a granular history neigh list
dt = update->dt;
// check for Fix freeze and set freeze_group_bit
for (i = 0; i < modify->nfix; i++)
if (strcmp(modify->fix[i]->style,"freeze") == 0) break;
if (i < modify->nfix) freeze_group_bit = modify->fix[i]->groupbit;
else freeze_group_bit = 0;
cuda->shared_data.pair.freeze_group_bit=freeze_group_bit;
// check for Fix pour and set pour_type and pour_maxdiam
int pour_type = 0;
double pour_maxrad = 0.0;
for (i = 0; i < modify->nfix; i++)
if (strcmp(modify->fix[i]->style,"pour") == 0) break;
if (i < modify->nfix) {
pour_type = ((FixPour *) modify->fix[i])->ntype;
pour_maxrad = ((FixPour *) modify->fix[i])->radius_max;
}
// set maxrad_dynamic and maxrad_frozen for each type
// include future Fix pour particles as dynamic
for (i = 1; i <= atom->ntypes; i++)
onerad_dynamic[i] = onerad_frozen[i] = 0.0;
if (pour_type) onerad_dynamic[pour_type] = pour_maxrad;
double *radius = atom->radius;
int *mask = atom->mask;
int *type = atom->type;
int nlocal = atom->nlocal;
for (i = 0; i < nlocal; i++){
if (mask[i] & freeze_group_bit)
onerad_frozen[type[i]] = MAX(onerad_frozen[type[i]],radius[i]);
else
onerad_dynamic[type[i]] = MAX(onerad_dynamic[type[i]],radius[i]);
}
MPI_Allreduce(&onerad_dynamic[1],&maxrad_dynamic[1],atom->ntypes,
MPI_DOUBLE,MPI_MAX,world);
MPI_Allreduce(&onerad_frozen[1],&maxrad_frozen[1],atom->ntypes,
MPI_DOUBLE,MPI_MAX,world);
MYDBG(printf("# CUDA PairGranHookeCuda::init_style end\n"); )
}
void PairGranHookeCuda::init_list(int id, NeighList *ptr)
{
MYDBG(printf("# CUDA PairGranHookeCuda::init_list\n");)
PairGranHooke::init_list(id, ptr);
#ifndef CUDA_USE_BINNING
// right now we can only handle verlet (id 0), not respa
if(id == 0) cuda_neigh_list = cuda->registerNeighborList(ptr);
// see Neighbor::init() for details on lammps lists' logic
#endif
MYDBG(printf("# CUDA PairGranHookeCuda::init_list end\n");)
}
void PairGranHookeCuda::ev_setup(int eflag, int vflag)
{
int maxeatomold=maxeatom;
PairGranHooke::ev_setup(eflag,vflag);
if (eflag_atom && atom->nmax > maxeatomold)
{delete cuda->cu_eatom; cuda->cu_eatom = new cCudaData<double, ENERGY_FLOAT, x > ((double*)eatom, & cuda->shared_data.atom.eatom , atom->nmax );}
if (vflag_atom && atom->nmax > maxeatomold)
{delete cuda->cu_vatom; cuda->cu_vatom = new cCudaData<double, ENERGY_FLOAT, yx > ((double*)vatom, & cuda->shared_data.atom.vatom , atom->nmax, 6 );}
}
| gpl-2.0 |
rutgers-apl/Atomicity-Violation-Detector | tbb-lib/examples/parallel_for/tachyon/src/objbound.cpp | 7 | 10317 | /*
Copyright 2005-2014 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
version 2 as published by the Free Software Foundation. Threading Building Blocks is
distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details. You should have received a copy of
the GNU General Public License along with Threading Building Blocks; if not, write to the
Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
As a special exception, you may use this file as part of a free software library without
restriction. Specifically, if other files instantiate templates or use macros or inline
functions from this file, or you compile this file and link it with other files to produce
an executable, this file does not by itself cause the resulting executable to be covered
by the GNU General Public License. This exception does not however invalidate any other
reasons why the executable file might be covered by the GNU General Public License.
*/
/*
The original source for this example is
Copyright (c) 1994-2008 John E. Stone
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
*/
/*
* objbound.cpp - This file contains the functions to find bounding boxes
* for the various primitives
*/
#include "machine.h"
#include "types.h"
#include "macros.h"
#include "bndbox.h"
#define OBJBOUND_PRIVATE
#include "objbound.h"
static void globalbound(object ** rootlist, vector * gmin, vector * gmax) {
vector min, max;
object * cur;
if (*rootlist == NULL) /* don't bound non-existent objects */
return;
gmin->x = FHUGE; gmin->y = FHUGE; gmin->z = FHUGE;
gmax->x = -FHUGE; gmax->y = -FHUGE; gmax->z = -FHUGE;
cur=*rootlist;
while (cur != NULL) { /* Go! */
min.x = -FHUGE; min.y = -FHUGE; min.z = -FHUGE;
max.x = FHUGE; max.y = FHUGE; max.z = FHUGE;
cur->methods->bbox((void *) cur, &min, &max);
gmin->x = MYMIN( gmin->x , min.x);
gmin->y = MYMIN( gmin->y , min.y);
gmin->z = MYMIN( gmin->z , min.z);
gmax->x = MYMAX( gmax->x , max.x);
gmax->y = MYMAX( gmax->y , max.y);
gmax->z = MYMAX( gmax->z , max.z);
cur=(object *)cur->nextobj;
}
}
static int objinside(object * obj, vector * min, vector * max) {
vector omin, omax;
if (obj == NULL) /* non-existent object, shouldn't get here */
return 0;
if (obj->methods->bbox((void *) obj, &omin, &omax)) {
if ((min->x <= omin.x) && (min->y <= omin.y) && (min->z <= omin.z) &&
(max->x >= omax.x) && (max->y >= omax.y) && (max->z >= omax.z)) {
return 1;
}
}
return 0;
}
static int countobj(object * root) {
object * cur; /* counts the number of objects on a list */
int numobj;
numobj=0;
cur=root;
while (cur != NULL) {
cur=(object *)cur->nextobj;
numobj++;
}
return numobj;
}
static void movenextobj(object * thisobj, object ** root) {
object * cur, * tmp;
/* move the object after thisobj to the front of the object list */
/* headed by root */
if (thisobj != NULL) {
if (thisobj->nextobj != NULL) {
cur=(object *)thisobj->nextobj; /* the object to be moved */
thisobj->nextobj = cur->nextobj; /* link around the moved obj */
tmp=*root; /* store the root node */
cur->nextobj=tmp; /* attach root to cur */
*root=cur; /* make cur, the new root */
}
}
}
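/*
 * Illustrative example (comment added, not in the original source): given
 * a list A -> B -> C and a second list headed by root R,
 * movenextobj(A, &root) unlinks B (leaving A -> C) and pushes it onto the
 * front of the other list (B -> R -> ...). The caller's iterator can stay
 * on A and test the new A->nextobj on the next pass, which is exactly how
 * the sorting loop in octreespace() below uses it.
 */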
static void octreespace(object ** rootlist, int maxoctnodes) {
object * cur;
vector gmin, gmax, gctr;
vector cmin1, cmin2, cmin3, cmin4, cmin5, cmin6, cmin7, cmin8;
vector cmax1, cmax2, cmax3, cmax4, cmax5, cmax6, cmax7, cmax8;
bndbox * box1, * box2, * box3, * box4;
bndbox * box5, * box6, * box7, * box8;
int skipobj;
if (*rootlist == NULL) /* don't subdivide non-existent data */
return;
skipobj=0;
globalbound(rootlist, &gmin, &gmax); /* find global min and max */
gctr.x = ((gmax.x - gmin.x) / 2.0) + gmin.x;
gctr.y = ((gmax.y - gmin.y) / 2.0) + gmin.y;
gctr.z = ((gmax.z - gmin.z) / 2.0) + gmin.z;
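/*
 * Worked example (illustrative only): for gmin=(0,0,0) and gmax=(4,4,4)
 * the midpoint is gctr=(2,2,2); it is the corner shared by all eight
 * octant boxes constructed below.
 */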
cmin1=gmin;
cmax1=gctr;
box1 = newbndbox(cmin1, cmax1);
cmin2=gmin;
cmin2.x=gctr.x;
cmax2=gmax;
cmax2.y=gctr.y;
cmax2.z=gctr.z;
box2 = newbndbox(cmin2, cmax2);
cmin3=gmin;
cmin3.y=gctr.y;
cmax3=gmax;
cmax3.x=gctr.x;
cmax3.z=gctr.z;
box3 = newbndbox(cmin3, cmax3);
cmin4=gmin;
cmin4.x=gctr.x;
cmin4.y=gctr.y;
cmax4=gmax;
cmax4.z=gctr.z;
box4 = newbndbox(cmin4, cmax4);
cmin5=gmin;
cmin5.z=gctr.z;
cmax5=gctr;
cmax5.z=gmax.z;
box5 = newbndbox(cmin5, cmax5);
cmin6=gctr;
cmin6.y=gmin.y;
cmax6=gmax;
cmax6.y=gctr.y;
box6 = newbndbox(cmin6, cmax6);
cmin7=gctr;
cmin7.x=gmin.x;
cmax7=gctr;
cmax7.y=gmax.y;
cmax7.z=gmax.z;
box7 = newbndbox(cmin7, cmax7);
cmin8=gctr;
cmax8=gmax;
box8 = newbndbox(cmin8, cmax8);
cur = *rootlist;
while (cur != NULL) {
if (objinside((object *)cur->nextobj, &cmin1, &cmax1)) {
movenextobj(cur, &box1->objlist);
}
else if (objinside((object *)cur->nextobj, &cmin2, &cmax2)) {
movenextobj(cur, &box2->objlist);
}
else if (objinside((object *)cur->nextobj, &cmin3, &cmax3)) {
movenextobj(cur, &box3->objlist);
}
else if (objinside((object *)cur->nextobj, &cmin4, &cmax4)) {
movenextobj(cur, &box4->objlist);
}
else if (objinside((object *)cur->nextobj, &cmin5, &cmax5)) {
movenextobj(cur, &box5->objlist);
}
else if (objinside((object *)cur->nextobj, &cmin6, &cmax6)) {
movenextobj(cur, &box6->objlist);
}
else if (objinside((object *)cur->nextobj, &cmin7, &cmax7)) {
movenextobj(cur, &box7->objlist);
}
else if (objinside((object *)cur->nextobj, &cmin8, &cmax8)) {
movenextobj(cur, &box8->objlist);
}
else {
skipobj++;
cur=(object *)cur->nextobj;
}
}
/* new scope for redefinition of cur and old */
{ bndbox * cur, * old;
old=box1;
cur=box2;
if (countobj(cur->objlist) > 0) {
old->nextobj=cur;
globalbound(&cur->objlist, &cur->min, &cur->max);
old=cur;
}
cur=box3;
if (countobj(cur->objlist) > 0) {
old->nextobj=cur;
globalbound(&cur->objlist, &cur->min, &cur->max);
old=cur;
}
cur=box4;
if (countobj(cur->objlist) > 0) {
old->nextobj=cur;
globalbound(&cur->objlist, &cur->min, &cur->max);
old=cur;
}
cur=box5;
if (countobj(cur->objlist) > 0) {
old->nextobj=cur;
globalbound(&cur->objlist, &cur->min, &cur->max);
old=cur;
}
cur=box6;
if (countobj(cur->objlist) > 0) {
old->nextobj=cur;
globalbound(&cur->objlist, &cur->min, &cur->max);
old=cur;
}
cur=box7;
if (countobj(cur->objlist) > 0) {
old->nextobj=cur;
globalbound(&cur->objlist, &cur->min, &cur->max);
old=cur;
}
cur=box8;
if (countobj(cur->objlist) > 0) {
old->nextobj=cur;
globalbound(&cur->objlist, &cur->min, &cur->max);
old=cur;
}
old->nextobj=*rootlist;
if (countobj(box1->objlist) > 0) {
globalbound(&box1->objlist, &box1->min, &box1->max);
*rootlist=(object *) box1;
}
else {
*rootlist=(object *) box1->nextobj;
}
} /**** end of special cur and old scope */
if (countobj(box1->objlist) > maxoctnodes) {
octreespace(&box1->objlist, maxoctnodes);
}
if (countobj(box2->objlist) > maxoctnodes) {
octreespace(&box2->objlist, maxoctnodes);
}
if (countobj(box3->objlist) > maxoctnodes) {
octreespace(&box3->objlist, maxoctnodes);
}
if (countobj(box4->objlist) > maxoctnodes) {
octreespace(&box4->objlist, maxoctnodes);
}
if (countobj(box5->objlist) > maxoctnodes) {
octreespace(&box5->objlist, maxoctnodes);
}
if (countobj(box6->objlist) > maxoctnodes) {
octreespace(&box6->objlist, maxoctnodes);
}
if (countobj(box7->objlist) > maxoctnodes) {
octreespace(&box7->objlist, maxoctnodes);
}
if (countobj(box8->objlist) > maxoctnodes) {
octreespace(&box8->objlist, maxoctnodes);
}
}
void dividespace(int maxoctnodes, object **toplist) {
bndbox * gbox;
vector gmin, gmax;
if (countobj(*toplist) > maxoctnodes) {
globalbound(toplist, &gmin, &gmax);
octreespace(toplist, maxoctnodes);
gbox = newbndbox(gmin, gmax);
gbox->objlist = NULL;
gbox->tex = NULL;
gbox->nextobj=NULL;
gbox->objlist=*toplist;
*toplist=(object *) gbox;
}
}
| gpl-2.0 |
cvmfs/cctools | dttools/src/stats.c | 7 | 2213 | /*
Copyright (C) 2017- The University of Notre Dame
This software is distributed under the GNU General Public License.
See the file COPYING for details.
*/
#include <assert.h>
#include <inttypes.h>
#include "stats.h"
#include "hash_table.h"
#include "xxmalloc.h"
static struct hash_table *stats = NULL;
static int stats_enabled = 0;
typedef enum {
STATS_INT,
STATS_BIN,
} stats_type_t;
typedef struct {
stats_type_t type;
union {
int64_t value;
unsigned buckets[64];
} v;
} stats_t;
static void stats_init (void) {
if (!stats) {
stats = hash_table_create(0, 0);
}
}
static stats_t *stats_touch (const char *name, stats_type_t type) {
assert(name);
stats_init();
stats_t *s = hash_table_lookup(stats, name);
if (s) {
assert(s->type == type);
} else {
s = xxcalloc(1, sizeof(*s));
s->type = type;
int rc = hash_table_insert(stats, name, s);
assert(rc);
}
return s;
}
static size_t log2b(uint64_t n) {
size_t i = 0;
while (n >>= 1) ++i;
return i;
}
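/*
 * Illustrative values (comment added, not in the original source):
 * log2b(1) == 0, log2b(3) == 1, log2b(1024) == 10 -- i.e. floor(log2(n)) --
 * and log2b(0) == 0 by construction, so every uint64_t value maps into one
 * of the 64 histogram buckets used by stats_bin() below.
 */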
void stats_enable () {
stats_enabled = 1;
}
void stats_unset (const char *name) {
if (!stats_enabled) return;
assert(name);
stats_init();
free(hash_table_remove(stats, name));
}
void stats_set (const char *name, int64_t value) {
if (!stats_enabled) return;
stats_t *s = stats_touch(name, STATS_INT);
s->v.value = value;
}
void stats_inc (const char *name, int64_t offset) {
if (!stats_enabled) return;
stats_t *s = stats_touch(name, STATS_INT);
s->v.value += offset;
}
void stats_bin (const char *name, uint64_t value) {
if (!stats_enabled) return;
stats_t *s = stats_touch(name, STATS_BIN);
++s->v.buckets[log2b(value)];
}
struct jx *stats_get (void) {
if (!stats_enabled) return jx_null();
char *k;
stats_t *s;
struct jx *out = jx_object(NULL);
struct jx *log;
stats_init();
hash_table_firstkey(stats);
while (hash_table_nextkey(stats, &k, (void **) &s)) {
switch (s->type) {
case STATS_INT:
jx_insert_integer(out, k, s->v.value);
break;
case STATS_BIN:
log = jx_array(NULL);
for (size_t i = 0; i < 64; i++) {
jx_array_append(log, jx_integer(s->v.buckets[i]));
}
jx_insert(out, jx_string(k), log);
break;
}
}
return out;
}
/* vim: set noexpandtab tabstop=4: */
| gpl-2.0 |
richardtrip/GT-P1000_Kernel_Gingerbread | arch/powerpc/platforms/512x/mpc5121_generic.c | 775 | 1248 | /*
* Copyright (C) 2007,2008 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: John Rigby, <jrigby@freescale.com>
*
* Description:
* MPC5121 SoC setup
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/of_platform.h>
#include <asm/machdep.h>
#include <asm/ipic.h>
#include <asm/prom.h>
#include <asm/time.h>
#include "mpc512x.h"
/*
* list of supported boards
*/
static char *board[] __initdata = {
"prt,prtlvt",
NULL
};
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
static int __init mpc5121_generic_probe(void)
{
unsigned long node = of_get_flat_dt_root();
int i = 0;
while (board[i]) {
if (of_flat_dt_is_compatible(node, board[i]))
break;
i++;
}
return board[i] != NULL;
}
define_machine(mpc5121_generic) {
.name = "MPC5121 generic",
.probe = mpc5121_generic_probe,
.init = mpc512x_init,
.init_IRQ = mpc512x_init_IRQ,
.get_irq = ipic_get_irq,
.calibrate_decr = generic_calibrate_decr,
.restart = mpc512x_restart,
};
| gpl-2.0 |
morely/linux-xlnx | drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 1031 | 5324 | /*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include "ipoib.h"
static ssize_t show_parent(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_device *dev = to_net_dev(d);
struct ipoib_dev_priv *priv = netdev_priv(dev);
return sprintf(buf, "%s\n", priv->parent->name);
}
static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);
int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
u16 pkey, int type)
{
int result;
priv->max_ib_mtu = ppriv->max_ib_mtu;
/* MTU will be reset when mcast join happens */
priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
result = ipoib_set_dev_features(priv, ppriv->ca);
if (result)
goto err;
priv->pkey = pkey;
memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
priv->dev->broadcast[8] = pkey >> 8;
priv->dev->broadcast[9] = pkey & 0xff;
result = ipoib_dev_init(priv->dev, ppriv->ca, ppriv->port);
if (result < 0) {
ipoib_warn(ppriv, "failed to initialize subinterface: "
"device %s, port %d",
ppriv->ca->name, ppriv->port);
goto err;
}
result = register_netdevice(priv->dev);
if (result) {
ipoib_warn(priv, "failed to initialize; error %i", result);
goto register_failed;
}
priv->parent = ppriv->dev;
ipoib_create_debug_files(priv->dev);
/* RTNL children don't need proprietary sysfs entries */
if (type == IPOIB_LEGACY_CHILD) {
if (ipoib_cm_add_mode_attr(priv->dev))
goto sysfs_failed;
if (ipoib_add_pkey_attr(priv->dev))
goto sysfs_failed;
if (ipoib_add_umcast_attr(priv->dev))
goto sysfs_failed;
if (device_create_file(&priv->dev->dev, &dev_attr_parent))
goto sysfs_failed;
}
priv->child_type = type;
priv->dev->iflink = ppriv->dev->ifindex;
list_add_tail(&priv->list, &ppriv->child_intfs);
return 0;
sysfs_failed:
result = -ENOMEM;
ipoib_delete_debug_files(priv->dev);
unregister_netdevice(priv->dev);
register_failed:
ipoib_dev_cleanup(priv->dev);
err:
return result;
}
int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
{
struct ipoib_dev_priv *ppriv, *priv;
char intf_name[IFNAMSIZ];
struct ipoib_dev_priv *tpriv;
int result;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
ppriv = netdev_priv(pdev);
snprintf(intf_name, sizeof intf_name, "%s.%04x",
ppriv->dev->name, pkey);
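/*
 * Illustrative example (comment added, values hypothetical): a parent
 * device named "ib0" with pkey 0x8001 yields the child interface name
 * "ib0.8001".
 */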
priv = ipoib_intf_alloc(intf_name);
if (!priv)
return -ENOMEM;
if (!rtnl_trylock()) {
/* don't leak the interface allocated above when restarting */
free_netdev(priv->dev);
return restart_syscall();
}
down_write(&ppriv->vlan_rwsem);
/*
* First ensure this isn't a duplicate. We check the parent device and
* then all of the legacy child interfaces to make sure the Pkey
* doesn't match.
*/
if (ppriv->pkey == pkey) {
result = -ENOTUNIQ;
goto out;
}
list_for_each_entry(tpriv, &ppriv->child_intfs, list) {
if (tpriv->pkey == pkey &&
tpriv->child_type == IPOIB_LEGACY_CHILD) {
result = -ENOTUNIQ;
goto out;
}
}
result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
out:
up_write(&ppriv->vlan_rwsem);
if (result)
free_netdev(priv->dev);
rtnl_unlock();
return result;
}
int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
{
struct ipoib_dev_priv *ppriv, *priv, *tpriv;
struct net_device *dev = NULL;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
ppriv = netdev_priv(pdev);
if (!rtnl_trylock())
return restart_syscall();
down_write(&ppriv->vlan_rwsem);
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
unregister_netdevice(priv->dev);
list_del(&priv->list);
dev = priv->dev;
break;
}
}
up_write(&ppriv->vlan_rwsem);
rtnl_unlock();
if (dev) {
free_netdev(dev);
return 0;
}
return -ENODEV;
}
| gpl-2.0 |
f123h456/linux | drivers/char/agp/ati-agp.c | 1287 | 14811 | /*
* ATi AGPGART routines.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/agp_backend.h>
#include <asm/agp.h>
#include "agp.h"
#define ATI_GART_MMBASE_BAR 1
#define ATI_RS100_APSIZE 0xac
#define ATI_RS100_IG_AGPMODE 0xb0
#define ATI_RS300_APSIZE 0xf8
#define ATI_RS300_IG_AGPMODE 0xfc
#define ATI_GART_FEATURE_ID 0x00
#define ATI_GART_BASE 0x04
#define ATI_GART_CACHE_SZBASE 0x08
#define ATI_GART_CACHE_CNTRL 0x0c
#define ATI_GART_CACHE_ENTRY_CNTRL 0x10
static const struct aper_size_info_lvl2 ati_generic_sizes[7] =
{
{2048, 524288, 0x0000000c},
{1024, 262144, 0x0000000a},
{512, 131072, 0x00000008},
{256, 65536, 0x00000006},
{128, 32768, 0x00000004},
{64, 16384, 0x00000002},
{32, 8192, 0x00000000}
};
static struct gatt_mask ati_generic_masks[] =
{
{ .mask = 1, .type = 0}
};
struct ati_page_map {
unsigned long *real;
unsigned long __iomem *remapped;
};
static struct _ati_generic_private {
volatile u8 __iomem *registers;
struct ati_page_map **gatt_pages;
int num_tables;
} ati_generic_private;
static int ati_create_page_map(struct ati_page_map *page_map)
{
int i, err = 0;
page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
if (page_map->real == NULL)
return -ENOMEM;
set_memory_uc((unsigned long)page_map->real, 1);
err = map_page_into_agp(virt_to_page(page_map->real));
if (err) {
/* undo set_memory_uc() and release the page on failure */
set_memory_wb((unsigned long)page_map->real, 1);
free_page((unsigned long) page_map->real);
return err;
}
page_map->remapped = page_map->real;
for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
writel(agp_bridge->scratch_page, page_map->remapped+i);
readl(page_map->remapped+i); /* PCI Posting. */
}
return 0;
}
static void ati_free_page_map(struct ati_page_map *page_map)
{
unmap_page_from_agp(virt_to_page(page_map->real));
set_memory_wb((unsigned long)page_map->real, 1);
free_page((unsigned long) page_map->real);
}
static void ati_free_gatt_pages(void)
{
int i;
struct ati_page_map **tables;
struct ati_page_map *entry;
tables = ati_generic_private.gatt_pages;
for (i = 0; i < ati_generic_private.num_tables; i++) {
entry = tables[i];
if (entry != NULL) {
if (entry->real != NULL)
ati_free_page_map(entry);
kfree(entry);
}
}
kfree(tables);
}
static int ati_create_gatt_pages(int nr_tables)
{
struct ati_page_map **tables;
struct ati_page_map *entry;
int retval = 0;
int i;
tables = kzalloc((nr_tables + 1) * sizeof(struct ati_page_map *), GFP_KERNEL);
if (tables == NULL)
return -ENOMEM;
for (i = 0; i < nr_tables; i++) {
entry = kzalloc(sizeof(struct ati_page_map), GFP_KERNEL);
tables[i] = entry;
if (entry == NULL) {
retval = -ENOMEM;
break;
}
retval = ati_create_page_map(entry);
if (retval != 0)
break;
}
ati_generic_private.num_tables = i;
ati_generic_private.gatt_pages = tables;
if (retval != 0)
ati_free_gatt_pages();
return retval;
}
static int is_r200(void)
{
if ((agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS100) ||
(agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200) ||
(agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200_B) ||
(agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS250))
return 1;
return 0;
}
static int ati_fetch_size(void)
{
int i;
u32 temp;
struct aper_size_info_lvl2 *values;
if (is_r200())
pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
else
pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
temp = (temp & 0x0000000e);
values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
if (temp == values[i].size_value) {
agp_bridge->previous_size =
agp_bridge->current_size = (void *) (values + i);
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
}
return 0;
}
static void ati_tlbflush(struct agp_memory * mem)
{
writel(1, ati_generic_private.registers+ATI_GART_CACHE_CNTRL);
readl(ati_generic_private.registers+ATI_GART_CACHE_CNTRL); /* PCI Posting. */
}
static void ati_cleanup(void)
{
struct aper_size_info_lvl2 *previous_size;
u32 temp;
previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
/* Write back the previous size and disable gart translation */
if (is_r200()) {
pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp);
} else {
pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp);
}
iounmap((volatile u8 __iomem *)ati_generic_private.registers);
}
static int ati_configure(void)
{
phys_addr_t reg;
u32 temp;
/* Get the memory mapped registers */
reg = pci_resource_start(agp_bridge->dev, ATI_GART_MMBASE_BAR);
ati_generic_private.registers = (volatile u8 __iomem *) ioremap(reg, 4096);
if (!ati_generic_private.registers)
return -ENOMEM;
if (is_r200())
pci_write_config_dword(agp_bridge->dev, ATI_RS100_IG_AGPMODE, 0x20000);
else
pci_write_config_dword(agp_bridge->dev, ATI_RS300_IG_AGPMODE, 0x20000);
/* address to map to */
/*
agp_bridge.gart_bus_addr = pci_bus_address(agp_bridge.dev,
AGP_APERTURE_BAR);
printk(KERN_INFO PFX "IGP320 gart_bus_addr: %x\n", agp_bridge.gart_bus_addr);
*/
writel(0x60000, ati_generic_private.registers+ATI_GART_FEATURE_ID);
readl(ati_generic_private.registers+ATI_GART_FEATURE_ID); /* PCI Posting.*/
/* SIGNALED_SYSTEM_ERROR @ NB_STATUS */
pci_read_config_dword(agp_bridge->dev, PCI_COMMAND, &temp);
pci_write_config_dword(agp_bridge->dev, PCI_COMMAND, temp | (1<<14));
/* Write out the address of the gatt table */
writel(agp_bridge->gatt_bus_addr, ati_generic_private.registers+ATI_GART_BASE);
readl(ati_generic_private.registers+ATI_GART_BASE); /* PCI Posting. */
return 0;
}
#ifdef CONFIG_PM
static int agp_ati_suspend(struct pci_dev *dev, pm_message_t state)
{
pci_save_state(dev);
pci_set_power_state(dev, PCI_D3hot);
return 0;
}
static int agp_ati_resume(struct pci_dev *dev)
{
pci_set_power_state(dev, PCI_D0);
pci_restore_state(dev);
return ati_configure();
}
#endif
/*
* Since we don't need contiguous memory we just try
* to get the gatt table once
*/
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#undef GET_GATT
#define GET_GATT(addr) (ati_generic_private.gatt_pages[\
GET_PAGE_DIR_IDX(addr)]->remapped)
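/*
 * Worked example (comment added, not in the original source, assuming the
 * aperture base is 4 MiB aligned): each page-directory entry covers 4 MiB
 * (22 address bits) and each GATT entry one 4 KiB page (12 bits). So for
 * addr = agp_bridge->gart_bus_addr + 0x00401000,
 * GET_PAGE_DIR_IDX(addr) == 1 (second GATT page) and
 * GET_GATT_OFF(addr) == 1 (second entry within that page).
 */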
static int ati_insert_memory(struct agp_memory * mem,
off_t pg_start, int type)
{
int i, j, num_entries;
unsigned long __iomem *cur_gatt;
unsigned long addr;
int mask_type;
num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
if (mask_type != 0 || type != mem->type)
return -EINVAL;
if (mem->page_count == 0)
return 0;
if ((pg_start + mem->page_count) > num_entries)
return -EINVAL;
j = pg_start;
while (j < (pg_start + mem->page_count)) {
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
if (!PGE_EMPTY(agp_bridge,readl(cur_gatt+GET_GATT_OFF(addr))))
return -EBUSY;
j++;
}
if (!mem->is_flushed) {
/*CACHE_FLUSH(); */
global_cache_flush();
mem->is_flushed = true;
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_bridge->driver->mask_memory(agp_bridge,
page_to_phys(mem->pages[i]),
mem->type),
cur_gatt+GET_GATT_OFF(addr));
}
readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */
agp_bridge->driver->tlb_flush(mem);
return 0;
}
static int ati_remove_memory(struct agp_memory * mem, off_t pg_start,
int type)
{
int i;
unsigned long __iomem *cur_gatt;
unsigned long addr;
int mask_type;
mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
if (mask_type != 0 || type != mem->type)
return -EINVAL;
if (mem->page_count == 0)
return 0;
for (i = pg_start; i < (mem->page_count + pg_start); i++) {
addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
}
readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */
agp_bridge->driver->tlb_flush(mem);
return 0;
}
static int ati_create_gatt_table(struct agp_bridge_data *bridge)
{
struct aper_size_info_lvl2 *value;
struct ati_page_map page_dir;
unsigned long __iomem *cur_gatt;
unsigned long addr;
int retval;
u32 temp;
int i;
struct aper_size_info_lvl2 *current_size;
value = A_SIZE_LVL2(agp_bridge->current_size);
retval = ati_create_page_map(&page_dir);
if (retval != 0)
return retval;
retval = ati_create_gatt_pages(value->num_entries / 1024);
if (retval != 0) {
ati_free_page_map(&page_dir);
return retval;
}
agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped;
agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
/* Write out the size register */
current_size = A_SIZE_LVL2(agp_bridge->current_size);
if (is_r200()) {
pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
temp = (((temp & ~(0x0000000e)) | current_size->size_value)
| 0x00000001);
pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp);
pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
} else {
pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
temp = (((temp & ~(0x0000000e)) | current_size->size_value)
| 0x00000001);
pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp);
pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
}
/*
* Get the address for the gart region.
* This is a bus address even on the alpha, b/c it's
* used to program the agp master, not the cpu
*/
addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR);
agp_bridge->gart_bus_addr = addr;
/* Calculate the agp offset */
for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
writel(virt_to_phys(ati_generic_private.gatt_pages[i]->real) | 1,
page_dir.remapped+GET_PAGE_DIR_OFF(addr));
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
for (i = 0; i < value->num_entries; i++) {
addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
}
return 0;
}
static int ati_free_gatt_table(struct agp_bridge_data *bridge)
{
struct ati_page_map page_dir;
page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
ati_free_gatt_pages();
ati_free_page_map(&page_dir);
return 0;
}
static const struct agp_bridge_driver ati_generic_bridge = {
.owner = THIS_MODULE,
.aperture_sizes = ati_generic_sizes,
.size_type = LVL2_APER_SIZE,
.num_aperture_sizes = 7,
.needs_scratch_page = true,
.configure = ati_configure,
.fetch_size = ati_fetch_size,
.cleanup = ati_cleanup,
.tlb_flush = ati_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = ati_generic_masks,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = ati_create_gatt_table,
.free_gatt_table = ati_free_gatt_table,
.insert_memory = ati_insert_memory,
.remove_memory = ati_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_device_ids ati_agp_device_ids[] =
{
{
.device_id = PCI_DEVICE_ID_ATI_RS100,
.chipset_name = "IGP320/M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS200,
.chipset_name = "IGP330/340/345/350/M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS200_B,
.chipset_name = "IGP345M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS250,
.chipset_name = "IGP7000/M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS300_100,
.chipset_name = "IGP9100/M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS300_133,
.chipset_name = "IGP9100/M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS300_166,
.chipset_name = "IGP9100/M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS300_200,
.chipset_name = "IGP9100/M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS350_133,
.chipset_name = "IGP9000/M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS350_200,
.chipset_name = "IGP9100/M",
},
{ }, /* dummy final entry, always present */
};
static int agp_ati_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct agp_device_ids *devs = ati_agp_device_ids;
struct agp_bridge_data *bridge;
u8 cap_ptr;
int j;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return -ENODEV;
/* probe for known chipsets */
for (j = 0; devs[j].chipset_name; j++) {
if (pdev->device == devs[j].device_id)
goto found;
}
dev_err(&pdev->dev, "unsupported Ati chipset [%04x/%04x]\n",
pdev->vendor, pdev->device);
return -ENODEV;
found:
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
bridge->dev = pdev;
bridge->capndx = cap_ptr;
bridge->driver = &ati_generic_bridge;
dev_info(&pdev->dev, "Ati %s chipset\n", devs[j].chipset_name);
/* Fill in the mode register */
pci_read_config_dword(pdev,
bridge->capndx+PCI_AGP_STATUS,
&bridge->mode);
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
}
static void agp_ati_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
}
static struct pci_device_id agp_ati_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_ATI,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ }
};
MODULE_DEVICE_TABLE(pci, agp_ati_pci_table);
static struct pci_driver agp_ati_pci_driver = {
.name = "agpgart-ati",
.id_table = agp_ati_pci_table,
.probe = agp_ati_probe,
.remove = agp_ati_remove,
#ifdef CONFIG_PM
.suspend = agp_ati_suspend,
.resume = agp_ati_resume,
#endif
};
static int __init agp_ati_init(void)
{
if (agp_off)
return -EINVAL;
return pci_register_driver(&agp_ati_pci_driver);
}
static void __exit agp_ati_cleanup(void)
{
pci_unregister_driver(&agp_ati_pci_driver);
}
module_init(agp_ati_init);
module_exit(agp_ati_cleanup);
MODULE_AUTHOR("Dave Jones");
MODULE_LICENSE("GPL and additional rights");
| gpl-2.0 |
HridayHS/Lightning-Kernel | drivers/hid/hid-roccat-common.c | 1799 | 3215 | /*
* Roccat common functions for device specific drivers
*
* Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/hid.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "hid-roccat-common.h"
static inline uint16_t roccat_common2_feature_report(uint8_t report_id)
{
return 0x300 | report_id;
}
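/*
 * Illustrative note (comment added, not in the original source): the
 * wValue of a HID GET_/SET_REPORT control request carries the report type
 * in its high byte (0x03 == feature) and the report id in its low byte,
 * so e.g. roccat_common2_feature_report(0x04) == 0x0304.
 */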
int roccat_common2_receive(struct usb_device *usb_dev, uint report_id,
void *data, uint size)
{
char *buf;
int len;
buf = kmalloc(size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
HID_REQ_GET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
roccat_common2_feature_report(report_id),
0, buf, size, USB_CTRL_SET_TIMEOUT);
memcpy(data, buf, size);
kfree(buf);
return ((len < 0) ? len : ((len != size) ? -EIO : 0));
}
EXPORT_SYMBOL_GPL(roccat_common2_receive);
int roccat_common2_send(struct usb_device *usb_dev, uint report_id,
void const *data, uint size)
{
char *buf;
int len;
buf = kmemdup(data, size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
HID_REQ_SET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
roccat_common2_feature_report(report_id),
0, buf, size, USB_CTRL_SET_TIMEOUT);
kfree(buf);
return ((len < 0) ? len : ((len != size) ? -EIO : 0));
}
EXPORT_SYMBOL_GPL(roccat_common2_send);
enum roccat_common2_control_states {
ROCCAT_COMMON_CONTROL_STATUS_CRITICAL = 0,
ROCCAT_COMMON_CONTROL_STATUS_OK = 1,
ROCCAT_COMMON_CONTROL_STATUS_INVALID = 2,
ROCCAT_COMMON_CONTROL_STATUS_BUSY = 3,
ROCCAT_COMMON_CONTROL_STATUS_CRITICAL_NEW = 4,
};
static int roccat_common2_receive_control_status(struct usb_device *usb_dev)
{
int retval;
struct roccat_common2_control control;
do {
msleep(50);
retval = roccat_common2_receive(usb_dev,
ROCCAT_COMMON_COMMAND_CONTROL,
&control, sizeof(struct roccat_common2_control));
if (retval)
return retval;
switch (control.value) {
case ROCCAT_COMMON_CONTROL_STATUS_OK:
return 0;
case ROCCAT_COMMON_CONTROL_STATUS_BUSY:
msleep(500);
continue;
case ROCCAT_COMMON_CONTROL_STATUS_INVALID:
case ROCCAT_COMMON_CONTROL_STATUS_CRITICAL:
case ROCCAT_COMMON_CONTROL_STATUS_CRITICAL_NEW:
return -EINVAL;
default:
dev_err(&usb_dev->dev,
"roccat_common2_receive_control_status: "
"unknown response value 0x%x\n",
control.value);
return -EINVAL;
}
} while (1);
}
int roccat_common2_send_with_status(struct usb_device *usb_dev,
uint command, void const *buf, uint size)
{
int retval;
retval = roccat_common2_send(usb_dev, command, buf, size);
if (retval)
return retval;
msleep(100);
return roccat_common2_receive_control_status(usb_dev);
}
EXPORT_SYMBOL_GPL(roccat_common2_send_with_status);
MODULE_AUTHOR("Stefan Achatz");
MODULE_DESCRIPTION("USB Roccat common driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
thanhnhiel/linux-emcraft | arch/mips/math-emu/dp_add.c | 1799 | 4700 | /* IEEE754 floating point arithmetic
* double precision: common utilities
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk
*
* ########################################################################
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* ########################################################################
*
*/
#include "ieee754dp.h"
ieee754dp ieee754dp_add(ieee754dp x, ieee754dp y)
{
COMPXDP;
COMPYDP;
EXPLODEXDP;
EXPLODEYDP;
CLEARCX;
FLUSHXDP;
FLUSHYDP;
switch (CLPAIR(xc, yc)) {
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
SETCX(IEEE754_INVALID_OPERATION);
return ieee754dp_nanxcpt(ieee754dp_indef(), "add", x, y);
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return y;
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
return x;
/* Infinity handling
*/
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
if (xs == ys)
return x;
SETCX(IEEE754_INVALID_OPERATION);
return ieee754dp_xcpt(ieee754dp_indef(), "add", x, y);
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
return y;
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
return x;
/* Zero handling
*/
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
if (xs == ys)
return x;
else
return ieee754dp_zero(ieee754_csr.rm ==
IEEE754_RD);
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
return x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
return y;
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
/* FALL THROUGH */
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
DPDNORMY;
break;
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
DPDNORMX;
break;
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
break;
}
assert(xm & DP_HIDDEN_BIT);
assert(ym & DP_HIDDEN_BIT);
/* provide guard,round and stick bit space */
xm <<= 3;
ym <<= 3;
if (xe > ye) {
/* have to shift y fraction right to align
*/
int s = xe - ye;
ym = XDPSRS(ym, s);
ye += s;
} else if (ye > xe) {
/* have to shift x fraction right to align
*/
int s = ye - xe;
xm = XDPSRS(xm, s);
xe += s;
}
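/*
 * Worked example (illustrative only, comment added): if xe == 5 and
 * ye == 2, y's fraction is shifted right by 3 and ye raised to 5; XDPSRS
 * keeps a sticky bit so the bits shifted out still influence rounding.
 * After either branch both operands share one exponent, which the asserts
 * below check.
 */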
assert(xe == ye);
assert(xe <= DP_EMAX);
if (xs == ys) {
/* generate 28 bit result of adding two 27 bit numbers
* leaving result in xm,xs,xe
*/
xm = xm + ym;
xe = xe;
xs = xs;
if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */
xm = XDPSRS1(xm);
xe++;
}
} else {
if (xm >= ym) {
xm = xm - ym;
xe = xe;
xs = xs;
} else {
xm = ym - xm;
xe = xe;
xs = ys;
}
if (xm == 0)
return ieee754dp_zero(ieee754_csr.rm ==
IEEE754_RD);
/* normalize to rounding precision */
while ((xm >> (DP_MBITS + 3)) == 0) {
xm <<= 1;
xe--;
}
}
DPNORMRET2(xs, xe, xm, "add", x, y);
}
| gpl-2.0 |
ShinySide/SM-G361H | arch/arm/mach-msm/smd_debug.c | 2311 | 7822 | /* arch/arm/mach-msm/smd_debug.c
*
* Copyright (C) 2007 Google, Inc.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/debugfs.h>
#include <linux/list.h>
#include <mach/msm_iomap.h>
#include "smd_private.h"
#if defined(CONFIG_DEBUG_FS)
static char *chstate(unsigned n)
{
switch (n) {
case SMD_SS_CLOSED:
return "CLOSED";
case SMD_SS_OPENING:
return "OPENING";
case SMD_SS_OPENED:
return "OPENED";
case SMD_SS_FLUSHING:
return "FLUSHING";
case SMD_SS_CLOSING:
return "CLOSING";
case SMD_SS_RESET:
return "RESET";
case SMD_SS_RESET_OPENING:
return "ROPENING";
default:
return "UNKNOWN";
}
}
static int dump_ch(char *buf, int max, struct smd_channel *ch)
{
volatile struct smd_half_channel *s = ch->send;
volatile struct smd_half_channel *r = ch->recv;
return scnprintf(
buf, max,
"ch%02d:"
" %8s(%05d/%05d) %c%c%c%c%c%c%c <->"
" %8s(%05d/%05d) %c%c%c%c%c%c%c '%s'\n", ch->n,
chstate(s->state), s->tail, s->head,
s->fDSR ? 'D' : 'd',
s->fCTS ? 'C' : 'c',
s->fCD ? 'C' : 'c',
s->fRI ? 'I' : 'i',
s->fHEAD ? 'W' : 'w',
s->fTAIL ? 'R' : 'r',
s->fSTATE ? 'S' : 's',
chstate(r->state), r->tail, r->head,
r->fDSR ? 'D' : 'd',
r->fCTS ? 'R' : 'r',
r->fCD ? 'C' : 'c',
r->fRI ? 'I' : 'i',
r->fHEAD ? 'W' : 'w',
r->fTAIL ? 'R' : 'r',
r->fSTATE ? 'S' : 's',
ch->name
);
}
static int debug_read_stat(char *buf, int max)
{
char *msg;
int i = 0;
msg = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
if (raw_smsm_get_state(SMSM_STATE_MODEM) & SMSM_RESET)
i += scnprintf(buf + i, max - i,
"smsm: ARM9 HAS CRASHED\n");
i += scnprintf(buf + i, max - i, "smsm: a9: %08x a11: %08x\n",
raw_smsm_get_state(SMSM_STATE_MODEM),
raw_smsm_get_state(SMSM_STATE_APPS));
#ifdef CONFIG_ARCH_MSM_SCORPION
i += scnprintf(buf + i, max - i, "smsm dem: apps: %08x modem: %08x "
"qdsp6: %08x power: %08x time: %08x\n",
raw_smsm_get_state(SMSM_STATE_APPS_DEM),
raw_smsm_get_state(SMSM_STATE_MODEM_DEM),
raw_smsm_get_state(SMSM_STATE_QDSP6_DEM),
raw_smsm_get_state(SMSM_STATE_POWER_MASTER_DEM),
raw_smsm_get_state(SMSM_STATE_TIME_MASTER_DEM));
#endif
if (msg) {
msg[SZ_DIAG_ERR_MSG - 1] = 0;
i += scnprintf(buf + i, max - i, "diag: '%s'\n", msg);
}
return i;
}
static int debug_read_mem(char *buf, int max)
{
unsigned n;
struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
struct smem_heap_entry *toc = shared->heap_toc;
int i = 0;
i += scnprintf(buf + i, max - i,
"heap: init=%d free=%d remain=%d\n",
shared->heap_info.initialized,
shared->heap_info.free_offset,
shared->heap_info.heap_remaining);
for (n = 0; n < SMEM_NUM_ITEMS; n++) {
if (toc[n].allocated == 0)
continue;
i += scnprintf(buf + i, max - i,
"%04d: offset %08x size %08x\n",
n, toc[n].offset, toc[n].size);
}
return i;
}
static int debug_read_ch(char *buf, int max)
{
struct smd_channel *ch;
unsigned long flags;
int i = 0;
spin_lock_irqsave(&smd_lock, flags);
list_for_each_entry(ch, &smd_ch_list_dsp, ch_list)
i += dump_ch(buf + i, max - i, ch);
list_for_each_entry(ch, &smd_ch_list_modem, ch_list)
i += dump_ch(buf + i, max - i, ch);
list_for_each_entry(ch, &smd_ch_closed_list, ch_list)
i += dump_ch(buf + i, max - i, ch);
spin_unlock_irqrestore(&smd_lock, flags);
return i;
}
static int debug_read_version(char *buf, int max)
{
struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
unsigned version = shared->version[VERSION_MODEM];
return sprintf(buf, "%d.%d\n", version >> 16, version & 0xffff);
}
static int debug_read_build_id(char *buf, int max)
{
unsigned size;
void *data;
data = smem_item(SMEM_HW_SW_BUILD_ID, &size);
if (!data)
return 0;
if (size >= max)
size = max;
memcpy(buf, data, size);
return size;
}
static int debug_read_alloc_tbl(char *buf, int max)
{
struct smd_alloc_elm *shared;
int n, i = 0;
shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
for (n = 0; n < 64; n++) {
if (shared[n].ref_count == 0)
continue;
i += scnprintf(buf + i, max - i,
"%03d: %-20s cid=%02d type=%03d "
"kind=%02d ref_count=%d\n",
n, shared[n].name, shared[n].cid,
shared[n].ctype & 0xff,
(shared[n].ctype >> 8) & 0xf,
shared[n].ref_count);
}
return i;
}
#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];
static ssize_t debug_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
int (*fill)(char *buf, int max) = file->private_data;
int bsize = fill(debug_buffer, DEBUG_BUFMAX);
return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}
static const struct file_operations debug_ops = {
.read = debug_read,
.open = simple_open,
.llseek = default_llseek,
};
static void debug_create(const char *name, umode_t mode,
struct dentry *dent,
int (*fill)(char *buf, int max))
{
debugfs_create_file(name, mode, dent, fill, &debug_ops);
}
int __init smd_debugfs_init(void)
{
struct dentry *dent;
dent = debugfs_create_dir("smd", 0);
if (IS_ERR(dent))
return 1;
debug_create("ch", 0444, dent, debug_read_ch);
debug_create("stat", 0444, dent, debug_read_stat);
debug_create("mem", 0444, dent, debug_read_mem);
debug_create("version", 0444, dent, debug_read_version);
debug_create("tbl", 0444, dent, debug_read_alloc_tbl);
debug_create("build", 0444, dent, debug_read_build_id);
return 0;
}
#endif
#define MAX_NUM_SLEEP_CLIENTS 64
#define MAX_SLEEP_NAME_LEN 8
#define NUM_GPIO_INT_REGISTERS 6
#define GPIO_SMEM_NUM_GROUPS 2
#define GPIO_SMEM_MAX_PC_INTERRUPTS 8
struct tramp_gpio_save {
unsigned int enable;
unsigned int detect;
unsigned int polarity;
};
struct tramp_gpio_smem {
uint16_t num_fired[GPIO_SMEM_NUM_GROUPS];
uint16_t fired[GPIO_SMEM_NUM_GROUPS][GPIO_SMEM_MAX_PC_INTERRUPTS];
uint32_t enabled[NUM_GPIO_INT_REGISTERS];
uint32_t detection[NUM_GPIO_INT_REGISTERS];
uint32_t polarity[NUM_GPIO_INT_REGISTERS];
};
void smsm_print_sleep_info(void)
{
unsigned long flags;
uint32_t *ptr;
#ifndef CONFIG_ARCH_MSM_SCORPION
struct tramp_gpio_smem *gpio;
struct smsm_interrupt_info *int_info;
#endif
spin_lock_irqsave(&smem_lock, flags);
ptr = smem_alloc(SMEM_SMSM_SLEEP_DELAY, sizeof(*ptr));
if (ptr)
pr_info("SMEM_SMSM_SLEEP_DELAY: %x\n", *ptr);
ptr = smem_alloc(SMEM_SMSM_LIMIT_SLEEP, sizeof(*ptr));
if (ptr)
pr_info("SMEM_SMSM_LIMIT_SLEEP: %x\n", *ptr);
ptr = smem_alloc(SMEM_SLEEP_POWER_COLLAPSE_DISABLED, sizeof(*ptr));
if (ptr)
pr_info("SMEM_SLEEP_POWER_COLLAPSE_DISABLED: %x\n", *ptr);
#ifndef CONFIG_ARCH_MSM_SCORPION
int_info = smem_alloc(SMEM_SMSM_INT_INFO, sizeof(*int_info));
if (int_info)
pr_info("SMEM_SMSM_INT_INFO %x %x %x\n",
int_info->interrupt_mask,
int_info->pending_interrupts,
int_info->wakeup_reason);
gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*gpio));
if (gpio) {
int i;
for (i = 0; i < NUM_GPIO_INT_REGISTERS; i++)
pr_info("SMEM_GPIO_INT: %d: e %x d %x p %x\n",
i, gpio->enabled[i], gpio->detection[i],
gpio->polarity[i]);
for (i = 0; i < GPIO_SMEM_NUM_GROUPS; i++)
pr_info("SMEM_GPIO_INT: %d: f %d: %d %d...\n",
i, gpio->num_fired[i], gpio->fired[i][0],
gpio->fired[i][1]);
}
#else
#endif
spin_unlock_irqrestore(&smem_lock, flags);
}
| gpl-2.0 |
TeamRegular/android_kernel_amazon_ford | drivers/power/lp8788-charger.c | 2311 | 18520 | /*
* TI LP8788 MFD - battery charger driver
*
* Copyright 2012 Texas Instruments
*
* Author: Milo(Woogyom) Kim <milo.kim@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/err.h>
#include <linux/iio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/mfd/lp8788.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
/* register address */
#define LP8788_CHG_STATUS 0x07
#define LP8788_CHG_IDCIN 0x13
#define LP8788_CHG_IBATT 0x14
#define LP8788_CHG_VTERM 0x15
#define LP8788_CHG_EOC 0x16
/* mask/shift bits */
#define LP8788_CHG_INPUT_STATE_M 0x03 /* Addr 07h */
#define LP8788_CHG_STATE_M 0x3C
#define LP8788_CHG_STATE_S 2
#define LP8788_NO_BATT_M BIT(6)
#define LP8788_BAD_BATT_M BIT(7)
#define LP8788_CHG_IBATT_M 0x1F /* Addr 14h */
#define LP8788_CHG_VTERM_M 0x0F /* Addr 15h */
#define LP8788_CHG_EOC_LEVEL_M 0x30 /* Addr 16h */
#define LP8788_CHG_EOC_LEVEL_S 4
#define LP8788_CHG_EOC_TIME_M 0x0E
#define LP8788_CHG_EOC_TIME_S 1
#define LP8788_CHG_EOC_MODE_M BIT(0)
#define LP8788_CHARGER_NAME "charger"
#define LP8788_BATTERY_NAME "main_batt"
#define LP8788_CHG_START 0x11
#define LP8788_CHG_END 0x1C
#define LP8788_ISEL_MAX 23
#define LP8788_ISEL_STEP 50
#define LP8788_VTERM_MIN 4100
#define LP8788_VTERM_STEP 25
#define LP8788_MAX_BATT_CAPACITY 100
#define LP8788_MAX_CHG_IRQS 11
enum lp8788_charging_state {
LP8788_OFF,
LP8788_WARM_UP,
LP8788_LOW_INPUT = 0x3,
LP8788_PRECHARGE,
LP8788_CC,
LP8788_CV,
LP8788_MAINTENANCE,
LP8788_BATTERY_FAULT,
LP8788_SYSTEM_SUPPORT = 0xC,
LP8788_HIGH_CURRENT = 0xF,
LP8788_MAX_CHG_STATE,
};
enum lp8788_charger_adc_sel {
LP8788_VBATT,
LP8788_BATT_TEMP,
LP8788_NUM_CHG_ADC,
};
enum lp8788_charger_input_state {
LP8788_SYSTEM_SUPPLY = 1,
LP8788_FULL_FUNCTION,
};
/*
* struct lp8788_chg_irq
* @which : lp8788 interrupt id
* @virq : Linux IRQ number from irq_domain
*/
struct lp8788_chg_irq {
enum lp8788_int_id which;
int virq;
};
/*
* struct lp8788_charger
* @lp : used for accessing the registers of mfd lp8788 device
* @charger : power supply driver for the battery charger
* @battery : power supply driver for the battery
* @charger_work : work queue for charger input interrupts
* @chan : iio channels for getting adc values
* eg) battery voltage, capacity and temperature
* @irqs : charger dedicated interrupts
* @num_irqs : total numbers of charger interrupts
* @pdata : charger platform specific data
*/
struct lp8788_charger {
struct lp8788 *lp;
struct power_supply charger;
struct power_supply battery;
struct work_struct charger_work;
struct iio_channel *chan[LP8788_NUM_CHG_ADC];
struct lp8788_chg_irq irqs[LP8788_MAX_CHG_IRQS];
int num_irqs;
struct lp8788_charger_platform_data *pdata;
};
static char *battery_supplied_to[] = {
LP8788_BATTERY_NAME,
};
static enum power_supply_property lp8788_charger_prop[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_CURRENT_MAX,
};
static enum power_supply_property lp8788_battery_prop[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
POWER_SUPPLY_PROP_TEMP,
};
static bool lp8788_is_charger_detected(struct lp8788_charger *pchg)
{
u8 data;
lp8788_read_byte(pchg->lp, LP8788_CHG_STATUS, &data);
data &= LP8788_CHG_INPUT_STATE_M;
return data == LP8788_SYSTEM_SUPPLY || data == LP8788_FULL_FUNCTION;
}
static int lp8788_charger_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct lp8788_charger *pchg = dev_get_drvdata(psy->dev->parent);
u8 read;
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
val->intval = lp8788_is_charger_detected(pchg);
break;
case POWER_SUPPLY_PROP_CURRENT_MAX:
lp8788_read_byte(pchg->lp, LP8788_CHG_IDCIN, &read);
val->intval = LP8788_ISEL_STEP *
(min_t(int, read, LP8788_ISEL_MAX) + 1);
break;
default:
return -EINVAL;
}
return 0;
}
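/*
 * Worked example (illustrative, comment added, not in the original
 * source): the IDCIN field selects the input current limit in 50 mA steps
 * starting at 50 mA, so a raw value of 9 reports 50 * (9 + 1) = 500, and
 * any raw value above LP8788_ISEL_MAX (23) is clamped, giving at most
 * 50 * 24 = 1200.
 */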
static int lp8788_get_battery_status(struct lp8788_charger *pchg,
union power_supply_propval *val)
{
enum lp8788_charging_state state;
u8 data;
int ret;
ret = lp8788_read_byte(pchg->lp, LP8788_CHG_STATUS, &data);
if (ret)
return ret;
state = (data & LP8788_CHG_STATE_M) >> LP8788_CHG_STATE_S;
switch (state) {
case LP8788_OFF:
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
break;
case LP8788_PRECHARGE:
case LP8788_CC:
case LP8788_CV:
case LP8788_HIGH_CURRENT:
val->intval = POWER_SUPPLY_STATUS_CHARGING;
break;
case LP8788_MAINTENANCE:
val->intval = POWER_SUPPLY_STATUS_FULL;
break;
default:
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
break;
}
return 0;
}
static int lp8788_get_battery_health(struct lp8788_charger *pchg,
union power_supply_propval *val)
{
u8 data;
int ret;
ret = lp8788_read_byte(pchg->lp, LP8788_CHG_STATUS, &data);
if (ret)
return ret;
if (data & LP8788_NO_BATT_M)
val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
else if (data & LP8788_BAD_BATT_M)
val->intval = POWER_SUPPLY_HEALTH_DEAD;
else
val->intval = POWER_SUPPLY_HEALTH_GOOD;
return 0;
}
static int lp8788_get_battery_present(struct lp8788_charger *pchg,
union power_supply_propval *val)
{
u8 data;
int ret;
ret = lp8788_read_byte(pchg->lp, LP8788_CHG_STATUS, &data);
if (ret)
return ret;
val->intval = !(data & LP8788_NO_BATT_M);
return 0;
}
static int lp8788_get_vbatt_adc(struct lp8788_charger *pchg, int *result)
{
struct iio_channel *channel = pchg->chan[LP8788_VBATT];
if (!channel)
return -EINVAL;
return iio_read_channel_processed(channel, result);
}
static int lp8788_get_battery_voltage(struct lp8788_charger *pchg,
union power_supply_propval *val)
{
return lp8788_get_vbatt_adc(pchg, &val->intval);
}
static int lp8788_get_battery_capacity(struct lp8788_charger *pchg,
union power_supply_propval *val)
{
struct lp8788 *lp = pchg->lp;
struct lp8788_charger_platform_data *pdata = pchg->pdata;
unsigned int max_vbatt;
int vbatt;
enum lp8788_charging_state state;
u8 data;
int ret;
if (!pdata)
return -EINVAL;
max_vbatt = pdata->max_vbatt_mv;
if (max_vbatt == 0)
return -EINVAL;
ret = lp8788_read_byte(lp, LP8788_CHG_STATUS, &data);
if (ret)
return ret;
state = (data & LP8788_CHG_STATE_M) >> LP8788_CHG_STATE_S;
if (state == LP8788_MAINTENANCE) {
val->intval = LP8788_MAX_BATT_CAPACITY;
} else {
ret = lp8788_get_vbatt_adc(pchg, &vbatt);
if (ret)
return ret;
val->intval = (vbatt * LP8788_MAX_BATT_CAPACITY) / max_vbatt;
val->intval = min(val->intval, LP8788_MAX_BATT_CAPACITY);
}
return 0;
}
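/*
 * Worked example (illustrative, hypothetical platform data; comment
 * added): with pdata->max_vbatt_mv == 4200 and a measured vbatt of 3700,
 * the estimate is 3700 * 100 / 4200 == 88 (%), capped at 100; a full
 * (MAINTENANCE) charger state short-circuits to 100% above.
 */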
static int lp8788_get_battery_temperature(struct lp8788_charger *pchg,
union power_supply_propval *val)
{
struct iio_channel *channel = pchg->chan[LP8788_BATT_TEMP];
int result;
int ret;
if (!channel)
return -EINVAL;
ret = iio_read_channel_processed(channel, &result);
if (ret < 0)
return -EINVAL;
/* unit: 0.1 'C */
val->intval = result * 10;
return 0;
}
static int lp8788_get_battery_charging_current(struct lp8788_charger *pchg,
union power_supply_propval *val)
{
u8 read;
lp8788_read_byte(pchg->lp, LP8788_CHG_IBATT, &read);
read &= LP8788_CHG_IBATT_M;
val->intval = LP8788_ISEL_STEP *
(min_t(int, read, LP8788_ISEL_MAX) + 1);
return 0;
}
static int lp8788_get_charging_termination_voltage(struct lp8788_charger *pchg,
union power_supply_propval *val)
{
u8 read;
lp8788_read_byte(pchg->lp, LP8788_CHG_VTERM, &read);
read &= LP8788_CHG_VTERM_M;
val->intval = LP8788_VTERM_MIN + LP8788_VTERM_STEP * read;
return 0;
}
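/*
 * Worked example (illustrative only, comment added): the 4-bit VTERM
 * field encodes the termination voltage in 25 mV steps above 4100 mV, so
 * read == 4 decodes to 4100 + 25 * 4 = 4200 (mV).
 */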
static int lp8788_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct lp8788_charger *pchg = dev_get_drvdata(psy->dev->parent);
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
return lp8788_get_battery_status(pchg, val);
case POWER_SUPPLY_PROP_HEALTH:
return lp8788_get_battery_health(pchg, val);
case POWER_SUPPLY_PROP_PRESENT:
return lp8788_get_battery_present(pchg, val);
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
return lp8788_get_battery_voltage(pchg, val);
case POWER_SUPPLY_PROP_CAPACITY:
return lp8788_get_battery_capacity(pchg, val);
case POWER_SUPPLY_PROP_TEMP:
return lp8788_get_battery_temperature(pchg, val);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
return lp8788_get_battery_charging_current(pchg, val);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
return lp8788_get_charging_termination_voltage(pchg, val);
default:
return -EINVAL;
}
}
static inline bool lp8788_is_valid_charger_register(u8 addr)
{
return addr >= LP8788_CHG_START && addr <= LP8788_CHG_END;
}
static int lp8788_update_charger_params(struct platform_device *pdev,
struct lp8788_charger *pchg)
{
struct lp8788 *lp = pchg->lp;
struct lp8788_charger_platform_data *pdata = pchg->pdata;
struct lp8788_chg_param *param;
int i;
int ret;
if (!pdata || !pdata->chg_params) {
dev_info(&pdev->dev, "skip updating charger parameters\n");
return 0;
}
/* settting charging parameters */
for (i = 0; i < pdata->num_chg_params; i++) {
param = pdata->chg_params + i;
if (!param)
continue;
if (lp8788_is_valid_charger_register(param->addr)) {
ret = lp8788_write_byte(lp, param->addr, param->val);
if (ret)
return ret;
}
}
return 0;
}
static int lp8788_psy_register(struct platform_device *pdev,
struct lp8788_charger *pchg)
{
pchg->charger.name = LP8788_CHARGER_NAME;
pchg->charger.type = POWER_SUPPLY_TYPE_MAINS;
pchg->charger.properties = lp8788_charger_prop;
pchg->charger.num_properties = ARRAY_SIZE(lp8788_charger_prop);
pchg->charger.get_property = lp8788_charger_get_property;
pchg->charger.supplied_to = battery_supplied_to;
pchg->charger.num_supplicants = ARRAY_SIZE(battery_supplied_to);
if (power_supply_register(&pdev->dev, &pchg->charger))
return -EPERM;
pchg->battery.name = LP8788_BATTERY_NAME;
pchg->battery.type = POWER_SUPPLY_TYPE_BATTERY;
pchg->battery.properties = lp8788_battery_prop;
pchg->battery.num_properties = ARRAY_SIZE(lp8788_battery_prop);
pchg->battery.get_property = lp8788_battery_get_property;
if (power_supply_register(&pdev->dev, &pchg->battery))
return -EPERM;
return 0;
}
static void lp8788_psy_unregister(struct lp8788_charger *pchg)
{
power_supply_unregister(&pchg->battery);
power_supply_unregister(&pchg->charger);
}
static void lp8788_charger_event(struct work_struct *work)
{
struct lp8788_charger *pchg =
container_of(work, struct lp8788_charger, charger_work);
struct lp8788_charger_platform_data *pdata = pchg->pdata;
enum lp8788_charger_event event = lp8788_is_charger_detected(pchg);
pdata->charger_event(pchg->lp, event);
}
static bool lp8788_find_irq_id(struct lp8788_charger *pchg, int virq, int *id)
{
bool found = false;
int i;
for (i = 0; i < pchg->num_irqs; i++) {
if (pchg->irqs[i].virq == virq) {
*id = pchg->irqs[i].which;
found = true;
break;
}
}
return found;
}
static irqreturn_t lp8788_charger_irq_thread(int virq, void *ptr)
{
struct lp8788_charger *pchg = ptr;
struct lp8788_charger_platform_data *pdata = pchg->pdata;
int id = -1;
if (!lp8788_find_irq_id(pchg, virq, &id))
return IRQ_NONE;
switch (id) {
case LP8788_INT_CHG_INPUT_STATE:
case LP8788_INT_CHG_STATE:
case LP8788_INT_EOC:
case LP8788_INT_BATT_LOW:
case LP8788_INT_NO_BATT:
power_supply_changed(&pchg->charger);
power_supply_changed(&pchg->battery);
break;
default:
break;
}
/* report charger detection event if used */
if (!pdata)
goto irq_handled;
if (pdata->charger_event && id == LP8788_INT_CHG_INPUT_STATE)
schedule_work(&pchg->charger_work);
irq_handled:
return IRQ_HANDLED;
}
static int lp8788_set_irqs(struct platform_device *pdev,
struct lp8788_charger *pchg, const char *name)
{
struct resource *r;
struct irq_domain *irqdm = pchg->lp->irqdm;
int irq_start;
int irq_end;
int virq;
int nr_irq;
int i;
int ret;
/* no error even if no irq resource */
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, name);
if (!r)
return 0;
irq_start = r->start;
irq_end = r->end;
for (i = irq_start; i <= irq_end; i++) {
nr_irq = pchg->num_irqs;
virq = irq_create_mapping(irqdm, i);
pchg->irqs[nr_irq].virq = virq;
pchg->irqs[nr_irq].which = i;
pchg->num_irqs++;
ret = request_threaded_irq(virq, NULL,
lp8788_charger_irq_thread,
0, name, pchg);
if (ret)
break;
}
if (i <= irq_end)
goto err_free_irq;
return 0;
err_free_irq:
for (i = 0; i < pchg->num_irqs; i++)
free_irq(pchg->irqs[i].virq, pchg);
return ret;
}
static int lp8788_irq_register(struct platform_device *pdev,
struct lp8788_charger *pchg)
{
const char *name[] = {
LP8788_CHG_IRQ, LP8788_PRSW_IRQ, LP8788_BATT_IRQ
};
int i;
int ret;
INIT_WORK(&pchg->charger_work, lp8788_charger_event);
pchg->num_irqs = 0;
for (i = 0; i < ARRAY_SIZE(name); i++) {
ret = lp8788_set_irqs(pdev, pchg, name[i]);
if (ret) {
dev_warn(&pdev->dev, "irq setup failed: %s\n", name[i]);
return ret;
}
}
if (pchg->num_irqs > LP8788_MAX_CHG_IRQS) {
dev_err(&pdev->dev, "invalid total number of irqs: %d\n",
pchg->num_irqs);
return -EINVAL;
}
return 0;
}
static void lp8788_irq_unregister(struct platform_device *pdev,
struct lp8788_charger *pchg)
{
int i;
int irq;
for (i = 0; i < pchg->num_irqs; i++) {
irq = pchg->irqs[i].virq;
if (!irq)
continue;
free_irq(irq, pchg);
}
}
static void lp8788_setup_adc_channel(struct device *dev,
struct lp8788_charger *pchg)
{
struct lp8788_charger_platform_data *pdata = pchg->pdata;
struct iio_channel *chan;
if (!pdata)
return;
/* ADC channel for battery voltage */
chan = iio_channel_get(dev, pdata->adc_vbatt);
pchg->chan[LP8788_VBATT] = IS_ERR(chan) ? NULL : chan;
/* ADC channel for battery temperature */
chan = iio_channel_get(dev, pdata->adc_batt_temp);
pchg->chan[LP8788_BATT_TEMP] = IS_ERR(chan) ? NULL : chan;
}
static void lp8788_release_adc_channel(struct lp8788_charger *pchg)
{
int i;
for (i = 0; i < LP8788_NUM_CHG_ADC; i++) {
if (!pchg->chan[i])
continue;
iio_channel_release(pchg->chan[i]);
pchg->chan[i] = NULL;
}
}
static ssize_t lp8788_show_charger_status(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lp8788_charger *pchg = dev_get_drvdata(dev);
enum lp8788_charging_state state;
char *desc[LP8788_MAX_CHG_STATE] = {
[LP8788_OFF] = "CHARGER OFF",
[LP8788_WARM_UP] = "WARM UP",
[LP8788_LOW_INPUT] = "LOW INPUT STATE",
[LP8788_PRECHARGE] = "CHARGING - PRECHARGE",
[LP8788_CC] = "CHARGING - CC",
[LP8788_CV] = "CHARGING - CV",
[LP8788_MAINTENANCE] = "NO CHARGING - MAINTENANCE",
[LP8788_BATTERY_FAULT] = "BATTERY FAULT",
[LP8788_SYSTEM_SUPPORT] = "SYSTEM SUPPORT",
[LP8788_HIGH_CURRENT] = "HIGH CURRENT",
};
u8 data;
lp8788_read_byte(pchg->lp, LP8788_CHG_STATUS, &data);
state = (data & LP8788_CHG_STATE_M) >> LP8788_CHG_STATE_S;
return scnprintf(buf, PAGE_SIZE, "%s\n", desc[state]);
}
static ssize_t lp8788_show_eoc_time(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lp8788_charger *pchg = dev_get_drvdata(dev);
char *stime[] = { "400ms", "5min", "10min", "15min",
"20min", "25min", "30min", "No timeout" };
u8 val;
lp8788_read_byte(pchg->lp, LP8788_CHG_EOC, &val);
val = (val & LP8788_CHG_EOC_TIME_M) >> LP8788_CHG_EOC_TIME_S;
return scnprintf(buf, PAGE_SIZE, "End Of Charge Time: %s\n",
stime[val]);
}
static ssize_t lp8788_show_eoc_level(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lp8788_charger *pchg = dev_get_drvdata(dev);
char *abs_level[] = { "25mA", "49mA", "75mA", "98mA" };
char *relative_level[] = { "5%", "10%", "15%", "20%" };
char *level;
u8 val;
u8 mode;
lp8788_read_byte(pchg->lp, LP8788_CHG_EOC, &val);
mode = val & LP8788_CHG_EOC_MODE_M;
val = (val & LP8788_CHG_EOC_LEVEL_M) >> LP8788_CHG_EOC_LEVEL_S;
level = mode ? abs_level[val] : relative_level[val];
return scnprintf(buf, PAGE_SIZE, "End Of Charge Level: %s\n", level);
}
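/*
 * A minimal self-contained sketch of the register decode pattern used by
 * the two EOC attributes above. The mask/shift values below are
 * illustrative assumptions, not the real LP8788 register layout.
 */
#if 0 /* illustration only */
#include <stdio.h>

#define EOC_MODE_M	0x40	/* assumed: 1 = absolute level, 0 = relative */
#define EOC_LEVEL_M	0x30	/* assumed 2-bit level field */
#define EOC_LEVEL_S	4

static const char *decode_eoc_level(unsigned char val)
{
	static const char * const abs_level[] = { "25mA", "49mA", "75mA", "98mA" };
	static const char * const rel_level[] = { "5%", "10%", "15%", "20%" };
	unsigned char mode = val & EOC_MODE_M;
	unsigned char level = (val & EOC_LEVEL_M) >> EOC_LEVEL_S;

	return mode ? abs_level[level] : rel_level[level];
}

int main(void)
{
	/* mode bit set, level index 1 -> "49mA" */
	printf("%s\n", decode_eoc_level(0x50));
	return 0;
}
#endif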
static DEVICE_ATTR(charger_status, S_IRUSR, lp8788_show_charger_status, NULL);
static DEVICE_ATTR(eoc_time, S_IRUSR, lp8788_show_eoc_time, NULL);
static DEVICE_ATTR(eoc_level, S_IRUSR, lp8788_show_eoc_level, NULL);
static struct attribute *lp8788_charger_attr[] = {
&dev_attr_charger_status.attr,
&dev_attr_eoc_time.attr,
&dev_attr_eoc_level.attr,
NULL,
};
static const struct attribute_group lp8788_attr_group = {
.attrs = lp8788_charger_attr,
};
static int lp8788_charger_probe(struct platform_device *pdev)
{
struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
struct lp8788_charger *pchg;
struct device *dev = &pdev->dev;
int ret;
pchg = devm_kzalloc(dev, sizeof(struct lp8788_charger), GFP_KERNEL);
if (!pchg)
return -ENOMEM;
pchg->lp = lp;
pchg->pdata = lp->pdata ? lp->pdata->chg_pdata : NULL;
platform_set_drvdata(pdev, pchg);
ret = lp8788_update_charger_params(pdev, pchg);
if (ret)
return ret;
lp8788_setup_adc_channel(&pdev->dev, pchg);
ret = lp8788_psy_register(pdev, pchg);
if (ret)
return ret;
ret = sysfs_create_group(&pdev->dev.kobj, &lp8788_attr_group);
if (ret) {
lp8788_psy_unregister(pchg);
return ret;
}
ret = lp8788_irq_register(pdev, pchg);
if (ret)
dev_warn(dev, "failed to register charger irq: %d\n", ret);
return 0;
}
static int lp8788_charger_remove(struct platform_device *pdev)
{
struct lp8788_charger *pchg = platform_get_drvdata(pdev);
flush_work(&pchg->charger_work);
lp8788_irq_unregister(pdev, pchg);
sysfs_remove_group(&pdev->dev.kobj, &lp8788_attr_group);
lp8788_psy_unregister(pchg);
lp8788_release_adc_channel(pchg);
return 0;
}
static struct platform_driver lp8788_charger_driver = {
.probe = lp8788_charger_probe,
.remove = lp8788_charger_remove,
.driver = {
.name = LP8788_DEV_CHARGER,
.owner = THIS_MODULE,
},
};
module_platform_driver(lp8788_charger_driver);
MODULE_DESCRIPTION("TI LP8788 Charger Driver");
MODULE_AUTHOR("Milo Kim");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:lp8788-charger");
| gpl-2.0 |
Evervolv/android_kernel_oppo_msm8974 | drivers/usb/host/xhci-mem.c | 2567 | 75375 | /*
* xHCI host controller driver
*
* Copyright (C) 2008 Intel Corp.
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include "xhci.h"
/*
* Allocates a generic ring segment from the ring pool, sets the dma address,
* initializes the segment to zero, and sets the private next pointer to NULL.
*
* Section 4.11.1.1:
* "All components of all Command and Transfer TRBs shall be initialized to '0'"
*/
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
unsigned int cycle_state, gfp_t flags)
{
struct xhci_segment *seg;
dma_addr_t dma;
int i;
seg = kzalloc(sizeof *seg, flags);
if (!seg)
return NULL;
seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
if (!seg->trbs) {
kfree(seg);
return NULL;
}
memset(seg->trbs, 0, SEGMENT_SIZE);
/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
if (cycle_state == 0) {
for (i = 0; i < TRBS_PER_SEGMENT; i++)
seg->trbs[i].link.control |= TRB_CYCLE;
}
seg->dma = dma;
seg->next = NULL;
return seg;
}
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
if (seg->trbs) {
dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
seg->trbs = NULL;
}
kfree(seg);
}
static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
struct xhci_segment *first)
{
struct xhci_segment *seg;
seg = first->next;
while (seg != first) {
struct xhci_segment *next = seg->next;
xhci_segment_free(xhci, seg);
seg = next;
}
xhci_segment_free(xhci, first);
}
/*
* Make the prev segment point to the next segment.
*
* Change the last TRB in the prev segment to be a Link TRB which points to the
* DMA address of the next segment. The caller needs to set any Link TRB
* related flags, such as End TRB, Toggle Cycle, and no snoop.
*/
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
struct xhci_segment *next, enum xhci_ring_type type)
{
u32 val;
if (!prev || !next)
return;
prev->next = next;
if (type != TYPE_EVENT) {
prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
cpu_to_le64(next->dma);
/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
val &= ~TRB_TYPE_BITMASK;
val |= TRB_TYPE(TRB_LINK);
/* Always set the chain bit with 0.95 hardware */
/* Set chain bit for isoc rings on AMD 0.96 host */
if (xhci_link_trb_quirk(xhci) ||
(type == TYPE_ISOC &&
(xhci->quirks & XHCI_AMD_0x96_HOST)))
val |= TRB_CHAIN;
prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
}
}
/*
* Link the ring to the new segments.
* Set Toggle Cycle for the new ring if needed.
*/
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
struct xhci_segment *first, struct xhci_segment *last,
unsigned int num_segs)
{
struct xhci_segment *next;
if (!ring || !first || !last)
return;
next = ring->enq_seg->next;
xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
xhci_link_segments(xhci, last, next, ring->type);
ring->num_segs += num_segs;
ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
&= ~cpu_to_le32(LINK_TOGGLE);
last->trbs[TRBS_PER_SEGMENT-1].link.control
|= cpu_to_le32(LINK_TOGGLE);
ring->last_seg = last;
}
}
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
if (!ring)
return;
if (ring->first_seg)
xhci_free_segments_for_ring(xhci, ring->first_seg);
kfree(ring);
}
static void xhci_initialize_ring_info(struct xhci_ring *ring,
unsigned int cycle_state)
{
/* The ring is empty, so the enqueue pointer == dequeue pointer */
ring->enqueue = ring->first_seg->trbs;
ring->enq_seg = ring->first_seg;
ring->dequeue = ring->enqueue;
ring->deq_seg = ring->first_seg;
/* The ring is initialized to 0. The producer must write 1 to the cycle
* bit to handover ownership of the TRB, so PCS = 1. The consumer must
* compare CCS to the cycle bit to check ownership, so CCS = 1.
*
* New rings are initialized with cycle state equal to 1; if we are
* handling ring expansion, set the cycle state equal to the old ring.
*/
ring->cycle_state = cycle_state;
/* Not necessary for new rings, but needed for re-initialized rings */
ring->enq_updates = 0;
ring->deq_updates = 0;
/*
* Each segment has a link TRB, and we leave one extra TRB for SW
* accounting purposes.
*/
ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
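/*
 * A quick self-contained check of the accounting above (illustration
 * only; TRBS_PER_SEG_EXAMPLE is an assumed segment size): each segment
 * donates one TRB to the link, and one more TRB is kept back for SW.
 */
#if 0
#include <assert.h>

#define TRBS_PER_SEG_EXAMPLE 64

static int usable_trbs(int num_segs)
{
	return num_segs * (TRBS_PER_SEG_EXAMPLE - 1) - 1;
}

int main(void)
{
	assert(usable_trbs(1) == 62);
	assert(usable_trbs(2) == 125);
	return 0;
}
#endif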
/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
struct xhci_segment **first, struct xhci_segment **last,
unsigned int num_segs, unsigned int cycle_state,
enum xhci_ring_type type, gfp_t flags)
{
struct xhci_segment *prev;
prev = xhci_segment_alloc(xhci, cycle_state, flags);
if (!prev)
return -ENOMEM;
num_segs--;
*first = prev;
while (num_segs > 0) {
struct xhci_segment *next;
next = xhci_segment_alloc(xhci, cycle_state, flags);
if (!next) {
xhci_free_segments_for_ring(xhci, *first);
return -ENOMEM;
}
xhci_link_segments(xhci, prev, next, type);
prev = next;
num_segs--;
}
xhci_link_segments(xhci, prev, *first, type);
*last = prev;
return 0;
}
/*
* Create a new ring with zero or more segments.
*
* Link each segment together into a ring.
* Set the end flag and the cycle toggle bit on the last segment.
* See section 4.9.1 and figures 15 and 16.
*/
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
unsigned int num_segs, unsigned int cycle_state,
enum xhci_ring_type type, gfp_t flags)
{
struct xhci_ring *ring;
int ret;
ring = kzalloc(sizeof *(ring), flags);
if (!ring)
return NULL;
ring->num_segs = num_segs;
INIT_LIST_HEAD(&ring->td_list);
ring->type = type;
if (num_segs == 0)
return ring;
ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
&ring->last_seg, num_segs, cycle_state, type, flags);
if (ret)
goto fail;
/* Only event ring does not use link TRB */
if (type != TYPE_EVENT) {
/* See section 4.9.2.1 and 6.4.4.1 */
ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
cpu_to_le32(LINK_TOGGLE);
}
xhci_initialize_ring_info(ring, cycle_state);
return ring;
fail:
xhci_ring_free(xhci, ring);
return NULL;
}
void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
unsigned int ep_index)
{
int rings_cached;
rings_cached = virt_dev->num_rings_cached;
if (rings_cached < XHCI_MAX_RINGS_CACHED) {
virt_dev->ring_cache[rings_cached] =
virt_dev->eps[ep_index].ring;
virt_dev->num_rings_cached++;
xhci_dbg(xhci, "Cached old ring, "
"%d ring%s cached\n",
virt_dev->num_rings_cached,
(virt_dev->num_rings_cached > 1) ? "s" : "");
} else {
xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
xhci_dbg(xhci, "Ring cache full (%d rings), "
"freeing ring\n",
virt_dev->num_rings_cached);
}
virt_dev->eps[ep_index].ring = NULL;
}
/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
* pointers to the beginning of the ring.
*/
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
struct xhci_ring *ring, unsigned int cycle_state,
enum xhci_ring_type type)
{
struct xhci_segment *seg = ring->first_seg;
int i;
do {
memset(seg->trbs, 0,
sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
if (cycle_state == 0) {
for (i = 0; i < TRBS_PER_SEGMENT; i++)
seg->trbs[i].link.control |= TRB_CYCLE;
}
/* All endpoint rings have link TRBs */
xhci_link_segments(xhci, seg, seg->next, type);
seg = seg->next;
} while (seg != ring->first_seg);
ring->type = type;
xhci_initialize_ring_info(ring, cycle_state);
/* td list should be empty since all URBs have been cancelled,
* but just in case...
*/
INIT_LIST_HEAD(&ring->td_list);
}
/*
* Expand an existing ring.
* Allocate a new set of ring segments with the same cycle state as the
* existing ring and link them into it.
*/
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_trbs, gfp_t flags)
{
struct xhci_segment *first;
struct xhci_segment *last;
unsigned int num_segs;
unsigned int num_segs_needed;
int ret;
num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
(TRBS_PER_SEGMENT - 1);
/* Allocate the number of segments we need, or double the ring size */
num_segs = ring->num_segs > num_segs_needed ?
ring->num_segs : num_segs_needed;
ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
num_segs, ring->cycle_state, ring->type, flags);
if (ret)
return -ENOMEM;
xhci_link_rings(xhci, ring, first, last, num_segs);
xhci_dbg(xhci, "ring expansion succeed, now has %d segments\n",
ring->num_segs);
return 0;
}
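/*
 * Sketch of the segment-count math above (illustration only): the
 * open-coded ceil division is equivalent to
 * DIV_ROUND_UP(num_trbs, TRBS_PER_SEGMENT - 1), and the ring then grows
 * by max(needed, current size), so it at least doubles whenever the
 * current size already covers the request.
 */
#if 0
#define TRBS_PER_SEG_EXAMPLE 64

static unsigned int segs_needed(unsigned int num_trbs)
{
	/* ceil(num_trbs / usable TRBs per segment) */
	return (num_trbs + TRBS_PER_SEG_EXAMPLE - 2) /
	       (TRBS_PER_SEG_EXAMPLE - 1);
}
/* segs_needed(63) == 1, segs_needed(64) == 2, segs_needed(127) == 3 */
#endif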
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
int type, gfp_t flags)
{
struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
if (!ctx)
return NULL;
BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
ctx->type = type;
ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
if (type == XHCI_CTX_TYPE_INPUT)
ctx->size += CTX_SIZE(xhci->hcc_params);
ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
if (!ctx->bytes) {
kfree(ctx);
return NULL;
}
memset(ctx->bytes, 0, ctx->size);
return ctx;
}
static void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
if (!ctx)
return;
dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
kfree(ctx);
}
struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
return (struct xhci_input_control_ctx *)ctx->bytes;
}
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
if (ctx->type == XHCI_CTX_TYPE_DEVICE)
return (struct xhci_slot_ctx *)ctx->bytes;
return (struct xhci_slot_ctx *)
(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx,
unsigned int ep_index)
{
/* increment ep index by offset of start of ep ctx array */
ep_index++;
if (ctx->type == XHCI_CTX_TYPE_INPUT)
ep_index++;
return (struct xhci_ep_ctx *)
(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
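/*
 * Worked example of the context indexing above (illustration only,
 * assuming 32-byte contexts): in an output (device) context, entry 0 is
 * the slot context, so endpoint 0 sits at byte offset 32; an input
 * context shifts everything up by one more entry for the input control
 * context, putting endpoint 0 at offset 64.
 */
#if 0
static unsigned int ep_ctx_offset(unsigned int ep_index, int is_input,
				  unsigned int ctx_size)
{
	return (ep_index + 1 + (is_input ? 1 : 0)) * ctx_size;
}
/* ep_ctx_offset(0, 0, 32) == 32, ep_ctx_offset(0, 1, 32) == 64 */
#endif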
/***************** Streams structures manipulation *************************/
static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
dma_free_coherent(&pdev->dev,
sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
stream_ctx, dma);
else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
return dma_pool_free(xhci->small_streams_pool,
stream_ctx, dma);
else
return dma_pool_free(xhci->medium_streams_pool,
stream_ctx, dma);
}
/*
* The stream context array for each endpoint with bulk streams enabled can
* vary in size, based on:
* - how many streams the endpoint supports,
* - the maximum primary stream array size the host controller supports,
* - and how many streams the device driver asks for.
*
* The stream context array must be a power of 2, and can be as small as
* 64 bytes or as large as 1MB.
*/
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs, dma_addr_t *dma,
gfp_t mem_flags)
{
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
return dma_alloc_coherent(&pdev->dev,
sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
dma, mem_flags);
else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
return dma_pool_alloc(xhci->small_streams_pool,
mem_flags, dma);
else
return dma_pool_alloc(xhci->medium_streams_pool,
mem_flags, dma);
}
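/*
 * The size-class selection above, restated as a sketch (illustration
 * only; the names are assumptions, not xHCI driver symbols): small
 * arrays come from the small dma_pool, mid-sized ones from the medium
 * pool, and anything larger falls back to dma_alloc_coherent().
 */
#if 0
enum ctx_class { CLASS_SMALL, CLASS_MEDIUM, CLASS_COHERENT };

static enum ctx_class pick_class(unsigned int n, unsigned int small_max,
				 unsigned int medium_max)
{
	if (n > medium_max)
		return CLASS_COHERENT;
	return n <= small_max ? CLASS_SMALL : CLASS_MEDIUM;
}
#endif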
struct xhci_ring *xhci_dma_to_transfer_ring(
struct xhci_virt_ep *ep,
u64 address)
{
if (ep->ep_state & EP_HAS_STREAMS)
return radix_tree_lookup(&ep->stream_info->trb_address_map,
address >> SEGMENT_SHIFT);
return ep->ring;
}
/* Only use this when you know stream_info is valid */
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static struct xhci_ring *dma_to_stream_ring(
struct xhci_stream_info *stream_info,
u64 address)
{
return radix_tree_lookup(&stream_info->trb_address_map,
address >> SEGMENT_SHIFT);
}
#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
struct xhci_ring *xhci_stream_id_to_ring(
struct xhci_virt_device *dev,
unsigned int ep_index,
unsigned int stream_id)
{
struct xhci_virt_ep *ep = &dev->eps[ep_index];
if (stream_id == 0)
return ep->ring;
if (!ep->stream_info)
return NULL;
if (stream_id > ep->stream_info->num_streams)
return NULL;
return ep->stream_info->stream_rings[stream_id];
}
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static int xhci_test_radix_tree(struct xhci_hcd *xhci,
unsigned int num_streams,
struct xhci_stream_info *stream_info)
{
u32 cur_stream;
struct xhci_ring *cur_ring;
u64 addr;
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
struct xhci_ring *mapped_ring;
int trb_size = sizeof(union xhci_trb);
cur_ring = stream_info->stream_rings[cur_stream];
for (addr = cur_ring->first_seg->dma;
addr < cur_ring->first_seg->dma + SEGMENT_SIZE;
addr += trb_size) {
mapped_ring = dma_to_stream_ring(stream_info, addr);
if (cur_ring != mapped_ring) {
xhci_warn(xhci, "WARN: DMA address 0x%08llx "
"didn't map to stream ID %u; "
"mapped to ring %p\n",
(unsigned long long) addr,
cur_stream,
mapped_ring);
return -EINVAL;
}
}
/* One TRB after the end of the ring segment shouldn't return a
* pointer to the current ring (although it may be a part of a
* different ring).
*/
mapped_ring = dma_to_stream_ring(stream_info, addr);
if (mapped_ring != cur_ring) {
/* One TRB before should also fail */
addr = cur_ring->first_seg->dma - trb_size;
mapped_ring = dma_to_stream_ring(stream_info, addr);
}
if (mapped_ring == cur_ring) {
xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
"mapped to valid stream ID %u; "
"mapped ring = %p\n",
(unsigned long long) addr,
cur_stream,
mapped_ring);
return -EINVAL;
}
}
return 0;
}
#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
/*
* Change an endpoint's internal structure so it supports stream IDs. The
* number of requested streams includes stream 0, which cannot be used by device
* drivers.
*
* The number of stream contexts in the stream context array may be bigger than
* the number of streams the driver wants to use. This is because the number of
* stream context array entries must be a power of two.
*
* We need a radix tree for mapping physical addresses of TRBs to which stream
* ID they belong to. We need to do this because the host controller won't tell
* us which stream ring the TRB came from. We could store the stream ID in an
* event data TRB, but that doesn't help us for the cancellation case, since the
* endpoint may stop before it reaches that event data TRB.
*
* The radix tree maps the upper portion of the TRB DMA address to a ring
* segment that has the same upper portion of DMA addresses. For example, say I
* have segments of size 1KB, that are always 64-byte aligned. A segment may
* start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
* key to the stream ID is 0x43244. I can use the DMA address of the TRB to
* pass the radix tree a key to get the right stream ID:
*
* 0x10c90fff >> 10 = 0x43243
* 0x10c912c0 >> 10 = 0x43244
* 0x10c91400 >> 10 = 0x43245
*
* Obviously, only those TRBs with DMA addresses that are within the segment
* will make the radix tree return the stream ID for that ring.
*
* Caveats for the radix tree:
*
* The radix tree uses an unsigned long as its key. On 32-bit systems, an
* unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
* 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
* key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
* PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
* extended systems (where the DMA address can be bigger than 32-bits),
* if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
*/
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
unsigned int num_streams, gfp_t mem_flags)
{
struct xhci_stream_info *stream_info;
u32 cur_stream;
struct xhci_ring *cur_ring;
unsigned long key;
u64 addr;
int ret;
xhci_dbg(xhci, "Allocating %u streams and %u "
"stream context array entries.\n",
num_streams, num_stream_ctxs);
if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
return NULL;
}
xhci->cmd_ring_reserved_trbs++;
stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
if (!stream_info)
goto cleanup_trbs;
stream_info->num_streams = num_streams;
stream_info->num_stream_ctxs = num_stream_ctxs;
/* Initialize the array of virtual pointers to stream rings. */
stream_info->stream_rings = kzalloc(
sizeof(struct xhci_ring *)*num_streams,
mem_flags);
if (!stream_info->stream_rings)
goto cleanup_info;
/* Initialize the array of DMA addresses for stream rings for the HW. */
stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
num_stream_ctxs, &stream_info->ctx_array_dma,
mem_flags);
if (!stream_info->stream_ctx_array)
goto cleanup_ctx;
memset(stream_info->stream_ctx_array, 0,
sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
/* Allocate everything needed to free the stream rings later */
stream_info->free_streams_command =
xhci_alloc_command(xhci, true, true, mem_flags);
if (!stream_info->free_streams_command)
goto cleanup_ctx_array;
INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
/* Allocate rings for all the streams that the driver will use,
* and add their segment DMA addresses to the radix tree.
* Stream 0 is reserved.
*/
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
stream_info->stream_rings[cur_stream] =
xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
cur_ring = stream_info->stream_rings[cur_stream];
if (!cur_ring)
goto cleanup_rings;
cur_ring->stream_id = cur_stream;
/* Set deq ptr, cycle bit, and stream context type */
addr = cur_ring->first_seg->dma |
SCT_FOR_CTX(SCT_PRI_TR) |
cur_ring->cycle_state;
stream_info->stream_ctx_array[cur_stream].stream_ring =
cpu_to_le64(addr);
xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
cur_stream, (unsigned long long) addr);
key = (unsigned long)
(cur_ring->first_seg->dma >> SEGMENT_SHIFT);
ret = radix_tree_insert(&stream_info->trb_address_map,
key, cur_ring);
if (ret) {
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
goto cleanup_rings;
}
}
/* Leave the other unused stream ring pointers in the stream context
* array initialized to zero. This will cause the xHC to give us an
* error if the device asks for a stream ID we don't have setup (if it
* was any other way, the host controller would assume the ring is
* "empty" and wait forever for data to be queued to that stream ID).
*/
#if XHCI_DEBUG
/* Do a little test on the radix tree to make sure it returns the
* correct values.
*/
if (xhci_test_radix_tree(xhci, num_streams, stream_info))
goto cleanup_rings;
#endif
return stream_info;
cleanup_rings:
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
if (cur_ring) {
addr = cur_ring->first_seg->dma;
radix_tree_delete(&stream_info->trb_address_map,
addr >> SEGMENT_SHIFT);
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
}
xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx_array:
xhci_free_stream_ctx(xhci, num_stream_ctxs,
stream_info->stream_ctx_array,
stream_info->ctx_array_dma);
cleanup_ctx:
kfree(stream_info->stream_rings);
cleanup_info:
kfree(stream_info);
cleanup_trbs:
xhci->cmd_ring_reserved_trbs--;
return NULL;
}
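/*
 * The radix tree key math from the comment above xhci_alloc_stream_info(),
 * as a self-contained sketch (illustration only, assuming
 * SEGMENT_SHIFT == 10 for the 1KB segments in that example):
 */
#if 0
#define SEG_SHIFT_EXAMPLE 10

static unsigned long trb_key(unsigned long long trb_dma)
{
	return (unsigned long)(trb_dma >> SEG_SHIFT_EXAMPLE);
}
/*
 * trb_key(0x10c90fffULL) == 0x43243  (one byte before the segment)
 * trb_key(0x10c912c0ULL) == 0x43244  (a TRB inside the segment)
 * trb_key(0x10c91400ULL) == 0x43245  (first byte past the segment)
 */
#endif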
/*
* Sets the MaxPStreams field and the Linear Stream Array field.
* Sets the dequeue pointer to the stream context array.
*/
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
struct xhci_ep_ctx *ep_ctx,
struct xhci_stream_info *stream_info)
{
u32 max_primary_streams;
/* MaxPStreams is the number of stream context array entries, not the
* number we're actually using. Must be in 2^(MaxPstreams + 1) format.
* fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9, etc.
*/
max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
1 << (max_primary_streams + 1));
ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
| EP_HAS_LSA);
ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}
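/*
 * Self-contained sketch of the MaxPStreams encoding above (illustration
 * only; fls_example() mimics the kernel's fls()): the field stores
 * log2(array entries) - 1.
 */
#if 0
static unsigned int fls_example(unsigned int x)
{
	unsigned int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;	/* fls_example(64) == 7 */
}

static unsigned int max_pstreams(unsigned int num_stream_ctxs)
{
	return fls_example(num_stream_ctxs) - 2;	/* 64 entries -> 5 */
}
/* round trip: 1u << (max_pstreams(64) + 1) == 64 */
#endif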
/*
* Sets the MaxPStreams field and the Linear Stream Array field to 0.
* Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
* not at the beginning of the ring).
*/
void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
struct xhci_ep_ctx *ep_ctx,
struct xhci_virt_ep *ep)
{
dma_addr_t addr;
ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
}
/* Frees all stream contexts associated with the endpoint.
*
* Caller should fix the endpoint context streams fields.
*/
void xhci_free_stream_info(struct xhci_hcd *xhci,
struct xhci_stream_info *stream_info)
{
int cur_stream;
struct xhci_ring *cur_ring;
dma_addr_t addr;
if (!stream_info)
return;
for (cur_stream = 1; cur_stream < stream_info->num_streams;
cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
if (cur_ring) {
addr = cur_ring->first_seg->dma;
radix_tree_delete(&stream_info->trb_address_map,
addr >> SEGMENT_SHIFT);
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
}
xhci_free_command(xhci, stream_info->free_streams_command);
xhci->cmd_ring_reserved_trbs--;
if (stream_info->stream_ctx_array)
xhci_free_stream_ctx(xhci,
stream_info->num_stream_ctxs,
stream_info->stream_ctx_array,
stream_info->ctx_array_dma);
kfree(stream_info->stream_rings);
kfree(stream_info);
}
/***************** Device context manipulation *************************/
static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep)
{
init_timer(&ep->stop_cmd_timer);
ep->stop_cmd_timer.data = (unsigned long) ep;
ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
ep->xhci = xhci;
}
static void xhci_free_tt_info(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
int slot_id)
{
struct list_head *tt;
struct list_head *tt_list_head;
struct list_head *tt_next;
struct xhci_tt_bw_info *tt_info;
/* If the device never made it past the Set Address stage,
* it may not have the real_port set correctly.
*/
if (virt_dev->real_port == 0 ||
virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
xhci_dbg(xhci, "Bad real port.\n");
return;
}
tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
if (list_empty(tt_list_head))
return;
list_for_each(tt, tt_list_head) {
tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
if (tt_info->slot_id == slot_id)
break;
}
/* Cautionary measure in case the hub was disconnected before we
* stored the TT information.
*/
if (tt_info->slot_id != slot_id)
return;
tt_next = tt->next;
tt_info = list_entry(tt, struct xhci_tt_bw_info,
tt_list);
/* Multi-TT hubs will have more than one entry */
do {
list_del(tt);
kfree(tt_info);
tt = tt_next;
if (list_empty(tt_list_head))
break;
tt_next = tt->next;
tt_info = list_entry(tt, struct xhci_tt_bw_info,
tt_list);
} while (tt_info->slot_id == slot_id);
}
int xhci_alloc_tt_info(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags)
{
struct xhci_tt_bw_info *tt_info;
unsigned int num_ports;
int i, j;
if (!tt->multi)
num_ports = 1;
else
num_ports = hdev->maxchild;
for (i = 0; i < num_ports; i++) {
struct xhci_interval_bw_table *bw_table;
tt_info = kzalloc(sizeof(*tt_info), mem_flags);
if (!tt_info)
goto free_tts;
INIT_LIST_HEAD(&tt_info->tt_list);
list_add(&tt_info->tt_list,
&xhci->rh_bw[virt_dev->real_port - 1].tts);
tt_info->slot_id = virt_dev->udev->slot_id;
if (tt->multi)
tt_info->ttport = i+1;
bw_table = &tt_info->bw_table;
for (j = 0; j < XHCI_MAX_INTERVAL; j++)
INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
}
return 0;
free_tts:
xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
return -ENOMEM;
}
/* All the xhci_tds in the ring's TD list should be freed at this point.
* Should be called with xhci->lock held if there is any chance the TT lists
* will be manipulated by the configure endpoint, allocate device, or update
* hub functions while this function is removing the TT entries from the list.
*/
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
struct xhci_virt_device *dev;
int i;
int old_active_eps = 0;
/* Slot ID 0 is reserved */
if (slot_id == 0 || !xhci->devs[slot_id])
return;
dev = xhci->devs[slot_id];
xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
if (!dev)
return;
if (dev->tt_info)
old_active_eps = dev->tt_info->active_eps;
for (i = 0; i < 31; ++i) {
if (dev->eps[i].ring)
xhci_ring_free(xhci, dev->eps[i].ring);
if (dev->eps[i].stream_info)
xhci_free_stream_info(xhci,
dev->eps[i].stream_info);
/* Endpoints on the TT/root port lists should have been removed
* when usb_disable_device() was called for the device.
* We can't drop them anyway, because the udev might have gone
* away by this point, and we can't tell what speed it was.
*/
if (!list_empty(&dev->eps[i].bw_endpoint_list))
xhci_warn(xhci, "Slot %u endpoint %u "
"not removed from BW list!\n",
slot_id, i);
}
/* If this is a hub, free the TT(s) from the TT list */
xhci_free_tt_info(xhci, dev, slot_id);
/* If necessary, update the number of active TTs on this root port */
xhci_update_tt_active_eps(xhci, dev, old_active_eps);
if (dev->ring_cache) {
for (i = 0; i < dev->num_rings_cached; i++)
xhci_ring_free(xhci, dev->ring_cache[i]);
kfree(dev->ring_cache);
}
if (dev->in_ctx)
xhci_free_container_ctx(xhci, dev->in_ctx);
if (dev->out_ctx)
xhci_free_container_ctx(xhci, dev->out_ctx);
kfree(xhci->devs[slot_id]);
xhci->devs[slot_id] = NULL;
}
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
struct usb_device *udev, gfp_t flags)
{
struct xhci_virt_device *dev;
int i;
/* Slot ID 0 is reserved */
if (slot_id == 0 || xhci->devs[slot_id]) {
xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
return 0;
}
xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
if (!xhci->devs[slot_id])
return 0;
dev = xhci->devs[slot_id];
/* Allocate the (output) device context that will be used in the HC. */
dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
if (!dev->out_ctx)
goto fail;
xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
(unsigned long long)dev->out_ctx->dma);
/* Allocate the (input) device context for address device command */
dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
if (!dev->in_ctx)
goto fail;
xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
(unsigned long long)dev->in_ctx->dma);
/* Initialize the cancellation list and watchdog timers for each ep */
for (i = 0; i < 31; i++) {
xhci_init_endpoint_timer(xhci, &dev->eps[i]);
INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
}
/* Allocate endpoint 0 ring */
dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
if (!dev->eps[0].ring)
goto fail;
/* Allocate pointers to the ring cache */
dev->ring_cache = kzalloc(
sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
flags);
if (!dev->ring_cache)
goto fail;
dev->num_rings_cached = 0;
init_completion(&dev->cmd_completion);
INIT_LIST_HEAD(&dev->cmd_list);
dev->udev = udev;
/* Point to output device context in dcbaa. */
xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
slot_id,
&xhci->dcbaa->dev_context_ptrs[slot_id],
le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
return 1;
fail:
xhci_free_virt_device(xhci, slot_id);
return 0;
}
void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
struct usb_device *udev)
{
struct xhci_virt_device *virt_dev;
struct xhci_ep_ctx *ep0_ctx;
struct xhci_ring *ep_ring;
virt_dev = xhci->devs[udev->slot_id];
ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
ep_ring = virt_dev->eps[0].ring;
/*
* FIXME we don't keep track of the dequeue pointer very well after a
* Set TR dequeue pointer, so we're setting the dequeue pointer of the
* host to our enqueue pointer. This should only be called after a
* configured device has reset, so all control transfers should have
* been completed or cancelled before the reset.
*/
ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
ep_ring->enqueue)
| ep_ring->cycle_state);
}
/*
* The xHCI roothub may have ports of differing speeds in any order in the port
* status registers. xhci->port_array provides an array of the port speed for
* each offset into the port status registers.
*
* The xHCI hardware wants to know the roothub port number that the USB device
* is attached to (or the roothub port its ancestor hub is attached to). All we
* know is the index of that port under either the USB 2.0 or the USB 3.0
* roothub, but that doesn't give us the real index into the HW port status
* registers. Scan through the xHCI roothub port array, looking for the Nth
* entry of the correct port speed. Return the port number of that entry.
*/
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
struct usb_device *udev)
{
struct usb_device *top_dev;
unsigned int num_similar_speed_ports;
unsigned int faked_port_num;
int i;
for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
top_dev = top_dev->parent)
/* Found device below root hub */;
faked_port_num = top_dev->portnum;
for (i = 0, num_similar_speed_ports = 0;
i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
u8 port_speed = xhci->port_array[i];
/*
* Skip ports that don't have known speeds, or have duplicate
* Extended Capabilities port speed entries.
*/
if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
continue;
/*
* USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and
* 1.1 ports are under the USB 2.0 hub. If the port speed
* matches the device speed, it's a similar speed port.
*/
if ((port_speed == 0x03) == (udev->speed == USB_SPEED_SUPER))
num_similar_speed_ports++;
if (num_similar_speed_ports == faked_port_num)
/* Roothub ports are numbered from 1 to N */
return i+1;
}
return 0;
}
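/*
 * Worked example for the scan above (illustrative port layout): if
 * port_array reads { USB2, USB3, USB2, USB3 } and a SuperSpeed device
 * hangs off faked port 2 of the USB 3.0 roothub, the second USB3 entry
 * is array index 3, so the real (1-based) port number returned is 4.
 */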
/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
struct xhci_virt_device *dev;
struct xhci_ep_ctx *ep0_ctx;
struct xhci_slot_ctx *slot_ctx;
u32 port_num;
struct usb_device *top_dev;
dev = xhci->devs[udev->slot_id];
/* Slot ID 0 is reserved */
if (udev->slot_id == 0 || !dev) {
xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
udev->slot_id);
return -EINVAL;
}
ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
/* 3) Only the control endpoint is valid - one endpoint context */
slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
switch (udev->speed) {
case USB_SPEED_SUPER:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
break;
case USB_SPEED_HIGH:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
break;
case USB_SPEED_FULL:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
break;
case USB_SPEED_LOW:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
break;
case USB_SPEED_WIRELESS:
xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
return -EINVAL;
default:
/* Speed was set earlier, this shouldn't happen. */
BUG();
}
/* Find the root hub port this device is under */
port_num = xhci_find_real_port_number(xhci, udev);
if (!port_num)
return -EINVAL;
slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
/* Set the port number in the virtual_device to the faked port number */
for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
top_dev = top_dev->parent)
/* Found device below root hub */;
dev->fake_port = top_dev->portnum;
dev->real_port = port_num;
xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
/* Find the right bandwidth table that this device will be a part of.
* If this is a full speed device attached directly to a root port (or a
* descendant of one), it counts as a primary bandwidth domain, not a
* secondary bandwidth domain under a TT. An xhci_tt_info structure
* will never be created for the HS root hub.
*/
if (!udev->tt || !udev->tt->hub->parent) {
dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
} else {
struct xhci_root_port_bw_info *rh_bw;
struct xhci_tt_bw_info *tt_bw;
rh_bw = &xhci->rh_bw[port_num - 1];
/* Find the right TT. */
list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
if (tt_bw->slot_id != udev->tt->hub->slot_id)
continue;
if (!dev->udev->tt->multi ||
(udev->tt->multi &&
tt_bw->ttport == dev->udev->ttport)) {
dev->bw_table = &tt_bw->bw_table;
dev->tt_info = tt_bw;
break;
}
}
if (!dev->tt_info)
xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
}
/* Is this a LS/FS device under an external HS hub? */
if (udev->tt && udev->tt->hub->parent) {
slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
(udev->ttport << 8));
if (udev->tt->multi)
slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
}
xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
/* Step 4 - ring already allocated */
/* Step 5 */
ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
/*
* XXX: Not sure about wireless USB devices.
*/
switch (udev->speed) {
case USB_SPEED_SUPER:
ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512));
break;
case USB_SPEED_HIGH:
/* USB core guesses at a 64-byte max packet first for FS devices */
case USB_SPEED_FULL:
ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64));
break;
case USB_SPEED_LOW:
ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8));
break;
case USB_SPEED_WIRELESS:
xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
return -EINVAL;
default:
/* New speed? */
BUG();
}
/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3));
ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
dev->eps[0].ring->cycle_state);
/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
return 0;
}
/*
* Convert interval expressed as 2^(bInterval - 1) == interval into
* straight exponent value 2^n == interval.
*/
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
unsigned int interval;
interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
if (interval != ep->desc.bInterval - 1)
dev_warn(&udev->dev,
"ep %#x - rounding interval to %d %sframes\n",
ep->desc.bEndpointAddress,
1 << interval,
udev->speed == USB_SPEED_FULL ? "" : "micro");
if (udev->speed == USB_SPEED_FULL) {
/*
* Full speed isoc endpoints specify interval in frames,
* not microframes. We are using microframes everywhere,
* so adjust accordingly.
*/
interval += 3; /* 1 frame = 2^3 uframes */
}
return interval;
}
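/*
 * Worked example for the decode above (illustration only): a full-speed
 * isoc endpoint with bInterval = 4 means 2^(4-1) = 8 frames, i.e. an
 * exponent of 3; adding 3 converts frames to microframes, giving
 * 2^6 * 125us = 8ms, the same period in microframe units.
 */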
/*
* Convert bInterval expressed in microframes (in 1-255 range) to exponent of
* microframes, rounded down to nearest power of 2.
*/
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
struct usb_host_endpoint *ep, unsigned int desc_interval,
unsigned int min_exponent, unsigned int max_exponent)
{
unsigned int interval;
interval = fls(desc_interval) - 1;
interval = clamp_val(interval, min_exponent, max_exponent);
if ((1 << interval) != desc_interval)
dev_warn(&udev->dev,
"ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
ep->desc.bEndpointAddress,
1 << interval,
desc_interval);
return interval;
}
static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
return xhci_microframes_to_exponent(udev, ep,
ep->desc.bInterval, 0, 15);
}
static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
return xhci_microframes_to_exponent(udev, ep,
ep->desc.bInterval * 8, 3, 10);
}
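/*
 * Worked example for the two helpers above (illustration only): a
 * full-speed interrupt endpoint with bInterval = 5 is 5 * 8 = 40
 * microframes; fls(40) - 1 = 5, within the [3, 10] clamp, so the
 * endpoint is serviced every 2^5 = 32 microframes (4ms), rounded down
 * from the requested 5ms.
 */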
/* Return the polling or NAK interval.
*
* The polling interval is expressed in "microframes". If xHCI's Interval field
* is set to N, it will service the endpoint every 2^(Interval)*125us.
*
* The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
* is set to 0.
*/
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
unsigned int interval = 0;
switch (udev->speed) {
case USB_SPEED_HIGH:
/* Max NAK rate */
if (usb_endpoint_xfer_control(&ep->desc) ||
usb_endpoint_xfer_bulk(&ep->desc)) {
interval = xhci_parse_microframe_interval(udev, ep);
break;
}
/* Fall through - SS and HS isoc/int have same decoding */
case USB_SPEED_SUPER:
if (usb_endpoint_xfer_int(&ep->desc) ||
usb_endpoint_xfer_isoc(&ep->desc)) {
interval = xhci_parse_exponent_interval(udev, ep);
}
break;
case USB_SPEED_FULL:
if (usb_endpoint_xfer_isoc(&ep->desc)) {
interval = xhci_parse_exponent_interval(udev, ep);
break;
}
/*
* Fall through for interrupt endpoint interval decoding
* since it uses the same rules as low speed interrupt
* endpoints.
*/
case USB_SPEED_LOW:
if (usb_endpoint_xfer_int(&ep->desc) ||
usb_endpoint_xfer_isoc(&ep->desc)) {
interval = xhci_parse_frame_interval(udev, ep);
}
break;
default:
BUG();
}
return EP_INTERVAL(interval);
}
/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
* High speed endpoint descriptors can define "the number of additional
* transaction opportunities per microframe", but that goes in the Max Burst
* endpoint context field.
*/
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
if (udev->speed != USB_SPEED_SUPER ||
!usb_endpoint_xfer_isoc(&ep->desc))
return 0;
return ep->ss_ep_comp.bmAttributes;
}
static u32 xhci_get_endpoint_type(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
int in;
u32 type;
in = usb_endpoint_dir_in(&ep->desc);
if (usb_endpoint_xfer_control(&ep->desc)) {
type = EP_TYPE(CTRL_EP);
} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
if (in)
type = EP_TYPE(BULK_IN_EP);
else
type = EP_TYPE(BULK_OUT_EP);
} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
if (in)
type = EP_TYPE(ISOC_IN_EP);
else
type = EP_TYPE(ISOC_OUT_EP);
} else if (usb_endpoint_xfer_int(&ep->desc)) {
if (in)
type = EP_TYPE(INT_IN_EP);
else
type = EP_TYPE(INT_OUT_EP);
} else {
BUG();
}
return type;
}
/* Return the maximum endpoint service interval time (ESIT) payload.
* Basically, this is the maxpacket size, multiplied by the burst size
* and mult size.
*/
static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
struct usb_device *udev,
struct usb_host_endpoint *ep)
{
int max_burst;
int max_packet;
/* Only applies for interrupt or isochronous endpoints */
if (usb_endpoint_xfer_control(&ep->desc) ||
usb_endpoint_xfer_bulk(&ep->desc))
return 0;
if (udev->speed == USB_SPEED_SUPER)
return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
/* A 0 in max burst means 1 transfer per ESIT */
return max_packet * (max_burst + 1);
}
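/*
 * A numeric check of the ESIT payload math above (illustration only): a
 * high-speed isoc endpoint with wMaxPacketSize = 0x1400 decodes to
 * max_packet = 1024 (bits 10:0) and max_burst = 2 (bits 12:11, two
 * additional transactions), so the max ESIT payload is 1024 * 3 = 3072
 * bytes per service interval.
 */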
/* Set up an endpoint with one ring segment. Do not allocate stream rings.
* Drivers will have to call usb_alloc_streams() to do that.
*/
int xhci_endpoint_init(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct usb_device *udev,
struct usb_host_endpoint *ep,
gfp_t mem_flags)
{
unsigned int ep_index;
struct xhci_ep_ctx *ep_ctx;
struct xhci_ring *ep_ring;
unsigned int max_packet;
unsigned int max_burst;
enum xhci_ring_type type;
u32 max_esit_payload;
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
type = usb_endpoint_type(&ep->desc);
/* Set up the endpoint ring */
virt_dev->eps[ep_index].new_ring =
xhci_ring_alloc(xhci, 2, 1, type, mem_flags);
if (!virt_dev->eps[ep_index].new_ring) {
/* Attempt to use the ring cache */
if (virt_dev->num_rings_cached == 0)
return -ENOMEM;
virt_dev->eps[ep_index].new_ring =
virt_dev->ring_cache[virt_dev->num_rings_cached];
virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
virt_dev->num_rings_cached--;
xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
1, type);
}
virt_dev->eps[ep_index].skip = false;
ep_ring = virt_dev->eps[ep_index].new_ring;
ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);
ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
| EP_MULT(xhci_get_endpoint_mult(udev, ep)));
/* FIXME dig Mult and streams info out of ep companion desc */
/* Allow 3 retries for everything but isoc;
* CErr shall be set to 0 for Isoch endpoints.
*/
if (!usb_endpoint_xfer_isoc(&ep->desc))
ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(3));
else
ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(0));
ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));
/* Set the max packet size and max burst */
switch (udev->speed) {
case USB_SPEED_SUPER:
max_packet = usb_endpoint_maxp(&ep->desc);
ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
/* dig out max burst from ep companion desc */
max_packet = ep->ss_ep_comp.bMaxBurst;
ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
break;
case USB_SPEED_HIGH:
/* bits 11:12 specify the number of additional transaction
* opportunities per microframe (USB 2.0, section 9.6.6)
*/
if (usb_endpoint_xfer_isoc(&ep->desc) ||
usb_endpoint_xfer_int(&ep->desc)) {
max_burst = (usb_endpoint_maxp(&ep->desc)
& 0x1800) >> 11;
ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
}
/* Fall through */
case USB_SPEED_FULL:
case USB_SPEED_LOW:
max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
break;
default:
BUG();
}
max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
/*
* XXX no idea how to calculate the average TRB buffer length for bulk
* endpoints, as the driver gives us no clue how big each scatter gather
* list entry (or buffer) is going to be.
*
* For isochronous and interrupt endpoints, we set it to the max
* available, until we have new API in the USB core to allow drivers to
* declare how much bandwidth they actually need.
*
* Normally, it would be calculated by taking the total of the buffer
* lengths in the TD and then dividing by the number of TRBs in a TD,
* including link TRBs, No-op TRBs, and Event data TRBs. Since we don't
* use Event Data TRBs, and we don't chain in a link TRB on short
* transfers, we're basically dividing by 1.
*
* xHCI 1.0 specification indicates that the Average TRB Length should
* be set to 8 for control endpoints.
*/
if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
else
ep_ctx->tx_info |=
cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));
/* FIXME Debug endpoint context */
return 0;
}
void xhci_endpoint_zero(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct usb_host_endpoint *ep)
{
unsigned int ep_index;
struct xhci_ep_ctx *ep_ctx;
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
ep_ctx->ep_info = 0;
ep_ctx->ep_info2 = 0;
ep_ctx->deq = 0;
ep_ctx->tx_info = 0;
/* Don't free the endpoint ring until the set interface or configuration
* request succeeds.
*/
}
void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
bw_info->ep_interval = 0;
bw_info->mult = 0;
bw_info->num_packets = 0;
bw_info->max_packet_size = 0;
bw_info->type = 0;
bw_info->max_esit_payload = 0;
}
void xhci_update_bw_info(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_input_control_ctx *ctrl_ctx,
struct xhci_virt_device *virt_dev)
{
struct xhci_bw_info *bw_info;
struct xhci_ep_ctx *ep_ctx;
unsigned int ep_type;
int i;
for (i = 1; i < 31; ++i) {
bw_info = &virt_dev->eps[i].bw_info;
/* We can't tell what endpoint type is being dropped, but
* unconditionally clearing the bandwidth info for non-periodic
* endpoints should be harmless because the info will never be
* set in the first place.
*/
if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
/* Dropped endpoint */
xhci_clear_endpoint_bw_info(bw_info);
continue;
}
if (EP_IS_ADDED(ctrl_ctx, i)) {
ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
/* Ignore non-periodic endpoints */
if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
ep_type != ISOC_IN_EP &&
ep_type != INT_IN_EP)
continue;
/* Added or changed endpoint */
bw_info->ep_interval = CTX_TO_EP_INTERVAL(
le32_to_cpu(ep_ctx->ep_info));
/* Number of packets and mult are zero-based in the
* input context, but we want one-based for the
* interval table.
*/
bw_info->mult = CTX_TO_EP_MULT(
le32_to_cpu(ep_ctx->ep_info)) + 1;
bw_info->num_packets = CTX_TO_MAX_BURST(
le32_to_cpu(ep_ctx->ep_info2)) + 1;
bw_info->max_packet_size = MAX_PACKET_DECODED(
le32_to_cpu(ep_ctx->ep_info2));
bw_info->type = ep_type;
bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
le32_to_cpu(ep_ctx->tx_info));
}
}
}
/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
* Useful when you want to change one particular aspect of the endpoint and then
* issue a configure endpoint command.
*/
void xhci_endpoint_copy(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_container_ctx *out_ctx,
unsigned int ep_index)
{
struct xhci_ep_ctx *out_ep_ctx;
struct xhci_ep_ctx *in_ep_ctx;
out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
in_ep_ctx->ep_info = out_ep_ctx->ep_info;
in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
in_ep_ctx->deq = out_ep_ctx->deq;
in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}
/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
* Useful when you want to change one particular aspect of the endpoint and then
* issue a configure endpoint command. Only the context entries field matters,
* but we'll copy the whole thing anyway.
*/
void xhci_slot_copy(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_container_ctx *out_ctx)
{
struct xhci_slot_ctx *in_slot_ctx;
struct xhci_slot_ctx *out_slot_ctx;
in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
in_slot_ctx->dev_info = out_slot_ctx->dev_info;
in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
in_slot_ctx->tt_info = out_slot_ctx->tt_info;
in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
int i;
struct device *dev = xhci_to_hcd(xhci)->self.controller;
int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
if (!num_sp)
return 0;
xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
if (!xhci->scratchpad)
goto fail_sp;
xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
num_sp * sizeof(u64),
&xhci->scratchpad->sp_dma, flags);
if (!xhci->scratchpad->sp_array)
goto fail_sp2;
xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
if (!xhci->scratchpad->sp_buffers)
goto fail_sp3;
xhci->scratchpad->sp_dma_buffers =
kzalloc(sizeof(dma_addr_t) * num_sp, flags);
if (!xhci->scratchpad->sp_dma_buffers)
goto fail_sp4;
xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
for (i = 0; i < num_sp; i++) {
dma_addr_t dma;
void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
flags);
if (!buf)
goto fail_sp5;
xhci->scratchpad->sp_array[i] = dma;
xhci->scratchpad->sp_buffers[i] = buf;
xhci->scratchpad->sp_dma_buffers[i] = dma;
}
return 0;
fail_sp5:
for (i = i - 1; i >= 0; i--) {
dma_free_coherent(dev, xhci->page_size,
xhci->scratchpad->sp_buffers[i],
xhci->scratchpad->sp_dma_buffers[i]);
}
kfree(xhci->scratchpad->sp_dma_buffers);
fail_sp4:
kfree(xhci->scratchpad->sp_buffers);
fail_sp3:
dma_free_coherent(dev, num_sp * sizeof(u64),
xhci->scratchpad->sp_array,
xhci->scratchpad->sp_dma);
fail_sp2:
kfree(xhci->scratchpad);
xhci->scratchpad = NULL;
fail_sp:
return -ENOMEM;
}
static void scratchpad_free(struct xhci_hcd *xhci)
{
int num_sp;
int i;
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
if (!xhci->scratchpad)
return;
num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
for (i = 0; i < num_sp; i++) {
dma_free_coherent(&pdev->dev, xhci->page_size,
xhci->scratchpad->sp_buffers[i],
xhci->scratchpad->sp_dma_buffers[i]);
}
kfree(xhci->scratchpad->sp_dma_buffers);
kfree(xhci->scratchpad->sp_buffers);
dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
xhci->scratchpad->sp_array,
xhci->scratchpad->sp_dma);
kfree(xhci->scratchpad);
xhci->scratchpad = NULL;
}
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
bool allocate_in_ctx, bool allocate_completion,
gfp_t mem_flags)
{
struct xhci_command *command;
command = kzalloc(sizeof(*command), mem_flags);
if (!command)
return NULL;
if (allocate_in_ctx) {
command->in_ctx =
xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
mem_flags);
if (!command->in_ctx) {
kfree(command);
return NULL;
}
}
if (allocate_completion) {
command->completion =
kzalloc(sizeof(struct completion), mem_flags);
if (!command->completion) {
xhci_free_container_ctx(xhci, command->in_ctx);
kfree(command);
return NULL;
}
init_completion(command->completion);
}
command->status = 0;
INIT_LIST_HEAD(&command->cmd_list);
return command;
}
void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
{
if (urb_priv) {
kfree(urb_priv->td[0]);
kfree(urb_priv);
}
}
void xhci_free_command(struct xhci_hcd *xhci,
struct xhci_command *command)
{
xhci_free_container_ctx(xhci,
command->in_ctx);
kfree(command->completion);
kfree(command);
}
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
struct dev_info *dev_info, *next;
unsigned long flags;
int size;
int i;
/* Free the Event Ring Segment Table and the actual Event Ring */
size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
if (xhci->erst.entries)
dma_free_coherent(&pdev->dev, size,
xhci->erst.entries, xhci->erst.erst_dma_addr);
xhci->erst.entries = NULL;
xhci_dbg(xhci, "Freed ERST\n");
if (xhci->event_ring)
xhci_ring_free(xhci, xhci->event_ring);
xhci->event_ring = NULL;
xhci_dbg(xhci, "Freed event ring\n");
if (xhci->cmd_ring)
xhci_ring_free(xhci, xhci->cmd_ring);
xhci->cmd_ring = NULL;
xhci_dbg(xhci, "Freed command ring\n");
for (i = 1; i < MAX_HC_SLOTS; ++i)
xhci_free_virt_device(xhci, i);
if (xhci->segment_pool)
dma_pool_destroy(xhci->segment_pool);
xhci->segment_pool = NULL;
xhci_dbg(xhci, "Freed segment pool\n");
if (xhci->device_pool)
dma_pool_destroy(xhci->device_pool);
xhci->device_pool = NULL;
xhci_dbg(xhci, "Freed device context pool\n");
if (xhci->small_streams_pool)
dma_pool_destroy(xhci->small_streams_pool);
xhci->small_streams_pool = NULL;
xhci_dbg(xhci, "Freed small stream array pool\n");
if (xhci->medium_streams_pool)
dma_pool_destroy(xhci->medium_streams_pool);
xhci->medium_streams_pool = NULL;
xhci_dbg(xhci, "Freed medium stream array pool\n");
if (xhci->dcbaa)
dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
xhci->dcbaa, xhci->dcbaa->dma);
xhci->dcbaa = NULL;
scratchpad_free(xhci);
spin_lock_irqsave(&xhci->lock, flags);
list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) {
list_del(&dev_info->list);
kfree(dev_info);
}
spin_unlock_irqrestore(&xhci->lock, flags);
xhci->num_usb2_ports = 0;
xhci->num_usb3_ports = 0;
kfree(xhci->usb2_ports);
kfree(xhci->usb3_ports);
kfree(xhci->port_array);
kfree(xhci->rh_bw);
xhci->page_size = 0;
xhci->page_shift = 0;
xhci->bus_state[0].bus_suspended = 0;
xhci->bus_state[1].bus_suspended = 0;
}
static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
struct xhci_segment *input_seg,
union xhci_trb *start_trb,
union xhci_trb *end_trb,
dma_addr_t input_dma,
struct xhci_segment *result_seg,
char *test_name, int test_number)
{
unsigned long long start_dma;
unsigned long long end_dma;
struct xhci_segment *seg;
start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
if (seg != result_seg) {
xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
test_name, test_number);
xhci_warn(xhci, "Tested TRB math w/ seg %p and "
"input DMA 0x%llx\n",
input_seg,
(unsigned long long) input_dma);
xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
"ending TRB %p (0x%llx DMA)\n",
start_trb, start_dma,
end_trb, end_dma);
xhci_warn(xhci, "Expected seg %p, got seg %p\n",
result_seg, seg);
return -1;
}
return 0;
}
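/*
 * Illustrative note (a sketch, not part of the original driver): the
 * math exercised above relies on each TRB being 16 bytes, so the DMA
 * address of the n-th TRB in a segment is simply seg->dma + n * 16.
 * A hypothetical helper making that explicit:
 *
 *	static dma_addr_t example_trb_dma(struct xhci_segment *seg,
 *					  unsigned int n)
 *	{
 *		return seg->dma + n * sizeof(union xhci_trb);	// 16 bytes
 *	}
 *
 * The test vectors in xhci_check_trb_in_td_math() below are built from
 * exactly this kind of expression.
 */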
/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
{
struct {
dma_addr_t input_dma;
struct xhci_segment *result_seg;
} simple_test_vector[] = {
/* A zeroed DMA field should fail */
{ 0, NULL },
/* One TRB before the ring start should fail */
{ xhci->event_ring->first_seg->dma - 16, NULL },
/* One byte before the ring start should fail */
{ xhci->event_ring->first_seg->dma - 1, NULL },
/* Starting TRB should succeed */
{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
/* Ending TRB should succeed */
{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
xhci->event_ring->first_seg },
/* One byte after the ring end should fail */
{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
/* One TRB after the ring end should fail */
{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
/* An address of all ones should fail */
{ (dma_addr_t) (~0), NULL },
};
struct {
struct xhci_segment *input_seg;
union xhci_trb *start_trb;
union xhci_trb *end_trb;
dma_addr_t input_dma;
struct xhci_segment *result_seg;
} complex_test_vector[] = {
/* Test feeding a valid DMA address from a different ring */
{ .input_seg = xhci->event_ring->first_seg,
.start_trb = xhci->event_ring->first_seg->trbs,
.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
.input_dma = xhci->cmd_ring->first_seg->dma,
.result_seg = NULL,
},
/* Test feeding a valid end TRB from a different ring */
{ .input_seg = xhci->event_ring->first_seg,
.start_trb = xhci->event_ring->first_seg->trbs,
.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
.input_dma = xhci->cmd_ring->first_seg->dma,
.result_seg = NULL,
},
/* Test feeding a valid start and end TRB from a different ring */
{ .input_seg = xhci->event_ring->first_seg,
.start_trb = xhci->cmd_ring->first_seg->trbs,
.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
.input_dma = xhci->cmd_ring->first_seg->dma,
.result_seg = NULL,
},
/* TRB in this ring, but after this TD */
{ .input_seg = xhci->event_ring->first_seg,
.start_trb = &xhci->event_ring->first_seg->trbs[0],
.end_trb = &xhci->event_ring->first_seg->trbs[3],
.input_dma = xhci->event_ring->first_seg->dma + 4*16,
.result_seg = NULL,
},
/* TRB in this ring, but before this TD */
{ .input_seg = xhci->event_ring->first_seg,
.start_trb = &xhci->event_ring->first_seg->trbs[3],
.end_trb = &xhci->event_ring->first_seg->trbs[6],
.input_dma = xhci->event_ring->first_seg->dma + 2*16,
.result_seg = NULL,
},
/* TRB in this ring, but after this wrapped TD */
{ .input_seg = xhci->event_ring->first_seg,
.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
.end_trb = &xhci->event_ring->first_seg->trbs[1],
.input_dma = xhci->event_ring->first_seg->dma + 2*16,
.result_seg = NULL,
},
/* TRB in this ring, but before this wrapped TD */
{ .input_seg = xhci->event_ring->first_seg,
.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
.end_trb = &xhci->event_ring->first_seg->trbs[1],
.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
.result_seg = NULL,
},
/* TRB not in this ring, and we have a wrapped TD */
{ .input_seg = xhci->event_ring->first_seg,
.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
.end_trb = &xhci->event_ring->first_seg->trbs[1],
.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
.result_seg = NULL,
},
};
unsigned int num_tests;
int i, ret;
num_tests = ARRAY_SIZE(simple_test_vector);
for (i = 0; i < num_tests; i++) {
ret = xhci_test_trb_in_td(xhci,
xhci->event_ring->first_seg,
xhci->event_ring->first_seg->trbs,
&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
simple_test_vector[i].input_dma,
simple_test_vector[i].result_seg,
"Simple", i);
if (ret < 0)
return ret;
}
num_tests = ARRAY_SIZE(complex_test_vector);
for (i = 0; i < num_tests; i++) {
ret = xhci_test_trb_in_td(xhci,
complex_test_vector[i].input_seg,
complex_test_vector[i].start_trb,
complex_test_vector[i].end_trb,
complex_test_vector[i].input_dma,
complex_test_vector[i].result_seg,
"Complex", i);
if (ret < 0)
return ret;
}
xhci_dbg(xhci, "TRB math tests passed.\n");
return 0;
}
static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
u64 temp;
dma_addr_t deq;
deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
xhci->event_ring->dequeue);
if (deq == 0 && !in_interrupt())
xhci_warn(xhci, "WARN something wrong with SW event ring "
"dequeue ptr.\n");
/* Update HC event ring dequeue pointer */
temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
temp &= ERST_PTR_MASK;
/* Don't clear the EHB bit (which is RW1C) because
* there might be more events to service.
*/
temp &= ~ERST_EHB;
xhci_dbg(xhci, "// Write event ring dequeue pointer, "
"preserving EHB bit\n");
xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
&xhci->ir_set->erst_dequeue);
}
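/*
 * Illustrative sketch (an explanation with hypothetical mask names, not
 * original code): the sequence above is the usual pattern for 64-bit
 * xHCI registers that mix a pointer with status/control bits - read the
 * register, keep the flag bits, mask the write-1-to-clear EHB bit out
 * of the written value so pending events are not acknowledged by
 * accident, then OR in the new pointer:
 *
 *	u64 reg = xhci_read_64(xhci, addr);
 *	reg &= FLAG_BITS;		// keep low flag bits (hypothetical name)
 *	reg &= ~RW1C_BITS;		// never write 1 back to RW1C bits
 *	xhci_write_64(xhci, (new_ptr & ~FLAG_BITS) | reg, addr);
 */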
static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
__le32 __iomem *addr, u8 major_revision)
{
u32 temp, port_offset, port_count;
int i;
if (major_revision > 0x03) {
xhci_warn(xhci, "Ignoring unknown port speed, "
"Ext Cap %p, revision = 0x%x\n",
addr, major_revision);
/* Ignoring port protocol we can't understand. FIXME */
return;
}
/* Port offset and count in the third dword, see section 7.2 */
temp = xhci_readl(xhci, addr + 2);
port_offset = XHCI_EXT_PORT_OFF(temp);
port_count = XHCI_EXT_PORT_COUNT(temp);
xhci_dbg(xhci, "Ext Cap %p, port offset = %u, "
"count = %u, revision = 0x%x\n",
addr, port_offset, port_count, major_revision);
/* Port count includes the current port offset */
if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
/* WTF? "Valid values are ‘1’ to MaxPorts" */
return;
/* Check the host's USB2 LPM capability */
if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
(temp & XHCI_L1C)) {
xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n");
xhci->sw_lpm_support = 1;
}
if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n");
xhci->sw_lpm_support = 1;
if (temp & XHCI_HLC) {
xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n");
xhci->hw_lpm_support = 1;
}
}
port_offset--;
for (i = port_offset; i < (port_offset + port_count); i++) {
/* Duplicate entry. Ignore the port if the revisions differ. */
if (xhci->port_array[i] != 0) {
xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
" port %u\n", addr, i);
xhci_warn(xhci, "Port was marked as USB %u, "
"duplicated as USB %u\n",
xhci->port_array[i], major_revision);
/* Only adjust the roothub port counts if we haven't
* found a similar duplicate.
*/
if (xhci->port_array[i] != major_revision &&
xhci->port_array[i] != DUPLICATE_ENTRY) {
if (xhci->port_array[i] == 0x03)
xhci->num_usb3_ports--;
else
xhci->num_usb2_ports--;
xhci->port_array[i] = DUPLICATE_ENTRY;
}
/* FIXME: Should we disable the port? */
continue;
}
xhci->port_array[i] = major_revision;
if (major_revision == 0x03)
xhci->num_usb3_ports++;
else
xhci->num_usb2_ports++;
}
/* FIXME: Should we disable ports not in the Extended Capabilities? */
}
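/*
 * Worked example (illustrative): on a controller with four ports where
 * ports 1-2 are USB 2.0 and ports 3-4 are USB 3.0, two protocol
 * capabilities would typically be found, and after both calls to
 * xhci_add_in_port() the bookkeeping would read:
 *
 *	port_array[] = { 0x02, 0x02, 0x03, 0x03 };
 *	num_usb2_ports = 2;  num_usb3_ports = 2;
 *
 * The exact layout depends on the controller's Extended Capabilities.
 */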
/*
* Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
* specify what speeds each port is supposed to be. We can't count on the port
* speed bits in the PORTSC register being correct until a device is connected,
* but we need to set up the two fake roothubs with the correct number of USB
* 3.0 and USB 2.0 ports at host controller initialization time.
*/
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
__le32 __iomem *addr;
u32 offset;
unsigned int num_ports;
int i, j, port_index;
addr = &xhci->cap_regs->hcc_params;
offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
if (offset == 0) {
xhci_err(xhci, "No Extended Capability registers, "
"unable to set up roothub.\n");
return -ENODEV;
}
num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
if (!xhci->port_array)
return -ENOMEM;
xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
if (!xhci->rh_bw)
return -ENOMEM;
for (i = 0; i < num_ports; i++) {
struct xhci_interval_bw_table *bw_table;
INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
bw_table = &xhci->rh_bw[i].bw_table;
for (j = 0; j < XHCI_MAX_INTERVAL; j++)
INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
}
/*
* For whatever reason, the first capability offset is from the
* capability register base, not from the HCCPARAMS register.
* See section 5.3.6 for offset calculation.
*/
addr = &xhci->cap_regs->hc_capbase + offset;
while (1) {
u32 cap_id;
cap_id = xhci_readl(xhci, addr);
if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
xhci_add_in_port(xhci, num_ports, addr,
(u8) XHCI_EXT_PORT_MAJOR(cap_id));
offset = XHCI_EXT_CAPS_NEXT(cap_id);
if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
== num_ports)
break;
/*
* Once you're into the Extended Capabilities, the offset is
* always relative to the register holding the offset.
*/
addr += offset;
}
if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
xhci_warn(xhci, "No ports on the roothubs?\n");
return -ENODEV;
}
xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n",
xhci->num_usb2_ports, xhci->num_usb3_ports);
/* Place limits on the number of roothub ports so that the hub
* descriptors aren't longer than the USB core will allocate.
*/
if (xhci->num_usb3_ports > 15) {
xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n");
xhci->num_usb3_ports = 15;
}
if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n",
USB_MAXCHILDREN);
xhci->num_usb2_ports = USB_MAXCHILDREN;
}
/*
* Note we could have all USB 3.0 ports, or all USB 2.0 ports.
* Not sure how the USB core will handle a hub with no ports...
*/
if (xhci->num_usb2_ports) {
xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
xhci->num_usb2_ports, flags);
if (!xhci->usb2_ports)
return -ENOMEM;
port_index = 0;
for (i = 0; i < num_ports; i++) {
if (xhci->port_array[i] == 0x03 ||
xhci->port_array[i] == 0 ||
xhci->port_array[i] == DUPLICATE_ENTRY)
continue;
xhci->usb2_ports[port_index] =
&xhci->op_regs->port_status_base +
NUM_PORT_REGS*i;
xhci_dbg(xhci, "USB 2.0 port at index %u, "
"addr = %p\n", i,
xhci->usb2_ports[port_index]);
port_index++;
if (port_index == xhci->num_usb2_ports)
break;
}
}
if (xhci->num_usb3_ports) {
xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
xhci->num_usb3_ports, flags);
if (!xhci->usb3_ports)
return -ENOMEM;
port_index = 0;
for (i = 0; i < num_ports; i++)
if (xhci->port_array[i] == 0x03) {
xhci->usb3_ports[port_index] =
&xhci->op_regs->port_status_base +
NUM_PORT_REGS*i;
xhci_dbg(xhci, "USB 3.0 port at index %u, "
"addr = %p\n", i,
xhci->usb3_ports[port_index]);
port_index++;
if (port_index == xhci->num_usb3_ports)
break;
}
}
return 0;
}
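/*
 * Illustrative note (an assumption about usage, not original code): the
 * usb2_ports/usb3_ports arrays built above let the roothub code find a
 * port's PORTSC register without re-walking port_array, e.g.:
 *
 *	__le32 __iomem *portsc = xhci->usb3_ports[hub_port - 1];
 *	u32 status = xhci_readl(xhci, portsc);
 *
 * where hub_port would be the 1-based port number seen by the USB core.
 */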
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
dma_addr_t dma;
struct device *dev = xhci_to_hcd(xhci)->self.controller;
unsigned int val, val2;
u64 val_64;
struct xhci_segment *seg;
u32 page_size, temp;
int i;
page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
for (i = 0; i < 16; i++) {
if ((0x1 & page_size) != 0)
break;
page_size = page_size >> 1;
}
if (i < 16)
xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
else
xhci_warn(xhci, "WARN: no supported page size\n");
/* Use 4K pages, since that's common and the minimum the HC supports */
xhci->page_shift = 12;
xhci->page_size = 1 << xhci->page_shift;
xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
/*
* Program the Number of Device Slots Enabled field in the CONFIG
* register with the max value of slots the HC can handle.
*/
val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
(unsigned int) val);
val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
val |= (val2 & ~HCS_SLOTS_MASK);
xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
(unsigned int) val);
xhci_writel(xhci, val, &xhci->op_regs->config_reg);
/*
* Section 5.4.8 - doorbell array must be
* "physically contiguous and 64-byte (cache line) aligned".
*/
xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
GFP_KERNEL);
if (!xhci->dcbaa)
goto fail;
memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
xhci->dcbaa->dma = dma;
xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
/*
* Initialize the ring segment pool. The ring must be a contiguous
* structure comprised of TRBs. The TRBs must be 16 byte aligned,
* however, the command ring segment needs 64-byte aligned segments,
* so we pick the greater alignment need.
*/
xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
SEGMENT_SIZE, 64, xhci->page_size);
/* See Table 46 and Note on Figure 55 */
xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
2112, 64, xhci->page_size);
if (!xhci->segment_pool || !xhci->device_pool)
goto fail;
/* Linear stream context arrays don't have any boundary restrictions,
* and only need to be 16-byte aligned.
*/
xhci->small_streams_pool =
dma_pool_create("xHCI 256 byte stream ctx arrays",
dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
xhci->medium_streams_pool =
dma_pool_create("xHCI 1KB stream ctx arrays",
dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
* will be allocated with dma_alloc_coherent()
*/
if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
goto fail;
/* Set up the command ring to have one segment for now. */
xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
if (!xhci->cmd_ring)
goto fail;
xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
(unsigned long long)xhci->cmd_ring->first_seg->dma);
/* Set the address in the Command Ring Control register */
val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
xhci->cmd_ring->cycle_state;
xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val);
xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
xhci_dbg_cmd_ptrs(xhci);
val = xhci_readl(xhci, &xhci->cap_regs->db_off);
val &= DBOFF_MASK;
xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
" from cap regs base addr\n", val);
xhci->dba = (void __iomem *) xhci->cap_regs + val;
xhci_dbg_regs(xhci);
xhci_print_run_regs(xhci);
/* Set ir_set to interrupt register set 0 */
xhci->ir_set = &xhci->run_regs->ir_set[0];
/*
* Event ring setup: Allocate a normal ring, but also setup
* the event ring segment table (ERST). Section 4.9.3.
*/
xhci_dbg(xhci, "// Allocating event ring\n");
xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
flags);
if (!xhci->event_ring)
goto fail;
if (xhci_check_trb_in_td_math(xhci, flags) < 0)
goto fail;
xhci->erst.entries = dma_alloc_coherent(dev,
sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
GFP_KERNEL);
if (!xhci->erst.entries)
goto fail;
xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
(unsigned long long)dma);
memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
xhci->erst.num_entries = ERST_NUM_SEGS;
xhci->erst.erst_dma_addr = dma;
xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
xhci->erst.num_entries,
xhci->erst.entries,
(unsigned long long)xhci->erst.erst_dma_addr);
/* set ring base address and size for each segment table entry */
for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
struct xhci_erst_entry *entry = &xhci->erst.entries[val];
entry->seg_addr = cpu_to_le64(seg->dma);
entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
entry->rsvd = 0;
seg = seg->next;
}
/* set ERST count with the number of entries in the segment table */
val = xhci_readl(xhci, &xhci->ir_set->erst_size);
val &= ERST_SIZE_MASK;
val |= ERST_NUM_SEGS;
xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
val);
xhci_writel(xhci, val, &xhci->ir_set->erst_size);
xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
/* set the segment table base address */
xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
(unsigned long long)xhci->erst.erst_dma_addr);
val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
val_64 &= ERST_PTR_MASK;
val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
/* Set the event ring dequeue address */
xhci_set_hc_event_deq(xhci);
xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
xhci_print_ir_set(xhci, 0);
/*
* XXX: Might need to set the Interrupter Moderation Register to
* something other than the default (~1ms minimum between interrupts).
* See section 5.5.1.2.
*/
init_completion(&xhci->addr_dev);
for (i = 0; i < MAX_HC_SLOTS; ++i)
xhci->devs[i] = NULL;
for (i = 0; i < USB_MAXCHILDREN; ++i) {
xhci->bus_state[0].resume_done[i] = 0;
xhci->bus_state[1].resume_done[i] = 0;
}
if (scratchpad_alloc(xhci, flags))
goto fail;
if (xhci_setup_port_arrays(xhci, flags))
goto fail;
INIT_LIST_HEAD(&xhci->lpm_failed_devs);
/* Enable USB 3.0 device notifications for function remote wake, which
* is necessary for allowing USB 3.0 devices to do remote wakeup from
* U3 (device suspend).
*/
temp = xhci_readl(xhci, &xhci->op_regs->dev_notification);
temp &= ~DEV_NOTE_MASK;
temp |= DEV_NOTE_FWAKE;
xhci_writel(xhci, temp, &xhci->op_regs->dev_notification);
return 0;
fail:
xhci_warn(xhci, "Couldn't initialize memory\n");
xhci_halt(xhci);
xhci_reset(xhci);
xhci_mem_cleanup(xhci);
return -ENOMEM;
}
| gpl-2.0 |
dekkyy1/onex_3.1.10_miui_kernel | arch/arm/mach-imx/clock-imx1.c | 5383 | 14220 | /*
* Copyright (C) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <mach/clock.h>
#include <mach/hardware.h>
#include <mach/common.h>
#define IO_ADDR_CCM(off) (MX1_IO_ADDRESS(MX1_CCM_BASE_ADDR + (off)))
/* CCM register addresses */
#define CCM_CSCR IO_ADDR_CCM(0x0)
#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
#define CCM_PCDR IO_ADDR_CCM(0x20)
#define CCM_CSCR_CLKO_OFFSET 29
#define CCM_CSCR_CLKO_MASK (0x7 << 29)
#define CCM_CSCR_USB_OFFSET 26
#define CCM_CSCR_USB_MASK (0x7 << 26)
#define CCM_CSCR_OSC_EN_SHIFT 17
#define CCM_CSCR_SYSTEM_SEL (1 << 16)
#define CCM_CSCR_BCLK_OFFSET 10
#define CCM_CSCR_BCLK_MASK (0xf << 10)
#define CCM_CSCR_PRESC (1 << 15)
#define CCM_PCDR_PCLK3_OFFSET 16
#define CCM_PCDR_PCLK3_MASK (0x7f << 16)
#define CCM_PCDR_PCLK2_OFFSET 4
#define CCM_PCDR_PCLK2_MASK (0xf << 4)
#define CCM_PCDR_PCLK1_OFFSET 0
#define CCM_PCDR_PCLK1_MASK 0xf
#define IO_ADDR_SCM(off) (MX1_IO_ADDRESS(MX1_SCM_BASE_ADDR + (off)))
/* SCM register addresses */
#define SCM_GCCR IO_ADDR_SCM(0xc)
#define SCM_GCCR_DMA_CLK_EN_OFFSET 3
#define SCM_GCCR_CSI_CLK_EN_OFFSET 2
#define SCM_GCCR_MMA_CLK_EN_OFFSET 1
#define SCM_GCCR_USBD_CLK_EN_OFFSET 0
static int _clk_enable(struct clk *clk)
{
unsigned int reg;
reg = __raw_readl(clk->enable_reg);
reg |= 1 << clk->enable_shift;
__raw_writel(reg, clk->enable_reg);
return 0;
}
static void _clk_disable(struct clk *clk)
{
unsigned int reg;
reg = __raw_readl(clk->enable_reg);
reg &= ~(1 << clk->enable_shift);
__raw_writel(reg, clk->enable_reg);
}
static int _clk_can_use_parent(const struct clk *clk_arr[], unsigned int size,
struct clk *parent)
{
int i;
for (i = 0; i < size; i++)
if (parent == clk_arr[i])
return i;
return -EINVAL;
}
static unsigned long
_clk_simple_round_rate(struct clk *clk, unsigned long rate, unsigned int limit)
{
int div;
unsigned long parent_rate;
parent_rate = clk_get_rate(clk->parent);
div = parent_rate / rate;
if (parent_rate % rate)
div++;
if (div > limit)
div = limit;
return parent_rate / div;
}
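/*
 * Worked example (illustrative): with a 96 MHz parent, a request for
 * 20 MHz gives div = 96/20 = 4 with a remainder, so div is bumped to 5
 * and the function returns 96 MHz / 5 = 19.2 MHz - the closest rate not
 * above the request that the divider can produce.  With limit = 16 the
 * slowest reachable rate would be 96 MHz / 16 = 6 MHz.
 */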
static unsigned long _clk_parent_round_rate(struct clk *clk, unsigned long rate)
{
return clk->parent->round_rate(clk->parent, rate);
}
static int _clk_parent_set_rate(struct clk *clk, unsigned long rate)
{
return clk->parent->set_rate(clk->parent, rate);
}
static unsigned long clk16m_get_rate(struct clk *clk)
{
return 16000000;
}
static struct clk clk16m = {
.get_rate = clk16m_get_rate,
.enable = _clk_enable,
.enable_reg = CCM_CSCR,
.enable_shift = CCM_CSCR_OSC_EN_SHIFT,
.disable = _clk_disable,
};
/* in Hz */
static unsigned long clk32_rate;
static unsigned long clk32_get_rate(struct clk *clk)
{
return clk32_rate;
}
static struct clk clk32 = {
.get_rate = clk32_get_rate,
};
static unsigned long clk32_premult_get_rate(struct clk *clk)
{
return clk_get_rate(clk->parent) * 512;
}
static struct clk clk32_premult = {
.parent = &clk32,
.get_rate = clk32_premult_get_rate,
};
static const struct clk *prem_clk_clocks[] = {
&clk32_premult,
&clk16m,
};
static int prem_clk_set_parent(struct clk *clk, struct clk *parent)
{
int i;
unsigned int reg = __raw_readl(CCM_CSCR);
i = _clk_can_use_parent(prem_clk_clocks, ARRAY_SIZE(prem_clk_clocks),
parent);
switch (i) {
case 0:
reg &= ~CCM_CSCR_SYSTEM_SEL;
break;
case 1:
reg |= CCM_CSCR_SYSTEM_SEL;
break;
default:
return i;
}
__raw_writel(reg, CCM_CSCR);
return 0;
}
static struct clk prem_clk = {
.set_parent = prem_clk_set_parent,
};
static unsigned long system_clk_get_rate(struct clk *clk)
{
return mxc_decode_pll(__raw_readl(CCM_SPCTL0),
clk_get_rate(clk->parent));
}
static struct clk system_clk = {
.parent = &prem_clk,
.get_rate = system_clk_get_rate,
};
static unsigned long mcu_clk_get_rate(struct clk *clk)
{
return mxc_decode_pll(__raw_readl(CCM_MPCTL0),
clk_get_rate(clk->parent));
}
static struct clk mcu_clk = {
.parent = &clk32_premult,
.get_rate = mcu_clk_get_rate,
};
static unsigned long fclk_get_rate(struct clk *clk)
{
unsigned long fclk = clk_get_rate(clk->parent);
if (__raw_readl(CCM_CSCR) & CCM_CSCR_PRESC)
fclk /= 2;
return fclk;
}
static struct clk fclk = {
.parent = &mcu_clk,
.get_rate = fclk_get_rate,
};
/*
* get hclk ( SDRAM, CSI, Memory Stick, I2C, DMA )
*/
static unsigned long hclk_get_rate(struct clk *clk)
{
return clk_get_rate(clk->parent) / (((__raw_readl(CCM_CSCR) &
CCM_CSCR_BCLK_MASK) >> CCM_CSCR_BCLK_OFFSET) + 1);
}
static unsigned long hclk_round_rate(struct clk *clk, unsigned long rate)
{
return _clk_simple_round_rate(clk, rate, 16);
}
static int hclk_set_rate(struct clk *clk, unsigned long rate)
{
unsigned int div;
unsigned int reg;
unsigned long parent_rate;
parent_rate = clk_get_rate(clk->parent);
div = parent_rate / rate;
if (div > 16 || div < 1 || ((parent_rate / div) != rate))
return -EINVAL;
div--;
reg = __raw_readl(CCM_CSCR);
reg &= ~CCM_CSCR_BCLK_MASK;
reg |= div << CCM_CSCR_BCLK_OFFSET;
__raw_writel(reg, CCM_CSCR);
return 0;
}
static struct clk hclk = {
.parent = &system_clk,
.get_rate = hclk_get_rate,
.round_rate = hclk_round_rate,
.set_rate = hclk_set_rate,
};
static unsigned long clk48m_get_rate(struct clk *clk)
{
return clk_get_rate(clk->parent) / (((__raw_readl(CCM_CSCR) &
CCM_CSCR_USB_MASK) >> CCM_CSCR_USB_OFFSET) + 1);
}
static unsigned long clk48m_round_rate(struct clk *clk, unsigned long rate)
{
return _clk_simple_round_rate(clk, rate, 8);
}
static int clk48m_set_rate(struct clk *clk, unsigned long rate)
{
unsigned int div;
unsigned int reg;
unsigned long parent_rate;
parent_rate = clk_get_rate(clk->parent);
div = parent_rate / rate;
if (div > 8 || div < 1 || ((parent_rate / div) != rate))
return -EINVAL;
div--;
reg = __raw_readl(CCM_CSCR);
reg &= ~CCM_CSCR_USB_MASK;
reg |= div << CCM_CSCR_USB_OFFSET;
__raw_writel(reg, CCM_CSCR);
return 0;
}
static struct clk clk48m = {
.parent = &system_clk,
.get_rate = clk48m_get_rate,
.round_rate = clk48m_round_rate,
.set_rate = clk48m_set_rate,
};
/*
* get peripheral clock 1 ( UART[12], Timer[12], PWM )
*/
static unsigned long perclk1_get_rate(struct clk *clk)
{
return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) &
CCM_PCDR_PCLK1_MASK) >> CCM_PCDR_PCLK1_OFFSET) + 1);
}
static unsigned long perclk1_round_rate(struct clk *clk, unsigned long rate)
{
return _clk_simple_round_rate(clk, rate, 16);
}
static int perclk1_set_rate(struct clk *clk, unsigned long rate)
{
unsigned int div;
unsigned int reg;
unsigned long parent_rate;
parent_rate = clk_get_rate(clk->parent);
div = parent_rate / rate;
if (div > 16 || div < 1 || ((parent_rate / div) != rate))
return -EINVAL;
div--;
reg = __raw_readl(CCM_PCDR);
reg &= ~CCM_PCDR_PCLK1_MASK;
reg |= div << CCM_PCDR_PCLK1_OFFSET;
__raw_writel(reg, CCM_PCDR);
return 0;
}
/*
* get peripheral clock 2 ( LCD, SD, SPI[12] )
*/
static unsigned long perclk2_get_rate(struct clk *clk)
{
return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) &
CCM_PCDR_PCLK2_MASK) >> CCM_PCDR_PCLK2_OFFSET) + 1);
}
static unsigned long perclk2_round_rate(struct clk *clk, unsigned long rate)
{
return _clk_simple_round_rate(clk, rate, 16);
}
static int perclk2_set_rate(struct clk *clk, unsigned long rate)
{
unsigned int div;
unsigned int reg;
unsigned long parent_rate;
parent_rate = clk_get_rate(clk->parent);
div = parent_rate / rate;
if (div > 16 || div < 1 || ((parent_rate / div) != rate))
return -EINVAL;
div--;
reg = __raw_readl(CCM_PCDR);
reg &= ~CCM_PCDR_PCLK2_MASK;
reg |= div << CCM_PCDR_PCLK2_OFFSET;
__raw_writel(reg, CCM_PCDR);
return 0;
}
/*
* get peripheral clock 3 ( SSI )
*/
static unsigned long perclk3_get_rate(struct clk *clk)
{
return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) &
CCM_PCDR_PCLK3_MASK) >> CCM_PCDR_PCLK3_OFFSET) + 1);
}
static unsigned long perclk3_round_rate(struct clk *clk, unsigned long rate)
{
return _clk_simple_round_rate(clk, rate, 128);
}
static int perclk3_set_rate(struct clk *clk, unsigned long rate)
{
unsigned int div;
unsigned int reg;
unsigned long parent_rate;
parent_rate = clk_get_rate(clk->parent);
div = parent_rate / rate;
if (div > 128 || div < 1 || ((parent_rate / div) != rate))
return -EINVAL;
div--;
reg = __raw_readl(CCM_PCDR);
reg &= ~CCM_PCDR_PCLK3_MASK;
reg |= div << CCM_PCDR_PCLK3_OFFSET;
__raw_writel(reg, CCM_PCDR);
return 0;
}
static struct clk perclk[] = {
{
.id = 0,
.parent = &system_clk,
.get_rate = perclk1_get_rate,
.round_rate = perclk1_round_rate,
.set_rate = perclk1_set_rate,
}, {
.id = 1,
.parent = &system_clk,
.get_rate = perclk2_get_rate,
.round_rate = perclk2_round_rate,
.set_rate = perclk2_set_rate,
}, {
.id = 2,
.parent = &system_clk,
.get_rate = perclk3_get_rate,
.round_rate = perclk3_round_rate,
.set_rate = perclk3_set_rate,
}
};
static const struct clk *clko_clocks[] = {
&perclk[0],
&hclk,
&clk48m,
&clk16m,
&prem_clk,
&fclk,
};
static int clko_set_parent(struct clk *clk, struct clk *parent)
{
int i;
unsigned int reg;
i = _clk_can_use_parent(clko_clocks, ARRAY_SIZE(clko_clocks), parent);
if (i < 0)
return i;
reg = __raw_readl(CCM_CSCR) & ~CCM_CSCR_CLKO_MASK;
reg |= i << CCM_CSCR_CLKO_OFFSET;
__raw_writel(reg, CCM_CSCR);
if (clko_clocks[i]->set_rate && clko_clocks[i]->round_rate) {
clk->set_rate = _clk_parent_set_rate;
clk->round_rate = _clk_parent_round_rate;
} else {
clk->set_rate = NULL;
clk->round_rate = NULL;
}
return 0;
}
static struct clk clko_clk = {
.set_parent = clko_set_parent,
};
static struct clk dma_clk = {
.parent = &hclk,
.round_rate = _clk_parent_round_rate,
.set_rate = _clk_parent_set_rate,
.enable = _clk_enable,
.enable_reg = SCM_GCCR,
.enable_shift = SCM_GCCR_DMA_CLK_EN_OFFSET,
.disable = _clk_disable,
};
static struct clk csi_clk = {
.parent = &hclk,
.round_rate = _clk_parent_round_rate,
.set_rate = _clk_parent_set_rate,
.enable = _clk_enable,
.enable_reg = SCM_GCCR,
.enable_shift = SCM_GCCR_CSI_CLK_EN_OFFSET,
.disable = _clk_disable,
};
static struct clk mma_clk = {
.parent = &hclk,
.round_rate = _clk_parent_round_rate,
.set_rate = _clk_parent_set_rate,
.enable = _clk_enable,
.enable_reg = SCM_GCCR,
.enable_shift = SCM_GCCR_MMA_CLK_EN_OFFSET,
.disable = _clk_disable,
};
static struct clk usbd_clk = {
.parent = &clk48m,
.round_rate = _clk_parent_round_rate,
.set_rate = _clk_parent_set_rate,
.enable = _clk_enable,
.enable_reg = SCM_GCCR,
.enable_shift = SCM_GCCR_USBD_CLK_EN_OFFSET,
.disable = _clk_disable,
};
static struct clk gpt_clk = {
.parent = &perclk[0],
.round_rate = _clk_parent_round_rate,
.set_rate = _clk_parent_set_rate,
};
static struct clk uart_clk = {
.parent = &perclk[0],
.round_rate = _clk_parent_round_rate,
.set_rate = _clk_parent_set_rate,
};
static struct clk i2c_clk = {
.parent = &hclk,
.round_rate = _clk_parent_round_rate,
.set_rate = _clk_parent_set_rate,
};
static struct clk spi_clk = {
.parent = &perclk[1],
.round_rate = _clk_parent_round_rate,
.set_rate = _clk_parent_set_rate,
};
static struct clk sdhc_clk = {
.parent = &perclk[1],
.round_rate = _clk_parent_round_rate,
.set_rate = _clk_parent_set_rate,
};
static struct clk lcdc_clk = {
.parent = &perclk[1],
.round_rate = _clk_parent_round_rate,
.set_rate = _clk_parent_set_rate,
};
static struct clk mshc_clk = {
.parent = &hclk,
.round_rate = _clk_parent_round_rate,
.set_rate = _clk_parent_set_rate,
};
static struct clk ssi_clk = {
.parent = &perclk[2],
.round_rate = _clk_parent_round_rate,
.set_rate = _clk_parent_set_rate,
};
static struct clk rtc_clk = {
.parent = &clk32,
};
#define _REGISTER_CLOCK(d, n, c) \
{ \
.dev_id = d, \
.con_id = n, \
.clk = &c, \
},
static struct clk_lookup lookups[] __initdata = {
_REGISTER_CLOCK(NULL, "dma", dma_clk)
_REGISTER_CLOCK("mx1-camera.0", NULL, csi_clk)
_REGISTER_CLOCK(NULL, "mma", mma_clk)
_REGISTER_CLOCK("imx_udc.0", NULL, usbd_clk)
_REGISTER_CLOCK(NULL, "gpt", gpt_clk)
_REGISTER_CLOCK("imx1-uart.0", NULL, uart_clk)
_REGISTER_CLOCK("imx1-uart.1", NULL, uart_clk)
_REGISTER_CLOCK("imx1-uart.2", NULL, uart_clk)
_REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
_REGISTER_CLOCK("imx1-cspi.0", NULL, spi_clk)
_REGISTER_CLOCK("imx1-cspi.1", NULL, spi_clk)
_REGISTER_CLOCK("imx-mmc.0", NULL, sdhc_clk)
_REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk)
_REGISTER_CLOCK(NULL, "mshc", mshc_clk)
_REGISTER_CLOCK(NULL, "ssi", ssi_clk)
_REGISTER_CLOCK("mxc_rtc.0", NULL, rtc_clk)
};
int __init mx1_clocks_init(unsigned long fref)
{
unsigned int reg;
/* disable clocks we are able to */
__raw_writel(0, SCM_GCCR);
clk32_rate = fref;
reg = __raw_readl(CCM_CSCR);
/* detect clock reference for system PLL */
if (reg & CCM_CSCR_SYSTEM_SEL) {
prem_clk.parent = &clk16m;
} else {
/* ensure that oscillator is disabled */
reg &= ~(1 << CCM_CSCR_OSC_EN_SHIFT);
__raw_writel(reg, CCM_CSCR);
prem_clk.parent = &clk32_premult;
}
/* detect reference for CLKO */
reg = (reg & CCM_CSCR_CLKO_MASK) >> CCM_CSCR_CLKO_OFFSET;
clko_clk.parent = (struct clk *)clko_clocks[reg];
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
clk_enable(&hclk);
clk_enable(&fclk);
mxc_timer_init(&gpt_clk, MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR),
MX1_TIM1_INT);
return 0;
}
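/*
 * Example usage (a sketch, not original code): once mx1_clocks_init()
 * has registered the lookup table above, a platform driver obtains its
 * clock through clkdev, e.g. for the first UART:
 *
 *	struct clk *clk = clk_get(&pdev->dev, NULL);	// "imx1-uart.0"
 *
 *	if (!IS_ERR(clk)) {
 *		clk_enable(clk);
 *		rate = clk_get_rate(clk);	// perclk1-derived rate
 *	}
 */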
| gpl-2.0 |
invaderjohn/kernel_lge_msm8226_g2m | arch/ia64/sn/kernel/io_init.c | 6663 | 10655 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
#include <asm/sn/module.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>
#include "xtalk/hubdev.h"
/*
* The code in this file will only be executed when running with
* a PROM that does _not_ have base ACPI IO support.
* (i.e., SN_ACPI_BASE_SUPPORT() == 0)
*/
static int max_segment_number; /* Default highest segment number */
static int max_pcibus_number = 255; /* Default highest pci bus number */
/*
* Retrieve the hub device info structure for the given nasid.
*/
static inline u64 sal_get_hubdev_info(u64 handle, u64 address)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_GET_HUBDEV_INFO,
(u64) handle, (u64) address, 0, 0, 0, 0, 0);
return ret_stuff.v0;
}
/*
* Retrieve the pci bus information given the bus number.
*/
static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_GET_PCIBUS_INFO,
(u64) segment, (u64) busnum, (u64) address, 0, 0, 0, 0);
return ret_stuff.v0;
}
/*
* Retrieve the pci device information given the bus and device|function number.
*/
static inline u64
sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
u64 sn_irq_info)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
(u64) segment, (u64) bus_number, (u64) devfn,
(u64) pci_dev,
sn_irq_info, 0, 0);
return ret_stuff.v0;
}
/*
* sn_fixup_ionodes() - This routine initializes the HUB data structure for
* each node in the system. This function is only
* executed when running with a non-ACPI capable PROM.
*/
static void __init sn_fixup_ionodes(void)
{
struct hubdev_info *hubdev;
u64 status;
u64 nasid;
int i;
extern void sn_common_hubdev_init(struct hubdev_info *);
/*
* Get SGI Specific HUB chipset information.
* Inform Prom that this kernel can support domain bus numbering.
*/
for (i = 0; i < num_cnodes; i++) {
hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
nasid = cnodeid_to_nasid(i);
hubdev->max_segment_number = 0xffffffff;
hubdev->max_pcibus_number = 0xff;
status = sal_get_hubdev_info(nasid, (u64) __pa(hubdev));
if (status)
continue;
/* Save the largest Domain and pcibus numbers found. */
if (hubdev->max_segment_number) {
/*
* Dealing with a Prom that supports segments.
*/
max_segment_number = hubdev->max_segment_number;
max_pcibus_number = hubdev->max_pcibus_number;
}
sn_common_hubdev_init(hubdev);
}
}
/*
* sn_legacy_pci_window_fixup - Create PCI controller windows for
* legacy IO and MEM space. This needs to
* be done here, as the PROM does not have
* ACPI support defining the root buses
* and their resources (_CRS).
*/
static void
sn_legacy_pci_window_fixup(struct pci_controller *controller,
u64 legacy_io, u64 legacy_mem)
{
controller->window = kcalloc(2, sizeof(struct pci_window),
GFP_KERNEL);
BUG_ON(controller->window == NULL);
controller->window[0].offset = legacy_io;
controller->window[0].resource.name = "legacy_io";
controller->window[0].resource.flags = IORESOURCE_IO;
controller->window[0].resource.start = legacy_io;
controller->window[0].resource.end =
controller->window[0].resource.start + 0xffff;
controller->window[0].resource.parent = &ioport_resource;
controller->window[1].offset = legacy_mem;
controller->window[1].resource.name = "legacy_mem";
controller->window[1].resource.flags = IORESOURCE_MEM;
controller->window[1].resource.start = legacy_mem;
controller->window[1].resource.end =
controller->window[1].resource.start + (1024 * 1024) - 1;
controller->window[1].resource.parent = &iomem_resource;
controller->windows = 2;
}
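/*
 * Illustrative note (an assumption, not original code): window[n].offset
 * records the difference between the processor view and the PCI bus view
 * of an address, so the generic resource helpers can translate both ways:
 *
 *	bus_addr = res->start - window->offset;
 *	cpu_addr = bus_addr + window->offset;
 *
 * For the legacy windows above the bus-side ranges start at 0, which is
 * why the offset is simply the mapped base (legacy_io / legacy_mem).
 */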
/*
* sn_pci_window_fixup() - Create a pci_window for each device resource.
* It will setup pci_windows for use by
* pcibios_bus_to_resource(), pcibios_resource_to_bus(),
* etc.
*/
static void
sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
s64 * pci_addrs)
{
struct pci_controller *controller = PCI_CONTROLLER(dev->bus);
unsigned int i;
unsigned int idx;
unsigned int new_count;
struct pci_window *new_window;
if (count == 0)
return;
idx = controller->windows;
new_count = controller->windows + count;
new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL);
BUG_ON(new_window == NULL);
if (controller->window) {
memcpy(new_window, controller->window,
sizeof(struct pci_window) * controller->windows);
kfree(controller->window);
}
/* Setup a pci_window for each device resource. */
for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
if (pci_addrs[i] == -1)
continue;
new_window[idx].offset = dev->resource[i].start - pci_addrs[i];
new_window[idx].resource = dev->resource[i];
idx++;
}
controller->windows = new_count;
controller->window = new_window;
}
/*
* sn_io_slot_fixup() - We are not running with an ACPI capable PROM,
* and need to convert the pci_dev->resource
* 'start' and 'end' addresses to mapped addresses,
* and setup the pci_controller->window array entries.
*/
void
sn_io_slot_fixup(struct pci_dev *dev)
{
unsigned int count = 0;
int idx;
s64 pci_addrs[PCI_ROM_RESOURCE + 1];
unsigned long addr, end, size, start;
struct pcidev_info *pcidev_info;
struct sn_irq_info *sn_irq_info;
int status;
pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
if (!pcidev_info)
panic("%s: Unable to alloc memory for pcidev_info", __func__);
sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
if (!sn_irq_info)
panic("%s: Unable to alloc memory for sn_irq_info", __func__);
/* Call to retrieve pci device information needed by kernel. */
status = sal_get_pcidev_info((u64) pci_domain_nr(dev),
(u64) dev->bus->number,
dev->devfn,
(u64) __pa(pcidev_info),
(u64) __pa(sn_irq_info));
BUG_ON(status); /* Cannot get platform pci device information */
/* Copy over PIO Mapped Addresses */
for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
if (!pcidev_info->pdi_pio_mapped_addr[idx]) {
pci_addrs[idx] = -1;
continue;
}
start = dev->resource[idx].start;
end = dev->resource[idx].end;
size = end - start;
if (size == 0) {
pci_addrs[idx] = -1;
continue;
}
pci_addrs[idx] = start;
count++;
addr = pcidev_info->pdi_pio_mapped_addr[idx];
addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET;
dev->resource[idx].start = addr;
dev->resource[idx].end = addr + size;
/*
* if it's already in the device structure, remove it before
* inserting
*/
if (dev->resource[idx].parent && dev->resource[idx].parent->child)
release_resource(&dev->resource[idx]);
if (dev->resource[idx].flags & IORESOURCE_IO)
insert_resource(&ioport_resource, &dev->resource[idx]);
else
insert_resource(&iomem_resource, &dev->resource[idx]);
/*
* If ROM, set the actual ROM image size, and mark as
* shadowed in PROM.
*/
if (idx == PCI_ROM_RESOURCE) {
size_t image_size;
void __iomem *rom;
rom = ioremap(pci_resource_start(dev, PCI_ROM_RESOURCE),
size + 1);
image_size = pci_get_rom_size(dev, rom, size + 1);
dev->resource[PCI_ROM_RESOURCE].end =
dev->resource[PCI_ROM_RESOURCE].start +
image_size - 1;
dev->resource[PCI_ROM_RESOURCE].flags |=
IORESOURCE_ROM_BIOS_COPY;
}
}
/* Create a pci_window in the pci_controller struct for
* each device resource.
*/
if (count > 0)
sn_pci_window_fixup(dev, count, pci_addrs);
sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
}
EXPORT_SYMBOL(sn_io_slot_fixup);
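/*
 * Note on the PIO address fix-up above (illustrative, not original
 * code): ((addr << 4) >> 4) clears the top four bits of the 64-bit
 * address - the ia64 region bits - and OR-ing in __IA64_UNCACHED_OFFSET
 * re-tags the result as an address in the uncached identity-mapped
 * region:
 *
 *	phys = (addr << 4) >> 4;		// strip region bits
 *	addr = phys | __IA64_UNCACHED_OFFSET;	// uncached view of phys
 */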
/*
* sn_pci_controller_fixup() - This routine sets up a bus's resources
* consistent with the Linux PCI abstraction layer.
*/
static void __init
sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
{
s64 status = 0;
struct pci_controller *controller;
struct pcibus_bussoft *prom_bussoft_ptr;
LIST_HEAD(resources);
int i;
status = sal_get_pcibus_info((u64) segment, (u64) busnum,
(u64) ia64_tpa(&prom_bussoft_ptr));
if (status > 0)
return; /* bus # does not exist */
prom_bussoft_ptr = __va(prom_bussoft_ptr);
controller = kzalloc(sizeof(*controller), GFP_KERNEL);
BUG_ON(!controller);
controller->segment = segment;
/*
* Temporarily save the prom_bussoft_ptr for use by sn_bus_fixup().
* (platform_data will be overwritten later in sn_common_bus_fixup())
*/
controller->platform_data = prom_bussoft_ptr;
sn_legacy_pci_window_fixup(controller,
prom_bussoft_ptr->bs_legacy_io,
prom_bussoft_ptr->bs_legacy_mem);
for (i = 0; i < controller->windows; i++)
pci_add_resource_offset(&resources,
&controller->window[i].resource,
controller->window[i].offset);
bus = pci_scan_root_bus(NULL, busnum, &pci_root_ops, controller,
&resources);
if (bus == NULL)
goto error_return; /* error, or bus already scanned */
bus->sysdata = controller;
return;
error_return:
kfree(controller);
return;
}
/*
* sn_bus_fixup
*/
void
sn_bus_fixup(struct pci_bus *bus)
{
struct pci_dev *pci_dev = NULL;
struct pcibus_bussoft *prom_bussoft_ptr;
if (!bus->parent) { /* If root bus */
prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data;
if (prom_bussoft_ptr == NULL) {
printk(KERN_ERR
"sn_bus_fixup: 0x%04x:0x%02x Unable to "
"obtain prom_bussoft_ptr\n",
pci_domain_nr(bus), bus->number);
return;
}
sn_common_bus_fixup(bus, prom_bussoft_ptr);
}
list_for_each_entry(pci_dev, &bus->devices, bus_list) {
sn_io_slot_fixup(pci_dev);
}
}
/*
* sn_io_init - PROM does not have ACPI support to define nodes or root buses,
* so we need to do things the hard way, including initiating the
* bus scanning ourselves.
*/
void __init sn_io_init(void)
{
int i, j;
sn_fixup_ionodes();
/* busses are not known yet ... */
for (i = 0; i <= max_segment_number; i++)
for (j = 0; j <= max_pcibus_number; j++)
sn_pci_controller_fixup(i, j, NULL);
}
| gpl-2.0 |
MetalPhoenix45/SmoothGKernel | drivers/pci/pcie/aer/aerdrv_acpi.c | 8199 | 2939 | /*
* Access ACPI _OSC method
*
* Copyright (C) 2006 Intel Corp.
* Tom Long Nguyen (tom.l.nguyen@intel.com)
* Zhang Yanmin (yanmin.zhang@intel.com)
*
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <linux/delay.h>
#include <acpi/apei.h>
#include "aerdrv.h"
#ifdef CONFIG_ACPI_APEI
static inline int hest_match_pci(struct acpi_hest_aer_common *p,
struct pci_dev *pci)
{
return (0 == pci_domain_nr(pci->bus) &&
p->bus == pci->bus->number &&
p->device == PCI_SLOT(pci->devfn) &&
p->function == PCI_FUNC(pci->devfn));
}
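/*
 * Note (illustrative): the matcher above hard-codes segment 0 in the
 * pci_domain_nr() test, so HEST entries can only ever match devices in
 * PCI domain 0; devices in any other domain fall through as not
 * firmware-first on this path.
 */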
struct aer_hest_parse_info {
struct pci_dev *pci_dev;
int firmware_first;
};
static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
{
struct aer_hest_parse_info *info = data;
struct acpi_hest_aer_common *p;
u8 pcie_type = 0;
u8 bridge = 0;
int ff = 0;
switch (hest_hdr->type) {
case ACPI_HEST_TYPE_AER_ROOT_PORT:
pcie_type = PCI_EXP_TYPE_ROOT_PORT;
break;
case ACPI_HEST_TYPE_AER_ENDPOINT:
pcie_type = PCI_EXP_TYPE_ENDPOINT;
break;
case ACPI_HEST_TYPE_AER_BRIDGE:
if ((info->pci_dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
bridge = 1;
break;
default:
return 0;
}
p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
if (p->flags & ACPI_HEST_GLOBAL) {
if ((info->pci_dev->is_pcie &&
info->pci_dev->pcie_type == pcie_type) || bridge)
ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
} else
if (hest_match_pci(p, info->pci_dev))
ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
info->firmware_first = ff;
return 0;
}
static void aer_set_firmware_first(struct pci_dev *pci_dev)
{
int rc;
struct aer_hest_parse_info info = {
.pci_dev = pci_dev,
.firmware_first = 0,
};
rc = apei_hest_parse(aer_hest_parse, &info);
if (rc)
pci_dev->__aer_firmware_first = 0;
else
pci_dev->__aer_firmware_first = info.firmware_first;
pci_dev->__aer_firmware_first_valid = 1;
}
int pcie_aer_get_firmware_first(struct pci_dev *dev)
{
if (!dev->__aer_firmware_first_valid)
aer_set_firmware_first(dev);
return dev->__aer_firmware_first;
}
static bool aer_firmware_first;
static int aer_hest_parse_aff(struct acpi_hest_header *hest_hdr, void *data)
{
struct acpi_hest_aer_common *p;
if (aer_firmware_first)
return 0;
switch (hest_hdr->type) {
case ACPI_HEST_TYPE_AER_ROOT_PORT:
case ACPI_HEST_TYPE_AER_ENDPOINT:
case ACPI_HEST_TYPE_AER_BRIDGE:
p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
aer_firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
default:
return 0;
}
}
/**
* aer_acpi_firmware_first - Check if APEI should control AER.
*/
bool aer_acpi_firmware_first(void)
{
static bool parsed = false;
if (!parsed) {
apei_hest_parse(aer_hest_parse_aff, NULL);
parsed = true;
}
return aer_firmware_first;
}
#endif
| gpl-2.0 |
TeamEOS/kernel_moto_shamu | tools/power/cpupower/lib/cpufreq.c | 9991 | 3890 | /*
* (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de>
*
* Licensed under the terms of the GNU GPL License version 2.
*/
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include "cpufreq.h"
#include "sysfs.h"
int cpufreq_cpu_exists(unsigned int cpu)
{
return sysfs_cpu_exists(cpu);
}
unsigned long cpufreq_get_freq_kernel(unsigned int cpu)
{
return sysfs_get_freq_kernel(cpu);
}
unsigned long cpufreq_get_freq_hardware(unsigned int cpu)
{
return sysfs_get_freq_hardware(cpu);
}
unsigned long cpufreq_get_transition_latency(unsigned int cpu)
{
return sysfs_get_freq_transition_latency(cpu);
}
int cpufreq_get_hardware_limits(unsigned int cpu,
unsigned long *min,
unsigned long *max)
{
if ((!min) || (!max))
return -EINVAL;
return sysfs_get_freq_hardware_limits(cpu, min, max);
}
char *cpufreq_get_driver(unsigned int cpu)
{
return sysfs_get_freq_driver(cpu);
}
void cpufreq_put_driver(char *ptr)
{
if (!ptr)
return;
free(ptr);
}
struct cpufreq_policy *cpufreq_get_policy(unsigned int cpu)
{
return sysfs_get_freq_policy(cpu);
}
void cpufreq_put_policy(struct cpufreq_policy *policy)
{
if ((!policy) || (!policy->governor))
return;
free(policy->governor);
policy->governor = NULL;
free(policy);
}
struct cpufreq_available_governors *cpufreq_get_available_governors(unsigned
int cpu)
{
return sysfs_get_freq_available_governors(cpu);
}
void cpufreq_put_available_governors(struct cpufreq_available_governors *any)
{
struct cpufreq_available_governors *tmp, *next;
if (!any)
return;
tmp = any->first;
while (tmp) {
next = tmp->next;
if (tmp->governor)
free(tmp->governor);
free(tmp);
tmp = next;
}
}
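/*
 * Example usage (a sketch, not part of the library; assumes a userspace
 * caller with <stdio.h> that links against this library):
 *
 *	struct cpufreq_available_governors *list, *g;
 *
 *	list = cpufreq_get_available_governors(0);
 *	for (g = list; g; g = g->next)
 *		printf("%s\n", g->governor);
 *	cpufreq_put_available_governors(list);
 *
 * The put call walks the chain from ->first, so it frees every element.
 */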
struct cpufreq_available_frequencies
*cpufreq_get_available_frequencies(unsigned int cpu)
{
return sysfs_get_available_frequencies(cpu);
}
void cpufreq_put_available_frequencies(struct cpufreq_available_frequencies
		*any)
{
struct cpufreq_available_frequencies *tmp, *next;
if (!any)
return;
tmp = any->first;
while (tmp) {
next = tmp->next;
free(tmp);
tmp = next;
}
}
struct cpufreq_affected_cpus *cpufreq_get_affected_cpus(unsigned int cpu)
{
return sysfs_get_freq_affected_cpus(cpu);
}
void cpufreq_put_affected_cpus(struct cpufreq_affected_cpus *any)
{
struct cpufreq_affected_cpus *tmp, *next;
if (!any)
return;
tmp = any->first;
while (tmp) {
next = tmp->next;
free(tmp);
tmp = next;
}
}
struct cpufreq_affected_cpus *cpufreq_get_related_cpus(unsigned int cpu)
{
return sysfs_get_freq_related_cpus(cpu);
}
void cpufreq_put_related_cpus(struct cpufreq_affected_cpus *any)
{
cpufreq_put_affected_cpus(any);
}
int cpufreq_set_policy(unsigned int cpu, struct cpufreq_policy *policy)
{
if (!policy || !(policy->governor))
return -EINVAL;
return sysfs_set_freq_policy(cpu, policy);
}
int cpufreq_modify_policy_min(unsigned int cpu, unsigned long min_freq)
{
return sysfs_modify_freq_policy_min(cpu, min_freq);
}
int cpufreq_modify_policy_max(unsigned int cpu, unsigned long max_freq)
{
return sysfs_modify_freq_policy_max(cpu, max_freq);
}
int cpufreq_modify_policy_governor(unsigned int cpu, char *governor)
{
if ((!governor) || (strlen(governor) > 19))
return -EINVAL;
return sysfs_modify_freq_policy_governor(cpu, governor);
}
int cpufreq_set_frequency(unsigned int cpu, unsigned long target_frequency)
{
return sysfs_set_frequency(cpu, target_frequency);
}
struct cpufreq_stats *cpufreq_get_stats(unsigned int cpu,
unsigned long long *total_time)
{
return sysfs_get_freq_stats(cpu, total_time);
}
void cpufreq_put_stats(struct cpufreq_stats *any)
{
struct cpufreq_stats *tmp, *next;
if (!any)
return;
tmp = any->first;
while (tmp) {
next = tmp->next;
free(tmp);
tmp = next;
}
}
unsigned long cpufreq_get_transitions(unsigned int cpu)
{
return sysfs_get_freq_transitions(cpu);
}
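/*
 * Example usage (a sketch, not part of the library): cap CPU 0 at 1 GHz
 * and switch it to the "powersave" governor.  Frequencies are in kHz,
 * and both calls need write access to sysfs (i.e. root):
 *
 *	if (cpufreq_modify_policy_max(0, 1000000))
 *		fprintf(stderr, "setting max frequency failed\n");
 *	if (cpufreq_modify_policy_governor(0, "powersave"))
 *		fprintf(stderr, "setting governor failed\n");
 */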
| gpl-2.0 |
cfpeng/linux | arch/frv/kernel/futex.c | 12039 | 6709 | /* futex.c: futex operations
*
* Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/futex.h>
#include <asm/errno.h>
/*
* the various futex operations; MMU fault checking is ignored under no-MMU
* conditions
*/
static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *_oldval)
{
int oldval, ret;
asm("0: \n"
" orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
" ckeq icc3,cc7 \n"
"1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
" orcr cc7,cc7,cc3 \n" /* set CC3 to true */
"2: cst.p %3,%M0 ,cc3,#1 \n"
" corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
" beq icc3,#0,0b \n"
" setlos 0,%2 \n"
"3: \n"
".subsection 2 \n"
"4: setlos %5,%2 \n"
" bra 3b \n"
".previous \n"
".section __ex_table,\"a\" \n"
" .balign 8 \n"
" .long 1b,4b \n"
" .long 2b,4b \n"
".previous"
: "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
: "3"(oparg), "i"(-EFAULT)
: "memory", "cc7", "cc3", "icc3"
);
*_oldval = oldval;
return ret;
}
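/*
 * Note on the pattern above (an illustrative explanation, not original
 * code): FRV has no load-locked/store-conditional pair, so the sequence
 * emulates one.  ORCC sets ICC3.Z, the LD.P/ORCR pair atomically loads
 * the word and raises CC3, and CST.P stores only while CC3 is still
 * true (the pairing - and CC3 - is broken if the sequence is
 * interrupted); CORCC clears ICC3.Z only if the store actually
 * happened, so the BEQ retries the whole sequence whenever the store
 * was suppressed.  The variants below differ from this one only in the
 * ALU instruction (add, or, and, xor) inserted between load and store.
 */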
static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *_oldval)
{
int oldval, ret;
asm("0: \n"
" orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
" ckeq icc3,cc7 \n"
"1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
" orcr cc7,cc7,cc3 \n" /* set CC3 to true */
" add %1,%3,%3 \n"
"2: cst.p %3,%M0 ,cc3,#1 \n"
" corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
" beq icc3,#0,0b \n"
" setlos 0,%2 \n"
"3: \n"
".subsection 2 \n"
"4: setlos %5,%2 \n"
" bra 3b \n"
".previous \n"
".section __ex_table,\"a\" \n"
" .balign 8 \n"
" .long 1b,4b \n"
" .long 2b,4b \n"
".previous"
: "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
: "3"(oparg), "i"(-EFAULT)
: "memory", "cc7", "cc3", "icc3"
);
*_oldval = oldval;
return ret;
}
static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *_oldval)
{
int oldval, ret;
asm("0: \n"
" orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
" ckeq icc3,cc7 \n"
"1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
" orcr cc7,cc7,cc3 \n" /* set CC3 to true */
" or %1,%3,%3 \n"
"2: cst.p %3,%M0 ,cc3,#1 \n"
" corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
" beq icc3,#0,0b \n"
" setlos 0,%2 \n"
"3: \n"
".subsection 2 \n"
"4: setlos %5,%2 \n"
" bra 3b \n"
".previous \n"
".section __ex_table,\"a\" \n"
" .balign 8 \n"
" .long 1b,4b \n"
" .long 2b,4b \n"
".previous"
: "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
: "3"(oparg), "i"(-EFAULT)
: "memory", "cc7", "cc3", "icc3"
);
*_oldval = oldval;
return ret;
}
static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *_oldval)
{
int oldval, ret;
asm("0: \n"
" orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
" ckeq icc3,cc7 \n"
"1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
" orcr cc7,cc7,cc3 \n" /* set CC3 to true */
" and %1,%3,%3 \n"
"2: cst.p %3,%M0 ,cc3,#1 \n"
" corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
" beq icc3,#0,0b \n"
" setlos 0,%2 \n"
"3: \n"
".subsection 2 \n"
"4: setlos %5,%2 \n"
" bra 3b \n"
".previous \n"
".section __ex_table,\"a\" \n"
" .balign 8 \n"
" .long 1b,4b \n"
" .long 2b,4b \n"
".previous"
: "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
: "3"(oparg), "i"(-EFAULT)
: "memory", "cc7", "cc3", "icc3"
);
*_oldval = oldval;
return ret;
}
static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_oldval)
{
int oldval, ret;
asm("0: \n"
" orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
" ckeq icc3,cc7 \n"
"1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
" orcr cc7,cc7,cc3 \n" /* set CC3 to true */
" xor %1,%3,%3 \n"
"2: cst.p %3,%M0 ,cc3,#1 \n"
" corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
" beq icc3,#0,0b \n"
" setlos 0,%2 \n"
"3: \n"
".subsection 2 \n"
"4: setlos %5,%2 \n"
" bra 3b \n"
".previous \n"
".section __ex_table,\"a\" \n"
" .balign 8 \n"
" .long 1b,4b \n"
" .long 2b,4b \n"
".previous"
: "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
: "3"(oparg), "i"(-EFAULT)
: "memory", "cc7", "cc3", "icc3"
);
*_oldval = oldval;
return ret;
}
/*****************************************************************************/
/*
* do the futex operations
*/
int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
ret = atomic_futex_op_xchg_set(oparg, uaddr, &oldval);
break;
case FUTEX_OP_ADD:
ret = atomic_futex_op_xchg_add(oparg, uaddr, &oldval);
break;
case FUTEX_OP_OR:
ret = atomic_futex_op_xchg_or(oparg, uaddr, &oldval);
break;
case FUTEX_OP_ANDN:
ret = atomic_futex_op_xchg_and(~oparg, uaddr, &oldval);
break;
case FUTEX_OP_XOR:
ret = atomic_futex_op_xchg_xor(oparg, uaddr, &oldval);
break;
default:
ret = -ENOSYS;
break;
}
pagefault_enable();
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
default: ret = -ENOSYS; break;
}
}
return ret;
} /* end futex_atomic_op_inuser() */
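/*
 * Worked example (illustrative): this helper is what the core uses to
 * implement FUTEX_WAKE_OP.  For
 * encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ, 0)
 * the decode above yields op = FUTEX_OP_ADD, oparg = 1,
 * cmp = FUTEX_OP_CMP_EQ and cmparg = 0, so the call atomically adds 1
 * to *uaddr and returns nonzero iff the value before the add was 0.
 */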
| gpl-2.0 |
sunrunning/ok6410_linux | arch/frv/kernel/futex.c | 12039 | 6709 | /* futex.c: futex operations
*
* Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/futex.h>
#include <asm/errno.h>
/*
* the various futex operations; MMU fault checking is ignored under no-MMU
* conditions
*/
static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *_oldval)
{
int oldval, ret;
asm("0: \n"
" orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
" ckeq icc3,cc7 \n"
"1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
" orcr cc7,cc7,cc3 \n" /* set CC3 to true */
"2: cst.p %3,%M0 ,cc3,#1 \n"
" corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
" beq icc3,#0,0b \n"
" setlos 0,%2 \n"
"3: \n"
".subsection 2 \n"
"4: setlos %5,%2 \n"
" bra 3b \n"
".previous \n"
".section __ex_table,\"a\" \n"
" .balign 8 \n"
" .long 1b,4b \n"
" .long 2b,4b \n"
".previous"
: "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
: "3"(oparg), "i"(-EFAULT)
: "memory", "cc7", "cc3", "icc3"
);
*_oldval = oldval;
return ret;
}
static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *_oldval)
{
int oldval, ret;
asm("0: \n"
" orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
" ckeq icc3,cc7 \n"
"1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
" orcr cc7,cc7,cc3 \n" /* set CC3 to true */
" add %1,%3,%3 \n"
"2: cst.p %3,%M0 ,cc3,#1 \n"
" corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
" beq icc3,#0,0b \n"
" setlos 0,%2 \n"
"3: \n"
".subsection 2 \n"
"4: setlos %5,%2 \n"
" bra 3b \n"
".previous \n"
".section __ex_table,\"a\" \n"
" .balign 8 \n"
" .long 1b,4b \n"
" .long 2b,4b \n"
".previous"
: "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
: "3"(oparg), "i"(-EFAULT)
: "memory", "cc7", "cc3", "icc3"
);
*_oldval = oldval;
return ret;
}
static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *_oldval)
{
int oldval, ret;
asm("0: \n"
" orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
" ckeq icc3,cc7 \n"
"1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
" orcr cc7,cc7,cc3 \n" /* set CC3 to true */
" or %1,%3,%3 \n"
"2: cst.p %3,%M0 ,cc3,#1 \n"
" corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
" beq icc3,#0,0b \n"
" setlos 0,%2 \n"
"3: \n"
".subsection 2 \n"
"4: setlos %5,%2 \n"
" bra 3b \n"
".previous \n"
".section __ex_table,\"a\" \n"
" .balign 8 \n"
" .long 1b,4b \n"
" .long 2b,4b \n"
".previous"
: "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
: "3"(oparg), "i"(-EFAULT)
: "memory", "cc7", "cc3", "icc3"
);
*_oldval = oldval;
return ret;
}
static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *_oldval)
{
int oldval, ret;
asm("0: \n"
" orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
" ckeq icc3,cc7 \n"
"1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
" orcr cc7,cc7,cc3 \n" /* set CC3 to true */
" and %1,%3,%3 \n"
"2: cst.p %3,%M0 ,cc3,#1 \n"
" corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
" beq icc3,#0,0b \n"
" setlos 0,%2 \n"
"3: \n"
".subsection 2 \n"
"4: setlos %5,%2 \n"
" bra 3b \n"
".previous \n"
".section __ex_table,\"a\" \n"
" .balign 8 \n"
" .long 1b,4b \n"
" .long 2b,4b \n"
".previous"
: "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
: "3"(oparg), "i"(-EFAULT)
: "memory", "cc7", "cc3", "icc3"
);
*_oldval = oldval;
return ret;
}
static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_oldval)
{
int oldval, ret;
asm("0: \n"
" orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
" ckeq icc3,cc7 \n"
"1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
" orcr cc7,cc7,cc3 \n" /* set CC3 to true */
" xor %1,%3,%3 \n"
"2: cst.p %3,%M0 ,cc3,#1 \n"
" corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
" beq icc3,#0,0b \n"
" setlos 0,%2 \n"
"3: \n"
".subsection 2 \n"
"4: setlos %5,%2 \n"
" bra 3b \n"
".previous \n"
".section __ex_table,\"a\" \n"
" .balign 8 \n"
" .long 1b,4b \n"
" .long 2b,4b \n"
".previous"
: "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
: "3"(oparg), "i"(-EFAULT)
: "memory", "cc7", "cc3", "icc3"
);
*_oldval = oldval;
return ret;
}
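/*
 * Each helper above follows the same FR-V pattern: LD.P loads the old
 * value while ORCR arms condition flag CC3, CST.P then stores the new
 * value conditionally on CC3, CORCC clears ICC3.Z when the store goes
 * through and BEQ retries when it does not; the __ex_table entries
 * redirect faulting userspace accesses to the -EFAULT exit.
 */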
/*****************************************************************************/
/*
* do the futex operations
*/
int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
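/*
 * Worked decode example (illustrative values, not from this file):
 * encoded_op == 0x14002001 unpacks above as op = 1 (FUTEX_OP_ADD),
 * cmp = 4 (FUTEX_OP_CMP_GT), oparg = 2 and cmparg = 1, i.e.
 * "atomically add 2 to *uaddr, then return oldval > 1".
 */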
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
ret = atomic_futex_op_xchg_set(oparg, uaddr, &oldval);
break;
case FUTEX_OP_ADD:
ret = atomic_futex_op_xchg_add(oparg, uaddr, &oldval);
break;
case FUTEX_OP_OR:
ret = atomic_futex_op_xchg_or(oparg, uaddr, &oldval);
break;
case FUTEX_OP_ANDN:
ret = atomic_futex_op_xchg_and(~oparg, uaddr, &oldval);
break;
case FUTEX_OP_XOR:
ret = atomic_futex_op_xchg_xor(oparg, uaddr, &oldval);
break;
default:
ret = -ENOSYS;
break;
}
pagefault_enable();
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
default: ret = -ENOSYS; break;
}
}
return ret;
} /* end futex_atomic_op_inuser() */
| gpl-2.0 |
mkl0301/linux | fs/hfs/part_tbl.c | 15623 | 2946 | /*
* linux/fs/hfs/part_tbl.c
*
* Copyright (C) 1996-1997 Paul H. Hargrove
* (C) 2003 Ardis Technologies <roman@ardistech.com>
* This file may be distributed under the terms of the GNU General Public License.
*
* Original code to handle the new style Mac partition table based on
* a patch contributed by Holger Schemel (aeglos@valinor.owl.de).
*/
#include "hfs_fs.h"
/*
* The new style Mac partition map
*
* For each partition on the media there is a physical block (512-byte
* block) containing one of these structures. These blocks are
* contiguous starting at block 1.
*/
struct new_pmap {
__be16 pmSig; /* signature */
__be16 reSigPad; /* padding */
__be32 pmMapBlkCnt; /* partition blocks count */
__be32 pmPyPartStart; /* physical block start of partition */
__be32 pmPartBlkCnt; /* physical block count of partition */
u8 pmPartName[32]; /* (null terminated?) string
giving the name of this
partition */
u8 pmPartType[32]; /* (null terminated?) string
giving the type of this
partition */
/* a bunch more stuff we don't need */
} __packed;
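/*
 * Worked example (illustrative): since the map blocks are contiguous
 * from block 1, entry i of a new style map lives at physical block
 * *part_start + HFS_PMAP_BLK + i, which is exactly the address that
 * hfs_part_find() below walks with sb_bread512().
 */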
/*
* The old style Mac partition map
*
* The partition map consists of a 2-byte signature followed by an
* array of these structures. The map is terminated with an all-zero
* one of these.
*/
struct old_pmap {
__be16 pdSig; /* Signature bytes */
struct old_pmap_entry {
__be32 pdStart;
__be32 pdSize;
__be32 pdFSID;
} pdEntry[42];
} __packed;
/*
* hfs_part_find()
*
* Parse the partition map looking for the
* start and length of the 'part'th HFS partition.
*/
int hfs_part_find(struct super_block *sb,
sector_t *part_start, sector_t *part_size)
{
struct buffer_head *bh;
__be16 *data;
int i, size, res;
res = -ENOENT;
bh = sb_bread512(sb, *part_start + HFS_PMAP_BLK, data);
if (!bh)
return -EIO;
switch (be16_to_cpu(*data)) {
case HFS_OLD_PMAP_MAGIC:
{
struct old_pmap *pm;
struct old_pmap_entry *p;
pm = (struct old_pmap *)bh->b_data;
p = pm->pdEntry;
size = 42;
for (i = 0; i < size; p++, i++) {
if (p->pdStart && p->pdSize &&
p->pdFSID == cpu_to_be32(0x54465331)/*"TFS1"*/ &&
(HFS_SB(sb)->part < 0 || HFS_SB(sb)->part == i)) {
*part_start += be32_to_cpu(p->pdStart);
*part_size = be32_to_cpu(p->pdSize);
res = 0;
}
}
break;
}
case HFS_NEW_PMAP_MAGIC:
{
struct new_pmap *pm;
pm = (struct new_pmap *)bh->b_data;
size = be32_to_cpu(pm->pmMapBlkCnt);
for (i = 0; i < size;) {
if (!memcmp(pm->pmPartType,"Apple_HFS", 9) &&
(HFS_SB(sb)->part < 0 || HFS_SB(sb)->part == i)) {
*part_start += be32_to_cpu(pm->pmPyPartStart);
*part_size = be32_to_cpu(pm->pmPartBlkCnt);
res = 0;
break;
}
brelse(bh);
bh = sb_bread512(sb, *part_start + HFS_PMAP_BLK + ++i, pm);
if (!bh)
return -EIO;
if (pm->pmSig != cpu_to_be16(HFS_NEW_PMAP_MAGIC))
break;
}
break;
}
}
brelse(bh);
return res;
}
| gpl-2.0 |
ironman771/xbmc | xbmc/filesystem/VideoDatabaseDirectory/DirectoryNodeEpisodes.cpp | 8 | 1641 | /*
* Copyright (C) 2005-2013 Team XBMC
* http://kodi.tv
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include "DirectoryNodeEpisodes.h"
#include "QueryParams.h"
#include "video/VideoDatabase.h"
using namespace XFILE::VIDEODATABASEDIRECTORY;
CDirectoryNodeEpisodes::CDirectoryNodeEpisodes(const std::string& strName, CDirectoryNode* pParent)
: CDirectoryNode(NODE_TYPE_EPISODES, strName, pParent)
{
}
bool CDirectoryNodeEpisodes::GetContent(CFileItemList& items) const
{
CVideoDatabase videodatabase;
if (!videodatabase.Open())
return false;
CQueryParams params;
CollectQueryParams(params);
int season = (int)params.GetSeason();
if (season == -2)
season = -1;
bool bSuccess=videodatabase.GetEpisodesNav(BuildPath(), items, params.GetGenreId(), params.GetYear(), params.GetActorId(), params.GetDirectorId(), params.GetTvShowId(), season);
videodatabase.Close();
return bSuccess;
}
NODE_TYPE CDirectoryNodeEpisodes::GetChildType() const
{
return NODE_TYPE_EPISODES;
}
| gpl-2.0 |
emanuel4you/mednafen-dreambox | src/snes/src/sdsp/sdsp.cpp | 8 | 6480 | //S-DSP emulator
//note: this is basically a C++ cothreaded implementation of Shay Green's (blargg's) S-DSP emulator.
//the actual algorithms, timing information, tables, variable names, etc were all from him.
#include <../base.hpp>
#define SDSP_CPP
namespace bSNES_v059 {
#if defined(DEBUGGER)
#include "debugger/debugger.cpp"
sDSPDebugger dsp;
#else
sDSP dsp;
#endif
#include "serialization.cpp"
#define REG(n) state.regs[r_##n]
#define VREG(n) state.regs[v.vidx + v_##n]
#if !defined(DSP_STATE_MACHINE)
#define phase_start() while(true) { \
while(scheduler.sync == Scheduler::SyncAll) { \
scheduler.exit(Scheduler::SynchronizeEvent); \
}
#define phase(n)
#define tick() scheduler.addclocks_dsp(3 * 8); scheduler.sync_dspsmp()
#define phase_end() }
#else
#define phase_start() switch(phase_index) {
#define phase(n) case n:
#define tick() scheduler.addclocks_dsp(3 * 8); break
#define phase_end() } phase_index = (phase_index + 1) & 31;
#endif
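//illustrative expansion of the macros above: without DSP_STATE_MACHINE,
//enter() becomes one endless loop whose tick() hands 24 DSP clocks to the
//scheduler between phases; with DSP_STATE_MACHINE each phase(n) becomes a
//"case n:" label of a switch on phase_index and tick() breaks out after a
//single phase, so phase_index steps through 0..31 across successive calls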
#include "gaussian.cpp"
#include "counter.cpp"
#include "envelope.cpp"
#include "brr.cpp"
#include "misc.cpp"
#include "voice.cpp"
#include "echo.cpp"
/* timing */
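//the 32 phases below run the eight voices through steps voice_1..voice_9 as
//a staggered pipeline, several voices advancing one step per phase, with the
//echo and misc units slotted into phases 22-30; this mirrors the real
//S-DSP's fixed per-sample schedule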
void sDSP::enter() {
phase_start()
phase(0)
voice_5(voice[0]);
voice_2(voice[1]);
tick();
phase(1)
voice_6(voice[0]);
voice_3(voice[1]);
tick();
phase(2)
voice_7(voice[0]);
voice_4(voice[1]);
voice_1(voice[3]);
tick();
phase(3)
voice_8(voice[0]);
voice_5(voice[1]);
voice_2(voice[2]);
tick();
phase(4)
voice_9(voice[0]);
voice_6(voice[1]);
voice_3(voice[2]);
tick();
phase(5)
voice_7(voice[1]);
voice_4(voice[2]);
voice_1(voice[4]);
tick();
phase(6)
voice_8(voice[1]);
voice_5(voice[2]);
voice_2(voice[3]);
tick();
phase(7)
voice_9(voice[1]);
voice_6(voice[2]);
voice_3(voice[3]);
tick();
phase(8)
voice_7(voice[2]);
voice_4(voice[3]);
voice_1(voice[5]);
tick();
phase(9)
voice_8(voice[2]);
voice_5(voice[3]);
voice_2(voice[4]);
tick();
phase(10)
voice_9(voice[2]);
voice_6(voice[3]);
voice_3(voice[4]);
tick();
phase(11)
voice_7(voice[3]);
voice_4(voice[4]);
voice_1(voice[6]);
tick();
phase(12)
voice_8(voice[3]);
voice_5(voice[4]);
voice_2(voice[5]);
tick();
phase(13)
voice_9(voice[3]);
voice_6(voice[4]);
voice_3(voice[5]);
tick();
phase(14)
voice_7(voice[4]);
voice_4(voice[5]);
voice_1(voice[7]);
tick();
phase(15)
voice_8(voice[4]);
voice_5(voice[5]);
voice_2(voice[6]);
tick();
phase(16)
voice_9(voice[4]);
voice_6(voice[5]);
voice_3(voice[6]);
tick();
phase(17)
voice_1(voice[0]);
voice_7(voice[5]);
voice_4(voice[6]);
tick();
phase(18)
voice_8(voice[5]);
voice_5(voice[6]);
voice_2(voice[7]);
tick();
phase(19)
voice_9(voice[5]);
voice_6(voice[6]);
voice_3(voice[7]);
tick();
phase(20)
voice_1(voice[1]);
voice_7(voice[6]);
voice_4(voice[7]);
tick();
phase(21)
voice_8(voice[6]);
voice_5(voice[7]);
voice_2(voice[0]);
tick();
phase(22)
voice_3a(voice[0]);
voice_9(voice[6]);
voice_6(voice[7]);
echo_22();
tick();
phase(23)
voice_7(voice[7]);
echo_23();
tick();
phase(24)
voice_8(voice[7]);
echo_24();
tick();
phase(25)
voice_3b(voice[0]);
voice_9(voice[7]);
echo_25();
tick();
phase(26)
echo_26();
tick();
phase(27)
misc_27();
echo_27();
tick();
phase(28)
misc_28();
echo_28();
tick();
phase(29)
misc_29();
echo_29();
tick();
phase(30)
misc_30();
voice_3c(voice[0]);
echo_30();
tick();
phase(31)
voice_4(voice[0]);
voice_1(voice[2]);
tick();
phase_end()
}
/* register interface for S-SMP $00f2,$00f3 */
uint8 sDSP::read(uint8 addr) {
return state.regs[addr];
}
void sDSP::write(uint8 addr, uint8 data) {
state.regs[addr] = data;
if((addr & 0x0f) == v_envx) {
state.envx_buf = data;
} else if((addr & 0x0f) == v_outx) {
state.outx_buf = data;
} else if(addr == r_kon) {
state.new_kon = data;
} else if(addr == r_endx) {
//always cleared, regardless of data written
state.endx_buf = 0;
state.regs[r_endx] = 0;
}
}
/* initialization */
void sDSP::power() {
memset(&state.regs, 0, sizeof state.regs);
state.echo_hist_pos = 0;
state.every_other_sample = false;
state.kon = 0;
state.noise = 0;
state.counter = 0;
state.echo_offset = 0;
state.echo_length = 0;
state.new_kon = 0;
state.endx_buf = 0;
state.envx_buf = 0;
state.outx_buf = 0;
state.t_pmon = 0;
state.t_non = 0;
state.t_eon = 0;
state.t_dir = 0;
state.t_koff = 0;
state.t_brr_next_addr = 0;
state.t_adsr0 = 0;
state.t_brr_header = 0;
state.t_brr_byte = 0;
state.t_srcn = 0;
state.t_esa = 0;
state.t_echo_disabled = 0;
state.t_dir_addr = 0;
state.t_pitch = 0;
state.t_output = 0;
state.t_looped = 0;
state.t_echo_ptr = 0;
state.t_main_out[0] = state.t_main_out[1] = 0;
state.t_echo_out[0] = state.t_echo_out[1] = 0;
state.t_echo_in[0] = state.t_echo_in[1] = 0;
for(unsigned i = 0; i < 8; i++) {
voice[i].buf_pos = 0;
voice[i].interp_pos = 0;
voice[i].brr_addr = 0;
voice[i].brr_offset = 1;
voice[i].vbit = 1 << i;
voice[i].vidx = i * 0x10;
voice[i].kon_delay = 0;
voice[i].env_mode = env_release;
voice[i].env = 0;
voice[i].t_envx_out = 0;
voice[i].hidden_env = 0;
}
reset();
}
void sDSP::reset() {
REG(flg) = 0xe0;
state.noise = 0x4000;
state.echo_hist_pos = 0;
state.every_other_sample = 1;
state.echo_offset = 0;
state.counter = 0;
phase_index = 0;
}
sDSP::sDSP() {
nall_static_assert<sizeof(int) >= 32 / 8>(); //int >= 32-bits
nall_static_assert<(int8)0x80 == -0x80>(); //8-bit sign extension
nall_static_assert<(int16)0x8000 == -0x8000>(); //16-bit sign extension
nall_static_assert<(uint16)0xffff0000 == 0>(); //16-bit unsigned clip
nall_static_assert<(-1 >> 1) == -1>(); //arithmetic shift right
//-0x8000 <= n <= +0x7fff
assert(sclamp<16>(+0x8000) == +0x7fff);
assert(sclamp<16>(-0x8001) == -0x8000);
}
sDSP::~sDSP() {
}
};
| gpl-2.0 |
stefan2904/glibc | nptl/sysdeps/sh/pthread_spin_lock.c | 8 | 1026 | /* Copyright (C) 2003-2013 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include "pthreadP.h"
int
pthread_spin_lock (lock)
pthread_spinlock_t *lock;
{
unsigned int val;
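/* SH "tas.b" atomically tests the byte at @lock and sets its top bit:
   the T flag becomes 1 only if the byte was zero, i.e. the lock was
   free; "movt" copies T into VAL, so the loop spins until the
   test-and-set succeeds.  */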
do
asm volatile ("tas.b @%1; movt %0"
: "=&r" (val)
: "r" (lock)
: "memory");
while (val == 0);
return 0;
}
| gpl-2.0 |
TeamLGOG/android_kernel_lge_d800 | arch/arm/mach-vt8500/devices-wm8505.c | 8 | 2955 | /* linux/arch/arm/mach-vt8500/devices-wm8505.c
*
* Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/platform_device.h>
#include <mach/wm8505_regs.h>
#include <mach/wm8505_irqs.h>
#include <mach/i8042.h>
#include "devices.h"
void __init wm8505_set_resources(void)
{
struct resource tmp[3];
tmp[0] = wmt_mmio_res(WM8505_GOVR_BASE, SZ_512);
wmt_res_add(&vt8500_device_wm8505_fb, tmp, 1);
tmp[0] = wmt_mmio_res(WM8505_UART0_BASE, 0x1040);
tmp[1] = wmt_irq_res(IRQ_UART0);
wmt_res_add(&vt8500_device_uart0, tmp, 2);
tmp[0] = wmt_mmio_res(WM8505_UART1_BASE, 0x1040);
tmp[1] = wmt_irq_res(IRQ_UART1);
wmt_res_add(&vt8500_device_uart1, tmp, 2);
tmp[0] = wmt_mmio_res(WM8505_UART2_BASE, 0x1040);
tmp[1] = wmt_irq_res(IRQ_UART2);
wmt_res_add(&vt8500_device_uart2, tmp, 2);
tmp[0] = wmt_mmio_res(WM8505_UART3_BASE, 0x1040);
tmp[1] = wmt_irq_res(IRQ_UART3);
wmt_res_add(&vt8500_device_uart3, tmp, 2);
tmp[0] = wmt_mmio_res(WM8505_UART4_BASE, 0x1040);
tmp[1] = wmt_irq_res(IRQ_UART4);
wmt_res_add(&vt8500_device_uart4, tmp, 2);
tmp[0] = wmt_mmio_res(WM8505_UART5_BASE, 0x1040);
tmp[1] = wmt_irq_res(IRQ_UART5);
wmt_res_add(&vt8500_device_uart5, tmp, 2);
tmp[0] = wmt_mmio_res(WM8505_EHCI_BASE, SZ_512);
tmp[1] = wmt_irq_res(IRQ_EHCI);
wmt_res_add(&vt8500_device_ehci, tmp, 2);
tmp[0] = wmt_mmio_res(WM8505_GEGEA_BASE, SZ_256);
wmt_res_add(&vt8500_device_ge_rops, tmp, 1);
tmp[0] = wmt_mmio_res(WM8505_PWM_BASE, 0x44);
wmt_res_add(&vt8500_device_pwm, tmp, 1);
tmp[0] = wmt_mmio_res(WM8505_RTC_BASE, 0x2c);
tmp[1] = wmt_irq_res(IRQ_RTC);
tmp[2] = wmt_irq_res(IRQ_RTCSM);
wmt_res_add(&vt8500_device_rtc, tmp, 3);
}
static void __init wm8505_set_externs(void)
{
/* */
wmt_ic_base = WM8505_IC_BASE;
wmt_sic_base = WM8505_SIC_BASE;
wmt_gpio_base = WM8505_GPIO_BASE;
wmt_pmc_base = WM8505_PMC_BASE;
wmt_i8042_base = WM8505_PS2_BASE;
wmt_nr_irqs = WM8505_NR_IRQS;
wmt_timer_irq = IRQ_PMCOS0;
wmt_gpio_ext_irq[0] = IRQ_EXT0;
wmt_gpio_ext_irq[1] = IRQ_EXT1;
wmt_gpio_ext_irq[2] = IRQ_EXT2;
wmt_gpio_ext_irq[3] = IRQ_EXT3;
wmt_gpio_ext_irq[4] = IRQ_EXT4;
wmt_gpio_ext_irq[5] = IRQ_EXT5;
wmt_gpio_ext_irq[6] = IRQ_EXT6;
wmt_gpio_ext_irq[7] = IRQ_EXT7;
wmt_i8042_kbd_irq = IRQ_PS2KBD;
wmt_i8042_aux_irq = IRQ_PS2MOUSE;
}
void __init wm8505_map_io(void)
{
iotable_init(wmt_io_desc, ARRAY_SIZE(wmt_io_desc));
/* */
wm8505_set_externs();
}
| gpl-2.0 |
daniviga/QGIS | tests/src/core/testqgis.cpp | 8 | 21497 | /***************************************************************************
testqgis.cpp
------------
Date : March 2015
Copyright : (C) 2015 by Nyall Dawson
Email : nyall.dawson@gmail.com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
#include "qgstest.h"
#include <QObject>
#include <QString>
#include <QApplication>
#include <QCheckBox>
#include <memory>
#include <QSignalSpy>
//qgis includes...
#include "qgis.h"
#include "qgsmaplayermodel.h"
#include "qgsattributeeditorelement.h"
#include "qgsfieldproxymodel.h"
/**
* \ingroup UnitTests
* Includes unit tests for the Qgis namespace
*/
class TestQgis : public QObject
{
Q_OBJECT
private slots:
void initTestCase();// will be called before the first testfunction is executed.
void cleanupTestCase();// will be called after the last testfunction was executed.
void init() {}// will be called before each testfunction is executed.
void cleanup() {}// will be called after every testfunction.
void permissiveToDouble();
void permissiveToInt();
void permissiveToLongLong();
void doubleToString();
void signalBlocker();
void qVariantCompare_data();
void qVariantCompare();
void testNanCompatibleEquals_data();
void testNanCompatibleEquals();
void testQgsAsConst();
void testQgsRound();
void testQgsVariantEqual();
void testQgsEnumValueToKey();
void testQgsEnumKeyToValue();
void testQgsFlagValueToKeys();
void testQgsFlagKeysToValue();
void testQMapQVariantList();
private:
QString mReport;
};
//runs before all tests
void TestQgis::initTestCase()
{
mReport = QStringLiteral( "<h1>Qgis Tests</h1>\n" );
}
//runs after all tests
void TestQgis::cleanupTestCase()
{
QString myReportFile = QDir::tempPath() + "/qgistest.html";
QFile myFile( myReportFile );
if ( myFile.open( QIODevice::WriteOnly | QIODevice::Truncate ) )
{
QTextStream myQTextStream( &myFile );
myQTextStream << mReport;
myFile.close();
}
}
void TestQgis::permissiveToDouble()
{
//good inputs
bool ok = false;
double result = qgsPermissiveToDouble( QStringLiteral( "1000" ), ok );
QVERIFY( ok );
QCOMPARE( result, 1000.0 );
ok = false;
result = qgsPermissiveToDouble( QStringLiteral( "1" ) + QLocale().groupSeparator() + "000", ok );
QVERIFY( ok );
QCOMPARE( result, 1000.0 );
ok = false;
result = qgsPermissiveToDouble( QStringLiteral( "5" ) + QLocale().decimalPoint() + "5", ok );
QVERIFY( ok );
QCOMPARE( result, 5.5 );
ok = false;
result = qgsPermissiveToDouble( QStringLiteral( "1" ) + QLocale().groupSeparator() + "000" + QLocale().decimalPoint() + "5", ok );
QVERIFY( ok );
QCOMPARE( result, 1000.5 );
//bad input
ok = false;
( void ) qgsPermissiveToDouble( QStringLiteral( "a" ), ok );
QVERIFY( !ok );
//messy input (invalid thousand separator position), should still be converted
ok = false;
result = qgsPermissiveToDouble( QStringLiteral( "10" ) + QLocale().groupSeparator() + "00", ok );
QVERIFY( ok );
QCOMPARE( result, 1000.0 );
ok = false;
result = qgsPermissiveToDouble( QStringLiteral( "10" ) + QLocale().groupSeparator() + "00" + QLocale().decimalPoint() + "5", ok );
QVERIFY( ok );
QCOMPARE( result, 1000.5 );
}
void TestQgis::permissiveToInt()
{
//good inputs
bool ok = false;
int result = qgsPermissiveToInt( QStringLiteral( "1000" ), ok );
QVERIFY( ok );
QCOMPARE( result, 1000 );
ok = false;
result = qgsPermissiveToInt( QStringLiteral( "1%01000" ).arg( QLocale().groupSeparator() ), ok );
QVERIFY( ok );
QCOMPARE( result, 1000 );
//bad input
ok = false;
( void ) qgsPermissiveToInt( QStringLiteral( "a" ), ok );
QVERIFY( !ok );
//messy input (invalid thousand separator position), should still be converted
ok = false;
result = qgsPermissiveToInt( QStringLiteral( "10%0100" ).arg( QLocale().groupSeparator() ), ok );
QVERIFY( ok );
QCOMPARE( result, 1000 );
}
void TestQgis::permissiveToLongLong()
{
//good inputs
bool ok = false;
qlonglong result = qgsPermissiveToLongLong( QStringLiteral( "1000" ), ok );
QVERIFY( ok );
QCOMPARE( result, 1000 );
ok = false;
result = qgsPermissiveToLongLong( QStringLiteral( "1%01000" ).arg( QLocale().groupSeparator() ), ok );
QVERIFY( ok );
QCOMPARE( result, 1000 );
//bad input
ok = false;
( void ) qgsPermissiveToLongLong( QStringLiteral( "a" ), ok );
QVERIFY( !ok );
//messy input (invalid thousand separator position), should still be converted
ok = false;
result = qgsPermissiveToLongLong( QStringLiteral( "10%0100" ).arg( QLocale().groupSeparator() ), ok );
QVERIFY( ok );
QCOMPARE( result, 1000 );
}
void TestQgis::doubleToString()
{
QCOMPARE( qgsDoubleToString( 5.6783212, 5 ), QString( "5.67832" ) );
QCOMPARE( qgsDoubleToString( 5.5555555, 5 ), QString( "5.55556" ) );
QCOMPARE( qgsDoubleToString( 12.2, 1 ), QString( "12.2" ) );
QCOMPARE( qgsDoubleToString( 12.2, 2 ), QString( "12.2" ) );
QCOMPARE( qgsDoubleToString( 12.2, 10 ), QString( "12.2" ) );
QCOMPARE( qgsDoubleToString( 12.234333, 1 ), QString( "12.2" ) );
QCOMPARE( qgsDoubleToString( 12, 1 ), QString( "12" ) );
QCOMPARE( qgsDoubleToString( 12, 0 ), QString( "12" ) );
QCOMPARE( qgsDoubleToString( 12000, 0 ), QString( "12000" ) );
QCOMPARE( qgsDoubleToString( 12000, 1 ), QString( "12000" ) );
QCOMPARE( qgsDoubleToString( 12000, 10 ), QString( "12000" ) );
QCOMPARE( qgsDoubleToString( 12345, -1 ), QString( "12345" ) );
QCOMPARE( qgsDoubleToString( 12345.12300000, 7 ), QString( "12345.123" ) );
QCOMPARE( qgsDoubleToString( 12345.00011111, 2 ), QString( "12345" ) );
QCOMPARE( qgsDoubleToString( -0.000000000708115, 0 ), QString( "0" ) );
}
void TestQgis::signalBlocker()
{
std::unique_ptr< QCheckBox > checkbox( new QCheckBox() );
QSignalSpy spy( checkbox.get(), &QCheckBox::toggled );
//first check that signals are not blocked
QVERIFY( !checkbox->signalsBlocked() );
checkbox->setChecked( true );
QCOMPARE( spy.count(), 1 );
QCOMPARE( spy.last().at( 0 ).toBool(), true );
//block signals
{
QgsSignalBlocker< QCheckBox > blocker( checkbox.get() );
QVERIFY( checkbox->signalsBlocked() );
checkbox->setChecked( false );
QVERIFY( !checkbox->isChecked() );
//should be no new signals
QCOMPARE( spy.count(), 1 );
QCOMPARE( spy.last().at( 0 ).toBool(), true );
checkbox->setChecked( true );
}
//blocker is out of scope, blocking should be removed
QVERIFY( !checkbox->signalsBlocked() );
checkbox->setChecked( false );
QCOMPARE( spy.count(), 2 );
QCOMPARE( spy.last().at( 0 ).toBool(), false );
// now check that initial blocking state is restored when QgsSignalBlocker goes out of scope
checkbox->blockSignals( true );
{
QgsSignalBlocker< QCheckBox > blocker( checkbox.get() );
QVERIFY( checkbox->signalsBlocked() );
}
// initial blocked state should be restored
QVERIFY( checkbox->signalsBlocked() );
checkbox->blockSignals( false );
// nested signal blockers
{
QgsSignalBlocker< QCheckBox > blocker( checkbox.get() );
QVERIFY( checkbox->signalsBlocked() );
{
QgsSignalBlocker< QCheckBox > blocker2( checkbox.get() );
QVERIFY( checkbox->signalsBlocked() );
}
QVERIFY( checkbox->signalsBlocked() );
}
QVERIFY( !checkbox->signalsBlocked() );
// check whileBlocking function
checkbox->setChecked( true );
QCOMPARE( spy.count(), 3 );
QCOMPARE( spy.last().at( 0 ).toBool(), true );
QVERIFY( !checkbox->signalsBlocked() );
whileBlocking( checkbox.get() )->setChecked( false );
// should have been no signals emitted
QCOMPARE( spy.count(), 3 );
// check that initial state of blocked signals was restored correctly
QVERIFY( !checkbox->signalsBlocked() );
checkbox->blockSignals( true );
QVERIFY( checkbox->signalsBlocked() );
whileBlocking( checkbox.get() )->setChecked( true );
QVERIFY( checkbox->signalsBlocked() );
}
void TestQgis::qVariantCompare_data()
{
QTest::addColumn<QVariant>( "lhs" );
QTest::addColumn<QVariant>( "rhs" );
QTest::addColumn<bool>( "lessThan" );
QTest::addColumn<bool>( "greaterThan" );
QTest::newRow( "invalid to value" ) << QVariant() << QVariant( 2 ) << true << false;
QTest::newRow( "invalid to value 2" ) << QVariant( 2 ) << QVariant() << false << true;
QTest::newRow( "invalid to null" ) << QVariant() << QVariant( QVariant::String ) << true << false;
QTest::newRow( "invalid to null2 " ) << QVariant( QVariant::String ) << QVariant() << false << true;
QTest::newRow( "null to value" ) << QVariant( QVariant::String ) << QVariant( "a" ) << true << false;
QTest::newRow( "null to value 2" ) << QVariant( "a" ) << QVariant( QVariant::String ) << false << true;
QTest::newRow( "int" ) << QVariant( 1 ) << QVariant( 2 ) << true << false;
QTest::newRow( "int 2" ) << QVariant( 1 ) << QVariant( -2 ) << false << true;
QTest::newRow( "int 3" ) << QVariant( 0 ) << QVariant( 1 ) << true << false;
QTest::newRow( "uint" ) << QVariant( 1u ) << QVariant( 2u ) << true << false;
QTest::newRow( "uint 2" ) << QVariant( 2u ) << QVariant( 0u ) << false << true;
QTest::newRow( "long long" ) << QVariant( 1LL ) << QVariant( 2LL ) << true << false;
QTest::newRow( "long long 2" ) << QVariant( 1LL ) << QVariant( -2LL ) << false << true;
QTest::newRow( "long long 3" ) << QVariant( 0LL ) << QVariant( 1LL ) << true << false;
QTest::newRow( "ulong long" ) << QVariant( 1uLL ) << QVariant( 2uLL ) << true << false;
QTest::newRow( "ulong long 2" ) << QVariant( 2uLL ) << QVariant( 0uLL ) << false << true;
QTest::newRow( "double" ) << QVariant( 1.5 ) << QVariant( 2.5 ) << true << false;
QTest::newRow( "double 2" ) << QVariant( 1.5 ) << QVariant( -2.5 ) << false << true;
QTest::newRow( "double 3" ) << QVariant( 0.5 ) << QVariant( 1.5 ) << true << false;
QTest::newRow( "char" ) << QVariant( 'b' ) << QVariant( 'x' ) << true << false;
QTest::newRow( "char 2" ) << QVariant( 'x' ) << QVariant( 'b' ) << false << true;
QTest::newRow( "date" ) << QVariant( QDate( 2000, 5, 6 ) ) << QVariant( QDate( 2000, 8, 6 ) ) << true << false;
QTest::newRow( "date 2" ) << QVariant( QDate( 2000, 8, 6 ) ) << QVariant( QDate( 2000, 5, 6 ) ) << false << true;
QTest::newRow( "time" ) << QVariant( QTime( 13, 5, 6 ) ) << QVariant( QTime( 13, 8, 6 ) ) << true << false;
QTest::newRow( "time 2" ) << QVariant( QTime( 18, 8, 6 ) ) << QVariant( QTime( 13, 5, 6 ) ) << false << true;
QTest::newRow( "datetime" ) << QVariant( QDateTime( QDate( 2000, 5, 6 ), QTime( 13, 5, 6 ) ) ) << QVariant( QDateTime( QDate( 2000, 8, 6 ), QTime( 13, 5, 6 ) ) ) << true << false;
QTest::newRow( "datetime 2" ) << QVariant( QDateTime( QDate( 2000, 8, 6 ), QTime( 13, 5, 6 ) ) ) << QVariant( QDateTime( QDate( 2000, 5, 6 ), QTime( 13, 5, 6 ) ) ) << false << true;
QTest::newRow( "datetime 3" ) << QVariant( QDateTime( QDate( 2000, 5, 6 ), QTime( 13, 5, 6 ) ) ) << QVariant( QDateTime( QDate( 2000, 5, 6 ), QTime( 13, 9, 6 ) ) ) << true << false;
QTest::newRow( "datetime 4" ) << QVariant( QDateTime( QDate( 2000, 5, 6 ), QTime( 13, 9, 6 ) ) ) << QVariant( QDateTime( QDate( 2000, 5, 6 ), QTime( 13, 5, 6 ) ) ) << false << true;
QTest::newRow( "bool" ) << QVariant( false ) << QVariant( true ) << true << false;
QTest::newRow( "bool 2" ) << QVariant( true ) << QVariant( false ) << false << true;
QTest::newRow( "qvariantlist" ) << QVariant( QVariantList() << QVariant( 5 ) ) << QVariant( QVariantList() << QVariant( 9 ) ) << true << false;
QTest::newRow( "qvariantlist 2" ) << QVariant( QVariantList() << QVariant( 9 ) ) << QVariant( QVariantList() << QVariant( 5 ) ) << false << true;
QTest::newRow( "qvariantlist 3" ) << QVariant( QVariantList() << QVariant( 5 ) << QVariant( 3 ) ) << QVariant( QVariantList() << QVariant( 5 ) << QVariant( 6 ) ) << true << false;
QTest::newRow( "qvariantlist 4" ) << QVariant( QVariant( QVariantList() << QVariant( 5 ) << QVariant( 6 ) ) ) << QVariant( QVariantList() << QVariant( 5 ) << QVariant( 3 ) ) << false << true;
QTest::newRow( "qvariantlist 5" ) << QVariant( QVariantList() << QVariant( 5 ) ) << QVariant( QVariantList() << QVariant( 5 ) << QVariant( 6 ) ) << true << false;
QTest::newRow( "qvariantlist 5" ) << QVariant( QVariantList() << QVariant( 5 ) << QVariant( 6 ) ) << QVariant( QVariantList() << QVariant( 5 ) ) << false << true;
QTest::newRow( "qstringlist" ) << QVariant( QStringList() << QStringLiteral( "aa" ) ) << QVariant( QStringList() << QStringLiteral( "bb" ) ) << true << false;
QTest::newRow( "qstringlist 2" ) << QVariant( QStringList() << QStringLiteral( "bb" ) ) << QVariant( QStringList() << QStringLiteral( "aa" ) ) << false << true;
QTest::newRow( "qstringlist 3" ) << QVariant( QStringList() << QStringLiteral( "aa" ) << QStringLiteral( "cc" ) ) << QVariant( QStringList() << QStringLiteral( "aa" ) << QStringLiteral( "xx" ) ) << true << false;
QTest::newRow( "qstringlist 4" ) << QVariant( QStringList() << QStringLiteral( "aa" ) << QStringLiteral( "xx" ) ) << QVariant( QStringList() << QStringLiteral( "aa" ) << QStringLiteral( "cc" ) ) << false << true;
QTest::newRow( "qstringlist 5" ) << QVariant( QStringList() << QStringLiteral( "aa" ) ) << QVariant( QStringList() << QStringLiteral( "aa" ) << QStringLiteral( "xx" ) ) << true << false;
QTest::newRow( "qstringlist 6" ) << QVariant( QStringList() << QStringLiteral( "aa" ) << QStringLiteral( "xx" ) ) << QVariant( QStringList() << QStringLiteral( "aa" ) ) << false << true;
QTest::newRow( "string" ) << QVariant( "a b c" ) << QVariant( "d e f" ) << true << false;
QTest::newRow( "string 2" ) << QVariant( "d e f" ) << QVariant( "a b c" ) << false << true;
}
void TestQgis::qVariantCompare()
{
QFETCH( QVariant, lhs );
QFETCH( QVariant, rhs );
QFETCH( bool, lessThan );
QFETCH( bool, greaterThan );
QCOMPARE( qgsVariantLessThan( lhs, rhs ), lessThan );
QCOMPARE( qgsVariantGreaterThan( lhs, rhs ), greaterThan );
}
void TestQgis::testNanCompatibleEquals_data()
{
QTest::addColumn<double>( "lhs" );
QTest::addColumn<double>( "rhs" );
QTest::addColumn<bool>( "expected" );
QTest::newRow( "both nan" ) << std::numeric_limits< double >::quiet_NaN() << std::numeric_limits< double >::quiet_NaN() << true;
QTest::newRow( "first is nan" ) << std::numeric_limits< double >::quiet_NaN() << 5.0 << false;
QTest::newRow( "second is nan" ) << 5.0 << std::numeric_limits< double >::quiet_NaN() << false;
QTest::newRow( "two numbers, not equal" ) << 5.0 << 6.0 << false;
QTest::newRow( "two numbers, equal" ) << 5.0 << 5.0 << true;
}
void TestQgis::testNanCompatibleEquals()
{
QFETCH( double, lhs );
QFETCH( double, rhs );
QFETCH( bool, expected );
QCOMPARE( qgsNanCompatibleEquals( lhs, rhs ), expected );
QCOMPARE( qgsNanCompatibleEquals( rhs, lhs ), expected );
}
class ConstTester
{
public:
void doSomething()
{
mVal = 1;
}
void doSomething() const
{
mVal = 2;
}
mutable int mVal = 0;
};
void TestQgis::testQgsAsConst()
{
ConstTester ct;
ct.doSomething();
QCOMPARE( ct.mVal, 1 );
std::as_const( ct ).doSomething();
QCOMPARE( ct.mVal, 2 );
}
void TestQgis::testQgsRound()
{
QGSCOMPARENEAR( qgsRound( 1234.567, 2 ), 1234.57, 0.01 );
QGSCOMPARENEAR( qgsRound( -1234.567, 2 ), -1234.57, 0.01 );
QGSCOMPARENEAR( qgsRound( 98765432198, 8 ), 98765432198, 1.0 );
QGSCOMPARENEAR( qgsRound( 98765432198, 9 ), 98765432198, 1.0 );
QGSCOMPARENEAR( qgsRound( 98765432198, 10 ), 98765432198, 1.0 );
QGSCOMPARENEAR( qgsRound( 98765432198, 11 ), 98765432198, 1.0 );
QGSCOMPARENEAR( qgsRound( 98765432198, 12 ), 98765432198, 1.0 );
QGSCOMPARENEAR( qgsRound( 98765432198, 13 ), 98765432198, 1.0 );
QGSCOMPARENEAR( qgsRound( 98765432198, 14 ), 98765432198, 1.0 );
QGSCOMPARENEAR( qgsRound( 98765432198765, 14 ), 98765432198765, 1.0 );
QGSCOMPARENEAR( qgsRound( 98765432198765432., 20 ), 98765432198765432., 1.0 );
QGSCOMPARENEAR( qgsRound( 9.8765432198765, 2 ), 9.88, 0.001 );
QGSCOMPARENEAR( qgsRound( 9.8765432198765, 3 ), 9.877, 0.0001 );
QGSCOMPARENEAR( qgsRound( 9.8765432198765, 4 ), 9.8765, 0.00001 );
QGSCOMPARENEAR( qgsRound( 9.8765432198765, 5 ), 9.87654, 0.000001 );
QGSCOMPARENEAR( qgsRound( 9.8765432198765, 6 ), 9.876543, 0.0000001 );
QGSCOMPARENEAR( qgsRound( 9.8765432198765, 7 ), 9.8765432, 0.00000001 );
QGSCOMPARENEAR( qgsRound( -9.8765432198765, 7 ), -9.8765432, 0.0000001 );
QGSCOMPARENEAR( qgsRound( 9876543.2198765, 5 ), 9876543.219880, 0.000001 );
QGSCOMPARENEAR( qgsRound( -9876543.2198765, 5 ), -9876543.219880, 0.000001 );
QGSCOMPARENEAR( qgsRound( 9.87654321987654321, 13 ), 9.87654321987654, 0.0000000000001 );
QGSCOMPARENEAR( qgsRound( 9.87654321987654321, 14 ), 9.876543219876543, 0.00000000000001 );
QGSCOMPARENEAR( qgsRound( 9998.87654321987654321, 14 ), 9998.876543219876543, 0.00000000000001 );
QGSCOMPARENEAR( qgsRound( 9999999.87654321987654321, 14 ), 9999999.876543219876543, 0.00000000000001 );
}
void TestQgis::testQgsVariantEqual()
{
// Invalid
QVERIFY( qgsVariantEqual( QVariant(), QVariant() ) );
QVERIFY( QVariant() == QVariant() );
// Zero
QVERIFY( qgsVariantEqual( QVariant( 0 ), QVariant( 0.0f ) ) );
QVERIFY( QVariant( 0 ) == QVariant( 0.0f ) );
// Double
QVERIFY( qgsVariantEqual( QVariant( 1.234 ), QVariant( 1.234 ) ) );
// This is what we actually wanted to fix with qgsVariantEqual
// zero != NULL
QVERIFY( ! qgsVariantEqual( QVariant( 0 ), QVariant( QVariant::Int ) ) );
QVERIFY( ! qgsVariantEqual( QVariant( 0 ), QVariant( QVariant::Double ) ) );
QVERIFY( ! qgsVariantEqual( QVariant( 0.0f ), QVariant( QVariant::Int ) ) );
QVERIFY( ! qgsVariantEqual( QVariant( 0.0f ), QVariant( QVariant::Double ) ) );
QVERIFY( QVariant( 0 ) == QVariant( QVariant::Int ) );
// NULL identities
QVERIFY( qgsVariantEqual( QVariant( QVariant::Int ), QVariant( QVariant::Int ) ) );
QVERIFY( qgsVariantEqual( QVariant( QVariant::Double ), QVariant( QVariant::Double ) ) );
QVERIFY( qgsVariantEqual( QVariant( QVariant::Int ), QVariant( QVariant::Double ) ) );
QVERIFY( qgsVariantEqual( QVariant( QVariant::Int ), QVariant( QVariant::String ) ) );
// NULL should not be equal to invalid
QVERIFY( !qgsVariantEqual( QVariant(), QVariant( QVariant::Int ) ) );
}
void TestQgis::testQgsEnumValueToKey()
{
QCOMPARE( qgsEnumValueToKey<QgsMapLayerModel::ItemDataRole>( QgsMapLayerModel::LayerRole ), QStringLiteral( "LayerRole" ) );
}
void TestQgis::testQgsEnumKeyToValue()
{
QCOMPARE( qgsEnumKeyToValue<QgsMapLayerModel::ItemDataRole>( QStringLiteral( "AdditionalRole" ), QgsMapLayerModel::LayerIdRole ), QgsMapLayerModel::AdditionalRole );
QCOMPARE( qgsEnumKeyToValue<QgsMapLayerModel::ItemDataRole>( QStringLiteral( "UnknownKey" ), QgsMapLayerModel::LayerIdRole ), QgsMapLayerModel::LayerIdRole );
// try with int values as string keys
QCOMPARE( qgsEnumKeyToValue<QgsMapLayerModel::ItemDataRole>( QString::number( QgsMapLayerModel::AdditionalRole ), QgsMapLayerModel::LayerIdRole, true ), QgsMapLayerModel::AdditionalRole );
QCOMPARE( qgsEnumKeyToValue<QgsMapLayerModel::ItemDataRole>( QString::number( QgsMapLayerModel::AdditionalRole ), QgsMapLayerModel::LayerIdRole, false ), QgsMapLayerModel::LayerIdRole );
// also try with an invalid int value
QMetaEnum metaEnum = QMetaEnum::fromType<QgsMapLayerModel::ItemDataRole>();
int invalidValue = QgsMapLayerModel::LayerIdRole + 100;
QVERIFY( !metaEnum.valueToKey( invalidValue ) );
QCOMPARE( qgsEnumKeyToValue<QgsMapLayerModel::ItemDataRole>( QString::number( invalidValue ), QgsMapLayerModel::LayerIdRole ), QgsMapLayerModel::LayerIdRole );
}
void TestQgis::testQgsFlagValueToKeys()
{
QgsFieldProxyModel::Filters filters = QgsFieldProxyModel::Filter::String | QgsFieldProxyModel::Filter::Double;
QCOMPARE( qgsFlagValueToKeys( filters ), QStringLiteral( "String|Double" ) );
}
void TestQgis::testQgsFlagKeysToValue()
{
QCOMPARE( qgsFlagKeysToValue( QStringLiteral( "String|Double" ), QgsFieldProxyModel::Filters( QgsFieldProxyModel::Filter::AllTypes ) ), QgsFieldProxyModel::Filter::String | QgsFieldProxyModel::Filter::Double );
QCOMPARE( qgsFlagKeysToValue( QStringLiteral( "UnknownKey" ), QgsFieldProxyModel::Filters( QgsFieldProxyModel::Filter::AllTypes ) ), QgsFieldProxyModel::Filters( QgsFieldProxyModel::Filter::AllTypes ) );
}
void TestQgis::testQMapQVariantList()
{
QMap<QVariantList, long> ids;
ids.insert( QVariantList() << "B" << "c", 5 );
ids.insert( QVariantList() << "b" << "C", 7 );
QVariantList v = QVariantList() << "b" << "C";
QMap<QVariantList, long>::const_iterator it = ids.constFind( v );
QVERIFY( it != ids.constEnd() );
QCOMPARE( it.value(), 7L );
v = QVariantList() << "B" << "c";
it = ids.constFind( v );
QVERIFY( it != ids.constEnd() );
QCOMPARE( it.value(), 5L );
}
QGSTEST_MAIN( TestQgis )
#include "testqgis.moc"
| gpl-2.0 |
spxtr/dolphin | Source/Core/Core/PowerPC/SignatureDB/MEGASignatureDB.cpp | 8 | 4746 | // Copyright 2017 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include "Core/PowerPC/SignatureDB/MEGASignatureDB.h"
#include <cstddef>
#include <cstdlib>
#include <fstream>
#include <limits>
#include <sstream>
#include <string>
#include <utility>
#include "Common/FileUtil.h"
#include "Common/Logging/Log.h"
#include "Common/StringUtil.h"
#include "Core/PowerPC/PPCSymbolDB.h"
#include "Core/PowerPC/PowerPC.h"
namespace
{
constexpr size_t INSTRUCTION_HEXSTRING_LENGTH = 8;
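// How one database line is parsed (the exact on-disk format is an assumption
// reconstructed from the helpers below): GetCode() reads a leading hex blob,
// INSTRUCTION_HEXSTRING_LENGTH digits per instruction, storing the wildcard
// "........" as 0 so that Compare() skips it; GetName() discards one token
// and takes the function name up to a " ^" marker; GetRefs() then consumes
// the remaining "^offset name" reference pairs.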
bool GetCode(MEGASignature* sig, std::istringstream* iss)
{
std::string code;
if ((*iss >> code) && (code.length() % INSTRUCTION_HEXSTRING_LENGTH) == 0)
{
for (size_t i = 0; i < code.length(); i += INSTRUCTION_HEXSTRING_LENGTH)
{
std::string instruction = code.substr(i, INSTRUCTION_HEXSTRING_LENGTH);
u32 num = static_cast<u32>(strtoul(instruction.c_str(), nullptr, 16));
if (num != 0 || instruction == "........")
{
sig->code.emplace_back(num);
}
else
{
WARN_LOG(SYMBOLS, "MEGA database failed to parse code");
return false;
}
}
return true;
}
return false;
}
bool GetFunctionName(std::istringstream* iss, std::string* name)
{
std::string buffer;
std::getline(*iss, buffer);
size_t next = buffer.find(" ^");
*name = StripSpaces(buffer.substr(0, next));
if (name->empty())
return false;
if (next == std::string::npos)
next = buffer.length();
iss->str(buffer.substr(next));
iss->clear();
return true;
}
bool GetName(MEGASignature* sig, std::istringstream* iss)
{
std::string unknown;
return (*iss >> unknown) && GetFunctionName(iss, &sig->name);
}
bool GetRefs(MEGASignature* sig, std::istringstream* iss)
{
std::string num, ref;
u32 ref_count = 1;
while (*iss && (*iss >> num) && !num.empty())
{
num = num.substr(1);
const char* ptr = num.c_str();
char* endptr;
u64 offset = strtoul(ptr, &endptr, 16);
if (ptr == endptr || offset > std::numeric_limits<u32>::max())
{
WARN_LOG(SYMBOLS, "MEGA database failed to parse reference %u offset", ref_count);
return false;
}
if (!GetFunctionName(iss, &ref))
{
WARN_LOG(SYMBOLS, "MEGA database failed to parse reference %u name", ref_count);
return false;
}
sig->refs.emplace_back(static_cast<u32>(offset), std::move(ref));
ref_count += 1;
num.clear();
ref.clear();
}
return true;
}
bool Compare(u32 address, u32 size, const MEGASignature& sig)
{
if (size != sig.code.size() * sizeof(u32))
return false;
for (size_t i = 0; i < sig.code.size(); ++i)
{
if (sig.code[i] != 0 &&
PowerPC::HostRead_U32(static_cast<u32>(address + i * sizeof(u32))) != sig.code[i])
return false;
}
return true;
}
} // Anonymous namespace
MEGASignatureDB::MEGASignatureDB() = default;
MEGASignatureDB::~MEGASignatureDB() = default;
void MEGASignatureDB::Clear()
{
m_signatures.clear();
}
bool MEGASignatureDB::Load(const std::string& file_path)
{
std::ifstream ifs;
File::OpenFStream(ifs, file_path, std::ios_base::in);
if (!ifs)
return false;
std::string line;
for (size_t i = 1; std::getline(ifs, line); ++i)
{
std::istringstream iss(line);
MEGASignature sig;
if (GetCode(&sig, &iss) && GetName(&sig, &iss) && GetRefs(&sig, &iss))
{
m_signatures.push_back(std::move(sig));
}
else
{
WARN_LOG(SYMBOLS, "MEGA database failed to parse line %zu", i);
}
}
return true;
}
bool MEGASignatureDB::Save(const std::string& file_path) const
{
ERROR_LOG(SYMBOLS, "MEGA database save unsupported yet.");
return false;
}
void MEGASignatureDB::Apply(PPCSymbolDB* symbol_db) const
{
for (auto& it : symbol_db->AccessSymbols())
{
auto& symbol = it.second;
for (const auto& sig : m_signatures)
{
if (Compare(symbol.address, symbol.size, sig))
{
symbol.name = sig.name;
INFO_LOG(SYMBOLS, "Found %s at %08x (size: %08x)!", sig.name.c_str(), symbol.address,
symbol.size);
break;
}
}
}
symbol_db->Index();
}
void MEGASignatureDB::Populate(const PPCSymbolDB* func_db, const std::string& filter)
{
ERROR_LOG(SYMBOLS, "MEGA database can't be populated yet.");
}
bool MEGASignatureDB::Add(u32 startAddr, u32 size, const std::string& name)
{
ERROR_LOG(SYMBOLS, "Can't add symbol to MEGA database yet.");
return false;
}
void MEGASignatureDB::List() const
{
for (const auto& entry : m_signatures)
{
DEBUG_LOG(SYMBOLS, "%s : %zu bytes", entry.name.c_str(), entry.code.size() * sizeof(u32));
}
INFO_LOG(SYMBOLS, "%zu functions known in current MEGA database.", m_signatures.size());
}
| gpl-2.0 |
aicjofs/android_kernel_lge_v500_stock | drivers/ata/pata_opti.c | 8 | 5528 | /*
* pata_opti.c - Opti PATA for new ATA layer
* (C) 2005 Red Hat Inc
*
* Based on
* linux/drivers/ide/pci/opti621.c Version 0.7 Sept 10, 2002
*
* Copyright (C) 1996-1998 Linus Torvalds & authors (see below)
*
* Authors:
* Jaromir Koutek <miri@punknet.cz>,
* Jan Harkes <jaharkes@cwi.nl>,
* Mark Lord <mlord@pobox.com>
* Some parts of code are from ali14xx.c and from rz1000.c.
*
* Also consulted the FreeBSD prototype driver by Kevin Day to try
* and resolve some confusions. Further documentation can be found in
* Ralf Brown's interrupt list
*
* If you have other variants of the Opti range (Viper/Vendetta) please
* try this driver with those PCI idents and report back. For the later
* chips see the pata_optidma driver
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_opti"
#define DRV_VERSION "0.2.9"
enum {
READ_REG = 0, /* */
WRITE_REG = 1, /* */
CNTRL_REG = 3, /* */
STRAP_REG = 5, /* */
MISC_REG = 6 /* */
};
/*
*/
static int opti_pre_reset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static const struct pci_bits opti_enable_bits[] = {
{ 0x45, 1, 0x80, 0x00 },
{ 0x40, 1, 0x08, 0x00 }
};
if (!pci_test_config_bits(pdev, &opti_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
/*
*/
static void opti_write_reg(struct ata_port *ap, u8 val, int reg)
{
void __iomem *regio = ap->ioaddr.cmd_addr;
/* */
ioread16(regio + 1);
ioread16(regio + 1);
iowrite8(3, regio + 2);
/* */
iowrite8(val, regio + reg);
/* */
iowrite8(0x83, regio + 2);
}
/**
* opti_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the PIO mode setup. Timing numbers are taken from
* the FreeBSD driver then pre computed to keep the code clean. There
* are two tables depending on the hardware clock speed.
*/
static void opti_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ata_device *pair = ata_dev_pair(adev);
int clock;
int pio = adev->pio_mode - XFER_PIO_0;
void __iomem *regio = ap->ioaddr.cmd_addr;
u8 addr;
/* */
static const u8 addr_timing[2][5] = {
{ 0x30, 0x20, 0x20, 0x10, 0x10 },
{ 0x20, 0x20, 0x10, 0x10, 0x10 }
};
static const u8 data_rec_timing[2][5] = {
{ 0x6B, 0x56, 0x42, 0x32, 0x31 },
{ 0x58, 0x44, 0x32, 0x22, 0x21 }
};
iowrite8(0xff, regio + 5);
clock = ioread16(regio + 5) & 1;
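/* Writing 0xff to the strap register and reading it back samples the
   hardware clock strap; bit 0 picks which of the two timing tables
   above applies (inferred from the tables' layout, not from chip
   documentation). */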
/*
*/
addr = addr_timing[clock][pio];
if (pair) {
/* */
u8 pair_addr = addr_timing[clock][pair->pio_mode - XFER_PIO_0];
if (pair_addr > addr)
addr = pair_addr;
}
/* */
opti_write_reg(ap, adev->devno, MISC_REG);
opti_write_reg(ap, data_rec_timing[clock][pio], READ_REG);
opti_write_reg(ap, data_rec_timing[clock][pio], WRITE_REG);
opti_write_reg(ap, addr, MISC_REG);
/* */
opti_write_reg(ap, 0x85, CNTRL_REG);
}
static struct scsi_host_template opti_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations opti_port_ops = {
.inherits = &ata_sff_port_ops,
.cable_detect = ata_cable_40wire,
.set_piomode = opti_set_piomode,
.prereset = opti_pre_reset,
};
static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.port_ops = &opti_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
ata_print_version_once(&dev->dev, DRV_VERSION);
return ata_pci_sff_init_one(dev, ppi, &opti_sht, NULL, 0);
}
static const struct pci_device_id opti[] = {
{ PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C621), 0 },
{ PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C825), 1 },
{ },
};
static struct pci_driver opti_pci_driver = {
.name = DRV_NAME,
.id_table = opti,
.probe = opti_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
static int __init opti_init(void)
{
return pci_register_driver(&opti_pci_driver);
}
static void __exit opti_exit(void)
{
pci_unregister_driver(&opti_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Opti 621/621X");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, opti);
MODULE_VERSION(DRV_VERSION);
module_init(opti_init);
module_exit(opti_exit);
| gpl-2.0 |
verybadsoldier/xbmc | xbmc/dialogs/GUIDialogExtendedProgressBar.cpp | 8 | 4339 | /*
* Copyright (C) 2012-2013 Team XBMC
* http://xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include "GUIDialogExtendedProgressBar.h"
#include "guilib/GUIProgressControl.h"
#include "guilib/GUISliderControl.h"
#include "threads/SingleLock.h"
#include "threads/SystemClock.h"
#define CONTROL_LABELHEADER 30
#define CONTROL_LABELTITLE 31
#define CONTROL_PROGRESS 32
#define ITEM_SWITCH_TIME_MS 2000
using namespace std;
string CGUIDialogProgressBarHandle::Text(void) const
{
CSingleLock lock(m_critSection);
string retVal(m_strText);
return retVal;
}
void CGUIDialogProgressBarHandle::SetText(const string &strText)
{
CSingleLock lock(m_critSection);
m_strText = strText;
}
void CGUIDialogProgressBarHandle::SetTitle(const string &strTitle)
{
CSingleLock lock(m_critSection);
m_strTitle = strTitle;
}
void CGUIDialogProgressBarHandle::SetProgress(int currentItem, int itemCount)
{
float fPercentage = (currentItem * 100.0f) / itemCount;
if (fPercentage > 100.0F)
fPercentage = 100.0F;
m_fPercentage = fPercentage;
}
CGUIDialogExtendedProgressBar::CGUIDialogExtendedProgressBar(void)
: CGUIDialog(WINDOW_DIALOG_EXT_PROGRESS, "DialogExtendedProgressBar.xml")
{
m_loadType = LOAD_ON_GUI_INIT;
m_iLastSwitchTime = 0;
m_iCurrentItem = 0;
}
CGUIDialogProgressBarHandle *CGUIDialogExtendedProgressBar::GetHandle(const string &strTitle)
{
CGUIDialogProgressBarHandle *handle = new CGUIDialogProgressBarHandle(strTitle);
{
CSingleLock lock(m_critSection);
m_handles.push_back(handle);
}
Show();
return handle;
}
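// Lifetime note (inferred from UpdateState() below): the dialog owns the
// returned handle and deletes it once IsFinished() reports true, so callers
// must not delete it themselves.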
bool CGUIDialogExtendedProgressBar::OnMessage(CGUIMessage& message)
{
switch (message.GetMessage())
{
case GUI_MSG_WINDOW_INIT:
{
m_iLastSwitchTime = XbmcThreads::SystemClockMillis();
m_iCurrentItem = 0;
CGUIDialog::OnMessage(message);
UpdateState(0);
return true;
}
break;
}
return CGUIDialog::OnMessage(message);
}
void CGUIDialogExtendedProgressBar::Process(unsigned int currentTime, CDirtyRegionList &dirtyregions)
{
if (m_active)
UpdateState(currentTime);
CGUIDialog::Process(currentTime, dirtyregions);
}
void CGUIDialogExtendedProgressBar::UpdateState(unsigned int currentTime)
{
string strHeader;
string strTitle;
float fProgress(-1.0f);
{
CSingleLock lock(m_critSection);
// delete finished items
for (int iPtr = m_handles.size() - 1; iPtr >= 0; iPtr--)
{
if (m_handles.at(iPtr)->IsFinished())
{
delete m_handles.at(iPtr);
m_handles.erase(m_handles.begin() + iPtr);
}
}
if (!m_handles.size())
{
Close(false, 0, true, false);
return;
}
// ensure the current item is in our range
if (m_iCurrentItem >= m_handles.size())
m_iCurrentItem = m_handles.size() - 1;
// update the current item ptr
if (currentTime > m_iLastSwitchTime &&
currentTime - m_iLastSwitchTime >= ITEM_SWITCH_TIME_MS)
{
m_iLastSwitchTime = currentTime;
// select next item
if (++m_iCurrentItem > m_handles.size() - 1)
m_iCurrentItem = 0;
}
CGUIDialogProgressBarHandle *handle = m_handles.at(m_iCurrentItem);
if (handle)
{
strTitle = handle->Text();
strHeader = handle->Title();
fProgress = handle->Percentage();
}
}
SET_CONTROL_LABEL(CONTROL_LABELHEADER, strHeader);
SET_CONTROL_LABEL(CONTROL_LABELTITLE, strTitle);
if (fProgress > -1.0f)
{
SET_CONTROL_VISIBLE(CONTROL_PROGRESS);
CGUIProgressControl* pProgressCtrl=(CGUIProgressControl*)GetControl(CONTROL_PROGRESS);
if (pProgressCtrl) pProgressCtrl->SetPercentage(fProgress);
}
}
| gpl-2.0 |
liuyanghejerry/qtextended | src/libraries/qtopiagfx/plugin/mmx/mmx32_blur.c | 8 | 22397 | /****************************************************************************
**
** This file is part of the Qt Extended Opensource Package.
**
** Copyright (C) 2009 Trolltech ASA.
**
** Contact: Qt Extended Information (info@qtextended.org)
**
** This file may be used under the terms of the GNU General Public License
** version 2.0 as published by the Free Software Foundation and appearing
** in the file LICENSE.GPL included in the packaging of this file.
**
** Please review the following information to ensure GNU General Public
** Licensing requirements will be met:
** http://www.fsf.org/licensing/licenses/info/GPLv2.html.
**
**
****************************************************************************/
#include <mmintrin.h>
#include <stdio.h>
void mmx_blurcol(unsigned int *line, int length, int step, int alpha32)
{
int out = 0;
alpha32 |= alpha32 << 16;
asm volatile (
"wzero wr12\n\t" // wr12 = zero
"tbcstw wr11, %4\n\t" // wr11 = alpha 0AAAAAAAAAAAAAAA.A.A.A
// "wzero wr10\n\t" // wr10 = accumulator
"mov r5, #7\n\t"
"tbcstw wr13, r5\n\t" // wr13 = 7
"mov r5, %3\n\t"
// Load (pre)
"wldrw wr1, [%2]\n\t"
// Setup accumulator default
"wunpckelub wr10, wr1\n\t" // wr0 = 0A0R0G0B
"wsllh wr10, wr10, wr13\n\t" // wr0 = 0aaaaaaaa0000000.r.g.b.
"1:\n\t"
"add r6, %2, %6\n\t"
"subs r5, r5, #1\n\t" // Loop update
// Adjust
"wunpckelub wr0, wr1\n\t" // wr0 = 0A0R0G0B
"wsllh wr0, wr0, wr13\n\t" // wr0 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr0, wr0, wr10\n\t" // wr0 = data - accum
//"textrmuw %1, wr0, #0\n\t"
"wmulsm wr0, wr0, wr11\n\t" // wr0 = (accum - data) * alpha
"wldrwne wr1, [r6]\n\t"
"waddhss wr0, wr0, wr0\n\t" // wr0 = wr2 * 2
"waddhss wr10, wr10, wr0\n\t" // wr10 += wr0
// Adjust
"wsrlh wr0, wr10, wr13\n\t" // wr0 = wr10 >> 7
"wpackhus wr0, wr0, wr12\n\t"
// XXX 1 cycle STALL
// Store
"wstrw wr0, [%2]\n\t"
"mov %2, r6\n\t"
// Loop
"bne 1b\n\t"
//
// Now go back again
//
"sub %2, %2, %6\n\t"
"mov r5, %3\n\t"
// Load (pre)
"wldrw wr1, [%2]\n\t"
"2:\n\t"
"sub r6, %2, %6\n\t"
"subs r5, r5, #1\n\t" // Update loop
// Adjust
"wunpckelub wr0, wr1\n\t" // wr0 = 0A0R0G0B
"wsllh wr0, wr0, wr13\n\t" // wr0 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr0, wr0, wr10\n\t" // wr0 = data - accum
//"textrmuw %1, wr0, #0\n\t"
"wmulsm wr0, wr0, wr11\n\t" // wr0 = (accum - data) * alpha
"wldrwne wr1, [r6]\n\t"
"waddhss wr0, wr0, wr0\n\t" // wr0 = wr2 * 2
"waddhss wr10, wr10, wr0\n\t" // wr10 += wr0
// Adjust
"wsrlh wr0, wr10, wr13\n\t" // wr0 = wr10 >> 7
"wpackhus wr0, wr0, wr12\n\t"
// XXX 1 cycle STALL
// Store
"wstrw wr0, [%2]\n\t"
"mov %2, r6\n\t"
// Loop
"bne 2b\n\t"
: /* output */ "=r"(line), "=r"(out)
: /* input */ "0"(line), "r"(length), "r"(alpha32), "1"(out), "r"(step)
: /* clobber */ "r5", "r6", "wr12"
);
}
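/*
 * Scalar reference of the per-pixel update the asm above performs. This is
 * an illustrative sketch only (the helper name is invented and it is not
 * part of the original driver): it mirrors the wsllh/wmulsm/waddhss
 * sequence with 9.7 fixed point accumulators per channel, omits the
 * saturation of the 16-bit lanes, and shows a single forward pass;
 * mmx_blurcol() runs the same update down and then back up the column.
 * 'step' is treated as a byte offset, matching the raw pointer add in the
 * asm, and 'alpha' is one 16-bit lane of alpha32.
 */
static void blurcol_ref_pass(unsigned int *line, int length, int step,
                             int alpha)
{
    int accum[4];
    int i, c;
    /* seed the accumulators from the first pixel, as the asm does */
    for (c = 0; c < 4; ++c)
        accum[c] = (int)((*line >> (8 * c)) & 0xffu) << 7;
    for (i = 0; i < length; ++i) {
        unsigned int px = *line;
        unsigned int out = 0;
        for (c = 0; c < 4; ++c) {
            int data = (int)((px >> (8 * c)) & 0xffu) << 7;
            /* wmulsm keeps the high 16 bits of the 16x16 product */
            int delta = (int)(((long long)(data - accum[c]) * alpha) >> 16);
            accum[c] += 2 * delta; /* waddhss doubles the step */
            out |= (unsigned int)((accum[c] >> 7) & 0xff) << (8 * c);
        }
        *line = out;
        line = (unsigned int *)((char *)line + step);
    }
}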
// Can be sped up using technique from _16
void mmx_blurcol_8(unsigned int *line, int length, int step, int alpha32)
{
int out = 0;
alpha32 |= alpha32 << 16;
/*
0 s
1 s
2 s
3 s
4 ac5
5 ac6
6 ac7
7 ac1
8 ac2
9 ac3
10 ac4
11 alpha
12 zero
13 7
14 ac8
15 2
*/
asm volatile (
"wzero wr12\n\t" // wr12 = zero
"tbcstw wr11, %4\n\t" // wr11 = alpha 0AAAAAAAAAAAAAAA.A.A.A
// "wzero wr7\n\t" // wr7 = accumulator (1)
// "wzero wr8\n\t" // wr8 = accumulator (2)
// "wzero wr9\n\t" // wr9 = accumulator (3)
// "wzero wr10\n\t" // wr10 = accumulator (4)
// "wzero wr4\n\t" // wr4 = accumulator (5)
// "wzero wr5\n\t" // wr5 = accumulator (6)
// "wzero wr6\n\t" // wr6 = accumulator (7)
// "wzero wr14\n\t" // wr14 = accumulator (8)
"mov r5, #7\n\t"
"tbcstw wr13, r5\n\t" // wr13 = 7
// Setup accumulator default
"wldrw wr7, [%2]\n\t"
"wunpckelub wr7, wr7\n\t"
"wsllh wr7, wr7, wr13\n\t"
"wldrw wr8, [%2, #4]\n\t"
"wunpckelub wr8, wr8\n\t"
"wsllh wr8, wr8, wr13\n\t"
"wldrw wr9, [%2, #8]\n\t"
"wunpckelub wr9, wr9\n\t"
"wsllh wr9, wr9, wr13\n\t"
"wldrw wr10, [%2, #12]\n\t"
"wunpckelub wr10, wr10\n\t"
"wsllh wr10, wr10, wr13\n\t"
"wldrw wr4, [%2, #16]\n\t"
"wunpckelub wr4, wr4\n\t"
"wsllh wr4, wr4, wr13\n\t"
"wldrw wr5, [%2, #20]\n\t"
"wunpckelub wr5, wr5\n\t"
"wsllh wr5, wr5, wr13\n\t"
"wldrw wr6, [%2, #24]\n\t"
"wunpckelub wr6, wr6\n\t"
"wsllh wr6, wr6, wr13\n\t"
"wldrw wr14, [%2, #28]\n\t"
"wunpckelub wr14, wr14\n\t"
"wsllh wr14, wr14, wr13\n\t"
"mov r5, %3\n\t"
"1:\n\t"
// wr0 & wr7
// Load
"wldrw wr0, [%2]\n\t"
// Adjust
"wunpckelub wr0, wr0\n\t" // wr0 = 0A0R0G0B
"wsllh wr0, wr0, wr13\n\t" // wr0 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr0, wr0, wr7\n\t" // wr0 = data - accum
"wmulsm wr0, wr0, wr11\n\t" // wr0 = (accum - data) * alpha
"wldrw wr1, [%2, #4]\n\t" // XXX 2
"waddhss wr0, wr0, wr0\n\t" // wr0 = wr2 * 2
"waddhss wr7, wr7, wr0\n\t" // wr7 += wr0
// Adjust
"wsrlh wr0, wr7, wr13\n\t" // wr0 = wr7 >> 7
"wpackhus wr0, wr0, wr12\n\t"
// Store
// "wstrw wr0, [%2]\n\t" // XXX 1
// wr1 & wr8
// Load
// "wldrw wr1, [%2, #4]\n\t" // XXX 2
// Adjust
"wunpckelub wr1, wr1\n\t" // wr1 = 0A0R0G0B
"wstrw wr0, [%2]\n\t" // XXX 1
"wsllh wr1, wr1, wr13\n\t" // wr1 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr1, wr1, wr8\n\t" // wr1 = data - accum
"wmulsm wr1, wr1, wr11\n\t" // wr1 = (accum - data) * alpha
"wldrw wr2, [%2, #8]\n\t" // XXX 4
"waddhss wr1, wr1, wr1\n\t" // wr1 = wr2 * 2
"waddhss wr8, wr8, wr1\n\t" // wr8 += wr1
// Adjust
"wsrlh wr1, wr8, wr13\n\t" // wr1 = wr8 >> 7
"wpackhus wr1, wr1, wr12\n\t"
// Store
// "wstrw wr1, [%2, #4]\n\t" // XXX 3
// wr2 & wr9
// Load
// "wldrw wr2, [%2, #8]\n\t" // XXX 4
// Adjust
"wunpckelub wr2, wr2\n\t" // wr2 = 0A0R0G0B
"wstrw wr1, [%2, #4]\n\t" // XXX 3
"wsllh wr2, wr2, wr13\n\t" // wr2 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr2, wr2, wr9\n\t" // wr2 = data - accum
"wmulsm wr2, wr2, wr11\n\t" // wr2 = (accum - data) * alpha
"wldrw wr3, [%2, #12]\n\t" // XXX 6
"waddhss wr2, wr2, wr2\n\t" // wr1 = wr2 * 2
"waddhss wr9, wr9, wr2\n\t" // wr8 += wr1
// Adjust
"wsrlh wr2, wr9, wr13\n\t" // wr1 = wr8 >> 7
"wpackhus wr2, wr2, wr12\n\t"
// Store
// "wstrw wr2, [%2, #8]\n\t" // XXX 5
// wr3 & wr10
// Load
// "wldrw wr3, [%2, #12]\n\t" // XXX 6
// Adjust
"wunpckelub wr3, wr3\n\t" // wr3 = 0A0R0G0B
"wstrw wr2, [%2, #8]\n\t" // XXX 5
"wsllh wr3, wr3, wr13\n\t" // wr3 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr3, wr3, wr10\n\t" // wr3 = data - accum
"wmulsm wr3, wr3, wr11\n\t" // wr3 = (accum - data) * alpha
"wldrw wr0, [%2, #16]\n\t" // XXX 8
"waddhss wr3, wr3, wr3\n\t" // wr1 = wr3 * 2
"waddhss wr10, wr10, wr3\n\t" // wr8 += wr1
// Adjust
"wsrlh wr3, wr10, wr13\n\t" // wr1 = wr8 >> 7
"wpackhus wr3, wr3, wr12\n\t"
// Store
// "wstrw wr3, [%2, #12]\n\t" // XXX 7
// wr0 & wr4
// Load
// "wldrw wr0, [%2, #16]\n\t" // XXX 8
// Adjust
"wunpckelub wr0, wr0\n\t" // wr0 = 0A0R0G0B
"wstrw wr3, [%2, #12]\n\t" // XXX 7
"wsllh wr0, wr0, wr13\n\t" // wr0 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr0, wr0, wr4\n\t" // wr0 = data - accum
"wmulsm wr0, wr0, wr11\n\t" // wr0 = (accum - data) * alpha
"wldrw wr1, [%2, #20]\n\t" // XXX 9
"waddhss wr0, wr0, wr0\n\t" // wr0 = wr2 * 2
"waddhss wr4, wr4, wr0\n\t" // wr4 += wr0
// Adjust
"wsrlh wr0, wr4, wr13\n\t" // wr0 = wr4 >> 7
"wpackhus wr0, wr0, wr12\n\t"
// Store
// "wstrw wr0, [%2, #16]\n\t" // XXX 8
// wr1 & wr5
// Load
// "wldrw wr1, [%2, #20]\n\t" // XXX 9
// Adjust
"wunpckelub wr1, wr1\n\t" // wr1 = 0A0R0G0B
"wstrw wr0, [%2, #16]\n\t" // XXX 8
"wsllh wr1, wr1, wr13\n\t" // wr1 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr1, wr1, wr5\n\t" // wr1 = data - accum
"wmulsm wr1, wr1, wr11\n\t" // wr1 = (accum - data) * alpha
"wldrw wr2, [%2, #24]\n\t" // XXX 11
"waddhss wr1, wr1, wr1\n\t" // wr1 = wr2 * 2
"waddhss wr5, wr5, wr1\n\t" // wr5 += wr1
// Adjust
"wsrlh wr1, wr5, wr13\n\t" // wr1 = wr5 >> 7
"wpackhus wr1, wr1, wr12\n\t"
// Store
// "wstrw wr1, [%2, #20]\n\t" // XXX 10
// wr2 & wr6
// Load
// "wldrw wr2, [%2, #24]\n\t" // XXX 11
// Adjust
"wunpckelub wr2, wr2\n\t" // wr2 = 0A0R0G0B
"wstrw wr1, [%2, #20]\n\t" // XXX 10
"wsllh wr2, wr2, wr13\n\t" // wr2 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr2, wr2, wr6\n\t" // wr2 = data - accum
"wmulsm wr2, wr2, wr11\n\t" // wr2 = (accum - data) * alpha
"wldrw wr3, [%2, #28]\n\t" // XXX 13
"waddhss wr2, wr2, wr2\n\t" // wr1 = wr2 * 2
"waddhss wr6, wr6, wr2\n\t" // wr8 += wr1
// Adjust
"wsrlh wr2, wr6, wr13\n\t" // wr1 = wr8 >> 7
"wpackhus wr2, wr2, wr12\n\t"
// Store
// "wstrw wr2, [%2, #24]\n\t" // XXX 12
// wr3 & wr14
// Load
// "wldrw wr3, [%2, #28]\n\t" // XXX 13
// Adjust
"wunpckelub wr3, wr3\n\t" // wr3 = 0A0R0G0B
"wstrw wr2, [%2, #24]\n\t" // XXX 12
"wsllh wr3, wr3, wr13\n\t" // wr3 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr3, wr3, wr14\n\t" // wr3 = data - accum
"wmulsm wr3, wr3, wr11\n\t" // wr3 = (accum - data) * alpha
"waddhss wr3, wr3, wr3\n\t" // wr1 = wr3 * 2
"waddhss wr14, wr14, wr3\n\t" // wr8 += wr1
// Adjust
"wsrlh wr3, wr14, wr13\n\t" // wr1 = wr8 >> 7
"wpackhus wr3, wr3, wr12\n\t"
// Store
"subs r5, r5, #1\n\t" // Loop update // XXX 14
"wstrw wr3, [%2, #28]\n\t"
"addne %2, %2, %6\n\t"
// "subs r5, r5, #1\n\t" // Loop update // XXX 14
// Loop
"bne 1b\n\t"
//
// Now go back again
//
"mov r5, %3\n\t"
"2:\n\t"
// wr0 & wr7
// Load
"wldrw wr0, [%2]\n\t" // XXX 15
// Adjust
"wunpckelub wr0, wr0\n\t" // wr0 = 0A0R0G0B
"wsllh wr0, wr0, wr13\n\t" // wr0 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr0, wr0, wr7\n\t" // wr0 = data - accum
"wmulsm wr0, wr0, wr11\n\t" // wr0 = (accum - data) * alpha
"wldrw wr1, [%2, #4]\n\t" // XXX 17
"waddhss wr0, wr0, wr0\n\t" // wr0 = wr2 * 2
"waddhss wr7, wr7, wr0\n\t" // wr7 += wr0
// Adjust
"wsrlh wr0, wr7, wr13\n\t" // wr0 = wr7 >> 7
"wpackhus wr0, wr0, wr12\n\t"
// Store
// "wstrw wr0, [%2]\n\t" // XXX 16
// wr1 & wr8
// Load
// "wldrw wr1, [%2, #4]\n\t" // XXX 17
// Adjust
"wunpckelub wr1, wr1\n\t" // wr1 = 0A0R0G0B
"wstrw wr0, [%2]\n\t" // XXX 16
"wsllh wr1, wr1, wr13\n\t" // wr1 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr1, wr1, wr8\n\t" // wr1 = data - accum
"wmulsm wr1, wr1, wr11\n\t" // wr1 = (accum - data) * alpha
"wldrw wr2, [%2, #8]\n\t" // XXX 19
"waddhss wr1, wr1, wr1\n\t" // wr1 = wr2 * 2
"waddhss wr8, wr8, wr1\n\t" // wr8 += wr1
// Adjust
"wsrlh wr1, wr8, wr13\n\t" // wr1 = wr8 >> 7
"wpackhus wr1, wr1, wr12\n\t"
// Store
// "wstrw wr1, [%2, #4]\n\t" // XXX 18
// wr2 & wr9
// Load
// "wldrw wr2, [%2, #8]\n\t" // XXX 19
// Adjust
"wunpckelub wr2, wr2\n\t" // wr2 = 0A0R0G0B
"wstrw wr1, [%2, #4]\n\t" // XXX 18
"wsllh wr2, wr2, wr13\n\t" // wr2 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr2, wr2, wr9\n\t" // wr2 = data - accum
"wmulsm wr2, wr2, wr11\n\t" // wr2 = (accum - data) * alpha
"wldrw wr3, [%2, #12]\n\t" // XXX 21
"waddhss wr2, wr2, wr2\n\t" // wr1 = wr2 * 2
"waddhss wr9, wr9, wr2\n\t" // wr8 += wr1
// Adjust
"wsrlh wr2, wr9, wr13\n\t" // wr1 = wr8 >> 7
"wpackhus wr2, wr2, wr12\n\t"
// Store
// "wstrw wr2, [%2, #8]\n\t" // XXX 20
// wr3 & wr10
// Load
// "wldrw wr3, [%2, #12]\n\t" // XXX 21
// Adjust
"wunpckelub wr3, wr3\n\t" // wr3 = 0A0R0G0B
"wstrw wr2, [%2, #8]\n\t" // XXX 20
"wsllh wr3, wr3, wr13\n\t" // wr3 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr3, wr3, wr10\n\t" // wr3 = data - accum
"wmulsm wr3, wr3, wr11\n\t" // wr3 = (accum - data) * alpha
"wldrw wr0, [%2, #16]\n\t" // XXX 23
"waddhss wr3, wr3, wr3\n\t" // wr1 = wr3 * 2
"waddhss wr10, wr10, wr3\n\t" // wr8 += wr1
// Adjust
"wsrlh wr3, wr10, wr13\n\t" // wr1 = wr8 >> 7
"wpackhus wr3, wr3, wr12\n\t"
// Store
// "wstrw wr3, [%2, #12]\n\t" // XXX 22
// wr0 & wr4
// Load
// "wldrw wr0, [%2, #16]\n\t" // XXX 23
// Adjust
"wunpckelub wr0, wr0\n\t" // wr0 = 0A0R0G0B
"wstrw wr3, [%2, #12]\n\t" // XXX 22
"wsllh wr0, wr0, wr13\n\t" // wr0 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr0, wr0, wr4\n\t" // wr0 = data - accum
"wmulsm wr0, wr0, wr11\n\t" // wr0 = (accum - data) * alpha
"wldrw wr1, [%2, #20]\n\t" // XXX 25
"waddhss wr0, wr0, wr0\n\t" // wr0 = wr2 * 2
"waddhss wr4, wr4, wr0\n\t" // wr4 += wr0
// Adjust
"wsrlh wr0, wr4, wr13\n\t" // wr0 = wr4 >> 7
"wpackhus wr0, wr0, wr12\n\t"
// Store
//"wstrw wr0, [%2, #16]\n\t" // XXX 24
// wr1 & wr5
// Load
//"wldrw wr1, [%2, #20]\n\t" // XXX 25
// Adjust
"wunpckelub wr1, wr1\n\t" // wr1 = 0A0R0G0B
"wstrw wr0, [%2, #16]\n\t" // XXX 24
"wsllh wr1, wr1, wr13\n\t" // wr1 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr1, wr1, wr5\n\t" // wr1 = data - accum
"wmulsm wr1, wr1, wr11\n\t" // wr1 = (accum - data) * alpha
"wldrw wr2, [%2, #24]\n\t" // XXX 27
"waddhss wr1, wr1, wr1\n\t" // wr1 = wr2 * 2
"waddhss wr5, wr5, wr1\n\t" // wr5 += wr1
// Adjust
"wsrlh wr1, wr5, wr13\n\t" // wr1 = wr5 >> 7
"wpackhus wr1, wr1, wr12\n\t"
// Store
//"wstrw wr1, [%2, #20]\n\t" // XXX 26
// wr2 & wr6
// Load
//"wldrw wr2, [%2, #24]\n\t" // XXX 27
// Adjust
"wunpckelub wr2, wr2\n\t" // wr2 = 0A0R0G0B
"wstrw wr1, [%2, #20]\n\t" // XXX 26
"wsllh wr2, wr2, wr13\n\t" // wr2 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr2, wr2, wr6\n\t" // wr2 = data - accum
"wmulsm wr2, wr2, wr11\n\t" // wr2 = (accum - data) * alpha
"wldrw wr3, [%2, #28]\n\t" // XXX 29
"waddhss wr2, wr2, wr2\n\t" // wr1 = wr2 * 2
"waddhss wr6, wr6, wr2\n\t" // wr8 += wr1
// Adjust
"wsrlh wr2, wr6, wr13\n\t" // wr1 = wr8 >> 7
"wpackhus wr2, wr2, wr12\n\t"
// Store
//"wstrw wr2, [%2, #24]\n\t" // XXX 28
// wr3 & wr14
// Load
//"wldrw wr3, [%2, #28]\n\t" // XXX 29
// Adjust
"wunpckelub wr3, wr3\n\t" // wr3 = 0A0R0G0B
"wstrw wr2, [%2, #24]\n\t" // XXX 28
"wsllh wr3, wr3, wr13\n\t" // wr3 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr3, wr3, wr14\n\t" // wr3 = data - accum
"wmulsm wr3, wr3, wr11\n\t" // wr3 = (accum - data) * alpha
"waddhss wr3, wr3, wr3\n\t" // wr1 = wr3 * 2
"waddhss wr14, wr14, wr3\n\t" // wr8 += wr1
// Adjust
"wsrlh wr3, wr14, wr13\n\t" // wr1 = wr8 >> 7
"wpackhus wr3, wr3, wr12\n\t"
"subs r5, r5, #1\n\t" // Loop update
// Store
"wstrw wr3, [%2, #28]\n\t"
"sub %2, %2, %6\n\t"
// Loop
"bne 2b\n\t"
: /* output */ "=r"(line), "=r"(out)
: /* input */ "0"(line), "r"(length), "r"(alpha32), "1"(out), "r"(step)
: /* clobber */ "r5", "r6", "wr12"
);
}
void mmx_blurrow(unsigned int *line, int length, int alpha32)
{
int out = 0;
alpha32 |= alpha32 << 16;
asm volatile (
"wzero wr12\n\t" // wr12 = zero
"tbcstw wr11, %4\n\t" // wr11 = alpha 0AAAAAAAAAAAAAAA.A.A.A
// "wzero wr10\n\t" // wr10 = accumulator
"mov r5, #7\n\t"
"tbcstw wr13, r5\n\t" // wr13 = 7
"mov r5, %3\n\t"
// Load (pre)
"wldrw wr1, [%2]\n\t"
// Setup accumulator default
"wunpckelub wr10, wr1\n\t" // wr0 = 0A0R0G0B
"wsllh wr10, wr10, wr13\n\t" // wr0 = 0aaaaaaaa0000000.r.g.b.
"1:\n\t"
"subs r5, r5, #1\n\t" // Loop adjust
// Adjust
"wunpckelub wr0, wr1\n\t" // wr0 = 0A0R0G0B
"wsllh wr0, wr0, wr13\n\t" // wr0 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr0, wr0, wr10\n\t" // wr0 = data - accum
//"textrmuw %1, wr0, #0\n\t"
"wmulsm wr0, wr0, wr11\n\t" // wr0 = (accum - data) * alpha
"wldrwne wr1, [%2, #4]\n\t"
"waddhss wr0, wr0, wr0\n\t" // wr0 = wr2 * 2
"waddhss wr10, wr10, wr0\n\t" // wr10 += wr0
// Adjust
"wsrlh wr0, wr10, wr13\n\t" // wr0 = wr10 >> 7
"wpackhus wr0, wr0, wr12\n\t"
// XXX 1 cycle STALL
// Store
"wstrw wr0, [%2], #4\n\t"
// Loop
"bne 1b\n\t"
//
// Now go back again
//
"sub %2, %2, #4\n\t"
"mov r5, %3\n\t"
// Load (pre)
"wldrw wr1, [%2]\n\t"
"2:\n\t"
"subs r5, r5, #1\n\t" // Loop adjust
// Adjust
"wunpckelub wr0, wr1\n\t" // wr0 = 0A0R0G0B
"wsllh wr0, wr0, wr13\n\t" // wr0 = 0aaaaaaaa0000000.r.g.b.
// Adjust and accumulate
"wsubh wr0, wr0, wr10\n\t" // wr0 = data - accum
//"textrmuw %1, wr0, #0\n\t"
"wmulsm wr0, wr0, wr11\n\t" // wr0 = (accum - data) * alpha
"wldrwne wr1, [%2, #-4]\n\t"
"waddhss wr0, wr0, wr0\n\t" // wr0 = wr2 * 2
"waddhss wr10, wr10, wr0\n\t" // wr10 += wr0
// Adjust
"wsrlh wr0, wr10, wr13\n\t" // wr0 = wr10 >> 7
"wpackhus wr0, wr0, wr12\n\t"
// XXX 1 cycle STALL
// Store
"wstrw wr0, [%2], #-4\n\t"
// Loop
"bne 2b\n\t"
: /* output */ "=r"(line), "=r"(out)
: /* input */ "0"(line), "r"(length), "r"(alpha32), "1"(out)
: /* clobber */ "r5", "wr12"
);
}
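/*
 * For reference, a minimal scalar model of the filter mmx_blurrow above
 * implements -- this is my reading of the wMMX code, not part of the
 * original source. Each of the four byte lanes of a pixel runs a forward
 * and then a backward exponential moving average,
 * accum += 2 * (((data - accum) * alpha) >> 16), with the accumulator held
 * in 8.7 fixed point. Assumes 0 <= alpha16 <= 32767 so each update is a
 * convex combination and the saturation provided by waddhss/wpackhus is
 * never needed.
 */
static void scalar_blurrow_ref(unsigned int *line, int length, int alpha16)
{
	unsigned char *p = (unsigned char *)line;
	int accum[4];
	int c, i;

	for (c = 0; c < 4; c++) /* seed the accumulator from the first pixel */
		accum[c] = p[c] << 7;
	for (i = 0; i < length; i++) /* forward pass */
		for (c = 0; c < 4; c++) {
			int data = p[4 * i + c] << 7;
			/* high half of the 16x16 signed product, as wmulsm takes */
			accum[c] += 2 * (((data - accum[c]) * alpha16) >> 16);
			p[4 * i + c] = accum[c] >> 7;
		}
	for (i = length - 1; i >= 0; i--) /* backward pass; accumulator carries over */
		for (c = 0; c < 4; c++) {
			int data = p[4 * i + c] << 7;
			accum[c] += 2 * (((data - accum[c]) * alpha16) >> 16);
			p[4 * i + c] = accum[c] >> 7;
		}
}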
void mmx_blur(unsigned int *img, int width, int height, int step_width, int alpha32)
{
int row, col;
for(row = 0; row < height; ++row)
mmx_blurrow(img + row * step_width, width, alpha32);
while(width >= 8) {
mmx_blurcol_8(img, height, step_width * 4, alpha32);
img += 8;
width -= 8;
}
while(width--)
mmx_blurcol(img++, height, step_width * 4, alpha32);
}
| gpl-2.0 |
muzaffar101/rtLinux2.4.35 | drivers/hotplug/pciehprm_nonacpi.c | 8 | 11839 | /*
* PCIEHPRM NONACPI: PHP Resource Manager for Non-ACPI/Legacy platform
*
* Copyright (C) 1995,2001 Compaq Computer Corporation
* Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2001 IBM Corp.
* Copyright (C) 2003-2004 Intel Corporation
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
*
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#ifdef CONFIG_IA64
#include <asm/iosapic.h>
#endif
#include "pciehp.h"
#include "pciehprm.h"
#include "pciehprm_nonacpi.h"
void pciehprm_cleanup(void)
{
return;
}
int pciehprm_print_pirt(void)
{
return 0;
}
void * pciehprm_get_slot(struct slot *slot)
{
return NULL;
}
int pciehprm_get_physical_slot_number(struct controller *ctrl, u32 *sun, u8 busnum, u8 devnum)
{
*sun = (u8) (ctrl->first_slot);
return 0;
}
static void print_pci_resource ( struct pci_resource *aprh)
{
struct pci_resource *res;
for (res = aprh; res; res = res->next)
dbg(" base= 0x%x length= 0x%x\n", res->base, res->length);
}
static void phprm_dump_func_res( struct pci_func *fun)
{
struct pci_func *func = fun;
if (func->bus_head) {
dbg(": BUS Resources:\n");
print_pci_resource (func->bus_head);
}
if (func->io_head) {
dbg(": IO Resources:\n");
print_pci_resource (func->io_head);
}
if (func->mem_head) {
dbg(": MEM Resources:\n");
print_pci_resource (func->mem_head);
}
if (func->p_mem_head) {
dbg(": PMEM Resources:\n");
print_pci_resource (func->p_mem_head);
}
}
static int phprm_get_used_resources (
struct controller *ctrl,
struct pci_func *func
)
{
return pciehp_save_used_resources (ctrl, func, !DISABLE_CARD);
}
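/*
 * Worked example for phprm_delete_resource() below (illustration, not from
 * the original source): deleting base 0x2000, size 0x100 from a list whose
 * only node is { base 0x1000, length 0x3000 } first splits off the head
 * { 0x1000, 0x1000 }, then splits off the tail { 0x2100, 0x1f00 }, and
 * finally unlinks and frees the exact-fit node { 0x2000, 0x100 } that
 * remains.
 */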
static int phprm_delete_resource(
struct pci_resource **aprh,
ulong base,
ulong size)
{
struct pci_resource *res;
struct pci_resource *prevnode;
struct pci_resource *split_node;
ulong tbase;
pciehp_resource_sort_and_combine(aprh);
for (res = *aprh; res; res = res->next) {
if (res->base > base)
continue;
if ((res->base + res->length) < (base + size))
continue;
if (res->base < base) {
tbase = base;
if ((res->length - (tbase - res->base)) < size)
continue;
split_node = (struct pci_resource *) kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
if (!split_node)
return -ENOMEM;
split_node->base = res->base;
split_node->length = tbase - res->base;
res->base = tbase;
res->length -= split_node->length;
split_node->next = res->next;
res->next = split_node;
}
if (res->length >= size) {
split_node = (struct pci_resource*) kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
if (!split_node)
return -ENOMEM;
split_node->base = res->base + size;
split_node->length = res->length - size;
res->length = size;
split_node->next = res->next;
res->next = split_node;
}
if (*aprh == res) {
*aprh = res->next;
} else {
prevnode = *aprh;
while (prevnode->next != res)
prevnode = prevnode->next;
prevnode->next = res->next;
}
res->next = NULL;
kfree(res);
break;
}
return 0;
}
static int phprm_delete_resources(
struct pci_resource **aprh,
struct pci_resource *this
)
{
struct pci_resource *res;
for (res = this; res; res = res->next)
phprm_delete_resource(aprh, res->base, res->length);
return 0;
}
static int configure_existing_function(
struct controller *ctrl,
struct pci_func *func
)
{
int rc;
/* See how much resources the func has used. */
rc = phprm_get_used_resources (ctrl, func);
if (!rc) {
/* Subtract the resources used by the func from ctrl resources */
rc = phprm_delete_resources (&ctrl->bus_head, func->bus_head);
rc |= phprm_delete_resources (&ctrl->io_head, func->io_head);
rc |= phprm_delete_resources (&ctrl->mem_head, func->mem_head);
rc |= phprm_delete_resources (&ctrl->p_mem_head, func->p_mem_head);
if (rc)
warn("aCEF: cannot del used resources\n");
} else
err("aCEF: cannot get used resources\n");
return rc;
}
static int pciehprm_delete_resource(
struct pci_resource **aprh,
ulong base,
ulong size)
{
struct pci_resource *res;
struct pci_resource *prevnode;
struct pci_resource *split_node;
ulong tbase;
pciehp_resource_sort_and_combine(aprh);
for (res = *aprh; res; res = res->next) {
if (res->base > base)
continue;
if ((res->base + res->length) < (base + size))
continue;
if (res->base < base) {
tbase = base;
if ((res->length - (tbase - res->base)) < size)
continue;
split_node = (struct pci_resource *) kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
if (!split_node)
return -ENOMEM;
split_node->base = res->base;
split_node->length = tbase - res->base;
res->base = tbase;
res->length -= split_node->length;
split_node->next = res->next;
res->next = split_node;
}
if (res->length >= size) {
split_node = (struct pci_resource*) kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
if (!split_node)
return -ENOMEM;
split_node->base = res->base + size;
split_node->length = res->length - size;
res->length = size;
split_node->next = res->next;
res->next = split_node;
}
if (*aprh == res) {
*aprh = res->next;
} else {
prevnode = *aprh;
while (prevnode->next != res)
prevnode = prevnode->next;
prevnode->next = res->next;
}
res->next = NULL;
kfree(res);
break;
}
return 0;
}
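/*
 * Summary of the scan below (added comment, not original documentation):
 * probe all 32 device numbers x 8 functions on the hot-plug bus, treat any
 * function whose vendor ID reads back as something other than 0xFFFFFFFF as
 * present, and subtract the resources it already consumes from the
 * controller's free resource pools.
 */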
static int bind_pci_resources_to_slots ( struct controller *ctrl)
{
struct pci_func *func, new_func;
int busn = ctrl->slot_bus;
int devn, funn;
u32 vid;
for (devn = 0; devn < 32; devn++) {
for (funn = 0; funn < 8; funn++) {
/*
if (devn == ctrl->device && funn == ctrl->function)
continue;
*/
/* Find out if this entry is for an occupied slot */
vid = 0xFFFFFFFF;
pci_bus_read_config_dword(ctrl->pci_dev->subordinate, PCI_DEVFN(devn, funn), PCI_VENDOR_ID, &vid);
if (vid != 0xFFFFFFFF) {
dbg("%s: vid = %x bus %x dev %x fun %x\n", __FUNCTION__,
vid, busn, devn, funn);
func = pciehp_slot_find(busn, devn, funn);
dbg("%s: func = %p\n", __FUNCTION__,func);
if (!func) {
memset(&new_func, 0, sizeof(struct pci_func));
new_func.bus = busn;
new_func.device = devn;
new_func.function = funn;
new_func.is_a_board = 1;
configure_existing_function(ctrl, &new_func);
phprm_dump_func_res(&new_func);
} else {
configure_existing_function(ctrl, func);
phprm_dump_func_res(func);
}
dbg("aCCF:existing PCI 0x%x Func ResourceDump\n", ctrl->bus);
}
}
}
return 0;
}
static void phprm_dump_ctrl_res( struct controller *ctlr)
{
struct controller *ctrl = ctlr;
if (ctrl->bus_head) {
dbg(": BUS Resources:\n");
print_pci_resource (ctrl->bus_head);
}
if (ctrl->io_head) {
dbg(": IO Resources:\n");
print_pci_resource (ctrl->io_head);
}
if (ctrl->mem_head) {
dbg(": MEM Resources:\n");
print_pci_resource (ctrl->mem_head);
}
if (ctrl->p_mem_head) {
dbg(": PMEM Resources:\n");
print_pci_resource (ctrl->p_mem_head);
}
}
/*
* phprm_find_available_resources
*
* Finds available memory, IO, and IRQ resources for programming
* devices which may be added to the system
* this function is for hot plug ADD!
*
* returns 0 if success
*/
int pciehprm_find_available_resources(struct controller *ctrl)
{
struct pci_func func;
u32 rc;
memset(&func, 0, sizeof(struct pci_func));
func.bus = ctrl->bus;
func.device = ctrl->device;
func.function = ctrl->function;
func.is_a_board = 1;
/* Get resources for this PCI bridge */
rc = pciehp_save_used_resources (ctrl, &func, !DISABLE_CARD);
dbg("%s: pciehp_save_used_resources rc = %d\n", __FUNCTION__, rc);
if (func.mem_head)
func.mem_head->next = ctrl->mem_head;
ctrl->mem_head = func.mem_head;
if (func.p_mem_head)
func.p_mem_head->next = ctrl->p_mem_head;
ctrl->p_mem_head = func.p_mem_head;
if (func.io_head)
func.io_head->next = ctrl->io_head;
ctrl->io_head = func.io_head;
if (func.bus_head)
func.bus_head->next = ctrl->bus_head;
ctrl->bus_head = func.bus_head;
if (ctrl->bus_head)
pciehprm_delete_resource(&ctrl->bus_head, ctrl->pci_dev->subordinate->number, 1);
dbg("%s:pre-Bind PCI 0x%x Ctrl Resource Dump\n", __FUNCTION__, ctrl->bus);
phprm_dump_ctrl_res(ctrl);
dbg("%s: before bind_pci_resources_to slots\n", __FUNCTION__);
bind_pci_resources_to_slots (ctrl);
dbg("%s:post-Bind PCI 0x%x Ctrl Resource Dump\n", __FUNCTION__, ctrl->bus);
phprm_dump_ctrl_res(ctrl);
return (rc);
}
int pciehprm_set_hpp(
struct controller *ctrl,
struct pci_func *func,
u8 card_type)
{
u32 rc;
u8 temp_byte;
struct pci_bus lpci_bus, *pci_bus;
unsigned int devfn;
memcpy(&lpci_bus, ctrl->pci_bus, sizeof(lpci_bus));
pci_bus = &lpci_bus;
pci_bus->number = func->bus;
devfn = PCI_DEVFN(func->device, func->function);
temp_byte = 0x40; /* Hard coded latency timer value, in PCI bus clocks */
if (card_type == PCI_HEADER_TYPE_BRIDGE) {
/* Set subordinate Latency Timer */
rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SEC_LATENCY_TIMER, temp_byte);
if (rc) {
dbg("%s: set secondary LT error. b:d:f(%02x:%02x:%02x)\n", __FUNCTION__, func->bus, func->device, func->function);
return rc;
}
}
/* Set base Latency Timer */
rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_LATENCY_TIMER, temp_byte);
if (rc) {
dbg("%s: set LT error. b:d:f(%02x:%02x:%02x)\n", __FUNCTION__, func->bus,
func->device, func->function);
return rc;
}
/* set Cache Line size */
temp_byte = 0x08; /* hard coded cache line size: 8 dwords = 32 bytes */
rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_CACHE_LINE_SIZE, temp_byte);
if (rc) {
dbg("%s: set CLS error. b:d:f(%02x:%02x:%02x)\n", __FUNCTION__, func->bus,
func->device, func->function);
}
/* set enable_perr */
/* set enable_serr */
return rc;
}
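/*
 * Summary of pciehprm_enable_card() below (added comment, not original
 * documentation): enable I/O, memory, bus mastering, memory-write-invalidate,
 * SERR and parity checking in PCI_COMMAND; for bridge cards, additionally
 * enable parity/SERR forwarding and ISA filtering via PCI_BRIDGE_CONTROL.
 */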
void pciehprm_enable_card(
struct controller *ctrl,
struct pci_func *func,
u8 card_type)
{
u16 command, bcommand;
struct pci_bus lpci_bus, *pci_bus;
unsigned int devfn;
int rc;
memcpy(&lpci_bus, ctrl->pci_bus, sizeof(lpci_bus));
pci_bus = &lpci_bus;
pci_bus->number = func->bus;
devfn = PCI_DEVFN(func->device, func->function);
rc = pci_bus_read_config_word(pci_bus, devfn, PCI_COMMAND, &command);
command |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR
| PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE
| PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
rc = pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, command);
if (card_type == PCI_HEADER_TYPE_BRIDGE) {
rc = pci_bus_read_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, &bcommand);
bcommand |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR
| PCI_BRIDGE_CTL_NO_ISA;
rc = pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, bcommand);
}
}
static int legacy_pciehprm_init_pci(void)
{
return 0;
}
int pciehprm_init(enum php_ctlr_type ctrl_type)
{
int retval;
switch (ctrl_type) {
case PCI:
retval = legacy_pciehprm_init_pci();
break;
default:
retval = -ENODEV;
break;
}
return retval;
}
| gpl-2.0 |
Lucoms/SkyMist-Core | src/server/scripts/Pandaria/WorldBosses/AugustCelestials/boss_niuzao.cpp | 8 | 11093 | /*
* Copyright (C) 2011-2015 SkyMist Gaming
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* World Boss: Niuzao <The Black Ox>.
*
* ======= QUESTS =======
*
* [90] Celestial Blessings
*
* Quest accept:
*
* Wrathion says: I will meet you at each of the four shrines, champion.
* Wrathion says: Remember, we must visit all four, but we only need to complete ONE of the four challenges.
* Wrathion says: Choose the challenge most appropriate for your unique talents.
* // Wrathion transforms into his whelp form.
* Wrathion says: Good luck!
*
* Quest complete:
*
* Wrathion says: We have done it, hero! We have the blessings of the celestials, and we have completed their challenge.
* Wrathion says: I know our next step. Meet me back atop Mason's folly - you'll be pleasantly surprised at what I have in store for you!
*
* Niuzao Temple - ! Tank spec challenge.
*
* Wrathion says: Here we are, the home of Niuzao, the Black Ox. He is the patron spirit of Pandaren fortitude, the only celestial who chooses to live beyond the wall.
* Wrathion says: Mighty Black Ox! Hear our plea.
* Wrathion says: My champion and I seek the blessing of fortitude.
* Niuzao says: So enters the dragon, and his mortal champion. You are welcome here.
* Niuzao says: Tell me, Black Prince: What is the nature of fortitude?
* Wrathion says: The strength to overcome any hardship! That is fortitude.
* Niuzao says: You confuse strength with fortitude, young dragon. But power is worthless without spirit!
* Niuzao says: I have seen humble slaves endure unimaginable torture. Only to rise up and overthrow their masters.
* Niuzao says: And I have seen the mightiest of emperors laid low by the perseverance of the smallest of enemies.
* Wrathion says: You mean to say - physical strength is developed without, but fortitude comes from within?
* Niuzao says: Precisely!
* Wrathion says: ...Sure, I knew that.
* Niuzao says: Of course you did. We will put your understanding to the test.
* Niuzao says: My challenge, if you accept it, will test your champion's ability to defend and protect while enduring terrible hardship.
*
* - Tank Challenge -
*
* Niuzao says: Let the challenge begin! Hero, you must defend the Black Prince while he faces his inner turmoil.
* Wrathion says: I'm a black dragon. I won't need any help.
* Niuzao says: Is that so? You are filled with doubts and fears, young Prince.
* Niuzao says: Face your inner demons. Now is the time.
* Wrathion says: What - wait - father!?
* Vision of Deathwing yells: I shall tear this world apart!
* Wrathion says: Please - don't make me do this.
* Niuzao yells: Your champion will defend you. Begin!
* Vision of Deathwing yells: Your efforts are insignificant!
*
* Niuzao yells: The elements will not die! Make use of your time before they rise again!
* Niuzao yells: Control the combat. Pay attention to ALL your foes.
* Niuzao yells: Use the battlefield to regenerate yourself!
* Niuzao yells: Protect the Black Prince! Distract his enemies!
* Niuzao yells: Do not allow your foe to complete his attack!
* Niuzao yells: You must sacrifice yourself to keep the Prince from falling!
*
* Vision of Deathwing yells: Your tenacity is admirable, but pointless!
* Vision of Deathwing yells: There's no shelter from my fury!
* Vision of Deathwing yells: Your armor means nothing! Your faith - even less!
*
* Failure
* Wrathion yells: Enough! Make it stop!
* Niuzao yells: So be it. The test is over.
* Niuzao says: Remember: You are never defeated until you decide to remain so. Those with true fortitude always rise whenever they fall.
* Niuzao says: Try again - I know you can defeat this challenge!
*
* Victory
* Niuzao yells: Well done! You have triumphed!
* Wrathion says: Such... madness...
* Niuzao says: You are stronger than your father, young prince.
* Niuzao says: You have friends. Your champion did not allow you to fall. Take this lesson to heart.
* Wrathion says: I understand. Thank you, Mighty Ox.
*
* ===================================================================================================================
*
*[90] The Emperor's Way - Actual Boss fight.
*
* Intro
* Emperor Shaohao yells: In the face of great fear, the black ox taught me fortitude. Through terror and darkness it persevered. The fear was vanquished.
* Niuzao yells: Can you stand on the tallest peak? Winds and sleet buffeting your skin, until the trees wither and the mountains fall into the sea?
*
* Aggro
* We shall see.
*
* Charge
* The winds may be strong, and the sleets may sting.
* You are the mountain unmovable by all but time!
*
* Massive Quake
* Be vigilant in your stand or you will never achieve your goals!
*
* Kills player
* You must persevere!
*
* Death
* Niuzao yells: Though you will be surrounded by foes greater than you can imagine, your fortitude shall allow you to endure. Remember this in the times ahead.
* Emperor Shaohao yells: You have walked the trial of fortitude, and learned of the path of the black ox. May it bless your passage.
*/
#include "ObjectMgr.h"
#include "ScriptMgr.h"
#include "ScriptedCreature.h"
#include "CreatureTextMgr.h"
#include "SpellScript.h"
#include "SpellAuras.h"
#include "SpellAuraEffects.h"
#include "Player.h"
enum Texts
{
// Niuzao
SAY_INTRO = 0, // Can you stand on the tallest peak? Winds and sleet buffeting your skin, until the trees wither and the mountains fall into the sea?
SAY_AGGRO = 1, // We shall see.
SAY_DEATH = 2, // Though you will be surrounded by foes greater than you can imagine, your fortitude shall allow you to endure. Remember this in the times ahead.
SAY_SLAY = 3, // You must persevere!
SAY_MASSIVE_QUAKE = 4, // Be vigilant in your stand or you will never achieve your goals!
SAY_CHARGE = 5 // 0 - The winds may be strong, and the sleets may sting. ; 1 - You are the mountain unmovable by all but time!
};
enum Spells
{
// Niuzao
SPELL_HEADBUTT = 144610, // Damage, knockback and threat removal.
SPELL_OXEN_FORTITUDE = 144606, // Player health + boss damage increase.
SPELL_OXEN_FORTITUDE_T = 144607, // Triggered on players by above.
SPELL_MASSIVE_QUAKE = 144611, // Damage each sec.
SPELL_MASSIVE_QUAKE_D = 144612, // Triggered by above.
SPELL_CHARGE = 144608, // Charge cast time and aura.
SPELL_CHARGE_D = 144609, // Per. dmg., triggered by above each sec.
};
enum Events
{
// Niuzao
EVENT_HEADBUTT = 1, // 20s from aggro, 30s after.
EVENT_OXEN_FORTITUDE = 2, // 12s from aggro, 45s after.
EVENT_MASSIVE_QUAKE = 3, // 45s from aggro, 65s after.
EVENT_NIUZAO_CHARGE = 4 // 66 and 33%.
};
enum ChargeStates
{
DONE_NONE = 0, // No casts done.
DONE_66 = 1, // First cast done.
DONE_33 = 2 // Both casts done.
};
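// How chargeDone gates the phase triggers (added walkthrough): at full
// health chargeDone == DONE_NONE; the first time health drops below 67%,
// UpdateAI schedules EVENT_NIUZAO_CHARGE and bumps chargeDone to DONE_66 so
// the 67% branch cannot fire again, and the same one-shot logic repeats at
// 34% via DONE_66 -> DONE_33.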
// ToDo: Script Charge, fix timers.
class boss_niuzao : public CreatureScript
{
public:
boss_niuzao() : CreatureScript("boss_niuzao") { }
struct boss_niuzaoAI : public ScriptedAI
{
boss_niuzaoAI(Creature* creature) : ScriptedAI(creature) { }
EventMap events;
uint8 chargeDone;
void InitializeAI()
{
if (!me->isDead())
Reset();
}
void Reset()
{
events.Reset();
chargeDone = DONE_NONE;
}
void EnterCombat(Unit* /*who*/)
{
Talk(SAY_AGGRO);
events.ScheduleEvent(EVENT_HEADBUTT, urand(18000, 23000)); // 18-23
events.ScheduleEvent(EVENT_OXEN_FORTITUDE, urand(12000, 14000)); // 12-14
events.ScheduleEvent(EVENT_MASSIVE_QUAKE, urand(44000, 48000)); // 44-48
}
void KilledUnit(Unit* victim)
{
if (victim->GetTypeId() == TYPEID_PLAYER)
Talk(SAY_SLAY);
}
void EnterEvadeMode()
{
me->RemoveAllAuras();
Reset();
me->DeleteThreatList();
me->CombatStop(true);
me->GetMotionMaster()->MoveTargetedHome();
}
void JustDied(Unit* /*killer*/)
{
Talk(SAY_DEATH);
}
void UpdateAI(const uint32 diff)
{
if (!UpdateVictim())
return;
events.Update(diff);
if (me->HasUnitState(UNIT_STATE_CASTING))
return;
// One-shot Charge phase triggers at 66% and 33% health.
if ((me->HealthBelowPct(67) && chargeDone == DONE_NONE) || (me->HealthBelowPct(34) && chargeDone == DONE_66))
{
events.ScheduleEvent(EVENT_NIUZAO_CHARGE, 2000);
chargeDone++;
}
while (uint32 eventId = events.ExecuteEvent())
{
switch (eventId)
{
case EVENT_HEADBUTT:
DoCastVictim(SPELL_HEADBUTT);
events.ScheduleEvent(EVENT_HEADBUTT, urand(35000, 40000));
break;
case EVENT_OXEN_FORTITUDE:
DoCast(me, SPELL_OXEN_FORTITUDE);
events.ScheduleEvent(EVENT_OXEN_FORTITUDE, urand(43000, 47000));
break;
case EVENT_MASSIVE_QUAKE:
Talk(SAY_MASSIVE_QUAKE);
DoCast(me, SPELL_MASSIVE_QUAKE);
events.ScheduleEvent(EVENT_MASSIVE_QUAKE, urand(70000, 75000));
break;
case EVENT_NIUZAO_CHARGE:
Talk(SAY_CHARGE);
DoCast(me, SPELL_CHARGE);
break;
default: break;
}
}
DoMeleeAttackIfReady();
}
};
CreatureAI* GetAI(Creature* creature) const
{
return new boss_niuzaoAI(creature);
}
};
void AddSC_boss_niuzao()
{
new boss_niuzao();
}
| gpl-2.0 |