code
stringlengths 6
250k
| repo_name
stringlengths 5
70
| path
stringlengths 3
177
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
250k
|
|---|---|---|---|---|---|
/*
* Intel XScale PXA Programmable Interrupt Controller.
*
* Copyright (c) 2006 Openedhand Ltd.
* Copyright (c) 2006 Thorsten Zitterell
* Written by Andrzej Zaborowski <balrog@zabor.org>
*
* This code is licenced under the GPL.
*/
#include "hw.h"
#include "pxa.h"
#define ICIP 0x00 /* Interrupt Controller IRQ Pending register */
#define ICMR 0x04 /* Interrupt Controller Mask register */
#define ICLR 0x08 /* Interrupt Controller Level register */
#define ICFP 0x0c /* Interrupt Controller FIQ Pending register */
#define ICPR 0x10 /* Interrupt Controller Pending register */
#define ICCR 0x14 /* Interrupt Controller Control register */
#define ICHP 0x18 /* Interrupt Controller Highest Priority register */
#define IPR0 0x1c /* Interrupt Controller Priority register 0 */
#define IPR31 0x98 /* Interrupt Controller Priority register 31 */
#define ICIP2 0x9c /* Interrupt Controller IRQ Pending register 2 */
#define ICMR2 0xa0 /* Interrupt Controller Mask register 2 */
#define ICLR2 0xa4 /* Interrupt Controller Level register 2 */
#define ICFP2 0xa8 /* Interrupt Controller FIQ Pending register 2 */
#define ICPR2 0xac /* Interrupt Controller Pending register 2 */
#define IPR32 0xb0 /* Interrupt Controller Priority register 32 */
#define IPR39 0xcc /* Interrupt Controller Priority register 39 */
#define PXA2XX_PIC_SRCS 40
typedef struct {
CPUState *cpu_env;
uint32_t int_enabled[2];
uint32_t int_pending[2];
uint32_t is_fiq[2];
uint32_t int_idle;
uint32_t priority[PXA2XX_PIC_SRCS];
} PXA2xxPICState;
/* Recompute the CPU IRQ/FIQ lines from the current pending/enabled state.
 * Must be called after any change to int_pending, int_enabled, is_fiq or
 * int_idle. */
static void pxa2xx_pic_update(void *opaque)
{
    uint32_t mask[2];
    PXA2xxPICState *s = (PXA2xxPICState *) opaque;

    if (s->cpu_env->halted) {
        /* While the core is halted, sources unmasked for idle wake-up
         * (int_idle, controlled by ICCR) also count; kick the CPU so it
         * can leave the halted state. */
        mask[0] = s->int_pending[0] & (s->int_enabled[0] | s->int_idle);
        mask[1] = s->int_pending[1] & (s->int_enabled[1] | s->int_idle);
        if (mask[0] || mask[1])
            cpu_interrupt(s->cpu_env, CPU_INTERRUPT_EXITTB);
    }

    /* Only enabled (ICMR/ICMR2-unmasked) sources assert IRQ/FIQ. */
    mask[0] = s->int_pending[0] & s->int_enabled[0];
    mask[1] = s->int_pending[1] & s->int_enabled[1];

    /* Sources routed to FIQ via ICLR/ICLR2 (is_fiq). */
    if ((mask[0] & s->is_fiq[0]) || (mask[1] & s->is_fiq[1]))
        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_FIQ);
    else
        cpu_reset_interrupt(s->cpu_env, CPU_INTERRUPT_FIQ);

    /* All remaining enabled+pending sources drive the normal IRQ line. */
    if ((mask[0] & ~s->is_fiq[0]) || (mask[1] & ~s->is_fiq[1]))
        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
    else
        cpu_reset_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
}
/* Note: Here level means state of the signal on a pin, not
* IRQ/FIQ distinction as in PXA Developer Manual. */
static void pxa2xx_pic_set_irq(void *opaque, int irq, int level)
{
PXA2xxPICState *s = (PXA2xxPICState *) opaque;
int int_set = (irq >= 32);
irq &= 31;
if (level)
s->int_pending[int_set] |= 1 << irq;
else
s->int_pending[int_set] &= ~(1 << irq);
pxa2xx_pic_update(opaque);
}
/* Compute the ICHP value: the IDs of the highest-priority pending IRQ and
 * FIQ.  Each priority[] entry with bit 31 set holds a valid peripheral ID
 * in its low 6 bits; a lower table index means higher priority, so we scan
 * from the lowest priority upwards and let later (higher-priority) matches
 * overwrite earlier ones. */
static inline uint32_t pxa2xx_pic_highest(PXA2xxPICState *s) {
    int i, int_set, irq;
    uint32_t bit, mask[2];
    uint32_t ichp = 0x003f003f; /* Both IDs invalid */

    mask[0] = s->int_pending[0] & s->int_enabled[0];
    mask[1] = s->int_pending[1] & s->int_enabled[1];

    for (i = PXA2XX_PIC_SRCS - 1; i >= 0; i --) {
        irq = s->priority[i] & 0x3f;
        if ((s->priority[i] & (1 << 31)) && irq < PXA2XX_PIC_SRCS) {
            /* Source peripheral ID is valid. */
            bit = 1 << (irq & 31);
            int_set = (irq >= 32);

            if (mask[int_set] & bit & s->is_fiq[int_set]) {
                /* FIQ asserted: ID in the low half-word, bit 15 = valid. */
                ichp &= 0xffff0000;
                ichp |= (1 << 15) | irq;
            }

            if (mask[int_set] & bit & ~s->is_fiq[int_set]) {
                /* IRQ asserted: ID in the high half-word, bit 31 = valid. */
                ichp &= 0x0000ffff;
                ichp |= (1 << 31) | (irq << 16);
            }
        }
    }

    return ichp;
}
/* Memory-mapped register read handler for the interrupt controller.
 * offset is relative to the controller's base address; unknown offsets are
 * reported and read as zero. */
static uint32_t pxa2xx_pic_mem_read(void *opaque, target_phys_addr_t offset)
{
    PXA2xxPICState *s = (PXA2xxPICState *) opaque;

    switch (offset) {
    case ICIP: /* IRQ Pending register: enabled, pending, IRQ-routed */
        return s->int_pending[0] & ~s->is_fiq[0] & s->int_enabled[0];
    case ICIP2: /* IRQ Pending register 2 */
        return s->int_pending[1] & ~s->is_fiq[1] & s->int_enabled[1];
    case ICMR: /* Mask register */
        return s->int_enabled[0];
    case ICMR2: /* Mask register 2 */
        return s->int_enabled[1];
    case ICLR: /* Level register (IRQ/FIQ routing) */
        return s->is_fiq[0];
    case ICLR2: /* Level register 2 */
        return s->is_fiq[1];
    case ICCR: /* Idle mask: bit 0 set iff idle wake-up is restricted */
        return (s->int_idle == 0);
    case ICFP: /* FIQ Pending register: enabled, pending, FIQ-routed */
        return s->int_pending[0] & s->is_fiq[0] & s->int_enabled[0];
    case ICFP2: /* FIQ Pending register 2 */
        return s->int_pending[1] & s->is_fiq[1] & s->int_enabled[1];
    case ICPR: /* Pending register (raw, unmasked) */
        return s->int_pending[0];
    case ICPR2: /* Pending register 2 */
        return s->int_pending[1];
    case IPR0 ... IPR31: /* Priority registers for sources 0-31 */
        return s->priority[0 + ((offset - IPR0 ) >> 2)];
    case IPR32 ... IPR39: /* Priority registers for sources 32-39 */
        return s->priority[32 + ((offset - IPR32) >> 2)];
    case ICHP: /* Highest Priority register (computed on demand) */
        return pxa2xx_pic_highest(s);
    default:
        printf("%s: Bad register offset " REG_FMT "\n", __FUNCTION__, offset);
        return 0;
    }
}
/* Memory-mapped register write handler.  Every successful write is followed
 * by a pxa2xx_pic_update() so the CPU interrupt lines track the new state;
 * writes to unknown offsets are reported and skip the update. */
static void pxa2xx_pic_mem_write(void *opaque, target_phys_addr_t offset,
                uint32_t value)
{
    PXA2xxPICState *s = (PXA2xxPICState *) opaque;

    switch (offset) {
    case ICMR: /* Mask register */
        s->int_enabled[0] = value;
        break;
    case ICMR2: /* Mask register 2 */
        s->int_enabled[1] = value;
        break;
    case ICLR: /* Level register (IRQ/FIQ routing) */
        s->is_fiq[0] = value;
        break;
    case ICLR2: /* Level register 2 */
        s->is_fiq[1] = value;
        break;
    case ICCR: /* Idle mask: bit 0 set -> only enabled sources wake the
                * idle core (int_idle = 0); clear -> any pending source
                * wakes it (int_idle = all ones). */
        s->int_idle = (value & 1) ? 0 : ~0;
        break;
    case IPR0 ... IPR31: /* Priority: keep valid bit (31) + ID (5:0) only */
        s->priority[0 + ((offset - IPR0 ) >> 2)] = value & 0x8000003f;
        break;
    case IPR32 ... IPR39:
        s->priority[32 + ((offset - IPR32) >> 2)] = value & 0x8000003f;
        break;
    default:
        printf("%s: Bad register offset " REG_FMT "\n", __FUNCTION__, offset);
        return;
    }
    pxa2xx_pic_update(opaque);
}
/* Interrupt Controller Coprocessor Space Register Mapping:
 * maps coprocessor register number (CRn) to the equivalent memory-mapped
 * register offset.  Unmapped entries are -1 (rejected by the cp handlers). */
static const int pxa2xx_cp_reg_map[0x10] = {
    [0x0 ... 0xf] = -1,
    [0x0] = ICIP,
    [0x1] = ICMR,
    [0x2] = ICLR,
    [0x3] = ICFP,
    [0x4] = ICPR,
    [0x5] = ICHP,
    [0x6] = ICIP2,
    [0x7] = ICMR2,
    [0x8] = ICLR2,
    [0x9] = ICFP2,
    [0xa] = ICPR2,
};
static uint32_t pxa2xx_pic_cp_read(void *opaque, int op2, int reg, int crm,
void *retaddr)
{
target_phys_addr_t offset;
if (pxa2xx_cp_reg_map[reg] == -1) {
printf("%s: Bad register 0x%x\n", __FUNCTION__, reg);
return 0;
}
offset = pxa2xx_cp_reg_map[reg];
return pxa2xx_pic_mem_read(opaque, offset);
}
/* Coprocessor write: translate the cp register number to the MMIO offset
 * and delegate to the memory-mapped handler.  Unmapped registers are
 * reported and ignored. */
static void pxa2xx_pic_cp_write(void *opaque, int op2, int reg, int crm,
                uint32_t value, void *retaddr)
{
    int mapped = pxa2xx_cp_reg_map[reg];

    if (mapped == -1) {
        printf("%s: Bad register 0x%x\n", __FUNCTION__, reg);
        return;
    }

    pxa2xx_pic_mem_write(opaque, (target_phys_addr_t) mapped, value);
}
/* One read handler for all access widths (byte/half/word). */
static CPUReadMemoryFunc * const pxa2xx_pic_readfn[] = {
    pxa2xx_pic_mem_read,
    pxa2xx_pic_mem_read,
    pxa2xx_pic_mem_read,
};
/* One write handler for all access widths (byte/half/word). */
static CPUWriteMemoryFunc * const pxa2xx_pic_writefn[] = {
    pxa2xx_pic_mem_write,
    pxa2xx_pic_mem_write,
    pxa2xx_pic_mem_write,
};
/* Serialize controller state for savevm.  Field order must match
 * pxa2xx_pic_load exactly: enabled[0..1], pending[0..1], is_fiq[0..1],
 * int_idle, then all priority registers. */
static void pxa2xx_pic_save(QEMUFile *f, void *opaque)
{
    PXA2xxPICState *s = (PXA2xxPICState *) opaque;
    int n;

    qemu_put_be32s(f, &s->int_enabled[0]);
    qemu_put_be32s(f, &s->int_enabled[1]);
    qemu_put_be32s(f, &s->int_pending[0]);
    qemu_put_be32s(f, &s->int_pending[1]);
    qemu_put_be32s(f, &s->is_fiq[0]);
    qemu_put_be32s(f, &s->is_fiq[1]);
    qemu_put_be32s(f, &s->int_idle);
    for (n = 0; n < PXA2XX_PIC_SRCS; n++)
        qemu_put_be32s(f, &s->priority[n]);
}
/* Restore controller state saved by pxa2xx_pic_save (same field order),
 * then refresh the CPU interrupt lines.  Always succeeds. */
static int pxa2xx_pic_load(QEMUFile *f, void *opaque, int version_id)
{
    PXA2xxPICState *s = (PXA2xxPICState *) opaque;
    int n;

    qemu_get_be32s(f, &s->int_enabled[0]);
    qemu_get_be32s(f, &s->int_enabled[1]);
    qemu_get_be32s(f, &s->int_pending[0]);
    qemu_get_be32s(f, &s->int_pending[1]);
    qemu_get_be32s(f, &s->is_fiq[0]);
    qemu_get_be32s(f, &s->is_fiq[1]);
    qemu_get_be32s(f, &s->int_idle);
    for (n = 0; n < PXA2XX_PIC_SRCS; n++)
        qemu_get_be32s(f, &s->priority[n]);

    pxa2xx_pic_update(opaque);
    return 0;
}
/* Create and wire up the PXA interrupt controller at the given physical
 * base address.  Returns the array of PXA2XX_PIC_SRCS input IRQ lines that
 * peripherals connect to, or NULL on allocation failure. */
qemu_irq *pxa2xx_pic_init(target_phys_addr_t base, CPUState *env)
{
    PXA2xxPICState *s;
    int iomemtype;
    qemu_irq *qi;

    s = (PXA2xxPICState *)
            qemu_mallocz(sizeof(PXA2xxPICState));
    /* NOTE(review): qemu_mallocz typically aborts rather than returning
     * NULL, so this check (and the explicit re-zeroing below, since the
     * allocation is already zeroed) looks redundant — confirm against the
     * qemu_mallocz contract in this tree. */
    if (!s)
        return NULL;

    s->cpu_env = env;

    s->int_pending[0] = 0;
    s->int_pending[1] = 0;
    s->int_enabled[0] = 0;
    s->int_enabled[1] = 0;
    s->is_fiq[0] = 0;
    s->is_fiq[1] = 0;

    qi = qemu_allocate_irqs(pxa2xx_pic_set_irq, s, PXA2XX_PIC_SRCS);

    /* Enable IC memory-mapped registers access. */
    iomemtype = cpu_register_io_memory(pxa2xx_pic_readfn,
                    pxa2xx_pic_writefn, s, DEVICE_NATIVE_ENDIAN);
    cpu_register_physical_memory(base, 0x00100000, iomemtype);

    /* Enable IC coprocessor access (cp6). */
    cpu_arm_set_cp_io(env, 6, pxa2xx_pic_cp_read, pxa2xx_pic_cp_write, s);

    register_savevm(NULL, "pxa2xx_pic", 0, 0, pxa2xx_pic_save,
                    pxa2xx_pic_load, s);

    return qi;
}
|
XVilka/qemu
|
hw/pxa2xx_pic.c
|
C
|
gpl-2.0
| 9,607
|
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/jiffies.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <asm/amigayle.h>
#include <asm/amipcmcia.h>
#include "8390.h"
#define DRV_NAME "apne"
#define NE_BASE (dev->base_addr)
#define NE_CMD 0x00
#define NE_DATAPORT 0x10
#define NE_RESET 0x1f
#define NE_IO_EXTENT 0x20
#define NE_EN0_ISR 0x07
#define NE_EN0_DCFG 0x0e
#define NE_EN0_RSARLO 0x08
#define NE_EN0_RSARHI 0x09
#define NE_EN0_RCNTLO 0x0a
#define NE_EN0_RXCR 0x0c
#define NE_EN0_TXCR 0x0d
#define NE_EN0_RCNTHI 0x0b
#define NE_EN0_IMR 0x0f
#define NE1SM_START_PG 0x20
#define NE1SM_STOP_PG 0x40
#define NESM_START_PG 0x40
#define NESM_STOP_PG 0x80
struct net_device * __init apne_probe(int unit);
static int apne_probe1(struct net_device *dev, int ioaddr);
static void apne_reset_8390(struct net_device *dev);
static void apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
int ring_page);
static void apne_block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset);
static void apne_block_output(struct net_device *dev, const int count,
const unsigned char *buf, const int start_page);
static irqreturn_t apne_interrupt(int irq, void *dev_id);
static int init_pcmcia(void);
#define IOBASE 0x300
static const char version[] =
"apne.c:v1.1 7/10/98 Alain Malek (Alain.Malek@cryogen.ch)\n";
static int apne_owned;
/* Probe for an NE2000-compatible PCMCIA ethernet card on the Amiga Gayle
 * PCMCIA slot.  unit >= 0 requests a specific ethN name.  Returns the
 * registered net_device, or an ERR_PTR on failure.  Only one card is
 * supported at a time (apne_owned). */
struct net_device * __init apne_probe(int unit)
{
    struct net_device *dev;
#ifndef MANUAL_CONFIG
    char tuple[8];
#endif
    int err;

    if (!MACH_IS_AMIGA)
        return ERR_PTR(-ENODEV);

    if (apne_owned) /* driver already bound to a card */
        return ERR_PTR(-ENODEV);

    if ( !(AMIGAHW_PRESENT(PCMCIA)) )
        return ERR_PTR(-ENODEV);

    printk("Looking for PCMCIA ethernet card : ");

    if (!(PCMCIA_INSERTED)) {
        printk("NO PCMCIA card inserted\n");
        return ERR_PTR(-ENODEV);
    }

    dev = alloc_ei_netdev();
    if (!dev)
        return ERR_PTR(-ENOMEM);

    if (unit >= 0) {
        sprintf(dev->name, "eth%d", unit);
        netdev_boot_setup_check(dev);
    }

    pcmcia_disable_irq();

#ifndef MANUAL_CONFIG
    /* Verify via the CIS FUNCID tuple that this really is a network card. */
    if ((pcmcia_copy_tuple(CISTPL_FUNCID, tuple, 8) < 3) ||
        (tuple[2] != CISTPL_FUNCID_NETWORK)) {
        printk("not an ethernet card\n");
        free_netdev(dev);
        return ERR_PTR(-ENODEV);
    }
#endif

    printk("ethernet PCMCIA card inserted\n");

    if (!init_pcmcia()) {
        free_netdev(dev);
        return ERR_PTR(-ENODEV);
    }

    if (!request_region(IOBASE, 0x20, DRV_NAME)) {
        free_netdev(dev);
        return ERR_PTR(-EBUSY);
    }

    err = apne_probe1(dev, IOBASE);
    if (err) {
        release_region(IOBASE, 0x20);
        free_netdev(dev);
        return ERR_PTR(err);
    }

    err = register_netdev(dev);
    if (!err)
        return dev;

    /* register_netdev failed: undo everything apne_probe1 set up
     * (IRQ handler, PCMCIA state, I/O region). */
    pcmcia_disable_irq();
    free_irq(IRQ_AMIGA_PORTS, dev);
    pcmcia_reset();
    release_region(IOBASE, 0x20);
    free_netdev(dev);
    return ERR_PTR(err);
}
/* Low-level probe at the given I/O base: reset the 8390, read the station
 * address PROM, detect NE1000/NE2000/Ctron variants, install the IRQ
 * handler and initialize the 8390 state.  Returns 0 on success, negative
 * errno on failure.  With MANUAL_HWADDR0 defined, the PROM probe is
 * skipped and a compile-time MAC address is used instead. */
static int __init apne_probe1(struct net_device *dev, int ioaddr)
{
    int i;
    unsigned char SA_prom[32];
    int wordlength = 2;
    const char *name = NULL;
    int start_page, stop_page;
#ifndef MANUAL_HWADDR0
    int neX000, ctron;
#endif
    static unsigned version_printed;

    if (ei_debug && version_printed++ == 0)
        printk(version);

    printk("PCMCIA NE*000 ethercard probe");

    /* Reset the card and wait (max ~20ms) for the reset-complete bit. */
    { unsigned long reset_start_time = jiffies;

    outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);

    while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0)
        if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
            printk(" not found (no reset ack).\n");
            return -ENODEV;
        }

    outb(0xff, ioaddr + NE_EN0_ISR);		/* Ack all intr. */
    }

#ifndef MANUAL_HWADDR0
    {
    /* Register init sequence to set up a 32-byte remote read of the
     * station address PROM. */
    struct {unsigned long value, offset; } program_seq[] = {
        {E8390_NODMA+E8390_PAGE0+E8390_STOP, NE_CMD},
        {0x48, NE_EN0_DCFG},
        {0x00, NE_EN0_RCNTLO},
        {0x00, NE_EN0_RCNTHI},
        {0x00, NE_EN0_IMR},
        {0xFF, NE_EN0_ISR},
        {E8390_RXOFF, NE_EN0_RXCR},
        {E8390_TXOFF, NE_EN0_TXCR},
        {32, NE_EN0_RCNTLO},
        {0x00, NE_EN0_RCNTHI},
        {0x00, NE_EN0_RSARLO},
        {0x00, NE_EN0_RSARHI},
        {E8390_RREAD+E8390_START, NE_CMD},
    };
    for (i = 0; i < ARRAY_SIZE(program_seq); i++) {
        outb(program_seq[i].value, ioaddr + program_seq[i].offset);
    }
    }

    /* On a 16-bit card each PROM byte appears twice; if any pair differs
     * we are talking to an 8-bit (NE1000-style) card. */
    for(i = 0; i < 32 ; i+=2) {
        SA_prom[i] = inb(ioaddr + NE_DATAPORT);
        SA_prom[i+1] = inb(ioaddr + NE_DATAPORT);
        if (SA_prom[i] != SA_prom[i+1])
            wordlength = 1;
    }

    if (wordlength == 2)
        /* Collapse the doubled bytes: keep SA_prom[2*i]. */
        for (i = 0; i < 16; i++)
            SA_prom[i] = SA_prom[i+i];

    if (wordlength == 2) {
        /* We must set the 8390 for word mode. */
        outb(0x49, ioaddr + NE_EN0_DCFG);
        start_page = NESM_START_PG;
        stop_page = NESM_STOP_PG;
    } else {
        start_page = NE1SM_START_PG;
        stop_page = NE1SM_STOP_PG;
    }

    /* Identify the card by its PROM signature bytes. */
    neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57);
    ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d);

    if (neX000) {
        name = (wordlength == 2) ? "NE2000" : "NE1000";
    } else if (ctron) {
        name = (wordlength == 2) ? "Ctron-8" : "Ctron-16";
        start_page = 0x01;
        stop_page = (wordlength == 2) ? 0x40 : 0x20;
    } else {
        printk(" not found.\n");
        return -ENXIO;
    }

#else
    wordlength = 2;
    /* We must set the 8390 for word mode. */
    outb(0x49, ioaddr + NE_EN0_DCFG);
    start_page = NESM_START_PG;
    stop_page = NESM_STOP_PG;

    SA_prom[0] = MANUAL_HWADDR0;
    SA_prom[1] = MANUAL_HWADDR1;
    SA_prom[2] = MANUAL_HWADDR2;
    SA_prom[3] = MANUAL_HWADDR3;
    SA_prom[4] = MANUAL_HWADDR4;
    SA_prom[5] = MANUAL_HWADDR5;
    name = "NE2000";
#endif

    dev->base_addr = ioaddr;
    dev->irq = IRQ_AMIGA_PORTS;
    dev->netdev_ops = &ei_netdev_ops;

    /* Install our interrupt handler on the shared Amiga ports IRQ. */
    i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev);
    if (i) return i;

    for(i = 0; i < ETHER_ADDR_LEN; i++)
        dev->dev_addr[i] = SA_prom[i];

    printk(" %pM\n", dev->dev_addr);

    printk("%s: %s found.\n", dev->name, name);

    ei_status.name = name;
    ei_status.tx_start_page = start_page;
    ei_status.stop_page = stop_page;
    ei_status.word16 = (wordlength == 2);

    ei_status.rx_start_page = start_page + TX_PAGES;

    ei_status.reset_8390 = &apne_reset_8390;
    ei_status.block_input = &apne_block_input;
    ei_status.block_output = &apne_block_output;
    ei_status.get_8390_hdr = &apne_get_8390_hdr;

    NS8390_init(dev, 0);

    pcmcia_ack_int(pcmcia_get_intreq());		/* ack PCMCIA int req */
    pcmcia_enable_irq();

    apne_owned = 1;

    return 0;
}
/* Hard-reset the 8390 via its reset port, re-initializing the PCMCIA slot
 * first.  Waits up to ~20ms for the reset-complete bit before giving up
 * with a warning. */
static void
apne_reset_8390(struct net_device *dev)
{
    unsigned long reset_start_time = jiffies;

    init_pcmcia();

    if (ei_debug > 1) printk("resetting the 8390 t=%ld...", jiffies);

    outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);

    ei_status.txing = 0;
    ei_status.dmaing = 0;

    /* This check _should_not_ be necessary, omit eventually. */
    while ((inb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0)
        if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
            printk("%s: ne_reset_8390() did not complete.\n", dev->name);
            break;
        }
    outb(ENISR_RESET, NE_BASE + NE_EN0_ISR);	/* Ack intr. */
}
/* Read the 4-byte packet header at the start of ring buffer page
 * ring_page into *hdr using 8390 remote DMA.  Guarded by ei_status.dmaing
 * since only one remote DMA transfer may be in flight at a time. */
static void
apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{

    int nic_base = dev->base_addr;
    int cnt;
    char *ptrc;
    short *ptrs;

    /* This *shouldn't* happen. If it does, it's the last thing you'll see */
    if (ei_status.dmaing) {
        printk("%s: DMAing conflict in ne_get_8390_hdr "
           "[DMAstat:%d][irqlock:%d][intr:%d].\n",
           dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
        return;
    }

    ei_status.dmaing |= 0x01;
    /* Program a remote read of sizeof(hdr) bytes at page ring_page. */
    outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
    outb(ENISR_RDC, nic_base + NE_EN0_ISR);
    outb(sizeof(struct e8390_pkt_hdr), nic_base + NE_EN0_RCNTLO);
    outb(0, nic_base + NE_EN0_RCNTHI);
    outb(0, nic_base + NE_EN0_RSARLO);		/* On page boundary */
    outb(ring_page, nic_base + NE_EN0_RSARHI);
    outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);

    /* Drain the data port word- or byte-wise depending on bus width. */
    if (ei_status.word16) {
        ptrs = (short*)hdr;
        for(cnt = 0; cnt < (sizeof(struct e8390_pkt_hdr)>>1); cnt++)
            *ptrs++ = inw(NE_BASE + NE_DATAPORT);
    } else {
        ptrc = (char*)hdr;
        for(cnt = 0; cnt < sizeof(struct e8390_pkt_hdr); cnt++)
            *ptrc++ = inb(NE_BASE + NE_DATAPORT);
    }

    outb(ENISR_RDC, nic_base + NE_EN0_ISR);	/* Ack intr. */
    ei_status.dmaing &= ~0x01;

    /* Header count field arrives little-endian from the chip. */
    le16_to_cpus(&hdr->count);
}
/* Copy count bytes from NIC ring buffer offset ring_offset into skb->data
 * via 8390 remote DMA.  Word-wide transfers on 16-bit cards, with a final
 * byte read when count is odd. */
static void
apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
{
    int nic_base = dev->base_addr;
    char *buf = skb->data;
    char *ptrc;
    short *ptrs;
    int cnt;

    /* This *shouldn't* happen. If it does, it's the last thing you'll see */
    if (ei_status.dmaing) {
        printk("%s: DMAing conflict in ne_block_input "
           "[DMAstat:%d][irqlock:%d][intr:%d].\n",
           dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
        return;
    }
    ei_status.dmaing |= 0x01;
    /* Program a remote read of count bytes starting at ring_offset. */
    outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
    outb(ENISR_RDC, nic_base + NE_EN0_ISR);
    outb(count & 0xff, nic_base + NE_EN0_RCNTLO);
    outb(count >> 8, nic_base + NE_EN0_RCNTHI);
    outb(ring_offset & 0xff, nic_base + NE_EN0_RSARLO);
    outb(ring_offset >> 8, nic_base + NE_EN0_RSARHI);
    outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);

    if (ei_status.word16) {
      ptrs = (short*)buf;
      for (cnt = 0; cnt < (count>>1); cnt++)
        *ptrs++ = inw(NE_BASE + NE_DATAPORT);
      if (count & 0x01) {
	buf[count-1] = inb(NE_BASE + NE_DATAPORT);
      }
    } else {
      ptrc = (char*)buf;
      for (cnt = 0; cnt < count; cnt++)
        *ptrc++ = inb(NE_BASE + NE_DATAPORT);
    }

    outb(ENISR_RDC, nic_base + NE_EN0_ISR);	/* Ack intr. */
    ei_status.dmaing &= ~0x01;
}
/* Write count bytes from buf into NIC transmit page start_page via 8390
 * remote DMA, then wait (max ~20ms) for the remote-DMA-complete interrupt
 * bit; on timeout the chip is reset and re-initialized. */
static void
apne_block_output(struct net_device *dev, int count,
		const unsigned char *buf, const int start_page)
{
    int nic_base = NE_BASE;
    unsigned long dma_start;
    char *ptrc;
    short *ptrs;
    int cnt;

    /* Round the count up for word writes.  Do we need to do this?
       What effect will an odd byte count have on the 8390?
       I should check someday. */
    if (ei_status.word16 && (count & 0x01))
	count++;

    /* This *shouldn't* happen. If it does, it's the last thing you'll see */
    if (ei_status.dmaing) {
        printk("%s: DMAing conflict in ne_block_output."
           "[DMAstat:%d][irqlock:%d][intr:%d]\n",
           dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
        return;
    }
    ei_status.dmaing |= 0x01;
    /* We should already be in page 0, but to be safe... */
    outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);

    outb(ENISR_RDC, nic_base + NE_EN0_ISR);

    /* Now the normal output. */
    outb(count & 0xff, nic_base + NE_EN0_RCNTLO);
    outb(count >> 8, nic_base + NE_EN0_RCNTHI);
    outb(0x00, nic_base + NE_EN0_RSARLO);
    outb(start_page, nic_base + NE_EN0_RSARHI);

    outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
    if (ei_status.word16) {
        ptrs = (short*)buf;
        for (cnt = 0; cnt < count>>1; cnt++)
            outw(*ptrs++, NE_BASE+NE_DATAPORT);
    } else {
        ptrc = (char*)buf;
        for (cnt = 0; cnt < count; cnt++)
	    outb(*ptrc++, NE_BASE + NE_DATAPORT);
    }

    dma_start = jiffies;

    while ((inb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
	if (time_after(jiffies, dma_start + 2*HZ/100)) {	/* 20ms */
		printk("%s: timeout waiting for Tx RDC.\n", dev->name);
		apne_reset_8390(dev);
		NS8390_init(dev,1);
		break;
	}

    outb(ENISR_RDC, nic_base + NE_EN0_ISR);	/* Ack intr. */
    ei_status.dmaing &= ~0x01;
    return;
}
/* Shared interrupt handler on IRQ_AMIGA_PORTS.  Verifies that the PCMCIA
 * slot actually raised the interrupt before handing off to the generic
 * 8390 handler; acknowledges the Gayle interrupt request in either case. */
static irqreturn_t apne_interrupt(int irq, void *dev_id)
{
    unsigned char pcmcia_intreq;

    if (!(gayle.inten & GAYLE_IRQ_IRQ))
        return IRQ_NONE;

    pcmcia_intreq = pcmcia_get_intreq();

    if (!(pcmcia_intreq & GAYLE_IRQ_IRQ)) {
        /* Not ours: ack and let other handlers on the shared line run. */
        pcmcia_ack_int(pcmcia_intreq);
        return IRQ_NONE;
    }
    if (ei_debug > 3)
        printk("pcmcia intreq = %x\n", pcmcia_intreq);
    pcmcia_disable_irq();			/* to get rid of the sti() within ei_interrupt */
    ei_interrupt(irq, dev_id);
    pcmcia_ack_int(pcmcia_get_intreq());
    pcmcia_enable_irq();
    return IRQ_HANDLED;
}
#ifdef MODULE
/* The single device instance owned by this module (one card supported). */
static struct net_device *apne_dev;

/* Module entry point: probe with unit = -1 (auto name). */
static int __init apne_module_init(void)
{
	apne_dev = apne_probe(-1);
	if (IS_ERR(apne_dev))
		return PTR_ERR(apne_dev);
	return 0;
}

/* Module exit: unregister and tear down everything apne_probe set up
 * (IRQ handler, PCMCIA state, I/O region, net_device). */
static void __exit apne_module_exit(void)
{
	unregister_netdev(apne_dev);

	pcmcia_disable_irq();

	free_irq(IRQ_AMIGA_PORTS, apne_dev);

	pcmcia_reset();

	release_region(IOBASE, 0x20);

	free_netdev(apne_dev);
}
module_init(apne_module_init);
module_exit(apne_module_exit);
#endif
/* Reset and configure the Gayle PCMCIA slot: program voltage and access
 * speed, then write the card's configuration option register.  The config
 * value and register offset come from the card's CIS tuples unless
 * overridden at compile time (MANUAL_CONFIG / MANUAL_OFFSET).
 * Returns 1 on success, 0 if the required tuples cannot be read. */
static int init_pcmcia(void)
{
    u_char config;
#ifndef MANUAL_CONFIG
    u_char tuple[32];
    int offset_len;
#endif
    u_long offset;

    pcmcia_reset();
    pcmcia_program_voltage(PCMCIA_0V);
    pcmcia_access_speed(PCMCIA_SPEED_250NS);
    pcmcia_write_enable();

#ifdef MANUAL_CONFIG
    config = MANUAL_CONFIG;
#else
    /* get and write config byte to enable IO port */

    if (pcmcia_copy_tuple(CISTPL_CFTABLE_ENTRY, tuple, 32) < 3)
        return 0;

    config = tuple[2] & 0x3f;
#endif
#ifdef MANUAL_OFFSET
    offset = MANUAL_OFFSET;
#else
    if (pcmcia_copy_tuple(CISTPL_CONFIG, tuple, 32) < 6)
        return 0;

    /* Config register base address: big-endian, variable-length field. */
    offset_len = (tuple[2] & 0x3) + 1;
    offset = 0;
    while(offset_len--) {
        offset = (offset << 8) | tuple[4+offset_len];
    }
#endif

    out_8(GAYLE_ATTRIBUTE+offset, config);

    return 1;
}
MODULE_LICENSE("GPL");
|
leemgs/OptimusOneKernel-KandroidCommunity
|
drivers/net/apne.c
|
C
|
gpl-2.0
| 12,904
|
/**
* Copyright (C) ARM Limited 2012-2015. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#define NEWLINE_CANARY \
/* Unix */ \
"1\n" \
/* Windows */ \
"2\r\n" \
/* Mac OS */ \
"3\r" \
/* RISC OS */ \
"4\n\r" \
/* Add another character so the length isn't 0x0a bytes */ \
"5"
#ifdef MALI_SUPPORT
#include "gator_events_mali_common.h"
#endif
/* Emit the one-off summary frame: timestamps, uname, page size, and a set
 * of feature/GPU key-value string pairs, terminated by an empty string.
 * Written with interrupts disabled on cpu 0 and committed immediately so
 * Streamline sees it among the first frames. */
static void marshal_summary(long long timestamp, long long uptime, long long monotonic_delta, const char *uname)
{
	unsigned long flags;
	int cpu = 0;
	char buf[32];

	local_irq_save(flags);
	gator_buffer_write_packed_int(cpu, SUMMARY_BUF, MESSAGE_SUMMARY);
	/* Canary containing every newline convention so the host can detect
	 * any line-ending translation corrupting the stream. */
	gator_buffer_write_string(cpu, SUMMARY_BUF, NEWLINE_CANARY);
	gator_buffer_write_packed_int64(cpu, SUMMARY_BUF, timestamp);
	gator_buffer_write_packed_int64(cpu, SUMMARY_BUF, uptime);
	gator_buffer_write_packed_int64(cpu, SUMMARY_BUF, monotonic_delta);
	gator_buffer_write_string(cpu, SUMMARY_BUF, "uname");
	gator_buffer_write_string(cpu, SUMMARY_BUF, uname);
	gator_buffer_write_string(cpu, SUMMARY_BUF, "PAGESIZE");
	snprintf(buf, sizeof(buf), "%lu", PAGE_SIZE);
	gator_buffer_write_string(cpu, SUMMARY_BUF, buf);
#if GATOR_IKS_SUPPORT
	gator_buffer_write_string(cpu, SUMMARY_BUF, "iks");
	gator_buffer_write_string(cpu, SUMMARY_BUF, "");
#endif
#ifdef CONFIG_PREEMPT_RTB
	gator_buffer_write_string(cpu, SUMMARY_BUF, "preempt_rtb");
	gator_buffer_write_string(cpu, SUMMARY_BUF, "");
#endif
#ifdef CONFIG_PREEMPT_RT_FULL
	gator_buffer_write_string(cpu, SUMMARY_BUF, "preempt_rt_full");
	gator_buffer_write_string(cpu, SUMMARY_BUF, "");
#endif
	/* Let Streamline know which GPU is used so that it can label the GPU Activity appropriately. This is a temporary fix, to be improved in a future release. */
#ifdef MALI_SUPPORT
	gator_buffer_write_string(cpu, SUMMARY_BUF, "mali_type");
#if (MALI_SUPPORT == MALI_4xx)
	gator_buffer_write_string(cpu, SUMMARY_BUF, "4xx");
#elif (MALI_SUPPORT == MALI_MIDGARD)
	gator_buffer_write_string(cpu, SUMMARY_BUF, "6xx");
#else
	gator_buffer_write_string(cpu, SUMMARY_BUF, "unknown");
#endif
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
	gator_buffer_write_string(cpu, SUMMARY_BUF, "nosync");
	gator_buffer_write_string(cpu, SUMMARY_BUF, "");
#endif
	/* Empty string terminates the key-value list. */
	gator_buffer_write_string(cpu, SUMMARY_BUF, "");

	/* Commit the buffer now so it can be one of the first frames read by Streamline */
	local_irq_restore(flags);
	gator_commit_buffer(cpu, SUMMARY_BUF, gator_get_time());
}
/* Check that this cpu's NAME_BUF has room for a cookie message carrying
 * the given text; true when marshal_cookie may proceed. */
static bool marshal_cookie_header(const char *text)
{
	const size_t needed = strlen(text) + 3 * MAXSIZE_PACK32;

	return buffer_check_space(get_physical_cpu(), NAME_BUF, needed);
}
/* Emit a cookie message binding the integer cookie to its text (e.g. a
 * path).  Space must have been reserved via marshal_cookie_header. */
static void marshal_cookie(int cookie, const char *text)
{
	int cpu = get_physical_cpu();
	/* buffer_check_space already called by marshal_cookie_header */
	gator_buffer_write_packed_int(cpu, NAME_BUF, MESSAGE_COOKIE);
	gator_buffer_write_packed_int(cpu, NAME_BUF, cookie);
	gator_buffer_write_string(cpu, NAME_BUF, text);
	/* Check and commit; commit occurs once the buffer fills up. */
	buffer_check(cpu, NAME_BUF, gator_get_time());
}
/* Emit a thread-name message (pid -> name) into the per-cpu NAME_BUF.
 * Dropped silently if the buffer lacks space. */
static void marshal_thread_name(int pid, char *name)
{
	unsigned long flags, cpu;
	u64 time;

	local_irq_save(flags);
	cpu = get_physical_cpu();
	time = gator_get_time();
	if (buffer_check_space(cpu, NAME_BUF, TASK_COMM_LEN + 3 * MAXSIZE_PACK32 + MAXSIZE_PACK64)) {
		gator_buffer_write_packed_int(cpu, NAME_BUF, MESSAGE_THREAD_NAME);
		gator_buffer_write_packed_int64(cpu, NAME_BUF, time);
		gator_buffer_write_packed_int(cpu, NAME_BUF, pid);
		gator_buffer_write_string(cpu, NAME_BUF, name);
	}
	local_irq_restore(flags);
	buffer_check(cpu, NAME_BUF, time);
}
/* Emit a link message associating an exec cookie with a (tgid, pid) pair.
 * Dropped silently if the activity buffer lacks space. */
static void marshal_link(int cookie, int tgid, int pid)
{
	unsigned long cpu = get_physical_cpu(), flags;
	u64 time;

	local_irq_save(flags);
	time = gator_get_time();
	if (buffer_check_space(cpu, ACTIVITY_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32)) {
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, MESSAGE_LINK);
		gator_buffer_write_packed_int64(cpu, ACTIVITY_BUF, time);
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, cookie);
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, tgid);
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, pid);
	}
	local_irq_restore(flags);
	/* Check and commit; commit is set to occur once buffer is 3/4 full */
	buffer_check(cpu, ACTIVITY_BUF, time);
}
/* Start a backtrace record: reserve space for the header plus up to
 * gator_backtrace_depth frames, then write time/cookie/tgid/pid.
 * Returns false (and commits the buffer) if space is unavailable, in
 * which case the caller must not emit frames or a footer. */
static bool marshal_backtrace_header(int exec_cookie, int tgid, int pid, u64 time)
{
	int cpu = get_physical_cpu();

	if (!buffer_check_space(cpu, BACKTRACE_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32 + gator_backtrace_depth * 2 * MAXSIZE_PACK32)) {
		/* Check and commit; commit is set to occur once buffer is 3/4 full */
		buffer_check(cpu, BACKTRACE_BUF, time);

		return false;
	}

	gator_buffer_write_packed_int64(cpu, BACKTRACE_BUF, time);
	gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, exec_cookie);
	gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, tgid);
	gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, pid);

	return true;
}
static void marshal_backtrace(unsigned long address, int cookie, int in_kernel)
{
int cpu = get_physical_cpu();
if (cookie == 0 && !in_kernel)
cookie = UNRESOLVED_COOKIE;
gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, cookie);
gator_buffer_write_packed_int64(cpu, BACKTRACE_BUF, address);
}
/* Terminate a backtrace record started by marshal_backtrace_header. */
static void marshal_backtrace_footer(u64 time)
{
	int cpu = get_physical_cpu();

	gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, MESSAGE_END_BACKTRACE);

	/* Check and commit; commit is set to occur once buffer is 3/4 full */
	buffer_check(cpu, BACKTRACE_BUF, time);
}
/* Begin a block-counter event group by writing a timestamp record (a key
 * of zero marks a timestamp).  Returns false if the buffer lacked space,
 * in which case no events should follow. */
static bool marshal_event_header(u64 time)
{
	unsigned long flags, cpu = get_physical_cpu();
	bool retval = false;

	local_irq_save(flags);
	if (buffer_check_space(cpu, BLOCK_COUNTER_BUF, MAXSIZE_PACK32 + MAXSIZE_PACK64)) {
		gator_buffer_write_packed_int(cpu, BLOCK_COUNTER_BUF, 0);	/* key of zero indicates a timestamp */
		gator_buffer_write_packed_int64(cpu, BLOCK_COUNTER_BUF, time);
		retval = true;
	}
	local_irq_restore(flags);

	return retval;
}
/* Write an array of (key, value) int pairs into the per-cpu block-counter
 * buffer.  len is the total number of ints and must be even and positive;
 * odd-length data is rejected as corrupt.  Pairs that no longer fit are
 * silently dropped.  Runs with local interrupts disabled. */
static void marshal_event(int len, int *buffer)
{
	unsigned long flags, cpu = get_physical_cpu();
	/* Use a signed index: comparing an unsigned long against the signed
	 * len mixes signedness (-Wsign-compare); len > 0 is guaranteed below. */
	int i;

	if (len <= 0)
		return;

	/* length must be even since all data is a (key, value) pair */
	if (len & 0x1) {
		pr_err("gator: invalid counter data detected and discarded\n");
		return;
	}

	/* events must be written in key,value pairs */
	local_irq_save(flags);
	for (i = 0; i < len; i += 2) {
		if (!buffer_check_space(cpu, BLOCK_COUNTER_BUF, 2 * MAXSIZE_PACK32))
			break;
		gator_buffer_write_packed_int(cpu, BLOCK_COUNTER_BUF, buffer[i]);
		gator_buffer_write_packed_int(cpu, BLOCK_COUNTER_BUF, buffer[i + 1]);
	}
	local_irq_restore(flags);
}
/* 64-bit variant of marshal_event: write (key, value) long long pairs into
 * the per-cpu block-counter buffer.  len is the total number of entries
 * and must be even and positive; pairs that no longer fit are dropped.
 * Runs with local interrupts disabled. */
static void marshal_event64(int len, long long *buffer64)
{
	unsigned long flags, cpu = get_physical_cpu();
	/* Signed index to match the signed len (avoids -Wsign-compare). */
	int i;

	if (len <= 0)
		return;

	/* length must be even since all data is a (key, value) pair */
	if (len & 0x1) {
		pr_err("gator: invalid counter data detected and discarded\n");
		return;
	}

	/* events must be written in key,value pairs */
	local_irq_save(flags);
	for (i = 0; i < len; i += 2) {
		if (!buffer_check_space(cpu, BLOCK_COUNTER_BUF, 2 * MAXSIZE_PACK64))
			break;
		gator_buffer_write_packed_int64(cpu, BLOCK_COUNTER_BUF, buffer64[i]);
		gator_buffer_write_packed_int64(cpu, BLOCK_COUNTER_BUF, buffer64[i + 1]);
	}
	local_irq_restore(flags);
}
/* Emit a single timestamped counter sample (core, key, value) into the
 * per-cpu COUNTER_BUF.  Dropped silently if the buffer lacks space. */
static void __maybe_unused marshal_event_single(int core, int key, int value)
{
	unsigned long flags, cpu;
	u64 time;

	local_irq_save(flags);
	cpu = get_physical_cpu();
	time = gator_get_time();
	if (buffer_check_space(cpu, COUNTER_BUF, MAXSIZE_PACK64 + 3 * MAXSIZE_PACK32)) {
		gator_buffer_write_packed_int64(cpu, COUNTER_BUF, time);
		gator_buffer_write_packed_int(cpu, COUNTER_BUF, core);
		gator_buffer_write_packed_int(cpu, COUNTER_BUF, key);
		gator_buffer_write_packed_int(cpu, COUNTER_BUF, value);
	}
	local_irq_restore(flags);
	/* Check and commit; commit is set to occur once buffer is 3/4 full */
	buffer_check(cpu, COUNTER_BUF, time);
}
/* 64-bit value variant of marshal_event_single: emit a single timestamped
 * counter sample (core, key, 64-bit value) into the per-cpu COUNTER_BUF. */
static void __maybe_unused marshal_event_single64(int core, int key, long long value)
{
	unsigned long flags, cpu;
	u64 time;

	local_irq_save(flags);
	cpu = get_physical_cpu();
	time = gator_get_time();
	if (buffer_check_space(cpu, COUNTER_BUF, 2 * MAXSIZE_PACK64 + 2 * MAXSIZE_PACK32)) {
		gator_buffer_write_packed_int64(cpu, COUNTER_BUF, time);
		gator_buffer_write_packed_int(cpu, COUNTER_BUF, core);
		gator_buffer_write_packed_int(cpu, COUNTER_BUF, key);
		gator_buffer_write_packed_int64(cpu, COUNTER_BUF, value);
	}
	local_irq_restore(flags);
	/* Check and commit; commit is set to occur once buffer is 3/4 full */
	buffer_check(cpu, COUNTER_BUF, time);
}
/* Emit a scheduler context-switch event (pid, state) into the per-cpu
 * scheduler trace buffer.  No-op if that buffer was never allocated. */
static void marshal_sched_trace_switch(int pid, int state)
{
	unsigned long cpu = get_physical_cpu(), flags;
	u64 time;

	if (!per_cpu(gator_buffer, cpu)[SCHED_TRACE_BUF])
		return;

	local_irq_save(flags);
	time = gator_get_time();
	if (buffer_check_space(cpu, SCHED_TRACE_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32)) {
		gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, MESSAGE_SCHED_SWITCH);
		gator_buffer_write_packed_int64(cpu, SCHED_TRACE_BUF, time);
		gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, pid);
		gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, state);
	}
	local_irq_restore(flags);
	/* Check and commit; commit is set to occur once buffer is 3/4 full */
	buffer_check(cpu, SCHED_TRACE_BUF, time);
}
/* Emit a task-exit event into the per-cpu scheduler trace buffer.
 * No-op if that buffer was never allocated.  Note: the tgid parameter is
 * accepted but not serialized here — only pid is written. */
static void marshal_sched_trace_exit(int tgid, int pid)
{
	unsigned long cpu = get_physical_cpu(), flags;
	u64 time;

	if (!per_cpu(gator_buffer, cpu)[SCHED_TRACE_BUF])
		return;

	local_irq_save(flags);
	time = gator_get_time();
	if (buffer_check_space(cpu, SCHED_TRACE_BUF, MAXSIZE_PACK64 + 2 * MAXSIZE_PACK32)) {
		gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, MESSAGE_SCHED_EXIT);
		gator_buffer_write_packed_int64(cpu, SCHED_TRACE_BUF, time);
		gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, pid);
	}
	local_irq_restore(flags);
	/* Check and commit; commit is set to occur once buffer is 3/4 full */
	buffer_check(cpu, SCHED_TRACE_BUF, time);
}
#if GATOR_CPU_FREQ_SUPPORT
/* Emit a CPU idle-state event (state, core) into the per-cpu IDLE_BUF.
 * Dropped silently if the buffer lacks space. */
static void marshal_idle(int core, int state)
{
	unsigned long flags, cpu;
	u64 time;

	local_irq_save(flags);
	cpu = get_physical_cpu();
	time = gator_get_time();
	if (buffer_check_space(cpu, IDLE_BUF, MAXSIZE_PACK64 + 2 * MAXSIZE_PACK32)) {
		gator_buffer_write_packed_int(cpu, IDLE_BUF, state);
		gator_buffer_write_packed_int64(cpu, IDLE_BUF, time);
		gator_buffer_write_packed_int(cpu, IDLE_BUF, core);
	}
	local_irq_restore(flags);
	/* Check and commit; commit is set to occur once buffer is 3/4 full */
	buffer_check(cpu, IDLE_BUF, time);
}
#endif
#if defined(__arm__) || defined(__aarch64__)
/* Emit a core-name record (core index, cpuid, human-readable name) into
 * the summary buffer and commit immediately so names show up in live
 * capture. */
static void marshal_core_name(const int core, const int cpuid, const char *name)
{
	int cpu = get_physical_cpu();
	unsigned long flags;

	local_irq_save(flags);
	if (buffer_check_space(cpu, SUMMARY_BUF, MAXSIZE_PACK32 + MAXSIZE_CORE_NAME)) {
		gator_buffer_write_packed_int(cpu, SUMMARY_BUF, MESSAGE_CORE_NAME);
		gator_buffer_write_packed_int(cpu, SUMMARY_BUF, core);
		gator_buffer_write_packed_int(cpu, SUMMARY_BUF, cpuid);
		gator_buffer_write_string(cpu, SUMMARY_BUF, name);
	}
	/* Commit core names now so that they can show up in live */
	local_irq_restore(flags);
	gator_commit_buffer(cpu, SUMMARY_BUF, gator_get_time());
}
#endif
/* Emit an activity-switch event (core, key, activity, pid, state) into the
 * per-cpu activity buffer.  No-op if that buffer was never allocated. */
static void marshal_activity_switch(int core, int key, int activity, int pid, int state)
{
	unsigned long cpu = get_physical_cpu(), flags;
	u64 time;

	if (!per_cpu(gator_buffer, cpu)[ACTIVITY_BUF])
		return;

	local_irq_save(flags);
	time = gator_get_time();
	if (buffer_check_space(cpu, ACTIVITY_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32)) {
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, MESSAGE_SWITCH);
		gator_buffer_write_packed_int64(cpu, ACTIVITY_BUF, time);
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, core);
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, key);
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, activity);
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, pid);
		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, state);
	}
	local_irq_restore(flags);
	/* Check and commit; commit is set to occur once buffer is 3/4 full */
	buffer_check(cpu, ACTIVITY_BUF, time);
}
/* Public wrapper: external callers never set the cpu-reserved state field. */
void gator_marshal_activity_switch(int core, int key, int activity, int pid)
{
	/* state is reserved for cpu use only */
	marshal_activity_switch(core, key, activity, pid, 0);
}
|
javilonas/Lonas_KL-SM-G901F
|
drivers/gator/gator_marshaling.c
|
C
|
gpl-2.0
| 12,460
|
/*
* Copyright (c) 2017, 2020, Oracle and/or its affiliates.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are
* permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to
* endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <graalvm/llvm/polyglot.h>
#include <graalvm/llvm/handles.h>
/*
 * Regression test: a handle created from an imported polyglot value must
 * resolve back to the identical pointer.  Exits 0 on success, 1 on failure.
 */
int main() {
	void *imported = polyglot_import("object");
	void *handle = create_handle(imported);
	void *resolved = resolve_handle(handle);

	return (resolved == imported) ? 0 : 1;
}
|
smarr/Truffle
|
sulong/tests/com.oracle.truffle.llvm.tests.interop.native/interop/createResolveHandle.c
|
C
|
gpl-2.0
| 1,844
|
/*
** Copyright (C) 1991, 1997 Free Software Foundation, Inc.
**
** This file is part of TACK.
**
** TACK is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2, or (at your option)
** any later version.
**
** TACK is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with TACK; see the file COPYING. If not, write to
** the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
** Boston, MA 02111-1307, USA.
*/
#include <tack.h>
MODULE_ID("$Id: menu.c,v 1.1.1.1 2004/03/24 19:53:07 sure Exp $")
/*
Menu control
*/
static void test_byname(struct test_menu *, int *, int *);
struct test_list *augment_test;
char prompt_string[80]; /* menu prompt storage */
/*
** menu_prompt()
**
** Print the menu prompt string.
*/
void
menu_prompt(void)
{
	/* skip the leading '/' that menu_display() prepends to each level */
	ptext(prompt_string + 1);
}
/*
** menu_test_loop(test-structure, state, control-character)
**
** This function implements the repeat test function.
*/
static void
menu_test_loop(
	struct test_list *test,
	int *state,
	int *ch)
{
	int nch, p;

	if ((test->flags & MENU_REP_MASK) && (augment_test != test)) {
		/* set the augment variable (first time only) */
		p = (test->flags >> 8) & 15;	/* scale factor, in tenths */
		if ((test->flags & MENU_REP_MASK) == MENU_LM1) {
			augment = lines - 1;
		} else
		if ((test->flags & MENU_ONE_MASK) == MENU_ONE) {
			augment = 1;
		} else
		if ((test->flags & MENU_LC_MASK) == MENU_lines) {
			augment = lines * p / 10;
		} else
		if ((test->flags & MENU_LC_MASK) == MENU_columns) {
			augment = columns * p / 10;
		} else {
			augment = 1;
		}
		augment_test = test;
		set_augment_txt();
	}
	do {
		/* clear if the test requests it, or if it will not fit on screen */
		if ((test->flags | *state) & MENU_CLEAR) {
			put_clear();
		} else
		if (line_count + test->lines_needed >= lines) {
			put_clear();
		}
		nch = 0;
		if (test->test_procedure) {
			/* The procedure takes precedence so I can pass
			   the menu entry as an argument.
			*/
			can_test(test->caps_done, FLAG_TESTED);
			can_test(test->caps_tested, FLAG_TESTED);
			test->test_procedure(test, state, &nch);
		} else
		if (test->sub_menu) {
			/* nested menu's */
			menu_display(test->sub_menu, &nch);
			*state = 0;
			if (nch == 'q' || nch == 's') {
				/* Quit and skip are killed here */
				nch = '?';
			}
		} else {
			break;	/* cya */
		}
		if (nch == '\r' || nch == '\n' || nch == 'n') {
			/* normal completion: report no keystroke to the caller */
			nch = 0;
			break;
		}
	} while (nch == 'r');	/* 'r' repeats the same test */
	*ch = nch;
}
/*
** menu_display(menu-structure, flags)
**
** This function implements menu control.
*/
void
menu_display(
	struct test_menu *menu,
	int *last_ch)
{
	int test_state = 0, run_standard_tests;
	int hot_topic, ch = 0, nch = 0;
	struct test_list *mt;
	struct test_list *repeat_tests = 0;	/* resume point for the 'r' key */
	int repeat_state = 0;
	int prompt_length;

	prompt_length = strlen(prompt_string);
	if (menu->ident) {
		/* NOTE(review): assumes ident fits in prompt_string[80] - verify */
		sprintf(&prompt_string[prompt_length], "/%s", menu->ident);
	}
	hot_topic = menu->default_action;
	/* the first character of the standard-tests entry is its hotkey */
	run_standard_tests = menu->standard_tests ?
		menu->standard_tests[0] : -1;
	if (!last_ch) {
		last_ch = &ch;
	}
	while (1) {
		if (ch == 0) {
			/* Display the menu */
			put_crlf();
			if (menu->menu_function) {
				/*
				   this function may be used to restrict menu
				   entries.  If used it must print the title.
				*/
				menu->menu_function(menu);
			} else
			if (menu->menu_title) {
				ptextln(menu->menu_title);
			}
			for (mt = menu->tests; (mt->flags & MENU_LAST) == 0; mt++) {
				if (mt->menu_entry) {
					ptext("  ");
					ptextln(mt->menu_entry);
				}
			}
			if (menu->standard_tests) {
				ptext("  ");
				ptextln(menu->standard_tests);
				ptextln("  r) repeat test");
				ptextln("  s) skip to next test");
			}
			ptextln("  q) quit");
			ptextln("  ?) help");
		}
		if (ch == 0 || ch == REQUEST_PROMPT) {
			/* show the prompt with the default action in brackets */
			put_crlf();
			ptext(&prompt_string[1]);
			if (hot_topic) {
				ptext(" [");
				putchp(hot_topic);
				ptext("]");
			}
			ptext(" > ");
			/* read a character */
			ch = wait_here();
		}
		if (ch == '\r' || ch == '\n') {
			ch = hot_topic;	/* return key selects the default */
		}
		if (ch == 'q') {
			break;
		}
		if (ch == '?') {
			ch = 0;	/* redraw the menu */
			continue;
		}
		nch = ch;
		ch = 0;
		/* Run one of the standard tests (by request) */
		for (mt = menu->tests; (mt->flags & MENU_LAST) == 0; mt++) {
			if (mt->menu_entry && (nch == mt->menu_entry[0])) {
				if (mt->flags & MENU_MENU) {
					test_byname(menu, &test_state, &nch);
				} else {
					menu_test_loop(mt, &test_state, &nch);
				}
				ch = nch;
				if ((mt->flags & MENU_COMPLETE) && ch == 0) {
					/* top level */
					hot_topic = 'q';
					ch = '?';
				}
			}
		}
		if (menu->standard_tests && nch == 'r') {
			/* repeat: rewind to the last standard test that ran */
			menu->resume_tests = repeat_tests;
			test_state = repeat_state;
			nch = run_standard_tests;
		}
		if (nch == run_standard_tests) {
			if (!(mt = menu->resume_tests)) {
				mt = menu->tests;
			}
			if (mt->flags & MENU_LAST) {
				mt = menu->tests;
			}
			/* Run the standard test suite */
			for ( ; (mt->flags & MENU_LAST) == 0; ) {
				if ((mt->flags & MENU_NEXT) == MENU_NEXT) {
					/* remember where 'r' should resume */
					repeat_tests = mt;
					repeat_state = test_state;
					nch = run_standard_tests;
					menu_test_loop(mt, &test_state, &nch);
					if (nch != 0 && nch != 'n') {
						ch = nch;
						break;
					}
					if (test_state & MENU_STOP) {
						break;
					}
				}
				mt++;
			}
			if (ch == 0) {
				ch = hot_topic;
			}
			menu->resume_tests = mt;
			menu->resume_state = test_state;
			menu->resume_char = ch;

			if (ch == run_standard_tests) {
				/* pop up a level */
				break;
			}
		}
	}
	*last_ch = ch;
	/* restore the prompt for the caller's menu level */
	prompt_string[prompt_length] = '\0';
}
/*
** generic_done_message(test_list)
**
** Print the Done message and request input.
*/
/*
 * Print the "(caps) Done" message for a finished test and read the user's
 * response.  '\r', '\n' and 'n' mean "continue"; 's' requests that the
 * standard test sequence stop (MENU_STOP).  Any other key is returned in
 * *ch for the menu driver to dispatch.
 */
void
generic_done_message(
	struct test_list *test,
	int *state,
	int *ch)
{
	char done_message[128];

	if (test->caps_done) {
		/* snprintf: caps_done comes from the test table and could in
		   principle exceed the buffer; sprintf() offered no bound */
		snprintf(done_message, sizeof(done_message), "(%s) Done ",
			test->caps_done);
		ptext(done_message);
	} else {
		ptext("Done ");
	}
	*ch = wait_here();
	if (*ch == '\r' || *ch == '\n' || *ch == 'n') {
		*ch = 0;	/* normal continue */
	}
	if (*ch == 's') {
		*state |= MENU_STOP;
		*ch = 0;
	}
}
/*
** menu_clear_screen(test, state, ch)
**
** Just clear the screen.
*/
void
menu_clear_screen(
	struct test_list *test GCC_UNUSED,
	int *state GCC_UNUSED,
	int *ch GCC_UNUSED)
{
	/* menu callback: all arguments unused, just wipe the display */
	put_clear();
}
/*
** menu_reset_init(test, state, ch)
**
** Send the reset and init strings.
*/
void
menu_reset_init(
	struct test_list *test GCC_UNUSED,
	int *state GCC_UNUSED,
	int *ch GCC_UNUSED)
{
	/* menu callback: put the terminal into a known state */
	reset_init();
	put_crlf();
}
/*
** subtest_menu(test, state, ch)
**
** Scan the menu looking for something to execute
** Return TRUE if we found anything.
*/
int
subtest_menu(
	struct test_list *test,
	int *state,
	int *ch)
{
	struct test_list *entry;

	/* nothing pending: no entry can match */
	if (*ch == 0) {
		return FALSE;
	}
	/* find the entry whose hotkey matches the pending character */
	for (entry = test; !(entry->flags & MENU_LAST); entry++) {
		if (entry->menu_entry && *ch == entry->menu_entry[0]) {
			*ch = 0;
			menu_test_loop(entry, state, ch);
			return TRUE;
		}
	}
	return FALSE;
}
/*
** menu_can_scan(menu-structure)
**
** Recursively scan the menu tree and find which cap names can be tested.
*/
void
menu_can_scan(
	const struct test_menu *menu)
{
	struct test_list *entry;

	for (entry = menu->tests; !(entry->flags & MENU_LAST); entry++) {
		/* mark both the caps a test sets and the caps it exercises */
		can_test(entry->caps_done, FLAG_CAN_TEST);
		can_test(entry->caps_tested, FLAG_CAN_TEST);
		/* recurse into nested menus that have no procedure of their own */
		if (!entry->test_procedure && entry->sub_menu) {
			menu_can_scan(entry->sub_menu);
		}
	}
}
/*
** menu_search(menu-structure, cap)
**
** Recursively search the menu tree and execute any tests that use cap.
*/
static void
menu_search(
	struct test_menu *menu,
	int *state,
	int *ch,
	char *cap)
{
	struct test_list *mt;
	int nch;

	for (mt = menu->tests; (mt->flags & MENU_LAST) == 0; mt++) {
		nch = 0;
		/* run the entry if it tests or produces the requested cap */
		if (cap_match(mt->caps_done, cap)
			|| cap_match(mt->caps_tested, cap)) {
			menu_test_loop(mt, state, &nch);
		}
		if (!(mt->test_procedure)) {
			if (mt->sub_menu) {
				/* descend into nested menus */
				menu_search(mt->sub_menu, state, &nch, cap);
			}
		}
		if (*state & MENU_STOP) {
			break;
		}
		if (nch != 0 && nch != 'n') {
			/* hand any other keystroke back to the caller */
			*ch = nch;
			break;
		}
	}
}
/*
** test_byname(menu, state, ch)
**
** Get a cap name then run all tests that use that cap.
*/
static void
test_byname(
	struct test_menu *menu,
	int *state GCC_UNUSED,
	int *ch)
{
	char cap_name[32];
	int search_state = 0;

	/* the search relies on timing having been calibrated */
	if (tty_can_sync == SYNC_NOT_TESTED) {
		verify_time();
	}
	ptext("enter name: ");
	read_string(cap_name, sizeof(cap_name));

	if (cap_name[0] != '\0') {
		menu_search(menu, &search_state, ch, cap_name);
	}
	*ch = '?';	/* force the menu to be redrawn */
}
|
nslu2/glibc
|
ncurses/tack/menu.c
|
C
|
gpl-2.0
| 8,641
|
/*
* linux/fs/namei.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* Some corrections by tytso.
*/
/* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname
* lookup logic.
*/
/* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture.
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/fsnotify.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/ima.h>
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/device_cgroup.h>
#include <linux/fs_struct.h>
#include <linux/posix_acl.h>
#include <linux/hash.h>
#include <asm/uaccess.h>
#include "internal.h"
#include "mount.h"
/* [Feb-1997 T. Schoebel-Theuer]
* Fundamental changes in the pathname lookup mechanisms (namei)
* were necessary because of omirr. The reason is that omirr needs
* to know the _real_ pathname, not the user-supplied one, in case
* of symlinks (and also when transname replacements occur).
*
* The new code replaces the old recursive symlink resolution with
* an iterative one (in case of non-nested symlink chains). It does
* this with calls to <fs>_follow_link().
* As a side effect, dir_namei(), _namei() and follow_link() are now
* replaced with a single function lookup_dentry() that can handle all
* the special cases of the former code.
*
* With the new dcache, the pathname is stored at each inode, at least as
* long as the refcount of the inode is positive. As a side effect, the
* size of the dcache depends on the inode cache and thus is dynamic.
*
* [29-Apr-1998 C. Scott Ananian] Updated above description of symlink
* resolution to correspond with current state of the code.
*
* Note that the symlink resolution is not *completely* iterative.
* There is still a significant amount of tail- and mid- recursion in
* the algorithm. Also, note that <fs>_readlink() is not used in
* lookup_dentry(): lookup_dentry() on the result of <fs>_readlink()
* may return different results than <fs>_follow_link(). Many virtual
* filesystems (including /proc) exhibit this behavior.
*/
/* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation:
* New symlink semantics: when open() is called with flags O_CREAT | O_EXCL
* and the name already exists in form of a symlink, try to create the new
* name indicated by the symlink. The old code always complained that the
* name already exists, due to not following the symlink even if its target
* is nonexistent. The new semantics affects also mknod() and link() when
* the name is a symlink pointing to a non-existent name.
*
* I don't know which semantics is the right one, since I have no access
* to standards. But I found by trial that HP-UX 9.0 has the full "new"
* semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the
* "old" one. Personally, I think the new semantics is much more logical.
* Note that "ln old new" where "new" is a symlink pointing to a non-existing
* file does succeed in both HP-UX and SunOs, but not in Solaris
* and in the old Linux semantics.
*/
/* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink
* semantics. See the comments in "open_namei" and "do_link" below.
*
* [10-Sep-98 Alan Modra] Another symlink change.
*/
/* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks:
* inside the path - always follow.
* in the last component in creation/removal/renaming - never follow.
* if LOOKUP_FOLLOW passed - follow.
* if the pathname has trailing slashes - follow.
* otherwise - don't follow.
* (applied in that order).
*
* [Jun 2000 AV] Inconsistent behaviour of open() in case if flags==O_CREAT
* restored for 2.4. This is the last surviving part of old 4.2BSD bug.
* During the 2.4 we need to fix the userland stuff depending on it -
* hopefully we will be able to get rid of that wart in 2.5. So far only
* XEmacs seems to be relying on it...
*/
/*
* [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland)
* implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives
* any extra contention...
*/
/* In order to reduce some races, while at the same time doing additional
* checking and hopefully speeding things up, we copy filenames to the
* kernel data space before using them..
*
* POSIX.1 2.4: an empty pathname is invalid (ENOENT).
* PATH_MAX includes the nul terminator --RR.
*/
/*
 * Copy a user pathname into the kernel buffer @page (PATH_MAX bytes).
 * Returns 0 on success, -EFAULT for a bad user address, -ENAMETOOLONG
 * if the name does not fit, -ENOENT for an empty name.
 */
static int do_getname(const char __user *filename, char *page)
{
	unsigned long max_len = PATH_MAX;
	int copied;

	if (!segment_eq(get_fs(), KERNEL_DS)) {
		unsigned long addr = (unsigned long) filename;

		if (addr >= TASK_SIZE)
			return -EFAULT;
		/* clamp so the copy cannot run past the end of user space */
		if (TASK_SIZE - addr < PATH_MAX)
			max_len = TASK_SIZE - addr;
	}

	copied = strncpy_from_user(page, filename, max_len);
	if (copied > 0)
		return (copied < max_len) ? 0 : -ENAMETOOLONG;
	/* zero-length name maps to -ENOENT; otherwise propagate the fault */
	return copied ? copied : -ENOENT;
}
/*
 * Allocate a name buffer and fill it from user space.  An empty name is
 * tolerated when LOOKUP_EMPTY is set (and reported through *empty);
 * every other failure frees the buffer and returns an ERR_PTR.
 */
static char *getname_flags(const char __user *filename, int flags, int *empty)
{
	char *name = __getname();
	int err;

	if (!name)
		return ERR_PTR(-ENOMEM);

	err = do_getname(filename, name);
	if (err < 0) {
		if (err == -ENOENT && empty)
			*empty = 1;
		/* only an empty name with LOOKUP_EMPTY is allowed through */
		if (err != -ENOENT || !(flags & LOOKUP_EMPTY)) {
			__putname(name);
			return ERR_PTR(err);
		}
	}
	audit_getname(name);
	return name;
}
/* Copy a user-space pathname into a kernel buffer; no special flags. */
char *getname(const char __user * filename)
{
	return getname_flags(filename, 0, NULL);
}
#ifdef CONFIG_AUDITSYSCALL
/*
 * Release a name obtained from getname().  With an active syscall audit
 * context the audit code takes ownership and frees the buffer later;
 * otherwise it is freed immediately.
 */
void putname(const char *name)
{
	if (unlikely(!audit_dummy_context()))
		audit_putname(name);
	else
		__putname(name);
}
EXPORT_SYMBOL(putname);
#endif
/*
 * Check POSIX ACL permission for @mask.  Returns -EAGAIN when there is no
 * ACL to consult (caller falls back to mode bits), -ECHILD when the answer
 * cannot be produced without blocking in rcu-walk mode.
 */
static int check_acl(struct inode *inode, int mask)
{
#ifdef CONFIG_FS_POSIX_ACL
	struct posix_acl *acl;

	if (mask & MAY_NOT_BLOCK) {
		acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
		if (!acl)
			return -EAGAIN;
		/* no ->get_acl() calls in RCU mode... */
		if (acl == ACL_NOT_CACHED)
			return -ECHILD;
		return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK);
	}

	acl = get_cached_acl(inode, ACL_TYPE_ACCESS);

	/*
	 * A filesystem can force a ACL callback by just never filling the
	 * ACL cache. But normally you'd fill the cache either at inode
	 * instantiation time, or on the first ->get_acl call.
	 *
	 * If the filesystem doesn't have a get_acl() function at all, we'll
	 * just create the negative cache entry.
	 */
	if (acl == ACL_NOT_CACHED) {
		if (inode->i_op->get_acl) {
			acl = inode->i_op->get_acl(inode, ACL_TYPE_ACCESS);
			if (IS_ERR(acl))
				return PTR_ERR(acl);
		} else {
			/* no ->get_acl(): cache the absence for next time */
			set_cached_acl(inode, ACL_TYPE_ACCESS, NULL);
			return -EAGAIN;
		}
	}

	if (acl) {
		int error = posix_acl_permission(inode, acl, mask);
		posix_acl_release(acl);	/* drop ref from get_cached_acl() */
		return error;
	}
#endif

	return -EAGAIN;
}
/*
* This does the basic permission checking
*/
/*
 * This does the basic permission checking: pick the owner/group/other
 * slice of i_mode for the caller (consulting POSIX ACLs for the non-owner
 * case), then test the requested @mask bits against it.
 */
static int acl_permission_check(struct inode *inode, int mask)
{
	unsigned int mode = inode->i_mode;

	/* mode bits are only meaningful inside the inode's user namespace */
	if (current_user_ns() != inode_userns(inode))
		goto other_perms;

	if (likely(current_fsuid() == inode->i_uid))
		mode >>= 6;	/* owner bits */
	else {
		if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
			int error = check_acl(inode, mask);
			if (error != -EAGAIN)
				return error;	/* ACL answered definitively */
		}

		if (in_group_p(inode->i_gid))
			mode >>= 3;	/* group bits */
	}

other_perms:
	/*
	 * If the DACs are ok we don't need any capability check.
	 */
	if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
		return 0;
	return -EACCES;
}
/**
* generic_permission - check for access rights on a Posix-like filesystem
* @inode: inode to check access rights for
* @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...)
*
* Used to check for read/write/execute permissions on a file.
* We use "fsuid" for this, letting us set arbitrary permissions
* for filesystem access without changing the "normal" uids which
* are used for other things.
*
* generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk
* request cannot be satisfied (eg. requires blocking or too much complexity).
* It would then be called again in ref-walk mode.
*/
int generic_permission(struct inode *inode, int mask)
{
	int ret;

	/*
	 * Do the basic permission checks.
	 */
	ret = acl_permission_check(inode, mask);
	if (ret != -EACCES)
		return ret;	/* granted, -ECHILD, or a real error */

	if (S_ISDIR(inode->i_mode)) {
		/* DACs are overridable for directories */
		if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
			return 0;
		if (!(mask & MAY_WRITE))
			/* read/search only needs the weaker capability */
			if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
				return 0;
		return -EACCES;
	}
	/*
	 * Read/write DACs are always overridable.
	 * Executable DACs are overridable when there is
	 * at least one exec bit set.
	 */
	if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
		if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
			return 0;

	/*
	 * Searching includes executable on directories, else just read.
	 */
	mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
	if (mask == MAY_READ)
		if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
			return 0;

	return -EACCES;
}
/*
* We _really_ want to just do "generic_permission()" without
* even looking at the inode->i_op values. So we keep a cache
* flag in inode->i_opflags, that says "this has not special
* permission function, use the fast case".
*/
static inline int do_inode_permission(struct inode *inode, int mask)
{
	if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
		if (likely(inode->i_op->permission))
			return inode->i_op->permission(inode, mask);

		/* This gets set once for the inode lifetime */
		/* no ->permission(): cache that fact so later checks skip i_op */
		spin_lock(&inode->i_lock);
		inode->i_opflags |= IOP_FASTPERM;
		spin_unlock(&inode->i_lock);
	}
	return generic_permission(inode, mask);
}
/**
* inode_permission - check for access rights to a given inode
* @inode: inode to check permission on
* @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...)
*
* Used to check for read/write/execute permissions on an inode.
* We use "fsuid" for this, letting us set arbitrary permissions
* for filesystem access without changing the "normal" uids which
* are used for other things.
*
* When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
*/
int inode_permission(struct inode *inode, int mask)
{
	int retval;

	if (unlikely(mask & MAY_WRITE)) {
		umode_t mode = inode->i_mode;

		/*
		 * Nobody gets write access to a read-only fs.
		 */
		if (IS_RDONLY(inode) &&
			(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
			return -EROFS;

		/*
		 * Nobody gets write access to an immutable file.
		 */
		if (IS_IMMUTABLE(inode))
			return -EACCES;
	}

	/* basic DAC/ACL check, then cgroup device check, then LSM hook */
	retval = do_inode_permission(inode, mask);
	if (retval)
		return retval;

	retval = devcgroup_inode_permission(inode, mask);
	if (retval)
		return retval;

	return security_inode_permission(inode, mask);
}
/**
* path_get - get a reference to a path
* @path: path to get the reference to
*
* Given a path increment the reference count to the dentry and the vfsmount.
*/
void path_get(struct path *path)
{
	mntget(path->mnt);	/* pin the vfsmount */
	dget(path->dentry);	/* pin the dentry */
}
EXPORT_SYMBOL(path_get);
/**
* path_put - put a reference to a path
* @path: path to put the reference to
*
* Given a path decrement the reference count to the dentry and the vfsmount.
*/
void path_put(struct path *path)
{
	dput(path->dentry);	/* release in reverse order of path_get() */
	mntput(path->mnt);
}
EXPORT_SYMBOL(path_put);
/*
* Path walking has 2 modes, rcu-walk and ref-walk (see
* Documentation/filesystems/path-lookup.txt). In situations when we can't
* continue in RCU mode, we attempt to drop out of rcu-walk mode and grab
* normal reference counts on dentries and vfsmounts to transition to rcu-walk
* mode. Refcounts are grabbed at the last known good point before rcu-walk
* got stuck, so ref-walk may continue from there. If this is not successful
* (eg. a seqcount has changed), then failure is returned and it's up to caller
* to restart the path walk from the beginning in ref-walk mode.
*/
/**
* unlazy_walk - try to switch to ref-walk mode.
* @nd: nameidata pathwalk data
* @dentry: child of nd->path.dentry or NULL
* Returns: 0 on success, -ECHILD on failure
*
* unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry
* for ref-walk mode. @dentry must be a path found by a do_lookup call on
* @nd or NULL. Must be called from rcu-walk context.
*/
static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
{
	struct fs_struct *fs = current->fs;
	struct dentry *parent = nd->path.dentry;
	int want_root = 0;

	BUG_ON(!(nd->flags & LOOKUP_RCU));
	if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
		/* nd->root was sampled from fs->root: revalidate under fs->lock */
		want_root = 1;
		spin_lock(&fs->lock);
		if (nd->root.mnt != fs->root.mnt ||
			nd->root.dentry != fs->root.dentry)
			goto err_root;
	}
	spin_lock(&parent->d_lock);
	if (!dentry) {
		/* legitimize only the current position */
		if (!__d_rcu_to_refcount(parent, nd->seq))
			goto err_parent;
		BUG_ON(nd->inode != parent->d_inode);
	} else {
		if (dentry->d_parent != parent)
			goto err_parent;
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (!__d_rcu_to_refcount(dentry, nd->seq))
			goto err_child;
		/*
		 * If the sequence check on the child dentry passed, then
		 * the child has not been removed from its parent. This
		 * means the parent dentry must be valid and able to take
		 * a reference at this point.
		 */
		BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent);
		BUG_ON(!parent->d_count);
		parent->d_count++;
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&parent->d_lock);
	if (want_root) {
		path_get(&nd->root);
		spin_unlock(&fs->lock);
	}
	mntget(nd->path.mnt);

	/* real references are now held: safe to leave rcu-walk */
	rcu_read_unlock();
	br_read_unlock(&vfsmount_lock);
	nd->flags &= ~LOOKUP_RCU;
	return 0;

err_child:
	spin_unlock(&dentry->d_lock);
err_parent:
	spin_unlock(&parent->d_lock);
err_root:
	if (want_root)
		spin_unlock(&fs->lock);
	return -ECHILD;
}
/**
* release_open_intent - free up open intent resources
* @nd: pointer to nameidata
*/
void release_open_intent(struct nameidata *nd)
{
	struct file *filp = nd->intent.open.file;

	/* nothing allocated, or the allocation itself failed */
	if (!filp || IS_ERR(filp))
		return;

	if (filp->f_path.dentry == NULL)
		put_filp(filp);	/* never installed: free the bare struct file */
	else
		fput(filp);	/* fully opened: normal release */
}
/* Invoke the fs ->d_revalidate(); callers have checked that it exists. */
static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	return dentry->d_op->d_revalidate(dentry, nd);
}
/**
* complete_walk - successful completion of path walk
* @nd: pointer nameidata
*
* If we had been in RCU mode, drop out of it and legitimize nd->path.
* Revalidate the final result, unless we'd already done that during
* the path walk or the filesystem doesn't ask for it. Return 0 on
* success, -error on failure. In case of failure caller does not
* need to drop nd->path.
*/
static int complete_walk(struct nameidata *nd)
{
	struct dentry *dentry = nd->path.dentry;
	int status;

	if (nd->flags & LOOKUP_RCU) {
		/* take real references and drop out of rcu-walk */
		nd->flags &= ~LOOKUP_RCU;
		if (!(nd->flags & LOOKUP_ROOT))
			nd->root.mnt = NULL;
		spin_lock(&dentry->d_lock);
		if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) {
			/* dentry changed under us: caller restarts in ref-walk */
			spin_unlock(&dentry->d_lock);
			rcu_read_unlock();
			br_read_unlock(&vfsmount_lock);
			return -ECHILD;
		}
		BUG_ON(nd->inode != dentry->d_inode);
		spin_unlock(&dentry->d_lock);
		mntget(nd->path.mnt);
		rcu_read_unlock();
		br_read_unlock(&vfsmount_lock);
	}

	/* revalidate only if we jumped (symlink/mountpoint) onto the result */
	if (likely(!(nd->flags & LOOKUP_JUMPED)))
		return 0;

	if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE)))
		return 0;

	if (likely(!(dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)))
		return 0;

	/* Note: we do not d_invalidate() */
	status = d_revalidate(dentry, nd);
	if (status > 0)
		return 0;

	if (!status)
		status = -ESTALE;

	path_put(&nd->path);
	return status;
}
/* Lazily populate nd->root from the task's fs root (ref-walk mode). */
static __always_inline void set_root(struct nameidata *nd)
{
	if (!nd->root.mnt)
		get_fs_root(current->fs, &nd->root);
}
static int link_path_walk(const char *, struct nameidata *);
/*
 * Lazily populate nd->root in rcu-walk mode: sample fs->root under the
 * seqcount so a consistent mnt/dentry pair is obtained without references.
 */
static __always_inline void set_root_rcu(struct nameidata *nd)
{
	if (!nd->root.mnt) {
		struct fs_struct *fs = current->fs;
		unsigned seq;

		do {
			seq = read_seqcount_begin(&fs->seq);
			nd->root = fs->root;
			nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
		} while (read_seqcount_retry(&fs->seq, seq));
	}
}
/*
 * Walk the body of a symlink.  An absolute link restarts the walk from
 * the root; on error the current path reference is dropped.
 */
static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link)
{
	int ret;

	if (IS_ERR(link))
		goto fail;

	if (*link == '/') {
		/* absolute symlink: jump back to the root */
		set_root(nd);
		path_put(&nd->path);
		nd->path = nd->root;
		path_get(&nd->root);
		nd->flags |= LOOKUP_JUMPED;
	}
	nd->inode = nd->path.dentry->d_inode;

	ret = link_path_walk(link, nd);
	return ret;
fail:
	path_put(&nd->path);
	return PTR_ERR(link);
}
/* Drop a path's refs; keep the mnt ref when it is shared with nd->path. */
static void path_put_conditional(struct path *path, struct nameidata *nd)
{
	dput(path->dentry);
	if (path->mnt != nd->path.mnt)
		mntput(path->mnt);
}
/*
 * Move @path into nd->path.  In ref-walk mode the old references are
 * dropped (the mnt ref only when it actually changes); in rcu-walk mode
 * no references are held, so the fields are simply overwritten.
 */
static inline void path_to_nameidata(const struct path *path,
					struct nameidata *nd)
{
	if (!(nd->flags & LOOKUP_RCU)) {
		dput(nd->path.dentry);
		if (nd->path.mnt != path->mnt)
			mntput(nd->path.mnt);
	}
	nd->path.mnt = path->mnt;
	nd->path.dentry = path->dentry;
}
/* Undo follow_link(): let the fs release @cookie, then drop the path ref. */
static inline void put_link(struct nameidata *nd, struct path *link, void *cookie)
{
	struct inode *inode = link->dentry->d_inode;
	if (!IS_ERR(cookie) && inode->i_op->put_link)
		inode->i_op->put_link(link->dentry, nd, cookie);
	path_put(link);
}
/*
 * Follow one symlink: enforce the total link limit, call the fs's
 * ->follow_link() and walk the result.  *p returns the fs cookie for a
 * later put_link() (or an ERR_PTR meaning "no put_link needed").
 */
static __always_inline int
follow_link(struct path *link, struct nameidata *nd, void **p)
{
	int error;
	struct dentry *dentry = link->dentry;

	BUG_ON(nd->flags & LOOKUP_RCU);

	/* balance the ref that path_put(link) in put_link() will drop */
	if (link->mnt == nd->path.mnt)
		mntget(link->mnt);

	if (unlikely(current->total_link_count >= 40)) {
		*p = ERR_PTR(-ELOOP); /* no ->put_link(), please */
		path_put(&nd->path);
		return -ELOOP;
	}
	cond_resched();
	current->total_link_count++;

	touch_atime(link);
	nd_set_link(nd, NULL);

	error = security_inode_follow_link(link->dentry, nd);
	if (error) {
		*p = ERR_PTR(error); /* no ->put_link(), please */
		path_put(&nd->path);
		return error;
	}

	nd->last_type = LAST_BIND;
	*p = dentry->d_inode->i_op->follow_link(dentry, nd);
	error = PTR_ERR(*p);
	if (!IS_ERR(*p)) {
		char *s = nd_get_link(nd);
		error = 0;
		if (s)
			error = __vfs_follow_link(nd, s);
		else if (nd->last_type == LAST_BIND) {
			/* fs walked the link itself (e.g. a procfs jump) */
			nd->flags |= LOOKUP_JUMPED;
			nd->inode = nd->path.dentry->d_inode;
			if (nd->inode->i_op->follow_link) {
				/* stepped on a _really_ weird one */
				path_put(&nd->path);
				error = -ELOOP;
			}
		}
	}
	return error;
}
/*
 * rcu-walk variant of follow_up(): step to the parent mount without
 * taking references.  Returns 1 if a step was made, 0 at the tree root.
 */
static int follow_up_rcu(struct path *path)
{
	struct mount *this = real_mount(path->mnt);
	struct mount *parent = this->mnt_parent;

	if (&parent->mnt == path->mnt)
		return 0;	/* already at the root of the mount tree */

	path->dentry = this->mnt_mountpoint;
	path->mnt = &parent->mnt;
	return 1;
}
/*
* follow_up - Find the mountpoint of path's vfsmount
*
* Given a path, find the mountpoint of its source file system.
* Replace @path with the path of the mountpoint in the parent mount.
* Up is towards /.
*
* Return 1 if we went up a level and 0 if we were already at the
* root.
*/
int follow_up(struct path *path)
{
	struct mount *mnt = real_mount(path->mnt);
	struct mount *parent;
	struct dentry *mountpoint;

	br_read_lock(&vfsmount_lock);
	parent = mnt->mnt_parent;
	if (&parent->mnt == path->mnt) {
		/* already at the root of the mount tree */
		br_read_unlock(&vfsmount_lock);
		return 0;
	}
	/* take the refs under the lock, before the mount can be unlinked */
	mntget(&parent->mnt);
	mountpoint = dget(mnt->mnt_mountpoint);
	br_read_unlock(&vfsmount_lock);
	dput(path->dentry);
	path->dentry = mountpoint;
	mntput(path->mnt);
	path->mnt = &parent->mnt;
	return 1;
}
/*
* Perform an automount
* - return -EISDIR to tell follow_managed() to stop and return the path we
* were called with.
*/
static int follow_automount(struct path *path, unsigned flags,
				bool *need_mntput)
{
	struct vfsmount *mnt;
	int err;

	if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
		return -EREMOTE;

	/* We don't want to mount if someone's just doing a stat -
	 * unless they're stat'ing a directory and appended a '/' to
	 * the name.
	 *
	 * We do, however, want to mount if someone wants to open or
	 * create a file of any type under the mountpoint, wants to
	 * traverse through the mountpoint or wants to open the
	 * mounted directory.  Also, autofs may mark negative dentries
	 * as being automount points.  These will need the attentions
	 * of the daemon to instantiate them before they can be used.
	 */
	if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
			LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
		path->dentry->d_inode)
		return -EISDIR;

	/* automounts count against the symlink limit to bound recursion */
	current->total_link_count++;
	if (current->total_link_count >= 40)
		return -ELOOP;

	mnt = path->dentry->d_op->d_automount(path);
	if (IS_ERR(mnt)) {
		/*
		 * The filesystem is allowed to return -EISDIR here to indicate
		 * it doesn't want to automount.  For instance, autofs would do
		 * this so that its userspace daemon can mount on this dentry.
		 *
		 * However, we can only permit this if it's a terminal point in
		 * the path being looked up; if it wasn't then the remainder of
		 * the path is inaccessible and we should say so.
		 */
		if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_PARENT))
			return -EREMOTE;
		return PTR_ERR(mnt);
	}

	if (!mnt) /* mount collision */
		return 0;

	if (!*need_mntput) {
		/* lock_mount() may release path->mnt on error */
		mntget(path->mnt);
		*need_mntput = true;
	}
	err = finish_automount(mnt, path);

	switch (err) {
	case -EBUSY:
		/* Someone else made a mount here whilst we were busy */
		return 0;
	case 0:
		/* success: switch path to the root of the new mount */
		path_put(path);
		path->mnt = mnt;
		path->dentry = dget(mnt->mnt_root);
		return 0;
	default:
		return err;
	}
}
/*
* Handle a dentry that is managed in some way.
* - Flagged for transit management (autofs)
* - Flagged as mountpoint
* - Flagged as automount point
*
* This may only be called in refwalk mode.
*
* Serialization is taken care of in namespace.c
*/
static int follow_managed(struct path *path, unsigned flags)
{
	struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */
	unsigned managed;
	bool need_mntput = false;
	int ret = 0;

	/* Given that we're not holding a lock here, we retain the value in a
	 * local variable for each dentry as we look at it so that we don't see
	 * the components of that value change under us */
	while (managed = ACCESS_ONCE(path->dentry->d_flags),
		managed &= DCACHE_MANAGED_DENTRY,
		unlikely(managed != 0)) {
		/* Allow the filesystem to manage the transit without i_mutex
		 * being held. */
		if (managed & DCACHE_MANAGE_TRANSIT) {
			BUG_ON(!path->dentry->d_op);
			BUG_ON(!path->dentry->d_op->d_manage);
			ret = path->dentry->d_op->d_manage(path->dentry, false);
			if (ret < 0)
				break;
		}

		/* Transit to a mounted filesystem. */
		if (managed & DCACHE_MOUNTED) {
			struct vfsmount *mounted = lookup_mnt(path);
			if (mounted) {
				dput(path->dentry);
				if (need_mntput)
					mntput(path->mnt);
				path->mnt = mounted;
				path->dentry = dget(mounted->mnt_root);
				need_mntput = true;
				continue;
			}

			/* Something is mounted on this dentry in another
			 * namespace and/or whatever was mounted there in this
			 * namespace got unmounted before we managed to get the
			 * vfsmount_lock */
		}

		/* Handle an automount point */
		if (managed & DCACHE_NEED_AUTOMOUNT) {
			ret = follow_automount(path, flags, &need_mntput);
			if (ret < 0)
				break;
			continue;
		}

		/* We didn't change the current path point */
		break;
	}

	/* drop the extra mnt ref if we ended up back on the caller's mount */
	if (need_mntput && path->mnt == mnt)
		mntput(path->mnt);
	if (ret == -EISDIR)
		ret = 0;	/* -EISDIR means "stop and use the path as-is" */
	return ret < 0 ? ret : need_mntput;
}
/*
 * Step from a mountpoint to whatever is mounted on it, if anything.
 * Returns 1 if *path was updated to the covering mount, 0 otherwise.
 */
int follow_down_one(struct path *path)
{
	struct vfsmount *child = lookup_mnt(path);

	if (!child)
		return 0;

	/* Swap the mountpoint references for the covering mount's root. */
	dput(path->dentry);
	mntput(path->mnt);
	path->mnt = child;
	path->dentry = dget(child->mnt_root);
	return 1;
}
static inline bool managed_dentry_might_block(struct dentry *dentry)
{
return (dentry->d_flags & DCACHE_MANAGE_TRANSIT &&
dentry->d_op->d_manage(dentry, true) < 0);
}
/*
* Try to skip to top of mountpoint pile in rcuwalk mode. Fail if
* we meet a managed dentry that would need blocking.
*/
static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
			       struct inode **inode)
{
	for (;;) {
		struct mount *mounted;
		/*
		 * Don't forget we might have a non-mountpoint managed dentry
		 * that wants to block transit.
		 */
		if (unlikely(managed_dentry_might_block(path->dentry)))
			/* caller must drop out of rcu-walk on false */
			return false;

		if (!d_mountpoint(path->dentry))
			break;

		mounted = __lookup_mnt(path->mnt, path->dentry, 1);
		if (!mounted)
			break;
		/* hop onto the mounted filesystem's root, lock-free */
		path->mnt = &mounted->mnt;
		path->dentry = mounted->mnt.mnt_root;
		nd->flags |= LOOKUP_JUMPED;
		nd->seq = read_seqcount_begin(&path->dentry->d_seq);
		/*
		 * Update the inode too. We don't need to re-check the
		 * dentry sequence number here after this d_inode read,
		 * because a mount-point is always pinned.
		 */
		*inode = path->dentry->d_inode;
	}
	return true;
}
/* rcu-walk analogue of follow_mount(): hop to the topmost mount here. */
static void follow_mount_rcu(struct nameidata *nd)
{
	while (d_mountpoint(nd->path.dentry)) {
		struct mount *mounted;
		mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry, 1);
		if (!mounted)
			break;
		nd->path.mnt = &mounted->mnt;
		nd->path.dentry = mounted->mnt.mnt_root;
		/* re-sample the seqcount for the new dentry */
		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
	}
}
/*
 * rcu-walk "..": move to the parent (crossing mount roots downwards)
 * without taking references.  Returns 0 on success, -ECHILD if a
 * seqcount check failed and the caller must restart in ref-walk mode.
 */
static int follow_dotdot_rcu(struct nameidata *nd)
{
	set_root_rcu(nd);

	while (1) {
		/* never walk above the effective root */
		if (nd->path.dentry == nd->root.dentry &&
		    nd->path.mnt == nd->root.mnt) {
			break;
		}
		if (nd->path.dentry != nd->path.mnt->mnt_root) {
			struct dentry *old = nd->path.dentry;
			struct dentry *parent = old->d_parent;
			unsigned seq;

			seq = read_seqcount_begin(&parent->d_seq);
			/* validate the child before trusting its ->d_parent */
			if (read_seqcount_retry(&old->d_seq, nd->seq))
				goto failed;
			nd->path.dentry = parent;
			nd->seq = seq;
			break;
		}
		/* at a mount root: step to the mountpoint underneath */
		if (!follow_up_rcu(&nd->path))
			break;
		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
	}
	follow_mount_rcu(nd);
	nd->inode = nd->path.dentry->d_inode;
	return 0;

failed:
	/* fall out of rcu-walk: release the locks held in that mode */
	nd->flags &= ~LOOKUP_RCU;
	if (!(nd->flags & LOOKUP_ROOT))
		nd->root.mnt = NULL;
	rcu_read_unlock();
	br_read_unlock(&vfsmount_lock);
	return -ECHILD;
}
/*
* Follow down to the covering mount currently visible to userspace. At each
* point, the filesystem owning that dentry may be queried as to whether the
* caller is permitted to proceed or not.
*/
int follow_down(struct path *path)
{
	unsigned managed;
	int ret;

	while (managed = ACCESS_ONCE(path->dentry->d_flags),
	       unlikely(managed & DCACHE_MANAGED_DENTRY)) {
		/* Allow the filesystem to manage the transit without i_mutex
		 * being held.
		 *
		 * We indicate to the filesystem if someone is trying to mount
		 * something here.  This gives autofs the chance to deny anyone
		 * other than its daemon the right to mount on its
		 * superstructure.
		 *
		 * The filesystem may sleep at this point.
		 */
		if (managed & DCACHE_MANAGE_TRANSIT) {
			BUG_ON(!path->dentry->d_op);
			BUG_ON(!path->dentry->d_op->d_manage);
			ret = path->dentry->d_op->d_manage(
				path->dentry, false);
			if (ret < 0)
				/* -EISDIR means "stop here", not an error */
				return ret == -EISDIR ? 0 : ret;
		}

		/* Transit to a mounted filesystem. */
		if (managed & DCACHE_MOUNTED) {
			struct vfsmount *mounted = lookup_mnt(path);
			if (!mounted)
				break;
			dput(path->dentry);
			mntput(path->mnt);
			path->mnt = mounted;
			path->dentry = dget(mounted->mnt_root);
			continue;
		}

		/* Don't handle automount points here */
		break;
	}
	return 0;
}
/*
* Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
*/
/*
 * Skip to top of mountpoint pile in refwalk mode for follow_dotdot():
 * repeatedly swap the current path for whatever is mounted on it.
 */
static void follow_mount(struct path *path)
{
	for (;;) {
		struct vfsmount *m;

		if (!d_mountpoint(path->dentry))
			break;
		m = lookup_mnt(path);
		if (!m)
			break;
		dput(path->dentry);
		mntput(path->mnt);
		path->mnt = m;
		path->dentry = dget(m->mnt_root);
	}
}
/* ref-walk "..": step to the parent, crossing mount boundaries. */
static void follow_dotdot(struct nameidata *nd)
{
	set_root(nd);

	while(1) {
		struct dentry *old = nd->path.dentry;

		/* never walk above the effective root */
		if (nd->path.dentry == nd->root.dentry &&
		    nd->path.mnt == nd->root.mnt) {
			break;
		}
		if (nd->path.dentry != nd->path.mnt->mnt_root) {
			/* rare case of legitimate dget_parent()... */
			nd->path.dentry = dget_parent(nd->path.dentry);
			dput(old);
			break;
		}
		/* at a mount root: move to the mountpoint below it */
		if (!follow_up(&nd->path))
			break;
	}
	follow_mount(&nd->path);
	nd->inode = nd->path.dentry->d_inode;
}
/*
* This looks up the name in dcache, possibly revalidates the old dentry and
* allocates a new one if not found or not valid. In the need_lookup argument
* returns whether i_op->lookup is necessary.
*
* dir->d_inode->i_mutex must be held
*/
static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
				    struct nameidata *nd, bool *need_lookup)
{
	struct dentry *dentry;
	int error;

	*need_lookup = false;
	dentry = d_lookup(dir, name);
	if (dentry) {
		if (d_need_lookup(dentry)) {
			/* hashed but never looked up: caller must do it */
			*need_lookup = true;
		} else if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
			error = d_revalidate(dentry, nd);
			if (unlikely(error <= 0)) {
				if (error < 0) {
					dput(dentry);
					return ERR_PTR(error);
				} else if (!d_invalidate(dentry)) {
					/* stale and droppable: retry below */
					dput(dentry);
					dentry = NULL;
				}
			}
		}
	}

	if (!dentry) {
		/* not cached (or invalidated): allocate a fresh negative
		 * dentry for ->lookup() to fill in */
		dentry = d_alloc(dir, name);
		if (unlikely(!dentry))
			return ERR_PTR(-ENOMEM);

		*need_lookup = true;
	}
	return dentry;
}
/*
* Call i_op->lookup on the dentry. The dentry must be negative but may be
 * hashed if it was populated with DCACHE_NEED_LOOKUP.
*
* dir->d_inode->i_mutex must be held
*/
/*
 * Ask the filesystem to look up @dentry in @dir.  If ->lookup() supplies
 * a different dentry (or an ERR_PTR), the caller's reference on @dentry
 * is dropped and the filesystem's result is returned instead.
 */
static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
				  struct nameidata *nd)
{
	struct dentry *result = dentry;
	struct dentry *found;

	/* Don't create child dentry for a dead directory. */
	if (unlikely(IS_DEADDIR(dir))) {
		dput(dentry);
		return ERR_PTR(-ENOENT);
	}

	found = dir->i_op->lookup(dir, dentry, nd);
	if (unlikely(found)) {
		/* the filesystem provided its own dentry */
		dput(dentry);
		result = found;
	}
	return result;
}
static struct dentry *__lookup_hash(struct qstr *name,
struct dentry *base, struct nameidata *nd)
{
bool need_lookup;
struct dentry *dentry;
dentry = lookup_dcache(name, base, nd, &need_lookup);
if (!need_lookup)
return dentry;
return lookup_real(base->d_inode, dentry, nd);
}
/*
* It's more convoluted than I'd like it to be, but... it's still fairly
* small and for now I'd prefer to have fast path as straight as possible.
* It _is_ time-critical.
*/
/*
 * do_lookup - look up the next path component
 *
 * Tries, in order: rcu-walk dcache lookup, ref-walk dcache lookup, and
 * finally the filesystem's ->lookup() under the parent's i_mutex.  On
 * success fills *path/*inode with the child (after transiting mounts).
 * Returns 0 on success, -ECHILD to demand a ref-walk restart, or
 * another negative errno.
 */
static int do_lookup(struct nameidata *nd, struct qstr *name,
		struct path *path, struct inode **inode)
{
	struct vfsmount *mnt = nd->path.mnt;
	struct dentry *dentry, *parent = nd->path.dentry;
	int need_reval = 1;
	int status = 1;
	int err;

	/*
	 * Rename seqlock is not required here because in the off chance
	 * of a false negative due to a concurrent rename, we're going to
	 * do the non-racy lookup, below.
	 */
	if (nd->flags & LOOKUP_RCU) {
		unsigned seq;
		*inode = nd->inode;
		dentry = __d_lookup_rcu(parent, name, &seq, inode);
		if (!dentry)
			goto unlazy;

		/* Memory barrier in read_seqcount_begin of child is enough */
		if (__read_seqcount_retry(&parent->d_seq, nd->seq))
			return -ECHILD;
		nd->seq = seq;

		if (unlikely(d_need_lookup(dentry)))
			goto unlazy;
		if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
			status = d_revalidate(dentry, nd);
			if (unlikely(status <= 0)) {
				/* remember a definitive "no" so we don't
				 * revalidate a second time below */
				if (status != -ECHILD)
					need_reval = 0;
				goto unlazy;
			}
		}
		path->mnt = mnt;
		path->dentry = dentry;
		if (unlikely(!__follow_mount_rcu(nd, path, inode)))
			goto unlazy;
		if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
			goto unlazy;
		return 0;
unlazy:
		/* leave rcu-walk mode and continue below in ref-walk */
		if (unlazy_walk(nd, dentry))
			return -ECHILD;
	} else {
		dentry = __d_lookup(parent, name);
	}

	if (unlikely(!dentry))
		goto need_lookup;

	if (unlikely(d_need_lookup(dentry))) {
		dput(dentry);
		goto need_lookup;
	}

	if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
		status = d_revalidate(dentry, nd);
	if (unlikely(status <= 0)) {
		if (status < 0) {
			dput(dentry);
			return status;
		}
		if (!d_invalidate(dentry)) {
			dput(dentry);
			goto need_lookup;
		}
	}
done:
	path->mnt = mnt;
	path->dentry = dentry;
	err = follow_managed(path, nd->flags);
	if (unlikely(err < 0)) {
		path_put_conditional(path, nd);
		return err;
	}
	/* positive err: follow_managed() crossed a mount */
	if (err)
		nd->flags |= LOOKUP_JUMPED;
	*inode = path->dentry->d_inode;
	return 0;

need_lookup:
	/* not in the dcache (or invalidated): do a real lookup */
	BUG_ON(nd->inode != parent->d_inode);

	mutex_lock(&parent->d_inode->i_mutex);
	dentry = __lookup_hash(name, parent, nd);
	mutex_unlock(&parent->d_inode->i_mutex);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	goto done;
}
/* Check exec ("search") permission on the current directory. */
static inline int may_lookup(struct nameidata *nd)
{
	int err;

	if (!(nd->flags & LOOKUP_RCU))
		return inode_permission(nd->inode, MAY_EXEC);

	/* rcu-walk: non-blocking check first; -ECHILD means "would block" */
	err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
	if (err != -ECHILD)
		return err;
	/* switch to ref-walk and redo the check in blocking mode */
	if (unlazy_walk(nd, NULL))
		return -ECHILD;
	return inode_permission(nd->inode, MAY_EXEC);
}
/* Handle "." (nothing to do) and ".." (walk up) components. */
static inline int handle_dots(struct nameidata *nd, int type)
{
	if (type != LAST_DOTDOT)
		return 0;

	if (nd->flags & LOOKUP_RCU)
		return follow_dotdot_rcu(nd) ? -ECHILD : 0;

	follow_dotdot(nd);
	return 0;
}
/*
 * Abandon the walk, dropping whatever state the nameidata holds: a path
 * reference in ref-walk mode; in rcu-walk mode the RCU read lock and
 * vfsmount lock (and the borrowed root pointer, unless the caller
 * supplied it via LOOKUP_ROOT).
 */
static void terminate_walk(struct nameidata *nd)
{
	if (!(nd->flags & LOOKUP_RCU)) {
		path_put(&nd->path);
	} else {
		nd->flags &= ~LOOKUP_RCU;
		if (!(nd->flags & LOOKUP_ROOT))
			nd->root.mnt = NULL;
		rcu_read_unlock();
		br_read_unlock(&vfsmount_lock);
	}
}
/*
* Do we need to follow links? We _really_ want to be able
* to do this check without having to look at inode->i_op,
* so we keep a cache of "no, this doesn't need follow_link"
* for the common case.
*/
static inline int should_follow_link(struct inode *inode, int follow)
{
	/* IOP_NOFOLLOW cached means "definitely not a symlink" */
	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
		if (likely(inode->i_op->follow_link))
			return follow;

		/* This gets set once for the inode lifetime */
		spin_lock(&inode->i_lock);
		inode->i_opflags |= IOP_NOFOLLOW;
		spin_unlock(&inode->i_lock);
	}
	return 0;
}
/*
 * walk_component - look up and step into a single path component
 *
 * Returns 1 if the component is a symlink the caller must follow,
 * 0 on success, or a negative errno (terminating the walk on error).
 */
static inline int walk_component(struct nameidata *nd, struct path *path,
		struct qstr *name, int type, int follow)
{
	struct inode *inode;
	int err;
	/*
	 * "." and ".." are special - ".." especially so because it has
	 * to be able to know about the current root directory and
	 * parent relationships.
	 */
	if (unlikely(type != LAST_NORM))
		return handle_dots(nd, type);
	err = do_lookup(nd, name, path, &inode);
	if (unlikely(err)) {
		terminate_walk(nd);
		return err;
	}
	if (!inode) {
		/* negative dentry: nothing there */
		path_to_nameidata(path, nd);
		terminate_walk(nd);
		return -ENOENT;
	}
	if (should_follow_link(inode, follow)) {
		if (nd->flags & LOOKUP_RCU) {
			/* symlink traversal needs references: leave rcu-walk */
			if (unlikely(unlazy_walk(nd, path->dentry))) {
				terminate_walk(nd);
				return -ECHILD;
			}
		}
		BUG_ON(inode != path->dentry->d_inode);
		return 1;
	}
	path_to_nameidata(path, nd);
	nd->inode = inode;
	return 0;
}
/*
* This limits recursive symlink follows to 8, while
* limiting consecutive symlinks to 40.
*
* Without that kind of total limit, nasty chains of consecutive
* symlinks can cause almost arbitrarily long lookups.
*/
static inline int nested_symlink(struct path *path, struct nameidata *nd)
{
	int res;

	/* guard against runaway symlink chains (limits described above) */
	if (unlikely(current->link_count >= MAX_NESTED_LINKS)) {
		path_put_conditional(path, nd);
		path_put(&nd->path);
		return -ELOOP;
	}
	BUG_ON(nd->depth >= MAX_NESTED_LINKS);

	nd->depth++;
	current->link_count++;

	do {
		struct path link = *path;
		void *cookie;

		res = follow_link(&link, nd, &cookie);
		if (!res)
			res = walk_component(nd, path, &nd->last,
					     nd->last_type, LOOKUP_FOLLOW);
		put_link(nd, &link, cookie);
	} while (res > 0);	/* > 0: the target was itself a symlink */

	current->link_count--;
	nd->depth--;
	return res;
}
/*
* We really don't want to look at inode->i_op->lookup
* when we don't have to. So we keep a cache bit in
* the inode ->i_opflags field that says "yes, we can
* do lookup on this inode".
*/
static inline int can_lookup(struct inode *inode)
{
if (likely(inode->i_opflags & IOP_LOOKUP))
return 1;
if (likely(!inode->i_op->lookup))
return 0;
/* We do this once for the lifetime of the inode */
spin_lock(&inode->i_lock);
inode->i_opflags |= IOP_LOOKUP;
spin_unlock(&inode->i_lock);
return 1;
}
/*
* We can do the critical dentry name comparison and hashing
* operations one word at a time, but we are limited to:
*
* - Architectures with fast unaligned word accesses. We could
* do a "get_unaligned()" if this helps and is sufficiently
* fast.
*
* - Little-endian machines (so that we can generate the mask
* of low bytes efficiently). Again, we *could* do a byte
* swapping load on big-endian architectures if that is not
* expensive enough to make the optimization worthless.
*
* - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
* do not trap on the (extremely unlikely) case of a page
* crossing operation.
*
* - Furthermore, we need an efficient 64-bit compile for the
* 64-bit case in order to generate the "number of bytes in
* the final mask". Again, that could be replaced with a
* efficient population count instruction or similar.
*/
#ifdef CONFIG_DCACHE_WORD_ACCESS
#include <asm/word-at-a-time.h>
#ifdef CONFIG_64BIT
/* Fold a 64-bit running hash down to the 32 bits the dcache uses. */
static inline unsigned int fold_hash(unsigned long hash)
{
	return hash_64(hash, 32);
}
#else /* 32-bit case */
#define fold_hash(x) (x)
#endif
/*
 * Hash @len bytes of @name one machine word at a time.  The final
 * partial word is loaded with load_unaligned_zeropad() (tolerating a
 * page-crossing read, per the comment above) and masked down to the
 * remaining bytes.
 */
unsigned int full_name_hash(const unsigned char *name, unsigned int len)
{
	unsigned long a, mask;
	unsigned long hash = 0;

	for (;;) {
		a = load_unaligned_zeropad(name);
		if (len < sizeof(unsigned long))
			break;
		hash += a;
		hash *= 9;
		name += sizeof(unsigned long);
		len -= sizeof(unsigned long);
		if (!len)
			goto done;
	}
	/* keep only the low 'len' bytes of the final partial word */
	mask = ~(~0ul << len*8);
	hash += mask & a;
done:
	return fold_hash(hash);
}
EXPORT_SYMBOL(full_name_hash);
/*
* Calculate the length and hash of the path component, and
* return the length of the component;
*/
static inline unsigned long hash_name(const char *name, unsigned int *hashp)
{
	unsigned long a, mask, hash, len;

	hash = a = 0;
	len = -sizeof(unsigned long);	/* so the first increment yields 0 */
	do {
		hash = (hash + a) * 9;
		len += sizeof(unsigned long);
		a = load_unaligned_zeropad(name+len);
		/* Do we have any NUL or '/' bytes in this word? */
		mask = has_zero(a) | has_zero(a ^ REPEAT_BYTE('/'));
	} while (!mask);

	/* The mask *below* the first high bit set */
	mask = (mask - 1) & ~mask;
	mask >>= 7;
	/* fold in only the bytes before the terminator */
	hash += a & mask;
	*hashp = fold_hash(hash);

	return len + count_masked_bytes(mask);
}
#else
/* Byte-at-a-time fallback hash over exactly @len bytes of @name. */
unsigned int full_name_hash(const unsigned char *name, unsigned int len)
{
	unsigned long hash = init_name_hash();
	unsigned int i;

	for (i = 0; i < len; i++)
		hash = partial_name_hash(name[i], hash);
	return end_name_hash(hash);
}
EXPORT_SYMBOL(full_name_hash);
/*
* We know there's a real path component here of at least
* one character.
*/
/*
 * Hash and measure one path component; the caller guarantees at least
 * one real character before the terminating NUL or '/'.
 */
static inline unsigned long hash_name(const char *name, unsigned int *hashp)
{
	unsigned long hash = init_name_hash();
	unsigned long len = 0;

	for (;;) {
		unsigned long c = (unsigned char)name[len];

		/* the first byte is always part of the component */
		if (len && (c == '\0' || c == '/'))
			break;
		hash = partial_name_hash(c, hash);
		len++;
	}
	*hashp = end_name_hash(hash);
	return len;
}
#endif
/*
* Name resolution.
* This is the basic name resolution function, turning a pathname into
* the final dentry. We expect 'base' to be positive and a directory.
*
* Returns 0 and nd will have valid dentry and mnt on success.
* Returns error and drops reference to input namei data on failure.
*/
static int link_path_walk(const char *name, struct nameidata *nd)
{
	struct path next;
	int err;

	/* leading slashes were already consumed by path_init()'s root setup */
	while (*name=='/')
		name++;
	if (!*name)
		return 0;

	/* At this point we know we have a real path component. */
	for(;;) {
		struct qstr this;
		long len;
		int type;

		/* need exec permission on each directory we step through */
		err = may_lookup(nd);
		if (err)
			break;

		len = hash_name(name, &this.hash);
		this.name = name;
		this.len = len;

		/* classify "." and ".." without a string compare */
		type = LAST_NORM;
		if (name[0] == '.') switch (len) {
			case 2:
				if (name[1] == '.') {
					type = LAST_DOTDOT;
					nd->flags |= LOOKUP_JUMPED;
				}
				break;
			case 1:
				type = LAST_DOT;
		}
		if (likely(type == LAST_NORM)) {
			struct dentry *parent = nd->path.dentry;
			nd->flags &= ~LOOKUP_JUMPED;
			/* let the filesystem substitute its own hash */
			if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
				err = parent->d_op->d_hash(parent, nd->inode,
							   &this);
				if (err < 0)
					break;
			}
		}

		if (!name[len])
			goto last_component;
		/*
		 * If it wasn't NUL, we know it was '/'. Skip that
		 * slash, and continue until no more slashes.
		 */
		do {
			len++;
		} while (unlikely(name[len] == '/'));
		if (!name[len])
			/* trailing slashes: still the last component */
			goto last_component;
		name += len;

		err = walk_component(nd, &next, &this, type, LOOKUP_FOLLOW);
		if (err < 0)
			return err;

		if (err) {
			/* component was a symlink: recurse into it */
			err = nested_symlink(&next, nd);
			if (err)
				return err;
		}
		if (can_lookup(nd->inode))
			continue;
		/* a non-directory in the middle of the path */
		err = -ENOTDIR;
		break;
		/* here ends the main loop */

last_component:
		/* the caller (or do_last) deals with the final component */
		nd->last = this;
		nd->last_type = type;
		return 0;
	}
	terminate_walk(nd);
	return err;
}
/*
 * path_init - set up the starting point of a walk
 *
 * Picks the starting path from, in order of precedence: a caller-given
 * root (LOOKUP_ROOT), the process root (absolute name), the cwd
 * (dfd == AT_FDCWD), or the directory referred to by @dfd.  In rcu-walk
 * mode it takes vfsmount_lock + rcu_read_lock and samples seqcounts
 * instead of taking references; *fp is set when an fd reference must be
 * dropped later by the caller.
 */
static int path_init(int dfd, const char *name, unsigned int flags,
		     struct nameidata *nd, struct file **fp)
{
	int retval = 0;
	int fput_needed;
	struct file *file;

	nd->last_type = LAST_ROOT; /* if there are only slashes... */
	nd->flags = flags | LOOKUP_JUMPED;
	nd->depth = 0;
	if (flags & LOOKUP_ROOT) {
		struct inode *inode = nd->root.dentry->d_inode;
		if (*name) {
			if (!inode->i_op->lookup)
				return -ENOTDIR;
			retval = inode_permission(inode, MAY_EXEC);
			if (retval)
				return retval;
		}
		nd->path = nd->root;
		nd->inode = inode;
		if (flags & LOOKUP_RCU) {
			br_read_lock(&vfsmount_lock);
			rcu_read_lock();
			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
		} else {
			path_get(&nd->path);
		}
		return 0;
	}

	nd->root.mnt = NULL;

	if (*name=='/') {
		/* absolute path: start from the process root */
		if (flags & LOOKUP_RCU) {
			br_read_lock(&vfsmount_lock);
			rcu_read_lock();
			set_root_rcu(nd);
		} else {
			set_root(nd);
			path_get(&nd->root);
		}
		nd->path = nd->root;
	} else if (dfd == AT_FDCWD) {
		if (flags & LOOKUP_RCU) {
			struct fs_struct *fs = current->fs;
			unsigned seq;

			br_read_lock(&vfsmount_lock);
			rcu_read_lock();

			/* sample a consistent cwd under fs->seq */
			do {
				seq = read_seqcount_begin(&fs->seq);
				nd->path = fs->pwd;
				nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
			} while (read_seqcount_retry(&fs->seq, seq));
		} else {
			get_fs_pwd(current->fs, &nd->path);
		}
	} else {
		/* start from the directory given by the file descriptor */
		struct dentry *dentry;

		file = fget_raw_light(dfd, &fput_needed);
		retval = -EBADF;
		if (!file)
			goto out_fail;

		dentry = file->f_path.dentry;

		if (*name) {
			retval = -ENOTDIR;
			if (!S_ISDIR(dentry->d_inode->i_mode))
				goto fput_fail;

			retval = inode_permission(dentry->d_inode, MAY_EXEC);
			if (retval)
				goto fput_fail;
		}

		nd->path = file->f_path;
		if (flags & LOOKUP_RCU) {
			/* caller must fput(*fp) once the walk is done */
			if (fput_needed)
				*fp = file;
			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
			br_read_lock(&vfsmount_lock);
			rcu_read_lock();
		} else {
			path_get(&file->f_path);
			fput_light(file, fput_needed);
		}
	}

	nd->inode = nd->path.dentry->d_inode;
	return 0;

fput_fail:
	fput_light(file, fput_needed);
out_fail:
	return retval;
}
static inline int lookup_last(struct nameidata *nd, struct path *path)
{
if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
nd->flags &= ~LOOKUP_PARENT;
return walk_component(nd, path, &nd->last, nd->last_type,
nd->flags & LOOKUP_FOLLOW);
}
/* Returns 0 and nd will be valid on success; returns error otherwise. */
static int path_lookupat(int dfd, const char *name,
				unsigned int flags, struct nameidata *nd)
{
	struct file *base = NULL;
	struct path path;
	int err;

	/*
	 * Path walking is largely split up into 2 different synchronisation
	 * schemes, rcu-walk and ref-walk (explained in
	 * Documentation/filesystems/path-lookup.txt). These share much of the
	 * path walk code, but some things particularly setup, cleanup, and
	 * following mounts are sufficiently divergent that functions are
	 * duplicated. Typically there is a function foo(), and its RCU
	 * analogue, foo_rcu().
	 *
	 * -ECHILD is the error number of choice (just to avoid clashes) that
	 * is returned if some aspect of an rcu-walk fails. Such an error must
	 * be handled by restarting a traditional ref-walk (which will always
	 * be able to complete).
	 */
	err = path_init(dfd, name, flags | LOOKUP_PARENT, nd, &base);

	if (unlikely(err))
		goto out;

	current->total_link_count = 0;
	err = link_path_walk(name, nd);

	if (!err && !(flags & LOOKUP_PARENT)) {
		err = lookup_last(nd, &path);
		/* err > 0: last component was a symlink; keep following */
		while (err > 0) {
			void *cookie;
			struct path link = path;
			nd->flags |= LOOKUP_PARENT;
			err = follow_link(&link, nd, &cookie);
			if (!err)
				err = lookup_last(nd, &path);
			put_link(nd, &link, cookie);
		}
	}

	if (!err)
		err = complete_walk(nd);

	/* LOOKUP_DIRECTORY: result must be something we can look up in */
	if (!err && nd->flags & LOOKUP_DIRECTORY) {
		if (!nd->inode->i_op->lookup) {
			path_put(&nd->path);
			err = -ENOTDIR;
		}
	}

out:
	/* base: fd reference handed back by path_init() in rcu mode */
	if (base)
		fput(base);

	if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
		path_put(&nd->root);
		nd->root.mnt = NULL;
	}
	return err;
}
/*
 * Top-level lookup driver: optimistic rcu-walk first, then plain
 * ref-walk on -ECHILD, then a forced-revalidation pass on -ESTALE.
 */
static int do_path_lookup(int dfd, const char *name,
				unsigned int flags, struct nameidata *nd)
{
	int retval;

	retval = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd);
	if (unlikely(retval == -ECHILD))
		retval = path_lookupat(dfd, name, flags, nd);
	if (unlikely(retval == -ESTALE))
		retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);

	if (likely(!retval) && unlikely(!audit_dummy_context()) &&
	    nd->path.dentry && nd->inode)
		audit_inode(name, nd->path.dentry);
	return retval;
}
/* does lookup, returns the object with parent locked */
struct dentry *kern_path_locked(const char *name, struct path *path)
{
	struct nameidata nd;
	struct dentry *d;
	int err = do_path_lookup(AT_FDCWD, name, LOOKUP_PARENT, &nd);
	if (err)
		return ERR_PTR(err);
	/* last component must be a normal name (not ".", ".." or empty) */
	if (nd.last_type != LAST_NORM) {
		path_put(&nd.path);
		return ERR_PTR(-EINVAL);
	}
	mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
	d = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
	if (IS_ERR(d)) {
		/* error: unlock and drop the parent before returning */
		mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
		path_put(&nd.path);
		return d;
	}
	/* success: caller receives the parent path with i_mutex still held */
	*path = nd.path;
	return d;
}
/* Resolve a kernel-space pathname into *path. */
int kern_path(const char *name, unsigned int flags, struct path *path)
{
	struct nameidata nd;
	int err = do_path_lookup(AT_FDCWD, name, flags, &nd);

	if (err)
		return err;
	*path = nd.path;
	return 0;
}
/**
* vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
* @dentry: pointer to dentry of the base directory
* @mnt: pointer to vfs mount of the base directory
* @name: pointer to file name
* @flags: lookup flags
* @path: pointer to struct path to fill
*/
int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
		    const char *name, unsigned int flags,
		    struct path *path)
{
	struct nameidata nd;
	int err;

	/* LOOKUP_ROOT makes the walk start at the given <mnt, dentry> pair */
	nd.root.mnt = mnt;
	nd.root.dentry = dentry;

	BUG_ON(flags & LOOKUP_PARENT);

	/* the first argument of do_path_lookup() is ignored with LOOKUP_ROOT */
	err = do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, &nd);
	if (!err)
		*path = nd.path;
	return err;
}
/*
* Restricted form of lookup. Doesn't follow links, single-component only,
* needs parent already locked. Doesn't follow mounts.
* SMP-safe.
*/
/* Look up nd->last in nd->path.dentry; parent i_mutex held by caller. */
static struct dentry *lookup_hash(struct nameidata *nd)
{
	return __lookup_hash(&nd->last, nd->path.dentry, nd);
}
/**
* lookup_one_len - filesystem helper to lookup single pathname component
* @name: pathname component to lookup
* @base: base directory to lookup from
* @len: maximum length @len should be interpreted to
*
* Note that this routine is purely a helper for filesystem usage and should
* not be called by generic code. Also note that by using this function the
* nameidata argument is passed to the filesystem methods and a filesystem
* using this helper needs to be prepared for that.
*/
struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
{
	struct qstr this;
	unsigned int c;
	int err;

	WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));

	this.name = name;
	this.len = len;
	this.hash = full_name_hash(name, len);
	if (!len)
		return ERR_PTR(-EACCES);

	/* reject anything that is not a single path component */
	while (len--) {
		c = *(const unsigned char *)name++;
		if (c == '/' || c == '\0')
			return ERR_PTR(-EACCES);
	}
	/*
	 * See if the low-level filesystem might want
	 * to use its own hash..
	 */
	if (base->d_flags & DCACHE_OP_HASH) {
		/* reuse the outer err: the old inner declaration shadowed it */
		err = base->d_op->d_hash(base, base->d_inode, &this);
		if (err < 0)
			return ERR_PTR(err);
	}

	err = inode_permission(base->d_inode, MAY_EXEC);
	if (err)
		return ERR_PTR(err);

	return __lookup_hash(&this, base, NULL);
}
/*
 * user_path_at_empty - copy a pathname from userspace and resolve it
 * @empty: if non-NULL, passed through to getname_flags() — presumably
 *         reports whether the user string was empty; confirm there.
 */
int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
		 struct path *path, int *empty)
{
	struct nameidata nd;
	char *tmp = getname_flags(name, flags, empty);
	int err = PTR_ERR(tmp);
	if (!IS_ERR(tmp)) {

		BUG_ON(flags & LOOKUP_PARENT);

		err = do_path_lookup(dfd, tmp, flags, &nd);
		putname(tmp);
		if (!err)
			*path = nd.path;
	}
	return err;
}
/* As user_path_at_empty(), for callers that ignore empty-name status. */
int user_path_at(int dfd, const char __user *name, unsigned flags,
		 struct path *path)
{
	return user_path_at_empty(dfd, name, flags, path, NULL);
}
/*
 * Copy a pathname from userspace and look up its parent directory.
 * On success *name receives the kernel copy; the caller must putname() it.
 */
static int user_path_parent(int dfd, const char __user *path,
				struct nameidata *nd, char **name)
{
	char *s = getname(path);
	int error;

	if (IS_ERR(s))
		return PTR_ERR(s);

	error = do_path_lookup(dfd, s, LOOKUP_PARENT, nd);
	if (error) {
		putname(s);
		return error;
	}
	*name = s;
	return 0;
}
/*
* It's inline, so penalty for filesystems that don't use sticky bit is
* minimal.
*/
/* Sticky-directory deletion check: 0 = allowed, non-zero = refused. */
static inline int check_sticky(struct inode *dir, struct inode *inode)
{
	uid_t fsuid = current_fsuid();

	if (!(dir->i_mode & S_ISVTX))
		return 0;
	/* owner of the victim or of the directory may proceed, but only
	 * within the inode's own user namespace */
	if (current_user_ns() == inode_userns(inode) &&
	    (inode->i_uid == fsuid || dir->i_uid == fsuid))
		return 0;
	/* otherwise CAP_FOWNER in the inode's userns is required */
	return !ns_capable(inode_userns(inode), CAP_FOWNER);
}
/*
* Check whether we can remove a link victim from directory dir, check
* whether the type of victim is right.
* 1. We can't do it if dir is read-only (done in permission())
* 2. We should have write and exec permissions on dir
* 3. We can't remove anything from append-only dir
* 4. We can't do anything with immutable dir (done in permission())
* 5. If the sticky bit on dir is set we should either
* a. be owner of dir, or
* b. be owner of victim, or
* c. have CAP_FOWNER capability
 *  6. If the victim is append-only or immutable we can't do anything with
* links pointing to it.
* 7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
* 8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
* 9. We can't remove a root or mountpoint.
* 10. We don't allow removal of NFS sillyrenamed files; it's handled by
* nfs_async_unlink().
*/
static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
{
	int error;

	if (!victim->d_inode)
		return -ENOENT;

	BUG_ON(victim->d_parent->d_inode != dir);
	audit_inode_child(victim, dir);

	/* rules 1, 2, 4 from the comment above */
	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))	/* rule 3 */
		return -EPERM;
	/* rules 5 and 6 */
	if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)||
	    IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
		return -EPERM;
	if (isdir) {
		if (!S_ISDIR(victim->d_inode->i_mode))	/* rule 7 */
			return -ENOTDIR;
		if (IS_ROOT(victim))	/* rule 9 */
			return -EBUSY;
	} else if (S_ISDIR(victim->d_inode->i_mode))	/* rule 8 */
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)	/* rule 10 */
		return -EBUSY;
	return 0;
}
/* Check whether we can create an object with dentry child in directory
* dir.
* 1. We can't do it if child already exists (open has special treatment for
* this case, but since we are inlined it's OK)
* 2. We can't do it if dir is read-only (done in permission())
* 3. We should have write and exec permissions on dir
* 4. We can't do it if dir is immutable (done in permission())
*/
static inline int may_create(struct inode *dir, struct dentry *child)
{
	/* an existing (positive) dentry can't be created over; open()
	 * handles that case specially (see comment above) */
	if (child->d_inode)
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	/* write+exec on the directory; read-only/immutable dirs are
	 * rejected inside inode_permission() */
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}
/*
* p1 and p2 should be directories on the same fs.
*/
/*
 * Lock both directories for rename.  Returns the dentry connecting the
 * ancestor to the descendant when one directory is an ancestor of the
 * other (so the caller can detect the relationship), NULL otherwise.
 * The I_MUTEX_PARENT/I_MUTEX_CHILD annotations encode the lock order:
 * ancestor first, then descendant.
 */
struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	/* same directory: a single lock suffices */
	if (p1 == p2) {
		mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
		return NULL;
	}

	/* s_vfs_rename_mutex serialises cross-directory renames so the
	 * ancestor relationship can't change while we hold the locks */
	mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex);

	/* p2 an ancestor of p1? then lock p2 before p1 */
	p = d_ancestor(p2, p1);
	if (p) {
		mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD);
		return p;
	}

	/* p1 an ancestor of p2? then lock p1 before p2 */
	p = d_ancestor(p1, p2);
	if (p) {
		mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
		return p;
	}

	/* unrelated directories: lock in argument order */
	mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
	return NULL;
}
/* Undo lock_rename(): release i_mutexes and, if distinct dirs were
 * involved, the per-sb rename mutex as well. */
void unlock_rename(struct dentry *p1, struct dentry *p2)
{
	mutex_unlock(&p1->d_inode->i_mutex);
	if (p1 == p2)
		return;
	mutex_unlock(&p2->d_inode->i_mutex);
	mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
}
/*
 * vfs_create - create a regular file in @dir
 * Checks may_create(), then the security hook, before calling the
 * filesystem's ->create(); notifies fsnotify on success.
 */
int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		struct nameidata *nd)
{
	int error = may_create(dir, dentry);
	if (error)
		return error;

	if (!dir->i_op->create)
		return -EACCES;	/* shouldn't it be ENOSYS? */
	/* only permission bits survive; force the regular-file type */
	mode &= S_IALLUGO;
	mode |= S_IFREG;
	error = security_inode_create(dir, dentry, mode);
	if (error)
		return error;
	error = dir->i_op->create(dir, dentry, mode, nd);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}
/* Final permission/sanity checks before a path may be opened. */
static int may_open(struct path *path, int acc_mode, int flag)
{
	struct dentry *dentry = path->dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	/* O_PATH? */
	if (!acc_mode)
		return 0;

	if (!inode)
		return -ENOENT;

	switch (inode->i_mode & S_IFMT) {
	case S_IFLNK:
		return -ELOOP;
	case S_IFDIR:
		/* directories may never be opened for writing */
		if (acc_mode & MAY_WRITE)
			return -EISDIR;
		break;
	case S_IFBLK:
	case S_IFCHR:
		/* device nodes are unusable on nodev mounts */
		if (path->mnt->mnt_flags & MNT_NODEV)
			return -EACCES;
		/*FALLTHRU*/
	case S_IFIFO:
	case S_IFSOCK:
		/* truncation is meaningless here; quietly drop it */
		flag &= ~O_TRUNC;
		break;
	}

	error = inode_permission(inode, acc_mode);
	if (error)
		return error;

	/*
	 * An append-only file must be opened in append mode for writing.
	 */
	if (IS_APPEND(inode)) {
		if  ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
			return -EPERM;
		if (flag & O_TRUNC)
			return -EPERM;
	}

	/* O_NOATIME can only be set by the owner or superuser */
	if (flag & O_NOATIME && !inode_owner_or_capable(inode))
		return -EPERM;

	return 0;
}
/* Truncate a just-opened file to zero length (O_TRUNC handling). */
static int handle_truncate(struct file *filp)
{
	struct path *path = &filp->f_path;
	struct inode *inode = path->dentry->d_inode;
	int error = get_write_access(inode);
	if (error)
		return error;
	/*
	 * Refuse to truncate files with mandatory locks held on them.
	 */
	error = locks_verify_locked(inode);
	if (!error)
		error = security_path_truncate(path);
	if (!error) {
		error = do_truncate(path->dentry, 0,
				    ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
				    filp);
	}
	/* balance the temporary write access taken above */
	put_write_access(inode);
	return error;
}
/*
 * Convert open(2) flags to the internal namei encoding: the otherwise
 * out-of-range O_ACCMODE value 3 is mapped down to 2.
 * NOTE(review): presumably so later access-mode checks treat it as
 * write access — confirm against ACC_MODE()/may_open() users.
 */
static inline int open_to_namei_flags(int flag)
{
	if ((flag & O_ACCMODE) == 3)
		flag--;
	return flag;
}
/*
* Handle the last step of open()
*/
/*
 * do_last - perform the final step of an open()-style path walk.
 * @nd:       walk state; on entry nd->path is the parent directory
 * @path:     scratch path used for the last component
 * @op:       decoded open flags / mode / lookup intent
 * @pathname: original pathname, used only for audit logging
 *
 * Returns the opened file, an ERR_PTR() on failure, or NULL when the
 * last component is a trailing symlink the caller must follow (see the
 * retry loop in path_openat()).
 */
static struct file *do_last(struct nameidata *nd, struct path *path,
		const struct open_flags *op, const char *pathname)
{
	struct dentry *dir = nd->path.dentry;
	struct dentry *dentry;
	int open_flag = op->open_flag;
	int will_truncate = open_flag & O_TRUNC;
	int want_write = 0;	/* set once mnt_want_write() succeeded */
	int acc_mode = op->acc_mode;
	struct file *filp;
	int error;
	nd->flags &= ~LOOKUP_PARENT;
	nd->flags |= op->intent;
	/* Trailing ".", "..", "/" or a bind target can never be O_CREATed. */
	switch (nd->last_type) {
	case LAST_DOTDOT:
	case LAST_DOT:
		error = handle_dots(nd, nd->last_type);
		if (error)
			return ERR_PTR(error);
		/* fallthrough */
	case LAST_ROOT:
		/* complete_walk() drops out of RCU mode if needed */
		error = complete_walk(nd);
		if (error)
			return ERR_PTR(error);
		audit_inode(pathname, nd->path.dentry);
		if (open_flag & O_CREAT) {
			error = -EISDIR;
			goto exit;
		}
		goto ok;
	case LAST_BIND:
		error = complete_walk(nd);
		if (error)
			return ERR_PTR(error);
		audit_inode(pathname, dir);
		goto ok;
	}
	if (!(open_flag & O_CREAT)) {
		/* Plain open of an existing object - no i_mutex needed. */
		int symlink_ok = 0;
		/* trailing slashes force following and require a directory */
		if (nd->last.name[nd->last.len])
			nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
		if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW))
			symlink_ok = 1;
		/* we _can_ be in RCU mode here */
		error = walk_component(nd, path, &nd->last, LAST_NORM,
					!symlink_ok);
		if (error < 0)
			return ERR_PTR(error);
		if (error) /* symlink */
			return NULL;
		/* sayonara */
		error = complete_walk(nd);
		if (error)
			return ERR_PTR(error);
		error = -ENOTDIR;
		if (nd->flags & LOOKUP_DIRECTORY) {
			/* a non-directory can't satisfy a trailing slash */
			if (!nd->inode->i_op->lookup)
				goto exit;
		}
		audit_inode(pathname, nd->path.dentry);
		goto ok;
	}
	/* create side of things */
	/*
	 * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED has been
	 * cleared when we got to the last component we are about to look up
	 */
	error = complete_walk(nd);
	if (error)
		return ERR_PTR(error);
	audit_inode(pathname, dir);
	error = -EISDIR;
	/* trailing slashes? */
	if (nd->last.name[nd->last.len])
		goto exit;
	/* Parent i_mutex serializes lookup + create against other creators. */
	mutex_lock(&dir->d_inode->i_mutex);
	dentry = lookup_hash(nd);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry)) {
		mutex_unlock(&dir->d_inode->i_mutex);
		goto exit;
	}
	path->dentry = dentry;
	path->mnt = nd->path.mnt;
	/* Negative dentry, just create the file */
	if (!dentry->d_inode) {
		umode_t mode = op->mode;
		if (!IS_POSIXACL(dir->d_inode))
			mode &= ~current_umask();
		/*
		 * This write is needed to ensure that a
		 * rw->ro transition does not occur between
		 * the time when the file is created and when
		 * a permanent write count is taken through
		 * the 'struct file' in nameidata_to_filp().
		 */
		error = mnt_want_write(nd->path.mnt);
		if (error)
			goto exit_mutex_unlock;
		want_write = 1;
		/* Don't check for write permission, don't truncate */
		open_flag &= ~O_TRUNC;
		will_truncate = 0;
		acc_mode = MAY_OPEN;
		error = security_path_mknod(&nd->path, dentry, mode, 0);
		if (error)
			goto exit_mutex_unlock;
		error = vfs_create(dir->d_inode, dentry, mode, nd);
		if (error)
			goto exit_mutex_unlock;
		mutex_unlock(&dir->d_inode->i_mutex);
		dput(nd->path.dentry);
		nd->path.dentry = dentry;
		goto common;
	}
	/*
	 * It already exists.
	 */
	mutex_unlock(&dir->d_inode->i_mutex);
	audit_inode(pathname, path->dentry);
	error = -EEXIST;
	if (open_flag & O_EXCL)
		goto exit_dput;
	/* cross automount points / mounted-over dentries */
	error = follow_managed(path, nd->flags);
	if (error < 0)
		goto exit_dput;
	if (error)
		nd->flags |= LOOKUP_JUMPED;
	error = -ENOENT;
	if (!path->dentry->d_inode)
		goto exit_dput;
	/* NULL tells the caller to handle the trailing symlink */
	if (path->dentry->d_inode->i_op->follow_link)
		return NULL;
	path_to_nameidata(path, nd);
	nd->inode = path->dentry->d_inode;
	/* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
	error = complete_walk(nd);
	if (error)
		return ERR_PTR(error);
	error = -EISDIR;
	if (S_ISDIR(nd->inode->i_mode))
		goto exit;
ok:
	/* O_TRUNC is meaningful only for regular files */
	if (!S_ISREG(nd->inode->i_mode))
		will_truncate = 0;
	if (will_truncate) {
		error = mnt_want_write(nd->path.mnt);
		if (error)
			goto exit;
		want_write = 1;
	}
common:
	error = may_open(&nd->path, acc_mode, open_flag);
	if (error)
		goto exit;
	filp = nameidata_to_filp(nd);
	if (!IS_ERR(filp)) {
		error = ima_file_check(filp, op->acc_mode);
		if (error) {
			fput(filp);
			filp = ERR_PTR(error);
		}
	}
	if (!IS_ERR(filp)) {
		if (will_truncate) {
			error = handle_truncate(filp);
			if (error) {
				fput(filp);
				filp = ERR_PTR(error);
			}
		}
	}
out:
	if (want_write)
		mnt_drop_write(nd->path.mnt);
	path_put(&nd->path);
	return filp;
exit_mutex_unlock:
	mutex_unlock(&dir->d_inode->i_mutex);
exit_dput:
	path_put_conditional(path, nd);
exit:
	filp = ERR_PTR(error);
	goto out;
}
/*
 * path_openat - core of open(2): allocate the file, walk the path, and
 * let do_last() finish the job, following trailing symlinks as needed.
 * The open intent stashed in @nd lets filesystems do atomic lookup+open.
 * Returns the file or an ERR_PTR(); -ECHILD from an RCU walk tells the
 * caller to retry in ref-walk mode.
 */
static struct file *path_openat(int dfd, const char *pathname,
		struct nameidata *nd, const struct open_flags *op, int flags)
{
	struct file *base = NULL;	/* pinned dfd file, if any */
	struct file *filp;
	struct path path;
	int error;
	filp = get_empty_filp();
	if (!filp)
		return ERR_PTR(-ENFILE);
	filp->f_flags = op->open_flag;
	nd->intent.open.file = filp;
	nd->intent.open.flags = open_to_namei_flags(op->open_flag);
	nd->intent.open.create_mode = op->mode;
	error = path_init(dfd, pathname, flags | LOOKUP_PARENT, nd, &base);
	if (unlikely(error))
		goto out_filp;
	/* reset the symlink-depth accounting for this walk */
	current->total_link_count = 0;
	error = link_path_walk(pathname, nd);
	if (unlikely(error))
		goto out_filp;
	filp = do_last(nd, &path, op, pathname);
	while (unlikely(!filp)) { /* trailing symlink */
		struct path link = path;
		void *cookie;
		if (!(nd->flags & LOOKUP_FOLLOW)) {
			/* O_NOFOLLOW (or O_PATH without follow): give up */
			path_put_conditional(&path, nd);
			path_put(&nd->path);
			filp = ERR_PTR(-ELOOP);
			break;
		}
		nd->flags |= LOOKUP_PARENT;
		nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
		error = follow_link(&link, nd, &cookie);
		if (unlikely(error))
			filp = ERR_PTR(error);
		else
			filp = do_last(nd, &path, op, pathname);
		put_link(nd, &link, cookie);
	}
out:
	if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT))
		path_put(&nd->root);
	if (base)
		fput(base);
	release_open_intent(nd);
	return filp;
out_filp:
	filp = ERR_PTR(error);
	goto out;
}
/*
 * Top-level open entry point: first attempt the walk in RCU (lazy)
 * mode, fall back to a reference walk on -ECHILD, and retry once more
 * with forced dentry revalidation on -ESTALE.
 */
struct file *do_filp_open(int dfd, const char *pathname,
		const struct open_flags *op, int flags)
{
	struct nameidata nd;
	struct file *res;

	res = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_RCU);
	if (unlikely(res == ERR_PTR(-ECHILD)))
		res = path_openat(dfd, pathname, &nd, op, flags);
	if (unlikely(res == ERR_PTR(-ESTALE)))
		res = path_openat(dfd, pathname, &nd, op,
				  flags | LOOKUP_REVAL);
	return res;
}
/*
 * do_file_open_root - like do_filp_open(), but resolve @name relative
 * to an explicit root (@mnt, @dentry) instead of cwd/dfd.  Refuses a
 * symlink root for LOOKUP_OPEN intents (-ELOOP).  Same RCU -> ref-walk
 * -> REVAL retry cascade as do_filp_open().
 */
struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
		const char *name, const struct open_flags *op, int flags)
{
	struct nameidata nd;
	struct file *file;
	nd.root.mnt = mnt;
	nd.root.dentry = dentry;
	flags |= LOOKUP_ROOT;
	/* a symlink as the starting point cannot be followed for open */
	if (dentry->d_inode->i_op->follow_link && op->intent & LOOKUP_OPEN)
		return ERR_PTR(-ELOOP);
	file = path_openat(-1, name, &nd, op, flags | LOOKUP_RCU);
	if (unlikely(file == ERR_PTR(-ECHILD)))
		file = path_openat(-1, name, &nd, op, flags);
	if (unlikely(file == ERR_PTR(-ESTALE)))
		file = path_openat(-1, name, &nd, op, flags | LOOKUP_REVAL);
	return file;
}
/*
 * kern_path_create - look up the parent of @pathname and return a
 * negative dentry for the last component, ready for creation.
 * On success *path holds the parent and the parent's i_mutex is held;
 * the caller must unlock it and path_put() when done.  On failure an
 * ERR_PTR() is returned and no locks are held.
 */
struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path, int is_dir)
{
	struct dentry *dentry = ERR_PTR(-EEXIST);
	struct nameidata nd;
	int error = do_path_lookup(dfd, pathname, LOOKUP_PARENT, &nd);
	if (error)
		return ERR_PTR(error);
	/*
	 * Yucky last component or no last component at all?
	 * (foo/., foo/.., /////)
	 */
	if (nd.last_type != LAST_NORM)
		goto out;
	nd.flags &= ~LOOKUP_PARENT;
	nd.flags |= LOOKUP_CREATE | LOOKUP_EXCL;
	nd.intent.open.flags = O_EXCL;
	/*
	 * Do the final lookup.
	 */
	mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_hash(&nd);
	if (IS_ERR(dentry))
		goto fail;
	if (dentry->d_inode)
		goto eexist;
	/*
	 * Special case - lookup gave negative, but... we had foo/bar/
	 * From the vfs_mknod() POV we just have a negative dentry -
	 * all is fine. Let's be bastards - you had / on the end, you've
	 * been asking for (non-existent) directory. -ENOENT for you.
	 */
	if (unlikely(!is_dir && nd.last.name[nd.last.len])) {
		dput(dentry);
		dentry = ERR_PTR(-ENOENT);
		goto fail;
	}
	/* success: hand the parent path (and its held i_mutex) to caller */
	*path = nd.path;
	return dentry;
eexist:
	dput(dentry);
	dentry = ERR_PTR(-EEXIST);
fail:
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
out:
	path_put(&nd.path);
	return dentry;
}
EXPORT_SYMBOL(kern_path_create);
/*
 * Userspace wrapper for kern_path_create(): copy @pathname in from user
 * memory, perform the parent lookup + final-component lookup, then
 * release the kernel copy of the name.  Returns the (possibly IS_ERR)
 * dentry; on success the parent's i_mutex is held, as with
 * kern_path_create().
 */
struct dentry *user_path_create(int dfd, const char __user *pathname, struct path *path, int is_dir)
{
	struct dentry *dentry;
	char *name = getname(pathname);

	if (IS_ERR(name))
		return ERR_CAST(name);
	dentry = kern_path_create(dfd, name, path, is_dir);
	putname(name);
	return dentry;
}
EXPORT_SYMBOL(user_path_create);
/*
 * Create a special file (device node, fifo, socket or regular file) in
 * @dir.  Permission, capability, device-cgroup and LSM checks all run
 * before the filesystem's ->mknod() method; an fsnotify create event is
 * generated on success.  Returns 0 or a negative errno.
 */
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
	int err;

	err = may_create(dir, dentry);
	if (err)
		return err;
	/* Device nodes require CAP_MKNOD in the directory's user namespace. */
	if (S_ISCHR(mode) || S_ISBLK(mode))
		if (!ns_capable(inode_userns(dir), CAP_MKNOD))
			return -EPERM;
	if (!dir->i_op->mknod)
		return -EPERM;
	err = devcgroup_inode_mknod(mode, dev);
	if (err)
		return err;
	err = security_inode_mknod(dir, dentry, mode, dev);
	if (err)
		return err;
	err = dir->i_op->mknod(dir, dentry, mode, dev);
	if (!err)
		fsnotify_create(dir, dentry);
	return err;
}
/*
 * Policy check for mknod(2): which file types may be created this way?
 * Regular files (including a zero mode, which means S_IFREG), character
 * and block devices, fifos and sockets are allowed; directories get
 * -EPERM (use mkdir), anything else -EINVAL.
 */
static int may_mknod(umode_t mode)
{
	umode_t type = mode & S_IFMT;

	if (type == S_IFDIR)
		return -EPERM;
	if (type == 0 ||		/* zero mode translates to S_IFREG */
	    type == S_IFREG || type == S_IFCHR || type == S_IFBLK ||
	    type == S_IFIFO || type == S_IFSOCK)
		return 0;
	return -EINVAL;
}
/*
 * mknodat(2): create a filesystem node.  user_path_create() returns a
 * negative dentry with the parent directory's i_mutex held; after the
 * umask/policy/write-access checks the mode's type bits select which
 * vfs_* helper performs the actual creation.
 */
SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
		unsigned, dev)
{
	struct dentry *dentry;
	struct path path;
	int error;
	/* directories are created with mkdir(2), never mknod(2) */
	if (S_ISDIR(mode))
		return -EPERM;
	dentry = user_path_create(dfd, filename, &path, 0);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	/* POSIX ACL filesystems apply the umask themselves */
	if (!IS_POSIXACL(path.dentry->d_inode))
		mode &= ~current_umask();
	error = may_mknod(mode);
	if (error)
		goto out_dput;
	error = mnt_want_write(path.mnt);
	if (error)
		goto out_dput;
	error = security_path_mknod(&path, dentry, mode, dev);
	if (error)
		goto out_drop_write;
	switch (mode & S_IFMT) {
		case 0: case S_IFREG:
			error = vfs_create(path.dentry->d_inode,dentry,mode,NULL);
			break;
		case S_IFCHR: case S_IFBLK:
			error = vfs_mknod(path.dentry->d_inode,dentry,mode,
					new_decode_dev(dev));
			break;
		case S_IFIFO: case S_IFSOCK:
			error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
			break;
	}
out_drop_write:
	mnt_drop_write(path.mnt);
out_dput:
	dput(dentry);
	/* drop the parent i_mutex taken by user_path_create() */
	mutex_unlock(&path.dentry->d_inode->i_mutex);
	path_put(&path);
	return error;
}
/* mknod(2): mknodat(2) relative to the current working directory. */
SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev)
{
	return sys_mknodat(AT_FDCWD, filename, mode, dev);
}
/*
 * Create directory @dentry in @dir.  Only permission bits and the
 * sticky bit of @mode are honoured.  Runs may_create() and LSM checks,
 * enforces the superblock's per-directory link limit, and emits an
 * fsnotify mkdir event on success.  Returns 0 or a negative errno.
 */
int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	unsigned max_links = dir->i_sb->s_max_links;
	int err = may_create(dir, dentry);

	if (err)
		return err;
	if (!dir->i_op->mkdir)
		return -EPERM;
	/* keep only permission bits and the sticky bit */
	mode &= (S_IRWXUGO|S_ISVTX);
	err = security_inode_mkdir(dir, dentry, mode);
	if (err)
		return err;
	/* s_max_links == 0 means "no limit" */
	if (max_links && dir->i_nlink >= max_links)
		return -EMLINK;
	err = dir->i_op->mkdir(dir, dentry, mode);
	if (!err)
		fsnotify_mkdir(dir, dentry);
	return err;
}
/*
 * mkdirat(2): create a directory.  user_path_create() hands back a
 * negative dentry with the parent's i_mutex held; the umask is applied
 * here unless the filesystem handles POSIX ACLs itself.
 */
SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
{
	struct dentry *dentry;
	struct path path;
	int error;
	dentry = user_path_create(dfd, pathname, &path, 1);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	/* POSIX ACL filesystems apply the umask themselves */
	if (!IS_POSIXACL(path.dentry->d_inode))
		mode &= ~current_umask();
	error = mnt_want_write(path.mnt);
	if (error)
		goto out_dput;
	error = security_path_mkdir(&path, dentry, mode);
	if (error)
		goto out_drop_write;
	error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
out_drop_write:
	mnt_drop_write(path.mnt);
out_dput:
	dput(dentry);
	/* drop the parent i_mutex taken by user_path_create() */
	mutex_unlock(&path.dentry->d_inode->i_mutex);
	path_put(&path);
	return error;
}
/* mkdir(2): mkdirat(2) relative to the current working directory. */
SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
{
	return sys_mkdirat(AT_FDCWD, pathname, mode);
}
/*
* The dentry_unhash() helper will try to drop the dentry early: we
* should have a usage count of 1 if we're the only user of this
* dentry, and if that is true (possibly after pruning the dcache),
* then we drop the dentry now.
*
 * A low-level filesystem can, if it chooses, legally
* do a
*
* if (!d_unhashed(dentry))
* return -EBUSY;
*
* if it cannot handle the case of removing a directory
* that is still in use by something else..
*/
void dentry_unhash(struct dentry *dentry)
{
	/* prune children first so our refcount can drop to 1 */
	shrink_dcache_parent(dentry);
	spin_lock(&dentry->d_lock);
	/* sole user (us): safe to unhash the dentry now */
	if (dentry->d_count == 1)
		__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
/*
 * Remove directory @dentry from @dir.  The victim's i_mutex is taken to
 * serialize against lookups and mounts; mount points refuse removal
 * with -EBUSY.  On success the inode is marked S_DEAD and the dentry
 * is unmounted-from and d_delete()d.  Returns 0 or a negative errno.
 */
int vfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error = may_delete(dir, dentry, 1);
	if (error)
		return error;
	if (!dir->i_op->rmdir)
		return -EPERM;
	/* hold a ref: fs may drop the dentry under us */
	dget(dentry);
	mutex_lock(&dentry->d_inode->i_mutex);
	error = -EBUSY;
	if (d_mountpoint(dentry))
		goto out;
	error = security_inode_rmdir(dir, dentry);
	if (error)
		goto out;
	shrink_dcache_parent(dentry);
	error = dir->i_op->rmdir(dir, dentry);
	if (error)
		goto out;
	/* directory is gone: poison the inode and forbid future mounts */
	dentry->d_inode->i_flags |= S_DEAD;
	dont_mount(dentry);
out:
	mutex_unlock(&dentry->d_inode->i_mutex);
	dput(dentry);
	if (!error)
		d_delete(dentry);
	return error;
}
/*
 * do_rmdir - common implementation for rmdir(2)/unlinkat(AT_REMOVEDIR).
 * Looks up the parent, rejects ".", ".." and the root, then re-looks up
 * the victim under the parent's i_mutex and calls vfs_rmdir().
 */
static long do_rmdir(int dfd, const char __user *pathname)
{
	int error = 0;
	char * name;
	struct dentry *dentry;
	struct nameidata nd;
	error = user_path_parent(dfd, pathname, &nd, &name);
	if (error)
		return error;
	/* special last components can never be rmdir'ed */
	switch(nd.last_type) {
	case LAST_DOTDOT:
		error = -ENOTEMPTY;
		goto exit1;
	case LAST_DOT:
		error = -EINVAL;
		goto exit1;
	case LAST_ROOT:
		error = -EBUSY;
		goto exit1;
	}
	nd.flags &= ~LOOKUP_PARENT;
	/* serialize against other creators/removers in this directory */
	mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_hash(&nd);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto exit2;
	if (!dentry->d_inode) {
		error = -ENOENT;
		goto exit3;
	}
	error = mnt_want_write(nd.path.mnt);
	if (error)
		goto exit3;
	error = security_path_rmdir(&nd.path, dentry);
	if (error)
		goto exit4;
	error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
exit4:
	mnt_drop_write(nd.path.mnt);
exit3:
	dput(dentry);
exit2:
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
exit1:
	path_put(&nd.path);
	putname(name);
	return error;
}
/* rmdir(2): remove a directory relative to the current working directory. */
SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
{
	return do_rmdir(AT_FDCWD, pathname);
}
/*
 * Unlink @dentry from @dir.  The victim's i_mutex serializes against
 * concurrent opens/mounts; mount points refuse with -EBUSY.  The
 * d_delete() is skipped for NFS sillyrenamed files, which still exist
 * under a hidden name.  Returns 0 or a negative errno.
 */
int vfs_unlink(struct inode *dir, struct dentry *dentry)
{
	int error = may_delete(dir, dentry, 0);
	if (error)
		return error;
	if (!dir->i_op->unlink)
		return -EPERM;
	mutex_lock(&dentry->d_inode->i_mutex);
	if (d_mountpoint(dentry))
		error = -EBUSY;
	else {
		error = security_inode_unlink(dir, dentry);
		if (!error) {
			error = dir->i_op->unlink(dir, dentry);
			if (!error)
				dont_mount(dentry);
		}
	}
	mutex_unlock(&dentry->d_inode->i_mutex);
	/* We don't d_delete() NFS sillyrenamed files--they still exist. */
	if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
		fsnotify_link_count(dentry->d_inode);
		d_delete(dentry);
	}
	return error;
}
/*
* Make sure that the actual truncation of the file will occur outside its
* directory's i_mutex. Truncate can take a long time if there is a lot of
* writeout happening, and we don't want to prevent access to the directory
* while waiting on the I/O.
*/
/*
 * do_unlinkat - common implementation for unlink(2)/unlinkat(2).
 * The inode is pinned with ihold() across the unlink so the final
 * iput() - and hence any on-disk truncation of the freed inode -
 * happens here, after the parent's i_mutex has been dropped.
 */
static long do_unlinkat(int dfd, const char __user *pathname)
{
	int error;
	char *name;
	struct dentry *dentry;
	struct nameidata nd;
	struct inode *inode = NULL;
	error = user_path_parent(dfd, pathname, &nd, &name);
	if (error)
		return error;
	error = -EISDIR;
	if (nd.last_type != LAST_NORM)
		goto exit1;
	nd.flags &= ~LOOKUP_PARENT;
	/* serialize against other creators/removers in this directory */
	mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_hash(&nd);
	error = PTR_ERR(dentry);
	if (!IS_ERR(dentry)) {
		/* Why not before? Because we want correct error value */
		if (nd.last.name[nd.last.len])
			goto slashes;
		inode = dentry->d_inode;
		if (!inode)
			goto slashes;
		/* pin the inode so truncation happens after unlocking */
		ihold(inode);
		error = mnt_want_write(nd.path.mnt);
		if (error)
			goto exit2;
		error = security_path_unlink(&nd.path, dentry);
		if (error)
			goto exit3;
		error = vfs_unlink(nd.path.dentry->d_inode, dentry);
exit3:
		mnt_drop_write(nd.path.mnt);
exit2:
		dput(dentry);
	}
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
	if (inode)
		iput(inode);	/* truncate the inode here */
exit1:
	path_put(&nd.path);
	putname(name);
	return error;
slashes:
	/* trailing slash on a non-directory (or missing) target */
	error = !dentry->d_inode ? -ENOENT :
		S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
	goto exit2;
}
/*
 * unlinkat(2): remove a directory entry.  The only supported flag is
 * AT_REMOVEDIR, which makes the call behave like rmdir(2) instead of
 * unlink(2).
 */
SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
{
	if (flag & ~AT_REMOVEDIR)
		return -EINVAL;
	return (flag & AT_REMOVEDIR) ? do_rmdir(dfd, pathname)
				     : do_unlinkat(dfd, pathname);
}
/* unlink(2): remove a file relative to the current working directory. */
SYSCALL_DEFINE1(unlink, const char __user *, pathname)
{
	return do_unlinkat(AT_FDCWD, pathname);
}
/*
 * Create symlink @dentry in @dir pointing at @oldname.  Checks create
 * permission and LSM policy, then calls the filesystem's ->symlink()
 * method and reports the new entry via fsnotify.  Returns 0 or a
 * negative errno.
 */
int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
{
	int err = may_create(dir, dentry);

	if (err)
		return err;
	if (!dir->i_op->symlink)
		return -EPERM;
	err = security_inode_symlink(dir, dentry, oldname);
	if (err)
		return err;
	err = dir->i_op->symlink(dir, dentry, oldname);
	if (!err)
		fsnotify_create(dir, dentry);
	return err;
}
/*
 * symlinkat(2): create a symlink @newname containing the text @oldname.
 * The target text is copied verbatim - it is not resolved or validated.
 * user_path_create() returns a negative dentry with the parent's
 * i_mutex held.
 */
SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
		int, newdfd, const char __user *, newname)
{
	int error;
	char *from;
	struct dentry *dentry;
	struct path path;
	from = getname(oldname);
	if (IS_ERR(from))
		return PTR_ERR(from);
	dentry = user_path_create(newdfd, newname, &path, 0);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_putname;
	error = mnt_want_write(path.mnt);
	if (error)
		goto out_dput;
	error = security_path_symlink(&path, dentry, from);
	if (error)
		goto out_drop_write;
	error = vfs_symlink(path.dentry->d_inode, dentry, from);
out_drop_write:
	mnt_drop_write(path.mnt);
out_dput:
	dput(dentry);
	/* drop the parent i_mutex taken by user_path_create() */
	mutex_unlock(&path.dentry->d_inode->i_mutex);
	path_put(&path);
out_putname:
	putname(from);
	return error;
}
/* symlink(2): symlinkat(2) relative to the current working directory. */
SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname)
{
	return sys_symlinkat(oldname, AT_FDCWD, newname);
}
/*
 * Create hard link @new_dentry in @dir to @old_dentry's inode.  Both
 * must live on the same superblock; append-only, immutable files and
 * directories cannot be linked.  The inode's i_mutex is taken so the
 * i_nlink checks (unlinked file, s_max_links limit) and the ->link()
 * call are atomic.  Returns 0 or a negative errno.
 */
int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	unsigned max_links = dir->i_sb->s_max_links;
	int error;
	if (!inode)
		return -ENOENT;
	error = may_create(dir, new_dentry);
	if (error)
		return error;
	/* hard links cannot cross superblocks */
	if (dir->i_sb != inode->i_sb)
		return -EXDEV;
	/*
	 * A link to an append-only or immutable file cannot be created.
	 */
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;
	if (!dir->i_op->link)
		return -EPERM;
	if (S_ISDIR(inode->i_mode))
		return -EPERM;
	error = security_inode_link(old_dentry, dir, new_dentry);
	if (error)
		return error;
	mutex_lock(&inode->i_mutex);
	/* Make sure we don't allow creating hardlink to an unlinked file */
	if (inode->i_nlink == 0)
		error =  -ENOENT;
	else if (max_links && inode->i_nlink >= max_links)
		error = -EMLINK;
	else
		error = dir->i_op->link(old_dentry, dir, new_dentry);
	mutex_unlock(&inode->i_mutex);
	if (!error)
		fsnotify_link(dir, inode, new_dentry);
	return error;
}
/*
* Hardlinks are often used in delicate situations. We avoid
* security-related surprises by not following symlinks on the
* newname. --KAB
*
* We don't follow them on the oldname either to be compatible
* with linux 2.0, and to avoid hard-linking to directories
* and other special files. --ADM
*/
/*
 * linkat(2): create a hard link.  Symlinks are not followed on either
 * name unless AT_SYMLINK_FOLLOW is given (see the comment above);
 * AT_EMPTY_PATH allows linking from a bare file descriptor, gated on
 * CAP_DAC_READ_SEARCH.
 */
SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
		int, newdfd, const char __user *, newname, int, flags)
{
	struct dentry *new_dentry;
	struct path old_path, new_path;
	int how = 0;
	int error;
	if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
		return -EINVAL;
	/*
	 * To use null names we require CAP_DAC_READ_SEARCH
	 * This ensures that not everyone will be able to create
	 * a hardlink using the passed filedescriptor.
	 */
	if (flags & AT_EMPTY_PATH) {
		if (!capable(CAP_DAC_READ_SEARCH))
			return -ENOENT;
		how = LOOKUP_EMPTY;
	}
	if (flags & AT_SYMLINK_FOLLOW)
		how |= LOOKUP_FOLLOW;
	error = user_path_at(olddfd, oldname, how, &old_path);
	if (error)
		return error;
	/* returns a negative dentry with new parent's i_mutex held */
	new_dentry = user_path_create(newdfd, newname, &new_path, 0);
	error = PTR_ERR(new_dentry);
	if (IS_ERR(new_dentry))
		goto out;
	error = -EXDEV;
	if (old_path.mnt != new_path.mnt)
		goto out_dput;
	error = mnt_want_write(new_path.mnt);
	if (error)
		goto out_dput;
	error = security_path_link(old_path.dentry, &new_path, new_dentry);
	if (error)
		goto out_drop_write;
	error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
out_drop_write:
	mnt_drop_write(new_path.mnt);
out_dput:
	dput(new_dentry);
	mutex_unlock(&new_path.dentry->d_inode->i_mutex);
	path_put(&new_path);
out:
	path_put(&old_path);
	return error;
}
/* link(2): linkat(2) relative to cwd, with no flags (no symlink follow). */
SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname)
{
	return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
}
/*
* The worst of all namespace operations - renaming directory. "Perverted"
* doesn't even start to describe it. Somebody in UCB had a heck of a trip...
* Problems:
* a) we can get into loop creation. Check is done in is_subdir().
* b) race potential - two innocent renames can create a loop together.
* That's where 4.4 screws up. Current fix: serialization on
* sb->s_vfs_rename_mutex. We might be more accurate, but that's another
* story.
* c) we have to lock _three_ objects - parents and victim (if it exists).
* And that - after we got ->i_mutex on parents (until then we don't know
* whether the target exists). Solution: try to be smart with locking
* order for inodes. We rely on the fact that tree topology may change
* only under ->s_vfs_rename_mutex _and_ that parent of the object we
* move will be locked. Thus we can rank directories by the tree
* (ancestors first) and rank all non-directories after them.
* That works since everybody except rename does "lock parent, lookup,
* lock child" and rename is under ->s_vfs_rename_mutex.
* HOWEVER, it relies on the assumption that any object with ->lookup()
* has no more than 1 dentry. If "hybrid" objects will ever appear,
* we'd better make sure that there's no link(2) for them.
* d) conversion from fhandle to dentry may come in the wrong moment - when
* we are removing the target. Solution: we will have to grab ->i_mutex
* in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
* ->i_mutex on parents, which works but leads to some truly excessive
* locking].
*/
/*
 * Rename a directory.  Caller (vfs_rename) has already done may_delete/
 * may_create checks and holds both parents' i_mutex; see the locking
 * essay above.  The victim's i_mutex (if any) is taken here.
 */
static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
			  struct inode *new_dir, struct dentry *new_dentry)
{
	int error = 0;
	struct inode *target = new_dentry->d_inode;
	unsigned max_links = new_dir->i_sb->s_max_links;
	/*
	 * If we are going to change the parent - check write permissions,
	 * we'll need to flip '..'.
	 */
	if (new_dir != old_dir) {
		error = inode_permission(old_dentry->d_inode, MAY_WRITE);
		if (error)
			return error;
	}
	error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
	if (error)
		return error;
	dget(new_dentry);
	if (target)
		mutex_lock(&target->i_mutex);
	error = -EBUSY;
	if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
		goto out;
	/* moving into a different parent adds a link ('..') to new_dir */
	error = -EMLINK;
	if (max_links && !target && new_dir != old_dir &&
	    new_dir->i_nlink >= max_links)
		goto out;
	if (target)
		shrink_dcache_parent(new_dentry);
	error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
	if (error)
		goto out;
	if (target) {
		/* the overwritten directory is gone: poison its inode */
		target->i_flags |= S_DEAD;
		dont_mount(new_dentry);
	}
out:
	if (target)
		mutex_unlock(&target->i_mutex);
	dput(new_dentry);
	if (!error)
		if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
			d_move(old_dentry,new_dentry);
	return error;
}
/*
 * Rename a non-directory.  Same caller contract as vfs_rename_dir(),
 * but no '..' adjustment, no link-count limit, and no S_DEAD marking
 * of the overwritten target.
 */
static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
			    struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *target = new_dentry->d_inode;
	int error;
	error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
	if (error)
		return error;
	dget(new_dentry);
	if (target)
		mutex_lock(&target->i_mutex);
	error = -EBUSY;
	if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
		goto out;
	error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
	if (error)
		goto out;
	if (target)
		dont_mount(new_dentry);
	if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
		d_move(old_dentry, new_dentry);
out:
	if (target)
		mutex_unlock(&target->i_mutex);
	dput(new_dentry);
	return error;
}
/*
 * vfs_rename - common front end for rename; see the locking essay
 * above.  Caller holds both parents' i_mutex (via lock_rename()).
 * Renaming an inode onto itself is a successful no-op per POSIX.
 */
int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
	       struct inode *new_dir, struct dentry *new_dentry)
{
	int error;
	int is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
	const unsigned char *old_name;
	if (old_dentry->d_inode == new_dentry->d_inode)
 		return 0;
	error = may_delete(old_dir, old_dentry, is_dir);
	if (error)
		return error;
	if (!new_dentry->d_inode)
		error = may_create(new_dir, new_dentry);
	else
		error = may_delete(new_dir, new_dentry, is_dir);
	if (error)
		return error;
	if (!old_dir->i_op->rename)
		return -EPERM;
	/* copy the name now: the rename may move/free the dentry's name */
	old_name = fsnotify_oldname_init(old_dentry->d_name.name);
	if (is_dir)
		error = vfs_rename_dir(old_dir,old_dentry,new_dir,new_dentry);
	else
		error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry);
	if (!error)
		fsnotify_move(old_dir, new_dir, old_name, is_dir,
			      new_dentry->d_inode, old_dentry);
	fsnotify_oldname_free(old_name);
	return error;
}
/*
 * renameat(2): look up both parents, take them (and, for cross-parent
 * renames, s_vfs_rename_mutex) via lock_rename(), then re-look up both
 * last components under those locks.  lock_rename() returns the
 * ancestor "trap" dentry used to reject renames where one name is an
 * ancestor of the other.
 */
SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
		int, newdfd, const char __user *, newname)
{
	struct dentry *old_dir, *new_dir;
	struct dentry *old_dentry, *new_dentry;
	struct dentry *trap;
	struct nameidata oldnd, newnd;
	char *from;
	char *to;
	int error;
	error = user_path_parent(olddfd, oldname, &oldnd, &from);
	if (error)
		goto exit;
	error = user_path_parent(newdfd, newname, &newnd, &to);
	if (error)
		goto exit1;
	/* rename cannot cross mounts */
	error = -EXDEV;
	if (oldnd.path.mnt != newnd.path.mnt)
		goto exit2;
	old_dir = oldnd.path.dentry;
	/* ".", ".." and the root cannot be renamed */
	error = -EBUSY;
	if (oldnd.last_type != LAST_NORM)
		goto exit2;
	new_dir = newnd.path.dentry;
	if (newnd.last_type != LAST_NORM)
		goto exit2;
	oldnd.flags &= ~LOOKUP_PARENT;
	newnd.flags &= ~LOOKUP_PARENT;
	newnd.flags |= LOOKUP_RENAME_TARGET;
	trap = lock_rename(new_dir, old_dir);
	old_dentry = lookup_hash(&oldnd);
	error = PTR_ERR(old_dentry);
	if (IS_ERR(old_dentry))
		goto exit3;
	/* source must exist */
	error = -ENOENT;
	if (!old_dentry->d_inode)
		goto exit4;
	/* unless the source is a directory trailing slashes give -ENOTDIR */
	if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
		error = -ENOTDIR;
		if (oldnd.last.name[oldnd.last.len])
			goto exit4;
		if (newnd.last.name[newnd.last.len])
			goto exit4;
	}
	/* source should not be ancestor of target */
	error = -EINVAL;
	if (old_dentry == trap)
		goto exit4;
	new_dentry = lookup_hash(&newnd);
	error = PTR_ERR(new_dentry);
	if (IS_ERR(new_dentry))
		goto exit4;
	/* target should not be an ancestor of source */
	error = -ENOTEMPTY;
	if (new_dentry == trap)
		goto exit5;
	error = mnt_want_write(oldnd.path.mnt);
	if (error)
		goto exit5;
	error = security_path_rename(&oldnd.path, old_dentry,
				     &newnd.path, new_dentry);
	if (error)
		goto exit6;
	error = vfs_rename(old_dir->d_inode, old_dentry,
				   new_dir->d_inode, new_dentry);
exit6:
	mnt_drop_write(oldnd.path.mnt);
exit5:
	dput(new_dentry);
exit4:
	dput(old_dentry);
exit3:
	unlock_rename(new_dir, old_dir);
exit2:
	path_put(&newnd.path);
	putname(to);
exit1:
	path_put(&oldnd.path);
	putname(from);
exit:
	return error;
}
/* rename(2): renameat(2) with both names relative to cwd. */
SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
{
	return sys_renameat(AT_FDCWD, oldname, AT_FDCWD, newname);
}
int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
{
int len;
len = PTR_ERR(link);
if (IS_ERR(link))
goto out;
len = strlen(link);
if (len > (unsigned) buflen)
len = buflen;
if (copy_to_user(buffer, link, len))
len = -EFAULT;
out:
return len;
}
/*
* A helper for ->readlink(). This should be used *ONLY* for symlinks that
* have ->follow_link() touching nd only in nd_set_link(). Using (or not
* using) it for any given inode is up to filesystem.
*/
int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
	struct nameidata nd;
	void *cookie;
	int res;
	nd.depth = 0;
	/* ask the fs for the link body; it parks it via nd_set_link() */
	cookie = dentry->d_inode->i_op->follow_link(dentry, &nd);
	if (IS_ERR(cookie))
		return PTR_ERR(cookie);
	res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd));
	/* balance follow_link() - release whatever it pinned */
	if (dentry->d_inode->i_op->put_link)
		dentry->d_inode->i_op->put_link(dentry, &nd, cookie);
	return res;
}
/* Exported wrapper: continue the walk @nd through symlink body @link. */
int vfs_follow_link(struct nameidata *nd, const char *link)
{
	return __vfs_follow_link(nd, link);
}
/* get the link contents into pagecache */
/* get the link contents into pagecache */
static char *page_getlink(struct dentry * dentry, struct page **ppage)
{
	char *kaddr;
	struct page *page;
	struct address_space *mapping = dentry->d_inode->i_mapping;
	page = read_mapping_page(mapping, 0, NULL);
	if (IS_ERR(page))
		return (char*)page;
	/* caller must kunmap() and release *ppage when done */
	*ppage = page;
	kaddr = kmap(page);
	/* ensure the link text is NUL-terminated within the page */
	nd_terminate_link(kaddr, dentry->d_inode->i_size, PAGE_SIZE - 1);
	return kaddr;
}
/*
 * Generic ->readlink() for symlinks whose body lives in the page cache:
 * map the link text with page_getlink(), copy it out to userspace with
 * vfs_readlink(), then unmap and release the page.
 */
int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
	struct page *page = NULL;
	char *link = page_getlink(dentry, &page);
	int ret = vfs_readlink(dentry, buffer, buflen, link);

	if (page) {
		kunmap(page);
		page_cache_release(page);
	}
	return ret;
}
/*
 * Generic ->follow_link() for pagecache symlinks: park the mapped link
 * text in @nd and return the page as the cookie for page_put_link().
 */
void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	nd_set_link(nd, page_getlink(dentry, &page));
	return page;
}
void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
struct page *page = cookie;
if (page) {
kunmap(page);
page_cache_release(page);
}
}
/*
* The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
*/
/*
 * Write symlink body @symname (length @len including the trailing NUL)
 * into @inode's page cache via the address_space write_begin/write_end
 * pair.  The nofs argument instructs pagecache_write_begin to pass
 * AOP_FLAG_NOFS.  Returns 0 or a negative errno.
 */
int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	void *fsdata;
	int err;
	char *kaddr;
	unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE;
	if (nofs)
		flags |= AOP_FLAG_NOFS;
retry:
	/* len-1: the NUL is implied by nd_terminate_link() on read */
	err = pagecache_write_begin(NULL, mapping, 0, len-1,
				flags, &page, &fsdata);
	if (err)
		goto fail;
	kaddr = kmap_atomic(page);
	memcpy(kaddr, symname, len-1);
	kunmap_atomic(kaddr);
	err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
							page, fsdata);
	if (err < 0)
		goto fail;
	/* short write: retry the whole thing */
	if (err < len-1)
		goto retry;
	mark_inode_dirty(inode);
	return 0;
fail:
	return err;
}
/*
 * __page_symlink() with nofs derived from the mapping's GFP mask:
 * disallow FS recursion when the mapping itself forbids __GFP_FS.
 */
int page_symlink(struct inode *inode, const char *symname, int len)
{
	return __page_symlink(inode, symname, len,
			!(mapping_gfp_mask(inode->i_mapping) & __GFP_FS));
}
/* Default inode operations for symlinks stored in the page cache. */
const struct inode_operations page_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
};
EXPORT_SYMBOL(user_path_at);
EXPORT_SYMBOL(follow_down_one);
EXPORT_SYMBOL(follow_down);
EXPORT_SYMBOL(follow_up);
EXPORT_SYMBOL(get_write_access); /* binfmt_aout */
EXPORT_SYMBOL(getname);
EXPORT_SYMBOL(lock_rename);
EXPORT_SYMBOL(lookup_one_len);
EXPORT_SYMBOL(page_follow_link_light);
EXPORT_SYMBOL(page_put_link);
EXPORT_SYMBOL(page_readlink);
EXPORT_SYMBOL(__page_symlink);
EXPORT_SYMBOL(page_symlink);
EXPORT_SYMBOL(page_symlink_inode_operations);
EXPORT_SYMBOL(kern_path);
EXPORT_SYMBOL(vfs_path_lookup);
EXPORT_SYMBOL(inode_permission);
EXPORT_SYMBOL(unlock_rename);
EXPORT_SYMBOL(vfs_create);
EXPORT_SYMBOL(vfs_follow_link);
EXPORT_SYMBOL(vfs_link);
EXPORT_SYMBOL(vfs_mkdir);
EXPORT_SYMBOL(vfs_mknod);
EXPORT_SYMBOL(generic_permission);
EXPORT_SYMBOL(vfs_readlink);
EXPORT_SYMBOL(vfs_rename);
EXPORT_SYMBOL(vfs_rmdir);
EXPORT_SYMBOL(vfs_symlink);
EXPORT_SYMBOL(vfs_unlink);
EXPORT_SYMBOL(dentry_unhash);
EXPORT_SYMBOL(generic_readlink);
|
Pulshen/XKernel
|
fs/namei.c
|
C
|
gpl-2.0
| 87,242
|
/* a10 513
* Copyright (c) 2001-2012 Nicolas Léveillé <knos.free.fr>
*
* You should have received this file ('src/lib/cokus.c') with a license
* agreement. ('LICENSE' file)
*
* Copying, using, modifying and distributing this file are rights
* covered under this licensing agreement and are conditioned by its
* full acceptance and understanding.
* e 513 */
// This is the ``Mersenne Twister'' random number generator MT19937, which
// generates pseudorandom integers uniformly distributed in 0..(2^32 - 1)
// starting from any odd seed in 0..(2^32 - 1). This version is a recode
// by Shawn Cokus (Cokus@math.washington.edu) on March 8, 1998 of a version by
// Takuji Nishimura (who had suggestions from Topher Cooper and Marc Rieffel in
// July-August 1997).
//
// Effectiveness of the recoding (on Goedel2.math.washington.edu, a DEC Alpha
// running OSF/1) using GCC -O3 as a compiler: before recoding: 51.6 sec. to
// generate 300 million random numbers; after recoding: 24.0 sec. for the same
// (i.e., 46.5% of original time), so speed is now about 12.5 million random
// number generations per second on this machine.
//
// According to the URL <http://www.math.keio.ac.jp/~matumoto/emt.html>
// (and paraphrasing a bit in places), the Mersenne Twister is ``designed
// with consideration of the flaws of various existing generators,'' has
// a period of 2^19937 - 1, gives a sequence that is 623-dimensionally
// equidistributed, and ``has passed many stringent tests, including the
// die-hard test of G. Marsaglia and the load test of P. Hellekalek and
// S. Wegenkittl.'' It is efficient in memory usage (typically using 2506
// to 5012 bytes of static data, depending on data type sizes, and the code
// is quite short as well). It generates random numbers in batches of 624
// at a time, so the caching and pipelining of modern systems is exploited.
// It is also divide- and mod-free.
//
// This library is free software; you can redistribute it and/or modify it
// under the terms of the GNU Library General Public License as published by
// the Free Software Foundation (either version 2 of the License or, at your
// option, any later version). This library is distributed in the hope that
// it will be useful, but WITHOUT ANY WARRANTY, without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
// the GNU Library General Public License for more details. You should have
// received a copy of the GNU Library General Public License along with this
// library; if not, write to the Free Software Foundation, Inc., 59 Temple
// Place, Suite 330, Boston, MA 02111-1307, USA.
//
// The code as Shawn received it included the following notice:
//
// Copyright (C) 1997 Makoto Matsumoto and Takuji Nishimura. When
// you use this, send an e-mail to <matumoto@math.keio.ac.jp> with
// an appropriate reference to your work.
//
// It would be nice to CC: <Cokus@math.washington.edu> when you write.
//
#include <libc/stdio.h>
#include <libc/stdlib.h>
#include <libc/stdint.h>
//
// uint32_t must be an unsigned integer type capable of holding at least 32
// bits; exactly 32 should be fastest, but 64 is better on an Alpha with
// GCC at -O3 optimization so try your options and see what's best for you
//
#define N (624) // length of state vector
#define M (397) // a period parameter
#define K (0x9908B0DFU) // a magic constant (the twist matrix constant)
#define hiBit(u) ((u)&0x80000000U) // mask all but highest bit of u
#define loBit(u) ((u)&0x00000001U) // mask all but lowest bit of u
#define loBits(u) ((u)&0x7FFFFFFFU) // mask the highest bit of u
#define mixBits(u, v) (hiBit(u) | loBits(v)) // move hi bit of u to hi bit of v
static uint32_t state[N + 1]; // state vector + 1 extra to not violate ANSI C
static uint32_t *next; // next random value is computed from here
static int left = -1; // can *next++ this many times before reloading; -1 means "never seeded"
//
// Seed the generator. The state vector state[0..N-1] is filled with the
// multiplicative congruential generator x_new = (69069 * x_old) mod 2^32
// (Knuth, TAoCP vol. 2, 3rd ed., Line 15 of Table 1, p. 106, Sec. 3.3.4).
// The seed is forced odd so this seeding LCG attains its maximal period
// (p. 20-21, Sec. 3.2.1.2, Knuth); even seeds would yield degenerate
// sequences such as 0, 0, 0, ... Note that the user never sees these raw
// values: reloadMT() always munges them first, so restricting to odd
// seeds is the only adjustment made here. Setting left = 0 forces a
// reload on the next call to randomMT().
//
extern void seedMT(uint32_t seed)
{
    uint32_t x = (seed | 1U) & 0xFFFFFFFFU;
    uint32_t *s = state;
    int j;

    left = 0;
    *s++ = x;
    for (j = N - 1; j > 0; j--) {
        x = (x * 69069U) & 0xFFFFFFFFU;
        *s++ = x;
    }
}
// Regenerate the whole batch of N outputs in place (the "twist"), then
// temper and return the first one. Called by randomMT() when the batch is
// exhausted. NOTE: p0/p2/pM all walk the same state[] array with a fixed
// offset between them; the two loops below rely on that exact ordering.
extern uint32_t reloadMT(void)
{
    register uint32_t *p0 = state, *p2 = state + 2, *pM = state + M, s0, s1;
    register int j;
    // left < -1 means seedMT() was never called: fall back to default seed
    if (left < -1)
        seedMT(4357U);
    // after reload we can hand out N values; next points past the one returned
    left = N - 1, next = state + 1;
    // first N-M+1 steps: partner element pM is still ahead within state[]
    for (s0 = state[0], s1 = state[1], j = N - M + 1; --j; s0 = s1, s1 = *p2++)
        *p0++ = *pM++ ^ (mixBits(s0, s1) >> 1) ^ (loBit(s1) ? K : 0U);
    // remaining M steps: partner index wraps back to the start of state[]
    for (pM = state, j = M; --j; s0 = s1, s1 = *p2++)
        *p0++ = *pM++ ^ (mixBits(s0, s1) >> 1) ^ (loBit(s1) ? K : 0U);
    // final element pairs with state[0]
    s1 = state[0], *p0 = *pM ^ (mixBits(s0, s1) >> 1) ^ (loBit(s1) ? K : 0U);
    // tempering transform applied to the value being returned
    s1 ^= (s1 >> 11);
    s1 ^= (s1 << 7) & 0x9D2C5680U;
    s1 ^= (s1 << 15) & 0xEFC60000U;
    return (s1 ^ (s1 >> 18));
}
//
// Return the next 32-bit pseudo-random value. Values are produced in
// batches of N by reloadMT(); here we only pop the next batched value and
// apply the MT tempering transform. When the batch is exhausted (or the
// generator was never seeded) reloadMT() rebuilds it.
//
extern uint32_t randomMT(void)
{
    uint32_t v;

    if (--left < 0)
        return reloadMT();

    v = *next++;
    v ^= v >> 11;
    v ^= (v << 7) & 0x9D2C5680U;
    v ^= (v << 15) & 0xEFC60000U;
    return v ^ (v >> 18);
}
#if 0
// Disabled self-test driver: seeds the generator with the default seed and
// prints the first 2,002 outputs, seven per line, for comparison against
// reference output.
int main(void)
{
    int j;
    // you can seed with any uint32_t, but the best are odds in 0..(2^32 - 1)
    seedMT(4357U);
    // print the first 2,002 random numbers seven to a line as an example
    for(j=0; j<2002; j++)
        printf(" %10lu%s", (unsigned long) randomMT(), (j%7)==6 ? "\n" : "");
    return(EXIT_SUCCESS);
}
#endif
|
uucidl/plus-one-minus-one
|
src/lib/cokus.c
|
C
|
gpl-2.0
| 7,813
|
/*
* Copyright (C) ST-Ericsson SA 2010,2011
*
* Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
*
* License terms: GNU General Public License (GPL) version 2
*
* U9500 <-> M6718 IPC protocol implementation using SPI.
* state machine definition and functionality.
*/
#include <linux/modem/m6718_spi/modem_driver.h>
#include "modem_statemachine.h"
#include "modem_util.h"
#include "modem_netlink.h"
#include "modem_debug.h"
#include "modem_queue.h"
#include "modem_protocol.h"
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_MODEM_STATE
#include "modem_state.h"
#endif
#define CMD_BOOTREQ (1)
#define CMD_BOOTRESP (2)
#define CMD_WRITE (3)
#define CMD_READ (4)
/*
 * Enter handler for the INIT state.
 * Nothing is driven from here; the machine waits for an external kick.
 * Returns the follow-on event to run (none).
 */
static u8 sm_init_enter(u8 event, struct ipc_link_context *context)
{
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_MODEM_STATE
    /* if modem is off un-configure the IPC GPIO pins for low-power */
    if (modem_state_get_state() == MODEM_STATE_OFF) {
        dev_info(&context->sdev->dev,
            "link %d: modem is off, un-configuring GPIO\n",
            context->link->id);
        ipc_util_link_gpio_unconfig(context);
    }
#endif
    /* nothing more to do until an event happens */
    return IPC_SM_RUN_NONE;
}
/*
 * Exit handler for the INIT state: pick the next state from the event and
 * the sampled level of the slave INT pin. Only link 0 performs the boot
 * handshake; other links go straight to idle.
 */
static const struct ipc_sm_state *sm_init_exit(u8 event,
    struct ipc_link_context *context)
{
    bool int_active = false;
    /*
     * For reset event just re-enter init in case the modem has
     * powered off - we need to reconfigure our GPIO pins
     */
    if (event == IPC_SM_RUN_RESET)
        return ipc_sm_state(IPC_SM_INIT);
    /* re-sample link INT pin */
    int_active = ipc_util_int_is_active(context);
    atomic_set(&context->state_int, int_active);
    dev_info(&context->sdev->dev,
        "link %d: link initialised; SS:INACTIVE(%d) INT:%s(%d)\n",
        context->link->id,
        ipc_util_ss_level_inactive(context),
        int_active ? "ACTIVE" : "INACTIVE",
        int_active ? ipc_util_int_level_active(context) :
            ipc_util_int_level_inactive(context));
    /* handshake is only on link 0 */
    if (context->link->id == 0) {
        if (!int_active) {
            dev_info(&context->sdev->dev,
                "link %d: slave INT signal is inactive\n",
                context->link->id);
            /* start boot handshake */
            return ipc_sm_state(IPC_SM_SLW_TX_BOOTREQ);
        } else {
            /* wait for slave INT signal to stabilise inactive */
            return ipc_sm_state(IPC_SM_WAIT_SLAVE_STABLE);
        }
    } else {
        dev_info(&context->sdev->dev,
            "link %d: boot sync not needed, going idle\n",
            context->link->id);
        return ipc_sm_state(IPC_SM_IDL);
    }
}
/*
 * Exit handler for the audio-link INIT state. Same INT re-sampling as
 * sm_init_exit() but the audio link never takes part in the boot
 * handshake, so it always proceeds to its idle state.
 */
static const struct ipc_sm_state *sm_init_aud_exit(u8 event,
    struct ipc_link_context *context)
{
    bool int_active = false;
    /*
     * For reset event just re-enter init in case the modem has
     * powered off - we need to reconfigure our GPIO pins
     */
    if (event == IPC_SM_RUN_RESET)
        return ipc_sm_state(IPC_SM_INIT_AUD);
    /* re-sample link INT pin */
    int_active = ipc_util_int_is_active(context);
    atomic_set(&context->state_int, int_active);
    dev_info(&context->sdev->dev,
        "link %d: link initialised; SS:INACTIVE(%d) INT:%s(%d)\n",
        context->link->id,
        ipc_util_ss_level_inactive(context),
        int_active ? "ACTIVE" : "INACTIVE",
        int_active ? ipc_util_int_level_active(context) :
            ipc_util_int_level_inactive(context));
    dev_info(&context->sdev->dev,
        "link %d: boot sync not needed, going idle\n",
        context->link->id);
    return ipc_sm_state(IPC_SM_IDL_AUD);
}
/*
 * Enter handler for WAIT_SLAVE_STABLE: (re)arm the timer that polls the
 * slave INT line until it is seen stable-inactive. This state can loop,
 * so the informational print is rate-limited to once per minute.
 */
static u8 sm_wait_slave_stable_enter(u8 event, struct ipc_link_context *context)
{
    static unsigned long printk_warn_time;
    if (printk_timed_ratelimit(&printk_warn_time, 60 * 1000))
        dev_info(&context->sdev->dev,
            "link %d: waiting for stable inactive slave INT\n",
            context->link->id);
    ipc_util_start_slave_stable_timer(context);
    return IPC_SM_RUN_NONE;
}
/*
 * Exit handler for WAIT_SLAVE_STABLE: runs when the stability timer has
 * expired. If the slave INT line is still active we loop back and keep
 * waiting; once it reads inactive the boot handshake can begin.
 */
static const struct ipc_sm_state *sm_wait_slave_stable_exit(u8 event,
    struct ipc_link_context *context)
{
    if (ipc_util_int_is_active(context))
        return ipc_sm_state(IPC_SM_WAIT_SLAVE_STABLE);

    dev_info(&context->sdev->dev,
        "link %d: slave INT signal is stable inactive\n",
        context->link->id);
    return ipc_sm_state(IPC_SM_SLW_TX_BOOTREQ);
}
/*
 * Enter handler for WAIT_HANDSHAKE_INACTIVE: after the boot handshake,
 * arm the timer used to wait for the slave INT line to settle inactive.
 */
static u8 sm_wait_handshake_inactive_enter(u8 event,
    struct ipc_link_context *context)
{
    dev_info(&context->sdev->dev,
        "link %d: waiting for stable inactive slave INT\n",
        context->link->id);
    ipc_util_start_slave_stable_timer(context);
    return IPC_SM_RUN_NONE;
}
/*
 * Exit handler for WAIT_HANDSHAKE_INACTIVE: when the slave INT line has
 * gone inactive the modem boot sync is complete. Broadcast "modem online"
 * and kick the state machine of any other link that was probed while the
 * handshake was still in progress.
 */
static const struct ipc_sm_state *sm_wait_handshake_inactive_exit(u8 event,
    struct ipc_link_context *context)
{
    int i;
    if (!ipc_util_int_is_active(context)) {
        dev_info(&context->sdev->dev,
            "link %d: slave INT signal is inactive, going idle\n",
            context->link->id);
        /* modem sync is done */
        atomic_inc(&l1_context.boot_sync_done);
        ipc_broadcast_modem_online(context);
        /*
         * Kick the state machine for any initialised links - skip link0
         * since this link has just completed handshake
         */
        for (i = 1; i < IPC_NBR_SUPPORTED_SPI_LINKS; i++)
            if (l1_context.device_context[i].state != NULL) {
                dev_dbg(&context->sdev->dev,
                    "link %d has already been probed, "
                    "kicking state machine\n", i);
                ipc_sm_kick(IPC_SM_RUN_INIT,
                    &l1_context.device_context[i]);
            }
        return ipc_sm_state(IPC_SM_IDL);
    } else {
        /* INT still active: keep waiting for it to settle */
        return ipc_sm_state(IPC_SM_WAIT_HANDSHAKE_INACTIVE);
    }
}
/*
 * Enter handler for the IDLE state: drop SS and decide whether there is
 * already work pending (queued tx frames, or a slave request latched in
 * state_int) that should immediately leave idle again.
 */
static u8 sm_idl_enter(u8 event, struct ipc_link_context *context)
{
    ipc_util_deactivate_ss(context);
    ipc_dbg_enter_idle(context);
    /* check if tx queue contains items */
    if (atomic_read(&context->tx_q_count) > 0) {
        dev_dbg(&context->sdev->dev,
            "link %d: tx queue contains items\n",
            context->link->id);
        return IPC_SM_RUN_TX_REQ;
    }
    /* check if modem has already requested transaction start */
    if (atomic_read(&context->state_int)) {
        dev_dbg(&context->sdev->dev,
            "link %d: slave has already signalled ready\n",
            context->link->id);
        return IPC_SM_RUN_SLAVE_IRQ;
    }
    dev_dbg(&context->sdev->dev,
        "link %d: going idle\n", context->link->id);
    return IPC_SM_RUN_NONE;
}
/*
 * Exit handler for the IDLE state: dispatch on the wake-up event.
 * A tx request starts a master-initiated write, a slave IRQ starts a
 * read, a reset tears the link down, anything else halts the link.
 */
static const struct ipc_sm_state *sm_idl_exit(u8 event,
    struct ipc_link_context *context)
{
    ipc_dbg_exit_idle(context);
    switch (event) {
    case IPC_SM_RUN_RESET:
        return ipc_sm_state(IPC_SM_RESET);
    case IPC_SM_RUN_TX_REQ:
        return ipc_sm_state(IPC_SM_SLW_TX_WR_CMD);
    case IPC_SM_RUN_SLAVE_IRQ:
        return ipc_sm_state(IPC_SM_SLW_TX_RD_CMD);
    default:
        return ipc_sm_state(IPC_SM_HALT);
    }
}
/*
 * Exit handler for the audio-link IDLE state. A reset event routes to the
 * audio reset state; any other wake-up starts a transaction, which on the
 * audio link always begins with a data transmit.
 */
static const struct ipc_sm_state *sm_idl_aud_exit(u8 event,
    struct ipc_link_context *context)
{
    ipc_dbg_exit_idle(context);
    return ipc_sm_state(event == IPC_SM_RUN_RESET ?
        IPC_SM_RESET_AUD : IPC_SM_SLW_TX_WR_DAT_AUD);
}
/*
 * Enter handler for SLW_TX_WR_CMD: build the L1 WRITE header for the
 * frame at the head of the tx queue and raise SS to ask the slave for a
 * transfer. The frame itself stays in context->frame until the data
 * phase. If the slave already signalled ready we skip the timeout wait.
 */
static u8 sm_slw_tx_wr_cmd_enter(u8 event, struct ipc_link_context *context)
{
    struct ipc_tx_queue *frame;
    /* get the frame from the head of the tx queue */
    if (ipc_queue_is_empty(context)) {
        dev_err(&context->sdev->dev,
            "link %d error: tx queue is empty!\n",
            context->link->id);
        return IPC_SM_RUN_ABORT;
    }
    frame = ipc_queue_get_frame(context);
    ipc_dbg_dump_frame(&context->sdev->dev, context->link->id, frame, true);
    context->cmd = ipc_util_make_l1_header(CMD_WRITE, frame->counter,
        frame->len);
    dev_dbg(&context->sdev->dev,
        "link %d: TX FRAME cmd %08x (type %d counter %d len %d)\n",
        context->link->id,
        context->cmd,
        ipc_util_get_l1_cmd(context->cmd),
        ipc_util_get_l1_counter(context->cmd),
        ipc_util_get_l1_length(context->cmd));
    ipc_util_spi_message_prepare(context, &context->cmd,
        NULL, IPC_L1_HDR_SIZE);
    context->frame = frame;
    /* slave might already have signalled ready to transmit */
    if (atomic_read(&context->state_int)) {
        dev_dbg(&context->sdev->dev,
            "link %d: slave has already signalled ready\n",
            context->link->id);
        ipc_util_activate_ss(context);
        return IPC_SM_RUN_SLAVE_IRQ;
    } else {
        /* raise SS and wait for the slave, with a comms timeout */
        ipc_util_activate_ss_with_tmo(context);
        return IPC_SM_RUN_NONE;
    }
}
/*
 * Exit handler for SLW_TX_WR_CMD: reset tears the link down, a comms
 * timeout halts it, otherwise the slave is ready and we move on to
 * actually clocking out the write command.
 */
static const struct ipc_sm_state *sm_slw_tx_wr_cmd_exit(u8 event,
    struct ipc_link_context *context)
{
    switch (event) {
    case IPC_SM_RUN_RESET:
        return ipc_sm_state(IPC_SM_RESET);
    case IPC_SM_RUN_COMMS_TMO:
        return ipc_sm_state(IPC_SM_HALT);
    default:
        return ipc_sm_state(IPC_SM_ACT_TX_WR_CMD);
    }
}
/*
 * Enter handler for ACT_TX_WR_CMD: the slave is ready, start the async
 * SPI transfer of the previously prepared WRITE header.
 */
static u8 sm_act_tx_wr_cmd_enter(u8 event, struct ipc_link_context *context)
{
    int err;
    /* slave is ready - start the spi transfer */
    dev_dbg(&context->sdev->dev,
        "link %d: starting spi tfr\n", context->link->id);
    err = spi_async(context->sdev, &context->spi_message);
    if (err < 0) {
        dev_err(&context->sdev->dev,
            "link %d error: spi tfr start failed, error %d\n",
            context->link->id, err);
        return IPC_SM_RUN_ABORT;
    }
    return IPC_SM_RUN_NONE;
}
/*
 * Exit handler for ACT_TX_WR_CMD: the header transfer completed, proceed
 * to the data phase (or reset).
 */
static const struct ipc_sm_state *sm_act_tx_wr_cmd_exit(u8 event,
    struct ipc_link_context *context)
{
    if (event == IPC_SM_RUN_RESET)
        return ipc_sm_state(IPC_SM_RESET);
    else
        return ipc_sm_state(IPC_SM_SLW_TX_WR_DAT);
}
/*
 * Enter handler for SLW_TX_WR_DAT: prepare the SPI message carrying the
 * payload of the frame whose header was just sent, then signal the slave.
 */
static u8 sm_slw_tx_wr_dat_enter(u8 event, struct ipc_link_context *context)
{
    /* prepare to transfer the frame tx data */
    ipc_util_spi_message_prepare(context, context->frame->data,
        NULL, context->frame->len);
    /* slave might already have signalled ready to transmit */
    if (atomic_read(&context->state_int)) {
        dev_dbg(&context->sdev->dev,
            "link %d: slave has already signalled ready\n",
            context->link->id);
        ipc_util_activate_ss(context);
        return IPC_SM_RUN_SLAVE_IRQ;
    } else {
        ipc_util_activate_ss_with_tmo(context);
        return IPC_SM_RUN_NONE;
    }
}
/*
 * Enter handler for the audio-link SLW_TX_WR_DAT state. The audio link
 * has no header phase: every transaction sends a frame, so if the tx
 * queue is empty a zero-length dummy frame is allocated and sent instead.
 */
static u8 sm_slw_tx_wr_dat_aud_enter(u8 event, struct ipc_link_context *context)
{
    struct ipc_tx_queue *frame = NULL;
    /* check if there is a frame to be sent */
    if (!ipc_queue_is_empty(context)) {
        frame = ipc_queue_get_frame(context);
    } else {
        /* no frame to send, create an empty one */
        dev_dbg(&context->sdev->dev,
            "link %d: no frame to send, allocating dummy\n",
            context->link->id);
        frame = ipc_queue_new_frame(context, 0);
        if (frame == NULL)
            return IPC_SM_RUN_ABORT;
    }
    ipc_dbg_dump_frame(&context->sdev->dev, context->link->id, frame, true);
    /* prepare to transfer the frame tx data */
    context->frame = frame;
    ipc_util_spi_message_prepare(context, context->frame->data,
        NULL, context->frame->len);
    /* slave might already have signalled ready to transmit */
    if (event == IPC_SM_RUN_SLAVE_IRQ || atomic_read(&context->state_int)) {
        dev_dbg(&context->sdev->dev,
            "link %d: slave has already signalled ready\n",
            context->link->id);
        ipc_util_activate_ss(context);
        return IPC_SM_RUN_SLAVE_IRQ;
    } else {
        ipc_util_activate_ss_with_tmo(context);
        return IPC_SM_RUN_NONE;
    }
}
/*
 * Exit handler for SLW_TX_WR_DAT: reset/timeout handling, otherwise move
 * to the active data-transfer state.
 */
static const struct ipc_sm_state *sm_slw_tx_wr_dat_exit(u8 event,
    struct ipc_link_context *context)
{
    if (event == IPC_SM_RUN_RESET)
        return ipc_sm_state(IPC_SM_RESET);
    else if (event == IPC_SM_RUN_COMMS_TMO)
        return ipc_sm_state(IPC_SM_HALT);
    else
        return ipc_sm_state(IPC_SM_ACT_TX_WR_DAT);
}
/*
 * Exit handler for the audio-link SLW_TX_WR_DAT state: as above but
 * routed to the audio variants of the reset/halt/active states.
 */
static const struct ipc_sm_state *sm_slw_tx_wr_dat_aud_exit(u8 event,
    struct ipc_link_context *context)
{
    if (event == IPC_SM_RUN_RESET)
        return ipc_sm_state(IPC_SM_RESET_AUD);
    else if (event == IPC_SM_RUN_COMMS_TMO)
        return ipc_sm_state(IPC_SM_HALT_AUD);
    else
        return ipc_sm_state(IPC_SM_ACT_TX_WR_DAT_AUD);
}
/*
 * Enter handler for ACT_TX_WR_DAT: start the async SPI transfer of the
 * frame payload prepared in the SLW state.
 */
static u8 sm_act_tx_wr_dat_enter(u8 event, struct ipc_link_context *context)
{
    int err;
    /* slave is ready - start the spi transfer */
    dev_dbg(&context->sdev->dev,
        "link %d: starting spi tfr\n", context->link->id);
    err = spi_async(context->sdev, &context->spi_message);
    if (err < 0) {
        dev_err(&context->sdev->dev,
            "link %d error: spi tfr start failed, error %d\n",
            context->link->id, err);
        return IPC_SM_RUN_ABORT;
    }
    return IPC_SM_RUN_NONE;
}
/*
 * Exit handler for ACT_TX_WR_DAT: the payload has been clocked out.
 * Account the sent bytes, keep the frame for later verification if it is
 * a loopback frame (VERIFY_FRAMES), otherwise free it, and continue the
 * transaction with a READ command so the slave gets a chance to talk.
 */
static const struct ipc_sm_state *sm_act_tx_wr_dat_exit(u8 event,
    struct ipc_link_context *context)
{
    if (event == IPC_SM_RUN_RESET)
        return ipc_sm_state(IPC_SM_RESET);
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
    /* frame is sent, increment link tx counter */
    context->tx_bytes += context->frame->actual_len;
#endif
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_VERIFY_FRAMES
    {
        u8 channel;
        channel = ipc_util_get_l2_channel(*(u32 *)context->frame->data);
        if (ipc_util_channel_is_loopback(channel)) {
            /* keep the frame so the echoed copy can be verified */
            context->last_frame = context->frame;
        } else {
            ipc_queue_delete_frame(context->frame);
            context->frame = NULL;
        }
    }
#else
    /* free the sent frame */
    ipc_queue_delete_frame(context->frame);
    context->frame = NULL;
#endif
    return ipc_sm_state(IPC_SM_SLW_TX_RD_CMD);
}
/*
 * Exit handler for the audio-link ACT_TX_WR_DAT state: the frame has been
 * clocked out over SPI. Account the sent bytes and, when frame
 * verification is enabled, keep a copy of loopback frames so the echo can
 * be checked later. The transaction then continues with the receive
 * phase, which reuses the same frame buffer.
 */
static const struct ipc_sm_state *sm_act_tx_wr_dat_aud_exit(u8 event,
    struct ipc_link_context *context)
{
    if (event == IPC_SM_RUN_RESET)
        return ipc_sm_state(IPC_SM_RESET_AUD);
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
    /* frame is sent, increment link tx counter */
    context->tx_bytes += context->frame->actual_len;
#endif
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_VERIFY_FRAMES
    {
        u8 channel;
        channel = ipc_util_get_l2_channel(*(u32 *)context->frame->data);
        if (ipc_util_channel_is_loopback(channel)) {
            /* create a copy of the frame */
            context->last_frame = ipc_queue_new_frame(context,
                context->frame->actual_len);
            /*
             * Fix: ipc_queue_new_frame() can fail and return NULL
             * (as handled in sm_slw_tx_wr_dat_aud_enter); copying
             * into a NULL frame would oops. Skip the copy on
             * failure - verification of this frame is then simply
             * not performed.
             */
            if (context->last_frame != NULL)
                memcpy(context->last_frame->data,
                    context->frame->data,
                    context->frame->actual_len);
        }
    }
#endif
    return ipc_sm_state(IPC_SM_SLW_RX_WR_DAT_AUD);
}
/*
 * Enter handler for SLW_TX_RD_CMD: build and prepare the L1 READ command,
 * inviting the slave to send any data it has queued. If the slave itself
 * initiated (or has since requested) the transaction, skip the timeout.
 */
static u8 sm_slw_tx_rd_cmd_enter(u8 event, struct ipc_link_context *context)
{
    context->cmd = ipc_util_make_l1_header(CMD_READ, 0, 0);
    dev_dbg(&context->sdev->dev,
        "link %d: cmd %08x (type %d)\n",
        context->link->id,
        context->cmd,
        ipc_util_get_l1_cmd(context->cmd));
    /* prepare the spi message to transfer */
    ipc_util_spi_message_prepare(context, &context->cmd,
        NULL, IPC_L1_HDR_SIZE);
    /* check if the slave requested this transaction */
    if (event == IPC_SM_RUN_SLAVE_IRQ) {
        dev_dbg(&context->sdev->dev,
            "link %d: slave initiated transaction, continue\n",
            context->link->id);
        ipc_util_activate_ss(context);
        return IPC_SM_RUN_SLAVE_IRQ;
    } else {
        /* slave might already have signalled ready to transmit */
        if (atomic_read(&context->state_int)) {
            dev_dbg(&context->sdev->dev,
                "link %d: slave has already signalled ready\n",
                context->link->id);
            ipc_util_activate_ss(context);
            return IPC_SM_RUN_SLAVE_IRQ;
        } else {
            ipc_util_activate_ss_with_tmo(context);
            return IPC_SM_RUN_NONE;
        }
    }
}
/*
 * Exit handler for SLW_TX_RD_CMD: reset/timeout handling, otherwise go
 * active to clock out the READ command.
 */
static const struct ipc_sm_state *sm_slw_tx_rd_cmd_exit(u8 event,
    struct ipc_link_context *context)
{
    if (event == IPC_SM_RUN_RESET)
        return ipc_sm_state(IPC_SM_RESET);
    else if (event == IPC_SM_RUN_COMMS_TMO)
        return ipc_sm_state(IPC_SM_HALT);
    else
        return ipc_sm_state(IPC_SM_ACT_TX_RD_CMD);
}
/*
 * Enter handler for ACT_TX_RD_CMD: start the async SPI transfer of the
 * READ command.
 */
static u8 sm_act_tx_rd_cmd_enter(u8 event, struct ipc_link_context *context)
{
    int err;
    /* slave is ready - start the spi transfer */
    dev_dbg(&context->sdev->dev,
        "link %d: starting spi tfr\n", context->link->id);
    err = spi_async(context->sdev, &context->spi_message);
    if (err < 0) {
        dev_err(&context->sdev->dev,
            "link %d error: spi tfr start failed, error %d\n",
            context->link->id, err);
        return IPC_SM_RUN_ABORT;
    }
    return IPC_SM_RUN_NONE;
}
/*
 * Exit handler for ACT_TX_RD_CMD: READ command sent; next we wait for the
 * slave's WRITE header (or reset).
 */
static const struct ipc_sm_state *sm_act_tx_rd_cmd_exit(u8 event,
    struct ipc_link_context *context)
{
    if (event == IPC_SM_RUN_RESET)
        return ipc_sm_state(IPC_SM_RESET);
    else
        return ipc_sm_state(IPC_SM_SLW_RX_WR_CMD);
}
/*
 * Enter handler for SLW_RX_WR_CMD: prepare to receive the slave's WRITE
 * frame header into context->cmd, then signal readiness via SS.
 */
static u8 sm_slw_rx_wr_cmd_enter(u8 event, struct ipc_link_context *context)
{
    /* prepare to receive MESSAGE WRITE frame header */
    ipc_util_spi_message_prepare(context, NULL,
        &context->cmd, IPC_L1_HDR_SIZE);
    /* slave might already have signalled ready to transmit */
    if (atomic_read(&context->state_int)) {
        dev_dbg(&context->sdev->dev,
            "link %d: slave has already signalled ready\n",
            context->link->id);
        ipc_util_activate_ss(context);
        return IPC_SM_RUN_SLAVE_IRQ;
    } else {
        ipc_util_activate_ss_with_tmo(context);
        return IPC_SM_RUN_NONE;
    }
}
/*
 * Exit handler for SLW_RX_WR_CMD: reset/timeout handling, otherwise go
 * active to clock in the slave's header.
 */
static const struct ipc_sm_state *sm_slw_rx_wr_cmd_exit(u8 event,
    struct ipc_link_context *context)
{
    if (event == IPC_SM_RUN_RESET)
        return ipc_sm_state(IPC_SM_RESET);
    else if (event == IPC_SM_RUN_COMMS_TMO)
        return ipc_sm_state(IPC_SM_HALT);
    else
        return ipc_sm_state(IPC_SM_ACT_RX_WR_CMD);
}
/*
 * Enter handler for ACT_RX_WR_CMD: start the async SPI transfer that
 * receives the slave's frame header.
 */
static u8 sm_act_rx_wr_cmd_enter(u8 event, struct ipc_link_context *context)
{
    int err;
    /* slave is ready - start the spi transfer */
    dev_dbg(&context->sdev->dev,
        "link %d: starting spi tfr\n", context->link->id);
    err = spi_async(context->sdev, &context->spi_message);
    if (err < 0) {
        dev_err(&context->sdev->dev,
            "link %d error: spi tfr start failed, error %d\n",
            context->link->id, err);
        return IPC_SM_RUN_ABORT;
    }
    return IPC_SM_RUN_NONE;
}
/*
 * Exit handler for ACT_RX_WR_CMD: decode the header received from the
 * slave. A WRITE header means payload follows - allocate a frame for it
 * and prepare the receive transfer. Anything else (including an invalid
 * type, which is logged) is treated as "slave has nothing to send" and
 * ends the transaction.
 */
static const struct ipc_sm_state *sm_act_rx_wr_cmd_exit(u8 event,
    struct ipc_link_context *context)
{
    u8 cmd_type = ipc_util_get_l1_cmd(context->cmd);
    int counter = ipc_util_get_l1_counter(context->cmd);
    int length = ipc_util_get_l1_length(context->cmd);
    dev_dbg(&context->sdev->dev,
        "link %d: RX HEADER %08x (type %d counter %d length %d)\n",
        context->link->id,
        context->cmd,
        cmd_type,
        counter,
        length);
    if (event == IPC_SM_RUN_RESET)
        return ipc_sm_state(IPC_SM_RESET);
    if (cmd_type == CMD_WRITE) {
        /* slave has data to send - allocate a frame to hold it */
        context->frame = ipc_queue_new_frame(context, length);
        if (context->frame == NULL)
            /* allocation failed: drop the transaction, go idle */
            return ipc_sm_state(IPC_SM_IDL);
        context->frame->counter = counter;
        ipc_util_spi_message_prepare(context, NULL,
            context->frame->data, context->frame->len);
        return ipc_sm_state(IPC_SM_ACT_RX_WR_DAT);
    } else {
        if (cmd_type != 0)
            dev_err(&context->sdev->dev,
                "link %d error: received invalid frame type %x "
                "(%08x)! assuming TRANSACTION_END...\n",
                context->link->id,
                cmd_type,
                context->cmd);
        /* slave has no data to send */
        dev_dbg(&context->sdev->dev,
            "link %d: slave has no data to send\n",
            context->link->id);
        return ipc_sm_state(IPC_SM_IDL);
    }
}
/*
 * Enter handler for the audio-link SLW_RX_WR_DAT state: prepare to
 * receive the slave's payload, reusing the buffer of the frame that was
 * just transmitted (tx and rx frames are the same size on this link).
 */
static u8 sm_slw_rx_wr_dat_aud_enter(u8 event, struct ipc_link_context *context)
{
    /*
     * We're using the same frame buffer we just sent, so no need for a
     * new allocation here, just prepare the spi message
     */
    ipc_util_spi_message_prepare(context, NULL,
        context->frame->data, context->frame->len);
    /* slave might already have signalled ready to transmit */
    if (atomic_read(&context->state_int)) {
        dev_dbg(&context->sdev->dev,
            "link %d: slave has already signalled ready\n",
            context->link->id);
        ipc_util_activate_ss(context);
        return IPC_SM_RUN_SLAVE_IRQ;
    } else {
        ipc_util_activate_ss_with_tmo(context);
        return IPC_SM_RUN_NONE;
    }
}
/*
 * Exit handler for the audio-link SLW_RX_WR_DAT state: reset/timeout
 * handling, otherwise go active to clock the payload in.
 */
static const struct ipc_sm_state *sm_slw_rx_wr_dat_aud_exit(u8 event,
    struct ipc_link_context *context)
{
    if (event == IPC_SM_RUN_RESET)
        return ipc_sm_state(IPC_SM_RESET_AUD);
    else if (event == IPC_SM_RUN_COMMS_TMO)
        return ipc_sm_state(IPC_SM_HALT_AUD);
    else
        return ipc_sm_state(IPC_SM_ACT_RX_WR_DAT_AUD);
}
/*
 * Enter handler for ACT_RX_WR_DAT: (re)prepare the receive message for
 * the allocated frame and start the async SPI transfer.
 */
static u8 sm_act_rx_wr_dat_enter(u8 event, struct ipc_link_context *context)
{
    int err;
    /* assume slave is still ready - prepare and start the spi transfer */
    ipc_util_spi_message_prepare(context, NULL,
        context->frame->data, context->frame->len);
    dev_dbg(&context->sdev->dev,
        "link %d: starting spi tfr\n", context->link->id);
    err = spi_async(context->sdev, &context->spi_message);
    if (err < 0) {
        dev_err(&context->sdev->dev,
            "link %d error: spi tfr start failed, error %d\n",
            context->link->id, err);
        return IPC_SM_RUN_ABORT;
    }
    return IPC_SM_RUN_NONE;
}
/*
 * Enter handler for the audio-link ACT_RX_WR_DAT state: the message was
 * already prepared in the SLW state, just start the transfer.
 */
static u8 sm_act_rx_wr_dat_aud_enter(u8 event, struct ipc_link_context *context)
{
    int err;
    dev_dbg(&context->sdev->dev,
        "link %d: starting spi tfr\n", context->link->id);
    err = spi_async(context->sdev, &context->spi_message);
    if (err < 0) {
        dev_err(&context->sdev->dev,
            "link %d error: spi tfr start failed, error %d\n",
            context->link->id, err);
        return IPC_SM_RUN_ABORT;
    }
    return IPC_SM_RUN_NONE;
}
/*
 * Exit handler for ACT_RX_WR_DAT: a payload has been received from the
 * slave. Decode the L2 header, sanity-check the length, hand the PDU up
 * to the L2mux layer (which copies the data), free the frame, and then
 * either start another write (tx queue non-empty) or issue another READ.
 */
static const struct ipc_sm_state *sm_act_rx_wr_dat_exit(u8 event,
    struct ipc_link_context *context)
{
    u32 frame_hdr;
    unsigned char l2_header;
    unsigned int l2_length;
    u8 *l2_data;
    if (event == IPC_SM_RUN_RESET)
        return ipc_sm_state(IPC_SM_RESET);
    dev_dbg(&context->sdev->dev,
        "link %d: RX PAYLOAD %d bytes\n",
        context->link->id, context->frame->len);
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
    /* frame is received, increment link rx counter */
    context->rx_bytes += context->frame->len;
#endif
    /* decode L2 header */
    frame_hdr = *(u32 *)context->frame->data;
    l2_header = ipc_util_get_l2_channel(frame_hdr);
    l2_length = ipc_util_get_l2_length(frame_hdr);
    l2_data = (u8 *)context->frame->data + IPC_L2_HDR_SIZE;
    context->frame->actual_len = l2_length + IPC_L2_HDR_SIZE;
    ipc_dbg_dump_frame(&context->sdev->dev, context->link->id,
        context->frame, false);
    /* L2 length larger than the L1 frame minus header is suspicious */
    if (l2_length > (context->frame->len - 4)) {
        dev_err(&context->sdev->dev,
            "link %d: suspicious frame: L1 len %d L2 len %d\n",
            context->link->id, context->frame->len, l2_length);
    }
    dev_dbg(&context->sdev->dev,
        "link %d: L2 PDU decode: header 0x%08x channel %d length %d "
        "data[%02x%02x%02x...]\n",
        context->link->id, frame_hdr, l2_header, l2_length,
        l2_data[0], l2_data[1], l2_data[2]);
    if (ipc_util_channel_is_loopback(l2_header))
        ipc_dbg_verify_rx_frame(context);
    /* pass received frame up to L2mux layer */
    if (!modem_protocol_channel_is_open(l2_header)) {
        dev_err(&context->sdev->dev,
            "link %d error: received frame on invalid channel %d, "
            "frame discarded\n",
            context->link->id, l2_header);
    } else {
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
        /*
         * Discard loopback frames if we are taking throughput
         * measurements - we'll be loading the links and so will likely
         * overload the buffers.
         */
        if (!ipc_util_channel_is_loopback(l2_header))
#endif
            modem_m6718_spi_receive(context->sdev,
                l2_header, l2_length, l2_data);
    }
    /* data is copied by L2mux so free the frame here */
    ipc_queue_delete_frame(context->frame);
    context->frame = NULL;
    /* check tx queue for content */
    if (!ipc_queue_is_empty(context)) {
        dev_dbg(&context->sdev->dev,
            "link %d: tx queue not empty\n", context->link->id);
        return ipc_sm_state(IPC_SM_SLW_TX_WR_CMD);
    } else {
        dev_dbg(&context->sdev->dev,
            "link %d: tx queue empty\n", context->link->id);
        return ipc_sm_state(IPC_SM_SLW_TX_RD_CMD);
    }
}
/*
 * Exit handler for the audio-link ACT_RX_WR_DAT state: decode and deliver
 * the received payload. A frame header of all zeroes marks a dummy frame
 * (the slave had nothing to send) and is discarded. The audio link then
 * always returns to idle - one tx+rx exchange per transaction.
 */
static const struct ipc_sm_state *sm_act_rx_wr_dat_aud_exit(u8 event,
    struct ipc_link_context *context)
{
    u32 frame_hdr;
    unsigned char l2_header;
    unsigned int l2_length;
    u8 *l2_data;
    if (event == IPC_SM_RUN_RESET)
        return ipc_sm_state(IPC_SM_RESET_AUD);
    dev_dbg(&context->sdev->dev,
        "link %d: RX PAYLOAD %d bytes\n",
        context->link->id, context->frame->len);
    /* decode L2 header */
    frame_hdr = *(u32 *)context->frame->data;
    l2_header = ipc_util_get_l2_channel(frame_hdr);
    l2_length = ipc_util_get_l2_length(frame_hdr);
    l2_data = (u8 *)context->frame->data + IPC_L2_HDR_SIZE;
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
    /* frame is received, increment link rx counter */
    context->rx_bytes += l2_length;
#endif
    /* zero header means a dummy frame with no payload */
    if (frame_hdr != 0)
        context->frame->actual_len = l2_length + IPC_L2_HDR_SIZE;
    else
        context->frame->actual_len = 0;
    ipc_dbg_dump_frame(&context->sdev->dev, context->link->id,
        context->frame, false);
    if (l2_length > (context->frame->len - 4))
        dev_err(&context->sdev->dev,
            "link %d: suspicious frame: L1 len %d L2 len %d\n",
            context->link->id, context->frame->len, l2_length);
    dev_dbg(&context->sdev->dev,
        "link %d: L2 PDU decode: header 0x%08x channel %d length %d "
        "data[%02x%02x%02x...]\n",
        context->link->id, frame_hdr, l2_header, l2_length,
        l2_data[0], l2_data[1], l2_data[2]);
    if (ipc_util_channel_is_loopback(l2_header))
        ipc_dbg_verify_rx_frame(context);
    /* did the slave actually have anything to send? */
    if (frame_hdr != 0) {
        /* pass received frame up to L2mux layer */
        if (!modem_protocol_channel_is_open(l2_header)) {
            dev_err(&context->sdev->dev,
                "link %d error: received frame on invalid "
                "channel %d, frame discarded\n",
                context->link->id, l2_header);
        } else {
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
            /*
             * Discard loopback frames if we are taking throughput
             * measurements - we'll be loading the links and so will
             * likely overload the buffers.
             */
            if (!ipc_util_channel_is_loopback(l2_header))
#endif
                modem_m6718_spi_receive(context->sdev,
                    l2_header, l2_length, l2_data);
        }
    } else {
        dev_dbg(&context->sdev->dev,
            "link %d: received dummy frame, discarding\n",
            context->link->id);
    }
    /* data is copied by L2mux so free the frame here */
    ipc_queue_delete_frame(context->frame);
    context->frame = NULL;
    /* audio link goes idle ready for next transaction */
    return ipc_sm_state(IPC_SM_IDL_AUD);
}
/*
 * Enter handler for the HALT state: a fatal protocol error occurred.
 * When modem-state support is built in, force a modem reset (unless the
 * user disabled MSR via debugfs); the resulting reset event will bring
 * the links back through RESET/INIT.
 */
static u8 sm_halt_enter(u8 event, struct ipc_link_context *context)
{
    dev_err(&context->sdev->dev,
        "link %d error: HALTED\n", context->link->id);
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_MODEM_STATE
    /*
     * Force modem reset, this will cause a reset event from the modemstate
     * driver which will reset the links. If debugfs is enabled then there
     * is a userspace file which controls whether MSR is enabled or not.
     */
#ifdef CONFIG_DEBUG_FS
    if (l1_context.msr_disable) {
        dev_info(&context->sdev->dev,
            "link %d: MSR is disabled by user, "
            "not requesting modem reset\n", context->link->id);
        return IPC_SM_RUN_RESET;
    }
#endif
    modem_state_force_reset();
#endif
    return IPC_SM_RUN_RESET;
}
/* Exit handler for HALT: always proceed to the RESET state. */
static const struct ipc_sm_state *sm_halt_exit(u8 event,
    struct ipc_link_context *context)
{
    return ipc_sm_state(IPC_SM_RESET);
}
/* Exit handler for the audio-link HALT state: proceed to audio RESET. */
static const struct ipc_sm_state *sm_halt_aud_exit(u8 event,
    struct ipc_link_context *context)
{
    return ipc_sm_state(IPC_SM_RESET_AUD);
}
/*
 * Enter handler for the RESET state: drop SS, flush the tx queue and any
 * in-flight frame, and (on link 0) broadcast the modem reset to clients.
 */
static u8 sm_reset_enter(u8 event, struct ipc_link_context *context)
{
    dev_err(&context->sdev->dev,
        "link %d resetting\n", context->link->id);
    if (context->link->id == 0)
        ipc_broadcast_modem_reset(context);
    ipc_util_deactivate_ss(context);
    ipc_queue_reset(context);
    if (context->frame != NULL) {
        ipc_queue_delete_frame(context->frame);
        context->frame = NULL;
    }
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_VERIFY_FRAMES
    if (context->last_frame != NULL) {
        ipc_queue_delete_frame(context->last_frame);
        context->last_frame = NULL;
    }
#endif
    dev_dbg(&context->sdev->dev,
        "link %d reset completed\n", context->link->id);
    return IPC_SM_RUN_RESET;
}
/* Exit handler for RESET: re-enter INIT to bring the link back up. */
static const struct ipc_sm_state *sm_reset_exit(u8 event,
    struct ipc_link_context *context)
{
    return ipc_sm_state(IPC_SM_INIT);
}
/* Exit handler for the audio-link RESET state: re-enter audio INIT. */
static const struct ipc_sm_state *sm_reset_aud_exit(u8 event,
    struct ipc_link_context *context)
{
    return ipc_sm_state(IPC_SM_INIT_AUD);
}
static u8 sm_slw_tx_bootreq_enter(u8 event, struct ipc_link_context *context)
{
dev_info(&context->sdev->dev,
"link %d: waiting for boot sync\n", context->link->id);
ipc_util_activate_ss(context);
context->cmd = ipc_util_make_l1_header(CMD_BOOTREQ, 0,
IPC_DRIVER_VERSION);
dev_dbg(&context->sdev->dev,
"link %d: TX HEADER cmd %08x (type %x)\n",
context->link->id,
context->cmd,
ipc_util_get_l1_cmd(context->cmd));
ipc_util_spi_message_prepare(context, &context->cmd,
NULL, IPC_L1_HDR_SIZE);
/* wait now for the slave to indicate ready... */
return IPC_SM_RUN_NONE;
}
static const struct ipc_sm_state *sm_slw_tx_bootreq_exit(u8 event,
struct ipc_link_context *context)
{
return ipc_sm_state(IPC_SM_ACT_TX_BOOTREQ);
}
static u8 sm_act_tx_bootreq_enter(u8 event, struct ipc_link_context *context)
{
int err;
/* slave is ready - start the spi transfer */
dev_dbg(&context->sdev->dev,
"link %d: starting spi tfr\n", context->link->id);
err = spi_async(context->sdev, &context->spi_message);
if (err < 0) {
dev_err(&context->sdev->dev,
"link %d error: spi tfr start failed, error %d\n",
context->link->id, err);
return IPC_SM_RUN_ABORT;
}
return IPC_SM_RUN_NONE;
}
static const struct ipc_sm_state *sm_act_tx_bootreq_exit(u8 event,
struct ipc_link_context *context)
{
return ipc_sm_state(IPC_SM_SLW_RX_BOOTRESP);
}
static u8 sm_slw_rx_bootresp_enter(u8 event, struct ipc_link_context *context)
{
/* prepare to receive BOOTRESP frame header */
ipc_util_spi_message_prepare(context, NULL,
&context->cmd, IPC_L1_HDR_SIZE);
/* slave might already have signalled ready to transmit */
if (atomic_read(&context->state_int)) {
dev_dbg(&context->sdev->dev,
"link %d: slave has already signalled ready\n",
context->link->id);
ipc_util_activate_ss(context);
return IPC_SM_RUN_SLAVE_IRQ;
} else {
ipc_util_activate_ss_with_tmo(context);
return IPC_SM_RUN_NONE;
}
}
/*
 * Exit SLW_RX_BOOTRESP: on comms timeout assume the earlier ready
 * indication was noise and restart the handshake; on slave-irq proceed
 * to the active receive state.
 */
static const struct ipc_sm_state *sm_slw_rx_bootresp_exit(u8 event,
	struct ipc_link_context *context)
{
	if (event == IPC_SM_RUN_COMMS_TMO) {
		/*
		 * Modem timeout: was it really ready or just noise?
		 * Revert to waiting for handshake to start.
		 */
		ipc_util_deactivate_ss(context);
		return ipc_sm_state(IPC_SM_SLW_TX_BOOTREQ);
	} else {
		return ipc_sm_state(IPC_SM_ACT_RX_BOOTRESP);
	}
}
/*
 * Enter ACT_RX_BOOTRESP: slave is ready, start the asynchronous SPI
 * transfer that clocks in the BOOTRESP header.  Returns
 * IPC_SM_RUN_ABORT if the transfer could not be queued.
 */
static u8 sm_act_rx_bootresp_enter(u8 event, struct ipc_link_context *context)
{
	int spi_err;

	dev_dbg(&context->sdev->dev,
		"link %d: starting spi tfr\n", context->link->id);
	spi_err = spi_async(context->sdev, &context->spi_message);
	if (spi_err >= 0)
		return IPC_SM_RUN_NONE;

	dev_err(&context->sdev->dev,
		"link %d error: spi tfr start failed, error %d\n",
		context->link->id, spi_err);
	return IPC_SM_RUN_ABORT;
}
/*
 * Exit ACT_RX_BOOTRESP: validate the received header.  A BOOTRESP
 * completes the boot handshake (with a version compatibility warning if
 * the modem is older than IPC_DRIVER_MODEM_MIN_VER); anything else means
 * we are not talking to the expected slave and the link halts.
 */
static const struct ipc_sm_state *sm_act_rx_bootresp_exit(u8 event,
	struct ipc_link_context *context)
{
	u8 cmd_type = ipc_util_get_l1_cmd(context->cmd);
	u8 modem_ver;
	dev_dbg(&context->sdev->dev,
		"link %d: RX HEADER %08x (type %d)\n",
		context->link->id, context->cmd, cmd_type);
	if (cmd_type == CMD_BOOTRESP) {
		modem_ver = ipc_util_get_l1_bootresp_ver(context->cmd);
		dev_info(&context->sdev->dev,
			"link %d: boot sync done; "
			"APE version %02x, MODEM version %02x\n",
			context->link->id, IPC_DRIVER_VERSION, modem_ver);
		/* check for minimum required modem version */
		if (modem_ver < IPC_DRIVER_MODEM_MIN_VER) {
			/* warn only - protocol continues regardless */
			dev_warn(&context->sdev->dev,
				"link %d warning: modem version mismatch! "
				"minimum required version is %02x\n",
				context->link->id,
				IPC_DRIVER_MODEM_MIN_VER);
		}
		return ipc_sm_state(IPC_SM_WAIT_HANDSHAKE_INACTIVE);
	} else {
		/* invalid response... this is not our slave */
		dev_err(&context->sdev->dev,
			"link %d error: expected %x (BOOTRESP), received %x.\n",
			context->link->id,
			CMD_BOOTRESP,
			cmd_type);
		return ipc_sm_state(IPC_SM_HALT);
	}
}
/*
 * The driver protocol state machine.  Each state carries an enter/exit
 * handler pair and a bitmask of the run events that are valid while the
 * state is current (checked by ipc_sm_valid_for_state()).  SLW_* states
 * wait for the slave, ACT_* states have an SPI transfer in flight.
 */
static const struct ipc_sm_state state_machine[IPC_SM_STATE_ID_NBR] = {
	[IPC_SM_INIT] = {
		.id = IPC_SM_INIT,
		.enter = sm_init_enter,
		.exit = sm_init_exit,
		.events = IPC_SM_RUN_INIT | IPC_SM_RUN_RESET
	},
	[IPC_SM_HALT] = {
		.id = IPC_SM_HALT,
		.enter = sm_halt_enter,
		.exit = sm_halt_exit,
		.events = IPC_SM_RUN_RESET
	},
	[IPC_SM_RESET] = {
		.id = IPC_SM_RESET,
		.enter = sm_reset_enter,
		.exit = sm_reset_exit,
		.events = IPC_SM_RUN_RESET
	},
	[IPC_SM_WAIT_SLAVE_STABLE] = {
		.id = IPC_SM_WAIT_SLAVE_STABLE,
		.enter = sm_wait_slave_stable_enter,
		.exit = sm_wait_slave_stable_exit,
		.events = IPC_SM_RUN_STABLE_TMO
	},
	[IPC_SM_WAIT_HANDSHAKE_INACTIVE] = {
		.id = IPC_SM_WAIT_HANDSHAKE_INACTIVE,
		.enter = sm_wait_handshake_inactive_enter,
		.exit = sm_wait_handshake_inactive_exit,
		.events = IPC_SM_RUN_STABLE_TMO
	},
	[IPC_SM_SLW_TX_BOOTREQ] = {
		.id = IPC_SM_SLW_TX_BOOTREQ,
		.enter = sm_slw_tx_bootreq_enter,
		.exit = sm_slw_tx_bootreq_exit,
		.events = IPC_SM_RUN_SLAVE_IRQ
	},
	[IPC_SM_ACT_TX_BOOTREQ] = {
		.id = IPC_SM_ACT_TX_BOOTREQ,
		.enter = sm_act_tx_bootreq_enter,
		.exit = sm_act_tx_bootreq_exit,
		.events = IPC_SM_RUN_TFR_COMPLETE
	},
	[IPC_SM_SLW_RX_BOOTRESP] = {
		.id = IPC_SM_SLW_RX_BOOTRESP,
		.enter = sm_slw_rx_bootresp_enter,
		.exit = sm_slw_rx_bootresp_exit,
		.events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO
	},
	[IPC_SM_ACT_RX_BOOTRESP] = {
		.id = IPC_SM_ACT_RX_BOOTRESP,
		.enter = sm_act_rx_bootresp_enter,
		.exit = sm_act_rx_bootresp_exit,
		.events = IPC_SM_RUN_TFR_COMPLETE
	},
	[IPC_SM_IDL] = {
		.id = IPC_SM_IDL,
		.enter = sm_idl_enter,
		.exit = sm_idl_exit,
		.events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_TX_REQ |
			IPC_SM_RUN_RESET
	},
	[IPC_SM_SLW_TX_WR_CMD] = {
		.id = IPC_SM_SLW_TX_WR_CMD,
		.enter = sm_slw_tx_wr_cmd_enter,
		.exit = sm_slw_tx_wr_cmd_exit,
		.events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO |
			IPC_SM_RUN_RESET
	},
	[IPC_SM_ACT_TX_WR_CMD] = {
		.id = IPC_SM_ACT_TX_WR_CMD,
		.enter = sm_act_tx_wr_cmd_enter,
		.exit = sm_act_tx_wr_cmd_exit,
		.events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
	},
	[IPC_SM_SLW_TX_WR_DAT] = {
		.id = IPC_SM_SLW_TX_WR_DAT,
		.enter = sm_slw_tx_wr_dat_enter,
		.exit = sm_slw_tx_wr_dat_exit,
		.events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO |
			IPC_SM_RUN_RESET
	},
	[IPC_SM_ACT_TX_WR_DAT] = {
		.id = IPC_SM_ACT_TX_WR_DAT,
		.enter = sm_act_tx_wr_dat_enter,
		.exit = sm_act_tx_wr_dat_exit,
		.events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
	},
	[IPC_SM_SLW_TX_RD_CMD] = {
		.id = IPC_SM_SLW_TX_RD_CMD,
		.enter = sm_slw_tx_rd_cmd_enter,
		.exit = sm_slw_tx_rd_cmd_exit,
		.events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO |
			IPC_SM_RUN_RESET
	},
	[IPC_SM_ACT_TX_RD_CMD] = {
		.id = IPC_SM_ACT_TX_RD_CMD,
		.enter = sm_act_tx_rd_cmd_enter,
		.exit = sm_act_tx_rd_cmd_exit,
		.events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
	},
	[IPC_SM_SLW_RX_WR_CMD] = {
		.id = IPC_SM_SLW_RX_WR_CMD,
		.enter = sm_slw_rx_wr_cmd_enter,
		.exit = sm_slw_rx_wr_cmd_exit,
		.events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO |
			IPC_SM_RUN_RESET
	},
	[IPC_SM_ACT_RX_WR_CMD] = {
		.id = IPC_SM_ACT_RX_WR_CMD,
		.enter = sm_act_rx_wr_cmd_enter,
		.exit = sm_act_rx_wr_cmd_exit,
		.events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
	},
	[IPC_SM_ACT_RX_WR_DAT] = {
		.id = IPC_SM_ACT_RX_WR_DAT,
		.enter = sm_act_rx_wr_dat_enter,
		.exit = sm_act_rx_wr_dat_exit,
		.events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
	},
	/* audio link states below */
	[IPC_SM_INIT_AUD] = {
		.id = IPC_SM_INIT_AUD,
		.enter = sm_init_enter,
		.exit = sm_init_aud_exit,
		.events = IPC_SM_RUN_INIT | IPC_SM_RUN_RESET
	},
	[IPC_SM_HALT_AUD] = {
		.id = IPC_SM_HALT_AUD,
		.enter = sm_halt_enter,
		.exit = sm_halt_aud_exit,
		.events = IPC_SM_RUN_RESET
	},
	[IPC_SM_RESET_AUD] = {
		.id = IPC_SM_RESET_AUD,
		.enter = sm_reset_enter,
		.exit = sm_reset_aud_exit,
		.events = IPC_SM_RUN_RESET
	},
	[IPC_SM_IDL_AUD] = {
		.id = IPC_SM_IDL_AUD,
		.enter = sm_idl_enter,
		.exit = sm_idl_aud_exit,
		.events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_TX_REQ |
			IPC_SM_RUN_RESET
	},
	[IPC_SM_SLW_TX_WR_DAT_AUD] = {
		.id = IPC_SM_SLW_TX_WR_DAT_AUD,
		.enter = sm_slw_tx_wr_dat_aud_enter,
		.exit = sm_slw_tx_wr_dat_aud_exit,
		.events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO |
			IPC_SM_RUN_RESET
	},
	[IPC_SM_ACT_TX_WR_DAT_AUD] = {
		.id = IPC_SM_ACT_TX_WR_DAT_AUD,
		.enter = sm_act_tx_wr_dat_enter,
		.exit = sm_act_tx_wr_dat_aud_exit,
		.events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
	},
	[IPC_SM_SLW_RX_WR_DAT_AUD] = {
		.id = IPC_SM_SLW_RX_WR_DAT_AUD,
		.enter = sm_slw_rx_wr_dat_aud_enter,
		.exit = sm_slw_rx_wr_dat_aud_exit,
		.events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO |
			IPC_SM_RUN_RESET
	},
	[IPC_SM_ACT_RX_WR_DAT_AUD] = {
		.id = IPC_SM_ACT_RX_WR_DAT_AUD,
		.enter = sm_act_rx_wr_dat_aud_enter,
		.exit = sm_act_rx_wr_dat_aud_exit,
		.events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
	}
};
const struct ipc_sm_state *ipc_sm_idle_state(struct ipc_link_context *context)
{
if (context->link->id == IPC_LINK_AUDIO)
return ipc_sm_state(IPC_SM_IDL_AUD);
else
return ipc_sm_state(IPC_SM_IDL);
}
const struct ipc_sm_state *ipc_sm_init_state(struct ipc_link_context *context)
{
if (context->link->id == IPC_LINK_AUDIO)
return ipc_sm_state(IPC_SM_INIT_AUD);
else
return ipc_sm_state(IPC_SM_INIT);
}
/* Look up a state descriptor by id; ids outside the table are a bug. */
const struct ipc_sm_state *ipc_sm_state(u8 id)
{
	BUG_ON(id >= IPC_SM_STATE_ID_NBR);
	return &state_machine[id];
}
/*
 * An event is valid for a state when every bit it carries is present in
 * the state's accepted-events mask.
 */
bool ipc_sm_valid_for_state(u8 event, const struct ipc_sm_state *state)
{
	return (event & ~state->events) == 0;
}
/*
 * Drive the protocol state machine for one external event.  The machine
 * keeps stepping (exit current state, enter next) while each state's
 * enter handler returns a further event; it stops when a handler returns
 * IPC_SM_RUN_NONE.  IPC_SM_RUN_ABORT short-circuits back to idle.
 *
 * Caller must hold context->sm_lock (see ipc_sm_kick()).
 */
static void state_machine_run(struct ipc_link_context *context, u8 event)
{
	struct modem_m6718_spi_link_platform_data *link;
	struct spi_device *sdev;
	const struct ipc_sm_state *cur_state;
	/*
	 * Sanity checking must come before any dereference: the previous
	 * version read context->link and context->state into locals first,
	 * which made the NULL test on context useless (and undefined
	 * behaviour when context really was NULL).
	 */
	if (context == NULL || context->link == NULL ||
			context->state == NULL) {
		pr_err("M6718 IPC protocol error: "
			"inconsistent driver state, ignoring event\n");
		return;
	}
	link = context->link;
	sdev = context->sdev;
	cur_state = context->state;
	dev_dbg(&sdev->dev, "link %d: RUNNING in %s (%s)\n", link->id,
		ipc_dbg_state_id(cur_state), ipc_dbg_event(event));
	/* valid trigger event for current state? */
	if (!ipc_sm_valid_for_state(event, cur_state)) {
		dev_dbg(&sdev->dev,
			"link %d: ignoring invalid event\n", link->id);
		ipc_dbg_ignoring_event(context, event);
		return;
	}
	ipc_dbg_handling_event(context, event);
	/* run machine while state entry functions trigger new changes */
	do {
		/* diagnostics only: slave should be ready on its own irq */
		if (event == IPC_SM_RUN_SLAVE_IRQ &&
			!ipc_util_int_is_active(context)) {
			dev_err(&sdev->dev,
				"link %d error: slave is not ready! (%s)",
				link->id,
				ipc_dbg_state_id(cur_state));
		}
		if (event == IPC_SM_RUN_ABORT) {
			dev_err(&sdev->dev,
				"link %d error: abort event\n", link->id);
			/* reset state to idle */
			context->state = ipc_sm_idle_state(context);
			break;
		} else {
			/* exit current state */
			dev_dbg(&sdev->dev, "link %d: exit %s (%s)\n",
				link->id, ipc_dbg_state_id(cur_state),
				ipc_dbg_event(event));
			cur_state = cur_state->exit(event, context);
			context->state = cur_state;
		}
		/* reset state of slave irq to prepare for next event */
		if (event == IPC_SM_RUN_SLAVE_IRQ)
			atomic_set(&context->state_int, 0);
		/* enter new state */
		dev_dbg(&sdev->dev, "link %d: enter %s (%s)\n", link->id,
			ipc_dbg_state_id(cur_state), ipc_dbg_event(event));
		event = context->state->enter(event, context);
		ipc_dbg_entering_state(context);
	} while (event != IPC_SM_RUN_NONE);
	dev_dbg(&sdev->dev, "link %d: STOPPED in %s\n", link->id,
		ipc_dbg_state_id(cur_state));
}
/*
 * External entry point for feeding an event into the state machine.
 * Performs per-event bookkeeping (timers, slave-irq latch, diagnostics)
 * under sm_lock, then runs the machine unless the link is suspended.
 */
void ipc_sm_kick(u8 event, struct ipc_link_context *context)
{
	unsigned long flags;
	struct modem_m6718_spi_link_platform_data *link = context->link;
	struct spi_device *sdev = context->sdev;
	struct spi_message *msg = &context->spi_message;
	u8 i;
	spin_lock_irqsave(&context->sm_lock, flags);
	switch (event) {
	case IPC_SM_RUN_SLAVE_IRQ:
		dev_dbg(&sdev->dev,
			"link %d EVENT: slave-ready irq\n", link->id);
		/* slave answered in time - stop the comms watchdog */
		del_timer(&context->comms_timer);
		/* latch current irq line level for states entered later */
		atomic_set(&context->state_int,
			ipc_util_int_is_active(context));
		break;
	case IPC_SM_RUN_TFR_COMPLETE:
		dev_dbg(&sdev->dev,
			"link %d EVENT: spi tfr complete (status %d len %d)\n",
			link->id, msg->status, msg->actual_length);
		ipc_dbg_dump_spi_tfr(context);
		break;
	case IPC_SM_RUN_COMMS_TMO:
	{
		char *statestr;
		struct ipc_link_context *contexts = l1_context.device_context;
		statestr = ipc_dbg_link_state_str(context);
		dev_err(&sdev->dev,
			"link %d EVENT: modem comms timeout (%s)!\n",
			link->id, ipc_dbg_state_id(context->state));
		if (statestr != NULL) {
			dev_err(&sdev->dev, "%s", statestr);
			kfree(statestr);
		}
		/* cancel all link timeout timers except this one */
		for (i = 0; i < IPC_NBR_SUPPORTED_SPI_LINKS; i++)
			if (contexts[i].link->id != link->id)
				del_timer(&contexts[i].comms_timer);
		break;
	}
	case IPC_SM_RUN_STABLE_TMO:
		dev_dbg(&sdev->dev,
			"link %d EVENT: slave-stable timeout\n", link->id);
		break;
	case IPC_SM_RUN_RESET:
		dev_dbg(&sdev->dev,
			"link %d EVENT: reset\n", link->id);
		del_timer(&context->comms_timer);
		break;
	default:
		break;
	}
	/* a suspended link queues no work; resume will re-kick it */
	if (!ipc_util_link_is_suspended(context))
		state_machine_run(context, event);
	else
		dev_dbg(&sdev->dev,
			"link %d is suspended, waiting for resume\n", link->id);
	spin_unlock_irqrestore(&context->sm_lock, flags);
}
|
janrinze/snowballkernel
|
drivers/modem/m6718_spi/statemachine.c
|
C
|
gpl-2.0
| 40,413
|
/*
* Virtual Raw MIDI client on Sequencer
*
* Copyright (c) 2000 by Takashi Iwai <tiwai@suse.de>,
* Jaroslav Kysela <perex@perex.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/*
* Virtual Raw MIDI client
*
* The virtual rawmidi client is a sequencer client which associate
* a rawmidi device file. The created rawmidi device file can be
* accessed as a normal raw midi, but its MIDI source and destination
* are arbitrary. For example, a user-client software synth connected
* to this port can be used as a normal midi device as well.
*
* The virtual rawmidi device accepts also multiple opens. Each file
* has its own input buffer, so that no conflict would occur. The drain
* of input/output buffer acts only to the local buffer.
*
*/
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/rawmidi.h>
#include <sound/info.h>
#include <sound/control.h>
#include <sound/minors.h>
#include <sound/seq_kernel.h>
#include <sound/seq_midi_event.h>
#include <sound/seq_virmidi.h>
MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("Virtual Raw MIDI client on Sequencer");
MODULE_LICENSE("GPL");
/*
 * Initialize a sequencer event record for the given virmidi port.
 * The destination depends on the sequencer mode: dispatch mode targets
 * all subscribers, attach mode loops back to the port itself.
 */
static void snd_virmidi_init_event(struct snd_virmidi *vmidi,
				   struct snd_seq_event *ev)
{
	memset(ev, 0, sizeof(*ev));
	ev->source.port = vmidi->port;
	if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH) {
		ev->dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
	} else if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_ATTACH) {
		/* FIXME: source and destination are same - not good.. */
		ev->dest.client = vmidi->client;
		ev->dest.port = vmidi->port;
	}
	ev->type = SNDRV_SEQ_EVENT_NONE;
}
/*
 * Decode an incoming sequencer event and push the resulting MIDI bytes
 * into the read buffer of every opened (and triggered) input file on
 * this virmidi device.  Iteration is protected by filelist_lock.
 */
static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
					 struct snd_seq_event *ev)
{
	struct snd_virmidi *vmidi;
	unsigned char msg[4];
	int len;
	read_lock(&rdev->filelist_lock);
	list_for_each_entry(vmidi, &rdev->filelist, list) {
		/* skip files whose input stream is not running */
		if (!vmidi->trigger)
			continue;
		if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
			/* sysex payload must be variable-length encoded */
			if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
				continue;
			snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)snd_rawmidi_receive, vmidi->substream);
		} else {
			/* regular event: decode to raw MIDI bytes first */
			len = snd_midi_event_decode(vmidi->parser, msg, sizeof(msg), ev);
			if (len > 0)
				snd_rawmidi_receive(vmidi->substream, msg, len);
		}
	}
	read_unlock(&rdev->filelist_lock);
	return 0;
}
/*
 * receive an event from the remote virmidi port
 *
 * for rawmidi inputs, you can call this function from the event
 * handler of a remote port which is attached to the virmidi via
 * SNDRV_VIRMIDI_SEQ_ATTACH.
 */
#if 0
/* currently compiled out - kept as reference for external callers */
int snd_virmidi_receive(struct snd_rawmidi *rmidi, struct snd_seq_event *ev)
{
	struct snd_virmidi_dev *rdev;
	rdev = rmidi->private_data;
	return snd_virmidi_dev_receive_event(rdev, ev);
}
#endif /* 0 */
/*
 * Sequencer event-input callback for the virmidi port.  Events are
 * dropped while no input file has claimed the device (USE flag clear).
 */
static int snd_virmidi_event_input(struct snd_seq_event *ev, int direct,
				   void *private_data, int atomic, int hop)
{
	struct snd_virmidi_dev *rdev = private_data;

	if (!(rdev->flags & SNDRV_VIRMIDI_USE))
		return 0; /* ignored */
	return snd_virmidi_dev_receive_event(rdev, ev);
}
/*
 * Start/stop the rawmidi input stream: simply record the trigger flag,
 * which gates delivery in snd_virmidi_dev_receive_event().
 */
static void snd_virmidi_input_trigger(struct snd_rawmidi_substream *substream, int up)
{
	struct snd_virmidi *vmidi = substream->runtime->private_data;

	vmidi->trigger = up ? 1 : 0;
}
/*
 * Start/stop the rawmidi output stream.  On trigger-up, drain the
 * rawmidi output buffer: encode raw MIDI bytes into sequencer events
 * and dispatch them to the port's destination.  A pending (incomplete)
 * event is flushed first.  In dispatch mode with no subscribers the
 * data is acknowledged and discarded.
 */
static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream, int up)
{
	struct snd_virmidi *vmidi = substream->runtime->private_data;
	int count, res;
	unsigned char buf[32], *pbuf;
	if (up) {
		vmidi->trigger = 1;
		if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH &&
		    !(vmidi->rdev->flags & SNDRV_VIRMIDI_SUBSCRIBE)) {
			/* no readers: drop the whole buffered output */
			snd_rawmidi_transmit_ack(substream, substream->runtime->buffer_size - substream->runtime->avail);
			return;		/* ignored */
		}
		/* flush an event left over from a previous partial encode */
		if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
			if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
				return;
			vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
		}
		while (1) {
			/* peek (not read) so unconsumed bytes stay queued */
			count = snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
			if (count <= 0)
				break;
			pbuf = buf;
			while (count > 0) {
				res = snd_midi_event_encode(vmidi->parser, pbuf, count, &vmidi->event);
				if (res < 0) {
					snd_midi_event_reset_encode(vmidi->parser);
					continue;
				}
				/* ack only the bytes actually consumed */
				snd_rawmidi_transmit_ack(substream, res);
				pbuf += res;
				count -= res;
				if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
					if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
						return;
					vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
				}
			}
		}
	} else {
		vmidi->trigger = 0;
	}
}
/*
 * Open a rawmidi input handle: allocate the per-file context with its
 * own MIDI event parser and link it into the device's file list (under
 * the write lock, since the receive path iterates that list).
 */
static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
{
	struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
	struct snd_rawmidi_runtime *runtime = substream->runtime;
	struct snd_virmidi *vmidi;
	unsigned long flags;
	vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL);
	if (vmidi == NULL)
		return -ENOMEM;
	vmidi->substream = substream;
	/* input side needs no encoder buffer, hence size 0 */
	if (snd_midi_event_new(0, &vmidi->parser) < 0) {
		kfree(vmidi);
		return -ENOMEM;
	}
	vmidi->seq_mode = rdev->seq_mode;
	vmidi->client = rdev->client;
	vmidi->port = rdev->port;
	runtime->private_data = vmidi;
	write_lock_irqsave(&rdev->filelist_lock, flags);
	list_add_tail(&vmidi->list, &rdev->filelist);
	write_unlock_irqrestore(&rdev->filelist_lock, flags);
	vmidi->rdev = rdev;
	return 0;
}
/*
 * Open a rawmidi output handle: allocate the per-file context with an
 * encoder parser (sized for sysex) and a pre-initialized event record.
 * Output files are not kept on the device file list.
 */
static int snd_virmidi_output_open(struct snd_rawmidi_substream *substream)
{
	struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
	struct snd_rawmidi_runtime *runtime = substream->runtime;
	struct snd_virmidi *vmidi;
	vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL);
	if (vmidi == NULL)
		return -ENOMEM;
	vmidi->substream = substream;
	if (snd_midi_event_new(MAX_MIDI_EVENT_BUF, &vmidi->parser) < 0) {
		kfree(vmidi);
		return -ENOMEM;
	}
	vmidi->seq_mode = rdev->seq_mode;
	vmidi->client = rdev->client;
	vmidi->port = rdev->port;
	snd_virmidi_init_event(vmidi, &vmidi->event);
	vmidi->rdev = rdev;
	runtime->private_data = vmidi;
	return 0;
}
/*
* close rawmidi handle for input
*/
static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
{
struct snd_virmidi *vmidi = substream->runtime->private_data;
snd_midi_event_free(vmidi->parser);
list_del(&vmidi->list);
substream->runtime->private_data = NULL;
kfree(vmidi);
return 0;
}
/*
* close rawmidi handle for output
*/
static int snd_virmidi_output_close(struct snd_rawmidi_substream *substream)
{
struct snd_virmidi *vmidi = substream->runtime->private_data;
snd_midi_event_free(vmidi->parser);
substream->runtime->private_data = NULL;
kfree(vmidi);
return 0;
}
/*
 * Subscribe callback: a reader attached to the port, so output to the
 * rawmidi device becomes meaningful.  Pins the card module while the
 * subscription is active.
 */
static int snd_virmidi_subscribe(void *private_data,
				 struct snd_seq_port_subscribe *info)
{
	struct snd_virmidi_dev *rdev = private_data;

	if (!try_module_get(rdev->card->module))
		return -EFAULT;
	rdev->flags |= SNDRV_VIRMIDI_SUBSCRIBE;
	return 0;
}
/*
 * Unsubscribe callback: the reader went away; clear the flag so output
 * is discarded again, and drop the module reference taken on subscribe.
 */
static int snd_virmidi_unsubscribe(void *private_data,
				   struct snd_seq_port_subscribe *info)
{
	struct snd_virmidi_dev *rdev = private_data;

	rdev->flags &= ~SNDRV_VIRMIDI_SUBSCRIBE;
	module_put(rdev->card->module);
	return 0;
}
/*
 * Use callback: a writer attached to the port, so events may now be
 * delivered to the rawmidi input side.  Pins the card module.
 */
static int snd_virmidi_use(void *private_data,
			   struct snd_seq_port_subscribe *info)
{
	struct snd_virmidi_dev *rdev = private_data;

	if (!try_module_get(rdev->card->module))
		return -EFAULT;
	rdev->flags |= SNDRV_VIRMIDI_USE;
	return 0;
}
/*
 * Unuse callback: the writer detached; stop delivering input events and
 * drop the module reference taken in snd_virmidi_use().
 */
static int snd_virmidi_unuse(void *private_data,
			     struct snd_seq_port_subscribe *info)
{
	struct snd_virmidi_dev *rdev = private_data;

	rdev->flags &= ~SNDRV_VIRMIDI_USE;
	module_put(rdev->card->module);
	return 0;
}
/*
 * Rawmidi stream callbacks: one ops table per direction, wired into the
 * rawmidi core by snd_virmidi_new().
 */
static struct snd_rawmidi_ops snd_virmidi_input_ops = {
	.open = snd_virmidi_input_open,
	.close = snd_virmidi_input_close,
	.trigger = snd_virmidi_input_trigger,
};
static struct snd_rawmidi_ops snd_virmidi_output_ops = {
	.open = snd_virmidi_output_open,
	.close = snd_virmidi_output_close,
	.trigger = snd_virmidi_output_trigger,
};
/*
 * Create the sequencer kernel client and a fully-capable duplex port
 * for this virmidi device.  Idempotent: returns 0 immediately if a
 * client already exists.  On failure the client is torn down again and
 * rdev->client reset to -1.
 */
static int snd_virmidi_dev_attach_seq(struct snd_virmidi_dev *rdev)
{
	int client;
	struct snd_seq_port_callback pcallbacks;
	struct snd_seq_port_info *pinfo;
	int err;
	if (rdev->client >= 0)
		return 0;
	/* heap-allocated: snd_seq_port_info is too large for the stack */
	pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL);
	if (!pinfo) {
		err = -ENOMEM;
		goto __error;
	}
	client = snd_seq_create_kernel_client(rdev->card, rdev->device,
					      "%s %d-%d", rdev->rmidi->name,
					      rdev->card->number,
					      rdev->device);
	if (client < 0) {
		err = client;
		goto __error;
	}
	rdev->client = client;
	/* create a port */
	pinfo->addr.client = client;
	sprintf(pinfo->name, "VirMIDI %d-%d", rdev->card->number, rdev->device);
	/* set all capabilities */
	pinfo->capability |= SNDRV_SEQ_PORT_CAP_WRITE | SNDRV_SEQ_PORT_CAP_SYNC_WRITE | SNDRV_SEQ_PORT_CAP_SUBS_WRITE;
	pinfo->capability |= SNDRV_SEQ_PORT_CAP_READ | SNDRV_SEQ_PORT_CAP_SYNC_READ | SNDRV_SEQ_PORT_CAP_SUBS_READ;
	pinfo->capability |= SNDRV_SEQ_PORT_CAP_DUPLEX;
	pinfo->type = SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC
		| SNDRV_SEQ_PORT_TYPE_SOFTWARE
		| SNDRV_SEQ_PORT_TYPE_PORT;
	pinfo->midi_channels = 16;
	memset(&pcallbacks, 0, sizeof(pcallbacks));
	pcallbacks.owner = THIS_MODULE;
	pcallbacks.private_data = rdev;
	pcallbacks.subscribe = snd_virmidi_subscribe;
	pcallbacks.unsubscribe = snd_virmidi_unsubscribe;
	pcallbacks.use = snd_virmidi_use;
	pcallbacks.unuse = snd_virmidi_unuse;
	pcallbacks.event_input = snd_virmidi_event_input;
	pinfo->kernel = &pcallbacks;
	err = snd_seq_kernel_client_ctl(client, SNDRV_SEQ_IOCTL_CREATE_PORT, pinfo);
	if (err < 0) {
		/* undo client creation so a later attach can retry */
		snd_seq_delete_kernel_client(client);
		rdev->client = -1;
		goto __error;
	}
	rdev->port = pinfo->addr.port;
	err = 0; /* success */
 __error:
	kfree(pinfo);
	return err;
}
/*
 * Release the sequencer kernel client created by
 * snd_virmidi_dev_attach_seq(), if any.  Safe to call repeatedly.
 */
static void snd_virmidi_dev_detach_seq(struct snd_virmidi_dev *rdev)
{
	if (rdev->client < 0)
		return;
	snd_seq_delete_kernel_client(rdev->client);
	rdev->client = -1;
}
/*
 * Rawmidi dev_register hook: in dispatch mode create our own sequencer
 * client; in attach mode the caller must have supplied a client before
 * registration.  Any other seq_mode is a setup error.
 */
static int snd_virmidi_dev_register(struct snd_rawmidi *rmidi)
{
	struct snd_virmidi_dev *rdev = rmidi->private_data;
	int err;
	switch (rdev->seq_mode) {
	case SNDRV_VIRMIDI_SEQ_DISPATCH:
		err = snd_virmidi_dev_attach_seq(rdev);
		if (err < 0)
			return err;
		break;
	case SNDRV_VIRMIDI_SEQ_ATTACH:
		/* client 0 (system client) cannot be attached to */
		if (rdev->client == 0)
			return -EINVAL;
		/* should check presence of port more strictly.. */
		break;
	default:
		snd_printk(KERN_ERR "seq_mode is not set: %d\n", rdev->seq_mode);
		return -EINVAL;
	}
	return 0;
}
/*
 * Rawmidi dev_unregister hook: only dispatch mode owns a sequencer
 * client that needs tearing down; attach mode borrows the caller's.
 */
static int snd_virmidi_dev_unregister(struct snd_rawmidi *rmidi)
{
	struct snd_virmidi_dev *rdev = rmidi->private_data;

	if (rdev->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH)
		snd_virmidi_dev_detach_seq(rdev);
	return 0;
}
/*
 * Global rawmidi lifecycle hooks for the virmidi device.
 */
static struct snd_rawmidi_global_ops snd_virmidi_global_ops = {
	.dev_register = snd_virmidi_dev_register,
	.dev_unregister = snd_virmidi_dev_unregister,
};
/*
 * private_free hook: release the per-device context allocated in
 * snd_virmidi_new() when the rawmidi device is destroyed.
 */
static void snd_virmidi_free(struct snd_rawmidi *rmidi)
{
	kfree(rmidi->private_data);
}
/*
 * Create a new virtual rawmidi device on the given card.
 *
 * Allocates the rawmidi device (16 substreams each way), its private
 * context, and wires up the ops tables.  The sequencer client is not
 * created here; that happens at device registration (dispatch mode).
 * On success *rrmidi points at the new device; on failure everything
 * allocated so far is released.
 */
/* exported */
int snd_virmidi_new(struct snd_card *card, int device, struct snd_rawmidi **rrmidi)
{
	struct snd_rawmidi *rmidi;
	struct snd_virmidi_dev *rdev;
	int err;
	*rrmidi = NULL;
	if ((err = snd_rawmidi_new(card, "VirMidi", device,
				   16,	/* may be configurable */
				   16,	/* may be configurable */
				   &rmidi)) < 0)
		return err;
	strcpy(rmidi->name, rmidi->id);
	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (rdev == NULL) {
		snd_device_free(card, rmidi);
		return -ENOMEM;
	}
	rdev->card = card;
	rdev->rmidi = rmidi;
	rdev->device = device;
	rdev->client = -1;	/* no sequencer client yet */
	rwlock_init(&rdev->filelist_lock);
	INIT_LIST_HEAD(&rdev->filelist);
	rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH;
	rmidi->private_data = rdev;
	rmidi->private_free = snd_virmidi_free;
	rmidi->ops = &snd_virmidi_global_ops;
	snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_virmidi_input_ops);
	snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_virmidi_output_ops);
	rmidi->info_flags = SNDRV_RAWMIDI_INFO_INPUT |
			    SNDRV_RAWMIDI_INFO_OUTPUT |
			    SNDRV_RAWMIDI_INFO_DUPLEX;
	*rrmidi = rmidi;
	return 0;
}
/*
 * ENTRY functions - the module has no global state to set up; devices
 * are created on demand via the exported snd_virmidi_new().
 */
static int __init alsa_virmidi_init(void)
{
	return 0;
}
static void __exit alsa_virmidi_exit(void)
{
}
module_init(alsa_virmidi_init)
module_exit(alsa_virmidi_exit)
EXPORT_SYMBOL(snd_virmidi_new);
|
Jackeagle/android_kernel_sony_c2305
|
sound/core/seq/seq_virmidi.c
|
C
|
gpl-2.0
| 14,463
|
/*
* Copyright (C) 2014 Fraunhofer ITWM
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Written by:
* Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de>
*/
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <net/ieee802154.h>
#include <crypto/algapi.h>
#include "mac802154.h"
#include "llsec.h"
static void llsec_key_put(struct mac802154_llsec_key *key);
static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a,
const struct ieee802154_llsec_key_id *b);
static void llsec_dev_free(struct mac802154_llsec_device *dev);
/*
 * Initialize an llsec context: zero everything, set the default key
 * source to all-0xFF (unset), and prepare the key/device/seclevel
 * tables, device hash tables and the context lock.
 */
void mac802154_llsec_init(struct mac802154_llsec *sec)
{
	memset(sec, 0, sizeof(*sec));
	memset(&sec->params.default_key_source, 0xFF, IEEE802154_ADDR_LEN);
	INIT_LIST_HEAD(&sec->table.security_levels);
	INIT_LIST_HEAD(&sec->table.devices);
	INIT_LIST_HEAD(&sec->table.keys);
	hash_init(sec->devices_short);
	hash_init(sec->devices_hw);
	rwlock_init(&sec->lock);
}
/*
 * Tear down an llsec context, freeing all security levels, devices
 * (with their per-device keys) and key entries.  Key crypto material is
 * refcounted, so llsec_key_put() frees it only on the last reference.
 */
void mac802154_llsec_destroy(struct mac802154_llsec *sec)
{
	struct ieee802154_llsec_seclevel *sl, *sn;
	struct ieee802154_llsec_device *dev, *dn;
	struct ieee802154_llsec_key_entry *key, *kn;
	list_for_each_entry_safe(sl, sn, &sec->table.security_levels, list) {
		struct mac802154_llsec_seclevel *msl;
		msl = container_of(sl, struct mac802154_llsec_seclevel, level);
		list_del(&sl->list);
		kfree(msl);
	}
	list_for_each_entry_safe(dev, dn, &sec->table.devices, list) {
		struct mac802154_llsec_device *mdev;
		mdev = container_of(dev, struct mac802154_llsec_device, dev);
		list_del(&dev->list);
		llsec_dev_free(mdev);
	}
	list_for_each_entry_safe(key, kn, &sec->table.keys, list) {
		struct mac802154_llsec_key *mkey;
		mkey = container_of(key->key, struct mac802154_llsec_key, key);
		list_del(&key->list);
		llsec_key_put(mkey);
		kfree(key);
	}
}
int mac802154_llsec_get_params(struct mac802154_llsec *sec,
struct ieee802154_llsec_params *params)
{
read_lock_bh(&sec->lock);
*params = sec->params;
read_unlock_bh(&sec->lock);
return 0;
}
/*
 * Update the security parameters under the write lock.  Only the fields
 * selected by the `changed` bitmask are touched; everything else keeps
 * its previous value.
 */
int mac802154_llsec_set_params(struct mac802154_llsec *sec,
			       const struct ieee802154_llsec_params *params,
			       int changed)
{
	write_lock_bh(&sec->lock);
	if (changed & IEEE802154_LLSEC_PARAM_ENABLED)
		sec->params.enabled = params->enabled;
	if (changed & IEEE802154_LLSEC_PARAM_FRAME_COUNTER)
		sec->params.frame_counter = params->frame_counter;
	if (changed & IEEE802154_LLSEC_PARAM_OUT_LEVEL)
		sec->params.out_level = params->out_level;
	if (changed & IEEE802154_LLSEC_PARAM_OUT_KEY)
		sec->params.out_key = params->out_key;
	if (changed & IEEE802154_LLSEC_PARAM_KEY_SOURCE)
		sec->params.default_key_source = params->default_key_source;
	if (changed & IEEE802154_LLSEC_PARAM_PAN_ID)
		sec->params.pan_id = params->pan_id;
	if (changed & IEEE802154_LLSEC_PARAM_HWADDR)
		sec->params.hwaddr = params->hwaddr;
	if (changed & IEEE802154_LLSEC_PARAM_COORD_HWADDR)
		sec->params.coord_hwaddr = params->coord_hwaddr;
	if (changed & IEEE802154_LLSEC_PARAM_COORD_SHORTADDR)
		sec->params.coord_shortaddr = params->coord_shortaddr;
	write_unlock_bh(&sec->lock);
	return 0;
}
/*
 * Allocate a refcounted key object from a key template.  Sets up one
 * CCM(AES) AEAD transform per supported auth tag size (4/8/16 bytes)
 * plus a CTR(AES) transform for encryption-only mode, all keyed with
 * the template key.  Returns NULL on any allocation or setup failure
 * (all partially-created transforms are released).
 */
static struct mac802154_llsec_key*
llsec_key_alloc(const struct ieee802154_llsec_key *template)
{
	const int authsizes[3] = { 4, 8, 16 };
	struct mac802154_llsec_key *key;
	int i;
	key = kzalloc(sizeof(*key), GFP_KERNEL);
	if (!key)
		return NULL;
	kref_init(&key->ref);
	key->key = *template;
	BUILD_BUG_ON(ARRAY_SIZE(authsizes) != ARRAY_SIZE(key->tfm));
	for (i = 0; i < ARRAY_SIZE(key->tfm); i++) {
		key->tfm[i] = crypto_alloc_aead("ccm(aes)", 0,
						CRYPTO_ALG_ASYNC);
		if (!key->tfm[i])
			goto err_tfm;
		if (crypto_aead_setkey(key->tfm[i], template->key,
				       IEEE802154_LLSEC_KEY_SIZE))
			goto err_tfm;
		if (crypto_aead_setauthsize(key->tfm[i], authsizes[i]))
			goto err_tfm;
	}
	key->tfm0 = crypto_alloc_blkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
	if (!key->tfm0)
		goto err_tfm;
	if (crypto_blkcipher_setkey(key->tfm0, template->key,
				    IEEE802154_LLSEC_KEY_SIZE))
		goto err_tfm0;
	return key;
err_tfm0:
	crypto_free_blkcipher(key->tfm0);
err_tfm:
	/* free only the AEAD transforms that were actually allocated */
	for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
		if (key->tfm[i])
			crypto_free_aead(key->tfm[i]);
	kfree(key);
	return NULL;
}
/*
 * kref release callback: free all crypto transforms and the key object
 * itself once the last reference is dropped.
 */
static void llsec_key_release(struct kref *ref)
{
	struct mac802154_llsec_key *key =
		container_of(ref, struct mac802154_llsec_key, ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
		crypto_free_aead(key->tfm[i]);
	crypto_free_blkcipher(key->tfm0);
	kfree(key);
}
/* Take an additional reference on a key and return it for chaining. */
static struct mac802154_llsec_key*
llsec_key_get(struct mac802154_llsec_key *key)
{
	kref_get(&key->ref);
	return key;
}
/* Drop a key reference; frees the key via llsec_key_release() at zero. */
static void llsec_key_put(struct mac802154_llsec_key *key)
{
	kref_put(&key->ref, llsec_key_release);
}
/*
 * Compare two key identifiers.  Implicit-mode ids match on device
 * address; explicit modes match on index plus, depending on mode, the
 * short or extended key source.
 */
static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a,
			       const struct ieee802154_llsec_key_id *b)
{
	if (a->mode != b->mode)
		return false;
	if (a->mode == IEEE802154_SCF_KEY_IMPLICIT)
		return ieee802154_addr_equal(&a->device_addr, &b->device_addr);
	if (a->id != b->id)
		return false;
	switch (a->mode) {
	case IEEE802154_SCF_KEY_INDEX:
		return true;
	case IEEE802154_SCF_KEY_SHORT_INDEX:
		return a->short_source == b->short_source;
	case IEEE802154_SCF_KEY_HW_INDEX:
		return a->extended_source == b->extended_source;
	}
	/* unknown key mode: treat as not equal */
	return false;
}
/*
 * Add a key under a given id.  If the same AES key material already
 * exists under another id it is shared (refcounted) rather than
 * re-allocated, but only when its frame-type/command-frame masks match,
 * since the 802.15.4 PIB cannot express diverging attributes for one
 * key.  Returns -EEXIST for duplicate ids or conflicting reuse,
 * -EINVAL for inconsistent frame-type settings, -ENOMEM on allocation
 * failure.
 */
int mac802154_llsec_key_add(struct mac802154_llsec *sec,
			    const struct ieee802154_llsec_key_id *id,
			    const struct ieee802154_llsec_key *key)
{
	struct mac802154_llsec_key *mkey = NULL;
	struct ieee802154_llsec_key_entry *pos, *new;
	/* cmd frame ids only make sense if MAC command frames are allowed */
	if (!(key->frame_types & (1 << IEEE802154_FC_TYPE_MAC_CMD)) &&
	    key->cmd_frame_ids)
		return -EINVAL;
	list_for_each_entry(pos, &sec->table.keys, list) {
		if (llsec_key_id_equal(&pos->id, id))
			return -EEXIST;
		if (memcmp(pos->key->key, key->key,
			   IEEE802154_LLSEC_KEY_SIZE))
			continue;
		mkey = container_of(pos->key, struct mac802154_llsec_key, key);
		/* Don't allow multiple instances of the same AES key to have
		 * different allowed frame types/command frame ids, as this is
		 * not possible in the 802.15.4 PIB.
		 */
		if (pos->key->frame_types != key->frame_types ||
		    pos->key->cmd_frame_ids != key->cmd_frame_ids)
			return -EEXIST;
		break;
	}
	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	if (!mkey)
		mkey = llsec_key_alloc(key);
	else
		mkey = llsec_key_get(mkey);
	if (!mkey)
		goto fail;
	new->id = *id;
	new->key = &mkey->key;
	list_add_rcu(&new->list, &sec->table.keys);
	return 0;
fail:
	kfree(new);
	return -ENOMEM;
}
/*
 * Remove the key entry matching the given id.  The entry is unlinked
 * RCU-safely and the key's reference dropped (crypto material is freed
 * only when the last sharer releases it).  Returns -ENOENT if no entry
 * matches.  NOTE(review): the entry itself is not kfree'd here, unlike
 * in mac802154_llsec_destroy() - confirm against upstream.
 */
int mac802154_llsec_key_del(struct mac802154_llsec *sec,
			    const struct ieee802154_llsec_key_id *key)
{
	struct ieee802154_llsec_key_entry *pos;
	list_for_each_entry(pos, &sec->table.keys, list) {
		struct mac802154_llsec_key *mkey;
		mkey = container_of(pos->key, struct mac802154_llsec_key, key);
		if (llsec_key_id_equal(&pos->id, key)) {
			list_del_rcu(&pos->list);
			llsec_key_put(mkey);
			return 0;
		}
	}
	return -ENOENT;
}
/*
 * A short address is usable as a hash key only if it is neither the
 * "unspecified" sentinel nor the broadcast address 0xffff.
 */
static bool llsec_dev_use_shortaddr(__le16 short_addr)
{
	if (short_addr == cpu_to_le16(0xffff))
		return false;
	return short_addr != cpu_to_le16(IEEE802154_ADDR_UNDEF);
}
/* Pack (short_addr, pan_id) into one u32 hash key for devices_short. */
static u32 llsec_dev_hash_short(__le16 short_addr, __le16 pan_id)
{
	return ((__force u16) short_addr) << 16 | (__force u16) pan_id;
}
/* Use the raw 64-bit extended address as the hash key for devices_hw. */
static u64 llsec_dev_hash_long(__le64 hwaddr)
{
	return (__force u64) hwaddr;
}
/*
 * Look up a device by (short address, PAN id) in the short-address hash
 * table.  RCU iteration; the hash key may collide, so both fields are
 * re-checked on each candidate.  Returns NULL if not found.
 */
static struct mac802154_llsec_device*
llsec_dev_find_short(struct mac802154_llsec *sec, __le16 short_addr,
		     __le16 pan_id)
{
	struct mac802154_llsec_device *dev;
	u32 key = llsec_dev_hash_short(short_addr, pan_id);
	hash_for_each_possible_rcu(sec->devices_short, dev, bucket_s, key) {
		if (dev->dev.short_addr == short_addr &&
		    dev->dev.pan_id == pan_id)
			return dev;
	}
	return NULL;
}
/*
 * Look up a device by extended (hardware) address in the hw-address
 * hash table.  Returns NULL if not found.
 */
static struct mac802154_llsec_device*
llsec_dev_find_long(struct mac802154_llsec *sec, __le64 hwaddr)
{
	struct mac802154_llsec_device *dev;
	u64 key = llsec_dev_hash_long(hwaddr);
	hash_for_each_possible_rcu(sec->devices_hw, dev, bucket_hw, key) {
		if (dev->dev.hwaddr == hwaddr)
			return dev;
	}
	return NULL;
}
/* Free @dev together with every per-device key hanging off it. */
static void llsec_dev_free(struct mac802154_llsec_device *dev)
{
	struct ieee802154_llsec_device_key *cursor, *tmp;

	/* release all per-device keys before the device itself */
	list_for_each_entry_safe(cursor, tmp, &dev->dev.keys, list) {
		struct mac802154_llsec_device_key *dk;

		dk = container_of(cursor, struct mac802154_llsec_device_key,
				  devkey);
		list_del(&cursor->list);
		kfree(dk);
	}

	kfree(dev);
}
int mac802154_llsec_dev_add(struct mac802154_llsec *sec,
const struct ieee802154_llsec_device *dev)
{
struct mac802154_llsec_device *entry;
u32 skey = llsec_dev_hash_short(dev->short_addr, dev->pan_id);
u64 hwkey = llsec_dev_hash_long(dev->hwaddr);
BUILD_BUG_ON(sizeof(hwkey) != IEEE802154_ADDR_LEN);
if ((llsec_dev_use_shortaddr(dev->short_addr) &&
llsec_dev_find_short(sec, dev->short_addr, dev->pan_id)) ||
llsec_dev_find_long(sec, dev->hwaddr))
return -EEXIST;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->dev = *dev;
spin_lock_init(&entry->lock);
INIT_LIST_HEAD(&entry->dev.keys);
if (llsec_dev_use_shortaddr(dev->short_addr))
hash_add_rcu(sec->devices_short, &entry->bucket_s, skey);
else
INIT_HLIST_NODE(&entry->bucket_s);
hash_add_rcu(sec->devices_hw, &entry->bucket_hw, hwkey);
list_add_tail_rcu(&entry->dev.list, &sec->table.devices);
return 0;
}
/* RCU callback: free the device once all readers are done with it. */
static void llsec_dev_free_rcu(struct rcu_head *rcu)
{
	struct mac802154_llsec_device *dev;

	dev = container_of(rcu, struct mac802154_llsec_device, rcu);
	llsec_dev_free(dev);
}
/* Remove the device with extended address @device_addr from the table.
 *
 * Unlinks the device from both address hashes AND from the flat
 * table.devices list, then frees it after an RCU grace period.
 * Returns 0 on success, -ENOENT if the device is unknown.
 *
 * Fix: the original only removed the device from the two hashes;
 * dev_add also links it into sec->table.devices, so the entry stayed
 * on that list after being freed (use-after-free for list walkers).
 */
int mac802154_llsec_dev_del(struct mac802154_llsec *sec, __le64 device_addr)
{
	struct mac802154_llsec_device *pos;

	pos = llsec_dev_find_long(sec, device_addr);
	if (!pos)
		return -ENOENT;

	hash_del_rcu(&pos->bucket_s);
	hash_del_rcu(&pos->bucket_hw);
	/* also drop it from the flat device list added in dev_add */
	list_del_rcu(&pos->dev.list);
	call_rcu(&pos->rcu, llsec_dev_free_rcu);

	return 0;
}
/* Find the per-device key with id @key on @dev, or NULL if absent.
 * Walks the RCU-protected per-device key list.
 */
static struct mac802154_llsec_device_key*
llsec_devkey_find(struct mac802154_llsec_device *dev,
		  const struct ieee802154_llsec_key_id *key)
{
	struct ieee802154_llsec_device_key *entry;

	list_for_each_entry_rcu(entry, &dev->dev.keys, list) {
		if (llsec_key_id_equal(key, &entry->key_id))
			return container_of(entry,
					    struct mac802154_llsec_device_key,
					    devkey);
	}

	return NULL;
}
/* Attach key information @key to the device with address @dev_addr.
 * Returns -ENOENT if the device is unknown, -EEXIST if the (device,
 * key id) pair already exists, -ENOMEM on allocation failure.
 */
int mac802154_llsec_devkey_add(struct mac802154_llsec *sec,
			       __le64 dev_addr,
			       const struct ieee802154_llsec_device_key *key)
{
	struct mac802154_llsec_device *dev;
	struct mac802154_llsec_device_key *dk;

	dev = llsec_dev_find_long(sec, dev_addr);
	if (!dev)
		return -ENOENT;

	/* refuse duplicate (device, key id) pairs */
	if (llsec_devkey_find(dev, &key->key_id))
		return -EEXIST;

	dk = kmalloc(sizeof(*dk), GFP_KERNEL);
	if (!dk)
		return -ENOMEM;

	dk->devkey = *key;
	list_add_tail_rcu(&dk->devkey.list, &dev->dev.keys);

	return 0;
}
/* Remove the key information @key from the device with address
 * @dev_addr.  Returns -ENOENT if either the device or the key is
 * unknown; the key memory is reclaimed after an RCU grace period.
 */
int mac802154_llsec_devkey_del(struct mac802154_llsec *sec,
			       __le64 dev_addr,
			       const struct ieee802154_llsec_device_key *key)
{
	struct mac802154_llsec_device *dev;
	struct mac802154_llsec_device_key *dk;

	dev = llsec_dev_find_long(sec, dev_addr);
	if (!dev)
		return -ENOENT;

	dk = llsec_devkey_find(dev, &key->key_id);
	if (!dk)
		return -ENOENT;

	list_del_rcu(&dk->devkey.list);
	kfree_rcu(dk, rcu);

	return 0;
}
/* Find a security-level entry that matches @sl exactly (frame type,
 * command frame id where relevant, device_override and sec_levels).
 * Returns NULL if no identical entry exists.
 */
static struct mac802154_llsec_seclevel*
llsec_find_seclevel(const struct mac802154_llsec *sec,
		    const struct ieee802154_llsec_seclevel *sl)
{
	struct ieee802154_llsec_seclevel *pos;

	list_for_each_entry(pos, &sec->table.security_levels, list) {
		/* cmd_frame_id only matters for MAC command frames */
		bool cmd_match = pos->frame_type != IEEE802154_FC_TYPE_MAC_CMD ||
				 pos->cmd_frame_id == sl->cmd_frame_id;

		if (pos->frame_type == sl->frame_type && cmd_match &&
		    pos->device_override == sl->device_override &&
		    pos->sec_levels == sl->sec_levels)
			return container_of(pos,
					    struct mac802154_llsec_seclevel,
					    level);
	}

	return NULL;
}
/* Add the security-level descriptor @sl to the table.
 * Duplicates are rejected with -EEXIST rather than merged.
 */
int mac802154_llsec_seclevel_add(struct mac802154_llsec *sec,
				 const struct ieee802154_llsec_seclevel *sl)
{
	struct mac802154_llsec_seclevel *entry;

	if (llsec_find_seclevel(sec, sl))
		return -EEXIST;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->level = *sl;
	list_add_tail_rcu(&entry->level.list, &sec->table.security_levels);

	return 0;
}
/* Remove the security-level descriptor matching @sl from the table.
 * Returns -ENOENT if no identical entry exists; the entry memory is
 * reclaimed after an RCU grace period.
 */
int mac802154_llsec_seclevel_del(struct mac802154_llsec *sec,
				 const struct ieee802154_llsec_seclevel *sl)
{
	struct mac802154_llsec_seclevel *entry;

	entry = llsec_find_seclevel(sec, sl);
	if (!entry)
		return -ENOENT;

	list_del_rcu(&entry->level.list);
	kfree_rcu(entry, rcu);

	return 0;
}
/* Reconstruct a destination address from the coordinator parameters.
 * Fills @addr with the PAN id and either the coordinator's short or
 * extended address; fails with -EINVAL if the coordinator short
 * address is the broadcast address.
 */
static int llsec_recover_addr(struct mac802154_llsec *sec,
			      struct ieee802154_addr *addr)
{
	__le16 coord = sec->params.coord_shortaddr;

	addr->pan_id = sec->params.pan_id;

	if (coord == cpu_to_le16(IEEE802154_ADDR_BROADCAST))
		return -EINVAL;

	if (coord == cpu_to_le16(IEEE802154_ADDR_UNDEF)) {
		/* no short address assigned: fall back to the hw address */
		addr->mode = IEEE802154_ADDR_LONG;
		addr->extended_addr = sec->params.coord_hwaddr;
	} else {
		addr->mode = IEEE802154_ADDR_SHORT;
		addr->short_addr = coord;
	}

	return 0;
}
/* Look up the key to use for a frame with security header @hdr->sec
 * and device address @addr.
 *
 * For IEEE802154_SCF_KEY_IMPLICIT with no address in the frame, the
 * lookup address is reconstructed first: beacons use the coordinator
 * hardware address, everything else goes through llsec_recover_addr().
 *
 * On success, returns the key with its refcount raised (caller must
 * llsec_key_put() it) and, if @key_id is non-NULL, copies the matching
 * entry's id into *key_id.  Returns NULL if nothing matches.  Walks an
 * RCU list, so the caller must be in an RCU read-side critical section.
 */
static struct mac802154_llsec_key*
llsec_lookup_key(struct mac802154_llsec *sec,
		 const struct ieee802154_hdr *hdr,
		 const struct ieee802154_addr *addr,
		 struct ieee802154_llsec_key_id *key_id)
{
	struct ieee802154_addr devaddr = *addr;
	u8 key_id_mode = hdr->sec.key_id_mode;
	struct ieee802154_llsec_key_entry *key_entry;
	struct mac802154_llsec_key *key;

	if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT &&
	    devaddr.mode == IEEE802154_ADDR_NONE) {
		if (hdr->fc.type == IEEE802154_FC_TYPE_BEACON) {
			devaddr.extended_addr = sec->params.coord_hwaddr;
			devaddr.mode = IEEE802154_ADDR_LONG;
		} else if (llsec_recover_addr(sec, &devaddr) < 0) {
			return NULL;
		}
	}

	list_for_each_entry_rcu(key_entry, &sec->table.keys, list) {
		const struct ieee802154_llsec_key_id *id = &key_entry->id;

		/* the key must be allowed for this frame type at all */
		if (!(key_entry->key->frame_types & BIT(hdr->fc.type)))
			continue;

		if (id->mode != key_id_mode)
			continue;

		if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT) {
			/* implicit keys match on the device address */
			if (ieee802154_addr_equal(&devaddr, &id->device_addr))
				goto found;
		} else {
			if (id->id != hdr->sec.key_id)
				continue;

			/* explicit keys must also match their key source */
			if ((key_id_mode == IEEE802154_SCF_KEY_INDEX) ||
			    (key_id_mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
			     id->short_source == hdr->sec.short_src) ||
			    (key_id_mode == IEEE802154_SCF_KEY_HW_INDEX &&
			     id->extended_source == hdr->sec.extended_src))
				goto found;
		}
	}

	return NULL;

found:
	key = container_of(key_entry->key, struct mac802154_llsec_key, key);
	if (key_id)
		*key_id = key_entry->id;
	return llsec_key_get(key);
}
/* Build the 16-byte IV used by the cipher transforms:
 *   byte  0      : flags, L' = L - 1 = 1 (2-byte block counter)
 *   bytes 1..8   : device extended address, byte-swapped
 *   bytes 9..12  : frame counter from the security header, byte-swapped
 *   byte  13     : security level
 *   bytes 14..15 : block counter, starting at 1
 */
static void llsec_geniv(u8 iv[16], __le64 addr,
			const struct ieee802154_sechdr *sec)
{
	__be64 addr_bytes = (__force __be64) swab64((__force u64) addr);
	__be32 frame_counter = (__force __be32) swab32((__force u32) sec->frame_counter);

	iv[0] = 1; /* L' = L - 1 = 1 */
	memcpy(iv + 1, &addr_bytes, sizeof(addr_bytes));
	memcpy(iv + 9, &frame_counter, sizeof(frame_counter));
	iv[13] = sec->level;
	iv[14] = 0;
	iv[15] = 1;
}
static int
llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
const struct ieee802154_hdr *hdr,
struct mac802154_llsec_key *key)
{
u8 iv[16];
struct scatterlist src;
struct blkcipher_desc req = {
.tfm = key->tfm0,
.info = iv,
.flags = 0,
};
llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);
sg_init_one(&src, skb->data, skb->len);
return crypto_blkcipher_encrypt_iv(&req, &src, &src, skb->len);
}
/* Return the pre-allocated AEAD transform whose authsize matches
 * @authlen.  Callers only ever pass tag lengths we allocated a tfm
 * for; anything else is a programming error.
 */
static struct crypto_aead*
llsec_tfm_by_len(struct mac802154_llsec_key *key, int authlen)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(key->tfm); i++) {
		struct crypto_aead *tfm = key->tfm[i];

		if (crypto_aead_authsize(tfm) == authlen)
			return tfm;
	}

	BUG();
}
/* Encrypt and/or authenticate the frame in @skb with an AEAD
 * transform chosen by the authtag length.
 *
 * The MAC header is always fed in as associated data.  If the level
 * requests encryption, the payload is the AEAD src/dst; otherwise the
 * payload is appended to the associated data and the crypt length is
 * zero (authentication only).  The tag is written into @authlen fresh
 * bytes obtained with skb_put().
 */
static int
llsec_do_encrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
		      const struct ieee802154_hdr *hdr,
		      struct mac802154_llsec_key *key)
{
	u8 iv[16];
	unsigned char *data;
	int authlen, assoclen, datalen, rc;
	struct scatterlist src, assoc[2], dst[2];
	struct aead_request *req;

	authlen = ieee802154_sechdr_authtag_len(&hdr->sec);
	llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);

	req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	/* assoc[0] covers the MAC header */
	sg_init_table(assoc, 2);
	sg_set_buf(&assoc[0], skb_mac_header(skb), skb->mac_len);
	assoclen = skb->mac_len;

	data = skb_mac_header(skb) + skb->mac_len;
	datalen = skb_tail_pointer(skb) - data;

	if (hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC) {
		sg_set_buf(&assoc[1], data, 0);
	} else {
		/* no encryption: authenticate the payload as assoc data */
		sg_set_buf(&assoc[1], data, datalen);
		assoclen += datalen;
		datalen = 0;
	}

	sg_init_one(&src, data, datalen);

	sg_init_table(dst, 2);
	sg_set_buf(&dst[0], data, datalen);
	/* the authentication tag lands right behind the payload */
	sg_set_buf(&dst[1], skb_put(skb, authlen), authlen);

	aead_request_set_callback(req, 0, NULL, NULL);
	aead_request_set_assoc(req, assoc, assoclen);
	aead_request_set_crypt(req, &src, dst, datalen, iv);

	rc = crypto_aead_encrypt(req);

	kfree(req);

	return rc;
}
static int llsec_do_encrypt(struct sk_buff *skb,
const struct mac802154_llsec *sec,
const struct ieee802154_hdr *hdr,
struct mac802154_llsec_key *key)
{
if (hdr->sec.level == IEEE802154_SCF_SECLEVEL_ENC)
return llsec_do_encrypt_unauth(skb, sec, hdr, key);
else
return llsec_do_encrypt_auth(skb, sec, hdr, key);
}
/* Secure an outgoing data frame according to its security header.
 *
 * Frames without security enabled (or with level 0) are passed through
 * untouched.  Otherwise: the key is looked up under the read lock, the
 * lock is then dropped and re-taken for writing to consume one frame
 * counter value, and finally the header is pushed back and the frame
 * encrypted in place.
 *
 * Returns 0 or a negative error: -EINVAL for non-data frames or
 * disabled security, -EMSGSIZE if the secured frame would exceed
 * IEEE802154_MTU, -ENOKEY if no matching key exists, -EOVERFLOW once
 * the outgoing frame counter reaches 0xFFFFFFFF.
 *
 * NOTE(review): on the -EMSGSIZE path the header stays pulled off the
 * skb — presumably callers drop the frame on any error; verify.
 */
int mac802154_llsec_encrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
{
	struct ieee802154_hdr hdr;
	int rc, authlen, hlen;
	struct mac802154_llsec_key *key;
	u32 frame_ctr;

	hlen = ieee802154_hdr_pull(skb, &hdr);

	/* only data frames are secured here */
	if (hlen < 0 || hdr.fc.type != IEEE802154_FC_TYPE_DATA)
		return -EINVAL;

	if (!hdr.fc.security_enabled || hdr.sec.level == 0) {
		/* nothing to do: restore the header and pass through */
		skb_push(skb, hlen);
		return 0;
	}

	authlen = ieee802154_sechdr_authtag_len(&hdr.sec);

	if (skb->len + hlen + authlen + IEEE802154_MFR_SIZE > IEEE802154_MTU)
		return -EMSGSIZE;

	rcu_read_lock();
	read_lock_bh(&sec->lock);

	if (!sec->params.enabled) {
		rc = -EINVAL;
		goto fail_read;
	}

	key = llsec_lookup_key(sec, &hdr, &hdr.dest, NULL);
	if (!key) {
		rc = -ENOKEY;
		goto fail_read;
	}

	/* switch to the write lock to advance the outgoing frame counter;
	 * the key reference keeps the key alive across the gap
	 */
	read_unlock_bh(&sec->lock);
	write_lock_bh(&sec->lock);

	frame_ctr = be32_to_cpu(sec->params.frame_counter);
	hdr.sec.frame_counter = cpu_to_le32(frame_ctr);
	if (frame_ctr == 0xFFFFFFFF) {
		/* counter exhausted; refuse to reuse a value */
		write_unlock_bh(&sec->lock);
		llsec_key_put(key);
		rc = -EOVERFLOW;
		goto fail;
	}

	sec->params.frame_counter = cpu_to_be32(frame_ctr + 1);

	write_unlock_bh(&sec->lock);

	rcu_read_unlock();

	skb->mac_len = ieee802154_hdr_push(skb, &hdr);
	skb_reset_mac_header(skb);

	rc = llsec_do_encrypt(skb, sec, &hdr, key);
	llsec_key_put(key);

	return rc;

fail_read:
	read_unlock_bh(&sec->lock);
fail:
	rcu_read_unlock();
	return rc;
}
static struct mac802154_llsec_device*
llsec_lookup_dev(struct mac802154_llsec *sec,
const struct ieee802154_addr *addr)
{
struct ieee802154_addr devaddr = *addr;
struct mac802154_llsec_device *dev = NULL;
if (devaddr.mode == IEEE802154_ADDR_NONE &&
llsec_recover_addr(sec, &devaddr) < 0)
return NULL;
if (devaddr.mode == IEEE802154_ADDR_SHORT) {
u32 key = llsec_dev_hash_short(devaddr.short_addr,
devaddr.pan_id);
hash_for_each_possible_rcu(sec->devices_short, dev,
bucket_s, key) {
if (dev->dev.pan_id == devaddr.pan_id &&
dev->dev.short_addr == devaddr.short_addr)
return dev;
}
} else {
u64 key = llsec_dev_hash_long(devaddr.extended_addr);
hash_for_each_possible_rcu(sec->devices_hw, dev,
bucket_hw, key) {
if (dev->dev.hwaddr == devaddr.extended_addr)
return dev;
}
}
return NULL;
}
/* Find the security-level descriptor for @frame_type (and, for MAC
 * command frames, @cmd_frame_id) and copy it into *rlevel.
 * Returns 0 on success, -EINVAL if no descriptor matches.
 */
static int
llsec_lookup_seclevel(const struct mac802154_llsec *sec,
		      u8 frame_type, u8 cmd_frame_id,
		      struct ieee802154_llsec_seclevel *rlevel)
{
	struct ieee802154_llsec_seclevel *pos;

	list_for_each_entry_rcu(pos, &sec->table.security_levels, list) {
		if (pos->frame_type != frame_type)
			continue;
		/* command id only narrows MAC command frames */
		if (frame_type == IEEE802154_FC_TYPE_MAC_CMD &&
		    pos->cmd_frame_id != cmd_frame_id)
			continue;

		*rlevel = *pos;
		return 0;
	}

	return -EINVAL;
}
/* Decrypt the payload behind the MAC header in place with the block
 * cipher tfm0 (encryption-only security level, no tag to verify).
 */
static int
llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
			const struct ieee802154_hdr *hdr,
			struct mac802154_llsec_key *key, __le64 dev_addr)
{
	u8 iv[16];
	unsigned char *payload;
	int payload_len;
	struct scatterlist sg;
	struct blkcipher_desc desc;

	desc.tfm = key->tfm0;
	desc.info = iv;
	desc.flags = 0;

	llsec_geniv(iv, dev_addr, &hdr->sec);

	payload = skb_mac_header(skb) + skb->mac_len;
	payload_len = skb_tail_pointer(skb) - payload;
	sg_init_one(&sg, payload, payload_len);

	return crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, payload_len);
}
/* Verify (and, depending on the level, decrypt) an incoming frame
 * with an AEAD transform; mirrors llsec_do_encrypt_auth().
 *
 * The MAC header is associated data.  For authentication-only levels
 * the payload minus the tag is folded into the associated data and
 * only the tag region is "crypted".  The tag is trimmed off the skb
 * regardless of whether verification succeeded; the crypto result is
 * returned either way.
 */
static int
llsec_do_decrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
		      const struct ieee802154_hdr *hdr,
		      struct mac802154_llsec_key *key, __le64 dev_addr)
{
	u8 iv[16];
	unsigned char *data;
	int authlen, datalen, assoclen, rc;
	struct scatterlist src, assoc[2];
	struct aead_request *req;

	authlen = ieee802154_sechdr_authtag_len(&hdr->sec);
	llsec_geniv(iv, dev_addr, &hdr->sec);

	req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	/* assoc[0] covers the MAC header */
	sg_init_table(assoc, 2);
	sg_set_buf(&assoc[0], skb_mac_header(skb), skb->mac_len);
	assoclen = skb->mac_len;

	data = skb_mac_header(skb) + skb->mac_len;
	datalen = skb_tail_pointer(skb) - data;

	if (hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC) {
		sg_set_buf(&assoc[1], data, 0);
	} else {
		/* auth-only: payload (minus tag) is associated data */
		sg_set_buf(&assoc[1], data, datalen - authlen);
		assoclen += datalen - authlen;
		data += datalen - authlen;
		datalen = authlen;
	}

	sg_init_one(&src, data, datalen);

	aead_request_set_callback(req, 0, NULL, NULL);
	aead_request_set_assoc(req, assoc, assoclen);
	aead_request_set_crypt(req, &src, &src, datalen, iv);

	rc = crypto_aead_decrypt(req);

	kfree(req);
	/* drop the tag from the skb, even if verification failed */
	skb_trim(skb, skb->len - authlen);

	return rc;
}
/* Dispatch to the decryption-only or the AEAD path, depending on
 * whether the security level carries an authentication tag.
 */
static int
llsec_do_decrypt(struct sk_buff *skb, const struct mac802154_llsec *sec,
		 const struct ieee802154_hdr *hdr,
		 struct mac802154_llsec_key *key, __le64 dev_addr)
{
	if (hdr->sec.level != IEEE802154_SCF_SECLEVEL_ENC)
		return llsec_do_decrypt_auth(skb, sec, hdr, key, dev_addr);

	return llsec_do_decrypt_unauth(skb, sec, hdr, key, dev_addr);
}
/* Validate and advance the anti-replay frame counter for @dev.
 *
 * In IEEE802154_LLSEC_DEVKEY_RESTRICT mode the counter is tracked per
 * (device, key); the frame is rejected with -ENOENT if the key is not
 * registered for this device.  A counter value below the stored one
 * indicates a replay and yields -EINVAL.  On success the stored
 * counter is bumped to frame_counter + 1 under the device spinlock.
 */
static int
llsec_update_devkey_info(struct mac802154_llsec_device *dev,
			 const struct ieee802154_llsec_key_id *in_key,
			 u32 frame_counter)
{
	struct mac802154_llsec_device_key *devkey = NULL;

	if (dev->dev.key_mode == IEEE802154_LLSEC_DEVKEY_RESTRICT) {
		devkey = llsec_devkey_find(dev, in_key);
		if (!devkey)
			return -ENOENT;
	}

	spin_lock_bh(&dev->lock);

	/* counters may only move forward */
	if ((!devkey && frame_counter < dev->dev.frame_counter) ||
	    (devkey && frame_counter < devkey->devkey.frame_counter)) {
		spin_unlock_bh(&dev->lock);
		return -EINVAL;
	}

	if (devkey)
		devkey->devkey.frame_counter = frame_counter + 1;
	else
		dev->dev.frame_counter = frame_counter + 1;

	spin_unlock_bh(&dev->lock);

	return 0;
}
/* Check and undo security processing on an incoming frame.
 *
 * Unsecured frames pass through unchanged.  Secured frames require
 * frame version != 0, security enabled in the PIB, a matching key and
 * a known originating device.  A frame is rejected when its level is
 * not in the seclevel table's allowed mask and it is additionally a
 * level-0 frame with device_override set for a non-exempt device.
 * The frame counter must not have reached its maximum and must pass
 * the per-device anti-replay check.  The actual decryption runs
 * outside the RCU section with a reference held on the key.
 *
 * Returns 0 on success or a negative error (-EINVAL, -ENOKEY,
 * -EOVERFLOW, or the crypto layer's result).
 */
int mac802154_llsec_decrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
{
	struct ieee802154_hdr hdr;
	struct mac802154_llsec_key *key;
	struct ieee802154_llsec_key_id key_id;
	struct mac802154_llsec_device *dev;
	struct ieee802154_llsec_seclevel seclevel;
	int err;
	__le64 dev_addr;
	u32 frame_ctr;

	if (ieee802154_hdr_peek(skb, &hdr) < 0)
		return -EINVAL;
	if (!hdr.fc.security_enabled)
		return 0;
	/* 2003-version frames cannot carry an auxiliary security header */
	if (hdr.fc.version == 0)
		return -EINVAL;

	read_lock_bh(&sec->lock);
	if (!sec->params.enabled) {
		read_unlock_bh(&sec->lock);
		return -EINVAL;
	}
	read_unlock_bh(&sec->lock);

	rcu_read_lock();

	key = llsec_lookup_key(sec, &hdr, &hdr.source, &key_id);
	if (!key) {
		err = -ENOKEY;
		goto fail;
	}

	dev = llsec_lookup_dev(sec, &hdr.source);
	if (!dev) {
		err = -EINVAL;
		goto fail_dev;
	}

	if (llsec_lookup_seclevel(sec, hdr.fc.type, 0, &seclevel) < 0) {
		err = -EINVAL;
		goto fail_dev;
	}

	if (!(seclevel.sec_levels & BIT(hdr.sec.level)) &&
	    (hdr.sec.level == 0 && seclevel.device_override &&
	     !dev->dev.seclevel_exempt)) {
		err = -EINVAL;
		goto fail_dev;
	}

	frame_ctr = le32_to_cpu(hdr.sec.frame_counter);

	/* the maximum counter value is never accepted */
	if (frame_ctr == 0xffffffff) {
		err = -EOVERFLOW;
		goto fail_dev;
	}

	err = llsec_update_devkey_info(dev, &key_id, frame_ctr);
	if (err)
		goto fail_dev;

	dev_addr = dev->dev.hwaddr;

	rcu_read_unlock();

	err = llsec_do_decrypt(skb, sec, &hdr, key, dev_addr);
	llsec_key_put(key);
	return err;

fail_dev:
	llsec_key_put(key);
fail:
	rcu_read_unlock();
	return err;
}
|
nsat/zynq-linux
|
net/mac802154/llsec.c
|
C
|
gpl-2.0
| 24,885
|
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2006, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* $Id: getinfo.c,v 1.1 2006/07/05 22:41:22 victor Exp $
***************************************************************************/
#include "setup.h"
#include <curl/curl.h>
#include "urldata.h"
#include "getinfo.h"
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <stdlib.h>
#include "memory.h"
#include "sslgen.h"
/* Make this the last #include */
#include "memdebug.h"
/*
* This is supposed to be called in the beginning of a perform() session
* and should reset all session-info variables
*/
/*
 * This is supposed to be called in the beginning of a perform() session
 * and should reset all session-info variables.  Always returns CURLE_OK.
 */
CURLcode Curl_initinfo(struct SessionHandle *data)
{
  struct Progress *pro = &data->progress;
  struct PureInfo *info = &data->info;

  /* reset all timing counters from a previous transfer */
  pro->t_nslookup = 0;
  pro->t_connect = 0;
  pro->t_pretransfer = 0;
  pro->t_starttransfer = 0;
  pro->timespent = 0;
  pro->t_redirect = 0;

  info->httpcode = 0;
  info->httpversion = 0;
  info->filetime = -1; /* -1 is an illegal time and thus means unknown */

  /* free(NULL) is a no-op, no need to test first */
  free(info->contenttype);
  info->contenttype = NULL;

  info->header_size = 0;
  info->request_size = 0;
  info->numconnects = 0;
  return CURLE_OK;
}
/*
 * Fetch one piece of session info, as selected by 'info', into the
 * caller-provided pointer passed as the variadic argument.  Returns
 * CURLE_OK, or CURLE_BAD_FUNCTION_ARGUMENT for an unknown info value
 * or a NULL destination pointer.
 */
CURLcode Curl_getinfo(struct SessionHandle *data, CURLINFO info, ...)
{
  va_list arg;
  long *param_longp=NULL;
  double *param_doublep=NULL;
  char **param_charp=NULL;
  struct curl_slist **param_slistp=NULL;

  va_start(arg, info);

  /* Pick the destination pointer matching the type class of 'info'.
     All va_arg() consumption happens in this switch, so the list can be
     closed right after it - the original code returned early without
     ever calling va_end(), which C requires on every exit path. */
  switch(info&CURLINFO_TYPEMASK) {
  default:
    va_end(arg);
    return CURLE_BAD_FUNCTION_ARGUMENT;
  case CURLINFO_STRING:
    param_charp = va_arg(arg, char **);
    break;
  case CURLINFO_LONG:
    param_longp = va_arg(arg, long *);
    break;
  case CURLINFO_DOUBLE:
    param_doublep = va_arg(arg, double *);
    break;
  case CURLINFO_SLIST:
    param_slistp = va_arg(arg, struct curl_slist **);
    break;
  }
  va_end(arg);

  /* exactly one of the four pointers was assigned above; if it is NULL
     the caller passed a NULL destination */
  if(!param_charp && !param_longp && !param_doublep && !param_slistp)
    return CURLE_BAD_FUNCTION_ARGUMENT;

  switch(info) {
  case CURLINFO_EFFECTIVE_URL:
    *param_charp = data->change.url?data->change.url:(char *)"";
    break;
  case CURLINFO_RESPONSE_CODE:
    *param_longp = data->info.httpcode;
    break;
  case CURLINFO_HTTP_CONNECTCODE:
    *param_longp = data->info.httpproxycode;
    break;
  case CURLINFO_FILETIME:
    *param_longp = data->info.filetime;
    break;
  case CURLINFO_HEADER_SIZE:
    *param_longp = data->info.header_size;
    break;
  case CURLINFO_REQUEST_SIZE:
    *param_longp = data->info.request_size;
    break;
  case CURLINFO_TOTAL_TIME:
    *param_doublep = data->progress.timespent;
    break;
  case CURLINFO_NAMELOOKUP_TIME:
    *param_doublep = data->progress.t_nslookup;
    break;
  case CURLINFO_CONNECT_TIME:
    *param_doublep = data->progress.t_connect;
    break;
  case CURLINFO_PRETRANSFER_TIME:
    *param_doublep = data->progress.t_pretransfer;
    break;
  case CURLINFO_STARTTRANSFER_TIME:
    *param_doublep = data->progress.t_starttransfer;
    break;
  case CURLINFO_SIZE_UPLOAD:
    *param_doublep = (double)data->progress.uploaded;
    break;
  case CURLINFO_SIZE_DOWNLOAD:
    *param_doublep = (double)data->progress.downloaded;
    break;
  case CURLINFO_SPEED_DOWNLOAD:
    *param_doublep = (double)data->progress.dlspeed;
    break;
  case CURLINFO_SPEED_UPLOAD:
    *param_doublep = (double)data->progress.ulspeed;
    break;
  case CURLINFO_SSL_VERIFYRESULT:
    *param_longp = data->set.ssl.certverifyresult;
    break;
  case CURLINFO_CONTENT_LENGTH_DOWNLOAD:
    *param_doublep = (double)data->progress.size_dl;
    break;
  case CURLINFO_CONTENT_LENGTH_UPLOAD:
    *param_doublep = (double)data->progress.size_ul;
    break;
  case CURLINFO_REDIRECT_TIME:
    *param_doublep = data->progress.t_redirect;
    break;
  case CURLINFO_REDIRECT_COUNT:
    *param_longp = data->set.followlocation;
    break;
  case CURLINFO_CONTENT_TYPE:
    *param_charp = data->info.contenttype;
    break;
  case CURLINFO_PRIVATE:
    *param_charp = data->set.private_data;
    break;
  case CURLINFO_HTTPAUTH_AVAIL:
    *param_longp = data->info.httpauthavail;
    break;
  case CURLINFO_PROXYAUTH_AVAIL:
    *param_longp = data->info.proxyauthavail;
    break;
  case CURLINFO_OS_ERRNO:
    *param_longp = data->state.os_errno;
    break;
  case CURLINFO_NUM_CONNECTS:
    *param_longp = data->info.numconnects;
    break;
  case CURLINFO_SSL_ENGINES:
    *param_slistp = Curl_ssl_engines_list(data);
    break;
  case CURLINFO_COOKIELIST:
    *param_slistp = Curl_cookie_list(data);
    break;
  case CURLINFO_LASTSOCKET:
    if((data->state.lastconnect != -1) &&
       (data->state.connects[data->state.lastconnect] != NULL))
      *param_longp = data->state.connects[data->state.lastconnect]->
        sock[FIRSTSOCKET];
    else
      *param_longp = -1;
    break;
  default:
    return CURLE_BAD_FUNCTION_ARGUMENT;
  }
  return CURLE_OK;
}
|
pfchrono/mudmagic-client
|
bundled/curl/getinfo.c
|
C
|
gpl-2.0
| 5,871
|
/* nip2-cli.c ... run the nip2 executable, connecting stdin and stdout to the
* console
*
* 11/12/09
* - use SetHandleInformation() to stop the child inheriting the read
* handle (thanks Leo)
*/
/*
Copyright (C) 2008 Imperial College, London
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
These files are distributed with VIPS - http://www.vips.ecs.soton.ac.uk
*/
/* Adapted from sample code by Leo Davidson, with the author's permission.
*/
/* Windows does not let a single exe run in both command-line and GUI mode. To
* run nip2 in command-line mode, we run this CLI wrapper program instead,
* which starts the main nip2 exe, connecting stdin/out/err appropriately.
*/
#include <windows.h>
#include <stdio.h>
#include <io.h>
#include <ctype.h>
#include <glib.h>
void
print_last_error ()
{
char *buf;
if (FormatMessageA (FORMAT_MESSAGE_ALLOCATE_BUFFER |
FORMAT_MESSAGE_IGNORE_INSERTS |
FORMAT_MESSAGE_FROM_SYSTEM,
NULL,
GetLastError (),
MAKELANGID (LANG_NEUTRAL, SUBLANG_DEFAULT),
(LPSTR) & buf, 0, NULL))
{
fprintf (stderr, "%s", buf);
LocalFree (buf);
}
}
int
main (int argc, char **argv)
{
char *dirname;
char command[2048];
gboolean quote;
int i, j;
HANDLE hChildStdoutRd;
HANDLE hChildStdoutWr;
SECURITY_ATTRIBUTES saAttr;
PROCESS_INFORMATION processInformation;
STARTUPINFO startUpInfo;
DWORD dwRead;
CHAR buf[1024];
/* we run the nip2.exe in the same directory as this exe: swap the last
* pathname component for nip2.exe
* we change the argv[0] pointer, probably not a good idea
*/
dirname = g_path_get_dirname (argv[0]);
argv[0] = g_build_filename (dirname, "nip2.exe", NULL);
g_free (dirname);
if (_access (argv[0], 00))
{
fprintf (stderr, "cannot access \"%s\"\n", argv[0]);
exit (1);
}
/* build the command string ... we have to quote items containing spaces
*/
command[0] = '\0';
for (i = 0; i < argc; i++)
{
quote = FALSE;
for (j = 0; argv[i][j]; j++)
{
if (isspace (argv[i][j]))
{
quote = TRUE;
break;
}
}
if (i > 0)
{
strncat (command, " ", sizeof (command) - 1);
}
if (quote)
{
strncat (command, "\"", sizeof (command) - 1);
}
strncat (command, argv[i], sizeof (command) - 1);
if (quote)
{
strncat (command, "\"", sizeof (command) - 1);
}
}
if (strlen (command) == sizeof (command) - 1)
{
fprintf (stderr, "command too long\n");
exit (1);
}
/* Create a pipe for the child process's STDOUT.
*/
hChildStdoutRd = NULL;
hChildStdoutWr = NULL;
saAttr.nLength = sizeof (SECURITY_ATTRIBUTES);
saAttr.bInheritHandle = TRUE;
saAttr.lpSecurityDescriptor = NULL;
if (!CreatePipe (&hChildStdoutRd, &hChildStdoutWr, &saAttr, 0))
{
fprintf (stderr, "CreatePipe failed: ");
print_last_error ();
fprintf (stderr, "\n");
exit (1);
}
/* Ensure the read handle to the pipe for STDOUT is not inherited.
*/
if (!SetHandleInformation(hChildStdoutRd, HANDLE_FLAG_INHERIT, 0))
{
fprintf (stderr, "SetHandleInformation failed: ");
print_last_error ();
fprintf (stderr, "\n");
exit (1);
}
/* Run command.
*/
startUpInfo.cb = sizeof (STARTUPINFO);
startUpInfo.lpReserved = NULL;
startUpInfo.lpDesktop = NULL;
startUpInfo.lpTitle = NULL;
startUpInfo.dwFlags = STARTF_USESHOWWINDOW | STARTF_USESTDHANDLES;
startUpInfo.hStdOutput = hChildStdoutWr;
startUpInfo.hStdError = hChildStdoutWr;
startUpInfo.cbReserved2 = 0;
startUpInfo.lpReserved2 = NULL;
startUpInfo.wShowWindow = SW_SHOWNORMAL;
if (!CreateProcess (NULL, command, NULL, /* default security */
NULL, /* default thread security */
TRUE, /* inherit handles */
CREATE_DEFAULT_ERROR_MODE | DETACHED_PROCESS, NULL, /* use default environment */
NULL, /* set default directory */
&startUpInfo, &processInformation))
{
fprintf (stderr, "error running \"%s\": ", command);
print_last_error ();
fprintf (stderr, "\n");
exit (1);
}
/* Close the write end of the pipe before reading from the read end.
*/
CloseHandle (hChildStdoutWr);
while (ReadFile (hChildStdoutRd, buf, sizeof (buf) - 1, &dwRead, NULL) &&
dwRead > 0)
{
buf[dwRead] = '\0';
printf ("%s", buf);
}
CloseHandle (hChildStdoutRd);
return (0);
}
|
jcupitt/nip2
|
src/nip2-cli.c
|
C
|
gpl-2.0
| 5,159
|
/* utility routines for keeping some statistics */
/* (C) 2009 by Harald Welte <laforge@gnumonks.org>
*
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <sys/types.h>
#include <osmocore/linuxlist.h>
#include <osmocore/talloc.h>
#include <osmocore/statistics.h>
static LLIST_HEAD(counters);
void *tall_ctr_ctx;
/* Allocate a zeroed counter named @name and register it on the global
 * counter list.  Returns NULL on allocation failure.
 */
struct counter *counter_alloc(const char *name)
{
	struct counter *ctr;

	ctr = talloc_zero(tall_ctr_ctx, struct counter);
	if (ctr) {
		ctr->name = name;
		llist_add_tail(&ctr->list, &counters);
	}

	return ctr;
}
/* Unregister @ctr from the global counter list and release its memory. */
void counter_free(struct counter *ctr)
{
	llist_del(&ctr->list);
	talloc_free(ctr);
}
/* Invoke @handle_counter on every registered counter, passing @data
 * through.  Stops early and returns the callback's value on the first
 * negative result; otherwise returns the last callback's result (0 if
 * the list is empty).
 */
int counters_for_each(int (*handle_counter)(struct counter *, void *), void *data)
{
	struct counter *ctr;
	int rc = 0;

	llist_for_each_entry(ctr, &counters, list) {
		rc = handle_counter(ctr, data);
		if (rc < 0)
			break;
	}

	return rc;
}
|
techniker/libosmocore
|
src/statistics.c
|
C
|
gpl-2.0
| 1,583
|
/*
* fs/f2fs/recovery.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
/*
* Roll forward recovery scenarios.
*
* [Term] F: fsync_mark, D: dentry_mark
*
* 1. inode(x) | CP | inode(x) | dnode(F)
* -> Update the latest inode(x).
*
* 2. inode(x) | CP | inode(F) | dnode(F)
* -> No problem.
*
* 3. inode(x) | CP | dnode(F) | inode(x)
* -> Recover to the latest dnode(F), and drop the last inode(x)
*
* 4. inode(x) | CP | dnode(F) | inode(F)
* -> No problem.
*
* 5. CP | inode(x) | dnode(F)
* -> The inode(DF) was missing. Should drop this dnode(F).
*
* 6. CP | inode(DF) | dnode(F)
* -> No problem.
*
* 7. CP | dnode(F) | inode(DF)
* -> If f2fs_iget fails, then goto next to find inode(DF).
*
* 8. CP | dnode(F) | inode(x)
* -> If f2fs_iget fails, then goto next to find inode(DF).
* But it will fail due to no inode(DF).
*/
static struct kmem_cache *fsync_entry_slab;
/* Check whether enough free blocks remain for roll-forward recovery:
 * blocks valid at the last checkpoint plus blocks allocated since must
 * still fit within the user block count.
 */
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	return sbi->last_valid_block_count + nalloc <= sbi->user_block_count;
}
/* Find the fsync-inode entry for inode number @ino on @head, or NULL
 * if it has not been collected yet.
 */
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
						 nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list) {
		if (entry->inode->i_ino == ino)
			return entry;
	}

	return NULL;
}
/* Append a new fsync-inode entry for @inode to @head.
 * Returns the new entry, or NULL on allocation failure (the inode
 * reference is NOT taken over in that case).
 */
static struct fsync_inode_entry *add_fsync_inode(struct list_head *head,
						 struct inode *inode)
{
	struct fsync_inode_entry *new_entry;

	new_entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	if (new_entry) {
		new_entry->inode = inode;
		list_add_tail(&new_entry->list, head);
	}

	return new_entry;
}
/* Unlink @entry from its list, drop the inode reference it holds and
 * free the entry.
 */
static void del_fsync_inode(struct fsync_inode_entry *entry)
{
	list_del(&entry->list);
	iput(entry->inode);
	kmem_cache_free(fsync_entry_slab, entry);
}
/* Re-create the directory entry for @inode during roll-forward
 * recovery, using the parent ino and name recorded in its inode page
 * @ipage.
 *
 * Parent directory inodes are cached on @dir_list so repeated
 * recoveries into the same directory reuse a single reference; the
 * caller tears the list down (and iputs the inodes) afterwards.  If
 * the name already points at a different inode, that inode is made an
 * orphan, its entry deleted, and the lookup retried.
 * Returns 0 or a negative error.
 */
static int recover_dentry(struct inode *inode, struct page *ipage,
			  struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct fscrypt_name fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		/* first time we touch this directory: load and cache it */
		dir = f2fs_iget(inode->i_sb, pino);
		if (IS_ERR(dir)) {
			err = PTR_ERR(dir);
			goto out;
		}

		entry = add_fsync_inode(dir_list, dir);
		if (!entry) {
			err = -ENOMEM;
			iput(dir);
			goto out;
		}
	}

	dir = entry->inode;

	memset(&fname, 0, sizeof(struct fscrypt_name));
	fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname.disk_name.name = raw_inode->i_name;

	if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		/* entry already points at us: nothing to do */
		goto out_unmap_put;

	if (de) {
		/* name taken by another inode: orphan it, drop the entry */
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = __f2fs_do_add_link(dir, &fname, inode,
					 inode->i_ino, inode->i_mode);
	}
	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
/* Restore in-memory inode metadata (mode, size, timestamps) from the
 * raw on-disk inode in @page during roll-forward recovery.
 *
 * Fix: atime was restored from raw->i_mtime / i_mtime_nsec although
 * the on-disk inode carries its own i_atime / i_atime_nsec fields
 * (compared against in is_same_inode()) — a copy-paste slip; restore
 * atime from the atime fields.
 */
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(page), name);
}
static bool is_same_inode(struct inode *inode, struct page *ipage)
{
struct f2fs_inode *ri = F2FS_INODE(ipage);
struct timespec disk;
if (!IS_INODE(ipage))
return true;
disk.tv_sec = le64_to_cpu(ri->i_ctime);
disk.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
if (timespec_compare(&inode->i_ctime, &disk) > 0)
return false;
disk.tv_sec = le64_to_cpu(ri->i_atime);
disk.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
if (timespec_compare(&inode->i_atime, &disk) > 0)
return false;
disk.tv_sec = le64_to_cpu(ri->i_mtime);
disk.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
if (timespec_compare(&inode->i_mtime, &disk) > 0)
return false;
return true;
}
/*
 * First pass of roll-forward recovery: walk the warm-node log written
 * after the last checkpoint (starting at the current segment's next free
 * block, following next_blkaddr_of_node() links) and collect, on @head,
 * one fsync_inode_entry per inode that has fsync'd dnodes to replay.
 *
 * The walk stops at the first page whose checkpoint version no longer
 * matches the current one, or at an invalid block address.
 *
 * Returns 0 on a clean end of log, negative errno on failure.
 */
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct inode *inode;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		/* Out-of-range address: the log ends here, nothing left. */
		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_tmp_page(sbi, blkaddr);

		/* A stale checkpoint version marks the end of the log. */
		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			/* Skip pages whose image is older than the cache. */
			if (!is_same_inode(entry->inode, page))
				goto next;
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(inode)) {
				err = PTR_ERR(inode);
				/* Inode may be gone already; not fatal. */
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}

			/* add this fsync inode to the list */
			entry = add_fsync_inode(head, inode);
			if (!entry) {
				err = -ENOMEM;
				iput(inode);
				break;
			}
		}
		/* Track the newest dnode (and dentry) block per inode. */
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}
/*
 * Release every fsync_inode_entry still linked on @head.  The _safe
 * iterator is required because del_fsync_inode() presumably unlinks and
 * frees the entry being visited — confirm against its definition.
 */
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry);
}
/*
 * A data block at @blkaddr is about to be reused during roll-forward.
 * Locate its previous owner through the segment summary and, if an older
 * node still references @blkaddr, truncate that stale reference so the
 * block can be reassigned to @dn's inode.
 *
 * Returns 0 on success (including "nothing to do"), negative errno when
 * the owning node page or inode cannot be read.
 */
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	/* Block not marked valid: no previous owner to clean up. */
	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			/* Active segment: summary is still in memory. */
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	/* Otherwise read the summary block from disk. */
	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		/* Owner is @dn's inode page itself; lock it if needed. */
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		/* Owner is the dnode page we already hold. */
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		/* Deallocate previous index in the node page */
		inode = f2fs_iget(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	/* Drop any extra inode reference and restore the lock state. */
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	/* Undo the temporary lock taken above, if we took it. */
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}
/*
 * Replay one fsynced node page @page (located at @blkaddr) into @inode's
 * current metadata: xattrs, inline data, then every data-block address
 * the page records.
 *
 * Returns 0 on success, negative errno on the first failure.
 */
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
				struct page *page, block_t blkaddr)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		/*
		 * Deprecated; xattr blocks should be found from cold log.
		 * But, we should remain this for backward compatibility.
		 */
		recover_xattr_data(inode, page, blkaddr);
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			continue;
		}

		/*
		 * Widen before shifting: start is unsigned int, so the
		 * former (start + 1) << PAGE_SHIFT wrapped at 32 bits for
		 * offsets of 4GB and beyond; do the shift in loff_t.
		 */
		if ((loff_t)(start + 1) << PAGE_SHIFT > i_size_read(inode))
			f2fs_i_size_write(inode,
					(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
#ifdef CONFIG_F2FS_FAULT_INJECTION
				/* Fault injection can fail the first tries. */
				while (err)
					err = reserve_new_block(&dn);
#endif
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}
/*
 * Second pass of roll-forward recovery: re-walk the warm-node log and,
 * for every page that belongs to an inode collected on @inode_list,
 * replay its inode metadata, its dentry (for the last dentry block) and
 * its data-block addresses.
 *
 * Returns 0 on success, negative errno on the first failure.
 */
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
		struct list_head *dir_list)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_tmp_page(sbi, blkaddr);

		/* End of the post-checkpoint log. */
		if (cp_ver != cpver_of_node(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page))
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		/* Newest fsync block reached: this inode is done. */
		if (entry->blkaddr == blkaddr)
			del_fsync_inode(entry);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}
/*
 * Entry point for f2fs roll-forward (fsync) recovery after an unclean
 * shutdown.  Scans the warm-node log for fsync'd inodes and replays them,
 * then writes a checkpoint to persist the result.
 *
 * @check_only: when true, only report whether recoverable data exists.
 *
 * Returns 1 when @check_only and recoverable data exists, 0 on success
 * or when there is nothing to recover, negative errno on failure.
 */
int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct list_head inode_list;
	struct list_head dir_list;
	block_t blkaddr;
	int err;
	int ret = 0;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err || list_empty(&inode_list))
		goto out;

	if (check_only) {
		/* Caller only asked whether recovery is needed. */
		ret = 1;
		goto out;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		/* Recovery failed: throw away all cached node/meta pages. */
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	if (err) {
		bool invalidate = false;

		/* Neutralize the block after the log tail (LFS: overwrite,
		 * otherwise: discard) so a stale log is not replayed. */
		if (test_opt(sbi, LFS)) {
			update_meta_page(sbi, NULL, blkaddr);
			invalidate = true;
		} else if (discard_next_dnode(sbi, blkaddr)) {
			invalidate = true;
		}

		f2fs_wait_all_discard_bio(sbi);

		/* Flush all the NAT/SIT pages */
		while (get_pages(sbi, F2FS_DIRTY_META))
			sync_meta_pages(sbi, META, LONG_MAX);

		/* invalidate temporary meta page */
		if (invalidate)
			invalidate_mapping_pages(META_MAPPING(sbi),
							blkaddr, blkaddr);

		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
		mutex_unlock(&sbi->cp_mutex);
	} else if (need_writecp) {
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		mutex_unlock(&sbi->cp_mutex);
		/* Persist the recovered state with a checkpoint. */
		err = write_checkpoint(sbi, &cpc);
	} else {
		mutex_unlock(&sbi->cp_mutex);
	}

	destroy_fsync_dnodes(&dir_list);
	kmem_cache_destroy(fsync_entry_slab);
	return ret ? ret: err;
}
|
flaming-toast/linux-jeyu
|
fs/f2fs/recovery.c
|
C
|
gpl-2.0
| 16,232
|
/*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include "config.h"
#include "mp_msg.h"
#include "help_mp.h"
#ifdef __FreeBSD__
#include <sys/cdrio.h>
#endif
#include "m_option.h"
#include "stream.h"
#include "libmpdemux/demuxer.h"
/// We keep these 2 for the gui atm, but they will be removed.
char* cdrom_device=NULL;
int dvd_chapter=1;
int dvd_last_chapter=0;
char* dvd_device=NULL;
char *bluray_device=NULL;
// Open a new stream (stdin/file/vcd/url)
stream_t* open_stream(const char* filename,char** options, int* file_format){
  int fallback_format = DEMUXER_TYPE_UNKNOWN;

  /* Allow a NULL file_format pointer by redirecting it to a dummy. */
  if (!file_format)
    file_format = &fallback_format;

  /* Reset to "unknown" unless the caller already flagged a playlist. */
  if (*file_format != DEMUXER_TYPE_PLAYLIST)
    *file_format = DEMUXER_TYPE_UNKNOWN;

  if (!filename) {
    mp_msg(MSGT_OPEN,MSGL_ERR,"NULL filename, report this bug\n");
    return NULL;
  }

  //============ Open STDIN or plain FILE ============
  return open_stream_full(filename,STREAM_READ,options,file_format);
}
|
svn2github/MPlayer-SB
|
stream/open.c
|
C
|
gpl-2.0
| 1,868
|
/*
* 13.5.1 Thread Creation Scheduling Parameters, P1003.1c/Draft 10, p. 120
*
* COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR).
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.com/license/LICENSE.
*/
#if HAVE_CONFIG_H
#include "config.h"
#endif
#include <pthread.h>
#include <errno.h>
/*
 * Store the caller-supplied scheduling parameters into a thread
 * attribute object (POSIX 1003.1c, 13.5.1).
 *
 * Returns 0 on success, EINVAL when either pointer is NULL or the
 * attribute object has not been initialized.
 */
int pthread_attr_setschedparam(
  pthread_attr_t *attr,
  const struct sched_param *param
)
{
  if ( attr == NULL || param == NULL )
    return EINVAL;

  if ( !attr->is_initialized )
    return EINVAL;

  attr->schedparam = *param;

  return 0;
}
|
yangxi/omap4m3
|
cpukit/posix/src/pthreadattrsetschedparam.c
|
C
|
gpl-2.0
| 635
|
#include "thanks.h"
//
// EternityProject Public message START:
//
- To the "developers" like franciscofranco:
Are you able to work on your own?
Also, if you want to badly copy my commits, can you at least give credits
to the proprietary of the commit you're copying?
We're an open-source community, we do this for free... but we also are
satisfied with the TIME WE LOSE on the things we do.
We want to work with everyone that wants to.
We publish our sources.
We give you all everything we do.
And you?
Instead of copying someone else's work, try to lose time on your own
at least sorting the not working commits (yeah, I knew someone was
copying my work and I've committed some fakes).
You did that badly.
The EternityProject Team Manager & Main Developer,
--kholk
//
// EternityProject Public message END
//
|
kozmikkick/eternityprj-kernel-endeavoru-128
|
arch/arm/mach-tegra/EternityProject.c
|
C
|
gpl-2.0
| 827
|
/*
* Copyright 2008-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Sun designates this
* particular file as subject to the "Classpath" exception as provided
* by Sun in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*/
#include "jni.h"
#include "jni_util.h"
#include "jvm.h"
#include "jlong.h"
#include <dlfcn.h>
#include <errno.h>
#include <sys/acl.h>
#include "sun_nio_fs_SolarisNativeDispatcher.h"
/*
 * Construct a sun.nio.fs.UnixException carrying errnum and raise it as
 * the pending Java exception.  If construction fails (returns NULL),
 * whatever exception JNU_NewObjectByName left pending stands instead.
 */
static void throwUnixException(JNIEnv* env, int errnum) {
    jobject exc = JNU_NewObjectByName(env, "sun/nio/fs/UnixException",
                                      "(I)V", errnum);
    if (exc == NULL)
        return;
    (*env)->Throw(env, exc);
}
/* One-time native initialization hook called from the Java side;
 * intentionally empty — this dispatcher needs no native setup. */
JNIEXPORT void JNICALL
Java_sun_nio_fs_SolarisNativeDispatcher_init(JNIEnv *env, jclass clazz) {
}
/*
 * JNI wrapper around the Solaris facl(2) system call.  'address' is the
 * native address of the aclent buffer; on failure (-1) a
 * sun.nio.fs.UnixException carrying errno is thrown and -1 is returned.
 */
JNIEXPORT jint JNICALL
Java_sun_nio_fs_SolarisNativeDispatcher_facl(JNIEnv* env, jclass this, jint fd,
    jint cmd, jint nentries, jlong address)
{
    void* aclbufp = jlong_to_ptr(address);
    int res = facl((int)fd, (int)cmd, (int)nentries, aclbufp);

    if (res == -1)
        throwUnixException(env, errno);

    return (jint)res;
}
|
TheTypoMaster/Scaper
|
openjdk/jdk/src/solaris/native/sun/nio/fs/SolarisNativeDispatcher.c
|
C
|
gpl-2.0
| 2,058
|
/*
* SSLv3/TLSv1 server-side functions
*
* Copyright (C) 2006-2015, ARM Limited, All Rights Reserved
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of mbed TLS (https://tls.mbed.org)
*/
#if !defined(MBEDTLS_CONFIG_FILE)
#include "mbedtls/config.h"
#else
#include MBEDTLS_CONFIG_FILE
#endif
#if defined(MBEDTLS_SSL_SRV_C)
#if defined(MBEDTLS_PLATFORM_C)
#include "mbedtls/platform.h"
#else
#include <stdlib.h>
#define mbedtls_calloc calloc
#define mbedtls_free free
#endif
#include "mbedtls/ssl.h"
#include "mbedtls/ssl_internal.h"
#include "mbedtls/debug.h"
#include "mbedtls/error.h"
#include "mbedtls/platform_util.h"
#include <string.h>
#if defined(MBEDTLS_ECP_C)
#include "mbedtls/ecp.h"
#endif
#if defined(MBEDTLS_HAVE_TIME)
#include "mbedtls/platform_time.h"
#endif
#if defined(MBEDTLS_SSL_DTLS_HELLO_VERIFY)
/*
 * Store a copy of the client's transport-level identity (e.g. its
 * address) for DTLS HelloVerifyRequest cookie generation.  Server-side
 * only; any previously stored identity is released first.
 *
 * Returns 0 on success, MBEDTLS_ERR_SSL_BAD_INPUT_DATA when called on a
 * client context, MBEDTLS_ERR_SSL_ALLOC_FAILED on allocation failure.
 */
int mbedtls_ssl_set_client_transport_id( mbedtls_ssl_context *ssl,
                     const unsigned char *info,
                     size_t ilen )
{
    if( ssl->conf->endpoint != MBEDTLS_SSL_IS_SERVER )
        return( MBEDTLS_ERR_SSL_BAD_INPUT_DATA );

    /* Replace any identity stored by a previous call. */
    mbedtls_free( ssl->cli_id );

    ssl->cli_id = mbedtls_calloc( 1, ilen );
    if( ssl->cli_id == NULL )
        return( MBEDTLS_ERR_SSL_ALLOC_FAILED );

    memcpy( ssl->cli_id, info, ilen );
    ssl->cli_id_len = ilen;

    return( 0 );
}
/*
 * Register the DTLS HelloVerifyRequest cookie callbacks and their shared
 * context on the SSL configuration.
 */
void mbedtls_ssl_conf_dtls_cookies( mbedtls_ssl_config *conf,
                           mbedtls_ssl_cookie_write_t *f_cookie_write,
                           mbedtls_ssl_cookie_check_t *f_cookie_check,
                           void *p_cookie )
{
    conf->p_cookie       = p_cookie;
    conf->f_cookie_check = f_cookie_check;
    conf->f_cookie_write = f_cookie_write;
}
#endif /* MBEDTLS_SSL_DTLS_HELLO_VERIFY */
#if defined(MBEDTLS_SSL_SERVER_NAME_INDICATION)
/*
 * Parse the ServerName (SNI) extension of a ClientHello (RFC 6066,
 * section 3): a 2-byte server_name_list length followed by entries of
 * {1-byte NameType, 2-byte length, name}.  The first host_name entry is
 * handed to the configured SNI callback.
 *
 * NOTE(review): ssl->conf->f_sni is invoked without a NULL check here;
 * presumably this parser is only reached when an SNI callback is
 * configured — confirm in the ClientHello extension dispatcher.
 *
 * Returns 0 on success, MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO (after
 * sending a fatal alert) on malformed input or callback rejection.
 */
static int ssl_parse_servername_ext( mbedtls_ssl_context *ssl,
                                     const unsigned char *buf,
                                     size_t len )
{
    int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
    size_t servername_list_size, hostname_len;
    const unsigned char *p;

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "parse ServerName extension" ) );

    /* Need at least the 2-byte list length. */
    if( len < 2 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }
    servername_list_size = ( ( buf[0] << 8 ) | ( buf[1] ) );
    /* The list must fill the extension exactly. */
    if( servername_list_size + 2 != len )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    p = buf + 2;
    while( servername_list_size > 2 )
    {
        hostname_len = ( ( p[1] << 8 ) | p[2] );
        /* Entry (type + length + name) must fit in the remaining list. */
        if( hostname_len + 3 > servername_list_size )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
            mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                            MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
            return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
        }

        if( p[0] == MBEDTLS_TLS_EXT_SERVERNAME_HOSTNAME )
        {
            ret = ssl->conf->f_sni( ssl->conf->p_sni,
                                    ssl, p + 3, hostname_len );
            if( ret != 0 )
            {
                MBEDTLS_SSL_DEBUG_RET( 1, "ssl_sni_wrapper", ret );
                mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                        MBEDTLS_SSL_ALERT_MSG_UNRECOGNIZED_NAME );
                return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
            }
            /* First host_name entry decides; ignore the rest. */
            return( 0 );
        }

        servername_list_size -= hostname_len + 3;
        p += hostname_len + 3;
    }

    /* Trailing bytes that do not form a complete entry are an error. */
    if( servername_list_size != 0 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    return( 0 );
}
#endif /* MBEDTLS_SSL_SERVER_NAME_INDICATION */
#if defined(MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED)
/*
 * Return 1 when the configuration can supply a pre-shared key: either a
 * PSK callback is registered, or a static identity plus key material
 * (raw key, or opaque PSA key when enabled) is configured; 0 otherwise.
 */
static int ssl_conf_has_psk_or_cb( mbedtls_ssl_config const *conf )
{
    /* A PSK selection callback always suffices. */
    if( conf->f_psk != NULL )
        return( 1 );

    /* A static PSK is unusable without an identity. */
    if( conf->psk_identity == NULL || conf->psk_identity_len == 0 )
        return( 0 );

#if defined(MBEDTLS_USE_PSA_CRYPTO)
    if( conf->psk_opaque != 0 )
        return( 1 );
#endif /* MBEDTLS_USE_PSA_CRYPTO */

    if( conf->psk != NULL && conf->psk_len != 0 )
        return( 1 );

    return( 0 );
}
#if defined(MBEDTLS_USE_PSA_CRYPTO)
/*
 * Return 1 when the PSK to be used for this handshake is an opaque PSA
 * key, 0 when it is raw key material.  With a PSK selection callback the
 * handshake's choice is authoritative; otherwise the static
 * configuration decides.
 */
static int ssl_use_opaque_psk( mbedtls_ssl_context const *ssl )
{
    if( ssl->conf->f_psk != NULL )
    {
        /* If we've used a callback to select the PSK,
         * the static configuration is irrelevant. */
        return( ssl->handshake->psk_opaque != 0 );
    }

    return( ssl->conf->psk_opaque != 0 );
}
#endif /* MBEDTLS_USE_PSA_CRYPTO */
#endif /* MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED */
/*
 * Parse the renegotiation_info extension (RFC 5746).
 *
 * On an initial handshake the extension must be exactly one zero byte.
 * During renegotiation it must echo the client's previous verify data,
 * compared in constant time.  Any mismatch sends a fatal alert and
 * aborts the handshake.
 */
static int ssl_parse_renegotiation_info( mbedtls_ssl_context *ssl,
                                         const unsigned char *buf,
                                         size_t len )
{
#if defined(MBEDTLS_SSL_RENEGOTIATION)
    if( ssl->renego_status != MBEDTLS_SSL_INITIAL_HANDSHAKE )
    {
        /* Check verify-data in constant-time. The length OTOH is no secret */
        if( len != 1 + ssl->verify_data_len ||
            buf[0] != ssl->verify_data_len ||
            mbedtls_ssl_safer_memcmp( buf + 1, ssl->peer_verify_data,
                          ssl->verify_data_len ) != 0 )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "non-matching renegotiation info" ) );
            mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                            MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
            return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
        }
    }
    else
#endif /* MBEDTLS_SSL_RENEGOTIATION */
    {
        /* Initial handshake: payload must be a single zero byte. */
        if( len != 1 || buf[0] != 0x0 )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "non-zero length renegotiation info" ) );
            mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                            MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
            return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
        }

        /* Peer supports secure renegotiation. */
        ssl->secure_renegotiation = MBEDTLS_SSL_SECURE_RENEGOTIATION;
    }

    return( 0 );
}
#if defined(MBEDTLS_SSL_PROTO_TLS1_2) && \
defined(MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED)
/*
 * Status of the implementation of signature-algorithms extension:
 *
 * Currently, we are only considering the signature-algorithm extension
 * to pick a ciphersuite which allows us to send the ServerKeyExchange
 * message with a signature-hash combination that the user allows.
 *
 * We do *not* check whether all certificates in our certificate
 * chain are signed with an allowed signature-hash pair.
 * This needs to be done at a later stage.
 *
 */
/*
 * Parse the signature_algorithms extension (RFC 5246, 7.4.1.4.1):
 * a 2-byte list length followed by (hash, signature) byte pairs.  Each
 * pair we recognise and support is recorded in
 * ssl->handshake->hash_algs for use when signing ServerKeyExchange.
 *
 * Returns 0 on success (unknown algorithms are silently skipped),
 * MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO on a malformed list.
 */
static int ssl_parse_signature_algorithms_ext( mbedtls_ssl_context *ssl,
                                               const unsigned char *buf,
                                               size_t len )
{
    size_t sig_alg_list_size;
    const unsigned char *p;
    const unsigned char *end = buf + len;
    mbedtls_md_type_t md_cur;
    mbedtls_pk_type_t sig_cur;

    /* Need at least the 2-byte list length. */
    if ( len < 2 ) {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    sig_alg_list_size = ( ( buf[0] << 8 ) | ( buf[1] ) );
    /* List must fill the extension exactly and hold whole pairs. */
    if( sig_alg_list_size + 2 != len ||
        sig_alg_list_size % 2 != 0 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    /* Currently we only guarantee signing the ServerKeyExchange message according
     * to the constraints specified in this extension (see above), so it suffices
     * to remember only one suitable hash for each possible signature algorithm.
     *
     * This will change when we also consider certificate signatures,
     * in which case we will need to remember the whole signature-hash
     * pair list from the extension.
     */
    for( p = buf + 2; p < end; p += 2 )
    {
        /* Silently ignore unknown signature or hash algorithms. */
        if( ( sig_cur = mbedtls_ssl_pk_alg_from_sig( p[1] ) ) == MBEDTLS_PK_NONE )
        {
            MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, signature_algorithm ext"
                                        " unknown sig alg encoding %d", p[1] ) );
            continue;
        }

        /* Check if we support the hash the user proposes */
        md_cur = mbedtls_ssl_md_alg_from_hash( p[0] );
        if( md_cur == MBEDTLS_MD_NONE )
        {
            MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, signature_algorithm ext:"
                                        " unknown hash alg encoding %d", p[0] ) );
            continue;
        }

        if( mbedtls_ssl_check_sig_hash( ssl, md_cur ) == 0 )
        {
            mbedtls_ssl_sig_hash_set_add( &ssl->handshake->hash_algs, sig_cur, md_cur );
            MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, signature_algorithm ext:"
                                        " match sig %d and hash %d",
                                        sig_cur, md_cur ) );
        }
        else
        {
            MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, signature_algorithm ext: "
                                        "hash alg %d not supported", md_cur ) );
        }
    }

    return( 0 );
}
#endif /* MBEDTLS_SSL_PROTO_TLS1_2 &&
MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED */
#if defined(MBEDTLS_ECDH_C) || defined(MBEDTLS_ECDSA_C) || \
defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
/*
 * Parse the supported_elliptic_curves / supported_groups extension
 * (RFC 4492 / RFC 8422): a 2-byte list length followed by 2-byte curve
 * IDs.  Recognised curves are collected into a freshly allocated,
 * NULL-terminated array at ssl->handshake->curves, capped at
 * MBEDTLS_ECP_DP_MAX entries regardless of what the peer sends.
 *
 * Returns 0 on success, MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO on a
 * malformed or duplicated extension, MBEDTLS_ERR_SSL_ALLOC_FAILED on
 * allocation failure.
 */
static int ssl_parse_supported_elliptic_curves( mbedtls_ssl_context *ssl,
                                                const unsigned char *buf,
                                                size_t len )
{
    size_t list_size, our_size;
    const unsigned char *p;
    const mbedtls_ecp_curve_info *curve_info, **curves;

    /* Need at least the 2-byte list length. */
    if ( len < 2 ) {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }
    list_size = ( ( buf[0] << 8 ) | ( buf[1] ) );
    /* List must fill the extension exactly and hold whole 2-byte IDs. */
    if( list_size + 2 != len ||
        list_size % 2 != 0 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    /* Should never happen unless client duplicates the extension */
    if( ssl->handshake->curves != NULL )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    /* Don't allow our peer to make us allocate too much memory,
     * and leave room for a final 0 */
    our_size = list_size / 2 + 1;
    if( our_size > MBEDTLS_ECP_DP_MAX )
        our_size = MBEDTLS_ECP_DP_MAX;

    if( ( curves = mbedtls_calloc( our_size, sizeof( *curves ) ) ) == NULL )
    {
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_INTERNAL_ERROR );
        return( MBEDTLS_ERR_SSL_ALLOC_FAILED );
    }

    ssl->handshake->curves = curves;

    p = buf + 2;
    /* our_size > 1 keeps the final NULL terminator slot intact. */
    while( list_size > 0 && our_size > 1 )
    {
        curve_info = mbedtls_ecp_curve_info_from_tls_id( ( p[0] << 8 ) | p[1] );

        if( curve_info != NULL )
        {
            *curves++ = curve_info;
            our_size--;
        }

        list_size -= 2;
        p += 2;
    }

    return( 0 );
}
/*
 * Parse the ec_point_formats extension (RFC 8422): one length byte
 * followed by that many point-format IDs.  The first format we support
 * (uncompressed or compressed) is recorded for ECDH/ECJPAKE use; an
 * extension with no supported format is accepted and simply ignored.
 *
 * Returns 0 on success, MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO when the
 * length byte does not match the extension size.
 */
static int ssl_parse_supported_point_formats( mbedtls_ssl_context *ssl,
                                              const unsigned char *buf,
                                              size_t len )
{
    size_t i;

    if( len == 0 || (size_t)( buf[0] + 1 ) != len )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    /* Scan the buf[0] format bytes that follow the length byte. */
    for( i = 1; i < len; i++ )
    {
        unsigned char fmt = buf[i];

        if( fmt != MBEDTLS_ECP_PF_UNCOMPRESSED &&
            fmt != MBEDTLS_ECP_PF_COMPRESSED )
            continue;

#if defined(MBEDTLS_ECDH_C) || defined(MBEDTLS_ECDSA_C)
        ssl->handshake->ecdh_ctx.point_format = fmt;
#endif
#if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
        ssl->handshake->ecjpake_ctx.point_format = fmt;
#endif
        MBEDTLS_SSL_DEBUG_MSG( 4, ( "point format selected: %d", fmt ) );
        return( 0 );
    }

    return( 0 );
}
#endif /* MBEDTLS_ECDH_C || MBEDTLS_ECDSA_C ||
MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
/*
 * Parse the EC J-PAKE key-exchange (ecjpake_kkpp) extension: feed the
 * client's round-one payload into the handshake's ecjpake context and,
 * on success, flag the extension as accepted in cli_exts.  Skipped
 * silently when the ecjpake context is not configured for use.
 */
static int ssl_parse_ecjpake_kkpp( mbedtls_ssl_context *ssl,
                                   const unsigned char *buf,
                                   size_t len )
{
    int ret;

    if( mbedtls_ecjpake_check( &ssl->handshake->ecjpake_ctx ) != 0 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 3, ( "skip ecjpake kkpp extension" ) );
        return( 0 );
    }

    ret = mbedtls_ecjpake_read_round_one( &ssl->handshake->ecjpake_ctx,
                                          buf, len );
    if( ret != 0 )
    {
        MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecjpake_read_round_one", ret );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER );
        return( ret );
    }

    /* Only mark the extension as OK when we're sure it is */
    ssl->handshake->cli_exts |= MBEDTLS_TLS_EXT_ECJPAKE_KKPP_OK;

    return( 0 );
}
#endif /* MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */
#if defined(MBEDTLS_SSL_MAX_FRAGMENT_LENGTH)
/*
 * Parse the max_fragment_length extension (RFC 6066, section 4): a
 * single byte holding a valid MaxFragmentLength code, stored on the
 * negotiated session when well-formed.
 */
static int ssl_parse_max_fragment_length_ext( mbedtls_ssl_context *ssl,
                                              const unsigned char *buf,
                                              size_t len )
{
    if( len == 1 && buf[0] < MBEDTLS_SSL_MAX_FRAG_LEN_INVALID )
    {
        ssl->session_negotiate->mfl_code = buf[0];
        return( 0 );
    }

    MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
    mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                    MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER );
    return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
#endif /* MBEDTLS_SSL_MAX_FRAGMENT_LENGTH */
#if defined(MBEDTLS_SSL_DTLS_CONNECTION_ID)
/*
 * Parse the DTLS Connection ID extension
 * (draft-ietf-tls-dtls-connection-id-05): a 1-byte CID length followed
 * by the client's CID.  On success the peer CID is stored on the
 * handshake and CID use is marked as negotiated; if CID is locally
 * disabled the extension is acknowledged but ignored.
 *
 * Returns 0 on success or deliberate ignore,
 * MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO on malformed input, non-DTLS
 * transport, or a CID longer than we can store.
 */
static int ssl_parse_cid_ext( mbedtls_ssl_context *ssl,
                              const unsigned char *buf,
                              size_t len )
{
    size_t peer_cid_len;

    /* CID extension only makes sense in DTLS */
    if( ssl->conf->transport != MBEDTLS_SSL_TRANSPORT_DATAGRAM )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    /*
     * Quoting draft-ietf-tls-dtls-connection-id-05
     * https://tools.ietf.org/html/draft-ietf-tls-dtls-connection-id-05
     *
     *   struct {
     *      opaque cid<0..2^8-1>;
     *   } ConnectionId;
     */
    /* At least the 1-byte cid length must be present. */
    if( len < 1 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    peer_cid_len = *buf++;
    len--;

    /* The CID must fill the remainder of the extension exactly. */
    if( len != peer_cid_len )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    /* Ignore CID if the user has disabled its use. */
    if( ssl->negotiate_cid == MBEDTLS_SSL_CID_DISABLED )
    {
        /* Leave ssl->handshake->cid_in_use in its default
         * value of MBEDTLS_SSL_CID_DISABLED. */
        MBEDTLS_SSL_DEBUG_MSG( 3, ( "Client sent CID extension, but CID disabled" ) );
        return( 0 );
    }

    if( peer_cid_len > MBEDTLS_SSL_CID_OUT_LEN_MAX )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    ssl->handshake->cid_in_use = MBEDTLS_SSL_CID_ENABLED;
    ssl->handshake->peer_cid_len = (uint8_t) peer_cid_len;
    memcpy( ssl->handshake->peer_cid, buf, peer_cid_len );

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "Use of CID extension negotiated" ) );
    MBEDTLS_SSL_DEBUG_BUF( 3, "Client CID", buf, peer_cid_len );

    return( 0 );
}
#endif /* MBEDTLS_SSL_DTLS_CONNECTION_ID */
#if defined(MBEDTLS_SSL_TRUNCATED_HMAC)
/*
 * Parse the truncated_hmac extension (RFC 6066, section 7).  The
 * extension carries no payload; the request is honoured only if
 * truncated HMAC is enabled in the local configuration.
 */
static int ssl_parse_truncated_hmac_ext( mbedtls_ssl_context *ssl,
                                         const unsigned char *buf,
                                         size_t len )
{
    ((void) buf);

    if( len != 0 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    if( ssl->conf->trunc_hmac == MBEDTLS_SSL_TRUNC_HMAC_ENABLED )
        ssl->session_negotiate->trunc_hmac = MBEDTLS_SSL_TRUNC_HMAC_ENABLED;

    return( 0 );
}
#endif /* MBEDTLS_SSL_TRUNCATED_HMAC */
#if defined(MBEDTLS_SSL_ENCRYPT_THEN_MAC)
/*
 * Parse the encrypt_then_mac extension (RFC 7366).  No payload is
 * allowed; EtM is enabled on the negotiated session only when the local
 * configuration allows it and the protocol version is above SSL 3.0.
 */
static int ssl_parse_encrypt_then_mac_ext( mbedtls_ssl_context *ssl,
                                           const unsigned char *buf,
                                           size_t len )
{
    ((void) buf);

    if( len != 0 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    if( ssl->minor_ver != MBEDTLS_SSL_MINOR_VERSION_0 &&
        ssl->conf->encrypt_then_mac == MBEDTLS_SSL_ETM_ENABLED )
    {
        ssl->session_negotiate->encrypt_then_mac = MBEDTLS_SSL_ETM_ENABLED;
    }

    return( 0 );
}
#endif /* MBEDTLS_SSL_ENCRYPT_THEN_MAC */
#if defined(MBEDTLS_SSL_EXTENDED_MASTER_SECRET)
/*
 * Parse the Extended Master Secret extension (RFC 7627) from a
 * ClientHello.
 *
 * The extension body must be empty; any payload is a decode error that
 * aborts the handshake with a fatal alert. Extended MS is enabled for
 * this handshake only when our configuration allows it and the protocol
 * version is not SSL 3.0.
 */
static int ssl_parse_extended_ms_ext( mbedtls_ssl_context *ssl,
                                      const unsigned char *buf,
                                      size_t len )
{
    ((void) buf);

    /* A non-empty body is malformed. */
    if( len > 0 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    /* Extended MS is never used with SSL 3.0. */
    if( ssl->conf->extended_ms == MBEDTLS_SSL_EXTENDED_MS_ENABLED &&
        ssl->minor_ver != MBEDTLS_SSL_MINOR_VERSION_0 )
    {
        ssl->handshake->extended_ms = MBEDTLS_SSL_EXTENDED_MS_ENABLED;
    }

    return( 0 );
}
#endif /* MBEDTLS_SSL_EXTENDED_MASTER_SECRET */
#if defined(MBEDTLS_SSL_SESSION_TICKETS)
/*
 * Parse the SessionTicket extension (RFC 5077) from a ClientHello.
 *
 * If ticket callbacks are not configured the extension is ignored.
 * Otherwise we remember that the client supports tickets (so we will
 * send a fresh one), and if a non-empty ticket was supplied and can be
 * authenticated, the contained session is restored and the handshake
 * switches to resumption mode.
 *
 * Returns 0 in all "soft failure" cases: a bad or expired ticket is
 * simply ignored and a full handshake performed instead. Only hard
 * internal errors would propagate a non-zero code.
 */
static int ssl_parse_session_ticket_ext( mbedtls_ssl_context *ssl,
                                         unsigned char *buf,
                                         size_t len )
{
    int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
    mbedtls_ssl_session session;

    mbedtls_ssl_session_init( &session );

    /* Ticket support requires both the parse and the write callback. */
    if( ssl->conf->f_ticket_parse == NULL ||
        ssl->conf->f_ticket_write == NULL )
    {
        return( 0 );
    }

    /* Remember the client asked us to send a new ticket */
    ssl->handshake->new_session_ticket = 1;

    /* Cast to int: len is size_t, and passing it for %d is a
     * format-specifier mismatch (undefined behavior). Ticket lengths
     * are bounded by the record size, so the cast cannot truncate. */
    MBEDTLS_SSL_DEBUG_MSG( 3, ( "ticket length: %d", (int) len ) );

    if( len == 0 )
        return( 0 );

#if defined(MBEDTLS_SSL_RENEGOTIATION)
    /* Tickets are only accepted on an initial handshake. */
    if( ssl->renego_status != MBEDTLS_SSL_INITIAL_HANDSHAKE )
    {
        MBEDTLS_SSL_DEBUG_MSG( 3, ( "ticket rejected: renegotiating" ) );
        return( 0 );
    }
#endif /* MBEDTLS_SSL_RENEGOTIATION */

    /*
     * Failures are ok: just ignore the ticket and proceed.
     */
    if( ( ret = ssl->conf->f_ticket_parse( ssl->conf->p_ticket, &session,
                                           buf, len ) ) != 0 )
    {
        mbedtls_ssl_session_free( &session );

        if( ret == MBEDTLS_ERR_SSL_INVALID_MAC )
            MBEDTLS_SSL_DEBUG_MSG( 3, ( "ticket is not authentic" ) );
        else if( ret == MBEDTLS_ERR_SSL_SESSION_TICKET_EXPIRED )
            MBEDTLS_SSL_DEBUG_MSG( 3, ( "ticket is expired" ) );
        else
            MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_ticket_parse", ret );

        return( 0 );
    }

    /*
     * Keep the session ID sent by the client, since we MUST send it back to
     * inform them we're accepting the ticket (RFC 5077 section 3.4)
     */
    session.id_len = ssl->session_negotiate->id_len;
    memcpy( &session.id, ssl->session_negotiate->id, session.id_len );

    mbedtls_ssl_session_free( ssl->session_negotiate );
    memcpy( ssl->session_negotiate, &session, sizeof( mbedtls_ssl_session ) );

    /* Zeroize instead of free as we copied the content */
    mbedtls_platform_zeroize( &session, sizeof( mbedtls_ssl_session ) );

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "session successfully restored from ticket" ) );
    ssl->handshake->resume = 1;

    /* Don't send a new ticket after all, this one is OK */
    ssl->handshake->new_session_ticket = 0;

    return( 0 );
}
#endif /* MBEDTLS_SSL_SESSION_TICKETS */
#if defined(MBEDTLS_SSL_ALPN)
/*
 * Parse the ALPN extension (RFC 7301) from a ClientHello and choose the
 * application protocol according to OUR preference order.
 *
 * The peer's list is first length-validated in full, then scanned once
 * per locally configured protocol. On success ssl->alpn_chosen points
 * at the matching entry of our own list. If no protocol overlaps, a
 * fatal no_application_protocol alert is sent.
 */
static int ssl_parse_alpn_ext( mbedtls_ssl_context *ssl,
                               const unsigned char *buf, size_t len )
{
    size_t protocol_list_len, name_len, want_len;
    const unsigned char *p, *list_start, *list_end;
    const char **pref;

    /* Without a configured ALPN list we silently ignore the extension. */
    if( ssl->conf->alpn_list == NULL )
        return( 0 );

    /*
     * opaque ProtocolName<1..2^8-1>;
     *
     * struct {
     *     ProtocolName protocol_name_list<2..2^16-1>
     * } ProtocolNameList;
     *
     * Shortest valid encoding: 2 (list length) + 1 (name length)
     * + 1 (name) = 4 bytes.
     */
    if( len < 4 )
    {
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    /* The list must fill the extension exactly. */
    protocol_list_len = ( (size_t) buf[0] << 8 ) | buf[1];
    if( protocol_list_len != len - 2 )
    {
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    list_start = buf + 2;
    list_end = buf + len;

    /*
     * Pass 1: validate every name length before looking at content.
     */
    for( p = list_start; p != list_end; p += name_len )
    {
        name_len = *p++;

        /* Each name must fit within the remaining list bytes. */
        if( name_len > (size_t)( list_end - p ) )
        {
            mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                            MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
            return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
        }

        /* Empty strings MUST NOT be included (RFC 7301). */
        if( name_len == 0 )
        {
            mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                            MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER );
            return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
        }
    }

    /*
     * Pass 2: pick the first of OUR protocols that the client offered.
     */
    for( pref = ssl->conf->alpn_list; *pref != NULL; pref++ )
    {
        want_len = strlen( *pref );

        for( p = list_start; p != list_end; p += name_len )
        {
            name_len = *p++;

            if( name_len == want_len &&
                memcmp( p, *pref, name_len ) == 0 )
            {
                ssl->alpn_chosen = *pref;
                return( 0 );
            }
        }
    }

    /* If we get there, no match was found */
    mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                    MBEDTLS_SSL_ALERT_MSG_NO_APPLICATION_PROTOCOL );
    return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
#endif /* MBEDTLS_SSL_ALPN */
/*
* Auxiliary functions for ServerHello parsing and related actions
*/
#if defined(MBEDTLS_X509_CRT_PARSE_C)
/*
* Return 0 if the given key uses one of the acceptable curves, -1 otherwise
*/
#if defined(MBEDTLS_ECDSA_C)
/*
 * Return 0 if the given EC key uses one of the acceptable curves,
 * -1 otherwise. `curves` is a NULL-terminated array of candidates.
 */
static int ssl_check_key_curve( mbedtls_pk_context *pk,
                                const mbedtls_ecp_curve_info **curves )
{
    const mbedtls_ecp_curve_info **cur;
    mbedtls_ecp_group_id grp_id = mbedtls_pk_ec( *pk )->grp.id;

    for( cur = curves; *cur != NULL; cur++ )
    {
        if( (*cur)->grp_id == grp_id )
            return( 0 );
    }

    return( -1 );
}
#endif /* MBEDTLS_ECDSA_C */
/*
* Try picking a certificate for this ciphersuite,
* return 0 on success and -1 on failure.
*/
/*
 * Try picking a certificate for this ciphersuite.
 *
 * Walks the configured (or SNI-selected) key/cert list and picks the
 * first entry whose key type, key usage and (for ECDSA) curve match the
 * suite. For pre-TLS 1.2 clients a SHA-1-signed certificate is
 * preferred, with the first non-SHA-1 match kept as a fallback.
 *
 * Returns 0 on success (ssl->handshake->key_cert updated) and -1 when
 * no suitable certificate exists (key_cert is left untouched).
 */
static int ssl_pick_cert( mbedtls_ssl_context *ssl,
                          const mbedtls_ssl_ciphersuite_t * ciphersuite_info )
{
    mbedtls_ssl_key_cert *candidate, *entries, *sha1_fallback = NULL;
    mbedtls_pk_type_t pk_alg =
        mbedtls_ssl_get_ciphersuite_sig_pk_alg( ciphersuite_info );
    uint32_t flags;

#if defined(MBEDTLS_SSL_SERVER_NAME_INDICATION)
    /* An SNI callback may have installed a dedicated list. */
    if( ssl->handshake->sni_key_cert != NULL )
        entries = ssl->handshake->sni_key_cert;
    else
#endif
        entries = ssl->conf->key_cert;

    /* Suites without a certificate-based key exchange always match. */
    if( pk_alg == MBEDTLS_PK_NONE )
        return( 0 );

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciphersuite requires certificate" ) );

    if( entries == NULL )
    {
        MBEDTLS_SSL_DEBUG_MSG( 3, ( "server has no certificate" ) );
        return( -1 );
    }

    for( candidate = entries; candidate != NULL; candidate = candidate->next )
    {
        flags = 0;
        MBEDTLS_SSL_DEBUG_CRT( 3, "candidate certificate chain, certificate",
                               candidate->cert );

        /* The key must work with the suite's signature algorithm. */
        if( ! mbedtls_pk_can_do( &candidate->cert->pk, pk_alg ) )
        {
            MBEDTLS_SSL_DEBUG_MSG( 3, ( "certificate mismatch: key type" ) );
            continue;
        }

        /*
         * This avoids sending the client a cert it'll reject based on
         * keyUsage or other extensions.
         *
         * It also allows the user to provision different certificates for
         * different uses based on keyUsage, eg if they want to avoid signing
         * and decrypting with the same RSA key.
         */
        if( mbedtls_ssl_check_cert_usage( candidate->cert, ciphersuite_info,
                                          MBEDTLS_SSL_IS_SERVER, &flags ) != 0 )
        {
            MBEDTLS_SSL_DEBUG_MSG( 3, ( "certificate mismatch: "
                                "(extended) key usage extension" ) );
            continue;
        }

#if defined(MBEDTLS_ECDSA_C)
        /* ECDSA keys must additionally be on a mutually supported curve. */
        if( pk_alg == MBEDTLS_PK_ECDSA &&
            ssl_check_key_curve( &candidate->cert->pk,
                                 ssl->handshake->curves ) != 0 )
        {
            MBEDTLS_SSL_DEBUG_MSG( 3, ( "certificate mismatch: elliptic curve" ) );
            continue;
        }
#endif

        /*
         * Try to select a SHA-1 certificate for pre-1.2 clients, but still
         * present them a SHA-higher cert rather than failing if it's the only
         * one we got that satisfies the other conditions.
         */
        if( ssl->minor_ver < MBEDTLS_SSL_MINOR_VERSION_3 &&
            candidate->cert->sig_md != MBEDTLS_MD_SHA1 )
        {
            if( sha1_fallback == NULL )
                sha1_fallback = candidate;

            MBEDTLS_SSL_DEBUG_MSG( 3, ( "certificate not preferred: "
                                "sha-2 with pre-TLS 1.2 client" ) );
            continue;
        }

        /* If we get there, we got a winner */
        break;
    }

    if( candidate == NULL )
        candidate = sha1_fallback;

    /* Do not update ssl->handshake->key_cert unless there is a match */
    if( candidate != NULL )
    {
        ssl->handshake->key_cert = candidate;
        MBEDTLS_SSL_DEBUG_CRT( 3, "selected certificate chain, certificate",
                               ssl->handshake->key_cert->cert );
        return( 0 );
    }

    return( -1 );
}
#endif /* MBEDTLS_X509_CRT_PARSE_C */
/*
* Check if a given ciphersuite is suitable for use with our config/keys/etc
* Sets ciphersuite_info only if the suite matches.
*/
/*
 * Check if a given ciphersuite is suitable for use with our config/keys/etc
 * Sets ciphersuite_info only if the suite matches.
 *
 * Returns 0 both on match (*ciphersuite_info set) and on a harmless
 * mismatch (*ciphersuite_info untouched, caller tries the next suite);
 * a negative error code is returned only for internal errors.
 */
static int ssl_ciphersuite_match( mbedtls_ssl_context *ssl, int suite_id,
                                  const mbedtls_ssl_ciphersuite_t **ciphersuite_info )
{
    const mbedtls_ssl_ciphersuite_t *suite_info;
#if defined(MBEDTLS_SSL_PROTO_TLS1_2) && \
    defined(MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED)
    mbedtls_pk_type_t sig_type;
#endif
    /* The id comes from our own configured list, so a failed lookup
     * indicates an internal inconsistency. */
    suite_info = mbedtls_ssl_ciphersuite_from_id( suite_id );
    if( suite_info == NULL )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) );
        return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
    }
    MBEDTLS_SSL_DEBUG_MSG( 3, ( "trying ciphersuite: %s", suite_info->name ) );
    /* The suite must allow the negotiated protocol version. */
    if( suite_info->min_minor_ver > ssl->minor_ver ||
        suite_info->max_minor_ver < ssl->minor_ver )
    {
        MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciphersuite mismatch: version" ) );
        return( 0 );
    }
#if defined(MBEDTLS_SSL_PROTO_DTLS)
    /* Some suites (flagged NODTLS, e.g. stream ciphers) cannot be
     * used over DTLS. */
    if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM &&
        ( suite_info->flags & MBEDTLS_CIPHERSUITE_NODTLS ) )
        return( 0 );
#endif
#if defined(MBEDTLS_ARC4_C)
    /* Honour the runtime RC4 kill-switch. */
    if( ssl->conf->arc4_disabled == MBEDTLS_SSL_ARC4_DISABLED &&
        suite_info->cipher == MBEDTLS_CIPHER_ARC4_128 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciphersuite mismatch: rc4" ) );
        return( 0 );
    }
#endif
#if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
    /* EC J-PAKE requires the client's KKPP extension to have been
     * received and accepted earlier in this handshake. */
    if( suite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECJPAKE &&
        ( ssl->handshake->cli_exts & MBEDTLS_TLS_EXT_ECJPAKE_KKPP_OK ) == 0 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciphersuite mismatch: ecjpake "
                                    "not configured or ext missing" ) );
        return( 0 );
    }
#endif
#if defined(MBEDTLS_ECDH_C) || defined(MBEDTLS_ECDSA_C)
    /* EC-based suites need at least one curve shared with the client. */
    if( mbedtls_ssl_ciphersuite_uses_ec( suite_info ) &&
        ( ssl->handshake->curves == NULL ||
          ssl->handshake->curves[0] == NULL ) )
    {
        MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciphersuite mismatch: "
                            "no common elliptic curve" ) );
        return( 0 );
    }
#endif
#if defined(MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED)
    /* If the ciphersuite requires a pre-shared key and we don't
     * have one, skip it now rather than failing later */
    if( mbedtls_ssl_ciphersuite_uses_psk( suite_info ) &&
        ssl_conf_has_psk_or_cb( ssl->conf ) == 0 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciphersuite mismatch: no pre-shared key" ) );
        return( 0 );
    }
#endif
#if defined(MBEDTLS_SSL_PROTO_TLS1_2) && \
    defined(MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED)
    /* If the ciphersuite requires signing, check whether
     * a suitable hash algorithm is present. */
    if( ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_3 )
    {
        sig_type = mbedtls_ssl_get_ciphersuite_sig_alg( suite_info );
        if( sig_type != MBEDTLS_PK_NONE &&
            mbedtls_ssl_sig_hash_set_find( &ssl->handshake->hash_algs, sig_type ) == MBEDTLS_MD_NONE )
        {
            MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciphersuite mismatch: no suitable hash algorithm "
                                        "for signature algorithm %d", sig_type ) );
            return( 0 );
        }
    }
#endif /* MBEDTLS_SSL_PROTO_TLS1_2 &&
          MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED */
#if defined(MBEDTLS_X509_CRT_PARSE_C)
    /*
     * Final check: if ciphersuite requires us to have a
     * certificate/key of a particular type:
     * - select the appropriate certificate if we have one, or
     * - try the next ciphersuite if we don't
     * This must be done last since we modify the key_cert list.
     */
    if( ssl_pick_cert( ssl, suite_info ) != 0 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciphersuite mismatch: "
                                    "no suitable certificate" ) );
        return( 0 );
    }
#endif
    *ciphersuite_info = suite_info;
    return( 0 );
}
#if defined(MBEDTLS_SSL_SRV_SUPPORT_SSLV2_CLIENT_HELLO)
/*
 * Parse an SSLv2-format ClientHello (record header and handshake body
 * combined), as allowed for backwards compatibility by RFC 5246
 * Appendix E.2. Negotiates the protocol version, extracts session id,
 * challenge and ciphersuite list, checks the renegotiation and fallback
 * SCSVs, and selects a usable ciphersuite.
 *
 * Returns 0 on success, an MBEDTLS_ERR_SSL_* code otherwise. Alerts are
 * sent only where the peer can be assumed to understand them.
 */
static int ssl_parse_client_hello_v2( mbedtls_ssl_context *ssl )
{
    int ret, got_common_suite;
    unsigned int i, j;
    size_t n;
    unsigned int ciph_len, sess_len, chal_len;
    unsigned char *buf, *p;
    const int *ciphersuites;
    const mbedtls_ssl_ciphersuite_t *ciphersuite_info;

    MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> parse client hello v2" ) );

#if defined(MBEDTLS_SSL_RENEGOTIATION)
    /* SSLv2 ClientHello is only acceptable on an initial handshake. */
    if( ssl->renego_status != MBEDTLS_SSL_INITIAL_HANDSHAKE )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "client hello v2 illegal for renegotiation" ) );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }
#endif /* MBEDTLS_SSL_RENEGOTIATION */

    buf = ssl->in_hdr;

    MBEDTLS_SSL_DEBUG_BUF( 4, "record header", buf, 5 );

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v2, message type: %d",
                                buf[2] ) );
    MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v2, message len.: %d",
                                ( ( buf[0] & 0x7F ) << 8 ) | buf[1] ) );
    MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v2, max. version: [%d:%d]",
                                buf[3], buf[4] ) );

    /*
     * SSLv2 Client Hello
     *
     * Record layer:
     *     0  .   1   message length
     *
     * SSL layer:
     *     2  .   2   message type
     *     3  .   4   protocol version
     */
    if( buf[2] != MBEDTLS_SSL_HS_CLIENT_HELLO ||
        buf[3] != MBEDTLS_SSL_MAJOR_VERSION_3 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    /* The top bit of the 2-byte length is the short-header marker. */
    n = ( ( buf[0] << 8 ) | buf[1] ) & 0x7FFF;

    /* Sanity-bound the message length before fetching the rest. */
    if( n < 17 || n > 512 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    /* Negotiate the highest version supported by both sides. */
    ssl->major_ver = MBEDTLS_SSL_MAJOR_VERSION_3;
    ssl->minor_ver = ( buf[4] <= ssl->conf->max_minor_ver )
                     ? buf[4] : ssl->conf->max_minor_ver;

    if( ssl->minor_ver < ssl->conf->min_minor_ver )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "client only supports ssl smaller than minimum"
                            " [%d:%d] < [%d:%d]",
                            ssl->major_ver, ssl->minor_ver,
                            ssl->conf->min_major_ver, ssl->conf->min_minor_ver ) );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_PROTOCOL_VERSION );
        return( MBEDTLS_ERR_SSL_BAD_HS_PROTOCOL_VERSION );
    }

    /* Record the client's advertised maximum for later checks. */
    ssl->handshake->max_major_ver = buf[3];
    ssl->handshake->max_minor_ver = buf[4];

    if( ( ret = mbedtls_ssl_fetch_input( ssl, 2 + n ) ) != 0 )
    {
        MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_fetch_input", ret );
        return( ret );
    }

    /* Checksum covers the SSL layer only, not the 2-byte record header. */
    ssl->handshake->update_checksum( ssl, buf + 2, n );

    buf = ssl->in_msg;
    n = ssl->in_left - 5;

    /*
     *    0  .   1   ciphersuitelist length
     *    2  .   3   session id length
     *    4  .   5   challenge length
     *    6  .  ..   ciphersuitelist
     *   ..  .  ..   session id
     *   ..  .  ..   challenge
     */
    MBEDTLS_SSL_DEBUG_BUF( 4, "record contents", buf, n );

    ciph_len = ( buf[0] << 8 ) | buf[1];
    sess_len = ( buf[2] << 8 ) | buf[3];
    chal_len = ( buf[4] << 8 ) | buf[5];

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciph_len: %d, sess_len: %d, chal_len: %d",
                                ciph_len, sess_len, chal_len ) );

    /*
     * Make sure each parameter length is valid
     */
    /* SSLv2 suites are 3 bytes each; at least one must be present. */
    if( ciph_len < 3 || ( ciph_len % 3 ) != 0 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    if( sess_len > 32 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    if( chal_len < 8 || chal_len > 32 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    /* The three fields must fill the message exactly. */
    if( n != 6 + ciph_len + sess_len + chal_len )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, ciphersuitelist",
                           buf + 6, ciph_len );
    MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, session id",
                           buf + 6 + ciph_len, sess_len );
    MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, challenge",
                           buf + 6 + ciph_len + sess_len, chal_len );

    p = buf + 6 + ciph_len;
    ssl->session_negotiate->id_len = sess_len;
    memset( ssl->session_negotiate->id, 0,
            sizeof( ssl->session_negotiate->id ) );
    memcpy( ssl->session_negotiate->id, p, ssl->session_negotiate->id_len );

    p += sess_len;
    /* The (up to 32-byte) challenge is right-aligned in the first half
     * of randbytes, mirroring the TLS client random layout. */
    memset( ssl->handshake->randbytes, 0, 64 );
    memcpy( ssl->handshake->randbytes + 32 - chal_len, p, chal_len );

    /*
     * Check for TLS_EMPTY_RENEGOTIATION_INFO_SCSV
     */
    for( i = 0, p = buf + 6; i < ciph_len; i += 3, p += 3 )
    {
        if( p[0] == 0 && p[1] == 0 && p[2] == MBEDTLS_SSL_EMPTY_RENEGOTIATION_INFO )
        {
            MBEDTLS_SSL_DEBUG_MSG( 3, ( "received TLS_EMPTY_RENEGOTIATION_INFO " ) );
#if defined(MBEDTLS_SSL_RENEGOTIATION)
            /* The SCSV must not reappear once renegotiation started. */
            if( ssl->renego_status == MBEDTLS_SSL_RENEGOTIATION_IN_PROGRESS )
            {
                MBEDTLS_SSL_DEBUG_MSG( 1, ( "received RENEGOTIATION SCSV "
                                            "during renegotiation" ) );
                mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
                return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
            }
#endif /* MBEDTLS_SSL_RENEGOTIATION */
            ssl->secure_renegotiation = MBEDTLS_SSL_SECURE_RENEGOTIATION;
            break;
        }
    }

#if defined(MBEDTLS_SSL_FALLBACK_SCSV)
    /* RFC 7507: reject a fallback retry when we support a higher
     * version than the one the client is now offering. */
    for( i = 0, p = buf + 6; i < ciph_len; i += 3, p += 3 )
    {
        if( p[0] == 0 &&
            p[1] == (unsigned char)( ( MBEDTLS_SSL_FALLBACK_SCSV_VALUE >> 8 ) & 0xff ) &&
            p[2] == (unsigned char)( ( MBEDTLS_SSL_FALLBACK_SCSV_VALUE      ) & 0xff ) )
        {
            MBEDTLS_SSL_DEBUG_MSG( 3, ( "received FALLBACK_SCSV" ) );
            if( ssl->minor_ver < ssl->conf->max_minor_ver )
            {
                MBEDTLS_SSL_DEBUG_MSG( 1, ( "inapropriate fallback" ) );
                mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_INAPROPRIATE_FALLBACK );
                return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
            }
            break;
        }
    }
#endif /* MBEDTLS_SSL_FALLBACK_SCSV */

    got_common_suite = 0;
    ciphersuites = ssl->conf->ciphersuite_list[ssl->minor_ver];
    ciphersuite_info = NULL;
    /* Loop nesting order decides whose preference order wins. */
#if defined(MBEDTLS_SSL_SRV_RESPECT_CLIENT_PREFERENCE)
    for( j = 0, p = buf + 6; j < ciph_len; j += 3, p += 3 )
        for( i = 0; ciphersuites[i] != 0; i++ )
#else
    for( i = 0; ciphersuites[i] != 0; i++ )
        for( j = 0, p = buf + 6; j < ciph_len; j += 3, p += 3 )
#endif
        {
            if( p[0] != 0 ||
                p[1] != ( ( ciphersuites[i] >> 8 ) & 0xFF ) ||
                p[2] != ( ( ciphersuites[i]      ) & 0xFF ) )
                continue;

            got_common_suite = 1;

            if( ( ret = ssl_ciphersuite_match( ssl, ciphersuites[i],
                                               &ciphersuite_info ) ) != 0 )
                return( ret );

            if( ciphersuite_info != NULL )
                goto have_ciphersuite_v2;
        }

    if( got_common_suite )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "got ciphersuites in common, "
                            "but none of them usable" ) );
        return( MBEDTLS_ERR_SSL_NO_USABLE_CIPHERSUITE );
    }
    else
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "got no ciphersuites in common" ) );
        return( MBEDTLS_ERR_SSL_NO_CIPHER_CHOSEN );
    }

have_ciphersuite_v2:
    MBEDTLS_SSL_DEBUG_MSG( 2, ( "selected ciphersuite: %s", ciphersuite_info->name ) );

    ssl->session_negotiate->ciphersuite = ciphersuites[i];
    ssl->handshake->ciphersuite_info = ciphersuite_info;

    /*
     * SSLv2 Client Hello relevant renegotiation security checks
     */
    if( ssl->secure_renegotiation == MBEDTLS_SSL_LEGACY_RENEGOTIATION &&
        ssl->conf->allow_legacy_renegotiation == MBEDTLS_SSL_LEGACY_BREAK_HANDSHAKE )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "legacy renegotiation, breaking off handshake" ) );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
    }

    ssl->in_left = 0;
    ssl->state++;

    MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= parse client hello v2" ) );

    return( 0 );
}
#endif /* MBEDTLS_SSL_SRV_SUPPORT_SSLV2_CLIENT_HELLO */
/* This function doesn't alert on errors that happen early during
ClientHello parsing because they might indicate that the client is
not talking SSL/TLS at all and would not understand our alert. */
static int ssl_parse_client_hello( mbedtls_ssl_context *ssl )
{
int ret, got_common_suite;
size_t i, j;
size_t ciph_offset, comp_offset, ext_offset;
size_t msg_len, ciph_len, sess_len, comp_len, ext_len;
#if defined(MBEDTLS_SSL_PROTO_DTLS)
size_t cookie_offset, cookie_len;
#endif
unsigned char *buf, *p, *ext;
#if defined(MBEDTLS_SSL_RENEGOTIATION)
int renegotiation_info_seen = 0;
#endif
int handshake_failure = 0;
const int *ciphersuites;
const mbedtls_ssl_ciphersuite_t *ciphersuite_info;
int major, minor;
/* If there is no signature-algorithm extension present,
* we need to fall back to the default values for allowed
* signature-hash pairs. */
#if defined(MBEDTLS_SSL_PROTO_TLS1_2) && \
defined(MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED)
int sig_hash_alg_ext_present = 0;
#endif /* MBEDTLS_SSL_PROTO_TLS1_2 &&
MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED */
MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> parse client hello" ) );
#if defined(MBEDTLS_SSL_DTLS_ANTI_REPLAY)
read_record_header:
#endif
/*
* If renegotiating, then the input was read with mbedtls_ssl_read_record(),
* otherwise read it ourselves manually in order to support SSLv2
* ClientHello, which doesn't use the same record layer format.
*/
#if defined(MBEDTLS_SSL_RENEGOTIATION)
if( ssl->renego_status == MBEDTLS_SSL_INITIAL_HANDSHAKE )
#endif
{
if( ( ret = mbedtls_ssl_fetch_input( ssl, 5 ) ) != 0 )
{
/* No alert on a read error. */
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_fetch_input", ret );
return( ret );
}
}
buf = ssl->in_hdr;
#if defined(MBEDTLS_SSL_SRV_SUPPORT_SSLV2_CLIENT_HELLO)
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_STREAM )
#endif
if( ( buf[0] & 0x80 ) != 0 )
return( ssl_parse_client_hello_v2( ssl ) );
#endif
MBEDTLS_SSL_DEBUG_BUF( 4, "record header", buf, mbedtls_ssl_in_hdr_len( ssl ) );
/*
* SSLv3/TLS Client Hello
*
* Record layer:
* 0 . 0 message type
* 1 . 2 protocol version
* 3 . 11 DTLS: epoch + record sequence number
* 3 . 4 message length
*/
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, message type: %d",
buf[0] ) );
if( buf[0] != MBEDTLS_SSL_MSG_HANDSHAKE )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, message len.: %d",
( ssl->in_len[0] << 8 ) | ssl->in_len[1] ) );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, protocol version: [%d:%d]",
buf[1], buf[2] ) );
mbedtls_ssl_read_version( &major, &minor, ssl->conf->transport, buf + 1 );
/* According to RFC 5246 Appendix E.1, the version here is typically
* "{03,00}, the lowest version number supported by the client, [or] the
* value of ClientHello.client_version", so the only meaningful check here
* is the major version shouldn't be less than 3 */
if( major < MBEDTLS_SSL_MAJOR_VERSION_3 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
/* For DTLS if this is the initial handshake, remember the client sequence
* number to use it in our next message (RFC 6347 4.2.1) */
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM
#if defined(MBEDTLS_SSL_RENEGOTIATION)
&& ssl->renego_status == MBEDTLS_SSL_INITIAL_HANDSHAKE
#endif
)
{
/* Epoch should be 0 for initial handshakes */
if( ssl->in_ctr[0] != 0 || ssl->in_ctr[1] != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
memcpy( ssl->cur_out_ctr + 2, ssl->in_ctr + 2, 6 );
#if defined(MBEDTLS_SSL_DTLS_ANTI_REPLAY)
if( mbedtls_ssl_dtls_replay_check( ssl ) != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "replayed record, discarding" ) );
ssl->next_record_offset = 0;
ssl->in_left = 0;
goto read_record_header;
}
/* No MAC to check yet, so we can update right now */
mbedtls_ssl_dtls_replay_update( ssl );
#endif
}
#endif /* MBEDTLS_SSL_PROTO_DTLS */
msg_len = ( ssl->in_len[0] << 8 ) | ssl->in_len[1];
#if defined(MBEDTLS_SSL_RENEGOTIATION)
if( ssl->renego_status != MBEDTLS_SSL_INITIAL_HANDSHAKE )
{
/* Set by mbedtls_ssl_read_record() */
msg_len = ssl->in_hslen;
}
else
#endif
{
if( msg_len > MBEDTLS_SSL_IN_CONTENT_LEN )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
if( ( ret = mbedtls_ssl_fetch_input( ssl,
mbedtls_ssl_in_hdr_len( ssl ) + msg_len ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_fetch_input", ret );
return( ret );
}
/* Done reading this record, get ready for the next one */
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
ssl->next_record_offset = msg_len + mbedtls_ssl_in_hdr_len( ssl );
else
#endif
ssl->in_left = 0;
}
buf = ssl->in_msg;
MBEDTLS_SSL_DEBUG_BUF( 4, "record contents", buf, msg_len );
ssl->handshake->update_checksum( ssl, buf, msg_len );
/*
* Handshake layer:
* 0 . 0 handshake type
* 1 . 3 handshake length
* 4 . 5 DTLS only: message seqence number
* 6 . 8 DTLS only: fragment offset
* 9 . 11 DTLS only: fragment length
*/
if( msg_len < mbedtls_ssl_hs_hdr_len( ssl ) )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, handshake type: %d", buf[0] ) );
if( buf[0] != MBEDTLS_SSL_HS_CLIENT_HELLO )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, handshake len.: %d",
( buf[1] << 16 ) | ( buf[2] << 8 ) | buf[3] ) );
/* We don't support fragmentation of ClientHello (yet?) */
if( buf[1] != 0 ||
msg_len != mbedtls_ssl_hs_hdr_len( ssl ) + ( ( buf[2] << 8 ) | buf[3] ) )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
{
/*
* Copy the client's handshake message_seq on initial handshakes,
* check sequence number on renego.
*/
#if defined(MBEDTLS_SSL_RENEGOTIATION)
if( ssl->renego_status == MBEDTLS_SSL_RENEGOTIATION_IN_PROGRESS )
{
/* This couldn't be done in ssl_prepare_handshake_record() */
unsigned int cli_msg_seq = ( ssl->in_msg[4] << 8 ) |
ssl->in_msg[5];
if( cli_msg_seq != ssl->handshake->in_msg_seq )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message_seq: "
"%d (expected %d)", cli_msg_seq,
ssl->handshake->in_msg_seq ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
ssl->handshake->in_msg_seq++;
}
else
#endif
{
unsigned int cli_msg_seq = ( ssl->in_msg[4] << 8 ) |
ssl->in_msg[5];
ssl->handshake->out_msg_seq = cli_msg_seq;
ssl->handshake->in_msg_seq = cli_msg_seq + 1;
}
/*
* For now we don't support fragmentation, so make sure
* fragment_offset == 0 and fragment_length == length
*/
if( ssl->in_msg[6] != 0 || ssl->in_msg[7] != 0 || ssl->in_msg[8] != 0 ||
memcmp( ssl->in_msg + 1, ssl->in_msg + 9, 3 ) != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "ClientHello fragmentation not supported" ) );
return( MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE );
}
}
#endif /* MBEDTLS_SSL_PROTO_DTLS */
buf += mbedtls_ssl_hs_hdr_len( ssl );
msg_len -= mbedtls_ssl_hs_hdr_len( ssl );
/*
* ClientHello layer:
* 0 . 1 protocol version
* 2 . 33 random bytes (starting with 4 bytes of Unix time)
* 34 . 35 session id length (1 byte)
* 35 . 34+x session id
* 35+x . 35+x DTLS only: cookie length (1 byte)
* 36+x . .. DTLS only: cookie
* .. . .. ciphersuite list length (2 bytes)
* .. . .. ciphersuite list
* .. . .. compression alg. list length (1 byte)
* .. . .. compression alg. list
* .. . .. extensions length (2 bytes, optional)
* .. . .. extensions (optional)
*/
/*
* Minimal length (with everything empty and extensions omitted) is
* 2 + 32 + 1 + 2 + 1 = 38 bytes. Check that first, so that we can
* read at least up to session id length without worrying.
*/
if( msg_len < 38 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
/*
* Check and save the protocol version
*/
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, version", buf, 2 );
mbedtls_ssl_read_version( &ssl->major_ver, &ssl->minor_ver,
ssl->conf->transport, buf );
ssl->handshake->max_major_ver = ssl->major_ver;
ssl->handshake->max_minor_ver = ssl->minor_ver;
if( ssl->major_ver < ssl->conf->min_major_ver ||
ssl->minor_ver < ssl->conf->min_minor_ver )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "client only supports ssl smaller than minimum"
" [%d:%d] < [%d:%d]",
ssl->major_ver, ssl->minor_ver,
ssl->conf->min_major_ver, ssl->conf->min_minor_ver ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_PROTOCOL_VERSION );
return( MBEDTLS_ERR_SSL_BAD_HS_PROTOCOL_VERSION );
}
if( ssl->major_ver > ssl->conf->max_major_ver )
{
ssl->major_ver = ssl->conf->max_major_ver;
ssl->minor_ver = ssl->conf->max_minor_ver;
}
else if( ssl->minor_ver > ssl->conf->max_minor_ver )
ssl->minor_ver = ssl->conf->max_minor_ver;
/*
* Save client random (inc. Unix time)
*/
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, random bytes", buf + 2, 32 );
memcpy( ssl->handshake->randbytes, buf + 2, 32 );
/*
* Check the session ID length and save session ID
*/
sess_len = buf[34];
if( sess_len > sizeof( ssl->session_negotiate->id ) ||
sess_len + 34 + 2 > msg_len ) /* 2 for cipherlist length field */
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, session id", buf + 35, sess_len );
ssl->session_negotiate->id_len = sess_len;
memset( ssl->session_negotiate->id, 0,
sizeof( ssl->session_negotiate->id ) );
memcpy( ssl->session_negotiate->id, buf + 35,
ssl->session_negotiate->id_len );
/*
* Check the cookie length and content
*/
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
{
cookie_offset = 35 + sess_len;
cookie_len = buf[cookie_offset];
if( cookie_offset + 1 + cookie_len + 2 > msg_len )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_PROTOCOL_VERSION );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, cookie",
buf + cookie_offset + 1, cookie_len );
#if defined(MBEDTLS_SSL_DTLS_HELLO_VERIFY)
if( ssl->conf->f_cookie_check != NULL
#if defined(MBEDTLS_SSL_RENEGOTIATION)
&& ssl->renego_status == MBEDTLS_SSL_INITIAL_HANDSHAKE
#endif
)
{
if( ssl->conf->f_cookie_check( ssl->conf->p_cookie,
buf + cookie_offset + 1, cookie_len,
ssl->cli_id, ssl->cli_id_len ) != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 2, ( "cookie verification failed" ) );
ssl->handshake->verify_cookie_len = 1;
}
else
{
MBEDTLS_SSL_DEBUG_MSG( 2, ( "cookie verification passed" ) );
ssl->handshake->verify_cookie_len = 0;
}
}
else
#endif /* MBEDTLS_SSL_DTLS_HELLO_VERIFY */
{
/* We know we didn't send a cookie, so it should be empty */
if( cookie_len != 0 )
{
/* This may be an attacker's probe, so don't send an alert */
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_MSG( 2, ( "cookie verification skipped" ) );
}
/*
* Check the ciphersuitelist length (will be parsed later)
*/
ciph_offset = cookie_offset + 1 + cookie_len;
}
else
#endif /* MBEDTLS_SSL_PROTO_DTLS */
ciph_offset = 35 + sess_len;
ciph_len = ( buf[ciph_offset + 0] << 8 )
| ( buf[ciph_offset + 1] );
if( ciph_len < 2 ||
ciph_len + 2 + ciph_offset + 1 > msg_len || /* 1 for comp. alg. len */
( ciph_len % 2 ) != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, ciphersuitelist",
buf + ciph_offset + 2, ciph_len );
/*
* Check the compression algorithms length and pick one
*/
comp_offset = ciph_offset + 2 + ciph_len;
comp_len = buf[comp_offset];
if( comp_len < 1 ||
comp_len > 16 ||
comp_len + comp_offset + 1 > msg_len )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, compression",
buf + comp_offset + 1, comp_len );
ssl->session_negotiate->compression = MBEDTLS_SSL_COMPRESS_NULL;
#if defined(MBEDTLS_ZLIB_SUPPORT)
for( i = 0; i < comp_len; ++i )
{
if( buf[comp_offset + 1 + i] == MBEDTLS_SSL_COMPRESS_DEFLATE )
{
ssl->session_negotiate->compression = MBEDTLS_SSL_COMPRESS_DEFLATE;
break;
}
}
#endif
/* See comments in ssl_write_client_hello() */
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
ssl->session_negotiate->compression = MBEDTLS_SSL_COMPRESS_NULL;
#endif
/* Do not parse the extensions if the protocol is SSLv3 */
#if defined(MBEDTLS_SSL_PROTO_SSL3)
if( ( ssl->major_ver != 3 ) || ( ssl->minor_ver != 0 ) )
{
#endif
/*
* Check the extension length
*/
ext_offset = comp_offset + 1 + comp_len;
if( msg_len > ext_offset )
{
if( msg_len < ext_offset + 2 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
ext_len = ( buf[ext_offset + 0] << 8 )
| ( buf[ext_offset + 1] );
if( ( ext_len > 0 && ext_len < 4 ) ||
msg_len != ext_offset + 2 + ext_len )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
}
else
ext_len = 0;
ext = buf + ext_offset + 2;
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello extensions", ext, ext_len );
while( ext_len != 0 )
{
unsigned int ext_id;
unsigned int ext_size;
if ( ext_len < 4 ) {
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
ext_id = ( ( ext[0] << 8 ) | ( ext[1] ) );
ext_size = ( ( ext[2] << 8 ) | ( ext[3] ) );
if( ext_size + 4 > ext_len )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
switch( ext_id )
{
#if defined(MBEDTLS_SSL_SERVER_NAME_INDICATION)
case MBEDTLS_TLS_EXT_SERVERNAME:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found ServerName extension" ) );
if( ssl->conf->f_sni == NULL )
break;
ret = ssl_parse_servername_ext( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_SSL_SERVER_NAME_INDICATION */
case MBEDTLS_TLS_EXT_RENEGOTIATION_INFO:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found renegotiation extension" ) );
#if defined(MBEDTLS_SSL_RENEGOTIATION)
renegotiation_info_seen = 1;
#endif
ret = ssl_parse_renegotiation_info( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#if defined(MBEDTLS_SSL_PROTO_TLS1_2) && \
defined(MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED)
case MBEDTLS_TLS_EXT_SIG_ALG:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found signature_algorithms extension" ) );
ret = ssl_parse_signature_algorithms_ext( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
sig_hash_alg_ext_present = 1;
break;
#endif /* MBEDTLS_SSL_PROTO_TLS1_2 &&
MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED */
#if defined(MBEDTLS_ECDH_C) || defined(MBEDTLS_ECDSA_C) || \
defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
case MBEDTLS_TLS_EXT_SUPPORTED_ELLIPTIC_CURVES:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found supported elliptic curves extension" ) );
ret = ssl_parse_supported_elliptic_curves( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
case MBEDTLS_TLS_EXT_SUPPORTED_POINT_FORMATS:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found supported point formats extension" ) );
ssl->handshake->cli_exts |= MBEDTLS_TLS_EXT_SUPPORTED_POINT_FORMATS_PRESENT;
ret = ssl_parse_supported_point_formats( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_ECDH_C || MBEDTLS_ECDSA_C ||
MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
case MBEDTLS_TLS_EXT_ECJPAKE_KKPP:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found ecjpake kkpp extension" ) );
ret = ssl_parse_ecjpake_kkpp( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */
#if defined(MBEDTLS_SSL_MAX_FRAGMENT_LENGTH)
case MBEDTLS_TLS_EXT_MAX_FRAGMENT_LENGTH:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found max fragment length extension" ) );
ret = ssl_parse_max_fragment_length_ext( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_SSL_MAX_FRAGMENT_LENGTH */
#if defined(MBEDTLS_SSL_TRUNCATED_HMAC)
case MBEDTLS_TLS_EXT_TRUNCATED_HMAC:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found truncated hmac extension" ) );
ret = ssl_parse_truncated_hmac_ext( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_SSL_TRUNCATED_HMAC */
#if defined(MBEDTLS_SSL_DTLS_CONNECTION_ID)
case MBEDTLS_TLS_EXT_CID:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found CID extension" ) );
ret = ssl_parse_cid_ext( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_SSL_DTLS_CONNECTION_ID */
#if defined(MBEDTLS_SSL_ENCRYPT_THEN_MAC)
case MBEDTLS_TLS_EXT_ENCRYPT_THEN_MAC:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found encrypt then mac extension" ) );
ret = ssl_parse_encrypt_then_mac_ext( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_SSL_ENCRYPT_THEN_MAC */
#if defined(MBEDTLS_SSL_EXTENDED_MASTER_SECRET)
case MBEDTLS_TLS_EXT_EXTENDED_MASTER_SECRET:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found extended master secret extension" ) );
ret = ssl_parse_extended_ms_ext( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_SSL_EXTENDED_MASTER_SECRET */
#if defined(MBEDTLS_SSL_SESSION_TICKETS)
case MBEDTLS_TLS_EXT_SESSION_TICKET:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found session ticket extension" ) );
ret = ssl_parse_session_ticket_ext( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_SSL_SESSION_TICKETS */
#if defined(MBEDTLS_SSL_ALPN)
case MBEDTLS_TLS_EXT_ALPN:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found alpn extension" ) );
ret = ssl_parse_alpn_ext( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_SSL_ALPN */
default:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "unknown extension found: %d (ignoring)",
ext_id ) );
}
ext_len -= 4 + ext_size;
ext += 4 + ext_size;
if( ext_len > 0 && ext_len < 4 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
}
#if defined(MBEDTLS_SSL_PROTO_SSL3)
}
#endif
#if defined(MBEDTLS_SSL_FALLBACK_SCSV)
for( i = 0, p = buf + ciph_offset + 2; i < ciph_len; i += 2, p += 2 )
{
if( p[0] == (unsigned char)( ( MBEDTLS_SSL_FALLBACK_SCSV_VALUE >> 8 ) & 0xff ) &&
p[1] == (unsigned char)( ( MBEDTLS_SSL_FALLBACK_SCSV_VALUE ) & 0xff ) )
{
MBEDTLS_SSL_DEBUG_MSG( 2, ( "received FALLBACK_SCSV" ) );
if( ssl->minor_ver < ssl->conf->max_minor_ver )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "inapropriate fallback" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_INAPROPRIATE_FALLBACK );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
break;
}
}
#endif /* MBEDTLS_SSL_FALLBACK_SCSV */
#if defined(MBEDTLS_SSL_PROTO_TLS1_2) && \
defined(MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED)
/*
* Try to fall back to default hash SHA1 if the client
* hasn't provided any preferred signature-hash combinations.
*/
if( sig_hash_alg_ext_present == 0 )
{
mbedtls_md_type_t md_default = MBEDTLS_MD_SHA1;
if( mbedtls_ssl_check_sig_hash( ssl, md_default ) != 0 )
md_default = MBEDTLS_MD_NONE;
mbedtls_ssl_sig_hash_set_const_hash( &ssl->handshake->hash_algs, md_default );
}
#endif /* MBEDTLS_SSL_PROTO_TLS1_2 &&
MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED */
/*
* Check for TLS_EMPTY_RENEGOTIATION_INFO_SCSV
*/
for( i = 0, p = buf + ciph_offset + 2; i < ciph_len; i += 2, p += 2 )
{
if( p[0] == 0 && p[1] == MBEDTLS_SSL_EMPTY_RENEGOTIATION_INFO )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "received TLS_EMPTY_RENEGOTIATION_INFO " ) );
#if defined(MBEDTLS_SSL_RENEGOTIATION)
if( ssl->renego_status == MBEDTLS_SSL_RENEGOTIATION_IN_PROGRESS )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "received RENEGOTIATION SCSV "
"during renegotiation" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
#endif
ssl->secure_renegotiation = MBEDTLS_SSL_SECURE_RENEGOTIATION;
break;
}
}
/*
* Renegotiation security checks
*/
if( ssl->secure_renegotiation != MBEDTLS_SSL_SECURE_RENEGOTIATION &&
ssl->conf->allow_legacy_renegotiation == MBEDTLS_SSL_LEGACY_BREAK_HANDSHAKE )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "legacy renegotiation, breaking off handshake" ) );
handshake_failure = 1;
}
#if defined(MBEDTLS_SSL_RENEGOTIATION)
else if( ssl->renego_status == MBEDTLS_SSL_RENEGOTIATION_IN_PROGRESS &&
ssl->secure_renegotiation == MBEDTLS_SSL_SECURE_RENEGOTIATION &&
renegotiation_info_seen == 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "renegotiation_info extension missing (secure)" ) );
handshake_failure = 1;
}
else if( ssl->renego_status == MBEDTLS_SSL_RENEGOTIATION_IN_PROGRESS &&
ssl->secure_renegotiation == MBEDTLS_SSL_LEGACY_RENEGOTIATION &&
ssl->conf->allow_legacy_renegotiation == MBEDTLS_SSL_LEGACY_NO_RENEGOTIATION )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "legacy renegotiation not allowed" ) );
handshake_failure = 1;
}
else if( ssl->renego_status == MBEDTLS_SSL_RENEGOTIATION_IN_PROGRESS &&
ssl->secure_renegotiation == MBEDTLS_SSL_LEGACY_RENEGOTIATION &&
renegotiation_info_seen == 1 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "renegotiation_info extension present (legacy)" ) );
handshake_failure = 1;
}
#endif /* MBEDTLS_SSL_RENEGOTIATION */
if( handshake_failure == 1 )
{
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
/*
* Search for a matching ciphersuite
* (At the end because we need information from the EC-based extensions
* and certificate from the SNI callback triggered by the SNI extension.)
*/
got_common_suite = 0;
ciphersuites = ssl->conf->ciphersuite_list[ssl->minor_ver];
ciphersuite_info = NULL;
#if defined(MBEDTLS_SSL_SRV_RESPECT_CLIENT_PREFERENCE)
for( j = 0, p = buf + ciph_offset + 2; j < ciph_len; j += 2, p += 2 )
for( i = 0; ciphersuites[i] != 0; i++ )
#else
for( i = 0; ciphersuites[i] != 0; i++ )
for( j = 0, p = buf + ciph_offset + 2; j < ciph_len; j += 2, p += 2 )
#endif
{
if( p[0] != ( ( ciphersuites[i] >> 8 ) & 0xFF ) ||
p[1] != ( ( ciphersuites[i] ) & 0xFF ) )
continue;
got_common_suite = 1;
if( ( ret = ssl_ciphersuite_match( ssl, ciphersuites[i],
&ciphersuite_info ) ) != 0 )
return( ret );
if( ciphersuite_info != NULL )
goto have_ciphersuite;
}
if( got_common_suite )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "got ciphersuites in common, "
"but none of them usable" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_NO_USABLE_CIPHERSUITE );
}
else
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "got no ciphersuites in common" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_NO_CIPHER_CHOSEN );
}
have_ciphersuite:
MBEDTLS_SSL_DEBUG_MSG( 2, ( "selected ciphersuite: %s", ciphersuite_info->name ) );
ssl->session_negotiate->ciphersuite = ciphersuites[i];
ssl->handshake->ciphersuite_info = ciphersuite_info;
ssl->state++;
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
mbedtls_ssl_recv_flight_completed( ssl );
#endif
/* Debugging-only output for testsuite */
#if defined(MBEDTLS_DEBUG_C) && \
defined(MBEDTLS_SSL_PROTO_TLS1_2) && \
defined(MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED)
if( ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_3 )
{
mbedtls_pk_type_t sig_alg = mbedtls_ssl_get_ciphersuite_sig_alg( ciphersuite_info );
if( sig_alg != MBEDTLS_PK_NONE )
{
mbedtls_md_type_t md_alg = mbedtls_ssl_sig_hash_set_find( &ssl->handshake->hash_algs,
sig_alg );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, signature_algorithm ext: %d",
mbedtls_ssl_hash_from_md_alg( md_alg ) ) );
}
else
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "no hash algorithm for signature algorithm "
"%d - should not happen", sig_alg ) );
}
}
#endif
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= parse client hello" ) );
return( 0 );
}
#if defined(MBEDTLS_SSL_TRUNCATED_HMAC)
/*
 * Write the truncated_hmac extension into the ServerHello.
 *
 * The extension carries no payload; its presence confirms to the client
 * that truncated HMAC was negotiated. Writes nothing (*olen = 0) when the
 * feature is disabled for the session being negotiated.
 */
static void ssl_write_truncated_hmac_ext( mbedtls_ssl_context *ssl,
                                          unsigned char *buf,
                                          size_t *olen )
{
    unsigned char *out = buf;

    /* Nothing to announce unless truncated HMAC was negotiated. */
    if( ssl->session_negotiate->trunc_hmac == MBEDTLS_SSL_TRUNC_HMAC_DISABLED )
    {
        *olen = 0;
        return;
    }

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, adding truncated hmac extension" ) );

    /* Extension header: 2-byte type, 2-byte length (zero: empty body). */
    out[0] = (unsigned char)( ( MBEDTLS_TLS_EXT_TRUNCATED_HMAC >> 8 ) & 0xFF );
    out[1] = (unsigned char)( ( MBEDTLS_TLS_EXT_TRUNCATED_HMAC      ) & 0xFF );
    out[2] = 0x00;
    out[3] = 0x00;

    *olen = 4;
}
#endif /* MBEDTLS_SSL_TRUNCATED_HMAC */
#if defined(MBEDTLS_SSL_DTLS_CONNECTION_ID)
/*
 * Write the DTLS Connection ID extension into the ServerHello, announcing
 * the CID this endpoint wants its peer to use on incoming records.
 *
 * Writes nothing (*olen = 0) if CID is not in use for this handshake, or
 * if the remaining space in the output message is too small to hold the
 * extension (in which case it is silently omitted after a debug message).
 */
static void ssl_write_cid_ext( mbedtls_ssl_context *ssl,
                               unsigned char *buf,
                               size_t *olen )
{
    unsigned char *p = buf;
    size_t ext_len;
    /* End of the writable region of the outgoing handshake message. */
    const unsigned char *end = ssl->out_msg + MBEDTLS_SSL_OUT_CONTENT_LEN;

    *olen = 0;

    /* Skip writing the extension if we don't want to use it or if
     * the client hasn't offered it. */
    if( ssl->handshake->cid_in_use == MBEDTLS_SSL_CID_DISABLED )
        return;

    /* ssl->own_cid_len is at most MBEDTLS_SSL_CID_IN_LEN_MAX
     * which is at most 255, so the increment cannot overflow. */
    if( end < p || (size_t)( end - p ) < (unsigned)( ssl->own_cid_len + 5 ) )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "buffer too small" ) );
        return;
    }

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, adding CID extension" ) );

    /*
     * Quoting draft-ietf-tls-dtls-connection-id-05
     * https://tools.ietf.org/html/draft-ietf-tls-dtls-connection-id-05
     *
     *   struct {
     *      opaque cid<0..2^8-1>;
     *   } ConnectionId;
     */
    /* Extension type (2 bytes). */
    *p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_CID >> 8 ) & 0xFF );
    *p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_CID      ) & 0xFF );
    /* Extension length: 1 inner length byte plus the CID itself. */
    ext_len = (size_t) ssl->own_cid_len + 1;
    *p++ = (unsigned char)( ( ext_len >> 8 ) & 0xFF );
    *p++ = (unsigned char)( ( ext_len      ) & 0xFF );
    /* Inner cid<0..2^8-1> vector: 1-byte length then the CID bytes. */
    *p++ = (uint8_t) ssl->own_cid_len;
    memcpy( p, ssl->own_cid, ssl->own_cid_len );

    *olen = ssl->own_cid_len + 5;
}
#endif /* MBEDTLS_SSL_DTLS_CONNECTION_ID */
#if defined(MBEDTLS_SSL_ENCRYPT_THEN_MAC)
/*
 * Write the encrypt_then_mac extension (empty body) into the ServerHello.
 *
 * Writes nothing (*olen = 0) if EtM was not negotiated, if the protocol
 * is SSLv3, or if the chosen ciphersuite does not use a CBC cipher.
 */
static void ssl_write_encrypt_then_mac_ext( mbedtls_ssl_context *ssl,
                                            unsigned char *buf,
                                            size_t *olen )
{
    const mbedtls_ssl_ciphersuite_t *suite;
    const mbedtls_cipher_info_t *cipher;

    /* EtM must have been negotiated, and SSLv3 has no extensions. */
    if( ssl->session_negotiate->encrypt_then_mac == MBEDTLS_SSL_ETM_DISABLED ||
        ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_0 )
    {
        *olen = 0;
        return;
    }

    /*
     * RFC 7366: "If a server receives an encrypt-then-MAC request extension
     * from a client and then selects a stream or Authenticated Encryption
     * with Associated Data (AEAD) ciphersuite, it MUST NOT send an
     * encrypt-then-MAC response extension back to the client."
     */
    suite = mbedtls_ssl_ciphersuite_from_id(
                ssl->session_negotiate->ciphersuite );
    cipher = ( suite != NULL )
           ? mbedtls_cipher_info_from_type( suite->cipher )
           : NULL;
    if( cipher == NULL || cipher->mode != MBEDTLS_MODE_CBC )
    {
        *olen = 0;
        return;
    }

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, adding encrypt then mac extension" ) );

    /* 2-byte extension type followed by a zero payload length. */
    buf[0] = (unsigned char)( ( MBEDTLS_TLS_EXT_ENCRYPT_THEN_MAC >> 8 ) & 0xFF );
    buf[1] = (unsigned char)( ( MBEDTLS_TLS_EXT_ENCRYPT_THEN_MAC      ) & 0xFF );
    buf[2] = 0x00;
    buf[3] = 0x00;

    *olen = 4;
}
#endif /* MBEDTLS_SSL_ENCRYPT_THEN_MAC */
#if defined(MBEDTLS_SSL_EXTENDED_MASTER_SECRET)
/*
 * Write the extended_master_secret extension (empty body) into the
 * ServerHello. Writes nothing (*olen = 0) when EMS was not negotiated
 * or when the protocol is SSLv3 (which carries no extensions).
 */
static void ssl_write_extended_ms_ext( mbedtls_ssl_context *ssl,
                                       unsigned char *buf,
                                       size_t *olen )
{
    if( ssl->handshake->extended_ms == MBEDTLS_SSL_EXTENDED_MS_DISABLED ||
        ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_0 )
    {
        *olen = 0;
        return;
    }

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, adding extended master secret "
                                "extension" ) );

    /* Extension type (2 bytes), then zero payload length. */
    buf[0] = (unsigned char)( ( MBEDTLS_TLS_EXT_EXTENDED_MASTER_SECRET >> 8 ) & 0xFF );
    buf[1] = (unsigned char)( ( MBEDTLS_TLS_EXT_EXTENDED_MASTER_SECRET      ) & 0xFF );
    buf[2] = 0x00;
    buf[3] = 0x00;

    *olen = 4;
}
#endif /* MBEDTLS_SSL_EXTENDED_MASTER_SECRET */
#if defined(MBEDTLS_SSL_SESSION_TICKETS)
/*
 * Write an empty SessionTicket extension into the ServerHello to announce
 * that a NewSessionTicket message will follow. Writes nothing (*olen = 0)
 * when no fresh ticket is to be issued for this handshake.
 */
static void ssl_write_session_ticket_ext( mbedtls_ssl_context *ssl,
                                          unsigned char *buf,
                                          size_t *olen )
{
    if( ssl->handshake->new_session_ticket == 0 )
    {
        *olen = 0;
        return;
    }

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, adding session ticket extension" ) );

    /* Extension type (2 bytes) followed by a zero payload length. */
    buf[0] = (unsigned char)( ( MBEDTLS_TLS_EXT_SESSION_TICKET >> 8 ) & 0xFF );
    buf[1] = (unsigned char)( ( MBEDTLS_TLS_EXT_SESSION_TICKET      ) & 0xFF );
    buf[2] = 0x00;
    buf[3] = 0x00;

    *olen = 4;
}
#endif /* MBEDTLS_SSL_SESSION_TICKETS */
/*
 * Write the renegotiation_info extension (RFC 5746) into the ServerHello.
 *
 * Writes nothing (*olen = 0) unless the client signalled support for
 * secure renegotiation (via the extension or the SCSV). On the initial
 * handshake the renegotiated_connection field is empty; on a renegotiation
 * it carries client_verify_data followed by server_verify_data.
 */
static void ssl_write_renegotiation_ext( mbedtls_ssl_context *ssl,
                                         unsigned char *buf,
                                         size_t *olen )
{
    unsigned char *p = buf;

    if( ssl->secure_renegotiation != MBEDTLS_SSL_SECURE_RENEGOTIATION )
    {
        *olen = 0;
        return;
    }

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, secure renegotiation extension" ) );

    /* Extension type (2 bytes). */
    *p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_RENEGOTIATION_INFO >> 8 ) & 0xFF );
    *p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_RENEGOTIATION_INFO      ) & 0xFF );

#if defined(MBEDTLS_SSL_RENEGOTIATION)
    if( ssl->renego_status != MBEDTLS_SSL_INITIAL_HANDSHAKE )
    {
        /* Extension length = 1 (inner length byte) + 2 * verify_data_len.
         * The high length byte is hard-coded to 0, which assumes
         * 2 * verify_data_len + 1 fits in one byte — holds for the
         * 12-byte TLS verify data; TODO confirm for all configurations. */
        *p++ = 0x00;
        *p++ = ( ssl->verify_data_len * 2 + 1 ) & 0xFF;
        *p++ = ssl->verify_data_len * 2 & 0xFF;
        /* renegotiated_connection = client verify data + own verify data. */
        memcpy( p, ssl->peer_verify_data, ssl->verify_data_len );
        p += ssl->verify_data_len;
        memcpy( p, ssl->own_verify_data, ssl->verify_data_len );
        p += ssl->verify_data_len;
    }
    else
#endif /* MBEDTLS_SSL_RENEGOTIATION */
    {
        /* Initial handshake: extension length 1, empty inner field. */
        *p++ = 0x00;
        *p++ = 0x01;
        *p++ = 0x00;
    }

    *olen = p - buf;
}
#if defined(MBEDTLS_SSL_MAX_FRAGMENT_LENGTH)
/*
 * Write the max_fragment_length extension into the ServerHello, echoing
 * the MFL code negotiated for the session. Writes nothing (*olen = 0)
 * when no non-default maximum fragment length was negotiated.
 */
static void ssl_write_max_fragment_length_ext( mbedtls_ssl_context *ssl,
                                               unsigned char *buf,
                                               size_t *olen )
{
    if( ssl->session_negotiate->mfl_code == MBEDTLS_SSL_MAX_FRAG_LEN_NONE )
    {
        *olen = 0;
        return;
    }

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, max_fragment_length extension" ) );

    /* Type (2 bytes), length = 1, then the single MFL code byte. */
    buf[0] = (unsigned char)( ( MBEDTLS_TLS_EXT_MAX_FRAGMENT_LENGTH >> 8 ) & 0xFF );
    buf[1] = (unsigned char)( ( MBEDTLS_TLS_EXT_MAX_FRAGMENT_LENGTH      ) & 0xFF );
    buf[2] = 0x00;
    buf[3] = 1;
    buf[4] = ssl->session_negotiate->mfl_code;

    *olen = 5;
}
#endif /* MBEDTLS_SSL_MAX_FRAGMENT_LENGTH */
#if defined(MBEDTLS_ECDH_C) || defined(MBEDTLS_ECDSA_C) || \
    defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
/*
 * Write the supported_point_formats extension into the ServerHello,
 * advertising the single format we support (uncompressed). Writes
 * nothing (*olen = 0) unless the client offered the extension.
 */
static void ssl_write_supported_point_formats_ext( mbedtls_ssl_context *ssl,
                                                   unsigned char *buf,
                                                   size_t *olen )
{
    if( ( ssl->handshake->cli_exts &
          MBEDTLS_TLS_EXT_SUPPORTED_POINT_FORMATS_PRESENT ) == 0 )
    {
        *olen = 0;
        return;
    }

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, supported_point_formats extension" ) );

    /* Type, 2-byte extension length (= 2), 1-byte list length (= 1),
     * then the single supported point format. */
    buf[0] = (unsigned char)( ( MBEDTLS_TLS_EXT_SUPPORTED_POINT_FORMATS >> 8 ) & 0xFF );
    buf[1] = (unsigned char)( ( MBEDTLS_TLS_EXT_SUPPORTED_POINT_FORMATS      ) & 0xFF );
    buf[2] = 0x00;
    buf[3] = 2;
    buf[4] = 1;
    buf[5] = MBEDTLS_ECP_PF_UNCOMPRESSED;

    *olen = 6;
}
#endif /* MBEDTLS_ECDH_C || MBEDTLS_ECDSA_C || MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
/*
 * Write the EC J-PAKE key exchange (KKPP) extension into the ServerHello,
 * carrying the server's J-PAKE round-one data.
 *
 * Writes nothing (*olen = 0) unless the negotiated ciphersuite uses
 * EC J-PAKE. On buffer-too-small or round-one generation failure, the
 * error is logged and the extension is simply omitted (*olen stays 0).
 */
static void ssl_write_ecjpake_kkpp_ext( mbedtls_ssl_context *ssl,
                                        unsigned char *buf,
                                        size_t *olen )
{
    int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
    unsigned char *p = buf;
    /* End of the writable region of the outgoing handshake message. */
    const unsigned char *end = ssl->out_msg + MBEDTLS_SSL_OUT_CONTENT_LEN;
    size_t kkpp_len;

    *olen = 0;

    /* Skip costly computation if not needed */
    if( ssl->handshake->ciphersuite_info->key_exchange !=
        MBEDTLS_KEY_EXCHANGE_ECJPAKE )
        return;

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, ecjpake kkpp extension" ) );

    /* Need at least room for the 4-byte extension header. */
    if( end - p < 4 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "buffer too small" ) );
        return;
    }

    /* Extension type (2 bytes). */
    *p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_ECJPAKE_KKPP >> 8 ) & 0xFF );
    *p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_ECJPAKE_KKPP      ) & 0xFF );

    /* Generate round-one data directly into the output buffer, skipping
     * the 2 length bytes that are filled in below once the size is known. */
    ret = mbedtls_ecjpake_write_round_one( &ssl->handshake->ecjpake_ctx,
                                           p + 2, end - p - 2, &kkpp_len,
                                           ssl->conf->f_rng, ssl->conf->p_rng );
    if( ret != 0 )
    {
        MBEDTLS_SSL_DEBUG_RET( 1 , "mbedtls_ecjpake_write_round_one", ret );
        return;
    }

    /* Now write the extension length in front of the payload. */
    *p++ = (unsigned char)( ( kkpp_len >> 8 ) & 0xFF );
    *p++ = (unsigned char)( ( kkpp_len      ) & 0xFF );

    *olen = kkpp_len + 4;
}
#endif /* MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */
#if defined(MBEDTLS_SSL_ALPN )
/*
 * Write the ALPN extension into the ServerHello, echoing the single
 * protocol selected while parsing the ClientHello (ssl->alpn_chosen).
 * Writes nothing (*olen = 0) if no protocol was chosen.
 *
 * NOTE(review): unlike ssl_write_cid_ext(), this writer does not check
 * the remaining space in the output buffer before writing — presumably
 * the caller guarantees enough room for the (bounded) protocol name;
 * verify against the output buffer sizing.
 */
static void ssl_write_alpn_ext( mbedtls_ssl_context *ssl,
                                unsigned char *buf, size_t *olen )
{
    if( ssl->alpn_chosen == NULL )
    {
        *olen = 0;
        return;
    }

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, adding alpn extension" ) );

    /*
     * 0 . 1    ext identifier
     * 2 . 3    ext length
     * 4 . 5    protocol list length
     * 6 . 6    protocol name length
     * 7 . 7+n  protocol name
     */
    buf[0] = (unsigned char)( ( MBEDTLS_TLS_EXT_ALPN >> 8 ) & 0xFF );
    buf[1] = (unsigned char)( ( MBEDTLS_TLS_EXT_ALPN      ) & 0xFF );

    /* Total extension size first; the nested length fields derive from it. */
    *olen = 7 + strlen( ssl->alpn_chosen );

    buf[2] = (unsigned char)( ( ( *olen - 4 ) >> 8 ) & 0xFF );
    buf[3] = (unsigned char)( ( ( *olen - 4 )      ) & 0xFF );
    buf[4] = (unsigned char)( ( ( *olen - 6 ) >> 8 ) & 0xFF );
    buf[5] = (unsigned char)( ( ( *olen - 6 )      ) & 0xFF );
    buf[6] = (unsigned char)( ( ( *olen - 7 )      ) & 0xFF );

    memcpy( buf + 7, ssl->alpn_chosen, *olen - 7 );
}
#endif /* MBEDTLS_SSL_ALPN */
#if defined(MBEDTLS_SSL_DTLS_HELLO_VERIFY)
/*
 * Build and send a DTLS HelloVerifyRequest containing a stateless cookie
 * generated by the configured f_cookie_write callback, then move the
 * handshake state to SERVER_HELLO_VERIFY_REQUEST_SENT.
 *
 * Returns 0 on success, or an MBEDTLS_ERR_SSL_XXX error code.
 */
static int ssl_write_hello_verify_request( mbedtls_ssl_context *ssl )
{
    int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
    unsigned char *p = ssl->out_msg + 4; /* skip handshake type + length */
    unsigned char *cookie_len_byte;

    MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> write hello verify request" ) );

    /*
     * struct {
     *   ProtocolVersion server_version;
     *   opaque cookie<0..2^8-1>;
     * } HelloVerifyRequest;
     */

    /* The RFC is not clear on this point, but sending the actual negotiated
     * version looks like the most interoperable thing to do. */
    mbedtls_ssl_write_version( ssl->major_ver, ssl->minor_ver,
                               ssl->conf->transport, p );
    MBEDTLS_SSL_DEBUG_BUF( 3, "server version", p, 2 );
    p += 2;

    /* If we get here, f_cookie_check is not null */
    if( ssl->conf->f_cookie_write == NULL )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "inconsistent cookie callbacks" ) );
        return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
    }

    /* Skip length byte until we know the length */
    cookie_len_byte = p++;

    /* The callback writes the cookie and advances p past it. */
    if( ( ret = ssl->conf->f_cookie_write( ssl->conf->p_cookie,
                                 &p, ssl->out_buf + MBEDTLS_SSL_OUT_BUFFER_LEN,
                                 ssl->cli_id, ssl->cli_id_len ) ) != 0 )
    {
        MBEDTLS_SSL_DEBUG_RET( 1, "f_cookie_write", ret );
        return( ret );
    }

    /* Backfill the 1-byte cookie length now that it is known. */
    *cookie_len_byte = (unsigned char)( p - ( cookie_len_byte + 1 ) );

    MBEDTLS_SSL_DEBUG_BUF( 3, "cookie sent", cookie_len_byte + 1, *cookie_len_byte );

    ssl->out_msglen  = p - ssl->out_msg;
    ssl->out_msgtype = MBEDTLS_SSL_MSG_HANDSHAKE;
    ssl->out_msg[0]  = MBEDTLS_SSL_HS_HELLO_VERIFY_REQUEST;

    ssl->state = MBEDTLS_SSL_SERVER_HELLO_VERIFY_REQUEST_SENT;

    if( ( ret = mbedtls_ssl_write_handshake_msg( ssl ) ) != 0 )
    {
        MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_write_handshake_msg", ret );
        return( ret );
    }

#if defined(MBEDTLS_SSL_PROTO_DTLS)
    /* Push the flight out immediately so the client can retry. */
    if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM &&
        ( ret = mbedtls_ssl_flight_transmit( ssl ) ) != 0 )
    {
        MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_flight_transmit", ret );
        return( ret );
    }
#endif /* MBEDTLS_SSL_PROTO_DTLS */

    MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= write hello verify request" ) );

    return( 0 );
}
#endif /* MBEDTLS_SSL_DTLS_HELLO_VERIFY */
/*
 * Build and send the ServerHello message: protocol version, server random,
 * session id (fresh, or resumed from cache/ticket), chosen ciphersuite and
 * compression method, followed by the negotiated extensions.
 *
 * On DTLS, if the ClientHello was not yet authenticated by a cookie, this
 * defers to ssl_write_hello_verify_request() instead.
 *
 * Returns 0 on success, or an MBEDTLS_ERR_SSL_XXX error code.
 */
static int ssl_write_server_hello( mbedtls_ssl_context *ssl )
{
#if defined(MBEDTLS_HAVE_TIME)
    mbedtls_time_t t;
#endif
    int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
    size_t olen, ext_len = 0, n;
    unsigned char *buf, *p;

    MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> write server hello" ) );

#if defined(MBEDTLS_SSL_DTLS_HELLO_VERIFY)
    /* Client not yet authenticated by cookie: answer with a
     * HelloVerifyRequest rather than a ServerHello. */
    if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM &&
        ssl->handshake->verify_cookie_len != 0 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 2, ( "client hello was not authenticated" ) );
        MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= write server hello" ) );
        return( ssl_write_hello_verify_request( ssl ) );
    }
#endif /* MBEDTLS_SSL_DTLS_HELLO_VERIFY */

    if( ssl->conf->f_rng == NULL )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "no RNG provided") );
        return( MBEDTLS_ERR_SSL_NO_RNG );
    }

    /*
     *     0  .   0   handshake type
     *     1  .   3   handshake length
     *     4  .   5   protocol version
     *     6  .   9   UNIX time()
     *    10  .  37   random bytes
     */
    buf = ssl->out_msg;
    p = buf + 4;

    mbedtls_ssl_write_version( ssl->major_ver, ssl->minor_ver,
                               ssl->conf->transport, p );
    p += 2;

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, chosen version: [%d:%d]",
                                buf[4], buf[5] ) );

#if defined(MBEDTLS_HAVE_TIME)
    /* First 4 bytes of the server random are the current time,
     * big-endian, per the classic TLS gmt_unix_time convention. */
    t = mbedtls_time( NULL );
    *p++ = (unsigned char)( t >> 24 );
    *p++ = (unsigned char)( t >> 16 );
    *p++ = (unsigned char)( t >>  8 );
    *p++ = (unsigned char)( t       );

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, current time: %lu", t ) );
#else
    /* No time source: use 4 random bytes instead. */
    if( ( ret = ssl->conf->f_rng( ssl->conf->p_rng, p, 4 ) ) != 0 )
        return( ret );

    p += 4;
#endif /* MBEDTLS_HAVE_TIME */

    /* Remaining 28 bytes of the 32-byte server random. */
    if( ( ret = ssl->conf->f_rng( ssl->conf->p_rng, p, 28 ) ) != 0 )
        return( ret );

    p += 28;

    /* Keep a copy for key derivation (server random lives at offset 32
     * of handshake->randbytes, after the client random). */
    memcpy( ssl->handshake->randbytes + 32, buf + 6, 32 );

    MBEDTLS_SSL_DEBUG_BUF( 3, "server hello, random bytes", buf + 6, 32 );

    /*
     * Resume is 0  by default, see ssl_handshake_init().
     * It may be already set to 1 by ssl_parse_session_ticket_ext().
     * If not, try looking up session ID in our cache.
     */
    if( ssl->handshake->resume == 0 &&
#if defined(MBEDTLS_SSL_RENEGOTIATION)
        ssl->renego_status == MBEDTLS_SSL_INITIAL_HANDSHAKE &&
#endif
        ssl->session_negotiate->id_len != 0 &&
        ssl->conf->f_get_cache != NULL &&
        ssl->conf->f_get_cache( ssl->conf->p_cache, ssl->session_negotiate ) == 0 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 3, ( "session successfully restored from cache" ) );
        ssl->handshake->resume = 1;
    }

    if( ssl->handshake->resume == 0 )
    {
        /*
         * New session, create a new session id,
         * unless we're about to issue a session ticket
         */
        ssl->state++;

#if defined(MBEDTLS_HAVE_TIME)
        ssl->session_negotiate->start = mbedtls_time( NULL );
#endif

#if defined(MBEDTLS_SSL_SESSION_TICKETS)
        if( ssl->handshake->new_session_ticket != 0 )
        {
            /* Ticket-based resumption: empty session id. */
            ssl->session_negotiate->id_len = n = 0;
            memset( ssl->session_negotiate->id, 0, 32 );
        }
        else
#endif /* MBEDTLS_SSL_SESSION_TICKETS */
        {
            /* Fresh random 32-byte session id. */
            ssl->session_negotiate->id_len = n = 32;
            if( ( ret = ssl->conf->f_rng( ssl->conf->p_rng, ssl->session_negotiate->id,
                                          n ) ) != 0 )
                return( ret );
        }
    }
    else
    {
        /*
         * Resuming a session: skip straight to ChangeCipherSpec and
         * derive the keys from the restored master secret now.
         */
        n = ssl->session_negotiate->id_len;
        ssl->state = MBEDTLS_SSL_SERVER_CHANGE_CIPHER_SPEC;

        if( ( ret = mbedtls_ssl_derive_keys( ssl ) ) != 0 )
        {
            MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_derive_keys", ret );
            return( ret );
        }
    }

    /*
     *    38  .  38     session id length
     *    39  . 38+n    session id
     *   39+n . 40+n    chosen ciphersuite
     *   41+n . 41+n    chosen compression alg.
     *   42+n . 43+n    extensions length
     *   44+n . 43+n+m  extensions
     */
    *p++ = (unsigned char) ssl->session_negotiate->id_len;
    memcpy( p, ssl->session_negotiate->id, ssl->session_negotiate->id_len );
    p += ssl->session_negotiate->id_len;

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, session id len.: %d", n ) );
    MBEDTLS_SSL_DEBUG_BUF( 3,   "server hello, session id", buf + 39, n );
    MBEDTLS_SSL_DEBUG_MSG( 3, ( "%s session has been resumed",
                   ssl->handshake->resume ? "a" : "no" ) );

    *p++ = (unsigned char)( ssl->session_negotiate->ciphersuite >> 8 );
    *p++ = (unsigned char)( ssl->session_negotiate->ciphersuite      );
    *p++ = (unsigned char)( ssl->session_negotiate->compression      );

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, chosen ciphersuite: %s",
           mbedtls_ssl_get_ciphersuite_name( ssl->session_negotiate->ciphersuite ) ) );
    MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, compress alg.: 0x%02X",
                   ssl->session_negotiate->compression ) );

    /* Do not write the extensions if the protocol is SSLv3 */
#if defined(MBEDTLS_SSL_PROTO_SSL3)
    if( ( ssl->major_ver != 3 ) || ( ssl->minor_ver != 0 ) )
    {
#endif

    /*
     * First write extensions, then the total length
     * (each writer sets olen to the number of bytes it produced,
     * possibly 0 when the extension is omitted).
     */
    ssl_write_renegotiation_ext( ssl, p + 2 + ext_len, &olen );
    ext_len += olen;

#if defined(MBEDTLS_SSL_MAX_FRAGMENT_LENGTH)
    ssl_write_max_fragment_length_ext( ssl, p + 2 + ext_len, &olen );
    ext_len += olen;
#endif

#if defined(MBEDTLS_SSL_TRUNCATED_HMAC)
    ssl_write_truncated_hmac_ext( ssl, p + 2 + ext_len, &olen );
    ext_len += olen;
#endif

#if defined(MBEDTLS_SSL_DTLS_CONNECTION_ID)
    ssl_write_cid_ext( ssl, p + 2 + ext_len, &olen );
    ext_len += olen;
#endif

#if defined(MBEDTLS_SSL_ENCRYPT_THEN_MAC)
    ssl_write_encrypt_then_mac_ext( ssl, p + 2 + ext_len, &olen );
    ext_len += olen;
#endif

#if defined(MBEDTLS_SSL_EXTENDED_MASTER_SECRET)
    ssl_write_extended_ms_ext( ssl, p + 2 + ext_len, &olen );
    ext_len += olen;
#endif

#if defined(MBEDTLS_SSL_SESSION_TICKETS)
    ssl_write_session_ticket_ext( ssl, p + 2 + ext_len, &olen );
    ext_len += olen;
#endif

#if defined(MBEDTLS_ECDH_C) || defined(MBEDTLS_ECDSA_C) || \
    defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
    if ( mbedtls_ssl_ciphersuite_uses_ec(
         mbedtls_ssl_ciphersuite_from_id( ssl->session_negotiate->ciphersuite ) ) )
    {
        ssl_write_supported_point_formats_ext( ssl, p + 2 + ext_len, &olen );
        ext_len += olen;
    }
#endif

#if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
    ssl_write_ecjpake_kkpp_ext( ssl, p + 2 + ext_len, &olen );
    ext_len += olen;
#endif

#if defined(MBEDTLS_SSL_ALPN)
    ssl_write_alpn_ext( ssl, p + 2 + ext_len, &olen );
    ext_len += olen;
#endif

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, total extension length: %d", ext_len ) );

    if( ext_len > 0 )
    {
        /* 2-byte total extensions length, then skip over the extension
         * data that was already written in place above. */
        *p++ = (unsigned char)( ( ext_len >> 8 ) & 0xFF );
        *p++ = (unsigned char)( ( ext_len      ) & 0xFF );
        p += ext_len;
    }

#if defined(MBEDTLS_SSL_PROTO_SSL3)
    }
#endif

    ssl->out_msglen  = p - buf;
    ssl->out_msgtype = MBEDTLS_SSL_MSG_HANDSHAKE;
    ssl->out_msg[0]  = MBEDTLS_SSL_HS_SERVER_HELLO;

    ret = mbedtls_ssl_write_handshake_msg( ssl );

    MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= write server hello" ) );

    return( ret );
}
#if !defined(MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED)
/*
 * Stub used when no compiled-in key exchange permits a CertificateRequest:
 * either skip the message (the expected case) or flag an internal error
 * if the negotiated ciphersuite claims to allow one.
 */
static int ssl_write_certificate_request( mbedtls_ssl_context *ssl )
{
    const mbedtls_ssl_ciphersuite_t *ciphersuite_info =
        ssl->handshake->ciphersuite_info;

    MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> write certificate request" ) );

    if( mbedtls_ssl_ciphersuite_cert_req_allowed( ciphersuite_info ) )
    {
        /* Should be unreachable: this variant is only compiled when no
         * key exchange allowing a CertificateRequest is enabled. */
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) );
        return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
    }

    MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= skip write certificate request" ) );
    ssl->state++;
    return( 0 );
}
#else /* !MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED */
/*
 * Build and send the CertificateRequest message: supported certificate
 * types, (for TLS 1.2) the supported SignatureAndHashAlgorithms, and the
 * DNs of the acceptable CAs. Skipped entirely when the ciphersuite does
 * not allow client authentication or verification is disabled.
 *
 * Returns 0 on success, or an MBEDTLS_ERR_SSL_XXX error code.
 */
static int ssl_write_certificate_request( mbedtls_ssl_context *ssl )
{
    int ret = MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE;
    const mbedtls_ssl_ciphersuite_t *ciphersuite_info =
        ssl->handshake->ciphersuite_info;
    uint16_t dn_size, total_dn_size; /* excluding length bytes */
    size_t ct_len, sa_len; /* including length bytes */
    unsigned char *buf, *p;
    const unsigned char * const end = ssl->out_msg + MBEDTLS_SSL_OUT_CONTENT_LEN;
    const mbedtls_x509_crt *crt;
    int authmode;

    MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> write certificate request" ) );

    ssl->state++;

#if defined(MBEDTLS_SSL_SERVER_NAME_INDICATION)
    /* SNI callback may have overridden the configured auth mode. */
    if( ssl->handshake->sni_authmode != MBEDTLS_SSL_VERIFY_UNSET )
        authmode = ssl->handshake->sni_authmode;
    else
#endif
        authmode = ssl->conf->authmode;

    if( !mbedtls_ssl_ciphersuite_cert_req_allowed( ciphersuite_info ) ||
        authmode == MBEDTLS_SSL_VERIFY_NONE )
    {
        MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= skip write certificate request" ) );
        return( 0 );
    }

    /*
     *     0  .   0   handshake type
     *     1  .   3   handshake length
     *     4  .   4   cert type count
     *     5  .. m-1  cert types
     *     m  .. m+1  sig alg length (TLS 1.2 only)
     *    m+1 .. n-1  SignatureAndHashAlgorithms (TLS 1.2 only)
     *     n  .. n+1  length of all DNs
     *    n+2 .. n+3  length of DN 1
     *    n+4 .. ...  Distinguished Name #1
     *    ... .. ...  length of DN 2, etc.
     */
    buf = ssl->out_msg;
    p = buf + 4;

    /*
     * Supported certificate types
     *
     *     ClientCertificateType certificate_types<1..2^8-1>;
     *     enum { (255) } ClientCertificateType;
     */
    ct_len = 0;

#if defined(MBEDTLS_RSA_C)
    p[1 + ct_len++] = MBEDTLS_SSL_CERT_TYPE_RSA_SIGN;
#endif
#if defined(MBEDTLS_ECDSA_C)
    p[1 + ct_len++] = MBEDTLS_SSL_CERT_TYPE_ECDSA_SIGN;
#endif

    /* Write the type count at p[0], then bump ct_len so it also
     * accounts for the count byte itself when advancing p. */
    p[0] = (unsigned char) ct_len++;
    p += ct_len;

    sa_len = 0;
#if defined(MBEDTLS_SSL_PROTO_TLS1_2)
    /*
     * Add signature_algorithms for verify (TLS 1.2)
     *
     *     SignatureAndHashAlgorithm supported_signature_algorithms<2..2^16-2>;
     *
     *     struct {
     *           HashAlgorithm hash;
     *           SignatureAlgorithm signature;
     *     } SignatureAndHashAlgorithm;
     *
     *     enum { (255) } HashAlgorithm;
     *     enum { (255) } SignatureAlgorithm;
     */
    if( ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_3 )
    {
        const int *cur;

        /*
         * Supported signature algorithms
         */
        for( cur = ssl->conf->sig_hashes; *cur != MBEDTLS_MD_NONE; cur++ )
        {
            unsigned char hash = mbedtls_ssl_hash_from_md_alg( *cur );

            /* Skip hashes we cannot actually use for CertificateVerify. */
            if( MBEDTLS_SSL_HASH_NONE == hash || mbedtls_ssl_set_calc_verify_md( ssl, hash ) )
                continue;

            /* One (hash, sig) pair per enabled signature algorithm;
             * pairs are written after the 2 length bytes at p[0..1]. */
#if defined(MBEDTLS_RSA_C)
            p[2 + sa_len++] = hash;
            p[2 + sa_len++] = MBEDTLS_SSL_SIG_RSA;
#endif
#if defined(MBEDTLS_ECDSA_C)
            p[2 + sa_len++] = hash;
            p[2 + sa_len++] = MBEDTLS_SSL_SIG_ECDSA;
#endif
        }

        /* Backfill the 2-byte list length, then include those bytes
         * in sa_len before advancing p. */
        p[0] = (unsigned char)( sa_len >> 8 );
        p[1] = (unsigned char)( sa_len      );
        sa_len += 2;
        p += sa_len;
    }
#endif /* MBEDTLS_SSL_PROTO_TLS1_2 */

    /*
     * DistinguishedName certificate_authorities<0..2^16-1>;
     * opaque DistinguishedName<1..2^16-1>;
     */
    /* Reserve 2 bytes for the total DN list length (backfilled below
     * via ssl->out_msg[4 + ct_len + sa_len]). */
    p += 2;

    total_dn_size = 0;

    if( ssl->conf->cert_req_ca_list ==  MBEDTLS_SSL_CERT_REQ_CA_LIST_ENABLED )
    {
        /* NOTE: If trusted certificates are provisioned
         *       via a CA callback (configured through
         *       `mbedtls_ssl_conf_ca_cb()`, then the
         *       CertificateRequest is currently left empty. */

#if defined(MBEDTLS_SSL_SERVER_NAME_INDICATION)
        if( ssl->handshake->sni_ca_chain != NULL )
            crt = ssl->handshake->sni_ca_chain;
        else
#endif
            crt = ssl->conf->ca_chain;

        while( crt != NULL && crt->version != 0 )
        {
            /* It follows from RFC 5280 A.1 that this length
             * can be represented in at most 11 bits. */
            dn_size = (uint16_t) crt->subject_raw.len;

            if( end < p || (size_t)( end - p ) < 2 + (size_t) dn_size )
            {
                MBEDTLS_SSL_DEBUG_MSG( 1, ( "skipping CAs: buffer too short" ) );
                break;
            }

            *p++ = (unsigned char)( dn_size >> 8 );
            *p++ = (unsigned char)( dn_size      );
            memcpy( p, crt->subject_raw.p, dn_size );
            p += dn_size;

            MBEDTLS_SSL_DEBUG_BUF( 3, "requested DN", p - dn_size, dn_size );

            total_dn_size += 2 + dn_size;
            crt = crt->next;
        }
    }

    ssl->out_msglen  = p - buf;
    ssl->out_msgtype = MBEDTLS_SSL_MSG_HANDSHAKE;
    ssl->out_msg[0]  = MBEDTLS_SSL_HS_CERTIFICATE_REQUEST;
    /* Backfill the 2-byte total DN list length reserved earlier. */
    ssl->out_msg[4 + ct_len + sa_len] = (unsigned char)( total_dn_size  >> 8 );
    ssl->out_msg[5 + ct_len + sa_len] = (unsigned char)( total_dn_size       );

    ret = mbedtls_ssl_write_handshake_msg( ssl );

    MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= write certificate request" ) );

    return( ret );
}
#endif /* MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED) || \
defined(MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED)
/*
 * Load the server's static ECDH parameters from its own EC key into
 * the handshake's ECDH context.
 *
 * Returns MBEDTLS_ERR_SSL_PK_TYPE_MISMATCH if the configured key is
 * not an EC key, or the error from mbedtls_ecdh_get_params() on
 * failure; 0 on success.
 */
static int ssl_get_ecdh_params_from_cert( mbedtls_ssl_context *ssl )
{
    int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
    mbedtls_pk_context *own_key = mbedtls_ssl_own_key( ssl );

    if( mbedtls_pk_can_do( own_key, MBEDTLS_PK_ECKEY ) == 0 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "server key not ECDH capable" ) );
        return( MBEDTLS_ERR_SSL_PK_TYPE_MISMATCH );
    }

    ret = mbedtls_ecdh_get_params( &ssl->handshake->ecdh_ctx,
                                   mbedtls_pk_ec( *own_key ),
                                   MBEDTLS_ECDH_OURS );
    if( ret != 0 )
    {
        MBEDTLS_SSL_DEBUG_RET( 1, ( "mbedtls_ecdh_get_params" ), ret );
        return( ret );
    }

    return( 0 );
}
#endif /* MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED) ||
MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED) && \
defined(MBEDTLS_SSL_ASYNC_PRIVATE)
static int ssl_resume_server_key_exchange( mbedtls_ssl_context *ssl,
                                           size_t *signature_len )
{
    int ret;
    unsigned char *sig_start;
    size_t sig_max_len;

    /* The signature goes into ssl->out_msg, two bytes past the current
     * write position: those two bytes are reserved for the signature
     * length, which ssl_write_server_key_exchange fills in (it also
     * advances ssl->out_msglen) once the signature is complete. */
    sig_start = ssl->out_msg + ssl->out_msglen + 2;
    sig_max_len = ( ssl->out_buf + MBEDTLS_SSL_OUT_CONTENT_LEN
                    - sig_start );

    ret = ssl->conf->f_async_resume( ssl, sig_start,
                                     signature_len, sig_max_len );

    /* Any result other than "still in progress" terminates the
     * asynchronous operation, successfully or not. */
    if( ret != MBEDTLS_ERR_SSL_ASYNC_IN_PROGRESS )
    {
        ssl->handshake->async_in_progress = 0;
        mbedtls_ssl_set_async_operation_data( ssl, NULL );
    }

    MBEDTLS_SSL_DEBUG_RET( 2, "ssl_resume_server_key_exchange", ret );
    return( ret );
}
#endif /* defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED) &&
defined(MBEDTLS_SSL_ASYNC_PRIVATE) */
/* Prepare the ServerKeyExchange message, up to and including
 * calculating the signature if any, but excluding formatting the
 * signature and sending the message.
 *
 * On success, the key exchange parameters have been appended to
 * ssl->out_msg and ssl->out_msglen updated. For signed key exchanges,
 * the signature itself is written 2 bytes past ssl->out_msglen (the
 * gap is for the length field, filled in by the caller), and
 * *signature_len receives its length. With MBEDTLS_SSL_ASYNC_PRIVATE,
 * may return MBEDTLS_ERR_SSL_ASYNC_IN_PROGRESS to be resumed later via
 * ssl_resume_server_key_exchange(). */
static int ssl_prepare_server_key_exchange( mbedtls_ssl_context *ssl,
                                            size_t *signature_len )
{
    const mbedtls_ssl_ciphersuite_t *ciphersuite_info =
        ssl->handshake->ciphersuite_info;

#if defined(MBEDTLS_KEY_EXCHANGE_SOME_PFS_ENABLED)
#if defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED)
    /* Start of the key exchange parameters covered by the signature
     * (set when the DHE/ECDHE parameters are written in Part 1). */
    unsigned char *dig_signed = NULL;
#endif /* MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED */
#endif /* MBEDTLS_KEY_EXCHANGE_SOME_PFS_ENABLED */

    (void) ciphersuite_info; /* unused in some configurations */
#if !defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED)
    (void) signature_len;
#endif /* MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED */

    ssl->out_msglen = 4; /* header (type:1, length:3) to be written later */

    /*
     *
     * Part 1: Provide key exchange parameters for chosen ciphersuite.
     *
     */

    /*
     * - ECJPAKE key exchanges
     */
#if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
    if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECJPAKE )
    {
        int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
        size_t len = 0;

        ret = mbedtls_ecjpake_write_round_two(
            &ssl->handshake->ecjpake_ctx,
            ssl->out_msg + ssl->out_msglen,
            MBEDTLS_SSL_OUT_CONTENT_LEN - ssl->out_msglen, &len,
            ssl->conf->f_rng, ssl->conf->p_rng );
        if( ret != 0 )
        {
            MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecjpake_write_round_two", ret );
            return( ret );
        }

        ssl->out_msglen += len;
    }
#endif /* MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */

    /*
     * For (EC)DHE key exchanges with PSK, parameters are prefixed by support
     * identity hint (RFC 4279, Sec. 3). Until someone needs this feature,
     * we use empty support identity hints here.
     **/
#if defined(MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED) || \
    defined(MBEDTLS_KEY_EXCHANGE_ECDHE_PSK_ENABLED)
    if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_DHE_PSK ||
        ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDHE_PSK )
    {
        /* Zero-length identity hint: just the two (zero) length bytes. */
        ssl->out_msg[ssl->out_msglen++] = 0x00;
        ssl->out_msg[ssl->out_msglen++] = 0x00;
    }
#endif /* MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED ||
          MBEDTLS_KEY_EXCHANGE_ECDHE_PSK_ENABLED */

    /*
     * - DHE key exchanges
     */
#if defined(MBEDTLS_KEY_EXCHANGE_SOME_DHE_ENABLED)
    if( mbedtls_ssl_ciphersuite_uses_dhe( ciphersuite_info ) )
    {
        int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
        size_t len = 0;

        if( ssl->conf->dhm_P.p == NULL || ssl->conf->dhm_G.p == NULL )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "no DH parameters set" ) );
            return( MBEDTLS_ERR_SSL_BAD_INPUT_DATA );
        }

        /*
         * Ephemeral DH parameters:
         *
         * struct {
         *     opaque dh_p<1..2^16-1>;
         *     opaque dh_g<1..2^16-1>;
         *     opaque dh_Ys<1..2^16-1>;
         * } ServerDHParams;
         */
        if( ( ret = mbedtls_dhm_set_group( &ssl->handshake->dhm_ctx,
                                           &ssl->conf->dhm_P,
                                           &ssl->conf->dhm_G ) ) != 0 )
        {
            MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_dhm_set_group", ret );
            return( ret );
        }

        if( ( ret = mbedtls_dhm_make_params(
                  &ssl->handshake->dhm_ctx,
                  (int) mbedtls_mpi_size( &ssl->handshake->dhm_ctx.P ),
                  ssl->out_msg + ssl->out_msglen, &len,
                  ssl->conf->f_rng, ssl->conf->p_rng ) ) != 0 )
        {
            MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_dhm_make_params", ret );
            return( ret );
        }

#if defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED)
        /* The signature (Part 2) covers the DH parameters just written. */
        dig_signed = ssl->out_msg + ssl->out_msglen;
#endif

        ssl->out_msglen += len;

        MBEDTLS_SSL_DEBUG_MPI( 3, "DHM: X ", &ssl->handshake->dhm_ctx.X );
        MBEDTLS_SSL_DEBUG_MPI( 3, "DHM: P ", &ssl->handshake->dhm_ctx.P );
        MBEDTLS_SSL_DEBUG_MPI( 3, "DHM: G ", &ssl->handshake->dhm_ctx.G );
        MBEDTLS_SSL_DEBUG_MPI( 3, "DHM: GX", &ssl->handshake->dhm_ctx.GX );
    }
#endif /* MBEDTLS_KEY_EXCHANGE_SOME_DHE_ENABLED */

    /*
     * - ECDHE key exchanges
     */
#if defined(MBEDTLS_KEY_EXCHANGE_SOME_ECDHE_ENABLED)
    if( mbedtls_ssl_ciphersuite_uses_ecdhe( ciphersuite_info ) )
    {
        /*
         * Ephemeral ECDH parameters:
         *
         * struct {
         *     ECParameters curve_params;
         *     ECPoint      public;
         * } ServerECDHParams;
         */
        const mbedtls_ecp_curve_info **curve = NULL;
        const mbedtls_ecp_group_id *gid;
        int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
        size_t len = 0;

        /* Match our preference list against the offered curves */
        for( gid = ssl->conf->curve_list; *gid != MBEDTLS_ECP_DP_NONE; gid++ )
            for( curve = ssl->handshake->curves; *curve != NULL; curve++ )
                if( (*curve)->grp_id == *gid )
                    goto curve_matching_done;
curve_matching_done:
        if( curve == NULL || *curve == NULL )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "no matching curve for ECDHE" ) );
            return( MBEDTLS_ERR_SSL_NO_CIPHER_CHOSEN );
        }

        MBEDTLS_SSL_DEBUG_MSG( 2, ( "ECDHE curve: %s", (*curve)->name ) );

        if( ( ret = mbedtls_ecdh_setup( &ssl->handshake->ecdh_ctx,
                                        (*curve)->grp_id ) ) != 0 )
        {
            MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecp_group_load", ret );
            return( ret );
        }

        if( ( ret = mbedtls_ecdh_make_params(
                  &ssl->handshake->ecdh_ctx, &len,
                  ssl->out_msg + ssl->out_msglen,
                  MBEDTLS_SSL_OUT_CONTENT_LEN - ssl->out_msglen,
                  ssl->conf->f_rng, ssl->conf->p_rng ) ) != 0 )
        {
            MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecdh_make_params", ret );
            return( ret );
        }

#if defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED)
        /* The signature (Part 2) covers the ECDH parameters just written. */
        dig_signed = ssl->out_msg + ssl->out_msglen;
#endif

        ssl->out_msglen += len;

        MBEDTLS_SSL_DEBUG_ECDH( 3, &ssl->handshake->ecdh_ctx,
                                MBEDTLS_DEBUG_ECDH_Q );
    }
#endif /* MBEDTLS_KEY_EXCHANGE_SOME_ECDHE_ENABLED */

    /*
     *
     * Part 2: For key exchanges involving the server signing the
     *         exchange parameters, compute and add the signature here.
     *
     */
#if defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED)
    if( mbedtls_ssl_ciphersuite_uses_server_signature( ciphersuite_info ) )
    {
        size_t dig_signed_len = ssl->out_msg + ssl->out_msglen - dig_signed;
        size_t hashlen = 0;
        unsigned char hash[MBEDTLS_MD_MAX_SIZE];
        int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;

        /*
         * 2.1: Choose hash algorithm:
         * A: For TLS 1.2, obey signature-hash-algorithm extension
         *    to choose appropriate hash.
         * B: For SSL3, TLS1.0, TLS1.1 and ECDHE_ECDSA, use SHA1
         *    (RFC 4492, Sec. 5.4)
         * C: Otherwise, use MD5 + SHA1 (RFC 4346, Sec. 7.4.3)
         */
        mbedtls_md_type_t md_alg;

#if defined(MBEDTLS_SSL_PROTO_TLS1_2)
        mbedtls_pk_type_t sig_alg =
            mbedtls_ssl_get_ciphersuite_sig_pk_alg( ciphersuite_info );
        if( ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_3 )
        {
            /* A: For TLS 1.2, obey signature-hash-algorithm extension
             *    (RFC 5246, Sec. 7.4.1.4.1). */
            if( sig_alg == MBEDTLS_PK_NONE ||
                ( md_alg = mbedtls_ssl_sig_hash_set_find( &ssl->handshake->hash_algs,
                                                          sig_alg ) ) == MBEDTLS_MD_NONE )
            {
                MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) );
                /* (... because we choose a cipher suite
                 *      only if there is a matching hash.) */
                return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
            }
        }
        else
#endif /* MBEDTLS_SSL_PROTO_TLS1_2 */
#if defined(MBEDTLS_SSL_PROTO_SSL3) || defined(MBEDTLS_SSL_PROTO_TLS1) || \
    defined(MBEDTLS_SSL_PROTO_TLS1_1)
        if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA )
        {
            /* B: Default hash SHA1 */
            md_alg = MBEDTLS_MD_SHA1;
        }
        else
#endif /* MBEDTLS_SSL_PROTO_SSL3 || MBEDTLS_SSL_PROTO_TLS1 || \
          MBEDTLS_SSL_PROTO_TLS1_1 */
        {
            /* C: MD5 + SHA1 */
            md_alg = MBEDTLS_MD_NONE;
        }

        MBEDTLS_SSL_DEBUG_MSG( 3, ( "pick hash algorithm %d for signing", md_alg ) );

        /*
         * 2.2: Compute the hash to be signed
         */
#if defined(MBEDTLS_SSL_PROTO_SSL3) || defined(MBEDTLS_SSL_PROTO_TLS1) || \
    defined(MBEDTLS_SSL_PROTO_TLS1_1)
        if( md_alg == MBEDTLS_MD_NONE )
        {
            /* MD5 + SHA1 concatenation (case C): fixed 36-byte digest. */
            hashlen = 36;
            ret = mbedtls_ssl_get_key_exchange_md_ssl_tls( ssl, hash,
                                                           dig_signed,
                                                           dig_signed_len );
            if( ret != 0 )
                return( ret );
        }
        else
#endif /* MBEDTLS_SSL_PROTO_SSL3 || MBEDTLS_SSL_PROTO_TLS1 || \
          MBEDTLS_SSL_PROTO_TLS1_1 */
#if defined(MBEDTLS_SSL_PROTO_TLS1) || defined(MBEDTLS_SSL_PROTO_TLS1_1) || \
    defined(MBEDTLS_SSL_PROTO_TLS1_2)
        if( md_alg != MBEDTLS_MD_NONE )
        {
            ret = mbedtls_ssl_get_key_exchange_md_tls1_2( ssl, hash, &hashlen,
                                                          dig_signed,
                                                          dig_signed_len,
                                                          md_alg );
            if( ret != 0 )
                return( ret );
        }
        else
#endif /* MBEDTLS_SSL_PROTO_TLS1 || MBEDTLS_SSL_PROTO_TLS1_1 || \
          MBEDTLS_SSL_PROTO_TLS1_2 */
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) );
            return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
        }

        MBEDTLS_SSL_DEBUG_BUF( 3, "parameters hash", hash, hashlen );

        /*
         * 2.3: Compute and add the signature
         */
#if defined(MBEDTLS_SSL_PROTO_TLS1_2)
        if( ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_3 )
        {
            /*
             * For TLS 1.2, we need to specify signature and hash algorithm
             * explicitly through a prefix to the signature.
             *
             * struct {
             *    HashAlgorithm hash;
             *    SignatureAlgorithm signature;
             * } SignatureAndHashAlgorithm;
             *
             * struct {
             *    SignatureAndHashAlgorithm algorithm;
             *    opaque signature<0..2^16-1>;
             * } DigitallySigned;
             *
             */
            ssl->out_msg[ssl->out_msglen++] =
                mbedtls_ssl_hash_from_md_alg( md_alg );
            ssl->out_msg[ssl->out_msglen++] =
                mbedtls_ssl_sig_from_pk_alg( sig_alg );
        }
#endif /* MBEDTLS_SSL_PROTO_TLS1_2 */

#if defined(MBEDTLS_SSL_ASYNC_PRIVATE)
        if( ssl->conf->f_async_sign_start != NULL )
        {
            /* Offer the hash to the external signing engine first; it may
             * complete immediately, stay pending, or decline. */
            ret = ssl->conf->f_async_sign_start( ssl,
                                                 mbedtls_ssl_own_cert( ssl ),
                                                 md_alg, hash, hashlen );
            switch( ret )
            {
            case MBEDTLS_ERR_SSL_HW_ACCEL_FALLTHROUGH:
                /* act as if f_async_sign was null */
                break;
            case 0:
                ssl->handshake->async_in_progress = 1;
                return( ssl_resume_server_key_exchange( ssl, signature_len ) );
            case MBEDTLS_ERR_SSL_ASYNC_IN_PROGRESS:
                ssl->handshake->async_in_progress = 1;
                return( MBEDTLS_ERR_SSL_ASYNC_IN_PROGRESS );
            default:
                MBEDTLS_SSL_DEBUG_RET( 1, "f_async_sign_start", ret );
                return( ret );
            }
        }
#endif /* MBEDTLS_SSL_ASYNC_PRIVATE */

        if( mbedtls_ssl_own_key( ssl ) == NULL )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "got no private key" ) );
            return( MBEDTLS_ERR_SSL_PRIVATE_KEY_REQUIRED );
        }

        /* Append the signature to ssl->out_msg, leaving 2 bytes for the
         * signature length which will be added in ssl_write_server_key_exchange
         * after the call to ssl_prepare_server_key_exchange.
         * ssl_write_server_key_exchange also takes care of incrementing
         * ssl->out_msglen. */
        if( ( ret = mbedtls_pk_sign( mbedtls_ssl_own_key( ssl ),
                                     md_alg, hash, hashlen,
                                     ssl->out_msg + ssl->out_msglen + 2,
                                     signature_len,
                                     ssl->conf->f_rng,
                                     ssl->conf->p_rng ) ) != 0 )
        {
            MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_pk_sign", ret );
            return( ret );
        }
    }
#endif /* MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED */

    return( 0 );
}
/* Prepare the ServerKeyExchange message and send it. For ciphersuites
 * that do not include a ServerKeyExchange message, do nothing. Either
 * way, if successful, move on to the next step in the SSL state
 * machine. */
static int ssl_write_server_key_exchange( mbedtls_ssl_context *ssl )
{
    int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
    size_t signature_len = 0;
#if defined(MBEDTLS_KEY_EXCHANGE_SOME_NON_PFS_ENABLED)
    const mbedtls_ssl_ciphersuite_t *ciphersuite_info =
        ssl->handshake->ciphersuite_info;
#endif /* MBEDTLS_KEY_EXCHANGE_SOME_NON_PFS_ENABLED */

    MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> write server key exchange" ) );

#if defined(MBEDTLS_KEY_EXCHANGE_SOME_NON_PFS_ENABLED)
    /* Extract static ECDH parameters and abort if ServerKeyExchange
     * is not needed. */
    if( mbedtls_ssl_ciphersuite_no_pfs( ciphersuite_info ) )
    {
        /* For suites involving ECDH, extract DH parameters
         * from certificate at this point. */
#if defined(MBEDTLS_KEY_EXCHANGE_SOME_ECDH_ENABLED)
        if( mbedtls_ssl_ciphersuite_uses_ecdh( ciphersuite_info ) )
        {
            /* Fix: the return value used to be ignored, deferring a
             * parameter-extraction failure to a less informative point
             * later in the handshake. Fail early instead. */
            ret = ssl_get_ecdh_params_from_cert( ssl );
            if( ret != 0 )
            {
                MBEDTLS_SSL_DEBUG_RET( 1, "ssl_get_ecdh_params_from_cert", ret );
                return( ret );
            }
        }
#endif /* MBEDTLS_KEY_EXCHANGE_SOME_ECDH_ENABLED */

        /* Key exchanges not involving ephemeral keys don't use
         * ServerKeyExchange, so end here. */
        MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= skip write server key exchange" ) );
        ssl->state++;
        return( 0 );
    }
#endif /* MBEDTLS_KEY_EXCHANGE_SOME_NON_PFS_ENABLED */

#if defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED) && \
    defined(MBEDTLS_SSL_ASYNC_PRIVATE)
    /* If we have already prepared the message and there is an ongoing
     * signature operation, resume signing. */
    if( ssl->handshake->async_in_progress != 0 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 2, ( "resuming signature operation" ) );
        ret = ssl_resume_server_key_exchange( ssl, &signature_len );
    }
    else
#endif /* defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED) &&
          defined(MBEDTLS_SSL_ASYNC_PRIVATE) */
    {
        /* ServerKeyExchange is needed. Prepare the message. */
        ret = ssl_prepare_server_key_exchange( ssl, &signature_len );
    }

    if( ret != 0 )
    {
        /* If we're starting to write a new message, set ssl->out_msglen
         * to 0. But if we're resuming after an asynchronous message,
         * out_msglen is the amount of data written so far and must be
         * preserved. */
        if( ret == MBEDTLS_ERR_SSL_ASYNC_IN_PROGRESS )
            MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= write server key exchange (pending)" ) );
        else
            ssl->out_msglen = 0;
        return( ret );
    }

    /* If there is a signature, write its length.
     * ssl_prepare_server_key_exchange already wrote the signature
     * itself at its proper place in the output buffer. */
#if defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED)
    if( signature_len != 0 )
    {
        ssl->out_msg[ssl->out_msglen++] = (unsigned char)( signature_len >> 8 );
        ssl->out_msg[ssl->out_msglen++] = (unsigned char)( signature_len );

        MBEDTLS_SSL_DEBUG_BUF( 3, "my signature",
                               ssl->out_msg + ssl->out_msglen,
                               signature_len );

        /* Skip over the already-written signature */
        ssl->out_msglen += signature_len;
    }
#endif /* MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED */

    /* Add header and send. */
    ssl->out_msgtype = MBEDTLS_SSL_MSG_HANDSHAKE;
    ssl->out_msg[0] = MBEDTLS_SSL_HS_SERVER_KEY_EXCHANGE;
    ssl->state++;

    if( ( ret = mbedtls_ssl_write_handshake_msg( ssl ) ) != 0 )
    {
        MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_write_handshake_msg", ret );
        return( ret );
    }

    MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= write server key exchange" ) );
    return( 0 );
}
/*
 * Send the ServerHelloDone message and advance the handshake state.
 */
static int ssl_write_server_hello_done( mbedtls_ssl_context *ssl )
{
    int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;

    MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> write server hello done" ) );

    /* ServerHelloDone carries no payload: only the 4-byte handshake
     * header (type + 3-byte length). */
    ssl->out_msglen  = 4;
    ssl->out_msgtype = MBEDTLS_SSL_MSG_HANDSHAKE;
    ssl->out_msg[0]  = MBEDTLS_SSL_HS_SERVER_HELLO_DONE;

    ssl->state++;

#if defined(MBEDTLS_SSL_PROTO_DTLS)
    if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
        mbedtls_ssl_send_flight_completed( ssl );
#endif

    ret = mbedtls_ssl_write_handshake_msg( ssl );
    if( ret != 0 )
    {
        MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_write_handshake_msg", ret );
        return( ret );
    }

#if defined(MBEDTLS_SSL_PROTO_DTLS)
    if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM &&
        ( ret = mbedtls_ssl_flight_transmit( ssl ) ) != 0 )
    {
        MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_flight_transmit", ret );
        return( ret );
    }
#endif /* MBEDTLS_SSL_PROTO_DTLS */

    MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= write server hello done" ) );
    return( 0 );
}
#if defined(MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED) || \
defined(MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED)
/*
 * Parse the client's DH public value from a ClientKeyExchange message.
 *
 * On entry *p points at the 2-byte length of the opaque DH value and
 * end is one past the end of the message; on success *p is advanced
 * past the value.
 */
static int ssl_parse_client_dh_public( mbedtls_ssl_context *ssl, unsigned char **p,
                                       const unsigned char *end )
{
    int ret = MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE;
    size_t n;

    /*
     * Receive G^Y mod P, premaster = (G^Y)^X mod P
     */
    /* Bounds checks use pointer subtraction rather than `*p + k > end`:
     * forming a pointer beyond one-past-the-end of the buffer is
     * undefined behaviour in C (CERT ARR30-C). This also matches the
     * style used in ssl_parse_client_psk_identity(). */
    if( end - *p < 2 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
    }

    n = ( (*p)[0] << 8 ) | (*p)[1];
    *p += 2;

    if( (size_t)( end - *p ) < n )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
    }

    if( ( ret = mbedtls_dhm_read_public( &ssl->handshake->dhm_ctx, *p, n ) ) != 0 )
    {
        MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_dhm_read_public", ret );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE_RP );
    }

    *p += n;

    MBEDTLS_SSL_DEBUG_MPI( 3, "DHM: GY", &ssl->handshake->dhm_ctx.GY );

    /* ret is 0 here (set by the successful mbedtls_dhm_read_public). */
    return( ret );
}
#endif /* MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED ||
MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_RSA_ENABLED) || \
defined(MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED)
#if defined(MBEDTLS_SSL_ASYNC_PRIVATE)
static int ssl_resume_decrypt_pms( mbedtls_ssl_context *ssl,
                                   unsigned char *peer_pms,
                                   size_t *peer_pmslen,
                                   size_t peer_pmssize )
{
    int ret;

    /* Hand control back to the outstanding asynchronous decryption. */
    ret = ssl->conf->f_async_resume( ssl,
                                     peer_pms, peer_pmslen, peer_pmssize );

    /* Any result other than "still in progress" terminates the
     * asynchronous operation, successfully or not. */
    if( ret != MBEDTLS_ERR_SSL_ASYNC_IN_PROGRESS )
    {
        ssl->handshake->async_in_progress = 0;
        mbedtls_ssl_set_async_operation_data( ssl, NULL );
    }

    MBEDTLS_SSL_DEBUG_RET( 2, "ssl_decrypt_encrypted_pms", ret );
    return( ret );
}
#endif /* MBEDTLS_SSL_ASYNC_PRIVATE */
/*
 * Decrypt the RSA-encrypted premaster secret from a ClientKeyExchange
 * message.
 *
 * p points at the encrypted premaster (preceded, for TLS 1.0 and
 * later, by an explicit 2-byte length); end is one past the end of the
 * message. The decrypted premaster is written to peer_pms (capacity
 * peer_pmssize) and its length to *peer_pmslen. With
 * MBEDTLS_SSL_ASYNC_PRIVATE, may return
 * MBEDTLS_ERR_SSL_ASYNC_IN_PROGRESS.
 */
static int ssl_decrypt_encrypted_pms( mbedtls_ssl_context *ssl,
                                      const unsigned char *p,
                                      const unsigned char *end,
                                      unsigned char *peer_pms,
                                      size_t *peer_pmslen,
                                      size_t peer_pmssize )
{
    int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
    mbedtls_pk_context *private_key = mbedtls_ssl_own_key( ssl );
    mbedtls_pk_context *public_key = &mbedtls_ssl_own_cert( ssl )->pk;
    size_t len = mbedtls_pk_get_len( public_key );

#if defined(MBEDTLS_SSL_ASYNC_PRIVATE)
    /* If we have already started decoding the message and there is an ongoing
     * decryption operation, resume decryption. (Comment fixed: this is
     * the decryption path, not signing.) */
    if( ssl->handshake->async_in_progress != 0 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 2, ( "resuming decryption operation" ) );
        return( ssl_resume_decrypt_pms( ssl,
                                        peer_pms, peer_pmslen, peer_pmssize ) );
    }
#endif /* MBEDTLS_SSL_ASYNC_PRIVATE */

    /*
     * Prepare to decrypt the premaster using own private RSA key
     */
#if defined(MBEDTLS_SSL_PROTO_TLS1) || defined(MBEDTLS_SSL_PROTO_TLS1_1) || \
    defined(MBEDTLS_SSL_PROTO_TLS1_2)
    if( ssl->minor_ver != MBEDTLS_SSL_MINOR_VERSION_0 )
    {
        /* TLS 1.0+ prefixes the encrypted premaster with an explicit
         * 2-byte length, which must equal the RSA key size. Bounds
         * checks use pointer subtraction rather than `p + k > end`:
         * forming a pointer beyond one-past-the-end of the buffer is
         * undefined behaviour in C (CERT ARR30-C). */
        if( end - p < 2 )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
            return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
        }
        if( *p++ != ( ( len >> 8 ) & 0xFF ) ||
            *p++ != ( ( len      ) & 0xFF ) )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
            return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
        }
    }
#endif

    /* The encrypted premaster must exactly fill the rest of the message. */
    if( (size_t)( end - p ) != len )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
    }

    /*
     * Decrypt the premaster secret
     */
#if defined(MBEDTLS_SSL_ASYNC_PRIVATE)
    if( ssl->conf->f_async_decrypt_start != NULL )
    {
        ret = ssl->conf->f_async_decrypt_start( ssl,
                                                mbedtls_ssl_own_cert( ssl ),
                                                p, len );
        switch( ret )
        {
        case MBEDTLS_ERR_SSL_HW_ACCEL_FALLTHROUGH:
            /* act as if f_async_decrypt_start was null */
            break;
        case 0:
            ssl->handshake->async_in_progress = 1;
            return( ssl_resume_decrypt_pms( ssl,
                                            peer_pms,
                                            peer_pmslen,
                                            peer_pmssize ) );
        case MBEDTLS_ERR_SSL_ASYNC_IN_PROGRESS:
            ssl->handshake->async_in_progress = 1;
            return( MBEDTLS_ERR_SSL_ASYNC_IN_PROGRESS );
        default:
            MBEDTLS_SSL_DEBUG_RET( 1, "f_async_decrypt_start", ret );
            return( ret );
        }
    }
#endif /* MBEDTLS_SSL_ASYNC_PRIVATE */

    if( ! mbedtls_pk_can_do( private_key, MBEDTLS_PK_RSA ) )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "got no RSA private key" ) );
        return( MBEDTLS_ERR_SSL_PRIVATE_KEY_REQUIRED );
    }

    ret = mbedtls_pk_decrypt( private_key, p, len,
                              peer_pms, peer_pmslen, peer_pmssize,
                              ssl->conf->f_rng, ssl->conf->p_rng );
    return( ret );
}
/*
 * Parse the RSA-encrypted premaster secret and store the (real or
 * fake) premaster at ssl->handshake->premaster + pms_offset.
 *
 * This function implements the countermeasure against Bleichenbacher's
 * attack: decryption failures and version mismatches are absorbed into
 * a branch-free mask so that the timing and control flow do not depend
 * on the validity of the PKCS#1 v1.5 padding. Do not "simplify" the
 * arithmetic below into conditionals.
 */
static int ssl_parse_encrypted_pms( mbedtls_ssl_context *ssl,
                                    const unsigned char *p,
                                    const unsigned char *end,
                                    size_t pms_offset )
{
    int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
    unsigned char *pms = ssl->handshake->premaster + pms_offset;
    unsigned char ver[2];
    unsigned char fake_pms[48], peer_pms[48];
    unsigned char mask;
    size_t i, peer_pmslen;
    unsigned int diff;

    /* In case of a failure in decryption, the decryption may write less than
     * 2 bytes of output, but we always read the first two bytes. It doesn't
     * matter in the end because diff will be nonzero in that case due to
     * peer_pmslen being less than 48, and we only care whether diff is 0.
     * But do initialize peer_pms for robustness anyway. This also makes
     * memory analyzers happy (don't access uninitialized memory, even
     * if it's an unsigned char). */
    peer_pms[0] = peer_pms[1] = ~0;

    ret = ssl_decrypt_encrypted_pms( ssl, p, end,
                                     peer_pms,
                                     &peer_pmslen,
                                     sizeof( peer_pms ) );

#if defined(MBEDTLS_SSL_ASYNC_PRIVATE)
    if ( ret == MBEDTLS_ERR_SSL_ASYNC_IN_PROGRESS )
        return( ret );
#endif /* MBEDTLS_SSL_ASYNC_PRIVATE */

    /* The first two premaster bytes must echo the client's highest
     * offered protocol version (RFC 5246, Sec. 7.4.7.1). */
    mbedtls_ssl_write_version( ssl->handshake->max_major_ver,
                               ssl->handshake->max_minor_ver,
                               ssl->conf->transport, ver );

    /* Avoid data-dependent branches while checking for invalid
     * padding, to protect against timing-based Bleichenbacher-type
     * attacks. */
    diff = (unsigned int) ret;
    diff |= peer_pmslen ^ 48;
    diff |= peer_pms[0] ^ ver[0];
    diff |= peer_pms[1] ^ ver[1];

    /* mask = diff ? 0xff : 0x00 using bit operations to avoid branches */
    /* MSVC has a warning about unary minus on unsigned, but this is
     * well-defined and precisely what we want to do here */
#if defined(_MSC_VER)
#pragma warning( push )
#pragma warning( disable : 4146 )
#endif
    mask = - ( ( diff | - diff ) >> ( sizeof( unsigned int ) * 8 - 1 ) );
#if defined(_MSC_VER)
#pragma warning( pop )
#endif

    /*
     * Protection against Bleichenbacher's attack: invalid PKCS#1 v1.5 padding
     * must not cause the connection to end immediately; instead, send a
     * bad_record_mac later in the handshake.
     * To protect against timing-based variants of the attack, we must
     * not have any branch that depends on whether the decryption was
     * successful. In particular, always generate the fake premaster secret,
     * regardless of whether it will ultimately influence the output or not.
     */
    ret = ssl->conf->f_rng( ssl->conf->p_rng, fake_pms, sizeof( fake_pms ) );
    if( ret != 0 )
    {
        /* It's ok to abort on an RNG failure, since this does not reveal
         * anything about the RSA decryption. */
        return( ret );
    }

#if defined(MBEDTLS_SSL_DEBUG_ALL)
    if( diff != 0 )
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
#endif

    /* Defensive check that the premaster buffer can hold 48 bytes at
     * pms_offset; callers pass small fixed offsets (0 or 2). */
    if( sizeof( ssl->handshake->premaster ) < pms_offset ||
        sizeof( ssl->handshake->premaster ) - pms_offset < 48 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) );
        return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
    }
    ssl->handshake->pmslen = 48;

    /* Set pms to either the true or the fake PMS, without
     * data-dependent branches. */
    for( i = 0; i < ssl->handshake->pmslen; i++ )
        pms[i] = ( mask & fake_pms[i] ) | ( (~mask) & peer_pms[i] );

    return( 0 );
}
#endif /* MBEDTLS_KEY_EXCHANGE_RSA_ENABLED ||
MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED)
/*
 * Parse the PSK identity sent by the client and check it against the
 * configured PSK identity or the PSK lookup callback. On an unknown
 * identity, a fatal unknown_psk_identity alert is sent and
 * MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY returned. On success *p is
 * advanced past the identity.
 */
static int ssl_parse_client_psk_identity( mbedtls_ssl_context *ssl, unsigned char **p,
                                          const unsigned char *end )
{
    int ret = 0;
    uint16_t n;

    /* A PSK or a PSK callback must have been configured. */
    if( ssl_conf_has_psk_or_cb( ssl->conf ) == 0 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "got no pre-shared key" ) );
        return( MBEDTLS_ERR_SSL_PRIVATE_KEY_REQUIRED );
    }

    /*
     * Receive client pre-shared key identity name
     */
    if( end - *p < 2 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
    }

    n = (uint16_t)( ( (*p)[0] << 8 ) | (*p)[1] );
    *p += 2;

    if( n == 0 || (size_t) n > (size_t)( end - *p ) )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
    }

    if( ssl->conf->f_psk != NULL )
    {
        /* Let the application look the identity up. */
        if( ssl->conf->f_psk( ssl->conf->p_psk, ssl, *p, n ) != 0 )
            ret = MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY;
    }
    else if( n != ssl->conf->psk_identity_len ||
             mbedtls_ssl_safer_memcmp( ssl->conf->psk_identity, *p, n ) != 0 )
    {
        /* Identity is not a big secret since clients send it in the clear,
         * but compare it in constant time anyway, just in case. */
        ret = MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY;
    }

    if( ret == MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY )
    {
        MBEDTLS_SSL_DEBUG_BUF( 3, "Unknown PSK identity", *p, n );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_UNKNOWN_PSK_IDENTITY );
        return( MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY );
    }

    *p += n;

    return( 0 );
}
#endif /* MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED */
static int ssl_parse_client_key_exchange( mbedtls_ssl_context *ssl )
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
const mbedtls_ssl_ciphersuite_t *ciphersuite_info;
unsigned char *p, *end;
ciphersuite_info = ssl->handshake->ciphersuite_info;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> parse client key exchange" ) );
#if defined(MBEDTLS_SSL_ASYNC_PRIVATE) && \
( defined(MBEDTLS_KEY_EXCHANGE_RSA_ENABLED) || \
defined(MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED) )
if( ( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_RSA_PSK ||
ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_RSA ) &&
( ssl->handshake->async_in_progress != 0 ) )
{
/* We've already read a record and there is an asynchronous
* operation in progress to decrypt it. So skip reading the
* record. */
MBEDTLS_SSL_DEBUG_MSG( 3, ( "will resume decryption of previously-read record" ) );
}
else
#endif
if( ( ret = mbedtls_ssl_read_record( ssl, 1 ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_read_record", ret );
return( ret );
}
p = ssl->in_msg + mbedtls_ssl_hs_hdr_len( ssl );
end = ssl->in_msg + ssl->in_hslen;
if( ssl->in_msgtype != MBEDTLS_SSL_MSG_HANDSHAKE )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
if( ssl->in_msg[0] != MBEDTLS_SSL_HS_CLIENT_KEY_EXCHANGE )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
#if defined(MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED)
if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_DHE_RSA )
{
if( ( ret = ssl_parse_client_dh_public( ssl, &p, end ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "ssl_parse_client_dh_public" ), ret );
return( ret );
}
if( p != end )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
if( ( ret = mbedtls_dhm_calc_secret( &ssl->handshake->dhm_ctx,
ssl->handshake->premaster,
MBEDTLS_PREMASTER_SIZE,
&ssl->handshake->pmslen,
ssl->conf->f_rng, ssl->conf->p_rng ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_dhm_calc_secret", ret );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE_CS );
}
MBEDTLS_SSL_DEBUG_MPI( 3, "DHM: K ", &ssl->handshake->dhm_ctx.K );
}
else
#endif /* MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED) || \
defined(MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED) || \
defined(MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED) || \
defined(MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED)
if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDHE_RSA ||
ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA ||
ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDH_RSA ||
ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA )
{
if( ( ret = mbedtls_ecdh_read_public( &ssl->handshake->ecdh_ctx,
p, end - p) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecdh_read_public", ret );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE_RP );
}
MBEDTLS_SSL_DEBUG_ECDH( 3, &ssl->handshake->ecdh_ctx,
MBEDTLS_DEBUG_ECDH_QP );
if( ( ret = mbedtls_ecdh_calc_secret( &ssl->handshake->ecdh_ctx,
&ssl->handshake->pmslen,
ssl->handshake->premaster,
MBEDTLS_MPI_MAX_SIZE,
ssl->conf->f_rng, ssl->conf->p_rng ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecdh_calc_secret", ret );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE_CS );
}
MBEDTLS_SSL_DEBUG_ECDH( 3, &ssl->handshake->ecdh_ctx,
MBEDTLS_DEBUG_ECDH_Z );
}
else
#endif /* MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED ||
MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED ||
MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED ||
MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_PSK_ENABLED)
if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_PSK )
{
if( ( ret = ssl_parse_client_psk_identity( ssl, &p, end ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "ssl_parse_client_psk_identity" ), ret );
return( ret );
}
if( p != end )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
#if defined(MBEDTLS_USE_PSA_CRYPTO)
/* For opaque PSKs, we perform the PSK-to-MS derivation atomatically
* and skip the intermediate PMS. */
if( ssl_use_opaque_psk( ssl ) == 1 )
MBEDTLS_SSL_DEBUG_MSG( 1, ( "skip PMS generation for opaque PSK" ) );
else
#endif /* MBEDTLS_USE_PSA_CRYPTO */
if( ( ret = mbedtls_ssl_psk_derive_premaster( ssl,
ciphersuite_info->key_exchange ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_psk_derive_premaster", ret );
return( ret );
}
}
else
#endif /* MBEDTLS_KEY_EXCHANGE_PSK_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED)
if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_RSA_PSK )
{
#if defined(MBEDTLS_SSL_ASYNC_PRIVATE)
if ( ssl->handshake->async_in_progress != 0 )
{
/* There is an asynchronous operation in progress to
* decrypt the encrypted premaster secret, so skip
* directly to resuming this operation. */
MBEDTLS_SSL_DEBUG_MSG( 3, ( "PSK identity already parsed" ) );
/* Update p to skip the PSK identity. ssl_parse_encrypted_pms
* won't actually use it, but maintain p anyway for robustness. */
p += ssl->conf->psk_identity_len + 2;
}
else
#endif /* MBEDTLS_SSL_ASYNC_PRIVATE */
if( ( ret = ssl_parse_client_psk_identity( ssl, &p, end ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "ssl_parse_client_psk_identity" ), ret );
return( ret );
}
#if defined(MBEDTLS_USE_PSA_CRYPTO)
/* Opaque PSKs are currently only supported for PSK-only. */
if( ssl_use_opaque_psk( ssl ) == 1 )
return( MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE );
#endif
if( ( ret = ssl_parse_encrypted_pms( ssl, p, end, 2 ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "ssl_parse_encrypted_pms" ), ret );
return( ret );
}
if( ( ret = mbedtls_ssl_psk_derive_premaster( ssl,
ciphersuite_info->key_exchange ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_psk_derive_premaster", ret );
return( ret );
}
}
else
#endif /* MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED)
if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_DHE_PSK )
{
if( ( ret = ssl_parse_client_psk_identity( ssl, &p, end ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "ssl_parse_client_psk_identity" ), ret );
return( ret );
}
if( ( ret = ssl_parse_client_dh_public( ssl, &p, end ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "ssl_parse_client_dh_public" ), ret );
return( ret );
}
#if defined(MBEDTLS_USE_PSA_CRYPTO)
/* Opaque PSKs are currently only supported for PSK-only. */
if( ssl_use_opaque_psk( ssl ) == 1 )
return( MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE );
#endif
if( p != end )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
if( ( ret = mbedtls_ssl_psk_derive_premaster( ssl,
ciphersuite_info->key_exchange ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_psk_derive_premaster", ret );
return( ret );
}
}
else
#endif /* MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_ECDHE_PSK_ENABLED)
if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDHE_PSK )
{
if( ( ret = ssl_parse_client_psk_identity( ssl, &p, end ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "ssl_parse_client_psk_identity" ), ret );
return( ret );
}
if( ( ret = mbedtls_ecdh_read_public( &ssl->handshake->ecdh_ctx,
p, end - p ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecdh_read_public", ret );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE_RP );
}
#if defined(MBEDTLS_USE_PSA_CRYPTO)
/* Opaque PSKs are currently only supported for PSK-only. */
if( ssl_use_opaque_psk( ssl ) == 1 )
return( MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE );
#endif
MBEDTLS_SSL_DEBUG_ECDH( 3, &ssl->handshake->ecdh_ctx,
MBEDTLS_DEBUG_ECDH_QP );
if( ( ret = mbedtls_ssl_psk_derive_premaster( ssl,
ciphersuite_info->key_exchange ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_psk_derive_premaster", ret );
return( ret );
}
}
else
#endif /* MBEDTLS_KEY_EXCHANGE_ECDHE_PSK_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_RSA_ENABLED)
if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_RSA )
{
if( ( ret = ssl_parse_encrypted_pms( ssl, p, end, 0 ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "ssl_parse_parse_encrypted_pms_secret" ), ret );
return( ret );
}
}
else
#endif /* MBEDTLS_KEY_EXCHANGE_RSA_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECJPAKE )
{
ret = mbedtls_ecjpake_read_round_two( &ssl->handshake->ecjpake_ctx,
p, end - p );
if( ret != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecjpake_read_round_two", ret );
return( MBEDTLS_ERR_SSL_BAD_HS_SERVER_KEY_EXCHANGE );
}
ret = mbedtls_ecjpake_derive_secret( &ssl->handshake->ecjpake_ctx,
ssl->handshake->premaster, 32, &ssl->handshake->pmslen,
ssl->conf->f_rng, ssl->conf->p_rng );
if( ret != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecjpake_derive_secret", ret );
return( ret );
}
}
else
#endif /* MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) );
return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
}
if( ( ret = mbedtls_ssl_derive_keys( ssl ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_derive_keys", ret );
return( ret );
}
ssl->state++;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= parse client key exchange" ) );
return( 0 );
}
#if !defined(MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED)
/*
 * Stub used when no enabled key exchange permits a CertificateRequest:
 * the only legitimate outcome is skipping the CertificateVerify state.
 */
static int ssl_parse_certificate_verify( mbedtls_ssl_context *ssl )
{
    const mbedtls_ssl_ciphersuite_t *suite_info =
        ssl->handshake->ciphersuite_info;
    MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> parse certificate verify" ) );
    if( mbedtls_ssl_ciphersuite_cert_req_allowed( suite_info ) )
    {
        /* Unreachable in this configuration; treat as an internal error. */
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) );
        return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
    }
    MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= skip parse certificate verify" ) );
    ssl->state++;
    return( 0 );
}
#else /* !MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED */
/*
 * Parse the client's CertificateVerify message and verify its signature
 * over the handshake transcript with the client certificate's public key.
 *
 * Returns 0 if the message was processed or legitimately skipped,
 * otherwise a negative MBEDTLS_ERR_* code.
 */
static int ssl_parse_certificate_verify( mbedtls_ssl_context *ssl )
{
    int ret = MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE;
    size_t i, sig_len;
    unsigned char hash[48];
    unsigned char *hash_start = hash;
    size_t hashlen;
#if defined(MBEDTLS_SSL_PROTO_TLS1_2)
    mbedtls_pk_type_t pk_alg;
#endif
    mbedtls_md_type_t md_alg;
    const mbedtls_ssl_ciphersuite_t *ciphersuite_info =
        ssl->handshake->ciphersuite_info;
    mbedtls_pk_context * peer_pk;
    MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> parse certificate verify" ) );
    /* CertificateVerify only occurs for key exchanges that allow a
     * CertificateRequest. */
    if( !mbedtls_ssl_ciphersuite_cert_req_allowed( ciphersuite_info ) )
    {
        MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= skip parse certificate verify" ) );
        ssl->state++;
        return( 0 );
    }
    /* No client certificate (or digest) was stored: nothing to verify. */
#if defined(MBEDTLS_SSL_KEEP_PEER_CERTIFICATE)
    if( ssl->session_negotiate->peer_cert == NULL )
    {
        MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= skip parse certificate verify" ) );
        ssl->state++;
        return( 0 );
    }
#else /* MBEDTLS_SSL_KEEP_PEER_CERTIFICATE */
    if( ssl->session_negotiate->peer_cert_digest == NULL )
    {
        MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= skip parse certificate verify" ) );
        ssl->state++;
        return( 0 );
    }
#endif /* !MBEDTLS_SSL_KEEP_PEER_CERTIFICATE */
    /* Read the message without adding it to the checksum */
    ret = mbedtls_ssl_read_record( ssl, 0 /* no checksum update */ );
    if( 0 != ret )
    {
        MBEDTLS_SSL_DEBUG_RET( 1, ( "mbedtls_ssl_read_record" ), ret );
        return( ret );
    }
    ssl->state++;
    /* Process the message contents */
    if( ssl->in_msgtype != MBEDTLS_SSL_MSG_HANDSHAKE ||
        ssl->in_msg[0] != MBEDTLS_SSL_HS_CERTIFICATE_VERIFY )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad certificate verify message" ) );
        return( MBEDTLS_ERR_SSL_BAD_HS_CERTIFICATE_VERIFY );
    }
    i = mbedtls_ssl_hs_hdr_len( ssl );
    /* Select the key to verify against: bare peer public key or the key
     * from the stored peer certificate, depending on configuration. */
#if !defined(MBEDTLS_SSL_KEEP_PEER_CERTIFICATE)
    peer_pk = &ssl->handshake->peer_pubkey;
#else /* !MBEDTLS_SSL_KEEP_PEER_CERTIFICATE */
    if( ssl->session_negotiate->peer_cert == NULL )
    {
        /* Should never happen */
        return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
    }
    peer_pk = &ssl->session_negotiate->peer_cert->pk;
#endif /* MBEDTLS_SSL_KEEP_PEER_CERTIFICATE */
    /*
     *  struct {
     *     SignatureAndHashAlgorithm algorithm; -- TLS 1.2 only
     *     opaque signature<0..2^16-1>;
     *  } DigitallySigned;
     */
#if defined(MBEDTLS_SSL_PROTO_SSL3) || defined(MBEDTLS_SSL_PROTO_TLS1) || \
    defined(MBEDTLS_SSL_PROTO_TLS1_1)
    if( ssl->minor_ver != MBEDTLS_SSL_MINOR_VERSION_3 )
    {
        /* Pre-TLS-1.2: no algorithm bytes in the message; the transcript
         * hash is the fixed 36-byte MD5+SHA1 value. */
        md_alg = MBEDTLS_MD_NONE;
        hashlen = 36;
        /* For ECDSA, use SHA-1, not MD-5 + SHA-1 */
        if( mbedtls_pk_can_do( peer_pk, MBEDTLS_PK_ECDSA ) )
        {
            hash_start += 16;
            hashlen -= 16;
            md_alg = MBEDTLS_MD_SHA1;
        }
    }
    else
#endif /* MBEDTLS_SSL_PROTO_SSL3 || MBEDTLS_SSL_PROTO_TLS1 ||
          MBEDTLS_SSL_PROTO_TLS1_1 */
#if defined(MBEDTLS_SSL_PROTO_TLS1_2)
    if( ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_3 )
    {
        /* TLS 1.2: the message starts with explicit hash and signature
         * algorithm identifiers; both must match what we requested. */
        if( i + 2 > ssl->in_hslen )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad certificate verify message" ) );
            return( MBEDTLS_ERR_SSL_BAD_HS_CERTIFICATE_VERIFY );
        }
        /*
         * Hash
         */
        md_alg = mbedtls_ssl_md_alg_from_hash( ssl->in_msg[i] );
        if( md_alg == MBEDTLS_MD_NONE || mbedtls_ssl_set_calc_verify_md( ssl, ssl->in_msg[i] ) )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "peer not adhering to requested sig_alg"
                                " for verify message" ) );
            return( MBEDTLS_ERR_SSL_BAD_HS_CERTIFICATE_VERIFY );
        }
        /* NOTE(review): MBEDTLS_MD_SHA1 is normally an enum constant, not a
         * preprocessor symbol, so this guard looks always-true as written --
         * confirm the intended condition against upstream. */
#if !defined(MBEDTLS_MD_SHA1)
        if( MBEDTLS_MD_SHA1 == md_alg )
            hash_start += 16;
#endif
        /* Info from md_alg will be used instead */
        hashlen = 0;
        i++;
        /*
         * Signature
         */
        if( ( pk_alg = mbedtls_ssl_pk_alg_from_sig( ssl->in_msg[i] ) )
                        == MBEDTLS_PK_NONE )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "peer not adhering to requested sig_alg"
                                " for verify message" ) );
            return( MBEDTLS_ERR_SSL_BAD_HS_CERTIFICATE_VERIFY );
        }
        /*
         * Check the certificate's key type matches the signature alg
         */
        if( !mbedtls_pk_can_do( peer_pk, pk_alg ) )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "sig_alg doesn't match cert key" ) );
            return( MBEDTLS_ERR_SSL_BAD_HS_CERTIFICATE_VERIFY );
        }
        i++;
    }
    else
#endif /* MBEDTLS_SSL_PROTO_TLS1_2 */
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) );
        return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
    }
    if( i + 2 > ssl->in_hslen )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad certificate verify message" ) );
        return( MBEDTLS_ERR_SSL_BAD_HS_CERTIFICATE_VERIFY );
    }
    /* 2-byte big-endian signature length; the signature must fill the
     * remainder of the handshake message exactly. */
    sig_len = ( ssl->in_msg[i] << 8 ) | ssl->in_msg[i+1];
    i += 2;
    if( i + sig_len != ssl->in_hslen )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad certificate verify message" ) );
        return( MBEDTLS_ERR_SSL_BAD_HS_CERTIFICATE_VERIFY );
    }
    /* Calculate hash and verify signature */
    {
        size_t dummy_hlen;
        ssl->handshake->calc_verify( ssl, hash, &dummy_hlen );
    }
    if( ( ret = mbedtls_pk_verify( peer_pk,
                        md_alg, hash_start, hashlen,
                        ssl->in_msg + i, sig_len ) ) != 0 )
    {
        MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_pk_verify", ret );
        return( ret );
    }
    mbedtls_ssl_update_handshake_status( ssl );
    MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= parse certificate verify" ) );
    return( ret );
}
#endif /* MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED */
#if defined(MBEDTLS_SSL_SESSION_TICKETS)
/*
 * Write a NewSessionTicket handshake message.
 *
 * The ticket body and lifetime hint are produced by the configured
 * f_ticket_write callback; if the callback fails, an empty ticket is
 * written instead of aborting (tlen is forced to 0 below).
 */
static int ssl_write_new_session_ticket( mbedtls_ssl_context *ssl )
{
    int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
    size_t tlen;
    uint32_t lifetime;
    MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> write new session ticket" ) );
    ssl->out_msgtype = MBEDTLS_SSL_MSG_HANDSHAKE;
    ssl->out_msg[0] = MBEDTLS_SSL_HS_NEW_SESSION_TICKET;
    /*
     * struct {
     *     uint32 ticket_lifetime_hint;
     *     opaque ticket<0..2^16-1>;
     * } NewSessionTicket;
     *
     * 4  .  7   ticket_lifetime_hint (0 = unspecified)
     * 8  .  9   ticket_len (n)
     * 10 .  9+n ticket content
     */
    if( ( ret = ssl->conf->f_ticket_write( ssl->conf->p_ticket,
                                    ssl->session_negotiate,
                                    ssl->out_msg + 10,
                                    ssl->out_msg + MBEDTLS_SSL_OUT_CONTENT_LEN,
                                    &tlen, &lifetime ) ) != 0 )
    {
        MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_ticket_write", ret );
        tlen = 0;
    }
    /* Serialize lifetime hint and ticket length in network byte order. */
    ssl->out_msg[4] = ( lifetime >> 24 ) & 0xFF;
    ssl->out_msg[5] = ( lifetime >> 16 ) & 0xFF;
    ssl->out_msg[6] = ( lifetime >> 8 ) & 0xFF;
    ssl->out_msg[7] = ( lifetime ) & 0xFF;
    ssl->out_msg[8] = (unsigned char)( ( tlen >> 8 ) & 0xFF );
    ssl->out_msg[9] = (unsigned char)( ( tlen ) & 0xFF );
    ssl->out_msglen = 10 + tlen;
    /*
     * Morally equivalent to updating ssl->state, but NewSessionTicket and
     * ChangeCipherSpec share the same state.
     */
    ssl->handshake->new_session_ticket = 0;
    if( ( ret = mbedtls_ssl_write_handshake_msg( ssl ) ) != 0 )
    {
        MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_write_handshake_msg", ret );
        return( ret );
    }
    MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= write new session ticket" ) );
    return( 0 );
}
#endif /* MBEDTLS_SSL_SESSION_TICKETS */
/*
 * SSL handshake -- server side -- single step
 *
 * Runs exactly one state of the server handshake state machine, after
 * flushing pending output and, for DTLS, finishing retransmission of an
 * in-flight message flight.  Returns 0 on success, or the error returned
 * by the step that ran.
 */
int mbedtls_ssl_handshake_server_step( mbedtls_ssl_context *ssl )
{
    int ret = 0;
    /* Refuse to step a completed or missing handshake. */
    if( ssl->state == MBEDTLS_SSL_HANDSHAKE_OVER || ssl->handshake == NULL )
        return( MBEDTLS_ERR_SSL_BAD_INPUT_DATA );
    MBEDTLS_SSL_DEBUG_MSG( 2, ( "server state: %d", ssl->state ) );
    if( ( ret = mbedtls_ssl_flush_output( ssl ) ) != 0 )
        return( ret );
#if defined(MBEDTLS_SSL_PROTO_DTLS)
    /* DTLS: finish sending the current flight before taking a new step. */
    if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM &&
        ssl->handshake->retransmit_state == MBEDTLS_SSL_RETRANS_SENDING )
    {
        if( ( ret = mbedtls_ssl_flight_transmit( ssl ) ) != 0 )
            return( ret );
    }
#endif /* MBEDTLS_SSL_PROTO_DTLS */
    switch( ssl->state )
    {
        case MBEDTLS_SSL_HELLO_REQUEST:
            ssl->state = MBEDTLS_SSL_CLIENT_HELLO;
            break;
        /*
         *  <==   ClientHello
         */
        case MBEDTLS_SSL_CLIENT_HELLO:
            ret = ssl_parse_client_hello( ssl );
            break;
#if defined(MBEDTLS_SSL_PROTO_DTLS)
        /* A HelloVerifyRequest was sent: report it to the caller. */
        case MBEDTLS_SSL_SERVER_HELLO_VERIFY_REQUEST_SENT:
            return( MBEDTLS_ERR_SSL_HELLO_VERIFY_REQUIRED );
#endif
        /*
         *  ==>   ServerHello
         *        Certificate
         *      ( ServerKeyExchange  )
         *      ( CertificateRequest )
         *        ServerHelloDone
         */
        case MBEDTLS_SSL_SERVER_HELLO:
            ret = ssl_write_server_hello( ssl );
            break;
        case MBEDTLS_SSL_SERVER_CERTIFICATE:
            ret = mbedtls_ssl_write_certificate( ssl );
            break;
        case MBEDTLS_SSL_SERVER_KEY_EXCHANGE:
            ret = ssl_write_server_key_exchange( ssl );
            break;
        case MBEDTLS_SSL_CERTIFICATE_REQUEST:
            ret = ssl_write_certificate_request( ssl );
            break;
        case MBEDTLS_SSL_SERVER_HELLO_DONE:
            ret = ssl_write_server_hello_done( ssl );
            break;
        /*
         *  <== ( Certificate/Alert )
         *        ClientKeyExchange
         *      ( CertificateVerify )
         *        ChangeCipherSpec
         *        Finished
         */
        case MBEDTLS_SSL_CLIENT_CERTIFICATE:
            ret = mbedtls_ssl_parse_certificate( ssl );
            break;
        case MBEDTLS_SSL_CLIENT_KEY_EXCHANGE:
            ret = ssl_parse_client_key_exchange( ssl );
            break;
        case MBEDTLS_SSL_CERTIFICATE_VERIFY:
            ret = ssl_parse_certificate_verify( ssl );
            break;
        case MBEDTLS_SSL_CLIENT_CHANGE_CIPHER_SPEC:
            ret = mbedtls_ssl_parse_change_cipher_spec( ssl );
            break;
        case MBEDTLS_SSL_CLIENT_FINISHED:
            ret = mbedtls_ssl_parse_finished( ssl );
            break;
        /*
         *  ==> ( NewSessionTicket )
         *        ChangeCipherSpec
         *        Finished
         */
        case MBEDTLS_SSL_SERVER_CHANGE_CIPHER_SPEC:
#if defined(MBEDTLS_SSL_SESSION_TICKETS)
            /* NewSessionTicket shares this state with ChangeCipherSpec. */
            if( ssl->handshake->new_session_ticket != 0 )
                ret = ssl_write_new_session_ticket( ssl );
            else
#endif
                ret = mbedtls_ssl_write_change_cipher_spec( ssl );
            break;
        case MBEDTLS_SSL_SERVER_FINISHED:
            ret = mbedtls_ssl_write_finished( ssl );
            break;
        case MBEDTLS_SSL_FLUSH_BUFFERS:
            MBEDTLS_SSL_DEBUG_MSG( 2, ( "handshake: done" ) );
            ssl->state = MBEDTLS_SSL_HANDSHAKE_WRAPUP;
            break;
        case MBEDTLS_SSL_HANDSHAKE_WRAPUP:
            mbedtls_ssl_handshake_wrapup( ssl );
            break;
        default:
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "invalid state %d", ssl->state ) );
            return( MBEDTLS_ERR_SSL_BAD_INPUT_DATA );
    }
    return( ret );
}
#endif /* MBEDTLS_SSL_SRV_C */
|
mc-server/polarssl
|
library/ssl_srv.c
|
C
|
gpl-2.0
| 158,375
|
/*
* xHCI host controller driver
*
* Copyright (C) 2008 Intel Corp.
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* Ring initialization rules:
* 1. Each segment is initialized to zero, except for link TRBs.
* 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
* Consumer Cycle State (CCS), depending on ring function.
* 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
*
* Ring behavior rules:
* 1. A ring is empty if enqueue == dequeue. This means there will always be at
* least one free TRB in the ring. This is useful if you want to turn that
* into a link TRB and expand the ring.
* 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
* link TRB, then load the pointer with the address in the link TRB. If the
* link TRB had its toggle bit set, you may need to update the ring cycle
* state (see cycle bit rules). You may have to do this multiple times
* until you reach a non-link TRB.
* 3. A ring is full if enqueue++ (for the definition of increment above)
* equals the dequeue pointer.
*
* Cycle bit rules:
* 1. When a consumer increments a dequeue pointer and encounters a toggle bit
* in a link TRB, it must toggle the ring cycle state.
* 2. When a producer increments an enqueue pointer and encounters a toggle bit
* in a link TRB, it must toggle the ring cycle state.
*
* Producer rules:
* 1. Check if ring is full before you enqueue.
* 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
* Update enqueue pointer between each write (which may update the ring
* cycle state).
 * 3. Notify consumer. If SW is producer, it rings the doorbell for command
 *    and endpoint rings. If HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt modulation rules.
*
* Consumer rules:
* 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
* the TRB is owned by the consumer.
* 2. Update dequeue pointer (which may update the ring cycle state) and
* continue processing TRBs until you reach a TRB which is not owned by you.
* 3. Notify the producer. SW is the consumer for the event ring, and it
* updates event ring dequeue pointer. HC is the consumer for the command and
* endpoint rings; it generates events on the event ring for these.
*/
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"
static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct xhci_event_cmd *event);
/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	/* Valid indices are 0 .. TRBS_PER_SEGMENT - 1; an offset equal to
	 * TRBS_PER_SEGMENT points one past the end of the segment, so it
	 * must be rejected too (the previous '>' check was off by one).
	 */
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}
/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	/* Non-event rings mark the last segment with the toggle bit. */
	if (ring != xhci->event_ring)
		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;

	/* Event ring: one past the final TRB of the last ERST segment. */
	return trb == &seg->trbs[TRBS_PER_SEGMENT] &&
	       seg->next == xhci->event_ring->first_seg;
}
/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring != xhci->event_ring)
		return TRB_TYPE_LINK_LE32(trb->link.control);

	return trb == &seg->trbs[TRBS_PER_SEGMENT];
}
/* Is the current enqueue position a link TRB? */
static int enqueue_is_link_trb(struct xhci_ring *ring)
{
	return TRB_TYPE_LINK_LE32(ring->enqueue->link.control);
}
/* Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment.  This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	/* Common case: simply step to the adjacent TRB. */
	if (!last_trb(xhci, ring, *seg, *trb)) {
		(*trb)++;
		return;
	}
	/* Segment boundary: continue at the start of the next segment. */
	*seg = (*seg)->next;
	*trb = (*seg)->trbs;
}
/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 *
 * (The previous version computed the new dequeue DMA address into a local
 * 'addr' that was never used; that dead store has been removed.)
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	union xhci_trb *next;

	ring->deq_updates++;
	/* If this is not event ring, there is one more usable TRB */
	if (ring->type != TYPE_EVENT &&
			!last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
		ring->num_trbs_free++;
	next = ++(ring->dequeue);
	/* Update the dequeue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBS)
	 */
	while (last_trb(xhci, ring, ring->deq_seg, next)) {
		/* Only the event ring toggles the consumer cycle state here;
		 * other rings toggle it when giving a link TRB to the HW.
		 */
		if (ring->type == TYPE_EVENT && last_trb_on_last_seg(xhci,
				ring, ring->deq_seg, next)) {
			ring->cycle_state = (ring->cycle_state ? 0 : 1);
		}
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		next = ring->dequeue;
	}
}
/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming: Will you enqueue more TRBs before calling
 *                    prepare_transfer()?
 *
 * (The previous version computed the new enqueue DMA address into a local
 * 'addr' that was never used; that dead store has been removed.)
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
			bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	/* Chain bit of the TRB just enqueued, carried into link TRBs below. */
	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	/* If this is not event ring, there is one less usable TRB */
	if (ring->type != TYPE_EVENT &&
			!last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);
	ring->enq_updates++;
	/* Update the dequeue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBS)
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (ring->type != TYPE_EVENT) {
			/*
			 * If the caller doesn't plan on enqueueing more
			 * TDs before ringing the doorbell, then we
			 * don't want to give the link TRB to the
			 * hardware just yet. We'll give the link TRB
			 * back in prepare_ring() just before we enqueue
			 * the TD at the top of the ring.
			 */
			if (!chain && !more_trbs_coming)
				break;
			/* If we're not dealing with 0.95 hardware or
			 * isoc rings on AMD 0.96 host,
			 * carry over the chain bit of the previous TRB
			 * (which may mean the chain bit is cleared).
			 */
			if (!(ring->type == TYPE_ISOC &&
					(xhci->quirks & XHCI_AMD_0x96_HOST))
					&& !xhci_link_trb_quirk(xhci)) {
				next->link.control &=
					cpu_to_le32(~TRB_CHAIN);
				next->link.control |=
					cpu_to_le32(chain);
			}
			/* Give this link TRB to the hardware */
			wmb();
			next->link.control ^= cpu_to_le32(TRB_CYCLE);
			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}
/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * enqueue pointer will not advance into dequeue segment. See rules above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;

	if (ring->num_trbs_free < num_trbs)
		return 0;

	/* Command and event rings only need the free-count check. */
	if (ring->type == TYPE_COMMAND || ring->type == TYPE_EVENT)
		return 1;

	/* Transfer rings must also leave the dequeue segment untouched. */
	num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
	return ring->num_trbs_free >= num_trbs + num_trbs_in_deq_seg;
}
/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
xhci_dbg(xhci, "// Ding dong!\n");
xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
/* Flush PCI posted writes */
xhci_readl(xhci, &xhci->dba->doorbell[0]);
}
/* Ring the doorbell for one endpoint (and stream) of a slot. */
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 * FIXME - check all the stream rings for pending cancellations.
	 */
	if (ep_state & (EP_HALT_PENDING | SET_DEQ_PENDING | EP_HALTED))
		return;

	xhci_writel(xhci, DB_VALUE(ep_index, stream_id),
			&xhci->dba->doorbell[slot_id]);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush. It'll get there soon enough.
	 */
}
/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
unsigned int slot_id,
unsigned int ep_index)
{
unsigned int stream_id;
struct xhci_virt_ep *ep;
ep = &xhci->devs[slot_id]->eps[ep_index];
/* A ring has pending URBs if its TD list is not empty */
if (!(ep->ep_state & EP_HAS_STREAMS)) {
if (!(list_empty(&ep->ring->td_list)))
xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
return;
}
for (stream_id = 1; stream_id < ep->stream_info->num_streams;
stream_id++) {
struct xhci_stream_info *stream_info = ep->stream_info;
if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
stream_id);
}
}
/*
 * Find the segment that trb is in.  Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
		struct xhci_segment *start_seg,
		union xhci_trb *trb, int *cycle_state)
{
	struct xhci_segment *cur_seg = start_seg;

	/* Walk segments until trb lies within [trbs[0], trbs[last]]. */
	while (trb < cur_seg->trbs ||
			trb > &cur_seg->trbs[TRBS_PER_SEGMENT - 1]) {
		/* The segment's final TRB is its link TRB; honor its
		 * toggle-cycle bit as we pass over it.
		 */
		if (cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic.field[3] &
				cpu_to_le32(LINK_TOGGLE))
			*cycle_state ^= 0x1;
		cur_seg = cur_seg->next;
		if (cur_seg == start_seg)
			/* Looped over the entire list. Oops! */
			return NULL;
	}
	return cur_seg;
}
/* Map (slot, endpoint, stream) to the transfer ring it uses, with bounds
 * checking on the stream ID; returns NULL for invalid stream IDs.
 */
static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	/* Stream 0 is reserved and never a valid request here. */
	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}
/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
			ep_index, urb->stream_id);
}
/*
* Move the xHC's endpoint ring dequeue pointer past cur_td.
* Record the new state of the xHC's endpoint ring dequeue segment,
* dequeue pointer, and new consumer cycle state in state.
* Update our internal representation of the ring's dequeue pointer.
*
* We do this in three jumps:
* - First we update our new ring state to be the same as when the xHC stopped.
* - Then we traverse the ring to find the segment that contains
* the last TRB in the TD. We toggle the xHC's new cycle state when we pass
* any link TRBs with the toggle cycle bit set.
* - Finally we move the dequeue state one TRB further, toggling the cycle bit
* if we've moved it past a link TRB with the toggle cycle bit set.
*
* Some of the uses of xhci_generic_trb are grotty, but if they're done
* with correct __le32 accesses they should work fine. Only users of this are
* in here.
*/
/* Compute the (segment, pointer, cycle state) the endpoint ring's dequeue
 * pointer should be moved to in order to skip past cur_td; results are
 * written into *state.  See the three-jump description in the comment above.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_ring *ep_ring;
	struct xhci_generic_trb *trb;
	struct xhci_ep_ctx *ep_ctx;
	dma_addr_t addr;
	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}
	state->new_cycle_state = 0;
	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
	/* Jump 1: find the segment holding the TRB the xHC stopped on. */
	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
			dev->eps[ep_index].stopped_trb,
			&state->new_cycle_state);
	if (!state->new_deq_seg) {
		WARN_ON(1);
		return;
	}
	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg(xhci, "Finding endpoint context\n");
	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
	state->new_deq_ptr = cur_td->last_trb;
	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
	/* Jump 2: find the segment holding the TD's last TRB, toggling the
	 * cycle state for each toggle-bit link TRB passed along the way.
	 */
	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
			state->new_deq_ptr,
			&state->new_cycle_state);
	if (!state->new_deq_seg) {
		WARN_ON(1);
		return;
	}
	/* Jump 3: step one TRB past the TD's last TRB, toggling again if
	 * that last TRB is itself a link TRB with the toggle bit set.
	 */
	trb = &state->new_deq_ptr->generic;
	if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
	    (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
		state->new_cycle_state ^= 0x1;
	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
	/*
	 * If there is only one segment in a ring, find_trb_seg()'s while loop
	 * will not run, and it will return before it has a chance to see if it
	 * needs to toggle the cycle bit.  It can't tell if the stalled transfer
	 * ended just before the link TRB on a one-segment ring, or if the TD
	 * wrapped around the top of the ring, because it doesn't have the TD in
	 * question.  Look for the one-segment case where stalled TRB's address
	 * is greater than the new dequeue pointer address.
	 */
	if (ep_ring->first_seg == ep_ring->first_seg->next &&
			state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
		state->new_cycle_state ^= 0x1;
	xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);
	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
			(unsigned long long) addr);
}
/* Turn every TRB of cur_td into a no-op so the hardware skips it.
 *
 * flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td, bool flip_cycle)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;
	/* Walk every TRB from the TD's first TRB through its last TRB. */
	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
			/* Flip the cycle bit (link TRBs can't be the first
			 * or last TRB).
			 */
			if (flip_cycle)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			/* Ordinary TRB: blank it out and retype as no-op. */
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
			/* Flip the cycle bit except on the first or last TRB */
			if (flip_cycle && cur_trb != cur_td->first_trb &&
					cur_trb != cur_td->last_trb)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			cur_trb->generic.field[3] |= cpu_to_le32(
				TRB_TYPE(TRB_TR_NOOP));
			xhci_dbg(xhci, "TRB to noop at offset 0x%llx\n",
					(unsigned long long)
					xhci_trb_virt_to_dma(cur_seg, cur_trb));
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
unsigned int ep_index, unsigned int stream_id,
struct xhci_segment *deq_seg,
union xhci_trb *deq_ptr, u32 cycle_state);
/* Queue a Set Transfer Ring Dequeue Pointer command for the given endpoint
 * (and stream), using the segment/pointer/cycle captured in @deq_state, and
 * mark the endpoint SET_DEQ_PENDING so no doorbell is rung until the command
 * completes.  Caller is expected to ring the command doorbell afterwards.
 */
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
			deq_state->new_deq_seg,
			(unsigned long long)deq_state->new_deq_seg->dma,
			deq_state->new_deq_ptr,
			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
			deq_state->new_cycle_state);
	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
			deq_state->new_deq_seg,
			deq_state->new_deq_ptr,
			(u32) deq_state->new_cycle_state);
	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes. The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}
/* Cancel the stop-endpoint watchdog for @ep from interrupt context and clear
 * the halt-pending flag.
 */
static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel. If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}
/* Must be called with xhci->lock held in interrupt context.
 *
 * Account one completed TD against @cur_td's URB; when this is the last TD of
 * the URB, unlink the URB from its endpoint and give it back to the USB core
 * with @status.  The xhci->lock is dropped around usb_hcd_giveback_urb() and
 * re-acquired before returning, so list state may change across this call.
 * @adjective is unused here apart from documenting the caller's intent.
 */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status, char *adjective)
{
	struct usb_hcd *hcd;
	struct urb	*urb;
	struct urb_priv	*urb_priv;

	urb = cur_td->urb;
	urb_priv = urb->hcpriv;
	urb_priv->td_cnt++;
	hcd = bus_to_hcd(urb->dev->bus);

	/* Only giveback urb when this is the last td in urb */
	if (urb_priv->td_cnt == urb_priv->length) {
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
			/* Re-enable the AMD PLL quirk workaround once no isoc
			 * transfers remain in flight.
			 */
			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs	== 0) {
				if (xhci->quirks & XHCI_AMD_PLL_FIX)
					usb_amd_quirk_pll_enable();
			}
		}
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(hcd, urb, status);
		xhci_urb_free_priv(xhci, urb_priv);
		spin_lock(&xhci->lock);
	}
}
/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last
 *     TRB in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the
 *     chain bit cleared) so that the HW will skip over them.
 */
static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int slot_id;
	unsigned int ep_index;
	struct xhci_virt_device *virt_dev;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	/* A Stop Endpoint command with the suspend-port bit set is really a
	 * port-suspend command queued via the cmd wait list; complete it and
	 * bail out before touching any cancelled-TD state.
	 */
	if (unlikely(TRB_TO_SUSPEND_PORT(
			     le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) {
		slot_id = TRB_TO_SLOT_ID(
			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev,
				event);
		else
			xhci_warn(xhci, "Stop endpoint command "
				"completion for disabled slot %u\n",
				slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* No cancellations pending: just clear the stopped state and let any
	 * queued transfers restart.
	 */
	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ep->stopped_td = NULL;
		ep->stopped_trb = NULL;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg(xhci, "Removing canceled TD starting at 0x%llx (dma).\n",
				(unsigned long long)xhci_trb_virt_to_dma(
					cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci,
				slot_id, ep_index,
				ep->stopped_td->urb->stream_id,
				&deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}
/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we assume
 * the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	struct xhci_virt_ep *temp_ep;
	struct xhci_ring *ring;
	struct xhci_td *cur_td;
	int ret, i, j;
	unsigned long flags;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock_irqsave(&xhci->lock, flags);

	ep->stop_cmds_pending--;
	/* Another timer instance already declared the host dying; nothing
	 * left for us to do.
	 */
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
				"xHCI as DYING, exiting.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	/* Only the tail-end timer of the last pending stop endpoint command,
	 * with the command still outstanding, means the host is unresponsive.
	 */
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
				"exiting.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ret = xhci_halt(xhci);

	spin_lock_irqsave(&xhci->lock, flags);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled. If we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
	/* Kill every URB still queued on any endpoint of any slot so the
	 * disconnect paths triggered by usb_hc_died() can finish.
	 */
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++) {
			temp_ep = &xhci->devs[i]->eps[j];
			ring = temp_ep->ring;
			if (!ring)
				continue;
			xhci_dbg(xhci, "Killing URBs for slot ID %u, "
					"ep index %u\n", i, j);
			while (!list_empty(&ring->td_list)) {
				cur_td = list_first_entry(&ring->td_list,
						struct xhci_td,
						td_list);
				list_del_init(&cur_td->td_list);
				if (!list_empty(&cur_td->cancelled_td_list))
					list_del_init(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
			while (!list_empty(&temp_ep->cancelled_td_list)) {
				cur_td = list_first_entry(
						&temp_ep->cancelled_td_list,
						struct xhci_td,
						cancelled_td_list);
				list_del_init(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg(xhci, "Calling usb_hc_died()\n");
	usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
	xhci_dbg(xhci, "xHCI host controller is dead.\n");
}
/* After a successful Set TR Dequeue Pointer command, advance the driver's
 * software dequeue pointer (and free-TRB count) until it matches the pointer
 * we asked the hardware to move to.  If we walk a full lap without finding
 * the queued pointer, revert the free-TRB count and leave the ring untouched.
 */
static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_virt_device *dev,
		struct xhci_ring *ep_ring,
		unsigned int ep_index)
{
	union xhci_trb *dequeue_temp;
	int num_trbs_free_temp;
	bool revert = false;

	num_trbs_free_temp = ep_ring->num_trbs_free;
	dequeue_temp = ep_ring->dequeue;

	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;
		/* Hop over the link TRB at the end of a segment, unless the
		 * target pointer is the link TRB itself.
		 */
		if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
				ep_ring->dequeue)) {
			if (ep_ring->dequeue ==
					dev->eps[ep_index].queued_deq_ptr)
				break;
			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
		/* Wrapped all the way around without a match: give up. */
		if (ep_ring->dequeue == dequeue_temp) {
			revert = true;
			break;
		}
	}

	if (revert) {
		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
		ep_ring->num_trbs_free = num_trbs_free_temp;
	}
}
/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void handle_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for "
				"freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
		return;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		/* Report why the command failed; there is no recovery here
		 * beyond logging - see the comment below the switch.
		 */
		switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
					"of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
					"to incorrect slot or ep state.\n");
			ep_state = le32_to_cpu(ep_ctx->ep_info);
			ep_state &= EP_STATE_MASK;
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
					"slot %u was not enabled.\n", slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
					"completion code of %u.\n",
					GET_COMP_CODE(le32_to_cpu(event->status)));
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing, and endpoint state are correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
			 le64_to_cpu(ep_ctx->deq));
		/* Only advance our software copy of the ring if the hardware's
		 * reported dequeue pointer (cycle bit masked off) matches what
		 * we queued; otherwise driver and hardware disagree.
		 */
		if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
					 dev->eps[ep_index].queued_deq_ptr) ==
		    (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			update_ring_for_set_deq_completion(xhci, dev,
				ep_ring, ep_index);
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq "
					"Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
					dev->eps[ep_index].queued_deq_seg,
					dev->eps[ep_index].queued_deq_ptr);
		}
	}

	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
/* Handle the completion of a Reset Endpoint command: clear the driver's
 * halted state and restart the ring(s), or - on hardware with the reset
 * endpoint quirk - queue the follow-up Configure Endpoint command instead.
 */
static void handle_reset_ep_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	int slot_id;
	unsigned int ep_index;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
		 GET_COMP_CODE(le32_to_cpu(event->status)));

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		xhci_dbg(xhci, "Queueing configure endpoint command\n");
		xhci_queue_configure_endpoint(xhci,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state and restart the ring(s) */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
}
/* Check whether the command that just completed is the one at the head of
 * this virt device's command wait list.  If so, record its completion code,
 * remove it from the list, and either signal whoever is waiting on it or
 * free it; return 1.  Return 0 when the list is empty or the completed TRB
 * is not the head command.
 */
static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event)
{
	struct xhci_command *cmd;

	if (list_empty(&virt_dev->cmd_list))
		return 0;

	cmd = list_first_entry(&virt_dev->cmd_list,
			struct xhci_command, cmd_list);
	if (xhci->cmd_ring->dequeue != cmd->command_trb)
		return 0;

	cmd->status = GET_COMP_CODE(le32_to_cpu(event->status));
	list_del(&cmd->cmd_list);
	/* Commands without a completion are fire-and-forget: free them. */
	if (cmd->completion)
		complete(cmd->completion);
	else
		xhci_free_command(xhci, cmd);
	return 1;
}
/* Handle a Command Completion Event: validate that the event's DMA address
 * matches our command ring dequeue pointer, dispatch on the completed
 * command's TRB type, and advance the command ring dequeue pointer.
 * Mismatches are recorded in xhci->error_bitmask rather than acted on.
 */
static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_virt_device *virt_dev;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	unsigned int ep_state;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			xhci->cmd_ring->dequeue);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}
	switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
		& TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_ENABLE_SLOT):
		/* Hand the newly-enabled slot ID (or 0 on failure) to the
		 * thread waiting in the address-device path.
		 */
		if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
			xhci->slot_id = slot_id;
		else
			xhci->slot_id = 0;
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_DISABLE_SLOT):
		if (xhci->devs[slot_id]) {
			if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
				/* Delete default control endpoint resources */
				xhci_free_device_endpoint_resources(xhci,
						xhci->devs[slot_id], true);
			xhci_free_virt_device(xhci, slot_id);
		}
		break;
	case TRB_TYPE(TRB_CONFIG_EP):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		/*
		 * Configure endpoint commands can come from the USB core
		 * configuration or alt setting changes, or because the HW
		 * needed an extra configure endpoint command after a reset
		 * endpoint command or streams were being configured.
		 * If the command was for a halted endpoint, the xHCI driver
		 * is not waiting on the configure endpoint command.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci,
				virt_dev->in_ctx);
		/* Input ctx add_flags are the endpoint index plus one */
		ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
		/* A usb_set_interface() call directly after clearing a halted
		 * condition may race on this quirky hardware.  Not worth
		 * worrying about, since this is prototype hardware.  Not sure
		 * if this will work for streams, but streams support was
		 * untested on this prototype.
		 */
		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
				ep_index != (unsigned int) -1 &&
				le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
				le32_to_cpu(ctrl_ctx->drop_flags)) {
			ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
			if (!(ep_state & EP_HALTED))
				goto bandwidth_change;
			xhci_dbg(xhci, "Completed config ep cmd - "
					"last ep index = %d, state = %d\n",
					ep_index, ep_state);
			/* Clear internal halted state and restart ring(s) */
			xhci->devs[slot_id]->eps[ep_index].ep_state &=
				~EP_HALTED;
			ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
			break;
		}
bandwidth_change:
		xhci_dbg(xhci, "Completed config ep cmd\n");
		xhci->devs[slot_id]->cmd_status =
			GET_COMP_CODE(le32_to_cpu(event->status));
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_EVAL_CONTEXT):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_ADDR_DEV):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_STOP_RING):
		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
		break;
	case TRB_TYPE(TRB_SET_DEQ):
		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_CMD_NOOP):
		break;
	case TRB_TYPE(TRB_RESET_EP):
		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_RESET_DEV):
		xhci_dbg(xhci, "Completed reset device command.\n");
		/* Reset Device carries the slot ID in the command TRB, not in
		 * the event flags - re-extract it from the dequeue TRB.
		 */
		slot_id = TRB_TO_SLOT_ID(
			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
		else
			xhci_warn(xhci, "Reset device command completion "
					"for disabled slot %u\n", slot_id);
		break;
	case TRB_TYPE(TRB_NEC_GET_FW):
		if (!(xhci->quirks & XHCI_NEC_HOST)) {
			xhci->error_bitmask |= 1 << 6;
			break;
		}
		xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
			 NEC_FW_MAJOR(le32_to_cpu(event->status)),
			 NEC_FW_MINOR(le32_to_cpu(event->status)));
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}
	inc_deq(xhci, xhci->cmd_ring);
}
/* Handle a vendor-specific TRB on the event ring.  The only vendor event we
 * understand is the NEC command completion, and only on NEC hosts.
 */
static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));

	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", type);
	if ((xhci->quirks & XHCI_NEC_HOST) && type == TRB_NEC_CMD_COMP)
		handle_cmd_completion(xhci, &event->event_cmd);
}
/* @port_id: the one-based port ID from the hardware (indexed from array of all
 * port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each of
 * the split roothubs' port arrays and bus state arrays.
 * Add one to it in order to call xhci_find_slot_id_by_port.
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
		struct xhci_hcd *xhci, u32 port_id)
{
	unsigned int count = 0;
	unsigned int i;

	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
	 * and usb2_ports are 0-based indexes.  Count how many ports before
	 * this one belong to the same roothub speed class.
	 */
	for (i = 0; i < (port_id - 1); i++) {
		u8 speed = xhci->port_array[i];
		bool port_is_usb3;

		/* Skip ports with unknown speeds or duplicate Extended
		 * Capabilities entries.
		 */
		if (speed == 0 || speed == DUPLICATE_ENTRY)
			continue;

		/* USB 3.0 ports sit under the USB 3.0 roothub; USB 2.0/1.1
		 * ports sit under the USB 2.0 roothub.  A port counts as
		 * "similar speed" when it lands on the same roothub as @hcd.
		 */
		port_is_usb3 = (speed == 0x03);
		if (port_is_usb3 == (hcd->speed == HCD_USB3))
			count++;
	}

	return count;
}
/* Handle a Device Notification Event (e.g. a function remote wake from a
 * device in a low-power link state): look up the slot and forward a wakeup
 * notification for the device's port on its parent hub.
 *
 * Fix: the slot ID field must be byte-swapped with le32_to_cpu() before
 * applying TRB_TO_SLOT_ID - every other TRB field access in this file does
 * so.  Without it, big-endian hosts extract a garbage slot ID.
 */
static void handle_device_notification(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 slot_id;
	struct usb_device *udev;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "Device Notification event for "
				"unused slot %u\n", slot_id);
		return;
	}

	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
			slot_id);
	udev = xhci->devs[slot_id]->udev;
	/* Only notify if the device actually hangs off a hub we can wake. */
	if (udev && udev->parent)
		usb_wakeup_notification(udev->parent, udev->portnum);
}
/* Handle a Port Status Change Event: translate the hardware port ID into the
 * correct split roothub (USB 2.0 vs USB 3.0) and its zero-based port index,
 * handle resume/remote-wakeup link state transitions, and kick the USB core's
 * roothub polling unless the event was bogus.
 *
 * Fix: the remote-wakeup check below used logical AND
 * (port_remote_wakeup && (1 << faked_port_index)); since the shifted mask is
 * always non-zero, ANY set wakeup bit matched for EVERY port, clearing other
 * ports' wakeup state and sending spurious wakeup notifications.  It must be
 * a bitwise AND testing this port's own bit.
 */
static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd;
	u32 port_id;
	u32 temp, temp1;
	int max_ports;
	int slot_id;
	unsigned int faked_port_index;
	u8 major_revision;
	struct xhci_bus_state *bus_state;
	__le32 __iomem **port_array;
	bool bogus_port_status = false;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > max_ports)) {
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/* Figure out which usb_hcd this port is attached to:
	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
	 */
	major_revision = xhci->port_array[port_id - 1];
	if (major_revision == 0) {
		xhci_warn(xhci, "Event for port %u not in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}
	if (major_revision == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Event for port %u duplicated in"
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/*
	 * Hardware port IDs reported by a Port Status Change Event include USB
	 * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
	 * resume event, but we first need to translate the hardware port ID
	 * into the index into the ports on the correct split roothub, and the
	 * correct bus_state structure.
	 */
	/* Find the right roothub. */
	hcd = xhci_to_hcd(xhci);
	if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
		hcd = xhci->shared_hcd;
	bus_state = &xhci->bus_state[hcd_index(hcd)];
	if (hcd->speed == HCD_USB3)
		port_array = xhci->usb3_ports;
	else
		port_array = xhci->usb2_ports;
	/* Find the faked port hub number */
	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
			port_id);

	temp = xhci_readl(xhci, port_array[faked_port_index]);
	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		temp1 = xhci_readl(xhci, &xhci->op_regs->command);
		if (!(temp1 & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

		if (DEV_SUPERSPEED(temp)) {
			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
			/* Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of
			 * device and host initiated resume.
			 */
			bus_state->port_remote_wakeup |= 1 << faked_port_index;
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			xhci_set_link_state(xhci, port_array, faked_port_index,
						XDEV_U0);
			/* Need to wait until the next link state change
			 * indicates the device is actually in U0.
			 */
			bogus_port_status = true;
			goto cleanup;
		} else {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			/* HS/FS/LS resume is finished in GetPortStatus after a
			 * 20 ms resume signaling window.
			 */
			bus_state->resume_done[faked_port_index] = jiffies +
				msecs_to_jiffies(20);
			mod_timer(&hcd->rh_timer,
				  bus_state->resume_done[faked_port_index]);
			/* Do the rest in GetPortStatus */
		}
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
			DEV_SUPERSPEED(temp)) {
		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
		/* We've just brought the device into U0 through either the
		 * Resume state after a device remote wakeup, or through the
		 * U3Exit state after a host-initiated resume.  If it's a device
		 * initiated remote wake, don't pass up the link state change,
		 * so the roothub behavior is consistent with external
		 * USB 3.0 hub behavior.
		 */
		slot_id = xhci_find_slot_id_by_port(hcd, xhci,
				faked_port_index + 1);
		if (slot_id && xhci->devs[slot_id])
			xhci_ring_device(xhci, slot_id);
		/* Test THIS port's remote-wakeup bit (bitwise AND - see the
		 * function comment; '&&' here matched any port's bit).
		 */
		if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
			bus_state->port_remote_wakeup &=
				~(1 << faked_port_index);
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			usb_wakeup_notification(hcd->self.root_hub,
					faked_port_index + 1);
			bogus_port_status = true;
			goto cleanup;
		}
	}

	if (hcd->speed != HCD_USB3)
		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
					PORT_PLC);

cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring);

	/* Don't make the USB core poll the roothub if we got a bad port status
	 * change event.  Besides, at that point we can't tell which roothub
	 * (USB 2.0 or USB 3.0) to kick.
	 */
	if (bogus_port_status)
		return;

	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(hcd);
	spin_lock(&xhci->lock);
}
/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns 0.
 *
 * Walks the segment list starting at start_seg, handling three layouts per
 * segment: end TRB in this segment with no wrap, end TRB in this segment with
 * the TD wrapped around the segment boundary, and end TRB in a later segment.
 */
struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
		union xhci_trb	*start_trb,
		union xhci_trb	*end_trb,
		dma_addr_t	suspect_dma)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
							suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	/* Wrapped the whole ring without finding the suspect DMA address. */
	return NULL;
}
/* Recover an endpoint that the hardware halted on a transfer error: mark it
 * halted, record the TD/TRB/stream we stopped on so the ring cleanup can find
 * them, queue a Reset Endpoint command followed by a Set TR Dequeue Pointer
 * (via xhci_cleanup_stalled_ring), then clear the stopped state and ring the
 * command doorbell.  The statement order matters: the stopped_* fields must
 * be set before the ring cleanup reads them.
 */
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_td *td, union xhci_trb *event_trb)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	ep->ep_state |= EP_HALTED;
	ep->stopped_td = td;
	ep->stopped_trb = event_trb;
	ep->stopped_stream = stream_id;

	xhci_queue_reset_ep(xhci, slot_id, ep_index);
	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);

	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;
	ep->stopped_stream = 0;

	xhci_ring_cmd_db(xhci);
}
/* Check if an error has halted the endpoint ring. The class driver will
* cleanup the halt for a non-default control endpoint if we indicate a stall.
* However, a babble and other errors also halt the endpoint ring, and the class
* driver won't clear the halt in that case, so we need to issue a Set Transfer
* Ring Dequeue Pointer command manually.
*/
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
struct xhci_ep_ctx *ep_ctx,
unsigned int trb_comp_code)
{
/* TRB completion codes that may require a manual halt cleanup */
if (trb_comp_code == COMP_TX_ERR ||
trb_comp_code == COMP_BABBLE ||
trb_comp_code == COMP_SPLIT_ERR)
/* The 0.96 spec says a babbling control endpoint
* is not halted. The 0.96 spec says it is. Some HW
* claims to be 0.95 compliant, but it halts the control
* endpoint anyway. Check if a babble halted the
* endpoint.
*/
if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
cpu_to_le32(EP_STATE_HALTED))
return 1;
return 0;
}
/* Return 1 when @trb_comp_code falls in the vendor-defined "informational"
 * range (224-255), which is treated as success; 0 for all other codes.
 */
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code < 224 || trb_comp_code > 255)
		return 0;

	/* Vendor defined "informational" completion code,
	 * treat as not-an-error.
	 */
	xhci_dbg(xhci, "Vendor defined info completion code %u\n",
			trb_comp_code);
	xhci_dbg(xhci, "Treating code as success.\n");
	return 1;
}
/*
 * Finish the td processing, remove the td from td list;
 * Return 1 if the urb can be given back.
 *
 * Shared tail for the control/bulk/interrupt/isoc TD processors.  Handles the
 * completion codes that need endpoint-level recovery (stall / manual halt
 * cleanup), advances the ring dequeue pointer for the normal case, and removes
 * the TD from the endpoint's td_list.  When @skip is true the per-code
 * handling is bypassed and only the list cleanup runs.
 */
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status, bool skip)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct urb *urb = NULL;
	struct xhci_ep_ctx *ep_ctx;
	int ret = 0;
	struct urb_priv *urb_priv;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	/* Skipped (missed-service) TDs jump straight to list cleanup; the
	 * label lives inside the else block below, which is legal C. */
	if (skip)
		goto td_cleanup;

	if (trb_comp_code == COMP_STOP_INVAL ||
			trb_comp_code == COMP_STOP) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs. A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		ep->stopped_td = td;
		ep->stopped_trb = event_trb;
		return 0;
	} else {
		if (trb_comp_code == COMP_STALL) {
			/* The transfer is completed from the driver's
			 * perspective, but we need to issue a set dequeue
			 * command for this stalled endpoint to move the dequeue
			 * pointer past the TD. We can't do that here because
			 * the halt condition must be cleared first. Let the
			 * USB class driver clear the stall later.
			 */
			ep->stopped_td = td;
			ep->stopped_trb = event_trb;
			ep->stopped_stream = ep_ring->stream_id;
		} else if (xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code)) {
			/* Other types of errors halt the endpoint, but the
			 * class driver doesn't call usb_reset_endpoint() unless
			 * the error is -EPIPE. Clear the halted status in the
			 * xHCI hardware manually.
			 */
			xhci_cleanup_halted_endpoint(xhci,
					slot_id, ep_index, ep_ring->stream_id,
					td, event_trb);
		} else {
			/* Update ring dequeue pointer */
			while (ep_ring->dequeue != td->last_trb)
				inc_deq(xhci, ep_ring);
			inc_deq(xhci, ep_ring);
		}

td_cleanup:
		/* Clean up the endpoint's TD list */
		urb = td->urb;
		urb_priv = urb->hcpriv;

		/* Do one last check of the actual transfer length.
		 * If the host controller said we transferred more data than
		 * the buffer length, urb->actual_length will be a very big
		 * number (since it's unsigned). Play it safe and say we didn't
		 * transfer anything.
		 */
		if (urb->actual_length > urb->transfer_buffer_length) {
			xhci_warn(xhci, "URB transfer length is wrong, "
					"xHC issue? req. len = %u, "
					"act. len = %u\n",
					urb->transfer_buffer_length,
					urb->actual_length);
			urb->actual_length = 0;
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				*status = -EREMOTEIO;
			else
				*status = 0;
		}
		list_del_init(&td->td_list);
		/* Was this TD slated to be cancelled but completed anyway? */
		if (!list_empty(&td->cancelled_td_list))
			list_del_init(&td->cancelled_td_list);

		urb_priv->td_cnt++;
		/* Giveback the urb when all the tds are completed */
		if (urb_priv->td_cnt == urb_priv->length) {
			ret = 1;
			/* Re-enable the AMD PLL quirk once the last in-flight
			 * isoc transfer for the whole HCD finishes. */
			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
				xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
				if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
					== 0) {
					if (xhci->quirks & XHCI_AMD_PLL_FIX)
						usb_amd_quirk_pll_enable();
				}
			}
		}
	}

	return ret;
}
/*
 * Process control tds, update urb status and actual_length.
 *
 * Control transfers complete in up to three stages (setup, data, status); the
 * stage an event belongs to is identified by comparing the event TRB against
 * the ring dequeue pointer (setup) and td->last_trb (status).  Returns the
 * value of finish_td() (1 = give the URB back), or 0 while waiting for the
 * status stage event.
 */
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		/* Success should only be reported on the status stage TRB
		 * (the last TRB of the TD); anything else means the HC set
		 * IOC where we didn't ask for it. */
		if (event_trb == ep_ring->dequeue) {
			xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
					"without IOC set??\n");
			*status = -ESHUTDOWN;
		} else if (event_trb != td->last_trb) {
			xhci_warn(xhci, "WARN: Success on ctrl data TRB "
					"without IOC set??\n");
			*status = -ESHUTDOWN;
		} else {
			*status = 0;
		}
		break;
	case COMP_SHORT_TX:
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
		break;
	case COMP_STOP_INVAL:
	case COMP_STOP:
		return finish_td(xhci, td, event_trb, event, ep, status, false);
	default:
		if (!xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code))
			break;
		xhci_dbg(xhci, "TRB error code %u, "
				"halted endpoint index = %u\n",
				trb_comp_code, ep_index);
		/* else fall through */
	case COMP_STALL:
		/* Did we transfer part of the data (middle) phase? */
		if (event_trb != ep_ring->dequeue &&
				event_trb != td->last_trb)
			td->urb->actual_length =
				td->urb->transfer_buffer_length
				- TRB_LEN(le32_to_cpu(event->transfer_len));
		else
			td->urb->actual_length = 0;

		xhci_cleanup_halted_endpoint(xhci,
			slot_id, ep_index, 0, td, event_trb);
		return finish_td(xhci, td, event_trb, event, ep, status, true);
	}
	/*
	 * Did we transfer any data, despite the errors that might have
	 * happened?  I.e. did we get past the setup stage?
	 */
	if (event_trb != ep_ring->dequeue) {
		/* The event was for the status stage */
		if (event_trb == td->last_trb) {
			if (td->urb->actual_length != 0) {
				/* Don't overwrite a previously set error code
				 */
				if ((*status == -EINPROGRESS || *status == 0) &&
						(td->urb->transfer_flags
						 & URB_SHORT_NOT_OK))
					/* Did we already see a short data
					 * stage? */
					*status = -EREMOTEIO;
			} else {
				td->urb->actual_length =
					td->urb->transfer_buffer_length;
			}
		} else {
		/* Maybe the event was for the data stage? */
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				TRB_LEN(le32_to_cpu(event->transfer_len));
			xhci_dbg(xhci, "Waiting for status "
					"stage event\n");
			return 0;
		}
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}
/*
 * Process isochronous tds, update urb packet status and actual_length.
 *
 * Each TD corresponds to one iso_frame_desc entry (indexed by the count of
 * already-processed TDs).  The per-packet status is derived from the TRB
 * completion code; the actual length is either the full packet length (on
 * success or a skip-worthy error) or is computed by walking the TRBs of the
 * TD up to the event TRB.
 */
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	int idx;
	int len = 0;
	union xhci_trb *cur_trb;
	struct xhci_segment *cur_seg;
	struct usb_iso_packet_descriptor *frame;
	u32 trb_comp_code;
	bool skip_td = false;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];

	/* handle completion code */
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		frame->status = 0;
		break;
	case COMP_SHORT_TX:
		frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
				-EREMOTEIO : 0;
		break;
	case COMP_BW_OVER:
		frame->status = -ECOMM;
		skip_td = true;
		break;
	case COMP_BUFF_OVER:
	case COMP_BABBLE:
		frame->status = -EOVERFLOW;
		skip_td = true;
		break;
	case COMP_DEV_ERR:
	case COMP_STALL:
		frame->status = -EPROTO;
		skip_td = true;
		break;
	case COMP_STOP:
	case COMP_STOP_INVAL:
		break;
	default:
		frame->status = -1;
		break;
	}

	if (trb_comp_code == COMP_SUCCESS || skip_td) {
		/* Whole packet transferred (or error class where the residue
		 * is not meaningful): report the full packet length. */
		frame->actual_length = frame->length;
		td->urb->actual_length += frame->length;
	} else {
		/* Sum the buffer lengths of all completed TRBs before the
		 * event TRB, skipping no-op and link TRBs. */
		for (cur_trb = ep_ring->dequeue,
		     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
		     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
				len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
		}
		/* Add the partially-completed event TRB: its length minus the
		 * untransferred residue reported in the event. */
		len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
			TRB_LEN(le32_to_cpu(event->transfer_len));

		if (trb_comp_code != COMP_STOP_INVAL) {
			frame->actual_length = len;
			td->urb->actual_length += len;
		}
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}
/*
 * Complete one missed isochronous TD as a short (zero-length) transfer and
 * advance the ring past it.  Used while ep->skip is set after a Missed
 * Service Interval error.
 */
static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
		struct xhci_transfer_event *event,
		struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ring;
	struct urb_priv *priv;
	struct usb_iso_packet_descriptor *packet;
	int td_idx;

	ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	priv = td->urb->hcpriv;
	td_idx = priv->td_cnt;
	packet = &td->urb->iso_frame_desc[td_idx];

	/* The transfer is partly done: report it as such, with no data. */
	packet->status = -EXDEV;
	packet->actual_length = 0;

	/* Move the software dequeue pointer past every TRB of this TD. */
	while (ring->dequeue != td->last_trb)
		inc_deq(xhci, ring);
	inc_deq(xhci, ring);

	return finish_td(xhci, td, NULL, event, ep, status, true);
}
/*
 * Process bulk and interrupt tds, update urb status and actual_length.
 *
 * Fast path: the event is on the TD's last TRB, so the actual length can be
 * derived directly from the event's residue.  Slow path: walk the TRBs from
 * the dequeue pointer to the event TRB, summing their buffer lengths.
 */
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	union xhci_trb *cur_trb;
	struct xhci_segment *cur_seg;
	u32 trb_comp_code;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		/* Double check that the HW transferred everything. */
		if (event_trb != td->last_trb) {
			xhci_warn(xhci, "WARN Successful completion "
					"on short TX\n");
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				*status = -EREMOTEIO;
			else
				*status = 0;
		} else {
			*status = 0;
		}
		break;
	case COMP_SHORT_TX:
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
		break;
	default:
		/* Others already handled above */
		break;
	}
	if (trb_comp_code == COMP_SHORT_TX)
		xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
				"%d bytes untransferred\n",
				td->urb->ep->desc.bEndpointAddress,
				td->urb->transfer_buffer_length,
				TRB_LEN(le32_to_cpu(event->transfer_len)));
	/* Fast path - was this the last TRB in the TD for this URB? */
	if (event_trb == td->last_trb) {
		if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				TRB_LEN(le32_to_cpu(event->transfer_len));
			/* actual_length is unsigned: a bogus residue larger
			 * than the request would wrap to a huge value. */
			if (td->urb->transfer_buffer_length <
					td->urb->actual_length) {
				xhci_warn(xhci, "HC gave bad length "
						"of %d bytes left\n",
						TRB_LEN(le32_to_cpu(event->transfer_len)));
				td->urb->actual_length = 0;
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					*status = -EREMOTEIO;
				else
					*status = 0;
			}
			/* Don't overwrite a previously set error code */
			if (*status == -EINPROGRESS) {
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					*status = -EREMOTEIO;
				else
					*status = 0;
			}
		} else {
			td->urb->actual_length =
				td->urb->transfer_buffer_length;
			/* Ignore a short packet completion if the
			 * untransferred length was zero.
			 */
			if (*status == -EREMOTEIO)
				*status = 0;
		}
	} else {
		/* Slow path - walk the list, starting from the dequeue
		 * pointer, to get the actual length transferred.
		 */
		td->urb->actual_length = 0;
		for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
				cur_trb != event_trb;
				next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
				td->urb->actual_length +=
					TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
		}
		/* If the ring didn't stop on a Link or No-op TRB, add
		 * in the actual bytes transferred from the Normal TRB
		 */
		if (trb_comp_code != COMP_STOP_INVAL)
			td->urb->actual_length +=
				TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
				TRB_LEN(le32_to_cpu(event->transfer_len));
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}
/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 *
 * Called with xhci->lock held; the lock is dropped and re-acquired around
 * usb_hcd_giveback_urb().  The do/while loop exists so that, after a Missed
 * Service Interval error set ep->skip, all skipped isoc TDs ahead of the
 * event's TD are completed in one pass.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
		struct xhci_transfer_event *event)
{
	struct xhci_virt_device *xdev;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_td *td = NULL;
	dma_addr_t event_dma;
	struct xhci_segment *event_seg;
	union xhci_trb *event_trb;
	struct urb *urb = NULL;
	int status = -EINPROGRESS;
	struct urb_priv *urb_priv;
	struct xhci_ep_ctx *ep_ctx;
	struct list_head *tmp;
	u32 trb_comp_code;
	int ret = 0;
	int td_num = 0;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	if (!xdev) {
		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
			 (unsigned long long) xhci_trb_virt_to_dma(
				 xhci->event_ring->deq_seg,
				 xhci->event_ring->dequeue),
			 lower_32_bits(le64_to_cpu(event->buffer)),
			 upper_32_bits(le64_to_cpu(event->buffer)),
			 le32_to_cpu(event->transfer_len),
			 le32_to_cpu(event->flags));
		xhci_dbg(xhci, "Event ring:\n");
		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
		return -ENODEV;
	}

	/* Endpoint ID is 1 based, our index is zero based */
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep = &xdev->eps[ep_index];
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	if (!ep_ring ||
	    (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
	    EP_STATE_DISABLED) {
		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
				"or incorrect stream ring\n");
		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
			 (unsigned long long) xhci_trb_virt_to_dma(
				 xhci->event_ring->deq_seg,
				 xhci->event_ring->dequeue),
			 lower_32_bits(le64_to_cpu(event->buffer)),
			 upper_32_bits(le64_to_cpu(event->buffer)),
			 le32_to_cpu(event->transfer_len),
			 le32_to_cpu(event->flags));
		xhci_dbg(xhci, "Event ring:\n");
		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
		return -ENODEV;
	}

	/* Count current td numbers if ep->skip is set */
	if (ep->skip) {
		list_for_each(tmp, &ep_ring->td_list)
			td_num++;
	}

	event_dma = le64_to_cpu(event->buffer);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	/* Look for common error cases */
	switch (trb_comp_code) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */
	case COMP_SUCCESS:
	case COMP_SHORT_TX:
		break;
	case COMP_STOP:
		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
		break;
	case COMP_STOP_INVAL:
		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
		break;
	case COMP_STALL:
		xhci_dbg(xhci, "Stalled endpoint\n");
		ep->ep_state |= EP_HALTED;
		status = -EPIPE;
		break;
	case COMP_TRB_ERR:
		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
		status = -EILSEQ;
		break;
	case COMP_SPLIT_ERR:
	case COMP_TX_ERR:
		xhci_dbg(xhci, "Transfer error on endpoint\n");
		status = -EPROTO;
		break;
	case COMP_BABBLE:
		xhci_dbg(xhci, "Babble error on endpoint\n");
		status = -EOVERFLOW;
		break;
	case COMP_DB_ERR:
		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
		status = -ENOSR;
		break;
	case COMP_BW_OVER:
		xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
		break;
	case COMP_BUFF_OVER:
		xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
		break;
	case COMP_UNDERRUN:
		/*
		 * When the Isoch ring is empty, the xHC will generate
		 * a Ring Overrun Event for IN Isoch endpoint or Ring
		 * Underrun Event for OUT Isoch endpoint.
		 */
		xhci_dbg(xhci, "underrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_OVERRUN:
		xhci_dbg(xhci, "overrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_DEV_ERR:
		xhci_warn(xhci, "WARN: detect an incompatible device");
		status = -EPROTO;
		break;
	case COMP_MISSED_INT:
		/*
		 * When encounter missed service error, one or more isoc tds
		 * may be missed by xHC.
		 * Set skip flag of the ep_ring; Complete the missed tds as
		 * short transfer when process the ep_ring next time.
		 */
		ep->skip = true;
		xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
		goto cleanup;
	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			status = 0;
			break;
		}
		xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
				"busted\n");
		goto cleanup;
	}

	do {
		/* This TRB should be in the TD at the head of this ring's
		 * TD list.
		 */
		if (list_empty(&ep_ring->td_list)) {
			xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
					"with no TDs queued?\n",
				  TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				  ep_index);
			xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
				 (le32_to_cpu(event->flags) &
				  TRB_TYPE_BITMASK)>>10);
			xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
			if (ep->skip) {
				ep->skip = false;
				xhci_dbg(xhci, "td_list is empty while skip "
						"flag set. Clear skip flag.\n");
			}
			ret = 0;
			goto cleanup;
		}

		/* We've skipped all the TDs on the ep ring when ep->skip set */
		if (ep->skip && td_num == 0) {
			ep->skip = false;
			xhci_dbg(xhci, "All tds on the ep_ring skipped. "
						"Clear skip flag.\n");
			ret = 0;
			goto cleanup;
		}

		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
		if (ep->skip)
			td_num--;

		/* Is this a TRB in the currently executing TD? */
		event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
				td->last_trb, event_dma);

		/*
		 * Skip the Force Stopped Event. The event_trb(event_dma) of FSE
		 * is not in the current TD pointed by ep_ring->dequeue because
		 * that the hardware dequeue pointer still at the previous TRB
		 * of the current TD. The previous TRB maybe a Link TD or the
		 * last TRB of the previous TD. The command completion handle
		 * will take care the rest.
		 */
		if (!event_seg && (trb_comp_code == COMP_STOP ||
				   trb_comp_code == COMP_STOP_INVAL)) {
			ret = 0;
			goto cleanup;
		}

		if (!event_seg) {
			if (!ep->skip ||
			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
				/* Some host controllers give a spurious
				 * successful event after a short transfer.
				 * Ignore it.
				 */
				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
						ep_ring->last_td_was_short) {
					ep_ring->last_td_was_short = false;
					ret = 0;
					goto cleanup;
				}
				/* HC is busted, give up! */
				xhci_err(xhci,
					"ERROR Transfer event TRB DMA ptr not "
					"part of current TD\n");
				return -ESHUTDOWN;
			}

			ret = skip_isoc_td(xhci, td, event, ep, &status);
			goto cleanup;
		}
		if (trb_comp_code == COMP_SHORT_TX)
			ep_ring->last_td_was_short = true;
		else
			ep_ring->last_td_was_short = false;

		if (ep->skip) {
			xhci_dbg(xhci, "Found td. Clear skip flag.\n");
			ep->skip = false;
		}

		event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
						sizeof(*event_trb)];
		/*
		 * No-op TRB should not trigger interrupts.
		 * If event_trb is a no-op TRB, it means the
		 * corresponding TD has been cancelled. Just ignore
		 * the TD.
		 */
		if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
			xhci_dbg(xhci,
				 "event_trb is a no-op TRB. Skip it\n");
			goto cleanup;
		}

		/* Now update the urb's actual_length and give back to
		 * the core
		 */
		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
			ret = process_ctrl_td(xhci, td, event_trb, event, ep,
						 &status);
		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
			ret = process_isoc_td(xhci, td, event_trb, event, ep,
						 &status);
		else
			ret = process_bulk_intr_td(xhci, td, event_trb, event,
						 ep, &status);

cleanup:
		/*
		 * Do not update event ring dequeue pointer if ep->skip is set.
		 * Will roll back to continue process missed tds.
		 */
		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
			inc_deq(xhci, xhci->event_ring);
		}

		if (ret) {
			/* ret != 0 means all TDs of the URB are done: give
			 * the URB back to the USB core. */
			urb = td->urb;
			urb_priv = urb->hcpriv;
			/* Leave the TD around for the reset endpoint function
			 * to use(but only if it's not a control endpoint,
			 * since we already queued the Set TR dequeue pointer
			 * command for stalled control endpoints).
			 */
			if (usb_endpoint_xfer_control(&urb->ep->desc) ||
				(trb_comp_code != COMP_STALL &&
					trb_comp_code != COMP_BABBLE))
				xhci_urb_free_priv(xhci, urb_priv);

			usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
			if ((urb->actual_length != urb->transfer_buffer_length &&
						(urb->transfer_flags &
						 URB_SHORT_NOT_OK)) ||
					(status != 0 &&
					 !usb_endpoint_xfer_isoc(&urb->ep->desc)))
				xhci_dbg(xhci, "Giveback URB %p, len = %d, "
						"expected = %x, status = %d\n",
						urb, urb->actual_length,
						urb->transfer_buffer_length,
						status);
			spin_unlock(&xhci->lock);
			/* EHCI, UHCI, and OHCI always unconditionally set the
			 * urb->status of an isochronous endpoint to 0.
			 */
			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
				status = 0;
			usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
			spin_lock(&xhci->lock);
		}

	/*
	 * If ep->skip is set, it means there are missed tds on the
	 * endpoint ring need to take care of.
	 * Process them as short transfer until reach the td pointed by
	 * the event.
	 */
	} while (ep->skip && trb_comp_code != COMP_MISSED_INT);

	return 0;
}
/*
 * This function handles all OS-owned events on the event ring. It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 * Returns >0 for "possibly more events to process" (caller should call again),
 * otherwise 0 if done. In future, <0 returns should indicate error code.
 *
 * Ownership of the event TRB is determined by comparing its cycle bit with
 * the driver's cycle state; the rmb() below keeps the payload reads from
 * being speculated ahead of that check.
 */
static int xhci_handle_event(struct xhci_hcd *xhci)
{
	union xhci_trb *event;
	int update_ptrs = 1;
	int ret;

	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci->error_bitmask |= 1 << 1;
		return 0;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
	    xhci->event_ring->cycle_state) {
		xhci->error_bitmask |= 1 << 2;
		return 0;
	}

	/*
	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
	 * speculative reads of the event's flags/data below.
	 */
	rmb();

	/* FIXME: Handle more event types. */
	switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
	case TRB_TYPE(TRB_COMPLETION):
		handle_cmd_completion(xhci, &event->event_cmd);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		/* Port status handler advances the dequeue pointer itself. */
		handle_port_status(xhci, event);
		update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		ret = handle_tx_event(xhci, &event->trans_event);
		if (ret < 0)
			xhci->error_bitmask |= 1 << 9;
		else
			update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_DEV_NOTE):
		handle_device_notification(xhci, event);
		break;
	default:
		/* Vendor-specific event TRB types start at ID 48. */
		if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
		    TRB_TYPE(48))
			handle_vendor_event(xhci, event);
		else
			xhci->error_bitmask |= 1 << 3;
	}
	/* Any of the above functions may drop and re-acquire the lock, so check
	 * to make sure a watchdog timer didn't mark the host as non-responsive.
	 */
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI host dying, returning from "
				"event handler.\n");
		return 0;
	}

	if (update_ptrs)
		/* Update SW event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring);

	/* Are there more items on the event ring?  Caller will call us again to
	 * check.
	 */
	return 1;
}
/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 *
 * Drains the event ring under xhci->lock, then writes the updated dequeue
 * pointer (with the Event Handler Busy bit cleared) back to the HW.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 status;
	union xhci_trb *trb;
	u64 temp_64;
	union xhci_trb *event_ring_deq;
	dma_addr_t deq;

	spin_lock(&xhci->lock);
	trb = xhci->event_ring->dequeue;
	/* Check if the xHC generated the interrupt, or the irq is shared */
	status = xhci_readl(xhci, &xhci->op_regs->status);
	/* All-ones means the device is gone (e.g. PCI hot-unplug). */
	if (status == 0xffffffff)
		goto hw_died;

	if (!(status & STS_EINT)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;
	}
	if (status & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
hw_died:
		spin_unlock(&xhci->lock);
		/* NOTE(review): returns -ESHUTDOWN from an irqreturn_t
		 * handler rather than IRQ_HANDLED — confirm intentional. */
		return -ESHUTDOWN;
	}

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	status |= STS_EINT;
	xhci_writel(xhci, status, &xhci->op_regs->status);
	/* FIXME when MSI-X is supported and there are multiple vectors */
	/* Clear the MSI-X event interrupt status */
	if (hcd->irq) {
		u32 irq_pending;
		/* Acknowledge the PCI interrupt */
		irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		irq_pending |= IMAN_IP;
		xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
	}

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
				"Shouldn't IRQs be disabled?\n");
		/* Clear the event handler busy flag (RW1C);
		 * the event ring should be empty.
		 */
		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
		xhci_write_64(xhci, temp_64 | ERST_EHB,
				&xhci->ir_set->erst_dequeue);
		spin_unlock(&xhci->lock);

		return IRQ_HANDLED;
	}

	event_ring_deq = xhci->event_ring->dequeue;
	/* FIXME this should be a delayed service routine
	 * that clears the EHB.
	 */
	while (xhci_handle_event(xhci) > 0) {}

	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != xhci->event_ring->dequeue) {
		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
				xhci->event_ring->dequeue);
		if (deq == 0)
			xhci_warn(xhci, "WARN something wrong with SW event "
					"ring dequeue ptr.\n");
		/* Update HC event ring dequeue pointer */
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C); event ring is empty. */
	temp_64 |= ERST_EHB;
	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);

	spin_unlock(&xhci->lock);

	return IRQ_HANDLED;
}
/* MSI/MSI-X entry point: thin wrapper that delegates to the common handler;
 * the irq number itself is not needed. */
irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
{
	return xhci_irq(hcd);
}
/**** Endpoint Ring Operations ****/
/*
* Generic function for queueing a TRB on a ring.
* The caller must have checked to make sure there's room on the ring.
*
* @more_trbs_coming: Will you enqueue more TRBs before calling
* prepare_transfer()?
*/
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
bool more_trbs_coming,
u32 field1, u32 field2, u32 field3, u32 field4)
{
struct xhci_generic_trb *trb;
trb = &ring->enqueue->generic;
trb->field[0] = cpu_to_le32(field1);
trb->field[1] = cpu_to_le32(field2);
trb->field[2] = cpu_to_le32(field3);
trb->field[3] = cpu_to_le32(field4);
inc_enq(xhci, ring, more_trbs_coming);
}
/*
 * Does various checks on the endpoint ring, and makes it ready to queue
 * num_trbs: verifies the endpoint state allows queueing, expands the ring
 * when there is not enough room, and steps the enqueue pointer over any
 * link TRB (fixing up its chain and cycle bits on the way).
 *
 * Returns 0 on success; -ENOENT for a disabled endpoint, -EINVAL for an
 * error/unknown endpoint state, -ENOMEM when the ring cannot be expanded.
 *
 * FIXME allocate segments if the ring is full.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	unsigned int num_trbs_needed;

	/* Make sure the endpoint has been added to xHC schedule */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_HALTED:
		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
		/* fall through */
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}

	/* Keep expanding the ring until num_trbs fit (not supported for the
	 * command ring). */
	while (1) {
		if (room_on_ring(xhci, ep_ring, num_trbs))
			break;

		if (ep_ring == xhci->cmd_ring) {
			xhci_err(xhci, "Do not support expand command ring\n");
			return -ENOMEM;
		}

		xhci_dbg(xhci, "ERROR no room on ep ring, "
					"try ring expansion\n");
		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
		if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
					mem_flags)) {
			xhci_err(xhci, "Ring expansion failed\n");
			return -ENOMEM;
		}
	}

	if (enqueue_is_link_trb(ep_ring)) {
		struct xhci_ring *ring = ep_ring;
		union xhci_trb *next;

		next = ring->enqueue;

		while (last_trb(xhci, ring, ring->enq_seg, next)) {
			/* If we're not dealing with 0.95 hardware or isoc rings
			 * on AMD 0.96 host, clear the chain bit.
			 */
			if (!xhci_link_trb_quirk(xhci) &&
					!(ring->type == TYPE_ISOC &&
					 (xhci->quirks & XHCI_AMD_0x96_HOST)))
				next->link.control &= cpu_to_le32(~TRB_CHAIN);
			else
				next->link.control |= cpu_to_le32(TRB_CHAIN);

			/* Make sure the chain bit write is visible before the
			 * cycle-bit flip hands the link TRB to the HC. */
			wmb();
			next->link.control ^= cpu_to_le32(TRB_CYCLE);

			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
			}
			ring->enq_seg = ring->enq_seg->next;
			ring->enqueue = ring->enq_seg->trbs;
			next = ring->enqueue;
		}
	}

	return 0;
}
/*
 * Prepare the endpoint ring for @num_trbs and set up the @td_index'th TD of
 * the URB: initialize its lists, link the URB to its endpoint (first TD
 * only), and record where the TD starts on the ring.
 *
 * Returns 0 on success or the negative error from prepare_ring() /
 * usb_hcd_link_urb_to_ep().
 */
static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int stream_id,
		unsigned int num_trbs,
		struct urb *urb,
		unsigned int td_index,
		gfp_t mem_flags)
{
	int ret;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
				stream_id);
		return -EINVAL;
	}

	ret = prepare_ring(xhci, ep_ring,
			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[td_index];

	INIT_LIST_HEAD(&td->td_list);
	INIT_LIST_HEAD(&td->cancelled_td_list);

	/* Only the first TD of a URB is linked to the endpoint. */
	if (td_index == 0) {
		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
		if (unlikely(ret))
			return ret;
	}

	td->urb = urb;
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&td->td_list, &ep_ring->td_list);
	/* Record where this TD will begin so completion can find it. */
	td->start_seg = ep_ring->enq_seg;
	td->first_trb = ep_ring->enqueue;

	urb_priv->td[td_index] = td;

	return 0;
}
/*
 * Count how many TRBs are needed to queue the URB's scatter-gather list.
 * A TRB's buffer must not cross a 64KB boundary, so each SG entry needs one
 * TRB for the (possibly partial) chunk up to the first boundary plus one per
 * additional 64KB chunk, capped by the total transfer length.
 */
static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
{
	int num_sgs, num_trbs, running_total, temp, i;
	struct scatterlist *sg;

	sg = NULL;
	num_sgs = urb->num_mapped_sgs;
	/* temp tracks the bytes of the transfer not yet accounted for. */
	temp = urb->transfer_buffer_length;

	num_trbs = 0;
	for_each_sg(urb->sg, sg, num_sgs, i) {
		unsigned int len = sg_dma_len(sg);

		/* Scatter gather list entries may cross 64KB boundaries */
		running_total = TRB_MAX_BUFF_SIZE -
			(sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
		/* An aligned start means no partial leading chunk. */
		running_total &= TRB_MAX_BUFF_SIZE - 1;
		if (running_total != 0)
			num_trbs++;

		/* How many more 64KB chunks to transfer, how many more TRBs? */
		while (running_total < sg_dma_len(sg) && running_total < temp) {
			num_trbs++;
			running_total += TRB_MAX_BUFF_SIZE;
		}
		len = min_t(int, len, temp);
		temp -= len;
		if (temp == 0)
			break;
	}
	return num_trbs;
}
/*
 * Sanity check after queueing: the number of TRBs actually consumed and the
 * bytes queued must match what count_sg_trbs_needed() / the caller computed.
 * Logs (but does not fail) on a mismatch.
 */
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
	if (num_trbs != 0)
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
				"TRBs, %d left\n", __func__,
				urb->ep->desc.bEndpointAddress, num_trbs);
	if (running_total != urb->transfer_buffer_length)
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
				"queued %#x (%d), asked for %#x (%d)\n",
				__func__,
				urb->ep->desc.bEndpointAddress,
				running_total, running_total,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length);
}
/*
 * Hand a queued TD over to the hardware: set the first TRB's cycle bit last
 * (so the HC cannot see a half-written TD) and ring the endpoint doorbell.
 */
static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id, int start_cycle,
		struct xhci_generic_trb *start_trb)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
/*
 * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
 * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
 * (comprised of sg list entries) can take several service intervals to
 * transmit.
 *
 * If the driver's requested polling interval disagrees with what the
 * controller was programmed with, adopt the controller's value and warn.
 */
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
		xhci->devs[slot_id]->out_ctx, ep_index);
	bool ls_or_fs = (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL);
	int hc_interval;
	int drv_interval;

	hc_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	drv_interval = urb->interval;

	/* LS/FS intervals are specified in frames; compare in microframes. */
	if (ls_or_fs)
		drv_interval *= 8;

	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (hc_interval != drv_interval) {
		if (printk_ratelimit())
			dev_dbg(&urb->dev->dev, "Driver uses different interval"
					" (%d microframe%s) than xHCI "
					"(%d microframe%s)\n",
					drv_interval,
					drv_interval == 1 ? "" : "s",
					hc_interval,
					hc_interval == 1 ? "" : "s");
		urb->interval = hc_interval;
		/* Convert back to frames for LS/FS devices */
		if (ls_or_fs)
			urb->interval /= 8;
	}
	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
/*
 * The TD size is the number of bytes remaining in the TD (including this TRB),
 * right shifted by 10.
 * It must fit in bits 21:17, so it can't be bigger than 31.
 */
static u32 xhci_td_remainder(unsigned int remainder)
{
	/* Bits 21:17 hold 5 bits, so the field saturates at 31. */
	u32 max = (1 << (21 - 17 + 1)) - 1;
	u32 td_size = remainder >> 10;

	if (td_size >= max)
		td_size = max;
	return td_size << 17;
}
/*
 * For xHCI 1.0 host controllers, TD size is the number of packets remaining
 * in the TD (*not* including this TRB):
 *
 *	total_packet_count = roundup(TD size in bytes / wMaxPacketSize)
 *	packets_transferred =
 *		rounddown(bytes transferred including this TRB / wMaxPacketSize)
 *	TD size = total_packet_count - packets_transferred
 *
 * The value must fit in bits 21:17, so it can't be bigger than 31.
 */
static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
		unsigned int total_packet_count, struct urb *urb)
{
	int maxp;
	int packets_done;

	/* One TRB carrying a zero-length data packet. */
	if (running_total == 0 && trb_buff_len == 0)
		return 0;

	maxp = usb_endpoint_maxp(&urb->ep->desc);
	/* The TRB queueing functions don't count the current TRB in
	 * running_total, so add trb_buff_len here.
	 */
	packets_done = (running_total + trb_buff_len) / maxp;
	return xhci_td_remainder(total_packet_count - packets_done);
}
/*
 * Queue a bulk transfer described by a scatter-gather list as a single TD.
 * Each sg entry is split into as many TRBs as needed so that no TRB buffer
 * crosses a 64KB boundary.  The first TRB's cycle bit is left untouched
 * until every TRB has been queued, then giveback_first_trb() hands the
 * whole TD to the hardware at once.  Returns 0 on success or a negative
 * errno from prepare_transfer().
 */
static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	unsigned int num_trbs;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct scatterlist *sg;
	int num_sgs;
	int trb_buff_len, this_sg_len, running_total;
	unsigned int total_packet_count;
	bool first_trb;
	u64 addr;
	bool more_trbs_coming;
	struct xhci_generic_trb *start_trb;
	int start_cycle;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = count_sg_trbs_needed(xhci, urb);
	num_sgs = urb->num_mapped_sgs;
	/* Rounded up so that a short final packet still counts as one packet */
	total_packet_count = roundup(urb->transfer_buffer_length,
			usb_endpoint_maxp(&urb->ep->desc));

	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (trb_buff_len < 0)
		return trb_buff_len;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/*
	 * How much data is in the first TRB?
	 *
	 * There are three forces at work for TRB buffer pointers and lengths:
	 * 1. We don't want to walk off the end of this sg-list entry buffer.
	 * 2. The transfer length that the driver requested may be smaller than
	 *    the amount of memory allocated for this scatter-gather list.
	 * 3. TRBs buffers can't cross 64KB boundaries.
	 */
	sg = urb->sg;
	addr = (u64) sg_dma_address(sg);
	this_sg_len = sg_dma_len(sg);
	trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;

	first_trb = true;
	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 field = 0;
		u32 length_field = 0;
		u32 remainder = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= 0x1;
		} else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		/* Sanity check: count_sg_trbs_needed() should already have
		 * split any buffer that crosses a 64KB boundary.
		 */
		if (TRB_MAX_BUFF_SIZE -
				(addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
					(unsigned int) addr + trb_buff_len);
		}

		/* Set the TRB length, TD size, and interrupter fields. */
		if (xhci->hci_version < 0x100) {
			remainder = xhci_td_remainder(
					urb->transfer_buffer_length -
					running_total);
		} else {
			remainder = xhci_v1_0_td_remainder(running_total,
					trb_buff_len, total_packet_count, urb);
		}
		length_field = TRB_LEN(trb_buff_len) |
			remainder |
			TRB_INTR_TARGET(0);

		if (num_trbs > 1)
			more_trbs_coming = true;
		else
			more_trbs_coming = false;
		queue_trb(xhci, ep_ring, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer --
		 * Are we done queueing all the TRBs for this sg entry?
		 */
		this_sg_len -= trb_buff_len;
		if (this_sg_len == 0) {
			--num_sgs;
			if (num_sgs == 0)
				break;
			sg = sg_next(sg);
			addr = (u64) sg_dma_address(sg);
			this_sg_len = sg_dma_len(sg);
		} else {
			addr += trb_buff_len;
		}

		/* Next TRB: bounded by the 64KB boundary, the sg entry, and
		 * the remaining requested transfer length.
		 */
		trb_buff_len = TRB_MAX_BUFF_SIZE -
			(addr & (TRB_MAX_BUFF_SIZE - 1));
		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
		if (running_total + trb_buff_len > urb->transfer_buffer_length)
			trb_buff_len =
				urb->transfer_buffer_length - running_total;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}
/* This is very similar to what ehci-q.c qtd_fill() does */
/*
 * Queue a bulk transfer with a single contiguous DMA buffer as one TD,
 * splitting it at 64KB boundaries.  Scatter-gather URBs are diverted to
 * queue_bulk_sg_tx().  Returns 0 on success or a negative errno.
 */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_trbs;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	bool more_trbs_coming;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, ret;
	unsigned int total_packet_count;
	u64 addr;

	if (urb->num_sgs)
		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = 0;
	/* How much data is (potentially) left before the 64KB boundary? */
	running_total = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
	running_total &= TRB_MAX_BUFF_SIZE - 1;

	/* If there's some data on this 64KB chunk, or we have to send a
	 * zero-length transfer, we need at least one TRB
	 */
	if (running_total != 0 || urb->transfer_buffer_length == 0)
		num_trbs++;
	/* How many more 64KB chunks to transfer, how many more TRBs? */
	while (running_total < urb->transfer_buffer_length) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */

	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/* Rounded up so that a short final packet still counts as one packet */
	total_packet_count = roundup(urb->transfer_buffer_length,
			usb_endpoint_maxp(&urb->ep->desc));
	/* How much data is in the first TRB? */
	addr = (u64) urb->transfer_dma;
	trb_buff_len = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;

	first_trb = true;

	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 remainder = 0;
		field = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= 0x1;
		} else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		if (xhci->hci_version < 0x100) {
			remainder = xhci_td_remainder(
					urb->transfer_buffer_length -
					running_total);
		} else {
			remainder = xhci_v1_0_td_remainder(running_total,
					trb_buff_len, total_packet_count, urb);
		}
		length_field = TRB_LEN(trb_buff_len) |
			remainder |
			TRB_INTR_TARGET(0);

		if (num_trbs > 1)
			more_trbs_coming = true;
		else
			more_trbs_coming = false;
		queue_trb(xhci, ep_ring, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer */
		addr += trb_buff_len;
		trb_buff_len = urb->transfer_buffer_length - running_total;
		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
			trb_buff_len = TRB_MAX_BUFF_SIZE;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}
/* Caller must have locked xhci->lock */
/*
 * Queue a control transfer as one TD: a setup-stage TRB (setup packet
 * copied in as immediate data), an optional data-stage TRB, and a
 * status-stage TRB.  Returns 0 on success or a negative errno.
 */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field, length_field;
	struct urb_priv *urb_priv;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
	if (xhci->hci_version == 0x100) {
		if (urb->transfer_buffer_length > 0) {
			if (setup->bRequestType & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}

	/* The 8-byte setup packet rides in the TRB's pointer fields (IDT) */
	queue_trb(xhci, ep_ring, true,
			setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
			le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
			TRB_LEN(8) | TRB_INTR_TARGET(0),
			/* Immediate data in pointer */
			field);

	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_urb_dir_in(urb))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	length_field = TRB_LEN(urb->transfer_buffer_length) |
		xhci_td_remainder(urb->transfer_buffer_length) |
		TRB_INTR_TARGET(0);
	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, true,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				length_field,
				field | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			start_cycle, start_trb);
	return 0;
}
static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
struct urb *urb, int i)
{
int num_trbs = 0;
u64 addr, td_len;
addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
td_len = urb->iso_frame_desc[i].length;
num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
TRB_MAX_BUFF_SIZE);
if (num_trbs == 0)
num_trbs++;
return num_trbs;
}
/*
 * The transfer burst count field of the isochronous TRB defines the number of
 * bursts that are required to move all packets in this TD.  Only SuperSpeed
 * devices can burst up to bMaxBurst number of packets per service interval.
 * This field is zero based, meaning a value of zero in the field means one
 * burst.  Basically, for everything but SuperSpeed devices, this field will be
 * zero.  Only xHCI 1.0 host controllers support this field.
 */
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;

	if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
		return 0;

	/* bMaxBurst is zero based: 0 means 1 packet per burst */
	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
	/* Zero-based count of bursts of up to (bMaxBurst + 1) packets.
	 * The old roundup() here returned a rounded-up *packet* count minus
	 * one, not a burst count; ceil-divide by the burst size instead.
	 */
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}
/*
 * Returns the zero-based number of packets in the last "burst" of packets.
 * USB 2.0 devices can only do one "burst", so for them the value is simply
 * the total packet count minus one.  For SuperSpeed endpoints every burst
 * but the last holds exactly (bMaxBurst + 1) packets; the last holds
 * between 1 and (bMaxBurst + 1).  Only xHCI 1.0 hosts use this field.
 */
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	if (udev->speed == USB_SPEED_SUPER) {
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
		/* A zero residue means the last burst is full — (max_burst+1)
		 * packets, which is max_burst when expressed zero-based.
		 */
		return residue ? residue - 1 : max_burst;
	}

	/* Everything else: one burst holding all the packets */
	if (total_packet_count == 0)
		return 0;
	return total_packet_count - 1;
}
/* This is for isoc transfer */
/*
 * Queue one TD per isoc frame descriptor of @urb, each split into as many
 * TRBs as its buffer needs.  On a mid-URB failure, every TD queued so far
 * is turned into no-ops and the ring enqueue state is rolled back.
 * Returns 0 on success or a negative errno.
 */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;

	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}

	start_addr = (u64) urb->transfer_dma;
	/* Saved so the first TRB's cycle bit can be flipped last, and so the
	 * cleanup path can restore the ring's cycle state.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;
	/* Queue the first TRB, even if it's zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_packet_count;
		unsigned int burst_count;
		unsigned int residue;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		total_packet_count = roundup(td_len,
				usb_endpoint_maxp(&urb->ep->desc));
		/* A zero-length transfer still involves at least one packet. */
		if (total_packet_count == 0)
			total_packet_count++;
		burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
				total_packet_count);
		residue = xhci_get_last_burst_packet_count(xhci,
				urb->dev, urb, total_packet_count);

		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0) {
			/* Nothing queued yet on the first TD: plain failure;
			 * otherwise roll back what was already enqueued.
			 */
			if (i == 0)
				return ret;
			goto cleanup;
		}

		td = urb_priv->td[i];
		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;
			field = TRB_TBC(burst_count) | TRB_TLBPC(residue);

			if (first_trb) {
				/* Queue the isoc TRB */
				field |= TRB_TYPE(TRB_ISOC);
				/* Assume URB_ISO_ASAP is set */
				field |= TRB_SIA;
				if (i == 0) {
					/* Very first TRB: cycle bit is fixed
					 * up later by giveback_first_trb()
					 */
					if (start_cycle == 0)
						field |= 0x1;
				} else
					field |= ep_ring->cycle_state;
				first_trb = false;
			} else {
				/* Queue other normal TRBs */
				field |= TRB_TYPE(TRB_NORMAL);
				field |= ep_ring->cycle_state;
			}

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Chain all the TRBs together; clear the chain bit in
			 * the last TRB to indicate it's the last TRB in the
			 * chain.
			 */
			if (j < trbs_per_td - 1) {
				field |= TRB_CHAIN;
				more_trbs_coming = true;
			} else {
				td->last_trb = ep_ring->enqueue;
				field |= TRB_IOC;
				if (xhci->hci_version == 0x100) {
					/* Set BEI bit except for the last td */
					if (i < num_tds - 1)
						field |= TRB_BEI;
				}
				more_trbs_coming = false;
			}

			/* Calculate TRB length */
			trb_buff_len = TRB_MAX_BUFF_SIZE -
				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

			/* Set the TRB length, TD size, & interrupter fields. */
			if (xhci->hci_version < 0x100) {
				remainder = xhci_td_remainder(
						td_len - running_total);
			} else {
				remainder = xhci_v1_0_td_remainder(
						running_total, trb_buff_len,
						total_packet_count, urb);
			}
			length_field = TRB_LEN(trb_buff_len) |
				remainder |
				TRB_INTR_TARGET(0);

			queue_trb(xhci, ep_ring, more_trbs_coming,
					lower_32_bits(addr),
					upper_32_bits(addr),
					length_field,
					field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length unmatch\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

	/* First active isoc stream: apply the AMD PLL quirk if needed */
	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */
	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i]->td_list);

	/* Use the first TD as a temporary variable to turn the TDs we've queued
	 * into No-ops with a software-owned cycle bit.  That way the hardware
	 * won't accidentally start executing bogus TDs when we partially
	 * overwrite them.  td->first_trb and td->start_seg are already set.
	 */
	urb_priv->td[0]->last_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped. */
	td_to_noop(xhci, ep_ring, urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
	ep_ring->enqueue = urb_priv->td[0]->first_trb;
	ep_ring->enq_seg = urb_priv->td[0]->start_seg;
	ep_ring->cycle_state = start_cycle;
	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}
/*
 * Check transfer ring to guarantee there is enough room for the urb.
 * Update ISO URB start_frame and interval.
 * Update interval as xhci_queue_intr_tx does.  Just use xhci frame_index to
 * update the urb->start_frame by now.
 * Always assume URB_ISO_ASAP set, and NEVER use urb->start_frame as input.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int xhci_interval;
	int ep_interval;
	int num_tds, num_trbs, i;
	int ret;

	xdev = xhci->devs[slot_id];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	/* Sum up the TRBs needed by every isoc frame descriptor in the URB */
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(xhci, urb, i);

	/* Check the ring to guarantee there is enough room for the whole urb.
	 * Do not insert any td of the urb to the ring if the check failed.
	 */
	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			num_trbs, mem_flags);
	if (ret)
		return ret;

	/* Snapshot the controller's current microframe index */
	start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;

	urb->start_frame = start_frame;
	/* LS/FS devices express start_frame in frames, not microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		urb->start_frame >>= 3;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;
	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		if (printk_ratelimit())
			dev_dbg(&urb->dev->dev, "Driver uses different interval"
					" (%d microframe%s) than xHCI "
					"(%d microframe%s)\n",
					ep_interval,
					ep_interval == 1 ? "" : "s",
					xhci_interval,
					xhci_interval == 1 ? "" : "s");
		/* Force the URB to use the interval the hardware honors */
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
	/* Saved so a failed xhci_queue_isoc_tx() can restore the free count */
	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;

	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
/****		Command Ring Operations		****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 * Returns 0 on success or a negative errno if there was no room.
 */
static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
		u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	/* Ordinary commands must also leave the reserved TRBs untouched */
	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for "
					"unfailable commands failed.\n");
		return ret;
	}
	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}
/* Queue a slot enable or disable request on the command ring.
 * @trb_type selects between the enable- and disable-slot command types.
 */
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
	u32 trb_fields = TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id);

	return queue_command(xhci, 0, 0, 0, trb_fields, false);
}
/* Queue an address device command TRB, pointing it at the input context. */
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	u32 ctx_low = lower_32_bits(in_ctx_ptr);
	u32 ctx_high = upper_32_bits(in_ctx_ptr);
	u32 trb_fields = TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id);

	return queue_command(xhci, ctx_low, ctx_high, 0, trb_fields, false);
}
/* Queue a vendor-defined command TRB; all four TRB fields come from the
 * caller unchanged.
 */
int xhci_queue_vendor_command(struct xhci_hcd *xhci,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, field1, field2, field3, field4, false);
}
/* Queue a reset device command TRB for the given slot. */
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
{
	u32 trb_fields = TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id);

	return queue_command(xhci, 0, 0, 0, trb_fields, false);
}
/* Queue a configure endpoint command TRB pointing at the input context.
 * @command_must_succeed draws on the reserved command-TRB pool.
 */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	u32 ctx_low = lower_32_bits(in_ctx_ptr);
	u32 ctx_high = upper_32_bits(in_ctx_ptr);
	u32 trb_fields = TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id);

	return queue_command(xhci, ctx_low, ctx_high, 0, trb_fields,
			command_must_succeed);
}
/* Queue an evaluate context command TRB pointing at the input context. */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	u32 ctx_low = lower_32_bits(in_ctx_ptr);
	u32 ctx_high = upper_32_bits(in_ctx_ptr);
	u32 trb_fields = TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id);

	return queue_command(xhci, ctx_low, ctx_high, 0, trb_fields, false);
}
/*
 * Queue a stop endpoint command TRB.  @suspend is set to indicate the
 * command is being issued to stop activity on an endpoint that is about to
 * be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, int suspend)
{
	u32 trb_fields = SLOT_ID_FOR_TRB(slot_id)
			| EP_ID_FOR_TRB(ep_index)
			| TRB_TYPE(TRB_STOP_RING)
			| SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, 0, 0, 0, trb_fields, false);
}
/* Set Transfer Ring Dequeue Pointer command.
 * This should not be used for endpoints that have streams enabled.
 * Returns the result of queue_command(), or 0 (without queueing anything)
 * when the dequeue pointer cannot be translated or a Set TR Deq command is
 * already pending on the endpoint.
 */
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	struct xhci_virt_ep *ep;

	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
	if (addr == 0) {
		/* Non-fatal: warn and skip rather than queue a bogus pointer */
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
				deq_seg, deq_ptr);
		return 0;
	}
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Only one Set TR Deq Ptr command may be outstanding per endpoint */
	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
		return 0;
	}
	/* Remember what we told the hardware, for the completion handler */
	ep->queued_deq_seg = deq_seg;
	ep->queued_deq_ptr = deq_ptr;
	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
			upper_32_bits(addr), trb_stream_id,
			trb_slot_id | trb_ep_index | type, false);
}
/* Queue a reset endpoint command TRB for (slot_id, ep_index). */
int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
	u32 trb_fields = SLOT_ID_FOR_TRB(slot_id)
			| EP_ID_FOR_TRB(ep_index)
			| TRB_TYPE(TRB_RESET_EP);

	return queue_command(xhci, 0, 0, 0, trb_fields, false);
}
|
ashikrobi/Crabbykernel
|
drivers/usb/host/xhci-ring.c
|
C
|
gpl-2.0
| 115,318
|
/*
* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/clk.h>
#include <mach/hardware.h>
#include <mach/iommu_domains.h>
#include <mach/iommu.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/fb.h>
#include <linux/msm_mdp.h>
#include <linux/file.h>
#include <linux/android_pmem.h>
#include <linux/major.h>
#include <asm/system.h>
#include <asm/mach-types.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <linux/msm_kgsl.h>
#include "mdp.h"
#include "msm_fb.h"
#include "mdp4.h"
#define VERSION_KEY_MASK	0xFFFFFF00

/*
 * Global overlay driver state: the static pool of overlay pipes plus
 * per-mixer staging, blending, flush, and iommu free-list bookkeeping.
 */
struct mdp4_overlay_ctrl {
	struct mdp4_overlay_pipe plist[OVERLAY_PIPE_MAX];	/* pipe pool */
	/* pipe currently staged at each (mixer, stage) slot */
	struct mdp4_overlay_pipe *stage[MDP4_MIXER_MAX][MDP4_MIXER_STAGE_MAX];
	struct mdp4_overlay_pipe *baselayer[MDP4_MIXER_MAX];
	struct blend_cfg blend[MDP4_MIXER_MAX][MDP4_MIXER_STAGE_MAX];
	uint32 mixer_cfg[MDP4_MIXER_MAX];
	uint32 flush[MDP4_MIXER_MAX];
	/* ion handles queued for deferred unmap: current and previous batch */
	struct iommu_free_list iommu_free[MDP4_MIXER_MAX];
	struct iommu_free_list iommu_free_prev[MDP4_MIXER_MAX];
	uint32 dmap_cfg[5];
	uint32 cs_controller;
	uint32 panel_3d;
	uint32 panel_mode;
	uint32 mixer0_played;
	uint32 mixer1_played;
	uint32 mixer2_played;
} mdp4_overlay_db = {
	.cs_controller = CS_CONTROLLER_0,
	/* Fixed hardware pipe assignments: two RGB pipes, two VG (video)
	 * pipes, and one border-fill pipe bound to each mixer.
	 */
	.plist = {
		{
			.pipe_type = OVERLAY_TYPE_RGB,
			.pipe_num = OVERLAY_PIPE_RGB1,
			.pipe_ndx = 1,
		},
		{
			.pipe_type = OVERLAY_TYPE_RGB,
			.pipe_num = OVERLAY_PIPE_RGB2,
			.pipe_ndx = 2,
		},
		{
			.pipe_type = OVERLAY_TYPE_VIDEO,
			.pipe_num = OVERLAY_PIPE_VG1,
			.pipe_ndx = 3,
		},
		{
			.pipe_type = OVERLAY_TYPE_VIDEO,
			.pipe_num = OVERLAY_PIPE_VG2,
			.pipe_ndx = 4,
		},
		{
			.pipe_type = OVERLAY_TYPE_BF,
			.pipe_num = OVERLAY_PIPE_RGB3,
			.pipe_ndx = 5,
			.mixer_num = MDP4_MIXER0,
		},
		{
			.pipe_type = OVERLAY_TYPE_BF,
			.pipe_num = OVERLAY_PIPE_VG3,
			.pipe_ndx = 6,
			.mixer_num = MDP4_MIXER1,
		},
		{
			.pipe_type = OVERLAY_TYPE_BF,
			.pipe_num = OVERLAY_PIPE_VG4,
			.pipe_ndx = 7,
			.mixer_num = MDP4_MIXER2,
		},
	},
};
/* Guards the iommu free lists and per-pipe iommu handle state */
static DEFINE_MUTEX(iommu_mutex);
/* NOTE(review): presumably guards perf_request/perf_current below —
 * confirm against the users of this mutex (not visible in this chunk).
 */
static DEFINE_MUTEX(perf_mutex);
static struct mdp4_overlay_ctrl *ctrl = &mdp4_overlay_db;

/* Aggregate MDP clock and bus-bandwidth requirements across mixers.
 * "ab"/"ib" follow the MSM bus convention (arbitrated/instantaneous
 * bandwidth) — TODO confirm against the bus-scaling code.
 */
struct mdp4_overlay_perf {
	u32 mdp_clk_rate;
	u32 use_ov_blt[MDP4_MIXER_MAX];
	u64 mdp_ov_ab_bw[MDP4_MIXER_MAX];
	u64 mdp_ov_ib_bw[MDP4_MIXER_MAX];
	u32 mdp_ab_bw;
	u32 mdp_ib_bw;
};

/* Requested (pending) vs. currently applied performance settings */
static struct mdp4_overlay_perf perf_request;
static struct mdp4_overlay_perf perf_current;
/*
 * Release the base-layer pipe of the panel driving this framebuffer.
 * fb index 0 is the primary panel (DSI video, DSI command, or LCDC) unless
 * HDMI is primary; HDMI-as-primary or fb index 1 goes to the DTV path.
 */
void mdp4_overlay_free_base_pipe(struct msm_fb_data_type *mfd)
{
	if (!hdmi_prim_display && mfd->index == 0) {
		/* Dispatch on the configured primary panel interface */
		if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
			mdp4_dsi_video_free_base_pipe(mfd);
		else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
			mdp4_dsi_cmd_free_base_pipe(mfd);
		else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
			mdp4_lcdc_free_base_pipe(mfd);
	} else if (hdmi_prim_display || mfd->index == 1) {
		mdp4_dtv_free_base_pipe(mfd);
	}
}
/* ION client used for overlay buffer imports/unmaps below.
 * NOTE(review): its initialization site is not visible in this chunk —
 * confirm where it is assigned before relying on it being non-NULL.
 */
static struct ion_client *display_iclient;
/*
 * Secure the MM ION heap for protected content on this framebuffer,
 * holding the IOMMU clocks on only for the duration of the secure call.
 * Idempotent: returns 0 immediately if the heap is already mapped.
 * Returns 0 on success or a negative errno.
 */
static int mdp4_map_sec_resource(struct msm_fb_data_type *mfd)
{
	int rc;

	if (!mfd) {
		pr_err("%s: mfd is invalid\n", __func__);
		return -ENODEV;
	}

	pr_debug("%s %d mfd->index=%d,mapped=%d\n",
		__func__, __LINE__,
		mfd->index, mfd->sec_mapped);

	if (mfd->sec_mapped)
		return 0;

	rc = mdp_enable_iommu_clocks();
	if (rc) {
		pr_err("IOMMU clock enabled failed while open");
		return rc;
	}

	rc = msm_ion_secure_heap(ION_HEAP(ION_CP_MM_HEAP_ID));
	if (rc)
		pr_err("ION heap secure failed heap id %d ret %d\n",
			ION_CP_MM_HEAP_ID, rc);
	else
		mfd->sec_mapped = 1;

	mdp_disable_iommu_clocks();
	return rc;
}
/*
 * Undo mdp4_map_sec_resource(): unsecure the MM ION heap once no pipe on
 * this framebuffer's mixer is still running a secure overlay session.
 * Returns 0 on success (including the nothing-to-do cases) or a negative
 * errno.
 */
int mdp4_unmap_sec_resource(struct msm_fb_data_type *mfd)
{
	struct mdp4_overlay_pipe *pipe;
	int rc;
	int i;

	if (!mfd) {
		pr_err("%s: mfd is invalid\n", __func__);
		return -ENODEV;
	}

	if (mfd->sec_mapped == 0)
		return 0;

	/* Keep the heap secured while any pipe on this mixer still carries
	 * a secure overlay session.
	 */
	for (i = 0; i < OVERLAY_PIPE_MAX; i++) {
		pipe = &ctrl->plist[i];
		if ((pipe->mixer_num == mfd->index) &&
		    (pipe->flags & MDP_SECURE_OVERLAY_SESSION))
			return 0;
	}

	pr_debug("%s %d mfd->index=%d,mapped=%d\n",
		__func__, __LINE__,
		mfd->index, mfd->sec_mapped);

	rc = mdp_enable_iommu_clocks();
	if (rc) {
		pr_err("IOMMU clock enabled failed while close\n");
		return rc;
	}
	msm_ion_unsecure_heap(ION_HEAP(ION_CP_MM_HEAP_ID));
	mfd->sec_mapped = 0;
	mdp_disable_iommu_clocks();
	return rc;
}
/*
 * mdp4_overlay_iommu_unmap_freelist()
 * mdp4_overlay_iommu_2freelist()
 * mdp4_overlay_iommu_pipe_free()
 * The above three functions need to be called from the same thread and
 * in order, so that no extra mutexes are needed.
 */
/*
 * Unmap and free every ion handle queued on the *previous* free list of
 * @mixer, then rotate the current free list into the previous slot.
 * NOTE(review): the one-batch delay presumably guarantees the hardware has
 * finished scanning out of a buffer before its mapping is dropped —
 * confirm against the display commit path.
 */
void mdp4_overlay_iommu_unmap_freelist(int mixer)
{
	int i;
	struct ion_handle *ihdl;
	struct iommu_free_list *flist, *pflist;

	if (mixer >= MDP4_MIXER_MAX)
		return;

	mutex_lock(&iommu_mutex);
	pflist = &ctrl->iommu_free_prev[mixer];
	flist = &ctrl->iommu_free[mixer];
	pr_debug("%s: mixer=%d fndx=%d %d\n", __func__,
		mixer, pflist->fndx, flist->fndx);
	/* Nothing on the previous list: just rotate the lists */
	if (pflist->fndx == 0) {
		goto flist_to_pflist;
	}
	for (i = 0; i < IOMMU_FREE_LIST_MAX; i++) {
		ihdl = pflist->ihdl[i];
		if (ihdl == NULL)
			continue;
		pr_debug("%s: mixer=%d i=%d ihdl=0x%p\n", __func__,
			mixer, i, ihdl);
		ion_unmap_iommu(display_iclient, ihdl, DISPLAY_READ_DOMAIN,
			GEN_POOL);
		mdp4_stat.iommu_unmap++;
		pr_debug("%s: map=%d unmap=%d drop=%d\n", __func__,
			(int)mdp4_stat.iommu_map, (int)mdp4_stat.iommu_unmap,
			(int)mdp4_stat.iommu_drop);
		ion_free(display_iclient, ihdl);
	}
flist_to_pflist:
	/* move flist to pflist*/
	memcpy(pflist, flist, sizeof(*pflist));
	memset(flist, 0, sizeof(*flist));
	mutex_unlock(&iommu_mutex);
}
void mdp4_overlay_iommu_2freelist(int mixer, struct ion_handle *ihdl)
{
struct iommu_free_list *flist;
flist = &ctrl->iommu_free[mixer];
if (flist->fndx >= IOMMU_FREE_LIST_MAX) {
pr_err("%s: Error, mixer=%d iommu fndx=%d\n",
__func__, mixer, flist->fndx);
mdp4_stat.iommu_drop++;
return;
}
pr_debug("%s: add mixer=%d fndx=%d ihdl=0x%p\n", __func__,
mixer, flist->fndx, ihdl);
flist->ihdl[flist->fndx++] = ihdl;
}
/*
 * mdp4_overlay_iommu_pipe_free - release a pipe's buffer references.
 *
 * For MDP_MEMORY_ID_TYPE_FB sources the struct file references are
 * dropped immediately; otherwise the pipe's previous (and, when @all is
 * non-zero, current) ion handles are moved onto the per-mixer deferred
 * free list to be released by mdp4_overlay_iommu_unmap_freelist().
 */
void mdp4_overlay_iommu_pipe_free(int ndx, int all)
{
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_iommu_pipe_info *iom;
	int plane, mixer;
	pipe = mdp4_overlay_ndx2pipe(ndx);
	if (pipe == NULL)
		return;
	if (pipe->flags & MDP_MEMORY_ID_TYPE_FB) {
		pipe->flags &= ~MDP_MEMORY_ID_TYPE_FB;
		/* drop the file references held for each source plane */
		if (pipe->put0_need) {
			fput_light(pipe->srcp0_file, pipe->put0_need);
			pipe->put0_need = 0;
		}
		if (pipe->put1_need) {
			fput_light(pipe->srcp1_file, pipe->put1_need);
			pipe->put1_need = 0;
		}
		if (pipe->put2_need) {
			fput_light(pipe->srcp2_file, pipe->put2_need);
			pipe->put2_need = 0;
		}
		pr_debug("%s: ndx=%d flags=%x put=%d\n", __func__,
			pipe->pipe_ndx, pipe->flags, pipe->put0_need);
		return;
	}
	mutex_lock(&iommu_mutex);
	mixer = pipe->mixer_num;
	iom = &pipe->iommu;
	pr_debug("%s: mixer=%d ndx=%d all=%d\n", __func__,
		mixer, pipe->pipe_ndx, all);
	for (plane = 0; plane < MDP4_MAX_PLANE; plane++) {
		if (iom->prev_ihdl[plane]) {
			mdp4_overlay_iommu_2freelist(mixer,
				iom->prev_ihdl[plane]);
			iom->prev_ihdl[plane] = NULL;
		}
		if (all && iom->ihdl[plane]) {
			mdp4_overlay_iommu_2freelist(mixer, iom->ihdl[plane]);
			iom->ihdl[plane] = NULL;
		}
	}
	mutex_unlock(&iommu_mutex);
}
/*
 * mdp4_overlay_iommu_map_buf - import an ion buffer and map it into the
 * display IOMMU for this pipe/plane.
 *
 * On success *start/*len describe the mapping, *srcp_ihdl holds the
 * imported handle, and the pipe's previous handle for this plane is
 * rotated onto the deferred free list.  Returns 0 on success, negative
 * errno on failure.
 */
int mdp4_overlay_iommu_map_buf(int mem_id,
	struct mdp4_overlay_pipe *pipe, unsigned int plane,
	unsigned long *start, unsigned long *len,
	struct ion_handle **srcp_ihdl)
{
	struct mdp4_iommu_pipe_info *iom;
	if (!display_iclient)
		return -EINVAL;
	*srcp_ihdl = ion_import_dma_buf(display_iclient, mem_id);
	if (IS_ERR_OR_NULL(*srcp_ihdl)) {
		pr_err("ion_import_dma_buf() failed\n");
		/*
		 * Bug fix: PTR_ERR(NULL) evaluates to 0, which callers would
		 * treat as success while *srcp_ihdl is NULL.  Translate a
		 * NULL handle to -EINVAL explicitly.
		 */
		return IS_ERR(*srcp_ihdl) ? PTR_ERR(*srcp_ihdl) : -EINVAL;
	}
	pr_debug("%s(): ion_hdl %p, ion_buf %d\n", __func__, *srcp_ihdl,
		ion_share_dma_buf(display_iclient, *srcp_ihdl));
	pr_debug("mixer %u, pipe %u, plane %u\n", pipe->mixer_num,
		pipe->pipe_ndx, plane);
	if (ion_map_iommu(display_iclient, *srcp_ihdl,
		DISPLAY_READ_DOMAIN, GEN_POOL, SZ_4K, 0, start,
		len, 0, 0)) {
		ion_free(display_iclient, *srcp_ihdl);
		pr_err("ion_map_iommu() failed\n");
		return -EINVAL;
	}
	mutex_lock(&iommu_mutex);
	iom = &pipe->iommu;
	/* a still-pending previous handle means the deferred free list was
	 * not flushed between frames; queue it and count the drop */
	if (iom->prev_ihdl[plane]) {
		mdp4_overlay_iommu_2freelist(pipe->mixer_num,
			iom->prev_ihdl[plane]);
		mdp4_stat.iommu_drop++;
		pr_err("%s: dropped, ndx=%d plane=%d\n", __func__,
			pipe->pipe_ndx, plane);
	}
	iom->prev_ihdl[plane] = iom->ihdl[plane];
	iom->ihdl[plane] = *srcp_ihdl;
	mdp4_stat.iommu_map++;
	pr_debug("%s: ndx=%d plane=%d prev=0x%p cur=0x%p start=0x%lx len=%lx\n",
		__func__, pipe->pipe_ndx, plane, iom->prev_ihdl[plane],
		iom->ihdl[plane], *start, *len);
	mutex_unlock(&iommu_mutex);
	return 0;
}
/* Per-mixer, per-pipe ion handle bookkeeping used by mdp4_iommu_unmap(). */
static struct mdp4_iommu_pipe_info mdp_iommu[MDP4_MIXER_MAX][OVERLAY_PIPE_MAX];
/*
 * mdp4_iommu_unmap - immediately unmap and free stale ion handles for
 * every pipe on this pipe's mixer, using the static mdp_iommu[] table
 * (separate from the per-pipe deferred free lists above).
 *
 * "prev" handles are always released; current handles are released only
 * for entries flagged with mark_unmap, which is cleared afterwards.
 */
void mdp4_iommu_unmap(struct mdp4_overlay_pipe *pipe)
{
	struct mdp4_iommu_pipe_info *iom_pipe_info;
	unsigned char i, j;
	if (!display_iclient)
		return;
	for (j = 0; j < OVERLAY_PIPE_MAX; j++) {
		iom_pipe_info = &mdp_iommu[pipe->mixer_num][j];
		for (i = 0; i < MDP4_MAX_PLANE; i++) {
			if (iom_pipe_info->prev_ihdl[i]) {
				/* j + 1 in the trace is the 1-based pipe ndx */
				pr_debug("%s(): mixer %u, pipe %u, plane %u, "
					"prev_ihdl %p\n", __func__,
					pipe->mixer_num, j + 1, i,
					iom_pipe_info->prev_ihdl[i]);
				ion_unmap_iommu(display_iclient,
					iom_pipe_info->prev_ihdl[i],
					DISPLAY_READ_DOMAIN, GEN_POOL);
				ion_free(display_iclient,
					iom_pipe_info->prev_ihdl[i]);
				iom_pipe_info->prev_ihdl[i] = NULL;
			}
			if (iom_pipe_info->mark_unmap) {
				if (iom_pipe_info->ihdl[i]) {
					pr_debug("%s(): MARK, mixer %u, pipe %u, plane %u, "
						"ihdl %p\n", __func__,
						pipe->mixer_num, j + 1, i,
						iom_pipe_info->ihdl[i]);
					ion_unmap_iommu(display_iclient,
						iom_pipe_info->ihdl[i],
						DISPLAY_READ_DOMAIN, GEN_POOL);
					ion_free(display_iclient,
						iom_pipe_info->ihdl[i]);
					iom_pipe_info->ihdl[i] = NULL;
				}
			}
		}
		iom_pipe_info->mark_unmap = 0;
	}
}
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT)
/* Non-zero: the panel is mounted inverted, so layers on the local panel
 * are flipped upside-down by default (see rgb/vg setup below). */
static int panel_rotate_180 = 1;
#endif
/* Report whether the given mixer has had a frame played on it. */
int mdp4_overlay_mixer_play(int mixer_num)
{
	switch (mixer_num) {
	case MDP4_MIXER2:
		return ctrl->mixer2_played;
	case MDP4_MIXER1:
		return ctrl->mixer1_played;
	default:
		return ctrl->mixer0_played;
	}
}
/* Record the panel's 3D mode; @mixer_num is currently unused. */
void mdp4_overlay_panel_3d(int mixer_num, uint32 panel_3d)
{
	ctrl->panel_3d = panel_3d;
}
/* OR @mode into the global panel-mode bitmask; @mixer_num is unused. */
void mdp4_overlay_panel_mode(int mixer_num, uint32 mode)
{
	ctrl->panel_mode |= mode;
}
/* Clear @mode bits from the global panel-mode bitmask; @mixer_num is unused. */
void mdp4_overlay_panel_mode_unset(int mixer_num, uint32 mode)
{
	ctrl->panel_mode &= ~mode;
}
/* Return the bitmask of panel modes currently configured. */
uint32 mdp4_overlay_panel_list(void)
{
	return ctrl->panel_mode;
}
/* Borderfill base pipes are available on MDP revision 4.2 and later. */
int mdp4_overlay_borderfill_supported(void)
{
	return (mdp_rev >= MDP_REV_42);
}
/*
 * mdp4_overlay_dmae_cfg - configure the DMA_E (external/TV) output block.
 * @atv: non-zero when driving analog TV; enables the deflicker filter and
 *       programs TV-out registers, otherwise the video quantization range
 *       is (re)applied.
 */
void mdp4_overlay_dmae_cfg(struct msm_fb_data_type *mfd, int atv)
{
	uint32 dmae_cfg_reg;
	if (atv)
		dmae_cfg_reg = DMA_DEFLKR_EN;
	else
		dmae_cfg_reg = 0;
	if (mfd->fb_imgType == MDP_BGR_565)
		dmae_cfg_reg |= DMA_PACK_PATTERN_BGR;
	else
		dmae_cfg_reg |= DMA_PACK_PATTERN_RGB;
	/* destination component depth follows the panel bpp */
	if (mfd->panel_info.bpp == 18) {
		dmae_cfg_reg |= DMA_DSTC0G_6BITS | /* 666 18BPP */
		DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
	} else if (mfd->panel_info.bpp == 16) {
		dmae_cfg_reg |= DMA_DSTC0G_6BITS | /* 565 16BPP */
		DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
	} else {
		dmae_cfg_reg |= DMA_DSTC0G_8BITS | /* 888 24BPP */
		DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS;
	}
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	/* dma_e config register */
	MDP_OUTP(MDP_BASE + 0xb0000, dmae_cfg_reg);
	if (atv) {
		/* NOTE(review): TV-out timing/matrix values below are magic
		 * constants from vendor configuration — verify against the
		 * MDP4 register manual before changing */
		MDP_OUTP(MDP_BASE + 0xb0070, 0xeb0010);
		MDP_OUTP(MDP_BASE + 0xb0074, 0xf00010);
		MDP_OUTP(MDP_BASE + 0xb0078, 0xf00010);
		MDP_OUTP(MDP_BASE + 0xb3000, 0x80);
		MDP_OUTP(MDP_BASE + 0xb3010, 0x1800040);
		MDP_OUTP(MDP_BASE + 0xb3014, 0x1000080);
		MDP_OUTP(MDP_BASE + 0xb4004, 0x67686970);
	} else {
		mdp_vid_quant_set();
	}
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
#ifdef CONFIG_FB_MSM_HDMI_3D
/* HDMI-3D builds stub this out. */
void unfill_black_screen(void) { return; }
#else
/*
 * unfill_black_screen - stop forcing the VG2 pipe to a constant color by
 * clearing the solid-fill bit (bit 22) of MDP_VG2_SRC_FORMAT, then flush
 * VG2 so the change takes effect.
 */
void unfill_black_screen(void)
{
	uint32 temp_src_format;
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	/*
	 * VG2 Constant Color
	 */
	temp_src_format = inpdw(MDP_BASE + 0x30050);
	MDP_OUTP(MDP_BASE + 0x30050, temp_src_format&(~BIT(22)));
	/*
	 * MDP_OVERLAY_REG_FLUSH
	 */
	MDP_OUTP(MDP_BASE + 0x18000, BIT(3));
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	return;
}
#endif
#ifdef CONFIG_FB_MSM_HDMI_3D
/* HDMI-3D builds stub this out. */
void fill_black_screen(void) { return; }
#else
/*
 * fill_black_screen - force the VG2 pipe to output constant black by
 * programming the constant color register and setting the solid-fill bit
 * (bit 22) of MDP_VG2_SRC_FORMAT, then flush VG2.
 */
void fill_black_screen(void)
{
	/*Black color*/
	uint32 color = 0x00000000;
	uint32 temp_src_format;
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	/*
	 * VG2 Constant Color
	 */
	MDP_OUTP(MDP_BASE + 0x31008, color);
	/*
	 * MDP_VG2_SRC_FORMAT
	 */
	temp_src_format = inpdw(MDP_BASE + 0x30050);
	MDP_OUTP(MDP_BASE + 0x30050, temp_src_format | BIT(22));
	/*
	 * MDP_OVERLAY_REG_FLUSH
	 */
	MDP_OUTP(MDP_BASE + 0x18000, BIT(3));
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	return;
}
#endif
/*
 * mdp4_overlay_dmae_xy - program DMA_E source/destination addressing.
 *
 * When BLT mode is active the source ping-pongs between the two halves of
 * the blt buffer based on ov_cnt parity; otherwise the pipe's plane-0
 * buffer is scanned out directly.
 */
void mdp4_overlay_dmae_xy(struct mdp4_overlay_pipe *pipe)
{
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	MDP_OUTP(MDP_BASE + 0xb0004,
		(pipe->src_height << 16 | pipe->src_width));
	if (pipe->dma_blt_addr) {
		uint32 off, bpp;
#ifdef BLT_RGB565
		bpp = 2; /* overlay output is RGB565 */
#else
		bpp = 3; /* overlay output is RGB888 */
#endif
		off = 0;
		/* odd frames read from the second half of the blt buffer */
		if (pipe->ov_cnt & 0x01)
			off = pipe->src_height * pipe->src_width * bpp;
		MDP_OUTP(MDP_BASE + 0xb0008, pipe->dma_blt_addr + off);
		/* RGB888, output of overlay blending */
		MDP_OUTP(MDP_BASE + 0xb000c, pipe->src_width * bpp);
	} else {
		/* dma_e source */
		MDP_OUTP(MDP_BASE + 0xb0008, pipe->srcp0_addr);
		MDP_OUTP(MDP_BASE + 0xb000c, pipe->srcp0_ystride);
	}
	/* dma_e dest */
	MDP_OUTP(MDP_BASE + 0xb0010, (pipe->dst_y << 16 | pipe->dst_x));
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
/*
 * mdp4_overlay_dmap_cfg - configure the DMA_P (primary) output block.
 * @lcdc: non-zero for LCDC-style (video mode) panels; enables MSB pack
 *        alignment except on Chimei WXGA panels.
 *
 * Only the low 28 bits are owned here; the current register's upper bits
 * are preserved.  The result is cached in ctrl->dmap_cfg[0] for
 * mdp4_overlay_dmap_reconfig().
 */
void mdp4_overlay_dmap_cfg(struct msm_fb_data_type *mfd, int lcdc)
{
	uint32 dma2_cfg_reg;
	uint32 mask, curr;
	dma2_cfg_reg = DMA_DITHER_EN;
#ifdef BLT_RGB565
	/* RGB888 is 0 */
	dma2_cfg_reg |= DMA_BUF_FORMAT_RGB565; /* blt only */
#endif
	if (mfd->fb_imgType == MDP_BGR_565)
		dma2_cfg_reg |= DMA_PACK_PATTERN_BGR;
	else
		dma2_cfg_reg |= DMA_PACK_PATTERN_RGB;
	/* MIPI panels always get full 888; otherwise follow panel bpp */
	if ((mfd->panel_info.type == MIPI_CMD_PANEL) ||
		(mfd->panel_info.type == MIPI_VIDEO_PANEL)) {
		dma2_cfg_reg |= DMA_DSTC0G_8BITS | /* 888 24BPP */
			DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS;
	} else if (mfd->panel_info.bpp == 18) {
		dma2_cfg_reg |= DMA_DSTC0G_6BITS | /* 666 18BPP */
			DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
	} else if (mfd->panel_info.bpp == 16) {
		dma2_cfg_reg |= DMA_DSTC0G_6BITS | /* 565 16BPP */
			DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
	} else {
		dma2_cfg_reg |= DMA_DSTC0G_8BITS | /* 888 24BPP */
			DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS;
	}
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
#ifndef CONFIG_FB_MSM_LCDC_CHIMEI_WXGA_PANEL
	if (lcdc)
		dma2_cfg_reg |= DMA_PACK_ALIGN_MSB;
#endif
	/* dma2 config register: merge with bits owned elsewhere */
	curr = inpdw(MDP_BASE + 0x90000);
	mask = 0x0FFFFFFF;
	dma2_cfg_reg = (dma2_cfg_reg & mask) | (curr & ~mask);
	MDP_OUTP(MDP_BASE + 0x90000, dma2_cfg_reg);
	ctrl->dmap_cfg[0] = dma2_cfg_reg;
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
/*
 * mdp4_overlay_dmap_xy: called from baselayer only.
 *
 * Programs DMA_P source/stride/size/destination registers and mirrors
 * every value into ctrl->dmap_cfg[] so mdp4_overlay_dmap_reconfig() can
 * restore them.  Skips the power handshake when called from IRQ context.
 */
void mdp4_overlay_dmap_xy(struct mdp4_overlay_pipe *pipe)
{
	uint32 off, bpp;
	if (!in_interrupt())
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	if (pipe->dma_blt_addr) {
#ifdef BLT_RGB565
		bpp = 2; /* overlay output is RGB565 */
#else
		bpp = 3; /* overlay output is RGB888 */
#endif
		off = 0;
		/* odd frames read from the second half of the blt buffer */
		if (pipe->dmap_cnt & 0x01)
			off = pipe->src_height * pipe->src_width * bpp;
		ctrl->dmap_cfg[2] = pipe->dma_blt_addr + off;
		MDP_OUTP(MDP_BASE + 0x90008, pipe->dma_blt_addr + off);
		/* RGB888, output of overlay blending */
		MDP_OUTP(MDP_BASE + 0x9000c, pipe->src_width * bpp);
		ctrl->dmap_cfg[3] = pipe->src_width * bpp;
	} else {
		MDP_OUTP(MDP_BASE + 0x90008, pipe->srcp0_addr);
		ctrl->dmap_cfg[2] = pipe->srcp0_addr;
		MDP_OUTP(MDP_BASE + 0x9000c, pipe->srcp0_ystride);
		ctrl->dmap_cfg[3] = pipe->srcp0_ystride;
	}
	/* dma_p source */
	MDP_OUTP(MDP_BASE + 0x90004,
		(pipe->src_height << 16 | pipe->src_width));
	ctrl->dmap_cfg[1] = (pipe->src_height << 16 | pipe->src_width);
	/* dma_p dest */
	MDP_OUTP(MDP_BASE + 0x90010, (pipe->dst_y << 16 | pipe->dst_x));
	ctrl->dmap_cfg[4] = (pipe->dst_y << 16 | pipe->dst_x);
	if (!in_interrupt())
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
/* Re-program the DMA_P registers (0x90000..0x90010) from the cached
 * shadow values saved by mdp4_overlay_dmap_cfg()/dmap_xy(). */
static void mdp4_overlay_dmap_reconfig(void)
{
	int i;

	for (i = 0; i < 5; i++)
		MDP_OUTP(MDP_BASE + 0x90000 + (i << 2), ctrl->dmap_cfg[i]);
}
#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000
#define MDP4_VG_PHASE_STEP_SHIFT 29
/* Count the leading zero bits of num; returns 32 when num == 0. */
static int mdp4_leading_0(uint32 num)
{
	int nz;

	for (nz = 0; nz < 32; nz++) {
		if (num & (0x80000000u >> nz))
			break;
	}
	return nz;
}
/*
 * mdp4_scale_phase_step - compute src/dst as a fixed-point ratio with
 * f_num fractional bits.
 *
 * src is pre-shifted left as far as its leading zeros allow (bounded by
 * f_num) to keep precision in the integer division; any shift budget not
 * used up front is applied to the quotient, with the remainder
 * contributing the low fractional bits.
 */
static uint32 mdp4_scale_phase_step(int f_num, uint32 src, uint32 dst)
{
	uint32 val, s;
	int n;
	n = mdp4_leading_0(src);
	if (n > f_num)
		n = f_num;
	s = src << n; /* maximum to reduce lose of resolution */
	val = s / dst;
	if (n < f_num) {
		n = f_num - n;
		val <<= n;
		val |= ((s % dst) << n) / dst;
	}
	return val;
}
/*
 * mdp4_scale_setup - choose scaling mode and phase steps for a pipe.
 *
 * Enables X/Y scaling whenever src and dst dimensions differ, selecting
 * pixel-repeat, M/N-phase or FIR filtering for video pipes and
 * enhanced/bilinear scaling for RGB pipes.
 *
 * NOTE(review): if the Y upscale exceeds the hardware maximum the early
 * return also skips the X-axis setup entirely — confirm this is the
 * intended behavior rather than `continue`-style handling.
 */
static void mdp4_scale_setup(struct mdp4_overlay_pipe *pipe)
{
	pipe->phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
	pipe->phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
	if (pipe->dst_h && pipe->src_h != pipe->dst_h) {
		u32 upscale_max;
		upscale_max = (mdp_rev >= MDP_REV_41) ?
			MDP4_REV41_OR_LATER_UP_SCALING_MAX :
			MDP4_REV40_UP_SCALING_MAX;
		if (pipe->dst_h > pipe->src_h * upscale_max)
			return;
		pipe->op_mode |= MDP4_OP_SCALEY_EN;
		if (pipe->pipe_type == OVERLAY_TYPE_VIDEO) {
			/* pixel repeat for upscaled, alpha-blended backend
			 * composition; M/N phase for >4x downscale; FIR
			 * otherwise */
			if (pipe->flags & MDP_BACKEND_COMPOSITION &&
				pipe->alpha_enable && pipe->dst_h > pipe->src_h)
				pipe->op_mode |= MDP4_OP_SCALEY_PIXEL_RPT;
			else if (pipe->dst_h <= (pipe->src_h / 4))
				pipe->op_mode |= MDP4_OP_SCALEY_MN_PHASE;
			else
				pipe->op_mode |= MDP4_OP_SCALEY_FIR;
		} else { /* RGB pipe */
			pipe->op_mode |= MDP4_OP_SCALE_RGB_ENHANCED |
				MDP4_OP_SCALE_RGB_BILINEAR |
				MDP4_OP_SCALE_ALPHA_BILINEAR;
		}
		pipe->phasey_step = mdp4_scale_phase_step(29,
			pipe->src_h, pipe->dst_h);
	}
	if (pipe->dst_w && pipe->src_w != pipe->dst_w) {
		u32 upscale_max;
		upscale_max = (mdp_rev >= MDP_REV_41) ?
			MDP4_REV41_OR_LATER_UP_SCALING_MAX :
			MDP4_REV40_UP_SCALING_MAX;
		if (pipe->dst_w > pipe->src_w * upscale_max)
			return;
		pipe->op_mode |= MDP4_OP_SCALEX_EN;
		if (pipe->pipe_type == OVERLAY_TYPE_VIDEO) {
			if (pipe->flags & MDP_BACKEND_COMPOSITION &&
				pipe->alpha_enable && pipe->dst_w > pipe->src_w)
				pipe->op_mode |= MDP4_OP_SCALEX_PIXEL_RPT;
			else if (pipe->dst_w <= (pipe->src_w / 4))
				pipe->op_mode |= MDP4_OP_SCALEX_MN_PHASE;
			else
				pipe->op_mode |= MDP4_OP_SCALEX_FIR;
		} else { /* RGB pipe */
			pipe->op_mode |= MDP4_OP_SCALE_RGB_ENHANCED |
				MDP4_OP_SCALE_RGB_BILINEAR |
				MDP4_OP_SCALE_ALPHA_BILINEAR;
		}
		pipe->phasex_step = mdp4_scale_phase_step(29,
			pipe->src_w, pipe->dst_w);
	}
}
/*
 * mdp4_overlay_solidfill_init - put all four video pipes into black
 * solid-fill with this pipe's geometry, and arm a full flush on the
 * primary mixer so the configuration takes effect at the next commit.
 */
void mdp4_overlay_solidfill_init(struct mdp4_overlay_pipe *pipe)
{
	char *base;
	uint32 src_size, src_xy, dst_size, dst_xy;
	uint32 format;
	uint32 off;
	int i;
	src_size = ((pipe->src_h << 16) | pipe->src_w);
	src_xy = ((pipe->src_y << 16) | pipe->src_x);
	dst_size = ((pipe->dst_h << 16) | pipe->dst_w);
	dst_xy = ((pipe->dst_y << 16) | pipe->dst_x);
	base = MDP_BASE + MDP4_VIDEO_BASE;
	off = MDP4_VIDEO_OFF; /* 0x10000 */
	mdp_clk_ctrl(1);
	for (i = 0; i < 4; i++) { /* 4 pipes */
		/* preserve the current format, only set the solid-fill bit */
		format = inpdw(base + 0x50);
		format |= MDP4_FORMAT_SOLID_FILL;
		outpdw(base + 0x0000, src_size);/* MDP_RGB_SRC_SIZE */
		outpdw(base + 0x0004, src_xy); /* MDP_RGB_SRC_XY */
		outpdw(base + 0x0008, dst_size);/* MDP_RGB_DST_SIZE */
		outpdw(base + 0x000c, dst_xy); /* MDP_RGB_DST_XY */
		outpdw(base + 0x0050, format);/* MDP_RGB_SRC_FORMAT */
		outpdw(base + 0x1008, 0x0);/* Black */
		base += off;
	}
	/*
	 * keep it at primary
	 * will be picked up at first commit
	 */
	ctrl->flush[MDP4_MIXER0] = 0x3c; /* all pipes */
	mdp_clk_ctrl(0);
}
/*
 * mdp4_overlay_rgb_setup - program an RGB pipe's registers from the
 * pipe's software state (geometry, format, scaling, op mode).
 *
 * For source coordinates beyond the 11-bit hardware limit (0x7FF) the
 * offset is folded into the fetch address and the XY register field is
 * cleared instead.
 */
void mdp4_overlay_rgb_setup(struct mdp4_overlay_pipe *pipe)
{
	char *rgb_base;
	uint32 src_size, src_xy, dst_size, dst_xy;
	uint32 format, pattern;
	uint32 curr, mask;
	uint32 offset = 0;
	int pnum;
	pnum = pipe->pipe_num - OVERLAY_PIPE_RGB1; /* start from 0 */
	rgb_base = MDP_BASE + MDP4_RGB_BASE;
	rgb_base += (MDP4_RGB_OFF * pnum);
	src_size = ((pipe->src_h << 16) | pipe->src_w);
	src_xy = ((pipe->src_y << 16) | pipe->src_x);
	dst_size = ((pipe->dst_h << 16) | pipe->dst_w);
	dst_xy = ((pipe->dst_y << 16) | pipe->dst_x);
	/* fold >11-bit source coordinates into the fetch address */
	if ((pipe->src_x + pipe->src_w) > 0x7FF) {
		offset += pipe->src_x * pipe->bpp;
		src_xy &= 0xFFFF0000;
	}
	if ((pipe->src_y + pipe->src_h) > 0x7FF) {
		offset += pipe->src_y * pipe->src_width * pipe->bpp;
		src_xy &= 0x0000FFFF;
	}
	format = mdp4_overlay_format(pipe);
	pattern = mdp4_overlay_unpack_pattern(pipe);
#ifdef MDP4_IGC_LUT_ENABLE
	pipe->op_mode |= MDP4_OP_IGC_LUT_EN;
#endif
	mdp4_scale_setup(pipe);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	/* Ensure proper covert matrix loaded when color space swaps */
	curr = inpdw(rgb_base + 0x0058);
	/* Don't touch bits you don't want to configure*/
	mask = 0xFFFEFFFF;
	pipe->op_mode = (pipe->op_mode & mask) | (curr & ~mask);
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT)
	/* inverted-panel handling: flip RGB1/RGB2 layers upside down on the
	 * local panel, not on DTV/writeback clones (QCT pre-patch) */
	if((pipe->mfd->panel_info.type != DTV_PANEL)&&(pipe->mfd->panel_info.type != WRITEBACK_PANEL))
	{
		if (panel_rotate_180 && (pipe->pipe_num == OVERLAY_PIPE_RGB1 || pipe->pipe_num == OVERLAY_PIPE_RGB2))
		{
			uint32 op_mode = pipe->op_mode | MDP4_OP_FLIP_UD | MDP4_OP_SCALEY_EN;
			/* a caller-requested UD flip cancels the panel flip */
			if (pipe->ext_flag & MDP_FLIP_UD)
				op_mode &= ~MDP4_OP_FLIP_UD;
			pipe->op_mode = op_mode;
		}
		/* flipping vertically also mirrors the destination Y */
		if ((pipe->op_mode & MDP4_OP_FLIP_UD) && pipe->mfd)
			dst_xy = (((pipe->mfd->panel_info.yres - pipe->dst_y - pipe->dst_h) << 16) | pipe->dst_x);
	}
	if (!pipe->mfd)
		pr_err("rgb mfd is not set\n");
#endif
	outpdw(rgb_base + 0x0000, src_size); /* MDP_RGB_SRC_SIZE */
	outpdw(rgb_base + 0x0004, src_xy); /* MDP_RGB_SRC_XY */
	outpdw(rgb_base + 0x0008, dst_size); /* MDP_RGB_DST_SIZE */
	outpdw(rgb_base + 0x000c, dst_xy); /* MDP_RGB_DST_XY */
	outpdw(rgb_base + 0x0010, pipe->srcp0_addr + offset);
	outpdw(rgb_base + 0x0040, pipe->srcp0_ystride);
	outpdw(rgb_base + 0x0050, format);/* MDP_RGB_SRC_FORMAT */
	outpdw(rgb_base + 0x0054, pattern);/* MDP_RGB_SRC_UNPACK_PATTERN */
	if (format & MDP4_FORMAT_SOLID_FILL) {
		/* solid fill must not flip or scale */
		u32 op_mode = pipe->op_mode;
		op_mode &= ~(MDP4_OP_FLIP_LR + MDP4_OP_SCALEX_EN);
		op_mode &= ~(MDP4_OP_FLIP_UD + MDP4_OP_SCALEY_EN);
		outpdw(rgb_base + 0x0058, op_mode);/* MDP_RGB_OP_MODE */
	} else {
		if (pipe->op_mode & MDP4_OP_FLIP_LR && mdp_rev >= MDP_REV_42) {
			/* Enable x-scaling bit to enable LR flip */
			/* for MDP > 4.2 targets */
			pipe->op_mode |= 0x01;
		}
		outpdw(rgb_base + 0x0058, pipe->op_mode);/* MDP_RGB_OP_MODE */
	}
	outpdw(rgb_base + 0x005c, pipe->phasex_step);
	outpdw(rgb_base + 0x0060, pipe->phasey_step);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	mdp4_stat.pipe[pipe->pipe_num]++;
}
/*
 * mdp4_overlay_vg_get_src_offset - fold the source X (and, for pseudo-
 * planar formats, Y) offset into the luma/chroma fetch addresses so the
 * MDP does not overfetch pixels left of the crop window.
 *
 * Only applies to linear frames with a non-zero src_x; the XY register
 * is rewritten with X cleared.  NOTE: for YCbYCr/YCrYCb an odd src_x is
 * rounded up in place (pipe->src_x is mutated).
 */
static void mdp4_overlay_vg_get_src_offset(struct mdp4_overlay_pipe *pipe,
	char *vg_base, uint32 *luma_off, uint32 *chroma_off)
{
	uint32 src_xy;
	*luma_off = 0;
	*chroma_off = 0;
	if (pipe->src_x && (pipe->frame_format ==
		MDP4_FRAME_FORMAT_LINEAR)) {
		src_xy = (pipe->src_y << 16) | pipe->src_x;
		src_xy &= 0xffff0000;
		outpdw(vg_base + 0x0004, src_xy); /* MDP_RGB_SRC_XY */
		switch (pipe->src_format) {
		case MDP_Y_CR_CB_H2V2:
		case MDP_Y_CR_CB_GH2V2:
		case MDP_Y_CB_CR_H2V2:
			/* planar 4:2:0: chroma planes are half width */
			*luma_off = pipe->src_x;
			*chroma_off = pipe->src_x/2;
			break;
		case MDP_Y_CBCR_H2V2_TILE:
		case MDP_Y_CRCB_H2V2_TILE:
		case MDP_Y_CBCR_H2V2:
		case MDP_Y_CRCB_H2V2:
		case MDP_Y_CRCB_H1V1:
		case MDP_Y_CBCR_H1V1:
		case MDP_Y_CRCB_H2V1:
		case MDP_Y_CBCR_H2V1:
		case MDP_Y_CRCB_H1V2:
		case MDP_Y_CBCR_H1V2:
			/* pseudo planar: Y offset folded in via the stride */
			*luma_off = pipe->src_x +
				(pipe->src_y * pipe->srcp0_ystride);
			*chroma_off = pipe->src_x +
				(pipe->src_y * pipe->srcp1_ystride);
			break;
		case MDP_YCBYCR_H2V1:
		case MDP_YCRYCB_H2V1:
			/* interleaved 4:2:2: x must be even, 2 bytes/pixel */
			if (pipe->src_x & 0x1)
				pipe->src_x += 1;
			*luma_off += pipe->src_x * 2;
			break;
		case MDP_ARGB_8888:
		case MDP_RGBA_8888:
		case MDP_BGRA_8888:
		case MDP_RGBX_8888:
		case MDP_RGB_565:
		case MDP_BGR_565:
		case MDP_XRGB_8888:
		case MDP_RGB_888:
		case MDP_YCBCR_H1V1:
		case MDP_YCRCB_H1V1:
			*luma_off = pipe->src_x * pipe->bpp;
			break;
		default:
			pr_err("%s: fmt %u not supported for adjustment\n",
				__func__, pipe->src_format);
			break;
		}
	}
}
/*
 * mdp4_overlay_vg_setup - program a VG (video/graphics) pipe's registers.
 *
 * Handles post-processing (CSC/QSEED) configuration, scaling, tile frame
 * size pitfalls, source-offset adjustment for cropped frames, and the
 * inverted-panel flip, then writes the full register set for the pipe.
 */
void mdp4_overlay_vg_setup(struct mdp4_overlay_pipe *pipe)
{
	char *vg_base;
	uint32 frame_size, src_size, src_xy, dst_size, dst_xy;
	uint32 format, pattern, luma_offset, chroma_offset;
	/* 2012-11-29 wonhee.jeong@lge.com this code add to mdp tunning when start DMB in G, GK (apq8064) [S]*/
	/* This source code confirmed by QCT*/
	uint32 mask, curr, addr;
	/* 2012-11-29 wonhee.jeong@lge.com this code add to mdp tunning when start DMB in G, GK (apq8064) [E]*/
	int pnum, ptype, i;
	uint32_t block;
	pnum = pipe->pipe_num - OVERLAY_PIPE_VG1; /* start from 0 */
	vg_base = MDP_BASE + MDP4_VIDEO_BASE;
	vg_base += (MDP4_VIDEO_OFF * pnum);
	frame_size = ((pipe->src_height << 16) | pipe->src_width);
	src_size = ((pipe->src_h << 16) | pipe->src_w);
	src_xy = ((pipe->src_y << 16) | pipe->src_x);
	dst_size = ((pipe->dst_h << 16) | pipe->dst_w);
	dst_xy = ((pipe->dst_y << 16) | pipe->dst_x);
	ptype = mdp4_overlay_format2type(pipe->src_format);
	format = mdp4_overlay_format(pipe);
	pattern = mdp4_overlay_unpack_pattern(pipe);
	/* CSC Post Processing enabled? */
	if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
		if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_CSC_CFG) {
			if (pipe->pp_cfg.csc_cfg.flags & MDP_CSC_FLAG_ENABLE)
				pipe->op_mode |= MDP4_OP_CSC_EN;
			if (pipe->pp_cfg.csc_cfg.flags & MDP_CSC_FLAG_YUV_IN)
				pipe->op_mode |= MDP4_OP_SRC_DATA_YCBCR;
			if (pipe->pp_cfg.csc_cfg.flags & MDP_CSC_FLAG_YUV_OUT)
				pipe->op_mode |= MDP4_OP_DST_DATA_YCBCR;
			mdp4_csc_write(&pipe->pp_cfg.csc_cfg,
				(uint32_t) (vg_base + MDP4_VIDEO_CSC_OFF));
			if (pipe->pipe_num == OVERLAY_PIPE_VG1)
				block = MDP_BLOCK_VG_1;
			else
				block = MDP_BLOCK_VG_2;
			/* cache the user CSC matrix for this VG block */
			for (i = 0; i < CSC_MAX_BLOCKS; i++) {
				if (block == csc_cfg_matrix[i].block) {
					memcpy(&csc_cfg_matrix[i].csc_data,
					&(pipe->pp_cfg.csc_cfg),
					sizeof(struct mdp_csc_cfg));
					break;
				}
			}
		}
		if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_QSEED_CFG) {
			mdp4_qseed_access_cfg(&pipe->pp_cfg.qseed_cfg[0],
				(uint32_t) vg_base);
			mdp4_qseed_access_cfg(&pipe->pp_cfg.qseed_cfg[1],
				(uint32_t) vg_base);
		}
	}
	/* not RGB use VG pipe, pure VG pipe */
	if (ptype != OVERLAY_TYPE_RGB)
		pipe->op_mode |= (MDP4_OP_CSC_EN | MDP4_OP_SRC_DATA_YCBCR);
#ifdef MDP4_IGC_LUT_ENABLE
	pipe->op_mode |= MDP4_OP_IGC_LUT_EN;
#endif
	mdp4_scale_setup(pipe);
	luma_offset = 0;
	chroma_offset = 0;
	/* fold >11-bit source coordinates into the fetch address */
	if (ptype == OVERLAY_TYPE_RGB) {
		if ((pipe->src_y + pipe->src_h) > 0x7FF) {
			luma_offset = pipe->src_y * pipe->src_width * pipe->bpp;
			src_xy &= 0x0000FFFF;
		}
		if ((pipe->src_x + pipe->src_w) > 0x7FF) {
			luma_offset += pipe->src_x * pipe->bpp;
			src_xy &= 0xFFFF0000;
		}
	}
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT)
	/* inverted-panel handling: flip layers on the local panel only,
	 * not on DTV/writeback clones (QCT pre-patch) */
	if((pipe->mfd->panel_info.type != DTV_PANEL) && (pipe->mfd->panel_info.type != WRITEBACK_PANEL))
	{
		if (panel_rotate_180)
		{
			uint32 op_mode = pipe->op_mode | MDP4_OP_FLIP_UD;
			/* a caller-requested UD flip cancels the panel flip */
			if (pipe->ext_flag & MDP_FLIP_UD)
				op_mode &= ~MDP4_OP_FLIP_UD;
			pipe->op_mode = op_mode;
		}
		if ((pipe->op_mode & MDP4_OP_FLIP_UD) && pipe->mfd)
		{
			/* mirror the destination Y to match the flip */
			dst_xy = (((pipe->mfd->panel_info.yres - pipe->dst_y - pipe->dst_h) << 16) | pipe->dst_x);
			outpdw(MDP_BASE + 0xE0044, 0xe0fff);
		}
	}
	if (!pipe->mfd)
		pr_err("vg mfd is not set\n");
#endif
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	outpdw(vg_base + 0x0000, src_size); /* MDP_RGB_SRC_SIZE */
	outpdw(vg_base + 0x0004, src_xy); /* MDP_RGB_SRC_XY */
	outpdw(vg_base + 0x0008, dst_size); /* MDP_RGB_DST_SIZE */
	outpdw(vg_base + 0x000c, dst_xy); /* MDP_RGB_DST_XY */
	if (pipe->frame_format != MDP4_FRAME_FORMAT_LINEAR) {
		struct mdp4_overlay_pipe *real_pipe;
		u32 psize, csize;
		/*
		 * video tile frame size register is NOT double buffered.
		 * when this register updated, it kicks in immediatly
		 * During transition from smaller resolution to higher
		 * resolution it may have possibility that mdp still fetch
		 * from smaller resolution buffer with new higher resolution
		 * frame size. This will cause iommu page fault.
		 */
		real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
		psize = real_pipe->prev_src_height * real_pipe->prev_src_width;
		csize = pipe->src_height * pipe->src_width;
		if (psize && (csize > psize)) {
			frame_size = (real_pipe->prev_src_height << 16 |
				real_pipe->prev_src_width);
		}
		outpdw(vg_base + 0x0048, frame_size); /* TILE frame size */
		real_pipe->prev_src_height = pipe->src_height;
		real_pipe->prev_src_width = pipe->src_width;
	}
	/*
	 * Adjust src X offset to avoid MDP from overfetching pixels
	 * present before the offset. This is required for video
	 * frames coming with unused green pixels along the left margin
	 */
	/* not RGB use VG pipe, pure VG pipe */
	if (ptype != OVERLAY_TYPE_RGB) {
		mdp4_overlay_vg_get_src_offset(pipe, vg_base, &luma_offset,
			&chroma_offset);
	}
	/* 2012-11-29 wonhee.jeong@lge.com this code add to mdp tunning when start DMB in G, GK (apq8064) [S]*/
	/* This source code confirmed by QCT*/
	/* Ensure proper covert matrix loaded when color space swaps */
	curr = inpdw(vg_base + 0x0058);
	mask = 0x600;
	if ((curr & mask) != (pipe->op_mode & mask)) {
		/* color space changed: reload the default convert matrix */
		addr = ((uint32_t)vg_base) + 0x4000;
		if (ptype != OVERLAY_TYPE_RGB)
			mdp4_csc_write(&(mdp_csc_convert[1]), addr);
		else
			mdp4_csc_write(&(mdp_csc_convert[0]), addr);
		mask = 0xFFFCFFFF;
	} else {
		/* Don't touch bits you don't want to configure*/
		mask = 0xFFFCF1FF;
	}
	pipe->op_mode = (pipe->op_mode & mask) | (curr & ~mask);
	/* 2012-11-29 wonhee.jeong@lge.com this code add to mdp tunning when start DMB in G, GK (apq8064) [E]*/
	/* luma component plane */
	outpdw(vg_base + 0x0010, pipe->srcp0_addr + luma_offset);
	/* chroma component plane or planar color 1 */
	outpdw(vg_base + 0x0014, pipe->srcp1_addr + chroma_offset);
	/* planar color 2 */
	outpdw(vg_base + 0x0018, pipe->srcp2_addr + chroma_offset);
	outpdw(vg_base + 0x0040,
		pipe->srcp1_ystride << 16 | pipe->srcp0_ystride);
	outpdw(vg_base + 0x0044,
		pipe->srcp3_ystride << 16 | pipe->srcp2_ystride);
	outpdw(vg_base + 0x0050, format); /* MDP_RGB_SRC_FORMAT */
	outpdw(vg_base + 0x0054, pattern); /* MDP_RGB_SRC_UNPACK_PATTERN */
	if (format & MDP4_FORMAT_SOLID_FILL) {
		/* solid fill must not flip or scale */
		u32 op_mode = pipe->op_mode;
		op_mode &= ~(MDP4_OP_FLIP_LR + MDP4_OP_SCALEX_EN);
		op_mode &= ~(MDP4_OP_FLIP_UD + MDP4_OP_SCALEY_EN);
		outpdw(vg_base + 0x0058, op_mode);/* MDP_RGB_OP_MODE */
	} else
		outpdw(vg_base + 0x0058, pipe->op_mode);/* MDP_RGB_OP_MODE */
	outpdw(vg_base + 0x005c, pipe->phasex_step);
	outpdw(vg_base + 0x0060, pipe->phasey_step);
	if (pipe->op_mode & MDP4_OP_DITHER_EN) {
		outpdw(vg_base + 0x0068,
			pipe->r_bit << 4 | pipe->b_bit << 2 | pipe->g_bit);
	}
	if (mdp_rev > MDP_REV_41) {
		/* mdp chip select controller */
		mask = 0;
		if (pipe->pipe_num == OVERLAY_PIPE_VG1)
			mask = 0x020; /* bit 5 */
		else if (pipe->pipe_num == OVERLAY_PIPE_VG2)
			mask = 0x02000; /* bit 13 */
		if (mask) {
			if (pipe->op_mode & MDP4_OP_SCALEY_MN_PHASE)
				ctrl->cs_controller &= ~mask;
			else
				ctrl->cs_controller |= mask;
			/* NOT double buffered */
			outpdw(MDP_BASE + 0x00c0, ctrl->cs_controller);
		}
	}
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	mdp4_stat.pipe[pipe->pipe_num]++;
}
/*
 * mdp4_overlay_format2type - map an MDP pixel format to the pipe class
 * (RGB, VIDEO, or BF borderfill) that must fetch it.
 * Returns -ERANGE for unknown formats and bumps the error counter.
 */
int mdp4_overlay_format2type(uint32 format)
{
	switch (format) {
	case MDP_RGB_565:
	case MDP_RGB_888:
	case MDP_BGR_565:
	case MDP_XRGB_8888:
	case MDP_ARGB_8888:
	case MDP_RGBA_8888:
	case MDP_BGRA_8888:
	case MDP_RGBX_8888:
		return OVERLAY_TYPE_RGB;
	case MDP_YCBYCR_H2V1:
	case MDP_YCRYCB_H2V1:
	case MDP_Y_CRCB_H2V1:
	case MDP_Y_CBCR_H2V1:
	case MDP_Y_CRCB_H1V2:
	case MDP_Y_CBCR_H1V2:
	case MDP_Y_CRCB_H2V2:
	case MDP_Y_CBCR_H2V2:
	case MDP_Y_CBCR_H2V2_TILE:
	case MDP_Y_CRCB_H2V2_TILE:
	case MDP_Y_CR_CB_H2V2:
	case MDP_Y_CR_CB_GH2V2:
	case MDP_Y_CB_CR_H2V2:
	case MDP_Y_CRCB_H1V1:
	case MDP_Y_CBCR_H1V1:
	case MDP_YCRCB_H1V1:
	case MDP_YCBCR_H1V1:
		return OVERLAY_TYPE_VIDEO;
	case MDP_RGB_BORDERFILL:
		return OVERLAY_TYPE_BF;
	default:
		mdp4_stat.err_format++;
		return -ERANGE;
	}
}
#define C3_ALPHA 3 /* alpha */
#define C2_R_Cr 2 /* R/Cr */
#define C1_B_Cb 1 /* B/Cb */
#define C0_G_Y 0 /* G/luma */
#define YUV_444_MAX_WIDTH 1280 /* Max width for YUV 444*/
/*
 * mdp4_overlay_format2pipe - derive per-component fetch/unpack settings
 * (bit depths, element order, chroma sampling, bpp) from the pipe's
 * source format.  Returns 0 on success, -ERANGE for unknown formats.
 *
 * Component depth codes: 0 = 1 bit ... 3 = 8 bits.
 */
int mdp4_overlay_format2pipe(struct mdp4_overlay_pipe *pipe)
{
	switch (pipe->src_format) {
	case MDP_RGB_565:
		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
		pipe->a_bit = 0;
		pipe->r_bit = 1; /* R, 5 bits */
		pipe->b_bit = 1; /* B, 5 bits */
		pipe->g_bit = 2; /* G, 6 bits */
		pipe->alpha_enable = 0;
		pipe->unpack_tight = 1;
		pipe->unpack_align_msb = 0;
		pipe->unpack_count = 2;
		pipe->element2 = C2_R_Cr; /* R */
		pipe->element1 = C0_G_Y; /* G */
		pipe->element0 = C1_B_Cb; /* B */
		pipe->bpp = 2; /* 2 bpp */
		pipe->chroma_sample = MDP4_CHROMA_RGB;
		break;
	case MDP_RGB_888:
		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
		pipe->a_bit = 0;
		pipe->r_bit = 3; /* R, 8 bits */
		pipe->b_bit = 3; /* B, 8 bits */
		pipe->g_bit = 3; /* G, 8 bits */
		pipe->alpha_enable = 0;
		pipe->unpack_tight = 1;
		pipe->unpack_align_msb = 0;
		pipe->unpack_count = 2;
		pipe->element2 = C1_B_Cb; /* B */
		pipe->element1 = C0_G_Y; /* G */
		pipe->element0 = C2_R_Cr; /* R */
		pipe->bpp = 3; /* 3 bpp */
		pipe->chroma_sample = MDP4_CHROMA_RGB;
		break;
	case MDP_BGR_565:
		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
		pipe->a_bit = 0;
		pipe->r_bit = 1; /* R, 5 bits */
		pipe->b_bit = 1; /* B, 5 bits */
		pipe->g_bit = 2; /* G, 6 bits */
		pipe->alpha_enable = 0;
		pipe->unpack_tight = 1;
		pipe->unpack_align_msb = 0;
		pipe->unpack_count = 2;
		pipe->element2 = C1_B_Cb; /* B */
		pipe->element1 = C0_G_Y; /* G */
		pipe->element0 = C2_R_Cr; /* R */
		pipe->bpp = 2; /* 2 bpp */
		pipe->chroma_sample = MDP4_CHROMA_RGB;
		break;
	case MDP_XRGB_8888:
		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
		pipe->a_bit = 3; /* alpha, 8 bits (ignored: X channel) */
		pipe->r_bit = 3; /* R, 8 bits */
		pipe->b_bit = 3; /* B, 8 bits */
		pipe->g_bit = 3; /* G, 8 bits */
		pipe->alpha_enable = 0;
		pipe->unpack_tight = 1;
		pipe->unpack_align_msb = 0;
		pipe->unpack_count = 3;
		pipe->element3 = C1_B_Cb; /* B */
		pipe->element2 = C0_G_Y; /* G */
		pipe->element1 = C2_R_Cr; /* R */
		pipe->element0 = C3_ALPHA; /* alpha */
		pipe->bpp = 4; /* 4 bpp */
		pipe->chroma_sample = MDP4_CHROMA_RGB;
		break;
	case MDP_ARGB_8888:
		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
		pipe->a_bit = 3; /* alpha, 8 bits */
		pipe->r_bit = 3; /* R, 8 bits */
		pipe->b_bit = 3; /* B, 8 bits */
		pipe->g_bit = 3; /* G, 8 bits */
		pipe->alpha_enable = 1;
		pipe->unpack_tight = 1;
		pipe->unpack_align_msb = 0;
		pipe->unpack_count = 3;
		pipe->element3 = C1_B_Cb; /* B */
		pipe->element2 = C0_G_Y; /* G */
		pipe->element1 = C2_R_Cr; /* R */
		pipe->element0 = C3_ALPHA; /* alpha */
		pipe->bpp = 4; /* 4 bpp */
		pipe->chroma_sample = MDP4_CHROMA_RGB;
		break;
	case MDP_RGBA_8888:
		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
		pipe->a_bit = 3; /* alpha, 8 bits */
		pipe->r_bit = 3; /* R, 8 bits */
		pipe->b_bit = 3; /* B, 8 bits */
		pipe->g_bit = 3; /* G, 8 bits */
		pipe->alpha_enable = 1;
		pipe->unpack_tight = 1;
		pipe->unpack_align_msb = 0;
		pipe->unpack_count = 3;
		pipe->element3 = C3_ALPHA; /* alpha */
		pipe->element2 = C1_B_Cb; /* B */
		pipe->element1 = C0_G_Y; /* G */
		pipe->element0 = C2_R_Cr; /* R */
		pipe->bpp = 4; /* 4 bpp */
		pipe->chroma_sample = MDP4_CHROMA_RGB;
		break;
	case MDP_RGBX_8888:
		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
		pipe->a_bit = 3;
		pipe->r_bit = 3; /* R, 8 bits */
		pipe->b_bit = 3; /* B, 8 bits */
		pipe->g_bit = 3; /* G, 8 bits */
		pipe->alpha_enable = 0;
		pipe->unpack_tight = 1;
		pipe->unpack_align_msb = 0;
		pipe->unpack_count = 3;
		pipe->element3 = C3_ALPHA; /* alpha */
		pipe->element2 = C1_B_Cb; /* B */
		pipe->element1 = C0_G_Y; /* G */
		pipe->element0 = C2_R_Cr; /* R */
		pipe->bpp = 4; /* 4 bpp */
		pipe->chroma_sample = MDP4_CHROMA_RGB;
		break;
	case MDP_BGRA_8888:
		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
		pipe->a_bit = 3; /* alpha, 8 bits */
		pipe->r_bit = 3; /* R, 8 bits */
		pipe->b_bit = 3; /* B, 8 bits */
		pipe->g_bit = 3; /* G, 8 bits */
		pipe->alpha_enable = 1;
		pipe->unpack_tight = 1;
		pipe->unpack_align_msb = 0;
		pipe->unpack_count = 3;
		pipe->element3 = C3_ALPHA; /* alpha */
		pipe->element2 = C2_R_Cr; /* R */
		pipe->element1 = C0_G_Y; /* G */
		pipe->element0 = C1_B_Cb; /* B */
		pipe->bpp = 4; /* 4 bpp */
		pipe->chroma_sample = MDP4_CHROMA_RGB;
		break;
	case MDP_YCBYCR_H2V1:
	case MDP_YCRYCB_H2V1:
		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
		pipe->a_bit = 0; /* no alpha */
		pipe->r_bit = 3; /* R, 8 bits */
		pipe->b_bit = 3; /* B, 8 bits */
		pipe->g_bit = 3; /* G, 8 bits */
		pipe->alpha_enable = 0;
		pipe->unpack_tight = 1;
		pipe->unpack_align_msb = 0;
		pipe->unpack_count = 3;
		if (pipe->src_format == MDP_YCRYCB_H2V1) {
			pipe->element3 = C0_G_Y; /* G */
			pipe->element2 = C2_R_Cr; /* R */
			pipe->element1 = C0_G_Y; /* G */
			pipe->element0 = C1_B_Cb; /* B */
		} else if (pipe->src_format == MDP_YCBYCR_H2V1) {
			pipe->element3 = C0_G_Y; /* G */
			pipe->element2 = C1_B_Cb; /* B */
			pipe->element1 = C0_G_Y; /* G */
			pipe->element0 = C2_R_Cr; /* R */
		}
		pipe->bpp = 2; /* 2 bpp */
		pipe->chroma_sample = MDP4_CHROMA_H2V1;
		break;
	case MDP_Y_CRCB_H2V1:
	case MDP_Y_CBCR_H2V1:
	case MDP_Y_CRCB_H1V2:
	case MDP_Y_CBCR_H1V2:
	case MDP_Y_CRCB_H2V2:
	case MDP_Y_CBCR_H2V2:
	case MDP_Y_CRCB_H1V1:
	case MDP_Y_CBCR_H1V1:
		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
		pipe->fetch_plane = OVERLAY_PLANE_PSEUDO_PLANAR;
		pipe->a_bit = 0;
		pipe->r_bit = 3; /* R, 8 bits */
		pipe->b_bit = 3; /* B, 8 bits */
		pipe->g_bit = 3; /* G, 8 bits */
		pipe->alpha_enable = 0;
		pipe->unpack_tight = 1;
		pipe->unpack_align_msb = 0;
		pipe->unpack_count = 1; /* 2 */
		if (pipe->src_format == MDP_Y_CRCB_H2V1) {
			pipe->element1 = C1_B_Cb;
			pipe->element0 = C2_R_Cr;
			pipe->chroma_sample = MDP4_CHROMA_H2V1;
		} else if (pipe->src_format == MDP_Y_CRCB_H1V1) {
			pipe->element1 = C1_B_Cb;
			pipe->element0 = C2_R_Cr;
			/* 4:4:4 beyond the max width downgrades to H1V2 */
			if (pipe->src_width > YUV_444_MAX_WIDTH)
				pipe->chroma_sample = MDP4_CHROMA_H1V2;
			else
				pipe->chroma_sample = MDP4_CHROMA_RGB;
		} else if (pipe->src_format == MDP_Y_CBCR_H2V1) {
			pipe->element1 = C2_R_Cr;
			pipe->element0 = C1_B_Cb;
			pipe->chroma_sample = MDP4_CHROMA_H2V1;
		} else if (pipe->src_format == MDP_Y_CBCR_H1V1) {
			pipe->element1 = C2_R_Cr;
			pipe->element0 = C1_B_Cb;
			if (pipe->src_width > YUV_444_MAX_WIDTH)
				pipe->chroma_sample = MDP4_CHROMA_H1V2;
			else
				pipe->chroma_sample = MDP4_CHROMA_RGB;
		} else if (pipe->src_format == MDP_Y_CRCB_H1V2) {
			pipe->element1 = C1_B_Cb;
			pipe->element0 = C2_R_Cr;
			pipe->chroma_sample = MDP4_CHROMA_H1V2;
		} else if (pipe->src_format == MDP_Y_CBCR_H1V2) {
			pipe->element1 = C2_R_Cr;
			pipe->element0 = C1_B_Cb;
			pipe->chroma_sample = MDP4_CHROMA_H1V2;
		} else if (pipe->src_format == MDP_Y_CRCB_H2V2) {
			pipe->element1 = C1_B_Cb;
			pipe->element0 = C2_R_Cr;
			pipe->chroma_sample = MDP4_CHROMA_420;
		} else if (pipe->src_format == MDP_Y_CBCR_H2V2) {
			pipe->element1 = C2_R_Cr;
			pipe->element0 = C1_B_Cb;
			pipe->chroma_sample = MDP4_CHROMA_420;
		}
		pipe->bpp = 2; /* 2 bpp */
		break;
	case MDP_Y_CBCR_H2V2_TILE:
	case MDP_Y_CRCB_H2V2_TILE:
		pipe->frame_format = MDP4_FRAME_FORMAT_VIDEO_SUPERTILE;
		pipe->fetch_plane = OVERLAY_PLANE_PSEUDO_PLANAR;
		pipe->a_bit = 0;
		pipe->r_bit = 3; /* R, 8 bits */
		pipe->b_bit = 3; /* B, 8 bits */
		pipe->g_bit = 3; /* G, 8 bits */
		pipe->alpha_enable = 0;
		pipe->unpack_tight = 1;
		pipe->unpack_align_msb = 0;
		pipe->unpack_count = 1; /* 2 */
		if (pipe->src_format == MDP_Y_CRCB_H2V2_TILE) {
			pipe->element1 = C1_B_Cb; /* B */
			pipe->element0 = C2_R_Cr; /* R */
			pipe->chroma_sample = MDP4_CHROMA_420;
		} else if (pipe->src_format == MDP_Y_CBCR_H2V2_TILE) {
			pipe->element1 = C2_R_Cr; /* R */
			pipe->element0 = C1_B_Cb; /* B */
			pipe->chroma_sample = MDP4_CHROMA_420;
		}
		pipe->bpp = 2; /* 2 bpp */
		break;
	case MDP_Y_CR_CB_H2V2:
	case MDP_Y_CR_CB_GH2V2:
	case MDP_Y_CB_CR_H2V2:
		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
		pipe->fetch_plane = OVERLAY_PLANE_PLANAR;
		pipe->a_bit = 0;
		pipe->r_bit = 3; /* R, 8 bits */
		pipe->b_bit = 3; /* B, 8 bits */
		pipe->g_bit = 3; /* G, 8 bits */
		pipe->alpha_enable = 0;
		pipe->chroma_sample = MDP4_CHROMA_420;
		pipe->bpp = 2; /* 2 bpp */
		break;
	case MDP_YCBCR_H1V1:
	case MDP_YCRCB_H1V1:
		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
		pipe->a_bit = 0;
		pipe->r_bit = 3; /* R, 8 bits */
		pipe->b_bit = 3; /* B, 8 bits */
		pipe->g_bit = 3; /* G, 8 bits */
		pipe->alpha_enable = 0;
		pipe->unpack_tight = 1;
		pipe->unpack_align_msb = 0;
		pipe->unpack_count = 2;
		pipe->element0 = C0_G_Y; /* G */
		if (pipe->src_format == MDP_YCRCB_H1V1) {
			pipe->element1 = C2_R_Cr; /* R */
			pipe->element2 = C1_B_Cb; /* B */
		} else {
			pipe->element1 = C1_B_Cb; /* B */
			pipe->element2 = C2_R_Cr; /* R */
		}
		pipe->bpp = 3; /* 3 bpp */
		/*
		 * Bug fix: a missing break here fell through into the
		 * borderfill case and clobbered alpha/alpha_enable for
		 * YCbCr/YCrCb 4:4:4 sources.
		 */
		break;
	case MDP_RGB_BORDERFILL:
		pipe->alpha_enable = 0;
		pipe->alpha = 0;
		break;
	default:
		/* not likely */
		mdp4_stat.err_format++;
		return -ERANGE;
	}
	return 0;
}
/*
* color_key_convert: output with 12 bits color key
*/
/*
 * color_key_convert - extract one color component from a packed pixel and
 * widen it to the 12-bit precision used by the transparency-key registers.
 * @start: bit position of the component inside @color
 * @num:   component width in bits (5, 6 or 8)
 * @color: packed pixel value
 */
static uint32 color_key_convert(int start, int num, uint32 color)
{
	uint32 comp = (color >> start) & ((1 << num) - 1);

	/* widen 5/6-bit components to 8 bits by replicating the MSBs */
	switch (num) {
	case 5:
		comp = (comp << 3) | (comp >> 2);
		break;
	case 6:
		comp = (comp << 2) | (comp >> 4);
		break;
	default:
		break;
	}

	/* replicate the high nibble to stretch 8 bits to 12 bits */
	return (comp << 4) | (comp >> 4);
}
/*
 * transp_color_key - split a packed transparency-key value into its three
 * components (12 bits each) according to the pixel format's channel layout.
 * @format: MDP source format of the pipe the key applies to
 * @transp: packed transparency color
 * @c0/@c1/@c2: receive the G/luma, B/Cb and R/Cr components respectively
 */
void transp_color_key(int format, uint32 transp,
			uint32 *c0, uint32 *c1, uint32 *c2)
{
	/* defaults match the 888 RGB layout (also used for unknown formats) */
	int r_start = 16, g_start = 8, b_start = 0;
	int r_num = 8, g_num = 8, b_num = 8;

	switch (format) {
	case MDP_RGB_565:
		r_start = 11;
		g_start = 5;
		b_start = 0;
		r_num = 5;
		g_num = 6;
		b_num = 5;
		break;
	case MDP_RGB_888:
	case MDP_XRGB_8888:
	case MDP_ARGB_8888:
	case MDP_BGRA_8888:
		/* same as the defaults above */
		break;
	case MDP_RGBA_8888:
	case MDP_RGBX_8888:
		r_start = 0;
		g_start = 8;
		b_start = 16;
		break;
	case MDP_BGR_565:
		r_start = 0;
		g_start = 5;
		b_start = 11;
		r_num = 5;
		g_num = 6;
		b_num = 5;
		break;
	case MDP_Y_CB_CR_H2V2:
	case MDP_Y_CBCR_H2V2:
	case MDP_Y_CBCR_H2V1:
	case MDP_YCBCR_H1V1:
		r_start = 0;
		g_start = 16;
		b_start = 8;
		break;
	case MDP_Y_CR_CB_H2V2:
	case MDP_Y_CR_CB_GH2V2:
	case MDP_Y_CRCB_H2V2:
	case MDP_Y_CRCB_H2V1:
	case MDP_Y_CRCB_H1V2:
	case MDP_Y_CBCR_H1V2:
	case MDP_Y_CRCB_H1V1:
	case MDP_Y_CBCR_H1V1:
	case MDP_YCRCB_H1V1:
		r_start = 8;
		g_start = 16;
		b_start = 0;
		break;
	default:
		/* keep the 888 defaults */
		break;
	}

	*c0 = color_key_convert(g_start, g_num, transp);
	*c1 = color_key_convert(b_start, b_num, transp);
	*c2 = color_key_convert(r_start, r_num, transp);
}
/*
 * mdp4_overlay_format - assemble the SRC_FORMAT register value for a pipe
 * from its cached format attributes. Pure function of *pipe; no h/w access.
 */
uint32 mdp4_overlay_format(struct mdp4_overlay_pipe *pipe)
{
	uint32 fmt = 0;

	/* boolean feature flags */
	if (pipe->solid_fill)
		fmt |= MDP4_FORMAT_SOLID_FILL;
	if (pipe->unpack_align_msb)
		fmt |= MDP4_FORMAT_UNPACK_ALIGN_MSB;
	if (pipe->unpack_tight)
		fmt |= MDP4_FORMAT_UNPACK_TIGHT;
	if (pipe->alpha_enable)
		fmt |= MDP4_FORMAT_ALPHA_ENABLE;
	if (pipe->flags & MDP_SOURCE_ROTATED_90)
		fmt |= MDP4_FORMAT_90_ROTATED;

	/* multi-bit fields, high bits first */
	fmt |= (pipe->frame_format << 29);
	fmt |= (pipe->chroma_site << 28);
	fmt |= (pipe->chroma_sample << 26);
	fmt |= (pipe->fetch_plane << 19);	/* video/graphic */
	fmt |= (pipe->unpack_count << 13);
	fmt |= ((pipe->bpp - 1) << 9);
	fmt |= (pipe->a_bit << 6);
	fmt |= (pipe->r_bit << 4);
	fmt |= (pipe->b_bit << 2);
	fmt |= pipe->g_bit;

	return fmt;
}
/*
 * mdp4_overlay_unpack_pattern - pack the four unpack elements into the
 * SRC_UNPACK_PATTERN register value (element0 in the low byte).
 */
uint32 mdp4_overlay_unpack_pattern(struct mdp4_overlay_pipe *pipe)
{
	uint32 pattern = pipe->element0;

	pattern |= pipe->element1 << 8;
	pattern |= pipe->element2 << 16;
	pattern |= pipe->element3 << 24;

	return pattern;
}
/*
* mdp4_overlayproc_cfg: only be called from base layer
*/
/*
 * mdp4_overlayproc_cfg - program the OVERLAYPROC block of the mixer that
 * @pipe is staged on: ROI size, output buffer addresses/stride and the
 * direct-out vs. BLT (write-back) operating mode.
 * Only called from the base layer (see comment above).
 */
void mdp4_overlayproc_cfg(struct mdp4_overlay_pipe *pipe)
{
uint32 data, intf;
char *overlay_base;
uint32 curr;
intf = 0;
/* select the OVERLAYPROC register block for this mixer */
if (pipe->mixer_num == MDP4_MIXER2)
overlay_base = MDP_BASE + MDP4_OVERLAYPROC2_BASE;
else if (pipe->mixer_num == MDP4_MIXER1) {
overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
/* mixer1 output interface (bits 5:4 of MDP_DISP_INTF_SEL) */
intf = inpdw(MDP_BASE + 0x0038); /* MDP_DISP_INTF_SEL */
intf >>= 4;
intf &= 0x03;
} else
overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
if (!in_interrupt())
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
/*
 * BLT supports both primary and external interfaces
 */
if (pipe->ov_blt_addr) {
int off, bpp;
#ifdef BLT_RGB565
bpp = 2; /* overlay output is RGB565 */
#else
bpp = 3; /* overlay output is RGB888 */
#endif
data = pipe->src_height;
data <<= 16;
data |= pipe->src_width;
outpdw(overlay_base + 0x0008, data); /* ROI, height + width */
if (pipe->mixer_num == MDP4_MIXER0 ||
pipe->mixer_num == MDP4_MIXER1) {
/* double-buffered BLT: odd frame counts use the second half */
off = 0;
if (pipe->ov_cnt & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
outpdw(overlay_base + 0x000c, pipe->ov_blt_addr + off);
/* output stride in bytes */
outpdw(overlay_base + 0x0010, pipe->src_width * bpp);
outpdw(overlay_base + 0x001c, pipe->ov_blt_addr + off);
/* MDDI - BLT + on demand */
outpdw(overlay_base + 0x0004, 0x08);
/* preserve only bit 2 of the output format register */
curr = inpdw(overlay_base + 0x0014);
curr &= 0x4;
#ifdef BLT_RGB565
outpdw(overlay_base + 0x0014, curr | 0x1); /* RGB565 */
#else
outpdw(overlay_base + 0x0014, curr | 0x0); /* RGB888 */
#endif
} else if (pipe->mixer_num == MDP4_MIXER2) {
if (ctrl->panel_mode & MDP4_PANEL_WRITEBACK) {
/* writeback output is pseudo-planar YUV, 1 byte luma */
off = 0;
bpp = 1;
if (pipe->ov_cnt & 0x01)
off = pipe->src_height *
pipe->src_width * bpp;
outpdw(overlay_base + 0x000c,
pipe->ov_blt_addr + off);
/* luma stride (width packed in both halfwords) */
outpdw(overlay_base + 0x0010,
((pipe->src_width << 16) |
pipe->src_width));
outpdw(overlay_base + 0x001c,
pipe->ov_blt_addr + off);
off = pipe->src_height * pipe->src_width;
/* align chroma to 2k address */
off = (off + 2047) & ~2047;
/* UV plane address */
outpdw(overlay_base + 0x0020,
pipe->ov_blt_addr + off);
/* MDDI - BLT + on demand */
outpdw(overlay_base + 0x0004, 0x08);
/* pseudo planar + writeback */
curr = inpdw(overlay_base + 0x0014);
curr &= 0x4;
outpdw(overlay_base + 0x0014, curr | 0x012);
/* rgb->yuv */
outpdw(overlay_base + 0x0200, 0x05);
}
}
} else {
/* direct-out path: scan out straight from the source buffer */
data = pipe->src_height;
data <<= 16;
data |= pipe->src_width;
outpdw(overlay_base + 0x0008, data); /* ROI, height + width */
outpdw(overlay_base + 0x000c, pipe->srcp0_addr);
outpdw(overlay_base + 0x0010, pipe->srcp0_ystride);
outpdw(overlay_base + 0x0004, 0x01); /* directout */
}
if (pipe->mixer_num == MDP4_MIXER1) {
if (intf == TV_INTF) {
/* NOTE(review): curr is read but not OR-ed into the write
 * below (unlike the BLT paths) - confirm this is intended */
curr = inpdw(overlay_base + 0x0014);
curr &= 0x4;
outpdw(overlay_base + 0x0014, 0x02); /* yuv422 */
/* overlay1 CSC config */
outpdw(overlay_base + 0x0200, 0x05); /* rgb->yuv */
}
}
#ifdef MDP4_IGC_LUT_ENABLE
curr = inpdw(overlay_base + 0x0014);
curr &= ~0x4;
outpdw(overlay_base + 0x0014, curr | 0x4); /* GC_LUT_EN, 888 */
#endif
if (!in_interrupt())
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
/*
 * mdp4_overlay_pipe_staged - test whether @pipe currently occupies a stage
 * in its mixer's cached LAYERMIXER_IN_CFG value.
 * Returns the pipe's (nonzero) stage nibble when staged, 0 otherwise.
 */
int mdp4_overlay_pipe_staged(struct mdp4_overlay_pipe *pipe)
{
	uint32 nibble_mask = 0x0f << (4 * pipe->pipe_num);

	return ctrl->mixer_cfg[pipe->mixer_num] & nibble_mask;
}
/*
 * mdp4_mixer_info - report every in-use overlay pipe into the caller's
 * info array (one entry per pipe, info++ per hit).
 * Returns the number of entries filled, or -ENODEV for a bad mixer number.
 *
 * NOTE(review): apart from the range check, mixer_num is not used to
 * filter the pipe list - all mixers' pipes are reported; confirm callers
 * expect that.
 */
int mdp4_mixer_info(int mixer_num, struct mdp_mixer_info *info)
{
	int ndx, cnt;
	struct mdp4_overlay_pipe *pipe;

	if (mixer_num > MDP4_MIXER_MAX)
		return -ENODEV;

	cnt = 0;
	for (ndx = MDP4_MIXER_STAGE_BASE; ndx < MDP4_MIXER_STAGE_MAX; ndx++) {
		/*
		 * &ctrl->plist[ndx] can never be NULL (address of an array
		 * element), so the old "if (pipe == NULL) continue;" was
		 * dead code and has been removed.
		 */
		pipe = &ctrl->plist[ndx];
		if (!pipe->pipe_used)
			continue;
		/* z_order == -1 means base layer */
		info->z_order = pipe->mixer_stage - MDP4_MIXER_STAGE0;
		info->ptype = pipe->pipe_type;
		info->pnum = pipe->pipe_num;
		info->pndx = pipe->pipe_ndx;
		info->mixer_num = pipe->mixer_num;
		info++;
		cnt++;
	}
	return cnt;
}
/*
 * mdp4_mixer_reset - software-reset one mixer: temporarily unstage the
 * pipes belonging to it, pulse the mixer's reset bits, then restore the
 * original staging and reload CSC/DMA-P state lost by the reset.
 */
void mdp4_mixer_reset(int mixer)
{
uint32 data, data1, mask;
int i, ndx, min, max, bit;
mdp_clk_ctrl(1);
/* MDP_LAYERMIXER_IN_CFG, shared by both mixer 0 and 1 */
data = inpdw(MDP_BASE + 0x10100);
data1 = data;
/* stage codes 1..8 belong to mixer0, 9..0xf to mixer1 */
if (mixer == 0) {
min = 1;
max = 8;
bit = 0x03; /* mixer0, dmap */
} else {
min = 9;
max = 0xf;
bit = 0x0C; /* mixer1, dmae */
}
mask = 0x0f;
/* walk the 8 pipe nibbles, clearing those staged on this mixer */
for (i = 0 ; i < 8 ; i++) {
ndx = data & mask;
ndx >>= (i * 4);
if (ndx >= min && ndx <= max)
data1 &= ~mask; /* unstage pipe from mixer */
mask <<= 4;
}
pr_debug("%s: => MIXER_RESET, data1=%x data=%x bit=%x\n",
__func__, data1, data, bit);
/* unstage pipes of mixer to be reset */
outpdw(MDP_BASE + 0x10100, data1); /* MDP_LAYERMIXER_IN_CFG */
outpdw(MDP_BASE + 0x18000, 0);
mdp4_sw_reset(bit); /* reset mixer */ /* 0 => mixer0, dmap */
/* restore original stage */
outpdw(MDP_BASE + 0x10100, data); /* MDP_LAYERMIXER_IN_CFG */
outpdw(MDP_BASE + 0x18000, 0);
mdp4_vg_csc_restore();
mdp4_overlay_dmap_reconfig();
mdp_clk_ctrl(0);
}
/*
 * mdp4_mixer_stage_commit - write the staged pipe set of @mixer into the
 * LAYERMIXER_IN_CFG register, refresh the blend setup when any pipe is
 * staged, and kick any pending overlay flush bits.
 */
void mdp4_mixer_stage_commit(int mixer)
{
struct mdp4_overlay_pipe *pipe;
int i, num;
u32 data, stage;
int off;
unsigned long flags;
data = 0;
/* assemble one 4-bit stage code per pipe into the cfg word */
for (i = MDP4_MIXER_STAGE_BASE; i < MDP4_MIXER_STAGE_MAX; i++) {
pipe = ctrl->stage[mixer][i];
if (pipe == NULL)
continue;
pr_debug("%s: mixer=%d ndx=%d stage=%d\n", __func__,
mixer, pipe->pipe_ndx, i);
stage = pipe->mixer_stage;
if (mixer >= MDP4_MIXER1)
stage += 8; /* mixer1 stage codes occupy 9..0xf */
stage <<= (4 * pipe->pipe_num);
data |= stage;
}
/*
 * stage_commit may be called from overlay_unset
 * for command panel, mdp clocks may be off at this time.
 * so mdp clock enabled is necessary
 */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
mdp_clk_ctrl(1);
if (data)
mdp4_mixer_blend_setup(mixer);
off = 0;
/* only touch the register when staging actually changed */
if (data != ctrl->mixer_cfg[mixer]) {
ctrl->mixer_cfg[mixer] = data;
if (mixer >= MDP4_MIXER2) {
/* MDP_LAYERMIXER2_IN_CFG */
off = 0x100f0;
} else {
/* mixer 0 or 1 share one register: merge the other's cfg */
num = mixer + 1;
num &= 0x01;
data |= ctrl->mixer_cfg[num];
off = 0x10100;
}
pr_debug("%s: mixer=%d data=%x flush=%x pid=%d\n", __func__,
mixer, data, ctrl->flush[mixer], current->pid);
}
/* cfg write and flush must not be split by an interrupt */
local_irq_save(flags);
if (off)
outpdw(MDP_BASE + off, data);
if (ctrl->flush[mixer]) {
outpdw(MDP_BASE + 0x18000, ctrl->flush[mixer]);
ctrl->flush[mixer] = 0;
}
local_irq_restore(flags);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
mdp_clk_ctrl(0);
}
/*
 * mdp4_mixer_stage_up - stage @pipe at its mixer_stage slot, removing any
 * stale entry for the same pipe ndx first.
 * @commit: nonzero to push the new staging to hardware immediately.
 */
void mdp4_mixer_stage_up(struct mdp4_overlay_pipe *pipe, int commit)
{
	struct mdp4_overlay_pipe *cur;
	int stage, mixer = pipe->mixer_num;

	/* a pipe can occupy at most one stage; drop the old slot */
	for (stage = MDP4_MIXER_STAGE_BASE;
	     stage < MDP4_MIXER_STAGE_MAX; stage++) {
		cur = ctrl->stage[mixer][stage];
		if (cur == NULL || cur->pipe_ndx != pipe->pipe_ndx)
			continue;
		ctrl->stage[mixer][stage] = NULL;
		break;
	}

	ctrl->stage[mixer][pipe->mixer_stage] = pipe;	/* keep it */

	if (commit)
		mdp4_mixer_stage_commit(mixer);
}
/*
 * mdp4_mixer_stage_down - remove every stage entry that refers to @pipe
 * from its mixer.
 * @commit: nonzero to push the new staging to hardware immediately.
 */
void mdp4_mixer_stage_down(struct mdp4_overlay_pipe *pipe, int commit)
{
	struct mdp4_overlay_pipe *cur;
	int stage, mixer = pipe->mixer_num;

	for (stage = MDP4_MIXER_STAGE_BASE;
	     stage < MDP4_MIXER_STAGE_MAX; stage++) {
		cur = ctrl->stage[mixer][stage];
		if (cur && cur->pipe_ndx == pipe->pipe_ndx)
			ctrl->stage[mixer][stage] = NULL;	/* clear it */
	}

	if (commit)
		mdp4_mixer_stage_commit(mixer);
}
/*
* mixer0: rgb3: border color at register 0x15004, 0x15008
* mixer1: vg3: border color at register 0x1D004, 0x1D008
* mixer2: xxx: border color at register 0x8D004, 0x8D008
*/
/*
 * mdp4_overlay_borderfill_stage_up - swap the borderfill pipe in as the
 * base layer: save the current base pipe in ctrl->baselayer[], clone its
 * configuration into @pipe (keeping pipe identity/format fields), free the
 * original base pipe for normal use, and stage @pipe at the base.
 * No-op when @pipe is not a borderfill pipe or a swap is already active.
 */
void mdp4_overlay_borderfill_stage_up(struct mdp4_overlay_pipe *pipe)
{
struct mdp4_overlay_pipe *bspipe;
int ptype, pnum, pndx, mixer;
int format, alpha_enable, alpha;
struct mdp4_iommu_pipe_info iom;
if (pipe->pipe_type != OVERLAY_TYPE_BF)
return;
mixer = pipe->mixer_num;
/* already swapped: original base layer is stashed away */
if (ctrl->baselayer[mixer])
return;
bspipe = ctrl->stage[mixer][MDP4_MIXER_STAGE_BASE];
if (bspipe == NULL) {
pr_err("%s: no base layer at mixer=%d\n",
__func__, mixer);
return;
}
/*
 * bspipe is clone here
 * get real pipe
 */
bspipe = mdp4_overlay_ndx2pipe(bspipe->pipe_ndx);
if (bspipe == NULL) {
pr_err("%s: mdp4_overlay_ndx2pipe returned null pipe ndx\n",
__func__);
return;
}
/* save original base layer */
ctrl->baselayer[mixer] = bspipe;
iom = pipe->iommu;
pipe->alpha = 0; /* make sure bf pipe has alpha 0 */
/* preserve the bf pipe's own identity across the struct copy below */
ptype = pipe->pipe_type;
pnum = pipe->pipe_num;
pndx = pipe->pipe_ndx;
format = pipe->src_format;
alpha_enable = pipe->alpha_enable;
alpha = pipe->alpha;
*pipe = *bspipe; /* keep base layer configuration */
pipe->pipe_type = ptype;
pipe->pipe_num = pnum;
pipe->pipe_ndx = pndx;
pipe->src_format = format;
pipe->alpha_enable = alpha_enable;
pipe->alpha = alpha;
pipe->iommu = iom;
/* free original base layer pipe to be used as normal pipe */
bspipe->pipe_used = 0;
/* tell the active panel driver about the new base pipe */
if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
mdp4_dsi_video_base_swap(0, pipe);
else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
mdp4_dsi_cmd_base_swap(0, pipe);
else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
mdp4_lcdc_base_swap(0, pipe);
else if (ctrl->panel_mode & MDP4_PANEL_DTV)
mdp4_dtv_base_swap(0, pipe);
mdp4_overlay_reg_flush(bspipe, 1);
/* borderfill pipe as base layer */
mdp4_mixer_stage_up(pipe, 0);
}
/*
 * mdp4_overlay_borderfill_stage_down - undo borderfill_stage_up: copy the
 * configuration back from the bf pipe into the saved original base pipe
 * (keeping that pipe's identity/format fields), free the bf pipe, and
 * re-stage the original base layer.
 */
void mdp4_overlay_borderfill_stage_down(struct mdp4_overlay_pipe *pipe)
{
struct mdp4_overlay_pipe *bspipe;
int ptype, pnum, pndx, mixer;
int format, alpha_enable, alpha;
struct mdp4_iommu_pipe_info iom;
if (pipe->pipe_type != OVERLAY_TYPE_BF)
return;
mixer = pipe->mixer_num;
/* retrieve original base layer */
bspipe = ctrl->baselayer[mixer];
if (bspipe == NULL) {
pr_err("%s: no base layer at mixer=%d\n",
__func__, mixer);
return;
}
/* preserve the base pipe's own identity across the struct copy below */
iom = bspipe->iommu;
ptype = bspipe->pipe_type;
pnum = bspipe->pipe_num;
pndx = bspipe->pipe_ndx;
format = bspipe->src_format;
alpha_enable = bspipe->alpha_enable;
alpha = bspipe->alpha;
*bspipe = *pipe; /* restore base layer configuration */
bspipe->pipe_type = ptype;
bspipe->pipe_num = pnum;
bspipe->pipe_ndx = pndx;
bspipe->src_format = format;
bspipe->alpha_enable = alpha_enable;
bspipe->alpha = alpha;
bspipe->iommu = iom;
bspipe->pipe_used++; /* mark base layer pipe used */
ctrl->baselayer[mixer] = NULL;
/* free borderfill pipe */
pipe->pipe_used = 0;
/* tell the active panel driver about the restored base pipe */
if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
mdp4_dsi_video_base_swap(0, bspipe);
else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
mdp4_dsi_cmd_base_swap(0, bspipe);
else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
mdp4_lcdc_base_swap(0, bspipe);
else if (ctrl->panel_mode & MDP4_PANEL_DTV)
mdp4_dtv_base_swap(0, bspipe);
/* free borderfill pipe */
mdp4_overlay_reg_flush(pipe, 1);
mdp4_mixer_stage_down(pipe, 0); /* commit will happen for bspipe up */
mdp4_overlay_pipe_free(pipe, 0);
/* stage up base layer */
mdp4_overlay_reg_flush(bspipe, 1);
/* restore original base layer */
mdp4_mixer_stage_up(bspipe, 1);
}
/*
 * mdp4_background_layer - find the effective background for @sp: the
 * highest pipe staged below @sp whose destination rectangle fully covers
 * @sp's. Falls back to the base layer when nothing covers it.
 */
static struct mdp4_overlay_pipe *mdp4_background_layer(int mixer,
			struct mdp4_overlay_pipe *sp)
{
	struct mdp4_overlay_pipe *cand;
	struct mdp4_overlay_pipe *bg;
	int stage;

	bg = ctrl->stage[mixer][MDP4_MIXER_STAGE_BASE];
	for (stage = MDP4_MIXER_STAGE_BASE;
	     stage < MDP4_MIXER_STAGE_MAX; stage++) {
		cand = ctrl->stage[mixer][stage];
		if (cand == NULL)
			continue;
		if (cand == sp)	/* only stages below sp count */
			break;
		/* does cand cover sp both horizontally and vertically? */
		if (cand->dst_x <= sp->dst_x &&
		    cand->dst_x + cand->dst_w >= sp->dst_x + sp->dst_w &&
		    cand->dst_y <= sp->dst_y &&
		    cand->dst_y + cand->dst_h >= sp->dst_y + sp->dst_h)
			bg = cand;
	}
	return bg;
}
/*
 * mdp4_overlay_bg_solidfill - turn solid-fill mode on or off for the
 * background pipe recorded in blend->solidfill_pipe, mirroring the
 * blend->solidfill flag into the pipe's SRC_FORMAT register.
 * Borderfill pipes are skipped (they have no such register block).
 */
static void mdp4_overlay_bg_solidfill(struct blend_cfg *blend)
{
	struct mdp4_overlay_pipe *pipe;
	char *base;
	u32 op_mode, format;
	int pnum, ptype;

	pipe = blend->solidfill_pipe;
	if (pipe == NULL)
		return;

	if (pipe->pipe_type == OVERLAY_TYPE_BF)
		return;

	/* locate this pipe's register block (RGB vs VG pipes) */
	ptype = mdp4_overlay_format2type(pipe->src_format);
	if (ptype == OVERLAY_TYPE_RGB) {
		pnum = pipe->pipe_num - OVERLAY_PIPE_RGB1;
		base = MDP_BASE + MDP4_RGB_BASE;
		base += MDP4_RGB_OFF * pnum;
	} else {
		pnum = pipe->pipe_num - OVERLAY_PIPE_VG1;
		base = MDP_BASE + MDP4_VIDEO_BASE;
		base += MDP4_VIDEO_OFF * pnum;
	}

	format = inpdw(base + 0x50);
	if (blend->solidfill) {
		format |= MDP4_FORMAT_SOLID_FILL;
		/*
		 * If solid fill is enabled, flip and scale
		 * have to be disabled. otherwise, h/w
		 * underruns.
		 * (Flag bits are combined with | here; the old code used
		 * +, which is only equivalent while the bits are distinct.)
		 */
		op_mode = inpdw(base + 0x0058);
		op_mode &= ~(MDP4_OP_FLIP_LR | MDP4_OP_SCALEX_EN);
		op_mode &= ~(MDP4_OP_FLIP_UD | MDP4_OP_SCALEY_EN);
		outpdw(base + 0x0058, op_mode);
		outpdw(base + 0x1008, 0);	/* black */
		/*
		 * Set src size and dst size same to avoid underruns
		 */
		outpdw(base + 0x0000, inpdw(base + 0x0008));
	} else {
		u32 src_size = ((pipe->src_h << 16) | pipe->src_w);

		outpdw(base + 0x0000, src_size);
		format &= ~MDP4_FORMAT_SOLID_FILL;
		blend->solidfill_pipe = NULL;
	}

	outpdw(base + 0x50, format);

	mdp4_overlay_reg_flush(pipe, 0);
}
/*
 * mdp4_mixer_blend_cfg - rewrite the blend-op register of every non-base
 * stage of @mixer from the cached blend configuration.
 */
void mdp4_mixer_blend_cfg(int mixer)
{
	int i, off;
	unsigned char *overlay_base;
	struct blend_cfg *blend;

	if (mixer == MDP4_MIXER2)
		overlay_base = MDP_BASE + MDP4_OVERLAYPROC2_BASE;
	else if (mixer == MDP4_MIXER1)
		overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;
	else
		overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;

	blend = &ctrl->blend[mixer][MDP4_MIXER_STAGE_BASE];
	blend++;	/* stage0 */

	for (i = MDP4_MIXER_STAGE0; i < MDP4_MIXER_STAGE_MAX; i++) {
		/*
		 * 0x20 bytes of blend registers per stage; stage3's block
		 * is packed 4 bytes lower than the regular stride.
		 * (The dead "off = 20 * i;" store, which was immediately
		 * overwritten, has been removed.)
		 */
		off = 0x20 * (i - MDP4_MIXER_STAGE0);
		if (i == MDP4_MIXER_STAGE3)
			off -= 4;
		outpdw(overlay_base + off + 0x104, blend->op);
		blend++;
	}
}
/*
 * mdp4_set_blend_by_op - derive the blend configuration for the fg pipe
 * @s_pipe over bg pipe @d_pipe from the explicit blend_op requested by
 * userspace.
 * @alpha_drop: nonzero when the VG pipe's alpha channel is lost by scaling,
 *              forcing constant-alpha blending.
 */
static void mdp4_set_blend_by_op(struct mdp4_overlay_pipe *s_pipe,
	struct mdp4_overlay_pipe *d_pipe,
	int alpha_drop,
	struct blend_cfg *blend)
{
	int d_alpha = d_pipe->alpha_enable;
	int s_alpha = s_pipe->alpha_enable;
	u32 op = s_pipe->blend_op;

	/* default: constant alpha on both sides, driven by fg's alpha */
	blend->fg_alpha = s_pipe->alpha;
	blend->bg_alpha = 0x0ff - s_pipe->alpha;
	blend->op = MDP4_BLEND_FG_ALPHA_FG_CONST |
		MDP4_BLEND_BG_ALPHA_BG_CONST;
	blend->co3_sel = 1; /* use fg alpha */

	if (op == BLEND_OP_OPAQUE) {
		blend->bg_alpha = 0;
		blend->fg_alpha = 0xff;
	} else if (s_alpha && !alpha_drop) {
		if (op == BLEND_OP_PREMULTIPLIED) {
			blend->op = MDP4_BLEND_FG_ALPHA_FG_CONST |
				MDP4_BLEND_BG_INV_ALPHA |
				MDP4_BLEND_BG_ALPHA_FG_PIXEL;
			if (blend->fg_alpha != 0xff) {
				blend->bg_alpha = blend->fg_alpha;
				blend->op |= MDP4_BLEND_BG_MOD_ALPHA;
			}
		} else {
			/* coverage (non-premultiplied) blending */
			blend->op = MDP4_BLEND_FG_ALPHA_FG_PIXEL |
				MDP4_BLEND_BG_INV_ALPHA |
				MDP4_BLEND_BG_ALPHA_FG_PIXEL;
			if (blend->fg_alpha != 0xff) {
				blend->bg_alpha = blend->fg_alpha;
				blend->op |= MDP4_BLEND_FG_MOD_ALPHA |
					MDP4_BLEND_BG_MOD_ALPHA;
			}
		}
	}

	if (!s_alpha && d_alpha)
		blend->co3_sel = 0;	/* fall back to bg alpha */

	pr_debug("%s: op %d bg alpha %d, fg alpha %d blend: %x\n",
		__func__, op, blend->bg_alpha, blend->fg_alpha, blend->op);
}
/*
 * mdp4_set_blend_by_fmt - derive the blend configuration for fg pipe
 * @s_pipe over bg pipe @d_pipe from the pipes' formats, used when no
 * explicit blend_op was requested. blend->op keeps the caller's default
 * (constant alpha) unless overridden below.
 */
static void mdp4_set_blend_by_fmt(struct mdp4_overlay_pipe *s_pipe,
	struct mdp4_overlay_pipe *d_pipe,
	int alpha_drop,
	struct blend_cfg *blend)
{
	int ptype;
	int d_alpha = d_pipe->alpha_enable;
	int s_alpha = s_pipe->alpha_enable;

	/* constant alpha derived from the fg pipe */
	blend->bg_alpha = 0x0ff - s_pipe->alpha;
	blend->fg_alpha = s_pipe->alpha;
	blend->co3_sel = 1; /* use fg alpha */

	if (s_pipe->is_fg) {
		/* fully opaque fg: bg can be replaced by solid fill */
		if (s_pipe->alpha == 0xff) {
			blend->solidfill = 1;
			blend->solidfill_pipe = d_pipe;
		}
		return;
	}

	if (s_alpha) {
		/* per-pixel fg alpha, unless scaling dropped it */
		blend->op = alpha_drop ? MDP4_BLEND_BG_ALPHA_FG_CONST :
			MDP4_BLEND_BG_ALPHA_FG_PIXEL;
		if (!alpha_drop && !(s_pipe->flags & MDP_BLEND_FG_PREMULT))
			blend->op |= MDP4_BLEND_FG_ALPHA_FG_PIXEL;
		blend->op |= MDP4_BLEND_BG_INV_ALPHA;
		return;
	}

	if (!d_alpha)
		return;

	ptype = mdp4_overlay_format2type(s_pipe->src_format);
	if (ptype == OVERLAY_TYPE_VIDEO &&
	    !(s_pipe->flags & MDP_BACKEND_COMPOSITION)) {
		/* blend using the bg pipe's per-pixel alpha */
		blend->op = (MDP4_BLEND_FG_ALPHA_BG_PIXEL |
			MDP4_BLEND_FG_INV_ALPHA);
		if (!(s_pipe->flags & MDP_BLEND_FG_PREMULT))
			blend->op |= MDP4_BLEND_BG_ALPHA_BG_PIXEL;
		blend->co3_sel = 0; /* use bg alpha */
	} else {
		/* s_pipe is rgb without alpha */
		blend->op = (MDP4_BLEND_FG_ALPHA_FG_CONST |
			MDP4_BLEND_BG_ALPHA_BG_CONST);
		blend->bg_alpha = 0;
	}
}
/*
* D(i+1) = Ks * S + Kd * D(i)
*/
/*
 * mdp4_mixer_blend_setup - compute the blend configuration for every
 * stage of @mixer (by op when userspace requested one, else by format),
 * then program the per-stage blend and transparency-key registers.
 * D(i+1) = Ks * S + Kd * D(i)
 */
void mdp4_mixer_blend_setup(int mixer)
{
	struct mdp4_overlay_pipe *d_pipe;
	struct mdp4_overlay_pipe *s_pipe;
	struct blend_cfg *blend;
	int i, off, alpha_drop;
	unsigned char *overlay_base;
	uint32 c0, c1, c2;

	d_pipe = ctrl->stage[mixer][MDP4_MIXER_STAGE_BASE];
	if (d_pipe == NULL) {
		pr_err("%s: Error: no bg_pipe at mixer=%d\n", __func__, mixer);
		return;
	}

	/* pass 1: fill in ctrl->blend[mixer][] for each staged fg pipe */
	blend = &ctrl->blend[mixer][MDP4_MIXER_STAGE0];
	for (i = MDP4_MIXER_STAGE0; i < MDP4_MIXER_STAGE_MAX; i++) {
		blend->solidfill = 0;
		blend->op = (MDP4_BLEND_FG_ALPHA_FG_CONST |
				MDP4_BLEND_BG_ALPHA_BG_CONST);
		s_pipe = ctrl->stage[mixer][i];
		if (s_pipe == NULL) {
			blend++;
			d_pipe = NULL;
			continue;
		}
		alpha_drop = 0;	/* per stage */
		/* alpha channel is lost on VG pipe when using QSEED or M/N */
		if (s_pipe->pipe_type == OVERLAY_TYPE_VIDEO &&
			s_pipe->alpha_enable &&
			((s_pipe->op_mode & MDP4_OP_SCALEY_EN) ||
			(s_pipe->op_mode & MDP4_OP_SCALEX_EN)) &&
			!(s_pipe->op_mode & (MDP4_OP_SCALEX_PIXEL_RPT |
			MDP4_OP_SCALEY_PIXEL_RPT)))
			alpha_drop = 1;
		d_pipe = mdp4_background_layer(mixer, s_pipe);
		pr_debug("%s: stage=%d: bg: ndx=%d da=%d dalpha=%x "
		"fg: ndx=%d sa=%d salpha=%x is_fg=%d alpha_drop=%d\n",
		 __func__, i-2, d_pipe->pipe_ndx, d_pipe->alpha_enable,
		d_pipe->alpha, s_pipe->pipe_ndx, s_pipe->alpha_enable,
		s_pipe->alpha, s_pipe->is_fg, alpha_drop);
		if ((s_pipe->blend_op == BLEND_OP_NOT_DEFINED) ||
			(s_pipe->blend_op >= BLEND_OP_MAX))
			mdp4_set_blend_by_fmt(s_pipe, d_pipe,
				alpha_drop, blend);
		else
			mdp4_set_blend_by_op(s_pipe, d_pipe, alpha_drop, blend);
		if (s_pipe->transp != MDP_TRANSP_NOP) {
			if (s_pipe->is_fg) {
				transp_color_key(s_pipe->src_format,
						s_pipe->transp, &c0, &c1, &c2);
				/* Fg blocked */
				blend->op |= MDP4_BLEND_FG_TRANSP_EN;
				/* lower limit */
				blend->transp_low0 = (c1 << 16 | c0);
				blend->transp_low1 = c2;
				/* upper limit */
				blend->transp_high0 = (c1 << 16 | c0);
				blend->transp_high1 = c2;
			} else {
				transp_color_key(d_pipe->src_format,
						s_pipe->transp, &c0, &c1, &c2);
				/* bg key lives in the previous stage's cfg */
				blend->op |= MDP4_BLEND_BG_TRANSP_EN;
				blend--; /* one stage back */
				/* lower limit */
				blend->transp_low0 = (c1 << 16 | c0);
				blend->transp_low1 = c2;
				/* upper limit */
				blend->transp_high0 = (c1 << 16 | c0);
				blend->transp_high1 = c2;
				blend++; /* back to original stage */
			}
		}
		blend++;
	}

	/* mixer number, /dev/fb0, /dev/fb1, /dev/fb2 */
	if (mixer == MDP4_MIXER2)
		overlay_base = MDP_BASE + MDP4_OVERLAYPROC2_BASE;/* 0x88000 */
	else if (mixer == MDP4_MIXER1)
		overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
	else
		overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */

	/* pass 2: program the hardware */
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	blend = &ctrl->blend[mixer][MDP4_MIXER_STAGE_BASE];
	/* base-stage transparency key registers */
	/* lower limit */
	outpdw(overlay_base + 0x180, blend->transp_low0);
	outpdw(overlay_base + 0x184, blend->transp_low1);
	/* upper limit */
	outpdw(overlay_base + 0x188, blend->transp_high0);
	outpdw(overlay_base + 0x18c, blend->transp_high1);
	blend++; /* stage0 */
	for (i = MDP4_MIXER_STAGE0; i < MDP4_MIXER_STAGE_MAX; i++) {
		/*
		 * 0x20 bytes of blend registers per stage; stage3's block
		 * is packed 4 bytes lower. (The dead "off = 20 * i;" store,
		 * immediately overwritten, has been removed.)
		 */
		off = 0x20 * (i - MDP4_MIXER_STAGE0);
		if (i == MDP4_MIXER_STAGE3)
			off -= 4;
		if (blend->solidfill_pipe)
			mdp4_overlay_bg_solidfill(blend);
		outpdw(overlay_base + off + 0x108, blend->fg_alpha);
		outpdw(overlay_base + off + 0x10c, blend->bg_alpha);
		if (mdp_rev >= MDP_REV_42)
			outpdw(overlay_base + off + 0x104, blend->op);
		/* NOTE(review): "(off << 5)" scales the stride by 32 for the
		 * co3_sel register, unlike every other write here - confirm
		 * against the register map. Kept as-is. */
		outpdw(overlay_base + (off << 5) + 0x1004, blend->co3_sel);
		outpdw(overlay_base + off + 0x110, blend->transp_low0);/* low */
		outpdw(overlay_base + off + 0x114, blend->transp_low1);/* low */
		/* upper limit */
		outpdw(overlay_base + off + 0x118, blend->transp_high0);
		outpdw(overlay_base + off + 0x11c, blend->transp_high1);
		blend++;
	}
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
/*
 * mdp4_overlay_reg_flush - accumulate this pipe's flush bit (and, when
 * @all is set, its mixer's overlay-engine bit) into ctrl->flush[] for the
 * next MDP_OVERLAY_REG_FLUSH write.
 */
void mdp4_overlay_reg_flush(struct mdp4_overlay_pipe *pipe, int all)
{
	int mixer = pipe->mixer_num;
	uint32 bits = 1 << (2 + pipe->pipe_num);	/* per-pipe flush bit */

	if (all)
		bits |= (mixer == MDP4_MIXER0) ? 0x01 : 0x02;

	ctrl->flush[mixer] |= bits;
}
/*
 * mdp4_overlay_flush_piggyback - fold mixer @m1's pending flush bits into
 * mixer @m0 so one register write flushes both.
 */
void mdp4_overlay_flush_piggyback(int m0, int m1)
{
	ctrl->flush[m0] |= ctrl->flush[m1];
}
/*
 * mdp4_overlay_reg_flush_reset - discard all pending flush bits for the
 * mixer that @pipe belongs to.
 */
void mdp4_overlay_reg_flush_reset(struct mdp4_overlay_pipe *pipe)
{
	ctrl->flush[pipe->mixer_num] = 0;
}
/*
 * mdp4_overlay_stage_pipe - return the pipe staged at (mixer, stage),
 * or NULL when that stage is empty. No bounds checking is performed.
 */
struct mdp4_overlay_pipe *mdp4_overlay_stage_pipe(int mixer, int stage)
{
return ctrl->stage[mixer][stage];
}
/*
 * mdp4_overlay_ndx2pipe - look up an overlay pipe by its 1-based index.
 * Returns NULL for an out-of-range index or a pipe that is not in use.
 */
struct mdp4_overlay_pipe *mdp4_overlay_ndx2pipe(int ndx)
{
	struct mdp4_overlay_pipe *pipe;

	if (ndx <= 0 || ndx > OVERLAY_PIPE_MAX)
		return NULL;

	pipe = &ctrl->plist[ndx - 1];	/* ndx starts from 1 */
	return pipe->pipe_used ? pipe : NULL;
}
/*
 * mdp4_overlay_pipe_alloc - find a free overlay pipe of the requested type.
 * RGB requests may also be satisfied by a (more capable) VG/video pipe.
 * Borderfill pipes are matched by mixer and re-acquired via stage_down
 * rather than the pipe_used flag. Returns NULL when nothing is available.
 */
struct mdp4_overlay_pipe *mdp4_overlay_pipe_alloc(int ptype, int mixer)
{
int i;
struct mdp4_overlay_pipe *pipe;
if (ptype == OVERLAY_TYPE_BF) {
if (!mdp4_overlay_borderfill_supported())
return NULL;
}
for (i = 0; i < OVERLAY_PIPE_MAX; i++) {
pipe = &ctrl->plist[i];
/* VG pipes can serve RGB requests too */
if (pipe->pipe_type == ptype ||
(ptype == OVERLAY_TYPE_RGB && pipe->pipe_type == OVERLAY_TYPE_VIDEO)) {
if ((ptype == OVERLAY_TYPE_BF && mixer != pipe->mixer_num) ||
(ptype != OVERLAY_TYPE_BF && pipe->pipe_used != 0)) {
continue;
}
else if (ptype == OVERLAY_TYPE_BF) { /* borderfill pipe: reclaim it */
mdp4_overlay_borderfill_stage_down(pipe);
}
init_completion(&pipe->comp);
init_completion(&pipe->dmas_comp);
pr_debug("%s: pipe=%x ndx=%d num=%d\n", __func__,
(int)pipe, pipe->pipe_ndx, pipe->pipe_num);
return pipe;
}
}
pr_err("%s: ptype=%d FAILED\n", __func__, ptype);
return NULL;
}
/*
 * mdp4_overlay_pipe_free - release an overlay pipe: free its iommu-mapped
 * buffers (except borderfill pipes, which have none), zero the descriptor
 * while preserving its fixed identity fields, and clear pipe_used on the
 * real (non-clone) pipe entry.
 */
void mdp4_overlay_pipe_free(struct mdp4_overlay_pipe *pipe, int all)
{
uint32 ptype, num, ndx, mixer;
struct mdp4_iommu_pipe_info iom;
struct mdp4_overlay_pipe *orgpipe;
pr_debug("%s: pipe=%x ndx=%d\n", __func__, (int)pipe, pipe->pipe_ndx);
/* identity fields survive the memset below */
ptype = pipe->pipe_type;
num = pipe->pipe_num;
ndx = pipe->pipe_ndx;
mixer = pipe->mixer_num;
/* No need for borderfill pipe */
if (pipe->pipe_type != OVERLAY_TYPE_BF)
mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, all);
iom = pipe->iommu;
memset(pipe, 0, sizeof(*pipe));
pipe->pipe_type = ptype;
pipe->pipe_num = num;
pipe->pipe_ndx = ndx;
pipe->mixer_num = mixer;
pipe->iommu = iom;
/* Clear real pipe attributes as well (pipe may be a clone) */
orgpipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
if (orgpipe != NULL)
orgpipe->pipe_used = 0;
}
/*
 * mdp4_overlay_req2pipe - validate an MSMFB overlay request and translate
 * it into an overlay pipe configuration.
 * Checks rect ranges (12-bit h/w limits), scale ratios, source/destination
 * bounds and format, then allocates (new request) or looks up (existing id)
 * a pipe and fills in its geometry, flags and blend parameters.
 * Returns 0 and stores the pipe via @ppipe, or a negative errno.
 */
static int mdp4_overlay_req2pipe(struct mdp_overlay *req, int mixer,
			struct mdp4_overlay_pipe **ppipe,
			struct msm_fb_data_type *mfd)
{
	struct mdp4_overlay_pipe *pipe;
	int ret, ptype;
	u32 upscale_max;

	/* MDP rev 4.1+ supports a larger upscale ratio */
	upscale_max = (mdp_rev >= MDP_REV_41) ?
		MDP4_REV41_OR_LATER_UP_SCALING_MAX :
		MDP4_REV40_UP_SCALING_MAX;

	if (mfd == NULL) {
		pr_err("%s: mfd == NULL, -ENODEV\n", __func__);
		return -ENODEV;
	}

	if (mixer >= MDP4_MIXER_MAX) {
		pr_err("%s: mixer out of range!\n", __func__);
		mdp4_stat.err_mixer++;
		return -ERANGE;
	}

	/* NOTE(review): if z_order is an unsigned field the "< 0" arm is
	 * dead - harmless either way; confirm its type in msm_mdp.h */
	if (req->z_order < 0 || req->z_order > 3) {
		pr_err("%s: z_order=%d out of range!\n", __func__,
				req->z_order);
		mdp4_stat.err_zorder++;
		return -ERANGE;
	}

	/* source/destination rectangles are limited to 12 bits and must be
	 * at least 2 pixels in each dimension */
	if (req->src_rect.h > 0xFFF || req->src_rect.h < 2) {
		pr_err("%s: src_h is out of range: 0X%x!\n",
			__func__, req->src_rect.h);
		mdp4_stat.err_size++;
		return -EINVAL;
	}

	if (req->src_rect.w > 0xFFF || req->src_rect.w < 2) {
		pr_err("%s: src_w is out of range: 0X%x!\n",
			__func__, req->src_rect.w);
		mdp4_stat.err_size++;
		return -EINVAL;
	}

	if (req->src_rect.x > 0xFFF) {
		pr_err("%s: src_x is out of range: 0X%x!\n",
			__func__, req->src_rect.x);
		mdp4_stat.err_size++;
		return -EINVAL;
	}

	if (req->src_rect.y > 0xFFF) {
		pr_err("%s: src_y is out of range: 0X%x!\n",
			__func__, req->src_rect.y);
		mdp4_stat.err_size++;
		return -EINVAL;
	}

	if (req->dst_rect.h > 0xFFF || req->dst_rect.h < 2) {
		pr_err("%s: dst_h is out of range: 0X%x!\n",
			__func__, req->dst_rect.h);
		mdp4_stat.err_size++;
		return -EINVAL;
	}

	if (req->dst_rect.w > 0xFFF || req->dst_rect.w < 2) {
		pr_err("%s: dst_w is out of range: 0X%x!\n",
			__func__, req->dst_rect.w);
		mdp4_stat.err_size++;
		return -EINVAL;
	}

	if (req->dst_rect.x > 0xFFF) {
		pr_err("%s: dst_x is out of range: 0X%x!\n",
			__func__, req->dst_rect.x);
		mdp4_stat.err_size++;
		return -EINVAL;
	}

	if (req->dst_rect.y > 0xFFF) {
		pr_err("%s: dst_y is out of range: 0X%x!\n",
			__func__, req->dst_rect.y);
		mdp4_stat.err_size++;
		return -EINVAL;
	}

	/*
	 * (The old "src_rect.h == 0 || src_rect.w == 0" check was
	 * unreachable - zero sizes are already rejected by the "< 2"
	 * checks above - and has been removed.)
	 */

	/* scale-ratio limits: up to upscale_max, down to 1/8 */
	if (req->dst_rect.h > (req->src_rect.h * upscale_max)) {
		mdp4_stat.err_scale++;
		pr_err("%s: scale up, too much (h)!\n", __func__);
		return -ERANGE;
	}

	if (req->src_rect.h > (req->dst_rect.h * 8)) {	/* too little */
		mdp4_stat.err_scale++;
		pr_err("%s: scale down, too little (h)!\n", __func__);
		return -ERANGE;
	}

	if (req->dst_rect.w > (req->src_rect.w * upscale_max)) {
		mdp4_stat.err_scale++;
		pr_err("%s: scale up, too much (w)!\n", __func__);
		return -ERANGE;
	}

	if (req->src_rect.w > (req->dst_rect.w * 8)) {	/* too little */
		mdp4_stat.err_scale++;
		pr_err("%s: scale down, too little (w)!\n", __func__);
		return -ERANGE;
	}

	if (mdp_hw_revision == MDP4_REVISION_V1) {
		/* non integer down scaling ratio smaller than 1/4
		 * is not supported
		 */
		if (req->src_rect.h > (req->dst_rect.h * 4)) {
			if (req->src_rect.h % req->dst_rect.h) {
				mdp4_stat.err_scale++;
				pr_err("%s: need integer (h)!\n", __func__);
				return -ERANGE;
			}
		}

		if (req->src_rect.w > (req->dst_rect.w * 4)) {
			if (req->src_rect.w % req->dst_rect.w) {
				mdp4_stat.err_scale++;
				pr_err("%s: need integer (w)!\n", __func__);
				return -ERANGE;
			}
		}
	}

	/* source rect must lie inside the source image */
	if (((req->src_rect.x + req->src_rect.w) > req->src.width) ||
		((req->src_rect.y + req->src_rect.h) > req->src.height)) {
		mdp4_stat.err_size++;
		pr_err("%s invalid src rectangle\n", __func__);
		return -ERANGE;
	}

	/* dest rect must fit the panel (except side-by-side 3D) */
	if (ctrl->panel_3d != MDP4_3D_SIDE_BY_SIDE) {
		int xres;
		int yres;

		xres = mfd->panel_info.xres;
		yres = mfd->panel_info.yres;

		if (((req->dst_rect.x + req->dst_rect.w) > xres) ||
			((req->dst_rect.y + req->dst_rect.h) > yres)) {
			mdp4_stat.err_size++;
			pr_err("%s invalid dst rectangle (%dx%d) vs (%dx%d)\n", __func__,(req->dst_rect.x + req->dst_rect.w),(req->dst_rect.y + req->dst_rect.h),xres,yres);
			return -ERANGE;
		}
	}

	ptype = mdp4_overlay_format2type(req->src.format);
	if (ptype < 0) {
		pr_err("%s: mdp4_overlay_format2type!\n", __func__);
		return ptype;
	}

	if (req->flags & MDP_OV_PIPE_SHARE)
		ptype = OVERLAY_TYPE_VIDEO; /* VG pipe supports both RGB+YUV */

	if (req->id == MSMFB_NEW_REQUEST)  /* new request */
		pipe = mdp4_overlay_pipe_alloc(ptype, mixer);
	else
		pipe = mdp4_overlay_ndx2pipe(req->id);

	if (pipe == NULL) {
		pr_err("%s: pipe == NULL!\n", __func__);
		return -ENOMEM;
	}

#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT)
	pipe->mfd = mfd;
#endif

	if (!display_iclient && !IS_ERR_OR_NULL(mfd->iclient)) {
		display_iclient = mfd->iclient;
		pr_debug("%s(): display_iclient %p\n", __func__,
			display_iclient);
	}

	pipe->src_format = req->src.format;
	ret = mdp4_overlay_format2pipe(pipe);
	if (ret < 0) {
		pr_err("%s: mdp4_overlay_format2pipe!\n", __func__);
		return ret;
	}

	/*
	 * base layer == 1, reserved for frame buffer
	 * zorder 0 == stage 0 == 2
	 * zorder 1 == stage 1 == 3
	 * zorder 2 == stage 2 == 4
	 */
	if (req->id == MSMFB_NEW_REQUEST) {  /* new request */
		if (mdp4_overlay_pipe_staged(pipe)) {
			pr_err("%s: ndx=%d still staged\n", __func__,
						pipe->pipe_ndx);
			return -EPERM;
		}
		pipe->pipe_used++;
		pipe->mixer_num = mixer;
		pr_debug("%s: zorder=%d pipe ndx=%d num=%d\n", __func__,
			req->z_order, pipe->pipe_ndx, pipe->pipe_num);
	}

	pipe->mixer_stage = req->z_order + MDP4_MIXER_STAGE0;
	pipe->src_width = req->src.width & 0x1fff;	/* source img width */
	pipe->src_height = req->src.height & 0x1fff;	/* source img height */
	pipe->src_h = req->src_rect.h & 0x07ff;
	pipe->src_w = req->src_rect.w & 0x07ff;
	pipe->src_y = req->src_rect.y & 0x07ff;
	pipe->src_x = req->src_rect.x & 0x07ff;
	pipe->dst_h = req->dst_rect.h & 0x07ff;
	pipe->dst_w = req->dst_rect.w & 0x07ff;
	pipe->dst_y = req->dst_rect.y & 0x07ff;
	pipe->dst_x = req->dst_rect.x & 0x07ff;

	pipe->op_mode = 0;

#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT)
	pipe->ext_flag = req->flags;
#endif

	if (req->flags & MDP_FLIP_LR)
		pipe->op_mode |= MDP4_OP_FLIP_LR;

	if (req->flags & MDP_FLIP_UD)
		pipe->op_mode |= MDP4_OP_FLIP_UD;

	if (req->flags & MDP_DITHER)
		pipe->op_mode |= MDP4_OP_DITHER_EN;

	if (req->flags & MDP_DEINTERLACE)
		pipe->op_mode |= MDP4_OP_DEINT_EN;

	if (req->flags & MDP_DEINTERLACE_ODD)
		pipe->op_mode |= MDP4_OP_DEINT_ODD_REF;

	pipe->is_fg = req->is_fg;/* control alpha and color key */

	pipe->alpha = req->alpha & 0x0ff;
	pipe->blend_op = req->blend_op;

	pipe->transp = req->transp_mask;

	pipe->flags = req->flags;

	*ppipe = pipe;
	return 0;
}
/*
 * Calculate the MDP core clock rate one overlay pipe requires.
 *
 * pipe->req_clk is derived from the panel pixel clock multiplied by the
 * worse of the horizontal/vertical scale factors, evaluated in 16.16
 * fixed point.  Degenerate geometry forces the request to mdp_max_clk so
 * the pipe is never under-clocked; large video sources on mixer0 force
 * blt (writeback) mode by requesting slightly more than mdp_max_clk.
 *
 * Returns 0 on success, -EINVAL when an argument or panel clock is
 * invalid (req_clk is still set to a safe value in that case).
 */
static int mdp4_calc_pipe_mdp_clk(struct msm_fb_data_type *mfd,
				  struct mdp4_overlay_pipe *pipe)
{
	u32 pclk;
	u32 xscale, yscale;
	u32 hsync = 0;
	u32 shift = 16;		/* fixed-point fraction bits */
	u64 rst;
	int ptype;
	int ret = -EINVAL;

	if (!pipe) {
		pr_err("%s: pipe is null!\n", __func__);
		return ret;
	}
	if (!mfd) {
		pr_err("%s: mfd is null!\n", __func__);
		return ret;
	}

	pr_debug("%s: pipe sets: panel res(x,y)=(%d,%d)\n",
		 __func__, mfd->panel_info.xres, mfd->panel_info.yres);
	pr_debug("%s: src(w,h)(%d,%d),src(x,y)(%d,%d)\n",
		 __func__, pipe->src_w, pipe->src_h, pipe->src_x, pipe->src_y);
	pr_debug("%s: dst(w,h)(%d,%d),dst(x,y)(%d,%d)\n",
		 __func__, pipe->dst_w, pipe->dst_h, pipe->dst_x, pipe->dst_y);

	/* MIPI panels expose their pixel clock separately from clk_rate */
	pclk = (mfd->panel_info.type == MIPI_VIDEO_PANEL ||
		mfd->panel_info.type == MIPI_CMD_PANEL) ?
		mfd->panel_info.mipi.dsi_pclk_rate :
		mfd->panel_info.clk_rate;

	/* dual-channel LVDS pushes two pixels per clock */
	if (mfd->panel_info.type == LVDS_PANEL &&
	    mfd->panel_info.lvds.channel_mode == LVDS_DUAL_CHANNEL_MODE)
		pclk = pclk << 1;

	if (!pclk) {
		pipe->req_clk = mdp_max_clk;
		pr_err("%s panel pixel clk is zero!\n", __func__);
		return ret;
	}
	pr_debug("%s: mdp panel pixel clk is %d.\n",
		 __func__, pclk);

	/*
	 * All four dimensions must be non-zero: dst_h and src_h are
	 * divisors below, src_w/dst_w feed the xscale computation.
	 */
	if (!pipe->dst_h) {
		pr_err("%s: pipe dst_h is zero!\n", __func__);
		pipe->req_clk = mdp_max_clk;
		return ret;
	}
	if (!pipe->src_h) {
		pr_err("%s: pipe src_h is zero!\n", __func__);
		pipe->req_clk = mdp_max_clk;
		return ret;
	}
	if (!pipe->dst_w) {
		pr_err("%s: pipe dst_w is zero!\n", __func__);
		pipe->req_clk = mdp_max_clk;
		return ret;
	}
	/* was a duplicated dst_h test; src_w is the remaining dimension */
	if (!pipe->src_w) {
		pr_err("%s: pipe src_w is zero!\n", __func__);
		pipe->req_clk = mdp_max_clk;
		return ret;
	}

	if (pipe->mixer_num == MDP4_MIXER0) {
		if (pipe->blt_forced)
			return 0;

		ptype = mdp4_overlay_format2type(pipe->src_format);
		if (ptype == OVERLAY_TYPE_VIDEO) {
			/* near-1080p video, either orientation: force blt */
			if ((pipe->src_h >= 720) && (pipe->src_w >= 1080)) {
				pipe->req_clk = (u32) mdp_max_clk + 100;
				pipe->blt_forced++;
				return 0;
			} else if ((pipe->src_h >= 1080) && (pipe->src_w >= 720)) {
				pipe->req_clk = (u32) mdp_max_clk + 100;
				pipe->blt_forced++;
				return 0;
			}
		}
	}

	/*
	 * For the scaling cases, make more margin by removing porch
	 * values and adding extra 20%.
	 */
	if ((pipe->src_h != pipe->dst_h) ||
	    (pipe->src_w != pipe->dst_w)) {
		hsync = mfd->panel_info.xres;
		hsync *= 100;
		hsync /= 120;
		pr_debug("%s: panel hsync is %d. with scaling\n",
			 __func__, hsync);
	} else {
		hsync = mfd->panel_info.lcdc.h_back_porch +
			mfd->panel_info.lcdc.h_front_porch +
			mfd->panel_info.lcdc.h_pulse_width +
			mfd->panel_info.xres;
		pr_debug("%s: panel hsync is %d.\n",
			 __func__, hsync);
	}

	if (!hsync) {
		pipe->req_clk = mdp_max_clk;
		pr_err("%s: panel hsync is zero!\n", __func__);
		return 0;
	}

	xscale = mfd->panel_info.xres;
	xscale += pipe->src_w;
	if (xscale < pipe->dst_w) {
		pipe->req_clk = mdp_max_clk;
		pr_err("%s: xres+src_w cannot be less than dst_w!\n",
		       __func__);
		return ret;
	}
	xscale -= pipe->dst_w;
	xscale <<= shift;
	xscale /= hsync;
	pr_debug("%s: the right %d shifted xscale is %d.\n",
		 __func__, shift, xscale);

	if (pipe->src_h > pipe->dst_h) {
		yscale = pipe->src_h;
		yscale <<= shift;
		yscale /= pipe->dst_h;
	} else {		/* upscale */
		yscale = pipe->dst_h;
		yscale <<= shift;
		yscale /= pipe->src_h;
	}
	yscale *= pipe->src_w;
	yscale /= hsync;
	pr_debug("%s: the right %d shifted yscale is %d.\n",
		 __func__, shift, yscale);

	/* required rate = pclk * worst(xscale, yscale), in 16.16 */
	rst = pclk;
	if (yscale > xscale)
		rst *= yscale;
	else
		rst *= xscale;
	rst >>= shift;

	/*
	 * There is one special case for the panels that have low
	 * v_back_porch (<=4), mdp clk should be fast enough to buffer
	 * 4 lines input during back porch time if scaling is
	 * required(FIR).
	 */
	if ((mfd->panel_info.lcdc.v_back_porch <= 4) &&
	    (pipe->src_h != pipe->dst_h) &&
	    (mfd->panel_info.lcdc.v_back_porch)) {
		u32 clk = 0;
		clk = 4 * (pclk >> shift) / mfd->panel_info.lcdc.v_back_porch;
		clk <<= shift;
		pr_debug("%s: mdp clk rate %d based on low vbp %d\n",
			 __func__, clk, mfd->panel_info.lcdc.v_back_porch);
		rst = (rst > clk) ? rst : clk;
	}

	/*
	 * If the calculated mdp clk is less than panel pixel clk,
	 * most likely due to upscaling, mdp clk rate will be set to
	 * greater than pclk. Now the driver uses 1.15 as the
	 * factor. Ideally this factor is passed from board file.
	 */
	if (rst < pclk) {
		rst = ((pclk >> shift) * 23 / 20) << shift;
		pr_debug("%s calculated mdp clk is less than pclk.\n",
			 __func__);
	}

	/*
	 * Interlaced videos require the max mdp clk but cannot
	 * be explained by mdp clk equation.
	 */
	if (pipe->flags & MDP_DEINTERLACE) {
		rst = (rst > mdp_max_clk) ? rst : mdp_max_clk;
		pr_info("%s deinterlace requires max mdp clk.\n",
			__func__);
	}

	pipe->req_clk = (u32) rst;
	pr_debug("%s: required mdp clk %d mixer %d pipe ndx %d\n",
		 __func__, pipe->req_clk, pipe->mixer_num, pipe->pipe_ndx);
	return 0;
}
/*
 * Calculate the AB/IB bus-bandwidth quotas for one overlay pipe.
 *
 * quota is the per-second source fetch volume (src area * fps * bpp),
 * carried >>16 until the final <<16 to keep the u32 arithmetic from
 * overflowing.  When the pipe downscales vertically the IB quota is
 * raised proportionally, since the fetch rate exceeds the display rate.
 *
 * Returns 0 on success, -EINVAL on invalid arguments.
 */
static int mdp4_calc_pipe_mdp_bw(struct msm_fb_data_type *mfd,
				 struct mdp4_overlay_pipe *pipe)
{
	u32 fps;
	int ret = -EINVAL;
	u32 quota;
	u32 shift = 16;

	if (!pipe) {
		pr_err("%s: pipe is null!\n", __func__);
		return ret;
	}
	if (!mfd) {
		pr_err("%s: mfd is null!\n", __func__);
		return ret;
	}

	fps = mdp_get_panel_framerate(mfd);
	quota = pipe->src_w * pipe->src_h * fps * pipe->bpp;
	quota >>= shift;

	pipe->bw_ab_quota = quota * mdp_bw_ab_factor / 100;
	pipe->bw_ib_quota = quota * mdp_bw_ib_factor / 100;
	pr_debug("%s max_bw=%llu ab_factor=%d ib_factor=%d\n", __func__,
		 mdp_max_bw, mdp_bw_ab_factor, mdp_bw_ib_factor);

	/*
	 * down scaling factor for ib.
	 * Bug fix: the condition used to read (!dst_h && !src_h &&
	 * src_h > dst_h), which can never be true (and would have
	 * divided by a zero dst_h if it ever were) — the vertical
	 * downscale IB boost was dead code.  Both heights must be
	 * non-zero and src_h strictly greater than dst_h.
	 */
	if ((pipe->dst_h) && (pipe->src_h) &&
	    (pipe->src_h > pipe->dst_h)) {
		u64 ib = quota;
		ib *= pipe->src_h;
		ib /= pipe->dst_h;
		pipe->bw_ib_quota = max(ib, pipe->bw_ib_quota);
		pr_debug("%s: src_h=%d dst_h=%d mdp ib %llu, ib_quota=%llu\n",
			 __func__, pipe->src_h, pipe->dst_h,
			 ib<<shift, pipe->bw_ib_quota<<shift);
	}

	pipe->bw_ab_quota <<= shift;
	pipe->bw_ib_quota <<= shift;

	pr_debug("%s: pipe ndx=%d src(h,w)(%d, %d) fps=%d bpp=%d\n",
		 __func__, pipe->pipe_ndx, pipe->src_h, pipe->src_w,
		 fps, pipe->bpp);
	pr_debug("%s: ab_quota=%llu ib_quota=%llu\n", __func__,
		 pipe->bw_ab_quota, pipe->bw_ib_quota);
	return 0;
}
/*
 * Compute the writeback (blt) bandwidth request for the mixer a pipe
 * sits on and record it in the global perf_request table.
 *
 * Returns 0 on success, -EINVAL on invalid arguments.
 */
int mdp4_calc_blt_mdp_bw(struct msm_fb_data_type *mfd,
			 struct mdp4_overlay_pipe *pipe)
{
	struct mdp4_overlay_perf *perf_req = &perf_request;
	const u32 shift = 16;
	u32 fps;
	u32 quota;
	int bpp;
	int mixer;

	if (!pipe) {
		pr_err("%s: pipe is null!\n", __func__);
		return -EINVAL;
	}
	if (!mfd) {
		pr_err("%s: mfd is null!\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&perf_mutex);

	bpp = BLT_BPP;
	fps = mdp_get_panel_framerate(mfd);
	mixer = pipe->mixer_num;

	/* blt both reads and writes each pixel, hence the factor of two;
	 * kept >>16 until the end to avoid u32 overflow */
	quota = (pipe->dst_w * pipe->dst_h * fps * bpp * 2) >> shift;

	perf_req->mdp_ov_ab_bw[mixer] =
		(quota * mdp_bw_ab_factor / 100) << shift;
	perf_req->mdp_ov_ib_bw[mixer] =
		(quota * mdp_bw_ib_factor / 100) << shift;

	pr_debug("%s: pipe ndx=%d dst(h,w)(%d, %d) fps=%d bpp=%d\n",
		 __func__, pipe->pipe_ndx, pipe->dst_h, pipe->dst_w,
		 fps, bpp);
	pr_debug("%s: overlay=%d ab_bw=%llu ib_bw=%llu\n", __func__,
		 mixer,
		 perf_req->mdp_ov_ab_bw[mixer],
		 perf_req->mdp_ov_ib_bw[mixer]);

	mutex_unlock(&perf_mutex);
	return 0;
}
/*
 * Aggregate the clock and bus-bandwidth requests of every in-use pipe
 * into the global perf_request, and decide per mixer whether writeback
 * (blt) mode is needed.  Takes perf_mutex.
 *
 * Returns 0 on success, -EINVAL if mfd is NULL or the pipe list is
 * missing.
 */
int mdp4_overlay_mdp_perf_req(struct msm_fb_data_type *mfd)
{
	u32 worst_mdp_clk = 0;
	int i;
	struct mdp4_overlay_perf *perf_req = &perf_request;
	struct mdp4_overlay_pipe *pipe;
	u32 cnt = 0;
	int ret = -EINVAL;
	u64 ab_quota_total = 0, ib_quota_total = 0;

	if (!mfd) {
		pr_err("%s: mfd is null!\n", __func__);
		return ret;
	}

	mutex_lock(&perf_mutex);
	pipe = ctrl->plist;

	for (i = 0; i < MDP4_MIXER_MAX; i++)
		perf_req->use_ov_blt[i] = 0;

	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		/* NOTE(review): pipe can only be NULL here if ctrl->plist
		 * itself was NULL on entry; pipe++ never yields NULL. */
		if (!pipe) {
			mutex_unlock(&perf_mutex);
			return ret;
		}
		if (!pipe->pipe_used)
			continue;
		cnt++;
		/* track the single largest per-pipe clock request */
		if (worst_mdp_clk < pipe->req_clk)
			worst_mdp_clk = pipe->req_clk;
		/* a request beyond the hw ceiling means blt mode */
		if (pipe->req_clk > mdp_max_clk)
			perf_req->use_ov_blt[pipe->mixer_num] = 1;
		/* mixer2 (writeback path) always uses blt */
		if (pipe->mixer_num == MDP4_MIXER2)
			perf_req->use_ov_blt[MDP4_MIXER2] = 1;
		/* border-fill pipes fetch no pixels: no bandwidth cost */
		if (pipe->pipe_type != OVERLAY_TYPE_BF) {
			ab_quota_total += pipe->bw_ab_quota;
			ib_quota_total += pipe->bw_ib_quota;
		}
		if (mfd->mdp_rev == MDP_REV_41) {
			/*
			 * writeback (blt) mode to provide work around
			 * for dsi cmd mode interface hardware bug.
			 */
			if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
				if (pipe->dst_x != 0)
					perf_req->use_ov_blt[MDP4_MIXER0] = 1;
			}
			/* wide (>1280) non-DTV panels also need blt on rev41 */
			if ((mfd->panel_info.xres > 1280) &&
			    (mfd->panel_info.type != DTV_PANEL)) {
				perf_req->use_ov_blt[MDP4_MIXER0] = 1;
			}
		}
	}

	/* clamp to the hardware ceiling, then snap to a supported rate */
	perf_req->mdp_clk_rate = min(worst_mdp_clk, mdp_max_clk);
	perf_req->mdp_clk_rate = mdp_clk_round_rate(perf_req->mdp_clk_rate);

	/* blt mode contributes its own read+write bandwidth per mixer */
	for (i = 0; i < MDP4_MIXER_MAX; i++) {
		if (perf_req->use_ov_blt[i]) {
			ab_quota_total += perf_req->mdp_ov_ab_bw[i];
			ib_quota_total += perf_req->mdp_ov_ib_bw[i];
		}
	}

	perf_req->mdp_ab_bw = roundup(ab_quota_total, MDP_BUS_SCALE_AB_STEP);
	perf_req->mdp_ib_bw = roundup(ib_quota_total, MDP_BUS_SCALE_AB_STEP);

	pr_debug("%s %d: ab_quota_total=(%llu, %d) ib_quota_total=(%llu, %d)\n",
		 __func__, __LINE__,
		 ab_quota_total, perf_req->mdp_ab_bw,
		 ib_quota_total, perf_req->mdp_ib_bw);

	if (ab_quota_total > mdp_max_bw)
		pr_warn("%s: req ab bw=%llu is larger than max bw=%llu",
			__func__, ab_quota_total, mdp_max_bw);
	if (ib_quota_total > mdp_max_bw)
		pr_warn("%s: req ib bw=%llu is larger than max bw=%llu",
			__func__, ib_quota_total, mdp_max_bw);

	pr_debug("%s %d: pid %d cnt %d clk %d ov0_blt %d, ov1_blt %d\n",
		 __func__, __LINE__, current->pid, cnt,
		 perf_req->mdp_clk_rate,
		 perf_req->use_ov_blt[0],
		 perf_req->use_ov_blt[1]);

	mutex_unlock(&perf_mutex);
	return 0;
}
/*
 * Refresh both the core-clock and bus-bandwidth requests of a pipe.
 *
 * Both calculations are attempted even if the first fails, so the pipe
 * always carries whatever estimates could be produced.
 *
 * Returns 0 when both succeed, -EINVAL if either calculation failed.
 */
int mdp4_overlay_mdp_pipe_req(struct mdp4_overlay_pipe *pipe,
			      struct msm_fb_data_type *mfd)
{
	int ret = 0;

	if (mdp4_calc_pipe_mdp_clk(mfd, pipe)) {
		/* set ret before logging: the old code printed the stale
		 * value 0 in these messages */
		ret = -EINVAL;
		pr_err("%s unable to calc mdp pipe clk rate ret=%d\n",
		       __func__, ret);
	}
	if (mdp4_calc_pipe_mdp_bw(mfd, pipe)) {
		ret = -EINVAL;
		pr_err("%s unable to calc mdp pipe bandwidth ret=%d\n",
		       __func__, ret);
	}
	return ret;
}
/*
 * Apply the previously aggregated perf_request to the hardware.
 *
 * @flag: non-zero = raising performance before a frame kicks off
 *	  (only clock/bandwidth *increases* and blt *starts* are applied);
 *	  zero = lowering after the frame is done (only decreases and blt
 *	  stops).  The asymmetry keeps hw fast enough through transitions.
 *
 * Takes perf_mutex; mirrors what was applied into perf_current.
 */
void mdp4_overlay_mdp_perf_upd(struct msm_fb_data_type *mfd,
			       int flag)
{
	struct mdp4_overlay_perf *perf_req = &perf_request;
	struct mdp4_overlay_perf *perf_cur = &perf_current;

	pr_debug("%s %d: req mdp clk %d, cur mdp clk %d flag %d\n",
		 __func__, __LINE__,
		 perf_req->mdp_clk_rate,
		 perf_cur->mdp_clk_rate,
		 flag);

	mutex_lock(&perf_mutex);

	/* without an external display, mixer1 blt state is moot */
	if (!mdp4_extn_disp)
		perf_cur->use_ov_blt[1] = 0;

	if (flag) {
		/* raise the core clock first, before the frame starts */
		if (perf_req->mdp_clk_rate > perf_cur->mdp_clk_rate) {
			mdp_set_core_clk(perf_req->mdp_clk_rate);
			pr_info("%s mdp clk is changed [%d] from %d to %d\n",
				__func__,
				flag,
				perf_cur->mdp_clk_rate,
				perf_req->mdp_clk_rate);
			perf_cur->mdp_clk_rate =
				perf_req->mdp_clk_rate;
		}
		/* raise the bus vote if either quota grew */
		if ((perf_req->mdp_ab_bw > perf_cur->mdp_ab_bw) ||
		    (perf_req->mdp_ib_bw > perf_cur->mdp_ib_bw)) {
			mdp_bus_scale_update_request
				(perf_req->mdp_ab_bw, perf_req->mdp_ib_bw);
			pr_debug("%s mdp ab_bw is changed [%d] from %d to %d\n",
				 __func__,
				 flag,
				 perf_cur->mdp_ab_bw,
				 perf_req->mdp_ab_bw);
			pr_debug("%s mdp ib_bw is changed [%d] from %d to %d\n",
				 __func__,
				 flag,
				 perf_cur->mdp_ib_bw,
				 perf_req->mdp_ib_bw);
			perf_cur->mdp_ab_bw = perf_req->mdp_ab_bw;
			perf_cur->mdp_ib_bw = perf_req->mdp_ib_bw;
		}
		/* start blt on the primary mixer when newly requested */
		if ((mfd->panel_info.pdest == DISPLAY_1 &&
		     perf_req->use_ov_blt[0] && !perf_cur->use_ov_blt[0]) ||
		    dbg_force_ov0_blt) {
			if (mfd->panel_info.type == LCDC_PANEL ||
			    mfd->panel_info.type == LVDS_PANEL)
				mdp4_lcdc_overlay_blt_start(mfd);
			else if (mfd->panel_info.type == MIPI_VIDEO_PANEL)
				mdp4_dsi_video_blt_start(mfd);
			else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
				mdp4_dsi_cmd_blt_start(mfd);
			pr_debug("%s mixer0 start blt [%d] from %d to %d.\n",
				 __func__,
				 flag,
				 perf_cur->use_ov_blt[0],
				 perf_req->use_ov_blt[0]);
			perf_cur->use_ov_blt[0] = perf_req->use_ov_blt[0];
		}
		/* start blt on the external (DTV) mixer when requested */
		if ((mfd->panel_info.pdest == DISPLAY_2 &&
		     perf_req->use_ov_blt[1] && !perf_cur->use_ov_blt[1]) ||
		    dbg_force_ov1_blt) {
			mdp4_dtv_overlay_blt_start(mfd);
			pr_debug("%s mixer1 start blt [%d] from %d to %d.\n",
				 __func__,
				 flag,
				 perf_cur->use_ov_blt[1],
				 perf_req->use_ov_blt[1]);
			perf_cur->use_ov_blt[1] = perf_req->use_ov_blt[1];
		}
	} else {
		/* lowering: only apply decreases, after the frame is done */
		if (perf_req->mdp_clk_rate < perf_cur->mdp_clk_rate) {
			pr_info("%s mdp clk is changed [%d] from %d to %d\n",
				__func__,
				flag,
				perf_cur->mdp_clk_rate,
				perf_req->mdp_clk_rate);
			mdp_set_core_clk(perf_req->mdp_clk_rate);
			perf_cur->mdp_clk_rate =
				perf_req->mdp_clk_rate;
		}
		if (perf_req->mdp_ab_bw < perf_cur->mdp_ab_bw ||
		    perf_req->mdp_ib_bw < perf_cur->mdp_ib_bw) {
			mdp_bus_scale_update_request
				(perf_req->mdp_ab_bw, perf_req->mdp_ib_bw);
			pr_debug("%s mdp ab bw is changed [%d] from %d to %d\n",
				 __func__,
				 flag,
				 perf_cur->mdp_ab_bw,
				 perf_req->mdp_ab_bw);
			pr_debug("%s mdp ib bw is changed [%d] from %d to %d\n",
				 __func__,
				 flag,
				 perf_cur->mdp_ib_bw,
				 perf_req->mdp_ib_bw);
			perf_cur->mdp_ab_bw = perf_req->mdp_ab_bw;
			perf_cur->mdp_ib_bw = perf_req->mdp_ib_bw;
		}
		/* stop blt on the primary mixer when no longer needed */
		if ((mfd->panel_info.pdest == DISPLAY_1 &&
		     !perf_req->use_ov_blt[0] && perf_cur->use_ov_blt[0]) ||
		    dbg_force_ov0_blt) {
			if (mfd->panel_info.type == LCDC_PANEL ||
			    mfd->panel_info.type == LVDS_PANEL)
				mdp4_lcdc_overlay_blt_stop(mfd);
			else if (mfd->panel_info.type == MIPI_VIDEO_PANEL)
				mdp4_dsi_video_blt_stop(mfd);
			else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
				mdp4_dsi_cmd_blt_stop(mfd);
			pr_debug("%s mixer0 stop blt [%d] from %d to %d.\n",
				 __func__,
				 flag,
				 perf_cur->use_ov_blt[0],
				 perf_req->use_ov_blt[0]);
			perf_cur->use_ov_blt[0] = perf_req->use_ov_blt[0];
		}
		/* stop blt on the external (DTV) mixer */
		if ((mfd->panel_info.pdest == DISPLAY_2 &&
		     !perf_req->use_ov_blt[1] && perf_cur->use_ov_blt[1]) ||
		    dbg_force_ov1_blt) {
			mdp4_dtv_overlay_blt_stop(mfd);
			pr_debug("%s mixer1 stop blt [%d] from %d to %d.\n",
				 __func__,
				 flag,
				 perf_cur->use_ov_blt[1],
				 perf_req->use_ov_blt[1]);
			perf_cur->use_ov_blt[1] = perf_req->use_ov_blt[1];
		}
	}
	mutex_unlock(&perf_mutex);
	return;
}
/*
 * Resolve a user-supplied buffer descriptor into a physical address
 * range for DMA.
 *
 * Sources, in priority order: KGSL GEM objects (MDP_BLIT_SRC_GEM),
 * another framebuffer's memory (MDP_MEMORY_ID_TYPE_FB), ION (when
 * CONFIG_MSM_MULTIMEDIA_USE_ION) and Android PMEM.
 *
 * @start/@len: out — physical start and length of the buffer.
 * @srcp_file/@p_need: out — file ref + fput_light flag for the fb path
 *	(caller releases).
 * @srcp_ihdl: out — ION handle for the ION path (caller releases).
 *
 * Returns 0 on success, negative (or -1 for the fb path) on failure.
 * NOTE(review): with neither ION nor PMEM configured, control would
 * fall off the end without a return — presumably at least one of those
 * configs is always set on these targets; confirm against the defconfig.
 */
static int get_img(struct msmfb_data *img, struct fb_info *info,
	struct mdp4_overlay_pipe *pipe, unsigned int plane,
	unsigned long *start, unsigned long *len, struct file **srcp_file,
	int *p_need, struct ion_handle **srcp_ihdl)
{
	struct file *file;
	int put_needed, ret = 0, fb_num;
#ifdef CONFIG_ANDROID_PMEM
	unsigned long vstart;
#endif
	*p_need = 0;

	if (img->flags & MDP_BLIT_SRC_GEM) {
		*srcp_file = NULL;
		return kgsl_gem_obj_addr(img->memory_id, (int) img->priv,
					 start, len);
	}

	if (img->flags & MDP_MEMORY_ID_TYPE_FB) {
		file = fget_light(img->memory_id, &put_needed);
		if (file == NULL)
			return -EINVAL;
		pipe->flags |= MDP_MEMORY_ID_TYPE_FB;
		/* only a real /dev/fb node is acceptable as a source */
		if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
			fb_num = MINOR(file->f_dentry->d_inode->i_rdev);
			if (get_fb_phys_info(start, len, fb_num,
					     DISPLAY_SUBSYSTEM_ID)) {
				ret = -1;
			} else {
				/* keep the file ref; caller drops it later */
				*srcp_file = file;
				*p_need = put_needed;
			}
		} else
			ret = -1;
		if (ret)
			fput_light(file, put_needed);
		return ret;
	}

#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	return mdp4_overlay_iommu_map_buf(img->memory_id, pipe, plane,
		start, len, srcp_ihdl);
#endif
#ifdef CONFIG_ANDROID_PMEM
	if (!get_pmem_file(img->memory_id, start, &vstart,
	    len, srcp_file))
		return 0;
	else
		return -EINVAL;
#endif
}
#ifdef CONFIG_FB_MSM_MIPI_DSI
/*
 * Configure 3D side-by-side mode on a DSI panel.
 *
 * Returns 0 when a DSI interface handled the request, -EPERM for any
 * other panel type, -EINTR if the overlay mutex wait was interrupted.
 */
int mdp4_overlay_3d_sbys(struct fb_info *info, struct msmfb_overlay_3d *req)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
	int status = -EPERM;

	if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
		return -EINTR;

	if (ctrl->panel_mode & (MDP4_PANEL_DSI_CMD | MDP4_PANEL_DSI_VIDEO)) {
		/* command mode takes priority when both bits are set */
		if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
			mdp4_dsi_cmd_3d_sbys(mfd, req);
		else
			mdp4_dsi_video_3d_sbys(mfd, req);
		status = 0;
	}

	mutex_unlock(&mfd->dma->ov_mutex);
	return status;
}
#else
/* Without MIPI DSI support, 3D side-by-side is never available. */
int mdp4_overlay_3d_sbys(struct fb_info *info, struct msmfb_overlay_3d *req)
{
	/* do nothing */
	return -EPERM;
}
#endif
int mdp4_overlay_blt(struct fb_info *info, struct msmfb_overlay_blt *req)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
if (mfd == NULL)
return -ENODEV;
if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
return -EINTR;
if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
mdp4_dsi_cmd_overlay_blt(mfd, req);
else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
mdp4_dsi_video_overlay_blt(mfd, req);
else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
mdp4_lcdc_overlay_blt(mfd, req);
else if (ctrl->panel_mode & MDP4_PANEL_MDDI)
mdp4_mddi_overlay_blt(mfd, req);
mutex_unlock(&mfd->dma->ov_mutex);
return 0;
}
/*
 * MSMFB_OVERLAY_GET backend: copy the stored request data of pipe
 * req->id back to the caller, advertising border-fill support via
 * MDP_BORDERFILL_SUPPORTED when available.
 *
 * Returns 0 on success, -ENODEV for an unknown pipe index.
 */
int mdp4_overlay_get(struct fb_info *info, struct mdp_overlay *req)
{
	struct mdp4_overlay_pipe *pipe = mdp4_overlay_ndx2pipe(req->id);

	if (!pipe)
		return -ENODEV;

	*req = pipe->req_data;
	if (mdp4_overlay_borderfill_supported())
		req->flags |= MDP_BORDERFILL_SUPPORTED;

	return 0;
}
/*
 * MSMFB_OVERLAY_SET ioctl backend: validate the request, allocate or
 * re-use a pipe via mdp4_overlay_req2pipe() and pre-compute its perf
 * requirements.  On success req->id carries the pipe index (>= 1)
 * back to the caller.
 *
 * Returns 0 on success, negative errno on failure.
 */
int mdp4_overlay_set(struct fb_info *info, struct mdp_overlay *req)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
	int ret, mixer;
	struct mdp4_overlay_pipe *pipe;

	if (mfd == NULL) {
		pr_err("%s: mfd == NULL, -ENODEV\n", __func__);
		return -ENODEV;
	}

	/* reject while suspended, except the primary before splash hand-off */
	if (info->node != 0 || mfd->cont_splash_done)	/* primary */
		if (!mfd->panel_power_on)		/* suspended */
			return -EPERM;

	if (req->src.format == MDP_FB_FORMAT)
		req->src.format = mfd->fb_imgType;

	if (mutex_lock_interruptible(&mfd->dma->ov_mutex)) {
		pr_err("%s: mutex_lock_interruptible, -EINTR\n", __func__);
		return -EINTR;
	}

	mixer = mfd->panel_info.pdest;	/* DISPLAY_1 or DISPLAY_2 */

	ret = mdp4_overlay_req2pipe(req, mixer, &pipe, mfd);
	if (ret < 0) {
		mutex_unlock(&mfd->dma->ov_mutex);
		pr_err("%s: mdp4_overlay_req2pipe, ret=%d\n", __func__, ret);
		return ret;
	}

#if (CONFIG_MACH_LGE)
	/*
	 * LGE: an RGB (UI) layer that would need blt mode is rejected
	 * here — presumably so the caller falls back to GPU composition;
	 * confirm against the LGE display HAL.
	 */
	mdp4_calc_pipe_mdp_clk(mfd, pipe);
	if(pipe->mixer_num == MDP4_MIXER0
		&& pipe->req_clk > mdp_max_clk
		&& OVERLAY_TYPE_RGB == mdp4_overlay_format2type(pipe->src_format)) {
		pr_err("%s UI blt case, can't compose with MDP directly.\n", __func__);
		if(req->id == MSMFB_NEW_REQUEST)
		{
			/* fresh pipe: release it again before failing */
			mdp4_overlay_pipe_free(pipe,0);
		}
		mutex_unlock(&mfd->dma->ov_mutex);
		return -EINVAL;
	}
#endif

	if (pipe->flags & MDP_SECURE_OVERLAY_SESSION) {
		mdp4_map_sec_resource(mfd);
	}

	/* return id back to user */
	req->id = pipe->pipe_ndx;	/* pipe_ndx start from 1 */
	pipe->req_data = *req;		/* keep original req */

	if (!IS_ERR_OR_NULL(mfd->iclient)) {
		pr_debug("pipe->flags 0x%x\n", pipe->flags);
		/* steer subsequent ION allocations to the matching heap */
		if (pipe->flags & MDP_SECURE_OVERLAY_SESSION) {
			mfd->mem_hid &= ~BIT(ION_IOMMU_HEAP_ID);
			mfd->mem_hid |= ION_SECURE;
		} else {
			mfd->mem_hid |= BIT(ION_IOMMU_HEAP_ID);
			mfd->mem_hid &= ~ION_SECURE;
		}
	}

	mdp4_stat.overlay_set[pipe->mixer_num]++;

	if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
		/* post-processing config only exists on the VG pipes */
		if (pipe->pipe_num <= OVERLAY_PIPE_VG2)
			memcpy(&pipe->pp_cfg, &req->overlay_pp_cfg,
					sizeof(struct mdp_overlay_pp_params));
		else
			pr_debug("%s: RGB Pipes don't support CSC/QSEED\n",
								__func__);
	}

	mdp4_overlay_mdp_pipe_req(pipe, mfd);

#if (CONFIG_MACH_LGE)
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WXGA_PT) || defined(CONFIG_FB_MSM_MIPI_HITACHI_VIDEO_HD_PT)
	/* these panels cap video layers at mdp_max_clk (blt kept off) */
	if(pipe->mixer_num == MDP4_MIXER0
		&& OVERLAY_TYPE_VIDEO == mdp4_overlay_format2type(pipe->src_format)) {
		pr_debug("%s video blt mode off, req_clk is max now.\n", __func__);
		pipe->req_clk = mdp_max_clk;
	}
#else
	if(pipe->mixer_num == MDP4_MIXER0
		&& pipe->req_clk > mdp_max_clk
		&& OVERLAY_TYPE_VIDEO == mdp4_overlay_format2type(pipe->src_format)) {
		pr_debug("%s video blt mode off, req_clk is max now.\n", __func__);
		pipe->req_clk = mdp_max_clk;
	}
#endif
#endif

	mutex_unlock(&mfd->dma->ov_mutex);

	return 0;
}
/*
 * Flush, stage down and free every pipe on @mixer except the base
 * (frame buffer) layer.  Returns the number of pipes released.
 */
int mdp4_overlay_unset_mixer(int mixer)
{
	struct mdp4_overlay_pipe *pipe;
	int stage;
	int freed = 0;

	/* walk from the top stage down, leaving the base layer alone */
	for (stage = MDP4_MIXER_STAGE3; stage > MDP4_MIXER_STAGE_BASE;
	     stage--) {
		pipe = ctrl->stage[mixer][stage];
		if (!pipe)
			continue;

		pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
		mdp4_overlay_reg_flush(pipe, 1);
		mdp4_mixer_stage_down(pipe, 1);
		mdp4_overlay_pipe_free(pipe, 1);
		freed++;
	}

	return freed;
}
/*
 * Tear down the overlay pipe identified by user index @ndx: flush,
 * stage down, do interface-specific housekeeping (MDDI restore, DTV
 * unset) and free the pipe.  Border-fill pipes are only staged down,
 * never freed.
 *
 * Returns 0 on success, -ENODEV for a bad device/index, -EINTR if the
 * overlay mutex wait was interrupted.
 */
int mdp4_overlay_unset(struct fb_info *info, int ndx)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
	struct mdp4_overlay_pipe *pipe;

	if (mfd == NULL)
		return -ENODEV;

	if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
		return -EINTR;

	pipe = mdp4_overlay_ndx2pipe(ndx);

	if (pipe == NULL) {
		mutex_unlock(&mfd->dma->ov_mutex);
		return -ENODEV;
	}

	if (pipe->pipe_type == OVERLAY_TYPE_BF) {
		mdp4_overlay_borderfill_stage_down(pipe);
		mutex_unlock(&mfd->dma->ov_mutex);
		return 0;
	}

	if (pipe->mixer_num == MDP4_MIXER2)
		ctrl->mixer2_played = 0;
	else if (pipe->mixer_num == MDP4_MIXER1)
		ctrl->mixer1_played = 0;
	else {
		/* mixer 0 */
		ctrl->mixer0_played = 0;
		/* MDDI blt dma must drain before touching the stage */
		if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
			if (mfd->panel_power_on)
				mdp4_mddi_blt_dmap_busy_wait(mfd);
		}
	}

	mdp4_overlay_reg_flush(pipe, 1);
	mdp4_mixer_stage_down(pipe, 0);

	if (pipe->blt_forced) {
		/* NOTE(review): forced-blt state is only cleared for
		 * secure sessions — confirm non-secure pipes are meant
		 * to keep it. */
		if (pipe->flags & MDP_SECURE_OVERLAY_SESSION) {
			pipe->blt_forced = 0;
			pipe->req_clk = 0;
			mdp4_overlay_mdp_perf_req(mfd);
		}
	}

	if (pipe->mixer_num == MDP4_MIXER0) {
		if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
			if (mfd->panel_power_on)
				mdp4_mddi_overlay_restore();
		}
	} else {	/* mixer1, DTV, ATV */
		if (ctrl->panel_mode & MDP4_PANEL_DTV)
			mdp4_overlay_dtv_unset(mfd, pipe);
	}

	mdp4_stat.overlay_unset[pipe->mixer_num]++;

	mdp4_overlay_pipe_free(pipe, 0);

	mutex_unlock(&mfd->dma->ov_mutex);

	return 0;
}
/*
 * Block until the next vsync of the display that @info belongs to.
 * HDMI-primary configurations and fb1 wait on the DTV interface;
 * fb0 waits on whichever primary-panel interface is active.
 * Always returns 0.
 */
int mdp4_overlay_wait4vsync(struct fb_info *info)
{
	if (hdmi_prim_display || info->node == 1) {
		mdp4_dtv_wait4vsync(0);
		return 0;
	}

	if (info->node == 0) {
		if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
			mdp4_dsi_video_wait4vsync(0);
		else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
			mdp4_dsi_cmd_wait4vsync(0);
		else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
			mdp4_lcdc_wait4vsync(0);
	}

	return 0;
}
/*
 * Enable or disable vsync event delivery for the display that @info
 * belongs to, routing to the active interface driver.
 * Always returns 0.
 */
int mdp4_overlay_vsync_ctrl(struct fb_info *info, int enable)
{
	int cmd = enable ? 1 : 0;	/* normalize to 0/1 */

	if (hdmi_prim_display || info->node == 1) {
		mdp4_dtv_vsync_ctrl(info, cmd);
		return 0;
	}

	if (info->node == 0) {
		if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
			mdp4_dsi_video_vsync_ctrl(info, cmd);
		else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
			mdp4_dsi_cmd_vsync_ctrl(info, cmd);
		else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
			mdp4_lcdc_vsync_ctrl(info, cmd);
	}

	return 0;
}
/* Geometry of the macro-tile layout used by tiled YUV source formats. */
struct tile_desc {
	uint32 width;		/* tile's width */
	uint32 height;		/* tile's height */
	uint32 row_tile_w;	/* tiles per row's width */
	uint32 row_tile_h;	/* tiles per row's height */
};
/*
 * Fill in the tile geometry used by the Samsung supertile YUV format.
 *
 * Each row of samsung tile consists of two tiles in height and two
 * tiles in width, so width aligns to 64 x 2 bytes and height to
 * 32 x 2 bytes; the video decoder generates two tiles in width and one
 * tile in height, which leaves height aligned to 32 x 1 bytes.
 */
void tile_samsung(struct tile_desc *tp)
{
	tp->width = 64;		/* 64 bytes */
	tp->height = 32;	/* 32 bytes */
	tp->row_tile_w = 2;	/* 2 tiles per row's width */
	tp->row_tile_h = 1;	/* 1 tiles per row's height */
}
/*
 * Size in bytes of the luma plane of a tiled source, rounded up to an
 * 8 KB boundary: whole tile-rows are counted in each dimension.
 */
uint32 tile_mem_size(struct mdp4_overlay_pipe *pipe, struct tile_desc *tp)
{
	uint32 row_w, row_h;
	uint32 num_w, num_h;
	uint32 size;

	/* bytes covered by one tile-row in each dimension */
	row_w = tp->width * tp->row_tile_w;
	row_h = tp->height * tp->row_tile_h;

	/* tile-rows needed to cover the source image (round up) */
	num_w = (pipe->src_width + row_w - 1) / row_w;
	num_h = (pipe->src_height + row_h - 1) / row_h;

	size = num_w * num_h * row_w * row_h;

	/* align the total up to the next 8192-byte boundary */
	return (size + 8191) & ~8191;
}
/* Intentional no-op on MDP4; always reports success. */
int mdp4_overlay_play_wait(struct fb_info *info, struct msmfb_overlay_data *req)
{
	return 0;
}
/*
* mdp4_overlay_dma_commit: called from dma_done isr
* No mutex/sleep allowed
*/
void mdp4_overlay_dma_commit(int mixer)
{
	/*
	 * non double buffer register update here
	 * perf level, new clock rate should be done here
	 */
	/* intentionally empty placeholder — see the notes above */
}
/*
* mdp4_overlay_vsync_commit: called from tasklet context
* No mutex/sleep allowed
*/
void mdp4_overlay_vsync_commit(struct mdp4_overlay_pipe *pipe)
{
	/* program the pipe's fetch/format registers for this frame */
	if (pipe->pipe_type == OVERLAY_TYPE_VIDEO)
		mdp4_overlay_vg_setup(pipe);	/* video/graphic pipe */
	else
		mdp4_overlay_rgb_setup(pipe);	/* rgb pipe */

	pr_debug("%s: pipe=%x ndx=%d num=%d used=%d\n", __func__,
		(int) pipe, pipe->pipe_ndx, pipe->pipe_num, pipe->pipe_used);
	mdp4_overlay_reg_flush(pipe, 1);
	/* NOTE(review): second arg 0 — presumably stages without an
	 * immediate commit; confirm against mdp4_mixer_stage_up() */
	mdp4_mixer_stage_up(pipe, 0);
}
/*
 * MSMFB_OVERLAY_PLAY ioctl backend: resolve the frame's source
 * buffer(s) to physical addresses, program the pipe's plane addresses
 * and strides according to its fetch-plane class, refresh perf
 * requests and queue the pipe to the active interface driver.
 *
 * Returns 0 on success, negative on pipe lookup or buffer errors.
 */
int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
	struct msmfb_data *img;
	struct mdp4_overlay_pipe *pipe;
	ulong start, addr;
	ulong len = 0;
	struct ion_handle *srcp0_ihdl = NULL;
	struct ion_handle *srcp1_ihdl = NULL, *srcp2_ihdl = NULL;
	uint32_t overlay_version = 0;
	int ret = 0;

	if (mfd == NULL)
		return -ENODEV;

	pipe = mdp4_overlay_ndx2pipe(req->id);
	if (pipe == NULL) {
		mdp4_stat.err_play++;
		return -ENODEV;
	}

	/* border-fill fetches no pixels; just stage it and commit */
	if (pipe->pipe_type == OVERLAY_TYPE_BF) {
		mdp4_overlay_borderfill_stage_up(pipe);
		mdp4_mixer_stage_commit(pipe->mixer_num);
		return 0;
	}

	mutex_lock(&mfd->dma->ov_mutex);

	img = &req->data;
	get_img(img, info, pipe, 0, &start, &len, &pipe->srcp0_file,
		&pipe->put0_need, &srcp0_ihdl);
	if (len == 0) {
		pr_err("%s: pmem Error\n", __func__);
		ret = -1;
		goto end;
	}
	addr = start + img->offset;
	pipe->srcp0_addr = addr;
	pipe->srcp0_ystride = pipe->src_width * pipe->bpp;

	pr_debug("%s: mixer=%d ndx=%x addr=%x flags=%x pid=%d\n", __func__,
		pipe->mixer_num, pipe->pipe_ndx, (int)addr, pipe->flags,
		current->pid);

	/* magic prefix in version_key marks requests carrying explicit
	 * plane1/plane2 buffers */
	if ((req->version_key & VERSION_KEY_MASK) == 0xF9E8D700)
		overlay_version = (req->version_key & ~VERSION_KEY_MASK);

	if (pipe->fetch_plane == OVERLAY_PLANE_PSEUDO_PLANAR) {
		if (overlay_version > 0) {
			img = &req->plane1_data;
			get_img(img, info, pipe, 1, &start, &len,
				&pipe->srcp1_file, &pipe->put1_need,
				&srcp1_ihdl);
			if (len == 0) {
				pr_err("%s: Error to get plane1\n", __func__);
				ret = -EINVAL;
				goto end;
			}
			pipe->srcp1_addr = start + img->offset;
		} else if (pipe->frame_format ==
				MDP4_FRAME_FORMAT_VIDEO_SUPERTILE) {
			/* chroma follows the tiled luma plane */
			struct tile_desc tile;

			tile_samsung(&tile);
			pipe->srcp1_addr = addr + tile_mem_size(pipe, &tile);
		} else {
			/* chroma packed directly after the luma plane */
			pipe->srcp1_addr = addr + (pipe->src_width *
						pipe->src_height);
		}
		pipe->srcp0_ystride = pipe->src_width;
		if ((pipe->src_format == MDP_Y_CRCB_H1V1) ||
			(pipe->src_format == MDP_Y_CBCR_H1V1) ||
			(pipe->src_format == MDP_Y_CRCB_H1V2) ||
			(pipe->src_format == MDP_Y_CBCR_H1V2)) {
			if (pipe->src_width > YUV_444_MAX_WIDTH)
				pipe->srcp1_ystride = pipe->src_width << 2;
			else
				pipe->srcp1_ystride = pipe->src_width << 1;
		} else
			pipe->srcp1_ystride = pipe->src_width;

	} else if (pipe->fetch_plane == OVERLAY_PLANE_PLANAR) {
		if (overlay_version > 0) {
			img = &req->plane1_data;
			get_img(img, info, pipe, 1, &start, &len,
				&pipe->srcp1_file, &pipe->put1_need,
				&srcp1_ihdl);
			if (len == 0) {
				pr_err("%s: Error to get plane1\n", __func__);
				ret = -EINVAL;
				goto end;
			}
			pipe->srcp1_addr = start + img->offset;

			img = &req->plane2_data;
			get_img(img, info, pipe, 2, &start, &len,
				&pipe->srcp2_file, &pipe->put2_need,
				&srcp2_ihdl);
			if (len == 0) {
				pr_err("%s: Error to get plane2\n", __func__);
				ret = -EINVAL;
				goto end;
			}
			pipe->srcp2_addr = start + img->offset;
		} else {
			/* derive plane addresses from the single buffer */
			if (pipe->src_format == MDP_Y_CR_CB_GH2V2) {
				addr += (ALIGN(pipe->src_width, 16) *
					pipe->src_height);
				pipe->srcp1_addr = addr;
				addr += ((ALIGN((pipe->src_width / 2), 16)) *
					(pipe->src_height / 2));
				pipe->srcp2_addr = addr;
			} else {
				addr += (pipe->src_width * pipe->src_height);
				pipe->srcp1_addr = addr;
				addr += ((pipe->src_width / 2) *
					(pipe->src_height / 2));
				pipe->srcp2_addr = addr;
			}
		}
		/* mdp planar format expects Cb in srcp1 and Cr in p2 */
		if ((pipe->src_format == MDP_Y_CR_CB_H2V2) ||
			(pipe->src_format == MDP_Y_CR_CB_GH2V2))
			swap(pipe->srcp1_addr, pipe->srcp2_addr);

		if (pipe->src_format == MDP_Y_CR_CB_GH2V2) {
			pipe->srcp0_ystride = ALIGN(pipe->src_width, 16);
			pipe->srcp1_ystride = ALIGN(pipe->src_width / 2, 16);
			pipe->srcp2_ystride = ALIGN(pipe->src_width / 2, 16);
		} else {
			pipe->srcp0_ystride = pipe->src_width;
			pipe->srcp1_ystride = pipe->src_width / 2;
			pipe->srcp2_ystride = pipe->src_width / 2;
		}
	}

	mdp4_overlay_mdp_perf_req(mfd);

#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT)
	pipe->mfd = mfd;
#endif

	/* hand the pipe to whichever interface drives its mixer */
	if (pipe->mixer_num == MDP4_MIXER0) {
		if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
			/* cndx = 0 */
			mdp4_dsi_cmd_pipe_queue(0, pipe);
		} else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
			/* cndx = 0 */
			mdp4_dsi_video_pipe_queue(0, pipe);
		} else if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
			/* cndx = 0 */
			mdp4_lcdc_pipe_queue(0, pipe);
		}
	} else if (pipe->mixer_num == MDP4_MIXER1) {
		if (ctrl->panel_mode & MDP4_PANEL_DTV)
			mdp4_dtv_pipe_queue(0, pipe);/* cndx = 0 */
	} else if (pipe->mixer_num == MDP4_MIXER2) {
		ctrl->mixer2_played++;
		if (ctrl->panel_mode & MDP4_PANEL_WRITEBACK)
			mdp4_wfd_pipe_queue(0, pipe);/* cndx = 0 */
	}

	/* synchronous play: unmap now unless NOWAIT was requested */
	if (!(pipe->flags & MDP_OV_PLAY_NOWAIT))
		mdp4_iommu_unmap(pipe);
	mdp4_stat.overlay_play[pipe->mixer_num]++;

end:
	mutex_unlock(&mfd->dma->ov_mutex);

	return ret;
}
/*
 * Commit all staged pipes for the panel's mixer.
 *
 * Waits on release fences, runs the interface-specific commit and
 * signals the retire timeline.  Every error path funnels through the
 * exit label so msm_fb_release_busy() is balanced; the DSI command
 * commit may hand busy-release off to its dma-done handler, in which
 * case it clears release_busy.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int mdp4_overlay_commit(struct fb_info *info)
{
	int ret = 0, release_busy = true;
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
	int mixer;

	if (mfd == NULL) {
		ret = -ENODEV;
		goto mdp4_overlay_commit_exit;
	}

	if (!mfd->panel_power_on) {
		ret = -EINVAL;
		goto mdp4_overlay_commit_exit;
	}

	mixer = mfd->panel_info.pdest;	/* DISPLAY_1 or DISPLAY_2 */
	if (mixer >= MDP4_MIXER_MAX) {
		/* was a bare return that skipped msm_fb_release_busy(),
		 * unlike every other pre-lock error path */
		ret = -EPERM;
		goto mdp4_overlay_commit_exit;
	}

	mutex_lock(&mfd->dma->ov_mutex);

	msm_fb_wait_for_fence(mfd);

	switch (mfd->panel.type) {
	case MIPI_CMD_PANEL:
		mdp4_dsi_cmd_pipe_commit(0, 1, &release_busy);
		break;
	case MIPI_VIDEO_PANEL:
		mdp4_dsi_video_pipe_commit(0, 1);
		break;
	case LCDC_PANEL:
		mdp4_lcdc_pipe_commit(0, 1);
		break;
	case DTV_PANEL:
		mdp4_dtv_pipe_commit(0, 1);
		break;
	case WRITEBACK_PANEL:
		mdp4_wfd_pipe_commit(mfd, 0, 1);
		break;
	default:
		pr_err("Panel Not Supported for Commit");
		ret = -EINVAL;
		break;
	}

	msm_fb_signal_timeline(mfd);
	mdp4_unmap_sec_resource(mfd);

	if (release_busy)
		mutex_unlock(&mfd->dma->ov_mutex);

mdp4_overlay_commit_exit:
	if (release_busy)
		msm_fb_release_busy(mfd);
	return ret;
}
/* Post-commit hook: lower clocks/bandwidth back down (perf_upd flag 0). */
void mdp4_overlay_commit_finish(struct fb_info *info)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;

	mdp4_overlay_mdp_perf_upd(mfd, 0);
}
/* Pairs an IOMMU context-bank device name (for msm_iommu_get_ctx())
 * with the MSM IOMMU domain index (for msm_get_iommu_domain()) it
 * should be attached to. */
struct msm_iommu_ctx {
	char *name;	/* context bank device name */
	int domain;	/* iommu domain index to attach to */
};
/*
 * Context banks used when read and write traffic share a single
 * domain (mdp_iommu_split_domain == 0): every bank attaches to
 * DISPLAY_READ_DOMAIN.
 */
static struct msm_iommu_ctx msm_iommu_ctx_names[] = {
	/* Display read*/
	{
		.name = "mdp_port0_cb0",
		.domain = DISPLAY_READ_DOMAIN,
	},
	/* Display read*/
	{
		.name = "mdp_port0_cb1",
		.domain = DISPLAY_READ_DOMAIN,
	},
	/* Display write */
	{
		.name = "mdp_port1_cb0",
		.domain = DISPLAY_READ_DOMAIN,
	},
	/* Display write */
	{
		.name = "mdp_port1_cb1",
		.domain = DISPLAY_READ_DOMAIN,
	},
};
/*
 * Context banks used when mdp_iommu_split_domain is set: cb0 of each
 * port attaches to the read domain, cb1 to the write domain.
 * NOTE(review): the original read/write entry labels disagreed with
 * the .domain values; the comments below follow the domains actually
 * assigned — confirm against the SoC's IOMMU topology.
 */
static struct msm_iommu_ctx msm_iommu_split_ctx_names[] = {
	/* port 0, context bank 0 -> read domain */
	{
		.name = "mdp_port0_cb0",
		.domain = DISPLAY_READ_DOMAIN,
	},
	/* port 0, context bank 1 -> write domain */
	{
		.name = "mdp_port0_cb1",
		.domain = DISPLAY_WRITE_DOMAIN,
	},
	/* port 1, context bank 0 -> read domain */
	{
		.name = "mdp_port1_cb0",
		.domain = DISPLAY_READ_DOMAIN,
	},
	/* port 1, context bank 1 -> write domain */
	{
		.name = "mdp_port1_cb1",
		.domain = DISPLAY_WRITE_DOMAIN,
	},
};
/*
 * One-time attach of the MDP IOMMU context banks to their domains.
 * Picks the split or unified name table based on
 * mdp_iommu_split_domain; missing contexts or domains are skipped,
 * attach failures are reported via WARN but do not abort the loop.
 */
void mdp4_iommu_attach(void)
{
	static int done;
	struct msm_iommu_ctx *ctx_names;
	struct iommu_domain *domain;
	int i, arr_size;

	if (done)
		return;

	if (mdp_iommu_split_domain) {
		ctx_names = msm_iommu_split_ctx_names;
		arr_size = ARRAY_SIZE(msm_iommu_split_ctx_names);
	} else {
		ctx_names = msm_iommu_ctx_names;
		arr_size = ARRAY_SIZE(msm_iommu_ctx_names);
	}

	for (i = 0; i < arr_size; i++) {
		int domain_idx = ctx_names[i].domain;
		struct device *ctx = msm_iommu_get_ctx(ctx_names[i].name);

		if (!ctx)
			continue;

		domain = msm_get_iommu_domain(domain_idx);
		if (!domain)
			continue;

		if (iommu_attach_device(domain, ctx)) {
			WARN(1, "%s: could not attach domain %d to context %s."
				" iommu programming will not occur.\n",
				__func__, domain_idx,
				ctx_names[i].name);
			continue;
		}
	}

	done = 1;
}
/*
 * Allocate an MDP4 pipe for a v4l2 overlay on mixer0.
 *
 * The incoming request is normalized to a fresh, fully opaque,
 * bottom-zorder layer before being handed to mdp4_overlay_req2pipe().
 * On success *ppipe receives the configured pipe.
 *
 * Returns 0 on success, negative errno from req2pipe on failure.
 */
int mdp4_v4l2_overlay_set(struct fb_info *info, struct mdp_overlay *req,
	struct mdp4_overlay_pipe **ppipe)
{
	struct mdp4_overlay_pipe *pipe;
	int err;
	struct msm_fb_data_type *mfb = info->par;

	/* v4l2 overlays are always new, opaque, zorder-0 requests */
	req->z_order = 0;
	req->id = MSMFB_NEW_REQUEST;
	req->is_fg = false;
	req->alpha = 0xff;
	err = mdp4_overlay_req2pipe(req, MDP4_MIXER0, &pipe, mfb);
	if (err < 0) {
		pr_err("%s:Could not allocate MDP overlay pipe\n", __func__);
		return err;
	}

	mdp4_mixer_blend_setup(pipe->mixer_num);
	*ppipe = pipe;

	return 0;
}
/* Unstage and free a v4l2 overlay pipe (immediate flush + stage-down). */
void mdp4_v4l2_overlay_clear(struct mdp4_overlay_pipe *pipe)
{
	mdp4_overlay_reg_flush(pipe, 1);
	mdp4_mixer_stage_down(pipe, 1);
	mdp4_overlay_pipe_free(pipe, 1);
}
/*
 * mdp4_v4l2_overlay_play - program source buffer addresses into a pipe
 * @info: framebuffer device the overlay belongs to
 * @pipe: pipe previously set up by mdp4_v4l2_overlay_set()
 * @srcp0_addr: luma plane physical address
 * @srcp1_addr: first chroma plane address (0 = derive from srcp0)
 * @srcp2_addr: second chroma plane address (0 = derive from the first)
 *
 * Returns 0 on success, -EINTR if interrupted while acquiring the overlay
 * mutex, or -EINVAL for an unsupported source format.
 */
int mdp4_v4l2_overlay_play(struct fb_info *info, struct mdp4_overlay_pipe *pipe,
	unsigned long srcp0_addr, unsigned long srcp1_addr,
	unsigned long srcp2_addr)
{
	struct msm_fb_data_type *mfd = info->par;
	/*
	 * BUG FIX: err was uninitialized; the success path falls through to
	 * the "done:" label and returned garbage.  Initialize to 0.
	 */
	int err = 0;

	if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
		return -EINTR;

	switch (pipe->src_format) {
	case MDP_Y_CR_CB_H2V2:
		/* YUV420 */
		pipe->srcp0_addr = srcp0_addr;
		pipe->srcp0_ystride = pipe->src_width;
		/*
		 * For YUV420, the luma plane is 1 byte per pixel times
		 * num of pixels in the image Also, the planes are
		 * switched in MDP, srcp2 is actually first chroma plane
		 */
		pipe->srcp2_addr = srcp1_addr ? srcp1_addr :
			pipe->srcp0_addr + (pipe->src_width * pipe->src_height);
		pipe->srcp2_ystride = pipe->src_width/2;
		/*
		 * The chroma planes are half the size of the luma
		 * planes
		 */
		pipe->srcp1_addr = srcp2_addr ? srcp2_addr :
			pipe->srcp2_addr +
			(pipe->src_width * pipe->src_height / 4);
		pipe->srcp1_ystride = pipe->src_width/2;
		break;
	case MDP_Y_CRCB_H2V2:
		/* NV12: single interleaved chroma plane, full-width stride */
		pipe->srcp0_addr = srcp0_addr;
		pipe->srcp0_ystride = pipe->src_width;
		pipe->srcp1_addr = srcp1_addr ? srcp1_addr :
			pipe->srcp0_addr +
			(pipe->src_width * pipe->src_height);
		pipe->srcp1_ystride = pipe->src_width;
		break;
	default:
		pr_err("%s: format (%u) is not supported\n", __func__,
			pipe->src_format);
		err = -EINVAL;
		goto done;
	}

	pr_debug("%s: pipe ndx=%d stage=%d format=%x\n", __func__,
		pipe->pipe_ndx, pipe->mixer_stage, pipe->src_format);

	if (pipe->pipe_type == OVERLAY_TYPE_VIDEO)
		mdp4_overlay_vg_setup(pipe);
	else
		mdp4_overlay_rgb_setup(pipe);

	if (ctrl->panel_mode & MDP4_PANEL_LCDC)
		mdp4_overlay_reg_flush(pipe, 1);

	mdp4_mixer_stage_up(pipe, 0); /* mixer stage commit commits this */
	mdp4_mixer_stage_commit(pipe->mixer_num);

#ifdef V4L2_VSYNC
	/*
	 * TODO: incorporate v4l2 into vsycn driven mechanism
	 */
	if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
		mdp4_overlay_lcdc_vsync_push(mfd, pipe);
	} else {
#ifdef CONFIG_FB_MSM_MIPI_DSI
		if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
			mdp4_dsi_cmd_dma_busy_wait(mfd);
			mdp4_dsi_cmd_kickoff_video(mfd, pipe);
		}
#else
		if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
			mdp4_mddi_dma_busy_wait(mfd);
			mdp4_mddi_kickoff_video(mfd, pipe);
		}
#endif
	}
#endif

done:
	mutex_unlock(&mfd->dma->ov_mutex);
	return err;
}
/*
 * mdp4_overlay_reset - clear the cached overlay performance-request state.
 *
 * Declared (void): the old empty parameter list left the arguments
 * unchecked (pre-C23 semantics).  Always returns 0.
 */
int mdp4_overlay_reset(void)
{
	memset(&perf_request, 0, sizeof(perf_request));
	memset(&perf_current, 0, sizeof(perf_current));
	return 0;
}
|
OlegKyiashko/LGOGP-kernel
|
drivers/video/msm/mdp4_overlay.c
|
C
|
gpl-2.0
| 112,817
|
/*
* linux/drivers/mmc/core/core.c
*
* Copyright (C) 2003-2004 Russell King, All Rights Reserved.
* SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
* Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
* MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
* Copyright (C) 2016 XiaoMi, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/devfreq.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/jiffies.h>
#include <trace/events/mmc.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>
#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"
#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"
/* If the device is not responding */
#define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
/*
* Background operations can take a long time, depending on the housekeeping
* operations the card has to perform.
*/
#define MMC_BKOPS_MAX_TIMEOUT (30 * 1000) /* max time to wait in ms */
/* Workqueue for all deferred MMC core work (detect, bkops, etc.). */
static struct workqueue_struct *workqueue;
/* Initialization frequencies tried in descending order during probing (Hz). */
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
/*
* Internal function. Schedule delayed work in the MMC work queue.
*/
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	/* Delegate to the MMC-private workqueue created at module init. */
	return queue_delayed_work(workqueue, work, delay);
}
/*
* Internal function. Flush all scheduled work from the MMC work queue.
*/
static void mmc_flush_scheduled_work(void)
{
	/* Block until every pending item on the MMC workqueue has run. */
	flush_workqueue(workqueue);
}
#ifdef CONFIG_FAIL_MMC_REQUEST
/*
* Internal function. Inject random data errors.
* If mmc_data is NULL no errors are injected.
*/
/*
 * Fault injection: randomly fail completed data requests with one of the
 * listed errors and truncate bytes_xfered to a random sector count.
 * No-op for non-data requests or requests that already failed.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};
	unsigned int sectors;

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
	/*
	 * BUG FIX: a transfer shorter than one 512-byte sector made
	 * (bytes_xfered >> 9) zero and the modulo divide by zero.
	 */
	sectors = data->bytes_xfered >> 9;
	if (sectors)
		data->bytes_xfered = (prandom_u32() % sectors) << 9;
	else
		data->bytes_xfered = 0;
	data->fault_injected = true;
}
#else /* CONFIG_FAIL_MMC_REQUEST */
static inline void mmc_should_fail_request(struct mmc_host *host,
struct mmc_request *mrq)
{
}
#endif /* CONFIG_FAIL_MMC_REQUEST */
static bool mmc_is_data_request(struct mmc_request *mmc_request)
{
switch (mmc_request->cmd->opcode) {
case MMC_READ_SINGLE_BLOCK:
case MMC_READ_MULTIPLE_BLOCK:
case MMC_WRITE_BLOCK:
case MMC_WRITE_MULTIPLE_BLOCK:
return true;
default:
return false;
}
}
/* Open a busy interval for clock-scaling load accounting. */
static void mmc_clk_scaling_start_busy(struct mmc_host *host, bool lock_needed)
{
	struct mmc_devfeq_clk_scaling *scaling = &host->clk_scaling;

	if (!scaling->enable)
		return;

	if (lock_needed)
		spin_lock_bh(&scaling->lock);

	scaling->start_busy = ktime_get();
	scaling->is_busy_started = true;

	if (lock_needed)
		spin_unlock_bh(&scaling->lock);
}
/* Close the current busy interval and fold it into the busy-time total. */
static void mmc_clk_scaling_stop_busy(struct mmc_host *host, bool lock_needed)
{
	struct mmc_devfeq_clk_scaling *scaling = &host->clk_scaling;

	if (!scaling->enable)
		return;

	if (lock_needed)
		spin_lock_bh(&scaling->lock);

	if (scaling->is_busy_started) {
		scaling->total_busy_time_us +=
			ktime_to_us(ktime_sub(ktime_get(),
				scaling->start_busy));
		pr_debug("%s: accumulated busy time is %lu usec\n",
			mmc_hostname(host), scaling->total_busy_time_us);
		scaling->is_busy_started = false;
	} else {
		/* Stop without a matching start is a bookkeeping bug. */
		WARN_ON(1);
	}

	if (lock_needed)
		spin_unlock_bh(&scaling->lock);
}
/**
* mmc_cmdq_clk_scaling_start_busy() - start busy timer for data requests
* @host: pointer to mmc host structure
* @lock_needed: flag indication if locking is needed
*
* This function starts the busy timer in case it was not already started.
*/
void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
	bool lock_needed)
{
	struct mmc_devfeq_clk_scaling *scaling = &host->clk_scaling;

	if (!scaling->enable)
		return;

	if (lock_needed)
		spin_lock_bh(&scaling->lock);

	/* Only (re)arm the timer when it is idle and no DCMD is in flight. */
	if (!scaling->is_busy_started &&
	    !test_bit(CMDQ_STATE_DCMD_ACTIVE, &host->cmdq_ctx.curr_state)) {
		scaling->start_busy = ktime_get();
		scaling->is_busy_started = true;
	}

	if (lock_needed)
		spin_unlock_bh(&scaling->lock);
}
EXPORT_SYMBOL(mmc_cmdq_clk_scaling_start_busy);
/**
* mmc_cmdq_clk_scaling_stop_busy() - stop busy timer for last data requests
* @host: pointer to mmc host structure
* @lock_needed: flag indication if locking is needed
*
* This function stops the busy timer in case it is the last data request.
* In case the current request is not the last one, the busy time till
* now will be accumulated and the counter will be restarted.
*/
void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
	bool lock_needed, bool is_cmdq_dcmd)
{
	if (!host->clk_scaling.enable)
		return;

	if (lock_needed)
		spin_lock_bh(&host->clk_scaling.lock);

	/*
	 * For CQ mode: In completion of DCMD request, start busy time in
	 * case of pending data requests
	 */
	if (is_cmdq_dcmd) {
		if (host->cmdq_ctx.data_active_reqs) {
			host->clk_scaling.is_busy_started = true;
			host->clk_scaling.start_busy = ktime_get();
		}
		goto out;
	}

	/* Data-request completion: account time since the busy timer started. */
	host->clk_scaling.total_busy_time_us +=
		ktime_to_us(ktime_sub(ktime_get(),
			host->clk_scaling.start_busy));

	/* Restart the timer if more data requests are still in flight. */
	if (host->cmdq_ctx.data_active_reqs) {
		host->clk_scaling.is_busy_started = true;
		host->clk_scaling.start_busy = ktime_get();
	} else {
		host->clk_scaling.is_busy_started = false;
	}
out:
	if (lock_needed)
		spin_unlock_bh(&host->clk_scaling.lock);
}
EXPORT_SYMBOL(mmc_cmdq_clk_scaling_stop_busy);
/**
* mmc_can_scale_clk() - Check clock scaling capability
* @host: pointer to mmc host structure
*/
bool mmc_can_scale_clk(struct mmc_host *host)
{
	if (!host) {
		pr_err("bad host parameter\n");
		WARN_ON(1);
		return false;
	}

	/* Capability bit set by the host driver. */
	return !!(host->caps2 & MMC_CAP2_CLK_SCALE);
}
EXPORT_SYMBOL(mmc_can_scale_clk);
/*
 * devfreq get_dev_status callback: report busy vs. total time since the
 * previous poll, then reset the accumulators for the next interval.
 */
static int mmc_devfreq_get_dev_status(struct device *dev,
	struct devfreq_dev_status *status)
{
	struct mmc_host *host = container_of(dev, struct mmc_host, class_dev);
	struct mmc_devfeq_clk_scaling *clk_scaling;

	if (!host) {
		pr_err("bad host parameter\n");
		WARN_ON(1);
		return -EINVAL;
	}

	clk_scaling = &host->clk_scaling;

	if (!clk_scaling->enable)
		return 0;

	spin_lock_bh(&clk_scaling->lock);

	/* accumulate the busy time of ongoing work */
	memset(status, 0, sizeof(*status));
	if (clk_scaling->is_busy_started) {
		if (mmc_card_cmdq(host->card)) {
			/* the "busy-timer" will be restarted in case there
			 * are pending data requests */
			mmc_cmdq_clk_scaling_stop_busy(host, false, false);
		} else {
			/* stop+start closes the interval but keeps the timer armed */
			mmc_clk_scaling_stop_busy(host, false);
			mmc_clk_scaling_start_busy(host, false);
		}
	}

	status->busy_time = clk_scaling->total_busy_time_us;
	status->total_time = ktime_to_us(ktime_sub(ktime_get(),
		clk_scaling->measure_interval_start));
	clk_scaling->total_busy_time_us = 0;
	status->current_frequency = clk_scaling->curr_freq;
	clk_scaling->measure_interval_start = ktime_get();

	/*
	 * NOTE(review): the load computation below divides by
	 * status->total_time, which could be 0 on a zero-length interval —
	 * confirm polling guarantees a nonzero interval.
	 */
	pr_debug("%s: status: load = %lu%% - total_time=%lu busy_time = %lu, clk=%lu\n",
		mmc_hostname(host),
		(status->busy_time*100)/status->total_time,
		status->total_time, status->busy_time,
		status->current_frequency);

	spin_unlock_bh(&clk_scaling->lock);

	return 0;
}
/*
 * Clock scaling is only safe when the card is idle in TRAN state and not
 * in a mode where retuning is illegal (RPMB partition, ongoing BKOPS).
 */
static bool mmc_is_valid_state_for_clk_scaling(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	u32 status;

	/*
	 * If the current partition type is RPMB, clock switching may not
	 * work properly as sending tuning command (CMD21) is illegal in
	 * this mode.
	 */
	if (!card || (mmc_card_mmc(card) &&
			(card->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB ||
			mmc_card_doing_bkops(card))))
		return false;

	if (mmc_send_status(card, &status)) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		return false;
	}

	/* Only scale while the card sits idle in the transfer state. */
	return R1_CURRENT_STATE(status) == R1_STATE_TRAN;
}
/*
 * mmc_cmdq_halt_on_empty_queue() - wait for the CMDQ queue to drain,
 * then halt the queue engine.
 *
 * Returns 0 on success, -ERESTARTSYS if the wait was interrupted by a
 * signal, -EPERM if requests are unexpectedly still active, or the error
 * from mmc_cmdq_halt().
 */
int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host)
{
	int err;

	err = wait_event_interruptible(host->cmdq_ctx.queue_empty_wq,
				(!host->cmdq_ctx.active_reqs));
	/*
	 * BUG FIX: the interruptible wait's return value was silently
	 * overwritten; a signal must abort the halt attempt.
	 */
	if (err)
		return err;

	if (host->cmdq_ctx.active_reqs) {
		pr_err("%s: %s: unexpected active requests (%lu)\n",
			mmc_hostname(host), __func__,
			host->cmdq_ctx.active_reqs);
		return -EPERM;
	}

	err = mmc_cmdq_halt(host, true);
	if (err)
		pr_err("%s: %s: mmc_cmdq_halt failed (%d)\n",
			mmc_hostname(host), __func__, err);

	return err;
}
EXPORT_SYMBOL(mmc_cmdq_halt_on_empty_queue);
/*
 * mmc_clk_update_freq() - change the bus clock to @freq, clamped to the
 * card's supported range.  Halts the CMDQ engine around the switch when
 * command queueing is active, and notifies the host driver of the new
 * load level before and (on failure) after the attempt.
 *
 * Returns 0 on success or if the frequency is already current; a negative
 * errno otherwise.  Called with the host claimed.
 */
int mmc_clk_update_freq(struct mmc_host *host,
		unsigned long freq, enum mmc_load state)
{
	int err = 0;
	bool cmdq_mode;

	if (!host) {
		pr_err("bad host parameter\n");
		WARN_ON(1);
		return -EINVAL;
	}
	mmc_host_clk_hold(host);
	cmdq_mode = mmc_card_cmdq(host->card);

	/* make sure the card supports the frequency we want */
	if (unlikely(freq > host->card->clk_scaling_highest)) {
		freq = host->card->clk_scaling_highest;
		pr_warn("%s: %s: frequency was overridden to %lu\n",
				mmc_hostname(host), __func__,
				host->card->clk_scaling_highest);
	}

	if (unlikely(freq < host->card->clk_scaling_lowest)) {
		freq = host->card->clk_scaling_lowest;
		pr_warn("%s: %s: frequency was overridden to %lu\n",
			mmc_hostname(host), __func__,
			host->card->clk_scaling_lowest);
	}

	if (freq == host->clk_scaling.curr_freq)
		goto out;

	/* Tell the host driver about the upcoming load change first. */
	if (host->ops->notify_load) {
		err = host->ops->notify_load(host, state);
		if (err) {
			pr_err("%s: %s: fail on notify_load\n",
				mmc_hostname(host), __func__);
			goto out;
		}
	}

	/* CMDQ must be drained and halted before touching the bus speed. */
	if (cmdq_mode) {
		err = mmc_cmdq_halt_on_empty_queue(host);
		if (err) {
			pr_err("%s: %s: failed halting queue (%d)\n",
				mmc_hostname(host), __func__, err);
			goto halt_failed;
		}
	}

	if (!mmc_is_valid_state_for_clk_scaling(host)) {
		pr_debug("%s: invalid state for clock scaling - skipping",
			mmc_hostname(host));
		goto invalid_state;
	}

	err = host->bus_ops->change_bus_speed(host, &freq);
	if (!err)
		host->clk_scaling.curr_freq = freq;
	else
		pr_err("%s: %s: failed (%d) at freq=%lu\n",
			mmc_hostname(host), __func__, err, freq);

invalid_state:
	/* Unhalt regardless of whether the speed change succeeded. */
	if (cmdq_mode) {
		if (mmc_cmdq_halt(host, false))
			pr_err("%s: %s: cmdq unhalt failed\n",
			mmc_hostname(host), __func__);
	}

halt_failed:
	if (err) {
		/* restore previous state */
		if (host->ops->notify_load)
			if (host->ops->notify_load(host,
				host->clk_scaling.state))
				pr_err("%s: %s: fail on notify_load restore\n",
					mmc_hostname(host), __func__);
	}
out:
	mmc_host_clk_release(host);
	return err;
}
EXPORT_SYMBOL(mmc_clk_update_freq);
/*
 * devfreq target callback: request a clock change to *freq.  If the host
 * cannot be claimed without blocking the decision is recorded in
 * need_freq_change/target_freq and performed later by
 * mmc_deferred_scaling() from the MMC thread.
 */
static int mmc_devfreq_set_target(struct device *dev,
				unsigned long *freq, u32 devfreq_flags)
{
	struct mmc_host *host = container_of(dev, struct mmc_host, class_dev);
	struct mmc_devfeq_clk_scaling *clk_scaling;
	int err = 0;
	int abort;

	if (!(host && freq)) {
		pr_err("%s: unexpected host/freq parameter\n", __func__);
		err = -EINVAL;
		goto out;
	}

	clk_scaling = &host->clk_scaling;

	if (!clk_scaling->enable)
		goto out;

	pr_debug("%s: target freq = %lu (%s)\n", mmc_hostname(host),
		*freq, current->comm);

	if ((clk_scaling->curr_freq == *freq) ||
		clk_scaling->skip_clk_scale_freq_update)
		goto out;

	/* No need to scale the clocks if they are gated */
	if (!host->ios.clock)
		goto out;

	spin_lock_bh(&clk_scaling->lock);
	if (clk_scaling->clk_scaling_in_progress) {
		pr_debug("%s: clocks scaling is already in-progress by mmc thread\n",
			mmc_hostname(host));
		spin_unlock_bh(&clk_scaling->lock);
		goto out;
	}
	/* Record the request so the MMC thread can pick it up if we bail. */
	clk_scaling->need_freq_change = true;
	clk_scaling->target_freq = *freq;
	clk_scaling->state = *freq < clk_scaling->curr_freq ?
		MMC_LOAD_LOW : MMC_LOAD_HIGH;
	spin_unlock_bh(&clk_scaling->lock);

	abort = __mmc_claim_host(host, &clk_scaling->devfreq_abort);
	if (abort)
		goto out;

	/*
	 * In case we were able to claim host there is no need to
	 * defer the frequency change. It will be done now
	 */
	clk_scaling->need_freq_change = false;

	mmc_host_clk_hold(host);
	err = mmc_clk_update_freq(host, *freq, clk_scaling->state);
	if (err && err != -EAGAIN)
		pr_err("%s: clock scale to %lu failed with error %d\n",
			mmc_hostname(host), *freq, err);
	else
		pr_debug("%s: clock change to %lu finished successfully (%s)\n",
			mmc_hostname(host), *freq, current->comm);

	mmc_host_clk_release(host);
	mmc_release_host(host);
out:
	return err;
}
/**
* mmc_deferred_scaling() - scale clocks from data path (mmc thread context)
* @host: pointer to mmc host structure
*
* This function does clock scaling in case "need_freq_change" flag was set
* by the clock scaling logic.
*/
void mmc_deferred_scaling(struct mmc_host *host)
{
	unsigned long target_freq;
	int err;

	if (!host->clk_scaling.enable)
		return;

	spin_lock_bh(&host->clk_scaling.lock);

	/* Nothing to do if a change is running or none was requested. */
	if (host->clk_scaling.clk_scaling_in_progress ||
		!(host->clk_scaling.need_freq_change)) {
		spin_unlock_bh(&host->clk_scaling.lock);
		return;
	}

	/* Block concurrent devfreq decisions while we perform this one. */
	atomic_inc(&host->clk_scaling.devfreq_abort);
	target_freq = host->clk_scaling.target_freq;
	host->clk_scaling.clk_scaling_in_progress = true;
	host->clk_scaling.need_freq_change = false;
	spin_unlock_bh(&host->clk_scaling.lock);
	pr_debug("%s: doing deferred frequency change (%lu) (%s)\n",
				mmc_hostname(host),
				target_freq, current->comm);

	err = mmc_clk_update_freq(host, target_freq,
		host->clk_scaling.state);
	if (err && err != -EAGAIN)
		pr_err("%s: failed on deferred scale clocks (%d)\n",
			mmc_hostname(host), err);
	else
		pr_debug("%s: clocks were successfully scaled to %lu (%s)\n",
			mmc_hostname(host),
			target_freq, current->comm);
	host->clk_scaling.clk_scaling_in_progress = false;
	atomic_dec(&host->clk_scaling.devfreq_abort);
}
EXPORT_SYMBOL(mmc_deferred_scaling);
/*
 * Build the devfreq frequency table.  Without a platform-supplied table a
 * two-entry (lowest, highest) table is allocated; otherwise the supplied
 * table is clamped so its last entry is the card's highest frequency.
 */
static int mmc_devfreq_create_freq_table(struct mmc_host *host)
{
	int i;
	struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;

	pr_debug("%s: supported: lowest=%lu, highest=%lu\n",
		mmc_hostname(host),
		host->card->clk_scaling_lowest,
		host->card->clk_scaling_highest);

	if (!clk_scaling->freq_table) {
		pr_debug("%s: no frequency table defined -  setting default\n",
			mmc_hostname(host));
		clk_scaling->freq_table = kzalloc(
			2*sizeof(*(clk_scaling->freq_table)), GFP_KERNEL);
		if (!clk_scaling->freq_table)
			return -ENOMEM;
		clk_scaling->freq_table[0] = host->card->clk_scaling_lowest;
		clk_scaling->freq_table[1] = host->card->clk_scaling_highest;
		clk_scaling->freq_table_sz = 2;
		goto out;
	}

	if (host->card->clk_scaling_lowest >
		clk_scaling->freq_table[0])
		pr_debug("%s: frequency table undershot possible freq\n",
			mmc_hostname(host));

	/*
	 * NOTE(review): "mmc1" is special-cased to a single (highest-only)
	 * frequency — presumably to pin a specific slot at full speed;
	 * confirm this board-specific assumption still holds.
	 */
	if (strcmp(mmc_hostname(host), "mmc1") == 0) {
		clk_scaling->freq_table[0] = host->card->clk_scaling_highest;
	} else {
		/* Truncate the table at the first entry >= the card's maximum. */
		for (i = 0; i < clk_scaling->freq_table_sz; i++) {
			if (clk_scaling->freq_table[i] < host->card->clk_scaling_highest) {
				continue;
			} else {
				break;
			}
		}
		clk_scaling->freq_table[i] = host->card->clk_scaling_highest;
		clk_scaling->freq_table_sz = i + 1;
	}

out:
	clk_scaling->devfreq_profile.freq_table = clk_scaling->freq_table;
	clk_scaling->devfreq_profile.max_state = clk_scaling->freq_table_sz;
	for (i = 0; i < clk_scaling->freq_table_sz; i++)
		pr_debug("%s: freq[%d] = %u\n",
			mmc_hostname(host), i, clk_scaling->freq_table[i]);

	return 0;
}
/**
* mmc_init_devfreq_clk_scaling() - Initialize clock scaling
* @host: pointer to mmc host structure
*
* Initialize clock scaling for supported hosts. It is assumed that the caller
* ensure clock is running at maximum possible frequency before calling this
* function. Shall use struct devfreq_simple_ondemand_data to configure
* governor.
*/
int mmc_init_clk_scaling(struct mmc_host *host)
{
int err;
if (!host || !host->card) {
pr_err("%s: unexpected host/card parameters\n",
__func__);
return -EINVAL;
}
if (!mmc_can_scale_clk(host) ||
!host->bus_ops->change_bus_speed) {
pr_debug("%s: clock scaling is not supported\n",
mmc_hostname(host));
return 0;
}
pr_debug("registering %s dev (%p) to devfreq",
mmc_hostname(host),
mmc_classdev(host));
if (host->clk_scaling.devfreq) {
pr_err("%s: dev is already registered for dev %p\n",
mmc_hostname(host),
mmc_dev(host));
return -EPERM;
}
spin_lock_init(&host->clk_scaling.lock);
atomic_set(&host->clk_scaling.devfreq_abort, 0);
host->clk_scaling.curr_freq = host->ios.clock;
host->clk_scaling.clk_scaling_in_progress = false;
host->clk_scaling.need_freq_change = false;
host->clk_scaling.is_busy_started = false;
host->clk_scaling.devfreq_profile.polling_ms =
host->clk_scaling.polling_delay_ms;
host->clk_scaling.devfreq_profile.get_dev_status =
mmc_devfreq_get_dev_status;
host->clk_scaling.devfreq_profile.target = mmc_devfreq_set_target;
host->clk_scaling.devfreq_profile.initial_freq = host->ios.clock;
host->clk_scaling.ondemand_gov_data.simple_scaling = true;
host->clk_scaling.ondemand_gov_data.upthreshold =
host->clk_scaling.upthreshold;
host->clk_scaling.ondemand_gov_data.downdifferential =
host->clk_scaling.upthreshold - host->clk_scaling.downthreshold;
err = mmc_devfreq_create_freq_table(host);
if (err) {
pr_err("%s: fail to create devfreq frequency table\n",
mmc_hostname(host));
return err;
}
pr_debug("%s: adding devfreq with: upthreshold=%u downthreshold=%u polling=%u\n",
mmc_hostname(host),
host->clk_scaling.ondemand_gov_data.upthreshold,
host->clk_scaling.ondemand_gov_data.downdifferential,
host->clk_scaling.devfreq_profile.polling_ms);
host->clk_scaling.devfreq = devfreq_add_device(
mmc_classdev(host),
&host->clk_scaling.devfreq_profile,
"simple_ondemand",
&host->clk_scaling.ondemand_gov_data);
if (!host->clk_scaling.devfreq) {
pr_err("%s: unable to register with devfreq\n",
mmc_hostname(host));
return -EPERM;
}
pr_debug("%s: clk scaling is enabled for device %s (%p) with devfreq %p (clock = %uHz)\n",
mmc_hostname(host),
dev_name(mmc_classdev(host)),
mmc_classdev(host),
host->clk_scaling.devfreq,
host->ios.clock);
host->clk_scaling.enable = true;
return err;
}
EXPORT_SYMBOL(mmc_init_clk_scaling);
/**
* mmc_suspend_clk_scaling() - suspend clock scaling
* @host: pointer to mmc host structure
*
* This API will suspend devfreq feature for the specific host.
* The statistics collected by mmc will be cleared.
* This function is intended to be called by the pm callbacks
* (e.g. runtime_suspend, suspend) of the mmc device
*/
int mmc_suspend_clk_scaling(struct mmc_host *host)
{
	int rc;

	if (!host) {
		WARN(1, "bad host parameter\n");
		return -EINVAL;
	}

	if (!mmc_can_scale_clk(host) || !host->clk_scaling.enable)
		return 0;

	if (!host->clk_scaling.devfreq) {
		pr_err("%s: %s: no devfreq is assosiated with this device\n",
			mmc_hostname(host), __func__);
		return -EPERM;
	}

	/* Abort any in-flight devfreq decision and wake up waiters. */
	atomic_inc(&host->clk_scaling.devfreq_abort);
	wake_up(&host->wq);

	rc = devfreq_suspend_device(host->clk_scaling.devfreq);
	if (rc) {
		pr_err("%s: %s: failed to suspend devfreq\n",
			mmc_hostname(host), __func__);
		return rc;
	}

	/* Statistics are discarded across a suspend. */
	host->clk_scaling.enable = false;
	host->clk_scaling.total_busy_time_us = 0;

	pr_debug("%s: devfreq suspended\n", mmc_hostname(host));
	return 0;
}
EXPORT_SYMBOL(mmc_suspend_clk_scaling);
/**
* mmc_resume_clk_scaling() - resume clock scaling
* @host: pointer to mmc host structure
*
* This API will resume devfreq feature for the specific host.
* This API is intended to be called by the pm callbacks
* (e.g. runtime_suspend, suspend) of the mmc device
*/
int mmc_resume_clk_scaling(struct mmc_host *host)
{
	int err = 0;
	u32 max_clk_idx = 0;
	u32 devfreq_max_clk = 0;
	u32 devfreq_min_clk = 0;

	if (!host) {
		WARN(1, "bad host parameter\n");
		return -EINVAL;
	}

	if (!mmc_can_scale_clk(host))
		return 0;

	if (!host->clk_scaling.devfreq) {
		pr_err("%s: %s: no devfreq is assosiated with this device\n",
			mmc_hostname(host), __func__);
		return -EPERM;
	}

	/* Re-enable devfreq decisions that suspend had aborted. */
	atomic_set(&host->clk_scaling.devfreq_abort, 0);

	max_clk_idx = host->clk_scaling.freq_table_sz - 1;
	devfreq_max_clk = host->clk_scaling.freq_table[max_clk_idx];
	devfreq_min_clk = host->clk_scaling.freq_table[0];

	/*
	 * Resync curr_freq with reality: assume max unless the bus clock is
	 * still below the card's highest supported frequency.
	 */
	host->clk_scaling.curr_freq = devfreq_max_clk;
	if (host->ios.clock < host->card->clk_scaling_highest)
		host->clk_scaling.curr_freq = devfreq_min_clk;

	host->clk_scaling.clk_scaling_in_progress = false;
	host->clk_scaling.need_freq_change = false;

	err = devfreq_resume_device(host->clk_scaling.devfreq);
	if (err) {
		pr_err("%s: %s: failed to resume devfreq (%d)\n",
			mmc_hostname(host), __func__, err);
	} else {
		host->clk_scaling.enable = true;
		pr_debug("%s: devfreq resumed\n", mmc_hostname(host));
	}

	return err;
}
EXPORT_SYMBOL(mmc_resume_clk_scaling);
/**
* mmc_exit_devfreq_clk_scaling() - Disable clock scaling
* @host: pointer to mmc host structure
*
* Disable clock scaling permanently.
*/
int mmc_exit_clk_scaling(struct mmc_host *host)
{
	int rc;

	if (!host) {
		pr_err("%s: bad host parameter\n", __func__);
		WARN_ON(1);
		return -EINVAL;
	}

	if (!mmc_can_scale_clk(host))
		return 0;

	if (!host->clk_scaling.devfreq) {
		pr_err("%s: %s: no devfreq is assosiated with this device\n",
			mmc_hostname(host), __func__);
		return -EPERM;
	}

	/* Quiesce devfreq before tearing the device down. */
	rc = mmc_suspend_clk_scaling(host);
	if (rc) {
		pr_err("%s: %s: fail to suspend clock scaling (%d)\n",
			mmc_hostname(host), __func__, rc);
		return rc;
	}

	rc = devfreq_remove_device(host->clk_scaling.devfreq);
	if (rc) {
		pr_err("%s: remove devfreq failed (%d)\n",
			mmc_hostname(host), rc);
		return rc;
	}

	host->clk_scaling.devfreq = NULL;
	atomic_set(&host->clk_scaling.devfreq_abort, 1);

	pr_debug("%s: devfreq was removed\n", mmc_hostname(host));
	return 0;
}
EXPORT_SYMBOL(mmc_exit_clk_scaling);
/**
* mmc_request_done - finish processing an MMC request
* @host: MMC host which completed request
* @mrq: MMC request which request
*
* MMC drivers should call this function when they have completed
* their processing of a request.
*/
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;
#ifdef CONFIG_MMC_PERF_PROFILING
	ktime_t diff;
#endif

	/* Close the clock-scaling busy window opened at request start. */
	if (host->clk_scaling.is_busy_started)
		mmc_clk_scaling_stop_busy(host, true);

	/* SPI: an illegal-command response will never succeed on retry. */
	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		/* Possibly inject a fault before reporting completion. */
		mmc_should_fail_request(host, mrq);

		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
#ifdef CONFIG_MMC_PERF_PROFILING
			/* Accumulate per-direction byte counts and latency. */
			if (host->perf_enable) {
				diff = ktime_sub(ktime_get(), host->perf.start);
				if (mrq->data->flags == MMC_DATA_READ) {
					host->perf.rbytes_drv +=
							mrq->data->bytes_xfered;
					host->perf.rtime_drv =
						ktime_add(host->perf.rtime_drv,
							diff);
				} else {
					host->perf.wbytes_drv +=
						mrq->data->bytes_xfered;
					host->perf.wtime_drv =
						ktime_add(host->perf.wtime_drv,
							diff);
				}
			}
#endif
			pr_debug("%s:     %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
			trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, mrq->data);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		/*
		 * Balances the mmc_host_clk_hold() in mmc_start_request().
		 * NOTE(review): the retry branch above does not release —
		 * presumably the retried request reuses the reference;
		 * confirm against mmc_wait_for_req_done().
		 */
		mmc_host_clk_release(host);
	}
}
EXPORT_SYMBOL(mmc_request_done);
/*
 * Hand a fully-described request to the host driver.  Validates data
 * geometry against host limits, clears per-segment error fields, takes a
 * clock reference (released in mmc_request_done()) and, for data
 * requests, runs any deferred clock scaling before starting the busy
 * timer.  Must be called with the host claimed.
 */
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		/* Data geometry must fit the host controller's limits. */
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		/* The scatterlist must cover exactly blocks * blksz bytes. */
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
#ifdef CONFIG_MMC_PERF_PROFILING
		if (host->perf_enable)
			host->perf.start = ktime_get();
#endif
	}
	/* Released in mmc_request_done() once the request completes. */
	mmc_host_clk_hold(host);
	led_trigger_event(host->led, LED_FULL);

	if (mmc_is_data_request(mrq)) {
		mmc_deferred_scaling(host);
		mmc_clk_scaling_start_busy(host, true);
	}

	host->ops->request(host, mrq);
}
/*
 * Issue a request through the command-queue engine.  Mirrors
 * mmc_start_request() but dispatches via cmdq_ops->request and performs
 * no clock-scaling bookkeeping here.
 */
static void mmc_start_cmdq_request(struct mmc_host *host,
				   struct mmc_request *mrq)
{
	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x tsac %lu ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / NSEC_PER_MSEC,
			mrq->data->timeout_clks);

		/* Data geometry must fit the host controller's limits. */
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
	}

	if (mrq->cmd) {
		mrq->cmd->error = 0;
		mrq->cmd->mrq = mrq;
	}

	mmc_host_clk_hold(host);
	if (likely(host->cmdq_ops->request))
		host->cmdq_ops->request(host, mrq);
	else
		pr_err("%s: %s: issue request failed\n", mmc_hostname(host),
				__func__);
}
/**
* mmc_blk_init_bkops_statistics - initialize bkops statistics
* @card: MMC card to start BKOPS
*
* Initialize and enable the bkops statistics
*/
void mmc_blk_init_bkops_statistics(struct mmc_card *card)
{
	struct mmc_bkops_stats *stats;
	int level;

	if (!card)
		return;

	stats = &card->bkops.stats;

	/* Zero all counters and enable collection under the stats lock. */
	spin_lock(&stats->lock);
	stats->manual_start = 0;
	stats->hpi = 0;
	stats->auto_start = 0;
	stats->auto_stop = 0;
	for (level = 0; level < MMC_BKOPS_NUM_SEVERITY_LEVELS; level++)
		stats->level[level] = 0;
	stats->enabled = true;
	spin_unlock(&stats->lock);
}
/* Count one HPI (high-priority interrupt of ongoing BKOPS). */
static void mmc_update_bkops_hpi(struct mmc_bkops_stats *stats)
{
	spin_lock_irq(&stats->lock);
	if (stats->enabled)
		stats->hpi++;
	spin_unlock_irq(&stats->lock);
}
/* Count one manually started BKOPS operation. */
static void mmc_update_bkops_start(struct mmc_bkops_stats *stats)
{
	spin_lock_irq(&stats->lock);
	if (stats->enabled)
		stats->manual_start++;
	spin_unlock_irq(&stats->lock);
}
/* Count one enabling of automatic BKOPS. */
static void mmc_update_bkops_auto_on(struct mmc_bkops_stats *stats)
{
	spin_lock_irq(&stats->lock);
	if (stats->enabled)
		stats->auto_start++;
	spin_unlock_irq(&stats->lock);
}
/* Count one disabling of automatic BKOPS. */
static void mmc_update_bkops_auto_off(struct mmc_bkops_stats *stats)
{
	spin_lock_irq(&stats->lock);
	if (stats->enabled)
		stats->auto_stop++;
	spin_unlock_irq(&stats->lock);
}
/* Count one observation of the given BKOPS severity level. */
static void mmc_update_bkops_level(struct mmc_bkops_stats *stats,
					unsigned level)
{
	BUG_ON(level >= MMC_BKOPS_NUM_SEVERITY_LEVELS);
	spin_lock_irq(&stats->lock);
	if (stats->enabled)
		stats->level[level]++;
	spin_unlock_irq(&stats->lock);
}
/**
* mmc_set_auto_bkops - set auto BKOPS for supported cards
* @card: MMC card to start BKOPS
* @enable: enable/disable flag
*
* Configure the card to run automatic BKOPS.
*
* Should be called when host is claimed.
*/
int mmc_set_auto_bkops(struct mmc_card *card, bool enable)
{
	int ret = 0;
	u8 bkops_en;

	BUG_ON(!card);
	enable = !!enable;

	if (unlikely(!mmc_card_support_auto_bkops(card))) {
		pr_err("%s: %s: card doesn't support auto bkops\n",
				mmc_hostname(card->host), __func__);
		return -EPERM;
	}

	/* Compute the new EXT_CSD BKOPS_EN value; bail if already there. */
	if (enable) {
		if (mmc_card_doing_auto_bkops(card))
			goto out;
		bkops_en = card->ext_csd.bkops_en | EXT_CSD_BKOPS_AUTO_EN;
	} else {
		if (!mmc_card_doing_auto_bkops(card))
			goto out;
		bkops_en = card->ext_csd.bkops_en & ~EXT_CSD_BKOPS_AUTO_EN;
	}

	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN,
			bkops_en, 0);
	if (ret) {
		pr_err("%s: %s: error in setting auto bkops to %d (%d)\n",
			mmc_hostname(card->host), __func__, enable, ret);
	} else {
		/* Keep the cached card state and statistics in sync. */
		if (enable) {
			mmc_card_set_auto_bkops(card);
			mmc_update_bkops_auto_on(&card->bkops.stats);
		} else {
			mmc_card_clr_auto_bkops(card);
			mmc_update_bkops_auto_off(&card->bkops.stats);
		}
		card->ext_csd.bkops_en = bkops_en;
		pr_debug("%s: %s: bkops state %x\n",
				mmc_hostname(card->host), __func__, bkops_en);
	}
out:
	return ret;
}
EXPORT_SYMBOL(mmc_set_auto_bkops);
/**
* mmc_check_bkops - check BKOPS for supported cards
* @card: MMC card to check BKOPS
*
* Read the BKOPS status in order to determine whether the
* card requires bkops to be started.
*/
void mmc_check_bkops(struct mmc_card *card)
{
	int rc;

	BUG_ON(!card);

	/* Nothing to check while a BKOPS run is already in progress. */
	if (mmc_card_doing_bkops(card))
		return;

	rc = mmc_read_bkops_status(card);
	if (rc) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), rc);
		return;
	}

	card->bkops.needs_check = false;

	mmc_update_bkops_level(&card->bkops.stats,
			       card->ext_csd.raw_bkops_status);

	/* Any nonzero severity means the card wants BKOPS to run. */
	card->bkops.needs_bkops = card->ext_csd.raw_bkops_status > 0;
}
EXPORT_SYMBOL(mmc_check_bkops);
/**
* mmc_start_manual_bkops - start BKOPS for supported cards
* @card: MMC card to start BKOPS
*
* Send START_BKOPS to the card.
* The function should be called with claimed host.
*/
void mmc_start_manual_bkops(struct mmc_card *card)
{
	int err;

	BUG_ON(!card);

	if (unlikely(!mmc_card_configured_manual_bkops(card)))
		return;

	if (mmc_card_doing_bkops(card))
		return;

	/* Fire BKOPS_START without waiting for the busy phase to finish. */
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_START,
				1, 0, false, true, false);
	if (err) {
		pr_err("%s: Error %d starting manual bkops\n",
				mmc_hostname(card->host), err);
	} else {
		mmc_card_set_doing_bkops(card);
		mmc_update_bkops_start(&card->bkops.stats);
		card->bkops.needs_bkops = false;
	}
}
EXPORT_SYMBOL(mmc_start_manual_bkops);
/*
 * mmc_wait_data_done() - done callback for data request
 * @mrq: done data request
 *
 * Wakes up mmc context, passed as a callback to host controller driver
 */
static void mmc_wait_data_done(struct mmc_request *mrq)
{
	unsigned long flags;
	struct mmc_context_info *context_info = &mrq->host->context_info;
	/*
	 * Set the flag and wake under context_info->lock so that
	 * mmc_wait_for_data_req_done() cannot miss the event.
	 */
	spin_lock_irqsave(&context_info->lock, flags);
	context_info->is_done_rcv = true;
	wake_up_interruptible(&context_info->wait);
	spin_unlock_irqrestore(&context_info->lock, flags);
}
/* Completion callback for synchronous requests started by __mmc_start_req(). */
static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}
/*
 *__mmc_start_data_req() - starts data request
 * @host: MMC host to start the request
 * @mrq: data request to start
 *
 * Sets the done callback to be called when request is completed by the card.
 * Starts data mmc request execution
 * Returns 0 on success, -ENOMEDIUM if the card has been removed
 * (in which case the done callback is still invoked with the error).
 */
static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
{
	mrq->done = mmc_wait_data_done;
	mrq->host = host;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		/* Complete immediately so the waiter is not stranded. */
		mmc_wait_data_done(mrq);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);
	return 0;
}
/*
 * __mmc_start_req() - start a synchronous request.
 * @host: MMC host to start the request
 * @mrq: request to start
 *
 * Arms mrq->completion (waited on by mmc_wait_for_req_done()) and
 * hands the request to the host driver.  Returns -ENOMEDIUM and
 * completes immediately if the card has been removed.
 */
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		complete(&mrq->completion);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);
	return 0;
}
/*
 * mmc_wait_for_data_req_done() - wait for request completed
 * @host: MMC host to prepare the command.
 * @mrq: MMC request to wait for
 * @next_req: next queued async request, or NULL if none
 *
 * Blocks MMC context till host controller will ack end of data request
 * execution or new request notification arrives from the block layer.
 * Handles command retries.
 *
 * Returns enum mmc_blk_status after checking errors.
 */
static int mmc_wait_for_data_req_done(struct mmc_host *host,
				      struct mmc_request *mrq,
				      struct mmc_async_req *next_req)
{
	struct mmc_command *cmd;
	struct mmc_context_info *context_info = &host->context_info;
	int err;	/* assigned on every break path below */
	bool is_done_rcv = false;
	unsigned long flags;
	while (1) {
		/* Sleep until either the request completes or the block
		 * layer signals a new request. */
		wait_event_interruptible(context_info->wait,
				(context_info->is_done_rcv ||
				 context_info->is_new_req));
		/* Snapshot is_done_rcv under the lock to pair with the
		 * setter in mmc_wait_data_done(). */
		spin_lock_irqsave(&context_info->lock, flags);
		is_done_rcv = context_info->is_done_rcv;
		context_info->is_waiting_last_req = false;
		spin_unlock_irqrestore(&context_info->lock, flags);
		if (is_done_rcv) {
			context_info->is_done_rcv = false;
			context_info->is_new_req = false;
			cmd = mrq->cmd;
			if (!cmd->error || !cmd->retries ||
			    mmc_card_removed(host->card)) {
				/* Done (or unrecoverable): classify via the
				 * async request's error checker. */
				err = host->areq->err_check(host->card,
							    host->areq);
				break; /* return err */
			} else {
				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
					mmc_hostname(host),
					cmd->opcode, cmd->error);
				cmd->retries--;
				cmd->error = 0;
				/* Re-issue directly to the host driver. */
				host->ops->request(host, mrq);
				continue; /* wait for done/new event again */
			}
		} else if (context_info->is_new_req) {
			context_info->is_new_req = false;
			/* Only abort the wait if the caller has no follow-up
			 * request to pipeline. */
			if (!next_req) {
				err = MMC_BLK_NEW_REQUEST;
				break; /* return err */
			}
		}
	}
	return err;
}
/*
 * mmc_wait_for_req_done() - block until a synchronous request finishes.
 * @host: MMC host the request was started on
 * @mrq: request started via __mmc_start_req()
 *
 * Waits on mrq->completion, retrying failed commands up to cmd->retries
 * times.  A sanitize/bkops command that timed out is first interrupted
 * with HPI to bring the card out of the programming state.
 */
static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	struct mmc_command *cmd;
	while (1) {
		wait_for_completion_io(&mrq->completion);
		cmd = mrq->cmd;
		/*
		 * If host has timed out waiting for the sanitize/bkops
		 * to complete, card might be still in programming state
		 * so let's try to bring the card out of programming
		 * state.
		 */
		if ((cmd->bkops_busy || cmd->sanitize_busy) && cmd->error == -ETIMEDOUT) {
			if (!mmc_interrupt_hpi(host->card)) {
				pr_warn("%s: %s: Interrupted sanitize/bkops\n",
					   mmc_hostname(host), __func__);
				/* HPI succeeded: treat the timeout as done. */
				cmd->error = 0;
				break;
			} else {
				pr_err("%s: %s: Failed to interrupt sanitize\n",
				       mmc_hostname(host), __func__);
			}
		}
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;
		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		/* Re-issue and loop back to wait again. */
		host->ops->request(host, mrq);
	}
}
/**
 * mmc_pre_req - Prepare for a new request
 * @host: MMC host to prepare command
 * @mrq: MMC request to prepare for
 * @is_first_req: true if there is no previous started request
 *     that may run in parallel to this call, otherwise false
 *
 * mmc_pre_req() is called in prior to mmc_start_req() to let
 * host prepare for the new request. Preparation of a request may be
 * performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
		 bool is_first_req)
{
	if (host->ops->pre_req) {
		/* Keep the host clock running across the callback. */
		mmc_host_clk_hold(host);
		host->ops->pre_req(host, mrq, is_first_req);
		mmc_host_clk_release(host);
	}
}
/**
 * mmc_post_req - Post process a completed request
 * @host: MMC host to post process command
 * @mrq: MMC request to post process for
 * @err: Error, if non zero, clean up any resources made in pre_req
 *
 * Let the host post process a completed request. Post processing of
 * a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req) {
		/* Keep the host clock running across the callback. */
		mmc_host_clk_hold(host);
		host->ops->post_req(host, mrq, err);
		mmc_host_clk_release(host);
	}
}
/**
 * mmc_cmdq_discard_queue - discard the task[s] in the device
 * @host: host instance
 * @tasks: mask of tasks to be knocked off
 *	0: remove all queued tasks
 *
 * Thin wrapper around mmc_discard_queue(); returns its result.
 */
int mmc_cmdq_discard_queue(struct mmc_host *host, u32 tasks)
{
	return mmc_discard_queue(host, tasks);
}
EXPORT_SYMBOL(mmc_cmdq_discard_queue);
/**
 * mmc_cmdq_post_req - post process of a completed request
 * @host: host instance
 * @tag: the request tag.
 * @err: non-zero is error, success otherwise
 *
 * Forwards to the host's cmdq post_req hook if one is provided.
 */
void mmc_cmdq_post_req(struct mmc_host *host, int tag, int err)
{
	if (likely(host->cmdq_ops->post_req))
		host->cmdq_ops->post_req(host, tag, err);
}
EXPORT_SYMBOL(mmc_cmdq_post_req);
/**
 * mmc_cmdq_halt - halt/un-halt the command queue engine
 * @host: host instance
 * @halt: true - halt, un-halt otherwise
 *
 * Host halts the command queue engine. It should complete
 * the ongoing transfer and release the bus.
 * All legacy commands can be sent upon successful
 * completion of this function.
 * Returns 0 on success, negative otherwise
 */
int mmc_cmdq_halt(struct mmc_host *host, bool halt)
{
	int err = 0;
	/* Already in the requested state: nothing to do. */
	if ((halt && mmc_host_halt(host)) ||
	    (!halt && !mmc_host_halt(host))) {
		pr_debug("%s: %s: CQE is already %s\n", mmc_hostname(host),
			 __func__, halt ? "halted" : "un-halted");
		return 0;
	}
	mmc_host_clk_hold(host);
	if (host->cmdq_ops->halt) {
		err = host->cmdq_ops->halt(host, halt);
		if (!err && host->ops->notify_halt)
			host->ops->notify_halt(host, halt);
		if (!err && halt)
			mmc_host_set_halt(host);
		else if (!err && !halt) {
			mmc_host_clr_halt(host);
			/* Release anyone blocked waiting for un-halt. */
			wake_up(&host->cmdq_ctx.wait);
		}
	} else {
		err = -ENOSYS;
	}
	mmc_host_clk_release(host);
	return err;
}
EXPORT_SYMBOL(mmc_cmdq_halt);
/*
 * mmc_cmdq_start_req - queue a request on the command queue engine.
 * @host: host instance
 * @cmdq_req: command-queue request to start
 *
 * Returns 0 on success or -ENOMEDIUM if the card has been removed.
 */
int mmc_cmdq_start_req(struct mmc_host *host, struct mmc_cmdq_req *cmdq_req)
{
	struct mmc_request *mrq = &cmdq_req->mrq;
	mrq->host = host;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		return -ENOMEDIUM;
	}
	mmc_start_cmdq_request(host, mrq);
	return 0;
}
EXPORT_SYMBOL(mmc_cmdq_start_req);
/* Done callback for DCMDs: drop the clock reference taken at start and
 * wake the waiter in mmc_cmdq_wait_for_dcmd(). */
static void mmc_cmdq_dcmd_req_done(struct mmc_request *mrq)
{
	mmc_host_clk_release(mrq->host);
	complete(&mrq->completion);
}
/*
 * mmc_cmdq_wait_for_dcmd - issue a direct command (DCMD) and wait for it.
 * @host: host instance
 * @cmdq_req: DCMD request to issue
 *
 * Starts the request on the CQE and blocks until completion.  On a
 * command error the CQE state is dumped for debugging.  Returns 0 on
 * success or a negative error code.
 */
int mmc_cmdq_wait_for_dcmd(struct mmc_host *host,
			struct mmc_cmdq_req *cmdq_req)
{
	struct mmc_request *mrq = &cmdq_req->mrq;
	struct mmc_command *cmd = mrq->cmd;
	int err = 0;
	init_completion(&mrq->completion);
	mrq->done = mmc_cmdq_dcmd_req_done;
	err = mmc_cmdq_start_req(host, cmdq_req);
	if (err)
		return err;
	wait_for_completion_io(&mrq->completion);
	if (cmd->error) {
		pr_err("%s: DCMD %d failed with err %d\n",
				mmc_hostname(host), cmd->opcode,
				cmd->error);
		err = cmd->error;
		/* Hold the clock while the driver dumps its state. */
		mmc_host_clk_hold(host);
		host->cmdq_ops->dumpstate(host);
		mmc_host_clk_release(host);
	}
	return err;
}
EXPORT_SYMBOL(mmc_cmdq_wait_for_dcmd);
/*
 * mmc_cmdq_prepare_flush - build a cache-flush switch command for the CQE.
 * @cmd: command structure to fill in
 *
 * Prepares (does not issue) a CMD6 that sets EXT_CSD_FLUSH_CACHE to 1.
 */
int mmc_cmdq_prepare_flush(struct mmc_command *cmd)
{
	return   __mmc_switch_cmdq_mode(cmd, EXT_CSD_CMD_SET_NORMAL,
				     EXT_CSD_FLUSH_CACHE, 1,
				     0, true, true);
}
EXPORT_SYMBOL(mmc_cmdq_prepare_flush);
/**
 * mmc_start_req - start a non-blocking request
 * @host: MMC host to start command
 * @areq: async request to start
 * @error: out parameter returns 0 for success, otherwise non zero
 *
 * Start a new MMC custom command request for a host.
 * If there is an ongoing async request wait for completion
 * of that request and start the new one and return.
 * Does not wait for the new request to complete.
 *
 * Returns the completed request, NULL in case of none completed.
 * Wait for the an ongoing request (previously started) to complete and
 * return the completed request. If there is no ongoing request, NULL
 * is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	int start_err = 0;
	struct mmc_async_req *data = host->areq;
	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);
	if (host->areq) {
		err = mmc_wait_for_data_req_done(host, host->areq->mrq,	areq);
		if (err == MMC_BLK_NEW_REQUEST) {
			if (error)
				*error = err;
			/*
			 * The previous request was not completed,
			 * nothing to return
			 */
			return NULL;
		}
		/*
		 * Check BKOPS urgency for each R1 response
		 */
		if (host->card && mmc_card_mmc(host->card) &&
		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT))
			mmc_check_bkops(host->card);
	}
	/* Pipeline the new request only if the previous one succeeded. */
	if (!err && areq) {
		trace_mmc_blk_rw_start(areq->mrq->cmd->opcode,
				       areq->mrq->cmd->arg,
				       areq->mrq->data);
		start_err = __mmc_start_data_req(host, areq->mrq);
	}
	/* Post-process the just-completed request. */
	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);
	 /* Cancel a prepared request if it was not started. */
	if (err && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);
	if (err)
		host->areq = NULL;
	else
		host->areq = areq;
	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);
/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. Does not attempt to parse the
 * response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	/* Even on -ENOMEDIUM the completion is signalled, so the wait
	 * below returns immediately with the error in mrq->cmd->error. */
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
/**
 * mmc_interrupt_hpi - Issue for High priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issued High Priority Interrupt, and check for card status
 * until out-of prg-state.  Returns 0 on success, 1 if HPI is
 * not enabled on the card, -EINVAL if the card is in a state
 * where HPI is illegal, or another negative error code.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;
	BUG_ON(!card);
	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}
	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}
	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}
	err = mmc_send_hpi_cmd(card, &status);
	/* Poll until the card returns to TRAN or OUT_OF_INTERRUPT_TIME
	 * (from EXT_CSD) expires. */
	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);
		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait)) {
			/* One final status read before declaring timeout. */
			err = mmc_send_status(card, &status);
			if (!err && R1_CURRENT_STATE(status) != R1_STATE_TRAN)
				err = -ETIMEDOUT;
			else
				break;
		}
	} while (!err);
out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);
/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {NULL};
	WARN_ON(!host->claimed);
	/* Clear any stale response from a previous use of cmd. */
	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;
	mrq.cmd = cmd;
	cmd->data = NULL;
	mmc_wait_for_req(host, &mrq);
	return cmd->error;
}
EXPORT_SYMBOL(mmc_wait_for_cmd);
/**
 * mmc_stop_bkops - stop ongoing BKOPS
 * @card: MMC card to check BKOPS
 *
 * Send HPI command to stop ongoing background operations to
 * allow rapid servicing of foreground operations, e.g. read/
 * writes. Wait until the card comes out of the programming state
 * to avoid errors in servicing read/write requests.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;
	BUG_ON(!card);
	if (unlikely(!mmc_card_configured_manual_bkops(card)))
		goto out;
	if (!mmc_card_doing_bkops(card))
		goto out;
	err = mmc_interrupt_hpi(card);
	/*
	 * If err is EINVAL, we can't issue an HPI.
	 * It should complete the BKOPS.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		mmc_update_bkops_hpi(&card->bkops.stats);
		err = 0;
	}
out:
	return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);
/*
 * mmc_read_bkops_status - refresh the cached BKOPS/exception status.
 * @card: MMC card to read from
 *
 * Reads the full 512-byte EXT_CSD and caches the BKOPS_STATUS urgency
 * level and the masked EXCEPTION_EVENTS_STATUS bits in card->ext_csd.
 * Claims and releases the host around the read.  Returns 0 on success
 * or a negative error code.
 */
int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;
	/*
	 * In future work, we should consider storing the entire ext_csd.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		pr_err("%s: could not allocate buffer to receive the ext_csd.\n",
		       mmc_hostname(card->host));
		return -ENOMEM;
	}
	mmc_claim_host(card->host);
	err = mmc_send_ext_csd(card, ext_csd);
	mmc_release_host(card->host);
	if (err)
		goto out;
	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS] &
		MMC_BKOPS_URGENCY_MASK;
	card->ext_csd.raw_exception_status =
		ext_csd[EXT_CSD_EXP_EVENTS_STATUS] & (EXT_CSD_URGENT_BKOPS |
						      EXT_CSD_DYNCAP_NEEDED |
						      EXT_CSD_SYSPOOL_EXHAUSTED
						      | EXT_CSD_PACKED_FAILURE);
out:
	kfree(ext_csd);
	return err;
}
EXPORT_SYMBOL(mmc_read_bkops_status);
/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.  Fills in
 * data->timeout_ns and data->timeout_clks.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;
	if (!card) {
		WARN_ON(1);
		return;
	}
	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}
	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;
	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;
	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;
	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;
		timeout_us = data->timeout_ns / 1000;
		if (mmc_host_clk_rate(card->host))
			timeout_us += data->timeout_clks * 1000 /
				(mmc_host_clk_rate(card->host) / 1000);
		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length." Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;
		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
		/* assign limit value if invalid */
		if (timeout_us == 0)
			data->timeout_ns = limit_us * 1000;
	}
	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 600ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 600000000;
		data->timeout_clks = 0;
	}
	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns =  100000000;	/* 100ms */
		}
	}
	/* Increase the timeout values for some bad INAND MCP devices */
	if (card->quirks & MMC_QUIRK_INAND_DATA_TIMEOUT) {
		data->timeout_ns = 4000000000u; /* 4s */
		data->timeout_clks = 0;
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
/**
* mmc_align_data_size - pads a transfer size to a more optimal value
* @card: the MMC card associated with the data transfer
* @sz: original transfer size
*
* Pads the original data size with a number of extra bytes in
* order to avoid controller bugs and/or performance hits
* (e.g. some controllers revert to PIO for certain sizes).
*
* Returns the improved size, which might be unmodified.
*
* Note that this function is only relevant when issuing a
* single scatter gather entry.
*/
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: There is no mechanism yet for controllers to report
	 * their alignment quirks to the core, so simply round every
	 * transfer size up to the next 32-bit (4-byte) boundary.
	 */
	return (sz + 3) & ~3u;
}
EXPORT_SYMBOL(mmc_align_data_size);
/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations. If @abort is non null and
 * dereference a non-zero value then this will return prematurely with
 * that non-zero value without acquiring the lock. Returns zero
 * with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;
	might_sleep();
	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		/* Must set state before testing the condition to avoid
		 * missing a wakeup from mmc_release_host(). */
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		/* Re-entrant claim: the current claimer may claim again. */
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		/* Aborted: pass the wakeup on to the next waiter. */
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	/* First (outermost) claim powers the host back up. */
	if (host->ops->enable && !stop && host->claim_cnt == 1)
		host->ops->enable(host);
	return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);
/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release a MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;
	WARN_ON(!host->claimed);
	/* Last (outermost) release lets the host power down. */
	if (host->ops->disable && host->claim_cnt == 1)
		host->ops->disable(host);
	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		/* Wake a waiter blocked in __mmc_claim_host(). */
		wake_up(&host->wq);
	}
}
EXPORT_SYMBOL(mmc_release_host);
/*
 * This is a helper function, which fetches a runtime pm reference for the
 * card device and also claims the host.  Pair with mmc_put_card().
 */
void mmc_get_card(struct mmc_card *card)
{
	pm_runtime_get_sync(&card->dev);
	mmc_claim_host(card->host);
}
EXPORT_SYMBOL(mmc_get_card);
/*
 * This is a helper function, which releases the host and drops the runtime
 * pm reference for the card device.  Counterpart of mmc_get_card().
 */
void mmc_put_card(struct mmc_card *card)
{
	mmc_release_host(card->host);
	pm_runtime_mark_last_busy(&card->dev);
	pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.  Also emits a clock-change
 * trace event whenever the frequency differs from the last call.
 */
void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;
	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);
	/* A non-zero clock implicitly ungates the host clock. */
	if (ios->clock > 0)
		mmc_set_ungated(host);
	host->ops->set_ios(host, ios);
	if (ios->old_rate != ios->clock) {
		/* clk_ts == 0 means no previous timestamp to diff against. */
		if (likely(ios->clk_ts)) {
			char trace_info[80];
			snprintf(trace_info, 80,
				"%s: freq_KHz %d --> %d | t = %d",
				mmc_hostname(host), ios->old_rate / 1000,
				ios->clock / 1000, jiffies_to_msecs(
					(long)jiffies - (long)ios->clk_ts));
			trace_mmc_clk(trace_info);
		}
		ios->old_rate = ios->clock;
		ios->clk_ts = jiffies;
	}
}
EXPORT_SYMBOL(mmc_set_ios);
/*
 * Control chip select pin on a host.  The clock is held so the
 * ios update cannot race with clock auto-gating.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	mmc_host_clk_hold(host);
	host->ios.chip_select = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".  Caller must hold the host clock reference.
 */
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz && hz < host->f_min);
	/* Clamp to the controller's maximum supported frequency. */
	if (hz > host->f_max)
		hz = host->f_max;
	host->ios.clock = hz;
	mmc_set_ios(host);
}
/* Public wrapper for __mmc_set_clock() that manages the clock reference. */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	mmc_host_clk_hold(host);
	__mmc_set_clock(host, hz);
	mmc_host_clk_release(host);
}
#ifdef CONFIG_MMC_CLKGATE
/*
 * This gates the clock by setting it to 0 Hz.  The previous rate is
 * cached in host->clk_old so mmc_ungate_clock() can restore it.
 */
void mmc_gate_clock(struct mmc_host *host)
{
	unsigned long flags;
	WARN_ON(!host->ios.clock);
	/* Update the gating state atomically w.r.t. clk_lock users. */
	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_old = host->ios.clock;
	host->ios.clock = 0;
	host->clk_gated = true;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mmc_set_ios(host);
}
/*
 * This restores the clock from gating by using the cached
 * clock value.
 */
void mmc_ungate_clock(struct mmc_host *host)
{
	/*
	 * We should previously have gated the clock, so the clock shall
	 * be 0 here! The clock may however be 0 during initialization,
	 * when some request operations are performed before setting
	 * the frequency. When ungate is requested in that situation
	 * we just ignore the call.
	 */
	if (host->clk_old) {
		WARN_ON(host->ios.clock);
		/* This call will also set host->clk_gated to false */
		__mmc_set_clock(host, host->clk_old);
	}
}
/* Mark the host clock as ungated (called from mmc_set_ios() when a
 * non-zero frequency is programmed). */
void mmc_set_ungated(struct mmc_host *host)
{
	unsigned long flags;
	/*
	 * We've been given a new frequency while the clock is gated,
	 * so make sure we regard this as ungating it.
	 */
	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_gated = false;
	spin_unlock_irqrestore(&host->clk_lock, flags);
}
#else
/* No-op stub when clock gating (CONFIG_MMC_CLKGATE) is disabled. */
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif
/*
 * mmc_execute_tuning - run the host's tuning procedure for the card.
 * @card: card to tune against
 *
 * Picks the tuning opcode by card type (CMD21 for eMMC HS200, CMD19
 * otherwise) and invokes the host's execute_tuning hook.  Returns 0
 * if the host has no tuning hook, else the hook's result.
 */
int mmc_execute_tuning(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 opcode;
	int err;
	if (!host->ops->execute_tuning)
		return 0;
	if (mmc_card_mmc(card))
		opcode = MMC_SEND_TUNING_BLOCK_HS200;
	else
		opcode = MMC_SEND_TUNING_BLOCK;
	mmc_host_clk_hold(host);
	err = host->ops->execute_tuning(host, opcode);
	mmc_host_clk_release(host);
	if (err)
		pr_err("%s: tuning execution failed\n", mmc_hostname(host));
	return err;
}
/*
 * Change the bus mode (open drain/push-pull) of a host.  The clock
 * is held so the ios update cannot race with clock auto-gating.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	mmc_host_clk_hold(host);
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
/*
 * Change data bus width of a host.  The clock is held so the
 * ios update cannot race with clock auto-gating.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	mmc_host_clk_hold(host);
	host->ios.bus_width = width;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int highest = ilog2(MMC_VDD_35_36);
	int bitnum;

	/* Reject anything outside the 1.65 V .. 3.6 V window. */
	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	/* The whole low-voltage window maps onto a single OCR bit. */
	if (vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	/*
	 * Base 2000 mV, 100 mV per OCR bit, first high-voltage bit is 8.
	 * On an exact boundary the caller may prefer the lower adjacent
	 * bit; nudging vdd down by 1 mV selects it.
	 */
	bitnum = ((low_bits ? vdd - 1 : vdd) - 2000) / 100 + 8;
	return bitnum > highest ? highest : bitnum;
}
/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 ocr = 0;
	int hi, lo;

	if (vdd_max < vdd_min)
		return 0;

	/* High bit preference for vdd_max, low bit for vdd_min, so that
	 * all boundary voltages are covered by the resulting mask. */
	hi = mmc_vdd_to_ocrbitnum(vdd_max, false);
	lo = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (hi < 0 || lo < 0)
		return 0;

	/* Set every OCR bit from the low bound up to the high bound. */
	for (; lo <= hi; lo++)
		ocr |= 1 << lo;
	return ocr;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
#ifdef CONFIG_OF
/**
 * mmc_of_parse_voltage - return mask of supported voltages
 * @np: The device node need to be parsed.
 * @mask: mask of voltages available for MMC/SD/SDIO; OR-ed into, not reset
 *
 * 1. Return zero on success.
 * 2. Return negative errno: voltage-range is invalid.
 */
int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
{
	const u32 *voltage_ranges;
	int num_ranges, i;

	/*
	 * If the property is absent, of_get_property() returns NULL and
	 * leaves num_ranges untouched, so num_ranges must not be used
	 * before the NULL check (the old code divided the uninitialized
	 * value first).
	 */
	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
	if (!voltage_ranges) {
		pr_info("%s: voltage-ranges unspecified\n", np->full_name);
		return -EINVAL;
	}
	/* Each range is a <min max> pair of be32 cells. */
	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
	if (!num_ranges) {
		pr_info("%s: voltage-ranges unspecified\n", np->full_name);
		return -EINVAL;
	}
	for (i = 0; i < num_ranges; i++) {
		const int j = i * 2;
		u32 ocr_mask;

		ocr_mask = mmc_vddrange_to_ocrmask(
				be32_to_cpu(voltage_ranges[j]),
				be32_to_cpu(voltage_ranges[j + 1]));
		if (!ocr_mask) {
			pr_err("%s: voltage-range #%d is invalid\n",
				np->full_name, i);
			return -EINVAL;
		}
		*mask |= ocr_mask;
	}
	return 0;
}
EXPORT_SYMBOL(mmc_of_parse_voltage);
#endif /* CONFIG_OF */
#ifdef CONFIG_REGULATOR
/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator. This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;
	int vdd_uV;
	int vdd_mV;
	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;
	/* Fold every selectable voltage into the OCR mask. */
	for (i = 0; i < count; i++) {
		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;
		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}
	/* Fixed regulator: fall back to the current output voltage. */
	if (!result) {
		vdd_uV = regulator_get_voltage(supply);
		if (vdd_uV <= 0)
			return vdd_uV;
		vdd_mV = vdd_uV / 1000;
		result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}
	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage. This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;
	if (vdd_bit) {
		int tmp;
		/*
		 * REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			/* Low-voltage OCR bit: the full 1.65-1.95 V band. */
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			/* High-voltage bits are 100 mV steps from 2.0 V. */
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}
		result = regulator_set_voltage(supply, min_uV, max_uV);
		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		/* vdd_bit == 0 means power off. */
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}
	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
#endif /* CONFIG_REGULATOR */
/*
 * mmc_regulator_get_supply - look up the vmmc/vqmmc supplies for a host.
 * @mmc: host whose supplies to resolve
 *
 * Fetches the optional "vmmc" and "vqmmc" regulators and, when vmmc is
 * present, derives mmc->ocr_avail from its supported voltages.  Missing
 * regulators are logged and tolerated; -EPROBE_DEFER is propagated so
 * the host driver can retry.  Returns 0 otherwise.
 */
int mmc_regulator_get_supply(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	int ret;
	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
	if (IS_ERR(mmc->supply.vmmc)) {
		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_info(dev, "No vmmc regulator found\n");
	} else {
		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
		if (ret > 0)
			mmc->ocr_avail = ret;
		else
			dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
	}
	if (IS_ERR(mmc->supply.vqmmc)) {
		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_info(dev, "No vqmmc regulator found\n");
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
/*
 * Mask off any voltages we don't support and select
 * the lowest voltage.  Returns the narrowed OCR mask, or 0 if the
 * card and host share no usable voltage.
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;
	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		dev_warn(mmc_dev(host),
		"card claims to support voltages below defined range\n");
		ocr &= ~0x7F;
	}
	ocr &= host->ocr_avail;
	if (!ocr) {
		dev_warn(mmc_dev(host), "no support for card's volts\n");
		return 0;
	}
	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
		/* Lowest common voltage; power cycle to apply it cleanly. */
		bit = ffs(ocr) - 1;
		ocr &= 3 << bit;
		mmc_power_cycle(host, ocr);
	} else {
		/* Cannot power cycle: stick close to the current setting. */
		bit = fls(ocr) - 1;
		ocr &= 3 << bit;
		if (bit != host->ios.vdd)
			dev_warn(mmc_dev(host), "exceeding card's volts\n");
	}
	return ocr;
}
/*
 * __mmc_set_signal_voltage - program the host's I/O signalling voltage.
 * @host: host to update
 * @signal_voltage: MMC_SIGNAL_VOLTAGE_* value
 *
 * Invokes the host's start_signal_voltage_switch hook with the new ios;
 * on failure the previous signal voltage is restored in the ios.
 * Returns the hook's result (0 if the host has no hook).
 */
int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
	int err = 0;
	int old_signal_voltage = host->ios.signal_voltage;
	host->ios.signal_voltage = signal_voltage;
	if (host->ops->start_signal_voltage_switch) {
		mmc_host_clk_hold(host);
		err = host->ops->start_signal_voltage_switch(host, &host->ios);
		mmc_host_clk_release(host);
	}
	/* Roll back the cached value so ios reflects reality. */
	if (err)
		host->ios.signal_voltage = old_signal_voltage;
	return err;
}
/*
 * Switch the card's signalling voltage, using CMD11 for the 1.8V path.
 * @ocr is only used if a power cycle is needed after a failed switch.
 * Returns 0 on success; -EAGAIN means the switch failed and the card has
 * already been power cycled here; -EPERM means the host cannot switch.
 */
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
{
	struct mmc_command cmd = {0};
	int err = 0;
	u32 clock;
	BUG_ON(!host);
	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return __mmc_set_signal_voltage(host, signal_voltage);
	/*
	 * If we cannot switch voltages, return failure so the caller
	 * can continue without UHS mode
	 */
	if (!host->ops->start_signal_voltage_switch)
		return -EPERM;
	/* Without card_busy() we can't confirm the card took the switch. */
	if (!host->ops->card_busy)
		pr_warn("%s: cannot verify signal voltage switch\n",
			mmc_hostname(host));
	cmd.opcode = SD_SWITCH_VOLTAGE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	/*
	 * Hold the clock reference so clock doesn't get auto gated during this
	 * voltage switch sequence.
	 */
	mmc_host_clk_hold(host);
	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		goto exit;
	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
		err = -EIO;
		goto exit;
	}
	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 1 ms to be sure
	 */
	mmc_delay(1);
	if (host->ops->card_busy && !host->ops->card_busy(host)) {
		err = -EAGAIN;
		goto power_cycle;
	}
	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	host->card_clock_off = true;
	clock = host->ios.clock;
	host->ios.clock = 0;
	mmc_set_ios(host);
	if (__mmc_set_signal_voltage(host, signal_voltage)) {
		/*
		 * Voltages may not have been switched, but we've already
		 * sent CMD11, so a power cycle is required anyway
		 */
		err = -EAGAIN;
		host->ios.clock = clock;
		mmc_set_ios(host);
		host->card_clock_off = false;
		goto power_cycle;
	}
	/* Keep clock gated for at least 5 ms */
	mmc_delay(5);
	host->ios.clock = clock;
	mmc_set_ios(host);
	host->card_clock_off = false;
	/* Wait for at least 1 ms according to spec */
	mmc_delay(1);
	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low
	 */
	if (host->ops->card_busy && host->ops->card_busy(host))
		err = -EAGAIN;
power_cycle:
	if (err) {
		pr_debug("%s: Signal voltage switch failed, "
			"power cycling card\n", mmc_hostname(host));
		mmc_power_cycle(host, ocr);
	}
exit:
	mmc_host_clk_release(host);
	return err;
}
/*
* Select timing parameters for host.
*/
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	/* clk_hold/clk_release bracket the ios update (clock-gating guard). */
	mmc_host_clk_hold(host);
	host->ios.timing = timing;
	mmc_set_ios(host);	/* push the updated ios to the host driver */
	mmc_host_clk_release(host);
}
/*
* Select appropriate driver type for host.
*/
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	/* clk_hold/clk_release bracket the ios update (clock-gating guard). */
	mmc_host_clk_hold(host);
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);	/* push the updated ios to the host driver */
	mmc_host_clk_release(host);
}
/*
* Apply power to the MMC stack. This is a two-stage process.
* First, we enable power to the card without the clock running.
* We then wait a bit for the power to stabilise. Finally,
* enable the bus drivers and clock to the card.
*
* We must _NOT_ enable the clock prior to power stablising.
*
* If a host does all the power sequencing itself, ignore the
* initial MMC_POWER_UP stage.
*/
void mmc_power_up(struct mmc_host *host, u32 ocr)
{
	/* Already powered on - nothing to do. */
	if (host->ios.power_mode == MMC_POWER_ON)
		return;
	mmc_host_clk_hold(host);
	/* Select the highest voltage bit set in the OCR mask. */
	host->ios.vdd = fls(ocr) - 1;
	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	/* Stage 1: power up at 1-bit legacy settings, clock still off. */
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);
	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
	if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);
	/* Stage 2: start the clock once power has stabilised. */
	host->ios.clock = host->f_init;
	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);
	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);
	mmc_host_clk_release(host);
}
/*
 * Power down the card/bus: zero vdd and clock, reset to 1-bit legacy bus
 * settings and set power_mode to MMC_POWER_OFF.
 */
void mmc_power_off(struct mmc_host *host)
{
	/* Already off - nothing to do. */
	if (host->ios.power_mode == MMC_POWER_OFF)
		return;
	mmc_host_clk_hold(host);
	host->ios.clock = 0;
	host->ios.vdd = 0;
	/* Non-SPI hosts fall back to open-drain / don't-care chip-select. */
	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);
	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);
	mmc_host_clk_release(host);
}
/* Power the card off, wait the spec-mandated 1 ms, then power back up at @ocr. */
void mmc_power_cycle(struct mmc_host *host, u32 ocr)
{
	mmc_power_off(host);
	/* Wait at least 1 ms according to SD spec */
	mmc_delay(1);
	mmc_power_up(host, ocr);
}
/*
* Cleanup when the last reference to the bus operator is dropped.
*/
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	/* Only legal once the last reference is gone and the bus is dead. */
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);
	host->bus_ops = NULL;
}
/*
* Increase reference count of bus operator
*/
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;
	/* host->lock protects the bus_refs counter. */
	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
* Decrease reference count of bus operator and free it if
* it is the last reference.
*/
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;
	/* host->lock protects the bus_refs counter. */
	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	/* Last reference dropped: release bus_ops (requires bus_dead). */
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
* Assign a mmc bus handler to a host. Only one bus handler may control a
* host at any given time.
*/
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;
	BUG_ON(!host);
	BUG_ON(!ops);
	/* The caller must hold the host claim while attaching. */
	WARN_ON(!host->claimed);
	spin_lock_irqsave(&host->lock, flags);
	/* Only one bus handler may control a host at a time. */
	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);
	host->bus_ops = ops;
	host->bus_refs = 1;	/* initial reference, dropped in mmc_detach_bus() */
	host->bus_dead = 0;
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
* Remove the current bus handler from a host.
*/
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;
	BUG_ON(!host);
	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);
	spin_lock_irqsave(&host->lock, flags);
	/* Mark dead first; the final mmc_bus_put() then frees bus_ops. */
	host->bus_dead = 1;
	spin_unlock_irqrestore(&host->lock, flags);
	mmc_bus_put(host);
}
/*
 * Schedule the card-detect work to run after @delay jiffies.  @cd_irq
 * marks a real card-detect interrupt, which may register a PM wakeup
 * event so userspace gets a chance to see it.
 */
static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
				bool cd_irq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	/*
	 * If the device is configured as wakeup, we prevent a new sleep for
	 * 5 s to give provision for user space to consume the event.
	 */
	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
		device_can_wakeup(mmc_dev(host)))
		pm_wakeup_event(mmc_dev(host), 5000);
	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}
/**
* mmc_detect_change - process change of state on a MMC socket
* @host: host which changed state.
* @delay: optional delay to wait before detection (jiffies)
*
* MMC drivers should call this when they detect a card has been
* inserted or removed. The MMC layer will confirm that any
* present card is still functional, and initialize any newly
* inserted.
*/
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
	/* cd_irq = true: a card-detect IRQ may register a PM wakeup event. */
	_mmc_detect_change(host, delay, true);
}
EXPORT_SYMBOL(mmc_detect_change);
/*
 * Initialise card->erase_shift and card->pref_erase from the card's
 * CSD/SSR/EXT_CSD data.  Called once at card init time.
 */
void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;
	/* erase_shift lets later code use shifts instead of division. */
	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;
	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card.  That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly hugely over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.  For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value.  For modern cards it
	 * will end up being 4MiB.  Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else if (card->erase_size) {
		/* sz = card capacity in MiB (512-byte sectors >> 11). */
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		/* Round pref_erase up to a whole multiple of erase_size. */
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	} else
		card->pref_erase = 0;
}
/*
 * Compute the erase/trim/discard busy timeout (in ms) for an MMC card,
 * for @qty erase groups and erase argument @arg.
 */
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
					  unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;
	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;
		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;
		/*
		 * ios.clock is only a target.  The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (mmc_host_clk_rate(card->host) / 1000);
		erase_timeout = timeout_us / 1000;
		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}
	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}
	/* Scale the per-group timeout by the number of groups. */
	erase_timeout *= qty;
	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;
	return erase_timeout;
}
/*
 * Compute the erase busy timeout (in ms) for an SD card covering @qty
 * allocation units (or write blocks when the SSR carries no timeout data).
 */
static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg,
					 unsigned int qty)
{
	unsigned int timeout;

	if (card->ssr.erase_timeout)
		/* Timeout advertised in the SD Status Register (SSR). */
		timeout = card->ssr.erase_timeout * qty +
			  card->ssr.erase_offset;
	else
		/* No SSR data: assume 250ms per write block. */
		timeout = 250 * qty;

	/* Must not be less than one second. */
	return (timeout < 1000) ? 1000 : timeout;
}
/* Dispatch the timeout calculation on card type (SD vs MMC). */
static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	return mmc_card_sd(card) ? mmc_sd_erase_timeout(card, arg, qty)
				 : mmc_mmc_erase_timeout(card, arg, qty);
}
/*
 * Count the erase groups (SD: allocation units, or write blocks when no AU
 * is defined) touched by the inclusive sector range [from, to].  The count
 * feeds the erase timeout calculation; erasing part of a group counts as
 * a whole group.
 *
 * For SD the allocation unit is always a power of 2; for MMC the erase
 * group size is almost certainly one too, but JEDEC does not insist on it,
 * so fall back to division when erase_shift is 0.  Note that the timeout
 * for secure trim 2 is only correct if the group count matches the total
 * of all preceding secure trim 1 commands, which cannot be guaranteed
 * across power loss.
 */
static u32 mmc_get_erase_qty(struct mmc_card *card, u32 from, u32 to)
{
	u32 groups;

	if (card->erase_shift)
		groups = ((to >> card->erase_shift) -
			  (from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		groups = to - from + 1;
	else
		groups = ((to / card->erase_size) -
			  (from / card->erase_size)) + 1;

	return groups;
}
/*
 * Issue one step of a CMDQ erase sequence (MMC_ERASE_GROUP_START,
 * MMC_ERASE_GROUP_END or MMC_ERASE itself) as a DCMD and wait for it.
 * Only MMC_ERASE uses an R1b response with a computed busy timeout.
 * Returns 0 on success, -EIO on failure.
 */
static int mmc_cmdq_send_erase_cmd(struct mmc_cmdq_req *cmdq_req,
		struct mmc_card *card, u32 opcode, u32 arg, u32 qty)
{
	struct mmc_command *cmd = cmdq_req->mrq.cmd;
	int err;
	memset(cmd, 0, sizeof(struct mmc_command));
	cmd->opcode = opcode;
	cmd->arg = arg;
	if (cmd->opcode == MMC_ERASE) {
		/* The erase proper has a busy phase sized by qty groups. */
		cmd->flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd->busy_timeout = mmc_erase_timeout(card, arg, qty);
	} else {
		cmd->flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	}
	err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
	if (err) {
		/*
		 * Log the failing opcode: the previous message always said
		 * "group start" even when group end or the erase failed.
		 */
		pr_err("mmc_erase: erase cmd %u error %d, status %#x\n",
				cmd->opcode, err, cmd->resp[0]);
		return -EIO;
	}
	return 0;
}
/*
 * Perform a full erase sequence over the inclusive sector range
 * [from, to] through the CMDQ DCMD path: group start, group end, erase,
 * then poll CMD13 until the card leaves the programming state.
 */
static int mmc_cmdq_do_erase(struct mmc_cmdq_req *cmdq_req,
			struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command *cmd = cmdq_req->mrq.cmd;
	unsigned int qty = 0;
	unsigned long timeout;
	unsigned int fr, nr;
	int err;
	/* Keep the original sector range for the trace events. */
	fr = from;
	nr = to - from + 1;
	trace_mmc_blk_erase_start(arg, fr, nr);
	qty = mmc_get_erase_qty(card, from, to);
	/* Byte addressing for non-block-addressed cards (512 B sectors). */
	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}
	err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE_GROUP_START,
			from, qty);
	if (err)
		goto out;
	err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE_GROUP_END,
			to, qty);
	if (err)
		goto out;
	err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE,
			arg, qty);
	if (err)
		goto out;
	timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
	do {
		memset(cmd, 0, sizeof(struct mmc_command));
		cmd->opcode = MMC_SEND_STATUS;
		cmd->arg = card->rca << 16;
		cmd->flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
		/* 0xFDF92000: R1 error bits - presumably matches CMD_ERRORS,
		 * TODO confirm against the R1 status definition. */
		if (err || (cmd->resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
				err, cmd->resp[0]);
			err = -EIO;
			goto out;
		}
		/* Timeout if the device never becomes ready for data and
		 * never leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(card->host), __func__);
			err = -EIO;
			goto out;
		}
	} while (!(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG));
out:
	trace_mmc_blk_erase_end(arg, fr, nr);
	return err;
}
/*
 * Perform a full erase sequence over the inclusive sector range
 * [from, to]: group start, group end, erase, then (non-SPI) poll CMD13
 * until the card leaves the programming state.
 */
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0;
	unsigned long timeout;
	unsigned int fr, nr;
	int err;
	/* Keep the original sector range for the trace events. */
	fr = from;
	nr = to - from + 1;
	trace_mmc_blk_erase_start(arg, fr, nr);
	qty = mmc_get_erase_qty(card, from, to);
	/* Byte addressing for non-block-addressed cards (512 B sectors). */
	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}
	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}
	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	/* R1b: the card signals busy until the erase completes. */
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.busy_timeout = mmc_erase_timeout(card, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}
	if (mmc_host_is_spi(card->host))
		goto out;
	timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		/* 0xFDF92000: R1 error bits - presumably matches CMD_ERRORS,
		 * TODO confirm against the R1 status definition. */
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
				err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
		/* Timeout if the device never becomes ready for data and
		 * never leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(card->host), __func__);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
out:
	trace_mmc_blk_erase_end(arg, fr, nr);
	return err;
}
/*
 * Validate an erase request before any command is issued.
 * Returns 0 when the request is acceptable, -EOPNOTSUPP when the host or
 * card lacks the required capability, -EINVAL for a misaligned secure erase.
 */
int mmc_erase_sanity_check(struct mmc_card *card, unsigned int from,
			   unsigned int nr, unsigned int arg)
{
	/* Host and card must both support erase, with a known group size. */
	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE) || !card->erase_size)
		return -EOPNOTSUPP;
	/* SD cards only understand the plain erase argument. */
	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;
	/* Secure and trim variants require matching EXT_CSD feature bits. */
	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;
	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;
	/* Secure erase must cover whole erase groups. */
	if (arg == MMC_SECURE_ERASE_ARG &&
	    (from % card->erase_size || nr % card->erase_size))
		return -EINVAL;
	return 0;
}
/*
 * Erase @nr sectors starting at @from via the CMDQ path.  For the plain
 * erase argument the range is shrunk to whole erase groups; a range that
 * covers no complete group succeeds as a no-op.
 */
int mmc_cmdq_erase(struct mmc_cmdq_req *cmdq_req,
	   struct mmc_card *card, unsigned int from, unsigned int nr,
	   unsigned int arg)
{
	unsigned int skew, to;
	int ret;

	ret = mmc_erase_sanity_check(card, from, nr, arg);
	if (ret)
		return ret;

	if (arg == MMC_ERASE_ARG) {
		/* Round 'from' up to an erase-group boundary... */
		skew = from % card->erase_size;
		if (skew) {
			skew = card->erase_size - skew;
			from += skew;
			if (nr <= skew)
				return 0;	/* nothing left to erase */
			nr -= skew;
		}
		/* ...and round the length down to whole groups. */
		nr -= nr % card->erase_size;
	}
	if (!nr)
		return 0;

	to = from + nr;
	if (to <= from)
		return -EINVAL;	/* sector range overflow */

	/* 'from' and 'to' are inclusive */
	to -= 1;
	return mmc_cmdq_do_erase(cmdq_req, card, from, to, arg);
}
EXPORT_SYMBOL(mmc_cmdq_erase);
/**
* mmc_erase - erase sectors.
* @card: card to erase
* @from: first sector to erase
* @nr: number of sectors to erase
* @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
*
* Caller must claim host before calling this function.
*/
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int skew, to;
	int ret;

	ret = mmc_erase_sanity_check(card, from, nr, arg);
	if (ret)
		return ret;

	if (arg == MMC_ERASE_ARG) {
		/* Round 'from' up to an erase-group boundary... */
		skew = from % card->erase_size;
		if (skew) {
			skew = card->erase_size - skew;
			from += skew;
			if (nr <= skew)
				return 0;	/* nothing left to erase */
			nr -= skew;
		}
		/* ...and round the length down to whole groups. */
		nr -= nr % card->erase_size;
	}
	if (!nr)
		return 0;

	to = from + nr;
	if (to <= from)
		return -EINVAL;	/* sector range overflow */

	/* 'from' and 'to' are inclusive */
	to -= 1;
	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
/* Erase needs host capability, card command-class support and a known
 * erase group size. */
int mmc_can_erase(struct mmc_card *card)
{
	return ((card->host->caps & MMC_CAP_ERASE) &&
		(card->csd.cmdclass & CCC_ERASE) && card->erase_size) ? 1 : 0;
}
EXPORT_SYMBOL(mmc_can_erase);
/* TRIM support is advertised via the EXT_CSD security feature bits. */
int mmc_can_trim(struct mmc_card *card)
{
	return (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) ?
		1 : 0;
}
EXPORT_SYMBOL(mmc_can_trim);
int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5
	 * use the s/w feature support field.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);
/* Sanitize is only meaningful when the card can trim or erase at all. */
int mmc_can_sanitize(struct mmc_card *card)
{
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	return (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE) ?
		1 : 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);
/* Secure erase/trim: must be advertised and not disabled by a quirk. */
int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	return ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
		!(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) ? 1 : 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);
/* True when both the start sector and the length are whole multiples of
 * the erase group size (and the size is known at all). */
int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	return (from % card->erase_size || nr % card->erase_size) ? 0 : 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);
/*
 * Find the largest discard size (in sectors) whose erase timeout still
 * fits within the host's max_busy_timeout for argument @arg.  Grows a
 * trial quantity by doubling increments, stopping when the computed
 * timeout exceeds the budget or stops increasing monotonically.
 */
static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
	unsigned int last_timeout = 0;
	/* Cap qty so the later conversion to sectors cannot overflow. */
	if (card->erase_shift)
		max_qty = UINT_MAX >> card->erase_shift;
	else if (mmc_card_sd(card))
		max_qty = UINT_MAX;
	else
		max_qty = UINT_MAX / card->erase_size;
	/* Find the largest qty with an OK timeout */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
			if (timeout > host->max_busy_timeout)
				break;
			/* A decreasing timeout indicates wrap-around. */
			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);
	if (!qty)
		return 0;
	if (qty == 1)
		return 1;
	/* Convert qty to sectors */
	/* NOTE(review): one qty unit is deducted below - presumably slack
	 * for ranges crossing a group boundary; confirm the rationale. */
	if (card->erase_shift)
		max_discard = --qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty;
	else
		max_discard = --qty * card->erase_size;
	return max_discard;
}
/*
 * Return the largest discard size (in sectors) that keeps the erase/trim
 * timeout within the host's busy-timeout budget; UINT_MAX when no limit
 * applies.
 */
unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	/* No busy-timeout limit, or the host opted out of the cap. */
	if (!host->max_busy_timeout ||
	    (host->caps2 & MMC_CAP2_MAX_DISCARD_SIZE))
		return UINT_MAX;

	/*
	 * Without erase_group_def set, the MMC erase timeout depends on the
	 * clock frequency, which can change.  In that case the best choice
	 * is just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		/* A trim may be slower: take the smaller of the two limits. */
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}

	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		mmc_hostname(host), max_discard, host->max_busy_timeout);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);
/*
 * Send CMD16 to program the card's block length.  Block-addressed and
 * DDR52 cards use a fixed block length, so the command is skipped.
 */
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {0};

	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);
/*
 * Send CMD23 to pre-define the block count of the following transfer.
 * Bits 0-15 carry the count; bit 31 requests a reliable write.
 */
int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
		       bool is_rel_write)
{
	struct mmc_command cmd = {0};

	cmd.opcode = MMC_SET_BLOCK_COUNT;
	cmd.arg = blockcount & 0x0000FFFF;
	if (is_rel_write)
		cmd.arg |= 1 << 31;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blockcount);
/* Pulse the hardware reset line at init time, when the host both
 * advertises MMC_CAP_HW_RESET and implements the hw_reset callback. */
static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset) {
		mmc_host_clk_hold(host);
		host->ops->hw_reset(host);
		mmc_host_clk_release(host);
	}
}
/*
 * Whether the card can be hardware reset.  SDIO cards cannot; eMMC cards
 * additionally require RST_n to be enabled in EXT_CSD when the host
 * advertises hardware reset support.
 */
int mmc_can_reset(struct mmc_card *card)
{
	u8 rst_n;

	if (mmc_card_sdio(card))
		return 0;

	if (mmc_card_mmc(card) && (card->host->caps & MMC_CAP_HW_RESET)) {
		rst_n = card->ext_csd.rst_n_function;
		if ((rst_n & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
			return 0;
	}
	return 1;
}
EXPORT_SYMBOL(mmc_can_reset);
/*
 * Reset the card (hardware reset line when available, power cycle
 * otherwise), re-apply safe bus settings and re-initialize via the bus
 * handler's power_restore().  When @check is set, a CMD13 that still
 * succeeds after the reset means no reset actually happened (-ENOSYS).
 */
static int mmc_do_hw_reset(struct mmc_host *host, int check)
{
	struct mmc_card *card = host->card;
	int ret;
	if (!host->bus_ops->power_restore)
		return -EOPNOTSUPP;
	if (!card)
		return -EINVAL;
	if (!mmc_can_reset(card))
		return -EOPNOTSUPP;
	mmc_host_clk_hold(host);
	/* Drop back to the initial frequency before resetting. */
	mmc_set_clock(host, host->f_init);
	if (mmc_card_mmc(card) && host->ops->hw_reset)
		host->ops->hw_reset(host);
	else
		mmc_power_cycle(host, host->ocr_avail);
	/* If the reset has happened, then a status command will fail */
	if (check) {
		struct mmc_command cmd = {0};
		int err;
		cmd.opcode = MMC_SEND_STATUS;
		if (!mmc_host_is_spi(card->host))
			cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (!err) {
			mmc_host_clk_release(host);
			return -ENOSYS;
		}
	}
	/* Restore identification-time bus settings. */
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
	/* Re-initialize the card under the host claim. */
	mmc_claim_host(host);
	ret = host->bus_ops->power_restore(host);
	mmc_release_host(host);
	return ret;
}
/*
* mmc_cmdq_hw_reset: Helper API for doing
* reset_all of host and reinitializing card.
* This must be called with mmc_claim_host
* acquired by the caller.
*/
int mmc_cmdq_hw_reset(struct mmc_host *host)
{
if (!host->bus_ops->power_restore)
return -EOPNOTSUPP;
mmc_power_cycle(host, host->ocr_avail);
mmc_select_voltage(host, host->card->ocr);
return host->bus_ops->power_restore(host);
}
EXPORT_SYMBOL(mmc_cmdq_hw_reset);
int mmc_hw_reset(struct mmc_host *host)
{
	/* check = 0: reset without verifying the card actually lost state. */
	return mmc_do_hw_reset(host, 0);
}
EXPORT_SYMBOL(mmc_hw_reset);
int mmc_hw_reset_check(struct mmc_host *host)
{
	/* check = 1: verify via CMD13 that the card really was reset. */
	return mmc_do_hw_reset(host, 1);
}
EXPORT_SYMBOL(mmc_hw_reset_check);
/*
 * Try to initialize the card at @freq Hz: power up, reset, then probe for
 * SDIO, SD and MMC in that order.  Returns 0 when a card type attached,
 * -EIO (with power removed) otherwise.
 */
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;
#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host, host->ocr_avail);
	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);
	/*
	 * sdio_reset sends CMD52 to reset card.  Since we do not know
	 * if the card is being re-initialized, just send it.  CMD52
	 * should be ignored by SD/eMMC cards.
	 */
	sdio_reset(host);
	mmc_go_idle(host);
	mmc_send_if_cond(host, host->ocr_avail);
	/* Order's important: probe SDIO, then SD, then MMC */
	if (!mmc_attach_sdio(host))
		return 0;
	if (!mmc_attach_sd(host))
		return 0;
	if (!mmc_attach_mmc(host))
		return 0;
	/* Nothing attached: remove power again. */
	mmc_power_off(host);
	return -EIO;
}
/*
 * Check whether a (removable) card has gone away, using the bus handler's
 * alive() probe.  Returns non-zero when the card is absent or has already
 * been flagged as removed.
 */
int _mmc_detect_card_removed(struct mmc_host *host)
{
	int gone;

	if (host->caps & MMC_CAP_NONREMOVABLE)
		return 0;

	/* No card attached, or removal already recorded. */
	if (!host->card || mmc_card_removed(host->card))
		return 1;

	gone = host->bus_ops->alive(host);
	/*
	 * Card detect status and alive check may be out of sync if card is
	 * removed slowly, when card detect switch changes while card/slot
	 * pads are still contacted in hardware (refer to "SD Card Mechanical
	 * Addendum, Appendix C: Card Detection Switch").  So reschedule a
	 * detect work 200ms later for this case.
	 */
	if (!gone && host->ops->get_cd && !host->ops->get_cd(host)) {
		mmc_detect_change(host, msecs_to_jiffies(200));
		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
	}
	if (gone) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return gone;
}
/*
 * Report whether the card has been removed.  Must be called with the host
 * claimed.  Performs a fresh removal check only when a change was flagged
 * or the host relies on polling.
 */
int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;
	WARN_ON(!host->claimed);
	if (!card)
		return 1;
	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
		return ret;
	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			_mmc_detect_change(host, 0, false);
		}
	}
	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);
/*
 * Delayed-work handler behind host->detect: verify an existing card is
 * still present, and probe for a new one when no bus handler is attached.
 * Reschedules itself when the host needs polling.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	if (host->trigger_card_event && host->ops->card_event) {
		host->ops->card_event(host);
		host->trigger_card_event = false;
	}
	if (host->rescan_disable)
		return;
	/* If there is a non-removable card registered, only scan once */
	if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
		return;
	host->rescan_entered = 1;
	mmc_bus_get(host);
	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE))
		host->bus_ops->detect(host);
	host->detect_change = 0;
	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);
	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}
	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);
	/* Card-detect pin says no card: just make sure power is off. */
	if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd &&
			host->ops->get_cd(host) == 0) {
		mmc_claim_host(host);
		mmc_power_off(host);
		mmc_release_host(host);
		goto out;
	}
	/* Probe for a new card at the host's minimum frequency. */
	mmc_claim_host(host);
	(void) mmc_rescan_try_freq(host, host->f_min);
	mmc_release_host(host);
 out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
/*
 * Bring a newly-registered host up: apply initial power (unless prescan
 * power-up is disabled), arm the card-detect GPIO IRQ and schedule the
 * first detect pass.
 */
void mmc_start_host(struct mmc_host *host)
{
	mmc_claim_host(host);
	host->f_init = max(freqs[0], host->f_min);
	host->rescan_disable = 0;
	host->ios.power_mode = MMC_POWER_UNDEFINED;
	if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
		mmc_power_off(host);
	else
		mmc_power_up(host, host->ocr_avail);
	mmc_gpiod_request_cd_irq(host);
	mmc_release_host(host);
	/* Kick off the initial card detection (not a cd interrupt). */
	_mmc_detect_change(host, 0, false);
}
/*
 * Tear a host down: stop detection, remove any attached card through its
 * bus handler, detach the bus and remove power.
 */
void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	/* Silence card-detect interrupts and any pending detect work. */
	if (host->slot.cd_irq >= 0)
		disable_irq(host->slot.cd_irq);
	host->rescan_disable = 1;
	cancel_delayed_work_sync(&host->detect);
	mmc_flush_scheduled_work();
	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;
	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);
	BUG_ON(host->card);
	mmc_power_off(host);
}
/*
 * Quiesce the card via the bus handler (when supported) and then remove
 * power.  Returns -EINVAL when no live bus handler is attached.
 */
int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);
	if (!host->bus_ops || host->bus_dead) {
		mmc_bus_put(host);
		return -EINVAL;
	}
	/* Let the bus driver quiesce the card before power is removed. */
	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);
	mmc_bus_put(host);

	mmc_power_off(host);
	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);
/*
 * Re-apply power at the card's negotiated OCR and reinitialize it through
 * the bus handler.  Returns -EINVAL when no live bus handler is attached.
 */
int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);
	if (!host->bus_ops || host->bus_dead) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host, host->card->ocr);
	/* Reinitialize the card under the host claim. */
	mmc_claim_host(host);
	ret = host->bus_ops->power_restore(host);
	mmc_release_host(host);
	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);
/*
* Add barrier request to the requests in cache
*/
int mmc_cache_barrier(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = 0;
	/* Only meaningful with the cache enabled and not quirked off. */
	if (!card->ext_csd.cache_ctrl ||
	    (card->quirks & MMC_QUIRK_CACHE_DISABLE))
		goto out;
	if (!mmc_card_mmc(card))
		goto out;
	if (!card->ext_csd.barrier_en)
		return -ENOTSUPP;
	/*
	 * If a device receives maximum supported barrier
	 * requests, a barrier command is treated as a
	 * flush command.  Hence, it is better to use
	 * flush timeout instead a generic CMD6 timeout
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_FLUSH_CACHE, 0x2, 0);
	if (err)
		pr_err("%s: cache barrier error %d\n",
				mmc_hostname(host), err);
out:
	return err;
}
EXPORT_SYMBOL(mmc_cache_barrier);
/*
 * Flush the cache to the non-volatile storage.
 *
 * A no-op (returns 0) unless the card is an eMMC device with a present,
 * enabled cache and no cache-disable quirk.  On a flush timeout an HPI
 * is attempted; if that also fails the card is reported as -ENODEV.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err;

	/* Only an eMMC card with an enabled cache needs flushing. */
	if (!mmc_card_mmc(card))
		return 0;
	if (card->ext_csd.cache_size <= 0)
		return 0;
	if (!(card->ext_csd.cache_ctrl & 1))
		return 0;
	if (card->quirks & MMC_QUIRK_CACHE_DISABLE)
		return 0;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_FLUSH_CACHE, 1, 0);
	if (err == -ETIMEDOUT) {
		pr_err("%s: cache flush timeout\n",
		       mmc_hostname(card->host));
		/* Try to interrupt the stuck flush with HPI. */
		err = mmc_interrupt_hpi(card);
		if (err) {
			pr_err("%s: mmc_interrupt_hpi() failed (%d)\n",
			       mmc_hostname(card->host), err);
			err = -ENODEV;
		}
	} else if (err) {
		pr_err("%s: cache flush error %d\n",
		       mmc_hostname(card->host), err);
	}
	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
#ifdef CONFIG_PM
/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do that in a pm notifier while userspace isn't yet frozen, so we
 * will be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
		  unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;
	int err = 0;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		/* Block rescans and flush any in-flight detection work. */
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);
		if (!host->bus_ops)
			break;
		/* Validate prerequisites for suspend */
		if (host->bus_ops->pre_suspend)
			err = host->bus_ops->pre_suspend(host);
		/* pre_suspend succeeded: the card stays for suspend. */
		if (!err)
			break;
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		/* Re-enable rescans and kick off card detection. */
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		_mmc_detect_change(host, 0, false);
	}
	return 0;
}
#endif
/**
 * mmc_init_context_info() - init synchronization context
 * @host: mmc host
 *
 * Init struct context_info needed to implement asynchronous
 * request mechanism, used by mmc core, host driver and mmc requests
 * supplier.
 */
void mmc_init_context_info(struct mmc_host *host)
{
	spin_lock_init(&host->context_info.lock);
	/* All flags start clear: no new/done/last request pending. */
	host->context_info.is_new_req = false;
	host->context_info.is_done_rcv = false;
	host->context_info.is_waiting_last_req = false;
	init_waitqueue_head(&host->context_info.wait);
}
#ifdef CONFIG_MMC_EMBEDDED_SDIO
/*
 * mmc_set_embedded_sdio_data - attach static SDIO descriptors to a host.
 * @host:      host whose embedded-SDIO data to set
 * @cis:       common CIS tuple data for the embedded device
 * @cccr:      CCCR register description
 * @funcs:     array of embedded SDIO function descriptors
 * @num_funcs: number of entries in @funcs
 *
 * Used for SDIO devices that are hard-wired to the host and therefore
 * cannot be probed; the core uses these instead of reading the card.
 */
void mmc_set_embedded_sdio_data(struct mmc_host *host,
				struct sdio_cis *cis,
				struct sdio_cccr *cccr,
				struct sdio_embedded_func *funcs,
				int num_funcs)
{
	host->embedded_sdio_data.cis = cis;
	host->embedded_sdio_data.cccr = cccr;
	host->embedded_sdio_data.funcs = funcs;
	host->embedded_sdio_data.num_funcs = num_funcs;
}
EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
#endif
/*
 * mmc_init - subsystem initialization.
 *
 * Creates the ordered "kmmcd" workqueue and registers the MMC bus,
 * host class and SDIO bus.  Uses goto-based unwind so each failure
 * undoes exactly the steps that succeeded before it.
 */
static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;
	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;
	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;
	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;
	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);
	return ret;
}
/*
 * mmc_exit - subsystem teardown, in reverse order of mmc_init().
 */
static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

/* subsys_initcall: the MMC core must come up before card drivers. */
subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");
|
fedosis/android_kernel_xiaomi_msm8937
|
drivers/mmc/core/core.c
|
C
|
gpl-2.0
| 102,367
|
/*
* Copyright 2009-2012 Freescale Semiconductor, Inc. All Rights Reserved.
*/
/*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
/*!
* @file ipu_csi_enc.c
*
* @brief CSI Use case for video capture
*
* @ingroup IPU
*/
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/ipu.h>
#include <mach/mipi_csi2.h>
#include "mxc_v4l2_capture.h"
#include "ipu_prp_sw.h"
#ifdef CAMERA_DBG
#define CAMERA_TRACE(x) (printk)x
#else
#define CAMERA_TRACE(x)
#endif
/*
* Function definitions
*/
/*!
 * csi ENC callback function.
 *
 * Frame-done interrupt handler: forwards the event to the capture
 * driver's registered encoder callback, if any.
 *
 * @param irq    int    irq line
 * @param dev_id void * device id (cam_data instance)
 *
 * @return status IRQ_HANDLED for handled
 */
static irqreturn_t csi_enc_callback(int irq, void *dev_id)
{
	cam_data *cam = dev_id;

	if (cam->enc_callback)
		cam->enc_callback(irq, dev_id);
	return IRQ_HANDLED;
}
/*!
 * CSI ENC enable channel setup function
 *
 * Configures and enables the CSI-->MEM IDMA channel for direct capture:
 * picks interlaced/progressive mode from the sensor protocol, maps the
 * V4L2 pixel format to an IPU format, wires up MIPI CSI-2 if bound to
 * this IPU/CSI pair, then initializes and enables the channel.
 *
 * @param cam struct cam_data * mxc capture instance
 *
 * @return status (0 on success, negative errno on failure)
 */
static int csi_enc_setup(cam_data *cam)
{
	ipu_channel_params_t params;
	u32 pixel_fmt;
	int err = 0, sensor_protocol = 0;
	dma_addr_t dummy = cam->dummy_frame.buffer.m.offset;
#ifdef CONFIG_MXC_MIPI_CSI2
	void *mipi_csi2_info;
	int ipu_id;
	int csi_id;
#endif
	CAMERA_TRACE("In csi_enc_setup\n");
	if (!cam) {
		printk(KERN_ERR "cam private is NULL\n");
		return -ENXIO;
	}

	/* BUGFIX: "&params" had been corrupted to the "¶" mojibake. */
	memset(&params, 0, sizeof(ipu_channel_params_t));
	params.csi_mem.csi = cam->csi;

	/* Interlacing follows the sensor's clock/sync protocol. */
	sensor_protocol = ipu_csi_get_sensor_protocol(cam->ipu, cam->csi);
	switch (sensor_protocol) {
	case IPU_CSI_CLK_MODE_GATED_CLK:
	case IPU_CSI_CLK_MODE_NONGATED_CLK:
	case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
	case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
	case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
		params.csi_mem.interlaced = false;
		break;
	case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
	case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
	case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
		params.csi_mem.interlaced = true;
		break;
	default:
		printk(KERN_ERR "sensor protocol unsupported\n");
		return -EINVAL;
	}

	/* Map the user's V4L2 pixel format onto the IPU's format codes. */
	switch (cam->v2f.fmt.pix.pixelformat) {
	case V4L2_PIX_FMT_YUV420:
		pixel_fmt = IPU_PIX_FMT_YUV420P;
		break;
	case V4L2_PIX_FMT_YUV422P:
		pixel_fmt = IPU_PIX_FMT_YUV422P;
		break;
	case V4L2_PIX_FMT_UYVY:
		pixel_fmt = IPU_PIX_FMT_UYVY;
		break;
	case V4L2_PIX_FMT_YUYV:
		pixel_fmt = IPU_PIX_FMT_YUYV;
		break;
	case V4L2_PIX_FMT_NV12:
		pixel_fmt = IPU_PIX_FMT_NV12;
		break;
	case V4L2_PIX_FMT_BGR24:
		pixel_fmt = IPU_PIX_FMT_BGR24;
		break;
	case V4L2_PIX_FMT_RGB24:
		pixel_fmt = IPU_PIX_FMT_RGB24;
		break;
	case V4L2_PIX_FMT_RGB565:
		pixel_fmt = IPU_PIX_FMT_RGB565;
		break;
	case V4L2_PIX_FMT_BGR32:
		pixel_fmt = IPU_PIX_FMT_BGR32;
		break;
	case V4L2_PIX_FMT_RGB32:
		pixel_fmt = IPU_PIX_FMT_RGB32;
		break;
	default:
		printk(KERN_ERR "format not supported\n");
		return -EINVAL;
	}

#ifdef CONFIG_MXC_MIPI_CSI2
	mipi_csi2_info = mipi_csi2_get_info();
	if (mipi_csi2_info) {
		if (mipi_csi2_get_status(mipi_csi2_info)) {
			ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
			csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
			/* Only use MIPI if it is bound to *this* IPU/CSI. */
			if (cam->ipu == ipu_get_soc(ipu_id)
			    && cam->csi == csi_id) {
				params.csi_mem.mipi_en = true;
				params.csi_mem.mipi_vc =
				mipi_csi2_get_virtual_channel(mipi_csi2_info);
				params.csi_mem.mipi_id =
				mipi_csi2_get_datatype(mipi_csi2_info);
				mipi_csi2_pixelclk_enable(mipi_csi2_info);
			} else {
				params.csi_mem.mipi_en = false;
				params.csi_mem.mipi_vc = 0;
				params.csi_mem.mipi_id = 0;
			}
		} else {
			params.csi_mem.mipi_en = false;
			params.csi_mem.mipi_vc = 0;
			params.csi_mem.mipi_id = 0;
		}
	} else {
		printk(KERN_ERR "Fail to get mipi_csi2_info!\n");
		return -EPERM;
	}
#endif

	err = ipu_init_channel(cam->ipu, CSI_MEM, &params);
	if (err != 0) {
		printk(KERN_ERR "ipu_init_channel %d\n", err);
		return err;
	}

	/* Both ping-pong buffers start at the dummy frame address. */
	err = ipu_init_channel_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
				      pixel_fmt, cam->v2f.fmt.pix.width,
				      cam->v2f.fmt.pix.height,
				      cam->v2f.fmt.pix.bytesperline,
				      cam->rotation,
				      dummy, dummy, 0,
				      cam->offset.u_offset,
				      cam->offset.v_offset);
	if (err != 0) {
		printk(KERN_ERR "CSI_MEM output buffer\n");
		return err;
	}
	err = ipu_enable_channel(cam->ipu, CSI_MEM);
	if (err < 0) {
		printk(KERN_ERR "ipu_enable_channel CSI_MEM\n");
		return err;
	}
	return err;
}
/*!
 * function to update physical buffer address for encoder IDMA channel
 *
 * @param ipu        struct ipu_soc * IPU instance owning the channel
 * @param eba        physical buffer address for encoder IDMA channel
 * @param buffer_num int buffer 0 or buffer 1; flipped to the other
 *                   buffer on success (ping-pong)
 *
 * @return status (0 on success)
 */
static int csi_enc_eba_update(struct ipu_soc *ipu, dma_addr_t eba,
			      int *buffer_num)
{
	int err = 0;

	/*
	 * BUGFIX: dma_addr_t may be 64-bit (LPAE); printing it with a
	 * bare %x is a format mismatch.  Cast explicitly for %lx.
	 */
	pr_debug("eba %lx\n", (unsigned long)eba);
	err = ipu_update_channel_buffer(ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
					*buffer_num, eba);
	if (err != 0) {
		/* Buffer still marked ready: clear it and retry once. */
		ipu_clear_buffer_ready(ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
				       *buffer_num);
		err = ipu_update_channel_buffer(ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
						*buffer_num, eba);
		if (err != 0) {
			pr_err("ERROR: v4l2 capture: fail to update "
			       "buf%d\n", *buffer_num);
			return err;
		}
	}
	ipu_select_buffer(ipu, CSI_MEM, IPU_OUTPUT_BUFFER, *buffer_num);
	/* Alternate between buffer 0 and buffer 1 for the next frame. */
	*buffer_num = (*buffer_num == 0) ? 1 : 0;
	return 0;
}
/*!
* Enable encoder task
* @param private struct cam_data * mxc capture instance
*
* @return status
*/
static int csi_enc_enabling_tasks(void *private)
{
cam_data *cam = (cam_data *) private;
int err = 0;
CAMERA_TRACE("IPU:In csi_enc_enabling_tasks\n");
if (cam->dummy_frame.vaddress &&
cam->dummy_frame.buffer.length
< PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage)) {
dma_free_coherent(0, cam->dummy_frame.buffer.length,
cam->dummy_frame.vaddress,
cam->dummy_frame.paddress);
cam->dummy_frame.vaddress = 0;
}
if (!cam->dummy_frame.vaddress) {
cam->dummy_frame.vaddress = dma_alloc_coherent(0,
PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage),
&cam->dummy_frame.paddress,
GFP_DMA | GFP_KERNEL);
if (cam->dummy_frame.vaddress == 0) {
pr_err("ERROR: v4l2 capture: Allocate dummy frame "
"failed.\n");
return -ENOBUFS;
}
cam->dummy_frame.buffer.length =
PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
}
cam->dummy_frame.buffer.type = V4L2_BUF_TYPE_PRIVATE;
cam->dummy_frame.buffer.m.offset = cam->dummy_frame.paddress;
ipu_clear_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF);
err = ipu_request_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF,
csi_enc_callback, 0, "Mxc Camera", cam);
if (err != 0) {
printk(KERN_ERR "Error registering rot irq\n");
return err;
}
err = csi_enc_setup(cam);
if (err != 0) {
printk(KERN_ERR "csi_enc_setup %d\n", err);
return err;
}
return err;
}
/*!
 * Disable encoder task
 *
 * Disables and uninitializes the CSI-->MEM channel, then releases the
 * MIPI CSI-2 pixel clock if this IPU/CSI pair is bound to MIPI.
 *
 * @param private struct cam_data * mxc capture instance
 *
 * @return int
 */
static int csi_enc_disabling_tasks(void *private)
{
	cam_data *cam = (cam_data *) private;
	int err = 0;
#ifdef CONFIG_MXC_MIPI_CSI2
	void *mipi_csi2_info;
	int ipu_id;
	int csi_id;
#endif
	/* true: wait for the channel's in-flight transfer to finish. */
	err = ipu_disable_channel(cam->ipu, CSI_MEM, true);
	ipu_uninit_channel(cam->ipu, CSI_MEM);
#ifdef CONFIG_MXC_MIPI_CSI2
	mipi_csi2_info = mipi_csi2_get_info();
	if (mipi_csi2_info) {
		if (mipi_csi2_get_status(mipi_csi2_info)) {
			ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
			csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
			/* Only undo the pixelclk taken in csi_enc_setup(). */
			if (cam->ipu == ipu_get_soc(ipu_id)
			    && cam->csi == csi_id)
				mipi_csi2_pixelclk_disable(mipi_csi2_info);
		}
	} else {
		printk(KERN_ERR "Fail to get mipi_csi2_info!\n");
		return -EPERM;
	}
#endif
	return err;
}
/*!
 * Enable csi
 *
 * Thin wrapper: turns on the CSI attached to this capture instance.
 *
 * @param private struct cam_data * mxc capture instance
 *
 * @return status
 */
static int csi_enc_enable_csi(void *private)
{
	cam_data *cam = (cam_data *) private;

	return ipu_enable_csi(cam->ipu, cam->csi);
}
/*!
 * Disable csi
 *
 * @param private struct cam_data * mxc capture instance
 *
 * @return status
 */
static int csi_enc_disable_csi(void *private)
{
	cam_data *cam = (cam_data *) private;

	/* free csi eof irq firstly.
	 * when disable csi, wait for idmac eof.
	 * it requests eof irq again */
	ipu_free_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF, cam);
	return ipu_disable_csi(cam->ipu, cam->csi);
}
/*!
 * function to select CSI ENC as the working path
 *
 * Installs the csi_enc_* callbacks on the capture instance so that
 * subsequent enable/disable/buffer-update operations go through the
 * direct CSI-->MEM path.
 *
 * @param private struct cam_data * mxc capture instance
 *
 * @return int (0 on success, -EIO when @private is NULL)
 */
int csi_enc_select(void *private)
{
	cam_data *cam = private;

	if (!cam)
		return -EIO;

	cam->enc_update_eba = csi_enc_eba_update;
	cam->enc_enable = csi_enc_enabling_tasks;
	cam->enc_disable = csi_enc_disabling_tasks;
	cam->enc_enable_csi = csi_enc_enable_csi;
	cam->enc_disable_csi = csi_enc_disable_csi;
	return 0;
}
/*!
 * function to de-select CSI ENC as the working path
 *
 * Clears the csi_enc_* callbacks from the capture instance.  A NULL
 * @private is tolerated and treated as success.
 *
 * @param private struct cam_data * mxc capture instance
 *
 * @return int (always 0)
 */
int csi_enc_deselect(void *private)
{
	cam_data *cam = private;

	if (!cam)
		return 0;

	cam->enc_update_eba = NULL;
	cam->enc_enable = NULL;
	cam->enc_disable = NULL;
	cam->enc_enable_csi = NULL;
	cam->enc_disable_csi = NULL;
	return 0;
}
/*!
 * Init the Encoder channels
 *
 * Nothing to do at module load; channels are set up on demand by
 * csi_enc_select()/csi_enc_enabling_tasks().
 *
 * @return Error code indicating success or failure
 */
__init int csi_enc_init(void)
{
	return 0;
}
/*!
 * Deinit the Encoder channels
 *
 * Intentionally empty: csi_enc_disabling_tasks() releases resources.
 */
void __exit csi_enc_exit(void)
{
}

module_init(csi_enc_init);
module_exit(csi_enc_exit);

/* Exported so the mxc_v4l2_capture core can switch capture paths. */
EXPORT_SYMBOL(csi_enc_select);
EXPORT_SYMBOL(csi_enc_deselect);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("CSI ENC Driver");
MODULE_LICENSE("GPL");
|
zOrg1331/wandboard-kernel
|
drivers/media/video/mxc/capture/ipu_csi_enc.c
|
C
|
gpl-2.0
| 10,146
|
/*
* linux/mm/filemap.c
*
* Copyright (C) 1994-1999 Linus Torvalds
*/
/*
* This file handles the generic file mmap semantics used by
* most "normal" filesystems (but you don't /have/ to use this:
* the NFS filesystem used to do this differently, for example)
*/
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include "internal.h"
#include "../fs/sreadahead_prof.h"
/*
* FIXME: remove all knowledge of the buffer layer from the core VM
*/
#include <linux/buffer_head.h> /* for try_to_free_buffers */
#include <asm/mman.h>
/*
* Shared mappings implemented 30.11.1994. It's not fully working yet,
* though.
*
* Shared mappings now work. 15.8.1995 Bruno.
*
* finished 'unifying' the page and buffer cache and SMP-threaded the
* page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
*
* SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
*/
/*
* Lock ordering:
*
* ->i_mmap_mutex (truncate_pagecache)
* ->private_lock (__free_pte->__set_page_dirty_buffers)
* ->swap_lock (exclusive_swap_page, others)
* ->mapping->tree_lock
*
* ->i_mutex
* ->i_mmap_mutex (truncate->unmap_mapping_range)
*
* ->mmap_sem
* ->i_mmap_mutex
* ->page_table_lock or pte_lock (various, mainly in memory.c)
* ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
*
* ->mmap_sem
* ->lock_page (access_process_vm)
*
* ->i_mutex (generic_file_buffered_write)
* ->mmap_sem (fault_in_pages_readable->do_page_fault)
*
* bdi->wb.list_lock
* sb_lock (fs/fs-writeback.c)
* ->mapping->tree_lock (__sync_single_inode)
*
* ->i_mmap_mutex
* ->anon_vma.lock (vma_adjust)
*
* ->anon_vma.lock
* ->page_table_lock or pte_lock (anon_vma_prepare and various)
*
* ->page_table_lock or pte_lock
* ->swap_lock (try_to_unmap_one)
* ->private_lock (try_to_unmap_one)
* ->tree_lock (try_to_unmap_one)
* ->zone.lru_lock (follow_page->mark_page_accessed)
* ->zone.lru_lock (check_pte_range->isolate_lru_page)
* ->private_lock (page_remove_rmap->set_page_dirty)
* ->tree_lock (page_remove_rmap->set_page_dirty)
* bdi.wb->list_lock (page_remove_rmap->set_page_dirty)
* ->inode->i_lock (page_remove_rmap->set_page_dirty)
* bdi.wb->list_lock (zap_pte_range->set_page_dirty)
* ->inode->i_lock (zap_pte_range->set_page_dirty)
* ->private_lock (zap_pte_range->__set_page_dirty_buffers)
*
* ->i_mmap_mutex
* ->tasklist_lock (memory_failure, collect_procs_ao)
*/
/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe. The caller must hold the mapping's tree_lock.
 */
void __delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries. We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	radix_tree_delete(&mapping->page_tree, page->index);
	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	if (PageSwapBacked(page))
		__dec_zone_page_state(page, NR_SHMEM);
	/* Mapped pages must be unmapped before cache removal. */
	BUG_ON(page_mapped(page));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 *
	 * Fix it up by doing a final dirty accounting check after
	 * having removed the page entirely.
	 */
	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
		dec_zone_page_state(page, NR_FILE_DIRTY);
		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
	}
}
/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the page
 * cache and locked. It will never put the page into the free list, the caller
 * has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;
	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	/* Snapshot freepage before the page loses its mapping. */
	freepage = mapping->a_ops->freepage;
	spin_lock_irq(&mapping->tree_lock);
	__delete_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	/* memcg uncharge must not run under tree_lock. */
	mem_cgroup_uncharge_cache_page(page);

	if (freepage)
		freepage(page);
	/* Drop the pagecache reference taken when the page was added. */
	page_cache_release(page);
}
EXPORT_SYMBOL(delete_from_page_cache);
/* wait_on_bit action: block in io_schedule() until woken. */
static int sleep_on_page(void *word)
{
	io_schedule();
	return 0;
}

/* As above, but abort the wait when a fatal signal is pending. */
static int sleep_on_page_killable(void *word)
{
	sleep_on_page(word);
	return fatal_signal_pending(current) ? -EINTR : 0;
}
/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping: address space structure to write
 * @start: offset in bytes where the range starts
 * @end: offset in bytes where the range ends (inclusive)
 * @sync_mode: enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback. The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			       loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	/* Backing device does not do dirty writeback (e.g. ramfs). */
	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	ret = do_writepages(mapping, &wbc);
	return ret;
}
/* Whole-file variant of __filemap_fdatawrite_range(). */
static inline int __filemap_fdatawrite(struct address_space *mapping,
				       int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

/* Start data-integrity writeback for every dirty page of @mapping. */
int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

/* Start data-integrity writeback for @start..@end (inclusive) only. */
int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			     loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping: target address_space
 *
 * This is a mostly non-blocking flush. Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);
/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping: address space structure to wait for
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.  Returns 0, or -EIO /
 * -ENOSPC when a write error was recorded on a page or the mapping.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;

	if (end_byte < start_byte)
		return 0;

	pagevec_init(&pvec, 0);
	/* Gather writeback-tagged pages a pagevec at a time. */
	while ((index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);
/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	/* Empty file: nothing can be under writeback. */
	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);
/*
 * filemap_write_and_wait - write out all dirty pages and wait on them.
 * @mapping: address space to sync
 *
 * Returns 0 on success, otherwise the first error seen (a write error
 * takes precedence over a later wait error).
 */
int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);
/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping: the address_space for the pages
 * @lstart: offset in bytes where the range starts
 * @lend: offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
							   lstart, lend);
			if (!err)
				err = err2;
		}
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old: page to be replaced
 * @new: page to replace with
 * @gfp_mask: allocation mode
 *
 * This function replaces a page in the pagecache with a new one. On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page. Both the old and new pages must be
 * locked. This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic. The only way this function can fail is
 * memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON(!PageLocked(old));
	VM_BUG_ON(!PageLocked(new));
	VM_BUG_ON(new->mapping);

	/* Preload so the insert under tree_lock cannot fail on alloc. */
	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);
		pgoff_t offset = old->index;

		freepage = mapping->a_ops->freepage;

		page_cache_get(new);
		new->mapping = mapping;
		new->index = offset;

		/* Delete + insert under one tree_lock hold: atomic swap. */
		spin_lock_irq(&mapping->tree_lock);
		__delete_from_page_cache(old);
		error = radix_tree_insert(&mapping->page_tree, offset, new);
		BUG_ON(error);
		mapping->nrpages++;
		__inc_zone_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_zone_page_state(new, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
		/* mem_cgroup codes must not be called under tree_lock */
		mem_cgroup_replace_page_cache(old, new);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		page_cache_release(old);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page: page to add
 * @mapping: the page's address_space
 * @offset: page index
 * @gfp_mask: page allocation mode
 *
 * This function is used to add a page to the pagecache. It must be locked.
 * This function does not add the page to the LRU. The caller must do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
			     pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapBacked(page));

	/* Charge the page to the current task's memcg first. */
	error = mem_cgroup_cache_charge(page, current->mm,
					gfp_mask & GFP_RECLAIM_MASK);
	if (error)
		goto out;

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error == 0) {
		/* The pagecache itself holds a reference on the page. */
		page_cache_get(page);
		page->mapping = mapping;
		page->index = offset;

		spin_lock_irq(&mapping->tree_lock);
		error = radix_tree_insert(&mapping->page_tree, offset, page);
		if (likely(!error)) {
			mapping->nrpages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			spin_unlock_irq(&mapping->tree_lock);
		} else {
			/* Insert raced/failed: roll back ref and charge. */
			page->mapping = NULL;
			/* Leave page->index set: truncation relies upon it */
			spin_unlock_irq(&mapping->tree_lock);
			mem_cgroup_uncharge_cache_page(page);
			page_cache_release(page);
		}
		radix_tree_preload_end();
	} else
		mem_cgroup_uncharge_cache_page(page);
out:
	return error;
}
EXPORT_SYMBOL(add_to_page_cache_locked);
/*
 * add_to_page_cache_lru - add a page to the pagecache and the file LRU.
 *
 * Same contract as add_to_page_cache(), plus LRU insertion on success.
 */
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t offset, gfp_t gfp_mask)
{
	int ret;

	ret = add_to_page_cache(page, mapping, offset, gfp_mask);
	if (ret == 0)
		lru_cache_add_file(page);
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
#ifdef CONFIG_NUMA
/*
 * __page_cache_alloc - allocate a pagecache page, honouring cpuset
 * memory spreading when enabled.
 */
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		/*
		 * Retry if the cpuset's mems changed mid-allocation and
		 * the allocation failed (cookie invalidated, no page).
		 */
		do {
			cpuset_mems_cookie = get_mems_allowed();
			n = cpuset_mem_spread_node();
			page = alloc_pages_exact_node(n, gfp, 0);
		} while (!put_mems_allowed(cpuset_mems_cookie) && !page);

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif
/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
	/* Hash the page pointer into its zone's waitqueue table. */
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

/* Wake everyone waiting for @bit of @page's flags to clear. */
static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

/* Sleep uninterruptibly until @bit_nr of page->flags is clear. */
void wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
			      TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

/* As wait_on_page_bit(), but a fatal signal aborts with -EINTR. */
int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (!test_bit(bit_nr, &page->flags))
		return 0;

	return __wait_on_bit(page_waitqueue(page), &wait,
			     sleep_on_page_killable, TASK_KILLABLE);
}
/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, waiter);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);
/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The mb is necessary to enforce ordering between the clear_bit and the read
 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 */
void unlock_page(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
	/* clear_bit_unlock has release semantics; pairs with lock_page. */
	clear_bit_unlock(PG_locked, &page->flags);
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	/* Page was rotated for reclaim while under writeback. */
	if (TestClearPageReclaim(page))
		rotate_reclaimable_page(page);

	/* Writeback must have been set; clearing twice is a bug. */
	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_clear_bit();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);
/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 */
void __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
			   TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

/* As __lock_page(), but a fatal signal aborts; returns 0 or -EINTR. */
int __lock_page_killable(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	return __wait_on_bit_lock(page_waitqueue(page), &wait,
				  sleep_on_page_killable, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);
/*
 * __lock_page_or_retry - lock the page, or drop mmap_sem and retry later.
 *
 * Fault-path helper.  Returns 1 with the page locked and mmap_sem held;
 * returns 0 when the fault should be retried — in which case mmap_sem
 * has been released, EXCEPT in the FAULT_FLAG_RETRY_NOWAIT case noted
 * below.
 */
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		/* Drop mmap_sem, wait for the lock holder, then retry. */
		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				/* Killed while waiting: give up the fault. */
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}
/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Is there a pagecache struct page at the given (mapping, offset) tuple?
 * If yes, increment its refcount and return it; if no, return NULL.
 *
 * Lockless lookup: runs under RCU and may return an exceptional
 * (shmem/tmpfs swap) entry without raising its count.
 */
struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			/* Slot moved due to tree restructuring: retry. */
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * Otherwise, shmem/tmpfs must be storing a swap entry
			 * here as an exceptional entry: so return it without
			 * attempting to raise page count.
			 */
			goto out;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			page_cache_release(page);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_page);
/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Locates the desired pagecache page, locks it, increments its reference
 * count and returns its address.
 *
 * Returns zero if the page was not present. find_lock_page() may sleep.
 * An exceptional (shmem/tmpfs swap) entry is returned as-is, unlocked.
 */
struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_page(mapping, offset);
	/* Only a real page can be locked; exceptional entries pass through. */
	if (page && !radix_tree_exception(page)) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			/* Truncated while we slept on the lock: start over. */
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON(page->index != offset);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_page);
/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Look the page up in the pagecache; if it is absent, allocate one with
 * @gfp_mask, insert it into the pagecache and the VM's LRU list.  The
 * returned page is locked with an elevated reference count.
 *
 * May sleep, even if @gfp_mask specifies an atomic allocation!
 *
 * Returns the page, or NULL on memory exhaustion.
 */
struct page *find_or_create_page(struct address_space *mapping,
		pgoff_t index, gfp_t gfp_mask)
{
	struct page *page;
	int err;

	for (;;) {
		page = find_lock_page(mapping, index);
		if (page)
			return page;

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;
		/*
		 * We want a regular kernel memory (not highmem or DMA etc)
		 * allocation for the radix tree nodes, but we need to honour
		 * the context-specific requirements the caller has asked for.
		 * GFP_RECLAIM_MASK collects those requirements.
		 */
		err = add_to_page_cache_lru(page, mapping, index,
						(gfp_mask & GFP_RECLAIM_MASK));
		if (!err)
			return page;

		page_cache_release(page);
		if (err != -EEXIST)
			return NULL;
		/* Lost an insertion race: go back and find the winner. */
	}
}
EXPORT_SYMBOL(find_or_create_page);
/**
 * find_get_pages - gang pagecache lookup
 * @mapping: The address_space to search
 * @start: The starting page index
 * @nr_pages: The maximum number of pages
 * @pages: Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping. The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes. There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			    unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		/* Empty slot: just move on to the next index. */
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				WARN_ON(iter.index);
				goto restart;
			}
			/*
			 * Otherwise, shmem/tmpfs must be storing a swap entry
			 * here as an exceptional entry: so skip over it -
			 * we only reach this from invalidate_mapping_pages().
			 */
			continue;
		}

		/* Speculative ref may fail if the page is being freed. */
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();
	return ret;
}
/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping: The address_space to search
 * @index: The starting page index
 * @nr_pages: The maximum number of pages
 * @pages: Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 * The lookup stops at the first hole or exceptional entry.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		/* The hole, there no reason to continue */
		if (unlikely(!page))
			break;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * Otherwise, shmem/tmpfs must be storing a swap entry
			 * here as an exceptional entry: so stop looking for
			 * contiguous pages.
			 */
			break;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		/*
		 * must check mapping and index after taking the ref.
		 * otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
		if (page->mapping == NULL || page->index != iter.index) {
			page_cache_release(page);
			break;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);
/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping: the address_space to search
 * @index: the starting page index
 * @tag: the tag index
 * @nr_pages: the maximum number of pages
 * @pages: where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag. We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, *index, tag) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		/* Slot raced empty: skip it. */
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * This function is never used on a shmem/tmpfs
			 * mapping, so a swap entry won't be found here.
			 */
			BUG();
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();

	/* Advance the caller's cursor past the last page returned. */
	if (ret)
		*index = pages[ret - 1]->index + 1;

	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * Intended for speculative data generators where the data can be
 * regenerated if the page couldn't be grabbed; safe to call while
 * holding the lock on another page.
 *
 * __GFP_FS is cleared on allocation to avoid recursing into the fs and
 * deadlocking against the caller's locked page.
 */
struct page *
grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
{
	struct page *page;

	page = find_get_page(mapping, index);
	if (page) {
		/* Present but contended: give up rather than sleep. */
		if (!trylock_page(page)) {
			page_cache_release(page);
			return NULL;
		}
		return page;
	}

	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
	if (!page)
		return NULL;
	if (add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
		page_cache_release(page);
		return NULL;
	}
	return page;
}
EXPORT_SYMBOL(grab_cache_page_nowait);
/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block(assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
					struct file_ra_state *ra)
{
	/* Quarter the window on each I/O error; callers invoke this on EIO. */
	ra->ra_pages /= 4;
}
/**
 * do_generic_file_read - generic file read routine
 * @filp: the file to read
 * @ppos: current file position
 * @desc: read_descriptor
 * @actor: read method
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 *
 * Bytes read and any error are reported through @desc (written/error);
 * *@ppos is advanced past the data consumed.
 */
static void do_generic_file_read(struct file *filp, loff_t *ppos,
		read_descriptor_t *desc, read_actor_t actor)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index;			/* page currently being read */
	pgoff_t last_index;		/* one past the last page wanted */
	pgoff_t prev_index;		/* page touched by the previous read */
	unsigned long offset;		/* offset into pagecache page */
	unsigned int prev_offset;
	int error;

	index = *ppos >> PAGE_CACHE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		page = find_get_page(mapping, index);
		if (!page) {
			/* Miss: kick synchronous readahead, then retry. */
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			/* Hit a readahead marker: extend the window async. */
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page)) {
			/*
			 * If the fs can report partial uptodateness, check
			 * whether the range we need is already valid before
			 * falling back to a full readpage.
			 */
			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
					!mapping->a_ops->is_partially_uptodate)
				goto page_not_up_to_date;
			if (!trylock_page(page))
				goto page_not_up_to_date;
			/* Did it get truncated before we got the lock? */
			if (!page->mapping)
				goto page_not_up_to_date_locked;
			if (!mapping->a_ops->is_partially_uptodate(page,
								desc, offset))
				goto page_not_up_to_date_locked;
			unlock_page(page);
		}
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
		 *
		 * Checking i_size after the check allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */
		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When a sequential read accesses a page several times,
		 * only mark it as accessed the first time.
		 */
		if (prev_index != index || offset != prev_offset)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
		prev_offset = offset;

		page_cache_release(page);
		if (ret == nr && desc->count)
			continue;
		goto out;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		error = lock_page_killable(page);
		if (unlikely(error))
			goto readpage_error;

page_not_up_to_date_locked:
		/* Did it get truncated before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/*
		 * A previous I/O error may have been due to temporary
		 * failures, eg. multipath errors.
		 * PG_error will be set again if readpage fails.
		 */
		ClearPageError(page);
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				/* Page was truncated under I/O: redo lookup. */
				page_cache_release(page);
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			/* Wait for the async read by re-taking the lock. */
			error = lock_page_killable(page);
			if (unlikely(error))
				goto readpage_error;
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_mapping_pages got it
					 */
					unlock_page(page);
					page_cache_release(page);
					goto find_page;
				}
				/* Read completed but page still bad: EIO. */
				unlock_page(page);
				shrink_readahead_size_eio(filp, ra);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		desc->error = error;
		page_cache_release(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		page = page_cache_alloc_cold(mapping);
		if (!page) {
			desc->error = -ENOMEM;
			goto out;
		}
		error = add_to_page_cache_lru(page, mapping,
						index, GFP_KERNEL);
		if (error) {
			page_cache_release(page);
			if (error == -EEXIST)
				goto find_page;
			desc->error = error;
			goto out;
		}
		goto readpage;
	}

out:
	/* Remember where this read ended for the next readahead decision. */
	ra->prev_pos = prev_index;
	ra->prev_pos <<= PAGE_CACHE_SHIFT;
	ra->prev_pos |= prev_offset;

	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}
/*
 * Performs necessary checks on the iovec before doing an I/O operation
 * @iov: io vector request
 * @nr_segs: number of segments in the iovec
 * @count: number of bytes to write
 * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
 *
 * Adjust number of segments and amount of bytes to write (nr_segs should be
 * properly initialized first). Returns appropriate error code that caller
 * should return or zero in case that write should be allowed.
 */
int generic_segment_checks(const struct iovec *iov,
			unsigned long *nr_segs, size_t *count, int access_flags)
{
	unsigned long seg;
	size_t cnt = 0;
	for (seg = 0; seg < *nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 * (cnt|iov_len cast to ssize_t catches both in one test.)
		 */
		cnt += iv->iov_len;
		if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(access_flags, iv->iov_base, iv->iov_len))
			continue;
		/* Inaccessible segment: fail outright if it is the first. */
		if (seg == 0)
			return -EFAULT;
		/* Otherwise truncate the request at the bad segment. */
		*nr_segs = seg;
		cnt -= iv->iov_len;	/* This segment is no good */
		break;
	}
	*count = cnt;
	return 0;
}
EXPORT_SYMBOL(generic_segment_checks);
/*
 * Read actor for generic_file_read_iter(): copies up to @size bytes of
 * @page (starting at @offset) to the iov_iter stashed in @desc->arg.data.
 * A short copy sets desc->error to -EFAULT.  Returns the bytes copied.
 */
int file_read_iter_actor(read_descriptor_t *desc, struct page *page,
			 unsigned long offset, unsigned long size)
{
	struct iov_iter *iter = desc->arg.data;
	unsigned long want = size;
	unsigned long copied;

	/* Never copy more than the descriptor still asks for. */
	if (want > desc->count)
		want = desc->count;

	copied = iov_iter_copy_to_user(page, iter, offset, want);
	if (copied < want)
		desc->error = -EFAULT;

	iov_iter_advance(iter, copied);
	desc->count -= copied;
	desc->written += copied;
	return copied;
}
/**
 * generic_file_read_iter - generic filesystem read routine
 * @iocb: kernel I/O control block
 * @iter: memory vector
 * @pos: current file position
 *
 * For O_DIRECT files, data inside i_size goes through ->direct_IO (after
 * flushing dirty pagecache for the range); any remainder - or a short
 * direct read - falls through to the buffered path.
 */
ssize_t
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	read_descriptor_t desc;
	ssize_t retval = 0;
	size_t count = iov_iter_count(iter);
	loff_t *ppos = &iocb->ki_pos;

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (filp->f_flags & O_DIRECT) {
		loff_t size;
		struct address_space *mapping;
		struct inode *inode;

		mapping = filp->f_mapping;
		inode = mapping->host;
		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		if (pos < size) {
			/* Push dirty pagecache to disk before reading it raw. */
			retval = filemap_write_and_wait_range(mapping, pos,
					pos + count - 1);
			if (!retval) {
				struct blk_plug plug;

				blk_start_plug(&plug);
				retval = mapping->a_ops->direct_IO(READ, iocb,
						iter, pos);
				blk_finish_plug(&plug);
			}
			if (retval > 0) {
				*ppos = pos + retval;
				count -= retval;
			}

			/*
			 * Btrfs can have a short DIO read if we encounter
			 * compressed extents, so if there was an error, or if
			 * we've already read everything we wanted to, or if
			 * there was a short read because we hit EOF, go ahead
			 * and return. Otherwise fallthrough to buffered io for
			 * the rest of the read.
			 */
			if (retval < 0 || !count || *ppos >= size) {
				file_accessed(filp);
				goto out;
			}
		}
	}

	/* Buffered path: drive do_generic_file_read() via a descriptor. */
	desc.written = 0;
	desc.arg.data = iter;
	desc.count = count;
	desc.error = 0;
	do_generic_file_read(filp, ppos, &desc, file_read_iter_actor);
	if (desc.written)
		retval = desc.written;
	else
		retval = desc.error;
out:
	return retval;
}
EXPORT_SYMBOL(generic_file_read_iter);
/**
 * generic_file_aio_read - generic filesystem read routine
 * @iocb: kernel I/O control block
 * @iov: io vector request
 * @nr_segs: number of segments in the iovec
 * @pos: current file position
 *
 * This is the "read()" routine for all filesystems that can use the
 * page cache directly: it validates the iovec, wraps it in an iov_iter
 * and hands off to generic_file_read_iter().
 */
ssize_t
generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct iov_iter iter;
	size_t count = 0;
	int ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);

	if (ret)
		return ret;

	iov_iter_init(&iter, iov, nr_segs, count, 0);
	return generic_file_read_iter(iocb, &iter, pos);
}
EXPORT_SYMBOL(generic_file_aio_read);
#ifdef CONFIG_MMU
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file: file to read
 * @offset: page index
 *
 * Allocate a page, insert it into the pagecache and LRU, and schedule an
 * I/O to fill it from disk.  Losing the insertion race to another thread
 * is treated as success; truncation during readpage retries the whole
 * sequence with a fresh page.
 */
static int page_cache_read(struct file *file, pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;
	int ret;

	for (;;) {
		struct page *page = page_cache_alloc_cold(mapping);

		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);
		if (ret != AOP_TRUNCATED_PAGE)
			return ret;
		/* Page vanished under readpage: start over. */
	}
}
#define MMAP_LOTSAMISS (100)
/*
 * Synchronous readahead happens when we don't even find
 * a page in the page cache at all.  Chooses between sequential
 * readahead, read-around, or nothing, based on the VMA hints and
 * the recent miss rate.
 */
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
				   struct file_ra_state *ra,
				   struct file *file,
				   pgoff_t offset)
{
	unsigned long ra_pages;
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (VM_RandomReadHint(vma))
		return;
	if (!ra->ra_pages)
		return;

	if (VM_SequentialReadHint(vma)) {
		page_cache_sync_readahead(mapping, ra, file, offset,
					  ra->ra_pages);
		return;
	}

	/* Avoid banging the cache line if not needed */
	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
		ra->mmap_miss++;

	/*
	 * Do we miss much more than hit in this file? If so,
	 * stop bothering with read-ahead. It will only hurt.
	 */
	if (ra->mmap_miss > MMAP_LOTSAMISS)
		return;

	/*
	 * mmap read-around: center a window of ra_pages on the fault,
	 * clamped at the start of the file.
	 */
	ra_pages = max_sane_readahead(ra->ra_pages);
	ra->start = max_t(long, 0, offset - ra_pages / 2);
	ra->size = ra_pages;
	ra->async_size = ra_pages / 4;
	ra_submit(ra, mapping, file);
}
/*
 * Asynchronous readahead happens when we find the page and PG_readahead,
 * so we want to possibly extend the readahead further.  Also decays the
 * miss counter, since this fault was a pagecache hit.
 */
static void do_async_mmap_readahead(struct vm_area_struct *vma,
				    struct file_ra_state *ra,
				    struct file *file,
				    struct page *page,
				    pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (VM_RandomReadHint(vma))
		return;

	if (ra->mmap_miss > 0)
		ra->mmap_miss--;

	if (!PageReadahead(page))
		return;
	page_cache_async_readahead(mapping, ra, file, page, offset,
				   ra->ra_pages);
}
/**
 * filemap_fault - read in file data for page fault handling
 * @vma: vma in which the fault was taken
 * @vmf: struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 *
 * Returns VM_FAULT_LOCKED with vmf->page set on success, or a
 * VM_FAULT_* error/retry code.
 */
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int error;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	pgoff_t offset = vmf->pgoff;
	struct page *page;
	pgoff_t size;
	int ret = 0;

	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (offset >= size)
		return VM_FAULT_SIGBUS;

	/*
	 * Do we have something in the page cache already?
	 */
	page = find_get_page(mapping, offset);
	if (likely(page)) {
		/*
		 * We found the page, so try async readahead before
		 * waiting for the lock.
		 */
		do_async_mmap_readahead(vma, ra, file, page, offset);
	} else {
		/* No page in the page cache at all */
		do_sync_mmap_readahead(vma, ra, file, offset);
		count_vm_event(PGMAJFAULT);
		/* vendor hook: sequential-readahead profiling */
		sreadahead_prof(file, 0, 0);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
		ret = VM_FAULT_MAJOR;
retry_find:
		page = find_get_page(mapping, offset);
		if (!page)
			goto no_cached_page;
	}

	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
		page_cache_release(page);
		return ret | VM_FAULT_RETRY;
	}

	/* Did it get truncated? */
	if (unlikely(page->mapping != mapping)) {
		unlock_page(page);
		/* was put_page(); use page_cache_release() like the rest of this file */
		page_cache_release(page);
		goto retry_find;
	}
	VM_BUG_ON(page->index != offset);

	/*
	 * We have a locked page in the page cache, now we need to check
	 * that it's up-to-date. If not, it is going to be due to an error.
	 */
	if (unlikely(!PageUptodate(page)))
		goto page_not_uptodate;

	/*
	 * Found the page and have a reference on it.
	 * We must recheck i_size under page lock.
	 */
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (unlikely(offset >= size)) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return ret | VM_FAULT_LOCKED;

no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, offset);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;

page_not_uptodate:
	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			error = -EIO;
	}
	page_cache_release(page);

	if (!error || error == AOP_TRUNCATED_PAGE)
		goto retry_find;

	/* Things didn't work out. Return zero to tell the mm layer so. */
	shrink_readahead_size_eio(file, ra);
	return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL(filemap_fault);
/*
 * filemap_page_mkwrite - a previously read-only mapped page is about to
 * become writable.  Update the file time, revalidate the page against
 * truncation under the page lock, and dirty it, all inside the
 * superblock's pagefault freeze protection.
 */
int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int ret;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	lock_page(page);
	if (page->mapping == inode->i_mapping) {
		/*
		 * We mark the page dirty already here so that when freeze is
		 * in progress, we are guaranteed that writeback during
		 * freezing will see the dirty page and writeprotect it again.
		 */
		set_page_dirty(page);
		ret = VM_FAULT_LOCKED;
	} else {
		/* Raced with truncation: tell the caller to refault. */
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
	}
	sb_end_pagefault(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(filemap_page_mkwrite);
/* Default vm operations for mmap()ed pagecache-backed files. */
const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= filemap_page_mkwrite,
};
/* This is used for a general mmap of a disk file */
int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	/* Faulting pages in later requires a readpage implementation. */
	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
/*
 * This is for filesystems which do not implement ->writepage:
 * shared writable mappings are refused.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	const unsigned long wr_shared = VM_SHARED | VM_MAYWRITE;

	if ((vma->vm_flags & wr_shared) == wr_shared)
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
/* !CONFIG_MMU stub: page-cache mmap is not supported. */
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
/* !CONFIG_MMU stub: page-cache mmap is not supported. */
int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */
EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);
/*
 * __read_cache_page - look up @index in @mapping; if absent, allocate a
 * page with @gfp, insert it, and kick off @filler on it.
 *
 * Returns the (possibly not-yet-uptodate) page, or an ERR_PTR.  The
 * filler is only invoked on a page this function itself inserted.
 */
static struct page *__read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;

	for (;;) {
		page = find_get_page(mapping, index);
		if (page)
			return page;

		page = __page_cache_alloc(gfp | __GFP_COLD);
		if (!page)
			return ERR_PTR(-ENOMEM);

		err = add_to_page_cache_lru(page, mapping, index, gfp);
		if (!err)
			break;

		page_cache_release(page);
		if (err != -EEXIST) {
			/* Presumably ENOMEM for radix tree node */
			return ERR_PTR(err);
		}
		/* Someone beat us to the insert: redo the lookup. */
	}

	err = filler(data, page);
	if (err < 0) {
		page_cache_release(page);
		page = ERR_PTR(err);
	}
	return page;
}
/*
 * do_read_cache_page - get a page from the cache via __read_cache_page()
 * and make sure it has been filled, re-running the filler if the page is
 * present but not uptodate.  Handles the page being truncated between
 * lookup and lock by retrying from scratch.
 *
 * Returns the page (marked accessed) or an ERR_PTR.
 */
static struct page *do_read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;

retry:
	page = __read_cache_page(mapping, index, filler, data, gfp);
	if (IS_ERR(page))
		return page;
	if (PageUptodate(page))
		goto out;

	lock_page(page);
	/* Truncated while we waited for the lock: retry the lookup. */
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry;
	}
	/* Somebody else filled it while we waited. */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	err = filler(data, page);
	if (err < 0) {
		page_cache_release(page);
		return ERR_PTR(err);
	}
out:
	mark_page_accessed(page);
	return page;
}
/**
 * read_cache_page_async - read into page cache, fill it if needed
 * @mapping: the page's address_space
 * @index: the page index
 * @filler: function to perform the read
 * @data: first arg to filler(data, page) function, often left as NULL
 *
 * Same as read_cache_page, but don't wait for page to become unlocked
 * after submitting it to the filler.
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page but don't wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page_async(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	/* Thin wrapper: uses the mapping's default allocation mask. */
	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page_async);
/*
 * wait_on_page_read - wait for a just-submitted read on @page to finish.
 * Passes ERR_PTRs straight through; converts a page that failed to come
 * uptodate into ERR_PTR(-EIO), dropping its reference.
 */
static struct page *wait_on_page_read(struct page *page)
{
	if (IS_ERR(page))
		return page;

	wait_on_page_locked(page);
	if (PageUptodate(page))
		return page;

	page_cache_release(page);
	return ERR_PTR(-EIO);
}
/**
 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping: the page's address_space
 * @index: the page index
 * @gfp: the page allocator flags to use if allocating
 *
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * any new page allocations done using the specified allocation flags.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index,
				gfp_t gfp)
{
	/*
	 * ->readpage(file, page) is used directly as the filler; the cast
	 * relies on the file argument mapping onto filler's void *data.
	 */
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;

	return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
}
EXPORT_SYMBOL(read_cache_page_gfp);
/**
 * read_cache_page - read into page cache, fill it if needed
 * @mapping: the page's address_space
 * @index: the page index
 * @filler: function to perform the read
 * @data: first arg to filler(data, page) function, often left as NULL
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page then wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	/* Synchronous variant: submit via the async path, then wait. */
	return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
}
EXPORT_SYMBOL(read_cache_page);
/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 *
 * Returns the ATTR_KILL_* mask to apply, or 0 when nothing needs
 * killing (or the caller is privileged / the file is not regular).
 */
int should_remove_suid(struct dentry *dentry)
{
	umode_t mode = dentry->d_inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone. If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
		return kill;

	return 0;
}
EXPORT_SYMBOL(should_remove_suid);
static int __remove_suid(struct dentry *dentry, int kill)
{
struct iattr newattrs;
newattrs.ia_valid = ATTR_FORCE | kill;
return notify_change(dentry, &newattrs);
}
/*
 * file_remove_suid - strip setuid/setgid bits and security "privileged"
 * attributes from @file before a write.  Returns 0 on success or a
 * negative errno from the security hook / attribute change.
 */
int file_remove_suid(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int kill_priv;
	int kill_suid;
	int err = 0;

	/* Fast path for nothing security related */
	if (IS_NOSEC(inode))
		return 0;

	kill_suid = should_remove_suid(dentry);
	kill_priv = security_inode_need_killpriv(dentry);
	if (kill_priv < 0)
		return kill_priv;

	if (kill_priv)
		err = security_inode_killpriv(dentry);
	if (!err && kill_suid)
		err = __remove_suid(dentry, kill_suid);
	/* Remember the all-clear so the fast path triggers next time. */
	if (!err && (inode->i_sb->s_flags & MS_NOSEC))
		inode->i_flags |= S_NOSEC;

	return err;
}
EXPORT_SYMBOL(file_remove_suid);
/*
 * Performs necessary checks before doing a write
 *
 * Can adjust writing position or amount of bytes to write.
 * Returns appropriate error code that caller should return or
 * zero in case that write should be allowed.
 *
 * Enforces, in order: O_APPEND repositioning, RLIMIT_FSIZE, the
 * non-LFS 2GB limit, and the filesystem's (or block device's) size
 * limit, truncating *count where a short write is acceptable.
 */
inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
{
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = rlimit(RLIMIT_FSIZE);

	if (unlikely(*pos < 0))
		return -EINVAL;

	if (!isblk) {
		/* FIXME: this is for backwards compatibility with 2.4 */
		if (file->f_flags & O_APPEND)
			*pos = i_size_read(inode);

		if (limit != RLIM_INFINITY) {
			if (*pos >= limit) {
				/* Entire write is beyond the rlimit. */
				send_sig(SIGXFSZ, current, 0);
				return -EFBIG;
			}
			if (*count > limit - (typeof(limit))*pos) {
				*count = limit - (typeof(limit))*pos;
			}
		}
	}

	/*
	 * LFS rule: without O_LARGEFILE, writes may not cross MAX_NON_LFS.
	 */
	if (unlikely(*pos + *count > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (*pos >= MAX_NON_LFS) {
			return -EFBIG;
		}
		if (*count > MAX_NON_LFS - (unsigned long)*pos) {
			*count = MAX_NON_LFS - (unsigned long)*pos;
		}
	}

	/*
	 * Are we about to exceed the fs block limit ?
	 *
	 * If we have written data it becomes a short write. If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 * Linus frestrict idea will clean these up nicely..
	 */
	if (likely(!isblk)) {
		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
			if (*count || *pos > inode->i_sb->s_maxbytes) {
				return -EFBIG;
			}
			/* zero-length writes at ->s_maxbytes are OK */
		}

		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
			*count = inode->i_sb->s_maxbytes - *pos;
	} else {
#ifdef CONFIG_BLOCK
		loff_t isize;
		if (bdev_read_only(I_BDEV(inode)))
			return -EPERM;
		isize = i_size_read(inode);
		if (*pos >= isize) {
			if (*count || *pos > isize)
				return -ENOSPC;
		}

		/* Block devices cannot grow: clamp to the device size. */
		if (*pos + *count > isize)
			*count = isize - *pos;
#else
		return -EPERM;
#endif
	}
	return 0;
}
EXPORT_SYMBOL(generic_write_checks);
/*
 * Thin forwarding wrapper: delegate to the mapping's ->write_begin()
 * implementation.
 */
int pagecache_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	return mapping->a_ops->write_begin(file, mapping, pos, len, flags,
					   pagep, fsdata);
}
EXPORT_SYMBOL(pagecache_write_begin);
/*
 * Thin forwarding wrapper: mark the page accessed for page reclaim, then
 * delegate to the mapping's ->write_end() implementation.
 */
int pagecache_write_end(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	mark_page_accessed(page);
	return mapping->a_ops->write_end(file, mapping, pos, len, copied,
					 page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);
/*
 * Perform an O_DIRECT write of @count bytes from @iter at @pos.
 * Returns the number of bytes written (updating *@ppos and i_size), 0 if
 * the caller should fall back to buffered IO, or a negative error.
 */
ssize_t
generic_file_direct_write_iter(struct kiocb *iocb, struct iov_iter *iter,
		loff_t pos, loff_t *ppos, size_t count)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written;
	size_t write_len;
	pgoff_t end;

	/* Trim the iterator when the caller asked for fewer bytes than it holds. */
	if (count != iov_iter_count(iter)) {
		written = iov_iter_shorten(iter, count);
		if (written)
			goto out;
	}

	write_len = count;
	end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;

	/* Sync dirty pagecache for the range before bypassing it. */
	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
	if (written)
		goto out;

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data.  We invalidate clean cached page from the region we're
	 * about to write.  We do this *before* the write so that we can return
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	if (mapping->nrpages) {
		written = invalidate_inode_pages2_range(mapping,
					pos >> PAGE_CACHE_SHIFT, end);
		/*
		 * If a page can not be invalidated, return 0 to fall back
		 * to buffered write.
		 */
		if (written) {
			if (written == -EBUSY)
				return 0;
			goto out;
		}
	}

	written = mapping->a_ops->direct_IO(WRITE, iocb, iter, pos);

	/*
	 * Finally, try again to invalidate clean pages which might have been
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * if the source of the write was an mmap'ed region of the file
	 * we're writing.  Either one is a pretty crazy thing to do,
	 * so we don't support it 100%.  If this invalidation
	 * fails, tough, the write still worked...
	 */
	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_CACHE_SHIFT, end);
	}

	/* On success, push i_size forward (except for block devices). */
	if (written > 0) {
		pos += written;
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		*ppos = pos;
	}
out:
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write_iter);
/*
 * iovec-based front end for generic_file_direct_write_iter(): wrap the
 * vector in an iov_iter, do the direct write, and report back any
 * shortening of the vector through *nr_segs.
 */
ssize_t
generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long *nr_segs, loff_t pos, loff_t *ppos,
		size_t count, size_t ocount)
{
	struct iov_iter i;
	ssize_t written;

	iov_iter_init(&i, iov, *nr_segs, ocount, 0);
	written = generic_file_direct_write_iter(iocb, &i, pos, ppos, count);

	/* generic_file_direct_write_iter() might have shortened the vec */
	if (*nr_segs != i.nr_segs)
		*nr_segs = i.nr_segs;
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);
/*
 * Find or create a page at the given pagecache position. Return the locked
 * page. This function is specifically for buffered writes.
 */
struct page *grab_cache_page_write_begin(struct address_space *mapping,
					pgoff_t index, unsigned flags)
{
	int status;
	gfp_t gfp_mask;
	struct page *page;
	gfp_t gfp_notmask = 0;

	gfp_mask = mapping_gfp_mask(mapping);
	/* Writes to dirty-accounted mappings may trigger writeback throttling. */
	if (mapping_cap_account_dirty(mapping))
		gfp_mask |= __GFP_WRITE;
	/* AOP_FLAG_NOFS: caller holds fs locks, so forbid fs reclaim. */
	if (flags & AOP_FLAG_NOFS)
		gfp_notmask = __GFP_FS;
repeat:
	page = find_lock_page(mapping, index);
	if (page)
		goto found;
retry:
	page = __page_cache_alloc(gfp_mask & ~gfp_notmask);
	if (!page)
		return NULL;
	/*
	 * Avoid pinning CMA pageblocks with pagecache: give the page back
	 * and retry with movable allocations masked off.
	 */
	if (is_cma_pageblock(page)) {
		__free_page(page);
		gfp_notmask |= __GFP_MOVABLE;
		goto retry;
	}
	status = add_to_page_cache_lru(page, mapping, index,
						GFP_KERNEL & ~gfp_notmask);
	if (unlikely(status)) {
		page_cache_release(page);
		/* -EEXIST: somebody else inserted a page first; look it up. */
		if (status == -EEXIST)
			goto repeat;
		return NULL;
	}
found:
	/* Don't hand back a page still under writeback. */
	wait_on_page_writeback(page);
	return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);
/*
 * Buffered-write inner loop: copy user data page by page into the
 * pagecache via ->write_begin()/->write_end().  Returns bytes written,
 * or the error if nothing was written.
 */
static ssize_t generic_perform_write(struct file *file,
				struct iov_iter *i, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = 0;

	/*
	 * Copies from kernel address space cannot fail (NFSD is a big user).
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		flags |= AOP_FLAG_UNINTERRUPTIBLE;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
						iov_iter_count(i));

again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
						&page, &fsdata);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/* Atomic copy: a fault here yields a short copy, not a sleep. */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		mark_page_accessed(page);
		status = a_ops->write_end(file, mapping, pos, bytes, copied,
						page, fsdata);
		if (unlikely(status < 0))
			break;
		/* ->write_end() may accept fewer bytes than were copied. */
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;

		balance_dirty_pages_ratelimited(mapping);
		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}
	} while (iov_iter_count(i));

	return written ? written : status;
}
/*
 * Buffered-write entry taking an iov_iter.  @written is what an earlier
 * (e.g. direct-IO) phase already wrote; the iterator is trimmed to
 * @count + @written before the copy loop runs.  Returns the cumulative
 * byte count or an error.
 */
ssize_t
generic_file_buffered_write_iter(struct kiocb *iocb, struct iov_iter *iter,
		loff_t pos, loff_t *ppos, size_t count, ssize_t written)
{
	struct file *file = iocb->ki_filp;
	ssize_t status;

	if ((count + written) != iov_iter_count(iter)) {
		int rc = iov_iter_shorten(iter, count + written);
		if (rc)
			return rc;
	}

	status = generic_perform_write(file, iter, pos);
	if (likely(status >= 0)) {
		written += status;
		*ppos = pos + status;
  	}
	
	return written ? written : status;
}
EXPORT_SYMBOL(generic_file_buffered_write_iter);
/*
 * iovec front end for generic_file_buffered_write_iter(): build an
 * iov_iter over @iov (already advanced past @written bytes) and hand off.
 */
ssize_t
generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos, loff_t *ppos,
		size_t count, ssize_t written)
{
	struct iov_iter i;

	iov_iter_init(&i, iov, nr_segs, count, written);
	return generic_file_buffered_write_iter(iocb, &i, pos, ppos,
						count, written);
}
EXPORT_SYMBOL(generic_file_buffered_write);
/**
 * __generic_file_write_iter - write data to a file
 * @iocb:	IO state structure (file, offset, etc.)
 * @iter:	iov_iter specifying memory to write
 * @ppos:	position where to write
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * It expects i_mutex to be grabbed unless we work on a block device or similar
 * object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_mutex.
 */
ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
				  loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space * mapping = file->f_mapping;
	size_t count;		/* after file limit checks */
	struct inode 	*inode = mapping->host;
	loff_t		pos;
	ssize_t		written;
	ssize_t		err;

	count = iov_iter_count(iter);
	pos = *ppos;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;
	written = 0;

	/* May clamp count or move pos (O_APPEND, rlimits, s_maxbytes). */
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (unlikely(file->f_flags & O_DIRECT)) {
		loff_t endbyte;
		ssize_t written_buffered;

		written = generic_file_direct_write_iter(iocb, iter, pos,
							 ppos, count);
		if (written < 0 || written == count)
			goto out;
		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		pos += written;
		count -= written;
		iov_iter_advance(iter, written);
		written_buffered = generic_file_buffered_write_iter(iocb, iter,
						pos, ppos, count, written);
		/*
		 * If generic_file_buffered_write() retuned a synchronous error
		 * then we want to return the number of bytes which were
		 * direct-written, or the error code if that was zero.  Note
		 * that this differs from normal direct-io semantics, which
		 * will return -EFOO even if some bytes were written.
		 */
		if (written_buffered < 0) {
			err = written_buffered;
			goto out;
		}

		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		endbyte = pos + written_buffered - written - 1;
		err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
		if (err == 0) {
			written = written_buffered;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_CACHE_SHIFT,
						 endbyte >> PAGE_CACHE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		iter->count = count;
		written = generic_file_buffered_write_iter(iocb, iter,
				pos, ppos, count, written);
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(__generic_file_write_iter);
/*
 * Locked wrapper around __generic_file_write_iter(): takes i_mutex for
 * the write, then handles O_SYNC/O_DSYNC semantics via
 * generic_write_sync() outside the mutex.
 */
ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
				loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);
	ret = __generic_file_write_iter(iocb, iter, &iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 || ret == -EIOCBQUEUED) {
		ssize_t err;

		/* Sync the written range; surface sync failure over byte count. */
		err = generic_write_sync(file, pos, ret);
		if (err < 0 && ret > 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_write_iter);
/*
 * iovec front end for __generic_file_write_iter().
 *
 * Validates the user vector with generic_segment_checks(), wraps it in an
 * iov_iter and performs the write.  Returns bytes written or a negative
 * error.
 *
 * Fix: `ret` was declared `int`, silently truncating the `ssize_t`
 * result of __generic_file_write_iter() before it was returned from this
 * ssize_t-returning function; declare it ssize_t.
 */
ssize_t
__generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
			 unsigned long nr_segs, loff_t *ppos)
{
	struct iov_iter iter;
	size_t count;
	ssize_t ret;

	count = 0;
	/* Validate the segments and compute the total byte count. */
	ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (ret)
		goto out;

	iov_iter_init(&iter, iov, nr_segs, count, 0);
	ret = __generic_file_write_iter(iocb, &iter, ppos);
out:
	return ret;
}
EXPORT_SYMBOL(__generic_file_aio_write);
/**
 * generic_file_aio_write - write data to a file
 * @iocb:	IO state structure
 * @iov:	vector with data to write
 * @nr_segs:	number of segments in the vector
 * @pos:	position in file where to write
 *
 * This is a wrapper around __generic_file_aio_write() to be used by most
 * filesystems. It takes care of syncing the file in case of O_SYNC file
 * and acquires i_mutex as needed.
 */
ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct blk_plug plug;
	ssize_t ret;

	BUG_ON(iocb->ki_pos != pos);

	/* Freeze protection + i_mutex around the actual write. */
	sb_start_write(inode->i_sb);
	mutex_lock(&inode->i_mutex);
	/* Plug block-layer submissions so the write batches nicely. */
	blk_start_plug(&plug);
	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 || ret == -EIOCBQUEUED) {
		ssize_t err;

		/* O_SYNC handling happens outside i_mutex. */
		err = generic_write_sync(file, pos, ret);
		if (err < 0 && ret > 0)
			ret = err;
	}
	blk_finish_plug(&plug);
	sb_end_write(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(generic_file_aio_write);
/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is to try to release any data against the page
 * (presumably at page->private). If the release was successful, return `1'.
 * Otherwise return zero.
 *
 * This may also be called if PG_fscache is set on a page, indicating that the
 * page is known to the local caching routines.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
 *
 */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	/* Pages under writeback can never be released. */
	if (PageWriteback(page))
		return 0;

	/* Prefer the filesystem's hook; fall back to the buffer layer. */
	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}
EXPORT_SYMBOL(try_to_release_page);
|
shengdie/Dorimanx-LG-G2-D802-Kernel
|
mm/filemap.c
|
C
|
gpl-2.0
| 68,410
|
/* Map (unsigned int) keys to (source file, line, column) triples.
Copyright (C) 2001-2016 Free Software Foundation, Inc.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>.
In other words, you are welcome to use, share and improve this program.
You are forbidden to forbid anyone else to use, share and improve
what you give them. Help stamp out software-hoarding! */
#include "config.h"
#include "system.h"
#include "line-map.h"
#include "cpplib.h"
#include "internal.h"
#include "hashtab.h"
/* Do not track column numbers higher than this one.  As a result, the
   range of column_bits is [12, 18] (or 0 if column numbers are
   disabled).  */
const unsigned int LINE_MAP_MAX_COLUMN_NUMBER = (1U << 12);

/* Do not pack ranges if locations get higher than this.
   If you change this, update:
     gcc.dg/plugin/location_overflow_plugin.c
     gcc.dg/plugin/location-overflow-test-*.c.  */
const source_location LINE_MAP_MAX_LOCATION_WITH_PACKED_RANGES = 0x50000000;

/* Do not track column numbers if locations get higher than this.
   If you change this, update:
     gcc.dg/plugin/location_overflow_plugin.c
     gcc.dg/plugin/location-overflow-test-*.c.  */
const source_location LINE_MAP_MAX_LOCATION_WITH_COLS = 0x60000000;

/* Highest possible source location encoded within an ordinary or
   macro map.  Locations above this are ad-hoc locations (bit 31 set)
   referring to the lookaside table.  */
const source_location LINE_MAP_MAX_SOURCE_LOCATION = 0x70000000;
static void trace_include (const struct line_maps *, const line_map_ordinary *);
static const line_map_ordinary * linemap_ordinary_map_lookup (struct line_maps *,
source_location);
static const line_map_macro* linemap_macro_map_lookup (struct line_maps *,
source_location);
static source_location linemap_macro_map_loc_to_def_point
(const line_map_macro *, source_location);
static source_location linemap_macro_map_loc_unwind_toward_spelling
(line_maps *set, const line_map_macro *, source_location);
static source_location linemap_macro_map_loc_to_exp_point
(const line_map_macro *, source_location);
static source_location linemap_macro_loc_to_spelling_point
(struct line_maps *, source_location, const line_map_ordinary **);
static source_location linemap_macro_loc_to_def_point (struct line_maps *,
source_location,
const line_map_ordinary **);
static source_location linemap_macro_loc_to_exp_point (struct line_maps *,
source_location,
const line_map_ordinary **);
/* Counters defined in macro.c. */
extern unsigned num_expanded_macros_counter;
extern unsigned num_macro_tokens_counter;
/* Hash function for location_adhoc_data hashtable: sum the locus, the
   range endpoints and the data pointer.  */
static hashval_t
location_adhoc_data_hash (const void *l)
{
  const struct location_adhoc_data *item
    = (const struct location_adhoc_data *) l;
  hashval_t h = (hashval_t) item->locus;
  h += (hashval_t) item->src_range.m_start;
  h += (hashval_t) item->src_range.m_finish;
  h += (size_t) item->data;
  return h;
}
/* Compare function for location_adhoc_data hashtable: all four fields
   must match.  */
static int
location_adhoc_data_eq (const void *l1, const void *l2)
{
  const struct location_adhoc_data *a
    = (const struct location_adhoc_data *) l1;
  const struct location_adhoc_data *b
    = (const struct location_adhoc_data *) l2;
  if (a->locus != b->locus)
    return 0;
  if (a->src_range.m_start != b->src_range.m_start)
    return 0;
  if (a->src_range.m_finish != b->src_range.m_finish)
    return 0;
  return a->data == b->data;
}
/* Update the hashtable when location_adhoc_data is reallocated.  Each
   hash slot stores a pointer into the data array; after a realloc moves
   the array, every stored pointer is shifted by the byte delta passed in
   *DATA.  Returning 1 keeps htab_traverse going.  */
static int
location_adhoc_data_update (void **slot, void *data)
{
  *((char **) slot) += *((long long *) data);
  return 1;
}
/* Rebuild the hash table from the location adhoc data.  Used when the
   table cannot be fixed up in place (e.g. after streaming state back
   in): allocate a fresh table and re-insert every live entry.  */
void
rebuild_location_adhoc_htab (struct line_maps *set)
{
  unsigned i;
  set->location_adhoc_data_map.htab =
      htab_create (100, location_adhoc_data_hash, location_adhoc_data_eq, NULL);
  for (i = 0; i < set->location_adhoc_data_map.curr_loc; i++)
    htab_find_slot (set->location_adhoc_data_map.htab,
		    set->location_adhoc_data_map.data + i, INSERT);
}
/* Helper function for get_combined_adhoc_loc.
   Can the given LOCUS + SRC_RANGE and DATA pointer be stored compactly
   within a source_location, without needing to use an ad-hoc location.  */
static bool
can_be_stored_compactly_p (struct line_maps *set,
			   source_location locus,
			   source_range src_range,
			   void *data)
{
  /* If there's an ad-hoc pointer, we can't store it directly in the
     source_location, we need the lookaside.  */
  if (data)
    return false;

  /* We only store ranges that begin at the locus and that are sufficiently
     "sane".  */
  if (src_range.m_start != locus
      || src_range.m_finish < src_range.m_start)
    return false;

  if (src_range.m_start < RESERVED_LOCATION_COUNT
      || locus >= LINE_MAP_MAX_LOCATION_WITH_PACKED_RANGES)
    return false;

  /* All 3 locations must be within ordinary maps, typically, the same
     ordinary map.  */
  source_location lowest_macro_loc = LINEMAPS_MACRO_LOWEST_LOCATION (set);
  if (locus >= lowest_macro_loc
      || src_range.m_start >= lowest_macro_loc
      || src_range.m_finish >= lowest_macro_loc)
    return false;

  /* Passed all tests.  */
  return true;
}
/* Combine LOCUS and DATA to a combined adhoc loc.  Tries two compact
   encodings (range packed into the low bits of the locus, or a pure
   degenerate range) before falling back to an entry in the ad-hoc
   lookaside table, whose index is returned with bit 31 set.  */
source_location
get_combined_adhoc_loc (struct line_maps *set,
			source_location locus,
			source_range src_range,
			void *data)
{
  struct location_adhoc_data lb;
  struct location_adhoc_data **slot;

  /* An already ad-hoc LOCUS is first resolved to its underlying locus.  */
  if (IS_ADHOC_LOC (locus))
    locus
      = set->location_adhoc_data_map.data[locus & MAX_SOURCE_LOCATION].locus;
  if (locus == 0 && data == NULL)
    return 0;

  /* Any ordinary locations ought to be "pure" at this point: no
     compressed ranges.  */
  linemap_assert (locus < RESERVED_LOCATION_COUNT
		  || locus >= LINE_MAP_MAX_LOCATION_WITH_PACKED_RANGES
		  || locus >= LINEMAPS_MACRO_LOWEST_LOCATION (set)
		  || pure_location_p (set, locus));

  /* Consider short-range optimization.  */
  if (can_be_stored_compactly_p (set, locus, src_range, data))
    {
      /* The low bits ought to be clear.  */
      linemap_assert (pure_location_p (set, locus));
      const line_map *map = linemap_lookup (set, locus);
      const line_map_ordinary *ordmap = linemap_check_ordinary (map);
      unsigned int int_diff = src_range.m_finish - src_range.m_start;
      unsigned int col_diff = (int_diff >> ordmap->m_range_bits);
      /* If the range extent fits into the map's spare low bits, OR it
	 into the locus rather than using the lookaside.  */
      if (col_diff < (1U << ordmap->m_range_bits))
	{
	  source_location packed = locus | col_diff;
	  set->num_optimized_ranges++;
	  return packed;
	}
    }

  /* We can also compactly store locations
     when locus == start == finish (and data is NULL).  */
  if (locus == src_range.m_start
      && locus == src_range.m_finish
      && !data)
    return locus;

  if (!data)
    set->num_unoptimized_ranges++;

  lb.locus = locus;
  lb.src_range = src_range;
  lb.data = data;
  slot = (struct location_adhoc_data **)
      htab_find_slot (set->location_adhoc_data_map.htab, &lb, INSERT);
  if (*slot == NULL)
    {
      /* New entry: grow the backing array if full, then append.  */
      if (set->location_adhoc_data_map.curr_loc >=
	  set->location_adhoc_data_map.allocated)
	{
	  char *orig_data = (char *) set->location_adhoc_data_map.data;
	  long long offset;
	  /* Cast away extern "C" from the type of xrealloc.  */
	  line_map_realloc reallocator = (set->reallocator
					  ? set->reallocator
					  : (line_map_realloc) xrealloc);

	  if (set->location_adhoc_data_map.allocated == 0)
	    set->location_adhoc_data_map.allocated = 128;
	  else
	    set->location_adhoc_data_map.allocated *= 2;
	  set->location_adhoc_data_map.data = (struct location_adhoc_data *)
	      reallocator (set->location_adhoc_data_map.data,
			   set->location_adhoc_data_map.allocated
			   * sizeof (struct location_adhoc_data));
	  /* The array may have moved: shift every pointer stored in the
	     hash table by the same byte delta.  */
	  offset = (char *) (set->location_adhoc_data_map.data) - orig_data;
	  if (set->location_adhoc_data_map.allocated > 128)
	    htab_traverse (set->location_adhoc_data_map.htab,
			   location_adhoc_data_update, &offset);
	}
      *slot = set->location_adhoc_data_map.data
	      + set->location_adhoc_data_map.curr_loc;
      set->location_adhoc_data_map.data[set->location_adhoc_data_map.curr_loc++]
	= lb;
    }

  /* Bit 31 marks the result as an ad-hoc location (table index).  */
  return ((*slot) - set->location_adhoc_data_map.data) | 0x80000000;
}
/* Return the data for the adhoc loc.  LOC must be an ad-hoc location
   (bit 31 set); the low bits index the lookaside table.  */
void *
get_data_from_adhoc_loc (struct line_maps *set, source_location loc)
{
  linemap_assert (IS_ADHOC_LOC (loc));
  return set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].data;
}
/* Return the location for the adhoc loc, i.e. the underlying locus the
   ad-hoc entry was built from.  LOC must be an ad-hoc location.  */
source_location
get_location_from_adhoc_loc (struct line_maps *set, source_location loc)
{
  linemap_assert (IS_ADHOC_LOC (loc));
  return set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].locus;
}
/* Return the source_range for adhoc location LOC, read from the
   lookaside table.  LOC must be an ad-hoc location.  */
static source_range
get_range_from_adhoc_loc (struct line_maps *set, source_location loc)
{
  linemap_assert (IS_ADHOC_LOC (loc));
  return set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].src_range;
}
/* Get the source_range of location LOC, either from the ad-hoc
   lookaside table, or embedded inside LOC itself.  */
source_range
get_range_from_loc (struct line_maps *set,
		    source_location loc)
{
  if (IS_ADHOC_LOC (loc))
    return get_range_from_adhoc_loc (set, loc);

  /* For ordinary maps, extract packed range.  */
  if (loc >= RESERVED_LOCATION_COUNT
      && loc < LINEMAPS_MACRO_LOWEST_LOCATION (set)
      && loc <= LINE_MAP_MAX_LOCATION_WITH_PACKED_RANGES)
    {
      const line_map *map = linemap_lookup (set, loc);
      const line_map_ordinary *ordmap = linemap_check_ordinary (map);
      source_range result;
      /* The low m_range_bits of LOC encode the range extent; the start
	 is LOC with those bits cleared.  */
      int offset = loc & ((1 << ordmap->m_range_bits) - 1);
      result.m_start = loc - offset;
      result.m_finish = result.m_start + (offset << ordmap->m_range_bits);
      return result;
    }

  /* Otherwise the location is "pure": a degenerate caret-only range.  */
  return source_range::from_location (loc);
}
/* Get whether location LOC is a "pure" location, or
whether it is an ad-hoc location, or embeds range information. */
bool
pure_location_p (line_maps *set, source_location loc)
{
if (IS_ADHOC_LOC (loc))
return false;
const line_map *map = linemap_lookup (set, loc);
const line_map_ordinary *ordmap = linemap_check_ordinary (map);
if (loc & ((1U << ordmap->m_range_bits) - 1))
return false;
return true;
}
/* Finalize the location_adhoc_data structure: free the lookaside hash
   table created by linemap_init / rebuild_location_adhoc_htab.  */
void
location_adhoc_data_fini (struct line_maps *set)
{
  htab_delete (set->location_adhoc_data_map.htab);
}
/* Initialize a line map set.  Zeroes the whole structure, reserves the
   first RESERVED_LOCATION_COUNT locations, creates the ad-hoc lookaside
   table, and records BUILTIN_LOCATION.  */
void
linemap_init (struct line_maps *set,
	      source_location builtin_location)
{
  memset (set, 0, sizeof (struct line_maps));
  set->highest_location = RESERVED_LOCATION_COUNT - 1;
  set->highest_line = RESERVED_LOCATION_COUNT - 1;
  set->location_adhoc_data_map.htab =
      htab_create (100, location_adhoc_data_hash, location_adhoc_data_eq, NULL);
  set->builtin_location = builtin_location;
}
/* Check for and warn about line_maps entered but not exited.  Walks the
   include chain from the last ordinary map back to the main file,
   reporting every file still "open".  */
void
linemap_check_files_exited (struct line_maps *set)
{
  const line_map_ordinary *map;
  /* Depending upon whether we are handling preprocessed input or
     not, this can be a user error or an ICE.  */
  for (map = LINEMAPS_LAST_ORDINARY_MAP (set);
       ! MAIN_FILE_P (map);
       map = INCLUDED_FROM (set, map))
    fprintf (stderr, "line-map.c: file \"%s\" entered but not left\n",
	     ORDINARY_MAP_FILE_NAME (map));
}
/* Create a new line map in the line map set SET, and return it.
   REASON is the reason of creating the map. It determines the type
   of map created (ordinary or macro map). Note that ordinary maps and
   macro maps are allocated in different memory location.  */

static struct line_map *
new_linemap (struct line_maps *set,
	     enum lc_reason reason)
{
  /* Depending on this variable, a macro map would be allocated in a
     different memory location than an ordinary map.  */
  bool macro_map_p = (reason == LC_ENTER_MACRO);
  struct line_map *result;

  if (LINEMAPS_USED (set, macro_map_p) == LINEMAPS_ALLOCATED (set, macro_map_p))
    {
      /* We ran out of allocated line maps. Let's allocate more.  */
      unsigned alloc_size;

      /* Cast away extern "C" from the type of xrealloc.  */
      line_map_realloc reallocator = (set->reallocator
				      ? set->reallocator
				      : (line_map_realloc) xrealloc);
      line_map_round_alloc_size_func round_alloc_size =
	set->round_alloc_size;

      size_t map_size = (macro_map_p
			 ? sizeof (line_map_macro)
			 : sizeof (line_map_ordinary));

      /* We are going to execute some dance to try to reduce the
	 overhead of the memory allocator, in case we are using the
	 ggc-page.c one.

	 The actual size of memory we are going to get back from the
	 allocator is the smallest power of 2 that is greater than the
	 size we requested.  So let's consider that size then.  */

      alloc_size =
	(2 * LINEMAPS_ALLOCATED (set, macro_map_p) + 256)
	* map_size;

      /* Get the actual size of memory that is going to be allocated
	 by the allocator.  */
      alloc_size = round_alloc_size (alloc_size);

      /* Now alloc_size contains the exact memory size we would get if
	 we have asked for the initial alloc_size amount of memory.
	 Let's get back to the number of macro map that amounts
	 to.  */
      LINEMAPS_ALLOCATED (set, macro_map_p) =
	alloc_size / map_size;

      /* And now let's really do the re-allocation.  */
      if (macro_map_p)
	{
	  set->info_macro.maps
	    = (line_map_macro *) (*reallocator) (set->info_macro.maps,
						 (LINEMAPS_ALLOCATED (set, macro_map_p)
						  * map_size));
	  result = &set->info_macro.maps[LINEMAPS_USED (set, macro_map_p)];
	}
      else
	{
	  set->info_ordinary.maps =
	    (line_map_ordinary *) (*reallocator) (set->info_ordinary.maps,
						  (LINEMAPS_ALLOCATED (set, macro_map_p)
						   * map_size));
	  result = &set->info_ordinary.maps[LINEMAPS_USED (set, macro_map_p)];
	}
      /* Zero-initialize the freshly allocated unused tail of the array.  */
      memset (result, 0,
	      ((LINEMAPS_ALLOCATED (set, macro_map_p)
		- LINEMAPS_USED (set, macro_map_p))
	       * map_size));
    }
  else
    {
      /* There is still room: hand out the next unused slot.  */
      if (macro_map_p)
	result = &set->info_macro.maps[LINEMAPS_USED (set, macro_map_p)];
      else
	result = &set->info_ordinary.maps[LINEMAPS_USED (set, macro_map_p)];
    }

  LINEMAPS_USED (set, macro_map_p)++;

  result->reason = reason;
  return result;
}
/* Add a mapping of logical source line to physical source file and
   line number.

   The text pointed to by TO_FILE must have a lifetime
   at least as long as the final call to lookup_line ().  An empty
   TO_FILE means standard input.  If reason is LC_LEAVE, and
   TO_FILE is NULL, then TO_FILE, TO_LINE and SYSP are given their
   natural values considering the file we are returning to.

   FROM_LINE should be monotonic increasing across calls to this
   function.  A call to this function can relocate the previous set of
   maps, so any stored line_map pointers should not be used.  */

const struct line_map *
linemap_add (struct line_maps *set, enum lc_reason reason,
	     unsigned int sysp, const char *to_file, linenum_type to_line)
{
  /* Generate a start_location above the current highest_location.
     If possible, make the low range bits be zero.  */
  source_location start_location;
  if (set->highest_location < LINE_MAP_MAX_LOCATION_WITH_COLS)
    {
      start_location = set->highest_location + (1 << set->default_range_bits);
      if (set->default_range_bits)
	start_location &= ~((1 << set->default_range_bits) - 1);
      linemap_assert (0 == (start_location
			    & ((1 << set->default_range_bits) - 1)));
    }
  else
    /* Past the column-tracking threshold: no bits are spared for
       ranges, just take the next location.  */
    start_location = set->highest_location + 1;

  linemap_assert (!(LINEMAPS_ORDINARY_USED (set)
		    && (start_location
			< MAP_START_LOCATION (LINEMAPS_LAST_ORDINARY_MAP (set)))));

  /* When we enter the file for the first time reason cannot be
     LC_RENAME.  */
  linemap_assert (!(set->depth == 0 && reason == LC_RENAME));

  /* If we are leaving the main file, return a NULL map.  */
  if (reason == LC_LEAVE
      && MAIN_FILE_P (LINEMAPS_LAST_ORDINARY_MAP (set))
      && to_file == NULL)
    {
      set->depth--;
      return NULL;
    }

  linemap_assert (reason != LC_ENTER_MACRO);
  line_map_ordinary *map = linemap_check_ordinary (new_linemap (set, reason));

  if (to_file && *to_file == '\0' && reason != LC_RENAME_VERBATIM)
    to_file = "<stdin>";

  if (reason == LC_RENAME_VERBATIM)
    reason = LC_RENAME;

  if (reason == LC_LEAVE)
    {
      /* When we are just leaving an "included" file, and jump to the next
	 location inside the "includer" right after the #include
	 "included", this variable points the map in use right before the
	 #include "included", inside the same "includer" file.  */
      line_map_ordinary *from;
      bool error;

      if (MAIN_FILE_P (map - 1))
	{
	  /* So this _should_ mean we are leaving the main file --
	     effectively ending the compilation unit. But to_file not
	     being NULL means the caller thinks we are leaving to
	     another file. This is an erroneous behaviour but we'll
	     try to recover from it. Let's pretend we are not leaving
	     the main file.  */
	  error = true;
	  reason = LC_RENAME;
	  from = map - 1;
	}
      else
	{
	  /* (MAP - 1) points to the map we are leaving. The
	     map from which (MAP - 1) got included should be the map
	     that comes right before MAP in the same file.  */
	  from = INCLUDED_FROM (set, map - 1);
	  error = to_file && filename_cmp (ORDINARY_MAP_FILE_NAME (from),
					   to_file);
	}

      /* Depending upon whether we are handling preprocessed input or
	 not, this can be a user error or an ICE.  */
      if (error)
	fprintf (stderr, "line-map.c: file \"%s\" left but not entered\n",
		 to_file);

      /* A TO_FILE of NULL is special - we use the natural values.  */
      if (error || to_file == NULL)
	{
	  to_file = ORDINARY_MAP_FILE_NAME (from);
	  to_line = SOURCE_LINE (from, from[1].start_location);
	  sysp = ORDINARY_MAP_IN_SYSTEM_HEADER_P (from);
	}
    }

  map->sysp = sysp;
  map->start_location = start_location;
  map->to_file = to_file;
  map->to_line = to_line;
  LINEMAPS_ORDINARY_CACHE (set) = LINEMAPS_ORDINARY_USED (set) - 1;
  /* Column/range bits stay zero until linemap_line_start assigns them.  */
  map->m_column_and_range_bits = 0;
  map->m_range_bits = 0;
  set->highest_location = start_location;
  set->highest_line = start_location;
  set->max_column_hint = 0;

  /* This assertion is placed after set->highest_location has
     been updated, since the latter affects
     linemap_location_from_macro_expansion_p, which ultimately affects
     pure_location_p.  */
  linemap_assert (pure_location_p (set, start_location));

  if (reason == LC_ENTER)
    {
      map->included_from =
	set->depth == 0 ? -1 : (int) (LINEMAPS_ORDINARY_USED (set) - 2);
      set->depth++;
      if (set->trace_includes)
	trace_include (set, map);
    }
  else if (reason == LC_RENAME)
    map->included_from = ORDINARY_MAP_INCLUDER_FILE_INDEX (&map[-1]);
  else if (reason == LC_LEAVE)
    {
      set->depth--;
      map->included_from =
	ORDINARY_MAP_INCLUDER_FILE_INDEX (INCLUDED_FROM (set, map - 1));
    }

  return map;
}
/* Returns TRUE if the line table set tracks token locations across
   macro expansion, FALSE otherwise.  */

bool
linemap_tracks_macro_expansion_locs_p (struct line_maps *set)
{
  /* The macro-map vector is allocated lazily; its mere presence is the
     signal that macro expansion tracking is enabled for SET.  */
  if (LINEMAPS_MACRO_MAPS (set) == NULL)
    return false;
  return true;
}
/* Create a macro map.  A macro map encodes source locations of tokens
   that are part of a macro replacement-list, at a macro expansion
   point.  See the extensive comments of struct line_map and struct
   line_map_macro, in line-map.h.

   This map shall be created when the macro is expanded.  The map
   encodes the source location of the expansion point of the macro as
   well as the "original" source location of each token that is part
   of the macro replacement-list.  If a macro is defined but never
   expanded, it has no macro map.  SET is the set of maps the macro
   map should be part of.  MACRO_NODE is the macro which the new macro
   map should encode source locations for.  EXPANSION is the location
   of the expansion point of MACRO.  For function-like macros
   invocations, it's best to make it point to the closing parenthesis
   of the macro, rather than the location of the first character
   of the macro.  NUM_TOKENS is the number of tokens that are part of
   the replacement-list of MACRO.

   Note that when we run out of the integer space available for source
   locations, this function returns NULL.  In that case, callers of
   this function cannot encode {line,column} pairs into locations of
   macro tokens anymore.  */

const line_map_macro *
linemap_enter_macro (struct line_maps *set, struct cpp_hashnode *macro_node,
                     source_location expansion, unsigned int num_tokens)
{
  line_map_macro *map;
  source_location start_location;
  /* Cast away extern "C" from the type of xrealloc.  */
  line_map_realloc reallocator = (set->reallocator
                                  ? set->reallocator
                                  : (line_map_realloc) xrealloc);

  /* Macro maps grow downward from the top of the source_location
     space: reserve NUM_TOKENS locations below the current lowest
     macro location.  */
  start_location = LINEMAPS_MACRO_LOWEST_LOCATION (set) - num_tokens;

  /* The second test catches unsigned wraparound of the subtraction
     above (NUM_TOKENS larger than the remaining space).  */
  if (start_location <= set->highest_line
      || start_location > LINEMAPS_MACRO_LOWEST_LOCATION (set))
    /* We ran out of macro map space.   */
    return NULL;

  map = linemap_check_macro (new_linemap (set, LC_ENTER_MACRO));

  map->start_location = start_location;
  map->macro = macro_node;
  map->n_tokens = num_tokens;
  /* Two slots per token: the spelling location and the
     parameter-replacement location (see linemap_add_macro_token).  */
  map->macro_locations
    = (source_location*) reallocator (NULL,
                                      2 * num_tokens
                                      * sizeof (source_location));
  map->expansion = expansion;
  memset (MACRO_MAP_LOCATIONS (map), 0,
          num_tokens * sizeof (source_location));

  /* Point the lookup cache at the newly created (last) macro map.  */
  LINEMAPS_MACRO_CACHE (set) = LINEMAPS_MACRO_USED (set) - 1;

  return map;
}
/* Create and return a virtual location for a token that is part of a
   macro expansion-list at a macro expansion point.  See the comment
   inside struct line_map_macro to see what an expansion-list exactly
   is.  A call to this function must come after a call to
   linemap_enter_macro.

   MAP is the map into which the source location is created.  TOKEN_NO
   is the index of the token in the macro replacement-list, starting
   at number 0.

   ORIG_LOC is the location of the token outside of this macro
   expansion.  If the token comes originally from the macro
   definition, it is the locus in the macro definition; otherwise it
   is a location in the context of the caller of this macro expansion
   (which is a virtual location or a source location if the caller is
   itself a macro expansion or not).

   ORIG_PARM_REPLACEMENT_LOC is the location in the macro definition,
   either of the token itself or of a macro parameter that it
   replaces.  */

source_location
linemap_add_macro_token (const line_map_macro *map,
                         unsigned int token_no,
                         source_location orig_loc,
                         source_location orig_parm_replacement_loc)
{
  linemap_assert (linemap_macro_expansion_map_p (map));
  linemap_assert (token_no < MACRO_MAP_NUM_MACRO_TOKENS (map));

  /* Each token owns a pair of slots in the locations vector: slot 0 is
     the spelling location, slot 1 the definition-point location.  */
  source_location *slot = &MACRO_MAP_LOCATIONS (map)[2 * token_no];
  slot[0] = orig_loc;
  slot[1] = orig_parm_replacement_loc;

  /* The virtual location is simply the map's base plus the index.  */
  return MAP_START_LOCATION (map) + token_no;
}
/* Return a source_location for the start (i.e. column==0) of
   (physical) line TO_LINE in the current source file (as in the
   most recent linemap_add).  MAX_COLUMN_HINT is the highest column
   number we expect to use in this line (but it does not change
   the highest_location).  */

source_location
linemap_line_start (struct line_maps *set, linenum_type to_line,
                    unsigned int max_column_hint)
{
  line_map_ordinary *map = LINEMAPS_LAST_ORDINARY_MAP (set);
  source_location highest = set->highest_location;
  source_location r;
  linenum_type last_line =
    SOURCE_LINE (map, set->highest_line);
  int line_delta = to_line - last_line;
  bool add_map = false;
  linemap_assert (map->m_column_and_range_bits >= map->m_range_bits);
  /* Bits actually available for the column number once the per-token
     range bits are subtracted.  */
  int effective_column_bits = map->m_column_and_range_bits - map->m_range_bits;

  /* Decide whether a new map is needed: going backwards, jumping far
     forward (which would waste location space), a column hint that no
     longer fits, an over-wide map for short lines, or crossing one of
     the location-space thresholds where packing must be reduced.  */
  if (line_delta < 0
      || (line_delta > 10
          && line_delta * map->m_column_and_range_bits > 1000)
      || (max_column_hint >= (1U << effective_column_bits))
      || (max_column_hint <= 80 && effective_column_bits >= 10)
      || (highest > LINE_MAP_MAX_LOCATION_WITH_PACKED_RANGES
          && map->m_range_bits > 0)
      || (highest > LINE_MAP_MAX_LOCATION_WITH_COLS
          && (set->max_column_hint || highest >= LINE_MAP_MAX_SOURCE_LOCATION)))
    add_map = true;
  else
    max_column_hint = set->max_column_hint;
  if (add_map)
    {
      int column_bits;
      int range_bits;
      if (max_column_hint > LINE_MAP_MAX_COLUMN_NUMBER
          || highest > LINE_MAP_MAX_LOCATION_WITH_COLS)
        {
          /* If the column number is ridiculous or we've allocated a huge
             number of source_locations, give up on column numbers
             (and on packed ranges).  */
          max_column_hint = 0;
          column_bits = 0;
          range_bits = 0;
          if (highest > LINE_MAP_MAX_SOURCE_LOCATION)
            return 0;
        }
      else
        {
          /* Start at 7 column bits (128 columns) and widen until the
             hint fits; round the hint up to the chosen power of two.  */
          column_bits = 7;
          if (highest <= LINE_MAP_MAX_LOCATION_WITH_PACKED_RANGES)
            range_bits = set->default_range_bits;
          else
            range_bits = 0;
          while (max_column_hint >= (1U << column_bits))
            column_bits++;
          max_column_hint = 1U << column_bits;
          column_bits += range_bits;
        }
      /* Allocate the new line_map.  However, if the current map only has a
         single line we can sometimes just increase its column_bits instead.  */
      if (line_delta < 0
          || last_line != ORDINARY_MAP_STARTING_LINE_NUMBER (map)
          || SOURCE_COLUMN (map, highest) >= (1U << column_bits)
          || range_bits < map->m_range_bits)
        map = linemap_check_ordinary
          (const_cast <line_map *>
            (linemap_add (set, LC_RENAME,
                          ORDINARY_MAP_IN_SYSTEM_HEADER_P (map),
                          ORDINARY_MAP_FILE_NAME (map),
                          to_line)));
      map->m_column_and_range_bits = column_bits;
      map->m_range_bits = range_bits;
      r = (MAP_START_LOCATION (map)
           + ((to_line - ORDINARY_MAP_STARTING_LINE_NUMBER (map))
              << column_bits));
    }
  else
    r = set->highest_line + (line_delta << map->m_column_and_range_bits);

  /* Locations of ordinary tokens are always lower than locations of
     macro tokens.  */
  if (r >= LINEMAPS_MACRO_LOWEST_LOCATION (set))
    return 0;

  set->highest_line = r;
  if (r > set->highest_location)
    set->highest_location = r;
  set->max_column_hint = max_column_hint;

  /* At this point, we expect one of:
     (a) the normal case: a "pure" location with 0 range bits, or
     (b) we've gone past LINE_MAP_MAX_LOCATION_WITH_COLS so can't track
        columns anymore (or ranges), or
     (c) we're in a region with a column hint exceeding
        LINE_MAP_MAX_COLUMN_NUMBER, so column-tracking is off,
        with column_bits == 0.  */
  linemap_assert (pure_location_p (set, r)
                  || r >= LINE_MAP_MAX_LOCATION_WITH_COLS
                  || map->m_column_and_range_bits == 0);
  linemap_assert (SOURCE_LINE (map, r) == to_line);
  return r;
}
/* Encode and return a source_location from a column number.  The
   source line considered is the last source line used to call
   linemap_line_start, i.e, the last source line which a location was
   encoded from.  */

source_location
linemap_position_for_column (struct line_maps *set, unsigned int to_column)
{
  source_location r = set->highest_line;

  linemap_assert
    (!linemap_macro_expansion_map_p (LINEMAPS_LAST_ORDINARY_MAP (set)));

  /* TO_COLUMN does not fit in the columns reserved for the current
     line: either give up on columns, or restart the line wider.  */
  if (to_column >= set->max_column_hint)
    {
      if (r > LINE_MAP_MAX_LOCATION_WITH_COLS
          || to_column > LINE_MAP_MAX_COLUMN_NUMBER)
        {
          /* Running low on source_locations - disable column numbers.  */
          return r;
        }
      else
        {
          /* Re-enter the same line with a hint of TO_COLUMN + 50 so a
             small further growth does not force another remap.  */
          line_map_ordinary *map = LINEMAPS_LAST_ORDINARY_MAP (set);
          r = linemap_line_start (set, SOURCE_LINE (map, r), to_column + 50);
        }
    }
  line_map_ordinary *map = LINEMAPS_LAST_ORDINARY_MAP (set);
  /* The column is packed above the map's per-token range bits.  */
  r = r + (to_column << map->m_range_bits);
  if (r >= set->highest_location)
    set->highest_location = r;
  return r;
}
/* Encode and return a source location from a given line and
   column.  */

source_location
linemap_position_for_line_and_column (line_maps *set,
                                      const line_map_ordinary *ord_map,
                                      linenum_type line,
                                      unsigned column)
{
  linemap_assert (ORDINARY_MAP_STARTING_LINE_NUMBER (ord_map) <= line);

  /* Base of the map plus the line offset, shifted past the bits that
     hold the column and range information.  */
  linenum_type line_offset
    = line - ORDINARY_MAP_STARTING_LINE_NUMBER (ord_map);
  source_location r
    = MAP_START_LOCATION (ord_map)
      + (line_offset << ord_map->m_column_and_range_bits);

  /* Columns are only encoded while locations are still small enough
     to carry them.  */
  if (r <= LINE_MAP_MAX_LOCATION_WITH_COLS)
    {
      unsigned masked_column
        = column & ((1 << ord_map->m_column_and_range_bits) - 1);
      r += masked_column << ord_map->m_range_bits;
    }

  /* Never collide with the macro-map location space above us.  */
  source_location ceiling = LINEMAPS_MACRO_LOWEST_LOCATION (set);
  if (r >= ceiling)
    r = ceiling - 1;

  if (r > set->highest_location)
    set->highest_location = r;
  return r;
}
/* Encode and return a source_location starting from location LOC and
   shifting it by OFFSET columns.  This function does not support
   virtual locations.  */

source_location
linemap_position_for_loc_and_offset (struct line_maps *set,
                                     source_location loc,
                                     unsigned int offset)
{
  const line_map_ordinary * map = NULL;

  if (IS_ADHOC_LOC (loc))
    loc = set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].locus;

  /* This function does not support virtual locations yet.  */
  if (linemap_assert_fails
      (!linemap_location_from_macro_expansion_p (set, loc)))
    return loc;

  if (offset == 0
      /* Adding an offset to a reserved location (like
         UNKNOWN_LOCATION for the C/C++ FEs) does not really make
         sense.  So let's leave the location intact in that case.  */
      || loc < RESERVED_LOCATION_COUNT)
    return loc;

  /* We find the real location and shift it.  */
  loc = linemap_resolve_location (set, loc, LRK_SPELLING_LOCATION, &map);
  /* The new location (loc + offset) should be higher than the first
     location encoded by MAP.  This can fail if the line information
     is messed up because of line directives (see PR66415).  */
  if (MAP_START_LOCATION (map) >= loc + offset)
    return loc;

  linenum_type line = SOURCE_LINE (map, loc);
  unsigned int column = SOURCE_COLUMN (map, loc);

  /* If MAP is not the last line map of its set, then the new location
     (loc + offset) should be less than the first location encoded by
     the next line map of the set.  Otherwise, we try to encode the
     location in the next map.  */
  while (map != LINEMAPS_LAST_ORDINARY_MAP (set)
         && loc + offset >= MAP_START_LOCATION (&map[1]))
    {
      map = &map[1];
      /* If the next map starts in a higher line, we cannot encode the
         location there.  */
      if (line < ORDINARY_MAP_STARTING_LINE_NUMBER (map))
        return loc;
    }

  /* The shifted column must still fit in MAP's column space;
     otherwise give back the unshifted location.  */
  offset += column;
  if (linemap_assert_fails (offset < (1u << map->m_column_and_range_bits)))
    return loc;

  source_location r =
    linemap_position_for_line_and_column (set, map, line, offset);
  /* Sanity-check the result: it must not exceed the highest location
     handed out so far, and it must resolve back to MAP.  */
  if (linemap_assert_fails (r <= set->highest_location)
      || linemap_assert_fails (map == linemap_lookup (set, r)))
    return loc;

  return r;
}
/* Given a virtual source location yielded by a map (either an
   ordinary or a macro map), returns that map.  */

const struct line_map*
linemap_lookup (struct line_maps *set, source_location line)
{
  if (IS_ADHOC_LOC (line))
    line = set->location_adhoc_data_map.data[line & MAX_SOURCE_LOCATION].locus;

  /* Dispatch on which half of the location space LINE lives in.  */
  return (linemap_location_from_macro_expansion_p (set, line)
          ? (const struct line_map *) linemap_macro_map_lookup (set, line)
          : (const struct line_map *) linemap_ordinary_map_lookup (set, line));
}
/* Given a source location yielded by an ordinary map, returns that
   map.  Since the set is built chronologically, the logical lines are
   monotonic increasing, and so the list is sorted and we can use a
   binary search.  */

static const line_map_ordinary *
linemap_ordinary_map_lookup (struct line_maps *set, source_location line)
{
  unsigned int md, mn, mx;
  const line_map_ordinary *cached, *result;

  /* Test SET before touching it: the original code unpacked ad-hoc
     locations through SET *before* the NULL check below, so that check
     could never fire in the very case it guards against.  */
  if (set == NULL)
    return NULL;

  if (IS_ADHOC_LOC (line))
    line = set->location_adhoc_data_map.data[line & MAX_SOURCE_LOCATION].locus;

  /* Reserved locations were not produced by any map.  */
  if (line < RESERVED_LOCATION_COUNT)
    return NULL;

  mn = LINEMAPS_ORDINARY_CACHE (set);
  mx = LINEMAPS_ORDINARY_USED (set);

  cached = LINEMAPS_ORDINARY_MAP_AT (set, mn);
  /* We should get a segfault if no line_maps have been added yet.  */
  if (line >= MAP_START_LOCATION (cached))
    {
      /* Cache hit: LINE falls inside the cached map.  */
      if (mn + 1 == mx || line < MAP_START_LOCATION (&cached[1]))
        return cached;
    }
  else
    {
      /* LINE precedes the cached map; restrict the search below it.  */
      mx = mn;
      mn = 0;
    }

  /* Binary search: maps are sorted by increasing start location.  */
  while (mx - mn > 1)
    {
      md = (mn + mx) / 2;
      if (MAP_START_LOCATION (LINEMAPS_ORDINARY_MAP_AT (set, md)) > line)
        mx = md;
      else
        mn = md;
    }

  LINEMAPS_ORDINARY_CACHE (set) = mn;
  result = LINEMAPS_ORDINARY_MAP_AT (set, mn);
  linemap_assert (line >= MAP_START_LOCATION (result));

  return result;
}
/* Given a source location yielded by a macro map, returns that map.
   Since the set is built chronologically, the logical lines are
   monotonic decreasing, and so the list is sorted and we can use a
   binary search.  */

static const line_map_macro *
linemap_macro_map_lookup (struct line_maps *set, source_location line)
{
  unsigned int md, mn, mx;
  const struct line_map_macro *cached, *result;

  /* Test SET before dereferencing it: the original code unpacked
     ad-hoc locations and ran linemap_assert through SET first, which
     made the NULL check unreachable in the case it guards.  */
  if (set == NULL)
    return NULL;

  if (IS_ADHOC_LOC (line))
    line = set->location_adhoc_data_map.data[line & MAX_SOURCE_LOCATION].locus;

  linemap_assert (line >= LINEMAPS_MACRO_LOWEST_LOCATION (set));

  mn = LINEMAPS_MACRO_CACHE (set);
  mx = LINEMAPS_MACRO_USED (set);

  cached = LINEMAPS_MACRO_MAP_AT (set, mn);

  /* Macro maps are sorted by *decreasing* start location, so try the
     cached map first and otherwise search below it.  */
  if (line >= MAP_START_LOCATION (cached))
    {
      if (mn == 0 || line < MAP_START_LOCATION (&cached[-1]))
        return cached;
      mx = mn - 1;
      mn = 0;
    }

  while (mn < mx)
    {
      md = (mx + mn) / 2;
      if (MAP_START_LOCATION (LINEMAPS_MACRO_MAP_AT (set, md)) > line)
        mn = md + 1;
      else
        mx = md;
    }

  LINEMAPS_MACRO_CACHE (set) = mx;
  result = LINEMAPS_MACRO_MAP_AT (set, LINEMAPS_MACRO_CACHE (set));
  linemap_assert (MAP_START_LOCATION (result) <= line);

  return result;
}
/* Return TRUE if MAP encodes locations coming from a macro
replacement-list at macro expansion point. */
bool
linemap_macro_expansion_map_p (const struct line_map *map)
{
if (!map)
return false;
return (map->reason == LC_ENTER_MACRO);
}
/* If LOCATION is the locus of a token in a replacement-list of a
   macro expansion return the location of the macro expansion point.

   Read the comments of struct line_map and struct line_map_macro in
   line-map.h to understand what a macro expansion point is.  */

static source_location
linemap_macro_map_loc_to_exp_point (const line_map_macro *map,
                                    source_location location ATTRIBUTE_UNUSED)
{
  linemap_assert (linemap_macro_expansion_map_p (map));
  linemap_assert (location >= MAP_START_LOCATION (map));

  /* Make sure LOCATION does index a token of this expansion.  */
  linemap_assert ((location - MAP_START_LOCATION (map))
                  < MACRO_MAP_NUM_MACRO_TOKENS (map));

  /* Every token of the expansion shares the same expansion point.  */
  return MACRO_MAP_EXPANSION_POINT_LOCATION (map);
}
/* LOCATION is the source location of a token that belongs to a macro
   replacement-list as part of the macro expansion denoted by MAP.

   Return the location of the token at the definition point of the
   macro.  */

static source_location
linemap_macro_map_loc_to_def_point (const line_map_macro *map,
                                    source_location location)
{
  linemap_assert (linemap_macro_expansion_map_p (map));
  linemap_assert (location >= MAP_START_LOCATION (map));
  linemap_assert (location >= RESERVED_LOCATION_COUNT);

  unsigned token_no = location - MAP_START_LOCATION (map);
  linemap_assert (token_no < MACRO_MAP_NUM_MACRO_TOKENS (map));

  /* Odd slots of the locations vector hold the definition-point
     locations (see linemap_add_macro_token).  */
  return MACRO_MAP_LOCATIONS (map)[2 * token_no + 1];
}
/* If LOCATION is the locus of a token that is an argument of a
   function-like macro M and appears in the expansion of M, return the
   locus of that argument in the context of the caller of M.

   In other words, this returns the xI location presented in the
   comments of line_map_macro above.  */

source_location
linemap_macro_map_loc_unwind_toward_spelling (line_maps *set,
                                              const line_map_macro* map,
                                              source_location location)
{
  if (IS_ADHOC_LOC (location))
    location = get_location_from_adhoc_loc (set, location);

  linemap_assert (linemap_macro_expansion_map_p (map));
  linemap_assert (location >= MAP_START_LOCATION (map));
  linemap_assert (location >= RESERVED_LOCATION_COUNT);
  linemap_assert (!IS_ADHOC_LOC (location));

  unsigned token_no = location - MAP_START_LOCATION (map);
  linemap_assert (token_no < MACRO_MAP_NUM_MACRO_TOKENS (map));

  /* Even slots of the locations vector hold the spelling (xI)
     locations (see linemap_add_macro_token).  */
  return MACRO_MAP_LOCATIONS (map)[2 * token_no];
}
/* Return the source line number corresponding to source location
   LOCATION.  SET is the line map set LOCATION comes from.  If
   LOCATION is the source location of token that is part of the
   replacement-list of a macro expansion return the line number of the
   macro expansion point.  */

int
linemap_get_expansion_line (struct line_maps *set,
                            source_location location)
{
  const line_map_ordinary *map = NULL;

  if (IS_ADHOC_LOC (location))
    location
      = set->location_adhoc_data_map.data[location
                                          & MAX_SOURCE_LOCATION].locus;

  /* Reserved locations (e.g. UNKNOWN_LOCATION) carry no line info.  */
  if (location < RESERVED_LOCATION_COUNT)
    return 0;

  /* Resolve macro virtual locations to their expansion point first.  */
  location = linemap_macro_loc_to_exp_point (set, location, &map);
  return SOURCE_LINE (map, location);
}
/* Return the path of the file corresponding to source code location
   LOCATION.

   If LOCATION is the source location of token that is part of the
   replacement-list of a macro expansion return the file path of the
   macro expansion point.

   SET is the line map set LOCATION comes from.  */

const char*
linemap_get_expansion_filename (struct line_maps *set,
                                source_location location)
{
  const struct line_map_ordinary *map = NULL;

  if (IS_ADHOC_LOC (location))
    location
      = set->location_adhoc_data_map.data[location
                                          & MAX_SOURCE_LOCATION].locus;

  /* Reserved locations were not produced by any map.  */
  if (location < RESERVED_LOCATION_COUNT)
    return NULL;

  /* Resolve macro virtual locations to their expansion point first.  */
  location = linemap_macro_loc_to_exp_point (set, location, &map);
  return LINEMAP_FILE (map);
}
/* Return the name of the macro associated to MACRO_MAP. */
const char*
linemap_map_get_macro_name (const line_map_macro *macro_map)
{
linemap_assert (macro_map && linemap_macro_expansion_map_p (macro_map));
return (const char*) NODE_NAME (MACRO_MAP_MACRO (macro_map));
}
/* Return a positive value if LOCATION is the locus of a token that is
   located in a system header, 0 otherwise.  It returns 1 if LOCATION
   is the locus of a token that is located in a system header, and 2
   if LOCATION is the locus of a token located in a C system header
   that therefore needs to be extern "C" protected in C++.

   Note that this function returns 1 if LOCATION belongs to a token
   that is part of a macro replacement-list defined in a system
   header, but expanded in a non-system file.  */

int
linemap_location_in_system_header_p (struct line_maps *set,
                                     source_location location)
{
  const struct line_map *map = NULL;

  if (IS_ADHOC_LOC (location))
    location = set->location_adhoc_data_map.data[location
                                                 & MAX_SOURCE_LOCATION].locus;

  /* Reserved locations were not produced by any map, hence are not in
     a system header.  */
  if (location < RESERVED_LOCATION_COUNT)
    return false;

  /* Let's look at where the token for LOCATION comes from.  */
  while (true)
    {
      map = linemap_lookup (set, location);
      if (map != NULL)
        {
          if (!linemap_macro_expansion_map_p (map))
            /* It's a normal token.  */
            return LINEMAP_SYSP (linemap_check_ordinary (map));
          else
            {
              const line_map_macro *macro_map = linemap_check_macro (map);

              /* It's a token resulting from a macro expansion.  Keep
                 unwinding toward its spelling location until we reach
                 an ordinary map.  */
              source_location loc =
                linemap_macro_map_loc_unwind_toward_spelling (set, macro_map, location);
              if (loc < RESERVED_LOCATION_COUNT)
                /* This token might come from a built-in macro.  Let's
                   look at where that macro got expanded.  */
                location = linemap_macro_map_loc_to_exp_point (macro_map, location);
              else
                location = loc;
            }
        }
      else
        break;
    }
  return false;
}
/* Return TRUE if LOCATION is a source code location of a token coming
   from a macro replacement-list at a macro expansion point, FALSE
   otherwise.  */

bool
linemap_location_from_macro_expansion_p (const struct line_maps *set,
                                         source_location location)
{
  /* Bail out on a NULL set before dereferencing it: the original
     unpacked ad-hoc locations and asserted through SET first, so its
     NULL check could never trigger in time.  */
  if (set == NULL)
    return false;

  if (IS_ADHOC_LOC (location))
    location = set->location_adhoc_data_map.data[location
                                                 & MAX_SOURCE_LOCATION].locus;

  linemap_assert (location <= MAX_SOURCE_LOCATION
                  && (set->highest_location
                      < LINEMAPS_MACRO_LOWEST_LOCATION (set)));

  /* Macro maps are allocated downward from the top of the location
     space, so anything above the highest ordinary location belongs to
     a macro map.  */
  return (location > set->highest_location);
}
/* Given two virtual locations *LOC0 and *LOC1, return the first
   common macro map in their macro expansion histories.  Return NULL
   if no common macro was found.  *LOC0 (resp. *LOC1) is set to the
   virtual location of the token inside the resulting macro.  */

static const struct line_map*
first_map_in_common_1 (struct line_maps *set,
                       source_location *loc0,
                       source_location *loc1)
{
  source_location l0 = *loc0, l1 = *loc1;
  const struct line_map *map0 = linemap_lookup (set, l0),
    *map1 = linemap_lookup (set, l1);

  /* Walk both expansion histories upward (toward expansion points)
     until the two locations land in the same map, or one of them
     leaves macro territory.  */
  while (linemap_macro_expansion_map_p (map0)
         && linemap_macro_expansion_map_p (map1)
         && (map0 != map1))
    {
      /* Macro maps are allocated with decreasing start locations (see
         linemap_enter_macro), so the map with the *lower* start is the
         more recently entered, deeper expansion: step that one up.  */
      if (MAP_START_LOCATION (map0) < MAP_START_LOCATION (map1))
        {
          l0 = linemap_macro_map_loc_to_exp_point (linemap_check_macro (map0),
                                                   l0);
          map0 = linemap_lookup (set, l0);
        }
      else
        {
          l1 = linemap_macro_map_loc_to_exp_point (linemap_check_macro (map1),
                                                   l1);
          map1 = linemap_lookup (set, l1);
        }
    }

  if (map0 == map1)
    {
      *loc0 = l0;
      *loc1 = l1;
      return map0;
    }
  return NULL;
}
/* Given two virtual locations LOC0 and LOC1, return the first common
   macro map in their macro expansion histories.  Return NULL if no
   common macro was found.  *RES_LOC0 (resp. *RES_LOC1) is set to the
   virtual location of the token inside the resulting macro, upon
   return of a non-NULL result.  */

static const struct line_map*
first_map_in_common (struct line_maps *set,
                     source_location loc0,
                     source_location loc1,
                     source_location *res_loc0,
                     source_location *res_loc1)
{
  /* Seed the output slots with the inputs; the worker updates them in
     place while walking the expansion histories.  */
  *res_loc0 = loc0;
  *res_loc1 = loc1;

  return first_map_in_common_1 (set, res_loc0, res_loc1);
}
/* Return a positive value if PRE denotes the location of a token that
   comes before the token of POST, 0 if PRE denotes the location of
   the same token as the token for POST, and a negative value
   otherwise.  */

int
linemap_compare_locations (struct line_maps *set,
                           source_location pre,
                           source_location post)
{
  bool pre_virtual_p, post_virtual_p;
  source_location l0 = pre, l1 = post;

  if (IS_ADHOC_LOC (l0))
    l0 = get_location_from_adhoc_loc (set, l0);
  if (IS_ADHOC_LOC (l1))
    l1 = get_location_from_adhoc_loc (set, l1);

  if (l0 == l1)
    return 0;

  /* Resolve virtual (macro) locations to their expansion points so the
     two locations become comparable in the ordinary location space.  */
  if ((pre_virtual_p = linemap_location_from_macro_expansion_p (set, l0)))
    l0 = linemap_resolve_location (set, l0,
                                   LRK_MACRO_EXPANSION_POINT,
                                   NULL);

  if ((post_virtual_p = linemap_location_from_macro_expansion_p (set, l1)))
    l1 = linemap_resolve_location (set, l1,
                                   LRK_MACRO_EXPANSION_POINT,
                                   NULL);

  if (l0 == l1
      && pre_virtual_p
      && post_virtual_p)
    {
      /* So pre and post represent two tokens that are present in a
         same macro expansion.  Let's see if the token for pre was
         before the token for post in that expansion.  */
      unsigned i0, i1;
      const struct line_map *map =
        first_map_in_common (set, pre, post, &l0, &l1);

      if (map == NULL)
        /* This should not be possible.  */
        abort ();

      /* Compare the token indices within the common macro map; a lower
         index is an earlier token in the replacement-list.  */
      i0 = l0 - MAP_START_LOCATION (map);
      i1 = l1 - MAP_START_LOCATION (map);
      return i1 - i0;
    }

  if (IS_ADHOC_LOC (l0))
    l0 = get_location_from_adhoc_loc (set, l0);
  if (IS_ADHOC_LOC (l1))
    l1 = get_location_from_adhoc_loc (set, l1);

  return l1 - l0;
}
/* Print an include trace, for e.g. the -H option of the preprocessor.  */

static void
trace_include (const struct line_maps *set, const line_map_ordinary *map)
{
  /* One dot per include-nesting level below the outermost file.  */
  for (unsigned int depth = set->depth; --depth != 0; )
    putc ('.', stderr);
  fprintf (stderr, " %s\n", ORDINARY_MAP_FILE_NAME (map));
}
/* Return the spelling location of the token wherever it comes from,
   whether part of a macro definition or not.

   This is a subroutine for linemap_resolve_location.  */

static source_location
linemap_macro_loc_to_spelling_point (struct line_maps *set,
                                     source_location location,
                                     const line_map_ordinary **original_map)
{
  linemap_assert (set && location >= RESERVED_LOCATION_COUNT);

  /* Peel off macro maps one level at a time, following the spelling
     location, until LOCATION lands in an ordinary map.  */
  line_map *map;
  for (;;)
    {
      map = const_cast <line_map *> (linemap_lookup (set, location));
      if (!linemap_macro_expansion_map_p (map))
        break;
      location
        = linemap_macro_map_loc_unwind_toward_spelling
            (set, linemap_check_macro (map), location);
    }

  if (original_map)
    *original_map = linemap_check_ordinary (map);
  return location;
}
/* If LOCATION is the source location of a token that belongs to a
   macro replacement-list -- as part of a macro expansion -- then
   return the location of the token at the definition point of the
   macro.  Otherwise, return LOCATION.  SET is the set of maps
   location come from.  ORIGINAL_MAP is an output parm.  If non NULL,
   the function sets *ORIGINAL_MAP to the ordinary (non-macro) map the
   returned location comes from.

   This is a subroutine of linemap_resolve_location.  */

static source_location
linemap_macro_loc_to_def_point (struct line_maps *set,
                                source_location location,
                                const line_map_ordinary **original_map)
{
  if (IS_ADHOC_LOC (location))
    location = set->location_adhoc_data_map.data[location
                                                 & MAX_SOURCE_LOCATION].locus;

  linemap_assert (set && location >= RESERVED_LOCATION_COUNT);

  /* Peel off macro maps one level at a time, mapping LOCATION to its
     macro-definition point, until it lands in an ordinary map.  */
  line_map *map;
  for (;;)
    {
      map = const_cast <line_map *> (linemap_lookup (set, location));
      if (!linemap_macro_expansion_map_p (map))
        break;
      location
        = linemap_macro_map_loc_to_def_point (linemap_check_macro (map),
                                              location);
    }

  if (original_map)
    *original_map = linemap_check_ordinary (map);
  return location;
}
/* If LOCATION is the source location of a token that belongs to a
   macro replacement-list -- at a macro expansion point -- then return
   the location of the topmost expansion point of the macro.  We say
   topmost because if we are in the context of a nested macro
   expansion, the function returns the source location of the first
   macro expansion that triggered the nested expansions.

   Otherwise, return LOCATION.  SET is the set of maps location come
   from.  ORIGINAL_MAP is an output parm.  If non NULL, the function
   sets *ORIGINAL_MAP to the ordinary (non-macro) map the returned
   location comes from.

   This is a subroutine of linemap_resolve_location.  */

static source_location
linemap_macro_loc_to_exp_point (struct line_maps *set,
                                source_location location,
                                const line_map_ordinary **original_map)
{
  if (IS_ADHOC_LOC (location))
    location = set->location_adhoc_data_map.data[location
                                                 & MAX_SOURCE_LOCATION].locus;

  linemap_assert (set && location >= RESERVED_LOCATION_COUNT);

  /* Peel off macro maps one level at a time, following the expansion
     point, until LOCATION lands in an ordinary map.  */
  line_map *map;
  for (;;)
    {
      map = const_cast <line_map *> (linemap_lookup (set, location));
      if (!linemap_macro_expansion_map_p (map))
        break;
      location
        = linemap_macro_map_loc_to_exp_point (linemap_check_macro (map),
                                              location);
    }

  if (original_map)
    *original_map = linemap_check_ordinary (map);
  return location;
}
/* Resolve a virtual location into either a spelling location, an
   expansion point location or a token argument replacement point
   location.  Return the map that encodes the virtual location as well
   as the resolved location.

   If LOC is *NOT* the location of a token resulting from the
   expansion of a macro, then the parameter LRK (which stands for
   Location Resolution Kind) is ignored and the resulting location
   just equals the one given in argument.

   Now if LOC *IS* the location of a token resulting from the
   expansion of a macro, this is what happens.

   * If LRK is set to LRK_MACRO_EXPANSION_POINT
   -------------------------------

   The virtual location is resolved to the first macro expansion point
   that led to this macro expansion.

   * If LRK is set to LRK_SPELLING_LOCATION
   -------------------------------------

   The virtual location is resolved to the locus where the token has
   been spelled in the source.  This can follow through all the macro
   expansions that led to the token.

   * If LRK is set to LRK_MACRO_DEFINITION_LOCATION
   --------------------------------------

   The virtual location is resolved to the locus of the token in the
   context of the macro definition.

   If LOC is the locus of a token that is an argument of a
   function-like macro [replacing a parameter in the replacement list
   of the macro] the virtual location is resolved to the locus of the
   parameter that is replaced, in the context of the definition of the
   macro.

   If LOC is the locus of a token that is not an argument of a
   function-like macro, then the function behaves as if LRK was set to
   LRK_SPELLING_LOCATION.

   If MAP is not NULL, *MAP is set to the map encoding the
   returned location.  Note that if the returned location wasn't originally
   encoded by a map, then *MAP is set to NULL.  This can happen if LOC
   resolves to a location reserved for the client code, like
   UNKNOWN_LOCATION or BUILTINS_LOCATION in GCC.  */

source_location
linemap_resolve_location (struct line_maps *set,
                          source_location loc,
                          enum location_resolution_kind lrk,
                          const line_map_ordinary **map)
{
  /* Unpack an ad-hoc location only to test for reservedness; the
     original (possibly ad-hoc) LOC is what gets resolved below.  */
  source_location locus = loc;
  if (IS_ADHOC_LOC (loc))
    locus = set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].locus;

  if (locus < RESERVED_LOCATION_COUNT)
    {
      /* A reserved location wasn't encoded in a map.  Let's return a
         NULL map here, just like what linemap_ordinary_map_lookup
         does.  */
      if (map)
        *map = NULL;
      return loc;
    }

  /* Dispatch to the subroutine implementing the requested resolution
     kind; each one also fills *MAP with the final ordinary map.  */
  switch (lrk)
    {
    case LRK_MACRO_EXPANSION_POINT:
      loc = linemap_macro_loc_to_exp_point (set, loc, map);
      break;
    case LRK_SPELLING_LOCATION:
      loc = linemap_macro_loc_to_spelling_point (set, loc, map);
      break;
    case LRK_MACRO_DEFINITION_LOCATION:
      loc = linemap_macro_loc_to_def_point (set, loc, map);
      break;
    default:
      abort ();
    }

  return loc;
}
/*
   Suppose that LOC is the virtual location of a token T coming from
   the expansion of a macro M.  This function then steps up to get the
   location L of the point where M got expanded.  If L is a spelling
   location inside a macro expansion M', then this function returns
   the locus of the point where M' was expanded.  Said otherwise, this
   function returns the location of T in the context that triggered
   the expansion of M.

   *MAP must be set to the map of LOC.  This function then sets it
   to the map of the returned location.  */

source_location
linemap_unwind_toward_expansion (struct line_maps *set,
                                 source_location loc,
                                 const struct line_map **map)
{
  source_location resolved_location;
  const line_map_macro *macro_map = linemap_check_macro (*map);
  const struct line_map *resolved_map;

  if (IS_ADHOC_LOC (loc))
    loc = set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].locus;

  /* First try the spelling location of the token one level up.  */
  resolved_location =
    linemap_macro_map_loc_unwind_toward_spelling (set, macro_map, loc);
  resolved_map = linemap_lookup (set, resolved_location);

  /* If the spelling is still inside a macro map, fall back to the
     expansion point of the current macro instead.  */
  if (!linemap_macro_expansion_map_p (resolved_map))
    {
      resolved_location = linemap_macro_map_loc_to_exp_point (macro_map, loc);
      resolved_map = linemap_lookup (set, resolved_location);
    }

  *map = resolved_map;
  return resolved_location;
}
/* If LOC is the virtual location of a token coming from the expansion
   of a macro M and if its spelling location is reserved (e.g, a
   location for a built-in token), then this function unwinds (using
   linemap_unwind_toward_expansion) the location until a location that
   is not reserved and is not in a system header is reached.  In other
   words, this unwinds the reserved location until a location that is
   in real source code is reached.

   Otherwise, if the spelling location for LOC is not reserved or if
   LOC doesn't come from the expansion of a macro, the function
   returns LOC as is and *MAP is not touched.

   *MAP is set to the map of the returned location if the later is
   different from LOC.  */

source_location
linemap_unwind_to_first_non_reserved_loc (struct line_maps *set,
                                          source_location loc,
                                          const struct line_map **map)
{
  source_location resolved_loc;
  const struct line_map *map0 = NULL;
  const line_map_ordinary *map1 = NULL;

  if (IS_ADHOC_LOC (loc))
    loc = set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].locus;

  map0 = linemap_lookup (set, loc);
  /* Not a macro expansion: nothing to unwind.  */
  if (!linemap_macro_expansion_map_p (map0))
    return loc;

  resolved_loc = linemap_resolve_location (set, loc,
                                           LRK_SPELLING_LOCATION,
                                           &map1);
  /* Spelling already points at real, non-system source: done.  */
  if (resolved_loc >= RESERVED_LOCATION_COUNT
      && !LINEMAP_SYSP (map1))
    return loc;

  /* Step outward one expansion level at a time until the spelling
     location is neither reserved nor in a system header, or we leave
     macro territory altogether.  */
  while (linemap_macro_expansion_map_p (map0)
         && (resolved_loc < RESERVED_LOCATION_COUNT
             || LINEMAP_SYSP (map1)))
    {
      loc = linemap_unwind_toward_expansion (set, loc, &map0);
      resolved_loc = linemap_resolve_location (set, loc,
                                               LRK_SPELLING_LOCATION,
                                               &map1);
    }

  if (map != NULL)
    *map = map0;
  return loc;
}
/* Expand source code location LOC and return a user readable source
code location. LOC must be a spelling (non-virtual) location. If
it's a location < RESERVED_LOCATION_COUNT a zeroed expanded source
location is returned. */
expanded_location
linemap_expand_location (struct line_maps *set,
			 const struct line_map *map,
			 source_location loc)
{
  expanded_location xloc;

  memset (&xloc, 0, sizeof (xloc));
  if (IS_ADHOC_LOC (loc))
    {
      /* Carry the data payload of an ad-hoc location over, then expand
	 the underlying locus.  */
      xloc.data
	= set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].data;
      loc = set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].locus;
    }

  if (loc < RESERVED_LOCATION_COUNT)
    /* The location for this token wasn't generated from a line map.
       It was probably a location for a builtin token, chosen by some
       client code.  Let's not try to expand the location in that
       case.  */;
  else if (map == NULL)
    /* We shouldn't be getting a NULL map with a location that is not
       reserved by the client code.  */
    abort ();
  else
    {
      /* MAP must be an ordinary map and LOC must be non-virtual,
	 encoded into this map, obviously; the accessors used on MAP
	 below ensure it is ordinary.  Let's just assert the
	 non-virtualness of LOC here.  */
      if (linemap_location_from_macro_expansion_p (set, loc))
	abort ();

      const line_map_ordinary *ord_map = linemap_check_ordinary (map);

      xloc.file = LINEMAP_FILE (ord_map);
      xloc.line = SOURCE_LINE (ord_map, loc);
      xloc.column = SOURCE_COLUMN (ord_map, loc);
      xloc.sysp = LINEMAP_SYSP (ord_map) != 0;
    }

  return xloc;
}
/* Dump line map at index IX in line table SET to STREAM. If STREAM
is NULL, use stderr. IS_MACRO is true if the caller wants to
dump a macro map, false otherwise. */
void
linemap_dump (FILE *stream, struct line_maps *set, unsigned ix, bool is_macro)
{
  /* Human-readable names for the LC_* reason codes, indexed by enum
     value.  */
  const char *lc_reasons_v[LC_ENTER_MACRO + 1]
      = { "LC_ENTER", "LC_LEAVE", "LC_RENAME", "LC_RENAME_VERBATIM",
	  "LC_ENTER_MACRO" };
  const char *reason;
  const line_map *map;

  if (stream == NULL)
    stream = stderr;

  if (!is_macro)
    map = LINEMAPS_ORDINARY_MAP_AT (set, ix);
  else
    map = LINEMAPS_MACRO_MAP_AT (set, ix);

  /* Guard against reason codes newer than the table above.  */
  reason = (map->reason <= LC_ENTER_MACRO) ? lc_reasons_v[map->reason] : "???";

  fprintf (stream, "Map #%u [%p] - LOC: %u - REASON: %s - SYSP: %s\n",
	   ix, (void *) map, map->start_location, reason,
	   ((!is_macro
	     && ORDINARY_MAP_IN_SYSTEM_HEADER_P (linemap_check_ordinary (map)))
	    ? "yes" : "no"));

  if (!is_macro)
    {
      const line_map_ordinary *ord_map = linemap_check_ordinary (map);
      unsigned includer_ix;
      const line_map_ordinary *includer_map;

      /* An out-of-range includer index means "no includer".  */
      includer_ix = ORDINARY_MAP_INCLUDER_FILE_INDEX (ord_map);
      includer_map = includer_ix < LINEMAPS_ORDINARY_USED (set)
		     ? LINEMAPS_ORDINARY_MAP_AT (set, includer_ix)
		     : NULL;

      fprintf (stream, "File: %s:%d\n", ORDINARY_MAP_FILE_NAME (ord_map),
	       ORDINARY_MAP_STARTING_LINE_NUMBER (ord_map));
      fprintf (stream, "Included from: [%d] %s\n", includer_ix,
	       includer_map ? ORDINARY_MAP_FILE_NAME (includer_map) : "None");
    }
  else
    {
      const line_map_macro *macro_map = linemap_check_macro (map);
      fprintf (stream, "Macro: %s (%u tokens)\n",
	       linemap_map_get_macro_name (macro_map),
	       MACRO_MAP_NUM_MACRO_TOKENS (macro_map));
    }

  fprintf (stream, "\n");
}
/* Dump debugging information about source location LOC into the file
stream STREAM. SET is the line map set LOC comes from. */
void
linemap_dump_location (struct line_maps *set,
		       source_location loc,
		       FILE *stream)
{
  const line_map_ordinary *map;
  source_location location;
  const char *path = "", *from = "";
  int l = -1, c = -1, s = -1, e = -1;

  if (IS_ADHOC_LOC (loc))
    loc = set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].locus;

  if (loc == 0)
    return;

  location =
    linemap_resolve_location (set, loc, LRK_MACRO_DEFINITION_LOCATION, &map);

  if (map == NULL)
    /* Only reserved locations can be tolerated in this case.  */
    linemap_assert (location < RESERVED_LOCATION_COUNT);
  else
    {
      path = LINEMAP_FILE (map);
      l = SOURCE_LINE (map, location);
      c = SOURCE_COLUMN (map, location);
      s = LINEMAP_SYSP (map) != 0;
      /* E is set when LOC had to be resolved through a macro
	 expansion.  */
      e = location != loc;
      if (e)
	from = "N/A";
      else
	from = (INCLUDED_FROM (set, map))
	  ? LINEMAP_FILE (INCLUDED_FROM (set, map))
	  : "<NULL>";
    }

  /* P: path, L: line, C: column, S: in-system-header, M: map address,
     E: macro expansion?, LOC: original location, R: resolved location.
     NOTE(review): LOC and R are unsigned (source_location) but printed
     with %d; harmless for debug output, but %u would be exact.  */
  fprintf (stream, "{P:%s;F:%s;L:%d;C:%d;S:%d;M:%p;E:%d,LOC:%d,R:%d}",
	   path, from, l, c, s, (void*)map, e, loc, location);
}
/* Return the highest location emitted for a given file for which
there is a line map in SET. FILE_NAME is the file name to
consider. If the function returns TRUE, *LOC is set to the highest
location emitted for that file. */
bool
linemap_get_file_highest_location (struct line_maps *set,
				   const char *file_name,
				   source_location *loc)
{
  /* If the set is empty or no ordinary map has been created then
     there is no file to look for ...  */
  if (set == NULL || set->info_ordinary.used == 0)
    return false;

  /* Now look for the last ordinary map created for FILE_NAME; walk
     backwards so the most recent one is found first.  */
  int i;
  for (i = set->info_ordinary.used - 1; i >= 0; --i)
    {
      const char *fname = set->info_ordinary.maps[i].to_file;
      if (fname && !filename_cmp (fname, file_name))
	break;
    }
  if (i < 0)
    return false;

  /* The highest location for a given map is either the starting
     location of the next map minus one, or -- if the map is the
     latest one -- the highest location of the set.  */
  source_location result;
  if (i == (int) set->info_ordinary.used - 1)
    result = set->highest_location;
  else
    result = set->info_ordinary.maps[i + 1].start_location - 1;

  *loc = result;
  return true;
}
/* Compute and return statistics about the memory consumption of some
parts of the line table SET. */
void
linemap_get_statistics (struct line_maps *set,
			struct linemap_stats *s)
{
  long ordinary_maps_allocated_size, ordinary_maps_used_size,
    macro_maps_allocated_size, macro_maps_used_size,
    macro_maps_locations_size = 0, duplicated_macro_maps_locations_size = 0;

  const line_map_macro *cur_map;

  ordinary_maps_allocated_size =
    LINEMAPS_ORDINARY_ALLOCATED (set) * sizeof (struct line_map_ordinary);

  ordinary_maps_used_size =
    LINEMAPS_ORDINARY_USED (set) * sizeof (struct line_map_ordinary);

  macro_maps_allocated_size =
    LINEMAPS_MACRO_ALLOCATED (set) * sizeof (struct line_map_macro);

  /* Walk every macro map, accumulating the size of its token-location
     table and how many of its location pairs are duplicates.  */
  for (cur_map = LINEMAPS_MACRO_MAPS (set);
       cur_map && cur_map <= LINEMAPS_LAST_MACRO_MAP (set);
       ++cur_map)
    {
      unsigned i;

      linemap_assert (linemap_macro_expansion_map_p (cur_map));

      /* Each macro token owns two source_locations, hence the factor
	 of 2.  */
      macro_maps_locations_size +=
	2 * MACRO_MAP_NUM_MACRO_TOKENS (cur_map) * sizeof (source_location);

      /* A pair whose two locations are equal could in principle be
	 stored once; count that as "duplicated" space.  */
      for (i = 0; i < 2 * MACRO_MAP_NUM_MACRO_TOKENS (cur_map); i += 2)
	{
	  if (MACRO_MAP_LOCATIONS (cur_map)[i] ==
	      MACRO_MAP_LOCATIONS (cur_map)[i + 1])
	    duplicated_macro_maps_locations_size +=
	      sizeof (source_location);
	}
    }

  macro_maps_used_size =
    LINEMAPS_MACRO_USED (set) * sizeof (struct line_map_macro);

  /* Publish everything into the caller-supplied stats struct.  */
  s->num_ordinary_maps_allocated = LINEMAPS_ORDINARY_ALLOCATED (set);
  s->num_ordinary_maps_used = LINEMAPS_ORDINARY_USED (set);
  s->ordinary_maps_allocated_size = ordinary_maps_allocated_size;
  s->ordinary_maps_used_size = ordinary_maps_used_size;
  s->num_expanded_macros = num_expanded_macros_counter;
  s->num_macro_tokens = num_macro_tokens_counter;
  s->num_macro_maps_used = LINEMAPS_MACRO_USED (set);
  s->macro_maps_allocated_size = macro_maps_allocated_size;
  s->macro_maps_locations_size = macro_maps_locations_size;
  s->macro_maps_used_size = macro_maps_used_size;
  s->duplicated_macro_maps_locations_size =
    duplicated_macro_maps_locations_size;
  s->adhoc_table_size = (set->location_adhoc_data_map.allocated
			 * sizeof (struct location_adhoc_data));
  s->adhoc_table_entries_used = set->location_adhoc_data_map.curr_loc;
}
/* Dump line table SET to STREAM. If STREAM is NULL, stderr is used.
NUM_ORDINARY specifies how many ordinary maps to dump. NUM_MACRO
specifies how many macro maps to dump. */
void
line_table_dump (FILE *stream, struct line_maps *set, unsigned int num_ordinary,
		 unsigned int num_macro)
{
  unsigned int i;

  if (set == NULL)
    return;

  if (stream == NULL)
    stream = stderr;

  /* Summary of the whole table first.  */
  fprintf (stream, "# of ordinary maps: %d\n", LINEMAPS_ORDINARY_USED (set));
  fprintf (stream, "# of macro maps: %d\n", LINEMAPS_MACRO_USED (set));
  fprintf (stream, "Include stack depth: %d\n", set->depth);
  fprintf (stream, "Highest location: %u\n", set->highest_location);

  /* Then at most NUM_ORDINARY ordinary maps ...  */
  if (num_ordinary)
    {
      fprintf (stream, "\nOrdinary line maps\n");
      for (i = 0; i < num_ordinary && i < LINEMAPS_ORDINARY_USED (set); i++)
	linemap_dump (stream, set, i, false);
      fprintf (stream, "\n");
    }

  /* ... and at most NUM_MACRO macro maps.  */
  if (num_macro)
    {
      fprintf (stream, "\nMacro line maps\n");
      for (i = 0; i < num_macro && i < LINEMAPS_MACRO_USED (set); i++)
	linemap_dump (stream, set, i, true);
      fprintf (stream, "\n");
    }
}
/* struct source_range. */
/* Is there any part of this range on the given line? */
bool
source_range::intersects_line_p (const char *file, int line) const
{
expanded_location exploc_start
= linemap_client_expand_location_to_spelling_point (m_start);
if (file != exploc_start.file)
return false;
if (line < exploc_start.line)
return false;
expanded_location exploc_finish
= linemap_client_expand_location_to_spelling_point (m_finish);
if (file != exploc_finish.file)
return false;
if (line > exploc_finish.line)
return false;
return true;
}
/* class rich_location. */
/* Construct a rich_location with location LOC as its initial range. */
rich_location::rich_location (line_maps *set, source_location loc) :
  m_loc (loc),
  m_num_ranges (0),
  m_have_expanded_location (false),
  m_num_fixit_hints (0)
{
  /* Set up the 0th range, extracting any range from LOC; its caret is
     the expansion of the primary location.  */
  source_range src_range = get_range_from_loc (set, loc);
  add_range (src_range, true);
  m_ranges[0].m_caret = lazily_expand_location ();
}

/* Construct a rich_location with source_range SRC_RANGE as its
   initial range; the primary location is the start of the range.  */

rich_location::rich_location (source_range src_range)
: m_loc (src_range.m_start),
  m_num_ranges (0),
  m_have_expanded_location (false),
  m_num_fixit_hints (0)
{
  /* Set up the 0th range:  */
  add_range (src_range, true);
}

/* The destructor for class rich_location: release the owned fix-it
   hints.  */

rich_location::~rich_location ()
{
  for (unsigned int i = 0; i < m_num_fixit_hints; i++)
    delete m_fixit_hints[i];
}
/* Get an expanded_location for this rich_location's primary
location. */
expanded_location
rich_location::lazily_expand_location ()
{
  /* Expand the primary location once and cache the result; the cache
     is invalidated when set_range replaces range 0.  */
  if (!m_have_expanded_location)
    {
      m_expanded_location
	= linemap_client_expand_location_to_spelling_point (m_loc);
      m_have_expanded_location = true;
    }

  return m_expanded_location;
}
/* Set the column of the primary location. This can only be called for
rich_location instances for which the primary location has
caret==start==finish. */
void
rich_location::override_column (int column)
{
  lazily_expand_location ();
  /* Only valid when the primary range is a single caret-point equal to
     the expanded primary location.  */
  gcc_assert (m_ranges[0].m_show_caret_p);
  gcc_assert (m_ranges[0].m_caret.column == m_expanded_location.column);
  gcc_assert (m_ranges[0].m_start.column == m_expanded_location.column);
  gcc_assert (m_ranges[0].m_finish.column == m_expanded_location.column);
  /* Rewrite the column everywhere it is cached.  */
  m_expanded_location.column = column;
  m_ranges[0].m_caret.column = column;
  m_ranges[0].m_start.column = column;
  m_ranges[0].m_finish.column = column;
}
/* Add the given range. */
void
rich_location::add_range (source_location start, source_location finish,
			  bool show_caret_p)
{
  linemap_assert (m_num_ranges < MAX_RANGES);

  location_range *range = &m_ranges[m_num_ranges++];
  range->m_start = linemap_client_expand_location_to_spelling_point (start);
  range->m_finish = linemap_client_expand_location_to_spelling_point (finish);
  /* The caret defaults to the start of the range.  */
  range->m_caret = range->m_start;
  range->m_show_caret_p = show_caret_p;
}

/* Add the given range.  */

void
rich_location::add_range (source_range src_range, bool show_caret_p)
{
  linemap_assert (m_num_ranges < MAX_RANGES);

  add_range (src_range.m_start, src_range.m_finish, show_caret_p);
}

/* Add a copy of the given, already-expanded range.  */

void
rich_location::add_range (location_range *src_range)
{
  linemap_assert (m_num_ranges < MAX_RANGES);

  m_ranges[m_num_ranges++] = *src_range;
}
/* Add or overwrite the location given by IDX, setting its location to LOC,
and setting its "should my caret be printed" flag to SHOW_CARET_P.
It must either overwrite an existing location, or add one *exactly* on
the end of the array.
This is primarily for use by gcc when implementing diagnostic format
decoders e.g.
- the "+" in the C/C++ frontends, for handling format codes like "%q+D"
(which writes the source location of a tree back into location 0 of
the rich_location), and
- the "%C" and "%L" format codes in the Fortran frontend. */
void
rich_location::set_range (line_maps *set, unsigned int idx,
			  source_location loc, bool show_caret_p)
{
  linemap_assert (idx < MAX_RANGES);

  /* We can either overwrite an existing range, or add one exactly
     on the end of the array.  */
  linemap_assert (idx <= m_num_ranges);

  source_range src_range = get_range_from_loc (set, loc);

  location_range *locrange = &m_ranges[idx];
  locrange->m_start
    = linemap_client_expand_location_to_spelling_point (src_range.m_start);
  locrange->m_finish
    = linemap_client_expand_location_to_spelling_point (src_range.m_finish);

  locrange->m_show_caret_p = show_caret_p;
  locrange->m_caret
    = linemap_client_expand_location_to_spelling_point (loc);

  /* Are we adding a range onto the end?  */
  if (idx == m_num_ranges)
    m_num_ranges = idx + 1;

  if (idx == 0)
    {
      /* Replacing the primary range also replaces the primary
	 location ...  */
      m_loc = loc;
      /* ... and marks any cached expansion of it as dirty.  */
      m_have_expanded_location = false;
    }
}
/* Add a fixit-hint, suggesting insertion of NEW_CONTENT
at WHERE. */
void
rich_location::add_fixit_insert (source_location where,
				 const char *new_content)
{
  linemap_assert (m_num_fixit_hints < MAX_FIXIT_HINTS);
  /* The hint is owned by this rich_location and deleted in its
     destructor.  */
  m_fixit_hints[m_num_fixit_hints++]
    = new fixit_insert (where, new_content);
}

/* Add a fixit-hint, suggesting removal of the content at
   SRC_RANGE.  */

void
rich_location::add_fixit_remove (source_range src_range)
{
  linemap_assert (m_num_fixit_hints < MAX_FIXIT_HINTS);
  m_fixit_hints[m_num_fixit_hints++] = new fixit_remove (src_range);
}

/* Add a fixit-hint, suggesting replacement of the content at
   SRC_RANGE with NEW_CONTENT.  */

void
rich_location::add_fixit_replace (source_range src_range,
				  const char *new_content)
{
  linemap_assert (m_num_fixit_hints < MAX_FIXIT_HINTS);
  m_fixit_hints[m_num_fixit_hints++]
    = new fixit_replace (src_range, new_content);
}
/* class fixit_insert. */
/* Take a private copy of NEW_CONTENT; it is released in the
   destructor.  */

fixit_insert::fixit_insert (source_location where,
			    const char *new_content)
: m_where (where),
  m_bytes (xstrdup (new_content)),
  m_len (strlen (new_content))
{
}

fixit_insert::~fixit_insert ()
{
  free (m_bytes);
}
/* Implementation of fixit_hint::affects_line_p for fixit_insert. */
bool
fixit_insert::affects_line_p (const char *file, int line)
{
expanded_location exploc
= linemap_client_expand_location_to_spelling_point (m_where);
if (file == exploc.file)
if (line == exploc.line)
return true;
return false;
}
/* class fixit_remove. */
fixit_remove::fixit_remove (source_range src_range)
: m_src_range (src_range)
{
}

/* Implementation of fixit_hint::affects_line_p for fixit_remove: true
   iff the range to be removed touches LINE of FILE.  */

bool
fixit_remove::affects_line_p (const char *file, int line)
{
  return m_src_range.intersects_line_p (file, line);
}
/* class fixit_replace. */
/* Take a private copy of the replacement text NEW_CONTENT; it is
   released in the destructor.  */

fixit_replace::fixit_replace (source_range src_range,
			      const char *new_content)
: m_src_range (src_range),
  m_bytes (xstrdup (new_content)),
  m_len (strlen (new_content))
{
}

fixit_replace::~fixit_replace ()
{
  free (m_bytes);
}

/* Implementation of fixit_hint::affects_line_p for fixit_replace.  */

bool
fixit_replace::affects_line_p (const char *file, int line)
{
  return m_src_range.intersects_line_p (file, line);
}
|
h4ck3rm1k3/gcc-1
|
libcpp/line-map.c
|
C
|
gpl-2.0
| 71,107
|
/*
* libquicktime yuv4 encoder
*
* Copyright (c) 2011 Carl Eugen Hoyos
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avcodec.h"
#include "internal.h"
/* Allocate the encoder's reusable coded_frame.
 * Returns 0 on success or AVERROR(ENOMEM). */
static av_cold int yuv4_encode_init(AVCodecContext *avctx)
{
    avctx->coded_frame = av_frame_alloc();

    if (!avctx->coded_frame) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
        return AVERROR(ENOMEM);
    }

    return 0;
}
/* Pack one YUV420P picture into the "yuv4" layout: for each 2x2 block
 * of luma the stream stores U, V, then the four Y samples, i.e. 6 bytes
 * per block.  Returns 0 on success or a negative AVERROR from packet
 * allocation. */
static int yuv4_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pic, int *got_packet)
{
    uint8_t *dst;
    uint8_t *y, *u, *v;
    int i, j, ret;

    /* 6 output bytes per 2x2 block; "x + 1 >> 1" parses as
     * "(x + 1) >> 1", rounding odd dimensions up. */
    if ((ret = ff_alloc_packet2(avctx, pkt, 6 * (avctx->width + 1 >> 1) * (avctx->height + 1 >> 1))) < 0)
        return ret;
    dst = pkt->data;

    avctx->coded_frame->key_frame = 1;
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;

    y = pic->data[0];
    u = pic->data[1];
    v = pic->data[2];

    for (i = 0; i < avctx->height + 1 >> 1; i++) {
        for (j = 0; j < avctx->width + 1 >> 1; j++) {
            /* Chroma is stored with the top bit flipped (0x80 bias). */
            *dst++ = u[j] ^ 0x80;
            *dst++ = v[j] ^ 0x80;
            /* 2x2 luma quad: two samples from this row, two from the
             * next one. */
            *dst++ = y[                  2 * j    ];
            *dst++ = y[                  2 * j + 1];
            *dst++ = y[pic->linesize[0] + 2 * j    ];
            *dst++ = y[pic->linesize[0] + 2 * j + 1];
        }
        /* Advance luma by two rows, chroma by one (4:2:0). */
        y += 2 * pic->linesize[0];
        u +=     pic->linesize[1];
        v +=     pic->linesize[2];
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
/* Free the coded_frame allocated in yuv4_encode_init. */
static av_cold int yuv4_encode_close(AVCodecContext *avctx)
{
    av_freep(&avctx->coded_frame);

    return 0;
}
/* Codec registration for the "yuv4" encoder; only YUV420P input is
 * accepted. */
AVCodec ff_yuv4_encoder = {
    .name         = "yuv4",
    .long_name    = NULL_IF_CONFIG_SMALL("Uncompressed packed 4:2:0"),
    .type         = AVMEDIA_TYPE_VIDEO,
    .id           = AV_CODEC_ID_YUV4,
    .init         = yuv4_encode_init,
    .encode2      = yuv4_encode_frame,
    .close        = yuv4_encode_close,
    .pix_fmts     = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
};
|
tojo9900/vice
|
src/lib/libffmpeg/libavcodec/yuv4enc.c
|
C
|
gpl-2.0
| 2,758
|
/*
* arch/s390/kernel/sys_s390.c
*
* S390 version
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Thomas Spatzier (tspat@de.ibm.com)
*
* Derived from "arch/i386/kernel/sys_i386.c"
*
* This file contains various random system calls that
* have a non-standard calling sequence on the Linux/s390
* platform.
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/unistd.h>
#include <linux/ipc.h>
#include <asm/uaccess.h>
#include "entry.h"
/*
* Perform the mmap() system call. Linux for S/390 isn't able to handle more
* than 5 system call parameters, so this system call uses a memory block
* for parameter passing.
*/
/* Argument block for the s390 mmap2 syscall: the ABI cannot pass more
 * than five syscall parameters, so userspace hands us a pointer to this
 * struct instead. */
struct s390_mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;	/* page offset, passed through to sys_mmap_pgoff */
};

SYSCALL_DEFINE1(mmap2, struct s390_mmap_arg_struct __user *, arg)
{
	struct s390_mmap_arg_struct a;
	int error = -EFAULT;

	/* Fetch the whole argument block from userspace in one go. */
	if (copy_from_user(&a, arg, sizeof(a)))
		goto out;
	error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
out:
	return error;
}
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls.
*/
SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second,
		unsigned long, third, void __user *, ptr)
{
	/* s390 never encodes an IPC version in the upper 16 bits. */
	if (call >> 16)
		return -EINVAL;
	/* The s390 sys_ipc variant has only five parameters instead of six
	 * like the generic variant. The only difference is the handling of
	 * the SEMTIMEDOP subcall where on s390 the third parameter is used
	 * as a pointer to a struct timespec where the generic variant uses
	 * the fifth parameter.
	 * Therefore we can call the generic variant by simply passing the
	 * third parameter also as fifth parameter.
	 */
	return sys_ipc(call, first, second, third, ptr, third);
}
#ifdef CONFIG_64BIT
/* Wrapper around sys_personality that keeps PER_LINUX32 tasks sticky:
 * a PER_LINUX request from such a task stays PER_LINUX32, and
 * PER_LINUX32 is reported back to userspace as PER_LINUX. */
SYSCALL_DEFINE1(s390_personality, unsigned int, personality)
{
	unsigned int ret;

	if (current->personality == PER_LINUX32 && personality == PER_LINUX)
		personality = PER_LINUX32;
	ret = sys_personality(personality);
	if (ret == PER_LINUX32)
		ret = PER_LINUX;

	return ret;
}
#endif /* CONFIG_64BIT */
/*
* Wrapper function for sys_fadvise64/fadvise64_64
*/
#ifndef CONFIG_64BIT
SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, offset_high, u32, offset_low,
		size_t, len, int, advice)
{
	/* Reassemble the 64-bit offset from its two 32-bit halves. */
	return sys_fadvise64(fd, (u64) offset_high << 32 | offset_low,
			     len, advice);
}

/* Argument block for fadvise64_64 on 31-bit: the 64-bit offset and
 * length do not fit the register-based calling convention, so they are
 * passed via memory. */
struct fadvise64_64_args {
	int fd;
	long long offset;
	long long len;
	int advice;
};

SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args)
{
	struct fadvise64_64_args a;

	if ( copy_from_user(&a, args, sizeof(a)) )
		return -EFAULT;
	return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
}
/*
* This is a wrapper to call sys_fallocate(). For 31 bit s390 the last
* 64 bit argument "len" is split into the upper and lower 32 bits. The
* system call wrapper in the user space loads the value to %r6/%r7.
* The code in entry.S keeps the values in %r2 - %r6 where they are and
* stores %r7 to 96(%r15). But the standard C linkage requires that
* the whole 64 bit value for len is stored on the stack and doesn't
* use %r6 at all. So s390_fallocate has to convert the arguments from
* %r2: fd, %r3: mode, %r4/%r5: offset, %r6/96(%r15)-99(%r15): len
* to
* %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len
*/
SYSCALL_DEFINE(s390_fallocate)(int fd, int mode, loff_t offset,
			       u32 len_high, u32 len_low)
{
	/* Glue the split 64-bit length back together (see comment above). */
	return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low);
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
/* The syscall wrapper machinery passes every argument as "long";
 * narrow them back to their real widths before calling the SYSC body. */
asmlinkage long SyS_s390_fallocate(long fd, long mode, loff_t offset,
				   long len_high, long len_low)
{
	return SYSC_s390_fallocate((int) fd, (int) mode, offset,
				   (u32) len_high, (u32) len_low);
}
SYSCALL_ALIAS(sys_s390_fallocate, SyS_s390_fallocate);
#endif
#endif
|
Jackeagle/android_kernel_sony_c2305
|
arch/s390/kernel/sys_s390.c
|
C
|
gpl-2.0
| 4,389
|
/*
* GIT - The information manager from hell
*
* Copyright (C) Linus Torvalds, 2005
* Copyright (C) Johannes Schindelin, 2005
*
*/
#include "cache.h"
#include "exec_cmd.h"
#include "strbuf.h"
#include "quote.h"
/* Parse state for one config file; included files are stacked via
 * "prev" while include directives are processed. */
typedef struct config_file {
	struct config_file *prev;	/* includer, or NULL for the outermost file */
	FILE *f;
	const char *name;
	int linenr;			/* current line, for error reporting */
	int eof;			/* set by get_next_char() at end of file */
	struct strbuf value;		/* scratch buffer for the current value */
	struct strbuf var;		/* scratch buffer for the current key */
} config_file;

/* The file currently being parsed (top of the include stack). */
static config_file *cf;

static int zlib_compression_seen;

#define MAX_INCLUDE_DEPTH 10
static const char include_depth_advice[] =
"exceeded maximum include depth (%d) while including\n"
"	%s\n"
"from\n"
"	%s\n"
"Do you have circular includes?";
/* Process one "include.path" value: expand a leading "~", resolve a
 * relative path against the including file, and parse the referenced
 * config file (guarding against runaway include chains).  Returns 0 on
 * success or a negative value on error.
 *
 * Fix: the original returned directly from the "relative config
 * includes must come from files" branch, leaking the buffer returned
 * by expand_user_path(); route all exits through a common cleanup. */
static int handle_path_include(const char *path, struct config_include_data *inc)
{
	int ret = 0;
	struct strbuf buf = STRBUF_INIT;
	char *expanded = expand_user_path(path);

	if (!expanded)
		return error("Could not expand include path '%s'", path);
	path = expanded;

	/*
	 * Use an absolute path as-is, but interpret relative paths
	 * based on the including config file.
	 */
	if (!is_absolute_path(path)) {
		char *slash;

		if (!cf || !cf->name) {
			ret = error("relative config includes must come from files");
			goto cleanup;
		}

		slash = find_last_dir_sep(cf->name);
		if (slash)
			strbuf_add(&buf, cf->name, slash - cf->name + 1);
		strbuf_addstr(&buf, path);
		path = buf.buf;
	}

	if (!access_or_die(path, R_OK)) {
		if (++inc->depth > MAX_INCLUDE_DEPTH)
			die(include_depth_advice, MAX_INCLUDE_DEPTH, path,
			    cf && cf->name ? cf->name : "the command line");
		ret = git_config_from_file(git_config_include, path, inc);
		inc->depth--;
	}
cleanup:
	strbuf_release(&buf);
	free(expanded);
	return ret;
}
/* Config callback wrapper implementing "include.path": every key is
 * first forwarded to the wrapped callback, then, if the key is an
 * include directive, the referenced file is parsed as well. */
int git_config_include(const char *var, const char *value, void *data)
{
	struct config_include_data *inc = data;
	const char *type;
	int ret;

	/*
	 * Pass along all values, including "include" directives; this makes it
	 * possible to query information on the includes themselves.
	 */
	ret = inc->fn(var, value, inc->data);
	if (ret < 0)
		return ret;

	type = skip_prefix(var, "include.");
	if (!type)
		return ret;

	if (!strcmp(type, "path"))
		ret = handle_path_include(value, inc);
	return ret;
}
/* Downcase the NUL-terminated string P in place.  The cast through
 * unsigned char matters: passing a negative plain "char" to tolower()
 * is undefined behavior on platforms where char is signed
 * (CERT STR37-C). */
static void lowercase(char *p)
{
	for (; *p; p++)
		*p = tolower((unsigned char)*p);
}
/* Append TEXT (a "key=value" argument, e.g. from "git -c") to the
 * CONFIG_DATA_ENVIRONMENT variable, shell-quoting it so the list can
 * later be split back into individual parameters. */
void git_config_push_parameter(const char *text)
{
	struct strbuf env = STRBUF_INIT;
	const char *old = getenv(CONFIG_DATA_ENVIRONMENT);
	if (old) {
		/* Keep previously pushed parameters, space-separated. */
		strbuf_addstr(&env, old);
		strbuf_addch(&env, ' ');
	}
	sq_quote_buf(&env, text);
	setenv(CONFIG_DATA_ENVIRONMENT, env.buf, 1);
	strbuf_release(&env);
}
/* Parse one "key=value" (or bare "key") command-line config parameter
 * and feed it to FN.  The key is lowercased; "key=" yields an empty
 * value, a bare "key" a NULL value.  Returns 0 on success, -1 if FN
 * rejects the pair, or error() for malformed input. */
int git_config_parse_parameter(const char *text,
			       config_fn_t fn, void *data)
{
	struct strbuf **pair;

	pair = strbuf_split_str(text, '=', 2);
	if (!pair[0])
		return error("bogus config parameter: %s", text);

	/* strbuf_split keeps the '=' terminator on the key; drop it. */
	if (pair[0]->len && pair[0]->buf[pair[0]->len - 1] == '=')
		strbuf_setlen(pair[0], pair[0]->len - 1);
	strbuf_trim(pair[0]);
	if (!pair[0]->len) {
		strbuf_list_free(pair);
		return error("bogus config parameter: %s", text);
	}
	lowercase(pair[0]->buf);
	if (fn(pair[0]->buf, pair[1] ? pair[1]->buf : NULL, data) < 0) {
		strbuf_list_free(pair);
		return -1;
	}
	strbuf_list_free(pair);
	return 0;
}
/* Feed every parameter stored in CONFIG_DATA_ENVIRONMENT to FN.
 * Returns 0 if the variable is unset, a positive value if at least one
 * parameter was processed, or a negative value on error. */
int git_config_from_parameters(config_fn_t fn, void *data)
{
	const char *env = getenv(CONFIG_DATA_ENVIRONMENT);
	char *envw;
	const char **argv = NULL;
	int nr = 0, alloc = 0;
	int i;

	if (!env)
		return 0;
	/* sq_dequote will write over it */
	envw = xstrdup(env);

	if (sq_dequote_to_argv(envw, &argv, &nr, &alloc) < 0) {
		free(envw);
		return error("bogus format in " CONFIG_DATA_ENVIRONMENT);
	}

	for (i = 0; i < nr; i++) {
		if (git_config_parse_parameter(argv[i], fn, data) < 0) {
			free(argv);
			free(envw);
			return -1;
		}
	}

	/* argv's strings point into envw, so both stay alive until here. */
	free(argv);
	free(envw);
	return nr > 0;
}
/* Read the next character from the current config file (cf), folding
 * CRLF into '\n', counting lines, and converting EOF into a final '\n'
 * (with cf->eof set) so callers always see a terminated last line. */
static int get_next_char(void)
{
	int c;
	FILE *f;

	c = '\n';
	if (cf && ((f = cf->f) != NULL)) {
		c = fgetc(f);
		if (c == '\r') {
			/* DOS like systems */
			c = fgetc(f);
			if (c != '\n') {
				/* Lone CR: push the lookahead back. */
				ungetc(c, f);
				c = '\r';
			}
		}
		if (c == '\n')
			cf->linenr++;
		if (c == EOF) {
			cf->eof = 1;
			c = '\n';
		}
	}
	return c;
}
/* Parse the value part of a "key = value" line into cf->value.
 * Handles double quotes, backslash escapes and '#'/';' comments;
 * unquoted inner whitespace is emitted as ' ' characters while leading
 * and trailing whitespace is dropped.  Returns the parsed string, or
 * NULL on a syntax error (newline inside quotes, unknown escape). */
static char *parse_value(void)
{
	int quote = 0, comment = 0, space = 0;

	strbuf_reset(&cf->value);
	for (;;) {
		int c = get_next_char();
		if (c == '\n') {
			if (quote) {
				/* Report the error on the line the value
				 * started on; the '\n' was already counted. */
				cf->linenr--;
				return NULL;
			}
			return cf->value.buf;
		}
		if (comment)
			continue;
		if (isspace(c) && !quote) {
			/* Defer whitespace; leading runs are dropped
			 * because cf->value is still empty. */
			if (cf->value.len)
				space++;
			continue;
		}
		if (!quote) {
			if (c == ';' || c == '#') {
				comment = 1;
				continue;
			}
		}
		/* Flush the deferred whitespace before real content. */
		for (; space; space--)
			strbuf_addch(&cf->value, ' ');
		if (c == '\\') {
			c = get_next_char();
			switch (c) {
			case '\n':
				/* Line continuation. */
				continue;
			case 't':
				c = '\t';
				break;
			case 'b':
				c = '\b';
				break;
			case 'n':
				c = '\n';
				break;
			/* Some characters escape as themselves */
			case '\\': case '"':
				break;
			/* Reject unknown escape sequences */
			default:
				return NULL;
			}
			strbuf_addch(&cf->value, c);
			continue;
		}
		if (c == '"') {
			quote = 1-quote;
			continue;
		}
		strbuf_addch(&cf->value, c);
	}
}
/* May C appear in a config key name?  Keys consist of alphanumerics
 * and dashes only. */
static inline int iskeychar(int c)
{
	if (c == '-')
		return 1;
	return isalnum(c);
}
/* Parse the rest of a "key = value" line.  NAME already holds the
 * section prefix plus the first character of the key; the remainder of
 * the key and its value are read here and handed to FN.  Returns FN's
 * result, or -1 on a parse error. */
static int get_value(config_fn_t fn, void *data, struct strbuf *name)
{
	int c;
	char *value;

	/* Get the full name */
	for (;;) {
		c = get_next_char();
		if (cf->eof)
			break;
		if (!iskeychar(c))
			break;
		strbuf_addch(name, tolower(c));
	}

	/* Skip horizontal whitespace before the '='. */
	while (c == ' ' || c == '\t')
		c = get_next_char();

	value = NULL;
	if (c != '\n') {
		if (c != '=')
			return -1;
		value = parse_value();
		if (!value)
			return -1;
	}
	/* A bare "key\n" line passes value == NULL (boolean true). */
	return fn(name->buf, value, data);
}
/* Parse the '[section "subsection"]' header form; C is the character
 * just after the section word.  Appends ".subsection" to NAME.
 * Returns 0 on success, -1 on malformed input. */
static int get_extended_base_var(struct strbuf *name, int c)
{
	/* Skip the whitespace between section word and quote. */
	do {
		if (c == '\n')
			goto error_incomplete_line;
		c = get_next_char();
	} while (isspace(c));

	/* We require the format to be '[base "extension"]' */
	if (c != '"')
		return -1;
	strbuf_addch(name, '.');

	for (;;) {
		int c = get_next_char();
		if (c == '\n')
			goto error_incomplete_line;
		if (c == '"')
			break;
		/* Backslash escapes the next character verbatim. */
		if (c == '\\') {
			c = get_next_char();
			if (c == '\n')
				goto error_incomplete_line;
		}
		strbuf_addch(name, c);
	}

	/* Final ']' */
	if (get_next_char() != ']')
		return -1;
	return 0;
error_incomplete_line:
	/* The '\n' was already counted; report the error on this line. */
	cf->linenr--;
	return -1;
}
/* Parse a '[section]' or '[section "subsection"]' header (the '[' has
 * already been consumed) into NAME, lowercasing the section word.
 * Returns 0 on success, -1 on error. */
static int get_base_var(struct strbuf *name)
{
	for (;;) {
		int c = get_next_char();
		if (cf->eof)
			return -1;
		if (c == ']')
			return 0;
		/* Whitespace introduces the quoted-subsection form. */
		if (isspace(c))
			return get_extended_base_var(name, c);
		if (!iskeychar(c) && c != '.')
			return -1;
		strbuf_addch(name, tolower(c));
	}
}
/* Top-level parse loop for one config file: skip an optional UTF-8
 * BOM, then alternate between section headers and key/value lines,
 * feeding each key/value pair to FN.  Returns 0 at EOF; dies on
 * malformed input. */
static int git_parse_file(config_fn_t fn, void *data)
{
	int comment = 0;
	int baselen = 0;
	struct strbuf *var = &cf->var;

	/* U+FEFF Byte Order Mark in UTF8 */
	static const unsigned char *utf8_bom = (unsigned char *) "\xef\xbb\xbf";
	const unsigned char *bomptr = utf8_bom;

	for (;;) {
		int c = get_next_char();
		if (bomptr && *bomptr) {
			/* We are at the file beginning; skip UTF8-encoded BOM
			 * if present. Sane editors won't put this in on their
			 * own, but e.g. Windows Notepad will do it happily. */
			if ((unsigned char) c == *bomptr) {
				bomptr++;
				continue;
			} else {
				/* Do not tolerate partial BOM. */
				if (bomptr != utf8_bom)
					break;
				/* No BOM at file beginning. Cool. */
				bomptr = NULL;
			}
		}
		if (c == '\n') {
			if (cf->eof)
				return 0;
			comment = 0;
			continue;
		}
		if (comment || isspace(c))
			continue;
		if (c == '#' || c == ';') {
			comment = 1;
			continue;
		}
		if (c == '[') {
			/* Reset prior to determining a new stem */
			strbuf_reset(var);
			if (get_base_var(var) < 0 || var->len < 1)
				break;
			strbuf_addch(var, '.');
			baselen = var->len;
			continue;
		}
		if (!isalpha(c))
			break;
		/*
		 * Truncate the var name back to the section header
		 * stem prior to grabbing the suffix part of the name
		 * and the value.
		 */
		strbuf_setlen(var, baselen);
		strbuf_addch(var, tolower(c));
		if (get_value(fn, data, var) < 0)
			break;
	}
	/* Reaching here means some break above fired: syntax error. */
	die("bad config file line %d in %s", cf->linenr, cf->name);
}
/* Scale *VAL by the unit suffix found in END: "" leaves it unchanged,
 * "k"/"m"/"g" (case-insensitively) multiply by the corresponding power
 * of 1024.  Returns 1 on success, 0 for an unrecognized suffix. */
static int parse_unit_factor(const char *end, uintmax_t *val)
{
	static const struct {
		const char *suffix;
		uintmax_t factor;
	} units[] = {
		{ "k", 1024 },
		{ "m", 1024 * 1024 },
		{ "g", 1024 * 1024 * 1024 },
	};
	size_t i;

	if (!*end)
		return 1;	/* no suffix: factor of one */
	for (i = 0; i < sizeof(units) / sizeof(units[0]); i++) {
		if (!strcasecmp(end, units[i].suffix)) {
			*val *= units[i].factor;
			return 1;
		}
	}
	return 0;
}
/* Parse VALUE as a signed long with an optional 'k'/'m'/'g' unit
 * suffix.  On success store the result in *RET and return 1; return 0
 * for NULL/empty input, malformed numbers, unknown suffixes or
 * overflow.
 *
 * Fix: the original computed the magnitude with abs(val), which
 * truncates an intmax_t to "int" range (wrong for large inputs) and is
 * undefined for INTMAX_MIN; use explicit negation in uintmax_t and a
 * division-based overflow check instead. */
static int git_parse_long(const char *value, long *ret)
{
	if (value && *value) {
		char *end;
		intmax_t val;
		uintmax_t uval;
		uintmax_t factor = 1;

		errno = 0;
		val = strtoimax(value, &end, 0);
		if (errno == ERANGE)
			return 0;
		if (!parse_unit_factor(end, &factor))
			return 0;
		/* Magnitude of VAL, safe even for INTMAX_MIN. */
		uval = val < 0 ? -(uintmax_t)val : (uintmax_t)val;
		/* uval * factor must fit a signed long; factor >= 1. */
		if (uval > maximum_signed_value_of_type(long) / factor)
			return 0;
		val *= factor;
		*ret = val;
		return 1;
	}
	return 0;
}
/* Parse VALUE as an unsigned long with an optional 'k'/'m'/'g' unit
 * suffix.  On success store the result in *RET and return 1; return 0
 * for NULL/empty input, malformed numbers, unknown suffixes or
 * overflow. */
int git_parse_ulong(const char *value, unsigned long *ret)
{
	if (value && *value) {
		char *end;
		uintmax_t val;
		uintmax_t oldval;

		errno = 0;
		val = strtoumax(value, &end, 0);
		if (errno == ERANGE)
			return 0;
		oldval = val;
		if (!parse_unit_factor(end, &val))
			return 0;
		/* oldval > val detects wraparound of the unit
		 * multiplication inside parse_unit_factor. */
		if ((val > maximum_unsigned_value_of_type(long)) ||
		    (oldval > val))
			return 0;
		*ret = val;
		return 1;
	}
	return 0;
}
/* Die with a message naming the offending variable and, when known,
 * the config file it came from. */
static void die_bad_config(const char *name)
{
	if (cf && cf->name)
		die("bad config value for '%s' in %s", name, cf->name);
	die("bad config value for '%s'", name);
}

/* Parse VALUE as an integer (unit suffixes allowed); dies on bad
 * input. */
int git_config_int(const char *name, const char *value)
{
	long ret = 0;
	if (!git_parse_long(value, &ret))
		die_bad_config(name);
	return ret;
}

/* Parse VALUE as an unsigned long (unit suffixes allowed); dies on bad
 * input. */
unsigned long git_config_ulong(const char *name, const char *value)
{
	unsigned long ret;
	if (!git_parse_ulong(value, &ret))
		die_bad_config(name);
	return ret;
}
/* Interpret VALUE as a boolean keyword.  NULL (a valueless "key" line)
 * means true and the empty string means false.  Returns 1 for a true
 * keyword, 0 for a false one, and -1 if VALUE is not recognized. */
static int git_config_maybe_bool_text(const char *name, const char *value)
{
	static const char *const truthy[] = { "true", "yes", "on" };
	static const char *const falsy[] = { "false", "no", "off" };
	size_t i;

	if (!value)
		return 1;
	if (!*value)
		return 0;
	for (i = 0; i < sizeof(truthy) / sizeof(truthy[0]); i++) {
		if (!strcasecmp(value, truthy[i]))
			return 1;
		if (!strcasecmp(value, falsy[i]))
			return 0;
	}
	return -1;
}
/*
 * Like git_config_maybe_bool_text(), but additionally accepts integer
 * values (nonzero meaning true).  Returns 1/0 for a boolean value and
 * -1 when "value" is neither a boolean word nor an integer.
 */
int git_config_maybe_bool(const char *name, const char *value)
{
	long num;
	int word = git_config_maybe_bool_text(name, value);

	if (word >= 0)
		return word;
	if (git_parse_long(value, &num))
		return num != 0;
	return -1;
}
/*
 * Interpret "value" as either a boolean word or an integer.  *is_bool
 * tells the caller which one it was; non-boolean, non-integer values
 * die inside git_config_int().
 */
int git_config_bool_or_int(const char *name, const char *value, int *is_bool)
{
	int word = git_config_maybe_bool_text(name, value);

	if (word < 0) {
		/* not a recognized boolean word: must be an integer */
		*is_bool = 0;
		return git_config_int(name, value);
	}
	*is_bool = 1;
	return word;
}
/*
 * Interpret "value" as a boolean, normalized to 0 or 1 (integers are
 * accepted too; nonzero means true).  Dies on malformed input.
 */
int git_config_bool(const char *name, const char *value)
{
	int discard_is_bool;

	return git_config_bool_or_int(name, value, &discard_is_bool) != 0;
}
/*
 * Store a copy of a string-valued configuration variable in *dest.
 * A missing value (boolean-style "[section] var") is an error for
 * string variables.  The copy is xstrdup()ed; ownership passes to the
 * caller.  Returns 0 on success, the error() value otherwise.
 */
int git_config_string(const char **dest, const char *var, const char *value)
{
	if (!value)
		return config_error_nonbool(var);
	*dest = xstrdup(value);
	return 0;
}
/*
 * Like git_config_string(), but run the value through
 * expand_user_path() (e.g. "~/..." expansion) first.  Dies when the
 * expansion fails; the expanded string is owned by the caller.
 */
int git_config_pathname(const char **dest, const char *var, const char *value)
{
	if (!value)
		return config_error_nonbool(var);
	*dest = expand_user_path(value);
	if (!*dest)
		die("Failed to expand user dir in: '%s'", value);
	return 0;
}
static int git_default_core_config(const char *var, const char *value)
{
/* This needs a better name */
if (!strcmp(var, "core.filemode")) {
trust_executable_bit = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.trustctime")) {
trust_ctime = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.statinfo")) {
if (!strcasecmp(value, "default"))
check_stat = 1;
else if (!strcasecmp(value, "minimal"))
check_stat = 0;
}
if (!strcmp(var, "core.quotepath")) {
quote_path_fully = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.symlinks")) {
has_symlinks = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.ignorecase")) {
ignore_case = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.attributesfile"))
return git_config_pathname(&git_attributes_file, var, value);
if (!strcmp(var, "core.bare")) {
is_bare_repository_cfg = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.ignorestat")) {
assume_unchanged = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.prefersymlinkrefs")) {
prefer_symlink_refs = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.logallrefupdates")) {
log_all_ref_updates = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.warnambiguousrefs")) {
warn_ambiguous_refs = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.abbrev")) {
int abbrev = git_config_int(var, value);
if (abbrev < minimum_abbrev || abbrev > 40)
return -1;
default_abbrev = abbrev;
return 0;
}
if (!strcmp(var, "core.loosecompression")) {
int level = git_config_int(var, value);
if (level == -1)
level = Z_DEFAULT_COMPRESSION;
else if (level < 0 || level > Z_BEST_COMPRESSION)
die("bad zlib compression level %d", level);
zlib_compression_level = level;
zlib_compression_seen = 1;
return 0;
}
if (!strcmp(var, "core.compression")) {
int level = git_config_int(var, value);
if (level == -1)
level = Z_DEFAULT_COMPRESSION;
else if (level < 0 || level > Z_BEST_COMPRESSION)
die("bad zlib compression level %d", level);
core_compression_level = level;
core_compression_seen = 1;
if (!zlib_compression_seen)
zlib_compression_level = level;
return 0;
}
if (!strcmp(var, "core.packedgitwindowsize")) {
int pgsz_x2 = getpagesize() * 2;
packed_git_window_size = git_config_ulong(var, value);
/* This value must be multiple of (pagesize * 2) */
packed_git_window_size /= pgsz_x2;
if (packed_git_window_size < 1)
packed_git_window_size = 1;
packed_git_window_size *= pgsz_x2;
return 0;
}
if (!strcmp(var, "core.bigfilethreshold")) {
big_file_threshold = git_config_ulong(var, value);
return 0;
}
if (!strcmp(var, "core.packedgitlimit")) {
packed_git_limit = git_config_ulong(var, value);
return 0;
}
if (!strcmp(var, "core.deltabasecachelimit")) {
delta_base_cache_limit = git_config_ulong(var, value);
return 0;
}
if (!strcmp(var, "core.logpackaccess"))
return git_config_string(&log_pack_access, var, value);
if (!strcmp(var, "core.autocrlf")) {
if (value && !strcasecmp(value, "input")) {
if (core_eol == EOL_CRLF)
return error("core.autocrlf=input conflicts with core.eol=crlf");
auto_crlf = AUTO_CRLF_INPUT;
return 0;
}
auto_crlf = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.safecrlf")) {
if (value && !strcasecmp(value, "warn")) {
safe_crlf = SAFE_CRLF_WARN;
return 0;
}
safe_crlf = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.eol")) {
if (value && !strcasecmp(value, "lf"))
core_eol = EOL_LF;
else if (value && !strcasecmp(value, "crlf"))
core_eol = EOL_CRLF;
else if (value && !strcasecmp(value, "native"))
core_eol = EOL_NATIVE;
else
core_eol = EOL_UNSET;
if (core_eol == EOL_CRLF && auto_crlf == AUTO_CRLF_INPUT)
return error("core.autocrlf=input conflicts with core.eol=crlf");
return 0;
}
if (!strcmp(var, "core.notesref")) {
notes_ref_name = xstrdup(value);
return 0;
}
if (!strcmp(var, "core.pager"))
return git_config_string(&pager_program, var, value);
if (!strcmp(var, "core.editor"))
return git_config_string(&editor_program, var, value);
if (!strcmp(var, "core.commentchar")) {
const char *comment;
int ret = git_config_string(&comment, var, value);
if (!ret)
comment_line_char = comment[0];
return ret;
}
if (!strcmp(var, "core.askpass"))
return git_config_string(&askpass_program, var, value);
if (!strcmp(var, "core.excludesfile"))
return git_config_pathname(&excludes_file, var, value);
if (!strcmp(var, "core.whitespace")) {
if (!value)
return config_error_nonbool(var);
whitespace_rule_cfg = parse_whitespace_rule(value);
return 0;
}
if (!strcmp(var, "core.fsyncobjectfiles")) {
fsync_object_files = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.preloadindex")) {
core_preload_index = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.createobject")) {
if (!strcmp(value, "rename"))
object_creation_mode = OBJECT_CREATION_USES_RENAMES;
else if (!strcmp(value, "link"))
object_creation_mode = OBJECT_CREATION_USES_HARDLINKS;
else
die("Invalid mode for object creation: %s", value);
return 0;
}
if (!strcmp(var, "core.sparsecheckout")) {
core_apply_sparse_checkout = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.precomposeunicode")) {
precomposed_unicode = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.hidedotfiles")) {
if (value && !strcasecmp(value, "dotgitonly")) {
hide_dotfiles = HIDE_DOTFILES_DOTGITONLY;
return 0;
}
hide_dotfiles = git_config_bool(var, value);
return 0;
}
/* Add other config variables here and to Documentation/config.txt. */
return 0;
}
/*
 * Handler for "i18n.*" configuration: commit and log output encodings.
 * Unknown i18n.* keys are silently accepted (return 0).
 */
static int git_default_i18n_config(const char *var, const char *value)
{
	if (!strcmp(var, "i18n.commitencoding"))
		return git_config_string(&git_commit_encoding, var, value);

	if (!strcmp(var, "i18n.logoutputencoding"))
		return git_config_string(&git_log_output_encoding, var, value);

	/* Add other config variables here and to Documentation/config.txt. */
	return 0;
}
/*
 * Handler for "branch.*" configuration variables that control how
 * newly created branches are set up (tracking and rebase defaults).
 */
static int git_default_branch_config(const char *var, const char *value)
{
	if (!strcmp(var, "branch.autosetupmerge")) {
		/* "always" is a third state beyond plain true/false */
		if (value && !strcasecmp(value, "always")) {
			git_branch_track = BRANCH_TRACK_ALWAYS;
			return 0;
		}
		git_branch_track = git_config_bool(var, value);
		return 0;
	}
	if (!strcmp(var, "branch.autosetuprebase")) {
		static const struct {
			const char *name;
			int mode;
		} rebase_modes[] = {
			{ "never",  AUTOREBASE_NEVER },
			{ "local",  AUTOREBASE_LOCAL },
			{ "remote", AUTOREBASE_REMOTE },
			{ "always", AUTOREBASE_ALWAYS },
		};
		int i;

		if (!value)
			return config_error_nonbool(var);
		for (i = 0; i < 4; i++) {
			if (!strcmp(value, rebase_modes[i].name)) {
				autorebase = rebase_modes[i].mode;
				return 0;
			}
		}
		return error("Malformed value for %s", var);
	}

	/* Add other config variables here and to Documentation/config.txt. */
	return 0;
}
/*
 * Handler for "push.*" configuration variables; currently only
 * push.default, which selects what "git push" does with no refspec.
 */
static int git_default_push_config(const char *var, const char *value)
{
	if (!strcmp(var, "push.default")) {
		static const struct {
			const char *name;
			int mode;
		} push_modes[] = {
			{ "nothing",  PUSH_DEFAULT_NOTHING },
			{ "matching", PUSH_DEFAULT_MATCHING },
			{ "simple",   PUSH_DEFAULT_SIMPLE },
			{ "upstream", PUSH_DEFAULT_UPSTREAM },
			{ "tracking", PUSH_DEFAULT_UPSTREAM }, /* deprecated alias */
			{ "current",  PUSH_DEFAULT_CURRENT },
		};
		int i;

		if (!value)
			return config_error_nonbool(var);
		for (i = 0; i < 6; i++) {
			if (!strcmp(value, push_modes[i].name)) {
				push_default = push_modes[i].mode;
				return 0;
			}
		}
		error("Malformed value for %s: %s", var, value);
		return error("Must be one of nothing, matching, simple, "
			     "upstream or current.");
	}

	/* Add other config variables here and to Documentation/config.txt. */
	return 0;
}
/*
 * Handler for "mailmap.*" configuration: external mailmap sources
 * (a file path or a blob name).  Unknown keys are accepted silently.
 */
static int git_default_mailmap_config(const char *var, const char *value)
{
	if (!strcmp(var, "mailmap.file"))
		return git_config_string(&git_mailmap_file, var, value);
	if (!strcmp(var, "mailmap.blob"))
		return git_config_string(&git_mailmap_blob, var, value);

	/* Add other config variables here and to Documentation/config.txt. */
	return 0;
}
/*
 * The default configuration callback: dispatches "var" to the
 * per-section handlers above, plus a few one-off keys.  Unrecognized
 * variables are accepted silently (return 0) so that unknown
 * configuration never breaks a command.
 */
int git_default_config(const char *var, const char *value, void *dummy)
{
	if (!prefixcmp(var, "core."))
		return git_default_core_config(var, value);

	if (!prefixcmp(var, "user."))
		return git_ident_config(var, value, dummy);

	if (!prefixcmp(var, "i18n."))
		return git_default_i18n_config(var, value);

	if (!prefixcmp(var, "branch."))
		return git_default_branch_config(var, value);

	if (!prefixcmp(var, "push."))
		return git_default_push_config(var, value);

	if (!prefixcmp(var, "mailmap."))
		return git_default_mailmap_config(var, value);

	if (!prefixcmp(var, "advice."))
		return git_default_advice_config(var, value);

	/* both spellings are accepted */
	if (!strcmp(var, "pager.color") || !strcmp(var, "color.pager")) {
		pager_use_color = git_config_bool(var,value);
		return 0;
	}

	if (!strcmp(var, "pack.packsizelimit")) {
		pack_size_limit_cfg = git_config_ulong(var, value);
		return 0;
	}
	/* Add other config variables here and to Documentation/config.txt. */
	return 0;
}
/*
 * Parse one configuration file, feeding every key/value pair to "fn"
 * with the opaque "data" pointer.  The global "cf" forms a stack of
 * parse states so config files can be parsed recursively (e.g. for
 * include directives).  Returns the parser's result, or -1 when the
 * file cannot be opened.
 */
int git_config_from_file(config_fn_t fn, const char *filename, void *data)
{
	int ret;
	FILE *f = fopen(filename, "r");

	ret = -1;
	if (f) {
		config_file top;

		/* push config-file parsing state stack */
		top.prev = cf;
		top.f = f;
		top.name = filename;
		top.linenr = 1;
		top.eof = 0;
		strbuf_init(&top.value, 1024);
		strbuf_init(&top.var, 1024);
		cf = &top;

		ret = git_parse_file(fn, data);

		/* pop config-file parsing state stack */
		strbuf_release(&top.value);
		strbuf_release(&top.var);
		cf = top.prev;

		fclose(f);
	}
	return ret;
}
/*
 * Path of the system-wide configuration file; computed once via
 * system_path() and cached for the life of the process.
 */
const char *git_etc_gitconfig(void)
{
	static const char *system_wide;
	if (!system_wide)
		system_wide = system_path(ETC_GITCONFIG);
	return system_wide;
}
/*
 * Read the boolean environment variable "k", falling back to "def"
 * when it is unset.  A set-but-malformed value dies inside
 * git_config_bool().
 */
int git_env_bool(const char *k, int def)
{
	const char *v = getenv(k);

	if (!v)
		return def;
	return git_config_bool(k, v);
}
/*
 * Should the system-wide config be read?  Disabled when
 * GIT_CONFIG_NOSYSTEM is set to a true value (useful for test suites).
 */
int git_config_system(void)
{
	if (git_env_bool("GIT_CONFIG_NOSYSTEM", 0))
		return 0;
	return 1;
}
/*
 * Read the standard configuration sources in order of increasing
 * precedence -- system, XDG, ~/.gitconfig, repository, then
 * command-line ("-c") parameters -- feeding each entry to "fn".
 * Returns the number of sources found when every parse succeeded,
 * otherwise the accumulated (nonzero) error sum.
 */
int git_config_early(config_fn_t fn, void *data, const char *repo_config)
{
	int ret = 0, found = 0;
	char *xdg_config = NULL;
	char *user_config = NULL;

	home_config_paths(&user_config, &xdg_config, "config");

	if (git_config_system() && !access_or_die(git_etc_gitconfig(), R_OK)) {
		ret += git_config_from_file(fn, git_etc_gitconfig(),
					    data);
		found += 1;
	}

	if (xdg_config && !access_or_die(xdg_config, R_OK)) {
		ret += git_config_from_file(fn, xdg_config, data);
		found += 1;
	}

	if (user_config && !access_or_die(user_config, R_OK)) {
		ret += git_config_from_file(fn, user_config, data);
		found += 1;
	}

	if (repo_config && !access_or_die(repo_config, R_OK)) {
		ret += git_config_from_file(fn, repo_config, data);
		found += 1;
	}

	switch (git_config_from_parameters(fn, data)) {
	case -1: /* error */
		die("unable to parse command-line config");
		break;
	case 0: /* found nothing */
		break;
	default: /* found at least one item */
		found++;
		break;
	}

	free(xdg_config);
	free(user_config);
	return ret == 0 ? found : ret;
}
/*
 * Read configuration for the callback "fn": when "filename" is
 * non-NULL only that file is read, otherwise the regular lookup
 * sequence (git_config_early()) is used.  When "respect_includes" is
 * set, fn is wrapped so include directives are followed.
 */
int git_config_with_options(config_fn_t fn, void *data,
			    const char *filename, int respect_includes)
{
	char *repo_config = NULL;
	int ret;
	struct config_include_data inc = CONFIG_INCLUDE_INIT;

	if (respect_includes) {
		inc.fn = fn;
		inc.data = data;
		fn = git_config_include;
		data = &inc;
	}

	/*
	 * If we have a specific filename, use it. Otherwise, follow the
	 * regular lookup sequence.
	 */
	if (filename)
		return git_config_from_file(fn, filename, data);

	repo_config = git_pathdup("config");
	ret = git_config_early(fn, data, repo_config);
	free(repo_config);	/* free(NULL) is a no-op; no guard needed */
	return ret;
}
/*
 * Convenience wrapper: read all standard configuration sources,
 * following include directives.
 */
int git_config(config_fn_t fn, void *data)
{
	return git_config_with_options(fn, data, NULL, 1);
}
/*
* Find all the stuff for git_config_set() below.
*/
#define MAX_MATCHES 512

/*
 * Shared state between git_config_set_multivar_in_file() and its
 * store_aux() parsing callback.
 */
static struct {
	int baselen;		/* length of the "section.subsection" part of key */
	char *key;		/* canonicalized key being set/unset */
	int do_not_match;	/* value_regex was prefixed with '!': invert match */
	regex_t *value_regex;	/* only touch entries whose value matches, or NULL */
	int multi_replace;	/* replace all matches, not just a single one */
	size_t offset[MAX_MATCHES];	/* file offsets of the recorded entries */
	enum { START, SECTION_SEEN, SECTION_END_SEEN, KEY_SEEN } state;
	int seen;		/* number of matches recorded so far */
} store;
/*
 * Does this key/value pair match the key (and optional value regex)
 * recorded in "store"?  store.do_not_match flips the sense of the
 * regex test (for "!regex" patterns).
 */
static int matches(const char *key, const char *value)
{
	return !strcmp(key, store.key) &&
		(store.value_regex == NULL ||
		 (store.do_not_match ^
		  !regexec(store.value_regex, value, 0, NULL, 0)));
}
/*
 * Parser callback used by git_config_set_multivar_in_file().  It never
 * modifies anything itself; it records in "store" the file offsets
 * (ftell() of the parser's FILE) of the entries to be replaced, and
 * runs a small state machine so a new key can later be appended at the
 * end of its section.  Returning nonzero aborts the parse.
 */
static int store_aux(const char *key, const char *value, void *cb)
{
	const char *ep;
	size_t section_len;
	FILE *f = cf->f;	/* the file currently being parsed */

	switch (store.state) {
	case KEY_SEEN:
		/* the key was already found once; record further matches */
		if (matches(key, value)) {
			if (store.seen == 1 && store.multi_replace == 0) {
				warning("%s has multiple values", key);
			} else if (store.seen >= MAX_MATCHES) {
				error("too many matches for %s", key);
				return 1;	/* abort the parse */
			}

			store.offset[store.seen] = ftell(f);
			store.seen++;
		}
		break;
	case SECTION_SEEN:
		/*
		 * What we are looking for is in store.key (both
		 * section and var), and its section part is baselen
		 * long.  We found key (again, both section and var).
		 * We would want to know if this key is in the same
		 * section as what we are looking for.  We already
		 * know we are in the same section as what should
		 * hold store.key.
		 */
		ep = strrchr(key, '.');
		section_len = ep - key;

		if ((section_len != store.baselen) ||
		    memcmp(key, store.key, section_len+1)) {
			store.state = SECTION_END_SEEN;
			break;
		}

		/*
		 * Do not increment matches: this is no match, but we
		 * just made sure we are in the desired section.
		 */
		store.offset[store.seen] = ftell(f);
		/* fallthru */
	case SECTION_END_SEEN:
	case START:
		if (matches(key, value)) {
			store.offset[store.seen] = ftell(f);
			store.state = KEY_SEEN;
			store.seen++;
		} else {
			/* same section, different key: remember where it ends */
			if (strrchr(key, '.') - key == store.baselen &&
			    !strncmp(key, store.key, store.baselen)) {
				store.state = SECTION_SEEN;
				store.offset[store.seen] = ftell(f);
			}
		}
	}
	return 0;
}
/*
 * Report a failed write of the new configuration file and return the
 * exit code used for this class of failure.
 */
static int write_error(const char *filename)
{
	error("failed to write new configuration file %s", filename);

	/* Same error code as "failed to rename". */
	return 4;
}
/*
 * Write a "[section]" or "[section "subsection"]" header line for
 * "key" (whose section part is store.baselen bytes long) to fd.
 * Quotes and backslashes inside a subsection name are escaped.
 * Returns nonzero on success.
 */
static int store_write_section(int fd, const char *key)
{
	const char *dot;
	int i, success;
	struct strbuf sb = STRBUF_INIT;

	dot = memchr(key, '.', store.baselen);
	if (dot) {
		/* dotted section name: emit [section "subsection"] */
		strbuf_addf(&sb, "[%.*s \"", (int)(dot - key), key);
		for (i = dot - key + 1; i < store.baselen; i++) {
			if (key[i] == '"' || key[i] == '\\')
				strbuf_addch(&sb, '\\');
			strbuf_addch(&sb, key[i]);
		}
		strbuf_addstr(&sb, "\"]\n");
	} else {
		strbuf_addf(&sb, "[%.*s]\n", store.baselen, key);
	}

	success = write_in_full(fd, sb.buf, sb.len) == sb.len;
	strbuf_release(&sb);

	return success;
}
/*
 * Write a "\tname = value" line for "key" (only the part after the
 * section prefix) to fd.  Values that would be misread when parsed
 * back -- leading/trailing blanks, or ';'/'#' comment starters -- are
 * wrapped in double quotes, and special characters are
 * backslash-escaped.  Returns nonzero on success.
 */
static int store_write_pair(int fd, const char *key, const char *value)
{
	int i, success;
	int length = strlen(key + store.baselen + 1);
	const char *quote = "";
	struct strbuf sb = STRBUF_INIT;

	/*
	 * Check to see if the value needs to be surrounded with a dq pair.
	 * Note that problematic characters are always backslash-quoted; this
	 * check is about not losing leading or trailing SP and strings that
	 * follow beginning-of-comment characters (i.e. ';' and '#') by the
	 * configuration parser.
	 */
	if (value[0] == ' ')
		quote = "\"";
	for (i = 0; value[i]; i++)
		if (value[i] == ';' || value[i] == '#')
			quote = "\"";
	if (i && value[i - 1] == ' ')
		quote = "\"";

	strbuf_addf(&sb, "\t%.*s = %s",
		    length, key + store.baselen + 1, quote);

	for (i = 0; value[i]; i++)
		switch (value[i]) {
		case '\n':
			strbuf_addstr(&sb, "\\n");
			break;
		case '\t':
			strbuf_addstr(&sb, "\\t");
			break;
		case '"':
		case '\\':
			strbuf_addch(&sb, '\\');
			/* fallthrough: emit the escaped character itself */
		default:
			strbuf_addch(&sb, value[i]);
			break;
		}
	strbuf_addf(&sb, "%s\n", quote);

	success = write_in_full(fd, sb.buf, sb.len) == sb.len;
	strbuf_release(&sb);

	return success;
}
/*
 * Given "offset_" pointing just past a key/value entry in "contents",
 * walk backwards to the offset where the entry's line begins, skipping
 * over backslash-continued lines.  If the same line also contains a
 * section header before the entry (a ']' nearer to the entry than any
 * '='), *found_bracket is set and the returned offset points just
 * after that ']' so the header is preserved.
 */
static ssize_t find_beginning_of_line(const char *contents, size_t size,
	size_t offset_, int *found_bracket)
{
	size_t equal_offset = size, bracket_offset = size;
	ssize_t offset;

contline:
	/* scan back to the previous newline, noting '=' and ']' positions */
	for (offset = offset_-2; offset > 0
	     && contents[offset] != '\n'; offset--)
		switch (contents[offset]) {
		case '=': equal_offset = offset; break;
		case ']': bracket_offset = offset; break;
		}
	/* a backslash right before the newline marks a continuation line */
	if (offset > 0 && contents[offset-1] == '\\') {
		offset_ = offset;
		goto contline;
	}
	if (bracket_offset < equal_offset) {
		*found_bracket = 1;
		offset = bracket_offset+1;
	} else
		offset++;

	return offset;
}
/*
 * Set a single-valued key in the given config file (no value pattern,
 * no multi-replace).  See git_config_set_multivar_in_file().
 */
int git_config_set_in_file(const char *config_filename,
			const char *key, const char *value)
{
	return git_config_set_multivar_in_file(config_filename, key, value, NULL, 0);
}
/*
 * Set a single-valued key in the repository configuration file.
 */
int git_config_set(const char *key, const char *value)
{
	return git_config_set_multivar(key, value, NULL, 0);
}
/*
* Auxiliary function to sanity-check and split the key into the section
* identifier and variable name.
*
* Returns 0 on success, -1 when there is an invalid character in the key and
* -2 if there is no section name in the key.
*
* store_key - pointer to char* which will hold a copy of the key with
* lowercase section and variable name
* baselen - pointer to int which will hold the length of the
* section + subsection part, can be NULL
*/
int git_config_parse_key(const char *key, char **store_key, int *baselen_)
{
	int i, dot, baselen;
	const char *last_dot = strrchr(key, '.');

	/*
	 * Since "key" actually contains the section name and the real
	 * key name separated by a dot, we have to know where the dot is.
	 */

	if (last_dot == NULL || last_dot == key) {
		error("key does not contain a section: %s", key);
		return -CONFIG_NO_SECTION_OR_NAME;
	}

	if (!last_dot[1]) {
		error("key does not contain variable name: %s", key);
		return -CONFIG_NO_SECTION_OR_NAME;
	}

	/* section (+ optional subsection) is everything up to the last dot */
	baselen = last_dot - key;
	if (baselen_)
		*baselen_ = baselen;

	/*
	 * Validate the key and while at it, lower case it for matching.
	 */
	*store_key = xmalloc(strlen(key) + 1);

	dot = 0;
	for (i = 0; key[i]; i++) {
		unsigned char c = key[i];
		if (c == '.')
			dot = 1;
		/* Leave the extended basename untouched.. */
		if (!dot || i > baselen) {
			/*
			 * Section and variable-name characters are
			 * restricted (iskeychar), and the variable name
			 * must start with a letter.
			 */
			if (!iskeychar(c) ||
			    (i == baselen + 1 && !isalpha(c))) {
				error("invalid key: %s", key);
				goto out_free_ret_1;
			}
			c = tolower(c);
		} else if (c == '\n') {
			error("invalid key (newline): %s", key);
			goto out_free_ret_1;
		}
		(*store_key)[i] = c;
	}
	(*store_key)[i] = 0;

	return 0;

out_free_ret_1:
	/* validation failed: release the partial copy */
	free(*store_key);
	*store_key = NULL;
	return -CONFIG_INVALID_KEY;
}
/*
* If value==NULL, unset in (remove from) config,
* if value_regex!=NULL, disregard key/value pairs where value does not match.
* if multi_replace==0, nothing, or only one matching key/value is replaced,
* else all matching key/values (regardless how many) are removed,
* before the new pair is written.
*
* Returns 0 on success.
*
* This function does this:
*
* - it locks the config file by creating ".git/config.lock"
*
* - it then parses the config using store_aux() as validator to find
* the position on the key/value pair to replace. If it is to be unset,
* it must be found exactly once.
*
* - the config file is mmap()ed and the part before the match (if any) is
* written to the lock file, then the changed part and the rest.
*
* - the config file is removed and the lock file rename()d to it.
*
*/
/*
 * See the block comment above for the overall algorithm: lock, locate
 * matches with store_aux(), splice the mmap()ed old contents around
 * them into the lock file, then commit the lock.
 */
int git_config_set_multivar_in_file(const char *config_filename,
				const char *key, const char *value,
				const char *value_regex, int multi_replace)
{
	int fd = -1, in_fd;
	int ret;
	struct lock_file *lock = NULL;
	char *filename_buf = NULL;

	/* parse-key returns negative; flip the sign to feed exit(3) */
	ret = 0 - git_config_parse_key(key, &store.key, &store.baselen);
	if (ret)
		goto out_free;

	store.multi_replace = multi_replace;

	if (!config_filename)
		config_filename = filename_buf = git_pathdup("config");

	/*
	 * The lock serves a purpose in addition to locking: the new
	 * contents of .git/config will be written into it.
	 */
	lock = xcalloc(sizeof(struct lock_file), 1);
	fd = hold_lock_file_for_update(lock, config_filename, 0);
	if (fd < 0) {
		error("could not lock config file %s: %s", config_filename, strerror(errno));
		free(store.key);
		ret = CONFIG_NO_LOCK;
		goto out_free;
	}

	/*
	 * If .git/config does not exist yet, write a minimal version.
	 */
	in_fd = open(config_filename, O_RDONLY);
	if ( in_fd < 0 ) {
		free(store.key);

		if ( ENOENT != errno ) {
			error("opening %s: %s", config_filename,
			      strerror(errno));
			ret = CONFIG_INVALID_FILE; /* same as "invalid config file" */
			goto out_free;
		}
		/* if nothing to unset, error out */
		if (value == NULL) {
			ret = CONFIG_NOTHING_SET;
			goto out_free;
		}

		/* borrow the caller's key; it must not be freed below */
		store.key = (char *)key;
		if (!store_write_section(fd, key) ||
		    !store_write_pair(fd, key, value))
			goto write_err_out;
	} else {
		struct stat st;
		char *contents;
		size_t contents_sz, copy_begin, copy_end;
		int i, new_line = 0;

		if (value_regex == NULL)
			store.value_regex = NULL;
		else {
			/* a leading '!' negates the value pattern */
			if (value_regex[0] == '!') {
				store.do_not_match = 1;
				value_regex++;
			} else
				store.do_not_match = 0;

			store.value_regex = (regex_t*)xmalloc(sizeof(regex_t));
			if (regcomp(store.value_regex, value_regex,
					REG_EXTENDED)) {
				error("invalid pattern: %s", value_regex);
				free(store.value_regex);
				ret = CONFIG_INVALID_PATTERN;
				goto out_free;
			}
		}

		store.offset[0] = 0;
		store.state = START;
		store.seen = 0;

		/*
		 * After this, store.offset will contain the *end* offset
		 * of the last match, or remain at 0 if no match was found.
		 * As a side effect, we make sure to transform only a valid
		 * existing config file.
		 */
		if (git_config_from_file(store_aux, config_filename, NULL)) {
			error("invalid config file %s", config_filename);
			free(store.key);
			if (store.value_regex != NULL) {
				regfree(store.value_regex);
				free(store.value_regex);
			}
			ret = CONFIG_INVALID_FILE;
			goto out_free;
		}

		free(store.key);
		if (store.value_regex != NULL) {
			regfree(store.value_regex);
			free(store.value_regex);
		}

		/* if nothing to unset, or too many matches, error out */
		if ((store.seen == 0 && value == NULL) ||
		    (store.seen > 1 && multi_replace == 0)) {
			ret = CONFIG_NOTHING_SET;
			goto out_free;
		}

		fstat(in_fd, &st);
		contents_sz = xsize_t(st.st_size);
		contents = xmmap(NULL, contents_sz, PROT_READ,
				 MAP_PRIVATE, in_fd, 0);
		close(in_fd);

		/* no match: "replace" means append at end of file */
		if (store.seen == 0)
			store.seen = 1;

		/* copy the file, splicing out each matched entry */
		for (i = 0, copy_begin = 0; i < store.seen; i++) {
			if (store.offset[i] == 0) {
				store.offset[i] = copy_end = contents_sz;
			} else if (store.state != KEY_SEEN) {
				copy_end = store.offset[i];
			} else
				copy_end = find_beginning_of_line(
					contents, contents_sz,
					store.offset[i]-2, &new_line);

			if (copy_end > 0 && contents[copy_end-1] != '\n')
				new_line = 1;

			/* write the first part of the config */
			if (copy_end > copy_begin) {
				if (write_in_full(fd, contents + copy_begin,
						  copy_end - copy_begin) <
				    copy_end - copy_begin)
					goto write_err_out;
				if (new_line &&
				    write_str_in_full(fd, "\n") != 1)
					goto write_err_out;
			}
			copy_begin = store.offset[i];
		}

		/* write the pair (value == NULL means unset) */
		if (value != NULL) {
			if (store.state == START) {
				/* the key's section did not exist yet */
				if (!store_write_section(fd, key))
					goto write_err_out;
			}
			if (!store_write_pair(fd, key, value))
				goto write_err_out;
		}

		/* write the rest of the config */
		if (copy_begin < contents_sz)
			if (write_in_full(fd, contents + copy_begin,
					  contents_sz - copy_begin) <
			    contents_sz - copy_begin)
				goto write_err_out;

		munmap(contents, contents_sz);
	}

	if (commit_lock_file(lock) < 0) {
		error("could not commit config file %s", config_filename);
		ret = CONFIG_NO_WRITE;
		goto out_free;
	}

	/*
	 * lock is committed, so don't try to roll it back below.
	 * NOTE: Since lockfile.c keeps a linked list of all created
	 * lock_file structures, it isn't safe to free(lock).  It's
	 * better to just leave it hanging around.
	 */
	lock = NULL;
	ret = 0;

out_free:
	if (lock)
		rollback_lock_file(lock);
	free(filename_buf);
	return ret;

write_err_out:
	ret = write_error(lock->filename);
	goto out_free;
}
/*
 * Same as git_config_set_multivar_in_file() on the repository
 * configuration file.
 */
int git_config_set_multivar(const char *key, const char *value,
			const char *value_regex, int multi_replace)
{
	return git_config_set_multivar_in_file(NULL, key, value, value_regex,
					       multi_replace);
}
static int section_name_match (const char *buf, const char *name)
{
int i = 0, j = 0, dot = 0;
if (buf[i] != '[')
return 0;
for (i = 1; buf[i] && buf[i] != ']'; i++) {
if (!dot && isspace(buf[i])) {
dot = 1;
if (name[j++] != '.')
break;
for (i++; isspace(buf[i]); i++)
; /* do nothing */
if (buf[i] != '"')
break;
continue;
}
if (buf[i] == '\\' && dot)
i++;
else if (buf[i] == '"' && dot) {
for (i++; isspace(buf[i]); i++)
; /* do_nothing */
break;
}
if (buf[i] != name[j++])
break;
}
if (buf[i] == ']' && name[j] == 0) {
/*
* We match, now just find the right length offset by
* gobbling up any whitespace after it, as well
*/
i++;
for (; buf[i] && isspace(buf[i]); i++)
; /* do nothing */
return i;
}
return 0;
}
/*
 * Is "name" a legal config section name?  Before the first dot only
 * alphanumerics and '-' are allowed; after a dot (a subsection),
 * anything goes.  Empty names are rejected.
 */
static int section_name_is_ok(const char *name)
{
	const char *p = name;

	/* Empty section names are bogus. */
	if (!*p)
		return 0;

	while (*p && *p != '.') {
		if (*p != '-' && !isalnum(*p))
			return 0;
		p++;
	}
	return 1;
}
/* if new_name == NULL, the section is removed instead */
int git_config_rename_section_in_file(const char *config_filename,
const char *old_name, const char *new_name)
{
int ret = 0, remove = 0;
char *filename_buf = NULL;
struct lock_file *lock;
int out_fd;
char buf[1024];
FILE *config_file;
if (new_name && !section_name_is_ok(new_name)) {
ret = error("invalid section name: %s", new_name);
goto out;
}
if (!config_filename)
config_filename = filename_buf = git_pathdup("config");
lock = xcalloc(sizeof(struct lock_file), 1);
out_fd = hold_lock_file_for_update(lock, config_filename, 0);
if (out_fd < 0) {
ret = error("could not lock config file %s", config_filename);
goto out;
}
if (!(config_file = fopen(config_filename, "rb"))) {
/* no config file means nothing to rename, no error */
goto unlock_and_out;
}
while (fgets(buf, sizeof(buf), config_file)) {
int i;
int length;
char *output = buf;
for (i = 0; buf[i] && isspace(buf[i]); i++)
; /* do nothing */
if (buf[i] == '[') {
/* it's a section */
int offset = section_name_match(&buf[i], old_name);
if (offset > 0) {
ret++;
if (new_name == NULL) {
remove = 1;
continue;
}
store.baselen = strlen(new_name);
if (!store_write_section(out_fd, new_name)) {
ret = write_error(lock->filename);
goto out;
}
/*
* We wrote out the new section, with
* a newline, now skip the old
* section's length
*/
output += offset + i;
if (strlen(output) > 0) {
/*
* More content means there's
* a declaration to put on the
* next line; indent with a
* tab
*/
output -= 1;
output[0] = '\t';
}
}
remove = 0;
}
if (remove)
continue;
length = strlen(output);
if (write_in_full(out_fd, output, length) != length) {
ret = write_error(lock->filename);
goto out;
}
}
fclose(config_file);
unlock_and_out:
if (commit_lock_file(lock) < 0)
ret = error("could not commit config file %s", config_filename);
out:
free(filename_buf);
return ret;
}
/*
 * Rename (or, with new_name == NULL, remove) a section in the
 * repository configuration file.
 */
int git_config_rename_section(const char *old_name, const char *new_name)
{
	return git_config_rename_section_in_file(NULL, old_name, new_name);
}
/*
 * Call this to report error for your variable that should not
 * get a boolean value (i.e. "[my] var" means "true").  Returns the
 * (negative) result of error() so callers can pass it straight up.
 */
#undef config_error_nonbool
int config_error_nonbool(const char *var)
{
	return error("Missing value for '%s'", var);
}
/*
 * Split "var" (of the form "section.key" or "section.subsection.key")
 * into its parts.  On success returns 0 with *key pointing at the
 * variable name and *subsection/*subsection_len describing the middle
 * part (NULL/0 when absent); returns -1 when "var" does not belong to
 * "section" at all.  All output pointers alias "var"; nothing is
 * allocated.
 */
int parse_config_key(const char *var,
		     const char *section,
		     const char **subsection, int *subsection_len,
		     const char **key)
{
	size_t section_len = strlen(section);
	const char *last_dot;

	/* Must start with "<section>." */
	if (prefixcmp(var, section) || var[section_len] != '.')
		return -1;

	/*
	 * The key is everything after the last dot; a subsection may
	 * itself contain dots, so scan from the end.
	 */
	last_dot = strrchr(var, '.');
	*key = last_dot + 1;

	if (last_dot == var + section_len) {
		/* no subsection: plain "section.key" */
		*subsection = NULL;
		*subsection_len = 0;
	} else {
		*subsection = var + section_len + 1;
		*subsection_len = last_dot - *subsection;
	}
	return 0;
}
|
Devindik/origin
|
config.c
|
C
|
gpl-2.0
| 40,138
|
/* IEEE754 floating point arithmetic
* single precision
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*
* ########################################################################
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* ########################################################################
*/
#include "ieee754sp.h"
/* close to ieeep754sp_logb
*/
/*
 * Decompose x like frexp(3): returns a mantissa m and stores an
 * exponent in *eptr so that x == m * 2^e (the result is rebuilt with
 * a biased exponent of -1, i.e. a magnitude in [0.5, 1) for normal
 * inputs).  NaN, infinity and zero are returned unchanged with
 * *eptr = 0.  The COMPXSP/EXPLODEXSP macros unpack sign (xs),
 * exponent (xe), mantissa (xm) and class (xc) of the operand.
 */
ieee754sp ieee754sp_frexp(ieee754sp x, int *eptr)
{
	COMPXSP;		/* declares the xs/xe/xm/xc working fields */
	CLEARCX;		/* reset the exception (CX) state */
	EXPLODEXSP;		/* classify x and fill in its fields */
	switch (xc) {
	case IEEE754_CLASS_SNAN:
	case IEEE754_CLASS_QNAN:
	case IEEE754_CLASS_INF:
	case IEEE754_CLASS_ZERO:
		/* nothing to decompose for these classes */
		*eptr = 0;
		return x;
	case IEEE754_CLASS_DNORM:
		SPDNORMX;	/* normalize a denormal, adjusting xe */
		break;
	case IEEE754_CLASS_NORM:
		break;
	}
	*eptr = xe + 1;
	/* rebuild with exponent -1 (bias only) and the hidden bit stripped */
	return buildsp(xs, -1 + SP_EBIAS, xm & ~SP_HIDDEN_BIT);
}
|
evolver56k/xpenology
|
arch/mips/math-emu/sp_frexp.c
|
C
|
gpl-2.0
| 1,405
|
/* LibTomCrypt, modular cryptographic library -- Tom St Denis
*
* LibTomCrypt is a library that provides various cryptographic
* algorithms in a highly modular and flexible manner.
*
* The library is free for all purposes without any express
* guarantee it works.
*/
#include "tomcrypt_private.h"
/**
@file rsa_import.c
Import a PKCS RSA key, Tom St Denis
*/
#ifdef LTC_MRSA
/**
Import an RSAPublicKey or RSAPrivateKey [two-prime only, only support >= 1024-bit keys, defined in PKCS #1 v2.1]
@param in The packet to import from
@param inlen It's length (octets)
@param key [out] Destination for newly imported key
@return CRYPT_OK if successful, upon error allocated memory is freed
*/
/*
 * Import strategy: (1) initialize all bignum members, (2) try the
 * OpenSSL-style SubjectPublicKeyInfo DER wrapping, (3) otherwise fall
 * back to raw PKCS #1, where the first INTEGER distinguishes a
 * private key (version 0), an unsupported multi-prime key (version 1)
 * or a public key (the INTEGER is the modulus itself).
 */
int rsa_import(const unsigned char *in, unsigned long inlen, rsa_key *key)
{
	int err;
	void *zero;
	unsigned char *tmpbuf=NULL;
	unsigned long tmpbuf_len, len;

	LTC_ARGCHK(in != NULL);
	LTC_ARGCHK(key != NULL);
	LTC_ARGCHK(ltc_mp.name != NULL);

	/* init key */
	if ((err = mp_init_multi(&key->e, &key->d, &key->N, &key->dQ,
				 &key->dP, &key->qP, &key->p, &key->q, NULL)) != CRYPT_OK) {
		return err;
	}

	/* see if the OpenSSL DER format RSA public key will work */
	tmpbuf_len = inlen;
	tmpbuf = XCALLOC(1, tmpbuf_len);
	if (tmpbuf == NULL) {
		err = CRYPT_MEM;
		goto LBL_ERR;
	}

	len = 0;
	err = x509_decode_subject_public_key_info(in, inlen,
		PKA_RSA, tmpbuf, &tmpbuf_len,
		LTC_ASN1_NULL, NULL, &len);

	if (err == CRYPT_OK) { /* SubjectPublicKeyInfo format */

		/* now it should be SEQUENCE { INTEGER, INTEGER } */
		if ((err = der_decode_sequence_multi(tmpbuf, tmpbuf_len,
						     LTC_ASN1_INTEGER, 1UL, key->N,
						     LTC_ASN1_INTEGER, 1UL, key->e,
						     LTC_ASN1_EOL,     0UL, NULL)) != CRYPT_OK) {
			goto LBL_ERR;
		}
		key->type = PK_PUBLIC;
		err = CRYPT_OK;
		goto LBL_FREE;
	}

	/* not SSL public key, try to match against PKCS #1 standards */
	/* decode only the first INTEGER to learn which variant this is */
	err = der_decode_sequence_multi(in, inlen, LTC_ASN1_INTEGER, 1UL, key->N,
					LTC_ASN1_EOL, 0UL, NULL);
	if (err != CRYPT_OK && err != CRYPT_INPUT_TOO_LONG) {
		goto LBL_ERR;
	}

	if (mp_cmp_d(key->N, 0) == LTC_MP_EQ) {
		if ((err = mp_init(&zero)) != CRYPT_OK) {
			goto LBL_ERR;
		}
		/* it's a private key */
		if ((err = der_decode_sequence_multi(in, inlen,
					LTC_ASN1_INTEGER, 1UL, zero,
					LTC_ASN1_INTEGER, 1UL, key->N,
					LTC_ASN1_INTEGER, 1UL, key->e,
					LTC_ASN1_INTEGER, 1UL, key->d,
					LTC_ASN1_INTEGER, 1UL, key->p,
					LTC_ASN1_INTEGER, 1UL, key->q,
					LTC_ASN1_INTEGER, 1UL, key->dP,
					LTC_ASN1_INTEGER, 1UL, key->dQ,
					LTC_ASN1_INTEGER, 1UL, key->qP,
					LTC_ASN1_EOL, 0UL, NULL)) != CRYPT_OK) {
			mp_clear(zero);
			goto LBL_ERR;
		}
		mp_clear(zero);
		key->type = PK_PRIVATE;
	} else if (mp_cmp_d(key->N, 1) == LTC_MP_EQ) {
		/* we don't support multi-prime RSA */
		err = CRYPT_PK_INVALID_TYPE;
		goto LBL_ERR;
	} else {
		/* it's a public key and we lack e */
		if ((err = der_decode_sequence_multi(in, inlen,
					LTC_ASN1_INTEGER, 1UL, key->N,
					LTC_ASN1_INTEGER, 1UL, key->e,
					LTC_ASN1_EOL,     0UL, NULL)) != CRYPT_OK) {
			goto LBL_ERR;
		}
		key->type = PK_PUBLIC;
	}
	err = CRYPT_OK;
	goto LBL_FREE;

LBL_ERR:
	/* on error every initialized member is released */
	mp_clear_multi(key->d, key->e, key->N, key->dQ, key->dP, key->qP, key->p, key->q, NULL);

LBL_FREE:
	if (tmpbuf != NULL) {
		XFREE(tmpbuf);
	}

	return err;
}
#endif /* LTC_MRSA */
/* ref: $Format:%D$ */
/* git commit: $Format:%H$ */
/* commit time: $Format:%ai$ */
|
mangosthree/server
|
dep/tomlib/Crypt/src/pk/rsa/rsa_import.c
|
C
|
gpl-2.0
| 4,169
|
/*
Lucy the Diamond Girl - Game where player collects diamonds.
Copyright (C) 2005-2015 Joni Yrjänä <joniyrjana@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Complete license can be found in the LICENSE file.
*/
#include "widget.h"
#include <assert.h>
/* Make "right" the widget reached when navigating right from "widget".
 * Each widget is recorded on the other's widgets_linking_to_this stack
 * so the link can be found and torn down from either side.
 * Preconditions: both non-NULL, distinct, and no right-link set yet. */
void widget_set_navigation_right(struct widget * widget, struct widget * right)
{
  assert(widget != NULL);
  assert(right != NULL);
  assert(widget != right);
  assert(widget->navigation_right_ == NULL);

  widget->navigation_right_ = right;

  /* The two stacks belong to distinct widgets (asserted above), so the
   * push order is irrelevant. */
  stack_push(right->widgets_linking_to_this, widget);
  stack_push(widget->widgets_linking_to_this, right);
}
|
Peanhua/diamond-girl
|
src/widget_set_navigation_right.c
|
C
|
gpl-2.0
| 1,294
|
/* Eye Of Mate - Main Window
*
* Copyright (C) 2000-2008 The Free Software Foundation
*
* Author: Lucas Rocha <lucasr@gnome.org>
*
* Based on code by:
* - Federico Mena-Quintero <federico@gnu.org>
* - Jens Finke <jens@gnome.org>
* Based on evince code (shell/ev-window.c) by:
* - Martin Kretzschmar <martink@gnome.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <math.h>
#include "eom-window.h"
#include "eom-scroll-view.h"
#include "eom-debug.h"
#include "eom-file-chooser.h"
#include "eom-thumb-view.h"
#include "eom-list-store.h"
#include "eom-sidebar.h"
#include "eom-statusbar.h"
#include "eom-preferences-dialog.h"
#include "eom-properties-dialog.h"
#include "eom-print.h"
#include "eom-error-message-area.h"
#include "eom-application.h"
#include "eom-thumb-nav.h"
#include "eom-config-keys.h"
#include "eom-job-queue.h"
#include "eom-jobs.h"
#include "eom-util.h"
#include "eom-save-as-dialog-helper.h"
#include "eom-plugin-engine.h"
#include "eom-close-confirmation-dialog.h"
#include "eom-clipboard-handler.h"
#include "eom-enum-types.h"
#include "egg-toolbar-editor.h"
#include "egg-editable-toolbar.h"
#include "egg-toolbars-model.h"
#include <glib.h>
#include <glib-object.h>
#include <glib/gi18n.h>
#include <gio/gio.h>
#include <gdk/gdkkeysyms.h>
#include <gio/gdesktopappinfo.h>
#include <gtk/gtk.h>
#if HAVE_LCMS
#include <X11/Xlib.h>
#include <X11/Xatom.h>
#ifdef GDK_WINDOWING_X11
#include <gdk/gdkx.h>
#endif
#include <lcms2.h>
#endif
#define MATE_DESKTOP_USE_UNSTABLE_API
#include <libmate-desktop/mate-desktop-utils.h>
#include <libmate-desktop/mate-aboutdialog.h>
#define EOM_WINDOW_GET_PRIVATE(object) \
(G_TYPE_INSTANCE_GET_PRIVATE ((object), EOM_TYPE_WINDOW, EomWindowPrivate))
G_DEFINE_TYPE (EomWindow, eom_window, GTK_TYPE_WINDOW);
#define EOM_WINDOW_MIN_WIDTH 440
#define EOM_WINDOW_MIN_HEIGHT 350
#define EOM_WINDOW_DEFAULT_WIDTH 540
#define EOM_WINDOW_DEFAULT_HEIGHT 450
#define EOM_WINDOW_FULLSCREEN_TIMEOUT 5 * 1000
#define EOM_WINDOW_FULLSCREEN_POPUP_THRESHOLD 5
#define EOM_RECENT_FILES_GROUP "Graphics"
#define EOM_RECENT_FILES_APP_NAME "Eye of MATE Image Viewer"
#define EOM_RECENT_FILES_LIMIT 5
#define EOM_WALLPAPER_FILENAME "eom-wallpaper"
#define is_rtl (gtk_widget_get_default_direction () == GTK_TEXT_DIR_RTL)
#if GTK_CHECK_VERSION (3, 2, 0)
#define gtk_hbox_new(X,Y) gtk_box_new(GTK_ORIENTATION_HORIZONTAL,Y)
#define gtk_vbox_new(X,Y) gtk_box_new(GTK_ORIENTATION_VERTICAL,Y)
#endif
/* Lifecycle phase of the window: INIT while the image model is still
 * being populated, NORMAL afterwards (see update_action_groups_state). */
typedef enum {
EOM_WINDOW_STATUS_UNKNOWN,
EOM_WINDOW_STATUS_INIT,
EOM_WINDOW_STATUS_NORMAL
} EomWindowStatus;
/* GObject property IDs for EomWindow. */
enum {
PROP_0,
PROP_COLLECTION_POS,
PROP_COLLECTION_RESIZABLE,
PROP_STARTUP_FLAGS
};
/* Signal indices; SIGNAL_PREPARED is emitted from
 * eom_window_obtain_desired_size() once the window size is decided. */
enum {
SIGNAL_PREPARED,
SIGNAL_LAST
};
/* Signal ID table — presumably populated in class_init (not in view). */
static guint signals[SIGNAL_LAST] = { 0 };
/* Private per-window state, reached via EOM_WINDOW_GET_PRIVATE /
 * window->priv. */
struct _EomWindowPrivate {
/* GSettings handles for the schemas this window reads/listens to. */
GSettings *view_settings;
GSettings *ui_settings;
GSettings *fullscreen_settings;
GSettings *lockdown_settings;
/* Image model and the image currently displayed. */
EomListStore *store;
EomImage *image;
EomWindowMode mode;
EomWindowStatus status;
/* UI manager plus the main widget hierarchy. */
GtkUIManager *ui_mgr;
GtkWidget *box;
GtkWidget *layout;
GtkWidget *cbox;
GtkWidget *view;
GtkWidget *sidebar;
GtkWidget *thumbview;
GtkWidget *statusbar;
GtkWidget *nav;
GtkWidget *message_area;
GtkWidget *toolbar;
GObject *properties_dlg;
/* Action groups whose sensitivity is toggled together
 * (see update_action_groups_state). */
GtkActionGroup *actions_window;
GtkActionGroup *actions_image;
GtkActionGroup *actions_collection;
GtkActionGroup *actions_recent;
/* Fullscreen / slideshow machinery. */
GtkWidget *fullscreen_popup;
GSource *fullscreen_timeout_source;
gboolean slideshow_random;
gboolean slideshow_loop;
gint slideshow_switch_timeout;
GSource *slideshow_switch_source;
guint recent_menu_id;
/* Background jobs currently owned by this window. */
EomJob *load_job;
EomJob *transform_job;
EomJob *save_job;
GFile *last_save_as_folder;
EomJob *copy_job;
/* Statusbar context IDs. */
guint image_info_message_cid;
guint tip_message_cid;
guint copy_file_cid;
EomStartupFlags flags;
GSList *file_list;
/* Placement of the thumbnail collection pane. */
EomWindowCollectionPos collection_position;
gboolean collection_resizable;
/* State of the dynamically rebuilt "Open With" menu. */
GtkActionGroup *actions_open_with;
guint open_with_menu_id;
gboolean save_disabled;
gboolean needs_reload_confirmation;
GtkPageSetup *page_setup;
#ifdef HAVE_LCMS
/* Monitor ICC profile; see eom_window_get_display_profile(). */
cmsHPROFILE *display_profile;
#endif
};
static void eom_window_cmd_fullscreen (GtkAction *action, gpointer user_data);
static void eom_window_run_fullscreen (EomWindow *window, gboolean slideshow);
static void eom_window_cmd_slideshow (GtkAction *action, gpointer user_data);
static void eom_window_cmd_pause_slideshow (GtkAction *action, gpointer user_data);
static void eom_window_stop_fullscreen (EomWindow *window, gboolean slideshow);
static void eom_job_load_cb (EomJobLoad *job, gpointer data);
static void eom_job_save_progress_cb (EomJobSave *job, float progress, gpointer data);
static void eom_job_progress_cb (EomJobLoad *job, float progress, gpointer data);
static void eom_job_transform_cb (EomJobTransform *job, gpointer data);
static void fullscreen_set_timeout (EomWindow *window);
static void fullscreen_clear_timeout (EomWindow *window);
static void update_action_groups_state (EomWindow *window);
static void open_with_launch_application_cb (GtkAction *action, gpointer callback_data);
static void eom_window_update_openwith_menu (EomWindow *window, EomImage *image);
static void eom_window_list_store_image_added (GtkTreeModel *tree_model,
GtkTreePath *path,
GtkTreeIter *iter,
gpointer user_data);
static void eom_window_list_store_image_removed (GtkTreeModel *tree_model,
GtkTreePath *path,
gpointer user_data);
static void eom_window_set_wallpaper (EomWindow *window, const gchar *filename, const gchar *visible_filename);
static gboolean eom_window_save_images (EomWindow *window, GList *images);
static void eom_window_finish_saving (EomWindow *window);
static GAppInfo *get_appinfo_for_editor (EomWindow *window);
static GQuark
eom_window_error_quark (void)
{
static GQuark q = 0;
if (q == 0)
q = g_quark_from_static_string ("eom-window-error-quark");
return q;
}
/* Re-root the thumbnail collection pane (priv->nav) relative to the image
 * area according to @position (top/bottom/left/right) and @resizable
 * (GtkPaned packing when TRUE, plain box packing when FALSE).
 *
 * The old priv->layout container is destroyed and rebuilt.  hpaned (the
 * parent of priv->sidebar) and priv->nav are ref'd before removal so
 * gtk_container_remove() does not drop their last reference.
 * NOTE(review): those two extra refs do not appear to be released after
 * re-packing — looks like one leaked reference per mode change; confirm
 * against the widget lifecycle before changing. */
static void
eom_window_set_collection_mode (EomWindow *window, EomWindowCollectionPos position, gboolean resizable)
{
EomWindowPrivate *priv;
GtkWidget *hpaned;
EomThumbNavMode mode = EOM_THUMB_NAV_MODE_ONE_ROW;
eom_debug (DEBUG_PREFERENCES);
g_return_if_fail (EOM_IS_WINDOW (window));
priv = window->priv;
/* Nothing to do if the requested mode is already active. */
if (priv->collection_position == position &&
priv->collection_resizable == resizable)
return;
priv->collection_position = position;
priv->collection_resizable = resizable;
hpaned = gtk_widget_get_parent (priv->sidebar);
/* Keep both children alive across the container teardown below. */
g_object_ref (hpaned);
g_object_ref (priv->nav);
gtk_container_remove (GTK_CONTAINER (priv->layout), hpaned);
gtk_container_remove (GTK_CONTAINER (priv->layout), priv->nav);
gtk_widget_destroy (priv->layout);
/* Rebuild priv->layout with nav on the requested side. */
switch (position) {
case EOM_WINDOW_COLLECTION_POS_BOTTOM:
case EOM_WINDOW_COLLECTION_POS_TOP:
if (resizable) {
mode = EOM_THUMB_NAV_MODE_MULTIPLE_ROWS;
#if GTK_CHECK_VERSION (3, 2, 0)
priv->layout = gtk_paned_new (GTK_ORIENTATION_VERTICAL);
#else
priv->layout = gtk_vpaned_new ();
#endif
if (position == EOM_WINDOW_COLLECTION_POS_BOTTOM) {
gtk_paned_pack1 (GTK_PANED (priv->layout), hpaned, TRUE, FALSE);
gtk_paned_pack2 (GTK_PANED (priv->layout), priv->nav, FALSE, TRUE);
} else {
gtk_paned_pack1 (GTK_PANED (priv->layout), priv->nav, FALSE, TRUE);
gtk_paned_pack2 (GTK_PANED (priv->layout), hpaned, TRUE, FALSE);
}
} else {
mode = EOM_THUMB_NAV_MODE_ONE_ROW;
priv->layout = gtk_vbox_new (FALSE, 2);
if (position == EOM_WINDOW_COLLECTION_POS_BOTTOM) {
gtk_box_pack_start (GTK_BOX (priv->layout), hpaned, TRUE, TRUE, 0);
gtk_box_pack_start (GTK_BOX (priv->layout), priv->nav, FALSE, FALSE, 0);
} else {
gtk_box_pack_start (GTK_BOX (priv->layout), priv->nav, FALSE, FALSE, 0);
gtk_box_pack_start (GTK_BOX (priv->layout), hpaned, TRUE, TRUE, 0);
}
}
break;
case EOM_WINDOW_COLLECTION_POS_LEFT:
case EOM_WINDOW_COLLECTION_POS_RIGHT:
if (resizable) {
mode = EOM_THUMB_NAV_MODE_MULTIPLE_COLUMNS;
#if GTK_CHECK_VERSION (3, 2, 0)
priv->layout = gtk_paned_new (GTK_ORIENTATION_HORIZONTAL);
#else
priv->layout = gtk_hpaned_new ();
#endif
if (position == EOM_WINDOW_COLLECTION_POS_LEFT) {
gtk_paned_pack1 (GTK_PANED (priv->layout), priv->nav, FALSE, TRUE);
gtk_paned_pack2 (GTK_PANED (priv->layout), hpaned, TRUE, FALSE);
} else {
gtk_paned_pack1 (GTK_PANED (priv->layout), hpaned, TRUE, FALSE);
gtk_paned_pack2 (GTK_PANED (priv->layout), priv->nav, FALSE, TRUE);
}
} else {
mode = EOM_THUMB_NAV_MODE_ONE_COLUMN;
priv->layout = gtk_hbox_new (FALSE, 2);
if (position == EOM_WINDOW_COLLECTION_POS_LEFT) {
gtk_box_pack_start (GTK_BOX (priv->layout), priv->nav, FALSE, FALSE, 0);
gtk_box_pack_start (GTK_BOX (priv->layout), hpaned, TRUE, TRUE, 0);
} else {
gtk_box_pack_start (GTK_BOX (priv->layout), hpaned, TRUE, TRUE, 0);
gtk_box_pack_start (GTK_BOX (priv->layout), priv->nav, FALSE, FALSE, 0);
}
}
break;
}
gtk_box_pack_end (GTK_BOX (priv->cbox), priv->layout, TRUE, TRUE, 0);
eom_thumb_nav_set_mode (EOM_THUMB_NAV (priv->nav), mode);
/* Mode UNKNOWN means the window is still being constructed. */
if (priv->mode != EOM_WINDOW_MODE_UNKNOWN) {
update_action_groups_state (window);
}
}
/* GSettings changed-callback for the lockdown key that disables saving.
 * Mirrors the key value into priv->save_disabled and updates the
 * sensitivity of the "ImageSave" / "ImageSaveAs" actions to match. */
static void
eom_window_can_save_changed_cb (GSettings *settings, gchar *key, gpointer user_data)
{
	EomWindow *window;
	EomWindowPrivate *priv;
	GtkAction *save_action;
	GtkAction *save_as_action;

	eom_debug (DEBUG_PREFERENCES);
	g_return_if_fail (EOM_IS_WINDOW (user_data));

	window = EOM_WINDOW (user_data);
	priv = window->priv;

	priv->save_disabled = g_settings_get_boolean (settings, key);

	save_action =
		gtk_action_group_get_action (priv->actions_image, "ImageSave");
	save_as_action =
		gtk_action_group_get_action (priv->actions_image, "ImageSaveAs");

	if (priv->save_disabled) {
		gtk_action_set_sensitive (save_action, FALSE);
		gtk_action_set_sensitive (save_as_action, FALSE);
	} else {
		EomImage *image = eom_window_get_image (window);

		if (EOM_IS_IMAGE (image)) {
			/* Plain "Save" only makes sense for modified images. */
			gtk_action_set_sensitive (save_action,
						  eom_image_is_modified (image));
			gtk_action_set_sensitive (save_as_action, TRUE);
		}
	}
}
#ifdef HAVE_LCMS
/* Fetch the monitor's ICC profile from the _ICC_PROFILE(_%d) property on
 * the X root window and open it with Little CMS.
 *
 * Returns the opened profile, or NULL if the property is absent, has an
 * unexpected format, or lcms rejects the data.  Caller owns the result.
 * NOTE(review): the return type is declared cmsHPROFILE* although
 * cmsOpenProfileFromMem() returns cmsHPROFILE (itself a pointer); this
 * matches the display_profile field in EomWindowPrivate, so it is
 * internally consistent — confirm before changing either. */
static cmsHPROFILE *
eom_window_get_display_profile (GdkScreen *screen)
{
Display *dpy;
Atom icc_atom, type;
int format;
gulong nitems;
gulong bytes_after;
gulong length;
guchar *str;
int result;
cmsHPROFILE *profile;
char *atom_name;
dpy = GDK_DISPLAY_XDISPLAY (gdk_screen_get_display (screen));
/* Per-screen property name: _ICC_PROFILE for screen 0, _ICC_PROFILE_%d
 * for the others. */
if (gdk_screen_get_number (screen) > 0)
atom_name = g_strdup_printf ("_ICC_PROFILE_%d", gdk_screen_get_number (screen));
else
atom_name = g_strdup ("_ICC_PROFILE");
icc_atom = gdk_x11_get_xatom_by_name_for_display (gdk_screen_get_display (screen), atom_name);
g_free (atom_name);
result = XGetWindowProperty (dpy,
GDK_WINDOW_XID (gdk_screen_get_root_window (screen)),
icc_atom,
0,
G_MAXLONG,
False,
XA_CARDINAL,
&type,
&format,
&nitems,
&bytes_after,
(guchar **)&str);
/* TODO: handle bytes_after != 0 */
if ((result == Success) && (type == XA_CARDINAL) && (nitems > 0)) {
/* Xlib stores 16-bit items as shorts and 32-bit items as longs,
 * so the byte length depends on the reported format. */
switch (format)
{
case 8:
length = nitems;
break;
case 16:
length = sizeof(short) * nitems;
break;
case 32:
length = sizeof(long) * nitems;
break;
default:
eom_debug_message (DEBUG_LCMS, "Unable to read profile, not correcting");
XFree (str);
return NULL;
}
profile = cmsOpenProfileFromMem (str, length);
if (G_UNLIKELY (profile == NULL)) {
eom_debug_message (DEBUG_LCMS,
"Invalid display profile, "
"not correcting");
}
XFree (str);
} else {
profile = NULL;
eom_debug_message (DEBUG_LCMS, "No profile, not correcting");
}
return profile;
}
#endif
/* Refresh the "current / total" image counter in the statusbar. */
static void
update_image_pos (EomWindow *window)
{
	EomWindowPrivate *priv = window->priv;
	gint n_images;
	gint pos;

	n_images = eom_list_store_length (EOM_LIST_STORE (priv->store));

	pos = (n_images > 0)
		? eom_list_store_get_pos_by_image (EOM_LIST_STORE (priv->store),
						   priv->image)
		: -1;

	/* Displayed position is 1-based; an empty store shows 0 / 0. */
	eom_statusbar_set_image_number (EOM_STATUSBAR (priv->statusbar),
					pos + 1,
					n_images);
}
/* Rebuild the statusbar text for the current image: dimensions, size on
 * disk and zoom factor, followed by the position counter.  Clears the
 * message when no image (or no dimension data) is available. */
static void
update_status_bar (EomWindow *window)
{
EomWindowPrivate *priv;
char *str = NULL;
g_return_if_fail (EOM_IS_WINDOW (window));
eom_debug (DEBUG_WINDOW);
priv = window->priv;
if (priv->image != NULL &&
eom_image_has_data (priv->image, EOM_IMAGE_DATA_DIMENSION)) {
int zoom, width, height;
goffset bytes = 0;
/* Round the zoom fraction to a whole percentage. */
zoom = floor (100 * eom_scroll_view_get_zoom (EOM_SCROLL_VIEW (priv->view)) + 0.5);
eom_image_get_size (priv->image, &width, &height);
bytes = eom_image_get_bytes (priv->image);
if ((width > 0) && (height > 0)) {
char *size_string;
size_string = g_format_size (bytes);
/* Translators: This is the string displayed in the statusbar
 * The tokens are from left to right:
 * - image width
 * - image height
 * - image size in bytes
 * - zoom in percent */
/* NOTE(review): ngettext's plural count is keyed on `height`,
 * which only decides pixel/pixels grammatically — confirm this
 * matches the upstream translation convention before changing. */
str = g_strdup_printf (ngettext("%i × %i pixel %s %i%%",
"%i × %i pixels %s %i%%", height),
width,
height,
size_string,
zoom);
g_free (size_string);
}
update_image_pos (window);
}
/* Replace whatever info message is currently shown. */
gtk_statusbar_pop (GTK_STATUSBAR (priv->statusbar),
priv->image_info_message_cid);
gtk_statusbar_push (GTK_STATUSBAR (priv->statusbar),
priv->image_info_message_cid, str ? str : "");
g_free (str);
}
/* Swap the message area shown above the view.  Passing NULL removes the
 * current one; otherwise the new widget is packed into the content box
 * and a weak pointer keeps priv->message_area from dangling if the
 * widget is destroyed elsewhere. */
static void
eom_window_set_message_area (EomWindow *window,
			     GtkWidget *message_area)
{
	EomWindowPrivate *priv = window->priv;

	if (priv->message_area == message_area)
		return;

	if (priv->message_area != NULL)
		gtk_widget_destroy (priv->message_area);

	priv->message_area = message_area;

	if (message_area == NULL)
		return;

	gtk_box_pack_start (GTK_BOX (priv->cbox),
			    priv->message_area,
			    FALSE,
			    FALSE,
			    0);

	g_object_add_weak_pointer (G_OBJECT (priv->message_area),
				   (void *) &priv->message_area);
}
/* Recompute sensitivity/visibility of the main action groups and panes
 * from the number of images in the store, the startup flags and the
 * lockdown settings.  Called whenever the model or mode changes. */
static void
update_action_groups_state (EomWindow *window)
{
EomWindowPrivate *priv;
GtkAction *action_collection;
GtkAction *action_sidebar;
GtkAction *action_fscreen;
GtkAction *action_sshow;
GtkAction *action_print;
gboolean print_disabled = FALSE;
gboolean show_image_collection = FALSE;
gint n_images = 0;
g_return_if_fail (EOM_IS_WINDOW (window));
eom_debug (DEBUG_WINDOW);
priv = window->priv;
action_collection =
gtk_action_group_get_action (priv->actions_window,
"ViewImageCollection");
action_sidebar =
gtk_action_group_get_action (priv->actions_window,
"ViewSidebar");
action_fscreen =
gtk_action_group_get_action (priv->actions_image,
"ViewFullscreen");
action_sshow =
gtk_action_group_get_action (priv->actions_collection,
"ViewSlideshow");
action_print =
gtk_action_group_get_action (priv->actions_image,
"ImagePrint");
g_assert (action_collection != NULL);
g_assert (action_sidebar != NULL);
g_assert (action_fscreen != NULL);
g_assert (action_sshow != NULL);
g_assert (action_print != NULL);
if (priv->store != NULL) {
n_images = eom_list_store_length (EOM_LIST_STORE (priv->store));
}
if (n_images == 0) {
/* Empty model: hide the layout and disable everything that
 * requires at least one image. */
gtk_widget_hide (priv->layout);
gtk_action_group_set_sensitive (priv->actions_window, TRUE);
gtk_action_group_set_sensitive (priv->actions_image, FALSE);
gtk_action_group_set_sensitive (priv->actions_collection, FALSE);
gtk_action_set_sensitive (action_fscreen, FALSE);
gtk_action_set_sensitive (action_sshow, FALSE);
/* If there are no images on model, initialization
stops here. */
if (priv->status == EOM_WINDOW_STATUS_INIT) {
priv->status = EOM_WINDOW_STATUS_NORMAL;
}
} else {
if (priv->flags & EOM_STARTUP_DISABLE_COLLECTION) {
g_settings_set_boolean (priv->ui_settings, EOM_CONF_UI_IMAGE_COLLECTION, FALSE);
show_image_collection = FALSE;
} else {
show_image_collection =
g_settings_get_boolean (priv->ui_settings, EOM_CONF_UI_IMAGE_COLLECTION);
}
/* The collection pane is pointless for a single image and is
 * never shown while a slideshow runs. */
show_image_collection = show_image_collection &&
n_images > 1 &&
priv->mode != EOM_WINDOW_MODE_SLIDESHOW;
gtk_widget_show (priv->layout);
if (show_image_collection)
gtk_widget_show (priv->nav);
gtk_toggle_action_set_active (GTK_TOGGLE_ACTION (action_collection),
show_image_collection);
gtk_action_group_set_sensitive (priv->actions_window, TRUE);
gtk_action_group_set_sensitive (priv->actions_image, TRUE);
gtk_action_set_sensitive (action_fscreen, TRUE);
if (n_images == 1) {
gtk_action_group_set_sensitive (priv->actions_collection, FALSE);
gtk_action_set_sensitive (action_collection, FALSE);
gtk_action_set_sensitive (action_sshow, FALSE);
} else {
gtk_action_group_set_sensitive (priv->actions_collection, TRUE);
gtk_action_set_sensitive (action_sshow, TRUE);
}
if (show_image_collection)
gtk_widget_grab_focus (priv->thumbview);
else
gtk_widget_grab_focus (priv->view);
}
/* NOTE(review): the variable says "disabled" while the key macro says
 * "CAN_PRINT" — presumably the macro maps to a 'disable-printing'
 * lockdown key; verify in eom-config-keys.h. */
print_disabled = g_settings_get_boolean (priv->lockdown_settings,
EOM_CONF_LOCKDOWN_CAN_PRINT);
if (print_disabled) {
gtk_action_set_sensitive (action_print, FALSE);
}
if (eom_sidebar_is_empty (EOM_SIDEBAR (priv->sidebar))) {
gtk_action_set_sensitive (action_sidebar, FALSE);
gtk_widget_hide (priv->sidebar);
}
}
/* Enable "ImageSetAsWallpaper" only while exactly one thumbnail is
 * selected in the thumbnail view. */
static void
update_selection_ui_visibility (EomWindow *window)
{
	EomWindowPrivate *priv = window->priv;
	GtkAction *wallpaper_action;
	gint n_selected;

	n_selected = eom_thumb_view_get_n_selected (EOM_THUMB_VIEW (priv->thumbview));

	wallpaper_action =
		gtk_action_group_get_action (priv->actions_image,
					     "ImageSetAsWallpaper");

	gtk_action_set_sensitive (wallpaper_action, n_selected == 1);
}
/* Register @file with the GTK recent-files manager under the Graphics
 * group.
 *
 * Always returns FALSE so it can be used directly as a one-shot
 * GSourceFunc (see the g_idle_add_full() call in
 * eom_window_display_image()). */
static gboolean
add_file_to_recent_files (GFile *file)
{
	gchar *text_uri;
	GFileInfo *file_info;
	GtkRecentData *recent_data;
	static gchar *groups[2] = { EOM_RECENT_FILES_GROUP , NULL };

	if (file == NULL)
		return FALSE;

	/* The password gets stripped here because ~/.recently-used.xbel is
	 * readable by everyone (chmod 644). It also makes the workaround
	 * for the bug with gtk_recent_info_get_uri_display() easier
	 * (see the comment in eom_window_update_recent_files_menu()). */
	text_uri = g_file_get_uri (file);

	if (text_uri == NULL)
		return FALSE;

	file_info = g_file_query_info (file,
				       G_FILE_ATTRIBUTE_STANDARD_CONTENT_TYPE,
				       0, NULL, NULL);
	if (file_info == NULL) {
		/* Fix: text_uri was leaked on this early-return path. */
		g_free (text_uri);
		return FALSE;
	}

	recent_data = g_slice_new (GtkRecentData);
	recent_data->display_name = NULL;
	recent_data->description = NULL;
	/* mime_type is owned by file_info, which stays alive until after
	 * gtk_recent_manager_add_full() below. */
	recent_data->mime_type = (gchar *) g_file_info_get_content_type (file_info);
	recent_data->app_name = EOM_RECENT_FILES_APP_NAME;
	recent_data->app_exec = g_strjoin(" ", g_get_prgname (), "%u", NULL);
	recent_data->groups = groups;
	recent_data->is_private = FALSE;

	gtk_recent_manager_add_full (gtk_recent_manager_get_default (),
				     text_uri,
				     recent_data);

	g_free (recent_data->app_exec);
	g_free (text_uri);
	g_object_unref (file_info);
	g_slice_free (GtkRecentData, recent_data);

	return FALSE;
}
/* "thumbnail_changed" handler: mirror the fresh thumbnail into the
 * window icon (and into the properties dialog when it is open).  When
 * the thumbnail was dropped and the thumbnail pane is hidden, ask the
 * list store to regenerate it for the image's row. */
static void
image_thumb_changed_cb (EomImage *image, gpointer data)
{
	EomWindow *window;
	EomWindowPrivate *priv;
	GdkPixbuf *pixbuf;

	g_return_if_fail (EOM_IS_WINDOW (data));

	window = EOM_WINDOW (data);
	priv = window->priv;

	pixbuf = eom_image_get_thumbnail (image);

	if (pixbuf != NULL) {
		gtk_window_set_icon (GTK_WINDOW (window), pixbuf);

		if (priv->properties_dlg != NULL) {
			eom_properties_dialog_update (EOM_PROPERTIES_DIALOG (priv->properties_dlg),
						      image);
		}

		g_object_unref (pixbuf);
	} else if (!gtk_widget_get_visible (priv->nav)) {
		gint pos = eom_list_store_get_pos_by_image (priv->store, image);
		GtkTreePath *tree_path = gtk_tree_path_new_from_indices (pos, -1);
		GtkTreeIter iter;

		gtk_tree_model_get_iter (GTK_TREE_MODEL (priv->store), &iter, tree_path);
		eom_list_store_thumbnail_set (priv->store, &iter);
		gtk_tree_path_free (tree_path);
	}
}
/* Response handler for the "file changed on disk" info bar: reload the
 * image on YES, then re-arm the confirmation flag and dismiss the bar. */
static void
file_changed_info_bar_response (GtkInfoBar *info_bar,
				gint response,
				EomWindow *window)
{
	if (response == GTK_RESPONSE_YES)
		eom_window_reload_image (window);

	window->priv->needs_reload_confirmation = TRUE;

	eom_window_set_message_area (window, NULL);
}
/* "file-changed" handler: show a one-shot info bar asking whether to
 * reload the image that was modified externally.  The
 * needs_reload_confirmation flag prevents stacking multiple bars for
 * repeated change notifications; it is re-armed by the response
 * handler (file_changed_info_bar_response). */
static void
image_file_changed_cb (EomImage *img, EomWindow *window)
{
GtkWidget *info_bar;
gchar *text, *markup;
GtkWidget *image;
GtkWidget *label;
GtkWidget *hbox;
if (window->priv->needs_reload_confirmation == FALSE)
return;
window->priv->needs_reload_confirmation = FALSE;
info_bar = gtk_info_bar_new_with_buttons (_("_Reload"),
GTK_RESPONSE_YES,
C_("MessageArea", "Hi_de"),
GTK_RESPONSE_NO, NULL);
gtk_info_bar_set_message_type (GTK_INFO_BAR (info_bar),
GTK_MESSAGE_QUESTION);
image = gtk_image_new_from_stock (GTK_STOCK_DIALOG_QUESTION,
GTK_ICON_SIZE_DIALOG);
label = gtk_label_new (NULL);
/* The newline character is currently necessary due to a problem
 * with the automatic line break. */
text = g_strdup_printf (_("The image \"%s\" has been modified by an external application."
"\nWould you like to reload it?"), eom_image_get_caption (img));
markup = g_markup_printf_escaped ("<b>%s</b>", text);
gtk_label_set_markup (GTK_LABEL (label), markup);
g_free (text);
g_free (markup);
hbox = gtk_hbox_new (FALSE, 8);
gtk_box_pack_start (GTK_BOX (hbox), image, FALSE, FALSE, 0);
/* Alignment API differs between GTK versions. */
#if GTK_CHECK_VERSION (3, 14, 0)
gtk_widget_set_valign (image, GTK_ALIGN_START);
gtk_box_pack_start (GTK_BOX (hbox), label, TRUE, TRUE, 0);
gtk_widget_set_halign (label, GTK_ALIGN_START);
#else
gtk_misc_set_alignment (GTK_MISC (image), 0.5, 0);
gtk_box_pack_start (GTK_BOX (hbox), label, TRUE, TRUE, 0);
gtk_misc_set_alignment (GTK_MISC (label), 0, 0.5);
#endif
gtk_box_pack_start (GTK_BOX (gtk_info_bar_get_content_area (GTK_INFO_BAR (info_bar))), hbox, TRUE, TRUE, 0);
gtk_widget_show_all (hbox);
gtk_widget_show (info_bar);
eom_window_set_message_area (window, info_bar);
g_signal_connect (info_bar, "response",
G_CALLBACK (file_changed_info_bar_response), window);
}
/* Make @image the displayed image: hook its change signals, hand it to
 * the scroll view, refresh title/statusbar, queue a recent-files entry
 * and rebuild the "Open With" menu.  Requires the image's pixel data to
 * be loaded already (asserted below). */
static void
eom_window_display_image (EomWindow *window, EomImage *image)
{
EomWindowPrivate *priv;
GFile *file;
g_return_if_fail (EOM_IS_WINDOW (window));
g_return_if_fail (EOM_IS_IMAGE (image));
eom_debug (DEBUG_WINDOW);
g_assert (eom_image_has_data (image, EOM_IMAGE_DATA_IMAGE));
priv = window->priv;
/* NOTE(review): this NULL check is redundant after the
 * g_return_if_fail(EOM_IS_IMAGE(image)) above — harmless, though. */
if (image != NULL) {
g_signal_connect (image,
"thumbnail_changed",
G_CALLBACK (image_thumb_changed_cb),
window);
g_signal_connect (image, "file-changed",
G_CALLBACK (image_file_changed_cb),
window);
image_thumb_changed_cb (image, window);
}
priv->needs_reload_confirmation = TRUE;
eom_scroll_view_set_image (EOM_SCROLL_VIEW (priv->view), image);
gtk_window_set_title (GTK_WINDOW (window), eom_image_get_caption (image));
update_status_bar (window);
file = eom_image_get_file (image);
/* The idle callback runs once and its destroy-notify releases the
 * file reference. */
g_idle_add_full (G_PRIORITY_LOW,
(GSourceFunc) add_file_to_recent_files,
file,
(GDestroyNotify) g_object_unref);
eom_window_update_openwith_menu (window, image);
}
/* Launch the GAppInfo stashed on @action (data key "app") with the
 * image's file as its single argument. */
static void
open_with_launch_application_cb (GtkAction *action, gpointer data) {
	GAppInfo *app;
	GFile *file;
	GList *file_list;

	file = eom_image_get_file (EOM_IMAGE (data));
	app = g_object_get_data (G_OBJECT (action), "app");

	file_list = g_list_append (NULL, file);

	g_app_info_launch (app,
			   file_list,
			   NULL, NULL);

	g_object_unref (file);
	g_list_free (file_list);
}
/* Rebuild the dynamic "Open With" submenus (main menu, thumbnail popup,
 * view popup) with the applications registered for @image's MIME type,
 * and update the "OpenEditor" action depending on whether the configured
 * editor is among them. */
static void
eom_window_update_openwith_menu (EomWindow *window, EomImage *image)
{
	gboolean edit_button_active;
	GAppInfo *editor_app;
	GFile *file;
	GFileInfo *file_info;
	GList *iter;
	gchar *label, *tip;
	const gchar *mime_type;
	GtkAction *action;
	EomWindowPrivate *priv;
	GList *apps;
	guint action_id = 0;
	GIcon *app_icon;
	char *path;
	GtkWidget *menuitem;

	priv = window->priv;

	edit_button_active = FALSE;
	/* NOTE(review): ownership of editor_app is not obvious from here
	 * (it is only compared against, never released) — verify against
	 * get_appinfo_for_editor(). */
	editor_app = get_appinfo_for_editor (window);

	file = eom_image_get_file (image);
	file_info = g_file_query_info (file,
				       G_FILE_ATTRIBUTE_STANDARD_CONTENT_TYPE,
				       0, NULL, NULL);
	/* Fix: the file reference was previously leaked on every call.
	 * The other callers of eom_image_get_file() in this file all unref
	 * the result, so it is a transfer-full return; it is not needed
	 * past this point. */
	g_object_unref (file);

	if (file_info == NULL)
		return;

	mime_type = g_file_info_get_content_type (file_info);

	/* Tear down the entries built for the previous image. */
	if (priv->open_with_menu_id != 0) {
		gtk_ui_manager_remove_ui (priv->ui_mgr, priv->open_with_menu_id);
		priv->open_with_menu_id = 0;
	}

	if (priv->actions_open_with != NULL) {
		gtk_ui_manager_remove_action_group (priv->ui_mgr, priv->actions_open_with);
		priv->actions_open_with = NULL;
	}

	if (mime_type == NULL) {
		g_object_unref (file_info);
		return;
	}

	apps = g_app_info_get_all_for_type (mime_type);

	g_object_unref (file_info);

	if (!apps)
		return;

	priv->actions_open_with = gtk_action_group_new ("OpenWithActions");
	gtk_ui_manager_insert_action_group (priv->ui_mgr, priv->actions_open_with, -1);

	priv->open_with_menu_id = gtk_ui_manager_new_merge_id (priv->ui_mgr);

	for (iter = apps; iter; iter = iter->next) {
		GAppInfo *app = iter->data;
		gchar name[64];

		if (editor_app != NULL && g_app_info_equal (editor_app, app)) {
			edit_button_active = TRUE;
		}

		/* Do not include eom itself */
		if (g_ascii_strcasecmp (g_app_info_get_executable (app),
					g_get_prgname ()) == 0) {
			g_object_unref (app);
			continue;
		}

		g_snprintf (name, sizeof (name), "OpenWith%u", action_id++);

		label = g_strdup (g_app_info_get_name (app));
		tip = g_strdup_printf (_("Use \"%s\" to open the selected image"), g_app_info_get_name (app));

		action = gtk_action_new (name, label, tip, NULL);

		app_icon = g_app_info_get_icon (app);
		if (G_LIKELY (app_icon != NULL)) {
			g_object_ref (app_icon);
			gtk_action_set_gicon (action, app_icon);
			g_object_unref (app_icon);
		}

		g_free (label);
		g_free (tip);

		/* The action now owns the list's reference on app. */
		g_object_set_data_full (G_OBJECT (action), "app", app,
					(GDestroyNotify) g_object_unref);

		g_signal_connect (action,
				  "activate",
				  G_CALLBACK (open_with_launch_application_cb),
				  image);

		gtk_action_group_add_action (priv->actions_open_with, action);
		g_object_unref (action);

		gtk_ui_manager_add_ui (priv->ui_mgr,
				       priv->open_with_menu_id,
				       "/MainMenu/Image/ImageOpenWith/Applications Placeholder",
				       name,
				       name,
				       GTK_UI_MANAGER_MENUITEM,
				       FALSE);

		gtk_ui_manager_add_ui (priv->ui_mgr,
				       priv->open_with_menu_id,
				       "/ThumbnailPopup/ImageOpenWith/Applications Placeholder",
				       name,
				       name,
				       GTK_UI_MANAGER_MENUITEM,
				       FALSE);

		gtk_ui_manager_add_ui (priv->ui_mgr,
				       priv->open_with_menu_id,
				       "/ViewPopup/ImageOpenWith/Applications Placeholder",
				       name,
				       name,
				       GTK_UI_MANAGER_MENUITEM,
				       FALSE);

		path = g_strdup_printf ("/MainMenu/Image/ImageOpenWith/Applications Placeholder/%s", name);
		menuitem = gtk_ui_manager_get_widget (priv->ui_mgr, path);
		/* Only force displaying the icon if it is an application icon */
		gtk_image_menu_item_set_always_show_image (GTK_IMAGE_MENU_ITEM (menuitem), app_icon != NULL);
		g_free (path);

		path = g_strdup_printf ("/ThumbnailPopup/ImageOpenWith/Applications Placeholder/%s", name);
		menuitem = gtk_ui_manager_get_widget (priv->ui_mgr, path);
		/* Only force displaying the icon if it is an application icon */
		gtk_image_menu_item_set_always_show_image (GTK_IMAGE_MENU_ITEM (menuitem), app_icon != NULL);
		g_free (path);

		path = g_strdup_printf ("/ViewPopup/ImageOpenWith/Applications Placeholder/%s", name);
		menuitem = gtk_ui_manager_get_widget (priv->ui_mgr, path);
		/* Only force displaying the icon if it is an application icon */
		gtk_image_menu_item_set_always_show_image (GTK_IMAGE_MENU_ITEM (menuitem), app_icon != NULL);
		g_free (path);
	}

	g_list_free (apps);

	action = gtk_action_group_get_action (window->priv->actions_image,
					      "OpenEditor");
	if (action != NULL) {
		gtk_action_set_sensitive (action, edit_button_active);
	}
}
/* Cancel and dispose of any in-flight image load job.  The signal
 * handlers are disconnected before cancelling so neither progress nor
 * completion callbacks can fire into the window afterwards. */
static void
eom_window_clear_load_job (EomWindow *window)
{
EomWindowPrivate *priv = window->priv;
if (priv->load_job != NULL) {
/* Only dequeue jobs the queue still owns. */
if (!priv->load_job->finished)
eom_job_queue_remove_job (priv->load_job);
g_signal_handlers_disconnect_by_func (priv->load_job,
eom_job_progress_cb,
window);
g_signal_handlers_disconnect_by_func (priv->load_job,
eom_job_load_cb,
window);
eom_image_cancel_load (EOM_JOB_LOAD (priv->load_job)->image);
g_object_unref (priv->load_job);
priv->load_job = NULL;
/* Hide statusbar */
eom_statusbar_set_progress (EOM_STATUSBAR (priv->statusbar), 0);
}
}
/* Load-job progress callback: forward the fraction to the statusbar. */
static void
eom_job_progress_cb (EomJobLoad *job, float progress, gpointer user_data)
{
	g_return_if_fail (EOM_IS_WINDOW (user_data));

	eom_statusbar_set_progress (EOM_STATUSBAR (EOM_WINDOW (user_data)->priv->statusbar),
				    progress);
}
/* Save-job progress callback: update the progress bar, and whenever the
 * job moves on to a new image, push a "Saving image ... (n/m)" message.
 * A function-static pointer remembers the last image seen so the
 * statusbar message is only rebuilt on image change; it is reset when
 * the job reports completion. */
static void
eom_job_save_progress_cb (EomJobSave *job, float progress, gpointer user_data)
{
EomWindowPrivate *priv;
EomWindow *window;
static EomImage *image = NULL;
g_return_if_fail (EOM_IS_WINDOW (user_data));
window = EOM_WINDOW (user_data);
priv = window->priv;
eom_statusbar_set_progress (EOM_STATUSBAR (priv->statusbar),
progress);
if (image != job->current_image) {
gchar *str_image, *status_message;
guint n_images;
image = job->current_image;
n_images = g_list_length (job->images);
str_image = eom_image_get_uri_for_display (image);
/* Translators: This string is displayed in the statusbar
 * while saving images. The tokens are from left to right:
 * - the original filename
 * - the current image's position in the queue
 * - the total number of images queued for saving */
status_message = g_strdup_printf (_("Saving image \"%s\" (%u/%u)"),
str_image,
job->current_pos + 1,
n_images);
g_free (str_image);
gtk_statusbar_pop (GTK_STATUSBAR (priv->statusbar),
priv->image_info_message_cid);
gtk_statusbar_push (GTK_STATUSBAR (priv->statusbar),
priv->image_info_message_cid,
status_message);
g_free (status_message);
}
/* Exact-float compare: assumes the job reports precisely 1.0 when
 * done — TODO confirm against the job implementation. */
if (progress == 1.0)
image = NULL;
}
/* "size-prepared"-style callback: pick a default window size that fits
 * @image.  The window chrome (deco_*) is measured as the difference
 * between the current window and view allocations; if the image plus
 * chrome would not fit the monitor, it is scaled down to 75% of the
 * monitor's limiting dimension.  Emits SIGNAL_PREPARED when done. */
static void
eom_window_obtain_desired_size (EomImage *image,
gint width,
gint height,
EomWindow *window)
{
GdkScreen *screen;
GdkRectangle monitor;
GtkAllocation allocation;
gint final_width, final_height;
gint screen_width, screen_height;
gint window_width, window_height;
gint img_width, img_height;
gint view_width, view_height;
gint deco_width, deco_height;
update_action_groups_state (window);
img_width = width;
img_height = height;
/* Both widgets must be realized before their allocations mean
 * anything. */
if (!gtk_widget_get_realized (window->priv->view)) {
gtk_widget_realize (window->priv->view);
}
gtk_widget_get_allocation (window->priv->view, &allocation);
view_width = allocation.width;
view_height = allocation.height;
if (!gtk_widget_get_realized (GTK_WIDGET (window))) {
gtk_widget_realize (GTK_WIDGET (window));
}
gtk_widget_get_allocation (GTK_WIDGET (window), &allocation);
window_width = allocation.width;
window_height = allocation.height;
screen = gtk_window_get_screen (GTK_WINDOW (window));
/* Size against the monitor the window is actually on, not the whole
 * (possibly multi-head) screen. */
gdk_screen_get_monitor_geometry (screen,
gdk_screen_get_monitor_at_window (screen,
gtk_widget_get_window (GTK_WIDGET (window))),
&monitor);
screen_width = monitor.width;
screen_height = monitor.height;
deco_width = window_width - view_width;
deco_height = window_height - view_height;
if (img_width > 0 && img_height > 0) {
if ((img_width + deco_width > screen_width) ||
(img_height + deco_height > screen_height))
{
double factor;
/* Scale by the dimension that overflows the most. */
if (img_width > img_height) {
factor = (screen_width * 0.75 - deco_width) / (double) img_width;
} else {
factor = (screen_height * 0.75 - deco_height) / (double) img_height;
}
img_width = img_width * factor;
img_height = img_height * factor;
}
}
final_width = MAX (EOM_WINDOW_MIN_WIDTH, img_width + deco_width);
final_height = MAX (EOM_WINDOW_MIN_HEIGHT, img_height + deco_height);
eom_debug_message (DEBUG_WINDOW, "Setting window size: %d x %d", final_width, final_height);
gtk_window_set_default_size (GTK_WINDOW (window), final_width, final_height);
g_signal_emit (window, signals[SIGNAL_PREPARED], 0);
}
static void
eom_window_error_message_area_response (GtkInfoBar *message_area,
					gint response_id,
					EomWindow *window)
{
	/* Response handler for the load-error info bar: OK retries the
	 * load, anything else just dismisses the message area. */
	if (response_id == GTK_RESPONSE_OK) {
		/* Re-selecting the current thumbnail triggers a fresh load */
		eom_thumb_view_select_single (EOM_THUMB_VIEW (window->priv->thumbview),
					      EOM_THUMB_VIEW_SELECT_CURRENT);
	} else {
		eom_window_set_message_area (window, NULL);
	}
}
static void
eom_job_load_cb (EomJobLoad *job, gpointer data)
{
	/* "finished" handler for the load job queued in
	 * handle_image_selection_changed_cb: on success displays the image,
	 * on failure shows an in-window error bar with a retry button, then
	 * updates Save/Undo sensitivity. */
	EomWindow *window;
	EomWindowPrivate *priv;
	GtkAction *action_undo, *action_save;

	g_return_if_fail (EOM_IS_WINDOW (data));

	eom_debug (DEBUG_WINDOW);

	window = EOM_WINDOW (data);
	priv = window->priv;

	/* Loading is done: reset the progress bar and the transient
	 * "Opening image ..." statusbar message */
	eom_statusbar_set_progress (EOM_STATUSBAR (priv->statusbar), 0.0);

	gtk_statusbar_pop (GTK_STATUSBAR (window->priv->statusbar),
			   priv->image_info_message_cid);

	if (priv->image != NULL) {
		/* Detach handlers still pointing at the previous image
		 * before dropping our reference to it */
		g_signal_handlers_disconnect_by_func (priv->image,
						      image_thumb_changed_cb,
						      window);
		g_signal_handlers_disconnect_by_func (priv->image,
						      image_file_changed_cb,
						      window);
		g_object_unref (priv->image);
	}

	priv->image = g_object_ref (job->image);

	if (EOM_JOB (job)->error == NULL) {
#ifdef HAVE_LCMS
		eom_image_apply_display_profile (job->image,
						 priv->display_profile);
#endif

		gtk_action_group_set_sensitive (priv->actions_image, TRUE);

		eom_window_display_image (window, job->image);
	} else {
		/* Load failed: show an error info bar whose OK response
		 * retries the load (eom_window_error_message_area_response) */
		GtkWidget *message_area;

		message_area = eom_image_load_error_message_area_new (
					eom_image_get_caption (job->image),
					EOM_JOB (job)->error);

		g_signal_connect (message_area,
				  "response",
				  G_CALLBACK (eom_window_error_message_area_response),
				  window);

		gtk_window_set_icon (GTK_WINDOW (window), NULL);
		gtk_window_set_title (GTK_WINDOW (window),
				      eom_image_get_caption (job->image));

		eom_window_set_message_area (window, message_area);

		gtk_info_bar_set_default_response (GTK_INFO_BAR (message_area),
						   GTK_RESPONSE_CANCEL);

		gtk_widget_show (message_area);

		update_status_bar (window);

		/* Clear the view: there is nothing valid to display */
		eom_scroll_view_set_image (EOM_SCROLL_VIEW (priv->view), NULL);

		if (window->priv->status == EOM_WINDOW_STATUS_INIT) {
			update_action_groups_state (window);

			g_signal_emit (window, signals[SIGNAL_PREPARED], 0);
		}

		gtk_action_group_set_sensitive (priv->actions_image, FALSE);
	}

	eom_window_clear_load_job (window);

	if (window->priv->status == EOM_WINDOW_STATUS_INIT) {
		window->priv->status = EOM_WINDOW_STATUS_NORMAL;

		/* The first image has arrived; "size-prepared" was only
		 * needed for the initial window sizing */
		g_signal_handlers_disconnect_by_func
			(job->image,
			 G_CALLBACK (eom_window_obtain_desired_size),
			 window);
	}

	action_save = gtk_action_group_get_action (priv->actions_image, "ImageSave");
	action_undo = gtk_action_group_get_action (priv->actions_image, "EditUndo");

	/* Set Save and Undo sensitive according to image state.
	 * Respect lockdown in case of Save.*/
	gtk_action_set_sensitive (action_save, (!priv->save_disabled && eom_image_is_modified (job->image)));
	gtk_action_set_sensitive (action_undo, eom_image_is_modified (job->image));

	/* NOTE(review): presumably balances a reference taken when the job
	 * was created — verify against eom_job_load_new. */
	g_object_unref (job->image);
}
static void
eom_window_clear_transform_job (EomWindow *window)
{
	/* Cancels and releases any pending transform job so a new one can
	 * be scheduled. Safe to call when no job is outstanding. */
	EomWindowPrivate *priv = window->priv;

	if (priv->transform_job == NULL)
		return;

	/* Jobs still waiting in the queue must be withdrawn first */
	if (!priv->transform_job->finished)
		eom_job_queue_remove_job (priv->transform_job);

	g_signal_handlers_disconnect_by_func (priv->transform_job,
					      eom_job_transform_cb,
					      window);
	g_object_unref (priv->transform_job);
	priv->transform_job = NULL;
}
static void
eom_job_transform_cb (EomJobTransform *job, gpointer data)
{
	/* "finished" handler for a transform job: tears the job down and
	 * refreshes Save/Undo sensitivity from the image's modified flag. */
	EomWindow *window;
	EomWindowPrivate *priv;
	GtkAction *save_action;
	GtkAction *undo_action;
	EomImage *current;
	gboolean modified;

	g_return_if_fail (EOM_IS_WINDOW (data));

	window = EOM_WINDOW (data);
	priv = window->priv;

	eom_window_clear_transform_job (window);

	save_action = gtk_action_group_get_action (priv->actions_image,
						   "ImageSave");
	undo_action = gtk_action_group_get_action (priv->actions_image,
						   "EditUndo");

	current = eom_window_get_image (window);
	modified = eom_image_is_modified (current);

	gtk_action_set_sensitive (undo_action, modified);

	/* Save stays insensitive when saving is locked down */
	if (!priv->save_disabled)
		gtk_action_set_sensitive (save_action, modified);
}
static void
apply_transformation (EomWindow *window, EomTransform *trans)
{
	/* Queues a transform job applying 'trans' to all currently selected
	 * images; completion is handled by eom_job_transform_cb. */
	EomWindowPrivate *priv;
	GList *selection;

	g_return_if_fail (EOM_IS_WINDOW (window));

	priv = window->priv;

	selection = eom_thumb_view_get_selected_images (EOM_THUMB_VIEW (priv->thumbview));

	/* Only one transform job may be in flight at a time */
	eom_window_clear_transform_job (window);

	priv->transform_job = eom_job_transform_new (selection, trans);

	g_signal_connect (priv->transform_job, "finished",
			  G_CALLBACK (eom_job_transform_cb),
			  window);
	g_signal_connect (priv->transform_job, "progress",
			  G_CALLBACK (eom_job_progress_cb),
			  window);

	eom_job_queue_add_job (priv->transform_job);
}
static void
handle_image_selection_changed_cb (EomThumbView *thumbview, EomWindow *window)
{
	/* Reacts to a new thumbnail selection: shows the image at once if
	 * its pixel data is already loaded, otherwise queues an EomJobLoad
	 * whose completion is handled by eom_job_load_cb. */
	EomWindowPrivate *priv;
	EomImage *image;
	gchar *status_message;
	gchar *str_image;

	priv = window->priv;

	if (eom_list_store_length (EOM_LIST_STORE (priv->store)) == 0) {
		/* Store is empty: reset title, statusbar and view */
		gtk_window_set_title (GTK_WINDOW (window),
				      g_get_application_name());
		gtk_statusbar_remove_all (GTK_STATUSBAR (priv->statusbar),
					  priv->image_info_message_cid);
		eom_scroll_view_set_image (EOM_SCROLL_VIEW (priv->view),
					   NULL);
	}
	if (eom_thumb_view_get_n_selected (EOM_THUMB_VIEW (priv->thumbview)) == 0)
		return;

	update_selection_ui_visibility (window);

	/* NOTE(review): this appears to return a new reference (compare the
	 * g_object_unref in eom_window_cmd_file_open). Below it is either
	 * stored in priv->image or handed to the load job — verify that no
	 * reference leaks on the early-return paths. */
	image = eom_thumb_view_get_first_selected_image (EOM_THUMB_VIEW (priv->thumbview));

	g_assert (EOM_IS_IMAGE (image));

	eom_window_clear_load_job (window);

	eom_window_set_message_area (window, NULL);

	gtk_statusbar_pop (GTK_STATUSBAR (priv->statusbar),
			   priv->image_info_message_cid);

	if (image == priv->image) {
		/* Same image re-selected: nothing to load */
		update_status_bar (window);
		return;
	}

	if (eom_image_has_data (image, EOM_IMAGE_DATA_IMAGE)) {
		/* Pixels already in memory: swap the image in directly,
		 * transferring the reference into priv->image */
		if (priv->image != NULL)
			g_object_unref (priv->image);
		priv->image = image;
		eom_window_display_image (window, image);
		return;
	}

	if (priv->status == EOM_WINDOW_STATUS_INIT) {
		/* First image: let its natural size drive the initial
		 * window geometry (disconnected again in eom_job_load_cb) */
		g_signal_connect (image,
				  "size-prepared",
				  G_CALLBACK (eom_window_obtain_desired_size),
				  window);
	}

	priv->load_job = eom_job_load_new (image, EOM_IMAGE_DATA_ALL);

	g_signal_connect (priv->load_job,
			  "finished",
			  G_CALLBACK (eom_job_load_cb),
			  window);

	g_signal_connect (priv->load_job,
			  "progress",
			  G_CALLBACK (eom_job_progress_cb),
			  window);

	eom_job_queue_add_job (priv->load_job);

	/* Transient "Opening image ..." message; popped when the job ends */
	str_image = eom_image_get_uri_for_display (image);

	status_message = g_strdup_printf (_("Opening image \"%s\""),
					  str_image);

	g_free (str_image);

	gtk_statusbar_push (GTK_STATUSBAR (priv->statusbar),
			    priv->image_info_message_cid, status_message);

	g_free (status_message);
}
static void
view_zoom_changed_cb (GtkWidget *widget, double zoom, gpointer user_data)
{
	/* Keeps the statusbar and the zoom actions in sync with the view's
	 * current zoom level. */
	EomWindow *window;
	GtkAction *zoom_in_action;
	GtkAction *zoom_out_action;
	EomScrollView *scroll_view;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	window = EOM_WINDOW (user_data);
	scroll_view = EOM_SCROLL_VIEW (window->priv->view);

	update_status_bar (window);

	zoom_in_action = gtk_action_group_get_action (window->priv->actions_image,
						      "ViewZoomIn");
	zoom_out_action = gtk_action_group_get_action (window->priv->actions_image,
						       "ViewZoomOut");

	/* Gray an action out once the zoom limit in its direction is hit */
	gtk_action_set_sensitive (zoom_in_action,
				  !eom_scroll_view_get_zoom_is_max (scroll_view));
	gtk_action_set_sensitive (zoom_out_action,
				  !eom_scroll_view_get_zoom_is_min (scroll_view));
}
static void
eom_window_open_recent_cb (GtkAction *action, EomWindow *window)
{
	/* Opens the URI attached to a recent-files menu entry. The URI
	 * list and its strings are owned here and released with
	 * g_slist_free_full (replaces the foreach+free pair — same
	 * behavior, standard idiom since GLib 2.28). */
	GtkRecentInfo *info;
	const gchar *uri;
	GSList *list = NULL;

	info = g_object_get_data (G_OBJECT (action), "gtk-recent-info");
	g_return_if_fail (info != NULL);

	uri = gtk_recent_info_get_uri (info);
	list = g_slist_prepend (list, g_strdup (uri));

	eom_application_open_uri_list (EOM_APP,
				       list,
				       GDK_CURRENT_TIME,
				       0,
				       NULL);

	g_slist_free_full (list, g_free);
}
static void
file_open_dialog_response_cb (GtkWidget *chooser,
			      gint response_id,
			      EomWindow *ev_window)
{
	/* Response handler for the Open file chooser: on OK hands the
	 * selected URIs to the application, then destroys the dialog
	 * either way. */
	if (response_id == GTK_RESPONSE_OK) {
		GSList *uris;

		uris = gtk_file_chooser_get_uris (GTK_FILE_CHOOSER (chooser));

		eom_application_open_uri_list (EOM_APP,
					       uris,
					       GDK_CURRENT_TIME,
					       0,
					       NULL);

		/* g_slist_free_full replaces the foreach+free pair —
		 * same behavior, standard idiom since GLib 2.28 */
		g_slist_free_full (uris, g_free);
	}

	gtk_widget_destroy (chooser);
}
static void
eom_window_update_fullscreen_action (EomWindow *window)
{
	/* Syncs the ViewFullscreen toggle with the current window mode
	 * without re-triggering its activate handler. */
	GtkAction *action;
	gboolean active;

	action = gtk_action_group_get_action (window->priv->actions_image,
					      "ViewFullscreen");
	active = (window->priv->mode == EOM_WINDOW_MODE_FULLSCREEN);

	g_signal_handlers_block_by_func (action,
					 G_CALLBACK (eom_window_cmd_fullscreen),
					 window);

	gtk_toggle_action_set_active (GTK_TOGGLE_ACTION (action), active);

	g_signal_handlers_unblock_by_func (action,
					   G_CALLBACK (eom_window_cmd_fullscreen),
					   window);
}
static void
eom_window_update_slideshow_action (EomWindow *window)
{
	/* Syncs the ViewSlideshow toggle with the current window mode
	 * without re-triggering its activate handler. */
	GtkAction *action;
	gboolean active;

	action = gtk_action_group_get_action (window->priv->actions_collection,
					      "ViewSlideshow");
	active = (window->priv->mode == EOM_WINDOW_MODE_SLIDESHOW);

	g_signal_handlers_block_by_func (action,
					 G_CALLBACK (eom_window_cmd_slideshow),
					 window);

	gtk_toggle_action_set_active (GTK_TOGGLE_ACTION (action), active);

	g_signal_handlers_unblock_by_func (action,
					   G_CALLBACK (eom_window_cmd_slideshow),
					   window);
}
static void
eom_window_update_pause_slideshow_action (EomWindow *window)
{
	/* Syncs the PauseSlideshow toggle: it reads as "paused", i.e.
	 * active whenever the window is NOT in slideshow mode. */
	GtkAction *action;
	gboolean paused;

	action = gtk_action_group_get_action (window->priv->actions_image,
					      "PauseSlideshow");
	paused = (window->priv->mode != EOM_WINDOW_MODE_SLIDESHOW);

	g_signal_handlers_block_by_func (action,
					 G_CALLBACK (eom_window_cmd_pause_slideshow),
					 window);

	gtk_toggle_action_set_active (GTK_TOGGLE_ACTION (action), paused);

	g_signal_handlers_unblock_by_func (action,
					   G_CALLBACK (eom_window_cmd_pause_slideshow),
					   window);
}
static void
eom_window_update_fullscreen_popup (EomWindow *window)
{
	/* Stretches the fullscreen controls popup across the top edge of
	 * the monitor the window currently occupies. */
	GtkWidget *popup = window->priv->fullscreen_popup;
	GdkWindow *gdk_window;
	GdkScreen *screen;
	GdkRectangle rect;

	g_return_if_fail (popup != NULL);

	gdk_window = gtk_widget_get_window (GTK_WIDGET (window));
	if (gdk_window == NULL)
		return;

	screen = gtk_widget_get_screen (GTK_WIDGET (window));

	gdk_screen_get_monitor_geometry (screen,
					 gdk_screen_get_monitor_at_window (screen, gdk_window),
					 &rect);

	gtk_widget_set_size_request (popup, rect.width, -1);
	gtk_window_move (GTK_WINDOW (popup), rect.x, rect.y);
}
static void
screen_size_changed_cb (GdkScreen *screen, EomWindow *window)
{
	/* Monitor geometry changed: re-fit the fullscreen popup */
	eom_window_update_fullscreen_popup (window);
}
static void
fullscreen_popup_size_request_cb (GtkWidget *popup,
				  GtkRequisition *requisition,
				  EomWindow *window)
{
	/* Popup asked for a new size: re-fit it to the monitor */
	eom_window_update_fullscreen_popup (window);
}
static gboolean
fullscreen_timeout_cb (gpointer data)
{
	/* Fullscreen inactivity timeout: hide the controls popup and the
	 * mouse cursor, then drop the (one-shot) timeout source. */
	EomWindow *window = EOM_WINDOW (data);

	gtk_widget_hide (window->priv->fullscreen_popup);
	eom_scroll_view_hide_cursor (EOM_SCROLL_VIEW (window->priv->view));
	fullscreen_clear_timeout (window);

	return FALSE;	/* do not fire again */
}
static gboolean
slideshow_is_loop_end (EomWindow *window)
{
	/* Returns TRUE when the currently selected image is the last one
	 * in the store, i.e. a non-looping slideshow should stop. */
	EomWindowPrivate *priv = window->priv;
	EomImage *image = NULL;
	gint pos;

	image = eom_thumb_view_get_first_selected_image (EOM_THUMB_VIEW (priv->thumbview));

	pos = eom_list_store_get_pos_by_image (priv->store, image);

	/* Fix: eom_thumb_view_get_first_selected_image returns a new
	 * reference (other call sites unref it, e.g. eom_window_cmd_file_open);
	 * it was previously leaked once per slideshow step. */
	if (image != NULL)
		g_object_unref (image);

	return (pos == (eom_list_store_length (priv->store) - 1));
}
static gboolean
slideshow_switch_cb (gpointer data)
{
	/* Slideshow auto-advance timeout: picks the next image (random or
	 * sequential) or ends the show at the last image when looping is
	 * off. Returning FALSE removes the timeout source. */
	EomWindow *window = EOM_WINDOW (data);
	EomWindowPrivate *priv = window->priv;

	eom_debug (DEBUG_WINDOW);

	if (priv->slideshow_random) {
		/* Random mode never terminates on its own */
		eom_thumb_view_select_single (EOM_THUMB_VIEW (priv->thumbview),
					      EOM_THUMB_VIEW_SELECT_RANDOM);
	} else if (!priv->slideshow_loop && slideshow_is_loop_end (window)) {
		eom_window_stop_fullscreen (window, TRUE);
		return FALSE;	/* drop the timeout */
	} else {
		eom_thumb_view_select_single (EOM_THUMB_VIEW (priv->thumbview),
					      EOM_THUMB_VIEW_SELECT_RIGHT);
	}

	return TRUE;
}
static void
fullscreen_clear_timeout (EomWindow *window)
{
	/* Removes the fullscreen auto-hide timeout, if armed. */
	eom_debug (DEBUG_WINDOW);

	if (window->priv->fullscreen_timeout_source != NULL) {
		/* Fix: destroy (detach from the context) before dropping
		 * our own reference. The previous unref-then-destroy order
		 * only worked because g_source_attach's context reference
		 * kept the GSource alive for the destroy call. */
		g_source_destroy (window->priv->fullscreen_timeout_source);
		g_source_unref (window->priv->fullscreen_timeout_source);
	}

	window->priv->fullscreen_timeout_source = NULL;
}
static void
fullscreen_set_timeout (EomWindow *window)
{
	/* (Re)arms the fullscreen auto-hide countdown and makes the mouse
	 * cursor visible while the controls are shown. */
	GSource *timeout;

	eom_debug (DEBUG_WINDOW);

	/* Restart the countdown from scratch */
	fullscreen_clear_timeout (window);

	timeout = g_timeout_source_new (EOM_WINDOW_FULLSCREEN_TIMEOUT);
	g_source_set_callback (timeout, fullscreen_timeout_cb, window, NULL);
	g_source_attach (timeout, NULL);

	window->priv->fullscreen_timeout_source = timeout;

	eom_scroll_view_show_cursor (EOM_SCROLL_VIEW (window->priv->view));
}
static void
slideshow_clear_timeout (EomWindow *window)
{
	/* Removes the slideshow auto-advance timeout, if armed. */
	eom_debug (DEBUG_WINDOW);

	if (window->priv->slideshow_switch_source != NULL) {
		/* Fix: destroy (detach from the context) before dropping
		 * our own reference. The previous unref-then-destroy order
		 * only worked because g_source_attach's context reference
		 * kept the GSource alive for the destroy call. */
		g_source_destroy (window->priv->slideshow_switch_source);
		g_source_unref (window->priv->slideshow_switch_source);
	}

	window->priv->slideshow_switch_source = NULL;
}
static void
slideshow_set_timeout (EomWindow *window)
{
	/* Arms the slideshow auto-advance timeout from the user-configured
	 * interval. A non-positive interval means manual advancing only. */
	GSource *timeout;
	gint seconds;

	eom_debug (DEBUG_WINDOW);

	slideshow_clear_timeout (window);

	seconds = window->priv->slideshow_switch_timeout;
	if (seconds <= 0)
		return;

	timeout = g_timeout_source_new (seconds * 1000);
	g_source_set_callback (timeout, slideshow_switch_cb, window, NULL);
	g_source_attach (timeout, NULL);

	window->priv->slideshow_switch_source = timeout;
}
static void
show_fullscreen_popup (EomWindow *window)
{
	/* Makes the fullscreen controls popup visible and (re)arms its
	 * auto-hide countdown. */
	GtkWidget *popup = window->priv->fullscreen_popup;

	eom_debug (DEBUG_WINDOW);

	if (!gtk_widget_get_visible (popup))
		gtk_widget_show_all (GTK_WIDGET (popup));

	fullscreen_set_timeout (window);
}
static gboolean
fullscreen_motion_notify_cb (GtkWidget *widget,
			     GdkEventMotion *event,
			     gpointer user_data)
{
	/* Pointer movement in fullscreen: reveal the controls popup near
	 * the top edge, otherwise just restart the auto-hide countdown. */
	EomWindow *window = EOM_WINDOW (user_data);
	gboolean near_top_edge;

	eom_debug (DEBUG_WINDOW);

	near_top_edge = (event->y < EOM_WINDOW_FULLSCREEN_POPUP_THRESHOLD);

	if (near_top_edge)
		show_fullscreen_popup (window);
	else
		fullscreen_set_timeout (window);

	return FALSE;	/* propagate to other handlers */
}
static gboolean
fullscreen_leave_notify_cb (GtkWidget *widget,
			    GdkEventCrossing *event,
			    gpointer user_data)
{
	/* Pointer left the widget: stop the auto-hide countdown so the
	 * popup does not vanish while the pointer is over it. */
	EomWindow *window = EOM_WINDOW (user_data);

	eom_debug (DEBUG_WINDOW);

	fullscreen_clear_timeout (window);

	return FALSE;	/* propagate to other handlers */
}
static void
exit_fullscreen_button_clicked_cb (GtkWidget *button, EomWindow *window)
{
	/* "Leave Fullscreen" button: deactivate the toggle action that
	 * entered the current mode so the normal exit path runs. */
	GtkAction *action;
	gboolean in_slideshow;

	eom_debug (DEBUG_WINDOW);

	in_slideshow = (window->priv->mode == EOM_WINDOW_MODE_SLIDESHOW);

	action = in_slideshow
		? gtk_action_group_get_action (window->priv->actions_collection,
					       "ViewSlideshow")
		: gtk_action_group_get_action (window->priv->actions_image,
					       "ViewFullscreen");

	g_return_if_fail (action != NULL);

	gtk_toggle_action_set_active (GTK_TOGGLE_ACTION (action), FALSE);
}
static GtkWidget *
eom_window_get_exit_fullscreen_button (EomWindow *window)
{
	/* Builds the "Leave Fullscreen" stock button used in the
	 * fullscreen controls popup. */
	GtkWidget *button = gtk_button_new_from_stock (GTK_STOCK_LEAVE_FULLSCREEN);

	g_signal_connect (button, "clicked",
			  G_CALLBACK (exit_fullscreen_button_clicked_cb),
			  window);

	return button;
}
static GtkWidget *
eom_window_create_fullscreen_popup (EomWindow *window)
{
	/* Builds the floating controls popup shown along the top of the
	 * screen in fullscreen/slideshow mode: the "/FullscreenToolbar"
	 * UI-manager toolbar plus a "Leave Fullscreen" button. Created
	 * lazily once; positioned by eom_window_update_fullscreen_popup. */
	GtkWidget *popup;
	GtkWidget *hbox;
	GtkWidget *button;
	GtkWidget *toolbar;
	GdkScreen *screen;

	eom_debug (DEBUG_WINDOW);

	popup = gtk_window_new (GTK_WINDOW_POPUP);

	hbox = gtk_hbox_new (FALSE, 0);
	gtk_container_add (GTK_CONTAINER (popup), hbox);

	toolbar = gtk_ui_manager_get_widget (window->priv->ui_mgr,
					     "/FullscreenToolbar");
	g_assert (GTK_IS_WIDGET (toolbar));
	gtk_toolbar_set_style (GTK_TOOLBAR (toolbar), GTK_TOOLBAR_ICONS);
	gtk_box_pack_start (GTK_BOX (hbox), toolbar, TRUE, TRUE, 0);

	button = eom_window_get_exit_fullscreen_button (window);
	gtk_box_pack_start (GTK_BOX (hbox), button, FALSE, FALSE, 0);

	gtk_window_set_resizable (GTK_WINDOW (popup), FALSE);

	screen = gtk_widget_get_screen (GTK_WIDGET (window));

	/* Keep the popup fitted to the monitor if its geometry changes */
	g_signal_connect_object (screen, "size-changed",
				 G_CALLBACK (screen_size_changed_cb),
				 window, 0);

	g_signal_connect_object (popup, "size_request",
				 G_CALLBACK (fullscreen_popup_size_request_cb),
				 window, 0);

	/* Pointer entering the popup pauses the auto-hide countdown
	 * (reuses the leave-notify handler, which clears the timeout) */
	g_signal_connect (popup,
			  "enter-notify-event",
			  G_CALLBACK (fullscreen_leave_notify_cb),
			  window);

	gtk_window_set_screen (GTK_WINDOW (popup), screen);

	return popup;
}
static void
update_ui_visibility (EomWindow *window)
{
	/* Applies the persisted UI preferences (toolbar, statusbar, image
	 * collection pane, sidebar) to both the widgets and their menu
	 * toggle actions, forcing the chrome hidden while in fullscreen
	 * or slideshow mode. */
	EomWindowPrivate *priv;
	GtkAction *action;
	GtkWidget *menubar;

	gboolean fullscreen_mode, visible;

	g_return_if_fail (EOM_IS_WINDOW (window));

	eom_debug (DEBUG_WINDOW);

	priv = window->priv;

	fullscreen_mode = priv->mode == EOM_WINDOW_MODE_FULLSCREEN ||
			  priv->mode == EOM_WINDOW_MODE_SLIDESHOW;

	menubar = gtk_ui_manager_get_widget (priv->ui_mgr, "/MainMenu");
	g_assert (GTK_IS_WIDGET (menubar));

	/* Toolbar: user preference, but never visible in fullscreen */
	visible = g_settings_get_boolean (priv->ui_settings, EOM_CONF_UI_TOOLBAR);
	visible = visible && !fullscreen_mode;
	action = gtk_ui_manager_get_action (priv->ui_mgr, "/MainMenu/View/ToolbarToggle");
	g_assert (action != NULL);
	gtk_toggle_action_set_active (GTK_TOGGLE_ACTION (action), visible);
	g_object_set (G_OBJECT (priv->toolbar), "visible", visible, NULL);

	/* Statusbar: same rule as the toolbar */
	visible = g_settings_get_boolean (priv->ui_settings, EOM_CONF_UI_STATUSBAR);
	visible = visible && !fullscreen_mode;
	action = gtk_ui_manager_get_action (priv->ui_mgr, "/MainMenu/View/StatusbarToggle");
	g_assert (action != NULL);
	gtk_toggle_action_set_active (GTK_TOGGLE_ACTION (action), visible);
	g_object_set (G_OBJECT (priv->statusbar), "visible", visible, NULL);

	if (priv->status != EOM_WINDOW_STATUS_INIT) {
		/* Collection pane: hidden during slideshows only; not
		 * touched at all while the window is still initializing */
		visible = g_settings_get_boolean (priv->ui_settings, EOM_CONF_UI_IMAGE_COLLECTION);
		visible = visible && priv->mode != EOM_WINDOW_MODE_SLIDESHOW;
		action = gtk_ui_manager_get_action (priv->ui_mgr, "/MainMenu/View/ImageCollectionToggle");
		g_assert (action != NULL);
		gtk_toggle_action_set_active (GTK_TOGGLE_ACTION (action), visible);
		if (visible) {
			gtk_widget_show (priv->nav);
		} else {
			gtk_widget_hide (priv->nav);
		}
	}

	/* Sidebar: user preference, but never visible in fullscreen */
	visible = g_settings_get_boolean (priv->ui_settings, EOM_CONF_UI_SIDEBAR);
	visible = visible && !fullscreen_mode;
	action = gtk_ui_manager_get_action (priv->ui_mgr, "/MainMenu/View/SidebarToggle");
	g_assert (action != NULL);
	gtk_toggle_action_set_active (GTK_TOGGLE_ACTION (action), visible);
	if (visible) {
		gtk_widget_show (priv->sidebar);
	} else {
		gtk_widget_hide (priv->sidebar);
	}

	/* The fullscreen popup is re-shown on demand by pointer motion */
	if (priv->fullscreen_popup != NULL) {
		gtk_widget_hide (priv->fullscreen_popup);
	}
}
static void
eom_window_run_fullscreen (EomWindow *window, gboolean slideshow)
{
	/* Switches the window into fullscreen or slideshow mode: hides the
	 * regular chrome, installs the floating controls popup, arms the
	 * auto-hide (and, for slideshows, auto-advance) timeouts, paints
	 * the view background black and inhibits the screensaver. */
#if GTK_CHECK_VERSION (3, 0, 0)
	static const GdkRGBA black = { 0., 0., 0., 1.};
#endif
	EomWindowPrivate *priv;
	GtkWidget *menubar;
	gboolean upscale;

	eom_debug (DEBUG_WINDOW);

	priv = window->priv;

	if (slideshow) {
		priv->mode = EOM_WINDOW_MODE_SLIDESHOW;
	} else {
		/* Stop the timer if we come from slideshowing */
		if (priv->mode == EOM_WINDOW_MODE_SLIDESHOW)
			slideshow_clear_timeout (window);

		priv->mode = EOM_WINDOW_MODE_FULLSCREEN;
	}

	/* The controls popup is created lazily on first use */
	if (window->priv->fullscreen_popup == NULL)
		priv->fullscreen_popup
			= eom_window_create_fullscreen_popup (window);

	update_ui_visibility (window);

	menubar = gtk_ui_manager_get_widget (priv->ui_mgr, "/MainMenu");
	g_assert (GTK_IS_WIDGET (menubar));
	gtk_widget_hide (menubar);

	/* Pointer activity over the view/thumbview drives showing and
	 * hiding of the controls popup */
	g_signal_connect (priv->view,
			  "motion-notify-event",
			  G_CALLBACK (fullscreen_motion_notify_cb),
			  window);

	g_signal_connect (priv->view,
			  "leave-notify-event",
			  G_CALLBACK (fullscreen_leave_notify_cb),
			  window);

	g_signal_connect (priv->thumbview,
			  "motion-notify-event",
			  G_CALLBACK (fullscreen_motion_notify_cb),
			  window);

	g_signal_connect (priv->thumbview,
			  "leave-notify-event",
			  G_CALLBACK (fullscreen_leave_notify_cb),
			  window);

	fullscreen_set_timeout (window);

	if (slideshow) {
		/* Pick up the user's slideshow preferences */
		priv->slideshow_random =
			g_settings_get_boolean (priv->fullscreen_settings,
						EOM_CONF_FULLSCREEN_RANDOM);

		priv->slideshow_loop =
			g_settings_get_boolean (priv->fullscreen_settings,
						EOM_CONF_FULLSCREEN_LOOP);

		priv->slideshow_switch_timeout =
			g_settings_get_int (priv->fullscreen_settings,
					    EOM_CONF_FULLSCREEN_SECONDS);

		slideshow_set_timeout (window);
	}

	upscale = g_settings_get_boolean (priv->fullscreen_settings,
					  EOM_CONF_FULLSCREEN_UPSCALE);

	eom_scroll_view_set_zoom_upscale (EOM_SCROLL_VIEW (priv->view),
					  upscale);

	gtk_widget_grab_focus (priv->view);

	/* Black background: GTK3 takes a GdkRGBA, GTK2 the style's black */
	eom_scroll_view_override_bg_color (EOM_SCROLL_VIEW (window->priv->view),
#if GTK_CHECK_VERSION (3, 0, 0)
					   &black);
#else
					   &(gtk_widget_get_style (GTK_WIDGET (window))->black));
#endif
#if !GTK_CHECK_VERSION (3, 0, 0)
	{
		GtkStyle *style;

		/* Remove the frame border around the view (GTK2 only);
		 * restored in eom_window_stop_fullscreen */
		style = gtk_style_copy (gtk_widget_get_style (gtk_widget_get_parent (priv->view)));

		style->xthickness = 0;
		style->ythickness = 0;

		gtk_widget_set_style (gtk_widget_get_parent (priv->view),
				      style);

		g_object_unref (style);
	}
#endif

	gtk_window_fullscreen (GTK_WINDOW (window));
	eom_window_update_fullscreen_popup (window);

#ifdef HAVE_DBUS
	eom_application_screensaver_disable (EOM_APP);
#endif

	/* Update both actions as we could've already been in one those modes */
	eom_window_update_slideshow_action (window);
	eom_window_update_fullscreen_action (window);
	eom_window_update_pause_slideshow_action (window);
}
static void
eom_window_stop_fullscreen (EomWindow *window, gboolean slideshow)
{
	/* Leaves fullscreen/slideshow mode, undoing everything
	 * eom_window_run_fullscreen set up: timeouts, pointer-activity
	 * handlers, hidden chrome, background override and screensaver
	 * inhibition. No-op in normal mode. */
	EomWindowPrivate *priv;
	GtkWidget *menubar;

	eom_debug (DEBUG_WINDOW);

	priv = window->priv;

	if (priv->mode != EOM_WINDOW_MODE_SLIDESHOW &&
	    priv->mode != EOM_WINDOW_MODE_FULLSCREEN) return;

	priv->mode = EOM_WINDOW_MODE_NORMAL;

	fullscreen_clear_timeout (window);

	if (slideshow) {
		slideshow_clear_timeout (window);
	}

	g_signal_handlers_disconnect_by_func (priv->view,
					      (gpointer) fullscreen_motion_notify_cb,
					      window);

	g_signal_handlers_disconnect_by_func (priv->view,
					      (gpointer) fullscreen_leave_notify_cb,
					      window);

	g_signal_handlers_disconnect_by_func (priv->thumbview,
					      (gpointer) fullscreen_motion_notify_cb,
					      window);

	g_signal_handlers_disconnect_by_func (priv->thumbview,
					      (gpointer) fullscreen_leave_notify_cb,
					      window);

	update_ui_visibility (window);

	menubar = gtk_ui_manager_get_widget (priv->ui_mgr, "/MainMenu");
	g_assert (GTK_IS_WIDGET (menubar));
	gtk_widget_show (menubar);

	eom_scroll_view_set_zoom_upscale (EOM_SCROLL_VIEW (priv->view), FALSE);

	/* NULL restores the theme's default background */
	eom_scroll_view_override_bg_color (EOM_SCROLL_VIEW (window->priv->view),
					   NULL);
#if !GTK_CHECK_VERSION (3, 0, 0)
	/* Restore the frame border removed for fullscreen (GTK2 only) */
	gtk_widget_set_style (gtk_widget_get_parent (window->priv->view), NULL);
#endif
	gtk_window_unfullscreen (GTK_WINDOW (window));

	if (slideshow) {
		eom_window_update_slideshow_action (window);
	} else {
		eom_window_update_fullscreen_action (window);
	}

	eom_scroll_view_show_cursor (EOM_SCROLL_VIEW (priv->view));

#ifdef HAVE_DBUS
	eom_application_screensaver_enable (EOM_APP);
#endif
}
static void
eom_window_print (EomWindow *window)
{
	/* Runs the GTK print dialog for the current image, shows any
	 * resulting error, and remembers the print settings / page setup
	 * the user chose for subsequent print runs. */
	GtkWidget *dialog;
	GError *error = NULL;
	GtkPrintOperation *print;
	GtkPrintOperationResult res;
	GtkPageSetup *page_setup;
	GtkPrintSettings *print_settings;
	gboolean page_setup_disabled = FALSE;

	eom_debug (DEBUG_PRINTING);

	print_settings = eom_print_get_print_settings ();

	/* Make sure the window stays valid while printing */
	g_object_ref (window);

	if (window->priv->page_setup !=NULL)
		page_setup = g_object_ref (window->priv->page_setup);
	else
		page_setup = NULL;

	print = eom_print_operation_new (window->priv->image,
					 print_settings,
					 page_setup);

	// Disable page setup options if they are locked down
	/* NOTE(review): despite the "CAN_SETUP_PAGE" macro name, the logic
	 * below treats a TRUE value as "setup disabled" — presumably the
	 * macro is bound to a lockdown "disable-..." GSettings key; verify
	 * against eom-config-keys.h. */
	page_setup_disabled = g_settings_get_boolean (window->priv->lockdown_settings,
						      EOM_CONF_LOCKDOWN_CAN_SETUP_PAGE);
	if (page_setup_disabled)
		gtk_print_operation_set_embed_page_setup (print, FALSE);

	res = gtk_print_operation_run (print,
				       GTK_PRINT_OPERATION_ACTION_PRINT_DIALOG,
				       GTK_WINDOW (window), &error);

	if (res == GTK_PRINT_OPERATION_RESULT_ERROR) {
		/* Report the failure in a self-destroying message dialog */
		dialog = gtk_message_dialog_new (GTK_WINDOW (window),
						 GTK_DIALOG_DESTROY_WITH_PARENT,
						 GTK_MESSAGE_ERROR,
						 GTK_BUTTONS_CLOSE,
						 _("Error printing file:\n%s"),
						 error->message);
		g_signal_connect (dialog, "response",
				  G_CALLBACK (gtk_widget_destroy), NULL);
		gtk_widget_show (dialog);
		g_error_free (error);
	} else if (res == GTK_PRINT_OPERATION_RESULT_APPLY) {
		/* Persist the user's choices for the next print run */
		GtkPageSetup *new_page_setup;
		eom_print_set_print_settings (gtk_print_operation_get_print_settings (print));
		new_page_setup = gtk_print_operation_get_default_page_setup (print);
		if (window->priv->page_setup != NULL)
			g_object_unref (window->priv->page_setup);
		window->priv->page_setup = g_object_ref (new_page_setup);
	}

	if (page_setup != NULL)
		g_object_unref (page_setup);
	g_object_unref (print_settings);
	g_object_unref (window);
}
static void
eom_window_cmd_file_open (GtkAction *action, gpointer user_data)
{
	/* "Image -> Open": shows a file chooser starting in the current
	 * image's folder, or falling back to the XDG Pictures directory;
	 * file_open_dialog_response_cb performs the actual opening. */
	EomWindow *window;
	EomWindowPrivate *priv;
	EomImage *current;
	GtkWidget *dlg;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	window = EOM_WINDOW (user_data);

	priv = window->priv;

	dlg = eom_file_chooser_new (GTK_FILE_CHOOSER_ACTION_OPEN);

	current = eom_thumb_view_get_first_selected_image (EOM_THUMB_VIEW (priv->thumbview));

	if (current != NULL) {
		gchar *dir_uri, *file_uri;

		file_uri = eom_image_get_uri_for_display (current);
		/* NOTE(review): g_path_get_dirname is applied to a URI
		 * string here; this works because URIs use '/' separators,
		 * but a URI-aware helper would be cleaner — verify. */
		dir_uri = g_path_get_dirname (file_uri);

		gtk_file_chooser_set_current_folder_uri (GTK_FILE_CHOOSER (dlg),
							 dir_uri);
		g_free (file_uri);
		g_free (dir_uri);
		/* Release the reference returned by
		 * eom_thumb_view_get_first_selected_image */
		g_object_unref (current);
	} else {
		/* If desired by the user,
		   fallback to the XDG_PICTURES_DIR (if available) */
		const gchar *pics_dir;
		gboolean use_fallback;

		use_fallback = g_settings_get_boolean (priv->ui_settings,
						       EOM_CONF_UI_FILECHOOSER_XDG_FALLBACK);
		pics_dir = g_get_user_special_dir (G_USER_DIRECTORY_PICTURES);
		if (use_fallback && pics_dir) {
			gtk_file_chooser_set_current_folder (GTK_FILE_CHOOSER (dlg),
							     pics_dir);
		}
	}

	g_signal_connect (dlg, "response",
			  G_CALLBACK (file_open_dialog_response_cb),
			  window);

	gtk_widget_show_all (dlg);
}
static void
eom_job_close_save_cb (EomJobSave *job, gpointer user_data)
{
	/* "finished" handler for the save-before-close job: once saving is
	 * done, disconnect this one-shot handler and close the window. */
	EomWindow *window = EOM_WINDOW (user_data);

	g_signal_handlers_disconnect_by_func (job,
					      (gpointer) eom_job_close_save_cb,
					      window);

	gtk_widget_destroy (GTK_WIDGET (window));
}
static void
close_confirmation_dialog_response_handler (EomCloseConfirmationDialog *dlg,
					    gint response_id,
					    EomWindow *window)
{
	/* Response handler for the "unsaved images" dialog raised from
	 * eom_window_unsaved_images_confirm: YES saves and then closes,
	 * NO closes without saving, anything else cancels. */
	GList *selected_images;
	EomWindowPrivate *priv;

	priv = window->priv;

	switch (response_id)
	{
		case GTK_RESPONSE_YES:
			/* save selected images */
			selected_images = eom_close_confirmation_dialog_get_selected_images (dlg);
			eom_close_confirmation_dialog_set_sensitive (dlg, FALSE);
			/* The window is destroyed only after the save job
			 * finishes (see eom_job_close_save_cb) */
			if (eom_window_save_images (window, selected_images)) {
				g_signal_connect (priv->save_job,
						  "finished",
						  G_CALLBACK (eom_job_close_save_cb),
						  window);

				eom_job_queue_add_job (priv->save_job);
			}

			break;

		case GTK_RESPONSE_NO:
			/* dont save */
			gtk_widget_destroy (GTK_WIDGET (window));
			break;

		default:
			/* Cancel */
			gtk_widget_destroy (GTK_WIDGET (dlg));
			break;
	}
}
static gboolean
eom_window_unsaved_images_confirm (EomWindow *window)
{
	/* Collects all modified images from the store and, unless the
	 * confirmation is disabled (preference or save lockdown), raises
	 * the close-confirmation dialog. Returns TRUE when a dialog was
	 * shown (i.e. the caller must NOT destroy the window itself). */
	EomWindowPrivate *priv;
	gboolean disabled;
	GtkWidget *dialog;
	GList *list;
	EomImage *image;
	GtkTreeIter iter;

	priv = window->priv;

	disabled = g_settings_get_boolean(priv->ui_settings,
					EOM_CONF_UI_DISABLE_CLOSE_CONFIRMATION);
	disabled |= window->priv->save_disabled;

	if (disabled) {
		return FALSE;
	}

	list = NULL;
	if (gtk_tree_model_get_iter_first (GTK_TREE_MODEL (priv->store), &iter)) {
		do {
			gtk_tree_model_get (GTK_TREE_MODEL (priv->store), &iter,
					    EOM_LIST_STORE_EOM_IMAGE, &image,
					    -1);
			if (!image)
				continue;

			if (eom_image_is_modified (image)) {
				/* The reference from gtk_tree_model_get is
				 * transferred into the list */
				list = g_list_prepend (list, image);
			} else {
				/* Fix: gtk_tree_model_get returns a new
				 * reference for G_TYPE_OBJECT columns; it
				 * was previously leaked for unmodified
				 * images. */
				g_object_unref (image);
			}
		} while (gtk_tree_model_iter_next (GTK_TREE_MODEL (priv->store), &iter));
	}

	if (list) {
		list = g_list_reverse (list);

		dialog = eom_close_confirmation_dialog_new (GTK_WINDOW (window),
							    list);
		/* NOTE(review): the images in 'list' still carry the
		 * reference taken above; confirm whether the dialog adopts
		 * them. Only the list container is freed here, as before. */
		g_list_free (list);

		g_signal_connect (dialog,
				  "response",
				  G_CALLBACK (close_confirmation_dialog_response_handler),
				  window);
		gtk_window_set_destroy_with_parent (GTK_WINDOW (dialog), TRUE);

		gtk_widget_show (dialog);
		return TRUE;
	}
	return FALSE;
}
static void
eom_window_cmd_close_window (GtkAction *action, gpointer user_data)
{
	/* "Image -> Close": finish any save in progress, then destroy the
	 * window unless an unsaved-changes dialog took over (in that case
	 * the dialog's response handler closes the window). */
	EomWindow *window;
	EomWindowPrivate *priv;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	window = EOM_WINDOW (user_data);
	priv = window->priv;

	if (priv->save_job != NULL)
		eom_window_finish_saving (window);

	if (!eom_window_unsaved_images_confirm (window))
		gtk_widget_destroy (GTK_WIDGET (window));
}
static void
eom_window_cmd_preferences (GtkAction *action, gpointer user_data)
{
	/* "Edit -> Preferences": shows the (singleton) preferences dialog
	 * parented on this window. */
	EomWindow *window;
	GObject *dialog;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	window = EOM_WINDOW (user_data);

	dialog = eom_preferences_dialog_get_instance (GTK_WINDOW (window));
	eom_dialog_show (EOM_DIALOG (dialog));
}
/* Custom response code for the toolbar editor's "Reset to Default" button */
#define EOM_TB_EDITOR_DLG_RESET_RESPONSE 128

static void
eom_window_cmd_edit_toolbar_cb (GtkDialog *dialog, gint response, gpointer data)
{
	/* Response handler for the toolbar editor dialog created in
	 * eom_window_cmd_edit_toolbar: handles reset-to-default, help,
	 * and (default) saving the edited layout and closing. */
	EomWindow *window = EOM_WINDOW (data);

	if (response == EOM_TB_EDITOR_DLG_RESET_RESPONSE) {
		EggToolbarsModel *model;
		EggToolbarEditor *editor;

		/* Stashed on the dialog by eom_window_cmd_edit_toolbar */
		editor = g_object_get_data (G_OBJECT (dialog),
					    "EggToolbarEditor");

		g_return_if_fail (editor != NULL);

		/* Leave edit mode while the model is being swapped out */
		egg_editable_toolbar_set_edit_mode
			(EGG_EDITABLE_TOOLBAR (window->priv->toolbar), FALSE);

		eom_application_reset_toolbars_model (EOM_APP);
		model = eom_application_get_toolbars_model (EOM_APP);

		egg_editable_toolbar_set_model
			(EGG_EDITABLE_TOOLBAR (window->priv->toolbar), model);

		egg_toolbar_editor_set_model (editor, model);

		/* Toolbar would be uneditable now otherwise */
		egg_editable_toolbar_set_edit_mode
			(EGG_EDITABLE_TOOLBAR (window->priv->toolbar), TRUE);
	} else if (response == GTK_RESPONSE_HELP) {
		eom_util_show_help ("eom-toolbareditor", NULL);
	} else {
		/* Close: persist the edited layout and tear the dialog down */
		egg_editable_toolbar_set_edit_mode
			(EGG_EDITABLE_TOOLBAR (window->priv->toolbar), FALSE);

		eom_application_save_toolbars_model (EOM_APP);

		gtk_widget_destroy (GTK_WIDGET (dialog));
	}
}
static void
eom_window_cmd_edit_toolbar (GtkAction *action, gpointer user_data)
{
	/* "Edit -> Toolbar": shows the toolbar editor dialog and puts the
	 * window's toolbar into edit mode; eom_window_cmd_edit_toolbar_cb
	 * handles the dialog's responses.
	 *
	 * Fix: the parameter was declared 'gpointer *user_data', which
	 * does not match the activate-callback signature every other
	 * command handler in this file uses ('gpointer user_data'); it
	 * only worked through the G_CALLBACK cast. */
	EomWindow *window;
	GtkWidget *dialog;
	GtkWidget *editor;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	window = EOM_WINDOW (user_data);

	dialog = gtk_dialog_new_with_buttons (_("Toolbar Editor"),
					      GTK_WINDOW (window),
					      GTK_DIALOG_DESTROY_WITH_PARENT,
					      _("_Reset to Default"),
					      EOM_TB_EDITOR_DLG_RESET_RESPONSE,
					      GTK_STOCK_CLOSE,
					      GTK_RESPONSE_CLOSE,
					      GTK_STOCK_HELP,
					      GTK_RESPONSE_HELP,
					      NULL);

	gtk_dialog_set_default_response (GTK_DIALOG (dialog),
					 GTK_RESPONSE_CLOSE);

	gtk_container_set_border_width (GTK_CONTAINER (dialog), 5);

	gtk_box_set_spacing (GTK_BOX (gtk_dialog_get_content_area (GTK_DIALOG (dialog))), 2);

	gtk_window_set_default_size (GTK_WINDOW (dialog), 500, 400);

	editor = egg_toolbar_editor_new (window->priv->ui_mgr,
					 eom_application_get_toolbars_model (EOM_APP));

	gtk_container_set_border_width (GTK_CONTAINER (editor), 5);

#if GTK_CHECK_VERSION (3, 0, 0)
	// Use as much vertical space as available
	gtk_widget_set_vexpand (GTK_WIDGET (editor), TRUE);
#endif

	gtk_box_set_spacing (GTK_BOX (EGG_TOOLBAR_EDITOR (editor)), 5);

	gtk_container_add (GTK_CONTAINER (gtk_dialog_get_content_area (GTK_DIALOG (dialog))), editor);

	egg_editable_toolbar_set_edit_mode
		(EGG_EDITABLE_TOOLBAR (window->priv->toolbar), TRUE);

	/* Stashed so the reset-response handler can retrieve the editor */
	g_object_set_data (G_OBJECT (dialog), "EggToolbarEditor", editor);

	g_signal_connect (dialog,
			  "response",
			  G_CALLBACK (eom_window_cmd_edit_toolbar_cb),
			  window);

	gtk_widget_show_all (dialog);
}
static void
eom_window_cmd_help (GtkAction *action, gpointer user_data)
{
	/* "Help -> Contents": open the manual's front page (NULL section),
	 * parented on this window. */
	EomWindow *window;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	window = EOM_WINDOW (user_data);
	eom_util_show_help (NULL, GTK_WINDOW (window));
}
static void
eom_window_cmd_about (GtkAction *action, gpointer user_data)
{
	/* "Help -> About": shows the standard MATE about dialog with the
	 * credits, license and translator information below. */
	EomWindow *window;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	/* Author credits (UTF-8 byte escapes for non-ASCII names) */
	static const char *authors[] = {
		"Perberos <perberos@gmail.com>",
		"Steve Zesch <stevezesch2@gmail.com>",
		"Stefano Karapetsas <stefano@karapetsas.com>",
		"",
		"Claudio Saavedra <csaavedra@igalia.com> (maintainer)",
		"Felix Riemann <friemann@gnome.org> (maintainer)",
		"",
		"Lucas Rocha <lucasr@gnome.org>",
		"Tim Gerla <tim+matebugs@gerla.net>",
		"Philip Van Hoof <pvanhoof@gnome.org>",
		"Paolo Borelli <pborelli@katamail.com>",
		"Jens Finke <jens@triq.net>",
		"Martin Baulig <martin@home-of-linux.org>",
		"Arik Devens <arik@gnome.org>",
		"Michael Meeks <mmeeks@gnu.org>",
		"Federico Mena-Quintero <federico@gnu.org>",
		"Lutz M\xc3\xbcller <urc8@rz.uni-karlsruhe.de>",
		NULL
	};

	static const char *documenters[] = {
		"Eliot Landrum <eliot@landrum.cx>",
		"Federico Mena-Quintero <federico@gnu.org>",
		"Sun GNOME Documentation Team <gdocteam@sun.com>",
		NULL
	};

	/* The translation teams credit themselves via this msgid */
	const char *translators;

	translators = _("translator-credits");

	/* GPLv2-or-later notice, split for translation */
	const char *license[] = {
		N_("This program is free software; you can redistribute it and/or modify "
		   "it under the terms of the GNU General Public License as published by "
		   "the Free Software Foundation; either version 2 of the License, or "
		   "(at your option) any later version.\n"),
		N_("This program is distributed in the hope that it will be useful, "
		   "but WITHOUT ANY WARRANTY; without even the implied warranty of "
		   "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the "
		   "GNU General Public License for more details.\n"),
		N_("You should have received a copy of the GNU General Public License "
		   "along with this program; if not, write to the Free Software "
		   "Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.")
	};

	char *license_trans;

	license_trans = g_strconcat (_(license[0]), "\n", _(license[1]), "\n",
				     _(license[2]), "\n", NULL);

	window = EOM_WINDOW (user_data);

	mate_show_about_dialog (GTK_WINDOW (window),
				"program-name", _("Eye of MATE"),
				"version", VERSION,
				"copyright", "Copyright \xc2\xa9 2000-2010 Free Software Foundation, Inc.\n"
				"Copyright \xc2\xa9 2011 Perberos\n"
				"Copyright \xc2\xa9 2012-2014 MATE developers",
				"comments",_("The MATE image viewer."),
				"authors", authors,
				"documenters", documenters,
				"translator-credits", translators,
				"website", "http://www.mate-desktop.org/",
				"logo-icon-name", "eom",
				"wrap-license", TRUE,
				"license", license_trans,
				NULL);

	g_free (license_trans);
}
/* Toggle-action callback shared by "ViewToolbar", "ViewStatusbar",
 * "ViewImageCollection" and "ViewSidebar": shows/hides the matching UI
 * element and persists the new state in GSettings. */
static void
eom_window_cmd_show_hide_bar (GtkAction *action, gpointer user_data)
{
	EomWindow *window;
	EomWindowPrivate *priv;
	gboolean visible;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	window = EOM_WINDOW (user_data);
	priv = window->priv;

	/* Ignore toggles outside of normal/fullscreen mode (e.g. slideshow). */
	if (priv->mode != EOM_WINDOW_MODE_NORMAL &&
	    priv->mode != EOM_WINDOW_MODE_FULLSCREEN) return;

	visible = gtk_toggle_action_get_active (GTK_TOGGLE_ACTION (action));

	if (g_ascii_strcasecmp (gtk_action_get_name (action), "ViewToolbar") == 0) {
		g_object_set (G_OBJECT (priv->toolbar), "visible", visible, NULL);

		/* Only persist the choice made in windowed mode. */
		if (priv->mode == EOM_WINDOW_MODE_NORMAL)
			g_settings_set_boolean (priv->ui_settings, EOM_CONF_UI_TOOLBAR, visible);

	} else if (g_ascii_strcasecmp (gtk_action_get_name (action), "ViewStatusbar") == 0) {
		g_object_set (G_OBJECT (priv->statusbar), "visible", visible, NULL);

		/* Only persist the choice made in windowed mode. */
		if (priv->mode == EOM_WINDOW_MODE_NORMAL)
			g_settings_set_boolean (priv->ui_settings, EOM_CONF_UI_STATUSBAR, visible);

	} else if (g_ascii_strcasecmp (gtk_action_get_name (action), "ViewImageCollection") == 0) {
		if (visible) {
			/* Make sure the focus widget is realized to
			 * avoid warnings on keypress events */
			if (!gtk_widget_get_realized (window->priv->thumbview))
				gtk_widget_realize (window->priv->thumbview);

			gtk_widget_show (priv->nav);
			gtk_widget_grab_focus (priv->thumbview);
		} else {
			/* Make sure the focus widget is realized to
			 * avoid warnings on keypress events.
			 * Don't do it during init phase or the view
			 * will get a bogus allocation. */
			if (!gtk_widget_get_realized (priv->view)
			    && priv->status == EOM_WINDOW_STATUS_NORMAL)
				gtk_widget_realize (priv->view);

			gtk_widget_hide (priv->nav);

			if (gtk_widget_get_realized (priv->view))
				gtk_widget_grab_focus (priv->view);
		}
		g_settings_set_boolean (priv->ui_settings, EOM_CONF_UI_IMAGE_COLLECTION, visible);

	} else if (g_ascii_strcasecmp (gtk_action_get_name (action), "ViewSidebar") == 0) {
		if (visible) {
			gtk_widget_show (priv->sidebar);
		} else {
			gtk_widget_hide (priv->sidebar);
		}
		g_settings_set_boolean (priv->ui_settings, EOM_CONF_UI_SIDEBAR, visible);
	}
}
/* Response handler for the wallpaper info bar: on "Yes" launch the
 * appearance preferences on the window's screen, then dismiss the
 * message area for any response. */
static void
wallpaper_info_bar_response (GtkInfoBar *bar, gint response, EomWindow *window)
{
	if (response == GTK_RESPONSE_YES) {
		GdkScreen *screen = gtk_widget_get_screen (GTK_WIDGET (window));

		mate_gdk_spawn_command_line_on_screen (screen,
						       "mate-appearance-properties"
						       " --show-page=background",
						       NULL);
	}

	/* Close message area on every response */
	eom_window_set_message_area (window, NULL);
}
/*
 * eom_window_set_wallpaper:
 * @window: the window to attach the notification info bar to
 * @filename: local path of the image to set as desktop background
 * @visible_filename: name to show to the user, or %NULL to fall back to
 *                    the basename of @filename
 *
 * Stores @filename as the desktop background in the background GSettings
 * schema and shows an info bar offering to open the appearance
 * preferences.
 *
 * Fix: @basename was left uninitialized when @visible_filename was
 * non-NULL; it is now initialized to NULL so the cleanup path is always
 * well-defined (g_free(NULL) is a no-op).
 */
static void
eom_window_set_wallpaper (EomWindow *window, const gchar *filename, const gchar *visible_filename)
{
	GtkWidget *info_bar;
	GtkWidget *image;
	GtkWidget *label;
	GtkWidget *hbox;
	gchar *markup;
	gchar *text;
	gchar *basename = NULL;
	GSettings *wallpaper_settings;

	wallpaper_settings = g_settings_new (EOM_CONF_BACKGROUND_SCHEMA);
	g_settings_set_string (wallpaper_settings,
			       EOM_CONF_BACKGROUND_FILE,
			       filename);
	g_object_unref (wallpaper_settings);

	/* I18N: When setting mnemonics for these strings, watch out to not
	   clash with mnemonics from eom's menubar */
	info_bar = gtk_info_bar_new_with_buttons (_("_Open Background Preferences"),
						  GTK_RESPONSE_YES,
						  C_("MessageArea","Hi_de"),
						  GTK_RESPONSE_NO, NULL);
	gtk_info_bar_set_message_type (GTK_INFO_BAR (info_bar),
				       GTK_MESSAGE_QUESTION);

	image = gtk_image_new_from_stock (GTK_STOCK_DIALOG_QUESTION,
					  GTK_ICON_SIZE_DIALOG);
	label = gtk_label_new (NULL);

	/* Only compute the basename when we actually need it for display. */
	if (!visible_filename)
		basename = g_path_get_basename (filename);

	/* The newline character is currently necessary due to a problem
	 * with the automatic line break. */
	text = g_strdup_printf (_("The image \"%s\" has been set as Desktop Background."
				  "\nWould you like to modify its appearance?"),
				visible_filename ? visible_filename : basename);
	markup = g_markup_printf_escaped ("<b>%s</b>", text);
	gtk_label_set_markup (GTK_LABEL (label), markup);
	g_free (markup);
	g_free (text);
	/* g_free() ignores NULL, so no visible_filename check is needed. */
	g_free (basename);

	hbox = gtk_hbox_new (FALSE, 8);
	gtk_box_pack_start (GTK_BOX (hbox), image, FALSE, FALSE, 0);
#if GTK_CHECK_VERSION (3, 14, 0)
	gtk_widget_set_valign (image, GTK_ALIGN_START);
	gtk_box_pack_start (GTK_BOX (hbox), label, TRUE, TRUE, 0);
	gtk_widget_set_halign (label, GTK_ALIGN_START);
#else
	gtk_misc_set_alignment (GTK_MISC (image), 0.5, 0);
	gtk_box_pack_start (GTK_BOX (hbox), label, TRUE, TRUE, 0);
	gtk_misc_set_alignment (GTK_MISC (label), 0, 0.5);
#endif
	gtk_box_pack_start (GTK_BOX (gtk_info_bar_get_content_area (GTK_INFO_BAR (info_bar))), hbox, TRUE, TRUE, 0);
	gtk_widget_show_all (hbox);
	gtk_widget_show (info_bar);

	eom_window_set_message_area (window, info_bar);
	gtk_info_bar_set_default_response (GTK_INFO_BAR (info_bar),
					   GTK_RESPONSE_YES);
	g_signal_connect (info_bar, "response",
			  G_CALLBACK (wallpaper_info_bar_response), window);
}
/* "finished" handler for the save job: disconnect from the job, drop
 * the window's reference to it, refresh the status bar and disable the
 * now-pointless "ImageSave" action. */
static void
eom_job_save_cb (EomJobSave *job, gpointer user_data)
{
	EomWindow *window = EOM_WINDOW (user_data);
	EomWindowPrivate *priv = window->priv;
	GtkAction *save_action;

	/* Stop listening to the finished job before releasing it. */
	g_signal_handlers_disconnect_by_func (job,
					      eom_job_save_cb,
					      window);
	g_signal_handlers_disconnect_by_func (job,
					      eom_job_save_progress_cb,
					      window);

	g_object_unref (priv->save_job);
	priv->save_job = NULL;

	update_status_bar (window);

	save_action = gtk_action_group_get_action (priv->actions_image,
						   "ImageSave");
	gtk_action_set_sensitive (save_action, FALSE);
}
/* "finished" handler for the copy job started by
 * eom_window_cmd_wallpaper(): renames the freshly copied file to the
 * canonical wallpaper filename, sets it as desktop background, clears
 * the status-bar message, re-enables the action and releases the job. */
static void
eom_job_copy_cb (EomJobCopy *job, gpointer user_data)
{
	EomWindow *window = EOM_WINDOW (user_data);
	gchar *filepath, *basename, *filename, *extension;
	GtkAction *action;
	GFile *source_file, *dest_file;

	/* Create source GFile */
	/* NOTE(review): assumes the copy job placed the file under
	 * job->dest using the image's original basename — confirm against
	 * EomJobCopy. */
	basename = g_file_get_basename (job->images->data);
	filepath = g_build_filename (job->dest, basename, NULL);
	source_file = g_file_new_for_path (filepath);
	g_free (filepath);

	/* Create destination GFile */
	extension = eom_util_filename_get_extension (basename);
	filename = g_strdup_printf ("%s.%s", EOM_WALLPAPER_FILENAME, extension);
	filepath = g_build_filename (job->dest, filename, NULL);
	dest_file = g_file_new_for_path (filepath);
	g_free (filename);
	g_free (extension);

	/* Move the file */
	g_file_move (source_file, dest_file, G_FILE_COPY_OVERWRITE,
		     NULL, NULL, NULL, NULL);

	/* Set the wallpaper; show the original basename to the user, not
	 * the canonical wallpaper filename. */
	eom_window_set_wallpaper (window, filepath, basename);
	g_free (basename);
	g_free (filepath);

	gtk_statusbar_pop (GTK_STATUSBAR (window->priv->statusbar),
			   window->priv->copy_file_cid);

	action = gtk_action_group_get_action (window->priv->actions_image,
					      "ImageSetAsWallpaper");
	gtk_action_set_sensitive (action, TRUE);

	window->priv->copy_job = NULL;

	g_object_unref (source_file);
	g_object_unref (dest_file);
	g_object_unref (G_OBJECT (job->images->data));
	g_list_free (job->images);
	g_object_unref (job);
}
/* Create a save job for @images and hook up its signals.
 * Returns FALSE if a save job is already running (the job then owns
 * nothing and nothing is queued); the caller queues the job itself. */
static gboolean
eom_window_save_images (EomWindow *window, GList *images)
{
	EomWindowPrivate *priv = window->priv;

	/* Only one save job may be active at a time. */
	if (priv->save_job != NULL)
		return FALSE;

	priv->save_job = eom_job_save_new (images);

	g_signal_connect (priv->save_job, "finished",
			  G_CALLBACK (eom_job_save_cb), window);
	g_signal_connect (priv->save_job, "progress",
			  G_CALLBACK (eom_job_save_progress_cb), window);

	return TRUE;
}
/* "ImageSave" action callback: save all currently selected images. */
static void
eom_window_cmd_save (GtkAction *action, gpointer user_data)
{
	EomWindow *window = EOM_WINDOW (user_data);
	EomWindowPrivate *priv = window->priv;
	GList *images;

	/* Bail out if a save is already in progress. */
	if (priv->save_job != NULL)
		return;

	images = eom_thumb_view_get_selected_images (EOM_THUMB_VIEW (priv->thumbview));

	if (eom_window_save_images (window, images))
		eom_job_queue_add_job (priv->save_job);
}
/* Run a "Save As" file chooser for @image.
 * Returns the chosen destination as a #GFile (caller owns the
 * reference), or NULL if the dialog was cancelled. The chosen parent
 * folder is remembered in priv->last_save_as_folder for the next run. */
static GFile*
eom_window_retrieve_save_as_file (EomWindow *window, EomImage *image)
{
	GtkWidget *dialog;
	GFile *save_file = NULL;
	GFile *last_dest_folder;
	gint response;

	g_assert (image != NULL);

	dialog = eom_file_chooser_new (GTK_FILE_CHOOSER_ACTION_SAVE);

	last_dest_folder = window->priv->last_save_as_folder;

	if (last_dest_folder && g_file_query_exists (last_dest_folder, NULL)) {
		/* Reuse the previous destination folder and suggest the
		 * image's caption as the file name. */
		gtk_file_chooser_set_current_folder_file (GTK_FILE_CHOOSER (dialog), last_dest_folder, NULL);
		gtk_file_chooser_set_current_name (GTK_FILE_CHOOSER (dialog),
						   eom_image_get_caption (image));
	} else {
		GFile *image_file;

		image_file = eom_image_get_file (image);
		/* Setting the file will also navigate to its parent folder */
		gtk_file_chooser_set_file (GTK_FILE_CHOOSER (dialog),
					   image_file, NULL);
		g_object_unref (image_file);
	}

	response = gtk_dialog_run (GTK_DIALOG (dialog));
	gtk_widget_hide (dialog);

	if (response == GTK_RESPONSE_OK) {
		save_file = gtk_file_chooser_get_file (GTK_FILE_CHOOSER (dialog));

		/* Remember the parent folder for subsequent "Save As" runs. */
		if (window->priv->last_save_as_folder)
			g_object_unref (window->priv->last_save_as_folder);

		window->priv->last_save_as_folder = g_file_get_parent (save_file);
	}

	gtk_widget_destroy (dialog);

	return save_file;
}
/* "ImageSaveAs" action callback: save the selected images under new
 * names. A single image gets a plain file chooser; multiple images go
 * through the save-as dialog with a filename-pattern converter. */
static void
eom_window_cmd_save_as (GtkAction *action, gpointer user_data)
{
	EomWindowPrivate *priv;
	EomWindow *window;
	GList *images;
	guint n_images;

	window = EOM_WINDOW (user_data);
	priv = window->priv;

	/* Bail out if a save is already in progress. */
	if (window->priv->save_job != NULL)
		return;

	/* NOTE(review): the images list appears to be handed over to the
	 * save job on the success paths — confirm ownership against
	 * eom_job_save_as_new(). */
	images = eom_thumb_view_get_selected_images (EOM_THUMB_VIEW (priv->thumbview));
	n_images = g_list_length (images);

	if (n_images == 1) {
		GFile *file;

		file = eom_window_retrieve_save_as_file (window, images->data);

		if (!file) {
			/* User cancelled the chooser: free the list and stop. */
			g_list_free (images);
			return;
		}

		priv->save_job = eom_job_save_as_new (images, NULL, file);

		g_object_unref (file);
	} else if (n_images > 1) {
		GFile *base_file;
		GtkWidget *dialog;
		gchar *basedir;
		EomURIConverter *converter;

		basedir = g_get_current_dir ();
		base_file = g_file_new_for_path (basedir);
		g_free (basedir);

		dialog = eom_save_as_dialog_new (GTK_WINDOW (window),
						 images,
						 base_file);

		gtk_widget_show_all (dialog);

		if (gtk_dialog_run (GTK_DIALOG (dialog)) != GTK_RESPONSE_OK) {
			/* Cancelled: release everything created above. */
			g_object_unref (base_file);
			g_list_free (images);
			gtk_widget_destroy (dialog);
			return;
		}

		converter = eom_save_as_dialog_get_converter (dialog);

		g_assert (converter != NULL);

		priv->save_job = eom_job_save_as_new (images, converter, NULL);

		gtk_widget_destroy (dialog);
		g_object_unref (converter);
		g_object_unref (base_file);
	} else {
		/* n_images = 0 -- No Image selected */
		return;
	}

	g_signal_connect (priv->save_job,
			  "finished",
			  G_CALLBACK (eom_job_save_cb),
			  window);
	g_signal_connect (priv->save_job,
			  "progress",
			  G_CALLBACK (eom_job_save_progress_cb),
			  window);

	eom_job_queue_add_job (priv->save_job);
}
/* "ImagePrint" action callback: start the print flow for this window. */
static void
eom_window_cmd_print (GtkAction *action, gpointer user_data)
{
	eom_window_print (EOM_WINDOW (user_data));
}
/* "ImageProperties" action callback: lazily create the properties
 * dialog (wired to the Next/Previous collection actions), then show it. */
static void
eom_window_cmd_properties (GtkAction *action, gpointer user_data)
{
	EomWindow *window = EOM_WINDOW (user_data);
	EomWindowPrivate *priv = window->priv;

	if (priv->properties_dlg == NULL) {
		GtkAction *next_action;
		GtkAction *prev_action;

		next_action = gtk_action_group_get_action (priv->actions_collection,
							   "GoNext");
		prev_action = gtk_action_group_get_action (priv->actions_collection,
							   "GoPrevious");

		priv->properties_dlg =
			eom_properties_dialog_new (GTK_WINDOW (window),
						   EOM_THUMB_VIEW (priv->thumbview),
						   next_action,
						   prev_action);

		eom_properties_dialog_update (EOM_PROPERTIES_DIALOG (priv->properties_dlg),
					      priv->image);
		/* Keep the dialog's netbook-mode in sync with the setting. */
		g_settings_bind (priv->ui_settings,
				 EOM_CONF_UI_PROPSDIALOG_NETBOOK_MODE,
				 priv->properties_dlg, "netbook-mode",
				 G_SETTINGS_BIND_GET);
	}

	eom_dialog_show (EOM_DIALOG (priv->properties_dlg));
}
/* "EditUndo" action callback: a NULL transform reverts the last
 * applied transformation. */
static void
eom_window_cmd_undo (GtkAction *action, gpointer user_data)
{
	g_return_if_fail (EOM_IS_WINDOW (user_data));

	apply_transformation (EOM_WINDOW (user_data), NULL);
}
/* "EditFlipHorizontal" action callback: mirror the image left-right. */
static void
eom_window_cmd_flip_horizontal (GtkAction *action, gpointer user_data)
{
	g_return_if_fail (EOM_IS_WINDOW (user_data));

	apply_transformation (
		EOM_WINDOW (user_data),
		eom_transform_flip_new (EOM_TRANSFORM_FLIP_HORIZONTAL));
}
/* "EditFlipVertical" action callback: mirror the image top-bottom. */
static void
eom_window_cmd_flip_vertical (GtkAction *action, gpointer user_data)
{
	g_return_if_fail (EOM_IS_WINDOW (user_data));

	apply_transformation (
		EOM_WINDOW (user_data),
		eom_transform_flip_new (EOM_TRANSFORM_FLIP_VERTICAL));
}
/* "EditRotate90" action callback: rotate 90 degrees clockwise. */
static void
eom_window_cmd_rotate_90 (GtkAction *action, gpointer user_data)
{
	g_return_if_fail (EOM_IS_WINDOW (user_data));

	apply_transformation (EOM_WINDOW (user_data),
			      eom_transform_rotate_new (90));
}
/* "EditRotate270" action callback: rotate 90 degrees counterclockwise
 * (expressed as a 270-degree clockwise rotation). */
static void
eom_window_cmd_rotate_270 (GtkAction *action, gpointer user_data)
{
	g_return_if_fail (EOM_IS_WINDOW (user_data));

	apply_transformation (EOM_WINDOW (user_data),
			      eom_transform_rotate_new (270));
}
/* "ImageSetAsWallpaper" action callback: set the selected image as the
 * desktop background. Non-local/non-persistent files are first copied
 * to the user data dir by an async copy job; eom_job_copy_cb() then
 * finishes the wallpaper setup.
 *
 * Fix: the inner GtkAction variable shadowed the callback's `action`
 * parameter (-Wshadow); renamed to `wallpaper_action`. */
static void
eom_window_cmd_wallpaper (GtkAction *action, gpointer user_data)
{
	EomWindow *window;
	EomWindowPrivate *priv;
	EomImage *image;
	GFile *file;
	char *filename = NULL;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	window = EOM_WINDOW (user_data);
	priv = window->priv;

	/* If currently copying an image to set it as wallpaper, return. */
	if (priv->copy_job != NULL)
		return;

	image = eom_thumb_view_get_first_selected_image (EOM_THUMB_VIEW (priv->thumbview));

	g_return_if_fail (EOM_IS_IMAGE (image));

	file = eom_image_get_file (image);

	filename = g_file_get_path (file);

	/* Currently only local files can be set as wallpaper */
	if (filename == NULL || !eom_util_file_is_persistent (file))
	{
		GList *files = NULL;
		GtkAction *wallpaper_action;

		/* Disable the action while the copy job runs; it is
		 * re-enabled in eom_job_copy_cb(). */
		wallpaper_action = gtk_action_group_get_action (window->priv->actions_image,
								"ImageSetAsWallpaper");
		gtk_action_set_sensitive (wallpaper_action, FALSE);

		priv->copy_file_cid = gtk_statusbar_get_context_id (GTK_STATUSBAR (priv->statusbar),
								    "copy_file_cid");
		gtk_statusbar_push (GTK_STATUSBAR (priv->statusbar),
				    priv->copy_file_cid,
				    _("Saving image locally…"));

		files = g_list_append (files, eom_image_get_file (image));
		priv->copy_job = eom_job_copy_new (files, g_get_user_data_dir ());
		g_signal_connect (priv->copy_job,
				  "finished",
				  G_CALLBACK (eom_job_copy_cb),
				  window);
		g_signal_connect (priv->copy_job,
				  "progress",
				  G_CALLBACK (eom_job_progress_cb),
				  window);
		eom_job_queue_add_job (priv->copy_job);

		g_object_unref (file);
		g_free (filename);
		return;
	}

	g_object_unref (file);

	eom_window_set_wallpaper (window, filename, NULL);

	g_free (filename);
}
/* Return TRUE if every image in @images can be moved to the trash
 * (per G_FILE_ATTRIBUTE_ACCESS_CAN_TRASH); stops at the first image
 * that cannot.
 *
 * Fix: g_file_query_info() can return NULL (e.g. the file vanished or
 * the query failed); previously the result was dereferenced
 * unconditionally. A failed query is now treated as "cannot trash",
 * matching the NULL handling in move_to_trash_real(). */
static gboolean
eom_window_all_images_trasheable (GList *images)
{
	GFile *file;
	GFileInfo *file_info;
	GList *iter;
	EomImage *image;
	gboolean can_trash = TRUE;

	for (iter = images; iter != NULL; iter = g_list_next (iter)) {
		image = (EomImage *) iter->data;
		file = eom_image_get_file (image);
		file_info = g_file_query_info (file,
					       G_FILE_ATTRIBUTE_ACCESS_CAN_TRASH,
					       0, NULL, NULL);

		if (file_info == NULL) {
			/* Query failed: assume the file is not trashable. */
			can_trash = FALSE;
		} else {
			can_trash = g_file_info_get_attribute_boolean (file_info,
								       G_FILE_ATTRIBUTE_ACCESS_CAN_TRASH);
			g_object_unref (file_info);
		}

		g_object_unref (file);

		if (can_trash == FALSE)
			break;
	}

	return can_trash;
}
/* Ask the user to confirm moving/deleting @images.
 * Returns the dialog response (GTK_RESPONSE_OK means proceed). When the
 * trash is available, the question can be suppressed for the rest of
 * the session (static dontaskagain) or permanently via the
 * disable-trash-confirmation setting. */
static int
show_move_to_trash_confirm_dialog (EomWindow *window, GList *images, gboolean can_trash)
{
	GtkWidget *dlg;
	char *prompt;
	int response;
	int n_images;
	EomImage *image;
	/* Session-wide "don't ask again" flag, shared by all windows. */
	static gboolean dontaskagain = FALSE;
	gboolean neverask = FALSE;
	GtkWidget* dontask_cbutton = NULL;

	/* Check if the user never wants to be bugged. */
	neverask = g_settings_get_boolean (window->priv->ui_settings,
					   EOM_CONF_UI_DISABLE_TRASH_CONFIRMATION);

	/* Assume agreement, if the user doesn't want to be
	 * asked and the trash is available */
	if (can_trash && (dontaskagain || neverask))
		return GTK_RESPONSE_OK;

	n_images = g_list_length (images);

	/* Build a prompt matching image count and trash availability. */
	if (n_images == 1) {
		image = EOM_IMAGE (images->data);
		if (can_trash) {
			prompt = g_strdup_printf (_("Are you sure you want to move\n\"%s\" to the trash?"),
						  eom_image_get_caption (image));
		} else {
			prompt = g_strdup_printf (_("A trash for \"%s\" couldn't be found. Do you want to remove "
						    "this image permanently?"), eom_image_get_caption (image));
		}
	} else {
		if (can_trash) {
			prompt = g_strdup_printf (ngettext("Are you sure you want to move\n"
							   "the selected image to the trash?",
							   "Are you sure you want to move\n"
							   "the %d selected images to the trash?", n_images), n_images);
		} else {
			prompt = g_strdup (_("Some of the selected images can't be moved to the trash "
					     "and will be removed permanently. Are you sure you want "
					     "to proceed?"));
		}
	}

	dlg = gtk_message_dialog_new_with_markup (GTK_WINDOW (window),
						  GTK_DIALOG_MODAL | GTK_DIALOG_DESTROY_WITH_PARENT,
						  GTK_MESSAGE_WARNING,
						  GTK_BUTTONS_NONE,
						  "<span weight=\"bold\" size=\"larger\">%s</span>",
						  prompt);
	g_free (prompt);

	gtk_dialog_add_button (GTK_DIALOG (dlg), GTK_STOCK_CANCEL, GTK_RESPONSE_CANCEL);

	if (can_trash) {
		gtk_dialog_add_button (GTK_DIALOG (dlg), _("Move to _Trash"), GTK_RESPONSE_OK);

		/* The "don't ask again" checkbox only makes sense for the
		 * trash case; it is read back below before destruction. */
		dontask_cbutton = gtk_check_button_new_with_mnemonic (_("_Do not ask again during this session"));
		gtk_toggle_button_set_active (GTK_TOGGLE_BUTTON (dontask_cbutton), FALSE);

		gtk_box_pack_end (GTK_BOX (gtk_dialog_get_content_area (GTK_DIALOG (dlg))), dontask_cbutton, TRUE, TRUE, 0);
	} else {
		if (n_images == 1) {
			gtk_dialog_add_button (GTK_DIALOG (dlg), GTK_STOCK_DELETE, GTK_RESPONSE_OK);
		} else {
			gtk_dialog_add_button (GTK_DIALOG (dlg), GTK_STOCK_YES, GTK_RESPONSE_OK);
		}
	}

	gtk_dialog_set_default_response (GTK_DIALOG (dlg), GTK_RESPONSE_OK);
	gtk_window_set_title (GTK_WINDOW (dlg), "");
	gtk_widget_show_all (dlg);

	response = gtk_dialog_run (GTK_DIALOG (dlg));

	/* Only update the property if the user has accepted */
	if (can_trash && response == GTK_RESPONSE_OK)
		dontaskagain = gtk_toggle_button_get_active (GTK_TOGGLE_BUTTON (dontask_cbutton));

	/* The checkbutton is destroyed together with the dialog */
	gtk_widget_destroy (dlg);

	return response;
}
/* Move @image's file to the trash, or delete it permanently if the
 * trash is unavailable. Returns TRUE on success; on failure sets
 * @error and returns FALSE.
 *
 * Fix: the GFile reference leaked on the early return taken when
 * g_file_query_info() fails; it is now released on that path too. */
static gboolean
move_to_trash_real (EomImage *image, GError **error)
{
	GFile *file;
	GFileInfo *file_info;
	gboolean can_trash, result;

	g_return_val_if_fail (EOM_IS_IMAGE (image), FALSE);

	file = eom_image_get_file (image);
	file_info = g_file_query_info (file,
				       G_FILE_ATTRIBUTE_ACCESS_CAN_TRASH,
				       0, NULL, NULL);
	if (file_info == NULL) {
		g_set_error (error,
			     EOM_WINDOW_ERROR,
			     EOM_WINDOW_ERROR_TRASH_NOT_FOUND,
			     _("Couldn't access trash."));
		/* Don't leak the file reference on the error path. */
		g_object_unref (file);
		return FALSE;
	}

	can_trash = g_file_info_get_attribute_boolean (file_info,
						       G_FILE_ATTRIBUTE_ACCESS_CAN_TRASH);
	g_object_unref (file_info);
	if (can_trash)
	{
		result = g_file_trash (file, NULL, NULL);
		if (result == FALSE) {
			g_set_error (error,
				     EOM_WINDOW_ERROR,
				     EOM_WINDOW_ERROR_TRASH_NOT_FOUND,
				     _("Couldn't access trash."));
		}
	} else {
		/* Fall back to permanent deletion (confirmed beforehand by
		 * show_move_to_trash_confirm_dialog()). */
		result = g_file_delete (file, NULL, NULL);
		if (result == FALSE) {
			g_set_error (error,
				     EOM_WINDOW_ERROR,
				     EOM_WINDOW_ERROR_IO,
				     _("Couldn't delete file"));
		}
	}

	g_object_unref (file);

	return result;
}
/* "EditCopyImage" action callback: copy the first selected image to
 * the clipboard via an EomClipboardHandler. */
static void
eom_window_cmd_copy_image (GtkAction *action, gpointer user_data)
{
	EomWindow *window;
	EomImage *image;
	GtkClipboard *clipboard;
	EomClipboardHandler *handler;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	window = EOM_WINDOW (user_data);

	image = eom_thumb_view_get_first_selected_image (
			EOM_THUMB_VIEW (window->priv->thumbview));

	g_return_if_fail (EOM_IS_IMAGE (image));

	clipboard = gtk_clipboard_get (GDK_SELECTION_CLIPBOARD);

	// cbhandler will self-destruct when it's not needed anymore
	handler = eom_clipboard_handler_new (image);
	eom_clipboard_handler_copy_to_clipboard (handler, clipboard);
}
/* "EditMoveToTrash" / "Delete" action callback: move the selected
 * images to the trash (or delete them permanently when no trash is
 * available), after confirmation where required, then reselect the
 * image at the old position.
 *
 * Fix: the per-iteration GError reported by move_to_trash_real() was
 * shown but never freed; it is now released with g_clear_error(). */
static void
eom_window_cmd_move_to_trash (GtkAction *action, gpointer user_data)
{
	GList *images;
	GList *it;
	EomWindowPrivate *priv;
	EomListStore *list;
	int pos;
	EomImage *img;
	EomWindow *window;
	int response;
	int n_images;
	gboolean success;
	gboolean can_trash;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	window = EOM_WINDOW (user_data);
	priv = window->priv;
	list = priv->store;

	n_images = eom_thumb_view_get_n_selected (EOM_THUMB_VIEW (priv->thumbview));

	if (n_images < 1) return;

	/* save position of selected image after the deletion */
	images = eom_thumb_view_get_selected_images (EOM_THUMB_VIEW (priv->thumbview));

	g_assert (images != NULL);

	/* HACK: eom_list_store_get_n_selected return list in reverse order */
	images = g_list_reverse (images);

	can_trash = eom_window_all_images_trasheable (images);

	/* Always confirm for the explicit "Delete" action, and whenever a
	 * permanent delete would be needed. */
	if (g_ascii_strcasecmp (gtk_action_get_name (action), "Delete") == 0 ||
	    can_trash == FALSE) {
		response = show_move_to_trash_confirm_dialog (window, images, can_trash);

		if (response != GTK_RESPONSE_OK) return;
	}

	pos = eom_list_store_get_pos_by_image (list, EOM_IMAGE (images->data));

	/* FIXME: make a nice progress dialog */
	/* Do the work actually. First try to delete the image from the disk. If this
	 * is successfull, remove it from the screen. Otherwise show error dialog.
	 */
	for (it = images; it != NULL; it = it->next) {
		GError *error = NULL;
		EomImage *image;

		image = EOM_IMAGE (it->data);

		success = move_to_trash_real (image, &error);

		if (success) {
			eom_list_store_remove_image (list, image);
		} else {
			char *header;
			GtkWidget *dlg;

			header = g_strdup_printf (_("Error on deleting image %s"),
						  eom_image_get_caption (image));

			dlg = gtk_message_dialog_new (GTK_WINDOW (window),
						      GTK_DIALOG_MODAL | GTK_DIALOG_DESTROY_WITH_PARENT,
						      GTK_MESSAGE_ERROR,
						      GTK_BUTTONS_OK,
						      "%s", header);

			gtk_message_dialog_format_secondary_text (GTK_MESSAGE_DIALOG (dlg),
								  "%s", error->message);

			gtk_dialog_run (GTK_DIALOG (dlg));

			gtk_widget_destroy (dlg);

			g_free (header);
			/* Release the error after showing it (was leaked). */
			g_clear_error (&error);
		}
	}

	/* free list */
	g_list_foreach (images, (GFunc) g_object_unref, NULL);
	g_list_free (images);

	/* select image at previously saved position */
	pos = MIN (pos, eom_list_store_length (list) - 1);

	if (pos >= 0) {
		img = eom_list_store_get_image_by_pos (list, pos);

		eom_thumb_view_set_current_image (EOM_THUMB_VIEW (priv->thumbview),
						  img,
						  TRUE);

		if (img != NULL) {
			g_object_unref (img);
		}
	}
}
/* "ViewFullscreen" toggle callback: enter or leave fullscreen mode
 * (second argument FALSE = no slideshow). */
static void
eom_window_cmd_fullscreen (GtkAction *action, gpointer user_data)
{
	EomWindow *window;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	eom_debug (DEBUG_WINDOW);

	window = EOM_WINDOW (user_data);

	if (gtk_toggle_action_get_active (GTK_TOGGLE_ACTION (action)))
		eom_window_run_fullscreen (window, FALSE);
	else
		eom_window_stop_fullscreen (window, FALSE);
}
/* "ViewSlideshow" toggle callback: start or stop the slideshow
 * (second argument TRUE = slideshow mode). */
static void
eom_window_cmd_slideshow (GtkAction *action, gpointer user_data)
{
	EomWindow *window;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	eom_debug (DEBUG_WINDOW);

	window = EOM_WINDOW (user_data);

	if (gtk_toggle_action_get_active (GTK_TOGGLE_ACTION (action)))
		eom_window_run_fullscreen (window, TRUE);
	else
		eom_window_stop_fullscreen (window, TRUE);
}
/* "PauseSlideshow" toggle callback: pausing drops from slideshow to
 * plain fullscreen; toggling again restarts the slideshow. */
static void
eom_window_cmd_pause_slideshow (GtkAction *action, gpointer user_data)
{
	EomWindow *window;
	gboolean in_slideshow;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	eom_debug (DEBUG_WINDOW);

	window = EOM_WINDOW (user_data);
	in_slideshow = (window->priv->mode == EOM_WINDOW_MODE_SLIDESHOW);

	/* Only meaningful while in slideshow or fullscreen mode. */
	if (!in_slideshow && window->priv->mode != EOM_WINDOW_MODE_FULLSCREEN)
		return;

	eom_window_run_fullscreen (window, !in_slideshow);
}
/* "ViewZoomIn" action callback: enlarge the image by one zoom step. */
static void
eom_window_cmd_zoom_in (GtkAction *action, gpointer user_data)
{
	EomWindow *window;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	eom_debug (DEBUG_WINDOW);

	window = EOM_WINDOW (user_data);

	if (window->priv->view != NULL)
		eom_scroll_view_zoom_in (EOM_SCROLL_VIEW (window->priv->view), FALSE);
}
/* "ViewZoomOut" action callback: shrink the image by one zoom step. */
static void
eom_window_cmd_zoom_out (GtkAction *action, gpointer user_data)
{
	EomWindow *window;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	eom_debug (DEBUG_WINDOW);

	window = EOM_WINDOW (user_data);

	if (window->priv->view != NULL)
		eom_scroll_view_zoom_out (EOM_SCROLL_VIEW (window->priv->view), FALSE);
}
/* "ViewZoomNormal" action callback: reset zoom to 1:1. */
static void
eom_window_cmd_zoom_normal (GtkAction *action, gpointer user_data)
{
	EomWindow *window;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	eom_debug (DEBUG_WINDOW);

	window = EOM_WINDOW (user_data);

	if (window->priv->view != NULL)
		eom_scroll_view_set_zoom (EOM_SCROLL_VIEW (window->priv->view), 1.0);
}
/* "ViewZoomFit" action callback: fit the image to the window. */
static void
eom_window_cmd_zoom_fit (GtkAction *action, gpointer user_data)
{
	EomWindow *window;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	eom_debug (DEBUG_WINDOW);

	window = EOM_WINDOW (user_data);

	if (window->priv->view != NULL)
		eom_scroll_view_zoom_fit (EOM_SCROLL_VIEW (window->priv->view));
}
/* "GoPrevious" action callback: select the previous image. */
static void
eom_window_cmd_go_prev (GtkAction *action, gpointer user_data)
{
	EomWindow *window;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	eom_debug (DEBUG_WINDOW);

	window = EOM_WINDOW (user_data);
	eom_thumb_view_select_single (EOM_THUMB_VIEW (window->priv->thumbview),
				      EOM_THUMB_VIEW_SELECT_LEFT);
}
/* "GoNext" action callback: select the next image. */
static void
eom_window_cmd_go_next (GtkAction *action, gpointer user_data)
{
	EomWindow *window;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	eom_debug (DEBUG_WINDOW);

	window = EOM_WINDOW (user_data);
	eom_thumb_view_select_single (EOM_THUMB_VIEW (window->priv->thumbview),
				      EOM_THUMB_VIEW_SELECT_RIGHT);
}
/* "GoFirst" action callback: select the first image in the collection. */
static void
eom_window_cmd_go_first (GtkAction *action, gpointer user_data)
{
	EomWindow *window;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	eom_debug (DEBUG_WINDOW);

	window = EOM_WINDOW (user_data);
	eom_thumb_view_select_single (EOM_THUMB_VIEW (window->priv->thumbview),
				      EOM_THUMB_VIEW_SELECT_FIRST);
}
/* "GoLast" action callback: select the last image in the collection. */
static void
eom_window_cmd_go_last (GtkAction *action, gpointer user_data)
{
	EomWindow *window;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	eom_debug (DEBUG_WINDOW);

	window = EOM_WINDOW (user_data);
	eom_thumb_view_select_single (EOM_THUMB_VIEW (window->priv->thumbview),
				      EOM_THUMB_VIEW_SELECT_LAST);
}
/* "GoRandom" action callback: select a random image in the collection. */
static void
eom_window_cmd_go_random (GtkAction *action, gpointer user_data)
{
	EomWindow *window;

	g_return_if_fail (EOM_IS_WINDOW (user_data));

	eom_debug (DEBUG_WINDOW);

	window = EOM_WINDOW (user_data);
	eom_thumb_view_select_single (EOM_THUMB_VIEW (window->priv->thumbview),
				      EOM_THUMB_VIEW_SELECT_RANDOM);
}
/* Window-level actions that are always available: top-level menus plus
 * open/close, toolbar editor, preferences, help and about. */
static const GtkActionEntry action_entries_window[] = {
	{ "Image", NULL, N_("_Image") },
	{ "Edit",  NULL, N_("_Edit") },
	{ "View",  NULL, N_("_View") },
	{ "Go",    NULL, N_("_Go") },
	{ "Tools", NULL, N_("_Tools") },
	{ "Help",  NULL, N_("_Help") },

	{ "ImageOpen", GTK_STOCK_OPEN,  N_("_Open…"), "<control>O",
	  N_("Open a file"),
	  G_CALLBACK (eom_window_cmd_file_open) },
	{ "ImageClose", GTK_STOCK_CLOSE, N_("_Close"), "<control>W",
	  N_("Close window"),
	  G_CALLBACK (eom_window_cmd_close_window) },
	{ "EditToolbar", NULL, N_("T_oolbar"), NULL,
	  N_("Edit the application toolbar"),
	  G_CALLBACK (eom_window_cmd_edit_toolbar) },
	{ "EditPreferences", GTK_STOCK_PREFERENCES, N_("Prefere_nces"), NULL,
	  N_("Preferences for Eye of MATE"),
	  G_CALLBACK (eom_window_cmd_preferences) },
	{ "HelpManual", GTK_STOCK_HELP, N_("_Contents"), "F1",
	  N_("Help on this application"),
	  G_CALLBACK (eom_window_cmd_help) },
	{ "HelpAbout", GTK_STOCK_ABOUT, N_("_About"), NULL,
	  N_("About this application"),
	  G_CALLBACK (eom_window_cmd_about) }
};
/* Window-level toggle actions controlling UI element visibility; all
 * route to eom_window_cmd_show_hide_bar(), which dispatches on the
 * action name. The final TRUE is the initial active state. */
static const GtkToggleActionEntry toggle_entries_window[] = {
	{ "ViewToolbar", NULL, N_("_Toolbar"), NULL,
	  N_("Changes the visibility of the toolbar in the current window"),
	  G_CALLBACK (eom_window_cmd_show_hide_bar), TRUE },
	{ "ViewStatusbar", NULL, N_("_Statusbar"), NULL,
	  N_("Changes the visibility of the statusbar in the current window"),
	  G_CALLBACK (eom_window_cmd_show_hide_bar), TRUE },
	{ "ViewImageCollection", "eom-image-collection", N_("_Image Collection"), "F9",
	  N_("Changes the visibility of the image collection pane in the current window"),
	  G_CALLBACK (eom_window_cmd_show_hide_bar), TRUE },
	{ "ViewSidebar", NULL, N_("Side _Pane"), "<control>F9",
	  N_("Changes the visibility of the side pane in the current window"),
	  G_CALLBACK (eom_window_cmd_show_hide_bar), TRUE },
};
/* Actions that require a loaded image (save, print, transform, zoom,
 * wallpaper, trash). The "Control*" entries are alternate key bindings
 * for the zoom actions.
 *
 * Fix: "ControlKpAdd" is a zoom-IN binding (eom_window_cmd_zoom_in) but
 * carried the tooltip "Shrink the image" — corrected to "Enlarge the
 * image" to match the other zoom-in entries. */
static const GtkActionEntry action_entries_image[] = {
	{ "ImageSave", GTK_STOCK_SAVE, N_("_Save"), "<control>s",
	  N_("Save changes in currently selected images"),
	  G_CALLBACK (eom_window_cmd_save) },
	{ "ImageOpenWith", NULL, N_("Open _with"), NULL,
	  N_("Open the selected image with a different application"),
	  NULL},
	{ "ImageSaveAs", GTK_STOCK_SAVE_AS, N_("Save _As…"), "<control><shift>s",
	  N_("Save the selected images with a different name"),
	  G_CALLBACK (eom_window_cmd_save_as) },
	{ "ImagePrint", GTK_STOCK_PRINT, N_("_Print…"), "<control>p",
	  N_("Print the selected image"),
	  G_CALLBACK (eom_window_cmd_print) },
	{ "ImageProperties", GTK_STOCK_PROPERTIES, N_("Prope_rties"), "<alt>Return",
	  N_("Show the properties and metadata of the selected image"),
	  G_CALLBACK (eom_window_cmd_properties) },
	{ "EditUndo", GTK_STOCK_UNDO, N_("_Undo"), "<control>z",
	  N_("Undo the last change in the image"),
	  G_CALLBACK (eom_window_cmd_undo) },
	{ "EditFlipHorizontal", "object-flip-horizontal", N_("Flip _Horizontal"), NULL,
	  N_("Mirror the image horizontally"),
	  G_CALLBACK (eom_window_cmd_flip_horizontal) },
	{ "EditFlipVertical", "object-flip-vertical", N_("Flip _Vertical"), NULL,
	  N_("Mirror the image vertically"),
	  G_CALLBACK (eom_window_cmd_flip_vertical) },
	{ "EditRotate90", "object-rotate-right", N_("_Rotate Clockwise"), "<control>r",
	  N_("Rotate the image 90 degrees to the right"),
	  G_CALLBACK (eom_window_cmd_rotate_90) },
	{ "EditRotate270", "object-rotate-left", N_("Rotate Counterc_lockwise"), "<ctrl><shift>r",
	  N_("Rotate the image 90 degrees to the left"),
	  G_CALLBACK (eom_window_cmd_rotate_270) },
	{ "ImageSetAsWallpaper", NULL, N_("Set as _Desktop Background"),
	  "<control>F8", N_("Set the selected image as the desktop background"),
	  G_CALLBACK (eom_window_cmd_wallpaper) },
	{ "EditMoveToTrash", "user-trash", N_("Move to _Trash"), NULL,
	  N_("Move the selected image to the trash folder"),
	  G_CALLBACK (eom_window_cmd_move_to_trash) },
	{ "EditCopyImage", "edit-copy", N_("_Copy"), "<control>C",
	  N_("Copy the selected image to the clipboard"),
	  G_CALLBACK (eom_window_cmd_copy_image) },
	{ "ViewZoomIn", GTK_STOCK_ZOOM_IN, N_("_Zoom In"), "<control>plus",
	  N_("Enlarge the image"),
	  G_CALLBACK (eom_window_cmd_zoom_in) },
	{ "ViewZoomOut", GTK_STOCK_ZOOM_OUT, N_("Zoom _Out"), "<control>minus",
	  N_("Shrink the image"),
	  G_CALLBACK (eom_window_cmd_zoom_out) },
	{ "ViewZoomNormal", GTK_STOCK_ZOOM_100, N_("_Normal Size"), "<control>0",
	  N_("Show the image at its normal size"),
	  G_CALLBACK (eom_window_cmd_zoom_normal) },
	{ "ViewZoomFit", GTK_STOCK_ZOOM_FIT, N_("_Best Fit"), "F",
	  N_("Fit the image to the window"),
	  G_CALLBACK (eom_window_cmd_zoom_fit) },
	{ "ControlEqual", GTK_STOCK_ZOOM_IN, N_("_Zoom In"), "<control>equal",
	  N_("Enlarge the image"),
	  G_CALLBACK (eom_window_cmd_zoom_in) },
	{ "ControlKpAdd", GTK_STOCK_ZOOM_IN, N_("_Zoom In"), "<control>KP_Add",
	  N_("Enlarge the image"),
	  G_CALLBACK (eom_window_cmd_zoom_in) },
	{ "ControlKpSub", GTK_STOCK_ZOOM_OUT, N_("Zoom _Out"), "<control>KP_Subtract",
	  N_("Shrink the image"),
	  G_CALLBACK (eom_window_cmd_zoom_out) },
	{ "Delete", NULL, N_("Move to _Trash"), "Delete",
	  NULL,
	  G_CALLBACK (eom_window_cmd_move_to_trash) },
};
/* Toggle actions that require a loaded image: fullscreen mode and
 * slideshow pause. The final FALSE is the initial active state. */
static const GtkToggleActionEntry toggle_entries_image[] = {
	{ "ViewFullscreen", GTK_STOCK_FULLSCREEN, N_("_Fullscreen"), "F11",
	  N_("Show the current image in fullscreen mode"),
	  G_CALLBACK (eom_window_cmd_fullscreen), FALSE },
	{ "PauseSlideshow", "media-playback-pause", N_("Pause Slideshow"),
	  NULL, N_("Pause or resume the slideshow"),
	  G_CALLBACK (eom_window_cmd_pause_slideshow), FALSE },
};
/* Actions that only make sense when more than one image is loaded
 * (collection navigation). The "BackSpace"/"Home"/"End" entries have a
 * NULL tooltip: they exist only to bind extra accelerators to the same
 * callbacks as the corresponding Go* actions. */
static const GtkActionEntry action_entries_collection[] = {
	{ "GoPrevious", GTK_STOCK_GO_BACK, N_("_Previous Image"), "<Alt>Left",
	  N_("Go to the previous image of the collection"),
	  G_CALLBACK (eom_window_cmd_go_prev) },
	{ "GoNext", GTK_STOCK_GO_FORWARD, N_("_Next Image"), "<Alt>Right",
	  N_("Go to the next image of the collection"),
	  G_CALLBACK (eom_window_cmd_go_next) },
	{ "GoFirst", GTK_STOCK_GOTO_FIRST, N_("_First Image"), "<Alt>Home",
	  N_("Go to the first image of the collection"),
	  G_CALLBACK (eom_window_cmd_go_first) },
	{ "GoLast", GTK_STOCK_GOTO_LAST, N_("_Last Image"), "<Alt>End",
	  N_("Go to the last image of the collection"),
	  G_CALLBACK (eom_window_cmd_go_last) },
	{ "GoRandom", NULL, N_("_Random Image"), "<control>M",
	  N_("Go to a random image of the collection"),
	  G_CALLBACK (eom_window_cmd_go_random) },
	{ "BackSpace", NULL, N_("_Previous Image"), "BackSpace",
	  NULL,
	  G_CALLBACK (eom_window_cmd_go_prev) },
	{ "Home", NULL, N_("_First Image"), "Home",
	  NULL,
	  G_CALLBACK (eom_window_cmd_go_first) },
	{ "End", NULL, N_("_Last Image"), "End",
	  NULL,
	  G_CALLBACK (eom_window_cmd_go_last) },
};
/* Collection-mode toggle: starting/stopping the slideshow view. */
static const GtkToggleActionEntry toggle_entries_collection[] = {
	{ "ViewSlideshow", "slideshow-play", N_("S_lideshow"), "F5",
	  N_("Start a slideshow view of the images"),
	  G_CALLBACK (eom_window_cmd_slideshow), FALSE },
};
/* "select" handler for menu-item proxies: pushes the related action's
 * tooltip onto the statusbar while the item is highlighted. */
static void
menu_item_select_cb (GtkMenuItem *proxy, EomWindow *window)
{
	GtkAction *action;
	char *tooltip = NULL;

	action = gtk_activatable_get_related_action (GTK_ACTIVATABLE (proxy));

	g_return_if_fail (action != NULL);

	g_object_get (G_OBJECT (action), "tooltip", &tooltip, NULL);

	if (tooltip == NULL)
		return;

	gtk_statusbar_push (GTK_STATUSBAR (window->priv->statusbar),
	                    window->priv->tip_message_cid, tooltip);
	g_free (tooltip);
}
/* "deselect" handler: removes the tooltip message pushed by
 * menu_item_select_cb from the statusbar. */
static void
menu_item_deselect_cb (GtkMenuItem *proxy, EomWindow *window)
{
	EomWindowPrivate *priv = window->priv;

	gtk_statusbar_pop (GTK_STATUSBAR (priv->statusbar),
	                   priv->tip_message_cid);
}
/* GtkUIManager "connect-proxy" handler: wires statusbar tooltip
 * updates to every menu-item proxy the UI manager creates. */
static void
connect_proxy_cb (GtkUIManager *manager,
                  GtkAction *action,
                  GtkWidget *proxy,
                  EomWindow *window)
{
	if (!GTK_IS_MENU_ITEM (proxy))
		return;

	g_signal_connect (proxy, "select",
	                  G_CALLBACK (menu_item_select_cb), window);
	g_signal_connect (proxy, "deselect",
	                  G_CALLBACK (menu_item_deselect_cb), window);
}
/* GtkUIManager "disconnect-proxy" handler: undoes the connections made
 * in connect_proxy_cb before the proxy goes away. */
static void
disconnect_proxy_cb (GtkUIManager *manager,
                     GtkAction *action,
                     GtkWidget *proxy,
                     EomWindow *window)
{
	if (!GTK_IS_MENU_ITEM (proxy))
		return;

	g_signal_handlers_disconnect_by_func
		(proxy, G_CALLBACK (menu_item_select_cb), window);
	g_signal_handlers_disconnect_by_func
		(proxy, G_CALLBACK (menu_item_deselect_cb), window);
}
/* Tweaks per-action presentation properties (toolbar short labels,
 * "is-important" flags) after the action groups have been populated. */
static void
set_action_properties (GtkActionGroup *window_group,
                       GtkActionGroup *image_group,
                       GtkActionGroup *collection_group)
{
	GtkAction *action;

	/* Navigation actions get short labels and priority text. */
	action = gtk_action_group_get_action (collection_group, "GoPrevious");
	g_object_set (action,
	              "short_label", _("Previous"),
	              "is-important", TRUE,
	              NULL);

	action = gtk_action_group_get_action (collection_group, "GoNext");
	g_object_set (action,
	              "short_label", _("Next"),
	              "is-important", TRUE,
	              NULL);

	action = gtk_action_group_get_action (image_group, "EditRotate90");
	g_object_set (action, "short_label", _("Right"), NULL);

	action = gtk_action_group_get_action (image_group, "EditRotate270");
	g_object_set (action, "short_label", _("Left"), NULL);

	action = gtk_action_group_get_action (image_group, "ViewZoomIn");
	g_object_set (action, "short_label", _("In"), NULL);

	action = gtk_action_group_get_action (image_group, "ViewZoomOut");
	g_object_set (action, "short_label", _("Out"), NULL);

	action = gtk_action_group_get_action (image_group, "ViewZoomNormal");
	g_object_set (action, "short_label", _("Normal"), NULL);

	action = gtk_action_group_get_action (image_group, "ViewZoomFit");
	g_object_set (action, "short_label", _("Fit"), NULL);

	action = gtk_action_group_get_action (window_group, "ViewImageCollection");
	g_object_set (action, "short_label", _("Collection"), NULL);

	action = gtk_action_group_get_action (image_group, "EditMoveToTrash");
	g_object_set (action,
	              "short_label", C_("action (to trash)", "Trash"),
	              NULL);
}
/* GCompareFunc for sorting GtkRecentInfo items most-recently-used
 * first, pushing entries not registered with eom to the end. */
static gint
sort_recents_mru (GtkRecentInfo *a, GtkRecentInfo *b)
{
	gboolean registered_a, registered_b;
	time_t stamp_a, stamp_b;

	/* We need to check this first as gtk_recent_info_get_application_info
	 * will treat it as a non-fatal error when the GtkRecentInfo doesn't
	 * have the application registered. */
	registered_a = gtk_recent_info_has_application (a,
						EOM_RECENT_FILES_APP_NAME);
	registered_b = gtk_recent_info_has_application (b,
						EOM_RECENT_FILES_APP_NAME);

	if (!registered_a && !registered_b)
		return 0;
	if (!registered_b)
		return -1;	/* only a is an eom entry -> a sorts first */
	if (!registered_a)
		return 1;	/* only b is an eom entry -> b sorts first */

	/* Both registered: these calls should not fail now. */
	gtk_recent_info_get_application_info (a,
					      EOM_RECENT_FILES_APP_NAME,
					      NULL,
					      NULL,
					      &stamp_a);
	gtk_recent_info_get_application_info (b,
					      EOM_RECENT_FILES_APP_NAME,
					      NULL,
					      NULL,
					      &stamp_b);

	/* Newer timestamp sorts first. */
	return (stamp_b - stamp_a);
}
/* Rebuilds the "Recent Documents" submenu: removes the previous merge
 * and its actions, then adds up to EOM_RECENT_FILES_LIMIT entries for
 * files registered with eom, most recently used first. */
static void
eom_window_update_recent_files_menu (EomWindow *window)
{
	EomWindowPrivate *priv;
	GList *actions = NULL, *li = NULL, *items = NULL;
	guint count_recent = 0;

	priv = window->priv;

	/* Drop the previously merged menu entries, if any. */
	if (priv->recent_menu_id != 0)
		gtk_ui_manager_remove_ui (priv->ui_mgr, priv->recent_menu_id);

	/* Disconnect and remove the old recent-file actions. */
	actions = gtk_action_group_list_actions (priv->actions_recent);

	for (li = actions; li != NULL; li = li->next) {
		g_signal_handlers_disconnect_by_func (GTK_ACTION (li->data),
						      G_CALLBACK(eom_window_open_recent_cb),
						      window);

		gtk_action_group_remove_action (priv->actions_recent,
						GTK_ACTION (li->data));
	}

	g_list_free (actions);

	priv->recent_menu_id = gtk_ui_manager_new_merge_id (priv->ui_mgr);
	items = gtk_recent_manager_get_items (gtk_recent_manager_get_default());
	items = g_list_sort (items, (GCompareFunc) sort_recents_mru);

	for (li = items; li != NULL && count_recent < EOM_RECENT_FILES_LIMIT; li = li->next) {
		gchar *action_name;
		gchar *label;
		gchar *tip;
		gchar **display_name;
		gchar *label_filename;
		GtkAction *action;
		GtkRecentInfo *info = li->data;

		/* Sorting moves non-EOM files to the end of the list.
		 * So no file of interest will follow if this test fails */
		if (!gtk_recent_info_has_application (info, EOM_RECENT_FILES_APP_NAME))
			break;

		count_recent++;

		action_name = g_strdup_printf ("recent-info-%d", count_recent);
		/* Escape literal underscores so they are not taken as
		 * mnemonic markers in the menu label. */
		display_name = g_strsplit (gtk_recent_info_get_display_name (info), "_", -1);
		label_filename = g_strjoinv ("__", display_name);
		/* Prefix a RTL mark in right-to-left locales; "_<n>." gives
		 * each entry a numeric mnemonic. */
		label = g_strdup_printf ("%s_%d. %s",
				(is_rtl ? "\xE2\x80\x8F" : ""), count_recent, label_filename);
		g_free (label_filename);
		g_strfreev (display_name);

		tip = gtk_recent_info_get_uri_display (info);

		/* This is a workaround for a bug (#351945) regarding
		 * gtk_recent_info_get_uri_display() and remote URIs.
		 * mate_vfs_format_uri_for_display is sufficient here
		 * since the password gets stripped when adding the
		 * file to the recently used list. */
		if (tip == NULL)
			tip = g_uri_unescape_string (gtk_recent_info_get_uri (info), NULL);

		action = gtk_action_new (action_name, label, tip, NULL);
		gtk_action_set_always_show_image (action, TRUE);

		/* Keep the GtkRecentInfo alive as long as the action exists;
		 * eom_window_open_recent_cb reads it back from this key. */
		g_object_set_data_full (G_OBJECT (action), "gtk-recent-info",
					gtk_recent_info_ref (info),
					(GDestroyNotify) gtk_recent_info_unref);

		g_object_set (G_OBJECT (action), "icon-name", "image-x-generic", NULL);

		g_signal_connect (action, "activate",
				  G_CALLBACK (eom_window_open_recent_cb),
				  window);

		gtk_action_group_add_action (priv->actions_recent, action);

		g_object_unref (action);

		gtk_ui_manager_add_ui (priv->ui_mgr, priv->recent_menu_id,
				       "/MainMenu/Image/RecentDocuments",
				       action_name, action_name,
				       GTK_UI_MANAGER_AUTO, FALSE);

		g_free (action_name);
		g_free (label);
		g_free (tip);
	}

	g_list_foreach (items, (GFunc) gtk_recent_info_unref, NULL);
	g_list_free (items);
}
/* "changed" handler of the default GtkRecentManager: regenerates the
 * Recent Documents menu whenever the recent-files store changes. */
static void
eom_window_recent_manager_changed_cb (GtkRecentManager *manager, EomWindow *window)
{
	eom_window_update_recent_files_menu (window);
}
/* Drag-and-drop "drag-data-received" handler: opens dropped URIs,
 * rejecting drops that originate from this same window. */
static void
eom_window_drag_data_received (GtkWidget *widget,
                               GdkDragContext *context,
                               gint x, gint y,
                               GtkSelectionData *selection_data,
                               guint info, guint time)
{
	GdkAtom target;
	GtkWidget *source_widget;

	target = gtk_selection_data_get_target (selection_data);

	if (!gtk_targets_include_uri (&target, 1))
		return;

	/* if the request is from another process this will return NULL */
	source_widget = gtk_drag_get_source_widget (context);

	/* if the drag request originates from the current eom instance, ignore
	   the request if the source window is the same as the dest window */
	if (source_widget != NULL &&
	    gtk_widget_get_toplevel (source_widget) == gtk_widget_get_toplevel (widget)) {
		gdk_drag_status (context, 0, time);
		return;
	}

	if (gdk_drag_context_get_suggested_action (context) == GDK_ACTION_COPY) {
		EomWindow *window = EOM_WINDOW (widget);
		GSList *file_list;

		file_list = eom_util_parse_uri_string_list_to_file_list ((const gchar *) gtk_selection_data_get_data (selection_data));

		eom_window_open_file_list (window, file_list);
	}
}
/* Registers the window as a drop target for URI lists. */
static void
eom_window_set_drag_dest (EomWindow *window)
{
	GtkWidget *widget = GTK_WIDGET (window);

	gtk_drag_dest_set (widget,
	                   GTK_DEST_DEFAULT_MOTION | GTK_DEST_DEFAULT_DROP,
	                   NULL, 0,
	                   GDK_ACTION_COPY | GDK_ACTION_ASK);
	gtk_drag_dest_add_uri_targets (widget);
}
/* show/hide handler for the sidebar: persists the new visibility,
 * syncs the ViewSidebar toggle action, and returns focus to the view. */
static void
eom_window_sidebar_visibility_changed (GtkWidget *widget, EomWindow *window)
{
	EomWindowPrivate *priv = window->priv;
	GtkAction *action;
	gboolean visible;

	visible = gtk_widget_get_visible (priv->sidebar);

	/* Remember the state in GSettings. */
	g_settings_set_boolean (priv->ui_settings,
	                        EOM_CONF_UI_SIDEBAR,
	                        visible);

	/* Keep the menu toggle consistent with the actual widget state. */
	action = gtk_action_group_get_action (priv->actions_window,
	                                      "ViewSidebar");

	if (gtk_toggle_action_get_active (GTK_TOGGLE_ACTION (action)) != visible)
		gtk_toggle_action_set_active (GTK_TOGGLE_ACTION (action), visible);

	/* Focus the image */
	if (!visible && priv->image != NULL)
		gtk_widget_grab_focus (priv->view);
}
/* "page-added" handler: once the sidebar gains its first page the
 * ViewSidebar action becomes usable, and the sidebar is shown if the
 * toggle says it should be visible. */
static void
eom_window_sidebar_page_added (EomSidebar *sidebar,
                               GtkWidget *main_widget,
                               EomWindow *window)
{
	GtkAction *action;

	/* Only the transition from zero to one page is interesting. */
	if (eom_sidebar_get_n_pages (sidebar) != 1)
		return;

	action = gtk_action_group_get_action (window->priv->actions_window,
	                                      "ViewSidebar");

	gtk_action_set_sensitive (action, TRUE);

	if (gtk_toggle_action_get_active (GTK_TOGGLE_ACTION (action)))
		gtk_widget_show (GTK_WIDGET (sidebar));
}
/* "page-removed" handler: hides the sidebar and disables the
 * ViewSidebar action when no pages remain. */
static void
eom_window_sidebar_page_removed (EomSidebar *sidebar,
                                 GtkWidget *main_widget,
                                 EomWindow *window)
{
	GtkAction *action;

	if (!eom_sidebar_is_empty (sidebar))
		return;

	gtk_widget_hide (GTK_WIDGET (sidebar));

	action = gtk_action_group_get_action (window->priv->actions_window,
	                                      "ViewSidebar");

	gtk_action_set_sensitive (action, FALSE);
}
/* Blocks until the pending save job completes by spinning the GTK main
 * loop. The window is desensitized so the user cannot trigger further
 * actions while saving. NOTE(review): the do/while runs at least one
 * iteration, so callers are expected to check priv->save_job != NULL
 * before calling (eom_window_delete does). */
static void
eom_window_finish_saving (EomWindow *window)
{
	EomWindowPrivate *priv = window->priv;

	gtk_widget_set_sensitive (GTK_WIDGET (window), FALSE);

	/* priv->save_job is cleared elsewhere when the job finishes;
	 * iterating the main loop lets that completion callback run. */
	do {
		gtk_main_iteration ();
	} while (priv->save_job != NULL);
}
/* Returns the GAppInfo of the configured external editor, or NULL when
 * none is set or the desktop file cannot be resolved. The result is
 * computed once per process and cached (see comment below). The caller
 * must NOT unref the returned object. */
static GAppInfo *
get_appinfo_for_editor (EomWindow *window)
{
	/* We want this function to always return the same thing, not
	 * just for performance reasons, but because if someone edits
	 * GConf while eom is running, the application could get into an
	 * inconsistent state. If the editor exists once, it gets added
	 * to the "available" list of the EggToolbarsModel (for which
	 * there is no API to remove it). If later the editor no longer
	 * existed when constructing a new window, we'd be unable to
	 * construct a GtkAction for the editor for that window, causing
	 * assertion failures when viewing the "Edit Toolbars" dialog
	 * (item is available, but can't find the GtkAction for it).
	 *
	 * By ensuring we keep the GAppInfo around, we avoid the
	 * possibility of that situation occuring.
	 */
	static GDesktopAppInfo *app_info = NULL;
	static gboolean initialised;

	if (!initialised) {
		gchar *editor;

		/* The setting holds a desktop-file id, e.g. "gimp.desktop". */
		editor = g_settings_get_string (window->priv->ui_settings,
		                                EOM_CONF_UI_EXTERNAL_EDITOR);

		if (editor != NULL) {
			app_info = g_desktop_app_info_new (editor);
		}

		initialised = TRUE;
		g_free (editor);
	}

	return (GAppInfo *) app_info;
}
/* "OpenEditor" action callback: launches the configured external
 * editor on the currently displayed image. */
static void
eom_window_open_editor (GtkAction *action,
                        EomWindow *window)
{
	GdkAppLaunchContext *context;
	GAppInfo *app_info;
	GList files;

	app_info = get_appinfo_for_editor (window);
	if (app_info == NULL)
		return;

#if GTK_CHECK_VERSION (3, 0, 0)
	context = gdk_display_get_app_launch_context (
	  gtk_widget_get_display (GTK_WIDGET (window)));
#else
	context = gdk_app_launch_context_new ();
#endif
	gdk_app_launch_context_set_screen (context,
	  gtk_widget_get_screen (GTK_WIDGET (window)));
	gdk_app_launch_context_set_icon (context,
	  g_app_info_get_icon (app_info));
	gdk_app_launch_context_set_timestamp (context,
	  gtk_get_current_event_time ());

	/* Single-element, stack-allocated file list for g_app_info_launch;
	 * the GFile it holds is a fresh reference we must release. */
	files.data = eom_image_get_file (window->priv->image);
	files.next = NULL;
	files.prev = NULL;

	g_app_info_launch (app_info, &files,
	                   G_APP_LAUNCH_CONTEXT (context), NULL);

	g_object_unref (files.data);
	g_object_unref (context);
}
/* Creates the "OpenEditor" action for this window when an external
 * editor is configured, and registers it with the toolbar model. */
static void
eom_window_add_open_editor_action (EomWindow *window)
{
	EggToolbarsModel *model;
	GAppInfo *editor_info;
	GtkAction *action;
	gchar *tooltip;

	editor_info = get_appinfo_for_editor (window);
	if (editor_info == NULL)
		return;

	/* Make the item known to the editable-toolbar model. */
	model = eom_application_get_toolbars_model (EOM_APP);
	egg_toolbars_model_set_name_flags (model, "OpenEditor",
	                                   EGG_TB_MODEL_NAME_KNOWN);

	tooltip = g_strdup_printf (_("Edit the current image using %s"),
	                           g_app_info_get_name (editor_info));

	action = gtk_action_new ("OpenEditor", _("Edit Image"), tooltip, NULL);
	gtk_action_set_gicon (action, g_app_info_get_icon (editor_info));
	gtk_action_set_is_important (action, TRUE);

	g_signal_connect (action, "activate",
	                  G_CALLBACK (eom_window_open_editor), window);

	gtk_action_group_add_action (window->priv->actions_image, action);

	g_object_unref (action);
	g_free (tooltip);
}
/* Builds the whole window UI: action groups, menus/toolbar via
 * GtkUIManager, statusbar, sidebar+view pane, thumbnail navigator and
 * DnD setup. The construction order matters — action groups must exist
 * before the UI file is merged, and the recent-files menu needs the
 * merged /MainMenu placeholder. */
static void
eom_window_construct_ui (EomWindow *window)
{
	EomWindowPrivate *priv;

	GError *error = NULL;

	GtkWidget *menubar;
	GtkWidget *thumb_popup;
	GtkWidget *view_popup;
	GtkWidget *hpaned;
	GtkWidget *menuitem;

	g_return_if_fail (EOM_IS_WINDOW (window));

	priv = window->priv;

	/* Top-level vertical container. */
	priv->box = gtk_vbox_new (FALSE, 0);
	gtk_container_add (GTK_CONTAINER (window), priv->box);
	gtk_widget_show (priv->box);

	priv->ui_mgr = gtk_ui_manager_new ();

	/* Window-wide actions (always sensitive). */
	priv->actions_window = gtk_action_group_new ("MenuActionsWindow");

	gtk_action_group_set_translation_domain (priv->actions_window,
						 GETTEXT_PACKAGE);

	gtk_action_group_add_actions (priv->actions_window,
				      action_entries_window,
				      G_N_ELEMENTS (action_entries_window),
				      window);

	gtk_action_group_add_toggle_actions (priv->actions_window,
					     toggle_entries_window,
					     G_N_ELEMENTS (toggle_entries_window),
					     window);

	gtk_ui_manager_insert_action_group (priv->ui_mgr, priv->actions_window, 0);

	/* Actions that require a loaded image. */
	priv->actions_image = gtk_action_group_new ("MenuActionsImage");
	gtk_action_group_set_translation_domain (priv->actions_image,
						 GETTEXT_PACKAGE);

	gtk_action_group_add_actions (priv->actions_image,
				      action_entries_image,
				      G_N_ELEMENTS (action_entries_image),
				      window);

	/* Optional external-editor action (only if one is configured). */
	eom_window_add_open_editor_action (window);

	gtk_action_group_add_toggle_actions (priv->actions_image,
					     toggle_entries_image,
					     G_N_ELEMENTS (toggle_entries_image),
					     window);

	gtk_ui_manager_insert_action_group (priv->ui_mgr, priv->actions_image, 0);

	/* Actions that require a multi-image collection. */
	priv->actions_collection = gtk_action_group_new ("MenuActionsCollection");
	gtk_action_group_set_translation_domain (priv->actions_collection,
						 GETTEXT_PACKAGE);

	gtk_action_group_add_actions (priv->actions_collection,
				      action_entries_collection,
				      G_N_ELEMENTS (action_entries_collection),
				      window);

	gtk_action_group_add_toggle_actions (priv->actions_collection,
					     toggle_entries_collection,
					     G_N_ELEMENTS (toggle_entries_collection),
					     window);

	set_action_properties (priv->actions_window,
			       priv->actions_image,
			       priv->actions_collection);

	gtk_ui_manager_insert_action_group (priv->ui_mgr, priv->actions_collection, 0);

	/* Merge the menu/toolbar description from the installed XML file. */
	if (!gtk_ui_manager_add_ui_from_file (priv->ui_mgr,
					      EOM_DATA_DIR"/eom-ui.xml",
					      &error)) {
		g_warning ("building menus failed: %s", error->message);
		g_error_free (error);
	}

	/* Statusbar tooltips for menu items. */
	g_signal_connect (priv->ui_mgr, "connect_proxy",
			  G_CALLBACK (connect_proxy_cb), window);
	g_signal_connect (priv->ui_mgr, "disconnect_proxy",
			  G_CALLBACK (disconnect_proxy_cb), window);

	menubar = gtk_ui_manager_get_widget (priv->ui_mgr, "/MainMenu");
	g_assert (GTK_IS_WIDGET (menubar));
	gtk_box_pack_start (GTK_BOX (priv->box), menubar, FALSE, FALSE, 0);
	gtk_widget_show (menubar);

	/* Always show icons on the transform menu items. */
	menuitem = gtk_ui_manager_get_widget (priv->ui_mgr,
			"/MainMenu/Edit/EditFlipHorizontal");
	gtk_image_menu_item_set_always_show_image (
			GTK_IMAGE_MENU_ITEM (menuitem), TRUE);

	menuitem = gtk_ui_manager_get_widget (priv->ui_mgr,
			"/MainMenu/Edit/EditFlipVertical");
	gtk_image_menu_item_set_always_show_image (
			GTK_IMAGE_MENU_ITEM (menuitem), TRUE);

	menuitem = gtk_ui_manager_get_widget (priv->ui_mgr,
			"/MainMenu/Edit/EditRotate90");
	gtk_image_menu_item_set_always_show_image (
			GTK_IMAGE_MENU_ITEM (menuitem), TRUE);

	menuitem = gtk_ui_manager_get_widget (priv->ui_mgr,
			"/MainMenu/Edit/EditRotate270");
	gtk_image_menu_item_set_always_show_image (
			GTK_IMAGE_MENU_ITEM (menuitem), TRUE);

	/* User-editable toolbar driven by the shared toolbars model. */
	priv->toolbar = GTK_WIDGET
		(g_object_new (EGG_TYPE_EDITABLE_TOOLBAR,
			       "ui-manager", priv->ui_mgr,
			       "model", eom_application_get_toolbars_model (EOM_APP),
			       NULL));

#if GTK_CHECK_VERSION (3, 0, 2)
	gtk_style_context_add_class (gtk_widget_get_style_context (GTK_WIDGET (priv->toolbar)),
				     GTK_STYLE_CLASS_PRIMARY_TOOLBAR);
#endif

	egg_editable_toolbar_show (EGG_EDITABLE_TOOLBAR (priv->toolbar),
				   "Toolbar");

	gtk_box_pack_start (GTK_BOX (priv->box),
			    priv->toolbar,
			    FALSE,
			    FALSE,
			    0);

	gtk_widget_show (priv->toolbar);

	gtk_window_add_accel_group (GTK_WINDOW (window),
				    gtk_ui_manager_get_accel_group (priv->ui_mgr));

	/* Recent-files menu, kept in sync with the recent manager. */
	priv->actions_recent = gtk_action_group_new ("RecentFilesActions");
	gtk_action_group_set_translation_domain (priv->actions_recent,
						 GETTEXT_PACKAGE);

	g_signal_connect (gtk_recent_manager_get_default (), "changed",
			  G_CALLBACK (eom_window_recent_manager_changed_cb),
			  window);

	eom_window_update_recent_files_menu (window);

	gtk_ui_manager_insert_action_group (priv->ui_mgr, priv->actions_recent, 0);

	/* Content area below menubar/toolbar. */
	priv->cbox = gtk_vbox_new (FALSE, 0);
	gtk_box_pack_start (GTK_BOX (priv->box), priv->cbox, TRUE, TRUE, 0);
	gtk_widget_show (priv->cbox);

	priv->statusbar = eom_statusbar_new ();
	gtk_box_pack_end (GTK_BOX (priv->box),
			  GTK_WIDGET (priv->statusbar),
			  FALSE, FALSE, 0);
	gtk_widget_show (priv->statusbar);

	/* Separate statusbar contexts for image info and menu tooltips. */
	priv->image_info_message_cid =
		gtk_statusbar_get_context_id (GTK_STATUSBAR (priv->statusbar),
					      "image_info_message");
	priv->tip_message_cid =
		gtk_statusbar_get_context_id (GTK_STATUSBAR (priv->statusbar),
					      "tip_message");

	priv->layout = gtk_vbox_new (FALSE, 2);

#if GTK_CHECK_VERSION (3, 2, 0)
	hpaned = gtk_paned_new (GTK_ORIENTATION_HORIZONTAL);
#else
	hpaned = gtk_hpaned_new ();
#endif

	priv->sidebar = eom_sidebar_new ();
	/* The sidebar shouldn't be shown automatically on show_all(),
	   but only when the user actually wants it. */
	gtk_widget_set_no_show_all (priv->sidebar, TRUE);

	gtk_widget_set_size_request (priv->sidebar, 210, -1);

	g_signal_connect_after (priv->sidebar,
				"show",
				G_CALLBACK (eom_window_sidebar_visibility_changed),
				window);

	g_signal_connect_after (priv->sidebar,
				"hide",
				G_CALLBACK (eom_window_sidebar_visibility_changed),
				window);

	g_signal_connect_after (priv->sidebar,
				"page-added",
				G_CALLBACK (eom_window_sidebar_page_added),
				window);

	g_signal_connect_after (priv->sidebar,
				"page-removed",
				G_CALLBACK (eom_window_sidebar_page_removed),
				window);

	/* The scrollable image view itself. */
	priv->view = eom_scroll_view_new ();
	gtk_widget_set_size_request (GTK_WIDGET (priv->view), 100, 100);
	g_signal_connect (G_OBJECT (priv->view),
			  "zoom_changed",
			  G_CALLBACK (view_zoom_changed_cb),
			  window);

	g_settings_bind (priv->view_settings, EOM_CONF_VIEW_SCROLL_WHEEL_ZOOM,
			 priv->view, "scrollwheel-zoom", G_SETTINGS_BIND_GET);
	g_settings_bind (priv->view_settings, EOM_CONF_VIEW_ZOOM_MULTIPLIER,
			 priv->view, "zoom-multiplier", G_SETTINGS_BIND_GET);

	view_popup = gtk_ui_manager_get_widget (priv->ui_mgr, "/ViewPopup");
	eom_scroll_view_set_popup (EOM_SCROLL_VIEW (priv->view),
				   GTK_MENU (view_popup));

	gtk_paned_pack1 (GTK_PANED (hpaned),
			 priv->sidebar,
			 FALSE,
			 FALSE);

	gtk_paned_pack2 (GTK_PANED (hpaned),
			 priv->view,
			 TRUE,
			 FALSE);

	gtk_widget_show_all (hpaned);

	gtk_box_pack_start (GTK_BOX (priv->layout), hpaned, TRUE, TRUE, 0);

	/* Thumbnail strip with its navigator. */
	priv->thumbview = g_object_ref (eom_thumb_view_new ());

	/* giving shape to the view */
	gtk_icon_view_set_margin (GTK_ICON_VIEW (priv->thumbview), 4);
	gtk_icon_view_set_row_spacing (GTK_ICON_VIEW (priv->thumbview), 0);

	g_signal_connect (G_OBJECT (priv->thumbview), "selection_changed",
			  G_CALLBACK (handle_image_selection_changed_cb), window);

	priv->nav = eom_thumb_nav_new (priv->thumbview,
				       EOM_THUMB_NAV_MODE_ONE_ROW,
				       g_settings_get_boolean (priv->ui_settings,
							       EOM_CONF_UI_SCROLL_BUTTONS));

	// Bind the scroll buttons to their GSettings key
	g_settings_bind (priv->ui_settings, EOM_CONF_UI_SCROLL_BUTTONS,
			 priv->nav, "show-buttons", G_SETTINGS_BIND_GET);

	thumb_popup = gtk_ui_manager_get_widget (priv->ui_mgr, "/ThumbnailPopup");
	eom_thumb_view_set_thumbnail_popup (EOM_THUMB_VIEW (priv->thumbview),
					    GTK_MENU (thumb_popup));

	gtk_box_pack_start (GTK_BOX (priv->layout), priv->nav, FALSE, FALSE, 0);

	gtk_box_pack_end (GTK_BOX (priv->cbox), priv->layout, TRUE, TRUE, 0);

	/* Apply the lockdown state to the save-related actions. */
	eom_window_can_save_changed_cb (priv->lockdown_settings,
					EOM_CONF_LOCKDOWN_CAN_SAVE,
					window);

	g_settings_bind (priv->ui_settings, EOM_CONF_UI_IMAGE_COLLECTION_POSITION,
			 window, "collection-position", G_SETTINGS_BIND_GET);
	g_settings_bind (priv->ui_settings, EOM_CONF_UI_IMAGE_COLLECTION_RESIZABLE,
			 window, "collection-resizable", G_SETTINGS_BIND_GET);

	/* Honor startup flags: go fullscreen/slideshow immediately. */
	if ((priv->flags & EOM_STARTUP_FULLSCREEN) ||
	    (priv->flags & EOM_STARTUP_SLIDE_SHOW)) {
		eom_window_run_fullscreen (window, (priv->flags & EOM_STARTUP_SLIDE_SHOW));
	} else {
		priv->mode = EOM_WINDOW_MODE_NORMAL;
		update_ui_visibility (window);
	}

	eom_window_set_drag_dest (window);
}
/* GObject instance init: creates the GSettings objects, resets all
 * private state to defaults and sets initial window geometry. The
 * widget tree itself is built later in eom_window_construct_ui(). */
static void
eom_window_init (EomWindow *window)
{
	GdkGeometry hints;
	GdkScreen *screen;
	EomWindowPrivate *priv;

	eom_debug (DEBUG_WINDOW);

	hints.min_width  = EOM_WINDOW_MIN_WIDTH;
	hints.min_height = EOM_WINDOW_MIN_HEIGHT;

	screen = gtk_widget_get_screen (GTK_WIDGET (window));

	priv = window->priv = EOM_WINDOW_GET_PRIVATE (window);

	priv->view_settings = g_settings_new (EOM_CONF_VIEW);
	priv->ui_settings = g_settings_new (EOM_CONF_UI);
	priv->fullscreen_settings = g_settings_new (EOM_CONF_FULLSCREEN);
	priv->lockdown_settings = g_settings_new (EOM_CONF_LOCKDOWN_SCHEMA);

	/* React to lockdown changes that disable saving. */
	g_signal_connect (priv->lockdown_settings,
					  "changed::" EOM_CONF_LOCKDOWN_CAN_SAVE,
					  G_CALLBACK (eom_window_can_save_changed_cb),
					  window);

	window->priv->store = NULL;
	window->priv->image = NULL;

	window->priv->fullscreen_popup = NULL;
	window->priv->fullscreen_timeout_source = NULL;
	window->priv->slideshow_random = FALSE;
	window->priv->slideshow_loop = FALSE;
	window->priv->slideshow_switch_timeout = 0;
	window->priv->slideshow_switch_source = NULL;

	/* Enforce the minimum window size. */
	gtk_window_set_geometry_hints (GTK_WINDOW (window),
				       GTK_WIDGET (window),
				       &hints,
				       GDK_HINT_MIN_SIZE);

	gtk_window_set_default_size (GTK_WINDOW (window),
				     EOM_WINDOW_DEFAULT_WIDTH,
				     EOM_WINDOW_DEFAULT_HEIGHT);

	gtk_window_set_position (GTK_WINDOW (window), GTK_WIN_POS_CENTER);

	window->priv->mode = EOM_WINDOW_MODE_UNKNOWN;
	window->priv->status = EOM_WINDOW_STATUS_UNKNOWN;

#ifdef HAVE_LCMS
	/* Color-management profile of the screen, if LCMS support is in. */
	window->priv->display_profile =
		eom_window_get_display_profile (screen);
#endif

	window->priv->recent_menu_id = 0;

	window->priv->collection_position = 0;
	window->priv->collection_resizable = FALSE;

	window->priv->save_disabled = FALSE;

	window->priv->page_setup = NULL;
}
/* GObject dispose: releases every object reference held by the window
 * (may run more than once, hence all pointers are NULLed). Signal
 * handlers are disconnected before their emitters are released so no
 * callback can fire into a half-torn-down window.
 *
 * Consistency fix: use g_clear_object() throughout — the function
 * already used it for priv->thumbview but hand-rolled unref-and-NULL
 * pairs everywhere else — and g_slist_free_full() for the file list. */
static void
eom_window_dispose (GObject *object)
{
	EomWindow *window;
	EomWindowPrivate *priv;

	g_return_if_fail (object != NULL);
	g_return_if_fail (EOM_IS_WINDOW (object));

	eom_debug (DEBUG_WINDOW);

	window = EOM_WINDOW (object);
	priv = window->priv;

	g_clear_object (&priv->page_setup);

	if (priv->thumbview)
	{
		/* Disconnect so we don't get any unwanted callbacks
		 * when the thumb view is disposed. */
		g_signal_handlers_disconnect_by_func (priv->thumbview,
		                 G_CALLBACK (handle_image_selection_changed_cb),
		                 window);
		g_clear_object (&priv->thumbview);
	}

	eom_plugin_engine_garbage_collect ();

	if (priv->store != NULL) {
		g_signal_handlers_disconnect_by_func (priv->store,
					      eom_window_list_store_image_added,
					      window);
		g_signal_handlers_disconnect_by_func (priv->store,
					      eom_window_list_store_image_removed,
					      window);
		g_clear_object (&priv->store);
	}

	if (priv->image != NULL) {
		g_signal_handlers_disconnect_by_func (priv->image,
						      image_thumb_changed_cb,
						      window);
		g_signal_handlers_disconnect_by_func (priv->image,
						      image_file_changed_cb,
						      window);
		g_clear_object (&priv->image);
	}

	/* Action groups. */
	g_clear_object (&priv->actions_window);
	g_clear_object (&priv->actions_image);
	g_clear_object (&priv->actions_collection);
	g_clear_object (&priv->actions_recent);
	g_clear_object (&priv->actions_open_with);

	fullscreen_clear_timeout (window);

	if (priv->fullscreen_popup != NULL) {
		gtk_widget_destroy (priv->fullscreen_popup);
		priv->fullscreen_popup = NULL;
	}

	slideshow_clear_timeout (window);

	g_signal_handlers_disconnect_by_func (gtk_recent_manager_get_default (),
					      G_CALLBACK (eom_window_recent_manager_changed_cb),
					      window);

	priv->recent_menu_id = 0;

	eom_window_clear_load_job (window);
	eom_window_clear_transform_job (window);

	/* GSettings objects. */
	g_clear_object (&priv->view_settings);
	g_clear_object (&priv->ui_settings);
	g_clear_object (&priv->fullscreen_settings);
	g_clear_object (&priv->lockdown_settings);

	if (priv->file_list != NULL) {
		/* Equivalent to the former foreach(unref) + free pair. */
		g_slist_free_full (priv->file_list, g_object_unref);
		priv->file_list = NULL;
	}

#ifdef HAVE_LCMS
	if (priv->display_profile != NULL) {
		cmsCloseProfile (priv->display_profile);
		priv->display_profile = NULL;
	}
#endif

	g_clear_object (&priv->last_save_as_folder);

	eom_plugin_engine_garbage_collect ();

	G_OBJECT_CLASS (eom_window_parent_class)->dispose (object);
}
/* GObject finalize: shuts the whole application down when this was the
 * last remaining window. */
static void
eom_window_finalize (GObject *object)
{
	GList *windows = eom_application_get_windows (EOM_APP);

	g_return_if_fail (EOM_IS_WINDOW (object));

	eom_debug (DEBUG_WINDOW);

	if (windows != NULL)
		g_list_free (windows);
	else
		eom_application_shutdown (EOM_APP);	/* last window gone */

	G_OBJECT_CLASS (eom_window_parent_class)->finalize (object);
}
/* "delete-event" handler: finishes a pending save, asks the user about
 * unsaved images, and destroys the window unless the user cancels.
 * Always returns TRUE (the event is fully handled here). */
static gint
eom_window_delete (GtkWidget *widget, GdkEventAny *event)
{
	EomWindow *window;

	g_return_val_if_fail (EOM_IS_WINDOW (widget), FALSE);

	window = EOM_WINDOW (widget);

	/* Block until a running save job completes. */
	if (window->priv->save_job != NULL)
		eom_window_finish_saving (window);

	/* A TRUE result from the confirmation means "don't close". */
	if (!eom_window_unsaved_images_confirm (window))
		gtk_widget_destroy (widget);

	return TRUE;
}
/* "key-press-event" handler implementing the image-navigation keys.
 * Several switch cases fall through deliberately (space -> Return,
 * Left -> Up, Right -> Down, and Up/Down -> the following case when
 * the thumbnail pane is visible); comments mark each one. Unhandled
 * events are offered to the thumb view, then the scroll view, then
 * the parent class. */
static gint
eom_window_key_press (GtkWidget *widget, GdkEventKey *event)
{
	GtkContainer *tbcontainer = GTK_CONTAINER ((EOM_WINDOW (widget)->priv->toolbar));
	gint result = FALSE;
	gboolean handle_selection = FALSE;

	switch (event->keyval) {
	case GDK_KEY_space:
		if (event->state & GDK_CONTROL_MASK) {
			handle_selection = TRUE;
			break;
		}
		/* fallthrough: plain space behaves like Return */
	case GDK_KEY_Return:
		if (gtk_container_get_focus_child (tbcontainer) == NULL) {
			/* Image properties dialog case */
			if (event->state & GDK_MOD1_MASK) {
				result = FALSE;
				break;
			}

			if (event->state & GDK_SHIFT_MASK) {
				eom_window_cmd_go_prev (NULL, EOM_WINDOW (widget));
			} else {
				eom_window_cmd_go_next (NULL, EOM_WINDOW (widget));
			}
			result = TRUE;
		}
		break;
	case GDK_KEY_p:
	case GDK_KEY_P:
		/* Toggle between fullscreen and slideshow. */
		if (EOM_WINDOW (widget)->priv->mode == EOM_WINDOW_MODE_FULLSCREEN || EOM_WINDOW (widget)->priv->mode == EOM_WINDOW_MODE_SLIDESHOW) {
			gboolean slideshow;

			slideshow = EOM_WINDOW (widget)->priv->mode == EOM_WINDOW_MODE_SLIDESHOW;
			eom_window_run_fullscreen (EOM_WINDOW (widget), !slideshow);
		}
		break;
	case GDK_KEY_Q:
	case GDK_KEY_q:
	case GDK_KEY_Escape:
		/* Leave fullscreen/slideshow, or close a normal window. */
		if (EOM_WINDOW (widget)->priv->mode == EOM_WINDOW_MODE_FULLSCREEN) {
			eom_window_stop_fullscreen (EOM_WINDOW (widget), FALSE);
		} else if (EOM_WINDOW (widget)->priv->mode == EOM_WINDOW_MODE_SLIDESHOW) {
			eom_window_stop_fullscreen (EOM_WINDOW (widget), TRUE);
		} else {
			eom_window_cmd_close_window (NULL, EOM_WINDOW (widget));
			return TRUE;
		}
		break;
	case GDK_KEY_Left:
		if (event->state & GDK_MOD1_MASK) {
			/* Alt+Left moves to previous image */
			if (is_rtl) { /* move to next in RTL mode */
				eom_window_cmd_go_next (NULL, EOM_WINDOW (widget));
			} else {
				eom_window_cmd_go_prev (NULL, EOM_WINDOW (widget));
			}
			result = TRUE;
			break;
		} /* else fall-trough is intended */
	case GDK_KEY_Up:
		if (eom_scroll_view_scrollbars_visible (EOM_SCROLL_VIEW (EOM_WINDOW (widget)->priv->view))) {
			/* break to let scrollview handle the key */
			break;
		}
		if (gtk_container_get_focus_child (tbcontainer) != NULL)
			break;
		if (!gtk_widget_get_visible (EOM_WINDOW (widget)->priv->nav)) {
			if (is_rtl && event->keyval == GDK_KEY_Left) {
				/* handle RTL fall-through,
				 * need to behave like GDK_Down then */
				eom_window_cmd_go_next (NULL,
							EOM_WINDOW (widget));
			} else {
				eom_window_cmd_go_prev (NULL,
							EOM_WINDOW (widget));
			}
			result = TRUE;
			break;
		}
		/* nav pane visible: fall through to the next case */
	case GDK_KEY_Right:
		if (event->state & GDK_MOD1_MASK) {
			/* Alt+Right moves to next image */
			if (is_rtl) { /* move to previous in RTL mode */
				eom_window_cmd_go_prev (NULL, EOM_WINDOW (widget));
			} else {
				eom_window_cmd_go_next (NULL, EOM_WINDOW (widget));
			}
			result = TRUE;
			break;
		} /* else fall-trough is intended */
	case GDK_KEY_Down:
		if (eom_scroll_view_scrollbars_visible (EOM_SCROLL_VIEW (EOM_WINDOW (widget)->priv->view))) {
			/* break to let scrollview handle the key */
			break;
		}
		if (gtk_container_get_focus_child (tbcontainer) != NULL)
			break;
		if (!gtk_widget_get_visible (EOM_WINDOW (widget)->priv->nav)) {
			if (is_rtl && event->keyval == GDK_KEY_Right) {
				/* handle RTL fall-through,
				 * need to behave like GDK_Up then */
				eom_window_cmd_go_prev (NULL,
							EOM_WINDOW (widget));
			} else {
				eom_window_cmd_go_next (NULL,
							EOM_WINDOW (widget));
			}
			result = TRUE;
			break;
		}
		/* nav pane visible: fall through to the next case */
	case GDK_KEY_Page_Up:
		if (!eom_scroll_view_scrollbars_visible (EOM_SCROLL_VIEW (EOM_WINDOW (widget)->priv->view))) {
			if (!gtk_widget_get_visible (EOM_WINDOW (widget)->priv->nav)) {
				/* If the iconview is not visible skip to the
				 * previous image manually as it won't handle
				 * the keypress then. */
				eom_window_cmd_go_prev (NULL,
							EOM_WINDOW (widget));
				result = TRUE;
			} else
				handle_selection = TRUE;
		}
		break;
	case GDK_KEY_Page_Down:
		if (!eom_scroll_view_scrollbars_visible (EOM_SCROLL_VIEW (EOM_WINDOW (widget)->priv->view))) {
			if (!gtk_widget_get_visible (EOM_WINDOW (widget)->priv->nav)) {
				/* If the iconview is not visible skip to the
				 * next image manually as it won't handle
				 * the keypress then. */
				eom_window_cmd_go_next (NULL,
							EOM_WINDOW (widget));
				result = TRUE;
			} else
				handle_selection = TRUE;
		}
		break;
	}

	/* Update slideshow timeout */
	if (result && (EOM_WINDOW (widget)->priv->mode == EOM_WINDOW_MODE_SLIDESHOW)) {
		slideshow_set_timeout (EOM_WINDOW (widget));
	}

	/* Let the thumb view process selection-changing keys. */
	if (handle_selection == TRUE && result == FALSE) {
		gtk_widget_grab_focus (GTK_WIDGET (EOM_WINDOW (widget)->priv->thumbview));

		result = gtk_widget_event (GTK_WIDGET (EOM_WINDOW (widget)->priv->thumbview),
					   (GdkEvent *) event);
	}

	/* If the focus is not in the toolbar and we still haven't handled the
	   event, give the scrollview a chance to do it.  */
	if (!gtk_container_get_focus_child (tbcontainer) && result == FALSE &&
		gtk_widget_get_realized (GTK_WIDGET (EOM_WINDOW (widget)->priv->view))) {
			result = gtk_widget_event (GTK_WIDGET (EOM_WINDOW (widget)->priv->view),
						   (GdkEvent *) event);
	}

	if (result == FALSE && GTK_WIDGET_CLASS (eom_window_parent_class)->key_press_event) {
		result = (* GTK_WIDGET_CLASS (eom_window_parent_class)->key_press_event) (widget, event);
	}

	return result;
}
static gint
eom_window_button_press (GtkWidget *widget, GdkEventButton *event)
{
	EomWindow *window = EOM_WINDOW (widget);
	gint result = FALSE;

	/* Buttons 6/7 are the horizontal-scroll ("back"/"forward")
	 * buttons on many mice; map them to image navigation. */
	if (event->type == GDK_BUTTON_PRESS) {
		if (event->button == 6) {
			eom_thumb_view_select_single (EOM_THUMB_VIEW (window->priv->thumbview),
			                              EOM_THUMB_VIEW_SELECT_LEFT);
			result = TRUE;
		} else if (event->button == 7) {
			eom_thumb_view_select_single (EOM_THUMB_VIEW (window->priv->thumbview),
			                              EOM_THUMB_VIEW_SELECT_RIGHT);
			result = TRUE;
		}
	}

	/* Not handled here: chain up to the parent class handler. */
	if (result == FALSE && GTK_WIDGET_CLASS (eom_window_parent_class)->button_press_event) {
		result = (* GTK_WIDGET_CLASS (eom_window_parent_class)->button_press_event) (widget, event);
	}

	return result;
}
static gboolean
eom_window_focus_out_event (GtkWidget *widget, GdkEventFocus *event)
{
	EomWindow *window = EOM_WINDOW (widget);
	EomWindowPrivate *priv = window->priv;

	eom_debug (DEBUG_WINDOW);

	/* When a fullscreen/slideshow window loses input focus, hide the
	 * floating controls popup so it does not linger over other apps. */
	if (priv->mode == EOM_WINDOW_MODE_FULLSCREEN ||
	    priv->mode == EOM_WINDOW_MODE_SLIDESHOW) {
		gtk_widget_hide (priv->fullscreen_popup);
	}

	return GTK_WIDGET_CLASS (eom_window_parent_class)->focus_out_event (widget, event);
}
/* GObject property setter for EomWindow. */
static void
eom_window_set_property (GObject      *object,
                         guint         property_id,
                         const GValue *value,
                         GParamSpec   *pspec)
{
	EomWindow *window;
	EomWindowPrivate *priv;

	g_return_if_fail (EOM_IS_WINDOW (object));

	window = EOM_WINDOW (object);
	priv = window->priv;

	if (property_id == PROP_COLLECTION_POS) {
		/* Position changes; keep the current resizable flag. */
		eom_window_set_collection_mode (window, g_value_get_enum (value),
		                                priv->collection_resizable);
	} else if (property_id == PROP_COLLECTION_RESIZABLE) {
		/* Resizable flag changes; keep the current position. */
		eom_window_set_collection_mode (window, priv->collection_position,
		                                g_value_get_boolean (value));
	} else if (property_id == PROP_STARTUP_FLAGS) {
		priv->flags = g_value_get_flags (value);
	} else {
		G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
	}
}
/* GObject property getter for EomWindow. */
static void
eom_window_get_property (GObject    *object,
                         guint       property_id,
                         GValue     *value,
                         GParamSpec *pspec)
{
	EomWindow *window;
	EomWindowPrivate *priv;

	g_return_if_fail (EOM_IS_WINDOW (object));

	window = EOM_WINDOW (object);
	priv = window->priv;

	if (property_id == PROP_COLLECTION_POS) {
		g_value_set_enum (value, priv->collection_position);
	} else if (property_id == PROP_COLLECTION_RESIZABLE) {
		g_value_set_boolean (value, priv->collection_resizable);
	} else if (property_id == PROP_STARTUP_FLAGS) {
		g_value_set_flags (value, priv->flags);
	} else {
		G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
	}
}
/* Constructor override: after the parent class builds the instance,
 * assemble the window UI and let plugins extend it. */
static GObject *
eom_window_constructor (GType type,
                        guint n_construct_properties,
                        GObjectConstructParam *construct_params)
{
	GObjectClass *parent = G_OBJECT_CLASS (eom_window_parent_class);
	GObject *object = parent->constructor (type,
	                                       n_construct_properties,
	                                       construct_params);

	eom_window_construct_ui (EOM_WINDOW (object));
	eom_plugin_engine_update_plugins_ui (EOM_WINDOW (object), TRUE);

	return object;
}
/* Class initializer: wires up GObject/GtkWidget vfuncs, installs the
 * window's properties and registers the "prepared" signal. */
static void
eom_window_class_init (EomWindowClass *class)
{
	GObjectClass *g_object_class = (GObjectClass *) class;
	GtkWidgetClass *widget_class = (GtkWidgetClass *) class;

	/* GObject lifecycle and property handling. */
	g_object_class->constructor = eom_window_constructor;
	g_object_class->dispose = eom_window_dispose;
	g_object_class->finalize = eom_window_finalize;
	g_object_class->set_property = eom_window_set_property;
	g_object_class->get_property = eom_window_get_property;

	/* Widget event handlers. */
	widget_class->delete_event = eom_window_delete;
	widget_class->key_press_event = eom_window_key_press;
	widget_class->button_press_event = eom_window_button_press;
	widget_class->drag_data_received = eom_window_drag_data_received;
	widget_class->focus_out_event = eom_window_focus_out_event;

	/**
	 * EomWindow:collection-position:
	 *
	 * Determines the position of the image collection in the window
	 * relative to the image.
	 */
	g_object_class_install_property (
		g_object_class, PROP_COLLECTION_POS,
		g_param_spec_enum ("collection-position", NULL, NULL,
		                   EOM_TYPE_WINDOW_COLLECTION_POS,
		                   EOM_WINDOW_COLLECTION_POS_BOTTOM,
		                   G_PARAM_READWRITE | G_PARAM_STATIC_NAME));

	/**
	 * EomWindow:collection-resizable:
	 *
	 * If %TRUE the collection will be resizable by the user otherwise it will be
	 * in single column/row mode.
	 */
	g_object_class_install_property (
		g_object_class, PROP_COLLECTION_RESIZABLE,
		g_param_spec_boolean ("collection-resizable", NULL, NULL, FALSE,
		                      G_PARAM_READWRITE | G_PARAM_STATIC_NAME));

	/**
	 * EomWindow:startup-flags:
	 *
	 * A bitwise OR of #EomStartupFlags elements, indicating how the window
	 * should behave upon creation.  Construct-only: fixed at creation time.
	 */
	g_object_class_install_property (g_object_class,
	                                 PROP_STARTUP_FLAGS,
	                                 g_param_spec_flags ("startup-flags",
	                                                     NULL,
	                                                     NULL,
	                                                     EOM_TYPE_STARTUP_FLAGS,
	                                                     0,
	                                                     G_PARAM_READWRITE |
	                                                     G_PARAM_CONSTRUCT_ONLY));

	/**
	 * EomWindow::prepared:
	 * @window: the object which received the signal.
	 *
	 * The #EomWindow::prepared signal is emitted when the @window is ready
	 * to be shown.
	 */
	signals [SIGNAL_PREPARED] =
		g_signal_new ("prepared",
		              EOM_TYPE_WINDOW,
		              G_SIGNAL_RUN_LAST,
		              G_STRUCT_OFFSET (EomWindowClass, prepared),
		              NULL, NULL,
		              g_cclosure_marshal_VOID__VOID,
		              G_TYPE_NONE, 0);

	g_type_class_add_private (g_object_class, sizeof (EomWindowPrivate));
}
/**
 * eom_window_new:
 * @flags: the initialization parameters for the new window.
 *
 * Creates a new and empty #EomWindow. Use @flags to indicate
 * if the window should be initialized fullscreen, in slideshow mode,
 * and/or without the thumbnails collection visible. See #EomStartupFlags.
 *
 * Returns: a newly created #EomWindow.
 **/
GtkWidget*
eom_window_new (EomStartupFlags flags)
{
	GObject *object;

	eom_debug (DEBUG_WINDOW);

	/* "startup-flags" is construct-only, so it must be supplied here. */
	object = g_object_new (EOM_TYPE_WINDOW,
	                       "type", GTK_WINDOW_TOPLEVEL,
	                       "startup-flags", flags,
	                       NULL);

	return GTK_WIDGET (object);
}
/* "row-inserted" handler for the image store: refresh the image
 * position display and the sensitivity of the action groups. */
static void
eom_window_list_store_image_added (GtkTreeModel *tree_model,
                                   GtkTreePath *path,
                                   GtkTreeIter *iter,
                                   gpointer user_data)
{
	EomWindow *window;

	window = EOM_WINDOW (user_data);

	update_image_pos (window);
	update_action_groups_state (window);
}
/* "row-deleted" handler for the image store: refresh the image
 * position display and the sensitivity of the action groups. */
static void
eom_window_list_store_image_removed (GtkTreeModel *tree_model,
                                     GtkTreePath *path,
                                     gpointer user_data)
{
	EomWindow *window;

	window = EOM_WINDOW (user_data);

	update_image_pos (window);
	update_action_groups_state (window);
}
/* "finished" handler for the model-building job: adopts the job's
 * list store, optionally autorotates images via EXIF, hands the store
 * to the thumbnail view, and handles the empty-collection case. */
static void
eom_job_model_cb (EomJobModel *job, gpointer data)
{
	EomWindow *window;
	EomWindowPrivate *priv;
	gint n_images;

	eom_debug (DEBUG_WINDOW);

#ifdef HAVE_EXIF
	int i;
	EomImage *image;
#endif

	g_return_if_fail (EOM_IS_WINDOW (data));

	window = EOM_WINDOW (data);
	priv = window->priv;

	/* Replace any previously-held store with the job's result. */
	if (priv->store != NULL) {
		g_object_unref (priv->store);
		priv->store = NULL;
	}

	priv->store = g_object_ref (job->store);

	n_images = eom_list_store_length (EOM_LIST_STORE (priv->store));

#ifdef HAVE_EXIF
	/* If enabled, rotate each image according to its EXIF orientation. */
	if (g_settings_get_boolean (priv->view_settings, EOM_CONF_VIEW_AUTOROTATE)) {
		for (i = 0; i < n_images; i++) {
			image = eom_list_store_get_image_by_pos (priv->store, i);
			eom_image_autorotate (image);
			g_object_unref (image);
		}
	}
#endif

	eom_thumb_view_set_model (EOM_THUMB_VIEW (priv->thumbview), priv->store);

	/* Keep the window's position display in sync with store changes. */
	g_signal_connect (G_OBJECT (priv->store),
	                  "row-inserted",
	                  G_CALLBACK (eom_window_list_store_image_added),
	                  window);

	g_signal_connect (G_OBJECT (priv->store),
	                  "row-deleted",
	                  G_CALLBACK (eom_window_list_store_image_removed),
	                  window);

	/* Nothing loadable was found: show an error message area (naming
	 * the single requested file, if there was exactly one) and emit
	 * "prepared" so the window is still shown. */
	if (n_images == 0) {
		gint n_files;

		priv->status = EOM_WINDOW_STATUS_NORMAL;
		update_action_groups_state (window);

		n_files = g_slist_length (priv->file_list);

		if (n_files > 0) {
			GtkWidget *message_area;
			GFile *file = NULL;

			if (n_files == 1) {
				file = (GFile *) priv->file_list->data;
			}

			message_area = eom_no_images_error_message_area_new (file);

			eom_window_set_message_area (window, message_area);

			gtk_widget_show (message_area);
		}

		g_signal_emit (window, signals[SIGNAL_PREPARED], 0);
	}
}
/**
 * eom_window_open_file_list:
 * @window: An #EomWindow.
 * @file_list: (element-type GFile): A #GSList of #GFile's.
 *
 * Opens a list of files, adding them to the collection in @window.
 * Files will be checked to be readable and later filtered according
 * with eom_list_store_add_files().
 **/
void
eom_window_open_file_list (EomWindow *window, GSList *file_list)
{
	EomJob *job;

	eom_debug (DEBUG_WINDOW);

	window->priv->status = EOM_WINDOW_STATUS_INIT;

	/* The window keeps a reference on every file in the list. */
	g_slist_foreach (file_list, (GFunc) g_object_ref, NULL);
	window->priv->file_list = file_list;

	/* Build the model asynchronously; eom_job_model_cb adopts the
	 * resulting store once the job finishes. */
	job = eom_job_model_new (file_list);
	g_signal_connect (job,
	                  "finished",
	                  G_CALLBACK (eom_job_model_cb),
	                  window);

	eom_job_queue_add_job (job);
	g_object_unref (job);
}
/**
 * eom_window_get_ui_manager:
 * @window: An #EomWindow.
 *
 * Gets the #GtkUIManager that describes the UI of @window.
 *
 * Returns: (transfer none): A #GtkUIManager.
 **/
GtkUIManager *
eom_window_get_ui_manager (EomWindow *window)
{
	EomWindowPrivate *priv;

	g_return_val_if_fail (EOM_IS_WINDOW (window), NULL);

	priv = window->priv;
	return priv->ui_mgr;
}
/**
 * eom_window_get_mode:
 * @window: An #EomWindow.
 *
 * Gets the mode of @window. See #EomWindowMode for details.
 *
 * Returns: An #EomWindowMode.
 **/
EomWindowMode
eom_window_get_mode (EomWindow *window)
{
	EomWindowPrivate *priv;

	g_return_val_if_fail (EOM_IS_WINDOW (window), EOM_WINDOW_MODE_UNKNOWN);

	priv = window->priv;
	return priv->mode;
}
/**
 * eom_window_set_mode:
 * @window: an #EomWindow.
 * @mode: an #EomWindowMode value.
 *
 * Changes the mode of @window to normal, fullscreen, or slideshow.
 * See #EomWindowMode for details.
 **/
void
eom_window_set_mode (EomWindow *window, EomWindowMode mode)
{
	g_return_if_fail (EOM_IS_WINDOW (window));

	/* Nothing to do if we're already in the requested mode. */
	if (window->priv->mode == mode)
		return;

	if (mode == EOM_WINDOW_MODE_NORMAL) {
		/* Leaving slideshow also tears down its timeout machinery,
		 * hence the extra flag. */
		eom_window_stop_fullscreen (window,
		                            window->priv->mode == EOM_WINDOW_MODE_SLIDESHOW);
	} else if (mode == EOM_WINDOW_MODE_FULLSCREEN) {
		eom_window_run_fullscreen (window, FALSE);
	} else if (mode == EOM_WINDOW_MODE_SLIDESHOW) {
		eom_window_run_fullscreen (window, TRUE);
	}
	/* EOM_WINDOW_MODE_UNKNOWN: deliberately ignored. */
}
/**
 * eom_window_get_store:
 * @window: An #EomWindow.
 *
 * Gets the #EomListStore that contains the images in the collection
 * of @window.
 *
 * Returns: (transfer none): an #EomListStore.
 **/
EomListStore *
eom_window_get_store (EomWindow *window)
{
	EomWindowPrivate *priv;

	g_return_val_if_fail (EOM_IS_WINDOW (window), NULL);

	priv = window->priv;
	return EOM_LIST_STORE (priv->store);
}
/**
 * eom_window_get_view:
 * @window: An #EomWindow.
 *
 * Gets the #EomScrollView in the window.
 *
 * Returns: (transfer none): the #EomScrollView.
 **/
GtkWidget *
eom_window_get_view (EomWindow *window)
{
	EomWindowPrivate *priv;

	g_return_val_if_fail (EOM_IS_WINDOW (window), NULL);

	priv = window->priv;
	return priv->view;
}
/**
 * eom_window_get_sidebar:
 * @window: An #EomWindow.
 *
 * Gets the sidebar widget of @window.
 *
 * Returns: (transfer none): the #EomSidebar.
 **/
GtkWidget *
eom_window_get_sidebar (EomWindow *window)
{
	EomWindowPrivate *priv;

	g_return_val_if_fail (EOM_IS_WINDOW (window), NULL);

	priv = window->priv;
	return priv->sidebar;
}
/**
 * eom_window_get_thumb_view:
 * @window: an #EomWindow.
 *
 * Gets the thumbnails view in @window.
 *
 * Returns: (transfer none): an #EomThumbView.
 **/
GtkWidget *
eom_window_get_thumb_view (EomWindow *window)
{
	EomWindowPrivate *priv;

	g_return_val_if_fail (EOM_IS_WINDOW (window), NULL);

	priv = window->priv;
	return priv->thumbview;
}
/**
 * eom_window_get_thumb_nav:
 * @window: an #EomWindow.
 *
 * Gets the thumbnails navigation pane in @window.
 *
 * Returns: (transfer none): an #EomThumbNav.
 **/
GtkWidget *
eom_window_get_thumb_nav (EomWindow *window)
{
	EomWindowPrivate *priv;

	g_return_val_if_fail (EOM_IS_WINDOW (window), NULL);

	priv = window->priv;
	return priv->nav;
}
/**
 * eom_window_get_statusbar:
 * @window: an #EomWindow.
 *
 * Gets the statusbar in @window.
 *
 * Returns: (transfer none): a #EomStatusBar.
 **/
GtkWidget *
eom_window_get_statusbar (EomWindow *window)
{
	EomWindowPrivate *priv;

	g_return_val_if_fail (EOM_IS_WINDOW (window), NULL);

	priv = window->priv;
	return priv->statusbar;
}
/**
 * eom_window_get_image:
 * @window: an #EomWindow.
 *
 * Gets the image currently displayed in @window or %NULL if
 * no image is being displayed.
 *
 * Returns: (transfer none): an #EomImage.
 **/
EomImage *
eom_window_get_image (EomWindow *window)
{
	EomWindowPrivate *priv;

	g_return_val_if_fail (EOM_IS_WINDOW (window), NULL);

	priv = window->priv;
	return priv->image;
}
/**
 * eom_window_is_empty:
 * @window: an #EomWindow.
 *
 * Tells whether @window is currently empty or not.
 *
 * Returns: %TRUE if @window has no images, %FALSE otherwise.
 **/
gboolean
eom_window_is_empty (EomWindow *window)
{
	EomWindowPrivate *priv;

	eom_debug (DEBUG_WINDOW);

	g_return_val_if_fail (EOM_IS_WINDOW (window), FALSE);

	priv = window->priv;

	/* No store yet counts as empty. */
	if (priv->store == NULL)
		return TRUE;

	return eom_list_store_length (EOM_LIST_STORE (priv->store)) == 0;
}
/* Forces the current image to be reloaded from disk by dropping the
 * window's reference to it and reselecting it in the thumbnail view. */
void
eom_window_reload_image (EomWindow *window)
{
	EomWindowPrivate *priv;

	g_return_if_fail (EOM_IS_WINDOW (window));

	priv = window->priv;

	if (priv->image == NULL)
		return;

	/* Release our reference so the reselect below triggers a fresh load. */
	g_object_unref (priv->image);
	priv->image = NULL;

	eom_scroll_view_set_image (EOM_SCROLL_VIEW (eom_window_get_view (window)),
	                           NULL);

	eom_thumb_view_select_single (EOM_THUMB_VIEW (priv->thumbview),
	                              EOM_THUMB_VIEW_SELECT_CURRENT);
}
|
monsta/eom
|
src/eom-window.c
|
C
|
gpl-2.0
| 153,265
|
/*
This file is part of Valgrind, a dynamic binary instrumentation
framework.
Copyright (C) 2004-2012 OpenWorks LLP
info@open-works.net
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
The GNU General Public License is contained in the file COPYING.
Neither the names of the U.S. Department of Energy nor the
University of California nor the names of its contributors may be
used to endorse or promote products derived from this software
without prior written permission.
*/
#include "libvex_basictypes.h"
#include "libvex_ir.h"
#include "libvex.h"
#include "ir_match.h"
#include "main_util.h"
#include "main_globals.h"
#include "host_generic_regs.h"
#include "host_generic_simd64.h"
#include "host_ppc_defs.h"
#define HRcGPR(__mode64) (__mode64 ? HRcInt64 : HRcInt32)
/* Shorthand: build a unary IR expression node. */
static IRExpr* unop ( IROp op, IRExpr* a )
{
   IRExpr* e = IRExpr_Unop(op, a);
   return e;
}
/* Shorthand: build a 32-bit constant IR expression. */
static IRExpr* mkU32 ( UInt i )
{
   IRConst* c = IRConst_U32(i);
   return IRExpr_Const(c);
}
/* Shorthand: build a pattern-matching binder node (see ir_match.h). */
static IRExpr* bind ( Int binder )
{
   IRExpr* e = IRExpr_Binder(binder);
   return e;
}
/* Per-superblock instruction-selection state. */
typedef
   struct {
      /* Types of all IRTemps in the block being translated. */
      IRTypeEnv*   type_env;

      /* IRTemp -> virtual-register map(s).  lookupIRTempPair reads
         Lo/MedLo and lookupIRTempQuad reads all four, so wider values
         are spread across these maps (MedHi/Hi used only in 32-bit
         mode for the widest values). */
      HReg*        vregmapLo;
      HReg*        vregmapMedLo;
      HReg*        vregmapMedHi;
      HReg*        vregmapHi;
      Int          n_vregmap;       /* number of entries in the maps */

      UInt         hwcaps;          /* host capability bits */
      Bool         mode64;          /* True = 64-bit host mode */
      VexAbiInfo*  vbi;             /* ABI details (see doHelperCall) */
      Bool         chainingAllowed;
      Addr64       max_ga;

      /* Output: the instructions selected so far. */
      HInstrArray* code;
      Int          vreg_ctr;        /* next virtual register number */

      /* Last rounding-mode expression installed in the FPSCR, used to
         skip redundant mode switches (see _set_FPU_rounding_mode). */
      IRExpr*      previous_rm;
   }
   ISelEnv;
/* Return the (low-word) virtual register holding IRTemp 'tmp'. */
static HReg lookupIRTemp ( ISelEnv* env, IRTemp tmp )
{
   vassert(tmp >= 0);
   vassert(tmp < env->n_vregmap);
   return env->vregmapLo[tmp];
}
/* Return the register pair holding IRTemp 'tmp': the low word comes
   from the Lo map, the high word from the MedLo map. */
static void lookupIRTempPair ( HReg* vrHI, HReg* vrLO,
                               ISelEnv* env, IRTemp tmp )
{
   vassert(tmp >= 0);
   vassert(tmp < env->n_vregmap);
   /* MedLo must have been populated for this temp. */
   vassert(env->vregmapMedLo[tmp] != INVALID_HREG);
   *vrLO = env->vregmapLo[tmp];
   *vrHI = env->vregmapMedLo[tmp];
}
/* Return the four registers holding IRTemp 'tmp' (32-bit mode only,
   where a 128-bit value is spread over four 32-bit registers). */
static void lookupIRTempQuad ( HReg* vrHi, HReg* vrMedHi, HReg* vrMedLo,
                               HReg* vrLo, ISelEnv* env, IRTemp tmp )
{
   vassert(!env->mode64);
   vassert(tmp >= 0);
   vassert(tmp < env->n_vregmap);
   vassert(env->vregmapMedLo[tmp] != INVALID_HREG);
   *vrHi    = env->vregmapHi[tmp];
   *vrMedHi = env->vregmapMedHi[tmp];
   *vrMedLo = env->vregmapMedLo[tmp];
   *vrLo    = env->vregmapLo[tmp];
}
/* Append 'instr' to the output stream, tracing it if requested. */
static void addInstr ( ISelEnv* env, PPCInstr* instr )
{
   Bool trace = (vex_traceflags & VEX_TRACE_VCODE) != 0;
   addHInstr(env->code, instr);
   if (trace) {
      ppPPCInstr(instr, env->mode64);
      vex_printf("\n");
   }
}
/* Allocate a fresh virtual integer register (width follows mode64). */
static HReg newVRegI ( ISelEnv* env )
{
   return mkHReg(env->vreg_ctr++, HRcGPR(env->mode64), True);
}
/* Allocate a fresh virtual 64-bit floating-point register. */
static HReg newVRegF ( ISelEnv* env )
{
   return mkHReg(env->vreg_ctr++, HRcFlt64, True);
}
/* Allocate a fresh virtual 128-bit vector register. */
static HReg newVRegV ( ISelEnv* env )
{
   return mkHReg(env->vreg_ctr++, HRcVec128, True);
}
/* ------------------- Forward declarations -------------------
   For each kind of IR expression there is a _wrk function that does
   the actual selection and a same-named wrapper that adds sanity
   checks on the result (see iselWordExpr_R below for the pattern;
   presumably the other wrappers follow it — the bodies are further
   down the file). */
static HReg        iselWordExpr_R_wrk ( ISelEnv* env, IRExpr* e );
static HReg        iselWordExpr_R     ( ISelEnv* env, IRExpr* e );

static PPCRH*      iselWordExpr_RH_wrk ( ISelEnv* env,
                                         Bool syned, IRExpr* e );
static PPCRH*      iselWordExpr_RH     ( ISelEnv* env,
                                         Bool syned, IRExpr* e );

static PPCRI*      iselWordExpr_RI_wrk ( ISelEnv* env, IRExpr* e );
static PPCRI*      iselWordExpr_RI     ( ISelEnv* env, IRExpr* e );

static PPCRH*      iselWordExpr_RH5u_wrk ( ISelEnv* env, IRExpr* e );
static PPCRH*      iselWordExpr_RH5u     ( ISelEnv* env, IRExpr* e );

static PPCRH*      iselWordExpr_RH6u_wrk ( ISelEnv* env, IRExpr* e );
static PPCRH*      iselWordExpr_RH6u     ( ISelEnv* env, IRExpr* e );

static PPCAMode*   iselWordExpr_AMode_wrk ( ISelEnv* env, IRExpr* e, IRType xferTy );
static PPCAMode*   iselWordExpr_AMode     ( ISelEnv* env, IRExpr* e, IRType xferTy );

static void        iselInt128Expr_to_32x4_wrk ( HReg* rHi, HReg* rMedHi,
                                                HReg* rMedLo, HReg* rLo,
                                                ISelEnv* env, IRExpr* e );
static void        iselInt128Expr_to_32x4     ( HReg* rHi, HReg* rMedHi,
                                                HReg* rMedLo, HReg* rLo,
                                                ISelEnv* env, IRExpr* e );

static void        iselInt64Expr_wrk ( HReg* rHi, HReg* rLo,
                                       ISelEnv* env, IRExpr* e );
static void        iselInt64Expr     ( HReg* rHi, HReg* rLo,
                                       ISelEnv* env, IRExpr* e );

static void        iselInt128Expr_wrk ( HReg* rHi, HReg* rLo,
                                        ISelEnv* env, IRExpr* e );
static void        iselInt128Expr     ( HReg* rHi, HReg* rLo,
                                        ISelEnv* env, IRExpr* e );

static PPCCondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e );
static PPCCondCode iselCondCode     ( ISelEnv* env, IRExpr* e );

static HReg        iselDblExpr_wrk ( ISelEnv* env, IRExpr* e );
static HReg        iselDblExpr     ( ISelEnv* env, IRExpr* e );

static HReg        iselFltExpr_wrk ( ISelEnv* env, IRExpr* e );
static HReg        iselFltExpr     ( ISelEnv* env, IRExpr* e );

static HReg        iselVecExpr_wrk ( ISelEnv* env, IRExpr* e );
static HReg        iselVecExpr     ( ISelEnv* env, IRExpr* e );

static HReg        iselDfp64Expr_wrk ( ISelEnv* env, IRExpr* e );
static HReg        iselDfp64Expr     ( ISelEnv* env, IRExpr* e );
static void        iselDfp128Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env,
                                        IRExpr* e );
static void        iselDfp128Expr     ( HReg* rHi, HReg* rLo, ISelEnv* env,
                                        IRExpr* e );
/* Build a register-to-register move, encoded as OR rD,rS,rS (the
   classic PPC "mr" idiom).  Both registers must be the same integer
   class. */
static PPCInstr* mk_iMOVds_RR ( HReg r_dst, HReg r_src )
{
   vassert(hregClass(r_dst) == hregClass(r_src));
   vassert(hregClass(r_src) ==  HRcInt32 ||
           hregClass(r_src) ==  HRcInt64);
   return PPCInstr_Alu(Palu_OR, r_dst, r_src, PPCRH_Reg(r_src));
}
/* Release n bytes of stack scratch space: sp += n.  n must be a
   small multiple of 16 (quadword alignment). */
static void add_to_sp ( ISelEnv* env, UInt n )
{
   HReg sp = StackFramePtr(env->mode64);
   vassert(n < 256 && (n%16) == 0);
   addInstr(env, PPCInstr_Alu( Palu_ADD, sp, sp,
                               PPCRH_Imm(True,toUShort(n)) ));
}
/* Reserve n bytes of stack scratch space: sp -= n.  n must be a
   small multiple of 16 (quadword alignment). */
static void sub_from_sp ( ISelEnv* env, UInt n )
{
   HReg sp = StackFramePtr(env->mode64);
   vassert(n < 256 && (n%16) == 0);
   addInstr(env, PPCInstr_Alu( Palu_SUB, sp, sp,
                               PPCRH_Imm(True,toUShort(n)) ));
}
/* Return (in a new register) a 16-aligned address just above the
   current stack pointer: r = (sp + 16) & ~15, i.e. somewhere in
   (sp, sp+16].  Callers must have reserved the space beforehand
   (see sub_from_sp). */
static HReg get_sp_aligned16 ( ISelEnv* env )
{
   HReg       r = newVRegI(env);
   HReg align16 = newVRegI(env);
   addInstr(env, mk_iMOVds_RR(r, StackFramePtr(env->mode64)));
   /* Bump past the current sp ... */
   addInstr(env, PPCInstr_Alu( Palu_ADD, r, r,
                               PPCRH_Imm(True,toUShort(16)) ));
   /* ... then clear the low 4 bits to align down to 16. */
   addInstr(env,
            PPCInstr_LI(align16, 0xFFFFFFFFFFFFFFF0ULL, env->mode64));
   addInstr(env, PPCInstr_Alu(Palu_AND, r,r, PPCRH_Reg(align16)));
   return r;
}
/* 32-bit mode only: move a hi:lo pair of 32-bit GPRs into a single
   FPR by bouncing the bits through 16 bytes of stack scratch space
   (stores the two words, then does one 8-byte FP load). */
static HReg mk_LoadRR32toFPR ( ISelEnv* env,
                               HReg r_srcHi, HReg r_srcLo )
{
   HReg fr_dst = newVRegF(env);
   PPCAMode *am_addr0, *am_addr1;

   vassert(!env->mode64);
   vassert(hregClass(r_srcHi) == HRcInt32);
   vassert(hregClass(r_srcLo) == HRcInt32);

   sub_from_sp( env, 16 );        /* Move SP down 16 bytes */
   am_addr0 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );
   am_addr1 = PPCAMode_IR( 4, StackFramePtr(env->mode64) );

   /* Store hi word at offset 0, lo word at offset 4 (big-endian). */
   addInstr(env, PPCInstr_Store( 4, am_addr0, r_srcHi, env->mode64 ));
   addInstr(env, PPCInstr_Store( 4, am_addr1, r_srcLo, env->mode64 ));

   /* Reload the 8 bytes as a double. */
   addInstr(env, PPCInstr_FpLdSt(True, 8, fr_dst, am_addr0));

   add_to_sp( env, 16 );          /* Reset SP */
   return fr_dst;
}
/* 64-bit mode only: move a 64-bit GPR into an FPR by bouncing the
   bits through 16 bytes of stack scratch space. */
static HReg mk_LoadR64toFPR ( ISelEnv* env, HReg r_src )
{
   HReg fr_dst = newVRegF(env);
   PPCAMode *am_addr0;

   vassert(env->mode64);
   vassert(hregClass(r_src) == HRcInt64);

   sub_from_sp( env, 16 );        /* Move SP down 16 bytes */
   am_addr0 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );

   /* Store the GPR, then reload the same 8 bytes into the FPR. */
   addInstr(env, PPCInstr_Store( 8, am_addr0, r_src, env->mode64 ));
   addInstr(env, PPCInstr_FpLdSt(True, 8, fr_dst, am_addr0));

   add_to_sp( env, 16 );          /* Reset SP */
   return fr_dst;
}
/* Return a copy of 'am' with its displacement advanced by 4 bytes.
   Only reg+imm modes with an in-range result are supported. */
static PPCAMode* advance4 ( ISelEnv* env, PPCAMode* am )
{
   PPCAMode* am4 = dopyPPCAMode( am );
   if (am4->tag != Pam_IR || am4->Pam.IR.index + 4 > 32767)
      vpanic("advance4(ppc,host)");
   am4->Pam.IR.index += 4;
   return am4;
}
/* Compute an amode addressing a circular guest-state array element:
   guest_state + descr->base + ((off + bias) % nElems) * elemSz.
   The modulo is done with an AND, so nElems must be a power of two
   (only 16 and 32 are accepted). */
static
PPCAMode* genGuestArrayOffset ( ISelEnv* env, IRRegArray* descr,
                                IRExpr* off, Int bias )
{
   HReg rtmp, roff;
   Int  elemSz = sizeofIRType(descr->elemTy);
   Int  nElems = descr->nElems;
   Int  shift  = 0;

   /* Sanity-check the array description against what the code below
      can actually handle. */
   if (nElems != 16 && nElems != 32)
      vpanic("genGuestArrayOffset(ppc host)(1)");

   switch (elemSz) {
      case 4:  shift = 2; break;
      case 8:  shift = 3; break;
      default: vpanic("genGuestArrayOffset(ppc host)(2)");
   }

   if (bias < -100 || bias > 100)
      vpanic("genGuestArrayOffset(ppc host)(3)");
   if (descr->base < 0 || descr->base > 5000)
      vpanic("genGuestArrayOffset(ppc host)(4)");

   roff = iselWordExpr_R(env, off);
   rtmp = newVRegI(env);
   /* rtmp = off + bias */
   addInstr(env, PPCInstr_Alu(
                    Palu_ADD,
                    rtmp, roff,
                    PPCRH_Imm(True, toUShort(bias))));
   /* rtmp &= nElems-1  (wrap into the array) */
   addInstr(env, PPCInstr_Alu(
                    Palu_AND,
                    rtmp, rtmp,
                    PPCRH_Imm(False, toUShort(nElems-1))));
   /* rtmp <<= shift  (scale index to byte offset) */
   addInstr(env, PPCInstr_Shft(
                    Pshft_SHL,
                    env->mode64 ? False : True,
                    rtmp, rtmp,
                    PPCRH_Imm(False, toUShort(shift))));
   /* rtmp += descr->base */
   addInstr(env, PPCInstr_Alu(
                    Palu_ADD,
                    rtmp, rtmp,
                    PPCRH_Imm(True, toUShort(descr->base))));
   return
      PPCAMode_RR( GuestStatePtr(env->mode64), rtmp );
}
/* Conservatively decide whether evaluating 'e' could touch fixed
   (real) registers.  Only trivially-evaluable expressions are
   guaranteed safe. */
static
Bool mightRequireFixedRegs ( IRExpr* e )
{
   if (e->tag == Iex_RdTmp || e->tag == Iex_Const || e->tag == Iex_Get)
      return False;
   return True;
}
/* Generate code for a call to helper 'cee' with arguments 'args',
   optionally conditional on 'guard', and optionally (passBBP) passing
   the guest state pointer as an implicit first argument.

   Two schemes are used.  The "fast" scheme computes each argument
   directly into its real argument register, and is only safe when the
   call is unconditional and no argument's evaluation could itself
   clobber fixed registers.  The "slow" scheme computes every argument
   into a fresh virtual register first and only moves them into the
   real registers at the end, after the guard has been evaluated. */
static
void doHelperCall ( ISelEnv* env,
                    Bool passBBP,
                    IRExpr* guard, IRCallee* cee, IRExpr** args )
{
   PPCCondCode cc;
   HReg        argregs[PPC_N_REGPARMS];
   HReg        tmpregs[PPC_N_REGPARMS];
   Bool        go_fast;
   Int         n_args, i, argreg;
   UInt        argiregs;
   ULong       target;
   Bool        mode64 = env->mode64;

   /* Some 32-bit ABIs require 64-bit args to start in an
      even-numbered register; the ABI info says whether this one does. */
   Bool regalign_int64s
      = (!mode64) && env->vbi->host_ppc32_regalign_int64_args;

   /* Count the arguments (the array is NULL-terminated). */
   n_args = 0;
   for (i = 0; args[i]; i++)
      n_args++;

   if (PPC_N_REGPARMS < n_args + (passBBP ? 1 : 0)) {
      vpanic("doHelperCall(PPC): cannot currently handle > 8 args");
   }

   /* Integer argument registers r3..r10, per the PPC convention. */
   argregs[0] = hregPPC_GPR3(mode64);
   argregs[1] = hregPPC_GPR4(mode64);
   argregs[2] = hregPPC_GPR5(mode64);
   argregs[3] = hregPPC_GPR6(mode64);
   argregs[4] = hregPPC_GPR7(mode64);
   argregs[5] = hregPPC_GPR8(mode64);
   argregs[6] = hregPPC_GPR9(mode64);
   argregs[7] = hregPPC_GPR10(mode64);
   /* Bitmask of real registers that end up carrying arguments; the
      call instruction needs it (bit i+3 <-> GPR(i+3)). */
   argiregs = 0;

   tmpregs[0] = tmpregs[1] = tmpregs[2] =
   tmpregs[3] = tmpregs[4] = tmpregs[5] =
   tmpregs[6] = tmpregs[7] = INVALID_HREG;

   /* The fast scheme is ruled out by any non-trivially-true guard ... */
   go_fast = True;

   if (guard) {
      if (guard->tag == Iex_Const
          && guard->Iex.Const.con->tag == Ico_U1
          && guard->Iex.Const.con->Ico.U1 == True) {
         /* unconditional -- still fast */
      } else {
         go_fast = False;
      }
   }

   /* ... and by any argument whose evaluation might need fixed regs. */
   if (go_fast) {
      for (i = 0; i < n_args; i++) {
         if (mightRequireFixedRegs(args[i])) {
            go_fast = False;
            break;
         }
      }
   }

   if (go_fast) {

      /* FAST SCHEME: evaluate each arg straight into its real reg. */
      argreg = 0;
      if (passBBP) {
         argiregs |= (1 << (argreg+3));
         addInstr(env, mk_iMOVds_RR( argregs[argreg],
                                     GuestStatePtr(mode64) ));
         argreg++;
      }

      for (i = 0; i < n_args; i++) {
         vassert(argreg < PPC_N_REGPARMS);
         vassert(typeOfIRExpr(env->type_env, args[i]) == Ity_I32 ||
                 typeOfIRExpr(env->type_env, args[i]) == Ity_I64);
         if (!mode64) {
            if (typeOfIRExpr(env->type_env, args[i]) == Ity_I32) {
               argiregs |= (1 << (argreg+3));
               addInstr(env,
                        mk_iMOVds_RR( argregs[argreg],
                                      iselWordExpr_R(env, args[i]) ));
            } else { // Ity_I64: takes two 32-bit registers
               HReg rHi, rLo;
               if (regalign_int64s && (argreg%2) == 1)
                  argreg++;   /* ABI: skip to an even register pair */
               vassert(argreg < PPC_N_REGPARMS-1);
               iselInt64Expr(&rHi,&rLo, env, args[i]);
               argiregs |= (1 << (argreg+3));
               addInstr(env, mk_iMOVds_RR( argregs[argreg++], rHi ));
               argiregs |= (1 << (argreg+3));
               addInstr(env, mk_iMOVds_RR( argregs[argreg], rLo));
            }
         } else { // mode64: one register per argument
            argiregs |= (1 << (argreg+3));
            addInstr(env, mk_iMOVds_RR( argregs[argreg],
                                        iselWordExpr_R(env, args[i]) ));
         }
         argreg++;
      }

      /* Fast scheme only applies for unconditional calls. */
      cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );

   } else {

      /* SLOW SCHEME: evaluate all args into virtual registers first. */
      argreg = 0;

      if (passBBP) {
         /* guest state ptr is the first arg */
         tmpregs[argreg] = newVRegI(env);
         addInstr(env, mk_iMOVds_RR( tmpregs[argreg],
                                     GuestStatePtr(mode64) ));
         argreg++;
      }

      for (i = 0; i < n_args; i++) {
         vassert(argreg < PPC_N_REGPARMS);
         vassert(typeOfIRExpr(env->type_env, args[i]) == Ity_I32 ||
                 typeOfIRExpr(env->type_env, args[i]) == Ity_I64);
         if (!mode64) {
            if (typeOfIRExpr(env->type_env, args[i]) == Ity_I32) {
               tmpregs[argreg] = iselWordExpr_R(env, args[i]);
            } else { // Ity_I64: takes two 32-bit registers
               HReg rHi, rLo;
               if (regalign_int64s && (argreg%2) == 1)
                  argreg++;   /* ABI: skip to an even register pair */
               vassert(argreg < PPC_N_REGPARMS-1);
               iselInt64Expr(&rHi,&rLo, env, args[i]);
               tmpregs[argreg++] = rHi;
               tmpregs[argreg]   = rLo;
            }
         } else { // mode64: one register per argument
            tmpregs[argreg] = iselWordExpr_R(env, args[i]);
         }
         argreg++;
      }

      /* Compute the condition AFTER the args, so arg evaluation
         cannot trash the condition registers. */
      cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );
      if (guard) {
         if (guard->tag == Iex_Const
             && guard->Iex.Const.con->tag == Ico_U1
             && guard->Iex.Const.con->Ico.U1 == True) {
            /* unconditional -- do nothing */
         } else {
            cc = iselCondCode( env, guard );
         }
      }

      /* Finally, move the args into the real registers. */
      for (i = 0; i < argreg; i++) {
         if (tmpregs[i] == INVALID_HREG)  // Skip invalid regs
            continue;
         argiregs |= (1 << (i+3));
         addInstr( env, mk_iMOVds_RR( argregs[i], tmpregs[i] ) );
      }
   }

   target = mode64 ? Ptr_to_ULong(cee->addr) :
                     toUInt(Ptr_to_ULong(cee->addr));

   /* Emit the (possibly conditional) call itself. */
   addInstr(env, PPCInstr_Call( cc, (Addr64)target, argiregs ));
}
/* Convert an IR rounding-mode value into the PPC FPSCR.RN encoding,
   via  rmPPC = rmIR XOR ((rmIR << 1) & 3).  Checking the mapping:
     IR 0 -> 0, IR 1 -> 3, IR 2 -> 2, IR 3 -> 1. */
static HReg roundModeIRtoPPC ( ISelEnv* env, HReg r_rmIR )
{
   HReg r_rmPPC = newVRegI(env);
   HReg r_tmp1  = newVRegI(env);
   HReg r_tmp2  = newVRegI(env);

   vassert(hregClass(r_rmIR) == HRcGPR(env->mode64));
   /* tmp1 = rmIR << 1 */
   addInstr(env, PPCInstr_Shft(Pshft_SHL, True,
                               r_tmp1, r_rmIR, PPCRH_Imm(False,1)));
   /* tmp2 = tmp1 & 3 */
   addInstr( env, PPCInstr_Alu( Palu_AND,
                                r_tmp2, r_tmp1, PPCRH_Imm( False, 3 ) ) );
   /* rmPPC = rmIR ^ tmp2 */
   addInstr( env, PPCInstr_Alu( Palu_XOR,
                                r_rmPPC, r_rmIR, PPCRH_Reg( r_tmp2 ) ) );
   return r_rmPPC;
}
/* Install the rounding mode described by IR expression 'mode' into
   the FPSCR (binary FP rounding field, or the DFP field if dfp_rm).
   Skips the update entirely when 'mode' is the same IRTemp as the
   last mode installed. */
static
void _set_FPU_rounding_mode ( ISelEnv* env, IRExpr* mode, Bool dfp_rm )
{
   HReg fr_src = newVRegF(env);
   HReg r_src;

   vassert(typeOfIRExpr(env->type_env,mode) == Ity_I32);

   /* Cache: if this is literally the same temp as last time, the
      FPSCR already holds the right value. */
   if (env->previous_rm
       && env->previous_rm->tag == Iex_RdTmp
       && mode->tag == Iex_RdTmp
       && env->previous_rm->Iex.RdTmp.tmp == mode->Iex.RdTmp.tmp) {
      /* Also assert same type, just in case. */
      vassert(typeOfIRExpr(env->type_env, env->previous_rm) == Ity_I32);
      return;
   }

   /* Remember what we installed for next time. */
   env->previous_rm = mode;

   /* Translate the IR encoding into the PPC encoding. */
   r_src = roundModeIRtoPPC( env, iselWordExpr_R(env, mode) );

   /* The mode must be presented to mtfsf via an FPR; build its bit
      pattern in a GPR (or GPR pair) and bounce it through memory. */
   if (env->mode64) {
      if (dfp_rm) {
         /* DFP mode lives in the upper half of the 64-bit image
            (shift left 32) -- presumably the FPSCR.DRN field; see the
            ISA's mtfsf description to confirm. */
         HReg r_tmp1 = newVRegI( env );
         addInstr( env,
                   PPCInstr_Shft( Pshft_SHL, False,
                                  r_tmp1, r_src, PPCRH_Imm( False, 32 ) ) );
         fr_src = mk_LoadR64toFPR( env, r_tmp1 );
      } else {
         fr_src = mk_LoadR64toFPR( env, r_src );
      }
   } else {
      if (dfp_rm) {
         /* 32-bit mode: DFP mode goes in the high word, zero low. */
         HReg r_zero = newVRegI( env );
         addInstr( env, PPCInstr_LI( r_zero, 0, env->mode64 ) );
         fr_src = mk_LoadRR32toFPR( env, r_src, r_zero );
      } else {
         fr_src = mk_LoadRR32toFPR( env, r_src, r_src );
      }
   }

   /* Finally, write it into the FPSCR. */
   addInstr(env, PPCInstr_FpLdFPSCR( fr_src, dfp_rm ));
}
/* Install 'mode' as the binary-FP rounding mode. */
static void set_FPU_rounding_mode ( ISelEnv* env, IRExpr* mode )
{
   _set_FPU_rounding_mode(env, mode, False);
}
/* Install 'mode' as the decimal-FP (DFP) rounding mode. */
static void set_FPU_DFP_rounding_mode ( ISelEnv* env, IRExpr* mode )
{
   _set_FPU_rounding_mode(env, mode, True);
}
/* Return a new vector register holding all zeroes, via the
   self-XOR idiom (vxor v,v,v) -- the initial contents of the
   register are irrelevant. */
static HReg generate_zeroes_V128 ( ISelEnv* env )
{
   HReg dst = newVRegV(env);
   addInstr(env, PPCInstr_AvBinary(Pav_XOR, dst, dst, dst));
   return dst;
}
/* Return a new vector register holding all ones, by splatting the
   signed 5-bit immediate -1 across every byte lane. */
static HReg generate_ones_V128 ( ISelEnv* env )
{
   HReg dst = newVRegV(env);
   addInstr(env, PPCInstr_AvSplat(8, dst, PPCVI5s_Imm(-1)));
   return dst;
}
/* Duplicate (splat) the value of integer expression 'e' (I8/I16/I32)
   across all lanes of a new V128 register.  Small immediates use the
   vsplt* 5-bit-immediate forms, possibly synthesised from two splats;
   everything else goes through memory and a vector splat-from-lane. */
static HReg mk_AvDuplicateRI( ISelEnv* env, IRExpr* e )
{
   HReg   r_src;
   HReg   dst = newVRegV(env);
   PPCRI* ri  = iselWordExpr_RI(env, e);
   IRType ty  = typeOfIRExpr(env->type_env,e);
   UInt   sz  = (ty == Ity_I8) ? 8 : (ty == Ity_I16) ? 16 : 32;
   vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32);

   /* special case: immediate */
   if (ri->tag == Pri_Imm) {
      Int simm32 = (Int)ri->Pri.Imm;

      /* figure out if it's do-able with imm splats */
      if (simm32 >= -32 && simm32 <= 31) {
         Char simm6 = (Char)simm32;
         if (simm6 > 15) {           /* 16:31 */
            /* AvSplat takes only [-16,15]; build simm6 as
               (simm6-16) - (-16) with a per-lane subtract. */
            HReg v1 = newVRegV(env);
            HReg v2 = newVRegV(env);
            addInstr(env, PPCInstr_AvSplat(sz, v1, PPCVI5s_Imm(-16)));
            addInstr(env, PPCInstr_AvSplat(sz, v2, PPCVI5s_Imm(simm6-16)));
            addInstr(env,
               (sz== 8) ? PPCInstr_AvBin8x16(Pav_SUBU, dst, v2, v1) :
               (sz==16) ? PPCInstr_AvBin16x8(Pav_SUBU, dst, v2, v1)
                        : PPCInstr_AvBin32x4(Pav_SUBU, dst, v2, v1) );
            return dst;
         }
         if (simm6 < -16) {          /* -32:-17 */
            /* Likewise, build simm6 as (simm6+16) + (-16). */
            HReg v1 = newVRegV(env);
            HReg v2 = newVRegV(env);
            addInstr(env, PPCInstr_AvSplat(sz, v1, PPCVI5s_Imm(-16)));
            addInstr(env, PPCInstr_AvSplat(sz, v2, PPCVI5s_Imm(simm6+16)));
            addInstr(env,
               (sz== 8) ? PPCInstr_AvBin8x16(Pav_ADDU, dst, v2, v1) :
               (sz==16) ? PPCInstr_AvBin16x8(Pav_ADDU, dst, v2, v1)
                        : PPCInstr_AvBin32x4(Pav_ADDU, dst, v2, v1) );
            return dst;
         }
         /* simplest form:              -16:15 */
         addInstr(env, PPCInstr_AvSplat(sz, dst, PPCVI5s_Imm(simm6)));
         return dst;
      }

      /* no luck; use the Slow way */
      r_src = newVRegI(env);
      addInstr(env, PPCInstr_LI(r_src, (Long)simm32, env->mode64));
   }
   else {
      r_src = ri->Pri.Reg;
   }

   /* default case: store value into stack, then load into a vector
      register (lane 3) and splat that lane across the result. */
   {
      HReg r_aligned16;
      HReg v_src = newVRegV(env);
      PPCAMode *am_off12;

      sub_from_sp( env, 32 );     // Move SP down
      /* Get a 16-aligned address within our stack space */
      r_aligned16 = get_sp_aligned16( env );
      am_off12 = PPCAMode_IR( 12, r_aligned16 );

      /* Store r_src in low word of 16-aligned mem */
      addInstr(env, PPCInstr_Store( 4, am_off12, r_src, env->mode64 ));

      /* Load src to vector[low lane] */
      addInstr(env, PPCInstr_AvLdSt( True, 4, v_src, am_off12 ) );
      add_to_sp( env, 32 );       // Reset SP

      /* Finally, splat v_src[low lane] to dst */
      addInstr(env, PPCInstr_AvSplat(sz, dst, PPCVI5s_Reg(v_src)));
      return dst;
   }
}
/* Per-lane NaN test for a V128 of 32-bit floats: a lane is NaN iff
   its exponent bits are all ones AND its mantissa is non-zero.
   Returns a vector of per-lane boolean masks. */
static HReg isNan ( ISelEnv* env, HReg vSrc )
{
   HReg zeros, msk_exp, msk_mnt, expt, mnts, vIsNan;

   vassert(hregClass(vSrc) == HRcVec128);

   zeros   = mk_AvDuplicateRI(env, mkU32(0));
   msk_exp = mk_AvDuplicateRI(env, mkU32(0x7F800000));   /* exponent mask */
   msk_mnt = mk_AvDuplicateRI(env, mkU32(0x7FFFFF));     /* mantissa mask */
   expt    = newVRegV(env);
   mnts    = newVRegV(env);
   vIsNan  = newVRegV(env);

   /* expt = (src & expMask) == expMask  (exponent all ones) */
   addInstr(env, PPCInstr_AvBinary(Pav_AND, expt, vSrc, msk_exp));
   addInstr(env, PPCInstr_AvBin32x4(Pav_CMPEQU, expt, expt, msk_exp));
   /* mnts = (src & mntMask) > 0         (mantissa non-zero) */
   addInstr(env, PPCInstr_AvBinary(Pav_AND, mnts, vSrc, msk_mnt));
   addInstr(env, PPCInstr_AvBin32x4(Pav_CMPGTU, mnts, mnts, zeros));
   /* NaN = both conditions hold */
   addInstr(env, PPCInstr_AvBinary(Pav_AND, vIsNan, expt, mnts));
   return vIsNan;
}
/* Select instructions for a word-sized integer expression; wrapper
   around iselWordExpr_R_wrk that sanity-checks the returned register
   (correct class for the mode, and virtual). */
static HReg iselWordExpr_R ( ISelEnv* env, IRExpr* e )
{
   HReg r = iselWordExpr_R_wrk(env, e);
   /* sanity checks ... */
#  if 0
   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
#  endif

   vassert(hregClass(r) == HRcGPR(env->mode64));
   vassert(hregIsVirtual(r));
   return r;
}
/* DO NOT CALL DIRECTLY: use the iselWordExpr_R wrapper, which
   sanity-checks the result.  Selects PPC instructions computing an
   integer-typed IR expression (I8/I16/I32, plus I64 in 64-bit mode)
   into a fresh virtual register, which is returned.  The order of the
   addInstr() calls IS the emitted instruction order — do not reorder. */
static HReg iselWordExpr_R_wrk ( ISelEnv* env, IRExpr* e )
{
   Bool mode64 = env->mode64;
   MatchInfo mi;
   DECLARE_PATTERN(p_32to1_then_1Uto8);
   IRType ty = typeOfIRExpr(env->type_env,e);
   vassert(ty == Ity_I8 || ty == Ity_I16 ||
           ty == Ity_I32 || ((ty == Ity_I64) && mode64));
   switch (e->tag) {
   /* ------ read of an IR temporary: already in a vreg ------ */
   case Iex_RdTmp:
      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
   /* ------ memory load; only big-endian is handled ------ */
   case Iex_Load: {
      HReg r_dst;
      PPCAMode* am_addr;
      if (e->Iex.Load.end != Iend_BE)
         goto irreducible;
      r_dst = newVRegI(env);
      am_addr = iselWordExpr_AMode( env, e->Iex.Load.addr, ty );
      addInstr(env, PPCInstr_Load( toUChar(sizeofIRType(ty)),
                                   r_dst, am_addr, mode64 ));
      return r_dst;
   }
   /* ------ binary operators ------ */
   case Iex_Binop: {
      PPCAluOp aluOp;
      PPCShftOp shftOp;
      /* First, is it an add/sub/and/or/xor? */
      switch (e->Iex.Binop.op) {
      case Iop_Add8: case Iop_Add16: case Iop_Add32: case Iop_Add64:
         aluOp = Palu_ADD; break;
      case Iop_Sub8: case Iop_Sub16: case Iop_Sub32: case Iop_Sub64:
         aluOp = Palu_SUB; break;
      case Iop_And8: case Iop_And16: case Iop_And32: case Iop_And64:
         aluOp = Palu_AND; break;
      case Iop_Or8: case Iop_Or16: case Iop_Or32: case Iop_Or64:
         aluOp = Palu_OR; break;
      case Iop_Xor8: case Iop_Xor16: case Iop_Xor32: case Iop_Xor64:
         aluOp = Palu_XOR; break;
      default:
         aluOp = Palu_INVALID; break;
      }
      if (aluOp != Palu_INVALID) {
         HReg r_dst = newVRegI(env);
         HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
         PPCRH* ri_srcR = NULL;
         /* add/sub take a signed 16-bit immediate; the logicals take
            an unsigned one — hence the differing 'syned' flags. */
         switch (aluOp) {
         case Palu_ADD: case Palu_SUB:
            ri_srcR = iselWordExpr_RH(env, True,
                                      e->Iex.Binop.arg2);
            break;
         case Palu_AND: case Palu_OR: case Palu_XOR:
            ri_srcR = iselWordExpr_RH(env, False,
                                      e->Iex.Binop.arg2);
            break;
         default:
            vpanic("iselWordExpr_R_wrk-aluOp-arg2");
         }
         addInstr(env, PPCInstr_Alu(aluOp, r_dst, r_srcL, ri_srcR));
         return r_dst;
      }
      /* Next, shifts. */
      switch (e->Iex.Binop.op) {
      case Iop_Shl8: case Iop_Shl16: case Iop_Shl32: case Iop_Shl64:
         shftOp = Pshft_SHL; break;
      case Iop_Shr8: case Iop_Shr16: case Iop_Shr32: case Iop_Shr64:
         shftOp = Pshft_SHR; break;
      case Iop_Sar8: case Iop_Sar16: case Iop_Sar32: case Iop_Sar64:
         shftOp = Pshft_SAR; break;
      default:
         shftOp = Pshft_INVALID; break;
      }
      if (shftOp != Pshft_INVALID) {
         HReg r_dst = newVRegI(env);
         HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
         PPCRH* ri_srcR = NULL;
         /* 5-bit shift amounts in 32-bit mode, 6-bit in 64-bit mode. */
         switch (shftOp) {
         case Pshft_SHL: case Pshft_SHR: case Pshft_SAR:
            if (!mode64)
               ri_srcR = iselWordExpr_RH5u(env, e->Iex.Binop.arg2);
            else
               ri_srcR = iselWordExpr_RH6u(env, e->Iex.Binop.arg2);
            break;
         default:
            vpanic("iselIntExpr_R_wrk-shftOp-arg2");
         }
         /* For narrow right shifts, first position the value at the
            top of the 32-bit register so the shift sees the correct
            sign/zero bits.  NOTE: the vassert(0) makes this path a
            hard stop — it has never been exercised ("awaiting test
            case"); the preceding code is the intended handling. */
         if (shftOp == Pshft_SHR || shftOp == Pshft_SAR) {
            if (ty == Ity_I8 || ty == Ity_I16) {
               PPCRH* amt = PPCRH_Imm(False,
                                      toUShort(ty == Ity_I8 ? 24 : 16));
               HReg tmp = newVRegI(env);
               addInstr(env, PPCInstr_Shft(Pshft_SHL,
                                           True,
                                           tmp, r_srcL, amt));
               addInstr(env, PPCInstr_Shft(shftOp,
                                           True,
                                           tmp, tmp, amt));
               r_srcL = tmp;
               vassert(0);
            }
         }
         if (ty == Ity_I64) {
            vassert(mode64);
            addInstr(env, PPCInstr_Shft(shftOp, False,
                                        r_dst, r_srcL, ri_srcR));
         } else {
            addInstr(env, PPCInstr_Shft(shftOp, True,
                                        r_dst, r_srcL, ri_srcR));
         }
         return r_dst;
      }
      /* 32-bit division (incl. the "extended" E variants). */
      if (e->Iex.Binop.op == Iop_DivS32 ||
          e->Iex.Binop.op == Iop_DivU32 ||
          e->Iex.Binop.op == Iop_DivS32E ||
          e->Iex.Binop.op == Iop_DivU32E) {
         Bool syned = toBool((e->Iex.Binop.op == Iop_DivS32) || (e->Iex.Binop.op == Iop_DivS32E));
         HReg r_dst = newVRegI(env);
         HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
         HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
         addInstr( env,
                   PPCInstr_Div( ( ( e->Iex.Binop.op == Iop_DivU32E )
                                   || ( e->Iex.Binop.op == Iop_DivS32E ) ) ? True
                                 : False,
                                 syned,
                                 True,
                                 r_dst,
                                 r_srcL,
                                 r_srcR ) );
         return r_dst;
      }
      /* 64-bit division (64-bit mode only). */
      if (e->Iex.Binop.op == Iop_DivS64 ||
          e->Iex.Binop.op == Iop_DivU64 || e->Iex.Binop.op == Iop_DivS64E
          || e->Iex.Binop.op == Iop_DivU64E ) {
         Bool syned = toBool((e->Iex.Binop.op == Iop_DivS64) ||(e->Iex.Binop.op == Iop_DivS64E));
         HReg r_dst = newVRegI(env);
         HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
         HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
         vassert(mode64);
         addInstr( env,
                   PPCInstr_Div( ( ( e->Iex.Binop.op == Iop_DivS64E )
                                   || ( e->Iex.Binop.op
                                        == Iop_DivU64E ) ) ? True
                                 : False,
                                 syned,
                                 False,
                                 r_dst,
                                 r_srcL,
                                 r_srcR ) );
         return r_dst;
      }
      /* Low-half multiply: signedness is irrelevant for the low bits. */
      if (e->Iex.Binop.op == Iop_Mul32
          || e->Iex.Binop.op == Iop_Mul64) {
         Bool syned = False;
         Bool sz32 = (e->Iex.Binop.op != Iop_Mul64);
         HReg r_dst = newVRegI(env);
         HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
         HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
         addInstr(env, PPCInstr_MulL(syned, False, sz32,
                                     r_dst, r_srcL, r_srcR));
         return r_dst;
      }
      /* 32x32->64 widening multiply, built from separate lo/hi
         multiplies then glued with a shift and an OR. */
      if (mode64
          && (e->Iex.Binop.op == Iop_MullU32
              || e->Iex.Binop.op == Iop_MullS32)) {
         HReg tLo = newVRegI(env);
         HReg tHi = newVRegI(env);
         HReg r_dst = newVRegI(env);
         Bool syned = toBool(e->Iex.Binop.op == Iop_MullS32);
         HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
         HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
         addInstr(env, PPCInstr_MulL(False,
                                     False, True,
                                     tLo, r_srcL, r_srcR));
         addInstr(env, PPCInstr_MulL(syned,
                                     True, True,
                                     tHi, r_srcL, r_srcR));
         addInstr(env, PPCInstr_Shft(Pshft_SHL, False,
                                     r_dst, tHi, PPCRH_Imm(False,32)));
         addInstr(env, PPCInstr_Alu(Palu_OR,
                                    r_dst, r_dst, PPCRH_Reg(tLo)));
         return r_dst;
      }
      /* CmpORD: extract LT/GT/EQ from CR field 7 (bit 0 masked off). */
      if (e->Iex.Binop.op == Iop_CmpORD32S
          || e->Iex.Binop.op == Iop_CmpORD32U) {
         Bool syned = toBool(e->Iex.Binop.op == Iop_CmpORD32S);
         HReg dst = newVRegI(env);
         HReg srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
         PPCRH* srcR = iselWordExpr_RH(env, syned, e->Iex.Binop.arg2);
         addInstr(env, PPCInstr_Cmp(syned, True,
                                    7, srcL, srcR));
         addInstr(env, PPCInstr_MfCR(dst));
         addInstr(env, PPCInstr_Alu(Palu_AND, dst, dst,
                                    PPCRH_Imm(False,7<<1)));
         return dst;
      }
      if (e->Iex.Binop.op == Iop_CmpORD64S
          || e->Iex.Binop.op == Iop_CmpORD64U) {
         Bool syned = toBool(e->Iex.Binop.op == Iop_CmpORD64S);
         HReg dst = newVRegI(env);
         HReg srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
         PPCRH* srcR = iselWordExpr_RH(env, syned, e->Iex.Binop.arg2);
         vassert(mode64);
         addInstr(env, PPCInstr_Cmp(syned, False,
                                    7, srcL, srcR));
         addInstr(env, PPCInstr_MfCR(dst));
         addInstr(env, PPCInstr_Alu(Palu_AND, dst, dst,
                                    PPCRH_Imm(False,7<<1)));
         return dst;
      }
      /* Unsigned max via compare + conditional move. */
      if (e->Iex.Binop.op == Iop_Max32U) {
         HReg r1 = iselWordExpr_R(env, e->Iex.Binop.arg1);
         HReg r2 = iselWordExpr_R(env, e->Iex.Binop.arg2);
         HReg rdst = newVRegI(env);
         PPCCondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7LT );
         addInstr(env, mk_iMOVds_RR(rdst, r1));
         addInstr(env, PPCInstr_Cmp(False, True,
                                    7, rdst, PPCRH_Reg(r2)));
         addInstr(env, PPCInstr_CMov(cc, rdst, PPCRI_Reg(r2)));
         return rdst;
      }
      /* Concatenate two 32-bit halves into a 64-bit value.
         NOTE(review): the AND writes its result back into r_Lo, which
         is the register returned by iselWordExpr_R for arg2 — if that
         vreg is bound to an IRTemp, this clobbers it for later uses.
         Looks like it should mask into a fresh temp; confirm against
         upstream before relying on this path. */
      if (e->Iex.Binop.op == Iop_32HLto64) {
         HReg r_Hi = iselWordExpr_R(env, e->Iex.Binop.arg1);
         HReg r_Lo = iselWordExpr_R(env, e->Iex.Binop.arg2);
         HReg r_dst = newVRegI(env);
         HReg msk = newVRegI(env);
         vassert(mode64);
         addInstr(env, PPCInstr_Shft(Pshft_SHL, False,
                                     r_dst, r_Hi, PPCRH_Imm(False,32)));
         addInstr(env, PPCInstr_LI(msk, 0xFFFFFFFF, mode64));
         addInstr(env, PPCInstr_Alu( Palu_AND, r_Lo, r_Lo,
                                     PPCRH_Reg(msk) ));
         addInstr(env, PPCInstr_Alu( Palu_OR, r_dst, r_dst,
                                     PPCRH_Reg(r_Lo) ));
         return r_dst;
      }
      /* FP/DFP compare: convert the PPC CR-style result (bits 3..0 =
         LT,GT,EQ,UN) into the IR convention (0x45 family) by bit
         shuffling. */
      if ((e->Iex.Binop.op == Iop_CmpF64) ||
          (e->Iex.Binop.op == Iop_CmpD64) ||
          (e->Iex.Binop.op == Iop_CmpD128)) {
         HReg fr_srcL;
         HReg fr_srcL_lo;
         HReg fr_srcR;
         HReg fr_srcR_lo;
         HReg r_ccPPC = newVRegI(env);
         HReg r_ccIR = newVRegI(env);
         HReg r_ccIR_b0 = newVRegI(env);
         HReg r_ccIR_b2 = newVRegI(env);
         HReg r_ccIR_b6 = newVRegI(env);
         if (e->Iex.Binop.op == Iop_CmpF64) {
            fr_srcL = iselDblExpr(env, e->Iex.Binop.arg1);
            fr_srcR = iselDblExpr(env, e->Iex.Binop.arg2);
            addInstr(env, PPCInstr_FpCmp(r_ccPPC, fr_srcL, fr_srcR));
         } else if (e->Iex.Binop.op == Iop_CmpD64) {
            fr_srcL = iselDfp64Expr(env, e->Iex.Binop.arg1);
            fr_srcR = iselDfp64Expr(env, e->Iex.Binop.arg2);
            addInstr(env, PPCInstr_Dfp64Cmp(r_ccPPC, fr_srcL, fr_srcR));
         } else {
            iselDfp128Expr(&fr_srcL, &fr_srcL_lo, env, e->Iex.Binop.arg1);
            iselDfp128Expr(&fr_srcR, &fr_srcR_lo, env, e->Iex.Binop.arg2);
            addInstr(env, PPCInstr_Dfp128Cmp(r_ccPPC, fr_srcL, fr_srcL_lo,
                                             fr_srcR, fr_srcR_lo));
         }
         /* ccIR bit 0 */
         addInstr(env, PPCInstr_Shft(Pshft_SHR, True,
                                     r_ccIR_b0, r_ccPPC,
                                     PPCRH_Imm(False,0x3)));
         addInstr(env, PPCInstr_Alu(Palu_OR, r_ccIR_b0,
                                    r_ccPPC, PPCRH_Reg(r_ccIR_b0)));
         addInstr(env, PPCInstr_Alu(Palu_AND, r_ccIR_b0,
                                    r_ccIR_b0, PPCRH_Imm(False,0x1)));
         /* ccIR bit 2 */
         addInstr(env, PPCInstr_Shft(Pshft_SHL, True,
                                     r_ccIR_b2, r_ccPPC,
                                     PPCRH_Imm(False,0x2)));
         addInstr(env, PPCInstr_Alu(Palu_AND, r_ccIR_b2,
                                    r_ccIR_b2, PPCRH_Imm(False,0x4)));
         /* ccIR bit 6 */
         addInstr(env, PPCInstr_Shft(Pshft_SHR, True,
                                     r_ccIR_b6, r_ccPPC,
                                     PPCRH_Imm(False,0x1)));
         addInstr(env, PPCInstr_Alu(Palu_OR, r_ccIR_b6,
                                    r_ccPPC, PPCRH_Reg(r_ccIR_b6)));
         addInstr(env, PPCInstr_Shft(Pshft_SHL, True,
                                     r_ccIR_b6, r_ccIR_b6,
                                     PPCRH_Imm(False,0x6)));
         addInstr(env, PPCInstr_Alu(Palu_AND, r_ccIR_b6,
                                    r_ccIR_b6, PPCRH_Imm(False,0x40)));
         /* combine */
         addInstr(env, PPCInstr_Alu(Palu_OR, r_ccIR,
                                    r_ccIR_b0, PPCRH_Reg(r_ccIR_b2)));
         addInstr(env, PPCInstr_Alu(Palu_OR, r_ccIR,
                                    r_ccIR, PPCRH_Reg(r_ccIR_b6)));
         return r_ccIR;
      }
      /* F64 -> I32: convert in FP regs, then bounce the result through
         memory (fctiw + stfiwx + lwz). */
      if ( e->Iex.Binop.op == Iop_F64toI32S ||
           e->Iex.Binop.op == Iop_F64toI32U ) {
         HReg r1 = StackFramePtr(env->mode64);
         PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );
         HReg fsrc = iselDblExpr(env, e->Iex.Binop.arg2);
         HReg ftmp = newVRegF(env);
         HReg idst = newVRegI(env);
         /* arg1 carries the rounding mode */
         set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
         sub_from_sp( env, 16 );
         addInstr(env, PPCInstr_FpCftI(False, True,
                                       e->Iex.Binop.op == Iop_F64toI32S ? True
                                       : False,
                                       True,
                                       ftmp, fsrc));
         addInstr(env, PPCInstr_FpSTFIW(r1, ftmp));
         addInstr(env, PPCInstr_Load(4, idst, zero_r1, mode64));
         if (mode64)
            addInstr(env, PPCInstr_Unary(Pun_EXTSW, idst, idst));
         add_to_sp( env, 16 );
         return idst;
      }
      /* F64 -> I64 (64-bit mode only), same memory-bounce scheme. */
      if (e->Iex.Binop.op == Iop_F64toI64S || e->Iex.Binop.op == Iop_F64toI64U ) {
         if (mode64) {
            HReg r1 = StackFramePtr(env->mode64);
            PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );
            HReg fsrc = iselDblExpr(env, e->Iex.Binop.arg2);
            HReg idst = newVRegI(env);
            HReg ftmp = newVRegF(env);
            set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
            sub_from_sp( env, 16 );
            addInstr(env, PPCInstr_FpCftI(False, False,
                                          ( e->Iex.Binop.op == Iop_F64toI64S ) ? True
                                          : False,
                                          True, ftmp, fsrc));
            addInstr(env, PPCInstr_FpLdSt(False, 8, ftmp, zero_r1));
            addInstr(env, PPCInstr_Load(8, idst, zero_r1, True));
            add_to_sp( env, 16 );
            return idst;
         }
      }
      break;
   }
   /* ------ unary operators ------ */
   case Iex_Unop: {
      IROp op_unop = e->Iex.Unop.op;
      /* 1Uto8(32to1(e)) == e & 1 */
      DEFINE_PATTERN(p_32to1_then_1Uto8,
                     unop(Iop_1Uto8,unop(Iop_32to1,bind(0))));
      if (matchIRExpr(&mi,p_32to1_then_1Uto8,e)) {
         IRExpr* expr32 = mi.bindee[0];
         HReg r_dst = newVRegI(env);
         HReg r_src = iselWordExpr_R(env, expr32);
         addInstr(env, PPCInstr_Alu(Palu_AND, r_dst,
                                    r_src, PPCRH_Imm(False,1)));
         return r_dst;
      }
      /* 16Uto32(load16) folds into a single zero-extending load. */
      {
         DECLARE_PATTERN(p_LDbe16_then_16Uto32);
         DEFINE_PATTERN(p_LDbe16_then_16Uto32,
                        unop(Iop_16Uto32,
                             IRExpr_Load(Iend_BE,Ity_I16,bind(0))) );
         if (matchIRExpr(&mi,p_LDbe16_then_16Uto32,e)) {
            HReg r_dst = newVRegI(env);
            PPCAMode* amode
               = iselWordExpr_AMode( env, mi.bindee[0], Ity_I16 );
            addInstr(env, PPCInstr_Load(2,r_dst,amode, mode64));
            return r_dst;
         }
      }
      switch (op_unop) {
      /* zero-widen by masking */
      case Iop_8Uto16:
      case Iop_8Uto32:
      case Iop_8Uto64:
      case Iop_16Uto32:
      case Iop_16Uto64: {
         HReg r_dst = newVRegI(env);
         HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
         UShort mask = toUShort(op_unop==Iop_16Uto64 ? 0xFFFF :
                                op_unop==Iop_16Uto32 ? 0xFFFF : 0xFF);
         addInstr(env, PPCInstr_Alu(Palu_AND,r_dst,r_src,
                                    PPCRH_Imm(False,mask)));
         return r_dst;
      }
      /* zero-extend 32->64 via shl 32; shr 32 */
      case Iop_32Uto64: {
         HReg r_dst = newVRegI(env);
         HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
         vassert(mode64);
         addInstr(env,
                  PPCInstr_Shft(Pshft_SHL, False,
                                r_dst, r_src, PPCRH_Imm(False,32)));
         addInstr(env,
                  PPCInstr_Shft(Pshft_SHR, False,
                                r_dst, r_dst, PPCRH_Imm(False,32)));
         return r_dst;
      }
      /* sign-widen via shl; sar pair */
      case Iop_8Sto16:
      case Iop_8Sto32:
      case Iop_16Sto32: {
         HReg r_dst = newVRegI(env);
         HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
         UShort amt = toUShort(op_unop==Iop_16Sto32 ? 16 : 24);
         addInstr(env,
                  PPCInstr_Shft(Pshft_SHL, True,
                                r_dst, r_src, PPCRH_Imm(False,amt)));
         addInstr(env,
                  PPCInstr_Shft(Pshft_SAR, True,
                                r_dst, r_dst, PPCRH_Imm(False,amt)));
         return r_dst;
      }
      case Iop_8Sto64:
      case Iop_16Sto64: {
         HReg r_dst = newVRegI(env);
         HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
         UShort amt = toUShort(op_unop==Iop_8Sto64 ? 56 : 48);
         vassert(mode64);
         addInstr(env,
                  PPCInstr_Shft(Pshft_SHL, False,
                                r_dst, r_src, PPCRH_Imm(False,amt)));
         addInstr(env,
                  PPCInstr_Shft(Pshft_SAR, False,
                                r_dst, r_dst, PPCRH_Imm(False,amt)));
         return r_dst;
      }
      /* 32->64 sign extend: a 32-bit sar by 0 sign-extends in one go. */
      case Iop_32Sto64: {
         HReg r_dst = newVRegI(env);
         HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
         vassert(mode64);
         addInstr(env,
                  PPCInstr_Shft(Pshft_SAR, True,
                                r_dst, r_src, PPCRH_Imm(False,0)));
         return r_dst;
      }
      case Iop_Not8:
      case Iop_Not16:
      case Iop_Not32:
      case Iop_Not64: {
         if (op_unop == Iop_Not64) vassert(mode64);
         HReg r_dst = newVRegI(env);
         HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
         addInstr(env, PPCInstr_Unary(Pun_NOT,r_dst,r_src));
         return r_dst;
      }
      case Iop_64HIto32: {
         if (!mode64) {
            /* 32-bit mode: the 64-bit value is already a reg pair. */
            HReg rHi, rLo;
            iselInt64Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
            return rHi;
         } else {
            HReg r_dst = newVRegI(env);
            HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
            addInstr(env,
                     PPCInstr_Shft(Pshft_SHR, False,
                                   r_dst, r_src, PPCRH_Imm(False,32)));
            return r_dst;
         }
      }
      case Iop_64to32: {
         if (!mode64) {
            HReg rHi, rLo;
            iselInt64Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
            return rLo;
         } else {
            /* low half narrowing is a no-op in a 64-bit reg */
            return iselWordExpr_R(env, e->Iex.Unop.arg);
         }
      }
      case Iop_64to16: {
         if (mode64) {
            return iselWordExpr_R(env, e->Iex.Unop.arg);
         }
         break;
      }
      case Iop_16HIto8:
      case Iop_32HIto16: {
         HReg r_dst = newVRegI(env);
         HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
         UShort shift = toUShort(op_unop == Iop_16HIto8 ? 8 : 16);
         addInstr(env,
                  PPCInstr_Shft(Pshft_SHR, True,
                                r_dst, r_src, PPCRH_Imm(False,shift)));
         return r_dst;
      }
      case Iop_128HIto64:
         if (mode64) {
            HReg rHi, rLo;
            iselInt128Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
            return rHi;
         }
         break;
      case Iop_128to64:
         if (mode64) {
            HReg rHi, rLo;
            iselInt128Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
            return rLo;
         }
         break;
      /* 1-bit widening: materialise the condition as 0/1 */
      case Iop_1Uto64:
      case Iop_1Uto32:
      case Iop_1Uto8:
         if ((op_unop != Iop_1Uto64) || mode64) {
            HReg r_dst = newVRegI(env);
            PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg);
            addInstr(env, PPCInstr_Set(cond,r_dst));
            return r_dst;
         }
         break;
      /* 1-bit sign widening: 0/1 then shl/sar to smear the bit */
      case Iop_1Sto8:
      case Iop_1Sto16:
      case Iop_1Sto32: {
         HReg r_dst = newVRegI(env);
         PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg);
         addInstr(env, PPCInstr_Set(cond,r_dst));
         addInstr(env,
                  PPCInstr_Shft(Pshft_SHL, True,
                                r_dst, r_dst, PPCRH_Imm(False,31)));
         addInstr(env,
                  PPCInstr_Shft(Pshft_SAR, True,
                                r_dst, r_dst, PPCRH_Imm(False,31)));
         return r_dst;
      }
      case Iop_1Sto64:
         if (mode64) {
            HReg r_dst = newVRegI(env);
            PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg);
            addInstr(env, PPCInstr_Set(cond,r_dst));
            addInstr(env, PPCInstr_Shft(Pshft_SHL, False,
                                        r_dst, r_dst, PPCRH_Imm(False,63)));
            addInstr(env, PPCInstr_Shft(Pshft_SAR, False,
                                        r_dst, r_dst, PPCRH_Imm(False,63)));
            return r_dst;
         }
         break;
      case Iop_Clz32:
      case Iop_Clz64: {
         HReg r_src, r_dst;
         PPCUnaryOp op_clz = (op_unop == Iop_Clz32) ? Pun_CLZ32 :
                                                      Pun_CLZ64;
         if (op_unop == Iop_Clz64 && !mode64)
            goto irreducible;
         r_dst = newVRegI(env);
         r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
         addInstr(env, PPCInstr_Unary(op_clz,r_dst,r_src));
         return r_dst;
      }
      /* Left(x) = x | -x */
      case Iop_Left8:
      case Iop_Left32:
      case Iop_Left64: {
         HReg r_src, r_dst;
         if (op_unop == Iop_Left64 && !mode64)
            goto irreducible;
         r_dst = newVRegI(env);
         r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
         addInstr(env, PPCInstr_Unary(Pun_NEG,r_dst,r_src));
         addInstr(env, PPCInstr_Alu(Palu_OR, r_dst, r_dst, PPCRH_Reg(r_src)));
         return r_dst;
      }
      /* CmpwNEZ(x) = sar(x | -x, msb): all-ones iff x != 0 */
      case Iop_CmpwNEZ32: {
         HReg r_dst = newVRegI(env);
         HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
         addInstr(env, PPCInstr_Unary(Pun_NEG,r_dst,r_src));
         addInstr(env, PPCInstr_Alu(Palu_OR, r_dst, r_dst, PPCRH_Reg(r_src)));
         addInstr(env, PPCInstr_Shft(Pshft_SAR, True,
                                     r_dst, r_dst, PPCRH_Imm(False, 31)));
         return r_dst;
      }
      case Iop_CmpwNEZ64: {
         HReg r_dst = newVRegI(env);
         HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
         if (!mode64) goto irreducible;
         addInstr(env, PPCInstr_Unary(Pun_NEG,r_dst,r_src));
         addInstr(env, PPCInstr_Alu(Palu_OR, r_dst, r_dst, PPCRH_Reg(r_src)));
         addInstr(env, PPCInstr_Shft(Pshft_SAR, False,
                                     r_dst, r_dst, PPCRH_Imm(False, 63)));
         return r_dst;
      }
      /* vector -> integer moves go through an aligned stack slot */
      case Iop_V128to32: {
         HReg r_aligned16;
         HReg dst = newVRegI(env);
         HReg vec = iselVecExpr(env, e->Iex.Unop.arg);
         PPCAMode *am_off0, *am_off12;
         sub_from_sp( env, 32 );
         r_aligned16 = get_sp_aligned16( env );
         am_off0 = PPCAMode_IR( 0, r_aligned16 );
         am_off12 = PPCAMode_IR( 12,r_aligned16 );
         addInstr(env,
                  PPCInstr_AvLdSt( False, 16, vec, am_off0 ));
         addInstr(env,
                  PPCInstr_Load( 4, dst, am_off12, mode64 ));
         add_to_sp( env, 32 );
         return dst;
      }
      case Iop_V128to64:
      case Iop_V128HIto64:
         if (mode64) {
            HReg r_aligned16;
            HReg dst = newVRegI(env);
            HReg vec = iselVecExpr(env, e->Iex.Unop.arg);
            PPCAMode *am_off0, *am_off8;
            sub_from_sp( env, 32 );
            r_aligned16 = get_sp_aligned16( env );
            am_off0 = PPCAMode_IR( 0, r_aligned16 );
            am_off8 = PPCAMode_IR( 8 ,r_aligned16 );
            addInstr(env,
                     PPCInstr_AvLdSt( False, 16, vec, am_off0 ));
            addInstr(env,
                     PPCInstr_Load(
                        8, dst,
                        op_unop == Iop_V128HIto64 ? am_off0 : am_off8,
                        mode64 ));
            add_to_sp( env, 32 );
            return dst;
         }
         break;
      /* narrowing to the low lanes is a no-op in a register */
      case Iop_16to8:
      case Iop_32to8:
      case Iop_32to16:
      case Iop_64to8:
         return iselWordExpr_R(env, e->Iex.Unop.arg);
      /* FP bit-pattern reinterpretations bounce through memory */
      case Iop_ReinterpF64asI64:
         if (mode64) {
            PPCAMode *am_addr;
            HReg fr_src = iselDblExpr(env, e->Iex.Unop.arg);
            HReg r_dst = newVRegI(env);
            sub_from_sp( env, 16 );
            am_addr = PPCAMode_IR( 0, StackFramePtr(mode64) );
            addInstr(env, PPCInstr_FpLdSt( False, 8,
                                           fr_src, am_addr ));
            addInstr(env, PPCInstr_Load( 8, r_dst, am_addr, mode64 ));
            add_to_sp( env, 16 );
            return r_dst;
         }
         break;
      case Iop_ReinterpF32asI32: {
         PPCAMode *am_addr;
         HReg fr_src = iselFltExpr(env, e->Iex.Unop.arg);
         HReg r_dst = newVRegI(env);
         sub_from_sp( env, 16 );
         am_addr = PPCAMode_IR( 0, StackFramePtr(mode64) );
         addInstr(env, PPCInstr_FpLdSt( False, 4,
                                        fr_src, am_addr ));
         addInstr(env, PPCInstr_Load( 4, r_dst, am_addr, mode64 ));
         add_to_sp( env, 16 );
         return r_dst;
      }
      /* note: this break is unreachable (the case above always
         returns); harmless. */
      break;
      case Iop_ReinterpD64asI64:
         if (mode64) {
            PPCAMode *am_addr;
            HReg fr_src = iselDfp64Expr(env, e->Iex.Unop.arg);
            HReg r_dst = newVRegI(env);
            sub_from_sp( env, 16 );
            am_addr = PPCAMode_IR( 0, StackFramePtr(mode64) );
            addInstr(env, PPCInstr_FpLdSt( False, 8,
                                           fr_src, am_addr ));
            addInstr(env, PPCInstr_Load( 8, r_dst, am_addr, mode64 ));
            add_to_sp( env, 16 );
            return r_dst;
         }
         break;
      /* BCD <-> DPB conversions are done by calling a helper: arg in
         GPR3, result comes back in GPR3. */
      case Iop_BCDtoDPB: {
         PPCCondCode cc;
         UInt argiregs;
         HReg argregs[1];
         HReg r_dst = newVRegI(env);
         Int argreg;
         HWord* fdescr;
         argiregs = 0;
         argreg = 0;
         argregs[0] = hregPPC_GPR3(mode64);
         argiregs |= (1 << (argreg+3));
         addInstr(env, mk_iMOVds_RR( argregs[argreg++],
                                     iselWordExpr_R(env, e->Iex.Unop.arg) ) );
         cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );
         /* entry point is read out of the ppc64 function descriptor */
         fdescr = (HWord*)h_BCDtoDPB;
         addInstr(env, PPCInstr_Call( cc, (Addr64)(fdescr[0]), argiregs ) );
         addInstr(env, mk_iMOVds_RR(r_dst, argregs[0]));
         return r_dst;
      }
      case Iop_DPBtoBCD: {
         PPCCondCode cc;
         UInt argiregs;
         HReg argregs[1];
         HReg r_dst = newVRegI(env);
         Int argreg;
         HWord* fdescr;
         argiregs = 0;
         argreg = 0;
         argregs[0] = hregPPC_GPR3(mode64);
         argiregs |= (1 << (argreg+3));
         addInstr(env, mk_iMOVds_RR( argregs[argreg++],
                                     iselWordExpr_R(env, e->Iex.Unop.arg) ) );
         cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );
         fdescr = (HWord*)h_DPBtoBCD;
         addInstr(env, PPCInstr_Call( cc, (Addr64)(fdescr[0]), argiregs ) );
         addInstr(env, mk_iMOVds_RR(r_dst, argregs[0]));
         return r_dst;
      }
      default:
         break;
      }
      break;
   }
   /* ------ read of guest state at a fixed offset ------ */
   case Iex_Get: {
      if (ty == Ity_I8 || ty == Ity_I16 ||
          ty == Ity_I32 || ((ty == Ity_I64) && mode64)) {
         HReg r_dst = newVRegI(env);
         PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset,
                                          GuestStatePtr(mode64) );
         addInstr(env, PPCInstr_Load( toUChar(sizeofIRType(ty)),
                                      r_dst, am_addr, mode64 ));
         return r_dst;
      }
      break;
   }
   /* ------ read of guest state at a computed offset ------ */
   case Iex_GetI: {
      PPCAMode* src_am
         = genGuestArrayOffset( env, e->Iex.GetI.descr,
                                e->Iex.GetI.ix, e->Iex.GetI.bias );
      HReg r_dst = newVRegI(env);
      if (mode64 && ty == Ity_I64) {
         addInstr(env, PPCInstr_Load( toUChar(8),
                                      r_dst, src_am, mode64 ));
         return r_dst;
      }
      if ((!mode64) && ty == Ity_I32) {
         addInstr(env, PPCInstr_Load( toUChar(4),
                                      r_dst, src_am, mode64 ));
         return r_dst;
      }
      break;
   }
   /* ------ clean helper call; result arrives in GPR3 ------ */
   case Iex_CCall: {
      HReg r_dst = newVRegI(env);
      vassert(ty == Ity_I32);
      if (e->Iex.CCall.retty != Ity_I32)
         goto irreducible;
      doHelperCall( env, False, NULL,
                    e->Iex.CCall.cee, e->Iex.CCall.args );
      addInstr(env, mk_iMOVds_RR(r_dst, hregPPC_GPR3(mode64)));
      return r_dst;
   }
   /* ------ literal: sign-extend to host word and load ------ */
   case Iex_Const: {
      Long l;
      HReg r_dst = newVRegI(env);
      IRConst* con = e->Iex.Const.con;
      switch (con->tag) {
      case Ico_U64: if (!mode64) goto irreducible;
                    l = (Long) con->Ico.U64; break;
      case Ico_U32: l = (Long)(Int) con->Ico.U32; break;
      case Ico_U16: l = (Long)(Int)(Short)con->Ico.U16; break;
      case Ico_U8: l = (Long)(Int)(Char )con->Ico.U8; break;
      default: vpanic("iselIntExpr_R.const(ppc)");
      }
      addInstr(env, PPCInstr_LI(r_dst, (ULong)l, mode64));
      return r_dst;
   }
   /* ------ Mux0X: (cond & 0xFF)==0 ? expr0 : exprX, via cmov ------ */
   case Iex_Mux0X: {
      if ((ty == Ity_I8 || ty == Ity_I16 ||
           ty == Ity_I32 || ((ty == Ity_I64) && mode64)) &&
          typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I8) {
         PPCCondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
         HReg r_cond = iselWordExpr_R(env, e->Iex.Mux0X.cond);
         HReg rX = iselWordExpr_R(env, e->Iex.Mux0X.exprX);
         PPCRI* r0 = iselWordExpr_RI(env, e->Iex.Mux0X.expr0);
         HReg r_dst = newVRegI(env);
         HReg r_tmp = newVRegI(env);
         addInstr(env, mk_iMOVds_RR(r_dst,rX));
         addInstr(env, PPCInstr_Alu(Palu_AND, r_tmp,
                                    r_cond, PPCRH_Imm(False,0xFF)));
         addInstr(env, PPCInstr_Cmp(False, True,
                                    7, r_tmp, PPCRH_Imm(False,0)));
         addInstr(env, PPCInstr_CMov(cc,r_dst,r0));
         return r_dst;
      }
      break;
   }
   default:
      break;
   }
   /* couldn't handle this expression */
   irreducible:
   ppIRExpr(e);
   vpanic("iselIntExpr_R(ppc): cannot reduce tree");
}
/* Does the 32-bit value u equal the sign-extension of its low 16 bits,
   i.e. does it fit in a signed 16-bit immediate field?
   The previous implementation shifted a signed int left then right;
   right-shifting a negative signed value is implementation-defined in
   C (C11 6.5.7p5).  The range test below is portable and equivalent:
   u fits iff u is in [0, 0x7FFF] or in [0xFFFF8000, 0xFFFFFFFF]. */
static Bool uInt_fits_in_16_bits ( UInt u )
{
   return toBool(u < 0x8000U || u >= 0xFFFF8000U);
}
/* Does the 64-bit value u equal the sign-extension of its low 16 bits?
   As with uInt_fits_in_16_bits, the old shift-based formulation relied
   on implementation-defined right shift of a negative signed value
   (C11 6.5.7p5); this portable range test is equivalent:
   u fits iff u < 0x8000 or u >= 0xFFFFFFFFFFFF8000. */
static Bool uLong_fits_in_16_bits ( ULong u )
{
   return toBool(u < 0x8000ULL || u >= 0xFFFFFFFFFFFF8000ULL);
}
/* True iff u is a multiple of 4 (needed for ppc64 DS-form
   displacements, which are scaled by 4). */
static Bool uLong_is_4_aligned ( ULong u )
{
   return toBool(0ULL == (u % 4ULL));
}
/* Sanity-check an amode produced by instruction selection: the base
   (and, for reg+reg, the index) must be virtual GPRs of the host word
   size, and an immediate displacement must fit the signed 16-bit
   offset field. */
static Bool sane_AMode ( ISelEnv* env, PPCAMode* am )
{
   Bool mode64 = env->mode64;
   switch (am->tag) {
   case Pam_IR:
      return toBool( hregClass(am->Pam.IR.base) == HRcGPR(mode64) &&
                     hregIsVirtual(am->Pam.IR.base) &&
                     uInt_fits_in_16_bits(am->Pam.IR.index) );
   case Pam_RR:
      /* BUGFIX: the last conjunct previously read am->Pam.IR.index
         (the immediate-displacement view of the union) instead of the
         index register, so the virtual-register check was applied to
         the wrong datum.  Check the RR index register, as intended. */
      return toBool( hregClass(am->Pam.RR.base) == HRcGPR(mode64) &&
                     hregIsVirtual(am->Pam.RR.base) &&
                     hregClass(am->Pam.RR.index) == HRcGPR(mode64) &&
                     hregIsVirtual(am->Pam.RR.index) );
   default:
      vpanic("sane_AMode: unknown ppc amode tag");
   }
}
/* Compute an address mode for the address expression e, used for a
   transfer of type xferTy.  Wrapper that validates the worker's
   result with sane_AMode. */
static
PPCAMode* iselWordExpr_AMode ( ISelEnv* env, IRExpr* e, IRType xferTy )
{
   PPCAMode* result = iselWordExpr_AMode_wrk(env, e, xferTy);
   vassert(sane_AMode(env, result));
   return result;
}
/* DO NOT CALL DIRECTLY: use iselWordExpr_AMode.  Tries, in order of
   preference, (1) base-reg + immediate for Add(expr, suitable-const),
   (2) reg + reg for any other Add, (3) the whole expression in a
   register with a zero displacement. */
static PPCAMode* iselWordExpr_AMode_wrk ( ISelEnv* env, IRExpr* e, IRType xferTy )
{
   IRType ty = typeOfIRExpr(env->type_env,e);
   if (env->mode64) {
      /* In 64-bit mode, 4/8-byte transfers use DS-form ld/std, whose
         displacement must be a multiple of 4. */
      Bool aligned4imm = toBool(xferTy == Ity_I32 || xferTy == Ity_I64);
      vassert(ty == Ity_I64);
      if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_Add64) {
         IRExpr* arg2 = e->Iex.Binop.arg2;
         /* Add64(expr, const) with a usable constant? */
         if (arg2->tag == Iex_Const
             && arg2->Iex.Const.con->tag == Ico_U64) {
            ULong c = arg2->Iex.Const.con->Ico.U64;
            if ((aligned4imm ? uLong_is_4_aligned(c) : True)
                && uLong_fits_in_16_bits(c)) {
               return PPCAMode_IR( (Int)c,
                                   iselWordExpr_R(env, e->Iex.Binop.arg1) );
            }
         }
         /* Otherwise any Add64 becomes a reg+reg amode. */
         {
            HReg rA = iselWordExpr_R(env, e->Iex.Binop.arg1);
            HReg rB = iselWordExpr_R(env, e->Iex.Binop.arg2);
            return PPCAMode_RR( rB, rA );
         }
      }
   } else {
      vassert(ty == Ity_I32);
      if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_Add32) {
         IRExpr* arg2 = e->Iex.Binop.arg2;
         /* Add32(expr, const-that-fits-in-16-bits)? */
         if (arg2->tag == Iex_Const
             && arg2->Iex.Const.con->tag == Ico_U32
             && uInt_fits_in_16_bits(arg2->Iex.Const.con->Ico.U32)) {
            return PPCAMode_IR( (Int)arg2->Iex.Const.con->Ico.U32,
                                iselWordExpr_R(env, e->Iex.Binop.arg1) );
         }
         /* Otherwise any Add32 becomes a reg+reg amode. */
         {
            HReg rA = iselWordExpr_R(env, e->Iex.Binop.arg1);
            HReg rB = iselWordExpr_R(env, e->Iex.Binop.arg2);
            return PPCAMode_RR( rB, rA );
         }
      }
   }
   /* Fallback: whole address in a register, zero displacement. */
   return PPCAMode_IR( 0, iselWordExpr_R(env,e) );
}
/* Compute the expression into a register-or-halfword-immediate
   operand; 'syned' says whether an immediate is to be treated as
   signed.  Sanity-checking wrapper for the worker. */
static PPCRH* iselWordExpr_RH ( ISelEnv* env, Bool syned, IRExpr* e )
{
   PPCRH* res = iselWordExpr_RH_wrk(env, syned, e);
   if (res->tag == Prh_Imm) {
      vassert(res->Prh.Imm.syned == syned);
      /* -32768 is excluded: some users negate the immediate. */
      if (syned)
         vassert(res->Prh.Imm.imm16 != 0x8000);
      return res;
   }
   if (res->tag == Prh_Reg) {
      vassert(hregClass(res->Prh.Reg.reg) == HRcGPR(env->mode64));
      vassert(hregIsVirtual(res->Prh.Reg.reg));
      return res;
   }
   vpanic("iselIntExpr_RH: unknown ppc RH tag");
}
/* DO NOT CALL DIRECTLY: use iselWordExpr_RH.  Produces an immediate
   operand when the expression is a constant that fits the 16-bit
   field (in the requested signedness), else falls back to a register. */
static PPCRH* iselWordExpr_RH_wrk ( ISelEnv* env, Bool syned, IRExpr* e )
{
   IRType ty = typeOfIRExpr(env->type_env,e);
   vassert(ty == Ity_I8 || ty == Ity_I16 ||
           ty == Ity_I32 || ((ty == Ity_I64) && env->mode64));
   if (e->tag == Iex_Const) {
      ULong    u;
      Long     l;
      IRConst* con = e->Iex.Const.con;
      /* Normalise the constant to an unsigned 64-bit value. */
      switch (con->tag) {
         case Ico_U64: vassert(env->mode64);
                       u = con->Ico.U64;              break;
         case Ico_U32: u = 0xFFFFFFFF & con->Ico.U32; break;
         case Ico_U16: u = 0x0000FFFF & con->Ico.U16; break;
         case Ico_U8:  u = 0x000000FF & con->Ico.U8;  break;
         default:      vpanic("iselIntExpr_RH.Iex_Const(ppch)");
      }
      l = (Long)u;
      /* Unsigned immediates: 0..65535; signed: -32767..32767 (note
         -32768 is deliberately excluded, see the wrapper). */
      if (!syned && u <= 65535)
         return PPCRH_Imm(False, toUShort(u & 0xFFFF));
      if (syned && l >= -32767 && l <= 32767)
         return PPCRH_Imm(True, toUShort(u & 0xFFFF));
   }
   return PPCRH_Reg( iselWordExpr_R ( env, e ) );
}
/* Compute the expression into a register-or-immediate operand.
   Sanity-checking wrapper for the worker. */
static PPCRI* iselWordExpr_RI ( ISelEnv* env, IRExpr* e )
{
   PPCRI* res = iselWordExpr_RI_wrk(env, e);
   if (res->tag == Pri_Imm)
      return res;
   if (res->tag == Pri_Reg) {
      vassert(hregClass(res->Pri.Reg) == HRcGPR(env->mode64));
      vassert(hregIsVirtual(res->Pri.Reg));
      return res;
   }
   vpanic("iselIntExpr_RI: unknown ppc RI tag");
}
/* DO NOT CALL DIRECTLY: use iselWordExpr_RI.  Constants become
   sign-extended immediates; everything else goes via a register. */
static PPCRI* iselWordExpr_RI_wrk ( ISelEnv* env, IRExpr* e )
{
   IRType ty = typeOfIRExpr(env->type_env,e);
   vassert(ty == Ity_I8 || ty == Ity_I16 ||
           ty == Ity_I32 || ((ty == Ity_I64) && env->mode64));
   if (e->tag == Iex_Const) {
      Long     val;
      IRConst* con = e->Iex.Const.con;
      /* Sign-extend the constant to 64 bits. */
      switch (con->tag) {
         case Ico_U64: vassert(env->mode64);
                       val = (Long) con->Ico.U64;            break;
         case Ico_U32: val = (Long)(Int) con->Ico.U32;       break;
         case Ico_U16: val = (Long)(Int)(Short)con->Ico.U16; break;
         case Ico_U8:  val = (Long)(Int)(Char )con->Ico.U8;  break;
         default:      vpanic("iselIntExpr_RI.Iex_Const(ppch)");
      }
      return PPCRI_Imm((ULong)val);
   }
   return PPCRI_Reg( iselWordExpr_R ( env, e ) );
}
/* Compute the expression into a register or a 5-bit unsigned
   immediate (a 32-bit-mode shift amount, 1..31).  Sanity-checking
   wrapper for the worker. */
static PPCRH* iselWordExpr_RH5u ( ISelEnv* env, IRExpr* e )
{
   PPCRH* res;
   vassert(!env->mode64);
   res = iselWordExpr_RH5u_wrk(env, e);
   if (res->tag == Prh_Imm) {
      vassert(res->Prh.Imm.imm16 >= 1 && res->Prh.Imm.imm16 <= 31);
      vassert(!res->Prh.Imm.syned);
   } else if (res->tag == Prh_Reg) {
      vassert(hregClass(res->Prh.Reg.reg) == HRcGPR(env->mode64));
      vassert(hregIsVirtual(res->Prh.Reg.reg));
   } else {
      vpanic("iselIntExpr_RH5u: unknown ppc RI tag");
   }
   return res;
}
/* DO NOT CALL DIRECTLY: use iselWordExpr_RH5u.  An I8 constant in
   1..31 becomes an immediate shift amount; anything else goes via a
   register. */
static PPCRH* iselWordExpr_RH5u_wrk ( ISelEnv* env, IRExpr* e )
{
   IRType ty = typeOfIRExpr(env->type_env,e);
   vassert(ty == Ity_I8);
   if (e->tag == Iex_Const && e->Iex.Const.con->tag == Ico_U8) {
      UChar amt = e->Iex.Const.con->Ico.U8;
      if (amt >= 1 && amt <= 31)
         return PPCRH_Imm(False, amt);
   }
   return PPCRH_Reg( iselWordExpr_R ( env, e ) );
}
/* Compute the expression into a register or a 6-bit unsigned
   immediate (a 64-bit-mode shift amount, 1..63).  Sanity-checking
   wrapper for the worker. */
static PPCRH* iselWordExpr_RH6u ( ISelEnv* env, IRExpr* e )
{
   PPCRH* res;
   vassert(env->mode64);
   res = iselWordExpr_RH6u_wrk(env, e);
   if (res->tag == Prh_Imm) {
      vassert(res->Prh.Imm.imm16 >= 1 && res->Prh.Imm.imm16 <= 63);
      vassert(!res->Prh.Imm.syned);
   } else if (res->tag == Prh_Reg) {
      vassert(hregClass(res->Prh.Reg.reg) == HRcGPR(env->mode64));
      vassert(hregIsVirtual(res->Prh.Reg.reg));
   } else {
      vpanic("iselIntExpr_RH6u: unknown ppc64 RI tag");
   }
   return res;
}
/* DO NOT CALL DIRECTLY: use iselWordExpr_RH6u.  An I8 constant in
   1..63 becomes an immediate shift amount; anything else goes via a
   register. */
static PPCRH* iselWordExpr_RH6u_wrk ( ISelEnv* env, IRExpr* e )
{
   IRType ty = typeOfIRExpr(env->type_env,e);
   vassert(ty == Ity_I8);
   if (e->tag == Iex_Const && e->Iex.Const.con->tag == Ico_U8) {
      UChar amt = e->Iex.Const.con->Ico.U8;
      if (amt >= 1 && amt <= 63)
         return PPCRH_Imm(False, amt);
   }
   return PPCRH_Reg( iselWordExpr_R ( env, e ) );
}
/* Generate code to evaluate a 1-bit condition expression and return
   the PPC condition code under which it is true.  Currently just a
   pass-through to the worker. */
static PPCCondCode iselCondCode ( ISelEnv* env, IRExpr* e )
{
   return iselCondCode_wrk(env, e);
}
/* DO NOT CALL DIRECTLY: use iselCondCode.  Selects instructions that
   leave the truth of the Ity_I1 expression e reflected in CR field 7,
   and returns the (test, flag) pair under which e is true.  The
   pattern cases below are tried in order — do not reorder them. */
static PPCCondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e )
{
   vassert(e);
   vassert(typeOfIRExpr(env->type_env,e) == Ity_I1);
   /* Constant 1: force 7EQ true by comparing a register with itself. */
   if (e->tag == Iex_Const && e->Iex.Const.con->Ico.U1 == True) {
      HReg r_zero = newVRegI(env);
      addInstr(env, PPCInstr_LI(r_zero, 0, env->mode64));
      addInstr(env, PPCInstr_Cmp(False, True,
                                 7, r_zero, PPCRH_Reg(r_zero)));
      return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
   }
   /* Not1(x): evaluate x and invert the test sense — no extra code. */
   if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_Not1) {
      PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg);
      cond.test = invertCondTest(cond.test);
      return cond;
   }
   /* 32to1 / 64to1: test the low bit. */
   if (e->tag == Iex_Unop &&
       (e->Iex.Unop.op == Iop_32to1 || e->Iex.Unop.op == Iop_64to1)) {
      HReg src = iselWordExpr_R(env, e->Iex.Unop.arg);
      HReg tmp = newVRegI(env);
      addInstr(env, PPCInstr_Alu(Palu_AND, tmp,
                                 src, PPCRH_Imm(False,1)));
      addInstr(env, PPCInstr_Cmp(False, True,
                                 7, tmp, PPCRH_Imm(False,1)));
      return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
   }
   /* CmpNEZ8: mask to 8 bits, compare against zero. */
   if (e->tag == Iex_Unop
       && e->Iex.Unop.op == Iop_CmpNEZ8) {
      HReg arg = iselWordExpr_R(env, e->Iex.Unop.arg);
      HReg tmp = newVRegI(env);
      addInstr(env, PPCInstr_Alu(Palu_AND, tmp, arg,
                                 PPCRH_Imm(False,0xFF)));
      addInstr(env, PPCInstr_Cmp(False, True,
                                 7, tmp, PPCRH_Imm(False,0)));
      return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
   }
   /* CmpNEZ32: compare against zero directly. */
   if (e->tag == Iex_Unop
       && e->Iex.Unop.op == Iop_CmpNEZ32) {
      HReg r1 = iselWordExpr_R(env, e->Iex.Unop.arg);
      addInstr(env, PPCInstr_Cmp(False, True,
                                 7, r1, PPCRH_Imm(False,0)));
      return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
   }
   /* 32-bit comparisons: one cmp, then pick the flag/test pair.
      LE is expressed as NOT(GT). */
   if (e->tag == Iex_Binop
       && (e->Iex.Binop.op == Iop_CmpEQ32
           || e->Iex.Binop.op == Iop_CmpNE32
           || e->Iex.Binop.op == Iop_CmpLT32S
           || e->Iex.Binop.op == Iop_CmpLT32U
           || e->Iex.Binop.op == Iop_CmpLE32S
           || e->Iex.Binop.op == Iop_CmpLE32U)) {
      Bool syned = (e->Iex.Binop.op == Iop_CmpLT32S ||
                    e->Iex.Binop.op == Iop_CmpLE32S);
      HReg r1 = iselWordExpr_R(env, e->Iex.Binop.arg1);
      PPCRH* ri2 = iselWordExpr_RH(env, syned, e->Iex.Binop.arg2);
      addInstr(env, PPCInstr_Cmp(syned, True,
                                 7, r1, ri2));
      switch (e->Iex.Binop.op) {
      case Iop_CmpEQ32: return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
      case Iop_CmpNE32: return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
      case Iop_CmpLT32U: case Iop_CmpLT32S:
         return mk_PPCCondCode( Pct_TRUE, Pcf_7LT );
      case Iop_CmpLE32U: case Iop_CmpLE32S:
         return mk_PPCCondCode( Pct_FALSE, Pcf_7GT );
      default: vpanic("iselCondCode(ppc): CmpXX32");
      }
   }
   /* CmpNEZ64: in 32-bit mode OR the halves together first. */
   if (e->tag == Iex_Unop
       && e->Iex.Unop.op == Iop_CmpNEZ64) {
      if (!env->mode64) {
         HReg hi, lo;
         HReg tmp = newVRegI(env);
         iselInt64Expr( &hi, &lo, env, e->Iex.Unop.arg );
         addInstr(env, PPCInstr_Alu(Palu_OR, tmp, lo, PPCRH_Reg(hi)));
         addInstr(env, PPCInstr_Cmp(False, True,
                                    7, tmp,PPCRH_Imm(False,0)));
         return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
      } else {
         HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
         addInstr(env, PPCInstr_Cmp(False, False,
                                    7, r_src,PPCRH_Imm(False,0)));
         return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
      }
   }
   /* 64-bit comparisons (64-bit mode only), same scheme as 32-bit. */
   if (e->tag == Iex_Binop
       && (e->Iex.Binop.op == Iop_CmpEQ64
           || e->Iex.Binop.op == Iop_CmpNE64
           || e->Iex.Binop.op == Iop_CmpLT64S
           || e->Iex.Binop.op == Iop_CmpLT64U
           || e->Iex.Binop.op == Iop_CmpLE64S
           || e->Iex.Binop.op == Iop_CmpLE64U)) {
      Bool syned = (e->Iex.Binop.op == Iop_CmpLT64S ||
                    e->Iex.Binop.op == Iop_CmpLE64S);
      HReg r1 = iselWordExpr_R(env, e->Iex.Binop.arg1);
      PPCRH* ri2 = iselWordExpr_RH(env, syned, e->Iex.Binop.arg2);
      vassert(env->mode64);
      addInstr(env, PPCInstr_Cmp(syned, False,
                                 7, r1, ri2));
      switch (e->Iex.Binop.op) {
      case Iop_CmpEQ64: return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
      case Iop_CmpNE64: return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
      case Iop_CmpLT64U: return mk_PPCCondCode( Pct_TRUE, Pcf_7LT );
      case Iop_CmpLE64U: return mk_PPCCondCode( Pct_FALSE, Pcf_7GT );
      default: vpanic("iselCondCode(ppc): CmpXX64");
      }
   }
   /* An I1-typed temporary: test its low bit. */
   if (e->tag == Iex_RdTmp) {
      HReg r_src = lookupIRTemp(env, e->Iex.RdTmp.tmp);
      HReg src_masked = newVRegI(env);
      addInstr(env,
               PPCInstr_Alu(Palu_AND, src_masked,
                            r_src, PPCRH_Imm(False,1)));
      addInstr(env,
               PPCInstr_Cmp(False, True,
                            7, src_masked, PPCRH_Imm(False,1)));
      return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
   }
   vex_printf("iselCondCode(ppc): No such tag(%u)\n", e->tag);
   ppIRExpr(e);
   vpanic("iselCondCode(ppc)");
}
/* Compute a 128-bit value into the register pair (rHi, rLo).
   64-bit mode only; both halves come back as fresh virtual GPRs. */
static void iselInt128Expr ( HReg* rHi, HReg* rLo,
                             ISelEnv* env, IRExpr* e )
{
   vassert(env->mode64);
   iselInt128Expr_wrk(rHi, rLo, env, e);
   /* Sanity-check what the worker handed back: both halves must be
      virtual registers of the host integer class. */
   HReg hiHalf = *rHi;
   HReg loHalf = *rLo;
   vassert(hregClass(hiHalf) == HRcGPR(env->mode64));
   vassert(hregIsVirtual(hiHalf));
   vassert(hregClass(loHalf) == HRcGPR(env->mode64));
   vassert(hregIsVirtual(loHalf));
}
/* DO NOT CALL THIS DIRECTLY !  Worker for iselInt128Expr: handles the
   small set of 128-bit-producing IR expressions expected in 64-bit
   mode. */
static void iselInt128Expr_wrk ( HReg* rHi, HReg* rLo,
                                 ISelEnv* env, IRExpr* e )
{
   vassert(e);
   vassert(typeOfIRExpr(env->type_env,e) == Ity_I128);

   /* 128-bit temporaries are tracked as a register pair. */
   if (e->tag == Iex_RdTmp) {
      lookupIRTempPair( rHi, rLo, env, e->Iex.RdTmp.tmp);
      return;
   }

   if (e->tag == Iex_Binop) {
      switch (e->Iex.Binop.op) {
         case Iop_MullU64:
         case Iop_MullS64: {
            /* Full 64x64 -> 128 multiply: one insn for the low half,
               one for the (possibly signed) high half. */
            HReg resLo = newVRegI(env);
            HReg resHi = newVRegI(env);
            Bool isSigned = toBool(e->Iex.Binop.op == Iop_MullS64);
            HReg argL = iselWordExpr_R(env, e->Iex.Binop.arg1);
            HReg argR = iselWordExpr_R(env, e->Iex.Binop.arg2);
            addInstr(env, PPCInstr_MulL(False,
                                        False, False,
                                        resLo, argL, argR));
            addInstr(env, PPCInstr_MulL(isSigned,
                                        True, False,
                                        resHi, argL, argR));
            *rHi = resHi;
            *rLo = resLo;
            return;
         }

         case Iop_64HLto128:
            /* Just pair up the two 64-bit halves; no code emitted. */
            *rHi = iselWordExpr_R(env, e->Iex.Binop.arg1);
            *rLo = iselWordExpr_R(env, e->Iex.Binop.arg2);
            return;

         default:
            break;
      }
   }

   if (e->tag == Iex_Unop) {
      switch (e->Iex.Unop.op) {
         default:
            break;
      }
   }

   vex_printf("iselInt128Expr(ppc64): No such tag(%u)\n", e->tag);
   ppIRExpr(e);
   vpanic("iselInt128Expr(ppc64)");
}
/* Compute a 128-bit value into four 32-bit registers, most significant
   word first.  32-bit mode only. */
static void iselInt128Expr_to_32x4 ( HReg* rHi, HReg* rMedHi, HReg* rMedLo,
                                     HReg* rLo, ISelEnv* env, IRExpr* e )
{
   vassert(!env->mode64);
   iselInt128Expr_to_32x4_wrk(rHi, rMedHi, rMedLo, rLo, env, e);
   /* All four quarters must come back as virtual 32-bit int regs. */
   HReg* quarters[4];
   quarters[0] = rHi;
   quarters[1] = rMedHi;
   quarters[2] = rMedLo;
   quarters[3] = rLo;
   Int i;
   for (i = 0; i < 4; i++) {
      vassert(hregClass(*quarters[i]) == HRcInt32);
      vassert(hregIsVirtual(*quarters[i]));
   }
}
/* DO NOT CALL THIS DIRECTLY !  Worker for iselInt128Expr_to_32x4:
   breaks a 128-bit expression into four 32-bit virtual registers
   (rHi = most significant word ... rLo = least).  32-bit mode only. */
static void iselInt128Expr_to_32x4_wrk ( HReg* rHi, HReg* rMedHi,
                                         HReg* rMedLo, HReg* rLo,
                                         ISelEnv* env, IRExpr* e )
{
   vassert(e);
   vassert(typeOfIRExpr(env->type_env,e) == Ity_I128);

   /* 128-bit temporaries are tracked as a register quad. */
   if (e->tag == Iex_RdTmp) {
      lookupIRTempQuad( rHi, rMedHi, rMedLo, rLo, env, e->Iex.RdTmp.tmp);
      return;
   }

   if (e->tag == Iex_Binop) {
      IROp op_binop = e->Iex.Binop.op;
      switch (op_binop) {
         case Iop_64HLto128:
            /* Each 64-bit half is itself a 32-bit register pair. */
            iselInt64Expr(rHi, rMedHi, env, e->Iex.Binop.arg1);
            iselInt64Expr(rMedLo, rLo, env, e->Iex.Binop.arg2);
            return;
         default:
            vex_printf("iselInt128Expr_to_32x4_wrk: Binop case 0x%x not found\n",
                       op_binop);
            break;
      }
   }

   /* BUGFIX: this previously printed a diagnostic and returned
      normally, leaving all four result registers uninitialised for
      the caller to vassert on.  Panic instead, like every other isel
      worker in this file. */
   vex_printf("iselInt128Expr_to_32x4_wrk: e->tag 0x%x not found\n", e->tag);
   ppIRExpr(e);
   vpanic("iselInt128Expr_to_32x4_wrk(ppc)");
}
/* Compute a 64-bit value into the register pair (rHi, rLo).
   32-bit mode only; both halves come back as virtual Int32 regs. */
static void iselInt64Expr ( HReg* rHi, HReg* rLo,
                            ISelEnv* env, IRExpr* e )
{
   vassert(!env->mode64);
   iselInt64Expr_wrk(rHi, rLo, env, e);
   /* Check the worker's results. */
   HReg hiHalf = *rHi;
   HReg loHalf = *rLo;
   vassert(hregClass(hiHalf) == HRcInt32);
   vassert(hregIsVirtual(hiHalf));
   vassert(hregClass(loHalf) == HRcInt32);
   vassert(hregIsVirtual(loHalf));
}
/* DO NOT CALL THIS DIRECTLY !  Worker for iselInt64Expr: compute a
   64-bit value into a pair of 32-bit virtual registers, rHi holding
   the most significant word and rLo the least.  32-bit mode only. */
static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo,
                                ISelEnv* env, IRExpr* e )
{
   vassert(e);
   vassert(typeOfIRExpr(env->type_env,e) == Ity_I64);

   /* 64-bit big-endian load: two 32-bit loads, MSW at offset 0. */
   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) {
      HReg tLo    = newVRegI(env);
      HReg tHi    = newVRegI(env);
      HReg r_addr = iselWordExpr_R(env, e->Iex.Load.addr);
      vassert(!env->mode64);
      addInstr(env, PPCInstr_Load( 4,
                                   tHi, PPCAMode_IR( 0, r_addr ),
                                   False) );
      addInstr(env, PPCInstr_Load( 4,
                                   tLo, PPCAMode_IR( 4, r_addr ),
                                   False) );
      *rHi = tHi;
      *rLo = tLo;
      return;
   }

   /* 64-bit literal: materialise each 32-bit half separately. */
   if (e->tag == Iex_Const) {
      ULong w64 = e->Iex.Const.con->Ico.U64;
      UInt  wHi = ((UInt)(w64 >> 32)) & 0xFFFFFFFF;
      UInt  wLo = ((UInt)w64) & 0xFFFFFFFF;
      HReg  tLo = newVRegI(env);
      HReg  tHi = newVRegI(env);
      vassert(e->Iex.Const.con->tag == Ico_U64);
      addInstr(env, PPCInstr_LI(tHi, (Long)(Int)wHi, False));
      addInstr(env, PPCInstr_LI(tLo, (Long)(Int)wLo, False));
      *rHi = tHi;
      *rLo = tLo;
      return;
   }

   /* 64-bit temporaries already live in a register pair. */
   if (e->tag == Iex_RdTmp) {
      lookupIRTempPair( rHi, rLo, env, e->Iex.RdTmp.tmp);
      return;
   }

   /* 64-bit GET from guest state: two adjacent 32-bit loads. */
   if (e->tag == Iex_Get) {
      PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset,
                                       GuestStatePtr(False) );
      PPCAMode* am_addr4 = advance4(env, am_addr);
      HReg tLo = newVRegI(env);
      HReg tHi = newVRegI(env);
      addInstr(env, PPCInstr_Load( 4, tHi, am_addr, False ));
      addInstr(env, PPCInstr_Load( 4, tLo, am_addr4, False ));
      *rHi = tHi;
      *rLo = tLo;
      return;
   }

   /* 64-bit Mux0X: compute exprX into the result pair, then if
      (cond & 0xFF) == 0 conditionally overwrite it with expr0. */
   if (e->tag == Iex_Mux0X) {
      HReg e0Lo, e0Hi, eXLo, eXHi;
      HReg tLo = newVRegI(env);
      HReg tHi = newVRegI(env);
      PPCCondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
      HReg r_cond = iselWordExpr_R(env, e->Iex.Mux0X.cond);
      HReg r_tmp  = newVRegI(env);
      iselInt64Expr(&e0Hi, &e0Lo, env, e->Iex.Mux0X.expr0);
      iselInt64Expr(&eXHi, &eXLo, env, e->Iex.Mux0X.exprX);
      addInstr(env, mk_iMOVds_RR(tHi,eXHi));
      addInstr(env, mk_iMOVds_RR(tLo,eXLo));
      /* Only the low 8 bits of the condition are significant. */
      addInstr(env, PPCInstr_Alu(Palu_AND,
                                 r_tmp, r_cond, PPCRH_Imm(False,0xFF)));
      addInstr(env, PPCInstr_Cmp(False, True,
                                 7, r_tmp, PPCRH_Imm(False,0)));
      addInstr(env, PPCInstr_CMov(cc,tHi,PPCRI_Reg(e0Hi)));
      addInstr(env, PPCInstr_CMov(cc,tLo,PPCRI_Reg(e0Lo)));
      *rHi = tHi;
      *rLo = tLo;
      return;
   }

   /* --------- BINARY ops --------- */
   if (e->tag == Iex_Binop) {
      IROp op_binop = e->Iex.Binop.op;
      switch (op_binop) {
         /* 32 x 32 -> 64 multiply: low-half and high-half insns. */
         case Iop_MullU32:
         case Iop_MullS32: {
            HReg tLo    = newVRegI(env);
            HReg tHi    = newVRegI(env);
            Bool syned  = toBool(op_binop == Iop_MullS32);
            HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
            HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
            addInstr(env, PPCInstr_MulL(False,
                                        False, True,
                                        tLo, r_srcL, r_srcR));
            addInstr(env, PPCInstr_MulL(syned,
                                        True, True,
                                        tHi, r_srcL, r_srcR));
            *rHi = tHi;
            *rLo = tLo;
            return;
         }

         /* Bitwise ops apply independently to each half. */
         case Iop_Or64:
         case Iop_And64:
         case Iop_Xor64: {
            HReg xLo, xHi, yLo, yHi;
            HReg tLo = newVRegI(env);
            HReg tHi = newVRegI(env);
            PPCAluOp op = (op_binop == Iop_Or64) ? Palu_OR :
                          (op_binop == Iop_And64) ? Palu_AND : Palu_XOR;
            iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1);
            iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2);
            addInstr(env, PPCInstr_Alu(op, tHi, xHi, PPCRH_Reg(yHi)));
            addInstr(env, PPCInstr_Alu(op, tLo, xLo, PPCRH_Reg(yLo)));
            *rHi = tHi;
            *rLo = tLo;
            return;
         }

         /* 64-bit add: add-with-carry-out on the low half, then
            add-with-carry-in on the high half. */
         case Iop_Add64: {
            HReg xLo, xHi, yLo, yHi;
            HReg tLo = newVRegI(env);
            HReg tHi = newVRegI(env);
            iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1);
            iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2);
            addInstr(env, PPCInstr_AddSubC( True, True ,
                                            tLo, xLo, yLo));
            addInstr(env, PPCInstr_AddSubC( True, False,
                                            tHi, xHi, yHi));
            *rHi = tHi;
            *rLo = tLo;
            return;
         }

         /* 32HLto64(e1,e2): just pair up the halves, no code. */
         case Iop_32HLto64:
            *rHi = iselWordExpr_R(env, e->Iex.Binop.arg1);
            *rLo = iselWordExpr_R(env, e->Iex.Binop.arg2);
            return;

         /* F64 -> I64 conversion: convert in an FP register, spill
            the 8-byte result to the stack, reload as two words. */
         case Iop_F64toI64S: case Iop_F64toI64U: {
            HReg      tLo     = newVRegI(env);
            HReg      tHi     = newVRegI(env);
            HReg      r1      = StackFramePtr(env->mode64);
            PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );
            PPCAMode* four_r1 = PPCAMode_IR( 4, r1 );
            HReg      fsrc    = iselDblExpr(env, e->Iex.Binop.arg2);
            HReg      ftmp    = newVRegF(env);
            vassert(!env->mode64);
            /* arg1 carries the IR rounding mode. */
            set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
            sub_from_sp( env, 16 );
            addInstr(env, PPCInstr_FpCftI(False, False,
                                          (op_binop == Iop_F64toI64S) ? True : False,
                                          True, ftmp, fsrc));
            addInstr(env, PPCInstr_FpLdSt(False, 8, ftmp, zero_r1));
            addInstr(env, PPCInstr_Load(4, tHi, zero_r1, False));
            addInstr(env, PPCInstr_Load(4, tLo, four_r1, False));
            add_to_sp( env, 16 );
            *rHi = tHi;
            *rLo = tLo;
            return;
         }

         default:
            break;
      }
   }

   /* --------- UNARY ops --------- */
   if (e->tag == Iex_Unop) {
      switch (e->Iex.Unop.op) {

         /* CmpwNEZ64(arg): 0 if arg == 0, all-ones otherwise.
            (hi|lo), then the classic (x | -x) >> 31 trick; both
            result halves share the same register. */
         case Iop_CmpwNEZ64: {
            HReg argHi, argLo;
            HReg tmp1  = newVRegI(env);
            HReg tmp2  = newVRegI(env);
            iselInt64Expr(&argHi, &argLo, env, e->Iex.Unop.arg);
            addInstr(env, PPCInstr_Alu(Palu_OR, tmp1, argHi, PPCRH_Reg(argLo)));
            addInstr(env, PPCInstr_Unary(Pun_NEG,tmp2,tmp1));
            addInstr(env, PPCInstr_Alu(Palu_OR, tmp2, tmp2, PPCRH_Reg(tmp1)));
            addInstr(env, PPCInstr_Shft(Pshft_SAR, True,
                                        tmp2, tmp2, PPCRH_Imm(False, 31)));
            *rHi = tmp2;
            *rLo = tmp2;
            return;
         }

         /* Left64(arg) = arg | -arg, computed over the 64-bit pair
            via subtract-from-zero with carry. */
         case Iop_Left64: {
            HReg argHi, argLo;
            HReg zero32 = newVRegI(env);
            HReg resHi  = newVRegI(env);
            HReg resLo  = newVRegI(env);
            iselInt64Expr(&argHi, &argLo, env, e->Iex.Unop.arg);
            vassert(env->mode64 == False);
            addInstr(env, PPCInstr_LI(zero32, 0, env->mode64));
            addInstr(env, PPCInstr_AddSubC( False, True,
                                            resLo, zero32, argLo ));
            addInstr(env, PPCInstr_AddSubC( False, False,
                                            resHi, zero32, argHi ));
            addInstr(env, PPCInstr_Alu(Palu_OR, resLo, resLo, PPCRH_Reg(argLo)));
            addInstr(env, PPCInstr_Alu(Palu_OR, resHi, resHi, PPCRH_Reg(argHi)));
            *rHi = resHi;
            *rLo = resLo;
            return;
         }

         /* Sign-extend: high half = src >>signed 31.  Note the low
            half aliases the source register (no copy is made). */
         case Iop_32Sto64: {
            HReg tHi = newVRegI(env);
            HReg src = iselWordExpr_R(env, e->Iex.Unop.arg);
            addInstr(env, PPCInstr_Shft(Pshft_SAR, True,
                                        tHi, src, PPCRH_Imm(False,31)));
            *rHi = tHi;
            *rLo = src;
            return;
         }

         /* Zero-extend: high half is simply the constant 0. */
         case Iop_32Uto64: {
            HReg tHi = newVRegI(env);
            HReg tLo = iselWordExpr_R(env, e->Iex.Unop.arg);
            addInstr(env, PPCInstr_LI(tHi, 0, False));
            *rHi = tHi;
            *rLo = tLo;
            return;
         }

         /* Low 64 bits of an I128 held as a 32x4 quad. */
         case Iop_128to64: {
            HReg r_Hi    = INVALID_HREG;
            HReg r_MedHi = INVALID_HREG;
            HReg r_MedLo = INVALID_HREG;
            HReg r_Lo    = INVALID_HREG;
            iselInt128Expr_to_32x4(&r_Hi, &r_MedHi, &r_MedLo, &r_Lo,
                                   env, e->Iex.Unop.arg);
            *rHi = r_MedLo;
            *rLo = r_Lo;
            return;
         }

         /* High 64 bits of an I128 held as a 32x4 quad. */
         case Iop_128HIto64: {
            HReg r_Hi    = INVALID_HREG;
            HReg r_MedHi = INVALID_HREG;
            HReg r_MedLo = INVALID_HREG;
            HReg r_Lo    = INVALID_HREG;
            iselInt128Expr_to_32x4(&r_Hi, &r_MedHi, &r_MedLo, &r_Lo,
                                   env, e->Iex.Unop.arg);
            *rHi = r_Hi;
            *rLo = r_MedHi;
            return;
         }

         /* Extract a 64-bit half of a V128: store the vector to an
            aligned stack slot, then reload the chosen half as two
            words.  Big-endian layout: HI half is at offset 0. */
         case Iop_V128HIto64:
         case Iop_V128to64: {
            HReg r_aligned16;
            Int  off = e->Iex.Unop.op==Iop_V128HIto64 ? 0 : 8;
            HReg tLo = newVRegI(env);
            HReg tHi = newVRegI(env);
            HReg vec = iselVecExpr(env, e->Iex.Unop.arg);
            PPCAMode *am_off0, *am_offLO, *am_offHI;
            sub_from_sp( env, 32 );
            r_aligned16 = get_sp_aligned16( env );
            am_off0  = PPCAMode_IR( 0,     r_aligned16 );
            am_offHI = PPCAMode_IR( off,   r_aligned16 );
            am_offLO = PPCAMode_IR( off+4, r_aligned16 );
            addInstr(env,
                     PPCInstr_AvLdSt( False, 16, vec, am_off0 ));
            addInstr(env,
                     PPCInstr_Load( 4, tHi, am_offHI, False ));
            addInstr(env,
                     PPCInstr_Load( 4, tLo, am_offLO, False ));
            add_to_sp( env, 32 );
            *rHi = tHi;
            *rLo = tLo;
            return;
         }

         /* 1Sto64(cond): set 0/1 from the condition, then
            (x << 31) >>signed 31 yields 0 or -1; duplicate it. */
         case Iop_1Sto64: {
            HReg tLo = newVRegI(env);
            HReg tHi = newVRegI(env);
            PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg);
            addInstr(env, PPCInstr_Set(cond,tLo));
            addInstr(env, PPCInstr_Shft(Pshft_SHL, True,
                                        tLo, tLo, PPCRH_Imm(False,31)));
            addInstr(env, PPCInstr_Shft(Pshft_SAR, True,
                                        tLo, tLo, PPCRH_Imm(False,31)));
            addInstr(env, mk_iMOVds_RR(tHi, tLo));
            *rHi = tHi;
            *rLo = tLo;
            return;
         }

         /* Bitwise NOT of each half. */
         case Iop_Not64: {
            HReg xLo, xHi;
            HReg tmpLo = newVRegI(env);
            HReg tmpHi = newVRegI(env);
            iselInt64Expr(&xHi, &xLo, env, e->Iex.Unop.arg);
            addInstr(env, PPCInstr_Unary(Pun_NOT,tmpLo,xLo));
            addInstr(env, PPCInstr_Unary(Pun_NOT,tmpHi,xHi));
            *rHi = tmpHi;
            *rLo = tmpLo;
            return;
         }

         /* Reinterpret an F64's bit pattern as I64: spill the FP reg
            to the stack, reload as two integer words. */
         case Iop_ReinterpF64asI64: {
            PPCAMode *am_addr0, *am_addr1;
            HReg fr_src  = iselDblExpr(env, e->Iex.Unop.arg);
            HReg r_dstLo = newVRegI(env);
            HReg r_dstHi = newVRegI(env);
            sub_from_sp( env, 16 );
            am_addr0 = PPCAMode_IR( 0, StackFramePtr(False) );
            am_addr1 = PPCAMode_IR( 4, StackFramePtr(False) );
            addInstr(env, PPCInstr_FpLdSt( False, 8,
                                           fr_src, am_addr0 ));
            addInstr(env, PPCInstr_Load( 4, r_dstHi,
                                         am_addr0, False ));
            addInstr(env, PPCInstr_Load( 4, r_dstLo,
                                         am_addr1, False ));
            *rHi = r_dstHi;
            *rLo = r_dstLo;
            add_to_sp( env, 16 );
            return;
         }

         /* Same trick for a D64 (decimal FP) bit pattern. */
         case Iop_ReinterpD64asI64: {
            HReg fr_src  = iselDfp64Expr(env, e->Iex.Unop.arg);
            PPCAMode *am_addr0, *am_addr1;
            HReg r_dstLo = newVRegI(env);
            HReg r_dstHi = newVRegI(env);
            sub_from_sp( env, 16 );
            am_addr0 = PPCAMode_IR( 0, StackFramePtr(False) );
            am_addr1 = PPCAMode_IR( 4, StackFramePtr(False) );
            addInstr(env, PPCInstr_FpLdSt( False, 8,
                                           fr_src, am_addr0 ));
            addInstr(env, PPCInstr_Load( 4, r_dstHi,
                                         am_addr0, False ));
            addInstr(env, PPCInstr_Load( 4, r_dstLo,
                                         am_addr1, False ));
            *rHi = r_dstHi;
            *rLo = r_dstLo;
            add_to_sp( env, 16 );
            return;
         }

         /* BCD -> DPB conversion via a helper call.  The 64-bit arg
            goes in GPR3:GPR4 and the result comes back in the same
            register pair. */
         case Iop_BCDtoDPB: {
            PPCCondCode cc;
            UInt        argiregs;
            HReg        argregs[2];
            Int         argreg;
            HReg        tLo = newVRegI(env);
            HReg        tHi = newVRegI(env);
            HReg        tmpHi;
            HReg        tmpLo;
            ULong       target;
            Bool        mode64 = env->mode64;
            argregs[0] = hregPPC_GPR3(mode64);
            argregs[1] = hregPPC_GPR4(mode64);
            argiregs = 0;
            argreg = 0;
            iselInt64Expr( &tmpHi, &tmpLo, env, e->Iex.Unop.arg );
            argiregs |= ( 1 << (argreg+3 ) );
            addInstr( env, mk_iMOVds_RR( argregs[argreg++], tmpHi ) );
            argiregs |= ( 1 << (argreg+3 ) );
            addInstr( env, mk_iMOVds_RR( argregs[argreg], tmpLo ) );
            cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );
            target = toUInt( Ptr_to_ULong(h_BCDtoDPB ) );
            addInstr( env, PPCInstr_Call( cc, (Addr64)target, argiregs ) );
            addInstr( env, mk_iMOVds_RR( tHi, argregs[argreg-1] ) );
            addInstr( env, mk_iMOVds_RR( tLo, argregs[argreg] ) );
            *rHi = tHi;
            *rLo = tLo;
            return;
         }

         /* DPB -> BCD: identical calling convention, different
            helper. */
         case Iop_DPBtoBCD: {
            PPCCondCode cc;
            UInt        argiregs;
            HReg        argregs[2];
            Int         argreg;
            HReg        tLo = newVRegI(env);
            HReg        tHi = newVRegI(env);
            HReg        tmpHi;
            HReg        tmpLo;
            ULong       target;
            Bool        mode64 = env->mode64;
            argregs[0] = hregPPC_GPR3(mode64);
            argregs[1] = hregPPC_GPR4(mode64);
            argiregs = 0;
            argreg = 0;
            iselInt64Expr(&tmpHi, &tmpLo, env, e->Iex.Unop.arg);
            argiregs |= (1 << (argreg+3));
            addInstr(env, mk_iMOVds_RR( argregs[argreg++], tmpHi ));
            argiregs |= (1 << (argreg+3));
            addInstr(env, mk_iMOVds_RR( argregs[argreg], tmpLo));
            cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );
            target = toUInt( Ptr_to_ULong( h_DPBtoBCD ) );
            addInstr(env, PPCInstr_Call( cc, (Addr64)target, argiregs ) );
            addInstr(env, mk_iMOVds_RR(tHi, argregs[argreg-1]));
            addInstr(env, mk_iMOVds_RR(tLo, argregs[argreg]));
            *rHi = tHi;
            *rLo = tLo;
            return;
         }

         default:
            break;
      }
   }

   vex_printf("iselInt64Expr(ppc): No such tag(%u)\n", e->tag);
   ppIRExpr(e);
   vpanic("iselInt64Expr(ppc)");
}
/* Compute an F32 value into a virtual FP register.  On PPC, F32
   values are held widened in 64-bit FP registers. */
static HReg iselFltExpr ( ISelEnv* env, IRExpr* e )
{
   HReg result = iselFltExpr_wrk( env, e );
   /* The worker must return a virtual register of FP class. */
   vassert(hregClass(result) == HRcFlt64);
   vassert(hregIsVirtual(result));
   return result;
}
/* DO NOT CALL THIS DIRECTLY !  Worker for iselFltExpr: compute a
   32-bit IEEE float (Ity_F32) into a virtual FP register. */
static HReg iselFltExpr_wrk ( ISelEnv* env, IRExpr* e )
{
   Bool        mode64 = env->mode64;
   IRType      ty = typeOfIRExpr(env->type_env,e);
   vassert(ty == Ity_F32);

   /* F32 temporaries already live in an FP register. */
   if (e->tag == Iex_RdTmp) {
      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
   }

   /* Big-endian 4-byte FP load (widens to double in the register). */
   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) {
      PPCAMode* am_addr;
      HReg r_dst = newVRegF(env);
      vassert(e->Iex.Load.ty == Ity_F32);
      am_addr = iselWordExpr_AMode(env, e->Iex.Load.addr, Ity_F32);
      addInstr(env, PPCInstr_FpLdSt(True, 4, r_dst, am_addr));
      return r_dst;
   }

   /* Read an F32 slot out of the guest state. */
   if (e->tag == Iex_Get) {
      HReg r_dst = newVRegF(env);
      PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset,
                                       GuestStatePtr(env->mode64) );
      addInstr(env, PPCInstr_FpLdSt( True, 4, r_dst, am_addr ));
      return r_dst;
   }

   if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_TruncF64asF32) {
      /* This is quite subtle.  The only way to do the relevant
         truncation is to do a single-precision store and then a
         double precision load to get it back into a register.  The
         problem is, if the data is then written to memory a second
         time, as in

            STbe(...) = TruncF64asF32(...)

         then will the second truncation further alter the value?  The
         answer is no: flds (as generated here) followed by fsts
         (generated for the STbe) is the identity function on 32-bit
         floats, so we are safe.

         Another upshot of this is that if iselStmt can see the
         entirety of

            STbe(...) = TruncF64asF32(arg)

         then it can short circuit having to deal with TruncF64asF32
         individually; instead just compute arg into a 64-bit FP
         register and do 'fsts' (since that itself does the
         truncation).

         We generate pretty poor code here (should be ok both for
         32-bit and 64-bit mode); but it is expected that for the most
         part the latter optimisation will apply and hence this code
         will not often be used.
      */
      HReg      fsrc    = iselDblExpr(env, e->Iex.Unop.arg);
      HReg      fdst    = newVRegF(env);
      PPCAMode* zero_r1 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );

      sub_from_sp( env, 16 );
      /* Single-precision store then reload performs the truncation. */
      addInstr(env, PPCInstr_FpLdSt( False, 4,
                                     fsrc, zero_r1 ));
      addInstr(env, PPCInstr_FpLdSt( True, 4,
                                     fdst, zero_r1 ));
      add_to_sp( env, 16 );
      return fdst;
   }

   if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_I64UtoF32) {
      if (mode64) {
         /* Store the 64-bit integer to the stack, reload its bit
            pattern into an FP register, then convert.  arg1 is the
            IR rounding mode. */
         HReg fdst = newVRegF(env);
         HReg isrc = iselWordExpr_R(env, e->Iex.Binop.arg2);
         HReg r1   = StackFramePtr(env->mode64);
         PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );

         set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );

         sub_from_sp( env, 16 );

         addInstr(env, PPCInstr_Store(8, zero_r1, isrc, True));
         addInstr(env, PPCInstr_FpLdSt(True, 8, fdst, zero_r1));
         addInstr(env, PPCInstr_FpCftI(True, False,
                                       False, False,
                                       fdst, fdst));

         add_to_sp( env, 16 );
         return fdst;
      } else {
         /* 32-bit mode: same idea, but the I64 arrives as a register
            pair and is stored as two words (MSW first). */
         HReg fdst = newVRegF(env);
         HReg isrcHi, isrcLo;
         HReg r1   = StackFramePtr(env->mode64);
         PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );
         PPCAMode* four_r1 = PPCAMode_IR( 4, r1 );

         iselInt64Expr(&isrcHi, &isrcLo, env, e->Iex.Binop.arg2);

         set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );

         sub_from_sp( env, 16 );

         addInstr(env, PPCInstr_Store(4, zero_r1, isrcHi, False));
         addInstr(env, PPCInstr_Store(4, four_r1, isrcLo, False));
         addInstr(env, PPCInstr_FpLdSt(True, 8, fdst, zero_r1));
         addInstr(env, PPCInstr_FpCftI(True, False,
                                       False, False,
                                       fdst, fdst));

         add_to_sp( env, 16 );
         return fdst;
      }

   }

   vex_printf("iselFltExpr(ppc): No such tag(%u)\n", e->tag);
   ppIRExpr(e);
   vpanic("iselFltExpr_wrk(ppc)");
}
/* Compute an F64 value into a virtual FP register. */
static HReg iselDblExpr ( ISelEnv* env, IRExpr* e )
{
   HReg result = iselDblExpr_wrk( env, e );
   /* The worker must return a virtual register of FP class. */
   vassert(hregClass(result) == HRcFlt64);
   vassert(hregIsVirtual(result));
   return result;
}
/* DO NOT CALL THIS DIRECTLY !  Worker for iselDblExpr: compute a
   64-bit IEEE double (Ity_F64) into a virtual FP register. */
static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e )
{
   Bool mode64 = env->mode64;
   IRType ty = typeOfIRExpr(env->type_env,e);
   vassert(e);
   vassert(ty == Ity_F64);

   /* F64 temporaries already live in an FP register. */
   if (e->tag == Iex_RdTmp) {
      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
   }

   /* F64 literal: materialise the bit pattern in integer reg(s) and
      move it across to an FP register via the stack.  The union is
      used to view the constant as raw 32-bit / 64-bit words. */
   if (e->tag == Iex_Const) {
      union { UInt u32x2[2]; ULong u64; Double f64; } u;
      vassert(sizeof(u) == 8);
      vassert(sizeof(u.u64) == 8);
      vassert(sizeof(u.f64) == 8);
      vassert(sizeof(u.u32x2) == 8);

      if (e->Iex.Const.con->tag == Ico_F64) {
         u.f64 = e->Iex.Const.con->Ico.F64;
      }
      else if (e->Iex.Const.con->tag == Ico_F64i) {
         u.u64 = e->Iex.Const.con->Ico.F64i;
      }
      else
         vpanic("iselDblExpr(ppc): const");

      if (!mode64) {
         HReg r_srcHi = newVRegI(env);
         HReg r_srcLo = newVRegI(env);
         addInstr(env, PPCInstr_LI(r_srcHi, u.u32x2[0], mode64));
         addInstr(env, PPCInstr_LI(r_srcLo, u.u32x2[1], mode64));
         return mk_LoadRR32toFPR( env, r_srcHi, r_srcLo );
      } else { // mode64
         HReg r_src = newVRegI(env);
         addInstr(env, PPCInstr_LI(r_src, u.u64, mode64));
         return mk_LoadR64toFPR( env, r_src );         // 1*I64 -> F64
      }
   }

   /* Big-endian 8-byte FP load. */
   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) {
      HReg r_dst = newVRegF(env);
      PPCAMode* am_addr;
      vassert(e->Iex.Load.ty == Ity_F64);
      am_addr = iselWordExpr_AMode(env, e->Iex.Load.addr, Ity_F64);
      addInstr(env, PPCInstr_FpLdSt(True, 8, r_dst, am_addr));
      return r_dst;
   }

   /* Read an F64 slot out of the guest state. */
   if (e->tag == Iex_Get) {
      HReg r_dst = newVRegF(env);
      PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset,
                                       GuestStatePtr(mode64) );
      addInstr(env, PPCInstr_FpLdSt( True, 8, r_dst, am_addr ));
      return r_dst;
   }

   /* Fused multiply-add/sub quaternary ops; arg1 is the rounding
      mode, args 2/3 are multiplied, arg4 is the accumulator. */
   if (e->tag == Iex_Qop) {
      PPCFpOp fpop = Pfp_INVALID;
      switch (e->Iex.Qop.details->op) {
         case Iop_MAddF64:    fpop = Pfp_MADDD; break;
         case Iop_MAddF64r32: fpop = Pfp_MADDS; break;
         case Iop_MSubF64:    fpop = Pfp_MSUBD; break;
         case Iop_MSubF64r32: fpop = Pfp_MSUBS; break;
         default: break;
      }
      if (fpop != Pfp_INVALID) {
         HReg r_dst   = newVRegF(env);
         HReg r_srcML = iselDblExpr(env, e->Iex.Qop.details->arg2);
         HReg r_srcMR = iselDblExpr(env, e->Iex.Qop.details->arg3);
         HReg r_srcAcc = iselDblExpr(env, e->Iex.Qop.details->arg4);
         set_FPU_rounding_mode( env, e->Iex.Qop.details->arg1 );
         addInstr(env, PPCInstr_FpMulAcc(fpop, r_dst,
                                         r_srcML, r_srcMR, r_srcAcc));
         return r_dst;
      }
   }

   if (e->tag == Iex_Triop) {
      IRTriop *triop = e->Iex.Triop.details;
      PPCFpOp fpop = Pfp_INVALID;
      /* Basic arithmetic; the r32 variants round the result to
         single precision. */
      switch (triop->op) {
         case Iop_AddF64:    fpop = Pfp_ADDD; break;
         case Iop_SubF64:    fpop = Pfp_SUBD; break;
         case Iop_MulF64:    fpop = Pfp_MULD; break;
         case Iop_DivF64:    fpop = Pfp_DIVD; break;
         case Iop_AddF64r32: fpop = Pfp_ADDS; break;
         case Iop_SubF64r32: fpop = Pfp_SUBS; break;
         case Iop_MulF64r32: fpop = Pfp_MULS; break;
         case Iop_DivF64r32: fpop = Pfp_DIVS; break;
         default: break;
      }
      if (fpop != Pfp_INVALID) {
         HReg r_dst  = newVRegF(env);
         HReg r_srcL = iselDblExpr(env, triop->arg2);
         HReg r_srcR = iselDblExpr(env, triop->arg3);
         set_FPU_rounding_mode( env, triop->arg1 );
         addInstr(env, PPCInstr_FpBinary(fpop, r_dst, r_srcL, r_srcR));
         return r_dst;
      }
      /* NOTE(review): these look like decimal-FP ops being routed
         through the binary-FP path (operands selected with
         iselDblExpr) — presumably both live in the FPR file; confirm
         against the IR op definitions. */
      switch (triop->op) {
         case Iop_QuantizeD64:          fpop = Pfp_DQUA;  break;
         case Iop_SignificanceRoundD64: fpop = Pfp_RRDTR; break;
         default: break;
      }
      if (fpop != Pfp_INVALID) {
         HReg r_dst  = newVRegF(env);
         HReg r_srcL = iselDblExpr(env, triop->arg2);
         HReg r_srcR = iselDblExpr(env, triop->arg3);
         PPCRI* rmc  = iselWordExpr_RI(env, triop->arg1);
         addInstr(env, PPCInstr_DfpQuantize(fpop, r_dst, r_srcL, r_srcR, rmc));
         return r_dst;
      }
   }

   /* Rounding-mode-dependent unary ops: arg1 = rounding mode,
      arg2 = operand. */
   if (e->tag == Iex_Binop) {
      PPCFpOp fpop = Pfp_INVALID;
      switch (e->Iex.Binop.op) {
         case Iop_SqrtF64:   fpop = Pfp_SQRT;   break;
         case Iop_I64StoD64: fpop = Pfp_DCFFIX; break;
         case Iop_D64toI64S: fpop = Pfp_DCTFIX; break;
         default: break;
      }
      if (fpop != Pfp_INVALID) {
         HReg fr_dst = newVRegF(env);
         HReg fr_src = iselDblExpr(env, e->Iex.Binop.arg2);
         set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
         addInstr(env, PPCInstr_FpUnary(fpop, fr_dst, fr_src));
         return fr_dst;
      }
   }

   if (e->tag == Iex_Binop) {

      /* Round an F64 to the nearest representable F32 (still held in
         a double register). */
      if (e->Iex.Binop.op == Iop_RoundF64toF32) {
         HReg r_dst = newVRegF(env);
         HReg r_src = iselDblExpr(env, e->Iex.Binop.arg2);
         set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
         addInstr(env, PPCInstr_FpRSP(r_dst, r_src));
         return r_dst;
      }

      /* I64 -> F64 conversion: move the integer bit pattern to an FP
         register via the stack, then convert in place. */
      if (e->Iex.Binop.op == Iop_I64StoF64 || e->Iex.Binop.op == Iop_I64UtoF64) {
         if (mode64) {
            HReg fdst = newVRegF(env);
            HReg isrc = iselWordExpr_R(env, e->Iex.Binop.arg2);
            HReg r1   = StackFramePtr(env->mode64);
            PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );

            set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );

            sub_from_sp( env, 16 );

            addInstr(env, PPCInstr_Store(8, zero_r1, isrc, True));
            addInstr(env, PPCInstr_FpLdSt(True, 8, fdst, zero_r1));
            addInstr(env, PPCInstr_FpCftI(True, False,
                                          e->Iex.Binop.op == Iop_I64StoF64,
                                          True,
                                          fdst, fdst));

            add_to_sp( env, 16 );
            return fdst;
         } else {
            /* 32-bit mode: the I64 arrives as a register pair and is
               stored as two words (MSW first). */
            HReg fdst = newVRegF(env);
            HReg isrcHi, isrcLo;
            HReg r1   = StackFramePtr(env->mode64);
            PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );
            PPCAMode* four_r1 = PPCAMode_IR( 4, r1 );

            iselInt64Expr(&isrcHi, &isrcLo, env, e->Iex.Binop.arg2);

            set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );

            sub_from_sp( env, 16 );

            addInstr(env, PPCInstr_Store(4, zero_r1, isrcHi, False));
            addInstr(env, PPCInstr_Store(4, four_r1, isrcLo, False));
            addInstr(env, PPCInstr_FpLdSt(True, 8, fdst, zero_r1));
            addInstr(env, PPCInstr_FpCftI(True, False,
                                          e->Iex.Binop.op == Iop_I64StoF64,
                                          True,
                                          fdst, fdst));

            add_to_sp( env, 16 );
            return fdst;
         }
      }

   }

   /* Simple rounding-mode-independent unary ops. */
   if (e->tag == Iex_Unop) {
      PPCFpOp fpop = Pfp_INVALID;
      switch (e->Iex.Unop.op) {
         case Iop_NegF64:     fpop = Pfp_NEG; break;
         case Iop_AbsF64:     fpop = Pfp_ABS; break;
         case Iop_Est5FRSqrt: fpop = Pfp_RSQRTE; break;
         case Iop_RoundF64toF64_NegINF:  fpop = Pfp_FRIM; break;
         case Iop_RoundF64toF64_PosINF:  fpop = Pfp_FRIP; break;
         case Iop_RoundF64toF64_NEAREST: fpop = Pfp_FRIN; break;
         case Iop_RoundF64toF64_ZERO:    fpop = Pfp_FRIZ; break;
         case Iop_ExtractExpD64:         fpop = Pfp_DXEX; break;
         default: break;
      }
      if (fpop != Pfp_INVALID) {
         HReg fr_dst = newVRegF(env);
         HReg fr_src = iselDblExpr(env, e->Iex.Unop.arg);
         addInstr(env, PPCInstr_FpUnary(fpop, fr_dst, fr_src));
         return fr_dst;
      }
   }

   if (e->tag == Iex_Unop) {
      switch (e->Iex.Unop.op) {
         /* Move an I64 bit pattern into an FP register. */
         case Iop_ReinterpI64asF64: {
            if (!mode64) {
               HReg r_srcHi, r_srcLo;
               iselInt64Expr( &r_srcHi, &r_srcLo, env, e->Iex.Unop.arg);
               return mk_LoadRR32toFPR( env, r_srcHi, r_srcLo );
            } else {
               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
               return mk_LoadR64toFPR( env, r_src );
            }
         }

         case Iop_F32toF64: {
            /* Special case: F32toF64(ReinterpI32asF32(x)) can be
               done directly by storing x as a 4-byte word and doing
               a single-precision FP load (which widens). */
            if (e->Iex.Unop.arg->tag == Iex_Unop &&
                e->Iex.Unop.arg->Iex.Unop.op == Iop_ReinterpI32asF32 ) {
               e = e->Iex.Unop.arg;

               HReg src = iselWordExpr_R(env, e->Iex.Unop.arg);
               HReg fr_dst = newVRegF(env);
               PPCAMode *am_addr;

               sub_from_sp( env, 16 );        // Move SP down 16 bytes
               am_addr = PPCAMode_IR( 0, StackFramePtr(env->mode64) );

               // store src as Ity_I32's
               addInstr(env, PPCInstr_Store( 4, am_addr, src, env->mode64 ));

               // load single precision float, but the end results loads into a
               // 64-bit FP register -- i.e. F64.
               addInstr(env, PPCInstr_FpLdSt(True, 4, fr_dst, am_addr));

               add_to_sp( env, 16 );          // Reset SP
               return fr_dst;
            }

            /* Otherwise, the F32 is already widened in its FP reg. */
            HReg res = iselFltExpr(env, e->Iex.Unop.arg);
            return res;
         }

         default:
            break;
      }
   }

   /* F64 Mux0X: compute exprX into the result, then conditionally
      overwrite with expr0 if (cond & 0xFF) == 0. */
   if (e->tag == Iex_Mux0X) {
      if (ty == Ity_F64
          && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I8) {
         PPCCondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
         HReg r_cond = iselWordExpr_R(env, e->Iex.Mux0X.cond);
         HReg frX    = iselDblExpr(env, e->Iex.Mux0X.exprX);
         HReg fr0    = iselDblExpr(env, e->Iex.Mux0X.expr0);
         HReg fr_dst = newVRegF(env);
         HReg r_tmp  = newVRegI(env);
         addInstr(env, PPCInstr_Alu(Palu_AND, r_tmp,
                                    r_cond, PPCRH_Imm(False,0xFF)));
         addInstr(env, PPCInstr_FpUnary( Pfp_MOV, fr_dst, frX ));
         addInstr(env, PPCInstr_Cmp(False, True,
                                    7, r_tmp, PPCRH_Imm(False,0)));
         addInstr(env, PPCInstr_FpCMov( cc, fr_dst, fr0 ));
         return fr_dst;
      }
   }

   vex_printf("iselDblExpr(ppc): No such tag(%u)\n", e->tag);
   ppIRExpr(e);
   vpanic("iselDblExpr_wrk(ppc)");
}
/* Compute a 64-bit decimal floating value (Ity_D64) into a virtual
   FP register. */
static HReg iselDfp64Expr(ISelEnv* env, IRExpr* e)
{
   HReg result = iselDfp64Expr_wrk( env, e );
   /* The worker must return a virtual register of FP class. */
   vassert(hregClass(result) == HRcFlt64);
   vassert(hregIsVirtual(result));
   return result;
}
/* DO NOT CALL THIS DIRECTLY !  Worker for iselDfp64Expr: compute a
   64-bit decimal floating value (Ity_D64) into a virtual FP register. */
static HReg iselDfp64Expr_wrk(ISelEnv* env, IRExpr* e)
{
   Bool mode64 = env->mode64;
   IRType ty = typeOfIRExpr( env->type_env, e );
   HReg r_dstHi, r_dstLo;

   vassert( e );
   vassert( ty == Ity_D64 );

   /* D64 temporaries already live in an FP register. */
   if (e->tag == Iex_RdTmp) {
      return lookupIRTemp( env, e->Iex.RdTmp.tmp );
   }

   /* Read a D64 slot out of the guest state. */
   if (e->tag == Iex_Get) {
      HReg r_dst = newVRegF( env );
      PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset,
                                       GuestStatePtr(mode64) );
      addInstr( env, PPCInstr_FpLdSt( True, 8, r_dst, am_addr ) );
      return r_dst;
   }

   if (e->tag == Iex_Qop) {
      /* NOTE(review): no D64 Qops are actually selected here; this
         returns a fresh, never-written register.  Preserved as-is —
         confirm whether any Qop can reach this point. */
      HReg r_dst = newVRegF( env );
      return r_dst;
   }

   if (e->tag == Iex_Unop) {
      HReg fr_dst = newVRegF(env);
      switch (e->Iex.Unop.op) {
         /* Move an I64 bit pattern into an FP register. */
         case Iop_ReinterpI64asD64: {
            if (!mode64) {
               HReg r_srcHi, r_srcLo;
               iselInt64Expr( &r_srcHi, &r_srcLo, env, e->Iex.Unop.arg);
               return mk_LoadRR32toFPR( env, r_srcHi, r_srcLo );
            } else {
               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
               return mk_LoadR64toFPR( env, r_src );
            }
         }
         case Iop_ExtractExpD64: {
            HReg fr_src = iselDfp64Expr(env, e->Iex.Unop.arg);
            addInstr(env, PPCInstr_Dfp64Unary(Pfp_DXEX, fr_dst, fr_src));
            return fr_dst;
         }
         case Iop_ExtractExpD128: {
            /* Extract the exponent of a D128 register pair. */
            HReg r_srcHi;
            HReg r_srcLo;
            iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Unop.arg);
            addInstr(env, PPCInstr_ExtractExpD128(Pfp_DXEXQ, fr_dst,
                                                  r_srcHi, r_srcLo));
            return fr_dst;
         }
         case Iop_D32toD64: {
            HReg fr_src = iselDfp64Expr(env, e->Iex.Unop.arg);
            addInstr(env, PPCInstr_Dfp64Unary(Pfp_DCTDP, fr_dst, fr_src));
            return fr_dst;
         }
         case Iop_D128HItoD64:
            iselDfp128Expr( &r_dstHi, &r_dstLo, env, e->Iex.Unop.arg );
            return r_dstHi;
         case Iop_D128LOtoD64:
            iselDfp128Expr( &r_dstHi, &r_dstLo, env, e->Iex.Unop.arg );
            return r_dstLo;
         /* BUGFIX: a 'case Iop_InsertExpD64' used to live here, in the
            *unop* switch, reading e->Iex.Binop.arg1/arg2 out of an
            expression whose tag is Iex_Unop — a wrong-union access.
            Iop_InsertExpD64 is a binop and is handled correctly in the
            Iex_Binop section below, so the bogus case is removed. */
         default:
            vex_printf( "ERROR: iselDfp64Expr_wrk, UNKNOWN unop case %d\n",
                        e->Iex.Unop.op );
      }
   }

   if (e->tag == Iex_Binop) {
      switch (e->Iex.Binop.op) {
         /* Narrowing conversions from a D128 register pair; arg1 is
            the DFP rounding mode. */
         case Iop_D128toI64S: {
            PPCFpOp fpop = Pfp_DCTFIXQ;
            HReg fr_dst  = newVRegF(env);
            HReg r_srcHi, r_srcLo;  /* written by iselDfp128Expr */

            set_FPU_DFP_rounding_mode( env, e->Iex.Binop.arg1 );
            iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg2);
            addInstr(env, PPCInstr_DfpD128toD64(fpop, fr_dst, r_srcHi, r_srcLo));
            return fr_dst;
         }
         case Iop_D128toD64: {
            PPCFpOp fpop = Pfp_DRDPQ;
            HReg fr_dst  = newVRegF(env);
            HReg r_srcHi, r_srcLo;  /* written by iselDfp128Expr */

            set_FPU_DFP_rounding_mode( env, e->Iex.Binop.arg1 );
            iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg2);
            addInstr(env, PPCInstr_DfpD128toD64(fpop, fr_dst, r_srcHi, r_srcLo));
            return fr_dst;
         }
         default:
            break;
      }

      /* BUGFIX: this test used to read e->Iex.Unop.op on an Iex_Binop
         node; that only worked because the 'op' fields of the Unop and
         Binop union members happen to share an offset. */
      if (e->Iex.Binop.op == Iop_RoundD64toInt) {
         HReg fr_dst  = newVRegF(env);
         PPCRI* r_rmc = iselWordExpr_RI(env, e->Iex.Binop.arg1);
         HReg fr_src  = iselDfp64Expr(env, e->Iex.Binop.arg2);

         addInstr(env, PPCInstr_DfpRound(fr_dst, fr_src, r_rmc));
         return fr_dst;
      }
   }

   if (e->tag == Iex_Binop) {
      PPCFpOp fpop = Pfp_INVALID;
      HReg fr_dst = newVRegF(env);

      /* Rounding-mode-dependent unary conversions; arg1 is the DFP
         rounding mode. */
      switch (e->Iex.Binop.op) {
         case Iop_D64toD32:  fpop = Pfp_DRSP;   break;
         case Iop_I64StoD64: fpop = Pfp_DCFFIX; break;
         case Iop_D64toI64S: fpop = Pfp_DCTFIX; break;
         default: break;
      }
      if (fpop != Pfp_INVALID) {
         HReg fr_src = iselDfp64Expr(env, e->Iex.Binop.arg2);
         set_FPU_DFP_rounding_mode( env, e->Iex.Binop.arg1 );
         addInstr(env, PPCInstr_Dfp64Unary(fpop, fr_dst, fr_src));
         return fr_dst;
      }

      /* Decimal shift of the significand. */
      switch (e->Iex.Binop.op) {
         case Iop_ShlD64: fpop = Pfp_DSCLI; break;
         case Iop_ShrD64: fpop = Pfp_DSCRI; break;
         default: break;
      }
      if (fpop != Pfp_INVALID) {
         HReg fr_src  = iselDfp64Expr(env, e->Iex.Binop.arg1);
         PPCRI* shift = iselWordExpr_RI(env, e->Iex.Binop.arg2);

         addInstr(env, PPCInstr_DfpShift(fpop, fr_dst, fr_src, shift));
         return fr_dst;
      }

      /* Insert exponent (diex). */
      switch (e->Iex.Binop.op) {
         case Iop_InsertExpD64:
            fpop = Pfp_DIEX;
            break;
         default: break;
      }
      if (fpop != Pfp_INVALID) {
         HReg fr_srcL = iselDfp64Expr(env, e->Iex.Binop.arg1);
         HReg fr_srcR = iselDfp64Expr(env, e->Iex.Binop.arg2);
         addInstr(env, PPCInstr_Dfp64Binary(fpop, fr_dst, fr_srcL, fr_srcR));
         return fr_dst;
      }
   }

   if (e->tag == Iex_Triop) {
      IRTriop *triop = e->Iex.Triop.details;
      PPCFpOp fpop = Pfp_INVALID;

      /* Basic DFP arithmetic; arg1 is the DFP rounding mode. */
      switch (triop->op) {
         case Iop_AddD64:
            fpop = Pfp_DFPADD;
            break;
         case Iop_SubD64:
            fpop = Pfp_DFPSUB;
            break;
         case Iop_MulD64:
            fpop = Pfp_DFPMUL;
            break;
         case Iop_DivD64:
            fpop = Pfp_DFPDIV;
            break;
         default:
            break;
      }
      if (fpop != Pfp_INVALID) {
         HReg r_dst  = newVRegF( env );
         HReg r_srcL = iselDfp64Expr( env, triop->arg2 );
         HReg r_srcR = iselDfp64Expr( env, triop->arg3 );

         set_FPU_DFP_rounding_mode( env, triop->arg1 );
         addInstr( env, PPCInstr_Dfp64Binary( fpop, r_dst, r_srcL, r_srcR ) );
         return r_dst;
      }

      /* Quantize/round-to-significance: the rounding-mode control is
         an RI operand rather than going via the FPSCR. */
      switch (triop->op) {
         case Iop_QuantizeD64:          fpop = Pfp_DQUA;  break;
         case Iop_SignificanceRoundD64: fpop = Pfp_RRDTR; break;
         default: break;
      }
      if (fpop != Pfp_INVALID) {
         HReg r_dst  = newVRegF(env);
         HReg r_srcL = iselDfp64Expr(env, triop->arg2);
         HReg r_srcR = iselDfp64Expr(env, triop->arg3);
         PPCRI* rmc  = iselWordExpr_RI(env, triop->arg1);

         addInstr(env, PPCInstr_DfpQuantize(fpop, r_dst, r_srcL, r_srcR,
                                            rmc));
         return r_dst;
      }
   }

   ppIRExpr( e );
   vpanic( "iselDfp64Expr_wrk(ppc)" );
}
/* Compute a 128-bit decimal floating value into the FP register pair
   (rHi, rLo). */
static void iselDfp128Expr(HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e)
{
   iselDfp128Expr_wrk( rHi, rLo, env, e );
   /* Both halves must come back as virtual registers. */
   HReg hiHalf = *rHi;
   HReg loHalf = *rLo;
   vassert( hregIsVirtual(hiHalf) );
   vassert( hregIsVirtual(loHalf) );
}
/* DO NOT CALL THIS DIRECTLY: use iselDfp128Expr.

   Generate code to compute a 128-bit decimal floating point (D128)
   value into a register pair, returned via *rHi/*rLo.

   FIX: the D128 cases below previously set the rounding mode with
   set_FPU_rounding_mode (the binary-FP RN field of the FPSCR).  DFP
   instructions round according to the separate DRN field, which is
   what set_FPU_DFP_rounding_mode programs — and which the analogous
   D64 code in this file already uses.  Both call sites (Iop_D128toD64
   and the Add/Sub/Mul/DivD128 triops) are switched accordingly. */
static void iselDfp128Expr_wrk(HReg* rHi, HReg *rLo, ISelEnv* env, IRExpr* e)
{
   vassert( e );
   vassert( typeOfIRExpr(env->type_env,e) == Ity_D128 );

   /* Already computed into an IR temp: hand back its register pair. */
   if (e->tag == Iex_RdTmp) {
      lookupIRTempPair( rHi, rLo, env, e->Iex.RdTmp.tmp );
      return;
   }

   /* --------- UNARY ops --------- */
   if (e->tag == Iex_Unop) {
      PPCFpOp fpop = Pfp_INVALID;
      HReg r_dstHi = newVRegF(env);
      HReg r_dstLo = newVRegF(env);

      if (e->Iex.Unop.op == Iop_I64StoD128) {
         /* signed 64-bit int -> D128 */
         HReg r_src = iselDfp64Expr(env, e->Iex.Unop.arg);
         fpop = Pfp_DCFFIXQ;
         addInstr(env, PPCInstr_DfpI64StoD128(fpop, r_dstHi, r_dstLo,
                                              r_src));
      }
      if (e->Iex.Unop.op == Iop_D64toD128) {
         /* widen D64 -> D128 */
         HReg r_src = iselDfp64Expr(env, e->Iex.Unop.arg);
         fpop = Pfp_DCTQPQ;
         addInstr(env, PPCInstr_Dfp128Unary(fpop, r_dstHi, r_dstLo,
                                            r_src, r_src));
      }
      /* NOTE(review): an unop other than the two above falls through
         here and returns uninitialised registers, exactly as the
         original code did.  Unhandled ops are expected not to reach
         this point. */
      *rHi = r_dstHi;
      *rLo = r_dstLo;
      return;
   }

   /* --------- BINARY ops --------- */
   if (e->tag == Iex_Binop) {
      HReg r_srcHi;
      HReg r_srcLo;

      switch (e->Iex.Binop.op) {
      case Iop_D64HLtoD128:
         /* Just pair up the two D64 halves; no code emitted. */
         r_srcHi = iselDfp64Expr( env, e->Iex.Binop.arg1 );
         r_srcLo = iselDfp64Expr( env, e->Iex.Binop.arg2 );
         *rHi = r_srcHi;
         *rLo = r_srcLo;
         return;
      case Iop_D128toD64: {
         /* Round D128 down to D64 (drdpq); arg1 is the IR rounding
            mode, which for DFP must go into FPSCR.DRN. */
         PPCFpOp fpop = Pfp_DRDPQ;
         HReg fr_dst = newVRegF(env);
         set_FPU_DFP_rounding_mode( env, e->Iex.Binop.arg1 );
         iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg2);
         addInstr(env, PPCInstr_DfpD128toD64(fpop, fr_dst, r_srcHi, r_srcLo));
         /* Result is a D64; both halves refer to the same register. */
         *rHi = fr_dst;
         *rLo = fr_dst;
         return;
      }
      case Iop_ShlD128:
      case Iop_ShrD128: {
         /* Decimal digit shift by an immediate/register count. */
         HReg fr_dst_hi = newVRegF(env);
         HReg fr_dst_lo = newVRegF(env);
         PPCRI* shift = iselWordExpr_RI(env, e->Iex.Binop.arg2);
         PPCFpOp fpop = Pfp_DSCLIQ;  /* fix me if use shift depth > 63 */
         iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg1);
         if (e->Iex.Binop.op == Iop_ShrD128)
            fpop = Pfp_DSCRIQ;
         addInstr(env, PPCInstr_DfpShift128(fpop, fr_dst_hi, fr_dst_lo,
                                            r_srcHi, r_srcLo, shift));
         *rHi = fr_dst_hi;
         *rLo = fr_dst_lo;
         return;
      }
      case Iop_RoundD128toInt: {
         /* Round to integral value; arg1 carries the RMC encoding. */
         HReg r_dstHi = newVRegF(env);
         HReg r_dstLo = newVRegF(env);
         PPCRI* r_rmc = iselWordExpr_RI(env, e->Iex.Binop.arg1);
         iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg2);
         addInstr(env, PPCInstr_DfpRound128(r_dstHi, r_dstLo,
                                            r_srcHi, r_srcLo, r_rmc));
         *rHi = r_dstHi;
         *rLo = r_dstLo;
         return;
      }
      case Iop_InsertExpD128: {
         /* Insert biased exponent (arg1, a D64) into D128 arg2. */
         HReg r_dstHi = newVRegF(env);
         HReg r_dstLo = newVRegF(env);
         HReg r_srcL  = iselDfp64Expr(env, e->Iex.Binop.arg1);
         iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg2);
         addInstr(env, PPCInstr_InsertExpD128(Pfp_DIEXQ,
                                              r_dstHi, r_dstLo,
                                              r_srcL, r_srcHi, r_srcLo));
         *rHi = r_dstHi;
         *rLo = r_dstLo;
         return;
      }
      default:
         vex_printf( "ERROR: iselDfp128Expr_wrk, UNKNOWN binop case %d\n",
                     e->Iex.Binop.op );
         break;
      }
   }

   /* --------- TERNARY ops --------- */
   if (e->tag == Iex_Triop) {
      IRTriop *triop = e->Iex.Triop.details;
      PPCFpOp fpop = Pfp_INVALID;

      /* Arithmetic ops: arg1 is the rounding mode. */
      switch (triop->op) {
      case Iop_AddD128:
         fpop = Pfp_DFPADDQ;
         break;
      case Iop_SubD128:
         fpop = Pfp_DFPSUBQ;
         break;
      case Iop_MulD128:
         fpop = Pfp_DFPMULQ;
         break;
      case Iop_DivD128:
         fpop = Pfp_DFPDIVQ;
         break;
      default:
         break;
      }

      if (fpop != Pfp_INVALID) {
         HReg r_dstHi = newVRegV( env );
         HReg r_dstLo = newVRegV( env );
         HReg r_srcRHi = newVRegV( env );
         HReg r_srcRLo = newVRegV( env );

         /* dst is used as the left source as well (two-operand form). */
         iselDfp128Expr( &r_dstHi, &r_dstLo, env, triop->arg2 );
         iselDfp128Expr( &r_srcRHi, &r_srcRLo, env, triop->arg3 );
         /* DFP arithmetic rounds per FPSCR.DRN, not the BFP RN field. */
         set_FPU_DFP_rounding_mode( env, triop->arg1 );
         addInstr( env,
                   PPCInstr_Dfp128Binary( fpop, r_dstHi, r_dstLo,
                                          r_srcRHi, r_srcRLo ) );
         *rHi = r_dstHi;
         *rLo = r_dstLo;
         return;
      }

      /* Quantize / significance-round ops: arg1 is the RMC value. */
      switch (triop->op) {
      case Iop_QuantizeD128:          fpop = Pfp_DQUAQ;  break;
      case Iop_SignificanceRoundD128: fpop = Pfp_DRRNDQ; break;
      default: break;
      }

      if (fpop != Pfp_INVALID) {
         HReg r_dstHi = newVRegF(env);
         HReg r_dstLo = newVRegF(env);
         HReg r_srcHi = newVRegF(env);
         HReg r_srcLo = newVRegF(env);
         PPCRI* rmc = iselWordExpr_RI(env, triop->arg1);

         iselDfp128Expr(&r_dstHi, &r_dstLo, env, triop->arg2);
         iselDfp128Expr(&r_srcHi, &r_srcLo, env, triop->arg3);
         addInstr(env, PPCInstr_DfpQuantize128(fpop, r_dstHi, r_dstLo,
                                               r_srcHi, r_srcLo, rmc));
         *rHi = r_dstHi;
         *rLo = r_dstLo;
         return;
      }
   }

   ppIRExpr( e );
   vpanic( "iselDfp128Expr(ppc64)" );
}
/* Compute a V128 value into a (virtual) AltiVec register.  Checked
   wrapper around iselVecExpr_wrk: delegates the selection work, then
   verifies the result is a virtual vector register. */
static HReg iselVecExpr ( ISelEnv* env, IRExpr* e )
{
   HReg r = iselVecExpr_wrk(env, e);
#  if 0
   /* Debugging aid: dump the expression being selected. */
   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
#  endif
   vassert(hregClass(r) == HRcVec128);
   vassert(hregIsVirtual(r));
   return r;
}
/* DO NOT CALL THIS DIRECTLY: use iselVecExpr.

   Generate code to compute a 128-bit vector (V128) value into a newly
   allocated virtual AltiVec register, which is returned.  Dispatches on
   the IR expression tag and, within Unop/Binop, on the IROp. */
static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e )
{
   Bool mode64 = env->mode64;
   PPCAvOp op = Pav_INVALID;       /* integer/logical AltiVec op */
   PPCAvFpOp fpop = Pavfp_INVALID; /* floating-point AltiVec op */
   IRType ty = typeOfIRExpr(env->type_env,e);
   vassert(e);
   vassert(ty == Ity_V128);

   /* Value already in an IR temp: reuse its register. */
   if (e->tag == Iex_RdTmp) {
      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
   }

   /* 16-byte load from the guest state. */
   if (e->tag == Iex_Get) {
      HReg dst = newVRegV(env);
      addInstr(env,
               PPCInstr_AvLdSt( True, 16, dst,
                                PPCAMode_IR( e->Iex.Get.offset,
                                             GuestStatePtr(mode64) )));
      return dst;
   }

   /* 16-byte big-endian load from memory. */
   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) {
      PPCAMode* am_addr;
      HReg v_dst = newVRegV(env);
      vassert(e->Iex.Load.ty == Ity_V128);
      am_addr = iselWordExpr_AMode(env, e->Iex.Load.addr, Ity_V128);
      addInstr(env, PPCInstr_AvLdSt( True, 16, v_dst, am_addr));
      return v_dst;
   }

   /* --------- UNARY ops --------- */
   if (e->tag == Iex_Unop) {
      switch (e->Iex.Unop.op) {

      case Iop_NotV128: {
         HReg arg = iselVecExpr(env, e->Iex.Unop.arg);
         HReg dst = newVRegV(env);
         addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, arg));
         return dst;
      }

      /* CmpNEZ lane tests: build zero (x XOR x), compare-equal against
         it per lane, then invert -- yielding all-ones in nonzero lanes. */
      case Iop_CmpNEZ8x16: {
         HReg arg  = iselVecExpr(env, e->Iex.Unop.arg);
         HReg zero = newVRegV(env);
         HReg dst  = newVRegV(env);
         addInstr(env, PPCInstr_AvBinary(Pav_XOR, zero, zero, zero));
         addInstr(env, PPCInstr_AvBin8x16(Pav_CMPEQU, dst, arg, zero));
         addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst));
         return dst;
      }

      case Iop_CmpNEZ16x8: {
         HReg arg  = iselVecExpr(env, e->Iex.Unop.arg);
         HReg zero = newVRegV(env);
         HReg dst  = newVRegV(env);
         addInstr(env, PPCInstr_AvBinary(Pav_XOR, zero, zero, zero));
         addInstr(env, PPCInstr_AvBin16x8(Pav_CMPEQU, dst, arg, zero));
         addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst));
         return dst;
      }

      case Iop_CmpNEZ32x4: {
         HReg arg  = iselVecExpr(env, e->Iex.Unop.arg);
         HReg zero = newVRegV(env);
         HReg dst  = newVRegV(env);
         addInstr(env, PPCInstr_AvBinary(Pav_XOR, zero, zero, zero));
         addInstr(env, PPCInstr_AvBin32x4(Pav_CMPEQU, dst, arg, zero));
         addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst));
         return dst;
      }

      /* Single-instruction 4 x F32 unary ops. */
      case Iop_Recip32Fx4:    fpop = Pavfp_RCPF;    goto do_32Fx4_unary;
      case Iop_RSqrt32Fx4:    fpop = Pavfp_RSQRTF;  goto do_32Fx4_unary;
      case Iop_I32UtoFx4:     fpop = Pavfp_CVTU2F;  goto do_32Fx4_unary;
      case Iop_I32StoFx4:     fpop = Pavfp_CVTS2F;  goto do_32Fx4_unary;
      case Iop_QFtoI32Ux4_RZ: fpop = Pavfp_QCVTF2U; goto do_32Fx4_unary;
      case Iop_QFtoI32Sx4_RZ: fpop = Pavfp_QCVTF2S; goto do_32Fx4_unary;
      case Iop_RoundF32x4_RM: fpop = Pavfp_ROUNDM;  goto do_32Fx4_unary;
      case Iop_RoundF32x4_RP: fpop = Pavfp_ROUNDP;  goto do_32Fx4_unary;
      case Iop_RoundF32x4_RN: fpop = Pavfp_ROUNDN;  goto do_32Fx4_unary;
      case Iop_RoundF32x4_RZ: fpop = Pavfp_ROUNDZ;  goto do_32Fx4_unary;
      do_32Fx4_unary:
      {
         HReg arg = iselVecExpr(env, e->Iex.Unop.arg);
         HReg dst = newVRegV(env);
         addInstr(env, PPCInstr_AvUn32Fx4(fpop, dst, arg));
         return dst;
      }

      case Iop_32UtoV128: {
         /* Zero a 16-byte aligned stack slot, store the 32-bit value in
            the last word, then vector-load it back.  Goes via memory
            because there is no direct GPR->VR move on this target. */
         HReg r_aligned16, r_zeros;
         HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
         HReg   dst = newVRegV(env);
         PPCAMode *am_off0, *am_off4, *am_off8, *am_off12;
         sub_from_sp( env, 32 );     // Move SP down
         /* Get a quadword aligned address within our stack space */
         r_aligned16 = get_sp_aligned16( env );
         am_off0  = PPCAMode_IR( 0,  r_aligned16 );
         am_off4  = PPCAMode_IR( 4,  r_aligned16 );
         am_off8  = PPCAMode_IR( 8,  r_aligned16 );
         am_off12 = PPCAMode_IR( 12, r_aligned16 );

         /* Store zeros */
         r_zeros = newVRegI(env);
         addInstr(env, PPCInstr_LI(r_zeros, 0x0, mode64));
         addInstr(env, PPCInstr_Store( 4, am_off0, r_zeros, mode64 ));
         addInstr(env, PPCInstr_Store( 4, am_off4, r_zeros, mode64 ));
         addInstr(env, PPCInstr_Store( 4, am_off8, r_zeros, mode64 ));

         /* Store r_src in low word of quadword-aligned mem */
         addInstr(env, PPCInstr_Store( 4, am_off12, r_src, mode64 ));

         /* Load word into low word of quadword vector reg */
         addInstr(env, PPCInstr_AvLdSt( True, 4, dst, am_off12 ));

         add_to_sp( env, 32 );       // Reset SP
         return dst;
      }

      case Iop_Dup8x16:
      case Iop_Dup16x8:
      case Iop_Dup32x4:
         return mk_AvDuplicateRI(env, e->Iex.Unop.arg);

      default:
         break;
      } /* switch (e->Iex.Unop.op) */
   } /* if (e->tag == Iex_Unop) */

   /* --------- BINARY ops --------- */
   if (e->tag == Iex_Binop) {
      switch (e->Iex.Binop.op) {

      case Iop_64HLtoV128: {
         if (!mode64) {
            /* 32-bit mode: spill the four 32-bit halves to an aligned
               stack slot and vector-load the result. */
            HReg     r3, r2, r1, r0, r_aligned16;
            PPCAMode *am_off0, *am_off4, *am_off8, *am_off12;
            HReg     dst = newVRegV(env);
            /* do this via the stack (easy-ish) */
            sub_from_sp( env, 32 );        // Move SP down

            /* Get a quadword aligned address within our stack space */
            r_aligned16 = get_sp_aligned16( env );
            am_off0  = PPCAMode_IR( 0,  r_aligned16 );
            am_off4  = PPCAMode_IR( 4,  r_aligned16 );
            am_off8  = PPCAMode_IR( 8,  r_aligned16 );
            am_off12 = PPCAMode_IR( 12, r_aligned16 );

            /* arg2 is the low 64 bits, arg1 the high 64 bits. */
            iselInt64Expr(&r1, &r0, env, e->Iex.Binop.arg2);
            addInstr(env, PPCInstr_Store( 4, am_off12, r0, mode64 ));
            addInstr(env, PPCInstr_Store( 4, am_off8,  r1, mode64 ));
            iselInt64Expr(&r3, &r2, env, e->Iex.Binop.arg1);
            addInstr(env, PPCInstr_Store( 4, am_off4, r2, mode64 ));
            addInstr(env, PPCInstr_Store( 4, am_off0, r3, mode64 ));

            /* Fetch result back from stack. */
            addInstr(env, PPCInstr_AvLdSt(True, 16, dst, am_off0));

            add_to_sp( env, 32 );          // Reset SP
            return dst;
         } else {
            /* 64-bit mode: two 8-byte stores, one 16-byte load. */
            HReg rHi = iselWordExpr_R(env, e->Iex.Binop.arg1);
            HReg rLo = iselWordExpr_R(env, e->Iex.Binop.arg2);
            HReg dst = newVRegV(env);
            HReg r_aligned16;
            PPCAMode *am_off0, *am_off8;
            /* do this via the stack (easy-ish) */
            sub_from_sp( env, 32 );        // Move SP down

            /* Get a quadword aligned address within our stack space */
            r_aligned16 = get_sp_aligned16( env );
            am_off0 = PPCAMode_IR( 0, r_aligned16 );
            am_off8 = PPCAMode_IR( 8, r_aligned16 );

            /* Store 2*I64 to stack */
            addInstr(env, PPCInstr_Store( 8, am_off0, rHi, mode64 ));
            addInstr(env, PPCInstr_Store( 8, am_off8, rLo, mode64 ));

            /* Fetch result back from stack. */
            addInstr(env, PPCInstr_AvLdSt(True, 16, dst, am_off0));

            add_to_sp( env, 32 );          // Reset SP
            return dst;
         }
      }

      /* 4 x F32 binary ops, single instruction each. */
      case Iop_Add32Fx4:   fpop = Pavfp_ADDF;   goto do_32Fx4;
      case Iop_Sub32Fx4:   fpop = Pavfp_SUBF;   goto do_32Fx4;
      case Iop_Max32Fx4:   fpop = Pavfp_MAXF;   goto do_32Fx4;
      case Iop_Min32Fx4:   fpop = Pavfp_MINF;   goto do_32Fx4;
      case Iop_Mul32Fx4:   fpop = Pavfp_MULF;   goto do_32Fx4;
      case Iop_CmpEQ32Fx4: fpop = Pavfp_CMPEQF; goto do_32Fx4;
      case Iop_CmpGT32Fx4: fpop = Pavfp_CMPGTF; goto do_32Fx4;
      case Iop_CmpGE32Fx4: fpop = Pavfp_CMPGEF; goto do_32Fx4;
      do_32Fx4:
      {
         HReg argL = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg argR = iselVecExpr(env, e->Iex.Binop.arg2);
         HReg dst = newVRegV(env);
         addInstr(env, PPCInstr_AvBin32Fx4(fpop, dst, argL, argR));
         return dst;
      }

      case Iop_CmpLE32Fx4: {
         /* No direct compare-LE; compute NOT(GT(L,R) OR isNaN(L|R)) so
            that lanes with a NaN operand compare false, per IEEE. */
         HReg argL = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg argR = iselVecExpr(env, e->Iex.Binop.arg2);
         HReg dst = newVRegV(env);

         /* stay consistent with native ppc compares:
            if a left/right lane holds a nan, return zeros of appropriate size
            need to check for srcL & srcR being nan cases */
         HReg isNanLR = newVRegV(env);
         HReg isNanL = isNan(env, argL);
         HReg isNanR = isNan(env, argR);
         addInstr(env, PPCInstr_AvBinary(Pav_OR, isNanLR,
                                         isNanL, isNanR));

         addInstr(env, PPCInstr_AvBin32Fx4(Pavfp_CMPGTF, dst,
                                           argL, argR));
         addInstr(env, PPCInstr_AvBinary(Pav_OR, dst, dst, isNanLR));
         addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst));
         return dst;
      }

      /* Whole-vector logical ops. */
      case Iop_AndV128: op = Pav_AND; goto do_AvBin;
      case Iop_OrV128:  op = Pav_OR;  goto do_AvBin;
      case Iop_XorV128: op = Pav_XOR; goto do_AvBin;
      do_AvBin: {
         HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2);
         HReg dst  = newVRegV(env);
         addInstr(env, PPCInstr_AvBinary(op, dst, arg1, arg2));
         return dst;
      }

      /* 16 x I8 lane ops. */
      case Iop_Shl8x16:    op = Pav_SHL;    goto do_AvBin8x16;
      case Iop_Shr8x16:    op = Pav_SHR;    goto do_AvBin8x16;
      case Iop_Sar8x16:    op = Pav_SAR;    goto do_AvBin8x16;
      case Iop_Rol8x16:    op = Pav_ROTL;   goto do_AvBin8x16;
      case Iop_InterleaveHI8x16: op = Pav_MRGHI;  goto do_AvBin8x16;
      case Iop_InterleaveLO8x16: op = Pav_MRGLO;  goto do_AvBin8x16;
      case Iop_Add8x16:    op = Pav_ADDU;   goto do_AvBin8x16;
      case Iop_QAdd8Ux16:  op = Pav_QADDU;  goto do_AvBin8x16;
      case Iop_QAdd8Sx16:  op = Pav_QADDS;  goto do_AvBin8x16;
      case Iop_Sub8x16:    op = Pav_SUBU;   goto do_AvBin8x16;
      case Iop_QSub8Ux16:  op = Pav_QSUBU;  goto do_AvBin8x16;
      case Iop_QSub8Sx16:  op = Pav_QSUBS;  goto do_AvBin8x16;
      case Iop_Avg8Ux16:   op = Pav_AVGU;   goto do_AvBin8x16;
      case Iop_Avg8Sx16:   op = Pav_AVGS;   goto do_AvBin8x16;
      case Iop_Max8Ux16:   op = Pav_MAXU;   goto do_AvBin8x16;
      case Iop_Max8Sx16:   op = Pav_MAXS;   goto do_AvBin8x16;
      case Iop_Min8Ux16:   op = Pav_MINU;   goto do_AvBin8x16;
      case Iop_Min8Sx16:   op = Pav_MINS;   goto do_AvBin8x16;
      case Iop_MullEven8Ux16: op = Pav_OMULU;  goto do_AvBin8x16;
      case Iop_MullEven8Sx16: op = Pav_OMULS;  goto do_AvBin8x16;
      case Iop_CmpEQ8x16:  op = Pav_CMPEQU; goto do_AvBin8x16;
      case Iop_CmpGT8Ux16: op = Pav_CMPGTU; goto do_AvBin8x16;
      case Iop_CmpGT8Sx16: op = Pav_CMPGTS; goto do_AvBin8x16;
      do_AvBin8x16: {
         HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2);
         HReg dst  = newVRegV(env);
         addInstr(env, PPCInstr_AvBin8x16(op, dst, arg1, arg2));
         return dst;
      }

      /* 8 x I16 lane ops. */
      case Iop_Shl16x8:    op = Pav_SHL;    goto do_AvBin16x8;
      case Iop_Shr16x8:    op = Pav_SHR;    goto do_AvBin16x8;
      case Iop_Sar16x8:    op = Pav_SAR;    goto do_AvBin16x8;
      case Iop_Rol16x8:    op = Pav_ROTL;   goto do_AvBin16x8;
      case Iop_NarrowBin16to8x16:    op = Pav_PACKUU;  goto do_AvBin16x8;
      case Iop_QNarrowBin16Uto8Ux16: op = Pav_QPACKUU; goto do_AvBin16x8;
      case Iop_QNarrowBin16Sto8Sx16: op = Pav_QPACKSS; goto do_AvBin16x8;
      case Iop_InterleaveHI16x8:  op = Pav_MRGHI;  goto do_AvBin16x8;
      case Iop_InterleaveLO16x8:  op = Pav_MRGLO;  goto do_AvBin16x8;
      case Iop_Add16x8:    op = Pav_ADDU;   goto do_AvBin16x8;
      case Iop_QAdd16Ux8:  op = Pav_QADDU;  goto do_AvBin16x8;
      case Iop_QAdd16Sx8:  op = Pav_QADDS;  goto do_AvBin16x8;
      case Iop_Sub16x8:    op = Pav_SUBU;   goto do_AvBin16x8;
      case Iop_QSub16Ux8:  op = Pav_QSUBU;  goto do_AvBin16x8;
      case Iop_QSub16Sx8:  op = Pav_QSUBS;  goto do_AvBin16x8;
      case Iop_Avg16Ux8:   op = Pav_AVGU;   goto do_AvBin16x8;
      case Iop_Avg16Sx8:   op = Pav_AVGS;   goto do_AvBin16x8;
      case Iop_Max16Ux8:   op = Pav_MAXU;   goto do_AvBin16x8;
      case Iop_Max16Sx8:   op = Pav_MAXS;   goto do_AvBin16x8;
      case Iop_Min16Ux8:   op = Pav_MINU;   goto do_AvBin16x8;
      case Iop_Min16Sx8:   op = Pav_MINS;   goto do_AvBin16x8;
      case Iop_MullEven16Ux8: op = Pav_OMULU;  goto do_AvBin16x8;
      case Iop_MullEven16Sx8: op = Pav_OMULS;  goto do_AvBin16x8;
      case Iop_CmpEQ16x8:  op = Pav_CMPEQU; goto do_AvBin16x8;
      case Iop_CmpGT16Ux8: op = Pav_CMPGTU; goto do_AvBin16x8;
      case Iop_CmpGT16Sx8: op = Pav_CMPGTS; goto do_AvBin16x8;
      do_AvBin16x8: {
         HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2);
         HReg dst  = newVRegV(env);
         addInstr(env, PPCInstr_AvBin16x8(op, dst, arg1, arg2));
         return dst;
      }

      /* 4 x I32 lane ops. */
      case Iop_Shl32x4:    op = Pav_SHL;    goto do_AvBin32x4;
      case Iop_Shr32x4:    op = Pav_SHR;    goto do_AvBin32x4;
      case Iop_Sar32x4:    op = Pav_SAR;    goto do_AvBin32x4;
      case Iop_Rol32x4:    op = Pav_ROTL;   goto do_AvBin32x4;
      case Iop_NarrowBin32to16x8:    op = Pav_PACKUU;  goto do_AvBin32x4;
      case Iop_QNarrowBin32Uto16Ux8: op = Pav_QPACKUU; goto do_AvBin32x4;
      case Iop_QNarrowBin32Sto16Sx8: op = Pav_QPACKSS; goto do_AvBin32x4;
      case Iop_InterleaveHI32x4:  op = Pav_MRGHI;  goto do_AvBin32x4;
      case Iop_InterleaveLO32x4:  op = Pav_MRGLO;  goto do_AvBin32x4;
      case Iop_Add32x4:    op = Pav_ADDU;   goto do_AvBin32x4;
      case Iop_QAdd32Ux4:  op = Pav_QADDU;  goto do_AvBin32x4;
      case Iop_QAdd32Sx4:  op = Pav_QADDS;  goto do_AvBin32x4;
      case Iop_Sub32x4:    op = Pav_SUBU;   goto do_AvBin32x4;
      case Iop_QSub32Ux4:  op = Pav_QSUBU;  goto do_AvBin32x4;
      case Iop_QSub32Sx4:  op = Pav_QSUBS;  goto do_AvBin32x4;
      case Iop_Avg32Ux4:   op = Pav_AVGU;   goto do_AvBin32x4;
      case Iop_Avg32Sx4:   op = Pav_AVGS;   goto do_AvBin32x4;
      case Iop_Max32Ux4:   op = Pav_MAXU;   goto do_AvBin32x4;
      case Iop_Max32Sx4:   op = Pav_MAXS;   goto do_AvBin32x4;
      case Iop_Min32Ux4:   op = Pav_MINU;   goto do_AvBin32x4;
      case Iop_Min32Sx4:   op = Pav_MINS;   goto do_AvBin32x4;
      case Iop_CmpEQ32x4:  op = Pav_CMPEQU; goto do_AvBin32x4;
      case Iop_CmpGT32Ux4: op = Pav_CMPGTU; goto do_AvBin32x4;
      case Iop_CmpGT32Sx4: op = Pav_CMPGTS; goto do_AvBin32x4;
      do_AvBin32x4: {
         HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2);
         HReg dst  = newVRegV(env);
         addInstr(env, PPCInstr_AvBin32x4(op, dst, arg1, arg2));
         return dst;
      }

      /* Shift-by-immediate ops: the scalar shift amount (arg2) is
         splatted across all lanes first via mk_AvDuplicateRI. */
      case Iop_ShlN8x16: op = Pav_SHL; goto do_AvShift8x16;
      case Iop_SarN8x16: op = Pav_SAR; goto do_AvShift8x16;
      do_AvShift8x16: {
         HReg r_src  = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg dst    = newVRegV(env);
         HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2);
         addInstr(env, PPCInstr_AvBin8x16(op, dst, r_src, v_shft));
         return dst;
      }

      case Iop_ShlN16x8: op = Pav_SHL; goto do_AvShift16x8;
      case Iop_ShrN16x8: op = Pav_SHR; goto do_AvShift16x8;
      case Iop_SarN16x8: op = Pav_SAR; goto do_AvShift16x8;
      do_AvShift16x8: {
         HReg r_src  = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg dst    = newVRegV(env);
         HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2);
         addInstr(env, PPCInstr_AvBin16x8(op, dst, r_src, v_shft));
         return dst;
      }

      case Iop_ShlN32x4: op = Pav_SHL; goto do_AvShift32x4;
      case Iop_ShrN32x4: op = Pav_SHR; goto do_AvShift32x4;
      case Iop_SarN32x4: op = Pav_SAR; goto do_AvShift32x4;
      do_AvShift32x4: {
         HReg r_src  = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg dst    = newVRegV(env);
         HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2);
         addInstr(env, PPCInstr_AvBin32x4(op, dst, r_src, v_shft));
         return dst;
      }

      /* Whole-vector shift by a (splatted) bit count. */
      case Iop_ShrV128: op = Pav_SHR; goto do_AvShiftV128;
      case Iop_ShlV128: op = Pav_SHL; goto do_AvShiftV128;
      do_AvShiftV128: {
         HReg dst    = newVRegV(env);
         HReg r_src  = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2);
         /* Note: shift value gets masked by 127 */
         addInstr(env, PPCInstr_AvBinary(op, dst, r_src, v_shft));
         return dst;
      }

      case Iop_Perm8x16: {
         /* vperm with the same vector as both sources, steered by the
            control vector in arg2. */
         HReg dst   = newVRegV(env);
         HReg v_src = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg v_ctl = iselVecExpr(env, e->Iex.Binop.arg2);
         addInstr(env, PPCInstr_AvPerm(dst, v_src, v_src, v_ctl));
         return dst;
      }

      default:
         break;
      } /* switch (e->Iex.Binop.op) */
   } /* if (e->tag == Iex_Binop) */

   /* All-zeroes / all-ones constants; the V128 const is a 16-bit
      byte mask, so 0x0000 / 0xffff are the only handled values. */
   if (e->tag == Iex_Const ) {
      vassert(e->Iex.Const.con->tag == Ico_V128);
      if (e->Iex.Const.con->Ico.V128 == 0x0000) {
         return generate_zeroes_V128(env);
      }
      else if (e->Iex.Const.con->Ico.V128 == 0xffff) {
         return generate_ones_V128(env);
      }
   }

   vex_printf("iselVecExpr(ppc) (subarch = %s): can't reduce\n",
              LibVEX_ppVexHwCaps(mode64 ? VexArchPPC64 : VexArchPPC32,
                                 env->hwcaps));
   ppIRExpr(e);
   vpanic("iselVecExpr_wrk(ppc)");
}
/* Generate PPC code for a single IR statement: dispatch on the
   statement tag and emit instructions into env->code.  Unhandled
   statement shapes fall through to stmt_fail and panic. */
static void iselStmt ( ISelEnv* env, IRStmt* stmt )
{
   Bool mode64 = env->mode64;
   if (vex_traceflags & VEX_TRACE_VCODE) {
      vex_printf("\n -- ");
      ppIRStmt(stmt);
      vex_printf("\n");
   }

   switch (stmt->tag) {

   /* --------- STORE --------- */
   case Ist_Store: {
      IRType    tya = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
      IRType    tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
      IREndness end = stmt->Ist.Store.end;

      /* Only big-endian stores with a native-word address type. */
      if (end != Iend_BE)
         goto stmt_fail;
      if (!mode64 && (tya != Ity_I32))
         goto stmt_fail;
      if (mode64 && (tya != Ity_I64))
         goto stmt_fail;

      /* Integer stores of native-or-smaller width. */
      if (tyd == Ity_I8 || tyd == Ity_I16 || tyd == Ity_I32 ||
          (mode64 && (tyd == Ity_I64))) {
         PPCAMode* am_addr
            = iselWordExpr_AMode(env, stmt->Ist.Store.addr, tyd);
         HReg r_src = iselWordExpr_R(env, stmt->Ist.Store.data);
         addInstr(env, PPCInstr_Store( toUChar(sizeofIRType(tyd)),
                                       am_addr, r_src, mode64 ));
         return;
      }
      if (tyd == Ity_F64) {
         PPCAMode* am_addr
            = iselWordExpr_AMode(env, stmt->Ist.Store.addr, tyd);
         HReg fr_src = iselDblExpr(env, stmt->Ist.Store.data);
         addInstr(env,
                  PPCInstr_FpLdSt(False/*store*/, 8, fr_src, am_addr));
         return;
      }
      if (tyd == Ity_F32) {
         PPCAMode* am_addr
            = iselWordExpr_AMode(env, stmt->Ist.Store.addr, tyd);
         HReg fr_src = iselFltExpr(env, stmt->Ist.Store.data);
         addInstr(env,
                  PPCInstr_FpLdSt(False/*store*/, 4, fr_src, am_addr));
         return;
      }
      if (tyd == Ity_V128) {
         PPCAMode* am_addr
            = iselWordExpr_AMode(env, stmt->Ist.Store.addr, tyd);
         HReg v_src = iselVecExpr(env, stmt->Ist.Store.data);
         addInstr(env,
                  PPCInstr_AvLdSt(False/*store*/, 16, v_src, am_addr));
         return;
      }
      /* 64-bit store in 32-bit mode: two 4-byte stores, hi then lo. */
      if (tyd == Ity_I64 && !mode64) {
         /* Just calculate the address in the register.  Life is too
            short to arse around trying and possibly failing to adjust
            the offset in a 'reg+offset' style amode. */
         HReg rHi32, rLo32;
         HReg r_addr = iselWordExpr_R(env, stmt->Ist.Store.addr);
         iselInt64Expr( &rHi32, &rLo32, env, stmt->Ist.Store.data );
         addInstr(env, PPCInstr_Store( 4/*byte-store*/,
                                       PPCAMode_IR( 0, r_addr ),
                                       rHi32,
                                       False/*32-bit insn please*/) );
         addInstr(env, PPCInstr_Store( 4/*byte-store*/,
                                       PPCAMode_IR( 4, r_addr ),
                                       rLo32,
                                       False/*32-bit insn please*/) );
         return;
      }
      break;
   }

   /* --------- PUT (write to guest state) --------- */
   case Ist_Put: {
      IRType ty = typeOfIRExpr(env->type_env, stmt->Ist.Put.data);
      if (ty == Ity_I8  || ty == Ity_I16 ||
          ty == Ity_I32 || ((ty == Ity_I64) && mode64)) {
         HReg r_src = iselWordExpr_R(env, stmt->Ist.Put.data);
         PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset,
                                          GuestStatePtr(mode64) );
         addInstr(env, PPCInstr_Store( toUChar(sizeofIRType(ty)),
                                       am_addr, r_src, mode64 ));
         return;
      }
      /* 64-bit PUT in 32-bit mode: two 4-byte guest-state stores. */
      if (!mode64 && ty == Ity_I64) {
         HReg rHi, rLo;
         PPCAMode* am_addr  = PPCAMode_IR( stmt->Ist.Put.offset,
                                           GuestStatePtr(mode64) );
         PPCAMode* am_addr4 = advance4(env, am_addr);
         iselInt64Expr(&rHi,&rLo, env, stmt->Ist.Put.data);
         addInstr(env, PPCInstr_Store( 4, am_addr,  rHi, mode64 ));
         addInstr(env, PPCInstr_Store( 4, am_addr4, rLo, mode64 ));
         return;
      }
      if (ty == Ity_V128) {
         HReg v_src = iselVecExpr(env, stmt->Ist.Put.data);
         PPCAMode* am_addr  = PPCAMode_IR( stmt->Ist.Put.offset,
                                           GuestStatePtr(mode64) );
         addInstr(env,
                  PPCInstr_AvLdSt(False/*store*/, 16, v_src, am_addr));
         return;
      }
      if (ty == Ity_F64) {
         HReg fr_src = iselDblExpr(env, stmt->Ist.Put.data);
         PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset,
                                          GuestStatePtr(mode64) );
         addInstr(env, PPCInstr_FpLdSt( False/*store*/, 8,
                                        fr_src, am_addr ));
         return;
      }
      /* D64 lives in an FP register; store like F64. */
      if (ty == Ity_D64) {
         HReg fr_src = iselDfp64Expr( env, stmt->Ist.Put.data );
         PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset,
                                          GuestStatePtr(mode64) );
         addInstr( env, PPCInstr_FpLdSt( False/*store*/, 8, fr_src, am_addr ) );
         return;
      }
      break;
   }

   /* --------- Indexed PUT --------- */
   case Ist_PutI: {
      IRPutI *puti = stmt->Ist.PutI.details;
      /* Address computed from the guest-state array descriptor. */
      PPCAMode* dst_am
         = genGuestArrayOffset(
              env, puti->descr,
              puti->ix, puti->bias );
      IRType ty = typeOfIRExpr(env->type_env, puti->data);
      if (mode64 && ty == Ity_I64) {
         HReg r_src = iselWordExpr_R(env, puti->data);
         addInstr(env, PPCInstr_Store( toUChar(8),
                                       dst_am, r_src, mode64 ));
         return;
      }
      if ((!mode64) && ty == Ity_I32) {
         HReg r_src = iselWordExpr_R(env, puti->data);
         addInstr(env, PPCInstr_Store( toUChar(4),
                                       dst_am, r_src, mode64 ));
         return;
      }
      break;
   }

   /* --------- TMP (assign to IR temporary) --------- */
   case Ist_WrTmp: {
      IRTemp tmp = stmt->Ist.WrTmp.tmp;
      IRType ty = typeOfIRTemp(env->type_env, tmp);
      if (ty == Ity_I8  || ty == Ity_I16 ||
          ty == Ity_I32 || ((ty == Ity_I64) && mode64)) {
         HReg r_dst = lookupIRTemp(env, tmp);
         HReg r_src = iselWordExpr_R(env, stmt->Ist.WrTmp.data);
         addInstr(env, mk_iMOVds_RR( r_dst, r_src ));
         return;
      }
      /* I64 in 32-bit mode occupies a register pair. */
      if (!mode64 && ty == Ity_I64) {
         HReg r_srcHi, r_srcLo, r_dstHi, r_dstLo;
         iselInt64Expr(&r_srcHi,&r_srcLo, env, stmt->Ist.WrTmp.data);
         lookupIRTempPair( &r_dstHi, &r_dstLo, env, tmp);
         addInstr(env, mk_iMOVds_RR(r_dstHi, r_srcHi) );
         addInstr(env, mk_iMOVds_RR(r_dstLo, r_srcLo) );
         return;
      }
      /* I128 in 64-bit mode: register pair. */
      if (mode64 && ty == Ity_I128) {
         HReg r_srcHi, r_srcLo, r_dstHi, r_dstLo;
         iselInt128Expr(&r_srcHi,&r_srcLo, env, stmt->Ist.WrTmp.data);
         lookupIRTempPair( &r_dstHi, &r_dstLo, env, tmp);
         addInstr(env, mk_iMOVds_RR(r_dstHi, r_srcHi) );
         addInstr(env, mk_iMOVds_RR(r_dstLo, r_srcLo) );
         return;
      }
      /* I128 in 32-bit mode: quad of 32-bit registers. */
      if (!mode64 && ty == Ity_I128) {
         HReg r_srcHi, r_srcMedHi, r_srcMedLo, r_srcLo;
         HReg r_dstHi, r_dstMedHi, r_dstMedLo, r_dstLo;
         iselInt128Expr_to_32x4(&r_srcHi, &r_srcMedHi,
                                &r_srcMedLo, &r_srcLo,
                                env, stmt->Ist.WrTmp.data);
         lookupIRTempQuad( &r_dstHi, &r_dstMedHi, &r_dstMedLo,
                           &r_dstLo, env, tmp);
         addInstr(env, mk_iMOVds_RR(r_dstHi,    r_srcHi) );
         addInstr(env, mk_iMOVds_RR(r_dstMedHi, r_srcMedHi) );
         addInstr(env, mk_iMOVds_RR(r_dstMedLo, r_srcMedLo) );
         addInstr(env, mk_iMOVds_RR(r_dstLo,    r_srcLo) );
         return;
      }
      /* I1 is materialised as 0/1 in an integer register. */
      if (ty == Ity_I1) {
         PPCCondCode cond = iselCondCode(env, stmt->Ist.WrTmp.data);
         HReg r_dst = lookupIRTemp(env, tmp);
         addInstr(env, PPCInstr_Set(cond, r_dst));
         return;
      }
      if (ty == Ity_F64) {
         HReg fr_dst = lookupIRTemp(env, tmp);
         HReg fr_src = iselDblExpr(env, stmt->Ist.WrTmp.data);
         addInstr(env, PPCInstr_FpUnary(Pfp_MOV, fr_dst, fr_src));
         return;
      }
      if (ty == Ity_F32) {
         HReg fr_dst = lookupIRTemp(env, tmp);
         HReg fr_src = iselFltExpr(env, stmt->Ist.WrTmp.data);
         addInstr(env, PPCInstr_FpUnary(Pfp_MOV, fr_dst, fr_src));
         return;
      }
      if (ty == Ity_V128) {
         HReg v_dst = lookupIRTemp(env, tmp);
         HReg v_src = iselVecExpr(env, stmt->Ist.WrTmp.data);
         addInstr(env, PPCInstr_AvUnary(Pav_MOV, v_dst, v_src));
         return;
      }
      if (ty == Ity_D64) {
         HReg fr_dst = lookupIRTemp( env, tmp );
         HReg fr_src = iselDfp64Expr( env, stmt->Ist.WrTmp.data );
         addInstr( env, PPCInstr_Dfp64Unary( Pfp_MOV, fr_dst, fr_src ) );
         return;
      }
      /* D128 occupies a pair of FP registers. */
      if (ty == Ity_D128) {
         HReg fr_srcHi, fr_srcLo, fr_dstHi, fr_dstLo;
         lookupIRTempPair( &fr_dstHi, &fr_dstLo, env, tmp );
         iselDfp128Expr( &fr_srcHi, &fr_srcLo, env, stmt->Ist.WrTmp.data );
         addInstr( env, PPCInstr_Dfp64Unary( Pfp_MOV, fr_dstHi, fr_srcHi ) );
         addInstr( env, PPCInstr_Dfp64Unary( Pfp_MOV, fr_dstLo, fr_srcLo ) );
         return;
      }
      break;
   }

   /* --------- Load-Linked / Store-Conditional --------- */
   case Ist_LLSC: {
      IRTemp res    = stmt->Ist.LLSC.result;
      IRType tyRes  = typeOfIRTemp(env->type_env, res);
      IRType tyAddr = typeOfIRExpr(env->type_env, stmt->Ist.LLSC.addr);

      if (stmt->Ist.LLSC.end != Iend_BE)
         goto stmt_fail;
      if (!mode64 && (tyAddr != Ity_I32))
         goto stmt_fail;
      if (mode64 && (tyAddr != Ity_I64))
         goto stmt_fail;

      if (stmt->Ist.LLSC.storedata == NULL) {
         /* LL: lwarx/ldarx */
         HReg r_addr = iselWordExpr_R( env, stmt->Ist.LLSC.addr );
         HReg r_dst  = lookupIRTemp(env, res);
         if (tyRes == Ity_I32) {
            addInstr(env, PPCInstr_LoadL( 4, r_dst, r_addr, mode64 ));
            return;
         }
         if (tyRes == Ity_I64 && mode64) {
            addInstr(env, PPCInstr_LoadL( 8, r_dst, r_addr, mode64 ));
            return;
         }
         /* fallthru */;
      } else {
         /* SC: stwcx./stdcx., then extract the success flag from CR0.EQ */
         HReg   r_res  = lookupIRTemp(env, res); /* :: Ity_I1 */
         HReg   r_a    = iselWordExpr_R(env, stmt->Ist.LLSC.addr);
         HReg   r_src  = iselWordExpr_R(env, stmt->Ist.LLSC.storedata);
         HReg   r_tmp  = newVRegI(env);
         IRType tyData = typeOfIRExpr(env->type_env,
                                      stmt->Ist.LLSC.storedata);
         vassert(tyRes == Ity_I1);
         if (tyData == Ity_I32 || (tyData == Ity_I64 && mode64)) {
            addInstr(env, PPCInstr_StoreC( tyData==Ity_I32 ? 4 : 8,
                                           r_a, r_src, mode64 ));
            /* Move CR to a GPR, shift right 29 so CR0.EQ (the stcx.
               success bit) lands at bit 0, then mask to 1 bit. */
            addInstr(env, PPCInstr_MfCR( r_tmp ));
            addInstr(env, PPCInstr_Shft(
                             Pshft_SHR,
                             env->mode64 ? False : True
                                /*F:64-bit, T:32-bit shift*/,
                             r_tmp, r_tmp,
                             PPCRH_Imm(False/*unsigned*/, 29)));
            addInstr(env, PPCInstr_Alu(
                             Palu_AND,
                             r_res, r_tmp,
                             PPCRH_Imm(False/*signed*/, 1)));
            return;
         }
         /* fallthru */
      }
      goto stmt_fail;
   }

   /* --------- Call to DIRTY helper --------- */
   case Ist_Dirty: {
      IRType   retty;
      IRDirty* d = stmt->Ist.Dirty.details;
      Bool     passBBP = False;

      /* Pass the guest-state pointer only if the helper touches
         guest state. */
      if (d->nFxState == 0)
         vassert(!d->needsBBP);
      passBBP = toBool(d->nFxState > 0 && d->needsBBP);

      /* Marshal args, do the call. */
      doHelperCall( env, passBBP, d->guard, d->cee, d->args );

      /* Now figure out what to do with the returned value, if any. */
      if (d->tmp == IRTemp_INVALID)
         /* No return value.  Nothing to do. */
         return;

      retty = typeOfIRTemp(env->type_env, d->tmp);
      /* Helpers return 64-bit values in GPR3:GPR4 in 32-bit mode. */
      if (!mode64 && retty == Ity_I64) {
         HReg r_dstHi, r_dstLo;
         /* The returned value is in %r3:%r4.  Park it in the
            register-pair associated with tmp. */
         lookupIRTempPair( &r_dstHi, &r_dstLo, env, d->tmp);
         addInstr(env, mk_iMOVds_RR(r_dstHi, hregPPC_GPR3(mode64)));
         addInstr(env, mk_iMOVds_RR(r_dstLo, hregPPC_GPR4(mode64)));
         return;
      }
      if (retty == Ity_I8  || retty == Ity_I16 ||
          retty == Ity_I32 || ((retty == Ity_I64) && mode64)) {
         /* The returned value is in %r3.  Park it in the register
            associated with tmp. */
         HReg r_dst = lookupIRTemp(env, d->tmp);
         addInstr(env, mk_iMOVds_RR(r_dst, hregPPC_GPR3(mode64)));
         return;
      }
      break;
   }

   /* --------- MEM FENCE --------- */
   case Ist_MBE:
      switch (stmt->Ist.MBE.event) {
         case Imbe_Fence:
            addInstr(env, PPCInstr_MFence());
            return;
         default:
            break;
      }
      break;

   /* --------- INSTR MARK --------- */
   /* Doesn't generate any executable code ... */
   case Ist_IMark:
       return;

   /* --------- ABI HINT --------- */
   /* These have no meaning (denotation in the IR) and so we ignore
      them ... if any actually made it this far. */
   case Ist_AbiHint:
       return;

   /* --------- NO-OP --------- */
   /* Fairly self-explanatory, wouldn't you say? */
   case Ist_NoOp:
       return;

   /* --------- EXIT --------- */
   case Ist_Exit: {
      IRConst* dst = stmt->Ist.Exit.dst;
      if (!mode64 && dst->tag != Ico_U32)
         vpanic("iselStmt(ppc): Ist_Exit: dst is not a 32-bit value");
      if (mode64 && dst->tag != Ico_U64)
         vpanic("iselStmt(ppc64): Ist_Exit: dst is not a 64-bit value");

      PPCCondCode cc    = iselCondCode(env, stmt->Ist.Exit.guard);
      PPCAMode*   amCIA = PPCAMode_IR(stmt->Ist.Exit.offsIP,
                                      hregPPC_GPR31(mode64));

      /* Case: boring transfer to known address */
      if (stmt->Ist.Exit.jk == Ijk_Boring
          || stmt->Ist.Exit.jk == Ijk_Call
          /* || stmt->Ist.Exit.jk == Ijk_Ret */) {
         if (env->chainingAllowed) {
            /* .. almost always true .. */
            /* Skip the event check at the dst if this is a forwards
               edge. */
            Bool toFastEP
               = mode64
               ? (((Addr64)stmt->Ist.Exit.dst->Ico.U64) > (Addr64)env->max_ga)
               : (((Addr32)stmt->Ist.Exit.dst->Ico.U32) > (Addr32)env->max_ga);
            if (0) vex_printf("%s", toFastEP ? "Y" : ",");
            addInstr(env, PPCInstr_XDirect(
                             mode64 ? (Addr64)stmt->Ist.Exit.dst->Ico.U64
                                    : (Addr64)stmt->Ist.Exit.dst->Ico.U32,
                             amCIA, cc, toFastEP));
         } else {
            /* .. very occasionally .. */
            /* We can't use chaining, so ask for an assisted transfer,
               as that's the only alternative that is allowable. */
            HReg r = iselWordExpr_R(env, IRExpr_Const(stmt->Ist.Exit.dst));
            addInstr(env, PPCInstr_XAssisted(r, amCIA, cc, Ijk_Boring));
         }
         return;
      }

      /* Case: assisted transfer to arbitrary address */
      switch (stmt->Ist.Exit.jk) {
         case Ijk_ClientReq:
         case Ijk_EmFail:
         case Ijk_EmWarn:
         case Ijk_NoDecode:
         case Ijk_NoRedir:
         case Ijk_SigBUS:
         case Ijk_SigTRAP:
         case Ijk_Sys_syscall:
         case Ijk_TInval:
         {
            HReg r = iselWordExpr_R(env, IRExpr_Const(stmt->Ist.Exit.dst));
            addInstr(env, PPCInstr_XAssisted(r, amCIA, cc,
                                             stmt->Ist.Exit.jk));
            return;
         }
         default:
            break;
      }

      /* Do we ever expect to see any other kind? */
      goto stmt_fail;
   }

   default: break;
   }

  stmt_fail:
   ppIRStmt(stmt);
   vpanic("iselStmt(ppc)");
}
/* Generate code for the block-end transfer: PUT the next guest IP
   (offsIP into the guest state) and leave the translation via a
   chainable direct jump, an indirect jump, or an assisted transfer,
   depending on the jump kind and whether chaining is allowed. */
static void iselNext ( ISelEnv* env,
                       IRExpr* next, IRJumpKind jk, Int offsIP )
{
   if (vex_traceflags & VEX_TRACE_VCODE) {
      vex_printf( "\n-- PUT(%d) = ", offsIP);
      ppIRExpr( next );
      vex_printf( "; exit-");
      ppIRJumpKind(jk);
      vex_printf( "\n");
   }

   PPCCondCode always = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );

   /* Case: boring transfer to known address */
   if (next->tag == Iex_Const) {
      IRConst* cdst = next->Iex.Const.con;
      vassert(cdst->tag == (env->mode64 ? Ico_U64 :Ico_U32));
      if (jk == Ijk_Boring || jk == Ijk_Call) {
         /* Boring transfer to known address */
         PPCAMode* amCIA = PPCAMode_IR(offsIP, hregPPC_GPR31(env->mode64));
         if (env->chainingAllowed) {
            /* .. almost always true .. */
            /* Skip the event check at the dst if this is a forwards
               edge. */
            Bool toFastEP
               = env->mode64
               ? (((Addr64)cdst->Ico.U64) > (Addr64)env->max_ga)
               : (((Addr32)cdst->Ico.U32) > (Addr32)env->max_ga);
            if (0) vex_printf("%s", toFastEP ? "X" : ".");
            addInstr(env, PPCInstr_XDirect(
                             env->mode64 ? (Addr64)cdst->Ico.U64
                                         : (Addr64)cdst->Ico.U32,
                             amCIA, always, toFastEP));
         } else {
            /* .. very occasionally .. */
            /* We can't use chaining, so ask for an assisted transfer,
               as that's the only alternative that is allowable. */
            HReg r = iselWordExpr_R(env, next);
            addInstr(env, PPCInstr_XAssisted(r, amCIA, always,
                                             Ijk_Boring));
         }
         return;
      }
   }

   /* Case: call/return (==boring) transfer to any address */
   switch (jk) {
      case Ijk_Boring: case Ijk_Ret: case Ijk_Call: {
         HReg       r = iselWordExpr_R(env, next);
         PPCAMode* amCIA = PPCAMode_IR(offsIP, hregPPC_GPR31(env->mode64));
         if (env->chainingAllowed) {
            addInstr(env, PPCInstr_XIndir(r, amCIA, always));
         } else {
            addInstr(env, PPCInstr_XAssisted(r, amCIA, always,
                                             Ijk_Boring));
         }
         return;
      }
      default:
         break;
   }

   /* Case: assisted transfer to arbitrary address */
   switch (jk) {
      case Ijk_ClientReq:
      case Ijk_EmFail:
      case Ijk_EmWarn:
      case Ijk_NoDecode:
      case Ijk_NoRedir:
      case Ijk_SigBUS:
      case Ijk_SigTRAP:
      case Ijk_Sys_syscall:
      case Ijk_TInval:
      {
         HReg      r = iselWordExpr_R(env, next);
         PPCAMode* amCIA = PPCAMode_IR(offsIP, hregPPC_GPR31(env->mode64));
         addInstr(env, PPCInstr_XAssisted(r, amCIA, always, jk));
         return;
      }
      default:
         break;
   }

   vex_printf( "\n-- PUT(%d) = ", offsIP);
   ppIRExpr( next );
   vex_printf( "; exit-");
   ppIRJumpKind(jk);
   vex_printf( "\n");
   vassert(0); // are we expecting any other kind?
}
/* Translate an entire IR superblock to PPC code.  Sets up the
   instruction-selection environment (including the IRTemp -> virtual
   register map, where wide types occupy 2 or 4 consecutive vregs in
   32-bit mode), emits the event check and optional profiling
   increment, then selects each statement and the block-end transfer.
   Returns the array of selected host instructions. */
HInstrArray* iselSB_PPC ( IRSB* bb,
                          VexArch arch_host,
                          VexArchInfo* archinfo_host,
                          VexAbiInfo* vbi,
                          Int offs_Host_EvC_Counter,
                          Int offs_Host_EvC_FailAddr,
                          Bool chainingAllowed,
                          Bool addProfInc,
                          Addr64 max_ga )
{
   Int       i, j;
   HReg      hregLo, hregMedLo, hregMedHi, hregHi;
   ISelEnv*  env;
   UInt      hwcaps_host = archinfo_host->hwcaps;
   Bool      mode64 = False;
   UInt      mask32, mask64;
   PPCAMode *amCounter, *amFailAddr;

   vassert(arch_host == VexArchPPC32 || arch_host == VexArchPPC64);
   mode64 = arch_host == VexArchPPC64;
   if (!mode64) vassert(max_ga <= 0xFFFFFFFFULL);

   /* do some sanity checks: the host cannot claim capabilities
      belonging to the other word size */
   mask32 = VEX_HWCAPS_PPC32_F | VEX_HWCAPS_PPC32_V
            | VEX_HWCAPS_PPC32_FX | VEX_HWCAPS_PPC32_GX | VEX_HWCAPS_PPC32_VX
            | VEX_HWCAPS_PPC32_DFP;

   mask64 = VEX_HWCAPS_PPC64_V | VEX_HWCAPS_PPC64_FX
            | VEX_HWCAPS_PPC64_GX | VEX_HWCAPS_PPC64_VX | VEX_HWCAPS_PPC64_DFP;

   if (mode64) {
      vassert((hwcaps_host & mask32) == 0);
   } else {
      vassert((hwcaps_host & mask64) == 0);
   }

   /* Make up an initial environment to use. */
   env = LibVEX_Alloc(sizeof(ISelEnv));
   env->vreg_ctr = 0;

   /* Are we being ppc32 or ppc64? */
   env->mode64 = mode64;

   /* Set up output code array. */
   env->code = newHInstrArray();

   /* Copy BB's type env. */
   env->type_env = bb->tyenv;

   /* Make up an IRTemp -> virtual HReg mapping.  This doesn't
      change as we go along.  In 32-bit mode wide types use up to
      four map slots (Lo/MedLo/MedHi/Hi); in 64-bit mode only the
      first two are ever needed. */
   env->n_vregmap = bb->tyenv->types_used;
   env->vregmapLo    = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
   env->vregmapMedLo = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
   if (mode64) {
      env->vregmapMedHi = NULL;
      env->vregmapHi    = NULL;
   } else {
      env->vregmapMedHi = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
      env->vregmapHi    = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
   }

   /* and finally ... */
   env->chainingAllowed = chainingAllowed;
   env->max_ga          = max_ga;
   env->hwcaps          = hwcaps_host;
   env->previous_rm     = NULL;
   env->vbi             = vbi;

   /* For each IR temporary, allocate a suitably-kinded virtual
      register. */
   j = 0;
   for (i = 0; i < env->n_vregmap; i++) {
      hregLo = hregMedLo = hregMedHi = hregHi = INVALID_HREG;
      switch (bb->tyenv->types[i]) {
      case Ity_I1:
      case Ity_I8:
      case Ity_I16:
      case Ity_I32:
         if (mode64) { hregLo    = mkHReg(j++, HRcInt64,  True); break;
         } else {      hregLo    = mkHReg(j++, HRcInt32,  True); break;
         }
      case Ity_I64:
         if (mode64) { hregLo    = mkHReg(j++, HRcInt64,  True); break;
         } else {      hregLo    = mkHReg(j++, HRcInt32,  True);
                       hregMedLo = mkHReg(j++, HRcInt32,  True); break;
         }
      case Ity_I128:
         if (mode64) { hregLo    = mkHReg(j++, HRcInt64,  True);
                       hregMedLo = mkHReg(j++, HRcInt64,  True); break;
         } else {      hregLo    = mkHReg(j++, HRcInt32,  True);
                       hregMedLo = mkHReg(j++, HRcInt32,  True);
                       hregMedHi = mkHReg(j++, HRcInt32,  True);
                       hregHi    = mkHReg(j++, HRcInt32,  True); break;
         }
      case Ity_F32:
      case Ity_F64:    hregLo    = mkHReg(j++, HRcFlt64,  True); break;
      case Ity_V128:   hregLo    = mkHReg(j++, HRcVec128, True); break;
      case Ity_D64:    hregLo    = mkHReg(j++, HRcFlt64,  True); break;
      case Ity_D128:   hregLo    = mkHReg(j++, HRcFlt64,  True);
                       hregMedLo = mkHReg(j++, HRcFlt64,  True); break;
      default:
         ppIRType(bb->tyenv->types[i]);
         vpanic("iselBB(ppc): IRTemp type");
      }
      env->vregmapLo[i]    = hregLo;
      env->vregmapMedLo[i] = hregMedLo;
      if (!mode64) {
         env->vregmapMedHi[i] = hregMedHi;
         env->vregmapHi[i]    = hregHi;
      }
   }
   env->vreg_ctr = j;

   /* The very first instruction must be an event check. */
   amCounter  = PPCAMode_IR(offs_Host_EvC_Counter, hregPPC_GPR31(mode64));
   amFailAddr = PPCAMode_IR(offs_Host_EvC_FailAddr, hregPPC_GPR31(mode64));
   addInstr(env, PPCInstr_EvCheck(amCounter, amFailAddr));

   /* Possibly a block counter increment (for profiling).  At this
      point we don't know the address of the counter, so just pretend
      it is zero.  It will have to be patched later, but before this
      translation is used, by a call to LibVEX_patchProfInc. */
   if (addProfInc) {
      addInstr(env, PPCInstr_ProfInc());
   }

   /* Ok, finally we can iterate over the statements. */
   for (i = 0; i < bb->stmts_used; i++)
      iselStmt(env, bb->stmts[i]);

   iselNext(env, bb->next, bb->jumpkind, bb->offsIP);

   /* record the number of vregs we used. */
   env->code->n_vregs = env->vreg_ctr;
   return env->code;
}
|
qtekfun/htcDesire820Kernel
|
external/valgrind/main/VEX/priv/host_ppc_isel.c
|
C
|
gpl-2.0
| 151,543
|
/*
* Implementation of the security services.
*
* Authors : Stephen Smalley, <sds@epoch.ncsc.mil>
* James Morris <jmorris@redhat.com>
*
* Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
*
* Support for enhanced MLS infrastructure.
* Support for context based audit filters.
*
* Updated: Frank Mayer <mayerf@tresys.com> and Karl MacMillan <kmacmillan@tresys.com>
*
* Added conditional policy language extensions
*
* Updated: Hewlett-Packard <paul@paul-moore.com>
*
* Added support for NetLabel
* Added support for the policy capability bitmap
*
* Updated: Chad Sellers <csellers@tresys.com>
*
* Added validation of kernel classes and permissions
*
* Updated: KaiGai Kohei <kaigai@ak.jp.nec.com>
*
* Added support for bounds domain and audit messaged on masked permissions
*
* Updated: Guido Trentalancia <guido@trentalancia.com>
*
* Added support for runtime switching of the policy type
*
* Copyright (C) 2008, 2009 NEC Corporation
* Copyright (C) 2006, 2007 Hewlett-Packard Development Company, L.P.
* Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
* Copyright (C) 2003 - 2004, 2006 Tresys Technology, LLC
* Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/sched.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/selinux.h>
#include <linux/flex_array.h>
#include <linux/vmalloc.h>
#include <net/netlabel.h>
#include "flask.h"
#include "avc.h"
#include "avc_ss.h"
#include "security.h"
#include "context.h"
#include "policydb.h"
#include "sidtab.h"
#include "services.h"
#include "conditional.h"
#include "mls.h"
#include "objsec.h"
#include "netlabel.h"
#include "xfrm.h"
#include "ebitmap.h"
#include "audit.h"
/* Policy capability flags, refreshed on every policy (re)load. */
int selinux_policycap_netpeer;
int selinux_policycap_openperm;
/* Guards all access to the global policydb and sidtab below. */
static DEFINE_RWLOCK(policy_rwlock);
/* Global SID <-> security context table. */
static struct sidtab sidtab;
/* The currently loaded policy database. */
struct policydb policydb;
/* Non-zero once the first policy has been loaded. */
int ss_initialized;
/*
 * The largest sequence number that has been used when
 * providing an access decision to the access vector cache.
 * The sequence number only changes when a policy change
 * occurs.
 */
static u32 latest_granting;
/* Forward declaration. */
static int context_struct_to_string(struct context *context, char **scontext,
				    u32 *scontext_len);
static void context_struct_compute_av(struct context *scontext,
				      struct context *tcontext,
				      u16 tclass,
				      struct av_decision *avd);
/* One kernel class -> policy class/permission translation record. */
struct selinux_mapping {
	u16 value; /* policy value */
	unsigned num_perms; /* number of valid entries in perms[] */
	u32 perms[sizeof(u32) * 8]; /* policy AV bit per kernel permission */
};
/* Active class mapping table; index 0 is reserved (class zero). */
static struct selinux_mapping *current_mapping;
static u16 current_mapping_size;
/*
 * selinux_set_mapping - build a kernel->policy class/permission mapping.
 * @pol: policy to translate against
 * @map: NULL-name-terminated array of kernel class/permission names
 * @out_map_p: receives the allocated mapping table (kfree'd by caller)
 * @out_map_size: receives the number of entries in the table
 *
 * Returns 0 on success, -EINVAL on unknown class/permission when the
 * policy rejects unknowns, -ENOMEM on allocation failure.
 */
static int selinux_set_mapping(struct policydb *pol,
			       struct security_class_mapping *map,
			       struct selinux_mapping **out_map_p,
			       u16 *out_map_size)
{
	struct selinux_mapping *out_map = NULL;
	size_t size = sizeof(struct selinux_mapping);
	u16 i, j;
	unsigned k;
	bool print_unknown_handle = false;
	/* Find number of classes in the input mapping */
	if (!map)
		return -EINVAL;
	i = 0;
	while (map[i].name)
		i++;
	/* Allocate space for the class records, plus one for class zero */
	out_map = kcalloc(++i, size, GFP_ATOMIC);
	if (!out_map)
		return -ENOMEM;
	/* Store the raw class and permission values */
	j = 0;
	while (map[j].name) {
		/* Note: p_out is deliberately one slot ahead of p_in so
		 * that out_map[0] stays reserved for class zero. */
		struct security_class_mapping *p_in = map + (j++);
		struct selinux_mapping *p_out = out_map + j;
		/* An empty class string skips ahead */
		if (!strcmp(p_in->name, "")) {
			p_out->num_perms = 0;
			continue;
		}
		p_out->value = string_to_security_class(pol, p_in->name);
		if (!p_out->value) {
			printk(KERN_INFO
			       "SELinux:  Class %s not defined in policy.\n",
			       p_in->name);
			if (pol->reject_unknown)
				goto err;
			p_out->num_perms = 0;
			print_unknown_handle = true;
			continue;
		}
		k = 0;
		while (p_in->perms && p_in->perms[k]) {
			/* An empty permission string skips ahead */
			if (!*p_in->perms[k]) {
				k++;
				continue;
			}
			p_out->perms[k] = string_to_av_perm(pol, p_out->value,
							    p_in->perms[k]);
			if (!p_out->perms[k]) {
				printk(KERN_INFO
				       "SELinux:  Permission %s in class %s not defined in policy.\n",
				       p_in->perms[k], p_in->name);
				if (pol->reject_unknown)
					goto err;
				/* Leave the zero entry; map_decision()
				 * treats it per allow_unknown. */
				print_unknown_handle = true;
			}
			k++;
		}
		p_out->num_perms = k;
	}
	if (print_unknown_handle)
		printk(KERN_INFO "SELinux: the above unknown classes and permissions will be %s\n",
		       pol->allow_unknown ? "allowed" : "denied");
	*out_map_p = out_map;
	*out_map_size = i;
	return 0;
err:
	kfree(out_map);
	return -EINVAL;
}
/*
 * Translate a kernel class value into the value used by the loaded
 * policy.  Values outside the mapping table pass through unchanged.
 */
static u16 unmap_class(u16 tclass)
{
	return (tclass < current_mapping_size)
		? current_mapping[tclass].value : tclass;
}
/*
 * Translate a policy class value back into its kernel value via a
 * linear scan of the mapping table; entry 0 is reserved, so the scan
 * starts at 1.  Returns SECCLASS_NULL when no entry matches.
 */
static u16 map_class(u16 pol_value)
{
	u16 kclass;

	for (kclass = 1; kclass < current_mapping_size; kclass++)
		if (current_mapping[kclass].value == pol_value)
			return kclass;
	return SECCLASS_NULL;
}
/*
 * map_decision - translate an av_decision computed in policy permission
 * bits back into kernel permission bit positions for @tclass.
 * Permissions the policy does not know (zero mapping entries) are
 * granted or denied-and-audited according to @allow_unknown.
 */
static void map_decision(u16 tclass, struct av_decision *avd,
			 int allow_unknown)
{
	if (tclass < current_mapping_size) {
		unsigned i, n = current_mapping[tclass].num_perms;
		u32 result;
		/* Rebuild the allowed vector in kernel bit order. */
		for (i = 0, result = 0; i < n; i++) {
			if (avd->allowed & current_mapping[tclass].perms[i])
				result |= 1<<i;
			/* Unknown permission: allow if policy says so. */
			if (allow_unknown && !current_mapping[tclass].perms[i])
				result |= 1<<i;
		}
		avd->allowed = result;
		for (i = 0, result = 0; i < n; i++)
			if (avd->auditallow & current_mapping[tclass].perms[i])
				result |= 1<<i;
		avd->auditallow = result;
		for (i = 0, result = 0; i < n; i++) {
			if (avd->auditdeny & current_mapping[tclass].perms[i])
				result |= 1<<i;
			/* Unknown permission denied: audit the denial. */
			if (!allow_unknown && !current_mapping[tclass].perms[i])
				result |= 1<<i;
		}
		/*
		 * In case the kernel has a bug and requests a permission
		 * between num_perms and the maximum permission number, we
		 * should audit that denial
		 */
		for (; i < (sizeof(u32)*8); i++)
			result |= 1<<i;
		avd->auditdeny = result;
	}
}
/* Report whether the loaded policy was built with MLS support. */
int security_mls_enabled(void)
{
	int enabled = policydb.mls_enabled;

	return enabled;
}
/*
 * Return the boolean value of a constraint expression
 * when it is applied to the specified source and target
 * security contexts.
 *
 * xcontext is a special beast...  It is used by the validatetrans rules
 * only.  For these rules, scontext is the context before the transition,
 * tcontext is the context after the transition, and xcontext is the context
 * of the process performing the transition.  All other callers of
 * constraint_expr_eval should pass in NULL for xcontext.
 *
 * The expression list is evaluated as a postfix (RPN) program over the
 * boolean stack s[]; leaf operators push a result and the NOT/AND/OR
 * operators combine entries already on the stack.
 */
static int constraint_expr_eval(struct context *scontext,
				struct context *tcontext,
				struct context *xcontext,
				struct constraint_expr *cexpr)
{
	u32 val1, val2;
	struct context *c;
	struct role_datum *r1, *r2;
	struct mls_level *l1, *l2;
	struct constraint_expr *e;
	int s[CEXPR_MAXDEPTH];	/* evaluation stack */
	int sp = -1;		/* stack pointer; -1 == empty */
	for (e = cexpr; e; e = e->next) {
		switch (e->expr_type) {
		case CEXPR_NOT:
			BUG_ON(sp < 0);
			s[sp] = !s[sp];
			break;
		case CEXPR_AND:
			BUG_ON(sp < 1);
			sp--;
			s[sp] &= s[sp + 1];
			break;
		case CEXPR_OR:
			BUG_ON(sp < 1);
			sp--;
			s[sp] |= s[sp + 1];
			break;
		case CEXPR_ATTR:
			/* Attribute comparison between the two contexts;
			 * bail (deny) on stack overflow. */
			if (sp == (CEXPR_MAXDEPTH - 1))
				return 0;
			switch (e->attr) {
			case CEXPR_USER:
				val1 = scontext->user;
				val2 = tcontext->user;
				break;
			case CEXPR_TYPE:
				val1 = scontext->type;
				val2 = tcontext->type;
				break;
			case CEXPR_ROLE:
				val1 = scontext->role;
				val2 = tcontext->role;
				r1 = policydb.role_val_to_struct[val1 - 1];
				r2 = policydb.role_val_to_struct[val2 - 1];
				switch (e->op) {
				case CEXPR_DOM:
					s[++sp] = ebitmap_get_bit(&r1->dominates,
								  val2 - 1);
					continue;
				case CEXPR_DOMBY:
					s[++sp] = ebitmap_get_bit(&r2->dominates,
								  val1 - 1);
					continue;
				case CEXPR_INCOMP:
					s[++sp] = (!ebitmap_get_bit(&r1->dominates,
								    val2 - 1) &&
						   !ebitmap_get_bit(&r2->dominates,
								    val1 - 1));
					continue;
				default:
					break;
				}
				break;
			case CEXPR_L1L2:
				l1 = &(scontext->range.level[0]);
				l2 = &(tcontext->range.level[0]);
				goto mls_ops;
			case CEXPR_L1H2:
				l1 = &(scontext->range.level[0]);
				l2 = &(tcontext->range.level[1]);
				goto mls_ops;
			case CEXPR_H1L2:
				l1 = &(scontext->range.level[1]);
				l2 = &(tcontext->range.level[0]);
				goto mls_ops;
			case CEXPR_H1H2:
				l1 = &(scontext->range.level[1]);
				l2 = &(tcontext->range.level[1]);
				goto mls_ops;
			case CEXPR_L1H1:
				l1 = &(scontext->range.level[0]);
				l2 = &(scontext->range.level[1]);
				goto mls_ops;
			case CEXPR_L2H2:
				l1 = &(tcontext->range.level[0]);
				l2 = &(tcontext->range.level[1]);
				goto mls_ops;
			/* Shared MLS-level comparison for the six
			 * level-pair attributes selected above. */
			mls_ops:
			switch (e->op) {
			case CEXPR_EQ:
				s[++sp] = mls_level_eq(l1, l2);
				continue;
			case CEXPR_NEQ:
				s[++sp] = !mls_level_eq(l1, l2);
				continue;
			case CEXPR_DOM:
				s[++sp] = mls_level_dom(l1, l2);
				continue;
			case CEXPR_DOMBY:
				s[++sp] = mls_level_dom(l2, l1);
				continue;
			case CEXPR_INCOMP:
				s[++sp] = mls_level_incomp(l2, l1);
				continue;
			default:
				BUG();
				return 0;
			}
			break;
			default:
				BUG();
				return 0;
			}
			/* Plain user/type equality falls through here. */
			switch (e->op) {
			case CEXPR_EQ:
				s[++sp] = (val1 == val2);
				break;
			case CEXPR_NEQ:
				s[++sp] = (val1 != val2);
				break;
			default:
				BUG();
				return 0;
			}
			break;
		case CEXPR_NAMES:
			/* Compare one context attribute against a literal
			 * name set stored as an ebitmap. */
			if (sp == (CEXPR_MAXDEPTH-1))
				return 0;
			c = scontext;
			if (e->attr & CEXPR_TARGET)
				c = tcontext;
			else if (e->attr & CEXPR_XTARGET) {
				c = xcontext;
				if (!c) {
					BUG();
					return 0;
				}
			}
			if (e->attr & CEXPR_USER)
				val1 = c->user;
			else if (e->attr & CEXPR_ROLE)
				val1 = c->role;
			else if (e->attr & CEXPR_TYPE)
				val1 = c->type;
			else {
				BUG();
				return 0;
			}
			switch (e->op) {
			case CEXPR_EQ:
				s[++sp] = ebitmap_get_bit(&e->names, val1 - 1);
				break;
			case CEXPR_NEQ:
				s[++sp] = !ebitmap_get_bit(&e->names, val1 - 1);
				break;
			default:
				BUG();
				return 0;
			}
			break;
		default:
			BUG();
			return 0;
		}
	}
	/* A well-formed expression leaves exactly one result. */
	BUG_ON(sp != 0);
	return s[0];
}
/*
* security_dump_masked_av - dumps masked permissions during
* security_compute_av due to RBAC, MLS/Constraint and Type bounds.
*/
/*
 * hashtab_map() callback: record the name (@k) of each permission datum
 * (@d) at its bit position in the caller's name table (@args).
 */
static int dump_masked_av_helper(void *k, void *d, void *args)
{
	struct perm_datum *perm = d;
	char **names = args;
	BUG_ON(perm->value < 1 || perm->value > 32);
	names[perm->value - 1] = (char *)k;
	return 0;
}
/*
 * security_dump_masked_av - audit the permissions that were masked out
 * of an access decision between @scontext and @tcontext on @tclass.
 * @permissions: bitmap of masked permissions (nothing logged if zero)
 * @reason: short string naming the masking cause (e.g. "bounds")
 */
static void security_dump_masked_av(struct context *scontext,
				    struct context *tcontext,
				    u16 tclass,
				    u32 permissions,
				    const char *reason)
{
	struct common_datum *common_dat;
	struct class_datum *tclass_dat;
	struct audit_buffer *ab;
	char *tclass_name;
	char *scontext_name = NULL;
	char *tcontext_name = NULL;
	/*
	 * Zero-initialize: a masked bit whose permission has no name in
	 * the policy must read as NULL (rendered "????") rather than an
	 * indeterminate pointer handed to audit_log_format().
	 */
	char *permission_names[32] = { NULL };
	int index;
	u32 length;
	bool need_comma = false;

	if (!permissions)
		return;
	tclass_name = sym_name(&policydb, SYM_CLASSES, tclass - 1);
	tclass_dat = policydb.class_val_to_struct[tclass - 1];
	common_dat = tclass_dat->comdatum;
	/* init permission_names from the common and class perm tables */
	if (common_dat &&
	    hashtab_map(common_dat->permissions.table,
			dump_masked_av_helper, permission_names) < 0)
		goto out;
	if (hashtab_map(tclass_dat->permissions.table,
			dump_masked_av_helper, permission_names) < 0)
		goto out;
	/* get scontext/tcontext in text form */
	if (context_struct_to_string(scontext,
				     &scontext_name, &length) < 0)
		goto out;
	if (context_struct_to_string(tcontext,
				     &tcontext_name, &length) < 0)
		goto out;
	/* audit a message */
	ab = audit_log_start(current->audit_context,
			     GFP_ATOMIC, AUDIT_SELINUX_ERR);
	if (!ab)
		goto out;
	audit_log_format(ab, "op=security_compute_av reason=%s "
			 "scontext=%s tcontext=%s tclass=%s perms=",
			 reason, scontext_name, tcontext_name, tclass_name);
	for (index = 0; index < 32; index++) {
		u32 mask = (1 << index);
		if ((mask & permissions) == 0)
			continue;
		audit_log_format(ab, "%s%s",
				 need_comma ? "," : "",
				 permission_names[index]
				 ? permission_names[index] : "????");
		need_comma = true;
	}
	audit_log_end(ab);
out:
	/* release scontext/tcontext */
	kfree(tcontext_name);
	kfree(scontext_name);
	return;
}
/*
 * type_attribute_bounds_av - drop (and audit) any permissions that a
 * bounded source or target type would not also be granted by its
 * bounding type.
 */
static void type_attribute_bounds_av(struct context *scontext,
				     struct context *tcontext,
				     u16 tclass,
				     struct av_decision *avd)
{
	struct context lo_scontext;
	struct context lo_tcontext;
	struct av_decision lo_avd;
	struct type_datum *source;
	struct type_datum *target;
	u32 masked = 0;
	source = flex_array_get_ptr(policydb.type_val_to_struct_array,
				    scontext->type - 1);
	BUG_ON(!source);
	target = flex_array_get_ptr(policydb.type_val_to_struct_array,
				    tcontext->type - 1);
	BUG_ON(!target);
	if (source->bounds) {
		/* Recompute the decision with the source replaced by its
		 * bounding type. */
		memset(&lo_avd, 0, sizeof(lo_avd));
		memcpy(&lo_scontext, scontext, sizeof(lo_scontext));
		lo_scontext.type = source->bounds;
		context_struct_compute_av(&lo_scontext,
					  tcontext,
					  tclass,
					  &lo_avd);
		if ((lo_avd.allowed & avd->allowed) == avd->allowed)
			return;		/* no masked permission */
		masked = ~lo_avd.allowed & avd->allowed;
	}
	if (target->bounds) {
		/* Same check with the target replaced by its bound. */
		memset(&lo_avd, 0, sizeof(lo_avd));
		memcpy(&lo_tcontext, tcontext, sizeof(lo_tcontext));
		lo_tcontext.type = target->bounds;
		context_struct_compute_av(scontext,
					  &lo_tcontext,
					  tclass,
					  &lo_avd);
		if ((lo_avd.allowed & avd->allowed) == avd->allowed)
			return;		/* no masked permission */
		masked = ~lo_avd.allowed & avd->allowed;
	}
	if (source->bounds && target->bounds) {
		memset(&lo_avd, 0, sizeof(lo_avd));
		/*
		 * lo_scontext and lo_tcontext are already
		 * set up.
		 */
		context_struct_compute_av(&lo_scontext,
					  &lo_tcontext,
					  tclass,
					  &lo_avd);
		if ((lo_avd.allowed & avd->allowed) == avd->allowed)
			return;		/* no masked permission */
		masked = ~lo_avd.allowed & avd->allowed;
	}
	if (masked) {
		/* mask violated permissions */
		avd->allowed &= ~masked;
		/* audit masked permissions */
		security_dump_masked_av(scontext, tcontext,
					tclass, masked, "bounds");
	}
}
/*
 * Compute access vectors based on a context structure pair for
 * the permissions in a particular class.
 */
static void context_struct_compute_av(struct context *scontext,
				      struct context *tcontext,
				      u16 tclass,
				      struct av_decision *avd)
{
	struct constraint_node *constraint;
	struct role_allow *ra;
	struct avtab_key avkey;
	struct avtab_node *node;
	struct class_datum *tclass_datum;
	struct ebitmap *sattr, *tattr;
	struct ebitmap_node *snode, *tnode;
	unsigned int i, j;
	/* Start from "nothing allowed, every denial audited". */
	avd->allowed = 0;
	avd->auditallow = 0;
	avd->auditdeny = 0xffffffff;
	if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) {
		if (printk_ratelimit())
			printk(KERN_WARNING "SELinux:  Invalid class %hu\n", tclass);
		return;
	}
	tclass_datum = policydb.class_val_to_struct[tclass - 1];
	/*
	 * If a specific type enforcement rule was defined for
	 * this permission check, then use it.
	 */
	avkey.target_class = tclass;
	avkey.specified = AVTAB_AV;
	sattr = flex_array_get(policydb.type_attr_map_array, scontext->type - 1);
	BUG_ON(!sattr);
	tattr = flex_array_get(policydb.type_attr_map_array, tcontext->type - 1);
	BUG_ON(!tattr);
	/* Accumulate over every (source attr, target attr) pair. */
	ebitmap_for_each_positive_bit(sattr, snode, i) {
		ebitmap_for_each_positive_bit(tattr, tnode, j) {
			avkey.source_type = i + 1;
			avkey.target_type = j + 1;
			for (node = avtab_search_node(&policydb.te_avtab, &avkey);
			     node;
			     node = avtab_search_node_next(node, avkey.specified)) {
				if (node->key.specified == AVTAB_ALLOWED)
					avd->allowed |= node->datum.data;
				else if (node->key.specified == AVTAB_AUDITALLOW)
					avd->auditallow |= node->datum.data;
				else if (node->key.specified == AVTAB_AUDITDENY)
					avd->auditdeny &= node->datum.data;
			}
			/* Check conditional av table for additional permissions */
			cond_compute_av(&policydb.te_cond_avtab, &avkey, avd);
		}
	}
	/*
	 * Remove any permissions prohibited by a constraint (this includes
	 * the MLS policy).
	 */
	constraint = tclass_datum->constraints;
	while (constraint) {
		if ((constraint->permissions & (avd->allowed)) &&
		    !constraint_expr_eval(scontext, tcontext, NULL,
					  constraint->expr)) {
			avd->allowed &= ~(constraint->permissions);
		}
		constraint = constraint->next;
	}
	/*
	 * If checking process transition permission and the
	 * role is changing, then check the (current_role, new_role)
	 * pair.
	 */
	if (tclass == policydb.process_class &&
	    (avd->allowed & policydb.process_trans_perms) &&
	    scontext->role != tcontext->role) {
		for (ra = policydb.role_allow; ra; ra = ra->next) {
			if (scontext->role == ra->role &&
			    tcontext->role == ra->new_role)
				break;
		}
		if (!ra)
			avd->allowed &= ~policydb.process_trans_perms;
	}
	/*
	 * If the given source and target types have boundary
	 * constraint, lazy checks have to mask any violated
	 * permission and notice it to userspace via audit.
	 */
	type_attribute_bounds_av(scontext, tcontext,
				 tclass, avd);
}
/*
 * A validatetrans constraint was violated: audit the denial and return
 * -EPERM when enforcing, 0 (permit) otherwise.
 */
static int security_validtrans_handle_fail(struct context *ocontext,
					   struct context *ncontext,
					   struct context *tcontext,
					   u16 tclass)
{
	char *o = NULL, *n = NULL, *t = NULL;
	u32 olen, nlen, tlen;

	/*
	 * Only log when all three contexts can be rendered; kfree(NULL)
	 * is a no-op, so a single cleanup path suffices for partial
	 * failures.
	 */
	if (!context_struct_to_string(ocontext, &o, &olen) &&
	    !context_struct_to_string(ncontext, &n, &nlen) &&
	    !context_struct_to_string(tcontext, &t, &tlen))
		audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
			  "security_validate_transition:  denied for"
			  " oldcontext=%s newcontext=%s taskcontext=%s tclass=%s",
			  o, n, t, sym_name(&policydb, SYM_CLASSES, tclass-1));
	kfree(o);
	kfree(n);
	kfree(t);
#ifdef CONFIG_ALWAYS_ENFORCE
	selinux_enforcing = 1;
#endif
	if (!selinux_enforcing)
		return 0;
	return -EPERM;
}
/**
 * security_validate_transition - check a context transition against the
 * policy's validatetrans constraints.
 * @oldsid: SID before the transition
 * @newsid: SID after the transition
 * @tasksid: SID of the task performing the transition
 * @orig_tclass: kernel value of the target class
 *
 * Returns 0 if permitted (or no policy is loaded), -EINVAL on an
 * unrecognized class/SID, -EPERM when a constraint denies it while
 * enforcing.
 */
int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
				 u16 orig_tclass)
{
	struct context *ocontext;
	struct context *ncontext;
	struct context *tcontext;
	struct class_datum *tclass_datum;
	struct constraint_node *constraint;
	u16 tclass;
	int rc = 0;
	if (!ss_initialized)
		return 0;
	read_lock(&policy_rwlock);
	tclass = unmap_class(orig_tclass);
	if (!tclass || tclass > policydb.p_classes.nprim) {
		printk(KERN_ERR "SELinux: %s:  unrecognized class %d\n",
			__func__, tclass);
		rc = -EINVAL;
		goto out;
	}
	tclass_datum = policydb.class_val_to_struct[tclass - 1];
	ocontext = sidtab_search(&sidtab, oldsid);
	if (!ocontext) {
		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
			__func__, oldsid);
		rc = -EINVAL;
		goto out;
	}
	ncontext = sidtab_search(&sidtab, newsid);
	if (!ncontext) {
		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
			__func__, newsid);
		rc = -EINVAL;
		goto out;
	}
	tcontext = sidtab_search(&sidtab, tasksid);
	if (!tcontext) {
		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
			__func__, tasksid);
		rc = -EINVAL;
		goto out;
	}
	/* All validatetrans constraints for the class must hold. */
	constraint = tclass_datum->validatetrans;
	while (constraint) {
		if (!constraint_expr_eval(ocontext, ncontext, tcontext,
					  constraint->expr)) {
			rc = security_validtrans_handle_fail(ocontext, ncontext,
							     tcontext, tclass);
			goto out;
		}
		constraint = constraint->next;
	}
out:
	read_unlock(&policy_rwlock);
	return rc;
}
/*
 * security_bounded_transition - check whether the given
 * transition is directed to bounded, or not.
 * It returns 0, if @new_sid is bounded by @old_sid.
 * Otherwise, it returns error code.
 *
 * @old_sid : current security identifier
 * @new_sid : destinated security identifier
 */
int security_bounded_transition(u32 old_sid, u32 new_sid)
{
	struct context *old_context, *new_context;
	struct type_datum *type;
	int index;
	int rc;
	read_lock(&policy_rwlock);
	rc = -EINVAL;
	old_context = sidtab_search(&sidtab, old_sid);
	if (!old_context) {
		printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n",
		       __func__, old_sid);
		goto out;
	}
	rc = -EINVAL;
	new_context = sidtab_search(&sidtab, new_sid);
	if (!new_context) {
		printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n",
		       __func__, new_sid);
		goto out;
	}
	rc = 0;
	/* type/domain unchanged */
	if (old_context->type == new_context->type)
		goto out;
	/* Walk up the new type's bounds chain looking for the old type. */
	index = new_context->type;
	while (true) {
		type = flex_array_get_ptr(policydb.type_val_to_struct_array,
					  index - 1);
		BUG_ON(!type);
		/* not bounded anymore */
		rc = -EPERM;
		if (!type->bounds)
			break;
		/* @newsid is bounded by @oldsid */
		rc = 0;
		if (type->bounds == old_context->type)
			break;
		index = type->bounds;
	}
	if (rc) {
		/* Denied: audit the attempted transition. */
		char *old_name = NULL;
		char *new_name = NULL;
		u32 length;
		if (!context_struct_to_string(old_context,
					      &old_name, &length) &&
		    !context_struct_to_string(new_context,
					      &new_name, &length)) {
			audit_log(current->audit_context,
				  GFP_ATOMIC, AUDIT_SELINUX_ERR,
				  "op=security_bounded_transition "
				  "result=denied "
				  "oldcontext=%s newcontext=%s",
				  old_name, new_name);
		}
		kfree(new_name);
		kfree(old_name);
	}
out:
	read_unlock(&policy_rwlock);
	return rc;
}
/* Reset an access vector decision to its default (nothing granted). */
static void avd_init(struct av_decision *avd)
{
	avd->seqno = latest_granting;
	avd->flags = 0;
	avd->allowed = 0;
	avd->auditallow = 0;
	/* Every denial is audited until the policy clears bits. */
	avd->auditdeny = 0xffffffff;
}
/**
 * security_compute_av - Compute access vector decisions.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @orig_tclass: target security class (kernel value)
 * @avd: access vector decisions
 *
 * Compute a set of access vector decisions based on the
 * SID pair (@ssid, @tsid) for the permissions in @orig_tclass.
 * The result is expressed in kernel permission bits.
 */
void security_compute_av(u32 ssid,
			 u32 tsid,
			 u16 orig_tclass,
			 struct av_decision *avd)
{
	u16 tclass;
	struct context *scontext = NULL, *tcontext = NULL;
	read_lock(&policy_rwlock);
	avd_init(avd);
	/* Before the first policy load everything is permitted. */
	if (!ss_initialized)
		goto allow;
	scontext = sidtab_search(&sidtab, ssid);
	if (!scontext) {
		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
		       __func__, ssid);
		goto out;
	}
	/* permissive domain? */
	if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
		avd->flags |= AVD_FLAGS_PERMISSIVE;
	tcontext = sidtab_search(&sidtab, tsid);
	if (!tcontext) {
		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
		       __func__, tsid);
		goto out;
	}
	tclass = unmap_class(orig_tclass);
	if (unlikely(orig_tclass && !tclass)) {
		/* Class unknown to the policy: honour its unknown policy. */
		if (policydb.allow_unknown)
			goto allow;
		goto out;
	}
	context_struct_compute_av(scontext, tcontext, tclass, avd);
	/* Translate the decision back into kernel bit positions. */
	map_decision(orig_tclass, avd, policydb.allow_unknown);
out:
	read_unlock(&policy_rwlock);
	return;
allow:
	avd->allowed = 0xffffffff;
	goto out;
}
/**
 * security_compute_av_user - Compute access vector decisions for
 * userspace-supplied (already policy-valued) classes.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class in policy values (no unmapping done)
 * @avd: access vector decisions, left in policy bit positions
 */
void security_compute_av_user(u32 ssid,
			      u32 tsid,
			      u16 tclass,
			      struct av_decision *avd)
{
	struct context *scontext = NULL, *tcontext = NULL;
	read_lock(&policy_rwlock);
	avd_init(avd);
	/* Before the first policy load everything is permitted. */
	if (!ss_initialized)
		goto allow;
	scontext = sidtab_search(&sidtab, ssid);
	if (!scontext) {
		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
		       __func__, ssid);
		goto out;
	}
	/* permissive domain? */
	if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
		avd->flags |= AVD_FLAGS_PERMISSIVE;
	tcontext = sidtab_search(&sidtab, tsid);
	if (!tcontext) {
		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
		       __func__, tsid);
		goto out;
	}
	if (unlikely(!tclass)) {
		if (policydb.allow_unknown)
			goto allow;
		goto out;
	}
	context_struct_compute_av(scontext, tcontext, tclass, avd);
out:
	read_unlock(&policy_rwlock);
	return;
allow:
	avd->allowed = 0xffffffff;
	goto out;
}
/*
 * Write the security context string representation of
 * the context structure `context' into a dynamically
 * allocated string of the correct size.  Set `*scontext'
 * to point to this string and set `*scontext_len' to
 * the length of the string.
 *
 * If `scontext' is NULL, only the length is computed and returned
 * via `*scontext_len'.  The caller owns (and must kfree) the string.
 */
static int context_struct_to_string(struct context *context, char **scontext, u32 *scontext_len)
{
	char *scontextp;
	if (scontext)
		*scontext = NULL;
	*scontext_len = 0;
	/* A context kept in uninterpreted string form is copied as-is. */
	if (context->len) {
		*scontext_len = context->len;
		if (scontext) {
			*scontext = kstrdup(context->str, GFP_ATOMIC);
			if (!(*scontext))
				return -ENOMEM;
		}
		return 0;
	}
	/* Compute the size of the context. */
	*scontext_len += strlen(sym_name(&policydb, SYM_USERS, context->user - 1)) + 1;
	*scontext_len += strlen(sym_name(&policydb, SYM_ROLES, context->role - 1)) + 1;
	*scontext_len += strlen(sym_name(&policydb, SYM_TYPES, context->type - 1)) + 1;
	*scontext_len += mls_compute_context_len(context);
	if (!scontext)
		return 0;
	/* Allocate space for the context; caller must free this space. */
	scontextp = kmalloc(*scontext_len, GFP_ATOMIC);
	if (!scontextp)
		return -ENOMEM;
	*scontext = scontextp;
	/*
	 * Copy the user name, role name and type name into the context.
	 */
	sprintf(scontextp, "%s:%s:%s",
		sym_name(&policydb, SYM_USERS, context->user - 1),
		sym_name(&policydb, SYM_ROLES, context->role - 1),
		sym_name(&policydb, SYM_TYPES, context->type - 1));
	/* Advance past "user:role:type" to append the MLS fields. */
	scontextp += strlen(sym_name(&policydb, SYM_USERS, context->user - 1)) +
		     1 + strlen(sym_name(&policydb, SYM_ROLES, context->role - 1)) +
		     1 + strlen(sym_name(&policydb, SYM_TYPES, context->type - 1));
	mls_sid_to_context(context, &scontextp);
	*scontextp = 0;
	return 0;
}
#include "initial_sid_to_string.h"
/* Return the fixed context string for an initial SID, or NULL if @sid
 * is out of the initial-SID range. */
const char *security_get_initial_sid_context(u32 sid)
{
	if (likely(sid <= SECINITSID_NUM))
		return initial_sid_to_string[sid];
	return NULL;
}
/*
 * security_sid_to_context_core - shared worker for SID-to-string lookups.
 * @sid: SID to translate
 * @scontext: receives the allocated string (may be NULL for length-only)
 * @scontext_len: receives the string length
 * @force: use sidtab_search_force() to also find invalid/deferred contexts
 */
static int security_sid_to_context_core(u32 sid, char **scontext,
					u32 *scontext_len, int force)
{
	struct context *context;
	int rc = 0;
	if (scontext)
		*scontext = NULL;
	*scontext_len  = 0;
	if (!ss_initialized) {
		/* Before the first policy load only initial SIDs exist. */
		if (sid <= SECINITSID_NUM) {
			char *scontextp;
			*scontext_len = strlen(initial_sid_to_string[sid]) + 1;
			if (!scontext)
				goto out;
			scontextp = kmalloc(*scontext_len, GFP_ATOMIC);
			if (!scontextp) {
				rc = -ENOMEM;
				goto out;
			}
			strcpy(scontextp, initial_sid_to_string[sid]);
			*scontext = scontextp;
			goto out;
		}
		printk(KERN_ERR "SELinux: %s:  called before initial "
		       "load_policy on unknown SID %d\n", __func__, sid);
		rc = -EINVAL;
		goto out;
	}
	read_lock(&policy_rwlock);
	if (force)
		context = sidtab_search_force(&sidtab, sid);
	else
		context = sidtab_search(&sidtab, sid);
	if (!context) {
		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
			__func__, sid);
		rc = -EINVAL;
		goto out_unlock;
	}
	rc = context_struct_to_string(context, scontext, scontext_len);
out_unlock:
	read_unlock(&policy_rwlock);
out:
	return rc;
}
/**
 * security_sid_to_context - Obtain a context for a given SID.
 * @sid: security identifier, SID
 * @scontext: security context
 * @scontext_len: length in bytes
 *
 * Write the string representation of the context associated with @sid
 * into a dynamically allocated string of the correct size.  Set @scontext
 * to point to this string and set @scontext_len to the length of the string.
 * The caller must kfree() the returned string.
 */
int security_sid_to_context(u32 sid, char **scontext, u32 *scontext_len)
{
	return security_sid_to_context_core(sid, scontext, scontext_len, 0);
}
/*
 * Like security_sid_to_context(), but also resolves SIDs whose context
 * is currently invalid under the loaded policy (forced lookups).
 */
int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len)
{
	return security_sid_to_context_core(sid, scontext, scontext_len, 1);
}
/*
 * Parse "user:role:type[:mls]" from @scontext into @ctx against @pol.
 *
 * Caveat: Mutates scontext (the separators are overwritten with NULs
 * during parsing).  Returns -EINVAL on any parse or validity failure,
 * destroying the partially built context.
 */
static int string_to_context_struct(struct policydb *pol,
				    struct sidtab *sidtabp,
				    char *scontext,
				    u32 scontext_len,
				    struct context *ctx,
				    u32 def_sid)
{
	struct role_datum *role;
	struct type_datum *typdatum;
	struct user_datum *usrdatum;
	char *scontextp, *p, oldc;
	int rc = 0;
	context_init(ctx);
	/* Parse the security context. */
	rc = -EINVAL;
	scontextp = (char *) scontext;
	/* Extract the user. */
	p = scontextp;
	while (*p && *p != ':')
		p++;
	if (*p == 0)
		goto out;
	*p++ = 0;
	usrdatum = hashtab_search(pol->p_users.table, scontextp);
	if (!usrdatum)
		goto out;
	ctx->user = usrdatum->value;
	/* Extract role. */
	scontextp = p;
	while (*p && *p != ':')
		p++;
	if (*p == 0)
		goto out;
	*p++ = 0;
	role = hashtab_search(pol->p_roles.table, scontextp);
	if (!role)
		goto out;
	ctx->role = role->value;
	/* Extract type. */
	scontextp = p;
	while (*p && *p != ':')
		p++;
	/* Remember whether an MLS field follows the type. */
	oldc = *p;
	*p++ = 0;
	typdatum = hashtab_search(pol->p_types.table, scontextp);
	if (!typdatum || typdatum->attribute)
		goto out;
	ctx->type = typdatum->value;
	rc = mls_context_to_sid(pol, oldc, &p, ctx, sidtabp, def_sid);
	if (rc)
		goto out;
	rc = -EINVAL;
	/* Reject trailing garbage after the parsed fields. */
	if ((p - scontext) < scontext_len)
		goto out;
	/* Check the validity of the new context. */
	if (!policydb_context_isvalid(pol, ctx))
		goto out;
	rc = 0;
out:
	if (rc)
		context_destroy(ctx);
	return rc;
}
/*
 * security_context_to_sid_core - shared worker for context-to-SID lookups.
 * @scontext: context string (not required to be NUL-terminated)
 * @scontext_len: its length in bytes
 * @sid: receives the resulting SID
 * @def_sid: default SID passed to the MLS layer for missing MLS fields
 * @gfp_flags: allocation flags
 * @force: store the raw string for later mapping if it is not yet valid
 */
static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
					u32 *sid, u32 def_sid, gfp_t gfp_flags,
					int force)
{
	char *scontext2, *str = NULL;
	struct context context;
	int rc = 0;

	/* An empty security context is never valid. */
	if (!scontext_len)
		return -EINVAL;

	if (!ss_initialized) {
		int i;

		/* Before the first policy load only initial SIDs exist. */
		for (i = 1; i < SECINITSID_NUM; i++) {
			if (!strcmp(initial_sid_to_string[i], scontext)) {
				*sid = i;
				return 0;
			}
		}
		*sid = SECINITSID_KERNEL;
		return 0;
	}
	*sid = SECSID_NULL;

	/* Copy the string so that we can modify the copy as we parse it. */
	scontext2 = kmalloc(scontext_len + 1, gfp_flags);
	if (!scontext2)
		return -ENOMEM;
	memcpy(scontext2, scontext, scontext_len);
	scontext2[scontext_len] = 0;

	if (force) {
		/* Save another copy for storing in uninterpreted form */
		rc = -ENOMEM;
		str = kstrdup(scontext2, gfp_flags);
		if (!str)
			goto out;
	}

	read_lock(&policy_rwlock);
	rc = string_to_context_struct(&policydb, &sidtab, scontext2,
				      scontext_len, &context, def_sid);
	if (rc == -EINVAL && force) {
		/* Keep the raw string so a later policy reload can map it. */
		context.str = str;
		context.len = scontext_len;
		str = NULL;
	} else if (rc)
		goto out_unlock;
	rc = sidtab_context_to_sid(&sidtab, &context, sid);
	context_destroy(&context);
out_unlock:
	read_unlock(&policy_rwlock);
out:
	kfree(scontext2);
	kfree(str);
	return rc;
}
/**
 * security_context_to_sid - Obtain a SID for a given security context.
 * @scontext: security context
 * @scontext_len: length in bytes
 * @sid: security identifier, SID
 *
 * Obtains a SID associated with the security context that
 * has the string representation specified by @scontext.
 * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
 * memory is available, or 0 on success.
 */
int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid)
{
	/* Non-forcing lookup: invalid contexts are rejected outright. */
	return security_context_to_sid_core(scontext, scontext_len,
					    sid, SECSID_NULL, GFP_KERNEL, 0);
}
/**
 * security_context_to_sid_default - Obtain a SID for a given security context,
 * falling back to specified default if needed.
 *
 * @scontext: security context
 * @scontext_len: length in bytes
 * @sid: security identifier, SID
 * @def_sid: default SID to assign on error
 * @gfp_flags: allocation flags for the working copies of the string
 *
 * Obtains a SID associated with the security context that
 * has the string representation specified by @scontext.
 * The default SID is passed to the MLS layer to be used to allow
 * kernel labeling of the MLS field if the MLS field is not present
 * (for upgrading to MLS without full relabel).
 * Implicitly forces adding of the context even if it cannot be mapped yet.
 * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
 * memory is available, or 0 on success.
 */
int security_context_to_sid_default(const char *scontext, u32 scontext_len,
				    u32 *sid, u32 def_sid, gfp_t gfp_flags)
{
	return security_context_to_sid_core(scontext, scontext_len,
					    sid, def_sid, gfp_flags, 1);
}
/*
 * Like security_context_to_sid(), but stores a context that is not
 * (yet) valid under the loaded policy in uninterpreted string form so
 * it can be mapped after a later policy reload.
 */
int security_context_to_sid_force(const char *scontext, u32 scontext_len,
				  u32 *sid)
{
	return security_context_to_sid_core(scontext, scontext_len,
					    sid, SECSID_NULL, GFP_KERNEL, 1);
}
/*
 * A computed context failed validation: audit the event and return
 * -EACCES when enforcing, 0 (permit) otherwise.
 */
static int compute_sid_handle_invalid_context(
	struct context *scontext,
	struct context *tcontext,
	u16 tclass,
	struct context *newcontext)
{
	char *s = NULL, *t = NULL, *n = NULL;
	u32 slen, tlen, nlen;

	/*
	 * Log only when every context string can be built; kfree(NULL)
	 * is harmless, so one shared cleanup covers partial failures.
	 */
	if (!context_struct_to_string(scontext, &s, &slen) &&
	    !context_struct_to_string(tcontext, &t, &tlen) &&
	    !context_struct_to_string(newcontext, &n, &nlen))
		audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
			  "security_compute_sid:  invalid context %s"
			  " for scontext=%s"
			  " tcontext=%s"
			  " tclass=%s",
			  n, s, t, sym_name(&policydb, SYM_CLASSES, tclass-1));
	kfree(s);
	kfree(t);
	kfree(n);
#ifdef CONFIG_ALWAYS_ENFORCE
	selinux_enforcing = 1;
#endif
	if (!selinux_enforcing)
		return 0;
	return -EACCES;
}
/*
 * filename_compute_type - apply a name-based type_transition rule.
 * Overrides @newcontext->type when a filename transition rule matches
 * (@stype, @ttype, @tclass, @objname); otherwise leaves it untouched.
 */
static void filename_compute_type(struct policydb *p, struct context *newcontext,
				  u32 stype, u32 ttype, u16 tclass,
				  const char *objname)
{
	struct filename_trans ft;
	struct filename_trans_datum *otype;
	/*
	 * Most filename trans rules are going to live in specific directories
	 * like /dev or /var/run.  This bitmap will quickly skip rule searches
	 * if the ttype does not contain any rules.
	 */
	if (!ebitmap_get_bit(&p->filename_trans_ttypes, ttype))
		return;
	ft.stype = stype;
	ft.ttype = ttype;
	ft.tclass = tclass;
	ft.name = objname;
	otype = hashtab_search(p->filename_trans, &ft);
	if (otype)
		newcontext->type = otype->otype;
}
/*
 * security_compute_sid - compute the SID for a transition, member
 * selection, or relabel (change) decision.
 * @ssid/@tsid: source/target SIDs
 * @orig_tclass: target class, in kernel space if @kern, else policy space
 * @specified: AVTAB_TRANSITION, AVTAB_MEMBER or AVTAB_CHANGE
 * @objname: last path component for filename transitions (may be NULL)
 * @out_sid: receives the computed SID
 * Returns 0 on success or a negative errno.
 */
static int security_compute_sid(u32 ssid,
u32 tsid,
u16 orig_tclass,
u32 specified,
const char *objname,
u32 *out_sid,
bool kern)
{
struct class_datum *cladatum = NULL;
struct context *scontext = NULL, *tcontext = NULL, newcontext;
struct role_trans *roletr = NULL;
struct avtab_key avkey;
struct avtab_datum *avdatum;
struct avtab_node *node;
u16 tclass;
int rc = 0;
bool sock;
/* Before policy load there is nothing to compute: echo a caller SID. */
if (!ss_initialized) {
switch (orig_tclass) {
case SECCLASS_PROCESS: /* kernel value */
*out_sid = ssid;
break;
default:
*out_sid = tsid;
break;
}
goto out;
}
context_init(&newcontext);
read_lock(&policy_rwlock);
/* Map the class into policy-value space when it arrived in kernel space. */
if (kern) {
tclass = unmap_class(orig_tclass);
sock = security_is_socket_class(orig_tclass);
} else {
tclass = orig_tclass;
sock = security_is_socket_class(map_class(tclass));
}
scontext = sidtab_search(&sidtab, ssid);
if (!scontext) {
printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
__func__, ssid);
rc = -EINVAL;
goto out_unlock;
}
tcontext = sidtab_search(&sidtab, tsid);
if (!tcontext) {
printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
__func__, tsid);
rc = -EINVAL;
goto out_unlock;
}
/* Look up per-class default_{user,role,type} settings, if any. */
if (tclass && tclass <= policydb.p_classes.nprim)
cladatum = policydb.class_val_to_struct[tclass - 1];
/* Set the user identity. */
switch (specified) {
case AVTAB_TRANSITION:
case AVTAB_CHANGE:
if (cladatum && cladatum->default_user == DEFAULT_TARGET) {
newcontext.user = tcontext->user;
} else {
/* notice this gets both DEFAULT_SOURCE and unset */
/* Use the process user identity. */
newcontext.user = scontext->user;
}
break;
case AVTAB_MEMBER:
/* Use the related object owner. */
newcontext.user = tcontext->user;
break;
}
/* Set the role to default values. */
if (cladatum && cladatum->default_role == DEFAULT_SOURCE) {
newcontext.role = scontext->role;
} else if (cladatum && cladatum->default_role == DEFAULT_TARGET) {
newcontext.role = tcontext->role;
} else {
/* Processes and sockets keep the source role; objects get object_r. */
if ((tclass == policydb.process_class) || (sock == true))
newcontext.role = scontext->role;
else
newcontext.role = OBJECT_R_VAL;
}
/* Set the type to default values. */
if (cladatum && cladatum->default_type == DEFAULT_SOURCE) {
newcontext.type = scontext->type;
} else if (cladatum && cladatum->default_type == DEFAULT_TARGET) {
newcontext.type = tcontext->type;
} else {
if ((tclass == policydb.process_class) || (sock == true)) {
/* Use the type of process. */
newcontext.type = scontext->type;
} else {
/* Use the type of the related object. */
newcontext.type = tcontext->type;
}
}
/* Look for a type transition/member/change rule. */
avkey.source_type = scontext->type;
avkey.target_type = tcontext->type;
avkey.target_class = tclass;
avkey.specified = specified;
avdatum = avtab_search(&policydb.te_avtab, &avkey);
/* If no permanent rule, also check for enabled conditional rules */
if (!avdatum) {
node = avtab_search_node(&policydb.te_cond_avtab, &avkey);
for (; node; node = avtab_search_node_next(node, specified)) {
if (node->key.specified & AVTAB_ENABLED) {
avdatum = &node->datum;
break;
}
}
}
if (avdatum) {
/* Use the type from the type transition/member/change rule. */
newcontext.type = avdatum->data;
}
/* if we have a objname this is a file trans check so check those rules */
if (objname)
filename_compute_type(&policydb, &newcontext, scontext->type,
tcontext->type, tclass, objname);
/* Check for class-specific changes. */
if (specified & AVTAB_TRANSITION) {
/* Look for a role transition rule. */
for (roletr = policydb.role_tr; roletr; roletr = roletr->next) {
if ((roletr->role == scontext->role) &&
(roletr->type == tcontext->type) &&
(roletr->tclass == tclass)) {
/* Use the role transition rule. */
newcontext.role = roletr->new_role;
break;
}
}
}
/* Set the MLS attributes.
This is done last because it may allocate memory. */
rc = mls_compute_sid(scontext, tcontext, tclass, specified,
&newcontext, sock);
if (rc)
goto out_unlock;
/* Check the validity of the context. */
if (!policydb_context_isvalid(&policydb, &newcontext)) {
rc = compute_sid_handle_invalid_context(scontext,
tcontext,
tclass,
&newcontext);
if (rc)
goto out_unlock;
}
/* Obtain the sid for the context. */
rc = sidtab_context_to_sid(&sidtab, &newcontext, out_sid);
out_unlock:
read_unlock(&policy_rwlock);
context_destroy(&newcontext);
out:
return rc;
}
/**
 * security_transition_sid - Compute the SID for a new subject/object.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @qstr: last path component of the object, for filename transitions
 *        (may be NULL)
 * @out_sid: security identifier for new subject/object
 *
 * Compute a SID to use for labeling a new subject or object in the
 * class @tclass based on a SID pair (@ssid, @tsid).
 * Return -%EINVAL if any of the parameters are invalid, -%ENOMEM
 * if insufficient memory is available, or %0 if the new SID was
 * computed successfully.
 */
int security_transition_sid(u32 ssid, u32 tsid, u16 tclass,
			    const struct qstr *qstr, u32 *out_sid)
{
	const char *objname = NULL;

	/* Only pass a component name when the caller supplied one. */
	if (qstr)
		objname = qstr->name;

	return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
				    objname, out_sid, true);
}
/*
 * Userspace-facing variant of security_transition_sid(): the class value
 * is already in policy (unmapped) space, hence kern = false.
 */
int security_transition_sid_user(u32 ssid, u32 tsid, u16 tclass,
				 const char *objname, u32 *out_sid)
{
	return security_compute_sid(ssid, tsid, tclass,
				    AVTAB_TRANSITION, objname,
				    out_sid, false);
}
/**
* security_member_sid - Compute the SID for member selection.
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
* @out_sid: security identifier for selected member
*
* Compute a SID to use when selecting a member of a polyinstantiated
* object of class @tclass based on a SID pair (@ssid, @tsid).
* Return -%EINVAL if any of the parameters are invalid, -%ENOMEM
* if insufficient memory is available, or %0 if the SID was
* computed successfully.
*/
int security_member_sid(u32 ssid,
			u32 tsid,
			u16 tclass,
			u32 *out_sid)
{
	/* Member selection never involves a filename, so objname is NULL. */
	return security_compute_sid(ssid, tsid, tclass,
				    AVTAB_MEMBER, NULL,
				    out_sid, false);
}
/**
* security_change_sid - Compute the SID for object relabeling.
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
* @out_sid: security identifier for selected member
*
* Compute a SID to use for relabeling an object of class @tclass
* based on a SID pair (@ssid, @tsid).
* Return -%EINVAL if any of the parameters are invalid, -%ENOMEM
* if insufficient memory is available, or %0 if the SID was
* computed successfully.
*/
int security_change_sid(u32 ssid,
			u32 tsid,
			u16 tclass,
			u32 *out_sid)
{
	/* Relabel decisions have no associated filename. */
	return security_compute_sid(ssid, tsid, tclass,
				    AVTAB_CHANGE, NULL,
				    out_sid, false);
}
/*
 * sidtab_map() callback: copy one SID->context mapping into the new SID
 * table. Initial SIDs are skipped; they are (re)loaded directly from the
 * new policy by policydb_load_isids().
 */
static int clone_sid(u32 sid,
		     struct context *context,
		     void *arg)
{
	struct sidtab *newtab = arg;

	if (sid <= SECINITSID_NUM)
		return 0;
	return sidtab_insert(newtab, sid, context);
}
/*
 * Handle a context that became invalid under the newly loaded policy:
 * reject it (-EINVAL) when enforcing, otherwise log a warning and let
 * the caller keep the (invalid) context.
 */
static inline int convert_context_handle_invalid_context(struct context *context)
{
char *s;
u32 len;
#ifdef CONFIG_ALWAYS_ENFORCE
/* Vendor build option: pin the system to enforcing mode. */
selinux_enforcing = 1;
#endif
if (selinux_enforcing)
return -EINVAL;
if (!context_struct_to_string(context, &s, &len)) {
printk(KERN_WARNING "SELinux: Context %s would be invalid if enforcing\n", s);
kfree(s);
}
return 0;
}
/* Old/new policydb pair handed to convert_context() via sidtab_map(). */
struct convert_context_args {
struct policydb *oldp; /* policy the contexts currently refer to */
struct policydb *newp; /* policy being installed */
};
/*
* Convert the values in the security context
* structure `c' from the values specified
* in the policy `p->oldp' to the values specified
* in the policy `p->newp'. Verify that the
* context is valid under the new policy.
*/
/*
 * sidtab_map() callback used during policy reload. Rewrites the symbol
 * values (user/role/type/MLS) in @c from oldp's value space to newp's.
 * Contexts that cannot be mapped are kept in string form (c->str) so a
 * later policy may make them valid again. Returns 0 unless an
 * unrecoverable error occurs.
 */
static int convert_context(u32 key,
struct context *c,
void *p)
{
struct convert_context_args *args;
struct context oldc;
struct ocontext *oc;
struct mls_range *range;
struct role_datum *role;
struct type_datum *typdatum;
struct user_datum *usrdatum;
char *s;
u32 len;
int rc = 0;
/* Initial SIDs are reloaded from the new policy; leave them alone. */
if (key <= SECINITSID_NUM)
goto out;
args = p;
/* A string-form context was invalid under the old policy: retry the
mapping against the new policy. */
if (c->str) {
struct context ctx;
rc = -ENOMEM;
s = kstrdup(c->str, GFP_KERNEL);
if (!s)
goto out;
rc = string_to_context_struct(args->newp, NULL, s,
c->len, &ctx, SECSID_NULL);
kfree(s);
if (!rc) {
printk(KERN_INFO "SELinux: Context %s became valid (mapped).\n",
c->str);
/* Replace string with mapped representation. */
kfree(c->str);
memcpy(c, &ctx, sizeof(*c));
goto out;
} else if (rc == -EINVAL) {
/* Retain string representation for later mapping. */
rc = 0;
goto out;
} else {
/* Other error condition, e.g. ENOMEM. */
printk(KERN_ERR "SELinux: Unable to map context %s, rc = %d.\n",
c->str, -rc);
goto out;
}
}
/* Keep a copy so we can fall back to string form on failure; the
"bad" label below relies on oldc being initialized here. */
rc = context_cpy(&oldc, c);
if (rc)
goto out;
/* Convert the user. */
rc = -EINVAL;
usrdatum = hashtab_search(args->newp->p_users.table,
sym_name(args->oldp, SYM_USERS, c->user - 1));
if (!usrdatum)
goto bad;
c->user = usrdatum->value;
/* Convert the role. */
rc = -EINVAL;
role = hashtab_search(args->newp->p_roles.table,
sym_name(args->oldp, SYM_ROLES, c->role - 1));
if (!role)
goto bad;
c->role = role->value;
/* Convert the type. */
rc = -EINVAL;
typdatum = hashtab_search(args->newp->p_types.table,
sym_name(args->oldp, SYM_TYPES, c->type - 1));
if (!typdatum)
goto bad;
c->type = typdatum->value;
/* Convert the MLS fields if dealing with MLS policies */
if (args->oldp->mls_enabled && args->newp->mls_enabled) {
rc = mls_convert_context(args->oldp, args->newp, c);
if (rc)
goto bad;
} else if (args->oldp->mls_enabled && !args->newp->mls_enabled) {
/*
* Switching between MLS and non-MLS policy:
* free any storage used by the MLS fields in the
* context for all existing entries in the sidtab.
*/
mls_context_destroy(c);
} else if (!args->oldp->mls_enabled && args->newp->mls_enabled) {
/*
* Switching between non-MLS and MLS policy:
* ensure that the MLS fields of the context for all
* existing entries in the sidtab are filled in with a
* suitable default value, likely taken from one of the
* initial SIDs.
*/
oc = args->newp->ocontexts[OCON_ISID];
while (oc && oc->sid[0] != SECINITSID_UNLABELED)
oc = oc->next;
rc = -EINVAL;
if (!oc) {
printk(KERN_ERR "SELinux: unable to look up"
" the initial SIDs list\n");
goto bad;
}
range = &oc->context[0].range;
rc = mls_range_set(c, range);
if (rc)
goto bad;
}
/* Check the validity of the new context. */
if (!policydb_context_isvalid(args->newp, c)) {
rc = convert_context_handle_invalid_context(&oldc);
if (rc)
goto bad;
}
context_destroy(&oldc);
rc = 0;
out:
return rc;
}
bad:
/* Map old representation to string and save it. */
rc = context_struct_to_string(&oldc, &s, &len);
if (rc)
return rc;
context_destroy(&oldc);
context_destroy(c);
c->str = s;
c->len = len;
printk(KERN_INFO "SELinux: Context %s became invalid (unmapped).\n",
c->str);
/* Unmappable is not fatal for the reload; report success. */
rc = 0;
goto out;
}
/*
 * Snapshot the policy-capability bits of the loaded policydb into the
 * global selinux_policycap_* flags.
 */
static void security_load_policycaps(void)
{
	selinux_policycap_openperm =
		ebitmap_get_bit(&policydb.policycaps,
				POLICYDB_CAPABILITY_OPENPERM);
	selinux_policycap_netpeer =
		ebitmap_get_bit(&policydb.policycaps,
				POLICYDB_CAPABILITY_NETPEER);
}
static int security_preserve_bools(struct policydb *p);
/**
* security_load_policy - Load a security policy configuration.
* @data: binary policy data
* @len: length of data in bytes
*
* Load a new set of security policy configuration data,
* validate it and convert the SID table as necessary.
* This function will flush the access vector cache after
* loading the new policy.
*/
int security_load_policy(void *data, size_t len)
{
struct policydb oldpolicydb, newpolicydb;
struct sidtab oldsidtab, newsidtab;
struct selinux_mapping *oldmap, *map = NULL;
struct convert_context_args args;
u32 seqno;
u16 map_size;
int rc = 0;
struct policy_file file = { data, len }, *fp = &file;
/* First load: parse directly into the live policydb/sidtab. */
if (!ss_initialized) {
avtab_cache_init();
rc = policydb_read(&policydb, fp);
if (rc) {
avtab_cache_destroy();
return rc;
}
policydb.len = len;
rc = selinux_set_mapping(&policydb, secclass_map,
&current_mapping,
&current_mapping_size);
if (rc) {
policydb_destroy(&policydb);
avtab_cache_destroy();
return rc;
}
rc = policydb_load_isids(&policydb, &sidtab);
if (rc) {
policydb_destroy(&policydb);
avtab_cache_destroy();
return rc;
}
security_load_policycaps();
ss_initialized = 1;
seqno = ++latest_granting;
selinux_complete_init();
/* Flush caches and broadcast the policy-load event. */
avc_ss_reset(seqno);
selnl_notify_policyload(seqno);
selinux_status_update_policyload(seqno);
selinux_netlbl_cache_invalidate();
selinux_xfrm_notify_policyload();
return 0;
}
#if 0
sidtab_hash_eval(&sidtab, "sids");
#endif
/* Reload: build the new policy off to the side, then swap it in. */
rc = policydb_read(&newpolicydb, fp);
if (rc)
return rc;
newpolicydb.len = len;
/* If switching between different policy types, log MLS status */
if (policydb.mls_enabled && !newpolicydb.mls_enabled)
printk(KERN_INFO "SELinux: Disabling MLS support...\n");
else if (!policydb.mls_enabled && newpolicydb.mls_enabled)
printk(KERN_INFO "SELinux: Enabling MLS support...\n");
rc = policydb_load_isids(&newpolicydb, &newsidtab);
if (rc) {
printk(KERN_ERR "SELinux: unable to load the initial SIDs\n");
policydb_destroy(&newpolicydb);
return rc;
}
rc = selinux_set_mapping(&newpolicydb, secclass_map, &map, &map_size);
if (rc)
goto err;
/* Carry current boolean values over into the new policy. */
rc = security_preserve_bools(&newpolicydb);
if (rc) {
printk(KERN_ERR "SELinux: unable to preserve booleans\n");
goto err;
}
/* Clone the SID table. */
sidtab_shutdown(&sidtab);
rc = sidtab_map(&sidtab, clone_sid, &newsidtab);
if (rc)
goto err;
/*
* Convert the internal representations of contexts
* in the new SID table.
*/
args.oldp = &policydb;
args.newp = &newpolicydb;
rc = sidtab_map(&newsidtab, convert_context, &args);
if (rc) {
printk(KERN_ERR "SELinux: unable to convert the internal"
" representation of contexts in the new SID"
" table\n");
goto err;
}
/* Save the old policydb and SID table to free later. */
memcpy(&oldpolicydb, &policydb, sizeof policydb);
sidtab_set(&oldsidtab, &sidtab);
/* Install the new policydb and SID table. */
write_lock_irq(&policy_rwlock);
memcpy(&policydb, &newpolicydb, sizeof policydb);
sidtab_set(&sidtab, &newsidtab);
security_load_policycaps();
oldmap = current_mapping;
current_mapping = map;
current_mapping_size = map_size;
seqno = ++latest_granting;
write_unlock_irq(&policy_rwlock);
/* Free the old policydb and SID table. */
policydb_destroy(&oldpolicydb);
sidtab_destroy(&oldsidtab);
kfree(oldmap);
/* Flush caches and broadcast the policy-load event. */
avc_ss_reset(seqno);
selnl_notify_policyload(seqno);
selinux_status_update_policyload(seqno);
selinux_netlbl_cache_invalidate();
selinux_xfrm_notify_policyload();
return 0;
err:
kfree(map);
sidtab_destroy(&newsidtab);
policydb_destroy(&newpolicydb);
return rc;
}
/* Return the byte length of the currently loaded binary policy image. */
size_t security_policydb_len(void)
{
	size_t plen;

	read_lock(&policy_rwlock);
	plen = policydb.len;
	read_unlock(&policy_rwlock);

	return plen;
}
/**
* security_port_sid - Obtain the SID for a port.
* @protocol: protocol number
* @port: port number
* @out_sid: security identifier
*/
int security_port_sid(u8 protocol, u16 port, u32 *out_sid)
{
	struct ocontext *c;
	int rc = 0;

	read_lock(&policy_rwlock);

	/* Find the first portcon entry covering (protocol, port). */
	for (c = policydb.ocontexts[OCON_PORT]; c; c = c->next) {
		if (c->u.port.protocol == protocol &&
		    c->u.port.low_port <= port &&
		    c->u.port.high_port >= port)
			break;
	}

	if (!c) {
		*out_sid = SECINITSID_PORT;
		goto out;
	}

	/* Lazily assign a SID to the matched entry's context. */
	if (!c->sid[0]) {
		rc = sidtab_context_to_sid(&sidtab,
					   &c->context[0],
					   &c->sid[0]);
		if (rc)
			goto out;
	}
	*out_sid = c->sid[0];
out:
	read_unlock(&policy_rwlock);
	return rc;
}
/**
* security_netif_sid - Obtain the SID for a network interface.
* @name: interface name
* @if_sid: interface SID
*/
int security_netif_sid(char *name, u32 *if_sid)
{
	int rc = 0;
	struct ocontext *c;

	read_lock(&policy_rwlock);

	/* Find the netifcon entry for this interface name, if any. */
	for (c = policydb.ocontexts[OCON_NETIF]; c; c = c->next) {
		if (strcmp(name, c->u.name) == 0)
			break;
	}

	if (!c) {
		*if_sid = SECINITSID_NETIF;
		goto out;
	}

	/* Lazily assign SIDs for both contexts attached to the entry. */
	if (!c->sid[0] || !c->sid[1]) {
		rc = sidtab_context_to_sid(&sidtab,
					   &c->context[0],
					   &c->sid[0]);
		if (rc)
			goto out;
		rc = sidtab_context_to_sid(&sidtab,
					   &c->context[1],
					   &c->sid[1]);
		if (rc)
			goto out;
	}
	*if_sid = c->sid[0];
out:
	read_unlock(&policy_rwlock);
	return rc;
}
/*
 * Return nonzero iff @input, masked by @mask, equals @addr. All three
 * point to four u32 words (one IPv6 address).
 */
static int match_ipv6_addrmask(u32 *input, u32 *addr, u32 *mask)
{
	int i;

	for (i = 0; i < 4; i++)
		if ((input[i] & mask[i]) != addr[i])
			return 0;
	return 1;
}
/**
* security_node_sid - Obtain the SID for a node (host).
* @domain: communication domain aka address family
* @addrp: address
* @addrlen: address length in bytes
* @out_sid: security identifier
*/
int security_node_sid(u16 domain,
void *addrp,
u32 addrlen,
u32 *out_sid)
{
int rc;
struct ocontext *c;
read_lock(&policy_rwlock);
/* Pick the nodecon list matching the address family and scan it. */
switch (domain) {
case AF_INET: {
u32 addr;
rc = -EINVAL;
if (addrlen != sizeof(u32))
goto out;
addr = *((u32 *)addrp);
c = policydb.ocontexts[OCON_NODE];
while (c) {
if (c->u.node.addr == (addr & c->u.node.mask))
break;
c = c->next;
}
break;
}
case AF_INET6:
rc = -EINVAL;
/* 16 bytes expected for an IPv6 address. */
if (addrlen != sizeof(u64) * 2)
goto out;
c = policydb.ocontexts[OCON_NODE6];
while (c) {
if (match_ipv6_addrmask(addrp, c->u.node6.addr,
c->u.node6.mask))
break;
c = c->next;
}
break;
default:
/* Other address families fall back to the default node SID. */
rc = 0;
*out_sid = SECINITSID_NODE;
goto out;
}
if (c) {
/* Lazily assign a SID to the matched entry's context. */
if (!c->sid[0]) {
rc = sidtab_context_to_sid(&sidtab,
&c->context[0],
&c->sid[0]);
if (rc)
goto out;
}
*out_sid = c->sid[0];
} else {
*out_sid = SECINITSID_NODE;
}
rc = 0;
out:
read_unlock(&policy_rwlock);
return rc;
}
#define SIDS_NEL 25
/**
* security_get_user_sids - Obtain reachable SIDs for a user.
* @fromsid: starting SID
* @username: username
* @sids: array of reachable SIDs for user
* @nel: number of elements in @sids
*
* Generate the set of SIDs for legal security contexts
* for a given user that can be reached by @fromsid.
* Set *@sids to point to a dynamically allocated
* array containing the set of SIDs. Set *@nel to the
* number of elements in the array.
*/
int security_get_user_sids(u32 fromsid,
char *username,
u32 **sids,
u32 *nel)
{
struct context *fromcon, usercon;
u32 *mysids = NULL, *mysids2, sid;
u32 mynel = 0, maxnel = SIDS_NEL;
struct user_datum *user;
struct role_datum *role;
struct ebitmap_node *rnode, *tnode;
int rc = 0, i, j;
*sids = NULL;
*nel = 0;
/* No policy loaded: report an empty set, not an error. */
if (!ss_initialized)
goto out;
read_lock(&policy_rwlock);
context_init(&usercon);
rc = -EINVAL;
fromcon = sidtab_search(&sidtab, fromsid);
if (!fromcon)
goto out_unlock;
rc = -EINVAL;
user = hashtab_search(policydb.p_users.table, username);
if (!user)
goto out_unlock;
usercon.user = user->value;
rc = -ENOMEM;
mysids = kcalloc(maxnel, sizeof(*mysids), GFP_ATOMIC);
if (!mysids)
goto out_unlock;
/* Enumerate every (role, type) pair authorized for the user and
build a candidate SID for each. */
ebitmap_for_each_positive_bit(&user->roles, rnode, i) {
role = policydb.role_val_to_struct[i];
usercon.role = i + 1;
ebitmap_for_each_positive_bit(&role->types, tnode, j) {
usercon.type = j + 1;
/* Skip combinations with no usable MLS range. */
if (mls_setup_user_range(fromcon, user, &usercon))
continue;
rc = sidtab_context_to_sid(&sidtab, &usercon, &sid);
if (rc)
goto out_unlock;
if (mynel < maxnel) {
mysids[mynel++] = sid;
} else {
/* Grow the array by SIDS_NEL entries. */
rc = -ENOMEM;
maxnel += SIDS_NEL;
mysids2 = kcalloc(maxnel, sizeof(*mysids2), GFP_ATOMIC);
if (!mysids2)
goto out_unlock;
memcpy(mysids2, mysids, mynel * sizeof(*mysids2));
kfree(mysids);
mysids = mysids2;
mysids[mynel++] = sid;
}
}
}
rc = 0;
out_unlock:
read_unlock(&policy_rwlock);
if (rc || !mynel) {
kfree(mysids);
goto out;
}
/* Outside the lock: keep only SIDs that fromsid may transition to. */
rc = -ENOMEM;
mysids2 = kcalloc(mynel, sizeof(*mysids2), GFP_KERNEL);
if (!mysids2) {
kfree(mysids);
goto out;
}
for (i = 0, j = 0; i < mynel; i++) {
struct av_decision dummy_avd;
rc = avc_has_perm_noaudit(fromsid, mysids[i],
SECCLASS_PROCESS, /* kernel value */
PROCESS__TRANSITION, AVC_STRICT,
&dummy_avd);
if (!rc)
mysids2[j++] = mysids[i];
cond_resched();
}
rc = 0;
kfree(mysids);
*sids = mysids2;
*nel = j;
out:
return rc;
}
/**
* security_genfs_sid - Obtain a SID for a file in a filesystem
* @fstype: filesystem type
* @path: path from root of mount
 * @orig_sclass: file security class
* @sid: SID for path
*
* Obtain a SID to use for a file in a filesystem that
* cannot support xattr or use a fixed labeling behavior like
* transition SIDs or task SIDs.
*/
int security_genfs_sid(const char *fstype,
char *path,
u16 orig_sclass,
u32 *sid)
{
int len;
u16 sclass;
struct genfs *genfs;
struct ocontext *c;
int rc, cmp = 0;
/* Collapse leading double slashes before matching. */
while (path[0] == '/' && path[1] == '/')
path++;
read_lock(&policy_rwlock);
sclass = unmap_class(orig_sclass);
*sid = SECINITSID_UNLABELED;
/* The genfs list is ordered by fstype, so stop at the first entry
that is >= the requested type. */
for (genfs = policydb.genfs; genfs; genfs = genfs->next) {
cmp = strcmp(fstype, genfs->fstype);
if (cmp <= 0)
break;
}
rc = -ENOENT;
if (!genfs || cmp)
goto out;
/* Take the first entry whose path is a prefix of @path (and whose
class matches, if the entry specifies one); the list ordering
determines match precedence. */
for (c = genfs->head; c; c = c->next) {
len = strlen(c->u.name);
if ((!c->v.sclass || sclass == c->v.sclass) &&
(strncmp(c->u.name, path, len) == 0))
break;
}
rc = -ENOENT;
if (!c)
goto out;
/* Lazily assign a SID to the matched entry's context. */
if (!c->sid[0]) {
rc = sidtab_context_to_sid(&sidtab, &c->context[0], &c->sid[0]);
if (rc)
goto out;
}
*sid = c->sid[0];
rc = 0;
out:
read_unlock(&policy_rwlock);
return rc;
}
/**
* security_fs_use - Determine how to handle labeling for a filesystem.
* @fstype: filesystem type
* @behavior: labeling behavior
* @sid: SID for filesystem (superblock)
*/
int security_fs_use(
	const char *fstype,
	unsigned int *behavior,
	u32 *sid)
{
	int rc = 0;
	struct ocontext *c;

	read_lock(&policy_rwlock);

	/* Find an explicit fs_use entry for this filesystem type. */
	for (c = policydb.ocontexts[OCON_FSUSE]; c; c = c->next) {
		if (strcmp(fstype, c->u.name) == 0)
			break;
	}

	if (c) {
		*behavior = c->v.behavior;
		/* Lazily assign a SID to the entry's context. */
		if (!c->sid[0]) {
			rc = sidtab_context_to_sid(&sidtab, &c->context[0],
						   &c->sid[0]);
			if (rc)
				goto out;
		}
		*sid = c->sid[0];
		goto out;
	}

	/* No fs_use entry: fall back to a genfs lookup on the root dir. */
	rc = security_genfs_sid(fstype, "/", SECCLASS_DIR, sid);
	if (rc) {
		*behavior = SECURITY_FS_USE_NONE;
		rc = 0;
	} else {
		*behavior = SECURITY_FS_USE_GENFS;
	}
out:
	read_unlock(&policy_rwlock);
	return rc;
}
/*
 * security_get_bools - export the conditional-boolean table.
 * @len: receives the number of booleans
 * @names: receives a kmalloc'd array of kmalloc'd boolean names
 * @values: receives a kmalloc'd array of current states
 *
 * On success the caller owns and must free both arrays (and each name).
 * On failure all allocations are released and the outputs are reset,
 * so the caller must not free anything. (The previous version leaked
 * the *names pointer array itself and left *names/*values dangling on
 * the error path.)
 */
int security_get_bools(int *len, char ***names, int **values)
{
	int i, rc;

	read_lock(&policy_rwlock);
	*names = NULL;
	*values = NULL;

	rc = 0;
	*len = policydb.p_bools.nprim;
	if (!*len)
		goto out;

	rc = -ENOMEM;
	*names = kcalloc(*len, sizeof(char *), GFP_ATOMIC);
	if (!*names)
		goto err;

	rc = -ENOMEM;
	*values = kcalloc(*len, sizeof(int), GFP_ATOMIC);
	if (!*values)
		goto err;

	for (i = 0; i < *len; i++) {
		size_t name_len;

		(*values)[i] = policydb.bool_val_to_struct[i]->state;
		name_len = strlen(sym_name(&policydb, SYM_BOOLS, i)) + 1;

		rc = -ENOMEM;
		(*names)[i] = kmalloc(sizeof(char) * name_len, GFP_ATOMIC);
		if (!(*names)[i])
			goto err;

		strncpy((*names)[i], sym_name(&policydb, SYM_BOOLS, i), name_len);
		(*names)[i][name_len - 1] = 0;
	}
	rc = 0;
out:
	read_unlock(&policy_rwlock);
	return rc;
err:
	if (*names) {
		/* kcalloc zeroed the array, so kfree(NULL) on unset slots
		 * is harmless. */
		for (i = 0; i < *len; i++)
			kfree((*names)[i]);
		kfree(*names);		/* fix: the array itself was leaked */
		*names = NULL;
	}
	kfree(*values);
	*values = NULL;
	*len = 0;
	goto out;
}
/*
 * security_set_bools - set the current state of all conditional booleans.
 * @len: must equal the number of booleans in the loaded policy
 * @values: new truth value (0/nonzero) per boolean, in policy order
 *
 * Re-evaluates all conditional rules and, on success, flushes the AVC
 * and broadcasts a policy-load event.
 */
int security_set_bools(int len, int *values)
{
int i, rc;
int lenp, seqno = 0;
struct cond_node *cur;
write_lock_irq(&policy_rwlock);
rc = -EFAULT;
lenp = policydb.p_bools.nprim;
if (len != lenp)
goto out;
for (i = 0; i < len; i++) {
/* Audit every boolean whose value actually changes. */
if (!!values[i] != policydb.bool_val_to_struct[i]->state) {
audit_log(current->audit_context, GFP_ATOMIC,
AUDIT_MAC_CONFIG_CHANGE,
"bool=%s val=%d old_val=%d auid=%u ses=%u",
sym_name(&policydb, SYM_BOOLS, i),
!!values[i],
policydb.bool_val_to_struct[i]->state,
from_kuid(&init_user_ns, audit_get_loginuid(current)),
audit_get_sessionid(current));
}
if (values[i])
policydb.bool_val_to_struct[i]->state = 1;
else
policydb.bool_val_to_struct[i]->state = 0;
}
/* Re-evaluate which conditional rules are enabled. */
for (cur = policydb.cond_list; cur; cur = cur->next) {
rc = evaluate_cond_node(&policydb, cur);
if (rc)
goto out;
}
seqno = ++latest_granting;
rc = 0;
out:
write_unlock_irq(&policy_rwlock);
/* Notify only after dropping the lock, and only on success. */
if (!rc) {
avc_ss_reset(seqno);
selnl_notify_policyload(seqno);
selinux_status_update_policyload(seqno);
selinux_xfrm_notify_policyload();
}
return rc;
}
/*
 * Return the current state (0/1) of boolean number @bool (0-based index
 * into the policy's boolean table), or -EFAULT if out of range.
 * NOTE(review): the parameter is named "bool", which shadows the C99/
 * stdbool keyword; the kernel does not include <stdbool.h> here, so it
 * compiles, but the name is best avoided.
 */
int security_get_bool_value(int bool)
{
int rc;
int len;
read_lock(&policy_rwlock);
rc = -EFAULT;
len = policydb.p_bools.nprim;
if (bool >= len)
goto out;
rc = policydb.bool_val_to_struct[bool]->state;
out:
read_unlock(&policy_rwlock);
return rc;
}
/*
 * Carry the current boolean states over into a newly loaded policy @p
 * (matching booleans by name), then re-evaluate @p's conditional rules.
 * Booleans present only in the new policy keep their compiled-in state.
 */
static int security_preserve_bools(struct policydb *p)
{
int rc, nbools = 0, *bvalues = NULL, i;
char **bnames = NULL;
struct cond_bool_datum *booldatum;
struct cond_node *cur;
/* Snapshot names and values from the currently live policy. */
rc = security_get_bools(&nbools, &bnames, &bvalues);
if (rc)
goto out;
for (i = 0; i < nbools; i++) {
booldatum = hashtab_search(p->p_bools.table, bnames[i]);
if (booldatum)
booldatum->state = bvalues[i];
}
for (cur = p->cond_list; cur; cur = cur->next) {
rc = evaluate_cond_node(p, cur);
if (rc)
goto out;
}
out:
if (bnames) {
for (i = 0; i < nbools; i++)
kfree(bnames[i]);
}
kfree(bnames);
kfree(bvalues);
return rc;
}
/*
* security_sid_mls_copy() - computes a new sid based on the given
* sid and the mls portion of mls_sid.
*/
int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid)
{
struct context *context1;
struct context *context2;
struct context newcon;
char *s;
u32 len;
int rc;
rc = 0;
/* Without MLS (or before policy load) the MLS copy is a no-op. */
if (!ss_initialized || !policydb.mls_enabled) {
*new_sid = sid;
goto out;
}
context_init(&newcon);
read_lock(&policy_rwlock);
rc = -EINVAL;
context1 = sidtab_search(&sidtab, sid);
if (!context1) {
printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
__func__, sid);
goto out_unlock;
}
rc = -EINVAL;
context2 = sidtab_search(&sidtab, mls_sid);
if (!context2) {
printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
__func__, mls_sid);
goto out_unlock;
}
/* user/role/type from @sid, MLS range from @mls_sid. */
newcon.user = context1->user;
newcon.role = context1->role;
newcon.type = context1->type;
rc = mls_context_cpy(&newcon, context2);
if (rc)
goto out_unlock;
/* Check the validity of the new context. */
if (!policydb_context_isvalid(&policydb, &newcon)) {
rc = convert_context_handle_invalid_context(&newcon);
if (rc) {
/* Enforcing: audit the rejected combination. */
if (!context_struct_to_string(&newcon, &s, &len)) {
audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
"security_sid_mls_copy: invalid context %s", s);
kfree(s);
}
goto out_unlock;
}
}
rc = sidtab_context_to_sid(&sidtab, &newcon, new_sid);
out_unlock:
read_unlock(&policy_rwlock);
context_destroy(&newcon);
out:
return rc;
}
/**
* security_net_peersid_resolve - Compare and resolve two network peer SIDs
* @nlbl_sid: NetLabel SID
* @nlbl_type: NetLabel labeling protocol type
 * @xfrm_sid: XFRM SID
 * @peer_sid: receives the resolved peer SID (SECSID_NULL on failure)
 *
* Description:
* Compare the @nlbl_sid and @xfrm_sid values and if the two SIDs can be
* resolved into a single SID it is returned via @peer_sid and the function
* returns zero. Otherwise @peer_sid is set to SECSID_NULL and the function
* returns a negative value. A table summarizing the behavior is below:
*
 *                               | function return |      @peer_sid
* ------------------------------+-----------------+-----------------
* no peer labels | 0 | SECSID_NULL
* single peer label | 0 | <peer_label>
* multiple, consistent labels | 0 | <peer_label>
* multiple, inconsistent labels | -<errno> | SECSID_NULL
*
*/
int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type,
u32 xfrm_sid,
u32 *peer_sid)
{
int rc;
struct context *nlbl_ctx;
struct context *xfrm_ctx;
*peer_sid = SECSID_NULL;
/* handle the common (which also happens to be the set of easy) cases
* right away, these two if statements catch everything involving a
* single or absent peer SID/label */
if (xfrm_sid == SECSID_NULL) {
*peer_sid = nlbl_sid;
return 0;
}
/* NOTE: an nlbl_type == NETLBL_NLTYPE_UNLABELED is a "fallback" label
* and is treated as if nlbl_sid == SECSID_NULL when a XFRM SID/label
* is present */
if (nlbl_sid == SECSID_NULL || nlbl_type == NETLBL_NLTYPE_UNLABELED) {
*peer_sid = xfrm_sid;
return 0;
}
/* we don't need to check ss_initialized here since the only way both
* nlbl_sid and xfrm_sid are not equal to SECSID_NULL would be if the
* security server was initialized and ss_initialized was true */
if (!policydb.mls_enabled)
return 0;
read_lock(&policy_rwlock);
rc = -EINVAL;
nlbl_ctx = sidtab_search(&sidtab, nlbl_sid);
if (!nlbl_ctx) {
printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
__func__, nlbl_sid);
goto out;
}
rc = -EINVAL;
xfrm_ctx = sidtab_search(&sidtab, xfrm_sid);
if (!xfrm_ctx) {
printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
__func__, xfrm_sid);
goto out;
}
/* Both labels present: they resolve only if their MLS ranges agree. */
rc = (mls_context_cmp(nlbl_ctx, xfrm_ctx) ? 0 : -EACCES);
if (rc)
goto out;
/* at present NetLabel SIDs/labels really only carry MLS
* information so if the MLS portion of the NetLabel SID
* matches the MLS portion of the labeled XFRM SID/label
* then pass along the XFRM SID as it is the most
* expressive */
*peer_sid = xfrm_sid;
out:
read_unlock(&policy_rwlock);
return rc;
}
/*
 * hashtab_map() callback: duplicate each class name into the caller's
 * array, indexed by (class value - 1).
 */
static int get_classes_callback(void *k, void *d, void *args)
{
	struct class_datum *datum = d;
	char **classes = args;
	char *name = k;
	int idx = datum->value - 1;

	classes[idx] = kstrdup(name, GFP_ATOMIC);
	return classes[idx] ? 0 : -ENOMEM;
}
int security_get_classes(char ***classes, int *nclasses)
{
int rc;
read_lock(&policy_rwlock);
rc = -ENOMEM;
*nclasses = policydb.p_classes.nprim;
*classes = kcalloc(*nclasses, sizeof(**classes), GFP_ATOMIC);
if (!*classes)
goto out;
rc = hashtab_map(policydb.p_classes.table, get_classes_callback,
*classes);
if (rc) {
int i;
for (i = 0; i < *nclasses; i++)
kfree((*classes)[i]);
kfree(*classes);
}
out:
read_unlock(&policy_rwlock);
return rc;
}
/*
 * hashtab_map() callback: duplicate each permission name into the
 * caller's array, indexed by (permission value - 1).
 */
static int get_permissions_callback(void *k, void *d, void *args)
{
	struct perm_datum *datum = d;
	char **perms = args;
	char *name = k;
	int idx = datum->value - 1;

	perms[idx] = kstrdup(name, GFP_ATOMIC);
	return perms[idx] ? 0 : -ENOMEM;
}
int security_get_permissions(char *class, char ***perms, int *nperms)
{
int rc, i;
struct class_datum *match;
read_lock(&policy_rwlock);
rc = -EINVAL;
match = hashtab_search(policydb.p_classes.table, class);
if (!match) {
printk(KERN_ERR "SELinux: %s: unrecognized class %s\n",
__func__, class);
goto out;
}
rc = -ENOMEM;
*nperms = match->permissions.nprim;
*perms = kcalloc(*nperms, sizeof(**perms), GFP_ATOMIC);
if (!*perms)
goto out;
if (match->comdatum) {
rc = hashtab_map(match->comdatum->permissions.table,
get_permissions_callback, *perms);
if (rc)
goto err;
}
rc = hashtab_map(match->permissions.table, get_permissions_callback,
*perms);
if (rc)
goto err;
out:
read_unlock(&policy_rwlock);
return rc;
err:
read_unlock(&policy_rwlock);
for (i = 0; i < *nperms; i++)
kfree((*perms)[i]);
kfree(*perms);
return rc;
}
/* Report the loaded policy's reject_unknown flag (read without taking
 * policy_rwlock). */
int security_get_reject_unknown(void)
{
return policydb.reject_unknown;
}
/* Report the loaded policy's allow_unknown flag (read without taking
 * policy_rwlock). */
int security_get_allow_unknown(void)
{
return policydb.allow_unknown;
}
/**
* security_policycap_supported - Check for a specific policy capability
* @req_cap: capability
*
* Description:
* This function queries the currently loaded policy to see if it supports the
* capability specified by @req_cap. Returns true (1) if the capability is
* supported, false (0) if it isn't supported.
*
*/
int security_policycap_supported(unsigned int req_cap)
{
	int supported;

	read_lock(&policy_rwlock);
	supported = ebitmap_get_bit(&policydb.policycaps, req_cap);
	read_unlock(&policy_rwlock);

	return supported;
}
/* A compiled SELinux audit-filter rule: the policy sequence number it
 * was built against (rules go stale across policy reloads) plus the
 * context fields to compare with. */
struct selinux_audit_rule {
u32 au_seqno;
struct context au_ctxt;
};
/* Release a rule allocated by selinux_audit_rule_init(); NULL is OK. */
void selinux_audit_rule_free(void *vrule)
{
	struct selinux_audit_rule *rule = vrule;

	if (!rule)
		return;
	context_destroy(&rule->au_ctxt);
	kfree(rule);
}
/*
 * selinux_audit_rule_init - compile an audit filter string into a rule.
 * @field: AUDIT_SUBJ_*/
/* or AUDIT_OBJ_* field being matched
 * @op: comparison operator (Audit_equal etc.)
 * @rulestr: the user/role/type name or MLS level to match
 * @vrule: receives the allocated rule (NULL on failure)
 *
 * Returns 0 on success, -EOPNOTSUPP before policy load, -EINVAL for an
 * unsupported field/op or unknown symbol, -ENOMEM on allocation failure.
 */
int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
{
struct selinux_audit_rule *tmprule;
struct role_datum *roledatum;
struct type_datum *typedatum;
struct user_datum *userdatum;
struct selinux_audit_rule **rule = (struct selinux_audit_rule **)vrule;
int rc = 0;
#ifdef CONFIG_TIMA_RKP_RO_CRED
/* Vendor integrity check on current credentials. */
if ((rc = security_integrity_current()))
return rc;
#endif
*rule = NULL;
if (!ss_initialized)
return -EOPNOTSUPP;
/* Validate the field/op combination before allocating anything. */
switch (field) {
case AUDIT_SUBJ_USER:
case AUDIT_SUBJ_ROLE:
case AUDIT_SUBJ_TYPE:
case AUDIT_OBJ_USER:
case AUDIT_OBJ_ROLE:
case AUDIT_OBJ_TYPE:
/* only 'equals' and 'not equals' fit user, role, and type */
if (op != Audit_equal && op != Audit_not_equal)
return -EINVAL;
break;
case AUDIT_SUBJ_SEN:
case AUDIT_SUBJ_CLR:
case AUDIT_OBJ_LEV_LOW:
case AUDIT_OBJ_LEV_HIGH:
/* we do not allow a range, indicated by the presence of '-' */
if (strchr(rulestr, '-'))
return -EINVAL;
break;
default:
/* only the above fields are valid */
return -EINVAL;
}
tmprule = kzalloc(sizeof(struct selinux_audit_rule), GFP_KERNEL);
if (!tmprule)
return -ENOMEM;
context_init(&tmprule->au_ctxt);
read_lock(&policy_rwlock);
/* Record the policy generation so stale rules can be detected. */
tmprule->au_seqno = latest_granting;
/* Resolve the symbol name into the single context field we match on. */
switch (field) {
case AUDIT_SUBJ_USER:
case AUDIT_OBJ_USER:
rc = -EINVAL;
userdatum = hashtab_search(policydb.p_users.table, rulestr);
if (!userdatum)
goto out;
tmprule->au_ctxt.user = userdatum->value;
break;
case AUDIT_SUBJ_ROLE:
case AUDIT_OBJ_ROLE:
rc = -EINVAL;
roledatum = hashtab_search(policydb.p_roles.table, rulestr);
if (!roledatum)
goto out;
tmprule->au_ctxt.role = roledatum->value;
break;
case AUDIT_SUBJ_TYPE:
case AUDIT_OBJ_TYPE:
rc = -EINVAL;
typedatum = hashtab_search(policydb.p_types.table, rulestr);
if (!typedatum)
goto out;
tmprule->au_ctxt.type = typedatum->value;
break;
case AUDIT_SUBJ_SEN:
case AUDIT_SUBJ_CLR:
case AUDIT_OBJ_LEV_LOW:
case AUDIT_OBJ_LEV_HIGH:
rc = mls_from_string(rulestr, &tmprule->au_ctxt, GFP_ATOMIC);
if (rc)
goto out;
break;
}
rc = 0;
out:
read_unlock(&policy_rwlock);
if (rc) {
selinux_audit_rule_free(tmprule);
tmprule = NULL;
}
*rule = tmprule;
return rc;
}
/* Return 1 if @rule references at least one SELinux-specific audit field
 * (subject/object user, role, type, or MLS level), 0 otherwise. */
int selinux_audit_rule_known(struct audit_krule *rule)
{
	int idx;
#ifdef CONFIG_TIMA_RKP_RO_CRED
	int rc = security_integrity_current();
	if (rc)
		return rc;
#endif

	for (idx = 0; idx < rule->field_count; idx++) {
		struct audit_field *field = &rule->fields[idx];

		switch (field->type) {
		case AUDIT_SUBJ_USER:
		case AUDIT_SUBJ_ROLE:
		case AUDIT_SUBJ_TYPE:
		case AUDIT_SUBJ_SEN:
		case AUDIT_SUBJ_CLR:
		case AUDIT_OBJ_USER:
		case AUDIT_OBJ_ROLE:
		case AUDIT_OBJ_TYPE:
		case AUDIT_OBJ_LEV_LOW:
		case AUDIT_OBJ_LEV_HIGH:
			return 1;
		}
	}

	return 0;
}
/*
 * selinux_audit_rule_match - check whether @sid satisfies an audit rule.
 * @sid: the SID of the subject/object being audited
 * @field: which AUDIT_* field of the rule to compare
 * @op: the Audit_* comparison operator
 * @vrule: the rule built by selinux_audit_rule_init()
 * @actx: audit context used only for error logging
 *
 * Returns 1 on match, 0 on no match, -ENOENT for a missing rule or
 * unknown SID, and -ESTALE when the rule predates the currently loaded
 * policy.
 */
int selinux_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule,
			     struct audit_context *actx)
{
	struct context *ctxt;
	struct mls_level *level;
	struct selinux_audit_rule *rule = vrule;
	int match = 0;
#ifdef CONFIG_TIMA_RKP_RO_CRED
	int rc;
	if ((rc = security_integrity_current()))
		return rc;
#endif
	if (!rule) {
		audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR,
			  "selinux_audit_rule_match: missing rule\n");
		return -ENOENT;
	}

	read_lock(&policy_rwlock);

	/* The rule was compiled against an older policy; its user/role/type
	   values may no longer be meaningful, so refuse to use it. */
	if (rule->au_seqno < latest_granting) {
		audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR,
			  "selinux_audit_rule_match: stale rule\n");
		match = -ESTALE;
		goto out;
	}

	ctxt = sidtab_search(&sidtab, sid);
	if (!ctxt) {
		audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR,
			  "selinux_audit_rule_match: unrecognized SID %d\n",
			  sid);
		match = -ENOENT;
		goto out;
	}

	/* a field/op pair that is not caught here will simply fall through
	   without a match */
	switch (field) {
	case AUDIT_SUBJ_USER:
	case AUDIT_OBJ_USER:
		switch (op) {
		case Audit_equal:
			match = (ctxt->user == rule->au_ctxt.user);
			break;
		case Audit_not_equal:
			match = (ctxt->user != rule->au_ctxt.user);
			break;
		}
		break;
	case AUDIT_SUBJ_ROLE:
	case AUDIT_OBJ_ROLE:
		switch (op) {
		case Audit_equal:
			match = (ctxt->role == rule->au_ctxt.role);
			break;
		case Audit_not_equal:
			match = (ctxt->role != rule->au_ctxt.role);
			break;
		}
		break;
	case AUDIT_SUBJ_TYPE:
	case AUDIT_OBJ_TYPE:
		switch (op) {
		case Audit_equal:
			match = (ctxt->type == rule->au_ctxt.type);
			break;
		case Audit_not_equal:
			match = (ctxt->type != rule->au_ctxt.type);
			break;
		}
		break;
	case AUDIT_SUBJ_SEN:
	case AUDIT_SUBJ_CLR:
	case AUDIT_OBJ_LEV_LOW:
	case AUDIT_OBJ_LEV_HIGH:
		/* SEN/LEV_LOW compare against the low end of the MLS range,
		   CLR/LEV_HIGH against the high end.  The rule side always
		   stores its level in range.level[0]. */
		level = ((field == AUDIT_SUBJ_SEN ||
			  field == AUDIT_OBJ_LEV_LOW) ?
			 &ctxt->range.level[0] : &ctxt->range.level[1]);
		switch (op) {
		case Audit_equal:
			match = mls_level_eq(&rule->au_ctxt.range.level[0],
					     level);
			break;
		case Audit_not_equal:
			match = !mls_level_eq(&rule->au_ctxt.range.level[0],
					      level);
			break;
		case Audit_lt:
			/* strictly dominated: rule dominates level but is
			   not equal to it */
			match = (mls_level_dom(&rule->au_ctxt.range.level[0],
					       level) &&
				 !mls_level_eq(&rule->au_ctxt.range.level[0],
					       level));
			break;
		case Audit_le:
			match = mls_level_dom(&rule->au_ctxt.range.level[0],
					      level);
			break;
		case Audit_gt:
			match = (mls_level_dom(level,
					       &rule->au_ctxt.range.level[0]) &&
				 !mls_level_eq(level,
					       &rule->au_ctxt.range.level[0]));
			break;
		case Audit_ge:
			match = mls_level_dom(level,
					      &rule->au_ctxt.range.level[0]);
			break;
		}
	}

out:
	read_unlock(&policy_rwlock);
	return match;
}
/* Hook invoked when the AVC signals a policy reset; points at the audit
 * subsystem's routine for recompiling its LSM rules. */
static int (*aurule_callback)(void) = audit_update_lsm_rules;

/* AVC callback: ask the audit subsystem to rebuild its SELinux rules
 * whenever the policy is reloaded. */
static int aurule_avc_callback(u32 event)
{
	if (event != AVC_CALLBACK_RESET || !aurule_callback)
		return 0;
	return aurule_callback();
}
/* Register the audit-rule reset hook with the AVC at boot.  A failure here
 * leaves audit filtering permanently out of sync with policy, so panic. */
static int __init aurule_init(void)
{
	int rc = avc_add_callback(aurule_avc_callback, AVC_CALLBACK_RESET);

	if (rc)
		panic("avc_add_callback() failed, error %d\n", rc);
	return rc;
}
__initcall(aurule_init);
#ifdef CONFIG_NETLABEL
/**
 * security_netlbl_cache_add - Add an entry to the NetLabel cache
 * @secattr: the NetLabel packet security attributes
 * @sid: the SELinux SID
 *
 * Description:
 * Cache the secattr -> SID mapping on @secattr so NetLabel can short-circuit
 * future conversions.  Best effort: allocation failures simply leave the
 * secattr uncached.  Assumes @secattr has already been initialized.
 *
 */
static void security_netlbl_cache_add(struct netlbl_lsm_secattr *secattr,
				      u32 sid)
{
	u32 *sid_copy = kmalloc(sizeof(*sid_copy), GFP_ATOMIC);

	if (sid_copy == NULL)
		return;
	*sid_copy = sid;

	secattr->cache = netlbl_secattr_cache_alloc(GFP_ATOMIC);
	if (secattr->cache == NULL) {
		kfree(sid_copy);
		return;
	}

	/* the cache owns sid_copy from here on and releases it via kfree */
	secattr->cache->free = kfree;
	secattr->cache->data = sid_copy;
	secattr->flags |= NETLBL_SECATTR_CACHE;
}
/**
 * security_netlbl_secattr_to_sid - Convert a NetLabel secattr to a SELinux SID
 * @secattr: the NetLabel packet security attributes
 * @sid: the SELinux SID
 *
 * Description:
 * Convert the given NetLabel security attributes in @secattr into a
 * SELinux SID. If the @secattr field does not contain a full SELinux
 * SID/context then use SECINITSID_NETMSG as the foundation. If possible the
 * 'cache' field of @secattr is set and the CACHE flag is set; this is to
 * allow the @secattr to be used by NetLabel to cache the secattr to SID
 * conversion for future lookups. Returns zero on success, negative values on
 * failure.
 *
 */
int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
				   u32 *sid)
{
	int rc;
	struct context *ctx;
	struct context ctx_new;

	/* No policy loaded yet: there is nothing to map against. */
	if (!ss_initialized) {
		*sid = SECSID_NULL;
		return 0;
	}

	read_lock(&policy_rwlock);

	if (secattr->flags & NETLBL_SECATTR_CACHE)
		/* Fast path: reuse a previously cached conversion. */
		*sid = *(u32 *)secattr->cache->data;
	else if (secattr->flags & NETLBL_SECATTR_SECID)
		*sid = secattr->attr.secid;
	else if (secattr->flags & NETLBL_SECATTR_MLS_LVL) {
		/* Build a fresh context: user/role/type come from the
		   SECINITSID_NETMSG initial SID, MLS data from the packet. */
		rc = -EIDRM;
		ctx = sidtab_search(&sidtab, SECINITSID_NETMSG);
		if (ctx == NULL)
			goto out;

		context_init(&ctx_new);
		ctx_new.user = ctx->user;
		ctx_new.role = ctx->role;
		ctx_new.type = ctx->type;
		mls_import_netlbl_lvl(&ctx_new, secattr);
		if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
			rc = ebitmap_netlbl_import(&ctx_new.range.level[0].cat,
						   secattr->attr.mls.cat);
			if (rc)
				goto out;
			/* single-level context: high level mirrors the low
			   level (shallow struct copy of the bitmap head) */
			memcpy(&ctx_new.range.level[1].cat,
			       &ctx_new.range.level[0].cat,
			       sizeof(ctx_new.range.level[0].cat));
		}
		rc = -EIDRM;
		if (!mls_context_isvalid(&policydb, &ctx_new))
			goto out_free;

		rc = sidtab_context_to_sid(&sidtab, &ctx_new, sid);
		if (rc)
			goto out_free;

		security_netlbl_cache_add(secattr, *sid);

		/* only level[0].cat is destroyed: level[1].cat shares its
		   nodes via the shallow memcpy above */
		ebitmap_destroy(&ctx_new.range.level[0].cat);
	} else
		*sid = SECSID_NULL;

	read_unlock(&policy_rwlock);
	return 0;

out_free:
	ebitmap_destroy(&ctx_new.range.level[0].cat);
out:
	read_unlock(&policy_rwlock);
	return rc;
}
/**
 * security_netlbl_sid_to_secattr - Convert a SELinux SID to a NetLabel secattr
 * @sid: the SELinux SID
 * @secattr: the NetLabel packet security attributes
 *
 * Description:
 * Translate @sid into NetLabel security attributes: the type name becomes
 * the NetLabel domain and the MLS level/categories are exported from the
 * SID's context.  Returns zero on success, negative values on failure.
 *
 */
int security_netlbl_sid_to_secattr(u32 sid, struct netlbl_lsm_secattr *secattr)
{
	int rc = 0;
	struct context *ctx;

	if (!ss_initialized)
		return 0;

	read_lock(&policy_rwlock);

	ctx = sidtab_search(&sidtab, sid);
	if (ctx == NULL) {
		rc = -ENOENT;
		goto out;
	}

	/* domain is a copy of the context's type name */
	secattr->domain = kstrdup(sym_name(&policydb, SYM_TYPES, ctx->type - 1),
				  GFP_ATOMIC);
	if (secattr->domain == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	secattr->attr.secid = sid;
	secattr->flags |= NETLBL_SECATTR_DOMAIN_CPY | NETLBL_SECATTR_SECID;
	mls_export_netlbl_lvl(ctx, secattr);
	rc = mls_export_netlbl_cat(ctx, secattr);
out:
	read_unlock(&policy_rwlock);
	return rc;
}
#endif /* CONFIG_NETLABEL */
/**
 * security_read_policy - read the policy.
 * @data: binary policy data
 * @len: length of data in bytes
 *
 * Serializes the currently loaded policy into a freshly vmalloc'd buffer.
 * On success *data/*len describe the written image; *len is trimmed to the
 * number of bytes actually produced by policydb_write().
 */
int security_read_policy(void **data, size_t *len)
{
	int rc;
	struct policy_file fp;

	/* cannot serialize before a policy has been loaded */
	if (!ss_initialized)
		return -EINVAL;

	*len = security_policydb_len();

	*data = vmalloc_user(*len);
	if (!*data)
		return -ENOMEM;

	fp.data = *data;
	fp.len = *len;

	read_lock(&policy_rwlock);
	rc = policydb_write(&policydb, &fp);
	read_unlock(&policy_rwlock);

	/* NOTE(review): on failure *data remains allocated; callers appear
	   to own the buffer on both paths - confirm they vfree() on error. */
	if (rc)
		return rc;

	/* policydb_write() advanced fp.data past the bytes it emitted, so
	   the pointer difference is the real image size. */
	*len = (unsigned long)fp.data - (unsigned long)*data;
	return 0;
}
|
bju2000/android_kernel_samsung_slteskt
|
security/selinux/ss/services.c
|
C
|
gpl-2.0
| 77,340
|
/*
* linux/arch/arm/mm/flush.c
*
* Copyright (C) 1995-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include "mm.h"
#ifdef CONFIG_CPU_CACHE_VIPT
/*
 * Clean+invalidate the D-cache lines of physical frame @pfn through a
 * temporary kernel alias placed at the same cache colour as the user
 * address @vaddr, so an aliasing VIPT cache operates on the same lines
 * the user mapping would hit.
 */
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));
	/* mcrr c7,c14: clean+invalidate D-cache range [to, to+PAGE_SIZE);
	   mcr c7,c10,4: drain the write buffer */
	asm( "mcrr p15, 0, %1, %0, c14\n"
	" mcr p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}
/*
 * Flush the I-cache for @len bytes of frame @pfn via a kernel alias mapped
 * at the same cache colour as the user address @vaddr (aliasing VIPT
 * I-caches must be maintained through a congruent address).
 */
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long alias = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long start;

	set_top_pte(alias, pfn_pte(pfn, PAGE_KERNEL));
	/* same page offset within the alias as within the user mapping */
	start = alias + (vaddr & (PAGE_SIZE - 1));
	flush_icache_range(start, start + len);
}
/*
 * Flush caches when tearing down or switching away from @mm.
 * - VIVT: delegate to the per-generation vivt flush.
 * - aliasing VIPT: any alias may hold dirty lines, so clean+invalidate
 *   the entire D-cache (c7,c14,0) and drain the write buffer (c7,c10,4).
 * - non-aliasing VIPT: nothing to do.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm( "mcr p15, 0, %0, c7, c14, 0\n"
		" mcr p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}
/*
 * Flush caches for the user range @start..@end of @vma.  VIVT caches can
 * flush by virtual range; aliasing VIPT caches fall back to a whole
 * D-cache clean+invalidate since the data may live at any alias.  The
 * I-cache is invalidated when the range may contain code.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm( "mcr p15, 0, %0, c7, c14, 0\n"
		" mcr p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}
/*
 * Flush caches for a single user page of @vma at @user_addr backed by
 * physical frame @pfn.  Aliasing VIPT D-caches are flushed through a
 * congruent kernel alias; ASID-tagged VIVT I-caches additionally need a
 * full I-cache invalidate for executable mappings.
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}
#else
#define flush_pfn_alias(pfn,vaddr) do { } while (0)
#define flush_icache_alias(pfn,vaddr,len) do { } while (0)
#endif
/* IPI target: invalidate the local CPU's I-cache (used when cache
 * maintenance is not broadcast in hardware). */
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}
/*
 * Make caches coherent after the kernel wrote @len bytes at @kaddr that
 * alias the user page @page mapped at @uaddr (ptrace pokes via
 * copy_to_user_page).
 * - VIVT: flush the kernel range, but only if this mm is resident on the
 *   current CPU.
 * - aliasing VIPT: write back through a colour-congruent alias, then
 *   invalidate the whole I-cache.
 * - non-aliasing VIPT D-cache: only executable mappings need work; flush
 *   the I-cache (via an alias if the I-cache itself aliases) and broadcast
 *   to other CPUs when cache ops are not propagated in hardware.
 */
static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}
/*
 * Copy user data from/to a page which is mapped into a different
 * processes address space. Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	/* preemption is disabled so the cache maintenance in
	   flush_ptrace_access() runs on the CPU that did the memcpy */
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}
/*
 * Write back the kernel mapping of @page and, for aliasing VIPT caches on
 * page-cache pages, also flush the one user-colour alias implied by
 * page->index.
 */
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page. This ensures that data in the physical page is mutually
	 * coherent with the kernels mapping.
	 */
	if (!PageHighMem(page)) {
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
	} else {
		/* highmem: need a temporary kernel mapping to flush through;
		   kmap_high_get() reuses an existing one if present */
		void *addr = kmap_high_get(page);
		if (addr) {
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_high(page);
		} else if (cache_is_vipt()) {
			/* unmapped pages might still be cached */
			addr = kmap_atomic(page);
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_atomic(addr);
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}
/*
 * Flush every user-space alias of @page visible in the current mm by
 * walking the mappings recorded in @mapping's prio tree and flushing the
 * corresponding page of each shared VMA.
 */
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}
#if __LINUX_ARM_ARCH__ >= 6
/*
 * Called when installing a user PTE: make the D-cache contents of the
 * mapped page visible to the I-cache for executable mappings, flushing
 * the D-cache first if the page was not yet marked clean.
 */
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (!pte_present_user(pteval))
		return;
	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	/* only aliasing caches care about the page-cache colour, so only
	   then do we need the mapping for __flush_dcache_page() */
	if (cache_is_vipt_aliasing())
		mapping = page_mapping(page);
	else
		mapping = NULL;

	/* flush at most once per page until someone dirties it again */
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif
/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *    current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcasted.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	/* lazy path: page-cache page with no user mappings - just mark it
	   dirty-in-cache and let __sync_icache_dcache() flush on first map */
	if (!cache_ops_need_broadcast() &&
	    mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
|
Jackeagle/android_kernel_sony_c2305
|
arch/arm/mm/flush.c
|
C
|
gpl-2.0
| 9,199
|
/*
* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <mach/rpm-regulator.h>
#include <mach/msm_bus_board.h>
#include <mach/msm_bus.h>
#include "mach/socinfo.h"
#include "acpuclock.h"
#include "acpuclock-krait.h"
#ifdef CONFIG_PERFLOCK
#include <mach/perflock.h>
#endif
/* HFPLL register layout, configuration values, and the supply voltage
 * required for each PLL operating band (presumably indexed by output
 * frequency band - confirm against acpuclock-krait.h). */
static struct hfpll_data hfpll_data __initdata = {
	.mode_offset = 0x00,
	.l_offset = 0x08,
	.m_offset = 0x0C,
	.n_offset = 0x10,
	.config_offset = 0x04,
	.config_val = 0x7845C665,
	.has_droop_ctl = true,
	.droop_offset = 0x14,
	.droop_val = 0x0108C000,
	/* maximum L-values usable at the LOW and NOM PLL supply levels */
	.low_vdd_l_max = 22,
	.nom_vdd_l_max = 42,
	.vdd[HFPLL_VDD_NONE] = 0,
	.vdd[HFPLL_VDD_LOW] = 945000,
	.vdd[HFPLL_VDD_NOM] = 1050000,
	.vdd[HFPLL_VDD_HIGH] = 1150000,
};
/* Per-CPU (and shared L2) clock domain descriptors: HFPLL base address,
 * auxiliary/secondary clock mux selections, the CP15 indirect address of
 * the clock-mux register (l2cpmr_iaddr), and the named regulators with
 * their maximum microvolt settings. */
static struct scalable scalable[] __initdata = {
	[CPU0] = {
		.hfpll_phys_base = 0x00903200,
		.aux_clk_sel_phys = 0x02088014,
		.aux_clk_sel = 3,
		.sec_clk_sel = 2,
		.l2cpmr_iaddr = 0x4501,
		.vreg[VREG_CORE] = { "krait0", 1350000 },
		.vreg[VREG_MEM] = { "krait0_mem", 1150000 },
		.vreg[VREG_DIG] = { "krait0_dig", 1150000 },
		.vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
	},
	[CPU1] = {
		.hfpll_phys_base = 0x00903240,
		.aux_clk_sel_phys = 0x02098014,
		.aux_clk_sel = 3,
		.sec_clk_sel = 2,
		.l2cpmr_iaddr = 0x5501,
		.vreg[VREG_CORE] = { "krait1", 1350000 },
		.vreg[VREG_MEM] = { "krait1_mem", 1150000 },
		.vreg[VREG_DIG] = { "krait1_dig", 1150000 },
		.vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
	},
	[CPU2] = {
		.hfpll_phys_base = 0x00903280,
		.aux_clk_sel_phys = 0x020A8014,
		.aux_clk_sel = 3,
		.sec_clk_sel = 2,
		.l2cpmr_iaddr = 0x6501,
		.vreg[VREG_CORE] = { "krait2", 1350000 },
		.vreg[VREG_MEM] = { "krait2_mem", 1150000 },
		.vreg[VREG_DIG] = { "krait2_dig", 1150000 },
		.vreg[VREG_HFPLL_A] = { "krait2_hfpll", 1800000 },
	},
	[CPU3] = {
		.hfpll_phys_base = 0x009032C0,
		.aux_clk_sel_phys = 0x020B8014,
		.aux_clk_sel = 3,
		.sec_clk_sel = 2,
		.l2cpmr_iaddr = 0x7501,
		.vreg[VREG_CORE] = { "krait3", 1350000 },
		.vreg[VREG_MEM] = { "krait3_mem", 1150000 },
		.vreg[VREG_DIG] = { "krait3_dig", 1150000 },
		.vreg[VREG_HFPLL_A] = { "krait3_hfpll", 1800000 },
	},
	[L2] = {
		.hfpll_phys_base = 0x00903300,
		.aux_clk_sel_phys = 0x02011028,
		.aux_clk_sel = 3,
		.sec_clk_sel = 2,
		.l2cpmr_iaddr = 0x0500,
		.vreg[VREG_HFPLL_A] = { "l2_hfpll", 1800000 },
	},
};
/* Memory-bus bandwidth levels referenced by l2_freq_tbl (index = bandwidth
 * level), expressed in MB/s via the BW_MBPS() helper. */
static struct msm_bus_paths bw_level_tbl[] __initdata = {
	[0] = BW_MBPS(640),
	[1] = BW_MBPS(1064),
	[2] = BW_MBPS(1600),
	[3] = BW_MBPS(2128),
	[4] = BW_MBPS(3200),
	[5] = BW_MBPS(4264),
};
/* Bus-scaling client registration data; active_only means bandwidth votes
 * are dropped automatically when the CPU subsystem is inactive. */
static struct msm_bus_scale_pdata bus_scale_data __initdata = {
	.usecase = bw_level_tbl,
	.num_usecases = ARRAY_SIZE(bw_level_tbl),
	.active_only = 1,
	.name = "acpuclk-8064",
};
/*
 * L2 frequency table.  Each row: { khz, clock source, divider, PLL L-value },
 * two supply voltages (presumably vdd_dig / vdd_mem - confirm against
 * acpuclock-krait.h), and an index into bw_level_tbl.
 *
 * Fix: the original table initialized index [0] twice - first with
 * { 378000, HFPLL, ... } and immediately after with { 384000, PLL_8, ... }.
 * With C designated initializers the later entry silently overrides the
 * earlier one (this only shows up as a -Woverride-init warning), so the
 * 378 MHz row was dead data.  The dead row is removed; the effective table
 * contents are unchanged.
 */
static struct l2_level l2_freq_tbl[] __initdata = {
	[0]  = { {  384000, PLL_8, 0, 0x00 },  950000, 1050000, 1 },
	[1]  = { {  432000, HFPLL, 2, 0x20 }, 1050000, 1050000, 2 },
	[2]  = { {  486000, HFPLL, 2, 0x24 }, 1050000, 1050000, 2 },
	[3]  = { {  540000, HFPLL, 2, 0x28 }, 1050000, 1050000, 2 },
	[4]  = { {  594000, HFPLL, 1, 0x16 }, 1050000, 1050000, 2 },
	[5]  = { {  648000, HFPLL, 1, 0x18 }, 1050000, 1050000, 4 },
	[6]  = { {  702000, HFPLL, 1, 0x1A }, 1150000, 1150000, 4 },
	[7]  = { {  756000, HFPLL, 1, 0x1C }, 1150000, 1150000, 4 },
	[8]  = { {  810000, HFPLL, 1, 0x1E }, 1150000, 1150000, 4 },
	[9]  = { {  864000, HFPLL, 1, 0x20 }, 1150000, 1150000, 4 },
	[10] = { {  918000, HFPLL, 1, 0x22 }, 1150000, 1150000, 5 },
	[11] = { {  972000, HFPLL, 1, 0x24 }, 1150000, 1150000, 5 },
	[12] = { { 1134000, HFPLL, 1, 0x2A }, 1150000, 1150000, 5 },
	[13] = { { 1242000, HFPLL, 1, 0x2E }, 1150000, 1150000, 5 },
	[14] = { { 1296000, HFPLL, 1, 0x30 }, 1150000, 1150000, 5 },
	[15] = { { 1350000, HFPLL, 1, 0x32 }, 1150000, 1150000, 5 },
	[16] = { { 1404000, HFPLL, 1, 0x34 }, 1150000, 1150000, 5 },
	[17] = { { 1458000, HFPLL, 1, 0x36 }, 1150000, 1150000, 5 },
	{ }
};
/* CPU frequency/voltage table for the "slow" speed bin.  Each row:
 * use-for-scaling flag, { khz, source, divider, L-value }, required L2
 * level (L2(n) = index into l2_freq_tbl), and CPU supply microvolts. */
static struct acpu_level tbl_slow[] __initdata = {
	{ 1, {   162000, HFPLL, 2, 0x0C }, L2(0),   900000 },
	{ 1, {   216000, HFPLL, 2, 0x10 }, L2(0),   900000 },
	{ 1, {   270000, HFPLL, 2, 0x14 }, L2(0),   900000 },
	{ 1, {   324000, HFPLL, 2, 0x18 }, L2(0),   925000 },
	{ 1, {   378000, HFPLL, 2, 0x1C }, L2(0),   925000 },
	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   950000 },
	{ 0, {   432000, HFPLL, 2, 0x20 }, L2(5),   975000 },
	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   975000 },
	{ 0, {   540000, HFPLL, 2, 0x28 }, L2(5),  1000000 },
	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(5),  1000000 },
	{ 0, {   648000, HFPLL, 1, 0x18 }, L2(5),  1025000 },
	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(5),  1025000 },
	{ 0, {   756000, HFPLL, 1, 0x1C }, L2(5),  1075000 },
	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(5),  1075000 },
	{ 0, {   864000, HFPLL, 1, 0x20 }, L2(5),  1100000 },
	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(5),  1100000 },
	{ 0, {   972000, HFPLL, 1, 0x24 }, L2(5),  1125000 },
	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(5),  1125000 },
	{ 0, {  1080000, HFPLL, 1, 0x28 }, L2(14), 1175000 },
	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(14), 1175000 },
	{ 0, {  1188000, HFPLL, 1, 0x2C }, L2(14), 1200000 },
	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(14), 1200000 },
	{ 0, {  1296000, HFPLL, 1, 0x30 }, L2(14), 1225000 },
	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1225000 },
	{ 0, {  1404000, HFPLL, 1, 0x34 }, L2(15), 1237500 },
	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1237500 },
	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15), 1250000 },
	{ 0, { 0 } }
};
/* CPU frequency/voltage table for the "nominal" speed bin; same row layout
 * as tbl_slow, with voltages roughly 50 mV below the slow bin. */
static struct acpu_level tbl_nom[] __initdata = {
	{ 1, {   162000, HFPLL, 2, 0x0C }, L2(0),   900000 },
	{ 1, {   216000, HFPLL, 2, 0x10 }, L2(0),   900000 },
	{ 1, {   270000, HFPLL, 2, 0x14 }, L2(0),   900000 },
	{ 1, {   324000, HFPLL, 2, 0x18 }, L2(0),   925000 },
	{ 1, {   378000, HFPLL, 2, 0x1C }, L2(0),   925000 },
	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   900000 },
	{ 0, {   432000, HFPLL, 2, 0x20 }, L2(5),   925000 },
	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   925000 },
	{ 0, {   540000, HFPLL, 2, 0x28 }, L2(5),   950000 },
	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(5),   950000 },
	{ 0, {   648000, HFPLL, 1, 0x18 }, L2(5),   975000 },
	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(5),   975000 },
	{ 0, {   756000, HFPLL, 1, 0x1C }, L2(5),  1025000 },
	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(5),  1025000 },
	{ 0, {   864000, HFPLL, 1, 0x20 }, L2(5),  1050000 },
	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(5),  1050000 },
	{ 0, {   972000, HFPLL, 1, 0x24 }, L2(5),  1075000 },
	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(5),  1075000 },
	{ 0, {  1080000, HFPLL, 1, 0x28 }, L2(14), 1125000 },
	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(14), 1125000 },
	{ 0, {  1188000, HFPLL, 1, 0x2C }, L2(14), 1150000 },
	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(14), 1150000 },
	{ 0, {  1296000, HFPLL, 1, 0x30 }, L2(14), 1175000 },
	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1175000 },
	{ 0, {  1404000, HFPLL, 1, 0x34 }, L2(15), 1187500 },
	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1187500 },
	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15), 1200000 },
	{ 0, { 0 } }
};
/* CPU frequency/voltage table for the "fast" speed bin; same row layout
 * as tbl_slow, with lower voltages than the nominal bin. */
static struct acpu_level tbl_fast[] __initdata = {
	{ 1, {   162000, HFPLL, 2, 0x0C }, L2(0),   900000 },
	{ 1, {   216000, HFPLL, 2, 0x10 }, L2(0),   900000 },
	{ 1, {   270000, HFPLL, 2, 0x14 }, L2(0),   900000 },
	{ 1, {   324000, HFPLL, 2, 0x18 }, L2(0),   925000 },
	{ 1, {   378000, HFPLL, 2, 0x1C }, L2(0),   925000 },
	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   850000 },
	{ 0, {   432000, HFPLL, 2, 0x20 }, L2(5),   875000 },
	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   875000 },
	{ 0, {   540000, HFPLL, 2, 0x28 }, L2(5),   900000 },
	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(5),   900000 },
	{ 0, {   648000, HFPLL, 1, 0x18 }, L2(5),   925000 },
	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(5),   925000 },
	{ 0, {   756000, HFPLL, 1, 0x1C }, L2(5),   975000 },
	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(5),   975000 },
	{ 0, {   864000, HFPLL, 1, 0x20 }, L2(5),  1000000 },
	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(5),  1000000 },
	{ 0, {   972000, HFPLL, 1, 0x24 }, L2(5),  1025000 },
	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(5),  1025000 },
	{ 0, {  1080000, HFPLL, 1, 0x28 }, L2(14), 1075000 },
	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(14), 1075000 },
	{ 0, {  1188000, HFPLL, 1, 0x2C }, L2(14), 1100000 },
	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(14), 1100000 },
	{ 0, {  1296000, HFPLL, 1, 0x30 }, L2(14), 1125000 },
	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1125000 },
	{ 0, {  1404000, HFPLL, 1, 0x34 }, L2(15), 1137500 },
	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1137500 },
	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15), 1150000 },
	{ 0, { 0 } }
};
/* CPU frequency/voltage table for the "faster" speed bin; same row layout
 * as tbl_slow, with the lowest voltages of the four legacy bins. */
static struct acpu_level tbl_faster[] __initdata = {
	{ 1, {   162000, HFPLL, 2, 0x0C }, L2(0),   900000 },
	{ 1, {   216000, HFPLL, 2, 0x10 }, L2(0),   900000 },
	{ 1, {   270000, HFPLL, 2, 0x14 }, L2(0),   900000 },
	{ 1, {   324000, HFPLL, 2, 0x18 }, L2(0),   925000 },
	{ 1, {   378000, HFPLL, 2, 0x1C }, L2(0),   925000 },
	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   850000 },
	{ 0, {   432000, HFPLL, 2, 0x20 }, L2(5),   875000 },
	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   875000 },
	{ 0, {   540000, HFPLL, 2, 0x28 }, L2(5),   900000 },
	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(5),   900000 },
	{ 0, {   648000, HFPLL, 1, 0x18 }, L2(5),   925000 },
	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(5),   925000 },
	{ 0, {   756000, HFPLL, 1, 0x1C }, L2(5),   962500 },
	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(5),   962500 },
	{ 0, {   864000, HFPLL, 1, 0x20 }, L2(5),   975000 },
	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(5),   975000 },
	{ 0, {   972000, HFPLL, 1, 0x24 }, L2(5),  1000000 },
	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(5),  1000000 },
	{ 0, {  1080000, HFPLL, 1, 0x28 }, L2(14), 1050000 },
	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(14), 1050000 },
	{ 0, {  1188000, HFPLL, 1, 0x2C }, L2(14), 1075000 },
	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(14), 1075000 },
	{ 0, {  1296000, HFPLL, 1, 0x30 }, L2(14), 1100000 },
	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1100000 },
	{ 0, {  1404000, HFPLL, 1, 0x34 }, L2(15), 1112500 },
	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1112500 },
	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15), 1125000 },
	{ 0, { 0 } }
};
/* 1.7 GHz part, PVS bin 0 (slowest silicon).  Same row layout as tbl_slow.
 * NOTE(review): voltages are not monotonic here - 1674 MHz (1150000) is
 * below 1566 MHz (1175000); looks copied from a vendor table, confirm
 * against the platform's PVS data before relying on it. */
static struct acpu_level tbl_PVS0_1700MHz[] __initdata = {
	{ 1, {   162000, HFPLL, 2, 0x0C }, L2(0),   900000 },
	{ 1, {   216000, HFPLL, 2, 0x10 }, L2(0),   900000 },
	{ 1, {   270000, HFPLL, 2, 0x14 }, L2(0),   900000 },
	{ 1, {   324000, HFPLL, 2, 0x18 }, L2(0),   925000 },
	{ 1, {   378000, HFPLL, 2, 0x1C }, L2(0),   925000 },
	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   950000 },
	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   950000 },
	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(5),   950000 },
	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(5),   962500 },
	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(5),  1000000 },
	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(5),  1025000 },
	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(5),  1037500 },
	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(14), 1075000 },
	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(14), 1087500 },
	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1125000 },
	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1150000 },
	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15), 1150000 },
	{ 1, {  1566000, HFPLL, 1, 0x3A }, L2(15), 1175000 },
	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1150000 },
	{ 1, {  1728000, HFPLL, 1, 0x40 }, L2(15), 1175000 },
#ifdef CONFIG_CPU_OC
	/* overclock levels beyond the stock 1.73 GHz ceiling */
	{ 1, {  1782000, HFPLL, 1, 0x42 }, L2(15), 1175000 },
	{ 1, {  1836000, HFPLL, 1, 0x44 }, L2(15), 1200000 },
	{ 1, {  1890000, HFPLL, 1, 0x46 }, L2(15), 1225000 },
#endif
	{ 0, { 0 } }
};
/* 1.7 GHz part, PVS bin 1.  Same row layout as tbl_slow; slightly lower
 * mid-range voltages than PVS0. */
static struct acpu_level tbl_PVS1_1700MHz[] __initdata = {
	{ 1, {   162000, HFPLL, 2, 0x0C }, L2(0),   900000 },
	{ 1, {   216000, HFPLL, 2, 0x10 }, L2(0),   900000 },
	{ 1, {   270000, HFPLL, 2, 0x14 }, L2(0),   900000 },
	{ 1, {   324000, HFPLL, 2, 0x18 }, L2(0),   925000 },
	{ 1, {   378000, HFPLL, 2, 0x1C }, L2(0),   925000 },
	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   950000 },
	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   950000 },
	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(5),   950000 },
	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(5),   962500 },
	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(5),   975000 },
	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(5),  1000000 },
	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(5),  1012500 },
	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(14), 1037500 },
	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(14), 1050000 },
	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1087500 },
	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1112500 },
	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15), 1150000 },
	{ 1, {  1566000, HFPLL, 1, 0x3A }, L2(15), 1150000 },
	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1150000 },
	{ 1, {  1728000, HFPLL, 1, 0x40 }, L2(15), 1175000 },
#ifdef CONFIG_CPU_OC
	{ 1, {  1782000, HFPLL, 1, 0x42 }, L2(15), 1175000 },
	{ 1, {  1836000, HFPLL, 1, 0x44 }, L2(15), 1200000 },
	{ 1, {  1890000, HFPLL, 1, 0x46 }, L2(15), 1225000 },
#endif
	{ 0, { 0 } }
};
/* 1.7 GHz part, PVS bin 2.  Same row layout as tbl_slow.
 * NOTE(review): the 1512 MHz row (1150000) is well above its neighbours
 * (1458 MHz at 1075000, 1566 MHz at 1100000) - the same spike appears in
 * bins 2-5; verify against the vendor PVS tables. */
static struct acpu_level tbl_PVS2_1700MHz[] __initdata = {
	{ 1, {   162000, HFPLL, 2, 0x0C }, L2(0),   900000 },
	{ 1, {   216000, HFPLL, 2, 0x10 }, L2(0),   900000 },
	{ 1, {   270000, HFPLL, 2, 0x14 }, L2(0),   900000 },
	{ 1, {   324000, HFPLL, 2, 0x18 }, L2(0),   925000 },
	{ 1, {   378000, HFPLL, 2, 0x1C }, L2(0),   925000 },
	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   925000 },
	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   925000 },
	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(5),   925000 },
	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(5),   925000 },
	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(5),   937500 },
	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(5),   950000 },
	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(5),   975000 },
	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(14), 1000000 },
	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(14), 1012500 },
	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1037500 },
	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1075000 },
	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15), 1150000 },
	{ 1, {  1566000, HFPLL, 1, 0x3A }, L2(15), 1100000 },
	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1125000 },
	{ 1, {  1728000, HFPLL, 1, 0x40 }, L2(15), 1150000 },
#ifdef CONFIG_CPU_OC
	{ 1, {  1782000, HFPLL, 1, 0x42 }, L2(15), 1150000 },
	{ 1, {  1836000, HFPLL, 1, 0x44 }, L2(15), 1175000 },
	{ 1, {  1890000, HFPLL, 1, 0x46 }, L2(15), 1225000 },
#endif
	{ 0, { 0 } }
};
/* 1.7 GHz part, PVS bin 3.  Same row layout as tbl_slow; note the same
 * 1512 MHz voltage spike as bins 2, 4 and 5 (see tbl_PVS2 note). */
static struct acpu_level tbl_PVS3_1700MHz[] __initdata = {
	{ 1, {   162000, HFPLL, 2, 0x0C }, L2(0),   900000 },
	{ 1, {   216000, HFPLL, 2, 0x10 }, L2(0),   900000 },
	{ 1, {   270000, HFPLL, 2, 0x14 }, L2(0),   900000 },
	{ 1, {   324000, HFPLL, 2, 0x18 }, L2(0),   925000 },
	{ 1, {   378000, HFPLL, 2, 0x1C }, L2(0),   925000 },
	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   900000 },
	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   900000 },
	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(5),   900000 },
	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(5),   900000 },
	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(5),   900000 },
	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(5),   925000 },
	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(5),   950000 },
	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(14),  975000 },
	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(14),  987500 },
	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1000000 },
	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1037500 },
	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15), 1150000 },
	{ 1, {  1566000, HFPLL, 1, 0x3A }, L2(15), 1062500 },
	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1100000 },
	{ 1, {  1728000, HFPLL, 1, 0x40 }, L2(15), 1125000 },
#ifdef CONFIG_CPU_OC
	{ 1, {  1782000, HFPLL, 1, 0x42 }, L2(15), 1125000 },
	{ 1, {  1836000, HFPLL, 1, 0x44 }, L2(15), 1150000 },
	{ 1, {  1890000, HFPLL, 1, 0x46 }, L2(15), 1175000 },
#endif
	{ 0, { 0 } }
};
/* 1.7 GHz part, PVS bin 4.  Same row layout as tbl_slow; note the same
 * 1512 MHz voltage spike as bins 2, 3 and 5 (see tbl_PVS2 note). */
static struct acpu_level tbl_PVS4_1700MHz[] __initdata = {
	{ 1, {   162000, HFPLL, 2, 0x0C }, L2(0),   900000 },
	{ 1, {   216000, HFPLL, 2, 0x10 }, L2(0),   900000 },
	{ 1, {   270000, HFPLL, 2, 0x14 }, L2(0),   900000 },
	{ 1, {   324000, HFPLL, 2, 0x18 }, L2(0),   925000 },
	{ 1, {   378000, HFPLL, 2, 0x1C }, L2(0),   925000 },
	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   875000 },
	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   875000 },
	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(5),   875000 },
	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(5),   875000 },
	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(5),   887500 },
	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(5),   900000 },
	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(5),   925000 },
	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(14),  950000 },
	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(14),  962500 },
	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15),  975000 },
	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1000000 },
	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15), 1150000 },
	{ 1, {  1566000, HFPLL, 1, 0x3A }, L2(15), 1037500 },
	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1075000 },
	{ 1, {  1728000, HFPLL, 1, 0x40 }, L2(15), 1100000 },
#ifdef CONFIG_CPU_OC
	{ 1, {  1782000, HFPLL, 1, 0x42 }, L2(15), 1125000 },
	{ 1, {  1836000, HFPLL, 1, 0x44 }, L2(15), 1150000 },
	{ 1, {  1890000, HFPLL, 1, 0x46 }, L2(15), 1175000 },
#endif
	{ 0, { 0 } }
};
/*
 * APQ8064 1.7 GHz speed bin, PVS 5 frequency table (slightly lower
 * voltages than PVS 4 at the upper steps).  Row layout as in the
 * other acpu_level tables.  NOTE(review): same out-of-order 1150000 uV
 * at the 1512000 step as PVS 3/4 — confirm intent.
 */
static struct acpu_level tbl_PVS5_1700MHz[] __initdata = {
{ 1, { 162000, HFPLL, 2, 0x0C }, L2(0), 900000 },
{ 1, { 216000, HFPLL, 2, 0x10 }, L2(0), 900000 },
{ 1, { 270000, HFPLL, 2, 0x14 }, L2(0), 900000 },
{ 1, { 324000, HFPLL, 2, 0x18 }, L2(0), 925000 },
{ 1, { 378000, HFPLL, 2, 0x1C }, L2(0), 925000 },
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 875000 },
{ 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 875000 },
{ 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 875000 },
{ 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 875000 },
{ 1, { 810000, HFPLL, 1, 0x1E }, L2(5), 887500 },
{ 1, { 918000, HFPLL, 1, 0x22 }, L2(5), 900000 },
{ 1, { 1026000, HFPLL, 1, 0x26 }, L2(5), 925000 },
{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(14), 937500 },
{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(14), 950000 },
{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 962500 },
{ 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 987500 },
{ 1, { 1512000, HFPLL, 1, 0x38 }, L2(15), 1150000 },
{ 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1012500 },
{ 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1050000 },
{ 1, { 1728000, HFPLL, 1, 0x40 }, L2(15), 1075000 },
#ifdef CONFIG_CPU_OC
{ 1, { 1782000, HFPLL, 1, 0x42 }, L2(15), 1125000 },
{ 1, { 1836000, HFPLL, 1, 0x44 }, L2(15), 1150000 },
{ 1, { 1890000, HFPLL, 1, 0x46 }, L2(15), 1175000 },
#endif
{ 0, { 0 } } /* table terminator */
};
/*
 * APQ8064 1.7 GHz speed bin, PVS 6 (fastest-process) frequency table.
 * Row layout as in the other acpu_level tables.  NOTE(review): same
 * out-of-order 1150000 uV at the 1512000 step as the other 1.7 GHz
 * PVS tables — confirm intent.
 */
static struct acpu_level tbl_PVS6_1700MHz[] __initdata = {
{ 1, { 162000, HFPLL, 2, 0x0C }, L2(0), 900000 },
{ 1, { 216000, HFPLL, 2, 0x10 }, L2(0), 900000 },
{ 1, { 270000, HFPLL, 2, 0x14 }, L2(0), 900000 },
{ 1, { 324000, HFPLL, 2, 0x18 }, L2(0), 925000 },
{ 1, { 378000, HFPLL, 2, 0x1C }, L2(0), 925000 },
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 875000 },
{ 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 875000 },
{ 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 875000 },
{ 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 875000 },
{ 1, { 810000, HFPLL, 1, 0x1E }, L2(5), 887500 },
{ 1, { 918000, HFPLL, 1, 0x22 }, L2(5), 900000 },
{ 1, { 1026000, HFPLL, 1, 0x26 }, L2(5), 925000 },
{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(14), 937500 },
{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(14), 950000 },
{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 962500 },
{ 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 975000 },
{ 1, { 1512000, HFPLL, 1, 0x38 }, L2(15), 1150000 },
{ 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1000000 },
{ 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1025000 },
{ 1, { 1728000, HFPLL, 1, 0x40 }, L2(15), 1050000 },
#ifdef CONFIG_CPU_OC
{ 1, { 1782000, HFPLL, 1, 0x42 }, L2(15), 1100000 },
{ 1, { 1836000, HFPLL, 1, 0x44 }, L2(15), 1125000 },
{ 1, { 1890000, HFPLL, 1, 0x46 }, L2(15), 1175000 },
#endif
{ 0, { 0 } } /* table terminator */
};
/*
 * APQ8064 2.0 GHz speed-bin frequency tables, one per PVS grade
 * (PVS 0 = slowest silicon / highest voltage, PVS 6 = fastest / lowest).
 * Row layout: { use_for_scaling, { CPU kHz, clock source, divider sel,
 * PLL L-value }, L2 clock level index, VDD_CORE (uV) }.
 * These tables have no low (<384 MHz) steps and no CONFIG_CPU_OC rows;
 * voltages are strictly monotonic in frequency.
 */
static struct acpu_level tbl_PVS0_2000MHz[] __initdata = {
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 950000 },
{ 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 950000 },
{ 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 950000 },
{ 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 950000 },
{ 1, { 810000, HFPLL, 1, 0x1E }, L2(5), 962500 },
{ 1, { 918000, HFPLL, 1, 0x22 }, L2(5), 975000 },
{ 1, { 1026000, HFPLL, 1, 0x26 }, L2(5), 1000000 },
{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(14), 1025000 },
{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(14), 1037500 },
{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(14), 1062500 },
{ 1, { 1458000, HFPLL, 1, 0x36 }, L2(14), 1100000 },
{ 1, { 1566000, HFPLL, 1, 0x3A }, L2(14), 1125000 },
{ 1, { 1674000, HFPLL, 1, 0x3E }, L2(14), 1175000 },
{ 1, { 1782000, HFPLL, 1, 0x42 }, L2(14), 1225000 },
{ 1, { 1890000, HFPLL, 1, 0x46 }, L2(14), 1287500 },
{ 0, { 0 } } /* table terminator */
};
/* 2.0 GHz bin, PVS 1. */
static struct acpu_level tbl_PVS1_2000MHz[] __initdata = {
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 925000 },
{ 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 925000 },
{ 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 925000 },
{ 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 925000 },
{ 1, { 810000, HFPLL, 1, 0x1E }, L2(5), 937500 },
{ 1, { 918000, HFPLL, 1, 0x22 }, L2(5), 950000 },
{ 1, { 1026000, HFPLL, 1, 0x26 }, L2(5), 975000 },
{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(14), 1000000 },
{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(14), 1012500 },
{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(14), 1037500 },
{ 1, { 1458000, HFPLL, 1, 0x36 }, L2(14), 1075000 },
{ 1, { 1566000, HFPLL, 1, 0x3A }, L2(14), 1100000 },
{ 1, { 1674000, HFPLL, 1, 0x3E }, L2(14), 1137500 },
{ 1, { 1782000, HFPLL, 1, 0x42 }, L2(14), 1187500 },
{ 1, { 1890000, HFPLL, 1, 0x46 }, L2(14), 1250000 },
{ 0, { 0 } } /* table terminator */
};
/* 2.0 GHz bin, PVS 2. */
static struct acpu_level tbl_PVS2_2000MHz[] __initdata = {
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 900000 },
{ 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 900000 },
{ 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 900000 },
{ 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 900000 },
{ 1, { 810000, HFPLL, 1, 0x1E }, L2(5), 912500 },
{ 1, { 918000, HFPLL, 1, 0x22 }, L2(5), 925000 },
{ 1, { 1026000, HFPLL, 1, 0x26 }, L2(5), 950000 },
{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(14), 975000 },
{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(14), 987500 },
{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(14), 1012500 },
{ 1, { 1458000, HFPLL, 1, 0x36 }, L2(14), 1050000 },
{ 1, { 1566000, HFPLL, 1, 0x3A }, L2(14), 1075000 },
{ 1, { 1674000, HFPLL, 1, 0x3E }, L2(14), 1112500 },
{ 1, { 1782000, HFPLL, 1, 0x42 }, L2(14), 1162500 },
{ 1, { 1890000, HFPLL, 1, 0x46 }, L2(14), 1212500 },
{ 0, { 0 } } /* table terminator */
};
/* 2.0 GHz bin, PVS 3. */
static struct acpu_level tbl_PVS3_2000MHz[] __initdata = {
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 900000 },
{ 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 900000 },
{ 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 900000 },
{ 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 900000 },
{ 1, { 810000, HFPLL, 1, 0x1E }, L2(5), 900000 },
{ 1, { 918000, HFPLL, 1, 0x22 }, L2(5), 912500 },
{ 1, { 1026000, HFPLL, 1, 0x26 }, L2(5), 937500 },
{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(14), 962500 },
{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(14), 975000 },
{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(14), 1000000 },
{ 1, { 1458000, HFPLL, 1, 0x36 }, L2(14), 1025000 },
{ 1, { 1566000, HFPLL, 1, 0x3A }, L2(14), 1050000 },
{ 1, { 1674000, HFPLL, 1, 0x3E }, L2(14), 1087500 },
{ 1, { 1782000, HFPLL, 1, 0x42 }, L2(14), 1137500 },
{ 1, { 1890000, HFPLL, 1, 0x46 }, L2(14), 1175000 },
{ 0, { 0 } } /* table terminator */
};
/* 2.0 GHz bin, PVS 4. */
static struct acpu_level tbl_PVS4_2000MHz[] __initdata = {
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 875000 },
{ 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 875000 },
{ 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 875000 },
{ 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 875000 },
{ 1, { 810000, HFPLL, 1, 0x1E }, L2(5), 887500 },
{ 1, { 918000, HFPLL, 1, 0x22 }, L2(5), 900000 },
{ 1, { 1026000, HFPLL, 1, 0x26 }, L2(5), 925000 },
{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(14), 950000 },
{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(14), 962500 },
{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(14), 975000 },
{ 1, { 1458000, HFPLL, 1, 0x36 }, L2(14), 1000000 },
{ 1, { 1566000, HFPLL, 1, 0x3A }, L2(14), 1037500 },
{ 1, { 1674000, HFPLL, 1, 0x3E }, L2(14), 1075000 },
{ 1, { 1782000, HFPLL, 1, 0x42 }, L2(14), 1112500 },
{ 1, { 1890000, HFPLL, 1, 0x46 }, L2(14), 1150000 },
{ 0, { 0 } } /* table terminator */
};
/* 2.0 GHz bin, PVS 5. */
static struct acpu_level tbl_PVS5_2000MHz[] __initdata = {
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 875000 },
{ 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 875000 },
{ 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 875000 },
{ 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 875000 },
{ 1, { 810000, HFPLL, 1, 0x1E }, L2(5), 887500 },
{ 1, { 918000, HFPLL, 1, 0x22 }, L2(5), 900000 },
{ 1, { 1026000, HFPLL, 1, 0x26 }, L2(5), 925000 },
{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(14), 937500 },
{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(14), 950000 },
{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(14), 962500 },
{ 1, { 1458000, HFPLL, 1, 0x36 }, L2(14), 987500 },
{ 1, { 1566000, HFPLL, 1, 0x3A }, L2(14), 1012500 },
{ 1, { 1674000, HFPLL, 1, 0x3E }, L2(14), 1050000 },
{ 1, { 1782000, HFPLL, 1, 0x42 }, L2(14), 1087500 },
{ 1, { 1890000, HFPLL, 1, 0x46 }, L2(14), 1125000 },
{ 0, { 0 } } /* table terminator */
};
/* 2.0 GHz bin, PVS 6 (fastest process). */
static struct acpu_level tbl_PVS6_2000MHz[] __initdata = {
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 875000 },
{ 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 875000 },
{ 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 875000 },
{ 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 875000 },
{ 1, { 810000, HFPLL, 1, 0x1E }, L2(5), 887500 },
{ 1, { 918000, HFPLL, 1, 0x22 }, L2(5), 900000 },
{ 1, { 1026000, HFPLL, 1, 0x26 }, L2(5), 925000 },
{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(14), 937500 },
{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(14), 950000 },
{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(14), 962500 },
{ 1, { 1458000, HFPLL, 1, 0x36 }, L2(14), 975000 },
{ 1, { 1566000, HFPLL, 1, 0x3A }, L2(14), 1000000 },
{ 1, { 1674000, HFPLL, 1, 0x3E }, L2(14), 1025000 },
{ 1, { 1782000, HFPLL, 1, 0x42 }, L2(14), 1062500 },
{ 1, { 1890000, HFPLL, 1, 0x46 }, L2(14), 1100000 },
{ 0, { 0 } } /* table terminator */
};
/*
 * Map of [speed bin][PVS grade] -> frequency table.
 * Bin 0 uses the named PVS_* enum indices and the legacy tbl_slow/nom/
 * fast/faster tables (defined earlier in this file); bins 1 (1.7 GHz)
 * and 2 (2.0 GHz) use numeric PVS indices 0-6.
 * NOTE(review): the third member appears to be a per-entry voltage
 * boost in uV (0 for the slowest grade, 25000 otherwise) — confirm
 * against struct pvs_table's definition.
 */
static struct pvs_table pvs_tables[NUM_SPEED_BINS][NUM_PVS] __initdata = {
[0][PVS_SLOW] = {tbl_slow, sizeof(tbl_slow), 0 },
[0][PVS_NOMINAL] = {tbl_nom, sizeof(tbl_nom), 25000 },
[0][PVS_FAST] = {tbl_fast, sizeof(tbl_fast), 25000 },
[0][PVS_FASTER] = {tbl_faster, sizeof(tbl_faster), 25000 },
[1][0] = { tbl_PVS0_1700MHz, sizeof(tbl_PVS0_1700MHz), 0 },
[1][1] = { tbl_PVS1_1700MHz, sizeof(tbl_PVS1_1700MHz), 25000 },
[1][2] = { tbl_PVS2_1700MHz, sizeof(tbl_PVS2_1700MHz), 25000 },
[1][3] = { tbl_PVS3_1700MHz, sizeof(tbl_PVS3_1700MHz), 25000 },
[1][4] = { tbl_PVS4_1700MHz, sizeof(tbl_PVS4_1700MHz), 25000 },
[1][5] = { tbl_PVS5_1700MHz, sizeof(tbl_PVS5_1700MHz), 25000 },
[1][6] = { tbl_PVS6_1700MHz, sizeof(tbl_PVS6_1700MHz), 25000 },
[2][0] = { tbl_PVS0_2000MHz, sizeof(tbl_PVS0_2000MHz), 0 },
[2][1] = { tbl_PVS1_2000MHz, sizeof(tbl_PVS1_2000MHz), 25000 },
[2][2] = { tbl_PVS2_2000MHz, sizeof(tbl_PVS2_2000MHz), 25000 },
[2][3] = { tbl_PVS3_2000MHz, sizeof(tbl_PVS3_2000MHz), 25000 },
[2][4] = { tbl_PVS4_2000MHz, sizeof(tbl_PVS4_2000MHz), 25000 },
[2][5] = { tbl_PVS5_2000MHz, sizeof(tbl_PVS5_2000MHz), 25000 },
[2][6] = { tbl_PVS6_2000MHz, sizeof(tbl_PVS6_2000MHz), 25000 },
};
/*
 * Aggregated parameters handed to the common Krait clock driver by
 * acpuclk_8064_probe().  scalable/l2_freq_tbl/bus_scale_data and
 * hfpll_data are defined earlier in this file.
 */
static struct acpuclk_krait_params acpuclk_8064_params __initdata = {
.scalable = scalable,
.scalable_size = sizeof(scalable),
.hfpll_data = &hfpll_data,
.pvs_tables = pvs_tables,
.l2_freq_tbl = l2_freq_tbl,
.l2_freq_tbl_size = sizeof(l2_freq_tbl),
.bus_scale = &bus_scale_data,
.pte_efuse_phys = 0x007000C0, /* physical address of the PTE efuse (speed bin / PVS fuses) */
.stby_khz = 384000, /* standby (lowest always-available) CPU rate */
};
/*
 * Probe callback: patch the HFPLL L-value limits for APQ8064AB parts
 * (or v2 silicon), then hand everything to the common Krait driver.
 */
static int __init acpuclk_8064_probe(struct platform_device *pdev)
{
	if (cpu_is_apq8064ab() ||
	    SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 2) {
		acpuclk_8064_params.hfpll_data->low_vdd_l_max = 37;
		acpuclk_8064_params.hfpll_data->nom_vdd_l_max = 74;
	}

	return acpuclk_krait_init(&pdev->dev, &acpuclk_8064_params);
}
/* Platform driver matched against the "acpuclk-8064" device. */
static struct platform_driver acpuclk_8064_driver = {
.driver = {
.name = "acpuclk-8064",
.owner = THIS_MODULE,
},
};
/* Register the driver and run the probe immediately (non-hotpluggable). */
static int __init acpuclk_8064_init(void)
{
return platform_driver_probe(&acpuclk_8064_driver,
acpuclk_8064_probe);
}
device_initcall(acpuclk_8064_init);
|
GruesomeWolf/Slippery_Sloth
|
arch/arm/mach-msm/acpuclock-8064.c
|
C
|
gpl-2.0
| 28,898
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* memset */
#include <libwzd-core/wzd_structs.h>
#include <libwzd-core/wzd_crc32.h>
#define C1 0x12345678
#define C2 0x9abcdef0
/*
 * Regression test for calc_crc32(): checksum $srcdir/file_crc.txt and
 * compare against a reference value produced by cksfv.  The c1/c2
 * locals are stack canaries declared around the path buffer to catch
 * buffer overruns inside the tested code.
 *
 * The source directory is taken from argv[1], falling back to the
 * $srcdir environment variable (automake convention).
 *
 * Returns 0 on success, 1 on any failure.
 */
int main(int argc, char *argv[])
{
	unsigned long c1 = C1;            /* canary before the buffer */
	unsigned long crc = 0x0;
	char input1[1024];
	const char * file1 = "file_crc.txt";
	const unsigned long crc_ref = 0xEB2FAFAF; /* cksfv file_crc.txt */
	char * srcdir = NULL;
	unsigned long c2 = C2;            /* canary after the buffer */

	if (argc > 1) {
		srcdir = argv[1];
	} else {
		srcdir = getenv("srcdir");
		if (srcdir == NULL) {
			fprintf(stderr, "Environment variable $srcdir not found, aborting\n");
			return 1;
		}
	}

	/* snprintf() NUL-terminates within the given size; pass the
	 * full buffer size instead of sizeof-1, which wasted a byte. */
	snprintf(input1, sizeof(input1), "%s/%s", srcdir, file1);

	if ( calc_crc32(input1,&crc,0,(unsigned long)-1) ) {
		fprintf(stderr, "calc_crc32 failed\n");
		return 1;
	}
	if ( crc != crc_ref ) {
		fprintf(stderr, "calc_crc32 returned crap\n");
		return 1;
	}
	if (c1 != C1) {
		fprintf(stderr, "c1 nuked !\n");
		return 1; /* was -1; use a consistent failure status */
	}
	if (c2 != C2) {
		fprintf(stderr, "c2 nuked !\n");
		return 1; /* was -1; use a consistent failure status */
	}
	return 0;
}
|
mathgl67/wzdftpd
|
tests/test_wzd_crc32.c
|
C
|
gpl-2.0
| 1,106
|
#ifdef CONFIG_COMPAT
#include <linux/compat.h> /* for compat_old_sigset_t */
#endif
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/fpumacro.h>
#include <asm/uctx.h>
#include <asm/siginfo.h>
#include <asm/visasm.h>
#include "entry.h"
#include "systbls.h"
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
/* {set, get}context() needed for 64-bit SparcLinux userland. */
/*
 * setcontext() for 64-bit SparcLinux userland: restore CPU state from
 * a user-supplied struct ucontext.  Any fault, misalignment, or saved
 * register windows force a SIGSEGV rather than a partial restore.
 *
 * Fix: the source had "&current" mis-encoded as "¤t" (an HTML
 * "&curren;" entity artifact), which does not compile; restored the
 * original identifiers.
 */
asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
	struct ucontext __user *ucp = (struct ucontext __user *)
		regs->u_regs[UREG_I0];
	mc_gregset_t __user *grp;
	unsigned long pc, npc, tstate;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	flush_user_windows();
	/* Reject if register windows are still buffered in the kernel,
	 * the pointer is unaligned, or the whole ucontext isn't
	 * user-accessible. */
	if (get_thread_wsaved() ||
	    (((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
	    (!__access_ok(ucp, sizeof(*ucp))))
		goto do_sigsegv;
	grp = &ucp->uc_mcontext.mc_gregs;
	err = __get_user(pc, &((*grp)[MC_PC]));
	err |= __get_user(npc, &((*grp)[MC_NPC]));
	/* PC/NPC must be 4-byte aligned. */
	if (err || ((pc | npc) & 3))
		goto do_sigsegv;
	/* Non-zero %o1 asks us to also restore the signal mask. */
	if (regs->u_regs[UREG_I1]) {
		sigset_t set;

		if (_NSIG_WORDS == 1) {
			if (__get_user(set.sig[0], &ucp->uc_sigmask.sig[0]))
				goto do_sigsegv;
		} else {
			if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
				goto do_sigsegv;
		}
		sigdelsetmask(&set, ~_BLOCKABLE); /* never block KILL/STOP */
		spin_lock_irq(&current->sighand->siglock);
		current->blocked = set;
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
	if (test_thread_flag(TIF_32BIT)) {
		pc &= 0xffffffff;
		npc &= 0xffffffff;
	}
	regs->tpc = pc;
	regs->tnpc = npc;
	err |= __get_user(regs->y, &((*grp)[MC_Y]));
	err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
	/* Userland may only change %asi and the condition codes. */
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
	err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
	err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
	err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
	err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
	err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
	err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
	/* Skip %g7 as that's the thread register in userspace. */
	err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
	err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
	err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
	err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
	err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
	err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
	err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
	err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));
	/* Write the saved frame pointer / return address into the
	 * user's register window save area on the stack. */
	err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp));
	err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7));
	err |= __put_user(fp,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __put_user(i7,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));

	err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
	if (fenab) {
		/* Restore the FPU image recorded in the ucontext. */
		unsigned long *fpregs = current_thread_info()->fpregs;
		unsigned long fprs;

		fprs_write(0);
		err |= __get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
		if (fprs & FPRS_DL)
			err |= copy_from_user(fpregs,
					      &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
					      (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_from_user(fpregs+16,
			 ((unsigned long __user *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
			 (sizeof(unsigned int) * 32));
		err |= __get_user(current_thread_info()->xfsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
		err |= __get_user(current_thread_info()->gsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
		regs->tstate &= ~TSTATE_PEF; /* force FPU reload on next use */
	}
	if (err)
		goto do_sigsegv;

	return;
do_sigsegv:
	force_sig(SIGSEGV, current);
}
/*
 * getcontext() for 64-bit SparcLinux userland: capture the current CPU
 * state into a user-supplied struct ucontext and step the PC past the
 * trap instruction.  Any user-memory fault forces SIGSEGV.
 *
 * Fix: restored "&current", which the source had mis-encoded as
 * "¤t" (HTML "&curren;" entity artifact).
 */
asmlinkage void sparc64_get_context(struct pt_regs *regs)
{
	struct ucontext __user *ucp = (struct ucontext __user *)
		regs->u_regs[UREG_I0];
	mc_gregset_t __user *grp;
	mcontext_t __user *mcp;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	synchronize_user_stack();
	if (get_thread_wsaved() || clear_user(ucp, sizeof(*ucp)))
		goto do_sigsegv;

#if 1
	fenab = 0; /* IMO get_context is like any other system call, thus modifies FPU state -jj */
#else
	fenab = (current_thread_info()->fpsaved[0] & FPRS_FEF);
#endif

	mcp = &ucp->uc_mcontext;
	grp = &mcp->mc_gregs;

	/* Skip over the trap instruction, first. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc = (regs->tnpc & 0xffffffff);
		regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
	} else {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	}
	err = 0;
	if (_NSIG_WORDS == 1)
		err |= __put_user(current->blocked.sig[0],
				  (unsigned long __user *)&ucp->uc_sigmask);
	else
		err |= __copy_to_user(&ucp->uc_sigmask, &current->blocked,
				      sizeof(sigset_t));
	err |= __put_user(regs->tstate, &((*grp)[MC_TSTATE]));
	err |= __put_user(regs->tpc, &((*grp)[MC_PC]));
	err |= __put_user(regs->tnpc, &((*grp)[MC_NPC]));
	err |= __put_user(regs->y, &((*grp)[MC_Y]));
	err |= __put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1]));
	err |= __put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2]));
	err |= __put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3]));
	err |= __put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4]));
	err |= __put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5]));
	err |= __put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6]));
	err |= __put_user(regs->u_regs[UREG_G7], &((*grp)[MC_G7]));
	err |= __put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0]));
	err |= __put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1]));
	err |= __put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2]));
	err |= __put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3]));
	err |= __put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4]));
	err |= __put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5]));
	err |= __put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6]));
	err |= __put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7]));
	/* Pull the saved %fp / %i7 out of the user's register window
	 * save area and record them in the mcontext. */
	err |= __get_user(fp,
		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __get_user(i7,
		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
	err |= __put_user(fp, &(mcp->mc_fp));
	err |= __put_user(i7, &(mcp->mc_i7));

	err |= __put_user(fenab, &(mcp->mc_fpregs.mcfpu_enab));
	if (fenab) {
		/* Unreachable with the "#if 1" above; kept for the
		 * alternate fenab policy. */
		unsigned long *fpregs = current_thread_info()->fpregs;
		unsigned long fprs;

		fprs = current_thread_info()->fpsaved[0];
		if (fprs & FPRS_DL)
			err |= copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs,
					    (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_to_user(
			 ((unsigned long __user *)&(mcp->mc_fpregs.mcfpu_fregs))+16, fpregs+16,
			 (sizeof(unsigned int) * 32));
		err |= __put_user(current_thread_info()->xfsr[0], &(mcp->mc_fpregs.mcfpu_fsr));
		err |= __put_user(current_thread_info()->gsr[0], &(mcp->mc_fpregs.mcfpu_gsr));
		err |= __put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs));
	}
	if (err)
		goto do_sigsegv;

	return;
do_sigsegv:
	force_sig(SIGSEGV, current);
}
/* Layout of the RT signal frame pushed onto the user stack by
 * setup_rt_frame() and consumed by do_rt_sigreturn(). */
struct rt_signal_frame {
struct sparc_stackf ss; /* saved stack frame / register window */
siginfo_t info; /* signal details delivered to the handler */
struct pt_regs regs; /* interrupted register state */
__siginfo_fpu_t __user *fpu_save; /* points at fpu_state below, or NULL */
stack_t stack; /* sigaltstack state at delivery time */
sigset_t mask; /* blocked-signal mask to restore on return */
__siginfo_fpu_t fpu_state; /* FPU image (only valid if fpu_save set) */
};
/*
 * Common helper for the legacy sigpause()/sigsuspend() syscalls:
 * atomically install the given (KILL/STOP-stripped) mask, sleep until
 * a signal is delivered, and arrange for the old mask to be restored
 * after the handler runs.  Always returns -ERESTARTNOHAND.
 *
 * Fix: restored "&current", which the source had mis-encoded as
 * "¤t" (HTML "&curren;" entity artifact).
 */
static long _sigpause_common(old_sigset_t set)
{
	set &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	siginitset(&current->blocked, set);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();

	set_restore_sigmask();

	return -ERESTARTNOHAND;
}
/* sigpause(2): block with the given mask until a signal arrives. */
asmlinkage long sys_sigpause(unsigned int set)
{
return _sigpause_common(set);
}
/* sigsuspend(2): same semantics as sigpause via the common helper. */
asmlinkage long sys_sigsuspend(old_sigset_t set)
{
return _sigpause_common(set);
}
/*
 * Reload the FPU image saved in a signal frame back into the thread's
 * kernel FPU save area.  Clearing FPRS and TSTATE_PEF forces the
 * hardware FPU to be re-populated lazily from this area on next use.
 * Returns non-zero if any user-memory access faulted.
 */
static inline int
restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
unsigned long *fpregs = current_thread_info()->fpregs;
unsigned long fprs;
int err;
err = __get_user(fprs, &fpu->si_fprs);
fprs_write(0);
regs->tstate &= ~TSTATE_PEF;
/* FPRS_DL/FPRS_DU select the lower/upper half of the FP registers. */
if (fprs & FPRS_DL)
err |= copy_from_user(fpregs, &fpu->si_float_regs[0],
(sizeof(unsigned int) * 32));
if (fprs & FPRS_DU)
err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32],
(sizeof(unsigned int) * 32));
err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
current_thread_info()->fpsaved[0] |= fprs;
return err;
}
/*
 * rt_sigreturn: unwind the signal frame built by setup_rt_frame() and
 * restore the interrupted register state, FPU state, altstack and
 * signal mask.  Any fault or bad alignment raises SIGSEGV.
 *
 * Fix: restored "&current", which the source had mis-encoded as
 * "¤t" (HTML "&curren;" entity artifact).
 */
void do_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_signal_frame __user *sf;
	unsigned long tpc, tnpc, tstate;
	__siginfo_fpu_t __user *fpu_save;
	sigset_t set;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	synchronize_user_stack ();
	sf = (struct rt_signal_frame __user *)
		(regs->u_regs [UREG_FP] + STACK_BIAS);

	/* 1. Make sure we are not getting garbage from the user */
	if (((unsigned long) sf) & 3)
		goto segv;

	err = get_user(tpc, &sf->regs.tpc);
	err |= __get_user(tnpc, &sf->regs.tnpc);
	if (test_thread_flag(TIF_32BIT)) {
		tpc &= 0xffffffff;
		tnpc &= 0xffffffff;
	}
	err |= ((tpc | tnpc) & 3); /* PC/NPC must be 4-byte aligned */

	/* 2. Restore the state */
	err |= __get_user(regs->y, &sf->regs.y);
	err |= __get_user(tstate, &sf->regs.tstate);
	err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs));

	/* User can only change condition codes and %asi in %tstate. */
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));

	err |= __get_user(fpu_save, &sf->fpu_save);
	if (fpu_save)
		err |= restore_fpu_state(regs, &sf->fpu_state);

	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
	err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);

	if (err)
		goto segv;

	regs->tpc = tpc;
	regs->tnpc = tnpc;

	/* Prevent syscall restart. */
	pt_regs_clear_syscall(regs);

	sigdelsetmask(&set, ~_BLOCKABLE); /* never unblock KILL/STOP */
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return;
segv:
	force_sig(SIGSEGV, current);
}
/* A signal-frame pointer is invalid if it is not 16-byte aligned.
 * (fplen is currently unused; only alignment is checked here.) */
static int invalid_frame_pointer(void __user *fp, int fplen)
{
	return ((unsigned long) fp & 15UL) != 0;
}
/*
 * Copy the thread's saved FPU image into a signal frame's
 * __siginfo_fpu_t.  FPRS_DL/FPRS_DU select which half of the FP
 * register file was live.  Returns non-zero on user-memory fault.
 */
static inline int
save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
unsigned long *fpregs = current_thread_info()->fpregs;
unsigned long fprs;
int err = 0;
fprs = current_thread_info()->fpsaved[0];
if (fprs & FPRS_DL)
err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
(sizeof(unsigned int) * 32));
if (fprs & FPRS_DU)
err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
(sizeof(unsigned int) * 32));
err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
err |= __put_user(fprs, &fpu->si_fprs);
return err;
}
/*
 * Compute the user-stack address for a new signal frame of the given
 * size, honouring SA_ONSTACK (sigaltstack) and 16-byte alignment.
 * Returns an intentionally bogus address if the frame would overflow
 * the alternate stack, so the process dies with SIGSEGV.
 */
static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, unsigned long framesize)
{
unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
/*
* If we are on the alternate signal stack and would overflow it, don't.
* Return an always-bogus address instead so we will die with SIGSEGV.
*/
if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
return (void __user *) -1L;
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
if (sas_ss_flags(sp) == 0)
sp = current->sas_ss_sp + current->sas_ss_size;
}
sp -= framesize;
/* Always align the stack frame. This handles two cases. First,
* sigaltstack need not be mindful of platform specific stack
* alignment. Second, if we took this signal because the stack
* is not aligned properly, we'd like to take the signal cleanly
* and report that.
*/
sp &= ~15UL;
return (void __user *) sp;
}
/*
 * Build an rt_signal_frame on the user stack and redirect execution to
 * the signal handler.  Saves pt_regs, optional FPU state, sigaltstack
 * info, the old signal mask, and the caller's register window, then
 * points %pc at the handler and %i7 at the userland restorer.
 * Failure paths: SIGILL (bad frame pointer / saved windows) or forced
 * SIGSEGV (fault while writing the frame).
 */
static inline void
setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
int signo, sigset_t *oldset, siginfo_t *info)
{
struct rt_signal_frame __user *sf;
int sigframe_size, err;
/* 1. Make sure everything is clean */
synchronize_user_stack();
save_and_clear_fpu();
/* Shrink the frame if no FPU state needs saving. */
sigframe_size = sizeof(struct rt_signal_frame);
if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
sigframe_size -= sizeof(__siginfo_fpu_t);
sf = (struct rt_signal_frame __user *)
get_sigframe(ka, regs, sigframe_size);
if (invalid_frame_pointer (sf, sigframe_size))
goto sigill;
if (get_thread_wsaved() != 0)
goto sigill;
/* 2. Save the current process state */
err = copy_to_user(&sf->regs, regs, sizeof (*regs));
if (current_thread_info()->fpsaved[0] & FPRS_FEF) {
err |= save_fpu_state(regs, &sf->fpu_state);
err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
} else {
err |= __put_user(0, &sf->fpu_save);
}
/* Setup sigaltstack */
err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);
err |= copy_to_user(&sf->mask, oldset, sizeof(sigset_t));
/* Copy the caller's register window below the new frame. */
err |= copy_in_user((u64 __user *)sf,
(u64 __user *)(regs->u_regs[UREG_FP]+STACK_BIAS),
sizeof(struct reg_window));
if (info)
err |= copy_siginfo_to_user(&sf->info, info);
else {
err |= __put_user(signo, &sf->info.si_signo);
err |= __put_user(SI_NOINFO, &sf->info.si_code);
}
if (err)
goto sigsegv;
/* 3. signal handler back-trampoline and parameters */
regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS;
regs->u_regs[UREG_I0] = signo;
regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
/* The sigcontext is passed in this way because of how it
* is defined in GLIBC's /usr/include/bits/sigcontext.h
* for sparc64. It includes the 128 bytes of siginfo_t.
*/
regs->u_regs[UREG_I2] = (unsigned long) &sf->info;
/* 5. signal handler */
regs->tpc = (unsigned long) ka->sa.sa_handler;
regs->tnpc = (regs->tpc + 4);
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
/* 4. return to kernel instructions */
regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
return;
sigill:
do_exit(SIGILL);
sigsegv:
force_sigsegv(signo, current);
}
/*
 * Deliver one signal: build its frame (with siginfo only when the
 * handler asked for SA_SIGINFO), then atomically add the handler's
 * sa_mask — and the signal itself unless SA_NOMASK — to the blocked
 * set.
 *
 * Fix: restored "&current", which the source had mis-encoded as
 * "¤t" (HTML "&curren;" entity artifact).
 */
static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
				 siginfo_t *info,
				 sigset_t *oldset, struct pt_regs *regs)
{
	setup_rt_frame(ka, regs, signr, oldset,
		       (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NOMASK))
		sigaddset(&current->blocked,signr);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
/*
 * Adjust a syscall's result before delivering a signal, based on the
 * -ERESTART* value in %o0: either fail it with EINTR (setting the
 * carry bits to flag the error) or rewind the PC by one instruction
 * so the trap re-executes with the original %o0.
 */
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
struct sigaction *sa)
{
switch (regs->u_regs[UREG_I0]) {
case ERESTART_RESTARTBLOCK:
case ERESTARTNOHAND:
no_system_call_restart:
/* Report EINTR; carry bits signal the error to userland. */
regs->u_regs[UREG_I0] = EINTR;
regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
break;
case ERESTARTSYS:
/* Only restart if the handler was installed with SA_RESTART. */
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
/* fallthrough */
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tnpc -= 4;
}
}
static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
{
struct k_sigaction ka;
int restart_syscall;
sigset_t *oldset;
siginfo_t info;
int signr;
if (pt_regs_is_syscall(regs) &&
(regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
restart_syscall = 1;
} else
restart_syscall = 0;
if (current_thread_info()->status & TS_RESTORE_SIGMASK)
oldset = ¤t->saved_sigmask;
else
oldset = ¤t->blocked;
#ifdef CONFIG_COMPAT
if (test_thread_flag(TIF_32BIT)) {
extern void do_signal32(sigset_t *, struct pt_regs *,
int restart_syscall,
unsigned long orig_i0);
do_signal32(oldset, regs, restart_syscall, orig_i0);
return;
}
#endif
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
/* If the debugger messes with the program counter, it clears
* the software "in syscall" bit, directing us to not perform
* a syscall restart.
*/
if (restart_syscall && !pt_regs_is_syscall(regs))
restart_syscall = 0;
if (signr > 0) {
if (restart_syscall)
syscall_restart(orig_i0, regs, &ka.sa);
handle_signal(signr, &ka, &info, oldset, regs);
/* A signal was successfully delivered; the saved
* sigmask will have been stored in the signal frame,
* and will be restored by sigreturn, so we can simply
* clear the TS_RESTORE_SIGMASK flag.
*/
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
tracehook_signal_handler(signr, &info, &ka, regs, 0);
return;
}
if (restart_syscall &&
(regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
regs->u_regs[UREG_I0] == ERESTARTSYS ||
regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
/* replay the system call when we are done */
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tnpc -= 4;
}
if (restart_syscall &&
regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
regs->tnpc -= 4;
}
/* If there's no signal to deliver, we just put the saved sigmask
* back
*/
if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL);
}
}
/*
 * Entry point from the return-to-userspace path: deliver pending
 * signals and handle TIF_NOTIFY_RESUME work (tracehook callbacks and
 * deferred session-keyring replacement).
 */
void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags)
{
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs, orig_i0);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
if (current->replacement_session_keyring)
key_replace_session_keyring();
}
}
|
luckasfb/OT_903D-kernel-2.6.35.7
|
kernel/arch/sparc/kernel/signal_64.c
|
C
|
gpl-2.0
| 18,267
|
/*
* symlink.c
*
* Symlink methods.
*
* Author: Steve Longerbeam <stevel@mvista.com, or source@mvista.com>
*
* 2003 (c) MontaVista Software, Inc.
* Copyright 2003 Sony Corporation
* Copyright 2003 Matsushita Electric Industrial Co., Ltd.
*
* This software is being distributed under the terms of the GNU General Public
* License version 2. Some or all of the technology encompassed by this
* software may be subject to one or more patents pending as of the date of
* this notice. No additional patent license will be required for GPL
* implementations of the technology. If you want to create a non-GPL
* implementation of the technology encompassed by this software, please
* contact legal@mvista.com for details including licensing terms and fees.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/fs.h>
#include <linux/pram_fs.h>
/*
 * Store a symlink target in the inode's first (and only) data block.
 * Allocates the block, then copies the NUL-terminated target into it
 * under the pramfs block write lock.
 *
 * Returns 0 on success or the pram_alloc_blocks() error.
 * Cleanup: removed the unused "unsigned long flags" local.
 */
int pram_block_symlink(struct inode *inode, const char *symname, int len)
{
	struct super_block * sb = inode->i_sb;
	pram_off_t block;
	char* blockp;
	int err;

	err = pram_alloc_blocks (inode, 0, 1);
	if (err)
		return err;

	block = pram_find_data_block(inode, 0);
	blockp = pram_get_block(sb, block);

	pram_lock_block(sb, blockp);
	memcpy(blockp, symname, len);
	blockp[len] = '\0';
	pram_unlock_block(sb, blockp);
	return 0;
}
/*
 * readlink(2) method: the symlink target lives in the inode's first
 * data block; hand it to the generic vfs_readlink helper.
 */
static int pram_readlink(struct dentry *dentry, char *buffer, int buflen)
{
struct inode * inode = dentry->d_inode;
struct super_block * sb = inode->i_sb;
pram_off_t block;
char* blockp;
block = pram_find_data_block(inode, 0);
blockp = pram_get_block(sb, block);
return vfs_readlink(dentry, buffer, buflen, blockp);
}
/*
 * follow_link method: resolve the symlink by feeding the target string
 * stored in the first data block to vfs_follow_link.
 */
static int pram_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct inode * inode = dentry->d_inode;
struct super_block * sb = inode->i_sb;
pram_off_t block;
char* blockp;
block = pram_find_data_block(inode, 0);
blockp = pram_get_block(sb, block);
return vfs_follow_link(nd, blockp);
}
/* Symlink inode operations for pramfs.  Converted from the obsolete
 * GNU "field:" initializer extension to C99 designated initializers. */
struct inode_operations pram_symlink_inode_operations = {
	.readlink	= pram_readlink,
	.follow_link	= pram_follow_link,
};
|
robacklin/celinux
|
fs/pramfs/symlink.c
|
C
|
gpl-2.0
| 2,227
|
/*
* ux_text.c - Unix interface, text functions
*
* This file is part of Frotz.
*
* Frotz is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Frotz is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#define __UNIX_PORT_FILE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef USE_NCURSES_H
#include <ncurses.h>
#else
#include <curses.h>
#endif
#include "ux_frotz.h"
/* When color_enabled is FALSE, we still minimally keep track of colors by
* setting current_color to A_REVERSE if the game reads the default
* foreground and background colors and swaps them. If we don't do this,
* Strange Results can happen when playing certain V6 games when
* color_enabled is FALSE.
*/
bool color_enabled = FALSE;
/* int current_color = 0; */
static char latin1_to_ascii[] =
" ! c L >o<Y | S '' C a << not- R _ "
"^0 +/-^2 ^3 ' my P . , ^1 o >> 1/41/23/4? "
"A A A A Ae A AE C E E E E I I I I "
"Th N O O O O Oe * O U U U Ue Y Th ss "
"a a a a ae a ae c e e e e i i i i "
"th n o o o o oe : o u u u ue y th y ";
/*
* os_font_data
*
* Return true if the given font is available. The font can be
*
* TEXT_FONT
* PICTURE_FONT
* GRAPHICS_FONT
* FIXED_WIDTH_FONT
*
* The font size should be stored in "height" and "width". If
* the given font is unavailable then these values must _not_
* be changed.
*
*/
int os_font_data (int font, int *height, int *width)
{
    /* Only the plain text font exists in the curses interface; it is one
       cell high and one cell wide.  For any other font, height/width are
       left untouched, as the contract above requires. */
    if (font != TEXT_FONT)
	return 0;

    *height = 1;
    *width = 1;
    return 1;	/* Truth in advertising */
}/* os_font_data */
#ifdef COLOR_SUPPORT
/*
* unix_convert
*
* Converts frotz's (and Infocom's) color values to ncurses color values.
*
*/
static int unix_convert(int color)
{
    /* Parallel tables: Z-machine colour codes and the matching ncurses
       colour constants.  Unrecognised codes map to 0. */
    static const int zcode[] = {
	BLACK_COLOUR, RED_COLOUR, GREEN_COLOUR, YELLOW_COLOUR,
	BLUE_COLOUR, MAGENTA_COLOUR, CYAN_COLOUR, WHITE_COLOUR
    };
    static const int ccode[] = {
	COLOR_BLACK, COLOR_RED, COLOR_GREEN, COLOR_YELLOW,
	COLOR_BLUE, COLOR_MAGENTA, COLOR_CYAN, COLOR_WHITE
    };
    int i;

    for (i = 0; i < (int)(sizeof zcode / sizeof zcode[0]); i++)
	if (color == zcode[i])
	    return ccode[i];
    return 0;
}
#endif
/*
* os_set_colour
*
* Set the foreground and background colours which can be:
*
* DEFAULT_COLOUR
* BLACK_COLOUR
* RED_COLOUR
* GREEN_COLOUR
* YELLOW_COLOUR
* BLUE_COLOUR
* MAGENTA_COLOUR
* CYAN_COLOUR
* WHITE_COLOUR
*
* MS-DOS 320 columns MCGA mode only:
*
* GREY_COLOUR
*
* Amiga only:
*
* LIGHTGREY_COLOUR
* MEDIUMGREY_COLOUR
* DARKGREY_COLOUR
*
* There may be more colours in the range from 16 to 255; see the
* remarks on os_peek_colour.
*
*/
void os_set_colour (int new_foreground, int new_background)
{
    /* Colour value 1 means "use the story file's default". */
    if (new_foreground == 1) new_foreground = z_header.h_default_foreground;
    if (new_background == 1) new_background = z_header.h_default_background;
    if (u_setup.color_enabled) {
#ifdef COLOR_SUPPORT
	/* curses wants a numbered colour pair per fg/bg combination.
	   Allocate pairs lazily and cache the pair number; pair 0 is
	   reserved by curses, hence the pre-increment. */
	static int colorspace[10][10];
	static int n_colors = 0;
	if (!colorspace[new_foreground][new_background]) {
	    init_pair(++n_colors, unix_convert(new_foreground),
		      unix_convert(new_background));
	    colorspace[new_foreground][new_background] = n_colors;
	}
	u_setup.current_color = COLOR_PAIR(colorspace[new_foreground][new_background]);
#endif
    } else
	/* Colours disabled: still track a swap of the two default colours
	   as reverse video (see the comment at the top of this file). */
	u_setup.current_color = (((new_foreground == z_header.h_default_background)
	    && (new_background == z_header.h_default_foreground))
	    ? A_REVERSE : 0);
    /* Re-apply the attributes so the new colour takes effect immediately. */
    os_set_text_style(u_setup.current_text_style);
}/* os_set_colour */
/*
* os_set_text_style
*
* Set the current text style. Following flags can be set:
*
* REVERSE_STYLE
* BOLDFACE_STYLE
* EMPHASIS_STYLE (aka underline aka italics)
* FIXED_WIDTH_STYLE
*
*/
void os_set_text_style (int new_style)
{
    int temp = 0;
    u_setup.current_text_style = new_style;
    /* Map Z-machine style bits onto curses attributes.  FIXED_WIDTH_STYLE
       is ignored here: the whole curses display is already fixed-width. */
    if (new_style & REVERSE_STYLE) temp |= A_REVERSE;
    if (new_style & BOLDFACE_STYLE) temp |= A_BOLD;
    if (new_style & EMPHASIS_STYLE) temp |= A_UNDERLINE;
    /* NOTE(review): XOR (not OR) with current_color -- when current_color
       carries A_REVERSE (swapped default colours, see os_set_colour), an
       explicit REVERSE_STYLE cancels it back out.  Looks intentional;
       confirm against upstream Frotz before changing. */
    attrset(temp ^ u_setup.current_color);
}/* os_set_text_style */
/*
* os_set_font
*
* Set the font for text output. The interpreter takes care not to
* choose fonts which aren't supported by the interface.
*
*/
void os_set_font (int new_font)
{
    /* Intentionally a no-op: os_font_data() above only ever reports
       TEXT_FONT as available, so there is never a font to switch to. */
    /* Not implemented */
}/* os_set_font */
/*
* os_display_char
*
* Display a character of the current font using the current colours and
* text style. The cursor moves to the next position. Printable codes are
* all ASCII values from 32 to 126, ISO Latin-1 characters from 160 to
* 255, ZC_GAP (gap between two sentences) and ZC_INDENT (paragraph
* indentation). The screen should not be scrolled after printing to the
* bottom right corner.
*
*/
void os_display_char (zchar c)
{
    /* Latin-1 range: either emit the character directly, or in
       plain-ASCII mode emit its 1-3 character transliteration from
       latin1_to_ascii (3 cells per code point, ' ' = unused cell). */
    if (c >= ZC_LATIN1_MIN) {
	if (u_setup.plain_ascii) {
	    char *ptr = latin1_to_ascii + 3 * (c - ZC_LATIN1_MIN);
	    char c1 = *ptr++;
	    char c2 = *ptr++;
	    char c3 = *ptr++;
	    addch(c1);	/* the first cell is always printed */
	    if (c2 != ' ')
		addch(c2);
	    if (c3 != ' ')
		addch(c3);
	} else
	    addch(c);
	return;
    }
    /* Printable ASCII. */
    if (c >= ZC_ASCII_MIN && c <= ZC_ASCII_MAX) {
	addch(c);
	return;
    }
    /* Paragraph indent: three spaces. */
    if (c == ZC_INDENT) {
	addch(' '); addch(' '); addch(' ');
	return;
    }
    /* Gap between two sentences: two spaces. */
    if (c == ZC_GAP) {
	addch(' '); addch(' ');
	return;
    }
    /* Any other code is silently ignored. */
}/* os_display_char */
/*
* os_display_string
*
* Pass a string of characters to os_display_char.
*
*/
void os_display_string (const zchar *s)
{
    zchar c;
    /* ZC_NEW_FONT / ZC_NEW_STYLE are in-band escapes followed by one
       argument byte; every other character goes to os_display_char(). */
    while ((c = (unsigned char) *s++) != 0)
	/* Is this superfluous given it's also done in screen_word()? */
	if (c == ZC_NEW_FONT || c == ZC_NEW_STYLE) {
	    int arg = (unsigned char) *s++;
	    if (c == ZC_NEW_FONT)
		os_set_font (arg);
	    if (c == ZC_NEW_STYLE)
		os_set_text_style (arg);
	} else os_display_char (c);
}/* os_display_string */
/*
* os_char_width
*
* Return the width of the character in screen units.
*
*/
int os_char_width (zchar c)
{
    /* In plain-ASCII mode a Latin-1 character is as wide as its
       transliteration (1-3 cells); everything else is one cell. */
    if (c >= ZC_LATIN1_MIN && u_setup.plain_ascii) {
	const char *ptr = latin1_to_ascii + 3 * (c - ZC_LATIN1_MIN);
	/* The first transliteration cell is always printed (see
	   os_display_char), so it always counts.  The old code expressed
	   this with the always-true comparison "if (c1 == c1)". */
	int width = 1;

	if (ptr[1] != ' ')
	    width++;
	if (ptr[2] != ' ')
	    width++;
	return width;
    }
    return 1;
}/* os_char_width*/
/*
* os_string_width
*
* Calculate the length of a word in screen units. Apart from letters,
* the word may contain special codes:
*
* NEW_STYLE - next character is a new text style
* NEW_FONT - next character is a new font
*
*/
int os_string_width (const zchar *s)
{
    /* Sum the widths of the printable characters, skipping the one-byte
       argument that follows each in-band NEW_STYLE/NEW_FONT escape. */
    int total = 0;
    zchar ch;

    for (ch = *s++; ch != 0; ch = *s++) {
	if (ch == ZC_NEW_STYLE || ch == ZC_NEW_FONT)
	    s++;	/* the escape's argument occupies no screen space */
	else
	    total += os_char_width(ch);
    }
    return total;
}/* os_string_width */
/*
* os_set_cursor
*
* Place the text cursor at the given coordinates. Top left is (1,1).
*
*/
void os_set_cursor (int y, int x)
{
    /* Z-machine coordinates are 1-based; curses' are 0-based. */
    move(y - 1, x - 1);
}/* os_set_cursor */
/*
* os_more_prompt
*
* Display a MORE prompt, wait for a keypress and remove the MORE
* prompt from the screen.
*
*/
void os_more_prompt (void)
{
    int saved_style, saved_x, saved_y;
    /* Save some useful information */
    saved_style = u_setup.current_text_style;
    getyx(stdscr, saved_y, saved_x);	/* where the prompt will be drawn */
    os_set_text_style(0);		/* plain text for the prompt itself */
    addstr("[MORE]");
    os_read_key(0, TRUE);		/* block until any keypress */
    /* Erase the prompt, then restore cursor position and text style. */
    move(saved_y, saved_x);
    addstr(" ");	/* NOTE(review): one space cannot erase "[MORE]";
			   upstream uses six -- whitespace may have been
			   collapsed in extraction, confirm */
    move(saved_y, saved_x);
    os_set_text_style(saved_style);
}/* os_more_prompt */
|
holzman/frotz
|
src/curses/ux_text.c
|
C
|
gpl-2.0
| 8,410
|
/*
Copyright (C) 2005 Michael K. McCarty & Fritz Bronner
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/** \file future.c This is responsible for Future Mission planning screen.
*
*/
#include <Buzz_inc.h>
#include <externs.h>
#include <assert.h>
#include <logging.h>
//Used to read steps from missStep.dat
FILE* MSteps;
char missStep[1024];
static inline char B_Mis(char x) {return missStep[x]-0x30;}
/*missStep.dat is plain text, with:
Mission Number (2 first bytes of each line)
A Coded letter, each drawing a different line (1105-1127 for all possible letters)
Numbers following each letter, which are the parameters of the function
Each line must finish with a Z, so the game stops reading
Any other char is ignored, but it's easier to read for a human that way */
LOG_DEFAULT_CATEGORY(future)
char status[5],lck[5],F1,F2,F3,F4,FMen,F5,Pad;
char JointFlag,MarFlag,JupFlag,SatFlag,MisType;
GXHEADER vh;
struct StepInfo {
i16 x_cor;
i16 y_cor;
} StepBub[MAXBUB];
struct Parameter {
char A; /**< DOCKING */
char B; /**< EVA */
char C; /**< LEM */
char D; /**< JOINT */
char E; /**< MANNED/UNMANNED/Duration 0==unmanned 1-6==duration */
char X; /**< the type of mission for assign crew and hardware */
char Z; /**< A duration mission only */
} V[62];
extern int Bub_Count;
extern struct mStr Mis;
extern struct MisEval Mev[60];
extern int SEG;
/* Load the future-missions button art (NFUTBUT.BUT) and decode it via
 * RLED_img into the off-screen buffer 'vh'. */
void Load_FUT_BUT(void)
{
    FILE *fin;
    unsigned i;
    /* NOTE(review): sOpen's result is not checked for NULL before use. */
    fin=sOpen("NFUTBUT.BUT","rb",0);
    i=fread(screen,1,MAX_X*MAX_Y,fin);	/* i = number of bytes read */
    fclose(fin);
    RLED_img((char *)screen,(char *)vh.vptr,(unsigned)i,vh.w,vh.h);
    return;
}
void DrawFuture(char plr,int mis,char pad)
{
int i,j;
FILE *fin;
unsigned sz;
strcpy(IKEY,"k011");strcpy(IDT,"i011");
JointFlag=0; // initialize joint flag
F1=F2=F3=F4=FMen=F5=0;
for (i=0;i<5;i++) lck[i]=status[i]=0;
FadeOut(2,pal,10,0,0);
Load_FUT_BUT();
fin=sOpen("FMIN.IMG","rb",0);
fread(&pal[0],768,1,fin);
sz=fread(screen,1,MAX_X*MAX_Y,fin);
fclose(fin);
RLED_img((char *)screen,(char *)vhptr.vptr,sz,vhptr.w,vhptr.h);
gxClearDisplay(0,0);
gr_sync ();
if (pad==2) JointFlag=0; // third pad automatic no joint mission
else
if (Data->P[plr].LaunchFacility[pad+1] == 1)
{
if (Data->P[plr].Future[pad+1].MissionCode==0) JointFlag=1; // if no mission then set joint flag
else if (Data->P[plr].Future[pad+1].part==1) // check if the part of that second mission is set
{
JointFlag=1;
Data->P[plr].Future[pad+1].MissionCode=0; // clear mission
Data->P[plr].Future[pad+1].part=0;
};
};
if (pad==1 || pad==0) {
if (Data->P[plr].LaunchFacility[pad+1]==1) JointFlag=1;
}
i=Data->Year;j=Data->Season;
TRACE3("--- Setting i=Year (%d), j=Season (%d)", i, j);
if ((i==60 && j==0) || (i==62 && j==0) || (i==64 && j==0) ||
(i==66 && j==0) || (i==69 && j==1) || (i==71 && j==1) ||
(i==73 && j==1)) {
gxVirtualVirtual(&vhptr,1,2,12,11,&vhptr,198,153,gxSET); /* Mars */
MarFlag=1; } else MarFlag=0;
if ((i==60 || i==64 || i==68 || i==72 || i==73 || i==77)) {
gxVirtualVirtual(&vhptr,14,2,64,54,&vhptr,214,130,gxSET); /* Jup */
JupFlag=1; } else JupFlag=0;
if (i==61 || i==66 || i==72) {
gxVirtualVirtual(&vhptr,66,2,114,53,&vhptr,266,135,gxSET); /* Sat */
SatFlag=1; } else SatFlag=0;
RectFill(1,1,318,21,3);RectFill(317,22,318,198,3);RectFill(1,197,316,198,3);
RectFill(1,22,2,196,3);OutBox(0,0,319,199);InBox(3,3,30,19);
InBox(3,22,316,196);
IOBox(242,3,315,19);
ShBox(5,24,183,47);
ShBox(5,24,201,47); //name box
ShBox(5,74,41,82); // RESET
ShBox(5,49,53,72); //dur/man
ShBox(43,74,53,82); // lock
ShBox(80,74,90,82);
ShBox(117,74,127,82);
ShBox(154,74,164,82);
ShBox(191,74,201,82);
ShBox(5,84,16,130); //arrows up
ShBox(5,132,16,146); //middle box
ShBox(5,148,16,194); // down
ShBox(203,24,238,31); // new right boxes
RectFill(206,36,235,44,7);
ShBox(203,33,238,47);
InBox(205,35,236,45);
UPArrow(8,95);DNArrow(8,157);
gxVirtualDisplay(&vh,140,5,5,132,15,146,0);
Toggle(5,1);draw_Pie(0);OutBox(5,49,53,72);
Toggle(1,1);TogBox(55,49,0);
Toggle(2,1);TogBox(92,49,0);
Toggle(3,1);TogBox(129,49,0);
FMen=F1=F2=F3=F4=F5=0;
for (i=1;i<4;i++){
if (status[i]!=0) {
Toggle(i,1);
}
};
if (JointFlag==0) {
F4=2;lck[4]=1;
Toggle(4,1);
InBox(191,74,201,82);
PlaceRX(5);
TogBox(166,49,1);
}
else {
F4=0; lck[4]=0;
status[4]=0;
Toggle(4,1);
OutBox(191,74,201,82);
ClearRX(5);
TogBox(166,49,0);
};
gr_sync ();
Missions(plr,8,37,mis,1);
GetMinus(plr);
grSetColor(5);
/* lines of text are 1:8,30 2:8,37 3:8,44 */
switch(pad) { // These used to say Pad 1, 2, 3 -Leon
case 0: PrintAt(8,30,"PAD A:");break;
case 1: PrintAt(8,30,"PAD B:");break;
case 2: PrintAt(8,30,"PAD C:");break;
default:break;
};
grSetColor(1);
PrintAt(9,80,"RESET");
PrintAt(256,13,"CONTINUE");
grSetColor(11);
if (Data->Season==0) PrintAt(200,9,"SPRING");
else PrintAt(205,9,"FALL");
PrintAt(206,16,"19");
DispNum(0,0,Data->Year);
grSetColor(1);
FlagSm(plr,4,4);
DispBig(40,5,"FUTURE MISSIONS",0,-1);
FadeIn(2,pal,10,0,0);
return;
}
void ClearDisplay(void)
{
    /* Restore the three dynamic screen regions from the background image
       held in vhptr, then reset the drawing colour. */
    static const int region[3][6] = {
	{ 202, 48, 202, 48, 241,  82 },
	{  17, 83,  17, 83, 241, 195 },
	{ 242, 23, 242, 23, 315, 195 },
    };
    int i;

    for (i = 0; i < 3; i++)
	gxVirtualDisplay(&vhptr, region[i][0], region[i][1], region[i][2],
			 region[i][3], region[i][4], region[i][5], 0);
    grSetColor(1);
    return;
}
/* Display the prestige penalty for player 'plr': choose the art strip
 * (source y offset 1 = ok, 10 = caution, 19 = danger) by the penalty's
 * size, then print the value prefixed with '-' when it is non-zero.
 * Always returns 0. */
int GetMinus(char plr)
{
    char i;int u;
    i=PrestMin(plr);
    RectFill(206,36,235,44,7);	/* clear the number area */
    if (i<3) u=1; //ok
    else if (i<9) u=10; //caution
    else u=19; //danger
    gxVirtualDisplay(&vh,203,u,203,24,238,31,0);
    grSetColor(11);
    if (i>0) PrintAt(210,42,"-");
    else grMoveTo(210,42);
    DispNum(0,0,i);
    grSetColor(1);
    return 0;
}
/* Read all 62 mission definitions from MISSION.DAT and cache the fields
 * this screen filters on (docking/EVA/LEM/joint/duration/crew) in V[]. */
void SetParameters(void)
{
    int i;
    FILE *fin;
    /* NOTE(review): neither sOpen nor fread results are checked. */
    fin=sOpen("MISSION.DAT","rb",0);
    for (i=0;i<62;i++) {
	fread(&Mis,sizeof Mis,1,fin);
	V[i].A=Mis.Doc; V[i].B=Mis.EVA;
	V[i].C=Mis.LM; V[i].D=Mis.Jt;
	V[i].E=Mis.Days; V[i].X=Mis.mCrew;
	V[i].Z=Mis.Dur;
    }
    fclose(fin);
    return;
}
void DrawLocks(void)
{
    /* Mirror the five lck[] flags onto the lock buttons (1-based slots). */
    int slot;

    for (slot = 0; slot < 5; slot++) {
	if (lck[slot] == 1)
	    PlaceRX(slot + 1);
	else
	    ClearRX(slot + 1);
    }
    return;
}
/** set the toggles???
*
* \param wh the button
* \param i in or out
*/
void Toggle(int wh,int i)
{
    TRACE3("->Toggle(wh %d, i %d)", wh, i);
    /* Draw toggle button 'wh' in state 'i' by blitting one of its two
       sprite variants from the button sheet 'vh' onto the screen.
       Buttons 1-4 are the filter toggles drawn inside TogBox frames;
       button 5 is the duration button (see DrawFuture/PianoKey). */
    switch(wh)
    {
	case 1:if (i==1) gxVirtualDisplay(&vh,1,21,55,49,89,81,0);else
		gxVirtualDisplay(&vh,1,56,55,49,89,81,0); break;
	case 2:if(i==1) gxVirtualDisplay(&vh,38,21,92,49,127,81,0);else
		gxVirtualDisplay(&vh,38,56,92,49,127,81,0); break;
	case 3:if(i==1) gxVirtualDisplay(&vh,75,21,129,49,163,81,0);else
		gxVirtualDisplay(&vh,75,56,129,49,163,81,0); break;
	case 4:if(i==1) gxVirtualDisplay(&vh,112,21,166,49,200,81,0);else
		gxVirtualDisplay(&vh,112,56,166,49,200,81,0); break;
	case 5:if (i==1) gxVirtualDisplay(&vh,153,1,5,49,52,71,0);
	       else gxVirtualDisplay(&vh,153,26,5,49,52,71,0); break;
	default:break;
    }
    TRACE1("<-Toggle()");
    return;
}
void TogBox(int x,int y,int st)
{
    TRACE4("->TogBox(x %d, y %d, st %d)", x, y, st);
    /* Draw the outline of a toggle-button frame at (x,y).  'st' swaps
       the two edge colours (2 and 4) -- presumably giving the frame a
       pressed vs. released look; confirm against the art. */
    char sta[2][2]={{2,4},{4,2}};
    grSetColor(sta[st][0]);
    /* top-left edge */
    grMoveTo(0+x,y+32);grLineTo(0+x,y+0);grLineTo(34+x,y+0);
    grSetColor(sta[st][1]);
    /* bottom-right edge, stepping around the notch at (23,23) */
    grMoveTo(x+0,y+33);grLineTo(23+x,y+33);grLineTo(23+x,y+23);
    grLineTo(x+35,y+23);grLineTo(x+35,y+0);
    TRACE1("<-TogBox()");
    return;
}
/* Update the five toggle buttons so they reflect mission X's
 * requirements (V[X]), honouring any user locks: F1-F4 zero means the
 * corresponding toggle is unlocked and follows the mission. */
void PianoKey(int X)
{
    TRACE2("->PianoKey(X %d)", X);
    int t;
    if (F1==0) {
	if (V[X].A==1) {Toggle(1,1);status[1]=1;}
	else {Toggle(1,0);PlaceRX(1);status[1]=0;}}
    if (F2==0) {
	if (V[X].B==1) {Toggle(2,1);status[2]=1;}
	else {Toggle(2,0);PlaceRX(2);status[2]=0;}}
    if (F3==0) {
	if (V[X].C==1) {Toggle(3,1);status[3]=1;}
	else {Toggle(3,0);PlaceRX(3);status[3]=0;}}
    if (F4==0) {
	if (V[X].D==1) {Toggle(4,0);status[4]=1;}
	else {Toggle(4,1);status[4]=0; }}
    /* Duration pie: F5==-1 locks "no duration"; F5==0 follows the
       mission's preset V[X].E; any other F5 value wins over the preset. */
    if (F5==-1 || (F5==0 && V[X].E==0))
    {
	Toggle(5,0);
	status[0]=0;
    }
    else
    {
	Toggle(5,1);
	t=(F5==0) ? V[X].E : F5;
	assert(0 <= t);
	draw_Pie(t);
	status[0]=t;
    }
    DrawLocks();
    TRACE1("<-PianoKey()");
    return;
}
/** draw a piechart
*
* The piechart is indicating the number of astronauts on this mission.
*
* \param s something of an offset...
*/
void draw_Pie(int s)
{
    /* Slice sprites are packed every 20 pixels in the sheet; source
       offset 1 holds the empty pie used when s is zero. */
    int src_x = (s == 0) ? 1 : 20 * s;

    gxVirtualDisplay(&vh, src_x, 1, 7, 51, 25, 69, 0);
    return;
}
void PlaceRX(int s)
{
    /* Fill the lock indicator for slot s (1-5) with colour 8; any other
       value is ignored, matching the old switch's default case. */
    static const int left[5] = { 44, 81, 118, 155, 192 };

    if (s >= 1 && s <= 5)
	RectFill(left[s - 1], 75, left[s - 1] + 8, 81, 8);
    return;
}
void ClearRX(int s)
{
    /* Clear the lock indicator for slot s (1-5) back to the background
       colour 3; other values are ignored, as in the old default case. */
    static const int left[5] = { 44, 81, 118, 155, 192 };

    if (s >= 1 && s <= 5)
	RectFill(left[s - 1], 75, left[s - 1] + 8, 81, 3);
    return;
}
/* Search forward (wrapping past mission 56+plr) from 'num' for the next
 * mission matching the filters F1-F5 and the planet launch windows.
 * Each filter flag apparently means 0 = don't care, 1 = required,
 * 2 = forbidden (confirm against mission data).  Returns the mission
 * number found, or 0 after a full unsuccessful wrap-around. */
int UpSearchRout(int num,char plr)
{
    int found=0,orig,c1=0,c2=0,c3=0,c4=0,c5=0,c6=1,c7=1,c8=1;
    orig=num;
    if (num >= 56+plr) num=0;	/* wrap past the last mission */
    else num++;
    while (found==0)
    {
	c1=0;c2=0;c3=0;c4=0;c5=0;c6=1;c7=1;c8=1;
	if (F1==V[num].A) c1=1; /* condition one is true */
	if (F1==0 && V[num].A==1) c1=1;
	if (F1==2 && V[num].A==0) c1=1;
	if (F2==V[num].B) c2=1; /* condition two is true */
	if (F2==0 && V[num].B==1) c2=1;
	if (F2==2 && V[num].B==0) c2=1;
	if (F3==V[num].C) c3=1; /* condition three is true */
	if (F3==0 && V[num].C==1) c3=1;
	if (F3==2 && V[num].C==0) c3=1;
	if (F4==V[num].D) c4=1; /* condition four is true */
	if (F4==0 && V[num].D==1) c4=1;
	if (F4==2 && V[num].D==0) c4=1;
	/* Duration filter; mission 0 (none) always passes. */
	if (num==0) c5=1;
	else {
	    if (F5==-1 && V[num].Z==0 && V[num].E==0) c5=1;
	    if (F5==0) c5=1;
	    if (F5>1 && V[num].Z==1) c5=1;
	    if (F5==V[num].E) c5=1;
	};
	if ((num==32 || num==36) && F5==2) c5=0;
	// planet check
	if (num==10 && MarFlag==0) c6=0;
	if (num==12 && JupFlag==0) c7=0;
	if (num==13 && SatFlag==0) c8=0;
	if (c1 && c2 && c3 && c4 && c5 && c6 && c7 && c8) found=1;
	if (num==orig) return(0);	/* searched the whole list */
	if (found==0) {
	    if (num==56+plr) num=0;
	    else ++num;
	}
    }; /* end while */
    return(num);
}
/* Mirror of UpSearchRout: search backward (wrapping below 0 to mission
 * 56+plr) for the previous mission matching filters F1-F5 and the
 * planet launch windows.  Returns the mission number, or 0 after a full
 * unsuccessful wrap-around. */
int DownSearchRout(int num,char plr)
{
    int found=0,orig,c1=0,c2=0,c3=0,c4=0,c5=0,c6=1,c7=1,c8=1;
    orig=num;
    if (num<=0) num=56+plr;	/* wrap below the first mission */
    else --num;
    while (found==0)
    {
	c1=0;c2=0;c3=0;c4=0;c5=0;c6=1;c7=1;c8=1;
	if (F1==V[num].A) c1=1;
	if (F1==0 && V[num].A==1) c1=1; /* condition one is true */
	if (F1==2 && V[num].A==0) c1=1;
	if (F2==V[num].B) c2=1; /* condition two is true */
	if (F2==0 && V[num].B==1) c2=1; /* condition one is true */
	if (F2==2 && V[num].B==0) c2=1;
	if (F3==V[num].C) c3=1; /* condition three is true */
	if (F3==0 && V[num].C==1) c3=1; /* condition one is true */
	if (F3==2 && V[num].C==0) c3=1;
	if (F4==V[num].D) c4=1; /* condition four is true */
	if (F4==0 && V[num].D==1) c4=1; /* condition one is true */
	if (F4==2 && V[num].D==0) c4=1;
	if (num==0) c5=1;
	else {
	    if (F5==-1 && V[num].Z==0 && V[num].E==0) c5=1; // locked on zero duration
	    if (F5==0) c5=1; // nothing set
	    if (F5>1 && V[num].Z==1) c5=1; // set duration with duration mission
	    if (F5==V[num].E) c5=1; // the duration is equal to what is preset
	};
	if ((num==32 || num==36) && F5==2) c5=0;
	// planet check
	if (num==10 && MarFlag==0) c6=0;
	if (num==12 && JupFlag==0) c7=0;
	if (num==13 && SatFlag==0) c8=0;
	if (c1 && c2 && c3 && c4 && c5 && c6 && c7 && c8) found=1;
	if (num==orig) return(0);	/* searched the whole list */
	if (found==0) {
	    if (num==0) num=56+plr;
	    else --num;
	}
    }; /* end while */
    return(num);
}
void
Future(char plr)
{
/** \todo the whole Future()-function is 500 >lines and unreadable */
TRACE1("->Future(plr)");
int MisNum = 0, DuraType = 0, MaxDur = 6, i, ii;
int setting = -1, prev_setting = -1;
int Ok, NewType;
GXHEADER local, local2;
GV(&local, 166, 9);
GV(&local2, 177, 197);
GV(&vh,240,90); /* global variable */
begfut:
MisNum = FutureCheck(plr, 0);
if (MisNum == 5)
{
DV(&local);
DV(&local2);
DV(&vh);
return;
}
F1 = F2 = F3 = F4 = FMen = F5 = 0;
// memset(buffer, 0x00, 20000);
for (i = 0; i < 5; i++)
lck[i] = status[i] = 0;
SetParameters();
strcpy(IDT, "i011");
Pad = MisNum;
DuraType = FMen = MisType = 0;
ClrFut(plr, MisNum);
DrawFuture(plr, MisType, MisNum);
begfut_noredraw:
// for (i=0;i<5;i++) ClearRX(i+1);
while (1)
{
GetMouse();
if (mousebuttons == 0)
break;
}
while (1)
{
GetMouse();
prev_setting = setting;
setting = -1;
if (key == '-' && SEG > 1)
SEG--;
if (key == '+' && SEG < 500)
SEG++;
if (key >= 65 && key < Bub_Count + 65)
setting = key - 65;
for (ii = 0; ii < Bub_Count; ii++)
{
if (x >= StepBub[ii].x_cor && x <= StepBub[ii].x_cor + 7
&& y >= StepBub[ii].y_cor && y <= StepBub[ii].y_cor + 7)
setting = ii;
}
if (setting >= 0)
{
if (prev_setting < 0)
gxGetImage(&local, 18, 186, 183, 194, 0);
if (prev_setting != setting)
{
ShBox(18, 186, 183, 194);
grSetColor(1);
MisStep(21, 192, Mev[setting].loc);
}
}
else if (setting < 0 && prev_setting >= 0)
{
gxPutImage(&local, gxSET, 18, 186, 0);
}
if (Mis.Dur <= V[MisType].E && ((x >= 244 && y >= 5 && x <= 313
&& y <= 17 && mousebuttons > 0) || key == K_ENTER))
{
InBox(244, 5, 313, 17);
WaitForMouseUp();
if (key > 0)
delay(300);
key = 0;
OutBox(244, 5, 313, 17);
gxGetImage(&local2, 74, 3, 250, 199, 0);
NewType = V[MisType].X;
Data->P[plr].Future[MisNum].Duration = DuraType;
Ok = HardCrewAssign(plr, Pad, MisType, NewType);
gxPutImage(&local2, gxSET, 74, 3, 0);
// DV(&local2);
if (Ok == 1)
{
Data->P[plr].Future[MisNum].Duration = DuraType;
goto begfut; // return to loop
}
else
{
ClrFut(plr, MisNum);
// DuraType = FMen = MisType = 0;
key = 0;
goto begfut_noredraw;
// DrawFuture(plr, MisType, MisNum);
}
key = 0;
};
// continue
if ((((x >= 5 && y >= 49 && x <= 53 && y <= 72) || (x >= 43
&& y >= 74 && x <= 53 && y <= 82))
&& mousebuttons > 0) || (key == '!' || key == '1'))
{
if ((x >= 43 && y >= 74 && x <= 53 && y <= 82) || key == '!')
{
lck[0] = abs(lck[0] - 1);
if (lck[0] == 1)
InBox(43, 74, 53, 82);
else
OutBox(43, 74, 53, 82);
if (lck[0] == 1)
F5 = (status[0] == 0) ? -1 : status[0];
if (lck[0] == 1)
PlaceRX(1);
else
ClearRX(1);
if (lck[0] == 0)
{
F5 = 0;
status[0] = 0;
}
while (1)
{
GetMouse();
if (mousebuttons == 0)
break;
}
}
else if (lck[0] != 1)
{
InBox(5, 49, 53, 72);
if (DuraType == MaxDur)
DuraType = 0;
else
DuraType++;
Data->P[plr].Future[MisNum].Duration = DuraType;
if (DuraType == 0)
Toggle(5, 0);
else if (DuraType == 1)
Toggle(5, 1);
if (DuraType != 0)
draw_Pie(DuraType);
status[0] = DuraType;
while (1)
{
GetMouse();
if (mousebuttons == 0)
break;
}
grSetColor(34);
OutBox(5, 49, 53, 72);
};
key = 0;
/* Duration */
};
if ((x >= 5 && y >= 74 && x <= 41 && y <= 82 && mousebuttons > 0)
|| (key == K_ESCAPE))
{
InBox(5, 74, 41, 82);
while (1)
{
GetMouse();
if (mousebuttons == 0)
break;
}
MisType = 0;
if (DuraType != 0)
Toggle(5, 0);
FMen = DuraType = F1 = F2 = F3 = F4 = F5 = 0;
for (i = 1; i < 4; i++)
if (status[i] != 0)
Toggle(i, 1);
if (JointFlag == 0)
{
F4 = 2;
lck[4] = 1;
Toggle(4, 1);
InBox(191, 74, 201, 82);
PlaceRX(5);
TogBox(166, 49, 1);
}
else
{
F4 = 0;
lck[4] = 0;
status[4] = 0;
Toggle(4, 1);
OutBox(191, 74, 201, 82);
ClearRX(5);
TogBox(166, 49, 0);
};
for (i = 0; i < 4; i++)
{
lck[i] = status[i] = 0;
}
OutBox(5, 49, 53, 72);
OutBox(43, 74, 53, 82);
TogBox(55, 49, 0);
OutBox(80, 74, 90, 82);
TogBox(92, 49, 0);
OutBox(117, 74, 127, 82);
TogBox(129, 49, 0);
OutBox(154, 74, 164, 82);
ClrFut(plr, MisNum);
Data->P[plr].Future[MisNum].Duration = 0;
Missions(plr, 8, 37, MisType, 1);
GetMinus(plr);
OutBox(5, 74, 41, 82);
key = 0;
/* Reset */
};
if ((x >= 55 && y >= 49 && x <= 90 && y <= 82 && mousebuttons > 0)
|| (key == '2' || key == '@'))
{
if ((x >= 80 && y >= 74 && x <= 90 && y <= 82) || (key == '@'))
{
if (lck[1] == 0)
InBox(80, 74, 90, 82);
else
OutBox(80, 74, 90, 82);
lck[1] = abs(lck[1] - 1);
if (lck[1] == 1)
PlaceRX(2);
else
ClearRX(2);
if ((status[1] == 0) && (lck[1] == 1))
F1 = 2;
else if ((status[1] == 1) && (lck[1] == 1))
F1 = 1;
else
F1 = 0;
while (1)
{
GetMouse();
if (mousebuttons == 0)
break;
}
}
else if (lck[1] != 1)
{
TogBox(55, 49, 1);
if (status[1] == 0)
Toggle(1, 1);
else
Toggle(1, 0);
status[1] = abs(status[1] - 1);
while (1)
{
GetMouse();
if (mousebuttons == 0)
break;
}
TogBox(55, 49, 0);
}; /* Docking */
key = 0;
};
if ((x >= 92 && y >= 49 && x <= 127 && y <= 82 && mousebuttons > 0)
|| (key == '3' || key == '#'))
{
if ((x >= 117 && y >= 74 && x <= 127 && y <= 82) || (key == '#'))
{
if (lck[2] == 0)
InBox(117, 74, 127, 82);
else
OutBox(117, 74, 127, 82);
lck[2] = abs(lck[2] - 1);
if (lck[2] == 1)
PlaceRX(3);
else
ClearRX(3);
if ((status[2] == 0) && (lck[2] == 1))
F2 = 2;
else if ((status[2] == 1) && (lck[2] == 1))
F2 = 1;
else
F2 = 0;
while (1)
{
GetMouse();
if (mousebuttons == 0)
break;
}
}
else if (lck[2] != 1)
{
TogBox(92, 49, 1);
if (status[2] == 0)
Toggle(2, 1);
else
{
Toggle(2, 0);
};
status[2] = abs(status[2] - 1);
while (1)
{
GetMouse();
if (mousebuttons == 0)
break;
}
TogBox(92, 49, 0);
}; /* EVA */
key = 0;
};
if ((x >= 129 && y >= 49 && x <= 164 && y <= 82 && mousebuttons > 0)
|| (key == '4' || key == '$'))
{
if ((x >= 154 && y >= 74 && x <= 164 && y <= 82) || (key == '$'))
{
if (lck[3] == 0)
InBox(154, 74, 164, 82);
else
OutBox(154, 74, 164, 82);
lck[3] = abs(lck[3] - 1); // F3=lck[3];
if (lck[3] == 1)
PlaceRX(4);
else
ClearRX(4);
if ((status[3] == 0) && (lck[3] == 1))
F3 = 2;
else if ((status[3] == 1) && (lck[3] == 1))
F3 = 1;
else
F3 = 0;
while (1)
{
GetMouse();
if (mousebuttons == 0)
break;
}
}
else if (lck[3] != 1)
{
TogBox(129, 49, 1);
if (status[3] == 0)
Toggle(3, 1);
else
{
Toggle(3, 0);
};
status[3] = abs(status[3] - 1);
while (1)
{
GetMouse();
if (mousebuttons == 0)
break;
}
TogBox(129, 49, 0);
}; /* LEM */
key = 0;
};
if (((x >= 166 && y >= 49 && x <= 201 && y <= 82 && mousebuttons > 0)
|| (key == '5' || key == '%')) && (JointFlag == 1))
{
if ((x > 191 && y >= 74 && x <= 201 && y <= 82) || (key == '%'))
{
if (lck[4] == 0)
InBox(191, 74, 201, 82);
else
OutBox(191, 74, 201, 82);
lck[4] = abs(lck[4] - 1);
if (lck[4] == 1)
PlaceRX(5);
else
ClearRX(5);
if ((status[4] == 0) && (lck[4] == 1))
F4 = 2;
else if ((status[4] == 1) && (lck[4] == 1))
F4 = 1;
else
F4 = 0;
while (1)
{
GetMouse();
if (mousebuttons == 0)
break;
}
}
else if (lck[4] != 1)
{
TogBox(166, 49, 1);
status[4] = abs(status[4] - 1);
if (status[4] == 0)
{
Toggle(4, 1);
}
else
{
Toggle(4, 0);
}
while (1)
{
GetMouse();
if (mousebuttons == 0)
break;
}
TogBox(166, 49, 0);
}; /* Joint Launch */
key = 0;
};
if ((x >= 5 && y >= 84 && x <= 16 && y <= 130 && mousebuttons > 0)
|| (key == UP_ARROW))
{
InBox(5, 84, 16, 130);
for (i = 0; i < 50; i++)
{
key = 0;
GetMouse();
delay(10);
if (mousebuttons == 0)
{
MisType = UpSearchRout(MisType, plr);
Data->P[plr].Future[MisNum].MissionCode = MisType;
i = 51;
}
}
while (mousebuttons == 1 || key == UP_ARROW)
{
MisType = UpSearchRout(MisType, plr);
Data->P[plr].Future[MisNum].MissionCode = MisType;
Missions(plr, 8, 37, MisType, 3);
DuraType = status[0];
delay(100);
key = 0;
GetMouse();
}
Missions(plr, 8, 37, MisType, 3);
DuraType = status[0];
OutBox(5, 84, 16, 130);
key = 0;
/* Mission Type plus */
};
if ((x >= 5 && y >= 132 && x < 16 && y <= 146 && mousebuttons > 0)
|| (key == K_SPACE))
{
InBox(5, 132, 16, 146);
WaitForMouseUp();
delay(50);
MisType = Data->P[plr].Future[MisNum].MissionCode;
assert(0 <= MisType);
if (MisType != 0){
Missions(plr, 8, 37, MisType, 1);
}
else{
Missions(plr, 8, 37, MisType, 3);
}
OutBox(5, 132, 16, 146);
key = 0;
}
if ((x >= 5 && y >= 148 && x <= 16 && y <= 194 && mousebuttons > 0)
|| (key == DN_ARROW))
{
InBox(5, 148, 16, 194);
for (i = 0; i < 50; i++)
{
key = 0;
GetMouse();
delay(10);
if (mousebuttons == 0)
{
MisType = DownSearchRout(MisType, plr);
Data->P[plr].Future[MisNum].MissionCode = MisType;
i = 51;
}
key = 0;
}
while (mousebuttons == 1 || key == DN_ARROW)
{
MisType = DownSearchRout(MisType, plr);
Data->P[plr].Future[MisNum].MissionCode = MisType;
Missions(plr, 8, 37, MisType, 3);
DuraType = status[0];
delay(100);
key = 0;
GetMouse();
}
Missions(plr, 8, 37, MisType, 3);
DuraType = status[0];
OutBox(5, 148, 16, 194);
key = 0;
/* Mission Type minus */
};
} // while
TRACE1("<-Future()");
}
/** draws the bubble on the screen,
* starts with upper left coor
*
* \param x x-coord of the upper left corner of the bubble
* \param y y-coord of the upper left corner of the bubble
*/
void Bd(int x,int y)
{
    /* Two overlapping filled rectangles (colour 21) form the bubble
       shape; the bubble letter is the next free slot in StepBub[]. */
    int left = x - 2;
    int top = y;

    RectFill(left, top, left + 8, top + 4, 21);
    RectFill(x - 1, y - 1, x + 5, y + 5, 21);
    grSetColor(1);
    grMoveTo(x, y + 4);
    DispChr(65 + Bub_Count);		/* 'A' + index */
    StepBub[Bub_Count].x_cor = left;	/* remember hit-test position */
    StepBub[Bub_Count].y_cor = top;
    ++Bub_Count;
    return;
}
/** Print the duration of a mission
*
* \param x duration code
*
* \todo Link this at whatever place the duration is actually defined
*/
void DurPri(int x)
{
grSetColor(5);
switch(x)
{
case -1:PrintAt(112,30,"NO DURATION");break;
case 0:PrintAt(112,30,"NO DURATION");break;
case 1:PrintAt(112,30,"1 - 2 DAYS");break;
case 2:PrintAt(112,30,"3 - 5 DAYS");break;
case 3:PrintAt(112,30,"6 - 7 DAYS");break;
case 4:PrintAt(112,30,"8 - 12 DAYS");break;
case 5:PrintAt(112,30,"13 - 16 DAYS");break;
case 6:PrintAt(112,30,"17 - 20 DAYS");break;
default:break;
};
return;
}
/* Print mission 'val's name at (xx,yy), wrapping onto a new 7-pixel-high
 * line at the first space once more than 'len' characters have been
 * emitted on the current line.  Reads the name via GetMisType into the
 * global Mis. */
void MissionName(int val,int xx,int yy,int len)
{
    TRACE5("->MissionName(val %d, xx %d, yy %d, len %d)", val, xx, yy, len);
    int i,j=0;
    GetMisType(val);
    grMoveTo(xx,yy);
    for (i=0;i<50;i++) {
	if (j>len && Mis.Name[i]==' ') {yy+=7;j=0;grMoveTo(xx,yy);}
	else DispChr(Mis.Name[i]);
	j++;if (Mis.Name[i]=='\0') break;
    };
    TRACE1("<-MissionName");
    return;
}
/** Missions() will draw the future missions among other things
*
* \param plr Player
* \param X screen coord for mission name string
* \param Y screen coord for mission name string
* \param val mission number
* \param bub if set to 0 or 3 the function will not draw stuff
*/
void Missions(char plr,int X,int Y,int val,char bub)
{
TRACE5("->Missions(plr, X %d, Y %d, val %d, bub %c)", X, Y, val, bub);
if (bub==1 || bub==3) {
PianoKey(val);
Bub_Count=0; // set the initial bub_count
ClearDisplay();
RectFill(6,31,182,46,3);
RectFill(80,25,175,30,3);grSetColor(5);
PrintAt(55,30,"TYPE: ");DispNum(0,0,val);
grSetColor(5);
if (V[val].E>0) {
if (F5 > V[val].E && Mis.Dur==1) DurPri(F5);
else DurPri(V[val].E);}
else DurPri(F5);
} else grSetColor(1);
MissionName(val,X,Y,24);
if (bub==3) GetMinus(plr);
if (bub==0 || bub==3) {return;}
MSteps=sOpen("missSteps.dat","r",FT_DATA);
if (fgets(missStep, 1024, MSteps) == NULL)
memset (missStep, 0, sizeof missStep);
while (!feof(MSteps)&&((missStep[0]-0x30)*10+(missStep[1]-0x30))!=val) {
if (fgets(missStep, 1024, MSteps) == NULL)
break;
}
fclose(MSteps);
int n;
for (n=2;missStep[n]!='Z';n++)
switch (missStep[n]) {
case 'A': Draw_IJ (B_Mis(++n)); break;
case 'B': Draw_IJV (B_Mis(++n)); break;
case 'C': OrbOut (B_Mis(n+1),B_Mis(n+2),B_Mis(n+3)); n+=3; break;
case 'D': LefEarth (B_Mis(n+1),B_Mis(n+2)); n+=2; break;
case 'E': OrbIn (B_Mis(n+1),B_Mis(n+2),B_Mis(n+3)); n+=3; break;
case 'F': OrbMid (B_Mis(n+1),B_Mis(n+2),B_Mis(n+3),B_Mis(n+4)); n+=4; break;
case 'G': LefOrb (B_Mis(n+1),B_Mis(n+2),B_Mis(n+3),B_Mis(n+4)); n+=4; break;
case 'H': Draw_LowS (B_Mis(n+1),B_Mis(n+2),B_Mis(n+3),B_Mis(n+4),B_Mis(n+5),B_Mis(n+6)); n+=6; break;
case 'I': Fly_By (); break;
case 'J': VenMarMerc (B_Mis(++n)); break;
case 'K': Draw_PQR (); break;
case 'L': Draw_PST (); break;
case 'M': Draw_GH (B_Mis(n+1),B_Mis(n+2)); n+=2; break;
case 'N': Q_Patch (); break;
case 'O': RghtMoon (B_Mis(n+1),B_Mis(n+2)); n+=2; break;
case 'P': DrawLunPas (B_Mis(n+1),B_Mis(n+2),B_Mis(n+3),B_Mis(n+4)); n+=4; break;
case 'Q': DrawLefMoon (B_Mis(n+1),B_Mis(n+2)); n+=2; break;
case 'R': DrawSTUV (B_Mis(n+1),B_Mis(n+2),B_Mis(n+3),B_Mis(n+4)); n+=4; break;
case 'S': Draw_HighS (B_Mis(n+1),B_Mis(n+2),B_Mis(n+3)); n+=3; break;
case 'T': DrawMoon (B_Mis(n+1),B_Mis(n+2),B_Mis(n+3),B_Mis(n+4),B_Mis(n+5),B_Mis(n+6),B_Mis(n+7)); n+=7; break;
case 'U': LefGap (B_Mis(++n)); break;
case 'V': S_Patch (B_Mis(++n)); break;
case 'W': DrawZ (); break;
default : break;
}
gr_sync ();
MissionCodes(plr,MisType,Pad);
TRACE1("<-Missions()");
} // end function missions
#ifdef DEAD_CODE
/** Draws stuff about choosing a program and having < 2 groups available
*
* \deprecated This function appears to be deprecated.
*/
char FutBad(void)
{
char i;
grSetColor(0);
ShBox(84,41,232,128);
InBox(91,47,225,103);
IOBox(91,107,225,123);
grSetColor(1);
PrintAt(150,117,"EXIT");
grSetColor(11);
PrintAt(96,60,"YOU HAVE SELECTED A");
PrintAt(96,70,"PROGRAM WITH LESS THAN");
PrintAt(96,80,"TWO GROUPS AVAILABLE.");
WaitForMouseUp();
i=0;
while(i==0) {
GetMouse();
if (mousebuttons!=0) {
if (x>=93 && y>=109 && x<=223 && y<=121) {
InBox(93,109,223,123);i=3;
delay(50);
};
};
}; /* End while */
return (i);
}
#endif
/* vim: set noet ts=4 sw=4 tw=77: */
|
raceintospace/raceintospace-cvs
|
future.c
|
C
|
gpl-2.0
| 27,931
|
// Copyright (C) 1999-2000 Id Software, Inc.
//
#include "ui_local.h"
/*********************************************************************************
SPECIFY SERVER
*********************************************************************************/
#define MAX_LISTBOXITEMS 128
#define MAX_LISTBOXWIDTH 40
#define MAX_LEAGUENAME 80
#define SPECIFYLEAGUE_FRAMEL "menu/art/frame2_l"
#define SPECIFYLEAGUE_FRAMER "menu/art/frame1_r"
#define SPECIFYLEAGUE_BACK0 "menu/art/back_0"
#define SPECIFYLEAGUE_BACK1 "menu/art/back_1"
#define SPECIFYLEAGUE_ARROWS0 "menu/art/arrows_vert_0"
#define SPECIFYLEAGUE_UP "menu/art/arrows_vert_top"
#define SPECIFYLEAGUE_DOWN "menu/art/arrows_vert_bot"
#define GLOBALRANKINGS_LOGO "menu/art/gr/grlogo"
#define GLOBALRANKINGS_LETTERS "menu/art/gr/grletters"
#define ID_SPECIFYLEAGUENAME 100
#define ID_SPECIFYLEAGUELIST 101
#define ID_SPECIFYLEAGUEUP 102
#define ID_SPECIFYLEAGUEDOWN 103
#define ID_SPECIFYLEAGUEBACK 104
static char* specifyleague_artlist[] =
{
SPECIFYLEAGUE_FRAMEL,
SPECIFYLEAGUE_FRAMER,
SPECIFYLEAGUE_ARROWS0,
SPECIFYLEAGUE_UP,
SPECIFYLEAGUE_DOWN,
SPECIFYLEAGUE_BACK0,
SPECIFYLEAGUE_BACK1,
GLOBALRANKINGS_LOGO,
GLOBALRANKINGS_LETTERS,
NULL
};
/* Player name used by the most recent league search; compared against the
   rank-name field to detect when a new search is needed. */
static char playername[80];
/* All widgets making up the "choose league" menu screen. */
typedef struct
{
menuframework_s menu;
menutext_s banner;
menubitmap_s framel;
menubitmap_s framer;
menufield_s rankname;
menulist_s list;
menubitmap_s arrows;
menubitmap_s up;
menubitmap_s down;
menubitmap_s back;
menubitmap_s grlogo;
menubitmap_s grletters;
} specifyleague_t;
static specifyleague_t s_specifyleague;
/* One row of the league list box: display text (possibly truncated to the
   list-box width) plus the full league name written to sv_leagueName. */
typedef struct {
char buff[MAX_LISTBOXWIDTH];
char leaguename[MAX_LEAGUENAME];
} table_t;
table_t league_table[MAX_LISTBOXITEMS];
/* Pointer table handed to the scroll-list widget; each entry aliases
   league_table[i].buff (wired up in SpecifyLeague_MenuInit). */
char *leaguename_items[MAX_LISTBOXITEMS];
static void SpecifyLeague_GetList()
{
int count = 0;
int i;
/* The Player Name has changed. We need to perform another search */
Q_strncpyz( playername,
s_specifyleague.rankname.field.buffer,
sizeof(playername) );
count = trap_CL_UI_RankGetLeauges( playername );
for(i = 0; i < count; i++)
{
char s[MAX_LEAGUENAME];
const char *var;
var = va( "leaguename%i", i+1 );
trap_Cvar_VariableStringBuffer( var, s, sizeof(s) );
Q_strncpyz(league_table[i].leaguename, s, sizeof(league_table[i].leaguename) );
Q_strncpyz(league_table[i].buff, league_table[i].leaguename, sizeof(league_table[i].buff) );
}
s_specifyleague.list.numitems = count;
}
/*
=================
SpecifyLeague_Event

Shared widget callback for the whole menu; dispatches on the widget id.
=================
*/
static void SpecifyLeague_Event( void* ptr, int event )
{
	int id = ((menucommon_s*)ptr)->id;

	switch ( id )
	{
	case ID_SPECIFYLEAGUELIST:
		/* no focus handling needed for the list yet */
		break;

	case ID_SPECIFYLEAGUEUP:
		if ( event == QM_ACTIVATED ) {
			ScrollList_Key( &s_specifyleague.list, K_UPARROW );
		}
		break;

	case ID_SPECIFYLEAGUEDOWN:
		if ( event == QM_ACTIVATED ) {
			ScrollList_Key( &s_specifyleague.list, K_DOWNARROW );
		}
		break;

	case ID_SPECIFYLEAGUENAME:
		/* the name field lost focus with different text: redo the search */
		if ( event == QM_LOSTFOCUS &&
			 Q_strncmp( playername,
						s_specifyleague.rankname.field.buffer,
						strlen(s_specifyleague.rankname.field.buffer) ) != 0 )
		{
			SpecifyLeague_GetList();
		}
		break;

	case ID_SPECIFYLEAGUEBACK:
		if ( event == QM_ACTIVATED )
		{
			/* publish the selected league and leave the menu */
			trap_Cvar_Set( "sv_leagueName", league_table[s_specifyleague.list.curvalue].leaguename );
			UI_PopMenu();
		}
		break;
	}
}
/*
=================
SpecifyLeague_MenuInit

Builds the "choose league" screen from scratch: zeroes the state, precaches
art, lays out every widget at fixed screen coordinates, then seeds the
player name from the "name" cvar and runs the initial league search.
=================
*/
void SpecifyLeague_MenuInit( void )
{
int i;
// zero set all our globals
memset( &s_specifyleague, 0 ,sizeof(specifyleague_t) );
SpecifyLeague_Cache();
s_specifyleague.menu.wrapAround = qtrue;
s_specifyleague.menu.fullscreen = qtrue;
// banner across the top of the screen
s_specifyleague.banner.generic.type = MTYPE_BTEXT;
s_specifyleague.banner.generic.x = 320;
s_specifyleague.banner.generic.y = 16;
s_specifyleague.banner.string = "CHOOSE LEAGUE";
s_specifyleague.banner.color = color_white;
s_specifyleague.banner.style = UI_CENTER;
// decorative side frames
s_specifyleague.framel.generic.type = MTYPE_BITMAP;
s_specifyleague.framel.generic.name = SPECIFYLEAGUE_FRAMEL;
s_specifyleague.framel.generic.flags = QMF_INACTIVE;
s_specifyleague.framel.generic.x = 0;
s_specifyleague.framel.generic.y = 78;
s_specifyleague.framel.width = 256;
s_specifyleague.framel.height = 334;
s_specifyleague.framer.generic.type = MTYPE_BITMAP;
s_specifyleague.framer.generic.name = SPECIFYLEAGUE_FRAMER;
s_specifyleague.framer.generic.flags = QMF_INACTIVE;
s_specifyleague.framer.generic.x = 376;
s_specifyleague.framer.generic.y = 76;
s_specifyleague.framer.width = 256;
s_specifyleague.framer.height = 334;
// Global Rankings logo (top-left corner)
s_specifyleague.grlogo.generic.type = MTYPE_BITMAP;
s_specifyleague.grlogo.generic.name = GLOBALRANKINGS_LOGO;
s_specifyleague.grlogo.generic.flags = QMF_INACTIVE;
s_specifyleague.grlogo.generic.x = 0;
s_specifyleague.grlogo.generic.y = 0;
s_specifyleague.grlogo.width = 64;
s_specifyleague.grlogo.height = 128;
// editable player-name field; losing focus triggers a new search
s_specifyleague.rankname.generic.type = MTYPE_FIELD;
s_specifyleague.rankname.generic.name = "Player Name:";
s_specifyleague.rankname.generic.flags = QMF_PULSEIFFOCUS|QMF_SMALLFONT;
s_specifyleague.rankname.generic.callback = SpecifyLeague_Event;
s_specifyleague.rankname.generic.id = ID_SPECIFYLEAGUENAME;
s_specifyleague.rankname.generic.x = 226;
s_specifyleague.rankname.generic.y = 128;
s_specifyleague.rankname.field.widthInChars = 32;
s_specifyleague.rankname.field.maxchars = 80;
// scrolling league list, backed by leaguename_items/league_table
s_specifyleague.list.generic.type = MTYPE_SCROLLLIST;
s_specifyleague.list.generic.flags = QMF_HIGHLIGHT_IF_FOCUS;
s_specifyleague.list.generic.id = ID_SPECIFYLEAGUELIST;
s_specifyleague.list.generic.callback = SpecifyLeague_Event;
s_specifyleague.list.generic.x = 160;
s_specifyleague.list.generic.y = 200;
s_specifyleague.list.width = MAX_LISTBOXWIDTH;
s_specifyleague.list.height = 8;
s_specifyleague.list.itemnames = (const char **)leaguename_items;
s_specifyleague.list.numitems = 0;
// point each list row at its backing buffer and clear the table
for( i = 0; i < MAX_LISTBOXITEMS; i++ ) {
league_table[i].buff[0] = 0;
league_table[i].leaguename[0] = 0;
leaguename_items[i] = league_table[i].buff;
}
// scroll arrows: inactive background plus clickable up/down halves
s_specifyleague.arrows.generic.type = MTYPE_BITMAP;
s_specifyleague.arrows.generic.name = SPECIFYLEAGUE_ARROWS0;
s_specifyleague.arrows.generic.flags = QMF_LEFT_JUSTIFY|QMF_INACTIVE;
s_specifyleague.arrows.generic.callback = SpecifyLeague_Event;
s_specifyleague.arrows.generic.x = 512;
s_specifyleague.arrows.generic.y = 240-64+16;
s_specifyleague.arrows.width = 64;
s_specifyleague.arrows.height = 128;
s_specifyleague.up.generic.type = MTYPE_BITMAP;
s_specifyleague.up.generic.flags = QMF_LEFT_JUSTIFY|QMF_PULSEIFFOCUS|QMF_MOUSEONLY;
s_specifyleague.up.generic.callback = SpecifyLeague_Event;
s_specifyleague.up.generic.id = ID_SPECIFYLEAGUEUP;
s_specifyleague.up.generic.x = 512;
s_specifyleague.up.generic.y = 240-64+16;
s_specifyleague.up.width = 64;
s_specifyleague.up.height = 64;
s_specifyleague.up.focuspic = SPECIFYLEAGUE_UP;
s_specifyleague.down.generic.type = MTYPE_BITMAP;
s_specifyleague.down.generic.flags = QMF_LEFT_JUSTIFY|QMF_PULSEIFFOCUS|QMF_MOUSEONLY;
s_specifyleague.down.generic.callback = SpecifyLeague_Event;
s_specifyleague.down.generic.id = ID_SPECIFYLEAGUEDOWN;
s_specifyleague.down.generic.x = 512;
s_specifyleague.down.generic.y = 240+16;
s_specifyleague.down.width = 64;
s_specifyleague.down.height = 64;
s_specifyleague.down.focuspic = SPECIFYLEAGUE_DOWN;
// back button: commits the current selection to sv_leagueName
s_specifyleague.back.generic.type = MTYPE_BITMAP;
s_specifyleague.back.generic.name = SPECIFYLEAGUE_BACK0;
s_specifyleague.back.generic.flags = QMF_LEFT_JUSTIFY|QMF_PULSEIFFOCUS;
s_specifyleague.back.generic.callback = SpecifyLeague_Event;
s_specifyleague.back.generic.id = ID_SPECIFYLEAGUEBACK;
s_specifyleague.back.generic.x = 0;
s_specifyleague.back.generic.y = 480-64;
s_specifyleague.back.width = 128;
s_specifyleague.back.height = 64;
s_specifyleague.back.focuspic = SPECIFYLEAGUE_BACK1;
Menu_AddItem( &s_specifyleague.menu, &s_specifyleague.banner );
Menu_AddItem( &s_specifyleague.menu, &s_specifyleague.framel );
Menu_AddItem( &s_specifyleague.menu, &s_specifyleague.framer );
Menu_AddItem( &s_specifyleague.menu, &s_specifyleague.grlogo );
Menu_AddItem( &s_specifyleague.menu, &s_specifyleague.rankname );
Menu_AddItem( &s_specifyleague.menu, &s_specifyleague.list );
Menu_AddItem( &s_specifyleague.menu, &s_specifyleague.arrows );
Menu_AddItem( &s_specifyleague.menu, &s_specifyleague.up );
Menu_AddItem( &s_specifyleague.menu, &s_specifyleague.down );
Menu_AddItem( &s_specifyleague.menu, &s_specifyleague.back );
// initialize any menu variables
Q_strncpyz( s_specifyleague.rankname.field.buffer,
UI_Cvar_VariableString("name"),
sizeof(s_specifyleague.rankname.field.buffer) );
Q_strncpyz( playername,
UI_Cvar_VariableString("name"),
sizeof(playername) );
SpecifyLeague_GetList();
}
/*
=================
SpecifyLeague_Cache
=================
*/
void SpecifyLeague_Cache( void )
{
int i;
// touch all our pics
for (i=0; ;i++)
{
if (!specifyleague_artlist[i])
break;
trap_R_RegisterShaderNoMip(specifyleague_artlist[i]);
}
}
/*
=================
UI_SpecifyLeagueMenu

Public entry point: rebuilds the menu state from scratch and pushes the
screen onto the menu stack.
=================
*/
void UI_SpecifyLeagueMenu( void )
{
SpecifyLeague_MenuInit();
UI_PushMenu( &s_specifyleague.menu );
}
|
tectronics/battle-of-the-sexes
|
reference/bots_q3_127/code/q3_ui/ui_specifyleague.c
|
C
|
gpl-2.0
| 9,959
|
/*
* Copyright (c) 2002-2009 BalaBit IT Ltd, Budapest, Hungary
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* Note that this permission is granted for only version 2 of the GPL.
*
* As an additional exemption you are allowed to compile & link against the
* OpenSSL libraries as published by the OpenSSL project. See the file
* COPYING for details.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "gprocess.h"
#include "misc.h"
#include "messages.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <time.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <termios.h>
#include <signal.h>
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pwd.h>
#include <grp.h>
#if ENABLE_LINUX_CAPS
# include <sys/capability.h>
# include <sys/prctl.h>
#endif
/*
* NOTES:
*
* * pidfile is created and removed by the daemon (e.g. the child) itself,
* the parent does not touch that
*
* * we communicate with the user using stderr (using fprintf) as long as it
* is available and using syslog() afterwards
*
* * there are 3 processes involved in safe_background mode (e.g. auto-restart)
* - startup process which was started by the user (zorpctl)
* - supervisor process which automatically restarts the daemon when it exits abnormally
* - daemon processes which perform the actual task at hand
*
* The startup process delivers the result of the first startup to its
* caller, if we can deliver a failure in this case then restarts will not
* be performed (e.g. if the first startup fails, the daemon will not be
* restarted even if auto-restart was enabled). After the first successful
* start, the startup process exits (delivering that startup was
* successful) and the supervisor process wait()s for the daemon processes
* to exit. If they exit prematurely (e.g. they crash) they will be
* restarted, if the startup is not successful in this case the restart
* will be attempted again just as if they crashed.
*
* The processes communicate with two pairs of pipes, startup_result_pipe
* is used to indicate success/failure to the startup process,
* init_result_pipe (as in "initialization") is used to deliver success
* reports from the daemon to the supervisor.
*/
typedef enum
{
G_PK_STARTUP,
G_PK_SUPERVISOR,
G_PK_DAEMON,
} GProcessKind;
#define G_PROCESS_FD_LIMIT_RESERVE 64
#define G_PROCESS_FAILURE_NOTIFICATION PATH_PREFIX "/sbin/syslog-ng-failure"
/* pipe used to deliver the initialization result to the calling process */
static gint startup_result_pipe[2] = { -1, -1 };
/* pipe used to deliver initialization result to the supervisor */
static gint init_result_pipe[2] = { -1, -1 };
static GProcessKind process_kind = G_PK_STARTUP;
static gboolean stderr_present = TRUE;
/* global variables */
static struct
{
GProcessMode mode;
const gchar *name;
const gchar *user;
gint uid;
const gchar *group;
gint gid;
const gchar *chroot_dir;
const gchar *pidfile;
const gchar *pidfile_dir;
const gchar *cwd;
const gchar *caps;
gint argc;
gchar **argv;
gchar *argv_start;
size_t argv_env_len;
gchar *argv_orig;
gboolean core;
gint fd_limit_min;
gint check_period;
gboolean (*check_fn)(void);
} process_opts =
{
.mode = G_PM_SAFE_BACKGROUND,
.argc = 0,
.argv = NULL,
.argv_start = NULL,
.argv_env_len = 0,
#ifdef __CYGWIN__
.fd_limit_min = 256,
#else
.fd_limit_min = 4096,
#endif
.check_period = -1,
.check_fn = NULL,
.uid = -1,
.gid = -1
};
#if ENABLE_LINUX_CAPS
/**
 * g_process_cap_modify:
 * @capability: capability to turn off or on
 * @onoff: whether to enable or disable @capability
 *
 * Enables or disables a single capability in the process' effective set.
 * A no-op (reporting success) when capability management is disabled.
 *
 * Returns: whether the operation was successful.
 **/
gboolean
g_process_cap_modify(int capability, int onoff)
{
  cap_t caps;

  /* capability support not requested: nothing to do */
  if (!process_opts.caps)
    return TRUE;

  caps = cap_get_proc();
  if (caps == NULL)
    return FALSE;

  if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &capability, onoff) < 0)
    {
      msg_error("Error managing capability set, cap_set_flag returned an error",
                evt_tag_errno("error", errno),
                NULL);
      cap_free(caps);
      return FALSE;
    }

  if (cap_set_proc(caps) < 0)
    {
      gchar *cap_text = cap_to_text(caps, NULL);

      msg_error("Error managing capability set, cap_set_proc returned an error",
                evt_tag_str("caps", cap_text),
                evt_tag_errno("error", errno),
                NULL);
      cap_free(cap_text);
      cap_free(caps);
      return FALSE;
    }

  cap_free(caps);
  return TRUE;
}
/**
 * g_process_cap_save:
 *
 * Saves and returns the process' current capability set so it can later be
 * reinstated with g_process_cap_restore(). Returns NULL when capability
 * management is disabled.
 *
 * Returns: the current set of capabilities, or NULL
 **/
cap_t
g_process_cap_save(void)
{
  return process_opts.caps ? cap_get_proc() : NULL;
}
/**
 * g_process_cap_restore:
 * @r: capability set saved by g_process_cap_save()
 *
 * Restore the set of current capabilities specified by @r and release the
 * saved set. A no-op when capability management is disabled (in which case
 * g_process_cap_save() returned NULL and @r is not freed).
 **/
void
g_process_cap_restore(cap_t r)
{
  if (!process_opts.caps)
    return;

  if (cap_set_proc(r) == -1)
    {
      gchar *cap_text;

      /* BUGFIX: the textual form must be generated *before* freeing @r;
         the previous code called cap_free(r) first and then passed the
         freed handle to cap_to_text() — a use-after-free. */
      cap_text = cap_to_text(r, NULL);
      msg_error("Error managing capability set, cap_set_proc returned an error",
                evt_tag_str("caps", cap_text),
                evt_tag_errno("error", errno),
                NULL);
      cap_free(cap_text);
    }
  cap_free(r);
}
#endif
/**
* g_process_set_mode:
* @mode: an element from GProcessMode
*
* This function should be called by the daemon to set the processing mode
* as specified by @mode. Unlike the g_process_set_*() setters below, this
* always overwrites the previous value (no first-wins check).
**/
void
g_process_set_mode(GProcessMode mode)
{
process_opts.mode = mode;
}
/**
* g_process_set_name:
* @name: the name of the process to be reported as program name
*
* This function should be called by the daemon to set the program name
* which is present in various error message and might influence the PID
* file if not overridden by g_process_set_pidfile(). Always overwrites
* the previous value (no first-wins check).
**/
void
g_process_set_name(const gchar *name)
{
process_opts.name = name;
}
/**
* g_process_set_user:
* @user: the name of the user the process should switch to during startup
*
* This function should be called by the daemon to set the user name.
**/
void
g_process_set_user(const gchar *user)
{
if (!process_opts.user)
process_opts.user = user;
}
/**
* g_process_set_group:
* @group: the name of the group the process should switch to during startup
*
* This function should be called by the daemon to set the group name.
**/
void
g_process_set_group(const gchar *group)
{
if (!process_opts.group)
process_opts.group = group;
}
/**
* g_process_set_chroot:
* @chroot_dir: the name of the chroot directory the process should switch to during startup
*
* This function should be called by the daemon to set the chroot directory
**/
void
g_process_set_chroot(const gchar *chroot_dir)
{
if (!process_opts.chroot_dir)
process_opts.chroot_dir = chroot_dir;
}
/**
* g_process_set_pidfile:
* @pidfile: the name of the complete pid file with full path
*
* This function should be called by the daemon to set the PID file name to
* store the pid of the process. This value will be used as the pidfile
* directly, neither name nor pidfile_dir influences the pidfile location if
* this is set.
**/
void
g_process_set_pidfile(const gchar *pidfile)
{
if (!process_opts.pidfile)
process_opts.pidfile = pidfile;
}
/**
* g_process_set_pidfile_dir:
* @pidfile_dir: name of the pidfile directory
*
* This function should be called by the daemon to set the PID file
* directory. This value is not used if set_pidfile() was called.
**/
void
g_process_set_pidfile_dir(const gchar *pidfile_dir)
{
if (!process_opts.pidfile_dir)
process_opts.pidfile_dir = pidfile_dir;
}
/**
* g_process_set_working_dir:
* @working_dir: name of the working directory
*
* This function should be called by the daemon to set the working
* directory. The process will change its current directory to this value or
* to pidfile_dir if it is unset.
**/
void
g_process_set_working_dir(const gchar *cwd)
{
if (!process_opts.cwd)
process_opts.cwd = cwd;
}
/**
* g_process_set_caps:
* @caps: capability specification in text form
*
* This function should be called by the daemon to set the initial
* capability set. The process will change its capabilities to this value
* during startup, provided it has enough permissions to do so.
**/
void
g_process_set_caps(const gchar *caps)
{
if (!process_opts.caps)
process_opts.caps = caps;
}
/**
* g_process_set_argv_space:
* @argc: Original argc, as received by the main function in it's first parameter
* @argv: Original argv, as received by the main function in it's second parameter
*
* This function should be called by the daemon if it wants to enable
* process title manipulation in the supervisor process. It measures the
* contiguous argv[]/environ[] memory region so the title can later be
* written over it (see g_process_setproctitle()), relocates the
* environment out of that region, and keeps a copy of the original
* contents so the daemon child can restore them.
*
* NOTE(review): assumes argv[] strings and environ[] strings are laid out
* back-to-back in memory (the usual Unix layout) — confirm on exotic
* platforms.
**/
void
g_process_set_argv_space(gint argc, gchar **argv)
{
extern char **environ;
gchar *lastargv = NULL;
gchar **envp = environ;
gint i;
/* only the first call takes effect */
if (process_opts.argv)
return;
process_opts.argv = argv;
process_opts.argc = argc;
/* count environment entries, then replace environ with a freshly
allocated vector; envp still points at the original region so it can
be measured and copied below */
for (i = 0; envp[i] != NULL; i++)
;
environ = g_new(char *, i + 1);
/*
* Find the last argv string or environment variable within
* our process memory area.
*/
for (i = 0; i < process_opts.argc; i++)
{
if (lastargv == NULL || lastargv + 1 == process_opts.argv[i])
lastargv = process_opts.argv[i] + strlen(process_opts.argv[i]);
}
for (i = 0; envp[i] != NULL; i++)
{
if (lastargv + 1 == envp[i])
lastargv = envp[i] + strlen(envp[i]);
}
/* usable scratch area: from argv[0] up to the last contiguous string */
process_opts.argv_start = process_opts.argv[0];
process_opts.argv_env_len = lastargv - process_opts.argv[0] - 1;
/* keep a pristine copy so the daemon can restore its original argv */
process_opts.argv_orig = malloc(sizeof(gchar) * process_opts.argv_env_len);
memcpy(process_opts.argv_orig, process_opts.argv_start, process_opts.argv_env_len);
/*
* Copy environment
* XXX - will truncate env on strdup fail
*/
for (i = 0; envp[i] != NULL; i++)
environ[i] = g_strdup(envp[i]);
environ[i] = NULL;
}
/**
* g_process_set_check:
* @check_period: check period in seconds
* @check_fn: checker function
*
* Installs a checker function that is called at the specified rate.
* The checked process is allowed to run as long as this function
* returns TRUE. A negative @check_period disables the periodic check
* (see g_process_perform_supervise()).
*/
void
g_process_set_check(gint check_period, gboolean (*check_fn)(void))
{
process_opts.check_period = check_period;
process_opts.check_fn = check_fn;
}
/**
 * g_process_message:
 * @fmt: printf-style format string
 * @...: arguments to @fmt
 *
 * Reports a message to the user: on stderr while it is still available
 * (startup, and the beginning of the first start in the supervisor/daemon
 * processes), and via syslog() once stderr has been detached.
 **/
void
g_process_message(const gchar *fmt, ...)
{
  gchar msg[2048];
  va_list args;

  va_start(args, fmt);
  g_vsnprintf(msg, sizeof(msg), fmt, args);
  va_end(args);

  if (stderr_present)
    {
      fprintf(stderr, "%s: %s\n", process_opts.name, msg);
      return;
    }

  {
    gchar ident[32];

    /* tag the entry with the role of the reporting process */
    g_snprintf(ident, sizeof(ident), "%s/%s", process_kind == G_PK_SUPERVISOR ? "supervise" : "daemon", process_opts.name);
    openlog(ident, LOG_PID, LOG_DAEMON);
    syslog(LOG_CRIT, "%s\n", msg);
    closelog();
  }
}
/**
 * g_process_detach_tty:
 *
 * Called from g_process_start() to detach the process from its
 * controlling terminal; skipped entirely in foreground mode.
 **/
static void
g_process_detach_tty(void)
{
  /* foreground processes keep their controlling tty */
  if (process_opts.mode == G_PM_FOREGROUND)
    return;

  if (!isatty(STDIN_FILENO))
    return;

#ifdef TIOCNOTTY
  ioctl(STDIN_FILENO, TIOCNOTTY, 0);
#endif
  setsid();
}
/**
* g_process_change_limits:
*
* Set fd limit.
*
**/
static void
g_process_change_limits(void)
{
struct rlimit limit;
limit.rlim_cur = limit.rlim_max = process_opts.fd_limit_min;
if (setrlimit(RLIMIT_NOFILE, &limit) < 0)
g_process_message("Error setting file number limit; limit='%d'; error='%s'", process_opts.fd_limit_min, g_strerror(errno));
}
/**
 * g_process_detach_stdio:
 *
 * Redirects stdin/stdout/stderr to /dev/null. Idempotent: guarded by
 * stderr_present, so calling it repeatedly is harmless. A no-op in
 * foreground mode.
 **/
static void
g_process_detach_stdio(void)
{
  gint nullfd;

  if (process_opts.mode == G_PM_FOREGROUND || !stderr_present)
    return;

  nullfd = open("/dev/null", O_RDONLY);
  if (nullfd >= 0)
    {
      dup2(nullfd, STDIN_FILENO);
      close(nullfd);
    }

  nullfd = open("/dev/null", O_WRONLY);
  if (nullfd >= 0)
    {
      dup2(nullfd, STDOUT_FILENO);
      dup2(nullfd, STDERR_FILENO);
      close(nullfd);
    }

  /* from now on g_process_message() falls back to syslog() */
  stderr_present = FALSE;
}
/**
* g_process_enable_core:
*
* Enable core file dumping by setting PR_DUMPABLE and changing the core
* file limit to infinity.
**/
static void
g_process_enable_core(void)
{
struct rlimit limit;
if (process_opts.core)
{
#if ENABLE_LINUX_CAPS
if (!prctl(PR_GET_DUMPABLE, 0, 0, 0, 0))
{
gint rc;
rc = prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
if (rc < 0)
g_process_message("Cannot set process to be dumpable; error='%s'", g_strerror(errno));
}
#endif
limit.rlim_cur = limit.rlim_max = RLIM_INFINITY;
if (setrlimit(RLIMIT_CORE, &limit) < 0)
g_process_message("Error setting core limit to infinity; error='%s'", g_strerror(errno));
}
}
/**
 * g_process_format_pidfile_name:
 * @buf: buffer to store the pidfile name
 * @buflen: size of @buf
 *
 * Formats the pidfile path from the configured options: an explicit
 * absolute pidfile is used verbatim; a relative one is anchored at the
 * pidfile directory; with no pidfile configured, "<dir>/<name>.pid" is
 * derived from the process name.
 *
 * Returns: the pidfile path (either @buf or the configured absolute path)
 **/
static const gchar *
g_process_format_pidfile_name(gchar *buf, gsize buflen)
{
  const gchar *dir = process_opts.pidfile_dir ? process_opts.pidfile_dir : PATH_PIDFILEDIR;
  const gchar *pidfile = process_opts.pidfile;

  if (pidfile == NULL)
    {
      g_snprintf(buf, buflen, "%s/%s.pid", dir, process_opts.name);
      pidfile = buf;
    }
  else if (pidfile[0] != '/')
    {
      /* relative pidfile path: assume it is relative to pidfile_dir */
      g_snprintf(buf, buflen, "%s/%s", dir, pidfile);
      pidfile = buf;
    }
  return pidfile;
}
/**
 * g_process_write_pidfile:
 * @pid: pid to write into the pidfile
 *
 * Writes @pid to the pidfile. Errors (including write/flush errors
 * surfaced by fclose()) are reported via g_process_message() but are
 * not fatal.
 **/
static void
g_process_write_pidfile(pid_t pid)
{
  gchar buf[256];
  const gchar *pidfile;
  FILE *fp;   /* renamed from `fd': this is a stdio stream, not a raw fd */

  pidfile = g_process_format_pidfile_name(buf, sizeof(buf));

  fp = fopen(pidfile, "w");
  if (fp == NULL)
    {
      g_process_message("Error creating pid file; file='%s', error='%s'", pidfile, g_strerror(errno));
      return;
    }

  fprintf(fp, "%d\n", (int) pid);
  /* fclose() flushes; an error here means the pid was not fully written
     (e.g. disk full), which previously went unnoticed */
  if (fclose(fp) != 0)
    g_process_message("Error writing pid file; file='%s', error='%s'", pidfile, g_strerror(errno));
}
/**
* g_process_remove_pidfile:
*
* Remove the pidfile.
**/
static void
g_process_remove_pidfile(void)
{
gchar buf[256];
const gchar *pidfile;
pidfile = g_process_format_pidfile_name(buf, sizeof(buf));
if (unlink(pidfile) < 0)
{
g_process_message("Error removing pid file; file='%s', error='%s'", pidfile, g_strerror(errno));
}
}
/**
 * g_process_change_root:
 *
 * Changes the process root to the configured chroot directory, if any.
 * A failure here aborts startup (the caller treats FALSE as fatal).
 *
 * Returns: TRUE to indicate success
 **/
static gboolean
g_process_change_root(void)
{
  if (!process_opts.chroot_dir)
    return TRUE;

  if (chroot(process_opts.chroot_dir) < 0)
    {
      g_process_message("Error in chroot(); chroot='%s', error='%s'\n", process_opts.chroot_dir, g_strerror(errno));
      return FALSE;
    }
  /* reset cwd so we do not keep a directory outside the new root open */
  if (chdir("/") < 0)
    {
      g_process_message("Error in chdir() after chroot; chroot='%s', error='%s'\n", process_opts.chroot_dir, g_strerror(errno));
      return FALSE;
    }
  return TRUE;
}
/**
* g_process_change_user:
*
* Change the current user/group/groups to the value specified by the user;
* causes the startup process to fail if this function returns FALSE (e.g.
* the user requested the uid/gid to change and we could not change to that
* uid). Failures are only fatal when running as root — for a non-root
* start they are reported and tolerated.
*
* Returns: TRUE to indicate success
**/
static gboolean
g_process_change_user(void)
{
#if ENABLE_LINUX_CAPS
/* keep the permitted capability set across the uid change; presumably
consumed by g_process_change_caps() afterwards — confirm call order in
g_process_start() */
if (process_opts.caps)
prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0);
#endif
/* group and supplementary groups must be changed before setuid(), while
we still have the privilege to do so */
if (process_opts.gid >= 0)
{
if (setgid((gid_t) process_opts.gid) < 0)
{
g_process_message("Error in setgid(); group='%s', gid='%d', error='%s'", process_opts.group, process_opts.gid, g_strerror(errno));
if (getuid() == 0)
return FALSE;
}
if (process_opts.user && initgroups(process_opts.user, (gid_t) process_opts.gid) < 0)
{
g_process_message("Error in initgroups(); user='%s', error='%s'", process_opts.user, g_strerror(errno));
if (getuid() == 0)
return FALSE;
}
}
if (process_opts.uid >= 0)
{
if (setuid((uid_t) process_opts.uid) < 0)
{
g_process_message("Error in setuid(); user='%s', uid='%d', error='%s'", process_opts.user, process_opts.uid, g_strerror(errno));
if (getuid() == 0)
return FALSE;
}
}
return TRUE;
}
#if ENABLE_LINUX_CAPS
/**
* g_process_change_caps:
*
* Change the current capset to the value specified by the user. Causes the
* startup process to fail if this function returns FALSE, but we only do
* this if the capset cannot be parsed; a failure merely *applying* the
* capabilities disables capability management and is not fatal.
*
* Returns: TRUE to indicate success
**/
static gboolean
g_process_change_caps(void)
{
if (process_opts.caps)
{
cap_t cap = cap_from_text(process_opts.caps);
if (cap == NULL)
{
/* unparsable capspec: this is a configuration error, abort startup */
g_process_message("Error parsing capabilities: %s", process_opts.caps);
process_opts.caps = NULL;
return FALSE;
}
else
{
if (cap_set_proc(cap) == -1)
{
/* could not apply the set (e.g. insufficient privilege): disable
capability management but continue starting up */
g_process_message("Error setting capabilities, capability management disabled; error='%s'", g_strerror(errno));
process_opts.caps = NULL;
}
cap_free(cap);
}
}
return TRUE;
}
#else
/* capability support disabled at build time: nothing to do, always succeed */
static gboolean
g_process_change_caps(void)
{
return TRUE;
}
#endif
/**
* g_process_resolve_names:
*
* Resolves the configured user/group names to numeric uid/gid values
* (stored in process_opts). On resolution failure the corresponding id is
* reset to -1, which makes g_process_change_user() skip that change.
**/
static void
g_process_resolve_names(void)
{
if (process_opts.user && !resolve_user(process_opts.user, &process_opts.uid))
{
g_process_message("Error resolving user; user='%s'", process_opts.user);
process_opts.uid = -1;
}
if (process_opts.group && !resolve_group(process_opts.group, &process_opts.gid))
{
g_process_message("Error resolving group; group='%s'", process_opts.group);
process_opts.gid = -1;
}
}
/**
* g_process_change_dir:
*
* Change the current working directory to the value specified by the user
* and verify that the daemon would be able to dump core to that directory
* if that is requested.
**/
static void
g_process_change_dir(void)
{
const gchar *cwd = NULL;
if (process_opts.mode != G_PM_FOREGROUND)
{
if (process_opts.cwd)
cwd = process_opts.cwd;
else if (process_opts.pidfile_dir)
cwd = process_opts.pidfile_dir;
if (!cwd)
cwd = PATH_PIDFILEDIR;
if (cwd)
chdir(cwd);
}
/* this check is here to avoid having to change directory early in the startup process */
if ((process_opts.core) && access(".", W_OK) < 0)
{
gchar buf[256];
getcwd(buf, sizeof(buf));
g_process_message("Unable to write to current directory, core dumps will not be generated; dir='%s', error='%s'", buf, g_strerror(errno));
}
}
/**
* g_process_send_result:
* @ret_num: exit code of the process
*
* This function is called to notify our parent process (which is the same
* executable process but separated with a fork()) about the result of the
* process startup phase. Specifying ret_num == 0 means that everything was
* dandy, all other values mean that the initialization failed and the
* parent should exit using ret_num as the exit code. The function behaves
* differently depending on which process it was called from, determined by
* the value of the process_kind global variable. In the daemon process it
* writes to init_result_pipe, in the startup process it writes to the
* startup_result_pipe.
*
* This function can only be called once, further invocations will do nothing.
**/
static void
g_process_send_result(guint ret_num)
{
gchar buf[10];
guint buf_len;
gint *fd;
/* pick the pipe matching our role; calling this from the startup
process itself is a programming error */
if (process_kind == G_PK_SUPERVISOR)
fd = &startup_result_pipe[1];
else if (process_kind == G_PK_DAEMON)
fd = &init_result_pipe[1];
else
g_assert_not_reached();
/* *fd == -1 means the result was already sent: silently do nothing */
if (*fd != -1)
{
buf_len = g_snprintf(buf, sizeof(buf), "%d\n", ret_num);
/* NOTE(review): a short/failed write() is not detected here; the
reader would then fall back to its failure path */
write(*fd, buf, buf_len);
close(*fd);
*fd = -1;
}
}
/**
* g_process_recv_result:
*
* Retrieves an exit code value from one of the result pipes depending on
* which process the function was called from (supervisor reads from the
* daemon's init_result_pipe, startup reads from startup_result_pipe). The
* blocking read doubles as the synchronization point with the child. This
* function can be called only once, further invocations will return a
* non-zero result code.
**/
static gint
g_process_recv_result(void)
{
gchar ret_buf[6];
gint ret_num = 1;
gint *fd;
/* FIXME: use a timer */
if (process_kind == G_PK_SUPERVISOR)
fd = &init_result_pipe[0];
else if (process_kind == G_PK_STARTUP)
fd = &startup_result_pipe[0];
else
g_assert_not_reached();
if (*fd != -1)
{
/* zero-fill so a short read still yields a NUL-terminated string */
memset(ret_buf, 0, sizeof(ret_buf));
if (read(*fd, ret_buf, sizeof(ret_buf)) > 0)
{
ret_num = atoi(ret_buf);
}
else
{
/* the process probably crashed without telling a proper exit code */
ret_num = 1;
}
close(*fd);
*fd = -1;
}
return ret_num;
}
/**
* g_process_perform_startup:
*
* This function is the startup process, never returns, the startup process
* exits here with the exit code delivered by the supervisor over
* startup_result_pipe (see g_process_recv_result()).
**/
static void
g_process_perform_startup(void)
{
/* startup process */
exit(g_process_recv_result());
}
#define SPT_PADCHAR '\0'
/**
 * g_process_setproctitle:
 * @proc_title: new process title
 *
 * Overwrites the argv area (prepared by g_process_set_argv_space()) with
 * @proc_title, padding the remainder with NUL bytes so no stale argv or
 * environment text remains visible.
 **/
static void
g_process_setproctitle(const gchar* proc_title)
{
  size_t used;

  g_assert(process_opts.argv_start != NULL);

  used = g_strlcpy(process_opts.argv_start, proc_title, process_opts.argv_env_len);
  if (used < process_opts.argv_env_len)
    memset(process_opts.argv_start + used, SPT_PADCHAR, process_opts.argv_env_len - used);
}
#define PROC_TITLE_SPACE 1024
/**
* g_process_perform_supervise:
*
* Supervise process, returns only in the context of the daemon process
* (after forking it); the supervisor process itself exits here. Forks the
* daemon in a loop, restarting it whenever it exits abnormally, reporting
* the first startup result back to the startup process, and optionally
* watchdogging the daemon via the configured check_fn.
**/
static void
g_process_perform_supervise(void)
{
pid_t pid;
gboolean first = TRUE, exited = FALSE;
gchar proc_title[PROC_TITLE_SPACE];
g_snprintf(proc_title, PROC_TITLE_SPACE, "supervising %s", process_opts.name);
g_process_setproctitle(proc_title);
while (1)
{
/* fresh init pipe for each daemon incarnation */
if (pipe(init_result_pipe) != 0)
{
g_process_message("Error daemonizing process, cannot open pipe; error='%s'", g_strerror(errno));
g_process_startup_failed(1, TRUE);
}
/* fork off a child process */
if ((pid = fork()) < 0)
{
g_process_message("Error forking child process; error='%s'", g_strerror(errno));
g_process_startup_failed(1, TRUE);
}
else if (pid != 0)
{
gint rc;
gboolean deadlock = FALSE;
/* this is the supervisor process */
/* shut down init_result_pipe write side */
close(init_result_pipe[1]);
init_result_pipe[1] = -1;
/* block until the daemon reports its init result (or crashes) */
rc = g_process_recv_result();
if (first)
{
/* first time encounter, we have a chance to report back, do it */
g_process_send_result(rc);
if (rc != 0)
break;
g_process_detach_stdio();
}
first = FALSE;
if (rc != 0)
{
gint i = 0;
/* initialization failed in daemon, it will probably exit soon, wait and restart */
/* give it ~4s to exit on its own, then escalate SIGTERM -> SIGKILL */
while (i < 6 && waitpid(pid, &rc, WNOHANG) == 0)
{
if (i > 3)
kill(pid, i > 4 ? SIGKILL : SIGTERM);
sleep(1);
i++;
}
if (i == 6)
g_process_message("Initialization failed but the daemon did not exit, even when forced to, trying to recover; pid='%d'", pid);
continue;
}
if (process_opts.check_fn && (process_opts.check_period >= 0))
{
/* watchdog mode: poll the daemon once per second, invoking
check_fn every check_period seconds; a FALSE result means the
daemon is considered hung */
gint i = 1;
while (!(exited = waitpid(pid, &rc, WNOHANG)))
{
if (i >= process_opts.check_period)
{
if (!process_opts.check_fn())
break;
i = 0;
}
sleep(1);
i++;
}
if (!exited)
{
gint j = 0;
g_process_message("Daemon deadlock detected, killing process;");
deadlock = TRUE;
/* same escalation as above, but SIGABRT first so a core may be dumped */
while (j < 6 && waitpid(pid, &rc, WNOHANG) == 0)
{
if (j > 3)
kill(pid, j > 4 ? SIGKILL : SIGABRT);
sleep(1);
j++;
}
if (j == 6)
g_process_message("The daemon did not exit after deadlock, even when forced to, trying to recover; pid='%d'", pid);
}
}
else
{
waitpid(pid, &rc, 0);
}
/* abnormal termination: deadlock, a signal, or a non-zero exit code */
if (deadlock || WIFSIGNALED(rc) || (WIFEXITED(rc) && WEXITSTATUS(rc) != 0))
{
gchar argbuf[64];
/* run the external failure-notification script, if installed;
double-fork so the supervisor never waits on the script itself */
if (!access(G_PROCESS_FAILURE_NOTIFICATION, R_OK | X_OK))
{
const gchar *notify_reason;
pid_t npid = fork();
gint nrc;
switch (npid)
{
case -1:
g_process_message("Could not fork for external notification; reason='%s'", strerror(errno));
break;
case 0:
switch(fork())
{
case -1:
g_process_message("Could not fork for external notification; reason='%s'", strerror(errno));
exit(1);
break;
case 0:
if (deadlock)
{
notify_reason = "deadlock detected";
argbuf[0] = 0;
}
else
{
snprintf(argbuf, sizeof(argbuf), "%d", WIFSIGNALED(rc) ? WTERMSIG(rc) : WEXITSTATUS(rc));
if (WIFSIGNALED(rc))
notify_reason = "signalled";
else
notify_reason = "non-zero exit code";
}
execlp(G_PROCESS_FAILURE_NOTIFICATION, G_PROCESS_FAILURE_NOTIFICATION,
SAFE_STRING(process_opts.name),
SAFE_STRING(process_opts.chroot_dir),
SAFE_STRING(process_opts.pidfile_dir),
SAFE_STRING(process_opts.pidfile),
SAFE_STRING(process_opts.cwd),
SAFE_STRING(process_opts.caps),
notify_reason,
argbuf,
(deadlock || !WIFSIGNALED(rc) || WTERMSIG(rc) != SIGKILL) ? "restarting" : "not-restarting",
(gchar*) NULL);
g_process_message("Could not execute external notification; reason='%s'", strerror(errno));
break;
default:
exit(0);
break;
} /* child process */
default:
waitpid(npid, &nrc, 0);
break;
}
}
/* SIGKILL is treated as a deliberate operator action: do not restart */
if (deadlock || !WIFSIGNALED(rc) || WTERMSIG(rc) != SIGKILL)
{
g_process_message("Daemon exited due to a deadlock/signal/failure, restarting; exitcode='%d'", rc);
sleep(1);
}
else
{
g_process_message("Daemon was killed, not restarting; exitcode='%d'", rc);
break;
}
}
else
{
g_process_message("Daemon exited gracefully, not restarting; exitcode='%d'", rc);
break;
}
}
else
{
/* this is the daemon process, thus we should return to the caller of g_process_start() */
/* shut down init_result_pipe read side */
process_kind = G_PK_DAEMON;
close(init_result_pipe[0]);
init_result_pipe[0] = -1;
/* restore the original argv contents clobbered by setproctitle */
memcpy(process_opts.argv_start, process_opts.argv_orig, process_opts.argv_env_len);
return;
}
}
exit(0);
}
/**
 * g_process_start:
 *
 * Start the process as directed by the options set by various
 * g_process_set_*() functions.  Depending on process_opts.mode this
 * either keeps running in the foreground, forks once into the
 * background, or sets up the full startup/supervisor/daemon triplet.
 * Only the process that is meant to do the real work returns from
 * this function.
 **/
void
g_process_start(void)
{
  pid_t pid;

  g_process_detach_tty();
  g_process_change_limits();
  g_process_resolve_names();

  if (process_opts.mode == G_PM_BACKGROUND)
    {
      /* no supervisor, sends result to startup process directly */
      if (pipe(init_result_pipe) != 0)
        {
          g_process_message("Error daemonizing process, cannot open pipe; error='%s'", g_strerror(errno));
          exit(1);
        }

      if ((pid = fork()) < 0)
        {
          g_process_message("Error forking child process; error='%s'", g_strerror(errno));
          exit(1);
        }
      else if (pid != 0)
        {
          /* this is the startup process: it waits for the daemon's
           * initialization result and reports it to the invoker */
          /* shut down init_result_pipe write side */
          close(init_result_pipe[1]);

          /* connect startup_result_pipe with init_result_pipe */
          startup_result_pipe[0] = init_result_pipe[0];
          init_result_pipe[0] = -1;

          g_process_perform_startup();
          /* NOTE: never returns */
          g_assert_not_reached();
        }
      process_kind = G_PK_DAEMON;

      /* this is the daemon process */
      /* shut down init_result_pipe read side */
      close(init_result_pipe[0]);
      init_result_pipe[0] = -1;
    }
  else if (process_opts.mode == G_PM_SAFE_BACKGROUND)
    {
      /* full blown startup/supervisor/daemon */
      if (pipe(startup_result_pipe) != 0)
        {
          g_process_message("Error daemonizing process, cannot open pipe; error='%s'", g_strerror(errno));
          exit(1);
        }

      /* first fork off supervisor process */
      if ((pid = fork()) < 0)
        {
          g_process_message("Error forking child process; error='%s'", g_strerror(errno));
          exit(1);
        }
      else if (pid != 0)
        {
          /* this is the startup process */
          /* shut down startup_result_pipe write side */
          close(startup_result_pipe[1]);
          startup_result_pipe[1] = -1;

          /* NOTE: never returns */
          g_process_perform_startup();
          g_assert_not_reached();
        }

      /* this is the supervisor process */
      /* shut down startup_result_pipe read side */
      close(startup_result_pipe[0]);
      startup_result_pipe[0] = -1;
      process_kind = G_PK_SUPERVISOR;

      g_process_perform_supervise();
      /* we only return in the daemon process here (the supervisor forks
       * the daemon off and returns in the child) */
    }
  else if (process_opts.mode == G_PM_FOREGROUND)
    {
      process_kind = G_PK_DAEMON;
    }
  else
    {
      g_assert_not_reached();
    }

  /* daemon process, we should return to the caller to perform work */

  setsid();

  /* NOTE: we need to signal the parent in case of errors from this point.
   * This is accomplished by writing the appropriate exit code to
   * init_result_pipe, the easiest way doing so is calling g_process_startup_failed.
   * */

  if (!g_process_change_root() ||
      !g_process_change_user() ||
      !g_process_change_caps())
    {
      g_process_startup_failed(1, TRUE);
    }
  g_process_enable_core();
  g_process_change_dir();
}
/**
 * g_process_startup_failed:
 * @ret_num: exit code to report/exit with
 * @may_exit: whether this call may terminate the process
 *
 * Public API function to be called by user code when initialization
 * failed.  Reports the failure code to the waiting parent process
 * (unless we *are* the startup process, which has nobody to report to)
 * and then either exits or just detaches stdio and returns.
 **/
void
g_process_startup_failed(guint ret_num, gboolean may_exit)
{
  if (process_kind != G_PK_STARTUP)
    g_process_send_result(ret_num);

  if (!may_exit)
    {
      g_process_detach_stdio();
      return;
    }

  exit(ret_num);
}
/**
 * g_process_startup_ok:
 *
 * This is a public API function to be called by the user code when
 * initialization was successful, we can report back to the user.
 * Writes the pid file, sends a zero (success) result code to the
 * waiting parent and detaches from the inherited stdio descriptors.
 **/
void
g_process_startup_ok(void)
{
  g_process_write_pidfile(getpid());

  g_process_send_result(0);
  g_process_detach_stdio();
}
/**
 * g_process_finish:
 *
 * This is a public API function to be called by the user code when the
 * daemon exits after properly initialized (e.g. when it terminates because
 * of SIGTERM). This function currently only removes the PID file.
 **/
void
g_process_finish(void)
{
  g_process_remove_pidfile();
}
/* GOptionArgFunc callback parsing the --process-mode argument; maps the
 * accepted keywords to the corresponding G_PM_* mode value and stores it
 * in process_opts.mode. Returns FALSE with a G_OPTION_ERROR on an
 * unrecognized value. */
static gboolean
g_process_process_mode_arg(const gchar *option_name G_GNUC_UNUSED, const gchar *value, gpointer data G_GNUC_UNUSED, GError **error)
{
  static const struct
  {
    const gchar *name;
    gint mode;
  } mode_table[] =
  {
    { "foreground",      G_PM_FOREGROUND },
    { "background",      G_PM_BACKGROUND },
    { "safe-background", G_PM_SAFE_BACKGROUND },
  };
  guint i;

  for (i = 0; i < sizeof(mode_table) / sizeof(mode_table[0]); i++)
    {
      if (strcmp(value, mode_table[i].name) == 0)
        {
          process_opts.mode = mode_table[i].mode;
          return TRUE;
        }
    }

  g_set_error(error, G_OPTION_ERROR, G_OPTION_ERROR_BAD_VALUE, "Error parsing process-mode argument");
  return FALSE;
}
/* Command-line options understood by the process abstraction; hooked into
 * the application's GOptionContext by g_process_add_option_group().
 * NOTE: "foreground" and "no-caps" use G_OPTION_FLAG_REVERSE, i.e. they
 * *clear* the referenced field instead of setting it; "uid"/"gid" are
 * hidden aliases for "user"/"group". */
static GOptionEntry g_process_option_entries[] =
{
  { "foreground",   'F', G_OPTION_FLAG_REVERSE, G_OPTION_ARG_NONE, &process_opts.mode, "Do not go into the background after initialization", NULL },
  { "process-mode",   0,                     0, G_OPTION_ARG_CALLBACK, g_process_process_mode_arg , "Set process running mode", "<foreground|background|safe-background>" },
  { "user",         'u',                     0, G_OPTION_ARG_STRING, &process_opts.user, "Set the user to run as", "<user>" },
  { "uid",            0, G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &process_opts.user, NULL, NULL },
  { "group",        'g',                     0, G_OPTION_ARG_STRING, &process_opts.group, "Set the group to run as", "<group>" },
  { "gid",            0, G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &process_opts.group, NULL, NULL },
  { "chroot",       'C',                     0, G_OPTION_ARG_STRING, &process_opts.chroot_dir, "Chroot to this directory", "<dir>" },
  { "caps",           0,                     0, G_OPTION_ARG_STRING, &process_opts.caps, "Set default capability set", "<capspec>" },
  { "no-caps",        0, G_OPTION_FLAG_REVERSE, G_OPTION_ARG_NONE, &process_opts.caps, "Disable managing Linux capabilities", NULL },
  { "pidfile",      'p',                     0, G_OPTION_ARG_STRING, &process_opts.pidfile, "Set path to pid file", "<pidfile>" },
  { "enable-core",    0,                     0, G_OPTION_ARG_NONE, &process_opts.core, "Enable dumping core files", NULL },
  { "fd-limit",       0,                     0, G_OPTION_ARG_INT, &process_opts.fd_limit_min, "The minimum required number of fds", NULL },
  { NULL, 0, 0, 0, NULL, NULL, NULL },
};
/* Register the "process" option group (defined in g_process_option_entries)
 * with the application's GOptionContext so the options above are parsed
 * from the command line. */
void
g_process_add_option_group(GOptionContext *ctx)
{
  GOptionGroup *group;

  group = g_option_group_new("process", "Process options", "Process options", NULL, NULL);
  g_option_group_add_entries(group, g_process_option_entries);
  g_option_context_add_group(ctx, group);
}
|
ystk/debian-syslog-ng
|
src/gprocess.c
|
C
|
gpl-2.0
| 37,305
|
/*
* PROJECT: Boot Loader
* LICENSE: BSD - See COPYING.ARM in the top level directory
* FILE: boot/armllb/hw/versatile/hwclcd.c
* PURPOSE: LLB CLCD Routines for Versatile
* PROGRAMMERS: ReactOS Portable Systems Group
*/
#include "precomp.h"
#define LCDTIMING0_PPL(x) ((((x) / 16 - 1) & 0x3f) << 2)
#define LCDTIMING1_LPP(x) (((x) & 0x3ff) - 1)
#define LCDCONTROL_LCDPWR (1 << 11)
#define LCDCONTROL_LCDEN (1)
#define LCDCONTROL_LCDBPP(x) (((x) & 7) << 1)
#define LCDCONTROL_LCDTFT (1 << 5)
#define PL110_LCDTIMING0 (PVOID)0x10120000
#define PL110_LCDTIMING1 (PVOID)0x10120004
#define PL110_LCDTIMING2 (PVOID)0x10120008
#define PL110_LCDUPBASE (PVOID)0x10120010
#define PL110_LCDLPBASE (PVOID)0x10120014
#define PL110_LCDCONTROL (PVOID)0x10120018
PUSHORT LlbHwVideoBuffer;
/* Program the PL110 CLCD controller on the Versatile board: point both
 * panel base registers at the framebuffer, set the horizontal/vertical
 * timings from the reported screen geometry (720x400) and power the
 * display on. */
VOID
NTAPI
LlbHwVersaClcdInitialize(VOID)
{
    /* Set framebuffer address (upper and lower panel share one buffer) */
    WRITE_REGISTER_ULONG(PL110_LCDUPBASE, (ULONG)LlbHwGetFrameBuffer());
    WRITE_REGISTER_ULONG(PL110_LCDLPBASE, (ULONG)LlbHwGetFrameBuffer());

    /* Initialize timings to 720x400 */
    WRITE_REGISTER_ULONG(PL110_LCDTIMING0, LCDTIMING0_PPL(LlbHwGetScreenWidth()));
    WRITE_REGISTER_ULONG(PL110_LCDTIMING1, LCDTIMING1_LPP(LlbHwGetScreenHeight()));

    /* Enable the TFT/LCD Display; LCDBPP(4) presumably selects the
     * 16bpp pixel format -- verify against the PL110 TRM if changed */
    WRITE_REGISTER_ULONG(PL110_LCDCONTROL,
                         LCDCONTROL_LCDEN |
                         LCDCONTROL_LCDTFT |
                         LCDCONTROL_LCDPWR |
                         LCDCONTROL_LCDBPP(4));
}
/* Return the fixed display width in pixels used for CLCD timing setup. */
ULONG
NTAPI
LlbHwGetScreenWidth(VOID)
{
    return 720;
}
/* Return the fixed display height in lines used for CLCD timing setup. */
ULONG
NTAPI
LlbHwGetScreenHeight(VOID)
{
    return 400;
}
/* Return the physical framebuffer base address the CLCD scans out from. */
PVOID
NTAPI
LlbHwGetFrameBuffer(VOID)
{
    return (PVOID)0x000A0000;
}
/* Pack 8-bit R/G/B components into a 16-bit 5:6:5 pixel value with the
 * blue channel in the most significant bits, matching the controller's
 * configured channel order. */
ULONG
NTAPI
LlbHwVideoCreateColor(IN ULONG Red,
                      IN ULONG Green,
                      IN ULONG Blue)
{
    ULONG b = Blue >> 3;   /* 8 -> 5 bits */
    ULONG g = Green >> 2;  /* 8 -> 6 bits */
    ULONG r = Red >> 3;    /* 8 -> 5 bits */

    return (b << 11) | (g << 5) | (r << 0);
}
/* EOF */
|
GreenteaOS/Kernel
|
third-party/reactos/boot/armllb/hw/versatile/hwclcd.c
|
C
|
gpl-2.0
| 1,933
|
/*
* Copyright (C) 2011 ST-Ericsson SA.
* Copyright (C) 2009 Motorola, Inc.
*
* License Terms: GNU General Public License v2
*
* Simple driver for National Semiconductor LM3530 Backlight driver chip
*
* Author: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>
* based on leds-lm3530.c by Dan Murphy <D.Murphy@motorola.com>
*/
#include <linux/i2c.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/led-lm3530.h>
#include <linux/types.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#define LM3530_LED_DEV "lcd-backlight"
#define LM3530_NAME "lm3530-led"
#define LM3530_GEN_CONFIG 0x10
#define LM3530_ALS_CONFIG 0x20
#define LM3530_BRT_RAMP_RATE 0x30
#define LM3530_ALS_IMP_SELECT 0x41
#define LM3530_BRT_CTRL_REG 0xA0
#define LM3530_ALS_ZB0_REG 0x60
#define LM3530_ALS_ZB1_REG 0x61
#define LM3530_ALS_ZB2_REG 0x62
#define LM3530_ALS_ZB3_REG 0x63
#define LM3530_ALS_Z0T_REG 0x70
#define LM3530_ALS_Z1T_REG 0x71
#define LM3530_ALS_Z2T_REG 0x72
#define LM3530_ALS_Z3T_REG 0x73
#define LM3530_ALS_Z4T_REG 0x74
#define LM3530_REG_MAX 14
/* General Control Register */
#define LM3530_EN_I2C_SHIFT (0)
#define LM3530_RAMP_LAW_SHIFT (1)
#define LM3530_MAX_CURR_SHIFT (2)
#define LM3530_EN_PWM_SHIFT (5)
#define LM3530_PWM_POL_SHIFT (6)
#define LM3530_EN_PWM_SIMPLE_SHIFT (7)
#define LM3530_ENABLE_I2C (1 << LM3530_EN_I2C_SHIFT)
#define LM3530_ENABLE_PWM (1 << LM3530_EN_PWM_SHIFT)
#define LM3530_POL_LOW (1 << LM3530_PWM_POL_SHIFT)
#define LM3530_ENABLE_PWM_SIMPLE (1 << LM3530_EN_PWM_SIMPLE_SHIFT)
/* ALS Config Register Options */
#define LM3530_ALS_AVG_TIME_SHIFT (0)
#define LM3530_EN_ALS_SHIFT (3)
#define LM3530_ALS_SEL_SHIFT (5)
#define LM3530_ENABLE_ALS (3 << LM3530_EN_ALS_SHIFT)
/* Brightness Ramp Rate Register */
#define LM3530_BRT_RAMP_FALL_SHIFT (0)
#define LM3530_BRT_RAMP_RISE_SHIFT (3)
/* ALS Resistor Select */
#define LM3530_ALS1_IMP_SHIFT (0)
#define LM3530_ALS2_IMP_SHIFT (4)
/* Zone Boundary Register defaults */
#define LM3530_ALS_ZB_MAX (4)
#define LM3530_ALS_WINDOW_mV (1000)
#define LM3530_ALS_OFFSET_mV (4)
/* Zone Target Register defaults */
#define LM3530_DEF_ZT_0 (0x7F)
#define LM3530_DEF_ZT_1 (0x66)
#define LM3530_DEF_ZT_2 (0x4C)
#define LM3530_DEF_ZT_3 (0x33)
#define LM3530_DEF_ZT_4 (0x19)
/* 7 bits are used for the brightness : LM3530_BRT_CTRL_REG */
#define MAX_BRIGHTNESS (127)
/* Mapping between the sysfs "mode" strings and the driver mode values. */
struct lm3530_mode_map {
	const char *mode;               /* string accepted/printed via sysfs */
	enum lm3530_mode mode_val;      /* corresponding driver mode */
};

static struct lm3530_mode_map mode_map[] = {
	{ "man", LM3530_BL_MODE_MANUAL },
	{ "als", LM3530_BL_MODE_ALS },
	{ "pwm", LM3530_BL_MODE_PWM },
};
/**
 * struct lm3530_data
 * @led_dev: led class device
 * @client: i2c client
 * @pdata: LM3530 platform data
 * @mode: mode of operation - manual, ALS, PWM
 * @regulator: regulator supplying the chip ("vin")
 * @brightness: previous brightness value
 * @enable: regulator is enabled
 */
struct lm3530_data {
	struct led_classdev led_dev;
	struct i2c_client *client;
	struct lm3530_platform_data *pdata;
	enum lm3530_mode mode;
	struct regulator *regulator;
	enum led_brightness brightness;
	bool enable;
};
/* Register write order used by lm3530_init_registers(); each entry pairs
 * with the same index of reg_val[] built there. */
static const u8 lm3530_reg[LM3530_REG_MAX] = {
	LM3530_GEN_CONFIG,
	LM3530_ALS_CONFIG,
	LM3530_BRT_RAMP_RATE,
	LM3530_ALS_IMP_SELECT,
	LM3530_BRT_CTRL_REG,
	LM3530_ALS_ZB0_REG,
	LM3530_ALS_ZB1_REG,
	LM3530_ALS_ZB2_REG,
	LM3530_ALS_ZB3_REG,
	LM3530_ALS_Z0T_REG,
	LM3530_ALS_Z1T_REG,
	LM3530_ALS_Z2T_REG,
	LM3530_ALS_Z3T_REG,
	LM3530_ALS_Z4T_REG,
};
/* Translate a sysfs mode string into its LM3530_BL_MODE_* value;
 * returns -1 when the string matches no known mode. */
static int lm3530_get_mode_from_str(const char *str)
{
	int i = ARRAY_SIZE(mode_map);

	while (i--)
		if (sysfs_streq(str, mode_map[i].mode))
			return mode_map[i].mode_val;

	return -1;
}
static int lm3530_init_registers(struct lm3530_data *drvdata)
{
int ret = 0;
int i;
u8 gen_config;
u8 als_config = 0;
u8 brt_ramp;
u8 als_imp_sel = 0;
u8 brightness;
u8 reg_val[LM3530_REG_MAX];
u8 zones[LM3530_ALS_ZB_MAX];
u32 als_vmin, als_vmax, als_vstep;
struct lm3530_platform_data *pdata = drvdata->pdata;
struct i2c_client *client = drvdata->client;
struct lm3530_pwm_data *pwm = &pdata->pwm_data;
gen_config = (pdata->brt_ramp_law << LM3530_RAMP_LAW_SHIFT) |
((pdata->max_current & 7) << LM3530_MAX_CURR_SHIFT);
switch (drvdata->mode) {
case LM3530_BL_MODE_MANUAL:
case LM3530_BL_MODE_ALS:
gen_config |= LM3530_ENABLE_I2C;
break;
case LM3530_BL_MODE_PWM:
gen_config |= LM3530_ENABLE_PWM | LM3530_ENABLE_PWM_SIMPLE |
(pdata->pwm_pol_hi << LM3530_PWM_POL_SHIFT);
break;
}
if (drvdata->mode == LM3530_BL_MODE_ALS) {
if (pdata->als_vmax == 0) {
pdata->als_vmin = 0;
pdata->als_vmax = LM3530_ALS_WINDOW_mV;
}
als_vmin = pdata->als_vmin;
als_vmax = pdata->als_vmax;
if ((als_vmax - als_vmin) > LM3530_ALS_WINDOW_mV)
pdata->als_vmax = als_vmax =
als_vmin + LM3530_ALS_WINDOW_mV;
/* n zone boundary makes n+1 zones */
als_vstep = (als_vmax - als_vmin) / (LM3530_ALS_ZB_MAX + 1);
for (i = 0; i < LM3530_ALS_ZB_MAX; i++)
zones[i] = (((als_vmin + LM3530_ALS_OFFSET_mV) +
als_vstep + (i * als_vstep)) * LED_FULL)
/ 1000;
als_config =
(pdata->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
(LM3530_ENABLE_ALS) |
(pdata->als_input_mode << LM3530_ALS_SEL_SHIFT);
als_imp_sel =
(pdata->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
(pdata->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
}
brt_ramp = (pdata->brt_ramp_fall << LM3530_BRT_RAMP_FALL_SHIFT) |
(pdata->brt_ramp_rise << LM3530_BRT_RAMP_RISE_SHIFT);
if (drvdata->brightness)
brightness = drvdata->brightness;
else
brightness = drvdata->brightness = pdata->brt_val;
if (brightness > drvdata->led_dev.max_brightness)
brightness = drvdata->led_dev.max_brightness;
reg_val[0] = gen_config; /* LM3530_GEN_CONFIG */
reg_val[1] = als_config; /* LM3530_ALS_CONFIG */
reg_val[2] = brt_ramp; /* LM3530_BRT_RAMP_RATE */
reg_val[3] = als_imp_sel; /* LM3530_ALS_IMP_SELECT */
reg_val[4] = brightness; /* LM3530_BRT_CTRL_REG */
reg_val[5] = zones[0]; /* LM3530_ALS_ZB0_REG */
reg_val[6] = zones[1]; /* LM3530_ALS_ZB1_REG */
reg_val[7] = zones[2]; /* LM3530_ALS_ZB2_REG */
reg_val[8] = zones[3]; /* LM3530_ALS_ZB3_REG */
reg_val[9] = LM3530_DEF_ZT_0; /* LM3530_ALS_Z0T_REG */
reg_val[10] = LM3530_DEF_ZT_1; /* LM3530_ALS_Z1T_REG */
reg_val[11] = LM3530_DEF_ZT_2; /* LM3530_ALS_Z2T_REG */
reg_val[12] = LM3530_DEF_ZT_3; /* LM3530_ALS_Z3T_REG */
reg_val[13] = LM3530_DEF_ZT_4; /* LM3530_ALS_Z4T_REG */
if (!drvdata->enable) {
ret = regulator_enable(drvdata->regulator);
if (ret) {
dev_err(&drvdata->client->dev,
"Enable regulator failed\n");
return ret;
}
drvdata->enable = true;
}
for (i = 0; i < LM3530_REG_MAX; i++) {
/* do not update brightness register when pwm mode */
if (lm3530_reg[i] == LM3530_BRT_CTRL_REG &&
drvdata->mode == LM3530_BL_MODE_PWM) {
if (pwm->pwm_set_intensity)
pwm->pwm_set_intensity(reg_val[i],
drvdata->led_dev.max_brightness);
continue;
}
ret = i2c_smbus_write_byte_data(client,
lm3530_reg[i], reg_val[i]);
if (ret)
break;
}
return ret;
}
/* LED class brightness callback. Behavior depends on the active mode:
 * manual writes the brightness register over I2C (lazily re-initializing
 * the chip and regulator if they were off), ALS lets the chip decide so
 * this is a no-op, and PWM forwards the value to the platform PWM hook. */
static void lm3530_brightness_set(struct led_classdev *led_cdev,
		enum led_brightness brt_val)
{
	int err;
	struct lm3530_data *drvdata =
	    container_of(led_cdev, struct lm3530_data, led_dev);
	struct lm3530_platform_data *pdata = drvdata->pdata;
	struct lm3530_pwm_data *pwm = &pdata->pwm_data;
	u8 max_brightness = led_cdev->max_brightness;

	switch (drvdata->mode) {
	case LM3530_BL_MODE_MANUAL:

		if (!drvdata->enable) {
			/* chip/regulator were turned off (e.g. after a
			 * brightness of 0); bring them back up first */
			err = lm3530_init_registers(drvdata);
			if (err) {
				dev_err(&drvdata->client->dev,
					"Register Init failed: %d\n", err);
				break;
			}
		}

		/* set the brightness in brightness control register*/
		err = i2c_smbus_write_byte_data(drvdata->client,
				LM3530_BRT_CTRL_REG, brt_val);
		if (err)
			dev_err(&drvdata->client->dev,
				"Unable to set brightness: %d\n", err);
		else
			drvdata->brightness = brt_val;

		/* brightness 0 powers the supply down to save energy */
		if (brt_val == 0) {
			err = regulator_disable(drvdata->regulator);
			if (err)
				dev_err(&drvdata->client->dev,
					"Disable regulator failed\n");
			drvdata->enable = false;
		}
		break;
	case LM3530_BL_MODE_ALS:
		/* hardware tracks ambient light on its own */
		break;
	case LM3530_BL_MODE_PWM:
		if (pwm->pwm_set_intensity)
			pwm->pwm_set_intensity(brt_val, max_brightness);
		break;
	default:
		break;
	}
}
/* sysfs "mode" show handler: list every supported mode, with the active
 * one wrapped in brackets, e.g. "[man] als pwm ". */
static ssize_t lm3530_mode_get(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct lm3530_data *drvdata =
		container_of(led_cdev, struct lm3530_data, led_dev);
	int len = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(mode_map); i++) {
		const char *fmt = (drvdata->mode == mode_map[i].mode_val)
					? "[%s] " : "%s ";

		len += sprintf(buf + len, fmt, mode_map[i].mode);
	}
	len += sprintf(buf + len, "\n");

	return len;
}
/* sysfs "mode" store handler: parse the requested mode string, switch
 * the driver mode and reprogram the chip. Returns the number of bytes
 * consumed on success or a negative errno. */
static ssize_t lm3530_mode_set(struct device *dev, struct device_attribute
				   *attr, const char *buf, size_t size)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct lm3530_data *drvdata;
	struct lm3530_pwm_data *pwm;
	u8 max_brightness;
	int mode, err;

	drvdata = container_of(led_cdev, struct lm3530_data, led_dev);
	pwm = &drvdata->pdata->pwm_data;
	max_brightness = led_cdev->max_brightness;
	mode = lm3530_get_mode_from_str(buf);
	if (mode < 0) {
		dev_err(dev, "Invalid mode\n");
		return -EINVAL;
	}

	drvdata->mode = mode;

	/* set pwm to low if unnecessary */
	if (mode != LM3530_BL_MODE_PWM && pwm->pwm_set_intensity)
		pwm->pwm_set_intensity(0, max_brightness);

	err = lm3530_init_registers(drvdata);
	if (err) {
		dev_err(dev, "Setting %s Mode failed :%d\n", buf, err);
		return err;
	}

	/* A sysfs store must report how many input bytes were consumed;
	 * returning sizeof(drvdata->mode) only worked by coincidence and
	 * could exceed the actual write length. */
	return size;
}
static DEVICE_ATTR(mode, 0644, lm3530_mode_get, lm3530_mode_set);
/* I2C probe: validate platform data, allocate and populate the driver
 * state, grab the "vin" regulator, optionally program the chip, and
 * register the LED class device plus its "mode" sysfs attribute.
 * Returns 0 on success or a negative errno (real error codes from the
 * failing call are propagated instead of being flattened to -ENODEV). */
static int __devinit lm3530_probe(struct i2c_client *client,
			   const struct i2c_device_id *id)
{
	struct lm3530_platform_data *pdata = client->dev.platform_data;
	struct lm3530_data *drvdata;
	int err = 0;

	if (pdata == NULL) {
		dev_err(&client->dev, "platform data required\n");
		err = -ENODEV;
		goto err_out;
	}

	/* BL mode */
	if (pdata->mode > LM3530_BL_MODE_PWM) {
		dev_err(&client->dev, "Illegal Mode request\n");
		err = -EINVAL;
		goto err_out;
	}

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		dev_err(&client->dev, "I2C_FUNC_I2C not supported\n");
		err = -EIO;
		goto err_out;
	}

	drvdata = kzalloc(sizeof(struct lm3530_data), GFP_KERNEL);
	if (drvdata == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	drvdata->mode = pdata->mode;
	drvdata->client = client;
	drvdata->pdata = pdata;
	drvdata->brightness = LED_OFF;
	drvdata->enable = false;
	drvdata->led_dev.name = LM3530_LED_DEV;
	drvdata->led_dev.brightness_set = lm3530_brightness_set;
	drvdata->led_dev.max_brightness = MAX_BRIGHTNESS;

	i2c_set_clientdata(client, drvdata);

	drvdata->regulator = regulator_get(&client->dev, "vin");
	if (IS_ERR(drvdata->regulator)) {
		dev_err(&client->dev, "regulator get failed\n");
		err = PTR_ERR(drvdata->regulator);
		drvdata->regulator = NULL;
		goto err_regulator_get;
	}

	/* only touch the hardware now if a default brightness was given */
	if (drvdata->pdata->brt_val) {
		err = lm3530_init_registers(drvdata);
		if (err < 0) {
			dev_err(&client->dev,
				"Register Init failed: %d\n", err);
			goto err_reg_init;
		}
	}

	err = led_classdev_register(&client->dev, &drvdata->led_dev);
	if (err < 0) {
		dev_err(&client->dev, "Register led class failed: %d\n", err);
		goto err_class_register;
	}

	err = device_create_file(drvdata->led_dev.dev, &dev_attr_mode);
	if (err < 0) {
		dev_err(&client->dev, "File device creation failed: %d\n", err);
		goto err_create_file;
	}

	return 0;

err_create_file:
	led_classdev_unregister(&drvdata->led_dev);
err_class_register:
err_reg_init:
	regulator_put(drvdata->regulator);
err_regulator_get:
	kfree(drvdata);
err_out:
	return err;
}
/* I2C remove: tear down in reverse order of probe -- drop the sysfs
 * attribute, power down and release the regulator, unregister the LED
 * class device and free the driver state. */
static int __devexit lm3530_remove(struct i2c_client *client)
{
	struct lm3530_data *drvdata = i2c_get_clientdata(client);

	device_remove_file(drvdata->led_dev.dev, &dev_attr_mode);

	/* only disable if probe/brightness path left it enabled */
	if (drvdata->enable)
		regulator_disable(drvdata->regulator);
	regulator_put(drvdata->regulator);
	led_classdev_unregister(&drvdata->led_dev);
	kfree(drvdata);
	return 0;
}
/* Devices handled by this driver (matched against board i2c tables). */
static const struct i2c_device_id lm3530_id[] = {
	{LM3530_NAME, 0},
	{}
};
MODULE_DEVICE_TABLE(i2c, lm3530_id);

static struct i2c_driver lm3530_i2c_driver = {
	.probe = lm3530_probe,
	.remove = __devexit_p(lm3530_remove),
	.id_table = lm3530_id,
	.driver = {
		.name = LM3530_NAME,
		.owner = THIS_MODULE,
	},
};

/* registers the driver at module init and removes it at module exit */
module_i2c_driver(lm3530_i2c_driver);

MODULE_DESCRIPTION("Back Light driver for LM3530");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>");
|
Jackeagle/android_kernel_sony_c2305
|
drivers/leds/leds-lm3530.c
|
C
|
gpl-2.0
| 13,280
|
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
#include<math.h>
#define MAX_SIZE 1010
/* Read an element count followed by that many integers from stdin into a[].
 * Returns the number of elements actually stored. Unlike the original,
 * scanf() results are checked so malformed or truncated input cannot leave
 * garbage in the array or an inconsistent count. NOTE: the caller must
 * ensure a[] has room for the count read (bounded by MAX_SIZE here). */
int getarray(int a[])
{
    int i, count;

    /* treat a missing or negative count as an empty array */
    if (scanf("%d", &count) != 1 || count < 0)
        return 0;

    for (i = 0; i < count; i++)
    {
        if (scanf("%d", &a[i]) != 1)
            return i;   /* stop early on malformed input */
    }
    return count;
}
/* Linear search: return the index of the first occurrence of val in
 * a[0..n-1], or -1 when absent. The original used a flag variable named
 * `true` (clashes with <stdbool.h>) and could fall off the end of a
 * non-void function; this version always returns on every path. */
int find(int a[], int n, int val)
{
    int i;

    for (i = 0; i < n; i++)
    {
        if (a[i] == val)
            return i;
    }
    return -1;
}
/* Driver: for each test case read an array and a value, then print the
 * value's index or "NOT FOUND". */
int main()
{
    int cases, t;
    int arr[MAX_SIZE];

    scanf("%d", &cases);
    for (t = 1; t <= cases; t++)
    {
        int size = getarray(arr);
        int val, pos;

        scanf("%d", &val);
        pos = find(arr, size, val);
        if (pos == -1)
            printf("NOT FOUND\n");
        else
            printf("%d\n", pos);
    }
    return 0;
}
|
zning1994/practice
|
Program/CLanguage/exp/exp7/exA.c
|
C
|
gpl-2.0
| 890
|
/*****************************************************************************/
/* */
/* Ittiam 802.11 MAC SOFTWARE */
/* */
/* ITTIAM SYSTEMS PVT LTD, BANGALORE */
/* COPYRIGHT(C) 2005 */
/* */
/* This program is proprietary to Ittiam Systems Private Limited and */
/* is protected under Indian Copyright Law as an unpublished work. Its use */
/* and disclosure is limited by the terms and conditions of a license */
/* agreement. It may not be copied or otherwise reproduced or disclosed to */
/* persons outside the licensee's organization except in accordance with the*/
/* terms and conditions of such an agreement. All copies and */
/* reproductions shall be the property of Ittiam Systems Private Limited and*/
/* must bear this notice in its entirety. */
/* */
/*****************************************************************************/
/*****************************************************************************/
/* */
/* File Name : common.c */
/* */
/* Description : This file contains the functions used by both AP/STA */
/* modes in MAC. */
/* */
/* List of Functions : set_dscr_fn */
/* get_dscr_fn */
/* */
/* Issues / Problems : None */
/* */
/*****************************************************************************/
/*****************************************************************************/
/* File Includes */
/*****************************************************************************/
#include "common.h"
#include "mh.h"
#include "csl_linux.h"
#include "trout_share_mem.h"
#ifdef MAC_HW_UNIT_TEST_MODE
#include "mh_test.h"
#endif /* MAC_HW_UNIT_TEST_MODE */
/*****************************************************************************/
/* Global Variables */
/*****************************************************************************/
UWORD32 g_calib_cnt = DEFAULT_CALIB_COUNT;
extern UWORD32 g_done_wifi_suspend;
#ifdef DEBUG_MODE
mac_stats_t g_mac_stats = {0};
reset_stats_t g_reset_stats = {0};
UWORD8 g_enable_debug_print = 1;
UWORD8 g_11n_print_stats = 0;
#endif /* DEBUG_MODE */
#ifdef MEM_DEBUG_MODE
mem_stats_t g_mem_stats = {0};
#endif /* MEM_DEBUG_MODE */
#ifdef MAC_HW_UNIT_TEST_MODE
#endif /* MAC_HW_UNIT_TEST_MODE */
#ifdef DSCR_MACROS_NOT_DEFINED
/*****************************************************************************/
/* */
/* Function Name : set_dscr_fn */
/* */
/* Description : This function modifies the packet descriptor with the */
/* new specified value. The descriptor field to be modified */
/* is specified by the descriptor offset and width */
/* */
/* Inputs : 1) Offset of the descriptor field */
/* 2) Width of the descriptor field */
/* 3) Pointer to the packet descriptor to be modified */
/* 4) The new value for the descriptor field */
/* */
/* Globals : None */
/* */
/* Processing : Modifies the specified descriptor with the supplied */
/* value. */
/* */
/* Outputs : None */
/* Returns : None */
/* Issues : None */
/* */
/*****************************************************************************/
/* Write `value` into a descriptor field of the given bit offset/width
 * inside the 32-bit word at *ptr, preserving the other bits. The word is
 * converted to/from little endian around the update. Fields are numbered
 * from the MSB side (offset 0 = top bit). FIX: the mask is now built from
 * an unsigned literal -- the old signed (1 << width) invoked undefined
 * behaviour (signed overflow) for width == 31. */
void set_dscr_fn(UWORD8 offset, UWORD16 width, UWORD32 *ptr, UWORD32 value)
{
    UWORD32 mask_inverse = 0;
    UWORD32 mask         = 0;
    UWORD32 temp         = 0;
    UWORD32 shift_offset = 32 - width - offset;

#ifdef DEBUG_MODE
    if((width + offset) > 32)
    {
        /* Signal Erroneous input */
    }
#endif /* DEBUG_MODE */

    /* Calculate the inverse of the Mask (1s over the target field) */
    if(width < 32)
        mask_inverse = ((1UL << width) - 1) << shift_offset;
    else
        mask_inverse = 0xFFFFFFFF;

    /* Generate the mask */
    mask = ~mask_inverse;

    /* Read the descriptor word in little endian format */
    temp = convert_to_le(*ptr);

    /* Updating the value of the descriptor field with the help of masks */
    temp = ((value << shift_offset) & mask_inverse) | (temp & mask);

    /* Swap the byte order in the word if required for endian-ness change */
    *ptr = convert_to_le(temp);
}
/*****************************************************************************/
/* */
/* Function Name : get_dscr_fn */
/* */
/* Description : This function reads a word32 location to extract a */
/* specified descriptor field.of specified width. */
/* */
/* Inputs : 1) Offset of the descriptor field in the 32 bit boundary */
/* 2) Width of the desired descriptor field */
/* 3) Pointer to the word32 location of the field */
/* */
/* Globals : None */
/* */
/* Processing : Reads the descriptor for the specific field and returns */
/* the value. */
/* */
/* Outputs : None */
/* */
/* Returns : Descriptor field value */
/* */
/* Issues : None */
/* */
/*****************************************************************************/
/* Extract and return a descriptor field of the given bit offset/width
 * from the 32-bit word at *ptr (read in little endian). Fields are
 * numbered from the MSB side (offset 0 = top bit). FIX: the mask is now
 * built from an unsigned literal -- the old signed (1 << width) invoked
 * undefined behaviour (signed overflow) for width == 31. */
UWORD32 get_dscr_fn(UWORD8 offset, UWORD16 width, UWORD32 *ptr)
{
    UWORD32 mask  = 0;
    UWORD32 temp  = 0;
    UWORD32 value = 0;
    UWORD32 shift_offset = 32 - width - offset;

#ifdef DEBUG_MODE
    if((width + offset) > 32)
    {
        /* Signal Erroneous input */
    }
#endif /* DEBUG_MODE */

    /* Calculate the Mask (1s over the target field) */
    if(width < 32)
        mask = ((1UL << width) - 1) << shift_offset;
    else
        mask = 0xFFFFFFFF;

    /* Swap the byte order in the word if required for endian-ness change */
    temp = convert_to_le(*ptr);

    /* Obtain the value of the descriptor field with the help of masks */
    value = (temp & mask) >> shift_offset;

    return value;
}
#endif /* DSCR_MACROS_NOT_DEFINED */
#ifdef MWLAN
/*****************************************************************************/
/* */
/* Function Name : itm_memset */
/* */
/* Description : This function sets the specified number of bytes in the */
/* buffer to the required value. The functionality is */
/* similar to the standard memset function. The new */
/* implementation was required due to the bug seen when */
/* using memset function across the bridge on MWLAN. */
/* */
/* Inputs : 1) Pointer to the buffer. */
/* 2) Value of character to set */
/* 3) Number of characters to set. */
/* */
/* Globals : None */
/* */
/* Processing : */
/* */
/* Outputs : None */
/* */
/* Returns : Pointer to the buffer */
/* */
/* Issues : None */
/* */
/*****************************************************************************/
/* memset() replacement (see original file header: needed because the
 * standard memset misbehaved across the bridge on MWLAN). Sets `num`
 * bytes at `buff` to `val` and returns `buff`. Large fills are done with
 * aligned 32-bit stores.
 *
 * FIX: the head-alignment count was wrong. The old code copied
 * (addr & 3) bytes, which for addr % 4 == 1 or 2 left cbuff *misaligned*
 * before the word-store loop (e.g. addr%4==1 -> after 1 byte the pointer
 * is at %4==2), causing unaligned 32-bit writes. The correct number of
 * leading bytes is (4 - (addr & 3)) & 3. */
void *itm_memset(void *buff, UWORD8 val, UWORD32 num)
{
    UWORD8 *cbuff = (UWORD8 *)buff;

    if(num < 20)
    {
        /* Short fill: plain byte loop */
        while(num--)
            *cbuff++ = val;
    }
    else
    {
        UWORD32 *wbuff = NULL;
        UWORD32 wval   = val;
        UWORD32 head   = 0;

        /* Bytes needed to reach the next 4-byte boundary (0 if aligned) */
        head = (4 - ((UWORD32)cbuff & 0x3)) & 0x3;

        /* Unaligned head (num >= 20 guarantees head <= num) */
        num -= head;
        while(head--)
            *cbuff++ = val;

        /* Word transfers: replicate val into all four byte lanes */
        wval += (wval << 8) + (wval << 16) + (wval << 24);
        wbuff = (UWORD32 *)cbuff;
        while(num > 3)
        {
            *wbuff++ = wval;
            num -= 4;
        }

        /* Unaligned tail */
        cbuff = (UWORD8 *)wbuff;
        while(num--)
            *cbuff++ = val;
    }

    return buff;
}
#endif /* MWLAN */
/*****************************************************************************/
/* */
/* Function Name : calibrate_delay_loop */
/* */
/* Description : This function calibrates the delay loop counter using */
/* MAC H/w TSF Timer . */
/* */
/* Inputs : None */
/* */
/* Globals : g_calib_cnt */
/* */
/* Processing : MAC H/w version register is read a fixed number of times */
/* to introduce delay in S/w. This function calibrates this */
/* delay mechanism. It updates the global variable */
/* (g_calib_cnt) which holds the number of times the */
/* version register should be read to introduce a delay of */
/* 10us. */
/* */
/* Outputs : None */
/* Returns : None */
/* Issues : None */
/* */
/*****************************************************************************/
/* Calibrate the software delay loop against the MAC H/w TSF timer:
 * time a fixed number of GET_TIME() iterations and derive g_calib_cnt,
 * the iteration count that yields a 10us delay (used by
 * add_calib_delay()). MAC H/w state touched during calibration is
 * backed up and restored. */
void calibrate_delay_loop(void)
{
    UWORD32 i          = 0;
    UWORD32 entry_time = 0;
    UWORD32 exit_time  = 0;
    BOOL_T  pa_enabled = BFALSE;
    BOOL_T  tbtt_mask  = BFALSE;
    UWORD32 tsf_ctrl_bkup = 0;
    UWORD32 calib_thresh  = 0;

	TROUT_FUNC_ENTER;
    /* Backup the registers which will be used for the calibration process */
    pa_enabled    = is_machw_enabled();
    tbtt_mask     = is_machw_tbtt_int_masked();
    tsf_ctrl_bkup = get_machw_tsf_ctrl();

    critical_section_start();

    /* PA is disabled but TBTT Interrupts can still come. Mask it. */
    disable_machw_phy_and_pa();
    mask_machw_tbtt_int();
    set_machw_tsf_start();
    set_machw_tsf_beacon_tx_suspend_enable();

    /* Initialize Calibration Parameters */
    calib_thresh = 1000;

    /* Measure elapsed TSF time (microseconds) over calib_thresh reads */
    entry_time = get_machw_tsf_timer_lo();

    for(i = 0; i < calib_thresh; i++)
    {
        GET_TIME();	//modified by chengwg.
    }

    exit_time = get_machw_tsf_timer_lo();

    /* Restore the Backed-up registers */
    if(pa_enabled == BTRUE)
        enable_machw_phy_and_pa();

    if(tbtt_mask == BFALSE)
        unmask_machw_tbtt_int();

    set_machw_tsf_ctrl(tsf_ctrl_bkup);

    critical_section_end();

    /* The Delay Calibration Count is computed to provide a delay of 10us.
     * Skipped if the TSF value wrapped (exit_time <= entry_time), keeping
     * the previous/default g_calib_cnt. */
    if(exit_time > entry_time)
        g_calib_cnt = ((calib_thresh+2)*10)/(exit_time-entry_time) + 1;

	TROUT_DBG4("Delay Calibration: Cnt=%d Delay=%d Calib_Cnt=%d\n",calib_thresh+2,
           (exit_time-entry_time), g_calib_cnt);
	TROUT_FUNC_EXIT;
}
#ifdef COMBO_SCAN
/* COMBO_SCAN variant of calibrate_delay_loop(). NOTE(review): the final
 * g_calib_cnt computation is commented out below, so this function goes
 * through the same measurement motions but never updates the calibration
 * count -- presumably intentional (delay/side effects only); confirm
 * before relying on it for calibration. */
void calibrate_delay_loop_plus(void)
{
    UWORD32 i          = 0;
    UWORD32 entry_time = 0;
    UWORD32 exit_time  = 0;
    BOOL_T  pa_enabled = BFALSE;
    BOOL_T  tbtt_mask  = BFALSE;
    UWORD32 tsf_ctrl_bkup = 0;
    UWORD32 calib_thresh  = 0;

	TROUT_FUNC_ENTER;
    /* Backup the registers which will be used for the calibration process */
    pa_enabled    = is_machw_enabled();
    tbtt_mask     = is_machw_tbtt_int_masked();
    tsf_ctrl_bkup = get_machw_tsf_ctrl();

    critical_section_start();

    /* PA is disabled but TBTT Interrupts can still come. Mask it. */
    disable_machw_phy_and_pa();
    mask_machw_tbtt_int();
    set_machw_tsf_start();
    set_machw_tsf_beacon_tx_suspend_enable();

    /* Initialize Calibration Parameters */
    calib_thresh = 1000;

    entry_time = get_machw_tsf_timer_lo();

    for(i = 0; i < calib_thresh; i++)
    {
        GET_TIME();	//modified by chengwg.
    }

    exit_time = get_machw_tsf_timer_lo();

    /* Restore the Backed-up registers */
    if(pa_enabled == BTRUE)
        enable_machw_phy_and_pa();

    if(tbtt_mask == BFALSE)
        unmask_machw_tbtt_int();

    set_machw_tsf_ctrl(tsf_ctrl_bkup);

    critical_section_end();

    /* The Delay Calibration Count is computed to provide a delay of 10us */
    //if(exit_time > entry_time)
        //g_calib_cnt = ((calib_thresh+2)*10)/(exit_time-entry_time) + 1;

	//TROUT_DBG4("Delay Calibration: Cnt=%d Delay=%d Calib_Cnt=%d\n",calib_thresh+2,
           //(exit_time-entry_time), g_calib_cnt);
	TROUT_FUNC_EXIT;
}
#endif
/*****************************************************************************/
/* */
/* Function Name : add_calib_delay */
/* */
/* Description : This function provides a minimum S/w delay of the */
/* required time specified in units of 10us */
/* */
/* Inputs : 1) The required delay in units of 10us. i.e. Input 10 */
/* will provide a delay of 100us */
/* */
/* Globals : g_calib_cnt */
/* */
/* Processing : The MAC H/w version number is read continuously for a */
/* precomputed number of times to provide the required */
/* delay. */
/* */
/* Outputs : None */
/* */
/* Returns : None */
/* */
/* Issues : None */
/* */
/*****************************************************************************/
/*
 * Busy-wait for approximately (delay * 10) microseconds.
 * g_calib_cnt holds the number of GET_TIME() iterations that the
 * calibration routine measured to take 10us; repeating it delay times
 * yields the requested delay.
 */
void add_calib_delay(UWORD32 delay)
{
    UWORD32 remaining = g_calib_cnt * delay;

    while (remaining != 0)
    {
        GET_TIME();   /* calibrated time-burning read */
        remaining--;
    }
}
#ifdef DEBUG_MODE
/*
 * Dump every Block-Ack related debug counter from g_mac_stats.
 *
 * Fix: the label printed for baoldbarrxd previously duplicated the
 * baoldbarxd label ("Stale BA frames received"), making the two
 * counters indistinguishable in the log; it now reads "Stale BAR
 * frames received", consistent with the BA/BAR naming used above
 * (babarxd = "BA Frames Rxd", babarrxd = "BAR Frames Rxd").
 */
void print_ba_debug_stats(void)
{
    UWORD32 idx = 0;

    PRINTK("BA Frames Rxd = %d\n\r", g_mac_stats.babarxd);
    PRINTK("BAR Frames successfully Txd = %d\n\r", g_mac_stats.babartxd);
    PRINTK("BAR Frames Rxd = %d\n\r", g_mac_stats.babarrxd);
    PRINTK("Data Frames retransmitted = %d\n\r", g_mac_stats.badatretx);
    PRINTK("Times Window is moved = %d\n\r", g_mac_stats.bawinmove);
    PRINTK("BAR Tx-Failures = %d\n\r", g_mac_stats.babarfail);
    PRINTK("Data Tx-Failures = %d\n\r", g_mac_stats.badatfail);
    PRINTK("Missing Buffers = %d\n\r", g_mac_stats.babufmiss);
    PRINTK("Frames deleted during Buffer cleanup = %d\n\r", g_mac_stats.badatclnup);
    PRINTK("Pending Frames discarded = %d\n\r", g_mac_stats.bapenddrop);
    PRINTK("Frames Txd from the Pending Q = %d\n\r", g_mac_stats.bapendingtxwlantxd);
    PRINTK("Stale BA frames received = %d\n\r", g_mac_stats.baoldbarxd);
    PRINTK("Stale BAR frames received = %d\n\r", g_mac_stats.baoldbarrxd);
    PRINTK("Frames received out of window and hence droped = %d\n\r",g_mac_stats.barxdatoutwin);
    PRINTK("Re-queue failures = %d\n\r", g_mac_stats.bartrqfail);
    PRINTK("Number of blocks Qed = %d\n\r", g_mac_stats.banumblks);
    PRINTK("Number of Frames Qed = %d\n\r", g_mac_stats.banumqed);
    PRINTK("Number of times the pending Q was empty while enqueing = %d\n\r",g_mac_stats.baemptyQ);
    PRINTK("Num of times grp=%d\n\r", g_mac_stats.num_buffto);
    PRINTK("ba_num_dq=%d\n\r", g_mac_stats.ba_num_dq);
    PRINTK("ba_num_dqed=%d\n\r", g_mac_stats.ba_num_dqed);
    PRINTK("batxfba=%d\n\r", g_mac_stats.batxfba);

    /* Scratch/temporary BA counters */
    for(idx = 0; idx < 10; idx++)
        PRINTK("batemp[%d] = %d\n\r", idx, g_mac_stats.batemp[idx]);
}
/*
 * Print every memory-exception counter via printe() and report whether
 * anything noteworthy was printed.
 *
 * Returns: the OR of all printe() return values (0 when nothing was
 *          flagged).  NOTE(review): printe()'s exact return contract is
 *          defined elsewhere — presumably non-zero when the counter is
 *          non-zero; confirm against its definition.
 *
 * Compiled to an empty report (always returns 0) unless MEM_DEBUG_MODE
 * is defined.
 */
UWORD8 print_mem_stats(void)
{
    UWORD8 print_flag = 0;
#ifdef MEM_DEBUG_MODE
    /* Allocation/free exception counters */
    print_flag |= printe("nosizeallocexc", g_mem_stats.nosizeallocexc);
    print_flag |= printe("nofreeallocexc", g_mem_stats.nofreeallocexc);
    print_flag |= printe("reallocexc", g_mem_stats.reallocexc);
    print_flag |= printe("corruptallocexc", g_mem_stats.corruptallocexc);
    print_flag |= printe("nullfreeexc", g_mem_stats.nullfreeexc);
    print_flag |= printe("oobfreeexc", g_mem_stats.oobfreeexc);
    print_flag |= printe("refreeexc", g_mem_stats.refreeexc);
    print_flag |= printe("corruptfreeexc", g_mem_stats.corruptfreeexc);
    print_flag |= printe("invalidfreeexc", g_mem_stats.invalidfreeexc);
    print_flag |= printe("excessfreeexc", g_mem_stats.excessfreeexc);
    print_flag |= printe("nulladdexc", g_mem_stats.nulladdexc);
    print_flag |= printe("oobaddexc", g_mem_stats.oobaddexc);
    print_flag |= printe("freeaddexc", g_mem_stats.freeaddexc);
    print_flag |= printe("invalidaddexc", g_mem_stats.invalidaddexc);
    print_flag |= printe("excessaddexc", g_mem_stats.excessaddexc);
    /* Per-pool allocation-failure counters */
    print_flag |= printe("nofreeDscrallocexc[0]", g_mem_stats.nofreeDscrallocexc[0]);
    print_flag |= printe("nofreeDscrallocexc[1]", g_mem_stats.nofreeDscrallocexc[1]);
    print_flag |= printe("nofreePktallocexc[0]", g_mem_stats.nofreePktallocexc[0]);
    print_flag |= printe("nofreePktallocexc[1]", g_mem_stats.nofreePktallocexc[1]);
    print_flag |= printe("nofreePktallocexc[2]", g_mem_stats.nofreePktallocexc[2]);
    print_flag |= printe("nofreePktallocexc[3]", g_mem_stats.nofreePktallocexc[3]);
    print_flag |= printe("nofreePktallocexc[4]", g_mem_stats.nofreePktallocexc[4]);
    print_flag |= printe("nofreeLocalallocexc[0]", g_mem_stats.nofreeLocalallocexc[0]);
    print_flag |= printe("nofreeLocalallocexc[1]", g_mem_stats.nofreeLocalallocexc[1]);
    print_flag |= printe("nofreeLocalallocexc[2]", g_mem_stats.nofreeLocalallocexc[2]);
    print_flag |= printe("nofreeLocalallocexc[3]", g_mem_stats.nofreeLocalallocexc[3]);
    print_flag |= printe("nofreeLocalallocexc[4]", g_mem_stats.nofreeLocalallocexc[4]);
    print_flag |= printe("nofreeLocalallocexc[5]", g_mem_stats.nofreeLocalallocexc[5]);
    print_flag |= printe("nofreeLocalallocexc[6]", g_mem_stats.nofreeLocalallocexc[6]);
    print_flag |= printe("nofreeLocalallocexc[7]", g_mem_stats.nofreeLocalallocexc[7]);
    print_flag |= printe("nofreeEventallocexc", g_mem_stats.nofreeEventallocexc);
    /* Print the size of the maximum Shared memory used and reset it after that */
    PRINTK("Max Scratch Memory Utilized = %d", get_max_scratch_mem_usage());
    reset_scratch_mem_usage();
#endif /* MEM_DEBUG_MODE */
    return print_flag;
}
/*
 * Dump the memory statistics (when MEM_DEBUG_MODE) followed by the MAC
 * statistics.  In normal builds the MAC counters come from g_mac_stats;
 * in MAC_HW_UNIT_TEST_MODE builds they come from g_test_stats instead.
 * Read-only: nothing is reset.
 */
void print_debug_stats(void)
{
    UWORD8 i = 0;
#ifdef MEM_DEBUG_MODE
    PRINTK("Memory Statistics\n\r");
    /* Allocation / free totals per pool (sd=shared, sp=?, l=local, e=event
     * — pool naming inferred from the alloc counters; confirm in mem hdr) */
    PRINTK("sdalloc = %d\n\r",g_mem_stats.sdalloc);
    PRINTK("sdfree = %d\n\r",g_mem_stats.sdfree);
    PRINTK("sdtotalfree = %d\n\r",g_mem_stats.sdtotalfree);
    PRINTK("spalloc = %d\n\r",g_mem_stats.spalloc);
    PRINTK("spfree = %d\n\r",g_mem_stats.spfree);
    PRINTK("sptotalfree = %d\n\r",g_mem_stats.sptotalfree);
    PRINTK("lalloc = %d\n\r",g_mem_stats.lalloc);
    PRINTK("lfree = %d\n\r",g_mem_stats.lfree);
    PRINTK("ltotalfree = %d\n\r",g_mem_stats.ltotalfree);
    PRINTK("ealloc = %d\n\r",g_mem_stats.ealloc);
    PRINTK("efree = %d\n\r",g_mem_stats.efree);
    PRINTK("etotalfree = %d\n\r",g_mem_stats.etotalfree);
    /* Exception counters */
    PRINTK("nosizeallocexc = %d\n\r",g_mem_stats.nosizeallocexc);
    PRINTK("nofreeallocexc = %d\n\r",g_mem_stats.nofreeallocexc);
    PRINTK("reallocexc = %d\n\r",g_mem_stats.reallocexc);
    PRINTK("corruptallocexc = %d\n\r",g_mem_stats.corruptallocexc);
    PRINTK("nullfreeexc = %d\n\r",g_mem_stats.nullfreeexc);
    PRINTK("oobfreeexc = %d\n\r",g_mem_stats.oobfreeexc);
    PRINTK("refreeexc = %d\n\r",g_mem_stats.refreeexc);
    PRINTK("corruptfreeexc = %d\n\r",g_mem_stats.corruptfreeexc);
    PRINTK("invalidfreeexc = %d\n\r",g_mem_stats.invalidfreeexc);
    PRINTK("excessfreeexc = %d\n\r",g_mem_stats.excessfreeexc);
    PRINTK("nulladdexc = %d\n\r",g_mem_stats.nulladdexc);
    PRINTK("oobaddexc = %d\n\r",g_mem_stats.oobaddexc);
    PRINTK("freeaddexc = %d\n\r",g_mem_stats.freeaddexc);
    PRINTK("invalidaddexc = %d\n\r",g_mem_stats.invalidaddexc);
    PRINTK("excessaddexc = %d\n\r",g_mem_stats.excessaddexc);
    PRINTK("nofreeDscrallocexc[0] = %d\n\r",g_mem_stats.nofreeDscrallocexc[0]);
    PRINTK("nofreeDscrallocexc[1] = %d\n\r",g_mem_stats.nofreeDscrallocexc[1]);
    for(i = 0; i < 5; i++)
        PRINTK("nofreePktallocexc[%d] = %d\n\r",i,
            g_mem_stats.nofreePktallocexc[i]);
    for(i = 0; i < 8; i++)
        PRINTK("nofreeLocalallocexc[%d] = %d\n\r",i,
            g_mem_stats.nofreeLocalallocexc[i]);
    PRINTK("nofreeEventallocexc = %d\n\r",
        g_mem_stats.nofreeEventallocexc);
#endif /* MEM_DEBUG_MODE */
    PRINTK("\nMAC Statistics\n\r");
#ifndef MAC_HW_UNIT_TEST_MODE
    /* Interrupt counters */
    PRINTK("itbtt = %d\n\r",g_mac_stats.itbtt);
    PRINTK("itxc = %d\n\r",g_mac_stats.itxc);
    PRINTK("irxc = %d\n\r",g_mac_stats.irxc);
    PRINTK("ihprxc = %d\n\r",g_mac_stats.ihprxc);
    PRINTK("ierr = %d\n\r",g_mac_stats.ierr);
    PRINTK("ideauth = %d\n\r",g_mac_stats.ideauth);
    PRINTK("icapend = %d\n\r",g_mac_stats.icapend);
    /* Error counters */
    PRINTK("enpmsdu = %d\n\r",g_mac_stats.enpmsdu);
    PRINTK("erxqemp = %d\n\r",g_mac_stats.erxqemp);
    PRINTK("etxsus1machang = %d\n\r",g_mac_stats.etxsus1machang);
    PRINTK("etxsus1phyhang = %d\n\r",g_mac_stats.etxsus1phyhang);
    PRINTK("etxsus3 = %d\n\r",g_mac_stats.etxsus3);
    PRINTK("ebus = %d\n\r",g_mac_stats.ebus);
    PRINTK("ebwrsig = %d\n\r",g_mac_stats.ebwrsig);
    PRINTK("emsaddr = %d\n\r",g_mac_stats.emsaddr);
    PRINTK("etxfifo = %d\n\r",g_mac_stats.etxfifo);
    PRINTK("erxfifo = %d\n\r",g_mac_stats.erxfifo);
    PRINTK("ehprxfifo = %d\n\r",g_mac_stats.ehprxfifo);
    PRINTK("etxqempt = %d\n\r",g_mac_stats.etxqempt);
    PRINTK("edmanoerr = %d\n\r",g_mac_stats.edmanoerr);
    PRINTK("etxcenr = %d\n\r",g_mac_stats.etxcenr);
    PRINTK("erxcenr = %d\n\r",g_mac_stats.erxcenr);
    PRINTK("esgaf = %d\n\r",g_mac_stats.esgaf);
    PRINTK("eother = %d\n\r",g_mac_stats.eother);
    PRINTK("qatxp = %d\n\r",g_mac_stats.qatxp);
    PRINTK("qdtxp = %d\n\r",g_mac_stats.qdtxp);
#else /* MAC_HW_UNIT_TEST_MODE */
    PRINTK("rxci = %d\n\r",g_test_stats.rxci);
    PRINTK("hprxci = %d\n\r",g_test_stats.hprxci);
    PRINTK("txci = %d\n\r",g_test_stats.txci);
    PRINTK("tbtti = %d\n\r",g_test_stats.tbtti);
    PRINTK("erri = %d\n\r",g_test_stats.erri);
    PRINTK("capei = %d\n\r",g_test_stats.capei);
    PRINTK("uki = %d\n\r",g_test_stats.uki);
    PRINTK("err.enpmsdu = %d\n\r",g_test_stats.exp.enpmsdu);
    PRINTK("err.erxqemp = %d\n\r",g_test_stats.exp.erxqemp);
    PRINTK("err.emsaddr = %d\n\r",g_test_stats.exp.emsaddr);
    PRINTK("err.etxsus1machang = %d\n\r",g_test_stats.exp.etxsus1machang);
    PRINTK("err.etxsus1phyhang = %d\n\r",g_test_stats.exp.etxsus1phyhang);
    PRINTK("err.etxsus3 = %d\n\r",g_test_stats.exp.etxsus3);
    PRINTK("err.ebus = %d\n\r",g_test_stats.exp.ebus);
    PRINTK("err.ebwrsig = %d\n\r",g_test_stats.exp.ebwrsig);
    PRINTK("err.etxqempt = %d\n\r",g_test_stats.exp.etxqempt);
    PRINTK("err.edmanoerr = %d\n\r",g_test_stats.exp.edmanoerr);
    PRINTK("err.etxcenr = %d\n\r",g_test_stats.exp.etxcenr);
    PRINTK("err.erxcenr = %d\n\r",g_test_stats.exp.erxcenr);
    PRINTK("err.esgaf = %d\n\r",g_test_stats.exp.esgaf);
    PRINTK("err.etxfifo = %d\n\r",g_test_stats.exp.etxfifo);
    PRINTK("err.erxfifo = %d\n\r",g_test_stats.exp.erxfifo);
    PRINTK("err.eother = %d\n\r",g_test_stats.exp.eother);
#endif /* MAC_HW_UNIT_TEST_MODE */
}
/*
 * Print one line per compile-time feature flag this build was compiled
 * with.  Purely informational (PRINTK only); useful for matching a log
 * against a particular firmware/driver binary.
 */
void print_build_flags(void)
{
#ifdef ETHERNET_HOST
    PRINTK("ETHERNET_HOST\n\r");
#endif /* ETHERNET_HOST */
#ifdef GENERIC_HOST
    PRINTK("GENERIC_HOST\n\r");
#endif /* GENERIC_HOST */
#ifdef PHY_802_11n
    PRINTK("PHY_802_11n\n\r");
#endif /* PHY_802_11n */
#ifdef GENERIC_PHY
    PRINTK("GENERIC_PHY\n\r");
#endif /* GENERIC_PHY */
#ifdef ITTIAM_PHY
    PRINTK("ITTIAM_PHY\n\r");
#endif /* ITTIAM_PHY */
#ifdef BSS_ACCESS_POINT_MODE
    PRINTK("BSS_ACCESS_POINT_MODE\n\r");
#endif /* BSS_ACCESS_POINT_MODE */
#ifdef IBSS_BSS_STATION_MODE
    PRINTK("IBSS_BSS_STATION_MODE\n\r");
#endif /* IBSS_BSS_STATION_MODE */
#ifdef MAC_HW_UNIT_TEST_MODE
    PRINTK("MAC_HW_UNIT_TEST_MODE \n\r");
#endif /* MAC_HW_UNIT_TEST_MODE */
#ifdef MAC_802_11I
    PRINTK("MAC_802_11I \n\r");
#endif /* MAC_802_11I */
#ifdef SUPP_11I
    PRINTK("SUPP_11I \n\r");
#endif /* SUPP_11I */
#ifdef MAC_WMM
    PRINTK("MAC_WMM \n\r");
#endif /* MAC_WMM */
#ifdef MAC_802_11N
    PRINTK("MAC_802_11N \n\r");
#endif /* MAC_802_11N */
#ifdef MAC_802_1X
    PRINTK("MAC_802_1X \n\r");
#endif /* MAC_802_1X */
#ifdef MAC_802_11H
    PRINTK("MAC_802_11H \n\r");
#endif /* MAC_802_11H */
#ifdef GENERIC_RF
    PRINTK("GENERIC_RF\n\r");
#endif /* GENERIC_RF */
#ifdef RF_MAXIM_ITTIAM
    PRINTK("RF_MAXIM_ITTIAM \n\r");
#endif /* RF_MAXIM_ITTIAM */
/* 20120709 caisf masked, merged ittiam mac v1.2 code
 * (RF chip flags below were dropped in the v1.2 merge) */
#if 0
#ifdef RF_AIROHA_ITTIAM
    PRINTK("RF_AIROHA_ITTIAM \n\r");
#endif /* RF_AIROHA_ITTIAM */
#endif
#ifdef MAX2829
    PRINTK("MAX2829 \n\r");
#endif /* MAX2829 */
/* 20120709 caisf masked, merged ittiam mac v1.2 code */
#if 0
#ifdef MAX2830_32
    PRINTK("MAX2830_32 \n\r");
#endif /* MAX2830_32 */
#ifdef AL2236
    PRINTK("AL2236 \n\r");
#endif /* AL2236 */
#ifdef AL7230
    PRINTK("AL7230 \n\r");
#endif /* AL7230 */
#endif
#ifdef MWLAN
    PRINTK("MWLAN \n\r");
#endif /* MWLAN */
#ifdef OS_LINUX_CSL_TYPE
    PRINTK("OS_LINUX_CSL_TYPE \n\r");
#endif /* OS_LINUX_CSL_TYPE */
#ifdef DEBUG_MODE
    PRINTK("DEBUG_MODE \n\r");
#endif /* DEBUG_MODE */
#ifdef USE_PROCESSOR_DMA
    PRINTK("USE_PROCESSOR_DMA \n\r");
#endif /* USE_PROCESSOR_DMA */
#ifdef EDCA_DEMO_KLUDGE
    PRINTK("EDCA_DEMO_KLUDGE \n\r");
#endif /* EDCA_DEMO_KLUDGE */
#ifdef LOCALMEM_TX_DSCR
    PRINTK("LOCALMEM_TX_DSCR \n\r");
#endif /* LOCALMEM_TX_DSCR */
#ifdef AUTORATE_FEATURE
    PRINTK("AUTORATE_FEATURE \n\r");
#endif /* AUTORATE_FEATURE */
#ifdef DISABLE_MACHW_DEFRAG
    PRINTK("DISABLE_MACHW_DEFRAG \n\r");
#endif /* DISABLE_MACHW_DEFRAG */
#ifdef DISABLE_MACHW_DEAGGR
    PRINTK("DISABLE_MACHW_DEAGGR \n\r");
#endif /* DISABLE_MACHW_DEAGGR */
#ifdef PHY_TEST_MAX_PKT_RX
    PRINTK("PHY_TEST_MAX_PKT_RX \n\r");
#endif /* PHY_TEST_MAX_PKT_RX */
#ifdef DEFAULT_SME
    PRINTK("DEFAULT_SME \n\r");
#endif /* DEFAULT_SME */
#ifdef NO_ACTION_RESET
    PRINTK("NO_ACTION_RESET \n\r");
#endif /* NO_ACTION_RESET */
#ifdef LITTLE_ENDIAN
    PRINTK("LITTLE_ENDIAN \n\r");
#endif /* LITTLE_ENDIAN */
#ifdef DSCR_MACROS_NOT_DEFINED
    PRINTK("DSCR_MACROS_NOT_DEFINED \n\r");
#endif /* DSCR_MACROS_NOT_DEFINED */
#ifdef PHY_CONTINUOUS_TX_MODE
    PRINTK("PHY_CONTINUOUS_TX_MODE \n\r");
#endif /* PHY_CONTINUOUS_TX_MODE */
#ifdef HANDLE_ERROR_INTR
    PRINTK("HANDLE_ERROR_INTR \n\r");
#endif /* HANDLE_ERROR_INTR */
#ifdef MEM_DEBUG_MODE
    PRINTK("MEM_DEBUG_MODE \n\r");
#endif /* MEM_DEBUG_MODE */
#ifdef MEM_STRUCT_SIZES_INIT
    PRINTK("MEM_STRUCT_SIZES_INIT \n\r");
#endif /* MEM_STRUCT_SIZES_INIT */
#ifdef TX_ABORT_FEATURE
    PRINTK("TX_ABORT_FEATURE \n\r");
#endif /* TX_ABORT_FEATURE */
}
#endif /* DEBUG_MODE */
/* chenq add itm trace
 * Runtime switches for the "itm trace" debug facility.  All are plain
 * ints used as booleans/states (0 = off); they are written by the
 * dispatcher functions below (ShareMemInfo, MacTxRxStatistics,
 * SpiSdioDmaState, MacFsmMibState) and reset by Reset_itm_trace_flag().
 */
/* flags of ShareMemInfo */
int g_debug_print_tx_pkt_on = 0;
/* NOTE(review): "ptk" looks like a typo for "pkt"; name kept — it may be
 * referenced from other translation units. */
int g_debug_print_rx_ptk_on = 0;
int g_debug_print_tx_buf_on = 0;
int g_debug_print_rx_buf_on = 0;
int g_debug_buf_use_info_start = 0;
/* flags of MacTxRxStatistics (hold counter_start/counter_end states) */
int g_debug_txrx_reg_info_start = 0;
int g_debug_txrx_frame_info_start = 0;
int g_debug_rx_size_info_start = 0;
int g_debug_isr_info_start = 0;
/* flags of SpiSdioDmaState */
int g_debug_print_spisdio_bus_on = 0;
int g_debug_print_dma_do_on = 0;
int g_debug_spisdiodma_isr_info_start = 0;
/* flags of MacFsmMibState */
int g_debug_print_fsm_on = 0;
int g_debug_print_assoc_on = 0;
int g_debug_print_Enc_auth_on = 0;
int g_debug_print_wps_on = 0;
int g_debug_print_ps_on = 0;//PowerSave
int g_debug_print_wd_on = 0;//WiFi-Direct
int g_debug_print_txrx_path_on = 0;
/* flags of Host6820Info */
//no add
/*
 * Clear every itm-trace runtime switch back to its initial (off/0)
 * state, grouped by the dispatcher that owns each flag.
 */
void Reset_itm_trace_flag(void)
{
    /* ShareMemInfo flags */
    g_debug_print_tx_pkt_on = g_debug_print_rx_ptk_on = 0;
    g_debug_print_tx_buf_on = g_debug_print_rx_buf_on = 0;
    g_debug_buf_use_info_start = 0;

    /* MacTxRxStatistics flags */
    g_debug_txrx_reg_info_start = g_debug_txrx_frame_info_start = 0;
    g_debug_rx_size_info_start = g_debug_isr_info_start = 0;

    /* SpiSdioDmaState flags */
    g_debug_print_spisdio_bus_on = g_debug_print_dma_do_on = 0;
    g_debug_spisdiodma_isr_info_start = 0;

    /* MacFsmMibState flags */
    g_debug_print_fsm_on = g_debug_print_assoc_on = 0;
    g_debug_print_Enc_auth_on = g_debug_print_wps_on = 0;
    g_debug_print_ps_on = 0;        /* PowerSave   */
    g_debug_print_wd_on = 0;        /* WiFi-Direct */
    g_debug_print_txrx_path_on = 0;

    /* Host6820Info: no flags yet */
}
void ShareMemInfo(int type,int flag,int value,char * reserved2ext)
{
if(type == itm_debug_plog_sharemem_tx_pkt)
{
g_debug_print_tx_pkt_on = value;
}
else if(type == itm_debug_plog_sharemem_rx_ptk)
{
g_debug_print_rx_ptk_on = value;
}
else if(type == itm_debug_plog_sharemem_tx_buf)
{
g_debug_print_tx_buf_on = value;
}
else if(type == itm_debug_plog_sharemem_rx_buf)
{
g_debug_print_rx_buf_on = value;
}
else if(type == itm_debug_plog_sharemem_buf_use)
{
if(flag == counter_start)
{
g_debug_buf_use_info_start = counter_start;
}
else if( ( flag == counter_end ) && ( g_debug_buf_use_info_start == counter_end ) )
{
/*printk("already in counter_end stat\n");*/
}
else if( flag == counter_end )
{
g_debug_buf_use_info_start = counter_end;
}
}
}
/*
 * Apply the start/end state machine shared by all statistics counters:
 * counter_start always (re)arms the state, counter_end latches it once
 * (a counter_end request while already ended is ignored, matching the
 * original per-counter code).
 */
static void itm_set_counter_state(int *state, int flag)
{
    if(flag == counter_start)
    {
        *state = counter_start;
    }
    else if((flag == counter_end) && (*state == counter_end))
    {
        /* already in counter_end state - nothing to do */
    }
    else if(flag == counter_end)
    {
        *state = counter_end;
    }
}

/*
 * Dispatch a MAC TX/RX statistics trace request to the state flag
 * selected by 'type'.  Previously the identical start/end logic was
 * duplicated four times; it is now factored into
 * itm_set_counter_state() (behaviour unchanged).
 * 'reserved2ext' is currently unused.
 */
void MacTxRxStatistics(int type,int flag,char * reserved2ext)
{
    if(type == itm_debug_plog_mactxrx_reg)
        itm_set_counter_state(&g_debug_txrx_reg_info_start, flag);
    else if(type == itm_debug_plog_mactxrx_frame)
        itm_set_counter_state(&g_debug_txrx_frame_info_start, flag);
    else if(type == itm_debug_plog_mactxrx_rx_size)
        itm_set_counter_state(&g_debug_rx_size_info_start, flag);
    else if(type == itm_debug_plog_mactxrx_isr)
        itm_set_counter_state(&g_debug_isr_info_start, flag);
}
/*
 * Dispatch an SPI/SDIO/DMA trace request: bus and DMA switches take
 * 'value' directly; the ISR counter uses the counter_start/counter_end
 * state machine in 'flag'.  'reserved2ext' is currently unused.
 */
void SpiSdioDmaState(int type,int flag,int value,char * reserved2ext)
{
    if(type == itm_debug_plog_spisdiodma_spisdio)
    {
        g_debug_print_spisdio_bus_on = value;
        return;
    }

    if(type == itm_debug_plog_spisdiodma_dma)
    {
        g_debug_print_dma_do_on = value;
        return;
    }

    if(type == itm_debug_plog_spisdiodma_isr)
    {
        if(flag == counter_start)
            g_debug_spisdiodma_isr_info_start = counter_start;
        else if((flag == counter_end) &&
                (g_debug_spisdiodma_isr_info_start != counter_end))
            g_debug_spisdiodma_isr_info_start = counter_end;
        /* else: already in the counter_end state - nothing to do */
    }
}
void MacFsmMibState(int type,int value,char * reserved2ext)
{
if(type == itm_debug_plog_macfsm_mib_fsm)
{
g_debug_print_fsm_on = value;
}
else if(type == itm_debug_plog_macfsm_mib_assoc)
{
g_debug_print_assoc_on = value;
}
else if(type == itm_debug_plog_macfsm_mib_Enc_auth)
{
g_debug_print_Enc_auth_on = value;
}
else if(type == itm_debug_plog_macfsm_mib_wps)
{
g_debug_print_wps_on = value;
}
else if(type == itm_debug_plog_macfsm_mib_ps)//PowerSave
{
g_debug_print_ps_on = value;//PowerSave
}
else if(type == itm_debug_plog_macfsm_mib_wd)//WiFi-Direct
{
g_debug_print_wd_on = value;//WiFi-Direct
}
else if(type == itm_debug_plog_macfsm_mib_txrx_path)
{
g_debug_print_txrx_path_on = value;
}
else if(type == itm_debug_plog_macfsm_mib_mibapp)
{
/*print ...*/
}
else if(type == itm_debug_plog_macfsm_mib_mibprtcl)
{
/*print ...*/
}
else if(type == itm_debug_plog_macfsm_mib_mibmac)
{
/*print ...*/
}
}
/*
 * Placeholder for Host/6820 info trace requests.  Intentionally empty:
 * no debug hooks have been implemented for this category yet, so every
 * request is silently accepted and ignored.
 */
void Host6820Info(int type,char * reserved2ext)
{
}
/*chenq add end*/
#ifdef TROUT_WIFI_POWER_SLEEP_ENABLE
/*
* Notify co-processor to handle Power Management event
* through interrupt.
* Author: Keguang
* Date: 20130321
*/
/*
 * Send a power-management message to the co-processor (ARM7) and wait,
 * polling every 10ms for up to 'retry' attempts, for it to acknowledge.
 *
 * Handshake (as implemented here): the CP increments
 * rSYSREG_INFO1_FROM_ARM after handling a message, so the expected ack
 * value is the register's current value + 1.  The message is loaded
 * into rSYSREG_HOST2ARM_INFO1 and the CP is interrupted via
 * rSYSREG_GEN_ISR_2_ARM7; on exit the message register is cleared.
 *
 * Side effect: tracks suspend state in g_done_wifi_suspend for the
 * suspend/resume magic messages.
 *
 * NOTE(review): if the retries are exhausted, the post-decrement in the
 * loop condition wraps the unsigned 'i' past zero, so the trailing
 * pr_info prints it as -1 via %d.  Also, there is no explicit failure
 * indication to the caller on timeout.
 */
inline void notify_cp_with_handshake(uint msg, uint retry)
{
    uint i = retry;
    /* Expected ack: CP bumps rSYSREG_INFO1_FROM_ARM once per message. */
    uint count = host_read_trout_reg((UWORD32)rSYSREG_INFO1_FROM_ARM) + 1;
//#ifdef POWERSAVE_DEBUG
    pr_info("rSYSREG_POWER_CTRL: %x\n", host_read_trout_reg((UWORD32)rSYSREG_POWER_CTRL));
//#endif
    host_write_trout_reg((UWORD32)msg, (UWORD32)rSYSREG_HOST2ARM_INFO1); /*load message*/
    host_write_trout_reg((UWORD32)0x1, (UWORD32)rSYSREG_GEN_ISR_2_ARM7); /*interrupt CP*/
    /*pr_info("command done!\n");*/
    /* wait for CP: poll the ack counter every 10ms, at most 'retry' times */
    while((host_read_trout_reg((UWORD32)rSYSREG_INFO1_FROM_ARM) != count) && i--) {
        msleep(10);
//#ifdef POWERSAVE_DEBUG
        pr_info("Done! rSYSREG_POWER_CTRL: %x\n", host_read_trout_reg((UWORD32)rSYSREG_POWER_CTRL));
//#endif
    }
    pr_info("!!! rSYSREG_POWER_CTRL: %x, retry %d, i %d\n", host_read_trout_reg((UWORD32)rSYSREG_POWER_CTRL), retry, i);
    host_write_trout_reg(0x0, (UWORD32)rSYSREG_HOST2ARM_INFO1); /*clear message*/
    /* Record suspend state for the PM magic messages. */
    if(msg == PS_MSG_WIFI_SUSPEND_MAGIC)
        g_done_wifi_suspend = 1;
    else if(msg == PS_MSG_WIFI_RESUME_MAGIC)
        g_done_wifi_suspend = 0;
}
EXPORT_SYMBOL(notify_cp_with_handshake);
extern int prepare_null_frame_for_cp(UWORD8 psm, BOOL_T is_qos, UWORD8 priority);
/*
 * Check the TX descriptor of the last frame the ARM7 co-processor sent
 * (one of two 200-byte slots at BEACON_MEM_BEGIN) and, if its status
 * bits show the frame was not successfully acknowledged, rewrite the
 * descriptor status/timestamp and re-queue it on the high-priority EDCA
 * queue.
 *
 * Fix: removed the unused local 'retry' from the original code.
 *
 * NOTE(review): the exact meaning of descriptor bytes 3 and 20 comes
 * from the MAC H/W descriptor layout, which is not visible here; the
 * bit manipulation below is preserved exactly as written.  The uint*
 * view 'pw' of the byte buffer also assumes little-endian layout and
 * technically violates strict aliasing — common in this driver, but
 * worth confirming against the descriptor definition.
 */
void check_and_retransmit(void)
{
    uint which_frame = 0;
    uint sta = 0;           /* Trout RAM address of the descriptor slot */
    uint vs;                /* scaled TSF timestamp                     */
    unsigned char tmp[200]; /* local copy of the descriptor             */
    uint *pw = (uint *)&tmp[0];

    /* ARM7 reports (via rSYSREG_HOST2ARM_INFO1) which slot it used last. */
    which_frame = root_host_read_trout_reg((UWORD32)rSYSREG_HOST2ARM_INFO1);
    if(which_frame)
        sta = BEACON_MEM_BEGIN;
    else
        sta = BEACON_MEM_BEGIN + 200;

    root_host_read_trout_ram((void *)tmp, (void *)sta, TX_DSCR_LEN);

    /* Bits 5-6 of byte 3 == 0x3: frame did not complete successfully. */
    if(((tmp[3] >> 5) & 0x3) == 0x3){
        goto retx;
    }
    /* Low two bits of byte 20 != 0x3: failure signalled elsewhere. */
    if((tmp[20] & 0x3) != 0x3){
        printk("SF0-CASUED\n");
        goto retx;
    }
    /* arrive here, means the last frame ARM7 sent was success(AP acked) do nothing*/
    return;

retx:
    /* Reset the status bits (clear bits 5-6 of byte 3, set bit 5) and
     * clear the result bits in byte 20 before re-queuing. */
    tmp[3] &= 0x9F;
    tmp[3] |= 0x20;
    tmp[20] &= 0xFC;
    /* Refresh the 16-bit descriptor timestamp from the TSF timer
     * (TSF low word scaled down by 2^10). */
    vs = root_host_read_trout_reg((UWORD32)rMAC_TSF_TIMER_LO);
    vs = (vs >> 10) & 0xFFFF;
    pw[3] &= 0xFFFF0000;
    pw[3] |= vs;
    printk("RE-TX\n");
    /* Write the patched descriptor back and kick the HP EDCA queue. */
    root_host_write_trout_ram((void *)sta, (void *)tmp, TX_DSCR_LEN);
    root_host_write_trout_reg((UWORD32)sta, (UWORD32)rMAC_EDCA_PRI_HP_Q_PTR);
    msleep(20);
    return;
}
/*
 * For internal use only: the root_* (direct register access) variant of
 * notify_cp_with_handshake().  Same handshake protocol — load message
 * into rSYSREG_HOST2ARM_INFO1, interrupt the CP, poll the
 * rSYSREG_INFO1_FROM_ARM ack counter every 10ms up to 'retry' times,
 * then clear the message register — but using the root_host_* register
 * accessors.
 *
 * Extra behaviour: for the EBEA "keep-alive" message it sleeps 75ms and
 * then calls check_and_retransmit() to re-queue the frame if the AP did
 * not ack it.  Also tracks suspend state in g_done_wifi_suspend.
 */
inline void root_notify_cp_with_handshake(uint msg, uint retry)
{
    uint i = retry;
    /* Expected ack: CP bumps rSYSREG_INFO1_FROM_ARM once per message. */
    uint count = root_host_read_trout_reg((UWORD32)rSYSREG_INFO1_FROM_ARM) + 1;
#ifdef POWERSAVE_DEBUG
    pr_info("rSYSREG_POWER_CTRL: %x\n", root_host_read_trout_reg((UWORD32)rSYSREG_POWER_CTRL));
    /*pr_info("command %x\n", msg);*/
#endif
    root_host_write_trout_reg((UWORD32)msg, (UWORD32)rSYSREG_HOST2ARM_INFO1); /*load message*/
    root_host_write_trout_reg((UWORD32)0x1, (UWORD32)rSYSREG_GEN_ISR_2_ARM7); /*interrupt CP*/
    /*pr_info("command done!\n");*/
    /* EBEA keep-alive: give ARM7 time to transmit, then verify/re-send. */
    if((msg & 0xFFFF) == PS_MSG_ARM7_EBEA_KC_MAGIC){
        printk("EBEA.......\n");
        msleep(75);
        check_and_retransmit();
    }
    /* wait for CP: poll the ack counter every 10ms, at most 'retry' times */
    while((root_host_read_trout_reg((UWORD32)rSYSREG_INFO1_FROM_ARM) != count) && i--) {
        msleep(10);
#ifdef POWERSAVE_DEBUG
        pr_info("Done! rSYSREG_POWER_CTRL: %x\n", root_host_read_trout_reg((UWORD32)rSYSREG_POWER_CTRL));
        /*pr_info("expected %x, SYSREG_INFO1_FROM_ARM = %x\n", count, root_host_read_trout_reg((UWORD32)rSYSREG_INFO1_FROM_ARM));*/
#endif
    }
    pr_info("@@@ rSYSREG_POWER_CTRL: %x, retry %d, i %d\n", root_host_read_trout_reg((UWORD32)rSYSREG_POWER_CTRL), retry, i);
    root_host_write_trout_reg(0x0, (UWORD32)rSYSREG_HOST2ARM_INFO1); /*clear message*/
    /* Record suspend state for the PM magic messages. */
    if(msg == PS_MSG_WIFI_SUSPEND_MAGIC)
        g_done_wifi_suspend = 1;
    else if(msg == PS_MSG_WIFI_RESUME_MAGIC)
        g_done_wifi_suspend = 0;
}
#endif
|
abgoyal/zen_u105_kernel
|
drivers/net/wireless/trout/mac/src/Common/common.c
|
C
|
gpl-2.0
| 42,309
|
/*
* Asterisk -- An open source telephony toolkit.
*
* Copyright (C) 2005-2006, Kevin P. Fleming
*
* Kevin P. Fleming <kpfleming@digium.com>
*
* See http://www.asterisk.org for more information about
* the Asterisk project. Please do not directly contact
* any of the maintainers of this project for assistance;
* the project provides a web site, mailing lists and IRC
* channels for your use.
*
* This program is free software, distributed under the terms of
* the GNU General Public License Version 2. See the LICENSE file
* at the top of the source tree.
*/
/*! \file
*
* \brief Background DNS update manager
*
* \author Kevin P. Fleming <kpfleming@digium.com>
*
* \bug There is a minor race condition. In the event that an IP address
* of a dnsmgr managed host changes, there is the potential for the consumer
* of that address to access the in_addr data at the same time that the dnsmgr
* thread is in the middle of updating it to the new address.
*/
#include "asterisk.h"
ASTERISK_FILE_VERSION(__FILE__, "$Revision: 130752 $")
#include "asterisk/_private.h"
#include <regex.h>
#include <signal.h>
#include "asterisk/dnsmgr.h"
#include "asterisk/linkedlists.h"
#include "asterisk/utils.h"
#include "asterisk/config.h"
#include "asterisk/sched.h"
#include "asterisk/cli.h"
#include "asterisk/manager.h"
/* Scheduler context driving the periodic refresh runs. */
static struct sched_context *sched;
/* Schedule id of the pending refresh_list() run, or -1 when none. */
static int refresh_sched = -1;
/* Background thread running do_refresh(); AST_PTHREADT_NULL when not started. */
static pthread_t refresh_thread = AST_PTHREADT_NULL;

struct ast_dnsmgr_entry {
    /*! where we will store the resulting address */
    struct in_addr *result;
    /*! the last result, used to check if address has changed */
    struct in_addr last;
    /*! Set to 1 if the entry changes.
     * Fix: declared unsigned — a plain 'int' 1-bit bit-field can only
     * portably hold 0 and -1 (sign is implementation-defined), so
     * storing 1 in it was not well-defined across compilers. */
    unsigned int changed:1;
    /*! serializes refresh/changed access for this entry */
    ast_mutex_t lock;
    AST_RWLIST_ENTRY(ast_dnsmgr_entry) list;
    /*! just 1 here, but we use calloc to allocate the correct size */
    char name[1];
};

static AST_RWLIST_HEAD_STATIC(entry_list, ast_dnsmgr_entry);

AST_MUTEX_DEFINE_STATIC(refresh_lock);

/* Default refresh interval (seconds) when dnsmgr.conf does not set one. */
#define REFRESH_DEFAULT 300

/* Non-zero when dnsmgr is enabled via dnsmgr.conf. */
static int enabled;
/* Seconds between background refresh passes. */
static int refresh_interval;

/* Arguments passed to refresh_list(): which entries, how chatty, and an
 * optional regex filter. */
struct refresh_info {
    struct entry_list *entries;
    int verbose;
    unsigned int regex_present:1;
    regex_t filter;
};

static struct refresh_info master_refresh_info = {
    .entries = &entry_list,
    .verbose = 0,
};
/*
 * Create a dnsmgr entry for 'name' whose refreshed address will be
 * written into *result, and add it to the managed list.
 * Returns the new entry, or NULL on bad arguments / allocation failure.
 * Ownership: caller releases with ast_dnsmgr_release().
 */
struct ast_dnsmgr_entry *ast_dnsmgr_get(const char *name, struct in_addr *result)
{
    struct ast_dnsmgr_entry *entry = NULL;

    if (result && !ast_strlen_zero(name))
        entry = ast_calloc(1, sizeof(*entry) + strlen(name));
    if (!entry)
        return NULL;

    entry->result = result;
    ast_mutex_init(&entry->lock);
    strcpy(entry->name, name);
    /* Seed 'last' with the caller's current address so the first
     * refresh only reports a change if the DNS answer differs. */
    memcpy(&entry->last, result, sizeof(entry->last));

    AST_RWLIST_WRLOCK(&entry_list);
    AST_RWLIST_INSERT_HEAD(&entry_list, entry, list);
    AST_RWLIST_UNLOCK(&entry_list);

    return entry;
}
/*
 * Remove 'entry' from the managed list and free it.  NULL is a no-op.
 * The entry must be unlinked before its mutex is destroyed and the
 * memory freed, hence the ordering below.
 */
void ast_dnsmgr_release(struct ast_dnsmgr_entry *entry)
{
    if (!entry)
        return;

    AST_RWLIST_WRLOCK(&entry_list);
    AST_RWLIST_REMOVE(&entry_list, entry, list);
    AST_RWLIST_UNLOCK(&entry_list);
    ast_verb(4, "removing dns manager for '%s'\n", entry->name);

    ast_mutex_destroy(&entry->lock);
    ast_free(entry);
}
/*
 * Resolve 'name' into *result immediately and, when dnsmgr is enabled,
 * register a managed entry (stored in *dnsmgr) that keeps *result
 * updated in the background.
 * Returns 0 on success (including plain IP literals and the
 * already-managed case), -1 on bad arguments, non-zero if the manager
 * entry could not be created.
 */
int ast_dnsmgr_lookup(const char *name, struct in_addr *result, struct ast_dnsmgr_entry **dnsmgr)
{
    struct ast_hostent ahp;
    struct hostent *hp;

    if (ast_strlen_zero(name) || !result || !dnsmgr)
        return -1;

    /* already managing this name - nothing to do */
    if (*dnsmgr && !strcasecmp((*dnsmgr)->name, name))
        return 0;

    ast_verb(4, "doing dnsmgr_lookup for '%s'\n", name);

    /* if it's actually an IP address and not a name,
       there's no need for a managed lookup */
    if (inet_aton(name, result))
        return 0;

    /* do a lookup now but add a manager so it will automagically get updated in the background */
    if ((hp = ast_gethostbyname(name, &ahp)))
        /* Fix: copy sizeof(*result) (the struct in_addr itself), not
         * sizeof(result) (the size of a pointer) — the latter wrote 8
         * bytes into a 4-byte object on LP64 platforms. */
        memcpy(result, hp->h_addr, sizeof(*result));

    /* if dnsmgr is not enabled don't bother adding an entry */
    if (!enabled)
        return 0;

    ast_verb(3, "adding dns manager for '%s'\n", name);
    *dnsmgr = ast_dnsmgr_get(name, result);

    return !*dnsmgr;
}
/*
 * Refresh a dnsmgr entry: re-resolve entry->name and, if the answer
 * differs from entry->last, update *entry->result and entry->last and
 * latch entry->changed.  Serialized per-entry via entry->lock.
 * Returns non-zero when the address changed.
 */
static int dnsmgr_refresh(struct ast_dnsmgr_entry *entry, int verbose)
{
    struct ast_hostent ahp;
    struct hostent *hp;
    char iabuf[INET_ADDRSTRLEN];
    char iabuf2[INET_ADDRSTRLEN];
    struct in_addr tmp;
    int changed = 0;

    ast_mutex_lock(&entry->lock);
    if (verbose)
        ast_verb(3, "refreshing '%s'\n", entry->name);

    if ((hp = ast_gethostbyname(entry->name, &ahp))) {
        /* check to see if it has changed, do callback if requested (where de callback is defined ????) */
        memcpy(&tmp, hp->h_addr, sizeof(tmp));
        if (tmp.s_addr != entry->last.s_addr) {
            ast_copy_string(iabuf, ast_inet_ntoa(entry->last), sizeof(iabuf));
            ast_copy_string(iabuf2, ast_inet_ntoa(tmp), sizeof(iabuf2));
            ast_log(LOG_NOTICE, "host '%s' changed from %s to %s\n",
                entry->name, iabuf, iabuf2);
            /* Fix: copy sizeof(*entry->result) (struct in_addr), not
             * sizeof(entry->result) (a pointer) — the latter over-copied
             * on 64-bit platforms, clobbering adjacent memory. */
            memcpy(entry->result, hp->h_addr, sizeof(*entry->result));
            memcpy(&entry->last, hp->h_addr, sizeof(entry->last));
            changed = entry->changed = 1;
        }
    }
    ast_mutex_unlock(&entry->lock);

    return changed;
}
/*
 * Public wrapper: refresh a single entry without verbose logging.
 * Returns non-zero when the address changed (see dnsmgr_refresh()).
 */
int ast_dnsmgr_refresh(struct ast_dnsmgr_entry *entry)
{
    return dnsmgr_refresh(entry, 0);
}
/*
 * Report whether this entry's address changed since the previous call,
 * clearing the flag as a side effect (read-and-reset under the entry
 * lock).  Returns non-zero if it had changed.
 */
int ast_dnsmgr_changed(struct ast_dnsmgr_entry *entry)
{
    int was_changed;

    ast_mutex_lock(&entry->lock);
    was_changed = entry->changed;
    entry->changed = 0;
    ast_mutex_unlock(&entry->lock);

    return was_changed;
}
/*
 * Background thread body: sleep until the next scheduled event is due,
 * then run the scheduler queue (which fires refresh_list()).  Runs
 * until cancelled by do_reload(); the pthread_testcancel() calls are
 * the cancellation points.
 */
static void *do_refresh(void *data)
{
    int ms;

    for (;;) {
        pthread_testcancel();
        /* Fix: ast_sched_wait() returns -1 when nothing is scheduled;
         * the original passed -1 * 1000 straight to usleep(), which
         * converts to an enormous unsigned sleep.  Fall back to a
         * one-second poll in that case. */
        ms = ast_sched_wait(sched);
        if (ms < 0)
            ms = 1000;
        usleep(ms * 1000);
        pthread_testcancel();
        ast_sched_runq(sched);
    }
    return NULL;
}
/*
 * Scheduler callback: refresh every managed entry (optionally filtered
 * by info->filter when info->regex_present).  Skips the whole pass if a
 * refresh or reload already holds refresh_lock.
 * Returns the next interval in ms (so the scheduler re-arms us), or -1
 * to indicate the pass was skipped.
 */
static int refresh_list(const void *data)
{
    struct refresh_info *info = (struct refresh_info *)data;
    struct ast_dnsmgr_entry *cur;

    /* if a refresh or reload is already in progress, exit now */
    if (ast_mutex_trylock(&refresh_lock) != 0) {
        if (info->verbose)
            ast_log(LOG_WARNING, "DNS Manager refresh already in progress.\n");
        return -1;
    }

    ast_verb(3, "Refreshing DNS lookups.\n");
    AST_RWLIST_RDLOCK(info->entries);
    AST_RWLIST_TRAVERSE(info->entries, cur, list) {
        if (info->regex_present && regexec(&info->filter, cur->name, 0, NULL, 0) != 0)
            continue;
        dnsmgr_refresh(cur, info->verbose);
    }
    AST_RWLIST_UNLOCK(info->entries);

    ast_mutex_unlock(&refresh_lock);

    /* automatically reschedule based on the configured interval */
    return refresh_interval * 1000;
}
/*
 * (Re)arm the periodic refresh: cancel any pending run and schedule the
 * next refresh_list() pass 100ms from now.  Does nothing unless a
 * schedule entry already exists (refresh_sched > -1, i.e. dnsmgr is
 * enabled and do_reload() has run).
 */
void dnsmgr_start_refresh(void)
{
    if (refresh_sched > -1) {
        AST_SCHED_DEL(sched, refresh_sched);
        refresh_sched = ast_sched_add_variable(sched, 100, refresh_list, &master_refresh_info, 1);
    }
}
static int do_reload(int loading);
/*
 * CLI handler for "dnsmgr reload": re-read dnsmgr.conf.
 * Follows the standard Asterisk CLI handler contract (CLI_INIT fills
 * in command/usage, CLI_GENERATE offers completions, otherwise run).
 */
static char *handle_cli_reload(struct ast_cli_entry *e, int cmd, struct ast_cli_args *a)
{
    if (cmd == CLI_INIT) {
        e->command = "dnsmgr reload";
        e->usage =
            "Usage: dnsmgr reload\n"
            " Reloads the DNS manager configuration.\n";
        return NULL;
    }
    if (cmd == CLI_GENERATE)
        return NULL;

    if (a->argc > 2)
        return CLI_SHOWUSAGE;

    do_reload(0);
    return CLI_SUCCESS;
}
static char *handle_cli_refresh(struct ast_cli_entry *e, int cmd, struct ast_cli_args *a)
{
struct refresh_info info = {
.entries = &entry_list,
.verbose = 1,
};
switch (cmd) {
case CLI_INIT:
e->command = "dnsmgr refresh";
e->usage =
"Usage: dnsmgr refresh [pattern]\n"
" Peforms an immediate refresh of the managed DNS entries.\n"
" Optional regular expression pattern is used to filter the entries to refresh.\n";
return NULL;
case CLI_GENERATE:
return NULL;
}
if (!enabled) {
ast_cli(a->fd, "DNS Manager is disabled.\n");
return 0;
}
if (a->argc > 3) {
return CLI_SHOWUSAGE;
}
if (a->argc == 3) {
if (regcomp(&info.filter, a->argv[2], REG_EXTENDED | REG_NOSUB)) {
return CLI_SHOWUSAGE;
} else {
info.regex_present = 1;
}
}
refresh_list(&info);
if (info.regex_present) {
regfree(&info.filter);
}
return CLI_SUCCESS;
}
static char *handle_cli_status(struct ast_cli_entry *e, int cmd, struct ast_cli_args *a)
{
int count = 0;
struct ast_dnsmgr_entry *entry;
switch (cmd) {
case CLI_INIT:
e->command = "dnsmgr status";
e->usage =
"Usage: dnsmgr status\n"
" Displays the DNS manager status.\n";
return NULL;
case CLI_GENERATE:
return NULL;
}
if (a->argc > 2)
return CLI_SHOWUSAGE;
ast_cli(a->fd, "DNS Manager: %s\n", enabled ? "enabled" : "disabled");
ast_cli(a->fd, "Refresh Interval: %d seconds\n", refresh_interval);
AST_RWLIST_RDLOCK(&entry_list);
AST_RWLIST_TRAVERSE(&entry_list, entry, list)
count++;
AST_RWLIST_UNLOCK(&entry_list);
ast_cli(a->fd, "Number of entries: %d\n", count);
return CLI_SUCCESS;
}
/* CLI command table entries, registered in dnsmgr_init(). */
static struct ast_cli_entry cli_reload = AST_CLI_DEFINE(handle_cli_reload, "Reloads the DNS manager configuration");
static struct ast_cli_entry cli_refresh = AST_CLI_DEFINE(handle_cli_refresh, "Performs an immediate refresh");
static struct ast_cli_entry cli_status = AST_CLI_DEFINE(handle_cli_status, "Display the DNS manager status");
/*
 * Module startup: create the scheduler context, register the CLI
 * commands, and load the initial configuration (do_reload(1) also
 * starts the background thread if dnsmgr is enabled).
 * Returns 0 on success, -1 if the scheduler context cannot be created,
 * otherwise do_reload()'s result.
 */
int dnsmgr_init(void)
{
    if (!(sched = sched_context_create())) {
        ast_log(LOG_ERROR, "Unable to create schedule context.\n");
        return -1;
    }
    ast_cli_register(&cli_reload);
    ast_cli_register(&cli_status);
    ast_cli_register(&cli_refresh);
    return do_reload(1);
}
/*
 * Public reload hook: re-read dnsmgr.conf at runtime (non-loading path,
 * so an unchanged config file is a no-op).
 */
int dnsmgr_reload(void)
{
    return do_reload(0);
}
/*!
 * \brief (Re)load dnsmgr.conf and start/stop the refresh machinery.
 *
 * \param loading non-zero on initial module load (forces the file to be
 *        parsed even if unchanged); zero on runtime reload.
 * \return 0 on success (including "file unchanged"), -1 otherwise.
 *
 * Fix: the AMI "Reload" event body contained a malformed "\r/n" separator,
 * producing a broken manager event; it is now a proper "\r\n".
 */
static int do_reload(int loading)
{
	struct ast_config *config;
	struct ast_flags config_flags = { loading ? 0 : CONFIG_FLAG_FILEUNCHANGED };
	const char *interval_value;
	const char *enabled_value;
	int interval;
	int was_enabled;
	int res = -1;

	if ((config = ast_config_load("dnsmgr.conf", config_flags)) == CONFIG_STATUS_FILEUNCHANGED)
		return 0;

	/* ensure that no refresh cycles run while the reload is in progress */
	ast_mutex_lock(&refresh_lock);

	/* reset defaults in preparation for reading config file */
	refresh_interval = REFRESH_DEFAULT;
	was_enabled = enabled;
	enabled = 0;

	AST_SCHED_DEL(sched, refresh_sched);

	if (config) {
		if ((enabled_value = ast_variable_retrieve(config, "general", "enable"))) {
			enabled = ast_true(enabled_value);
		}
		if ((interval_value = ast_variable_retrieve(config, "general", "refreshinterval"))) {
			if (sscanf(interval_value, "%d", &interval) < 1)
				ast_log(LOG_WARNING, "Unable to convert '%s' to a numeric value.\n", interval_value);
			else if (interval < 0)
				ast_log(LOG_WARNING, "Invalid refresh interval '%d' specified, using default\n", interval);
			else
				refresh_interval = interval;
		}
		ast_config_destroy(config);
	}

	if (enabled && refresh_interval)
		ast_log(LOG_NOTICE, "Managed DNS entries will be refreshed every %d seconds.\n", refresh_interval);

	/* if this reload enabled the manager, create the background thread
	   if it does not exist */
	if (enabled) {
		if (!was_enabled && (refresh_thread == AST_PTHREADT_NULL)) {
			if (ast_pthread_create_background(&refresh_thread, NULL, do_refresh, NULL) < 0) {
				ast_log(LOG_ERROR, "Unable to start refresh thread.\n");
			}
		}
		/* make a background refresh happen right away */
		refresh_sched = ast_sched_add_variable(sched, 100, refresh_list, &master_refresh_info, 1);
		res = 0;
	}
	/* if this reload disabled the manager and there is a background thread,
	   kill it */
	else if (!enabled && was_enabled && (refresh_thread != AST_PTHREADT_NULL)) {
		/* wake up the thread so it will exit */
		pthread_cancel(refresh_thread);
		pthread_kill(refresh_thread, SIGURG);
		pthread_join(refresh_thread, NULL);
		refresh_thread = AST_PTHREADT_NULL;
		res = 0;
	}
	else
		res = 0;

	ast_mutex_unlock(&refresh_lock);

	manager_event(EVENT_FLAG_SYSTEM, "Reload", "Module: DNSmgr\r\nStatus: %s\r\nMessage: DNSmgr reload Requested\r\n", enabled ? "Enabled" : "Disabled");

	return res;
}
|
nicwolff/asterisk-agi-mp3
|
main/dnsmgr.c
|
C
|
gpl-2.0
| 11,631
|
/* pictool: ANSI C converter for Tibia's PIC files
* (c) 2007-2009 Ivan Vucica
* Part of OpenTibia project
*
* Although written in ANSI C, this makes use of #pragma pack(),
* make sure your compiler supports packed structures, or else.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/* Headers */
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h> /* va_list/va_start used by dumperror_stderr */
#include <errno.h>
#include <SDL/SDL.h>
#if !_MSC_VER
#include <unistd.h>
#endif
#if !BAZEL_BUILD
#include "../../sprdata.h"
#else
#include "sprdata.h"
#endif
#include "picfuncs.h"
#pragma pack(1)
typedef struct {
uint32_t signature;
uint16_t imgcount;
} fileheader_t;
typedef struct {
uint8_t width, height;
uint8_t unk1, unk2, unk3; /* FIXME (ivucica#4#) zerocoolz says this should be colorkey, according to http://otfans.net/showpost.php?p=840634&postcount=134 */
} picheader_t;
#pragma pack()
/* Return the total size in bytes of an open stream.
 * The current read/write position is saved and restored, so the caller's
 * position in the file is unaffected. */
static int filesize (FILE* f) {
	int pos = ftell(f);
	fseek(f, 0, SEEK_END);
	int total = ftell(f);
	fseek(f, pos, SEEK_SET);
	return total;
}
/* Write one 32x32 tile of surface 's' (taken at pixel offset offx/offy) as
 * RLE sprite data at the current position of 'f'; *datasize receives the
 * encoded length.  Thin wrapper around writeSprData() from sprdata. */
int writesprite (FILE *f, SDL_Surface *s, int offx, int offy, uint16_t *datasize) {
	return writeSprData(f, s, offx, offy, datasize);
}
/* Decode the sprite stored at absolute offset 'sprloc' in 'f' into surface
 * 's' at pixel offset (offx, offy).  The stream position is saved before the
 * seek and restored afterwards; returns readSprData()'s result. */
int readsprite (FILE *f, uint32_t sprloc, SDL_Surface *s, int offx, int offy) {
	int saved_pos = ftell(f);
	int status;

	fseek(f, sprloc, SEEK_SET);
	status = readSprData(f, s, offx, offy);
	fseek(f, saved_pos, SEEK_SET);

	return status;
}
/* Print a human-readable summary of a PIC file: header fields plus, for each
 * image, its tile dimensions, background colour bytes and every sprite's
 * file offset.
 *
 * Returns 0 on success, -1 if the file cannot be opened or a read fails.
 *
 * Fixes: the FILE* was never fclose()d on any path, fread results were
 * unchecked (a truncated file produced garbage output), and uint32_t values
 * were printed with %d instead of %u. */
int picdetails (const char* filename) {
	FILE *f;
	int i, j, k;
	fileheader_t fh;
	picheader_t ph;
	uint32_t sprloc;

	f = fopen(filename, "rb");
	printf("information for %s\n", filename);
	if (!f)
		return -1;
	if (fread(&fh, sizeof(fh), 1, f) != 1) {
		fclose(f);
		return -1;
	}
	printf("signature %u\n", fh.signature);
	printf("imagecount %d\n", fh.imgcount);
	for (i = 0; i < fh.imgcount; i++) {
		if (fread(&ph, sizeof(ph), 1, f) != 1) {
			fclose(f);
			return -1;
		}
		printf("img%d width %d height %d bg rgb #%02x%02x%02x\n", i, ph.width, ph.height, ph.unk1, ph.unk2, ph.unk3);
		/* one 32-bit sprite offset per 32x32 tile, row by row */
		for (j = 0; j < ph.height; j++) {
			for (k = 0; k < ph.width; k++) {
				if (fread(&sprloc, sizeof(sprloc), 1, f) != 1) {
					fclose(f);
					return -1;
				}
				printf("sprite img %d x %d y %d location %u\n", i, k, j, sprloc);
			}
		}
	}
	fclose(f);
	return 0;
}
/* Default error sink: printf-style formatting written to stderr.
 * Returns vfprintf()'s result (characters written, or negative on error).
 *
 * Fix: this function uses va_list/va_start/va_end but the file never
 * included <stdarg.h>; relying on <stdio.h> to expose va_list is
 * non-portable (see the deps change adding the include). */
int dumperror_stderr(char* txt, ...)
{
	va_list vl;
	int r;

	va_start(vl, txt);
	r = vfprintf(stderr, txt, vl);
	va_end(vl);
	return r;
}
/* Pluggable error-reporting hook; defaults to writing to stderr. */
int (*pictool_dumperror)(char*,...) = dumperror_stderr;
/* Replace image 'index' of the PIC file 'filename' with surface 's',
 * rewriting the whole file through a temporary "__tmp__.pic" and renaming it
 * over the original.  All other images are copied through with their sprite
 * data relocated to the new offsets.
 *
 * Returns 0 on success, -1 if a file cannot be opened, or a small positive
 * error code on structural problems; exits the process on corrupt input.
 *
 * Fixes: both FILE*s were leaked on every early error return, and the
 * rename-failure handling compared errno against the magic numbers 17 and 2
 * instead of the portable EEXIST/ENOENT constants. */
int writepic(const char* filename, int index, SDL_Surface *s){
	FILE *fi, *fo;
	fileheader_t fh;
	picheader_t ph;
	uint32_t sprloc, sproffset;
	size_t continuationposi, continuationposo;
	uint16_t datasize;
	void *data;
	int i, j, k;

	fi = fopen(filename, "rb");
	fo = fopen("__tmp__.pic", "wb+");
	if (!fi || !fo) {
		/* close whichever stream did open so nothing leaks */
		if (fi)
			fclose(fi);
		if (fo)
			fclose(fo);
		return -1;
	}
	fread(&fh, sizeof(fh), 1, fi);
	fwrite(&fh, sizeof(fh), 1, fo);
	/* First pass: compute where sprite data will start in the output file
	 * (after all per-image headers and offset tables). */
	sproffset = fh.imgcount * (sizeof(ph)+1)-2;
	for (i = 0; i < fh.imgcount; i++) {
		fread(&ph, sizeof(ph), 1, fi);
		if (i == index) {
			/* the replaced image takes its dimensions from the surface */
			ph.width = s->w / 32;
			ph.height = s->h / 32;
		}
		sproffset += ph.width * ph.height * 4;
		fseek(fi, ph.width*ph.height*4, SEEK_CUR);
	}
	/* Second pass: copy/relocate every image. */
	fseek(fi, sizeof(fh), SEEK_SET);
	for (i = 0; i < fh.imgcount; i++) {
		fread(&ph, sizeof(ph), 1, fi);
		if (i != index) {
			if (!ph.width || !ph.height) {
				fprintf(stderr, "pictool: width or height are 0\n");
				fclose(fi);
				fclose(fo);
				return (10);
			}
			fwrite(&ph, sizeof(ph), 1, fo);
			for (j = 0; j < ph.width * ph.height; j++) {
				fread(&sprloc, sizeof(sprloc), 1, fi);
				if (sproffset > 4000000) {
					dumperror_stderr("pictool: infinite loop\n");
					exit(8);
				}
				if (sprloc > filesize(fi)) {
					dumperror_stderr("pictool: bad spr pointer\n");
					exit(9);
				}
				fwrite(&sproffset, sizeof(sproffset), 1, fo);
				/* jump to the sprite data, copy it, then resume the
				 * offset tables where we left off */
				continuationposi = ftell(fi);
				continuationposo = ftell(fo);
				fseek(fi, sprloc, SEEK_SET);
				fseek(fo, sproffset, SEEK_SET);
				fread(&datasize, sizeof(datasize), 1, fi);
				fwrite(&datasize, sizeof(datasize), 1, fo);
				data = malloc(datasize+2);
				if (!data) {
					dumperror_stderr("pictool: allocation problem\n");
					fclose(fi);
					fclose(fo);
					return (7);
				}
				fread(data, datasize+2, 1, fi);
				fwrite(data, datasize+2, 1, fo);
				free(data);
				fseek(fo, continuationposo, SEEK_SET);
				fseek(fi, continuationposi, SEEK_SET);
				sproffset += datasize+2; // 2 == space for datasize
			}
			fflush(fo);
		}
		else {
			/* skip the old image's offset table; encode the new surface */
			fseek(fi, ph.width*ph.height*4, SEEK_CUR);
			ph.width = s->w / 32; ph.height = s->h / 32;
			fwrite(&ph, sizeof(ph), 1, fo);
			for (j = 0; j < ph.height; j++) {
				for (k = 0; k < ph.width; k++) {
					/*printf("Placing %d %d on %d\n", j, k, sproffset);*/
					fwrite(&sproffset, sizeof(sproffset), 1, fo);
					continuationposo = ftell(fo);
					fseek(fo, sproffset, SEEK_SET);
					writesprite(fo, s, k * 32, j*32, &datasize);
					/*printf("Its size is: %d\n", datasize);*/
					fseek(fo, continuationposo, SEEK_SET);
					sproffset += datasize+2;
				}
			}
			fflush(fo);
		}
	}
	fclose(fo);
	fclose(fi);
	/* Atomically-ish replace the original file with the rewritten one. */
	if (rename("__tmp__.pic", filename)) {
		if (errno == EEXIST) { /* e.g. Windows: rename won't overwrite */
			if (unlink(filename)) {
				if (errno != ENOENT)
					return 93;
			}
			if (rename("__tmp__.pic", filename)) {
				return 92;
			}
		} else {
			return 92;
		}
	}
	return 0;
}
/* Load image 'index' from the PIC file into a newly created 32-bit SDL
 * surface (*sr); tiles are 32x32 and missing pixels stay magenta.
 *
 * Returns 0 on success (with *sr set), -1 on open/decode failure.
 *
 * Fixes: the FILE* was leaked on both error paths, and a partially decoded
 * surface was leaked when readsprite() failed (the TODO in the original). */
int readpic (const char* filename, int index, SDL_Surface **sr) {
	/* index >= -1; -1 means that we should print out details */
	SDL_Surface *s = NULL;
	FILE *f;
	int i, j, k;
	fileheader_t fh;
	picheader_t ph;
	uint32_t sprloc;
	uint32_t magenta;

	f = fopen(filename, "rb");
	if (!f)
		return -1;
	fread(&fh, sizeof(fh), 1, f);
	for (i = 0; i < fh.imgcount && i <= index; i++) {
		fread(&ph, sizeof(ph), 1, f);
		if (i == index) {
			s = SDL_CreateRGBSurface(SDL_SWSURFACE, ph.width*32, ph.height*32, 32, 0xFF, 0xFF00, 0xFF0000, 0xFF000000);
			if (!s) {
				printf("CreateRGBSurface failed: %s\n", SDL_GetError());
				fclose(f);
				return -1;
			}
			magenta = SDL_MapRGB(s->format, 255, 0, 255);
			SDL_FillRect(s, NULL, magenta);
			/* FIXME (ivucica#4#) Above statement is potentially unportable to architectures with
			 * different endianess. Lilliputtans would be happier if we took a look at SDL
			 * docs and corrected this. */
			for (j = 0; j < ph.height; j++) {
				for (k = 0; k < ph.width; k++) {
					fread(&sprloc, sizeof(sprloc), 1, f);
					dbgprintf(":: reading sprite at pos %d %d\n", j, k);
					if (readsprite(f, sprloc, s, k*32, j*32)) {
						/* drop the partially filled surface and the stream */
						SDL_FreeSurface(s);
						fclose(f);
						return -1;
					}
				}
			}
		}
		else {
			/* not the requested image: skip its sprite-offset table */
			fseek(f, sizeof(sprloc)*ph.height*ph.width, SEEK_CUR);
		}
	}
	fclose(f);
	*sr = s;
	return 0;
}
|
opentibia/yatc
|
tools/pictool/picfuncs.c
|
C
|
gpl-2.0
| 7,210
|
/*
* Demo on how to use /dev/crypto device for ciphering.
*
* Placed under public domain.
*
*/
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <crypto/cryptodev.h>
#include "asynchelper.h"
#include "testhelper.h"
#ifdef ENABLE_ASYNC
static int debug = 0;
#define DATA_SIZE 8*1024
#define BLOCK_SIZE 16
#define KEY_SIZE 16
/* Round-trip test: AES-128-CBC encrypt DATA_SIZE bytes of 0x15 through
 * /dev/crypto asynchronously, decrypt in a fresh session, and verify the
 * plaintext survives.  Returns 0 on success, 1 on any failure.
 *
 * Fix: the debug message after the decrypt step said "data encrypted". */
static int
test_crypto(int cfd)
{
	uint8_t plaintext_raw[DATA_SIZE + 63], *plaintext;
	uint8_t ciphertext_raw[DATA_SIZE + 63], *ciphertext;
	uint8_t iv[BLOCK_SIZE];
	uint8_t key[KEY_SIZE];
	struct session_op sess;
#ifdef CIOCGSESSINFO
	struct session_info_op siop;
#endif
	struct crypt_op cryp;

	if (debug) printf("running %s\n", __func__);

	memset(&sess, 0, sizeof(sess));
	memset(&cryp, 0, sizeof(cryp));
	memset(key, 0x33, sizeof(key));
	memset(iv, 0x03, sizeof(iv));

	/* Get crypto session for AES128 */
	sess.cipher = CRYPTO_AES_CBC;
	sess.keylen = KEY_SIZE;
	sess.key = key;
	if (ioctl(cfd, CIOCGSESSION, &sess)) {
		perror("ioctl(CIOCGSESSION)");
		return 1;
	}
	if (debug) printf("%s: got the session\n", __func__);

#ifdef CIOCGSESSINFO
	/* align the buffers as the driver requires */
	siop.ses = sess.ses;
	if (ioctl(cfd, CIOCGSESSINFO, &siop)) {
		perror("ioctl(CIOCGSESSINFO)");
		return 1;
	}
	plaintext = buf_align(plaintext_raw, siop.alignmask);
	ciphertext = buf_align(ciphertext_raw, siop.alignmask);
#else
	plaintext = plaintext_raw;
	ciphertext = ciphertext_raw;
#endif
	memset(plaintext, 0x15, DATA_SIZE);

	/* Encrypt data.in to data.encrypted */
	cryp.ses = sess.ses;
	cryp.len = DATA_SIZE;
	cryp.src = plaintext;
	cryp.dst = ciphertext;
	cryp.iv = iv;
	cryp.op = COP_ENCRYPT;
	DO_OR_DIE(do_async_crypt(cfd, &cryp), 0);
	DO_OR_DIE(do_async_fetch(cfd, &cryp), 0);
	if (debug) printf("%s: data encrypted\n", __func__);

	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
		perror("ioctl(CIOCFSESSION)");
		return 1;
	}
	if (debug) printf("%s: session finished\n", __func__);

	/* fresh session so the decrypt does not reuse encrypt state */
	if (ioctl(cfd, CIOCGSESSION, &sess)) {
		perror("ioctl(CIOCGSESSION)");
		return 1;
	}
	if (debug) printf("%s: got new session\n", __func__);

	/* Decrypt data.encrypted to data.decrypted (in place) */
	cryp.ses = sess.ses;
	cryp.len = DATA_SIZE;
	cryp.src = ciphertext;
	cryp.dst = ciphertext;
	cryp.iv = iv;
	cryp.op = COP_DECRYPT;
	DO_OR_DIE(do_async_crypt(cfd, &cryp), 0);
	DO_OR_DIE(do_async_fetch(cfd, &cryp), 0);
	if (debug) printf("%s: data decrypted\n", __func__);

	/* Verify the result */
	if (memcmp(plaintext, ciphertext, DATA_SIZE) != 0) {
		fprintf(stderr,
			"FAIL: Decrypted data are different from the input data.\n");
		return 1;
	} else if (debug)
		printf("Test passed\n");

	/* Finish crypto session */
	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
		perror("ioctl(CIOCFSESSION)");
		return 1;
	}
	return 0;
}
/* Known-answer test: encrypt two AES-128-CBC single-block vectors through
 * two concurrent async sessions and compare the outputs against the expected
 * ciphertexts.  Returns 0 on success, 1 on any failure.
 *
 * Fix: the failure messages said "Decrypted data are different from the
 * input data", but what is actually compared is the ENCRYPTED output
 * against the expected ciphertext vector. */
static int test_aes(int cfd)
{
	uint8_t plaintext1_raw[BLOCK_SIZE + 63], *plaintext1;
	uint8_t ciphertext1[BLOCK_SIZE] = { 0xdf, 0x55, 0x6a, 0x33, 0x43, 0x8d, 0xb8, 0x7b, 0xc4, 0x1b, 0x17, 0x52, 0xc5, 0x5e, 0x5e, 0x49 };
	uint8_t iv1[BLOCK_SIZE];
	uint8_t key1[KEY_SIZE] = { 0xff, 0xff, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	uint8_t plaintext2_data[BLOCK_SIZE] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x00 };
	uint8_t plaintext2_raw[BLOCK_SIZE + 63], *plaintext2;
	uint8_t ciphertext2[BLOCK_SIZE] = { 0xb7, 0x97, 0x2b, 0x39, 0x41, 0xc4, 0x4b, 0x90, 0xaf, 0xa7, 0xb2, 0x64, 0xbf, 0xba, 0x73, 0x87 };
	uint8_t iv2[BLOCK_SIZE];
	uint8_t key2[KEY_SIZE];
	struct session_op sess1, sess2;
#ifdef CIOCGSESSINFO
	struct session_info_op siop1, siop2;
#endif
	struct crypt_op cryp1, cryp2;

	memset(&sess1, 0, sizeof(sess1));
	memset(&sess2, 0, sizeof(sess2));
	memset(&cryp1, 0, sizeof(cryp1));
	memset(&cryp2, 0, sizeof(cryp2));

	/* Get crypto session for AES128 */
	sess1.cipher = CRYPTO_AES_CBC;
	sess1.keylen = KEY_SIZE;
	sess1.key = key1;
	if (ioctl(cfd, CIOCGSESSION, &sess1)) {
		perror("ioctl(CIOCGSESSION)");
		return 1;
	}
#ifdef CIOCGSESSINFO
	siop1.ses = sess1.ses;
	if (ioctl(cfd, CIOCGSESSINFO, &siop1)) {
		perror("ioctl(CIOCGSESSINFO)");
		return 1;
	}
	plaintext1 = buf_align(plaintext1_raw, siop1.alignmask);
#else
	plaintext1 = plaintext1_raw;
#endif
	memset(plaintext1, 0x0, BLOCK_SIZE);
	memset(iv1, 0x0, sizeof(iv1));
	memset(key2, 0x0, sizeof(key2));

	/* Get second crypto session for AES128 */
	sess2.cipher = CRYPTO_AES_CBC;
	sess2.keylen = KEY_SIZE;
	sess2.key = key2;
	if (ioctl(cfd, CIOCGSESSION, &sess2)) {
		perror("ioctl(CIOCGSESSION)");
		return 1;
	}
#ifdef CIOCGSESSINFO
	siop2.ses = sess2.ses;
	if (ioctl(cfd, CIOCGSESSINFO, &siop2)) {
		perror("ioctl(CIOCGSESSINFO)");
		return 1;
	}
	plaintext2 = buf_align(plaintext2_raw, siop2.alignmask);
#else
	plaintext2 = plaintext2_raw;
#endif
	memcpy(plaintext2, plaintext2_data, BLOCK_SIZE);

	/* Queue the first encryption (in place). */
	cryp1.ses = sess1.ses;
	cryp1.len = BLOCK_SIZE;
	cryp1.src = plaintext1;
	cryp1.dst = plaintext1;
	cryp1.iv = iv1;
	cryp1.op = COP_ENCRYPT;
	DO_OR_DIE(do_async_crypt(cfd, &cryp1), 0);
	if (debug) printf("cryp1 written out\n");

	memset(iv2, 0x0, sizeof(iv2));

	/* Queue the second encryption before fetching the first. */
	cryp2.ses = sess2.ses;
	cryp2.len = BLOCK_SIZE;
	cryp2.src = plaintext2;
	cryp2.dst = plaintext2;
	cryp2.iv = iv2;
	cryp2.op = COP_ENCRYPT;
	DO_OR_DIE(do_async_crypt(cfd, &cryp2), 0);
	if (debug) printf("cryp2 written out\n");

	DO_OR_DIE(do_async_fetch(cfd, &cryp1), 0);
	DO_OR_DIE(do_async_fetch(cfd, &cryp2), 0);
	if (debug) printf("cryp1 + cryp2 successfully read\n");

	/* Verify the first result against its known-answer ciphertext. */
	if (memcmp(plaintext1, ciphertext1, BLOCK_SIZE) != 0) {
		int i;
		fprintf(stderr,
			"FAIL: Encrypted data are different from the expected ciphertext.\n");
		printf("plaintext:");
		for (i = 0; i < BLOCK_SIZE; i++) {
			if ((i % 30) == 0)
				printf("\n");
			printf("%02x ", plaintext1[i]);
		}
		printf("ciphertext:");
		for (i = 0; i < BLOCK_SIZE; i++) {
			if ((i % 30) == 0)
				printf("\n");
			printf("%02x ", ciphertext1[i]);
		}
		printf("\n");
		return 1;
	} else {
		if (debug) printf("result 1 passed\n");
	}

	/* Test 2 */
	/* Verify the second result against its known-answer ciphertext. */
	if (memcmp(plaintext2, ciphertext2, BLOCK_SIZE) != 0) {
		int i;
		fprintf(stderr,
			"FAIL: Encrypted data are different from the expected ciphertext.\n");
		printf("plaintext:");
		for (i = 0; i < BLOCK_SIZE; i++) {
			if ((i % 30) == 0)
				printf("\n");
			printf("%02x ", plaintext2[i]);
		}
		printf("ciphertext:");
		for (i = 0; i < BLOCK_SIZE; i++) {
			if ((i % 30) == 0)
				printf("\n");
			printf("%02x ", ciphertext2[i]);
		}
		printf("\n");
		return 1;
	} else {
		if (debug) printf("result 2 passed\n");
	}

	if (debug) printf("AES Test passed\n");

	/* Finish crypto session */
	if (ioctl(cfd, CIOCFSESSION, &sess1.ses)) {
		perror("ioctl(CIOCFSESSION)");
		return 1;
	}
	if (ioctl(cfd, CIOCFSESSION, &sess2.ses)) {
		perror("ioctl(CIOCFSESSION)");
		return 1;
	}
	return 0;
}
/* Open /dev/crypto, clone a private descriptor via CRIOGET, and run the
 * AES known-answer and round-trip tests.  Any argument enables debug output.
 * Returns 0 on success, 1 on any failure.
 *
 * Fix: use the named FD_CLOEXEC constant (value 1) instead of the magic
 * number in the fcntl() call; also corrected a comment typo. */
int
main(int argc, char** argv)
{
	int fd = -1, cfd = -1;

	if (argc > 1) debug = 1;

	/* Open the crypto device */
	fd = open("/dev/crypto", O_RDWR, 0);
	if (fd < 0) {
		perror("open(/dev/crypto)");
		return 1;
	}

	/* Clone file descriptor */
	if (ioctl(fd, CRIOGET, &cfd)) {
		perror("ioctl(CRIOGET)");
		return 1;
	}

	/* Set close-on-exec (not really needed here) */
	if (fcntl(cfd, F_SETFD, FD_CLOEXEC) == -1) {
		perror("fcntl(F_SETFD)");
		return 1;
	}

	/* Run the test itself */
	if (test_aes(cfd))
		return 1;
	if (test_crypto(cfd))
		return 1;

	/* Close cloned descriptor */
	if (close(cfd)) {
		perror("close(cfd)");
		return 1;
	}

	/* Close the original descriptor */
	if (close(fd)) {
		perror("close(fd)");
		return 1;
	}
	return 0;
}
#else
/* Built without ENABLE_ASYNC support: nothing to test, succeed trivially. */
int
main(int argc, char** argv)
{
	return (0);
}
#endif
|
cryptodev-linux/cryptodev-linux
|
tests/async_cipher.c
|
C
|
gpl-2.0
| 7,773
|
/*
* $Id: x2c.c,v 1.7 2009/06/02 09:40:53 bnv Exp $
* $Log: x2c.c,v $
* Revision 1.7 2009/06/02 09:40:53 bnv
* MVS/CMS corrections
*
* Revision 1.6 2008/07/15 07:40:54 bnv
* #include changed from <> to ""
*
* Revision 1.5 2008/07/14 13:08:16 bnv
* MVS,CMS support
*
* Revision 1.4 2002/06/11 12:37:15 bnv
* Added: CDECL
*
* Revision 1.3 2001/06/25 18:49:48 bnv
* Header changed to Id
*
* Revision 1.2 1999/11/26 12:52:25 bnv
* Changed: To use the new macros
*
* Revision 1.1 1998/07/02 17:20:58 bnv
* Initial Version
*
*/
#include <ctype.h>
#include "lerror.h"
#include "lstring.h"
/* ------------------ Lx2c ------------------ */
/* Convert the hexadecimal string 'from' into packed bytes in 'to'
 * (REXX X2C).  Whitespace separates groups of hex digits; a group with an
 * odd number of digits contributes its leading digit as a lone low nibble.
 * On an invalid character, ERR_INVALID_HEX_CONST is raised and 'to' is set
 * to the null string.
 *
 * Fix: the scanning loops evaluated ISSPACE(f[i]) / ISXDIGIT(f[j]) BEFORE
 * the bounds test, reading one byte past the end of the string when the
 * index reached LLEN(*from); the bounds check now short-circuits first. */
void __CDECL
Lx2c( const PLstr to, const PLstr from )
{
	int i,j,r;
	char *t,*f;

	L2STR(from);
	Lfx(to,LLEN(*from)/2+1);	/* a rough estimation */
	t = LSTR(*to); f = LSTR(*from);
	for (i=r=0; i<LLEN(*from); ) {
		for (; (i<LLEN(*from)) && ISSPACE(f[i]); i++) ;	/* skip spaces */
		for (j=i; (j<LLEN(*from)) && ISXDIGIT(f[j]); j++) ;	/* find hexdigits */
		if ((i<LLEN(*from)) && (j==i)) {	/* Ooops wrong character */
			Lerror(ERR_INVALID_HEX_CONST,0);
			LZEROSTR(*to);	/* return null when error occures */
			return;
		}
		if ((j-i)&1) {	/* odd digit count: first digit is a lone nibble */
			t[r++] = HEXVAL(f[i]);
			i++;
		}
		for (; i<j; i+=2)
			t[r++] = (HEXVAL(f[i])<<4) | HEXVAL(f[i+1]);
	}
	LTYPE(*to) = LSTRING_TY;
	LLEN(*to) = r;
} /* Lx2c */
|
vlachoudis/brexx
|
lstring/x2c.c
|
C
|
gpl-2.0
| 1,386
|
/*
* This file is part of mpv.
*
* mpv is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* mpv is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with mpv. If not, see <http://www.gnu.org/licenses/>.
*/
/// \file
/// \ingroup Config
#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <strings.h>
#include <assert.h>
#include <stdbool.h>
#include "libmpv/client.h"
#include "mpv_talloc.h"
#include "m_config.h"
#include "options/m_option.h"
#include "common/msg.h"
#include "common/msg_control.h"
static const union m_option_value default_value;
// Profiles allow to predefine some sets of options that can then
// be applied later on with the internal -profile option.
#define MAX_PROFILE_DEPTH 20
// Maximal include depth.
#define MAX_RECURSION_DEPTH 8
struct m_profile {
struct m_profile *next;
char *name;
char *desc;
int num_opts;
// Option/value pair array.
char **opts;
};
// In the file local case, this contains the old global value.
struct m_opt_backup {
struct m_opt_backup *next;
struct m_config_option *co;
void *backup;
};
// Handle the "include" pseudo-option: load and apply another config file
// via the registered includefunc callback.  Returns 1 on success (or when
// only validating, set==false), or an M_OPT_* error code.
static int parse_include(struct m_config *config, struct bstr param, bool set,
                         int flags)
{
    if (param.len == 0)
        return M_OPT_MISSING_PARAM;
    if (!set)
        return 1;
    // Guard against config files including each other in a cycle.
    if (config->recursion_depth >= MAX_RECURSION_DEPTH) {
        MP_ERR(config, "Maximum 'include' nesting depth exceeded.\n");
        return M_OPT_INVALID;
    }
    char *filename = bstrdup0(NULL, param);
    config->recursion_depth += 1;
    config->includefunc(config->includefunc_ctx, filename, flags);
    config->recursion_depth -= 1;
    talloc_free(filename);
    return 1;
}
// Handle the "profile" option: "--profile=help" lists available profiles;
// otherwise the parameter is parsed as a string list and each named profile
// is applied in order (only when set==true).  Returns the last status
// (negative M_OPT_* on error; M_OPT_EXIT-1 for the help path).
static int parse_profile(struct m_config *config, const struct m_option *opt,
                         struct bstr name, struct bstr param, bool set, int flags)
{
    if (!bstrcmp0(param, "help")) {
        struct m_profile *p;
        if (!config->profiles) {
            MP_INFO(config, "No profiles have been defined.\n");
            return M_OPT_EXIT - 1;
        }
        MP_INFO(config, "Available profiles:\n");
        for (p = config->profiles; p; p = p->next)
            MP_INFO(config, "\t%s\t%s\n", p->name, p->desc ? p->desc : "");
        MP_INFO(config, "\n");
        return M_OPT_EXIT - 1;
    }

    char **list = NULL;
    int r = m_option_type_string_list.parse(config->log, opt, name, param, &list);
    if (r < 0)
        return r;
    if (!list || !list[0])
        return M_OPT_INVALID;
    for (int i = 0; list[i]; i++) {
        if (set)
            r = m_config_set_profile(config, list[i], flags);
        if (r < 0)
            break;
    }
    m_option_free(opt, &list);
    return r;
}
// Print the contents of profile 'param', recursing (up to
// MAX_PROFILE_DEPTH) into profiles referenced via a nested "profile"
// option.  Returns M_OPT_EXIT-1, or M_OPT_* errors for bad input.
//
// Fix: an empty element in a comma-separated sub-profile list ("a,,b")
// hit `if (!l) continue;` WITHOUT advancing the list pointer, so strchr()
// found the same comma forever — an infinite loop.  Empty elements are now
// skipped while still advancing past the comma.
static int show_profile(struct m_config *config, bstr param)
{
    struct m_profile *p;
    if (!param.len)
        return M_OPT_MISSING_PARAM;
    if (!(p = m_config_get_profile(config, param))) {
        MP_ERR(config, "Unknown profile '%.*s'.\n", BSTR_P(param));
        return M_OPT_EXIT - 1;
    }
    if (!config->profile_depth)
        MP_INFO(config, "Profile %s: %s\n", p->name,
                p->desc ? p->desc : "");
    config->profile_depth++;
    for (int i = 0; i < p->num_opts; i++) {
        MP_INFO(config, "%*s%s=%s\n", config->profile_depth, "",
                p->opts[2 * i], p->opts[2 * i + 1]);

        if (config->profile_depth < MAX_PROFILE_DEPTH
            && !strcmp(p->opts[2*i], "profile")) {
            char *e, *list = p->opts[2 * i + 1];
            while ((e = strchr(list, ','))) {
                int l = e - list;
                if (l)
                    show_profile(config, (bstr){list, l});
                list = e + 1;   // always advance, even for empty elements
            }
            if (list[0] != '\0')
                show_profile(config, bstr0(list));
        }
    }
    config->profile_depth--;
    if (!config->profile_depth)
        MP_INFO(config, "\n");
    return M_OPT_EXIT - 1;
}
// Print the full option list (for --list-options) and request termination.
static int list_options(struct m_config *config)
{
    m_config_print_option_list(config);
    return M_OPT_EXIT;
}
// The memcpys are supposed to work around the strict aliasing violation,
// that would result if we just dereferenced a void** (where the void** is
// actually casted from struct some_type* ). The dummy struct type is in
// theory needed, because void* and struct pointers could have different
// representations, while pointers to different struct types don't.
// Read a sub-struct pointer out of an option field.  memcpy (instead of a
// void** dereference) avoids the strict-aliasing violation; see the comment
// block above for why the dummy struct type is used.
static void *substruct_read_ptr(const void *ptr)
{
    struct mp_dummy_ *out;
    memcpy(&out, ptr, sizeof(out));
    return out;
}
// Counterpart to substruct_read_ptr(): store a sub-struct pointer into an
// option field without aliasing through an incompatible pointer type.
static void substruct_write_ptr(void *ptr, void *val)
{
    struct mp_dummy_ *in = val;
    memcpy(ptr, &in, sizeof(in));
}
static void add_options(struct m_config *config,
const char *parent_name,
void *optstruct,
const void *optstruct_def,
const struct m_option *defs);
// talloc destructor for struct m_config: undo per-file overrides, then free
// the dynamically allocated value of every option.
static void config_destroy(void *p)
{
    struct m_config *config = p;
    m_config_restore_backups(config);
    for (int n = 0; n < config->num_opts; n++)
        m_option_free(config->opts[n].opt, config->opts[n].data);
}
// Allocate a new m_config.  'size'/'defaults' describe the option struct
// (size==0 creates a dummy config with no backing struct); 'options' is the
// option table that gets registered recursively via add_options().
struct m_config *m_config_new(void *talloc_ctx, struct mp_log *log,
                              size_t size, const void *defaults,
                              const struct m_option *options)
{
    struct m_config *config = talloc(talloc_ctx, struct m_config);
    talloc_set_destructor(config, config_destroy);
    *config = (struct m_config)
        {.log = log, .size = size, .defaults = defaults, .options = options};
    // size==0 means a dummy object is created
    if (size) {
        config->optstruct = talloc_zero_size(config, size);
        if (defaults)
            memcpy(config->optstruct, defaults, size);
    }
    if (options)
        add_options(config, "", config->optstruct, defaults, options);
    return config;
}
// Convenience: build an m_config from an object (filter/vo/ao) description,
// allocating its private option struct.
struct m_config *m_config_from_obj_desc(void *talloc_ctx, struct mp_log *log,
                                        struct m_obj_desc *desc)
{
    return m_config_new(talloc_ctx, log, desc->priv_size, desc->priv_defaults,
                        desc->options);
}
// Like m_config_from_obj_desc(), but don't allocate option struct
// (size 0 creates a dummy config usable for validation only).
struct m_config *m_config_from_obj_desc_noalloc(void *talloc_ctx,
                                                struct mp_log *log,
                                                struct m_obj_desc *desc)
{
    return m_config_new(talloc_ctx, log, 0, desc->priv_defaults, desc->options);
}
// Apply a NULL-terminated array of name/value string pairs to the config.
// Stops at the first failing option and returns its error code; 0 on success.
int m_config_set_obj_params(struct m_config *conf, char **args)
{
    for (int n = 0; args && args[n * 2 + 0]; n++) {
        int r = m_config_set_option(conf, bstr0(args[n * 2 + 0]),
                                    bstr0(args[n * 2 + 1]));
        if (r < 0)
            return r;
    }
    return 0;
}
// Look up 'name' in a list of per-object default settings and apply the
// matching entry's attributes.  Returns 0 if no entry matches, otherwise the
// result of applying the attributes.
int m_config_apply_defaults(struct m_config *config, const char *name,
                            struct m_obj_settings *defaults)
{
    int r = 0;
    for (int n = 0; defaults && defaults[n].name; n++) {
        struct m_obj_settings *entry = &defaults[n];
        if (name && strcmp(entry->name, name) == 0) {
            r = m_config_set_obj_params(config, entry->attribs);
            break;
        }
    }
    return r;
}
// Save the current value of an option onto config->backup_opts so it can be
// restored later by m_config_restore_backups() (per-file option handling).
// Each underlying data pointer is backed up at most once.
static void ensure_backup(struct m_config *config, struct m_config_option *co)
{
    // Parents of sub-options and global/data-less options are never backed up.
    if (co->opt->type->flags & M_OPT_TYPE_HAS_CHILD)
        return;
    if (co->opt->flags & M_OPT_GLOBAL)
        return;
    if (!co->data)
        return;
    for (struct m_opt_backup *cur = config->backup_opts; cur; cur = cur->next) {
        if (cur->co->data == co->data) // comparing data ptr catches aliases
            return;
    }
    struct m_opt_backup *bc = talloc_ptrtype(NULL, bc);
    *bc = (struct m_opt_backup) {
        .co = co,
        .backup = talloc_zero_size(bc, co->opt->type->size),
    };
    m_option_copy(co->opt, bc->backup, co->data);
    // Push onto the singly linked list of pending backups.
    bc->next = config->backup_opts;
    config->backup_opts = bc;
    co->is_set_locally = true;
}
// Restore every option value saved by ensure_backup() and free the backup
// list (end-of-file handling for per-file options).
void m_config_restore_backups(struct m_config *config)
{
    while (config->backup_opts) {
        struct m_opt_backup *bc = config->backup_opts;
        config->backup_opts = bc->next;
        m_option_copy(bc->co->opt, bc->co->data, bc->backup);
        m_option_free(bc->co->opt, bc->backup);
        bc->co->is_set_locally = false;
        talloc_free(bc);
    }
}
// Back up a single option by name; logs an error if the name is unknown.
void m_config_backup_opt(struct m_config *config, const char *opt)
{
    struct m_config_option *co = m_config_get_co(config, bstr0(opt));
    if (co) {
        ensure_backup(config, co);
    } else {
        MP_ERR(config, "Option %s not found.\n", opt);
    }
}
// Back up every registered option (ensure_backup() itself skips the ones
// that cannot or need not be saved).
void m_config_backup_all_opts(struct m_config *config)
{
    for (int n = 0; n < config->num_opts; n++)
        ensure_backup(config, &config->opts[n]);
}
// Given an option --opt, add --no-opt (if applicable).
// Applies to flag options and to choice options that accept a "no" value;
// the generated option is a CONF_TYPE_STORE writing that value.
static void add_negation_option(struct m_config *config,
                                struct m_config_option *orig,
                                const char *parent_name)
{
    const struct m_option *opt = orig->opt;
    int value;
    if (opt->type == CONF_TYPE_FLAG) {
        value = 0;
    } else if (opt->type == CONF_TYPE_CHOICE) {
        // Find out whether there's a "no" choice.
        // m_option_parse() should be used for this, but it prints
        // unsilenceable error messages.
        struct m_opt_choice_alternatives *alt = opt->priv;
        for ( ; alt->name; alt++) {
            if (strcmp(alt->name, "no") == 0)
                break;
        }
        if (!alt->name)
            return;
        value = alt->value;
    } else {
        return;
    }
    // Synthesize a store-option that writes 'value' into the same field.
    struct m_option *no_opt = talloc_ptrtype(config, no_opt);
    *no_opt = (struct m_option) {
        .name = opt->name,
        .type = CONF_TYPE_STORE,
        .flags = opt->flags & (M_OPT_NOCFG | M_OPT_GLOBAL | M_OPT_PRE_PARSE),
        .offset = opt->offset,
        .max = value,
    };
    // Add --no-sub-opt
    struct m_config_option co = *orig;
    co.name = talloc_asprintf(config, "no-%s", orig->name);
    co.opt = no_opt;
    co.is_generated = true;
    MP_TARRAY_APPEND(config, config->opts, config->num_opts, co);
    // Add --sub-no-opt (unfortunately needed for: "--sub=...:no-opt")
    if (parent_name[0]) {
        co.name = talloc_asprintf(config, "%s-no-%s", parent_name, opt->name);
        MP_TARRAY_APPEND(config, config->opts, config->num_opts, co);
    }
}
static void m_config_add_option(struct m_config *config,
const char *parent_name,
void *optstruct,
const void *optstruct_def,
const struct m_option *arg);
// Register every option of the table 'defs' (recursing into sub-structs via
// m_config_add_option).  'parent_name' prefixes the generated option names.
static void add_options(struct m_config *config,
                        const char *parent_name,
                        void *optstruct,
                        const void *optstruct_def,
                        const struct m_option *defs)
{
    for (int i = 0; defs && defs[i].name; i++)
        m_config_add_option(config, parent_name, optstruct, optstruct_def, &defs[i]);
}
// Initialize a field with a given value. In case this is dynamic data, it has
// to be allocated and copied. src can alias dst, also can be NULL.
static void init_opt_inplace(const struct m_option *opt, void *dst,
                             const void *src)
{
    // Copy src out first so clearing dst is safe even when src aliases dst.
    union m_option_value temp = {0};
    if (src)
        memcpy(&temp, src, opt->type->size);
    memset(dst, 0, opt->type->size);
    m_option_copy(opt, dst, &temp);
}
// Register a single option with the config: resolve its data/default
// pointers, build its full (parent-prefixed) name, recurse into sub-option
// structs, initialize the value, and generate --no-* and alias variants.
static void m_config_add_option(struct m_config *config,
                                const char *parent_name,
                                void *optstruct,
                                const void *optstruct_def,
                                const struct m_option *arg)
{
    assert(config != NULL);
    assert(arg != NULL);

    struct m_config_option co = {
        .opt = arg,
        .name = arg->name,
    };

    // Resolve the value and default-value storage for this option.
    if (arg->offset >= 0) {
        if (optstruct)
            co.data = (char *)optstruct + arg->offset;
        if (optstruct_def)
            co.default_data = (char *)optstruct_def + arg->offset;
    }

    if (arg->defval)
        co.default_data = arg->defval;

    if (!co.default_data)
        co.default_data = &default_value;

    // Fill in the full name
    if (!co.name[0]) {
        co.name = parent_name;
    } else if (parent_name[0]) {
        co.name = talloc_asprintf(config, "%s-%s", parent_name, co.name);
    }

    // Option with children -> add them
    if (arg->type->flags & M_OPT_TYPE_HAS_CHILD) {
        const struct m_sub_options *subopts = arg->priv;

        void *new_optstruct = NULL;
        if (co.data) {
            new_optstruct = m_config_alloc_struct(config, subopts);
            substruct_write_ptr(co.data, new_optstruct);
        }

        const void *new_optstruct_def = substruct_read_ptr(co.default_data);
        if (!new_optstruct_def)
            new_optstruct_def = subopts->defaults;

        add_options(config, co.name, new_optstruct,
                    new_optstruct_def, subopts->opts);
    } else {
        // Initialize options
        if (co.data && co.default_data) {
            if (arg->type->flags & M_OPT_TYPE_DYNAMIC) {
                // Would leak memory by overwriting *co.data repeatedly.
                for (int i = 0; i < config->num_opts; i++) {
                    if (co.data == config->opts[i].data)
                        assert(0);
                }
            }
            init_opt_inplace(arg, co.data, co.default_data);
        }
    }

    if (arg->name[0]) // no own name -> hidden
        MP_TARRAY_APPEND(config, config->opts, config->num_opts, co);

    add_negation_option(config, &co, parent_name);

    // An alias to an option that itself has a --no-* form gets its own
    // generated --no-<alias> entry, itself implemented as an alias.
    if (co.opt->type == &m_option_type_alias) {
        co.is_generated = true; // hide it
        const char *alias = (const char *)co.opt->priv;
        char no_alias[40];
        snprintf(no_alias, sizeof(no_alias), "no-%s", alias);
        if (m_config_get_co(config, bstr0(no_alias))) {
            struct m_option *new = talloc_zero(config, struct m_option);
            new->name = talloc_asprintf(config, "no-%s", co.name);
            new->priv = talloc_strdup(config, no_alias);
            new->type = &m_option_type_alias;
            new->offset = -1;
            m_config_add_option(config, "", NULL, NULL, new);
        }
    }

    if (co.opt->type == &m_option_type_removed)
        co.is_generated = true; // hide it
}
// Look up an option by name, honoring wildcard options, aliases (resolved
// recursively, with a one-time deprecation warning) and removed options
// (warned about and reported as unknown).  Returns NULL if not found.
struct m_config_option *m_config_get_co(const struct m_config *config,
                                        struct bstr name)
{
    if (!name.len)
        return NULL;

    for (int n = 0; n < config->num_opts; n++) {
        struct m_config_option *co = &config->opts[n];
        struct bstr coname = bstr0(co->name);
        bool matches = false;
        if ((co->opt->type->flags & M_OPT_TYPE_ALLOW_WILDCARD)
            && bstr_endswith0(coname, "*")) {
            // Wildcard option: match on the prefix before the '*'.
            coname.len--;
            if (bstrcmp(bstr_splice(name, 0, coname.len), coname) == 0)
                matches = true;
        } else if (bstrcmp(coname, name) == 0)
            matches = true;
        if (matches) {
            const char *prefix = config->is_toplevel ? "--" : "";
            if (co->opt->type == &m_option_type_alias) {
                const char *alias = (const char *)co->opt->priv;
                if (!co->warning_was_printed) {
                    MP_WARN(config, "Warning: option %s%s was replaced with "
                            "%s%s and might be removed in the future.\n",
                            prefix, co->name, prefix, alias);
                    co->warning_was_printed = true;
                }
                // Resolve the alias target (may itself be an alias).
                return m_config_get_co(config, bstr0(alias));
            } else if (co->opt->type == &m_option_type_removed) {
                if (!co->warning_was_printed) {
                    char *msg = co->opt->priv;
                    if (msg) {
                        MP_FATAL(config, "Option %s%s was removed: %s\n",
                                 prefix, co->name, msg);
                    } else {
                        MP_FATAL(config, "Option %s%s was removed.\n",
                                 prefix, co->name);
                    }
                    co->warning_was_printed = true;
                }
                return NULL;
            } else if (co->opt->deprecation_message) {
                if (!co->warning_was_printed) {
                    MP_WARN(config, "Warning: option %s%s is deprecated "
                            "and might be removed in the future (%s).\n",
                            prefix, co->name, co->opt->deprecation_message);
                    co->warning_was_printed = true;
                }
            }
            return co;
        }
    }
    return NULL;
}
const char *m_config_get_positional_option(const struct m_config *config, int p)
{
int pos = 0;
for (int n = 0; n < config->num_opts; n++) {
struct m_config_option *co = &config->opts[n];
if (!co->is_generated) {
if (pos == p)
return co->name;
pos++;
}
}
return NULL;
}
// return: <0: M_OPT_ error, 0: skip, 1: check, 2: set
// Validate whether the option may be set under the given M_SETOPT_* flags,
// and back up its old value when per-file (M_SETOPT_BACKUP) setting is used.
static int handle_set_opt_flags(struct m_config *config,
                                struct m_config_option *co, int flags)
{
    int optflags = co->opt->flags;
    bool set = !(flags & M_SETOPT_CHECK_ONLY);

    if ((flags & M_SETOPT_PRE_PARSE_ONLY) && !(optflags & M_OPT_PRE_PARSE))
        return 0;

    if ((flags & M_SETOPT_PRESERVE_CMDLINE) && co->is_set_from_cmdline)
        set = false;

    if ((flags & M_SETOPT_NO_FIXED) && (optflags & M_OPT_FIXED))
        return M_OPT_INVALID;

    if ((flags & M_SETOPT_NO_PRE_PARSE) && (optflags & M_OPT_PRE_PARSE))
        return M_OPT_INVALID;

    // Check if this option isn't forbidden in the current mode
    if ((flags & M_SETOPT_FROM_CONFIG_FILE) && (optflags & M_OPT_NOCFG)) {
        MP_ERR(config, "The %s option can't be used in a config file.\n",
               co->name);
        return M_OPT_INVALID;
    }
    if (flags & M_SETOPT_BACKUP) {
        if (optflags & M_OPT_GLOBAL) {
            MP_ERR(config, "The %s option is global and can't be set per-file.\n",
                   co->name);
            return M_OPT_INVALID;
        }
        if (set)
            ensure_backup(config, co);
    }

    return set ? 2 : 1;
}
// Post-set bookkeeping: record the command-line origin (also for aliases
// sharing the same storage) and refresh terminal message levels if the
// option affects them.
static void handle_on_set(struct m_config *config, struct m_config_option *co,
                          int flags)
{
    if (flags & M_SETOPT_FROM_CMDLINE) {
        co->is_set_from_cmdline = true;
        // Aliases share the data pointer, so mark them as well.
        if (co->data) {
            for (int i = 0; i < config->num_opts; i++) {
                struct m_config_option *other = &config->opts[i];
                if (other->data == co->data)
                    other->is_set_from_cmdline = true;
            }
        }
    }

    if (config->global && (co->opt->flags & M_OPT_TERM))
        mp_msg_update_msglevels(config->global);
}
// Copy a raw value into the option. The type data points to is as in:
// m_config_get_co(config, name)->opt. Returns 0, 1 (check-only), or an
// M_OPT_ error code.
int m_config_set_option_raw(struct m_config *config, struct m_config_option *co,
                            void *data, int flags)
{
    if (!co || !co->data) {
        // Unknown option, or a special pseudo-option without backing storage
        // ("include", "profile", ...) which would require dedicated code.
        return M_OPT_UNKNOWN;
    }

    int res = handle_set_opt_flags(config, co, flags);
    if (res <= 1)
        return res;

    m_option_copy(co->opt, co->data, data);
    handle_on_set(config, co, flags);
    return 0;
}
static int parse_subopts(struct m_config *config, char *name, char *prefix,
struct bstr param, int flags);
// Parse and (optionally) apply a single option given as name/param strings.
// Returns >= 0 on success or an M_OPT_ error code.
static int m_config_parse_option(struct m_config *config, struct bstr name,
                                 struct bstr param, int flags)
{
    assert(config != NULL);
    struct m_config_option *co = m_config_get_co(config, name);
    if (!co)
        return M_OPT_UNKNOWN;
    // This is the only mandatory function
    assert(co->opt->type->parse);
    int r = handle_set_opt_flags(config, co, flags);
    if (r <= 0)
        return r;
    bool set = r == 2;
    if (set) {
        MP_VERBOSE(config, "Setting option '%.*s' = '%.*s' (flags = %d)\n",
                   BSTR_P(name), BSTR_P(param), flags);
    }
    // Pseudo-options handled by dedicated code paths.
    if (config->includefunc && bstr_equals0(name, "include"))
        return parse_include(config, param, set, flags);
    if (config->use_profiles && bstr_equals0(name, "profile"))
        return parse_profile(config, co->opt, name, param, set, flags);
    if (config->use_profiles && bstr_equals0(name, "show-profile"))
        return show_profile(config, param);
    if (bstr_equals0(name, "list-options"))
        return list_options(config);
    // Option with children are a bit different to parse
    if (co->opt->type->flags & M_OPT_TYPE_HAS_CHILD) {
        char prefix[110];
        assert(strlen(co->name) < 100);
        // Bounded write: plain sprintf() would silently overflow the buffer
        // if the assert above is compiled out with NDEBUG.
        snprintf(prefix, sizeof(prefix), "%s-", co->name);
        return parse_subopts(config, (char *)co->name, prefix, param, flags);
    }
    r = m_option_parse(config->log, co->opt, name, param, set ? co->data : NULL);
    if (r >= 0 && set)
        handle_on_set(config, co, flags);
    return r;
}
// Split "param" into key/value pairs and apply each one as the option named
// prefix+key. Returns >= 0 on success or an M_OPT_ error code.
static int parse_subopts(struct m_config *config, char *name, char *prefix,
                         struct bstr param, int flags)
{
    char **pairs = NULL;
    // Split the argument into a NULL-terminated flat key/value array.
    int res = m_option_type_subconfig.parse(config->log, NULL, bstr0(""),
                                            param, &pairs);
    if (res < 0)
        return res;
    for (int i = 0; pairs && pairs[2 * i]; i++) {
        // Build the full option name.
        char full[110];
        if (snprintf(full, 110, "%s%s", prefix, pairs[2 * i]) > 100)
            abort();
        res = m_config_parse_option(config, bstr0(full),
                                    bstr0(pairs[2 * i + 1]), flags);
        if (res < 0) {
            if (res > M_OPT_EXIT) {
                MP_ERR(config, "Error parsing suboption %s/%s (%s)\n",
                       name, pairs[2 * i], m_option_strerror(res));
                res = M_OPT_INVALID;
            }
            break;
        }
    }
    talloc_free(pairs);
    return res;
}
// Parse a whole suboption string ("key1=v1:key2=v2...") for option "name".
int m_config_parse_suboptions(struct m_config *config, char *name,
                              char *subopts)
{
    // Nothing to do for a missing or empty suboption string.
    if (!subopts || !subopts[0])
        return 0;
    int res = parse_subopts(config, name, "", bstr0(subopts), 0);
    if (res < 0 && res > M_OPT_EXIT) {
        MP_ERR(config, "Error parsing suboption %s (%s)\n",
               name, m_option_strerror(res));
        res = M_OPT_INVALID;
    }
    return res;
}
// Parse/set an option, logging parse failures (except M_OPT_EXIT-class
// results, which are passed through unchanged).
int m_config_set_option_ext(struct m_config *config, struct bstr name,
                            struct bstr param, int flags)
{
    int res = m_config_parse_option(config, name, param, flags);
    if (res >= 0 || res <= M_OPT_EXIT)
        return res;
    MP_ERR(config, "Error parsing option %.*s (%s)\n",
           BSTR_P(name), m_option_strerror(res));
    return M_OPT_INVALID;
}
// Convenience wrapper around m_config_set_option_ext() with no extra flags.
int m_config_set_option(struct m_config *config, struct bstr name,
                        struct bstr param)
{
    return m_config_set_option_ext(config, name, param, 0);
}
// Set an option from an mpv_node value. String nodes go through the option's
// own parser; all other formats are converted with m_option_set_node().
// Returns 0 on success or an M_OPT_ error code.
int m_config_set_option_node(struct m_config *config, bstr name,
                             struct mpv_node *data, int flags)
{
    struct m_config_option *co = m_config_get_co(config, name);
    if (!co)
        return M_OPT_UNKNOWN;
    int r;
    // Do this on an "empty" type to make setting the option strictly overwrite
    // the old value, as opposed to e.g. appending to lists.
    union m_option_value val = {0};
    if (data->format == MPV_FORMAT_STRING) {
        bstr param = bstr0(data->u.string);
        // Use a null log: errors are reported by the caller via r.
        r = m_option_parse(mp_null_log, co->opt, name, param, &val);
    } else {
        r = m_option_set_node(co->opt, &val, data);
    }
    if (r >= 0)
        r = m_config_set_option_raw(config, co, &val, flags);
    if (mp_msg_test(config->log, MSGL_V)) {
        char *s = m_option_type_node.print(NULL, data);
        MP_VERBOSE(config, "Setting option '%.*s' = %s (flags = %d) -> %d\n",
                   BSTR_P(name), s ? s : "?", flags, r);
        talloc_free(s);
    }
    // val was filled by parse/set above; release it regardless of outcome.
    m_option_free(co->opt, &val);
    return r;
}
// Look up an option by name; returns its descriptor, or NULL if unknown.
const struct m_option *m_config_get_option(const struct m_config *config,
                                           struct bstr name)
{
    assert(config != NULL);
    struct m_config_option *co = m_config_get_co(config, name);
    if (!co)
        return NULL;
    return co->opt;
}
// Whether the named option needs a parameter; "-clr" suffix options never do.
// Returns M_OPT_UNKNOWN if the option doesn't exist.
int m_config_option_requires_param(struct m_config *config, bstr name)
{
    const struct m_option *opt = m_config_get_option(config, name);
    if (!opt)
        return M_OPT_UNKNOWN;
    return bstr_endswith0(name, "-clr") ? 0 : m_option_required_params(opt);
}
// qsort() callback: order options case-insensitively by name.
static int sort_opt_compare(const void *pa, const void *pb)
{
    const struct m_config_option *lhs = pa;
    const struct m_config_option *rhs = pb;
    return strcasecmp(lhs->name, rhs->name);
}
// Print a human-readable listing of all user-visible options to the log:
// name, type (or choices), value range, default, and flag markers.
void m_config_print_option_list(const struct m_config *config)
{
    char min[50], max[50];
    int count = 0;
    const char *prefix = config->is_toplevel ? "--" : "";
    // Sort a copy so config->opts itself stays in registration order.
    struct m_config_option *sorted =
        talloc_memdup(NULL, config->opts, config->num_opts * sizeof(sorted[0]));
    if (config->is_toplevel)
        qsort(sorted, config->num_opts, sizeof(sorted[0]), sort_opt_compare);
    MP_INFO(config, "Options:\n\n");
    for (int i = 0; i < config->num_opts; i++) {
        struct m_config_option *co = &sorted[i];
        const struct m_option *opt = co->opt;
        // Skip container options, generated entries, and alias/removed stubs.
        if (opt->type->flags & M_OPT_TYPE_HAS_CHILD)
            continue;
        if (co->is_generated)
            continue;
        if (opt->type == &m_option_type_alias ||
            opt->type == &m_option_type_removed)
            continue;
        MP_INFO(config, " %s%-30s", prefix, co->name);
        if (opt->type == &m_option_type_choice) {
            MP_INFO(config, " Choices:");
            struct m_opt_choice_alternatives *alt = opt->priv;
            for (int n = 0; alt[n].name; n++)
                MP_INFO(config, " %s", alt[n].name);
            if (opt->flags & (M_OPT_MIN | M_OPT_MAX))
                MP_INFO(config, " (or an integer)");
        } else {
            MP_INFO(config, " %s", co->opt->type->name);
        }
        if (opt->flags & (M_OPT_MIN | M_OPT_MAX)) {
            // One-sided ranges print "any" for the missing bound.
            snprintf(min, sizeof(min), "any");
            snprintf(max, sizeof(max), "any");
            if (opt->flags & M_OPT_MIN)
                snprintf(min, sizeof(min), "%.14g", opt->min);
            if (opt->flags & M_OPT_MAX)
                snprintf(max, sizeof(max), "%.14g", opt->max);
            MP_INFO(config, " (%s to %s)", min, max);
        }
        char *def = NULL;
        if (co->default_data)
            def = m_option_print(co->opt, co->default_data);
        if (def) {
            MP_INFO(config, " (default: %s)", def);
            talloc_free(def);
        }
        if (opt->flags & M_OPT_GLOBAL)
            MP_INFO(config, " [global]");
        if (opt->flags & M_OPT_NOCFG)
            MP_INFO(config, " [nocfg]");
        if (opt->flags & M_OPT_FILE)
            MP_INFO(config, " [file]");
        MP_INFO(config, "\n");
        count++;
    }
    MP_INFO(config, "\nTotal: %d options\n", count);
    talloc_free(sorted);
}
// Return a NULL-terminated array (allocated on ta_parent) of the names of
// all user-visible options.
char **m_config_list_options(void *ta_parent, const struct m_config *config)
{
    char **names = talloc_new(ta_parent);
    int num = 0;
    for (int i = 0; i < config->num_opts; i++) {
        struct m_config_option *co = &config->opts[i];
        if ((co->opt->type->flags & M_OPT_TYPE_HAS_CHILD) || co->is_generated)
            continue;
        // For use with CONF_TYPE_STRING_LIST, it's important not to set list
        // as allocation parent.
        char *copy = talloc_strdup(ta_parent, co->name);
        MP_TARRAY_APPEND(ta_parent, names, num, copy);
    }
    MP_TARRAY_APPEND(ta_parent, names, num, NULL);
    return names;
}
// Find a profile by name in the config's profile list; NULL if absent.
struct m_profile *m_config_get_profile(const struct m_config *config, bstr name)
{
    struct m_profile *p = config->profiles;
    while (p && !bstr_equals0(name, p->name))
        p = p->next;
    return p;
}
// Same as m_config_get_profile(), but takes a C string.
struct m_profile *m_config_get_profile0(const struct m_config *config,
                                        char *name)
{
    return m_config_get_profile(config, bstr0(name));
}
// Get or create the profile with the given name. Returns NULL for reserved
// names (empty string and "default" never name a real profile).
struct m_profile *m_config_add_profile(struct m_config *config, char *name)
{
    if (!name || !name[0] || strcmp(name, "default") == 0)
        return NULL; // never a real profile
    struct m_profile *prof = m_config_get_profile0(config, name);
    if (!prof) {
        prof = talloc_zero(config, struct m_profile);
        prof->name = talloc_strdup(prof, name);
        // Prepend to the singly-linked profile list.
        prof->next = config->profiles;
        config->profiles = prof;
    }
    return prof;
}
// Replace the profile's description text (old value is freed).
void m_profile_set_desc(struct m_profile *p, bstr desc)
{
    talloc_free(p->desc);
    p->desc = bstrdup0(p, desc);
}
// Validate an option (check-only, no value applied) and record the
// name/value pair in the profile's flat string array. The array stores
// pairs back-to-back and is terminated by a NULL/NULL pair.
// Returns a negative M_OPT_ error on failure, 1 on success.
int m_config_set_profile_option(struct m_config *config, struct m_profile *p,
                                bstr name, bstr val)
{
    int i = m_config_set_option_ext(config, name, val,
                                    M_SETOPT_CHECK_ONLY |
                                    M_SETOPT_FROM_CONFIG_FILE);
    if (i < 0)
        return i;
    // Grow to hold one more pair plus the NULL terminator pair.
    p->opts = talloc_realloc(p, p->opts, char *, 2 * (p->num_opts + 2));
    p->opts[p->num_opts * 2] = bstrdup0(p, name);
    p->opts[p->num_opts * 2 + 1] = bstrdup0(p, val);
    p->num_opts++;
    p->opts[p->num_opts * 2] = p->opts[p->num_opts * 2 + 1] = NULL;
    return 1;
}
// Apply all options stored in the named profile. Returns 0 on success or an
// M_OPT_ error if the profile is unknown or nesting is too deep.
int m_config_set_profile(struct m_config *config, char *name, int flags)
{
    struct m_profile *prof = m_config_get_profile0(config, name);
    if (!prof) {
        MP_WARN(config, "Unknown profile '%s'.\n", name);
        return M_OPT_INVALID;
    }
    // profile_depth limits how deeply profiles may include each other.
    if (config->profile_depth > MAX_PROFILE_DEPTH) {
        MP_WARN(config, "WARNING: Profile inclusion too deep.\n");
        return M_OPT_UNKNOWN;
    }
    config->profile_depth++;
    for (int i = 0; i < prof->num_opts; i++) {
        m_config_set_option_ext(config,
                                bstr0(prof->opts[2 * i]),
                                bstr0(prof->opts[2 * i + 1]),
                                flags | M_SETOPT_FROM_CONFIG_FILE);
    }
    config->profile_depth--;
    return 0;
}
// Allocate a zeroed option struct of subopts->size bytes and, if defaults
// are provided, copy them in.
void *m_config_alloc_struct(void *talloc_ctx,
                            const struct m_sub_options *subopts)
{
    void *s = talloc_zero_size(talloc_ctx, subopts->size);
    if (subopts->defaults)
        memcpy(s, subopts->defaults, subopts->size);
    return s;
}
// Pairs a sub-option struct (ptr) with its option list (opts) so the talloc
// destructor below can free dynamically allocated option values.
struct dtor_info {
    const struct m_sub_options *opts;
    void *ptr;
};
static void free_substruct(void *ptr)
{
struct dtor_info *d = ptr;
for (int n = 0; d->opts->opts && d->opts->opts[n].type; n++) {
const struct m_option *opt = &d->opts->opts[n];
void *dst = (char *)d->ptr + opt->offset;
m_option_free(opt, dst);
}
}
// Allocate a new sub-option struct and initialize every option field from
// ptr; nested sub-structs are copied recursively. A talloc destructor is
// registered to free dynamic option values on teardown.
// Passing ptr==NULL initializes it from proper defaults.
void *m_sub_options_copy(void *talloc_ctx, const struct m_sub_options *opts,
                         const void *ptr)
{
    void *new = m_config_alloc_struct(talloc_ctx, opts);
    struct dtor_info *dtor = talloc_ptrtype(new, dtor);
    *dtor = (struct dtor_info){opts, new};
    talloc_set_destructor(dtor, free_substruct);
    for (int n = 0; opts->opts && opts->opts[n].type; n++) {
        const struct m_option *opt = &opts->opts[n];
        // Options without storage in the struct are skipped.
        if (opt->offset < 0)
            continue;
        void *src = ptr ? (char *)ptr + opt->offset : NULL;
        void *dst = (char *)new + opt->offset;
        if (opt->type->flags & M_OPT_TYPE_HAS_CHILD) {
            // Specifying a default struct for a sub-option field in the
            // containing struct's default struct is not supported here.
            // (Out of laziness. Could possibly be supported.)
            assert(!substruct_read_ptr(dst));
            const struct m_sub_options *subopts = opt->priv;
            const void *sub_src = NULL;
            if (src)
                sub_src = substruct_read_ptr(src);
            if (!sub_src)
                sub_src = subopts->defaults;
            void *sub_dst = m_sub_options_copy(new, subopts, sub_src);
            substruct_write_ptr(dst, sub_dst);
        } else {
            init_opt_inplace(opt, dst, src);
        }
    }
    return new;
}
// Create a new m_config with the same option list and copy every current
// option value from the source config.
struct m_config *m_config_dup(void *talloc_ctx, struct m_config *config)
{
    struct m_config *copy = m_config_new(talloc_ctx, config->log, config->size,
                                         config->defaults, config->options);
    assert(copy->num_opts == config->num_opts);
    for (int i = 0; i < copy->num_opts; i++) {
        assert(copy->opts[i].opt->type == config->opts[i].opt->type);
        m_option_copy(copy->opts[i].opt, copy->opts[i].data,
                      config->opts[i].data);
    }
    return copy;
}
|
Floens/mpv
|
options/m_config.c
|
C
|
gpl-2.0
| 33,574
|
#include <dfsch/lib/crypto.h>
/* ECB keeps no chaining state, so passing any IV is an error. */
static void ecb_setup(dfsch_block_cipher_mode_context_t* context,
                      uint8_t* iv,
                      size_t iv_len){
  if (iv_len){
    dfsch_error("ECB mode has no IV", NULL);
  }
}
/* Encrypt each block independently with the underlying cipher. */
static void ecb_encrypt(dfsch_block_cipher_mode_context_t* context,
                        uint8_t* in,
                        uint8_t* out,
                        size_t blocks){
  size_t bsize = context->cipher->cipher->block_size;
  size_t off = 0;
  int i;
  for (i = 0; i < blocks; i++, off += bsize){
    context->cipher->cipher->encrypt(context->cipher, in + off, out + off);
  }
}
/* Decrypt each block independently with the underlying cipher. */
static void ecb_decrypt(dfsch_block_cipher_mode_context_t* context,
                        uint8_t* in,
                        uint8_t* out,
                        size_t blocks){
  size_t bsize = context->cipher->cipher->block_size;
  size_t off = 0;
  int i;
  for (i = 0; i < blocks; i++, off += bsize){
    context->cipher->cipher->decrypt(context->cipher, in + off, out + off);
  }
}
/* Mode descriptor for ECB (electronic codebook). */
dfsch_block_cipher_mode_t dfsch_crypto_ecb_mode = {
  .type = {
    .type = DFSCH_BLOCK_CIPHER_MODE_TYPE,
    .name = "crypto:ecb",
    .size = sizeof(dfsch_block_cipher_mode_context_t),
  },
  .name = "ECB",
  .encrypt = ecb_encrypt,
  .decrypt = ecb_decrypt,
  .setup = ecb_setup
};
/* XOR src into dst in place over count bytes: dst[i] ^= src[i]. */
static void memxor(uint8_t* dst, uint8_t* src, size_t count){
  size_t i;
  for (i = 0; i < count; i++){
    dst[i] ^= src[i];
  }
}
/* CBC state: iv holds the previous ciphertext block (chaining value). */
typedef struct cbc_context_t {
  dfsch_block_cipher_mode_context_t parent;
  uint8_t* iv;
} cbc_context_t;
/* Copy the IV into the context; its length must equal the block size. */
static void cbc_setup(cbc_context_t* context,
                      uint8_t* iv,
                      size_t iv_len){
  if (iv_len != context->parent.cipher->cipher->block_size){
    dfsch_error("CBC IV length must be equal to block size", NULL);
  }
  context->iv = GC_MALLOC_ATOMIC(iv_len);
  memcpy(context->iv, iv, iv_len);
}
/* CBC encryption: C_i = E(P_i XOR C_{i-1}); context->iv carries C_{i-1}. */
static void cbc_encrypt(cbc_context_t* context,
                        uint8_t* in,
                        uint8_t* out,
                        size_t blocks){
  size_t bsize = context->parent.cipher->cipher->block_size;
  int i;
  for (i = 0; i < blocks; i++){
    uint8_t* cur_in = in + (bsize * i);
    uint8_t* cur_out = out + (bsize * i);
    /* Mix the plaintext into the chaining value, encrypt it in place,
       then emit it as this block's ciphertext. */
    memxor(context->iv, cur_in, bsize);
    context->parent.cipher->cipher->encrypt(context->parent.cipher,
                                            context->iv,
                                            context->iv);
    memcpy(cur_out, context->iv, bsize);
  }
}
/* CBC decryption: P_i = D(C_i) XOR C_{i-1}. */
static void cbc_decrypt(cbc_context_t* context,
                        uint8_t* in,
                        uint8_t* out,
                        size_t blocks){
  size_t bsize = context->parent.cipher->cipher->block_size;
  uint8_t saved_ct[bsize];
  int i;
  for (i = 0; i < blocks; i++){
    uint8_t* cur_in = in + (bsize * i);
    uint8_t* cur_out = out + (bsize * i);
    /* Save the ciphertext first; in and out may alias. */
    memcpy(saved_ct, cur_in, bsize);
    context->parent.cipher->cipher->decrypt(context->parent.cipher,
                                            cur_in,
                                            cur_out);
    memxor(cur_out, context->iv, bsize);
    memcpy(context->iv, saved_ct, bsize);
  }
}
/* Mode descriptor for CBC (cipher block chaining). */
dfsch_block_cipher_mode_t dfsch_crypto_cbc_mode = {
  .type = {
    .type = DFSCH_BLOCK_CIPHER_MODE_TYPE,
    .name = "crypto:cbc",
    .size = sizeof(cbc_context_t),
  },
  .name = "CBC",
  .encrypt = cbc_encrypt,
  .decrypt = cbc_decrypt,
  .setup = cbc_setup
};
/* CFB state: iv holds the previous ciphertext block (shift-register input). */
typedef struct cfb_context_t {
  dfsch_block_cipher_mode_context_t parent;
  uint8_t* iv;
} cfb_context_t;
/* Copy the IV into the context; its length must equal the block size. */
static void cfb_setup(cfb_context_t* context,
                      uint8_t* iv,
                      size_t iv_len){
  if (iv_len != context->parent.cipher->cipher->block_size){
    dfsch_error("CFB IV length must be equal to block size", NULL);
  }
  context->iv = GC_MALLOC_ATOMIC(iv_len);
  memcpy(context->iv, iv, iv_len);
}
/* CFB encryption: C_i = E(C_{i-1}) XOR P_i; context->iv carries C_{i-1}. */
static void cfb_encrypt(cfb_context_t* context,
                        uint8_t* in,
                        uint8_t* out,
                        size_t blocks){
  size_t bsize = context->parent.cipher->cipher->block_size;
  int i;
  for (i = 0; i < blocks; i++){
    uint8_t* cur_in = in + (bsize * i);
    uint8_t* cur_out = out + (bsize * i);
    /* Encrypt the chaining value in place, fold in the plaintext, and the
       result is both this block's ciphertext and the next chaining value. */
    context->parent.cipher->cipher->encrypt(context->parent.cipher,
                                            context->iv,
                                            context->iv);
    memxor(context->iv, cur_in, bsize);
    memcpy(cur_out, context->iv, bsize);
  }
}
/* CFB decryption: P_i = E(C_{i-1}) XOR C_i (uses the forward cipher). */
static void cfb_decrypt(cfb_context_t* context,
                        uint8_t* in,
                        uint8_t* out,
                        size_t blocks){
  size_t bsize = context->parent.cipher->cipher->block_size;
  uint8_t saved_ct[bsize];
  int i;
  for (i = 0; i < blocks; i++){
    uint8_t* cur_in = in + (bsize * i);
    uint8_t* cur_out = out + (bsize * i);
    /* Save the ciphertext first; in and out may alias. */
    memcpy(saved_ct, cur_in, bsize);
    context->parent.cipher->cipher->encrypt(context->parent.cipher,
                                            context->iv,
                                            cur_out);
    memxor(cur_out, saved_ct, bsize);
    memcpy(context->iv, saved_ct, bsize);
  }
}
/* Mode descriptor for CFB (cipher feedback). */
dfsch_block_cipher_mode_t dfsch_crypto_cfb_mode = {
  .type = {
    .type = DFSCH_BLOCK_CIPHER_MODE_TYPE,
    .name = "crypto:cfb",
    .size = sizeof(cfb_context_t),
  },
  .name = "CFB",
  .encrypt = cfb_encrypt,
  .decrypt = cfb_decrypt,
  .setup = cfb_setup
};
/* OFB state: iv holds the current keystream feedback block. */
typedef struct ofb_context_t {
  dfsch_block_cipher_mode_context_t parent;
  uint8_t* iv;
} ofb_context_t;
/* Copy the IV into the context; its length must equal the block size. */
static void ofb_setup(ofb_context_t* context,
                      uint8_t* iv,
                      size_t iv_len){
  if (iv_len != context->parent.cipher->cipher->block_size){
    dfsch_error("OFB IV length must be equal to block size", NULL);
  }
  context->iv = GC_MALLOC_ATOMIC(iv_len);
  memcpy(context->iv, iv, iv_len);
}
/* OFB is symmetric: O_i = E(O_{i-1}), output = input XOR O_i.
   The same routine therefore serves as both encrypt and decrypt. */
static void ofb_operate(ofb_context_t* context,
                        uint8_t* in,
                        uint8_t* out,
                        size_t blocks){
  size_t bsize = context->parent.cipher->cipher->block_size;
  int i;
  for (i = 0; i < blocks; i++){
    uint8_t* cur_in = in + (bsize * i);
    uint8_t* cur_out = out + (bsize * i);
    context->parent.cipher->cipher->encrypt(context->parent.cipher,
                                            context->iv,
                                            context->iv);
    memcpy(cur_out, cur_in, bsize);
    memxor(cur_out, context->iv, bsize);
  }
}
/* Mode descriptor for OFB (output feedback); encrypt == decrypt. */
dfsch_block_cipher_mode_t dfsch_crypto_ofb_mode = {
  .type = {
    .type = DFSCH_BLOCK_CIPHER_MODE_TYPE,
    .name = "crypto:ofb",
    .size = sizeof(ofb_context_t),
  },
  .name = "OFB",
  .encrypt = ofb_operate,
  .decrypt = ofb_operate,
  .setup = ofb_setup
};
/* This implementation of CTR mode comes from NIST recommendation,
which is different in significant details from AES-CTR used by TLS
and IPsec (which are even mutually different). CTR mode can use
various additional data from underlying protocol, which
unfortunately means that each protocol uses completely different
method of construing CTR value */
/* CTR state: ctr is the current counter block (incremented per block). */
typedef struct ctr_context_t {
  dfsch_block_cipher_mode_context_t parent;
  uint8_t* ctr;
} ctr_context_t;
/* Copy the initial counter value; its length must equal the block size. */
static void ctr_setup(ctr_context_t* context,
                      uint8_t* iv,
                      size_t iv_len){
  if (iv_len != context->parent.cipher->cipher->block_size){
    dfsch_error("CTR IV length must be equal to block size", NULL);
  }
  context->ctr = GC_MALLOC_ATOMIC(iv_len);
  memcpy(context->ctr, iv, iv_len);
}
/* CTR is symmetric: output = input XOR E(ctr), then increment ctr.
   The same routine therefore serves as both encrypt and decrypt. */
static void ctr_operate(ctr_context_t* context,
                        uint8_t* in,
                        uint8_t* out,
                        size_t blocks){
  size_t bsize = context->parent.cipher->cipher->block_size;
  uint8_t keystream[bsize];
  int i;
  int j;
  for (i = 0; i < blocks; i++){
    context->parent.cipher->cipher->encrypt(context->parent.cipher,
                                            context->ctr,
                                            keystream);
    memcpy(out + (bsize * i), in + (bsize * i), bsize);
    memxor(out + (bsize * i), keystream, bsize);
    /* Increment counter, little endian */
    j = 0;
    while (j < bsize && ++context->ctr[j] == 0){
      j++;
    }
  }
}
/* Mode descriptor for CTR (counter mode); encrypt == decrypt. */
dfsch_block_cipher_mode_t dfsch_crypto_ctr_mode = {
  .type = {
    .type = DFSCH_BLOCK_CIPHER_MODE_TYPE,
    .name = "crypto:ctr",
    .size = sizeof(ctr_context_t),
  },
  .name = "CTR",
  .encrypt = ctr_operate,
  .decrypt = ctr_operate,
  .setup = ctr_setup
};
/* A stream-cipher "type" object wrapping a block cipher (used by the
   OFB/CTR stream adapters below). */
typedef struct block_stream_mode_t {
  dfsch_stream_cipher_t parent;
  dfsch_block_cipher_t* cipher;
} block_stream_mode_t;
dfsch_type_t dfsch_block_stream_mode_type = {
  .type = DFSCH_META_TYPE,
  .superclass = DFSCH_STREAM_CIPHER_TYPE,
  .name = "block-stream-mode",
  .size = sizeof(block_stream_mode_t),
};
/* Per-instance state of a block-cipher-based stream cipher: the block
   cipher context plus one buffered keystream block (last_output) consumed
   byte-by-byte via output_offset. */
typedef struct block_stream_context_t {
  block_stream_mode_t* mode;
  dfsch_block_cipher_context_t* cipher;
  uint8_t* next_input;
  uint8_t* last_output;
  size_t output_offset;
  size_t output_size;
} block_stream_context_t;
/* Initialize OFB stream state: the nonce seeds the first cipher input. */
static void bs_ofb_setup(block_stream_context_t* ctx,
                         uint8_t *key,
                         size_t keylen,
                         uint8_t *nonce,
                         size_t nonce_len){
  size_t bsize = ctx->mode->cipher->block_size;
  if (nonce_len != bsize){
    dfsch_error("Nonce for OFB mode must be same size as cipher's block",
                NULL);
  }
  ctx->cipher = dfsch_setup_block_cipher(ctx->mode->cipher, key, keylen);
  ctx->next_input = GC_MALLOC_ATOMIC(bsize);
  ctx->last_output = GC_MALLOC_ATOMIC(bsize);
  /* Mark the buffered keystream block as exhausted so the first
     encrypt_bytes call generates a fresh one. */
  ctx->output_offset = bsize;
  ctx->output_size = bsize;
  memcpy(ctx->next_input, nonce, bsize);
}
/* XOR the OFB keystream into out, regenerating one block at a time. */
static void bs_ofb_encrypt_bytes(block_stream_context_t* ctx,
                                 uint8_t* out,
                                 size_t outlen){
  size_t n;
  for (n = 0; n < outlen; n++){
    if (ctx->output_offset >= ctx->output_size){
      /* Keystream exhausted: O_i = E(O_{i-1}). */
      ctx->cipher->cipher->encrypt(ctx->cipher,
                                   ctx->next_input,
                                   ctx->last_output);
      memcpy(ctx->next_input, ctx->last_output, ctx->output_size);
      ctx->output_offset = 0;
    }
    out[n] ^= ctx->last_output[ctx->output_offset];
    ctx->output_offset++;
  }
}
/* Wrap a block cipher as a stream cipher running in OFB mode. */
dfsch_stream_cipher_t* dfsch_make_ofb_cipher(dfsch_block_cipher_t* cipher){
  block_stream_mode_t* bs = dfsch_make_object(DFSCH_BLOCK_STREAM_MODE_TYPE);
  bs->parent.name = dfsch_saprintf("%s in OFB mode",
                                   cipher->name);
  bs->parent.type.name = dfsch_saprintf("%s-ofb", cipher->type.name);
  bs->parent.type.size = sizeof(block_stream_context_t);
  bs->parent.setup = bs_ofb_setup;
  bs->parent.encrypt_bytes = bs_ofb_encrypt_bytes;
  /* Fix: store the wrapped cipher; bs_ofb_setup reads it via
     ctx->mode->cipher and it was never assigned before. */
  bs->cipher = cipher;
  /* parent is the first member, so its address is the object address;
     return it to avoid an incompatible-pointer-type conversion. */
  return &bs->parent;
}
/* Initialize CTR stream state: the nonce is the initial counter block. */
static void bs_ctr_setup(block_stream_context_t* ctx,
                         uint8_t *key,
                         size_t keylen,
                         uint8_t *nonce,
                         size_t nonce_len){
  if (nonce_len != ctx->mode->cipher->block_size){
    /* Fixed copy/paste error: this is CTR mode, not OFB. */
    dfsch_error("Nonce for CTR mode must be same size as cipher's block",
                NULL);
  }
  ctx->cipher = dfsch_setup_block_cipher(ctx->mode->cipher, key, keylen);
  ctx->next_input = GC_MALLOC_ATOMIC(ctx->mode->cipher->block_size);
  ctx->last_output = GC_MALLOC_ATOMIC(ctx->mode->cipher->block_size);
  /* Mark the buffered keystream block as exhausted so the first
     encrypt_bytes call generates a fresh one. */
  ctx->output_offset = ctx->mode->cipher->block_size;
  ctx->output_size = ctx->mode->cipher->block_size;
  memcpy(ctx->next_input, nonce, ctx->output_size);
}
/* XOR the CTR keystream into out; the counter block is incremented
   little-endian after each block encryption. */
static void bs_ctr_encrypt_bytes(block_stream_context_t* ctx,
                                 uint8_t* out,
                                 size_t outlen){
  size_t n;
  int i;
  for (n = 0; n < outlen; n++){
    if (ctx->output_offset >= ctx->output_size){
      ctx->cipher->cipher->encrypt(ctx->cipher,
                                   ctx->next_input,
                                   ctx->last_output);
      /* Increment counter, little endian. */
      i = 0;
      while (i < ctx->output_size && ++ctx->next_input[i] == 0){
        i++;
      }
      ctx->output_offset = 0;
    }
    out[n] ^= ctx->last_output[ctx->output_offset];
    ctx->output_offset++;
  }
}
/* Wrap a block cipher as a stream cipher running in CTR mode. */
dfsch_stream_cipher_t* dfsch_make_ctr_cipher(dfsch_block_cipher_t* cipher){
  block_stream_mode_t* bs = dfsch_make_object(DFSCH_BLOCK_STREAM_MODE_TYPE);
  bs->parent.name = dfsch_saprintf("%s in CTR mode",
                                   cipher->name);
  bs->parent.type.name = dfsch_saprintf("%s-ctr", cipher->type.name);
  bs->parent.type.size = sizeof(block_stream_context_t);
  bs->parent.setup = bs_ctr_setup;
  bs->parent.encrypt_bytes = bs_ctr_encrypt_bytes;
  /* Fix: store the wrapped cipher; bs_ctr_setup reads it via
     ctx->mode->cipher and it was never assigned before. */
  bs->cipher = cipher;
  /* parent is the first member, so its address is the object address;
     return it to avoid an incompatible-pointer-type conversion. */
  return &bs->parent;
}
|
adh/dfsch
|
lib/crypto/modes.c
|
C
|
gpl-2.0
| 12,669
|
/*
* arch/arm/mach-tegra/tegra3_dvfs.c
*
* Copyright (C) 2010-2011 NVIDIA Corporation.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/kobject.h>
#include <linux/err.h>
#include "clock.h"
#include "dvfs.h"
#include "fuse.h"
#include "board.h"
#include "tegra3_emc.h"
/* Runtime switches disabling DVFS per rail. */
static bool tegra_dvfs_cpu_disabled;
static bool tegra_dvfs_core_disabled;
/* DVFS table entry selected for this chip's speedo/process IDs. */
static struct dvfs *cpu_dvfs;
/* CPU voltage ladder (mV), one entry per frequency step. */
static const int cpu_millivolts[MAX_DVFS_FREQS] = {
	750, 800, 825, 850, 875, 900, 950, 975, 1000, 1025, 1050, 1100, 1200, 1275, 1275, 1275, 1300, 1325};
//750, 800, 825, 850, 875, 912, 975, 1000, 1025, 1050, 1075, 1100, 1150, 1200, 1212, 1225, 1250, 1300};
/* Frequency offsets (MHz) applied when the chip is cold. */
static const unsigned int cpu_cold_offs_mhz[MAX_DVFS_FREQS] = {
	 50,  50,  50,  50,  50,  50,  50,  50,  50,  50,  50,  50,  50,  50,  50, 50, 50, 50};
/* Core voltage ladder (mV), one entry per frequency step. */
static const int core_millivolts[MAX_DVFS_FREQS] = {
	900, 1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350};
#define KHZ 1000
#define MHZ 1000000
/* VDD_CPU >= (VDD_CORE - cpu_below_core) */
/* VDD_CORE >= min_level(VDD_CPU), see tegra3_get_core_floor_mv() below */
#define VDD_CPU_BELOW_VDD_CORE 300
static int cpu_below_core = VDD_CPU_BELOW_VDD_CORE;
/* Maximum step (mV) the regulator is allowed to jump in one transition. */
#define VDD_SAFE_STEP 100
static struct dvfs_rail tegra3_dvfs_rail_vdd_cpu = {
	.reg_id = "vdd_cpu",
	.max_millivolts = 1300,
	.min_millivolts = 750,
	.step = VDD_SAFE_STEP,
	.jmp_to_zero = true,
};
static struct dvfs_rail tegra3_dvfs_rail_vdd_core = {
	.reg_id = "vdd_core",
	.max_millivolts = 1350,
	.min_millivolts = 900,
	.step = VDD_SAFE_STEP,
};
static struct dvfs_rail *tegra3_dvfs_rails[] = {
	&tegra3_dvfs_rail_vdd_cpu,
	&tegra3_dvfs_rail_vdd_core,
};
/* Lowest allowed vdd_core (mV) for a given vdd_cpu voltage (mV).
 * BUG()s on out-of-range voltages (> 1250 mV on non-capped SKUs). */
static int tegra3_get_core_floor_mv(int cpu_mv)
{
	int speedo;

	if (cpu_mv < 800)
		return 950;
	if (cpu_mv < 900)
		return 1000;
	if (cpu_mv < 1000)
		return 1100;

	/* These speedo IDs are capped at a 1200 mV core floor. */
	speedo = tegra_cpu_speedo_id();
	if (speedo < 2 || speedo == 4 || speedo == 7 || speedo == 8)
		return 1200;

	if (cpu_mv < 1100)
		return 1200;
	if (cpu_mv <= 1250)
		return 1300;
	BUG();
}
/* vdd_core must be >= min_level as a function of vdd_cpu */
static int tegra3_dvfs_rel_vdd_cpu_vdd_core(struct dvfs_rail *vdd_cpu,
	struct dvfs_rail *vdd_core)
{
	int cpu_mv = max(vdd_cpu->new_millivolts, vdd_cpu->millivolts);
	int core_floor = tegra3_get_core_floor_mv(cpu_mv);
	return max(vdd_core->new_millivolts, core_floor);
}
/* vdd_cpu must be >= (vdd_core - cpu_below_core) */
static int tegra3_dvfs_rel_vdd_core_vdd_cpu(struct dvfs_rail *vdd_core,
	struct dvfs_rail *vdd_cpu)
{
	int core_mv, cpu_floor;

	/* If G CPU is off, core relations can be ignored */
	if (vdd_cpu->new_millivolts == 0)
		return 0;

	core_mv = max(vdd_core->new_millivolts, vdd_core->millivolts);
	cpu_floor = core_mv - cpu_below_core;
	return max(vdd_cpu->new_millivolts, cpu_floor);
}
/* Cross-rail voltage constraints solved when either rail changes. */
static struct dvfs_relationship tegra3_dvfs_relationships[] = {
	{
		/* vdd_core floor follows vdd_cpu */
		.from = &tegra3_dvfs_rail_vdd_cpu,
		.to = &tegra3_dvfs_rail_vdd_core,
		.solve = tegra3_dvfs_rel_vdd_cpu_vdd_core,
		.solved_at_nominal = true,
	},
	{
		/* vdd_cpu floor follows vdd_core */
		.from = &tegra3_dvfs_rail_vdd_core,
		.to = &tegra3_dvfs_rail_vdd_cpu,
		.solve = tegra3_dvfs_rel_vdd_core_vdd_cpu,
	},
};
/* Build a struct dvfs entry on the vdd_cpu rail: one frequency (in _mult
 * units) per entry of cpu_millivolts[]. */
#define CPU_DVFS(_clk_name, _speedo_id, _process_id, _mult, _freqs...) \
	{ \
		.clk_name = _clk_name, \
		.speedo_id = _speedo_id, \
		.process_id = _process_id, \
		.freqs = {_freqs}, \
		.freqs_mult = _mult, \
		.millivolts = cpu_millivolts, \
		.auto_dvfs = true, \
		.dvfs_rail = &tegra3_dvfs_rail_vdd_cpu, \
	}
/* Per-SKU CPU frequency ladders, selected by (speedo_id, process_id).
 * Each row gives the maximum frequency at the corresponding entry of
 * cpu_millivolts[]; a value of 1 marks a voltage step as unusable. */
static struct dvfs cpu_dvfs_table[] = {
	/* Cpu voltages (mV):	     800,  825,  850,  875,  900,  912,  975, 1000, 1025, 1050, 1075, 1100, 1125, 1150, 1175, 1200, 1212, 1237 */
	CPU_DVFS("cpu_g",  0, 0, MHZ,   1,   1, 684, 684, 817, 817, 1026, 1102, 1149, 1187, 1225, 1282, 1300),
	CPU_DVFS("cpu_g",  0, 1, MHZ,   1,   1, 807, 807, 948, 948, 1117, 1171, 1206, 1300),
	CPU_DVFS("cpu_g",  0, 2, MHZ,   1,   1, 883, 883, 1039, 1039, 1178, 1206, 1300),
	CPU_DVFS("cpu_g",  0, 3, MHZ,   1,   1, 931, 931, 1102, 1102, 1216, 1300),
	CPU_DVFS("cpu_g",  1, 0, MHZ, 460, 460, 550, 550, 680, 680, 820, 970, 1040, 1080, 1150, 1200, 1280, 1300),
	CPU_DVFS("cpu_g",  1, 1, MHZ, 480, 480, 650, 650, 780, 780, 990, 1040, 1100, 1200, 1300),
	CPU_DVFS("cpu_g",  1, 2, MHZ, 520, 520, 700, 700, 860, 860, 1050, 1150, 1200, 1300),
	CPU_DVFS("cpu_g",  1, 3, MHZ, 550, 550, 770, 770, 910, 910, 1150, 1230, 1300),
	CPU_DVFS("cpu_g",  2, 1, MHZ, 480, 480, 650, 650, 780, 780, 990, 1040, 1100, 1200, 1250, 1300, 1330, 1400),
	CPU_DVFS("cpu_g",  2, 2, MHZ, 520, 520, 700, 700, 860, 860, 1050, 1150, 1200, 1280, 1300, 1350, 1400),
	CPU_DVFS("cpu_g",  2, 3, MHZ, 550, 550, 770, 770, 910, 910, 1150, 1230, 1280, 1300, 1350, 1400),
	CPU_DVFS("cpu_g",  3, 1, MHZ, 480, 480, 650, 650, 780, 780, 990, 1040, 1100, 1200, 1250, 1300, 1330, 1400),
	CPU_DVFS("cpu_g",  3, 2, MHZ, 520, 520, 700, 700, 860, 860, 1050, 1150, 1200, 1280, 1300, 1350, 1400),
	CPU_DVFS("cpu_g",  3, 3, MHZ, 550, 550, 770, 770, 910, 910, 1150, 1230, 1280, 1300, 1350, 1400),
	CPU_DVFS("cpu_g",  7, 0, MHZ, 460, 460, 550, 550, 680, 680, 820, 970, 1040, 1080, 1150, 1200, 1240, 1280, 1320, 1480, 1500, 1600),
	CPU_DVFS("cpu_g",  7, 1, MHZ, 480, 480, 650, 650, 780, 780, 990, 1040, 1100, 1200, 1250, 1300, 1330, 1480, 1500, 1600),
	CPU_DVFS("cpu_g",  7, 2, MHZ, 520, 520, 700, 700, 860, 860, 1050, 1150, 1200, 1280, 1300, 1480, 1500, 1600),
	CPU_DVFS("cpu_g",  7, 3, MHZ, 550, 550, 770, 770, 910, 910, 1150, 1230, 1280, 1330, 1480, 1500, 1600),
	CPU_DVFS("cpu_g",  5, 2, MHZ, 550, 550, 770, 770, 910, 910, 1150, 1230, 1280, 1330, 1370, 1400, 1470, 1500, 1540, 1600, 1650, 1700),
	CPU_DVFS("cpu_g",  5, 3, MHZ, 550, 550, 770, 770, 910, 910, 1150, 1230, 1280, 1330, 1370, 1400, 1470, 1500, 1500, 1540, 1540, 1700),
	CPU_DVFS("cpu_g",  5, 4, MHZ, 550, 550, 770, 770, 940, 940, 1160, 1240, 1280, 1360, 1390, 1470, 1500, 1520, 1520, 1590, 1700),
	CPU_DVFS("cpu_g",  6, 3, MHZ, 550, 550, 770, 770, 910, 910, 1150, 1230, 1280, 1330, 1370, 1400, 1470, 1500, 1500, 1540, 1540, 1700),
	CPU_DVFS("cpu_g",  6, 4, MHZ, 550, 550, 770, 770, 940, 940, 1160, 1240, 1280, 1360, 1390, 1470, 1500, 1520, 1520, 1590, 1700),
	CPU_DVFS("cpu_g",  4, 0, MHZ, 460, 460, 550, 550, 680, 680, 820, 970, 1040, 1080, 1150, 1200, 1240, 1280, 1320, 1360, 1600),
	CPU_DVFS("cpu_g",  4, 1, MHZ, 480, 480, 650, 650, 780, 780, 990, 1040, 1100, 1200, 1250, 1300, 1330, 1360, 1500, 1600),
	CPU_DVFS("cpu_g",  4, 2, MHZ, 520, 520, 700, 700, 860, 860, 1050, 1150, 1200, 1280, 1300, 1340, 1480, 1600),
	CPU_DVFS("cpu_g",  4, 3, MHZ, 550, 550, 770, 770, 910, 910, 1150, 1230, 1270, 1300, 1340, 1480, 1600),
	CPU_DVFS("cpu_g",  4, 4, MHZ, 550, 550, 770, 770, 940, 940, 1160, 1300, 1340, 1480, 1600),
	CPU_DVFS("cpu_g",  8, 0, MHZ, 460, 460, 550, 550, 680, 680, 820, 970, 1040, 1080, 1150, 1200, 1280, 1300),
	CPU_DVFS("cpu_g",  8, 1, MHZ, 480, 480, 650, 650, 780, 780, 990, 1040, 1100, 1200, 1300),
	CPU_DVFS("cpu_g",  8, 2, MHZ, 520, 520, 700, 700, 860, 860, 1050, 1150, 1200, 1300),
	CPU_DVFS("cpu_g",  8, 3, MHZ, 550, 550, 770, 770, 910, 910, 1150, 1230, 1300),
	CPU_DVFS("cpu_g",  8, 4, MHZ, 550, 550, 770, 770, 940, 940, 1160, 1300),
	CPU_DVFS("cpu_g",  9, -1, MHZ,  1,   1,   1,   1,   1, 900, 900, 900, 900, 900, 900, 900, 900, 900),
	CPU_DVFS("cpu_g", 10, -1, MHZ,  1,   1, 900, 900, 900, 900, 900, 900, 900, 900, 900, 900, 900, 900),
	CPU_DVFS("cpu_g", 11, -1, MHZ,  1,   1, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600),
	CPU_DVFS("cpu_g", 12, 3, MHZ, 550, 550, 770, 770, 910, 910, 1150, 1230, 1280, 1330, 1370, 1400, 1470, 1500, 1500, 1540, 1540, 1700),
	CPU_DVFS("cpu_g", 12, 4, MHZ, 550, 550, 770, 770, 940, 940, 1160, 1240, 1280, 1360, 1390, 1470, 1500, 1520, 1520, 1590, 1700),
	CPU_DVFS("cpu_g", 13, 3, MHZ, 550, 550, 770, 770, 910, 910, 1150, 1230, 1280, 1330, 1370, 1400, 1470, 1500, 1500, 1540, 1540, 1700),
	CPU_DVFS("cpu_g", 13, 4, MHZ, 550, 550, 770, 770, 940, 940, 1160, 1240, 1280, 1360, 1390, 1470, 1500, 1520, 1520, 1590, 1700),
	/*
	 * "Safe entry" to be used when no match for chip speedo, process
	 * corner is found (just to boot at low rate); must be the last one
	 */
	CPU_DVFS("cpu_g", -1, -1, MHZ,  1,   1, 216, 216, 300),
};
/* Build a struct dvfs entry on the vdd_core rail: one frequency (in _mult
 * units) per entry of core_millivolts[]; process_id is not used (-1). */
#define CORE_DVFS(_clk_name, _speedo_id, _auto, _mult, _freqs...) \
	{ \
		.clk_name = _clk_name, \
		.speedo_id = _speedo_id, \
		.process_id = -1, \
		.freqs = {_freqs}, \
		.freqs_mult = _mult, \
		.millivolts = core_millivolts, \
		.auto_dvfs = _auto, \
		.dvfs_rail = &tegra3_dvfs_rail_vdd_core, \
	}
/*
 * VDD_CORE dvfs ladder: one row per (clock, soc speedo id); each column
 * corresponds to one step of core_millivolts (see the mV comments
 * below). Entries of 1 appear to be placeholders for voltage steps at
 * which the clock is not supported -- TODO(review): confirm against the
 * dvfs core code before relying on that.
 */
static struct dvfs core_dvfs_table[] = {
/* Core voltages (mV): 950, 1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350 */
/* Clock limits for internal blocks, PLLs */
CORE_DVFS("cpu_lp", 0, 1, KHZ, 1, 294000, 342000, 427000, 475000, 500000, 500000, 500000, 500000),
CORE_DVFS("cpu_lp", 1, 1, KHZ, 204000, 294000, 342000, 427000, 475000, 500000, 500000, 500000, 500000),
CORE_DVFS("cpu_lp", 2, 1, KHZ, 204000, 295000, 370000, 428000, 475000, 513000, 579000, 620000, 620000),
CORE_DVFS("cpu_lp", 3, 1, KHZ, 1, 1, 1, 1, 1, 1, 450000, 450000, 450000),
CORE_DVFS("emc", 0, 1, KHZ, 1, 266500, 266500, 266500, 266500, 533000, 533000, 533000, 533000),
CORE_DVFS("emc", 1, 1, KHZ, 102000, 408000, 408000, 408000, 416000, 750000, 750000, 750000, 750000),
CORE_DVFS("emc", 2, 1, KHZ, 102000, 408000, 408000, 408000, 416000, 750000, 750000, 800000, 900000),
CORE_DVFS("emc", 3, 1, KHZ, 1, 1, 1, 1, 1, 1, 625000, 625000, 625000),
CORE_DVFS("sbus", 0, 1, KHZ, 1, 136000, 164000, 191000, 216000, 216000, 216000, 216000, 216000),
CORE_DVFS("sbus", 1, 1, KHZ, 51000, 205000, 205000, 227000, 227000, 267000, 267000, 267000, 267000),
CORE_DVFS("sbus", 2, 1, KHZ, 51000, 205000, 205000, 227000, 227000, 267000, 334000, 334000, 334000),
CORE_DVFS("sbus", 3, 1, KHZ, 1, 1, 1, 1, 1, 1, 378000, 378000, 378000),
CORE_DVFS("vi", 0, 1, KHZ, 1, 216000, 285000, 300000, 300000, 300000, 300000, 300000, 300000),
CORE_DVFS("vi", 1, 1, KHZ, 1, 216000, 267000, 300000, 371000, 409000, 409000, 409000, 409000),
CORE_DVFS("vi", 2, 1, KHZ, 1, 219000, 267000, 300000, 371000, 409000, 425000, 425000, 425000),
CORE_DVFS("vi", 3, 1, KHZ, 1, 1, 1, 1, 1, 1, 470000, 470000, 470000),
CORE_DVFS("vde", 0, 1, KHZ, 1, 228000, 275000, 332000, 380000, 416000, 416000, 416000, 416000),
CORE_DVFS("mpe", 0, 1, KHZ, 1, 234000, 285000, 332000, 380000, 416000, 416000, 416000, 416000),
CORE_DVFS("2d", 0, 1, KHZ, 1, 267000, 285000, 332000, 380000, 416000, 416000, 416000, 416000),
CORE_DVFS("epp", 0, 1, KHZ, 1, 267000, 285000, 332000, 380000, 416000, 416000, 416000, 416000),
CORE_DVFS("3d", 0, 1, KHZ, 1, 234000, 285000, 332000, 380000, 416000, 416000, 416000, 416000),
CORE_DVFS("3d2", 0, 1, KHZ, 1, 234000, 285000, 332000, 380000, 416000, 416000, 416000, 416000),
CORE_DVFS("se", 0, 1, KHZ, 1, 267000, 285000, 332000, 380000, 416000, 416000, 416000, 416000),
CORE_DVFS("vde", 1, 1, KHZ, 1, 228000, 275000, 332000, 380000, 416000, 484000, 520000, 666000),
CORE_DVFS("mpe", 1, 1, KHZ, 1, 234000, 285000, 332000, 380000, 416000, 484000, 484000, 484000),
CORE_DVFS("2d", 1, 1, KHZ, 1, 267000, 285000, 332000, 380000, 416000, 484000, 484000, 484000),
CORE_DVFS("epp", 1, 1, KHZ, 1, 267000, 285000, 332000, 380000, 416000, 484000, 484000, 484000),
CORE_DVFS("3d", 1, 1, KHZ, 1, 234000, 285000, 332000, 380000, 416000, 484000, 484000, 484000),
CORE_DVFS("3d2", 1, 1, KHZ, 1, 234000, 285000, 332000, 380000, 416000, 484000, 484000, 484000),
CORE_DVFS("se", 1, 1, KHZ, 1, 267000, 285000, 332000, 380000, 416000, 484000, 484000, 484000),
CORE_DVFS("vde", 2, 1, KHZ, 1, 247000, 304000, 352000, 400000, 437000, 484000, 520000, 600000),
CORE_DVFS("mpe", 2, 1, KHZ, 1, 247000, 304000, 361000, 408000, 446000, 484000, 520000, 600000),
CORE_DVFS("2d", 2, 1, KHZ, 1, 267000, 304000, 361000, 408000, 446000, 484000, 520000, 600000),
CORE_DVFS("epp", 2, 1, KHZ, 1, 267000, 304000, 361000, 408000, 446000, 484000, 520000, 600000),
CORE_DVFS("3d", 2, 1, KHZ, 1, 247000, 304000, 361000, 408000, 446000, 484000, 520000, 600000),
CORE_DVFS("3d2", 2, 1, KHZ, 1, 247000, 304000, 361000, 408000, 446000, 484000, 520000, 600000),
CORE_DVFS("se", 2, 1, KHZ, 1, 267000, 304000, 361000, 408000, 446000, 484000, 520000, 600000),
CORE_DVFS("vde", 3, 1, KHZ, 1, 1, 1, 1, 1, 1, 484000, 484000, 484000),
CORE_DVFS("mpe", 3, 1, KHZ, 1, 1, 1, 1, 1, 1, 484000, 484000, 484000),
CORE_DVFS("2d", 3, 1, KHZ, 1, 1, 1, 1, 1, 1, 484000, 484000, 484000),
CORE_DVFS("epp", 3, 1, KHZ, 1, 1, 1, 1, 1, 1, 484000, 484000, 484000),
CORE_DVFS("3d", 3, 1, KHZ, 1, 1, 1, 1, 1, 1, 484000, 484000, 484000),
CORE_DVFS("3d2", 3, 1, KHZ, 1, 1, 1, 1, 1, 1, 484000, 484000, 484000),
CORE_DVFS("se", 3, 1, KHZ, 1, 1, 1, 1, 1, 1, 625000, 625000, 625000),
CORE_DVFS("host1x", 0, 1, KHZ, 1, 152000, 188000, 222000, 254000, 267000, 267000, 267000, 267000),
CORE_DVFS("host1x", 1, 1, KHZ, 1, 152000, 188000, 222000, 254000, 267000, 267000, 267000, 267000),
CORE_DVFS("host1x", 2, 1, KHZ, 1, 152000, 188000, 222000, 254000, 267000, 267000, 267000, 300000),
CORE_DVFS("host1x", 3, 1, KHZ, 1, 1, 1, 1, 1, 1, 242000, 242000, 242000),
CORE_DVFS("cbus", 0, 1, KHZ, 1, 228000, 275000, 332000, 380000, 416000, 416000, 416000, 416000),
CORE_DVFS("cbus", 1, 1, KHZ, 1, 267000, 304000, 380000, 416000, 484000, 484000, 484000, 484000),
CORE_DVFS("cbus", 2, 1, KHZ, 1, 247000, 304000, 352000, 400000, 437000, 484000, 520000, 600000),
CORE_DVFS("cbus", 3, 1, KHZ, 1, 484000, 484000, 484000, 484000, 484000, 484000, 484000, 484000),
CORE_DVFS("pll_c", -1, 1, KHZ, 533000, 667000, 667000, 800000, 800000, 1066000, 1066000, 1066000, 1200000),
/*
 * PLLM dvfs is common across all speedo IDs with one special exception
 * for T30 and T33, rev A02+, provided PLLM usage is restricted. Both
 * common and restricted table are included, and table selection is
 * handled by is_pllm_dvfs() below.
 */
CORE_DVFS("pll_m", -1, 1, KHZ, 533000, 667000, 667000, 800000, 800000, 1066000, 1066000, 1066000, 1066000),
#ifdef CONFIG_TEGRA_PLLM_RESTRICTED
CORE_DVFS("pll_m", 2, 1, KHZ, 533000, 800000, 800000, 800000, 800000, 1066000, 1066000, 1066000, 1066000),
#endif
/* Core voltages (mV): 950, 1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350 */
/* Clock limits for I/O peripherals */
CORE_DVFS("mipi", 0, 1, KHZ, 1, 1, 1, 1, 1, 1, 1, 1, 1),
CORE_DVFS("mipi", 1, 1, KHZ, 1, 1, 1, 1, 1, 60000, 60000, 60000, 60000),
CORE_DVFS("mipi", 2, 1, KHZ, 1, 1, 1, 1, 1, 60000, 60000, 60000, 60000),
CORE_DVFS("mipi", 3, 1, KHZ, 1, 1, 1, 1, 1, 1, 1, 1, 1),
CORE_DVFS("fuse_burn", -1, 1, KHZ, 1, 1, 1, 1, 26000, 26000, 26000, 26000, 26000),
CORE_DVFS("sdmmc1", -1, 1, KHZ, 104000, 104000, 104000, 104000, 104000, 208000, 208000, 208000, 208000),
CORE_DVFS("sdmmc3", -1, 1, KHZ, 104000, 104000, 104000, 104000, 104000, 208000, 208000, 208000, 208000),
CORE_DVFS("ndflash", -1, 1, KHZ, 1, 120000, 120000, 120000, 200000, 200000, 200000, 200000, 200000),
CORE_DVFS("nor", 0, 1, KHZ, 1, 115000, 130000, 130000, 133000, 133000, 133000, 133000, 133000),
CORE_DVFS("nor", 1, 1, KHZ, 1, 115000, 130000, 130000, 133000, 133000, 133000, 133000, 133000),
CORE_DVFS("nor", 2, 1, KHZ, 1, 115000, 130000, 130000, 133000, 133000, 133000, 133000, 133000),
CORE_DVFS("nor", 3, 1, KHZ, 1, 1, 1, 1, 1, 1, 108000, 108000, 108000),
CORE_DVFS("sbc1", -1, 1, KHZ, 1, 52000, 60000, 60000, 60000, 100000, 100000, 100000, 100000),
CORE_DVFS("sbc2", -1, 1, KHZ, 1, 52000, 60000, 60000, 60000, 100000, 100000, 100000, 100000),
CORE_DVFS("sbc3", -1, 1, KHZ, 1, 52000, 60000, 60000, 60000, 100000, 100000, 100000, 100000),
CORE_DVFS("sbc4", -1, 1, KHZ, 1, 52000, 60000, 60000, 60000, 100000, 100000, 100000, 100000),
CORE_DVFS("sbc5", -1, 1, KHZ, 1, 52000, 60000, 60000, 60000, 100000, 100000, 100000, 100000),
CORE_DVFS("sbc6", -1, 1, KHZ, 1, 52000, 60000, 60000, 60000, 100000, 100000, 100000, 100000),
CORE_DVFS("usbd", -1, 1, KHZ, 1, 480000, 480000, 480000, 480000, 480000, 480000, 480000, 480000),
CORE_DVFS("usb2", -1, 1, KHZ, 1, 480000, 480000, 480000, 480000, 480000, 480000, 480000, 480000),
CORE_DVFS("usb3", -1, 1, KHZ, 1, 480000, 480000, 480000, 480000, 480000, 480000, 480000, 480000),
CORE_DVFS("sata", -1, 1, KHZ, 1, 216000, 216000, 216000, 216000, 216000, 216000, 216000, 216000),
CORE_DVFS("sata_oob", -1, 1, KHZ, 1, 216000, 216000, 216000, 216000, 216000, 216000, 216000, 216000),
CORE_DVFS("pcie", -1, 1, KHZ, 1, 250000, 250000, 250000, 250000, 250000, 250000, 250000, 250000),
CORE_DVFS("afi", -1, 1, KHZ, 1, 250000, 250000, 250000, 250000, 250000, 250000, 250000, 250000),
CORE_DVFS("pll_e", -1, 1, KHZ, 1, 100000, 100000, 100000, 100000, 100000, 100000, 100000, 100000),
CORE_DVFS("tvdac", -1, 1, KHZ, 1, 220000, 220000, 220000, 220000, 220000, 220000, 220000, 220000),
CORE_DVFS("tvo", -1, 1, KHZ, 1, 1, 297000, 297000, 297000, 297000, 297000, 297000, 297000),
CORE_DVFS("cve", -1, 1, KHZ, 1, 1, 297000, 297000, 297000, 297000, 297000, 297000, 297000),
CORE_DVFS("dsia", -1, 1, KHZ, 1, 275000, 275000, 275000, 275000, 275000, 275000, 275000, 275000),
CORE_DVFS("dsib", -1, 1, KHZ, 1, 275000, 275000, 275000, 275000, 275000, 275000, 275000, 275000),
CORE_DVFS("hdmi", -1, 1, KHZ, 1, 148500, 148500, 148500, 148500, 148500, 148500, 148500, 148500),
/*
 * The clock rate for the display controllers that determines the
 * necessary core voltage depends on a divider that is internal
 * to the display block. Disable auto-dvfs on the display clocks,
 * and let the display driver call tegra_dvfs_set_rate manually
 */
CORE_DVFS("disp1", 0, 0, KHZ, 1, 120000, 120000, 120000, 120000, 190000, 190000, 190000, 190000),
CORE_DVFS("disp1", 1, 0, KHZ, 1, 155000, 268000, 268000, 268000, 268000, 268000, 268000, 268000),
CORE_DVFS("disp1", 2, 0, KHZ, 1, 155000, 268000, 268000, 268000, 268000, 268000, 268000, 268000),
CORE_DVFS("disp1", 3, 0, KHZ, 1, 120000, 120000, 120000, 120000, 190000, 190000, 190000, 190000),
CORE_DVFS("disp2", 0, 0, KHZ, 1, 120000, 120000, 120000, 120000, 190000, 190000, 190000, 190000),
CORE_DVFS("disp2", 1, 0, KHZ, 1, 155000, 268000, 268000, 268000, 268000, 268000, 268000, 268000),
CORE_DVFS("disp2", 2, 0, KHZ, 1, 155000, 268000, 268000, 268000, 268000, 268000, 268000, 268000),
CORE_DVFS("disp2", 3, 0, KHZ, 1, 120000, 120000, 120000, 120000, 190000, 190000, 190000, 190000),
CORE_DVFS("pwm", -1, 1, KHZ, 1, 408000, 408000, 408000, 408000, 408000, 408000, 408000, 408000),
CORE_DVFS("spdif_out", -1, 1, KHZ, 1, 26000, 26000, 26000, 26000, 26000, 26000, 26000, 26000),
};
/*
 * sysfs "set" handler for the disable_core module parameter: parse the
 * boolean, then disable or (re-)enable the VDD_CORE rail accordingly.
 */
int tegra_dvfs_disable_core_set(const char *arg, const struct kernel_param *kp)
{
        int err = param_set_bool(arg, kp);

        if (err)
                return err;

        if (!tegra_dvfs_core_disabled)
                tegra_dvfs_rail_enable(&tegra3_dvfs_rail_vdd_core);
        else
                tegra_dvfs_rail_disable(&tegra3_dvfs_rail_vdd_core);

        return 0;
}
/*
 * sysfs "set" handler for the disable_cpu module parameter: parse the
 * boolean, then disable or (re-)enable the VDD_CPU rail accordingly.
 */
int tegra_dvfs_disable_cpu_set(const char *arg, const struct kernel_param *kp)
{
        int err = param_set_bool(arg, kp);

        if (err)
                return err;

        if (!tegra_dvfs_cpu_disabled)
                tegra_dvfs_rail_enable(&tegra3_dvfs_rail_vdd_cpu);
        else
                tegra_dvfs_rail_disable(&tegra3_dvfs_rail_vdd_cpu);

        return 0;
}
int tegra_dvfs_disable_get(char *buffer, const struct kernel_param *kp)
{
return param_get_bool(buffer, kp);
}
/* Param ops wiring the module parameters below to the handlers above. */
static struct kernel_param_ops tegra_dvfs_disable_core_ops = {
.set = tegra_dvfs_disable_core_set,
.get = tegra_dvfs_disable_get,
};
static struct kernel_param_ops tegra_dvfs_disable_cpu_ops = {
.set = tegra_dvfs_disable_cpu_set,
.get = tegra_dvfs_disable_get,
};
/* Exposed as module parameters disable_core / disable_cpu (mode 0644). */
module_param_cb(disable_core, &tegra_dvfs_disable_core_ops,
&tegra_dvfs_core_disabled, 0644);
module_param_cb(disable_cpu, &tegra_dvfs_disable_cpu_ops,
&tegra_dvfs_cpu_disabled, 0644);
/*
 * Decide whether dvfs must actually be enabled on PLLM. Returns false
 * when this table entry does not apply to the running chip (common vs
 * restricted table selection under CONFIG_TEGRA_PLLM_RESTRICTED), or
 * when PLLM's current (boot) rate already fits at the entry's
 * minimum-voltage step, in which case no scaling is needed.
 */
static bool __init is_pllm_dvfs(struct clk *c, struct dvfs *d)
{
#ifdef CONFIG_TEGRA_PLLM_RESTRICTED
/* Do not apply common PLLM dvfs table on T30, T33, T37 rev A02+ and
do not apply restricted PLLM dvfs table for other SKUs/revs */
int cpu = tegra_cpu_speedo_id();
if (((cpu == 2) || (cpu == 5) || (cpu == 13)) ==
(d->speedo_id == -1))
return false;
#endif
/* Check if PLLM boot frequency can be applied to clock tree at
minimum voltage. If yes, no need to enable dvfs on PLLM */
if (clk_get_rate_all_locked(c) <= d->freqs[0] * d->freqs_mult)
return false;
return true;
}
/*
 * Attach one dvfs table entry to its clock: look the clock up by name,
 * cap the clock's max rate at the nominal-voltage frequency (auto-dvfs
 * clocks only, EMC excluded -- see comment below), record the rail's
 * nominal voltage as the entry's maximum, and enable dvfs on the clock
 * (PLLM may be skipped, see is_pllm_dvfs()). Silently returns if the
 * clock name is unknown on this board.
 */
static void __init init_dvfs_one(struct dvfs *d, int nominal_mv_index)
{
int ret;
struct clk *c = tegra_get_clock_by_name(d->clk_name);
if (!c) {
pr_debug("tegra3_dvfs: no clock found for %s\n",
d->clk_name);
return;
}
/*
 * Update max rate for auto-dvfs clocks, except EMC.
 * EMC is a special case, since EMC dvfs is board dependent: max rate
 * and EMC scaling frequencies are determined by tegra BCT (flashed
 * together with the image) and board specific EMC DFS table; we will
 * check the scaling ladder against nominal core voltage when the table
 * is loaded (and if on particular board the table is not loaded, EMC
 * scaling is disabled).
 */
if (!(c->flags & PERIPH_EMC_ENB) && d->auto_dvfs) {
BUG_ON(!d->freqs[nominal_mv_index]);
tegra_init_max_rate(
c, d->freqs[nominal_mv_index] * d->freqs_mult);
}
d->max_millivolts = d->dvfs_rail->nominal_millivolts;
/*
 * Check if we may skip enabling dvfs on PLLM. PLLM is a special case,
 * since its frequency never exceeds boot rate, and configuration with
 * restricted PLLM usage is possible.
 */
if (!(c->flags & PLLM) || is_pllm_dvfs(c, d)) {
ret = tegra_enable_dvfs_on_clk(c, d);
if (ret)
pr_err("tegra3_dvfs: failed to enable dvfs on %s\n",
c->name);
}
}
/*
 * Build the "cold" alternative frequency ladder for the CPU dvfs entry:
 * each step is the regular frequency lowered by cpu_cold_offs_mhz[]
 * (clamped at the regular value, with a warning, when the offset
 * exceeds it). Steps above the nominal index repeat the previous value
 * so the ladder stays monotonic (enforced by the BUG_ON). The
 * alternative ladder starts disabled; tegra_cpu_dvfs_alter() toggles it.
 */
static void __init init_dvfs_cold(struct dvfs *d, int nominal_mv_index)
{
int i;
unsigned long offs;
BUG_ON((nominal_mv_index == 0) || (nominal_mv_index > d->num_freqs));
for (i = 0; i < d->num_freqs; i++) {
offs = cpu_cold_offs_mhz[i] * MHZ;
if (i > nominal_mv_index)
d->alt_freqs[i] = d->alt_freqs[i - 1];
else if (d->freqs[i] > offs)
d->alt_freqs[i] = d->freqs[i] - offs;
else {
d->alt_freqs[i] = d->freqs[i];
pr_warn("tegra3_dvfs: cold offset %lu is too high for"
" regular dvfs limit %lu\n", offs, d->freqs[i]);
}
if (i)
BUG_ON(d->alt_freqs[i] < d->alt_freqs[i - 1]);
}
d->alt_freqs_state = ALT_FREQS_DISABLED;
}
/*
 * Check whether dvfs entry *d applies to the given chip speedo id and
 * process corner. A -1 in the entry acts as a wildcard for that field.
 * Logs (at debug level) every rejected entry.
 */
static bool __init match_dvfs_one(struct dvfs *d, int speedo_id, int process_id)
{
        bool speedo_ok = (d->speedo_id == -1) || (d->speedo_id == speedo_id);
        bool process_ok = (d->process_id == -1) || (d->process_id == process_id);

        if (speedo_ok && process_ok)
                return true;

        pr_debug("tegra3_dvfs: rejected %s speedo %d,"
" process %d\n", d->clk_name, d->speedo_id,
d->process_id);
        return false;
}
/*
 * Pick the nominal CPU voltage index: the highest cpu_millivolts[] step
 * that (a) keeps the vdd_core floor required by vdd_cpu at or below
 * core nominal, (b) does not exceed the chip's rated CPU voltage, and
 * (c) is no higher than needed for the matching dvfs entry to reach the
 * clock tree's max rate. The matching cpu dvfs entry is returned via
 * *cpu_dvfs. The last table row is the catch-all "safe entry"; landing
 * on it means no real match was found and a loud warning is printed.
 * Returns the (i - 1) index into cpu_millivolts[].
 */
static int __init get_cpu_nominal_mv_index(
int speedo_id, int process_id, struct dvfs **cpu_dvfs)
{
int i, j, mv;
struct dvfs *d;
struct clk *c;
/*
 * Find maximum cpu voltage that satisfies cpu_to_core dependency for
 * nominal core voltage ("solve from cpu to core at nominal"). Clip
 * result to the nominal cpu level for the chips with this speedo_id.
 */
mv = tegra3_dvfs_rail_vdd_core.nominal_millivolts;
for (i = 0; i < MAX_DVFS_FREQS; i++) {
if ((cpu_millivolts[i] == 0) ||
tegra3_get_core_floor_mv(cpu_millivolts[i]) > mv)
break;
}
BUG_ON(i == 0);
mv = cpu_millivolts[i - 1];
BUG_ON(mv < tegra3_dvfs_rail_vdd_cpu.min_millivolts);
mv = min(mv, tegra_cpu_speedo_mv());
/*
 * Find matching cpu dvfs entry, and use it to determine index to the
 * final nominal voltage, that satisfies the following requirements:
 * - allows CPU to run at minimum of the maximum rates specified in
 * the dvfs entry and clock tree
 * - does not violate cpu_to_core dependency as determined above
 */
for (i = 0, j = 0; j < ARRAY_SIZE(cpu_dvfs_table); j++) {
d = &cpu_dvfs_table[j];
if (match_dvfs_one(d, speedo_id, process_id)) {
c = tegra_get_clock_by_name(d->clk_name);
BUG_ON(!c);
for (; i < MAX_DVFS_FREQS; i++) {
if ((d->freqs[i] == 0) ||
(cpu_millivolts[i] == 0) ||
(mv < cpu_millivolts[i]))
break;
if (c->max_rate <= d->freqs[i]*d->freqs_mult) {
i++;
break;
}
}
break;
}
}
BUG_ON(i == 0);
/* Matching only the last row means we fell through to the safe entry */
if (j == (ARRAY_SIZE(cpu_dvfs_table) - 1))
pr_err("tegra3_dvfs: WARNING!!!\n"
"tegra3_dvfs: no cpu dvfs table found for chip speedo_id"
" %d and process_id %d: set CPU rate limit at %lu\n"
"tegra3_dvfs: WARNING!!!\n",
speedo_id, process_id, d->freqs[i-1] * d->freqs_mult);
*cpu_dvfs = d;
return (i - 1);
}
/*
 * Pick the nominal VDD_CORE voltage index: start from the chip's rated
 * core voltage, clamp it to the board EDP limit (when one is set), and
 * round down to the nearest core_millivolts[] scaling step. Returns the
 * step index, or -ENOSYS when even the lowest step is above the limit.
 */
static int __init get_core_nominal_mv_index(int speedo_id)
{
        int idx;
        int mv = tegra_core_speedo_mv();
        int edp_limit = get_core_edp();

        /* Honor the board EDP cap, if any. */
        if (edp_limit)
                mv = min(mv, edp_limit);

        /* Walk the ladder; stop at the first step above mv (or the end). */
        for (idx = 0; idx < MAX_DVFS_FREQS; idx++) {
                int step = core_millivolts[idx];

                if (step == 0 || mv < step)
                        break;
        }

        if (!idx) {
                pr_err("tegra3_dvfs: unable to adjust core dvfs table to"
" nominal voltage %d\n", mv);
                return -ENOSYS;
        }
        return idx - 1;
}
/*
 * Top-level dvfs init for Tegra3: determine nominal voltages for the
 * core and cpu rails, register the rails and their relationships, hook
 * up every matching core dvfs entry plus the single matching cpu entry,
 * build the cold-offset cpu ladder, and finally honor the
 * disable_core/disable_cpu settings (forced on when the corresponding
 * CONFIG_TEGRA_{CORE,CPU}_DVFS option is off).
 */
void __init tegra_soc_init_dvfs(void)
{
int cpu_speedo_id = tegra_cpu_speedo_id();
int soc_speedo_id = tegra_soc_speedo_id();
int cpu_process_id = tegra_cpu_process_id();
int core_process_id = tegra_core_process_id();
int i;
int core_nominal_mv_index;
int cpu_nominal_mv_index;
#ifndef CONFIG_TEGRA_CORE_DVFS
tegra_dvfs_core_disabled = true;
#endif
#ifndef CONFIG_TEGRA_CPU_DVFS
tegra_dvfs_cpu_disabled = true;
#endif
/*
 * Find nominal voltages for core (1st) and cpu rails before rail
 * init. Nominal voltage index in the scaling ladder will also be
 * used to determine max dvfs frequency for the respective domains.
 */
core_nominal_mv_index = get_core_nominal_mv_index(soc_speedo_id);
if (core_nominal_mv_index < 0) {
tegra3_dvfs_rail_vdd_core.disabled = true;
tegra_dvfs_core_disabled = true;
core_nominal_mv_index = 0;
}
tegra3_dvfs_rail_vdd_core.nominal_millivolts =
core_millivolts[core_nominal_mv_index];
cpu_nominal_mv_index = get_cpu_nominal_mv_index(
cpu_speedo_id, cpu_process_id, &cpu_dvfs);
BUG_ON((cpu_nominal_mv_index < 0) || (!cpu_dvfs));
tegra3_dvfs_rail_vdd_cpu.nominal_millivolts =
cpu_millivolts[cpu_nominal_mv_index];
/* Init rail structures and dependencies */
tegra_dvfs_init_rails(tegra3_dvfs_rails, ARRAY_SIZE(tegra3_dvfs_rails));
tegra_dvfs_add_relationships(tegra3_dvfs_relationships,
ARRAY_SIZE(tegra3_dvfs_relationships));
/* Search core dvfs table for speedo/process matching entries and
initialize dvfs-ed clocks */
for (i = 0; i < ARRAY_SIZE(core_dvfs_table); i++) {
struct dvfs *d = &core_dvfs_table[i];
if (!match_dvfs_one(d, soc_speedo_id, core_process_id))
continue;
init_dvfs_one(d, core_nominal_mv_index);
}
/* Initialize matching cpu dvfs entry already found when nominal
voltage was determined */
init_dvfs_one(cpu_dvfs, cpu_nominal_mv_index);
init_dvfs_cold(cpu_dvfs, cpu_nominal_mv_index);
/* Finally disable dvfs on rails if necessary */
if (tegra_dvfs_core_disabled)
tegra_dvfs_rail_disable(&tegra3_dvfs_rail_vdd_core);
if (tegra_dvfs_cpu_disabled)
tegra_dvfs_rail_disable(&tegra3_dvfs_rail_vdd_cpu);
pr_info("tegra dvfs: VDD_CPU nominal %dmV, scaling %s\n",
tegra3_dvfs_rail_vdd_cpu.nominal_millivolts,
tegra_dvfs_cpu_disabled ? "disabled" : "enabled");
pr_info("tegra dvfs: VDD_CORE nominal %dmV, scaling %s\n",
tegra3_dvfs_rail_vdd_core.nominal_millivolts,
tegra_dvfs_core_disabled ? "disabled" : "enabled");
}
/*
 * Toggle the alternative ("cold") CPU frequency ladder built by
 * init_dvfs_cold(). Thermal index 0 means cold -> enable the lowered
 * limits; any other index disables them. The before_clk_update flag
 * ensures the switch happens on the correct side of the clock update.
 */
void tegra_cpu_dvfs_alter(int edp_thermal_index, bool before_clk_update)
{
        bool enable = !edp_thermal_index;

        if (enable != before_clk_update) {
                int ret = tegra_dvfs_alt_freqs_set(cpu_dvfs, enable);
                /* Fix: "temeperature" -> "temperature" in the warning text */
                WARN_ONCE(ret, "tegra dvfs: failed to set CPU alternative"
                          " frequency limits for cold temperature\n");
        }
}
/*
 * Called before a rail is disabled. On DDR3 boards, force the EMC
 * bridge clock on when the rail's nominal voltage implies a core
 * voltage above TEGRA_EMC_BRIDGE_MVOLTS_MIN (directly for vdd_core,
 * via the cpu->core floor for vdd_cpu). Non-DDR3 boards need nothing.
 * Returns the clk_enable() result (0 on success).
 */
int tegra_dvfs_rail_disable_prepare(struct dvfs_rail *rail)
{
int ret = 0;
if (tegra_emc_get_dram_type() != DRAM_TYPE_DDR3)
return ret;
if (((&tegra3_dvfs_rail_vdd_core == rail) &&
(rail->nominal_millivolts > TEGRA_EMC_BRIDGE_MVOLTS_MIN)) ||
((&tegra3_dvfs_rail_vdd_cpu == rail) &&
(tegra3_get_core_floor_mv(rail->nominal_millivolts) >
TEGRA_EMC_BRIDGE_MVOLTS_MIN))) {
struct clk *bridge = tegra_get_clock_by_name("bridge.emc");
BUG_ON(!bridge);
ret = clk_enable(bridge);
pr_info("%s: %s: %s bridge.emc\n", __func__,
rail->reg_id, ret ? "failed to enable" : "enabled");
}
return ret;
}
/*
 * Called after a rail is re-enabled: releases the EMC bridge clock
 * reference taken by tegra_dvfs_rail_disable_prepare() under the same
 * DDR3/voltage conditions. Always returns 0.
 */
int tegra_dvfs_rail_post_enable(struct dvfs_rail *rail)
{
if (tegra_emc_get_dram_type() != DRAM_TYPE_DDR3)
return 0;
if (((&tegra3_dvfs_rail_vdd_core == rail) &&
(rail->nominal_millivolts > TEGRA_EMC_BRIDGE_MVOLTS_MIN)) ||
((&tegra3_dvfs_rail_vdd_cpu == rail) &&
(tegra3_get_core_floor_mv(rail->nominal_millivolts) >
TEGRA_EMC_BRIDGE_MVOLTS_MIN))) {
struct clk *bridge = tegra_get_clock_by_name("bridge.emc");
BUG_ON(!bridge);
clk_disable(bridge);
pr_info("%s: %s: disabled bridge.emc\n",
__func__, rail->reg_id);
}
return 0;
}
/*
* sysfs and dvfs interfaces to cap tegra core domains frequencies
*/
/* Serializes all core-cap state below. */
static DEFINE_MUTEX(core_cap_lock);
/* A reference-counted voltage cap request. */
struct core_cap {
int refcnt;
int level;
};
/* Effective cap currently applied to the cap clocks. */
static struct core_cap tegra3_core_cap;
/* Kernel-requested cap (tegra_dvfs_core_cap_* API). */
static struct core_cap kdvfs_core_cap;
/* Userspace cap (sysfs tegra_cap interface). */
static struct core_cap user_core_cap;
/* /sys/kernel/tegra_cap kobject, created in tegra_dvfs_init_core_cap(). */
static struct kobject *cap_kobj;
/* Arranged in order required for enabling/lowering the cap */
static struct {
const char *cap_name;
struct clk *cap_clk;
unsigned long freqs[MAX_DVFS_FREQS];
} core_cap_table[] = {
{ .cap_name = "cap.cbus" },
{ .cap_name = "cap.sclk" },
{ .cap_name = "cap.emc" },
};
/*
 * Apply a new cap level: quantize the requested level down to the
 * nearest core_millivolts[] step, then program each cap clock with that
 * step's frequency. When lowering the cap, clocks are updated in table
 * order; when raising, in reverse order (core_cap_table is arranged so
 * this ordering is safe -- see its comment). Caller holds
 * core_cap_lock.
 */
static void core_cap_level_set(int level)
{
int i, j;
for (j = 0; j < ARRAY_SIZE(core_millivolts); j++) {
int v = core_millivolts[j];
if ((v == 0) || (level < v))
break;
}
/* Clamp to the lowest step if the request is below the ladder */
j = (j == 0) ? 0 : j - 1;
level = core_millivolts[j];
if (level < tegra3_core_cap.level) {
for (i = 0; i < ARRAY_SIZE(core_cap_table); i++)
if (core_cap_table[i].cap_clk)
clk_set_rate(core_cap_table[i].cap_clk,
core_cap_table[i].freqs[j]);
} else if (level > tegra3_core_cap.level) {
for (i = ARRAY_SIZE(core_cap_table) - 1; i >= 0; i--)
if (core_cap_table[i].cap_clk)
clk_set_rate(core_cap_table[i].cap_clk,
core_cap_table[i].freqs[j]);
}
tegra3_core_cap.level = level;
}
/*
 * Recompute the effective cap as the minimum of the rail maximum and
 * every active (refcnt > 0) cap request, and apply it when it changed.
 * Caller holds core_cap_lock.
 */
static void core_cap_update(void)
{
        int level = tegra3_dvfs_rail_vdd_core.max_millivolts;

        if (kdvfs_core_cap.refcnt && kdvfs_core_cap.level < level)
                level = kdvfs_core_cap.level;
        if (user_core_cap.refcnt && user_core_cap.level < level)
                level = user_core_cap.level;

        if (tegra3_core_cap.level != level)
                core_cap_level_set(level);
}
/*
 * Reference-counted enable/disable of the cap clocks themselves. The
 * first reference ungates all cap clocks in table order; dropping the
 * last reference gates them in reverse order. Always re-evaluates the
 * effective cap afterwards. Caller holds core_cap_lock.
 */
static void core_cap_enable(bool enable)
{
        int i;

        if (enable) {
                if (++tegra3_core_cap.refcnt == 1)
                        for (i = 0; i < ARRAY_SIZE(core_cap_table); i++)
                                if (core_cap_table[i].cap_clk)
                                        clk_enable(core_cap_table[i].cap_clk);
        } else if (tegra3_core_cap.refcnt) {
                if (--tegra3_core_cap.refcnt == 0)
                        for (i = ARRAY_SIZE(core_cap_table) - 1; i >= 0; i--)
                                if (core_cap_table[i].cap_clk)
                                        clk_disable(core_cap_table[i].cap_clk);
        }
        core_cap_update();
}
/* sysfs: show "overall (user)" cap-enabled state as 0/1 flags. */
static ssize_t
core_cap_state_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
        int overall = tegra3_core_cap.refcnt ? 1 : 0;
        int user = user_core_cap.refcnt ? 1 : 0;

        return sprintf(buf, "%d (%d)\n", overall, user);
}
/*
 * sysfs: enable (non-zero) or disable (zero) the userspace cap request.
 * Enabling/disabling is reference counted; the underlying cap clocks
 * are toggled only on the first/last reference.
 */
static ssize_t
core_cap_state_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
        int state;

        /* Fix: return -EINVAL on malformed input instead of -1 (-EPERM) */
        if (sscanf(buf, "%d", &state) != 1)
                return -EINVAL;

        mutex_lock(&core_cap_lock);
        if (state) {
                user_core_cap.refcnt++;
                if (user_core_cap.refcnt == 1)
                        core_cap_enable(true);
        } else if (user_core_cap.refcnt) {
                user_core_cap.refcnt--;
                if (user_core_cap.refcnt == 0)
                        core_cap_enable(false);
        }
        mutex_unlock(&core_cap_lock);
        return count;
}
/* sysfs: show "effective (user-requested)" cap levels in mV. */
static ssize_t
core_cap_level_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
        int effective = tegra3_core_cap.level;
        int requested = user_core_cap.level;

        return sprintf(buf, "%d (%d)\n", effective, requested);
}
/*
 * sysfs: set the userspace cap level (mV) and re-evaluate the effective
 * cap. The level only takes effect while the user cap is enabled via
 * core_cap_state_store().
 */
static ssize_t
core_cap_level_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
        int level;

        /* Fix: return -EINVAL on malformed input instead of -1 (-EPERM) */
        if (sscanf(buf, "%d", &level) != 1)
                return -EINVAL;

        mutex_lock(&core_cap_lock);
        user_core_cap.level = level;
        core_cap_update();
        mutex_unlock(&core_cap_lock);
        return count;
}
/* sysfs files published under /sys/kernel/tegra_cap (mode 0644). */
static struct kobj_attribute cap_state_attribute =
__ATTR(core_cap_state, 0644, core_cap_state_show, core_cap_state_store);
static struct kobj_attribute cap_level_attribute =
__ATTR(core_cap_level, 0644, core_cap_level_show, core_cap_level_store);
const struct attribute *cap_attributes[] = {
&cap_state_attribute.attr,
&cap_level_attribute.attr,
NULL,
};
/*
 * Kernel-side counterpart of core_cap_state_store(): reference-counted
 * enable/disable of the kernel cap request, taking core_cap_lock.
 */
void tegra_dvfs_core_cap_enable(bool enable)
{
        mutex_lock(&core_cap_lock);

        if (enable) {
                if (++kdvfs_core_cap.refcnt == 1)
                        core_cap_enable(true);
        } else if (kdvfs_core_cap.refcnt) {
                if (--kdvfs_core_cap.refcnt == 0)
                        core_cap_enable(false);
        }

        mutex_unlock(&core_cap_lock);
}
/* Set the kernel-requested cap level (mV) and re-evaluate the cap. */
void tegra_dvfs_core_cap_level_set(int level)
{
mutex_lock(&core_cap_lock);
kdvfs_core_cap.level = level;
core_cap_update();
mutex_unlock(&core_cap_lock);
}
/*
 * Build the per-voltage frequency table for one cap clock: for each
 * core_millivolts[] step, find the highest rate the clock (via its
 * parent's dvfs) can run at without exceeding that voltage, probing
 * rates upward in >= 1 kHz increments with clk_round_rate(). Returns 0
 * on success, -EINVAL when rounding or voltage prediction fails.
 */
static int __init init_core_cap_one(struct clk *c, unsigned long *freqs)
{
        /* Fix: next_v was uninitialized; it is printed in the rate == 0
         * warning path below if the very first rounding hits a fixpoint. */
        int i, v, next_v = 0;
        unsigned long rate, next_rate = 0;

        for (i = 0; i < ARRAY_SIZE(core_millivolts); i++) {
                v = core_millivolts[i];
                if (v == 0)
                        break;

                for (;;) {
                        rate = next_rate;
                        next_rate = clk_round_rate(c, rate + 1000);
                        if (IS_ERR_VALUE(next_rate)) {
                                pr_debug("tegra3_dvfs: failed to round %s"
" rate %lu", c->name, rate);
                                return -EINVAL;
                        }
                        /* Rounding reached a fixpoint: top rate found */
                        if (rate == next_rate)
                                break;

                        next_v = tegra_dvfs_predict_millivolts(
                                c->parent, next_rate);
                        /* Fix: check the predicted voltage (next_v), not
                         * next_rate again (copy/paste bug in original). */
                        if (IS_ERR_VALUE(next_v)) {
                                pr_debug("tegra3_dvfs: failed to predict %s mV"
" for rate %lu", c->name, next_rate);
                                return -EINVAL;
                        }
                        if (next_v > v)
                                break;
                }

                if (rate == 0) {
                        rate = next_rate;
                        pr_warn("tegra3_dvfs: minimum %s rate %lu requires"
" %d mV", c->name, rate, next_v);
                }
                freqs[i] = rate;
                next_rate = rate;
        }
        return 0;
}
/*
 * Late initcall: seed all cap levels at the rail maximum, build the
 * per-cap-clock frequency tables (caps whose clock lookup or table
 * build fails are skipped with an error), then publish the sysfs
 * interface under /sys/kernel/tegra_cap. Always returns 0 so boot
 * continues even when sysfs setup fails.
 */
static int __init tegra_dvfs_init_core_cap(void)
{
int i;
struct clk *c = NULL;
tegra3_core_cap.level = kdvfs_core_cap.level = user_core_cap.level =
tegra3_dvfs_rail_vdd_core.max_millivolts;
for (i = 0; i < ARRAY_SIZE(core_cap_table); i++) {
c = tegra_get_clock_by_name(core_cap_table[i].cap_name);
if (!c || !c->parent ||
init_core_cap_one(c, core_cap_table[i].freqs)) {
pr_err("tegra3_dvfs: failed to initialize %s frequency"
" table", core_cap_table[i].cap_name);
continue;
}
core_cap_table[i].cap_clk = c;
}
cap_kobj = kobject_create_and_add("tegra_cap", kernel_kobj);
if (!cap_kobj) {
pr_err("tegra3_dvfs: failed to create sysfs cap object");
return 0;
}
if (sysfs_create_files(cap_kobj, cap_attributes)) {
pr_err("tegra3_dvfs: failed to create sysfs cap interface");
return 0;
}
pr_info("tegra dvfs: tegra sysfs cap interface is initialized\n");
return 0;
}
late_initcall(tegra_dvfs_init_core_cap);
|
AndroidDeveloperAlliance/ZenKernel_Grouper
|
arch/arm/mach-tegra/tegra3_dvfs.c
|
C
|
gpl-2.0
| 37,574
|
/**
* @file id_10361.c
* @brief AOAPC I 10361
* @author chenxilinsidney
* @version 1.0
* @date 2015-03-24
*/
#include <stdio.h>
#include <string.h>
#define MAX_LINE_LENGTH 101

/* line[0]: pattern line containing <...> markers; line[1]: reply line */
char line[2][MAX_LINE_LENGTH];

int main(void)
{
    int num_case;
    if (scanf("%d\n", &num_case) != 1)
        return 1;
    while (num_case--) {
        /* Fix: gets() is unsafe (buffer overflow; removed in C11).
         * Use fgets() and strip the trailing newline to keep the
         * original strlen-based logic working. */
        if (!fgets(line[0], MAX_LINE_LENGTH, stdin))
            break;
        line[0][strcspn(line[0], "\n")] = '\0';
        if (!fgets(line[1], MAX_LINE_LENGTH, stdin))
            break;
        line[1][strcspn(line[1], "\n")] = '\0';
        int line_length_a = strlen(line[0]);
        int line_length_b = strlen(line[1]);
        int line_index = 0;
        int char_position[4] = {0};
        int position_index = 0;
        /* output first line: print it without the markers, remembering
         * where each of the four '<'/'>' markers was */
        while (line_index < line_length_a) {
            int character = line[0][line_index];
            if (character != '<' && character != '>')
                putchar(character);
            else
                char_position[position_index++] = line_index;
            line_index++;
        }
        printf("\n");
        /* output second line: drop the trailing "..." and append the
         * marked spans of the first line in reverse order */
        line[1][line_length_b - 3] = '\0';
        printf("%s", line[1]);
        for (position_index = 2; position_index >= 0; position_index--)
            for (line_index = char_position[position_index] + 1; line_index <
                    char_position[position_index + 1]; line_index++)
                putchar(line[0][line_index]);
        printf("%s", line[0] + char_position[3] + 1);
        printf("\n");
    }
    return 0;
}
|
chenxilinsidney/funnycprogram
|
acm/aoapc/id_10361.c
|
C
|
gpl-2.0
| 1,360
|
/* -*- mode: c -*- */
/* $Id$ */
/* Copyright (C) 2004-2013 Alexander Chernov <cher@ejudge.ru> */
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "checker_internal.h"
#include <errno.h>
#include "l10n_impl.h"
/*
 * Read one int32 token from checker input stream `ind` (label `name`
 * used in diagnostics). Stores the value in *p_val and returns 1 on
 * success, -1 on EOF (when eof_error_flag permits); any parse error
 * aborts via fatal_read().
 */
int
checker_read_int(
        int ind,
        const char *name,
        int eof_error_flag,
        int *p_val)
{
  long v;
  char sb[128], *db = 0, *vb = 0, *ep = 0;
  size_t ds = 0;

  if (!name) name = "";
  vb = checker_read_buf_2(ind, name, eof_error_flag, sb, sizeof(sb), &db, &ds);
  if (!vb) return -1;

  if (!*vb) {
    fatal_read(ind, _("%s: no int32 value"), name);
  }
  errno = 0;
  v = strtol(vb, &ep, 10);
  if (*ep) {
    fatal_read(ind, _("%s: cannot parse int32 value"), name);
  }
  /* Fix: on LP64 platforms strtol() returns a 64-bit long, so values
   * outside int32 range parse without ERANGE; reject any value that
   * does not round-trip through int. */
  if (errno || v != (long)(int) v) {
    fatal_read(ind, _("%s: int32 value is out of range"), name);
  }
  *p_val = (int) v;
  return 1;
}
|
misty-fungus/ejudge-debian
|
checkers/read_int.c
|
C
|
gpl-2.0
| 1,296
|
/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/msm_kgsl.h>
#include <linux/regulator/machine.h>
#include <mach/irqs.h>
#include <mach/msm_iomap.h>
#include <mach/board.h>
#include <mach/dma.h>
#include <mach/dal_axi.h>
#include <asm/mach/flash.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/mmc.h>
#include <mach/rpc_hsusb.h>
#include <mach/socinfo.h>
#include "devices.h"
#include "devices-msm7x2xa.h"
#include "footswitch.h"
#include "acpuclock.h"
/* Address of GSBI blocks */
#define MSM_GSBI0_PHYS 0xA1200000
#define MSM_GSBI1_PHYS 0xA1300000
/* GSBI QUPe devices */
#define MSM_GSBI0_QUP_PHYS (MSM_GSBI0_PHYS + 0x80000)
#define MSM_GSBI1_QUP_PHYS (MSM_GSBI1_PHYS + 0x80000)
/*
 * GSBI0 QUP I2C resources: QUP and GSBI register windows, the QUP error
 * interrupt, and the I2C clk/sda GPIO lines (60/61).
 */
static struct resource gsbi0_qup_i2c_resources[] = {
{
.name = "qup_phys_addr",
.start = MSM_GSBI0_QUP_PHYS,
.end = MSM_GSBI0_QUP_PHYS + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "gsbi_qup_i2c_addr",
.start = MSM_GSBI0_PHYS,
.end = MSM_GSBI0_PHYS + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "qup_err_intr",
.start = INT_PWB_I2C,
.end = INT_PWB_I2C,
.flags = IORESOURCE_IRQ,
},
{
.name = "i2c_clk",
.start = 60,
.end = 60,
.flags = IORESOURCE_IO,
},
{
.name = "i2c_sda",
.start = 61,
.end = 61,
.flags = IORESOURCE_IO,
},
};
/* Use GSBI0 QUP for /dev/i2c-0 */
struct platform_device msm_gsbi0_qup_i2c_device = {
.name = "qup_i2c",
.id = MSM_GSBI0_QUP_I2C_BUS_ID,
.num_resources = ARRAY_SIZE(gsbi0_qup_i2c_resources),
.resource = gsbi0_qup_i2c_resources,
};
/*
 * GSBI1 QUP I2C resources: same layout as GSBI0 above, with the
 * ARM11 DMA interrupt and clk/sda GPIO lines 131/132.
 */
static struct resource gsbi1_qup_i2c_resources[] = {
{
.name = "qup_phys_addr",
.start = MSM_GSBI1_QUP_PHYS,
.end = MSM_GSBI1_QUP_PHYS + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "gsbi_qup_i2c_addr",
.start = MSM_GSBI1_PHYS,
.end = MSM_GSBI1_PHYS + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "qup_err_intr",
.start = INT_ARM11_DMA,
.end = INT_ARM11_DMA,
.flags = IORESOURCE_IRQ,
},
{
.name = "i2c_clk",
.start = 131,
.end = 131,
.flags = IORESOURCE_IO,
},
{
.name = "i2c_sda",
.start = 132,
.end = 132,
.flags = IORESOURCE_IO,
},
};
/* Use GSBI1 QUP for /dev/i2c-1 */
struct platform_device msm_gsbi1_qup_i2c_device = {
.name = "qup_i2c",
.id = MSM_GSBI1_QUP_I2C_BUS_ID,
.num_resources = ARRAY_SIZE(gsbi1_qup_i2c_resources),
.resource = gsbi1_qup_i2c_resources,
};
#define MSM_HSUSB_PHYS 0xA0800000
/*
 * OTG, gadget and host devices below all share the same HS-USB register
 * window (MSM_HSUSB_PHYS) and interrupt (INT_USB_HS); each driver binds
 * to its own platform_device.
 */
static struct resource resources_hsusb_otg[] = {
{
.start = MSM_HSUSB_PHYS,
.end = MSM_HSUSB_PHYS + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_USB_HS,
.end = INT_USB_HS,
.flags = IORESOURCE_IRQ,
},
};
/* 32-bit DMA mask shared by the USB devices below. */
static u64 dma_mask = 0xffffffffULL;
struct platform_device msm_device_otg = {
.name = "msm_otg",
.id = -1,
.num_resources = ARRAY_SIZE(resources_hsusb_otg),
.resource = resources_hsusb_otg,
.dev = {
.dma_mask = &dma_mask,
.coherent_dma_mask = 0xffffffffULL,
},
};
static struct resource resources_gadget_peripheral[] = {
{
.start = MSM_HSUSB_PHYS,
.end = MSM_HSUSB_PHYS + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_USB_HS,
.end = INT_USB_HS,
.flags = IORESOURCE_IRQ,
},
};
struct platform_device msm_device_gadget_peripheral = {
.name = "msm_hsusb",
.id = -1,
.num_resources = ARRAY_SIZE(resources_gadget_peripheral),
.resource = resources_gadget_peripheral,
.dev = {
.dma_mask = &dma_mask,
.coherent_dma_mask = 0xffffffffULL,
},
};
static struct resource resources_hsusb_host[] = {
{
.start = MSM_HSUSB_PHYS,
.end = MSM_HSUSB_PHYS + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_USB_HS,
.end = INT_USB_HS,
.flags = IORESOURCE_IRQ,
},
};
struct platform_device msm_device_hsusb_host = {
.name = "msm_hsusb_host",
.id = 0,
.num_resources = ARRAY_SIZE(resources_hsusb_host),
.resource = resources_hsusb_host,
.dev = {
.dma_mask = &dma_mask,
.coherent_dma_mask = 0xffffffffULL,
},
};
/* USB host controller table, indexed by msm_add_host()'s 'host' argument. */
static struct platform_device *msm_host_devices[] = {
	&msm_device_hsusb_host,
};
/*
 * msm_add_host() - register one of the USB host controller devices.
 * @host: index into msm_host_devices[] (currently only 0 is valid)
 * @plat: board-specific platform data, attached before registration
 *
 * Returns 0 on success, -ENODEV for an out-of-range index or a NULL
 * table entry, or the error from platform_device_register().
 */
int msm_add_host(unsigned int host, struct msm_usb_host_platform_data *plat)
{
	struct platform_device *pdev;

	/* Bounds-check first: 'host' indexes a fixed-size table and the
	 * original code read past the end for any host >= 1. */
	if (host >= ARRAY_SIZE(msm_host_devices))
		return -ENODEV;
	pdev = msm_host_devices[host];
	if (!pdev)
		return -ENODEV;
	pdev->dev.platform_data = plat;
	return platform_device_register(pdev);
}
/* ADM data-mover device.
 * NOTE(review): this single IRQ resource smuggles the register base
 * (MSM_DMOV_BASE) through .end of an IORESOURCE_IRQ entry; the msm_dmov
 * driver presumably decodes it that way -- confirm against its probe
 * code before changing the encoding. */
static struct resource msm_dmov_resource[] = {
	{
		.start = INT_ADM_AARM,
		.end = (resource_size_t)MSM_DMOV_BASE,
		.flags = IORESOURCE_IRQ,
	},
};
struct platform_device msm_device_dmov = {
	.name = "msm_dmov",
	.id = -1,
	.resource = msm_dmov_resource,
	.num_resources = ARRAY_SIZE(msm_dmov_resource),
};
struct platform_device msm_device_smd = {
.name = "msm_smd",
.id = -1,
};
static struct resource resources_uart1[] = {
{
.start = INT_UART1,
.end = INT_UART1,
.flags = IORESOURCE_IRQ,
},
{
.start = MSM_UART1_PHYS,
.end = MSM_UART1_PHYS + MSM_UART1_SIZE - 1,
.flags = IORESOURCE_MEM,
},
};
static struct resource resources_uart2[] = {
{
.start = INT_UART2,
.end = INT_UART2,
.flags = IORESOURCE_IRQ,
},
{
.start = MSM_UART2_PHYS,
.end = MSM_UART2_PHYS + MSM_UART2_SIZE - 1,
.flags = IORESOURCE_MEM,
},
};
static struct resource resources_uart3[] = {
{
.start = INT_UART3,
.end = INT_UART3,
.flags = IORESOURCE_IRQ,
},
{
.start = MSM_UART3_PHYS,
.end = MSM_UART3_PHYS + MSM_UART3_SIZE - 1,
.flags = IORESOURCE_MEM,
},
};
struct platform_device msm_device_uart1 = {
.name = "msm_serial",
.id = 0,
.num_resources = ARRAY_SIZE(resources_uart1),
.resource = resources_uart1,
};
struct platform_device msm_device_uart2 = {
.name = "msm_serial",
.id = 1,
.num_resources = ARRAY_SIZE(resources_uart2),
.resource = resources_uart2,
};
struct platform_device msm_device_uart3 = {
.name = "msm_serial",
.id = 2,
.num_resources = ARRAY_SIZE(resources_uart3),
.resource = resources_uart3,
};
#define MSM_UART1DM_PHYS 0xA0200000
static struct resource msm_uart1_dm_resources[] = {
{
.start = MSM_UART1DM_PHYS,
.end = MSM_UART1DM_PHYS + PAGE_SIZE - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_UART1DM_IRQ,
.end = INT_UART1DM_IRQ,
.flags = IORESOURCE_IRQ,
},
{
.start = INT_UART1DM_RX,
.end = INT_UART1DM_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = DMOV_HSUART1_TX_CHAN,
.end = DMOV_HSUART1_RX_CHAN,
.name = "uartdm_channels",
.flags = IORESOURCE_DMA,
},
{
.start = DMOV_HSUART1_TX_CRCI,
.end = DMOV_HSUART1_RX_CRCI,
.name = "uartdm_crci",
.flags = IORESOURCE_DMA,
},
};
static u64 msm_uart_dm1_dma_mask = DMA_BIT_MASK(32);
struct platform_device msm_device_uart_dm1 = {
.name = "msm_serial_hs",
.id = 0,
.num_resources = ARRAY_SIZE(msm_uart1_dm_resources),
.resource = msm_uart1_dm_resources,
.dev = {
.dma_mask = &msm_uart_dm1_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
#define MSM_UART2DM_PHYS 0xA0300000
static struct resource msm_uart2dm_resources[] = {
{
.start = MSM_UART2DM_PHYS,
.end = MSM_UART2DM_PHYS + PAGE_SIZE - 1,
.name = "uartdm_resource",
.flags = IORESOURCE_MEM,
},
{
.start = INT_UART2DM_IRQ,
.end = INT_UART2DM_IRQ,
.flags = IORESOURCE_IRQ,
},
};
struct platform_device msm_device_uart_dm2 = {
.name = "msm_serial_hsl",
.id = 0,
.num_resources = ARRAY_SIZE(msm_uart2dm_resources),
.resource = msm_uart2dm_resources,
};
#define MSM_NAND_PHYS 0xA0A00000
#define MSM_NANDC01_PHYS 0xA0A40000
#define MSM_NANDC10_PHYS 0xA0A80000
#define MSM_NANDC11_PHYS 0xA0AC0000
#define EBI2_REG_BASE 0xA0D00000
static struct resource resources_nand[] = {
[0] = {
.name = "msm_nand_dmac",
.start = DMOV_NAND_CHAN,
.end = DMOV_NAND_CHAN,
.flags = IORESOURCE_DMA,
},
[1] = {
.name = "msm_nand_phys",
.start = MSM_NAND_PHYS,
.end = MSM_NAND_PHYS + 0x7FF,
.flags = IORESOURCE_MEM,
},
[2] = {
.name = "msm_nandc01_phys",
.start = MSM_NANDC01_PHYS,
.end = MSM_NANDC01_PHYS + 0x7FF,
.flags = IORESOURCE_MEM,
},
[3] = {
.name = "msm_nandc10_phys",
.start = MSM_NANDC10_PHYS,
.end = MSM_NANDC10_PHYS + 0x7FF,
.flags = IORESOURCE_MEM,
},
[4] = {
.name = "msm_nandc11_phys",
.start = MSM_NANDC11_PHYS,
.end = MSM_NANDC11_PHYS + 0x7FF,
.flags = IORESOURCE_MEM,
},
[5] = {
.name = "ebi2_reg_base",
.start = EBI2_REG_BASE,
.end = EBI2_REG_BASE + 0x60,
.flags = IORESOURCE_MEM,
},
};
struct flash_platform_data msm_nand_data;
struct platform_device msm_device_nand = {
.name = "msm_nand",
.id = -1,
.num_resources = ARRAY_SIZE(resources_nand),
.resource = resources_nand,
.dev = {
.platform_data = &msm_nand_data,
},
};
#define MSM_SDC1_BASE 0xA0400000
#define MSM_SDC2_BASE 0xA0500000
#define MSM_SDC3_BASE 0xA0600000
#define MSM_SDC4_BASE 0xA0700000
static struct resource resources_sdc1[] = {
{
.start = MSM_SDC1_BASE,
.end = MSM_SDC1_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_SDC1_0,
.end = INT_SDC1_1,
.flags = IORESOURCE_IRQ,
},
{
.name = "sdcc_dma_chnl",
.start = DMOV_SDC1_CHAN,
.end = DMOV_SDC1_CHAN,
.flags = IORESOURCE_DMA,
},
{
.name = "sdcc_dma_crci",
.start = DMOV_SDC1_CRCI,
.end = DMOV_SDC1_CRCI,
.flags = IORESOURCE_DMA,
}
};
static struct resource resources_sdc2[] = {
{
.start = MSM_SDC2_BASE,
.end = MSM_SDC2_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_SDC2_0,
.end = INT_SDC2_1,
.flags = IORESOURCE_IRQ,
},
{
.name = "sdcc_dma_chnl",
.start = DMOV_SDC2_CHAN,
.end = DMOV_SDC2_CHAN,
.flags = IORESOURCE_DMA,
},
{
.name = "sdcc_dma_crci",
.start = DMOV_SDC2_CRCI,
.end = DMOV_SDC2_CRCI,
.flags = IORESOURCE_DMA,
}
};
#ifdef CONFIG_ARCH_MSM7X27A
/* SDC3 controller on MSM7x27A: MMIO window, IRQ pair, and its own
 * DMA-mover channel/CRCI. */
static struct resource resources_sdc3[] = {
	{
		.start = MSM_SDC3_BASE,
		.end = MSM_SDC3_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = INT_SDC3_0,
		.end = INT_SDC3_1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.name = "sdcc_dma_chnl",
		.start = DMOV_SDC3_CHAN,
		.end = DMOV_SDC3_CHAN,
		.flags = IORESOURCE_DMA,
	},
	{
		.name = "sdcc_dma_crci",
		.start = DMOV_SDC3_CRCI,
		.end = DMOV_SDC3_CRCI,
		.flags = IORESOURCE_DMA,
	},
};
#else
/* Non-7x27A: SDC3 uses the SDC4 DMA channel/CRCI, mirrored by
 * resources_sdc4 below using the SDC3 ones.
 * NOTE(review): looks like a deliberately cross-wired DMA assignment on
 * these SoCs -- confirm against the SoC DMA channel map before
 * "fixing" the apparent swap. */
static struct resource resources_sdc3[] = {
	{
		.start = MSM_SDC3_BASE,
		.end = MSM_SDC3_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = INT_SDC3_0,
		.end = INT_SDC3_1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.name = "sdcc_dma_chnl",
		.start = DMOV_SDC4_CHAN,
		.end = DMOV_SDC4_CHAN,
		.flags = IORESOURCE_DMA,
	},
	{
		.name = "sdcc_dma_crci",
		.start = DMOV_SDC4_CRCI,
		.end = DMOV_SDC4_CRCI,
		.flags = IORESOURCE_DMA,
	},
};
#endif
/* SDC4 controller.
 * NOTE(review): uses the SDC3 DMA channel/CRCI, the counterpart of the
 * swap in the non-7x27A resources_sdc3 table above -- verify against
 * the SoC DMA channel map before changing. */
static struct resource resources_sdc4[] = {
	{
		.start = MSM_SDC4_BASE,
		.end = MSM_SDC4_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = INT_SDC4_0,
		.end = INT_SDC4_1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.name = "sdcc_dma_chnl",
		.start = DMOV_SDC3_CHAN,
		.end = DMOV_SDC3_CHAN,
		.flags = IORESOURCE_DMA,
	},
	{
		.name = "sdcc_dma_crci",
		.start = DMOV_SDC3_CRCI,
		.end = DMOV_SDC3_CRCI,
		.flags = IORESOURCE_DMA,
	},
};
struct platform_device msm_device_sdc1 = {
.name = "msm_sdcc",
.id = 1,
.num_resources = ARRAY_SIZE(resources_sdc1),
.resource = resources_sdc1,
.dev = {
.coherent_dma_mask = 0xffffffff,
},
};
struct platform_device msm_device_sdc2 = {
.name = "msm_sdcc",
.id = 2,
.num_resources = ARRAY_SIZE(resources_sdc2),
.resource = resources_sdc2,
.dev = {
.coherent_dma_mask = 0xffffffff,
},
};
struct platform_device msm_device_sdc3 = {
.name = "msm_sdcc",
.id = 3,
.num_resources = ARRAY_SIZE(resources_sdc3),
.resource = resources_sdc3,
.dev = {
.coherent_dma_mask = 0xffffffff,
},
};
struct platform_device msm_device_sdc4 = {
.name = "msm_sdcc",
.id = 4,
.num_resources = ARRAY_SIZE(resources_sdc4),
.resource = resources_sdc4,
.dev = {
.coherent_dma_mask = 0xffffffff,
},
};
/* SDCC device table, indexed by (controller - 1) in msm_add_sdcc(). */
static struct platform_device *msm_sdcc_devices[] __initdata = {
	&msm_device_sdc1,
	&msm_device_sdc2,
	&msm_device_sdc3,
	&msm_device_sdc4,
};
/*
 * msm_add_sdcc() - register one of the four SDCC controllers.
 * @controller: 1-based controller number (1..4)
 * @plat: board MMC platform data, attached before registration
 *
 * Returns 0 on success, -EINVAL for an out-of-range controller, or the
 * error from platform_device_register().
 */
int __init msm_add_sdcc(unsigned int controller, struct mmc_platform_data *plat)
{
	struct platform_device *pdev;

	/* Unsigned wrap makes controller == 0 fail this test too, so the
	 * single comparison accepts exactly 1..4. */
	if (controller - 1 >= ARRAY_SIZE(msm_sdcc_devices))
		return -EINVAL;

	pdev = msm_sdcc_devices[controller - 1];
	pdev->dev.platform_data = plat;
	return platform_device_register(pdev);
}
#define MDP_BASE 0xAA200000
#define MIPI_DSI_HW_BASE 0xA1100000
static struct resource msm_mipi_dsi_resources[] = {
{
.name = "mipi_dsi",
.start = MIPI_DSI_HW_BASE,
.end = MIPI_DSI_HW_BASE + 0x000F0000 - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_DSI_IRQ,
.end = INT_DSI_IRQ,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device msm_mipi_dsi_device = {
.name = "mipi_dsi",
.id = 1,
.num_resources = ARRAY_SIZE(msm_mipi_dsi_resources),
.resource = msm_mipi_dsi_resources,
};
static struct resource msm_mdp_resources[] = {
{
.name = "mdp",
.start = MDP_BASE,
.end = MDP_BASE + 0x000F1008 - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_MDP,
.end = INT_MDP,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device msm_mdp_device = {
.name = "mdp",
.id = 0,
.num_resources = ARRAY_SIZE(msm_mdp_resources),
.resource = msm_mdp_resources,
};
static struct platform_device msm_lcdc_device = {
.name = "lcdc",
.id = 0,
};
#ifdef CONFIG_MSM_KGSL_ADRENO200
static struct resource kgsl_3d0_resources[] = {
{
.name = KGSL_3D0_REG_MEMORY,
.start = 0xA0000000,
.end = 0xA001ffff,
.flags = IORESOURCE_MEM,
},
{
.name = KGSL_3D0_IRQ,
.start = INT_GRAPHICS,
.end = INT_GRAPHICS,
.flags = IORESOURCE_IRQ,
},
};
static struct kgsl_device_platform_data kgsl_3d0_pdata = {
.pwrlevel = {
{
.gpu_freq = 245760000,
.bus_freq = 200000000,
},
{
.gpu_freq = 192000000,
.bus_freq = 160000000,
},
{
.gpu_freq = 133330000,
.bus_freq = 0,
},
},
.init_level = 0,
.num_levels = 3,
.set_grp_async = set_grp_xbar_async,
.idle_timeout = HZ,
.strtstp_sleepwake = true,
.nap_allowed = false,
.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM,
};
struct platform_device msm_kgsl_3d0 = {
.name = "kgsl-3d0",
.id = 0,
.num_resources = ARRAY_SIZE(kgsl_3d0_resources),
.resource = kgsl_3d0_resources,
.dev = {
.platform_data = &kgsl_3d0_pdata,
},
};
void __init msm7x25a_kgsl_3d0_init(void)
{
if (cpu_is_msm7x25a() || cpu_is_msm7x25aa()) {
kgsl_3d0_pdata.num_levels = 2;
kgsl_3d0_pdata.pwrlevel[0].gpu_freq = 133330000;
kgsl_3d0_pdata.pwrlevel[0].bus_freq = 160000000;
kgsl_3d0_pdata.pwrlevel[1].gpu_freq = 96000000;
kgsl_3d0_pdata.pwrlevel[1].bus_freq = 0;
}
}
#endif
/* Attach board data to @pdev and register it, logging any failure. */
static void __init msm_register_device(struct platform_device *pdev, void *data)
{
	int rc;

	pdev->dev.platform_data = data;

	rc = platform_device_register(pdev);
	if (rc)
		dev_err(&pdev->dev,
			"%s: platform_device_register() failed = %d\n",
			__func__, rc);
}
/*
 * Dispatch framebuffer sub-device registration by name prefix.
 * Recognized prefixes: "mdp", "mipi_dsi", "lcdc"; anything else is
 * logged as an error.
 */
void __init msm_fb_register_device(char *name, void *data)
{
	static const struct {
		const char *prefix;
		size_t len;
		struct platform_device *pdev;
	} fb_devs[] = {
		{ "mdp", 3, &msm_mdp_device },
		{ "mipi_dsi", 8, &msm_mipi_dsi_device },
		{ "lcdc", 4, &msm_lcdc_device },
	};
	size_t i;

	for (i = 0; i < ARRAY_SIZE(fb_devs); i++) {
		if (!strncmp(name, fb_devs[i].prefix, fb_devs[i].len)) {
			msm_register_device(fb_devs[i].pdev, data);
			return;
		}
	}
	printk(KERN_ERR "%s: unknown device! %s\n", __func__, name);
}
#define PERPH_WEB_BLOCK_ADDR (0xA9D00040)
#define PDM0_CTL_OFFSET (0x04)
#define SIZE_8B (0x08)
static struct resource resources_led[] = {
{
.start = PERPH_WEB_BLOCK_ADDR,
.end = PERPH_WEB_BLOCK_ADDR + (SIZE_8B) - 1,
.name = "led-gpio-pdm",
.flags = IORESOURCE_MEM,
},
};
static struct led_info msm_kpbl_pdm_led_pdata = {
.name = "keyboard-backlight",
};
struct platform_device led_pdev = {
.name = "leds-msm-pdm",
/* use pdev id to represent pdm id */
.id = 0,
.num_resources = ARRAY_SIZE(resources_led),
.resource = resources_led,
.dev = {
.platform_data = &msm_kpbl_pdm_led_pdata,
},
};
struct platform_device asoc_msm_pcm = {
.name = "msm-dsp-audio",
.id = 0,
};
struct platform_device asoc_msm_dai0 = {
.name = "msm-codec-dai",
.id = 0,
};
struct platform_device asoc_msm_dai1 = {
.name = "msm-cpu-dai",
.id = 0,
};
/*
 * Early SoC init: bring up the clock tree, then pick the ACPU clock
 * driver matching the detected chip variant (7x27AA vs. 7x27A).
 * Always returns 0.
 */
int __init msm7x2x_misc_init(void)
{
	msm_clock_init(&msm7x27a_clock_init_data);
	if (cpu_is_msm7x27aa())
		acpuclk_init(&acpuclk_7x27aa_soc_data);
	else
		acpuclk_init(&acpuclk_7x27a_soc_data);
	return 0;
}
#ifdef CONFIG_CACHE_L2X0
static int __init msm7x27x_cache_init(void)
{
int aux_ctrl = 0;
/* Way Size 010(0x2) 32KB */
aux_ctrl = (0x1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) | \
(0x2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) | \
(0x1 << L2X0_AUX_CTRL_EVNT_MON_BUS_EN_SHIFT);
l2x0_init(MSM_L2CC_BASE, aux_ctrl, L2X0_AUX_CTRL_MASK);
return 0;
}
#else
static int __init msm7x27x_cache_init(void){ return 0; }
#endif
void __init msm_common_io_init(void)
{
msm_map_common_io();
msm7x27x_cache_init();
if (socinfo_init() < 0)
pr_err("%s: socinfo_init() failed!\n", __func__);
}
struct platform_device *msm_footswitch_devices[] = {
FS_PCOM(FS_GFX3D, "fs_gfx3d"),
};
unsigned msm_num_footswitch_devices = ARRAY_SIZE(msm_footswitch_devices);
|
cuteprince/ics_kernel_3.0.16_htc_pico
|
arch/arm/mach-msm/devices-msm7x27a.c
|
C
|
gpl-2.0
| 17,688
|
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/mfd/wcd9xxx/core.h>
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
#include <linux/mfd/wcd9xxx/wcd9306_registers.h>
#include <linux/mfd/wcd9xxx/pdata.h>
#include <linux/regulator/consumer.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/tlv.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/kernel.h>
#include <linux/gpio.h>
#include "wcd9306.h"
#include "wcd9xxx-resmgr.h"
#include "wcd9xxx-common.h"
#define TAPAN_HPH_PA_SETTLE_COMP_ON 5000
#define TAPAN_HPH_PA_SETTLE_COMP_OFF 13000
#define DAPM_MICBIAS2_EXTERNAL_STANDALONE "MIC BIAS2 External Standalone"
#define TAPAN_VALIDATE_RX_SBPORT_RANGE(port) ((port >= 16) && (port <= 20))
#define TAPAN_CONVERT_RX_SBPORT_ID(port) (port - 16) /* RX1 port ID = 0 */
#define TAPAN_VDD_CX_OPTIMAL_UA 10000
#define TAPAN_VDD_CX_SLEEP_UA 2000
/* RX_HPH_CNP_WG_TIME increases by 0.24ms */
#define TAPAN_WG_TIME_FACTOR_US 240
#define TAPAN_SB_PGD_PORT_RX_BASE 0x40
#define TAPAN_SB_PGD_PORT_TX_BASE 0x50
#define TAPAN_REGISTER_START_OFFSET 0x800
#define CODEC_REG_CFG_MINOR_VER 1
static struct regulator *tapan_codec_find_regulator(
struct snd_soc_codec *codec,
const char *name);
static atomic_t kp_tapan_priv;
static int spkr_drv_wrnd_param_set(const char *val,
const struct kernel_param *kp);
static int spkr_drv_wrnd = 1;
static struct kernel_param_ops spkr_drv_wrnd_param_ops = {
.set = spkr_drv_wrnd_param_set,
.get = param_get_int,
};
module_param_cb(spkr_drv_wrnd, &spkr_drv_wrnd_param_ops, &spkr_drv_wrnd, 0644);
MODULE_PARM_DESC(spkr_drv_wrnd,
"Run software workaround to avoid leakage on the speaker drive");
#define WCD9306_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\
SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000)
#define WCD9302_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000)
#define NUM_DECIMATORS 4
#define NUM_INTERPOLATORS 4
#define BITS_PER_REG 8
/* This actual number of TX ports supported in slimbus slave */
#define TAPAN_TX_PORT_NUMBER 16
#define TAPAN_RX_PORT_START_NUMBER 16
/* Nummer of TX ports actually connected from Slimbus slave to codec Digital */
#define TAPAN_SLIM_CODEC_TX_PORTS 5
#define TAPAN_I2S_MASTER_MODE_MASK 0x08
#define TAPAN_MCLK_CLK_12P288MHZ 12288000
#define TAPAN_MCLK_CLK_9P6MHZ 9600000
#define TAPAN_SLIM_CLOSE_TIMEOUT 1000
#define TAPAN_SLIM_IRQ_OVERFLOW (1 << 0)
#define TAPAN_SLIM_IRQ_UNDERFLOW (1 << 1)
#define TAPAN_SLIM_IRQ_PORT_CLOSED (1 << 2)
enum tapan_codec_type {
WCD9306,
WCD9302,
};
static enum tapan_codec_type codec_ver;
/*
* Multiplication factor to compute impedance on Tapan
* This is computed from (Vx / (m*Ical)) = (10mV/(180*30uA))
*/
#define TAPAN_ZDET_MUL_FACTOR 1852
static struct afe_param_cdc_reg_cfg audio_reg_cfg[] = {
{
CODEC_REG_CFG_MINOR_VER,
(TAPAN_REGISTER_START_OFFSET + TAPAN_SB_PGD_PORT_TX_BASE),
SB_PGD_PORT_TX_WATERMARK_N, 0x1E, 8, 0x1
},
{
CODEC_REG_CFG_MINOR_VER,
(TAPAN_REGISTER_START_OFFSET + TAPAN_SB_PGD_PORT_TX_BASE),
SB_PGD_PORT_TX_ENABLE_N, 0x1, 8, 0x1
},
{
CODEC_REG_CFG_MINOR_VER,
(TAPAN_REGISTER_START_OFFSET + TAPAN_SB_PGD_PORT_RX_BASE),
SB_PGD_PORT_RX_WATERMARK_N, 0x1E, 8, 0x1
},
{
CODEC_REG_CFG_MINOR_VER,
(TAPAN_REGISTER_START_OFFSET + TAPAN_SB_PGD_PORT_RX_BASE),
SB_PGD_PORT_RX_ENABLE_N, 0x1, 8, 0x1
},
{
CODEC_REG_CFG_MINOR_VER,
(TAPAN_REGISTER_START_OFFSET + TAPAN_A_CDC_ANC1_IIR_B1_CTL),
AANC_FF_GAIN_ADAPTIVE, 0x4, 8, 0
},
{
CODEC_REG_CFG_MINOR_VER,
(TAPAN_REGISTER_START_OFFSET + TAPAN_A_CDC_ANC1_IIR_B1_CTL),
AANC_FFGAIN_ADAPTIVE_EN, 0x8, 8, 0
},
{
CODEC_REG_CFG_MINOR_VER,
(TAPAN_REGISTER_START_OFFSET + TAPAN_A_CDC_ANC1_GAIN_CTL),
AANC_GAIN_CONTROL, 0xFF, 8, 0
},
};
static struct afe_param_cdc_reg_cfg_data tapan_audio_reg_cfg = {
.num_registers = ARRAY_SIZE(audio_reg_cfg),
.reg_data = audio_reg_cfg,
};
static struct afe_param_id_cdc_aanc_version tapan_cdc_aanc_version = {
.cdc_aanc_minor_version = AFE_API_VERSION_CDC_AANC_VERSION,
.aanc_hw_version = AANC_HW_BLOCK_VERSION_2,
};
enum {
AIF1_PB = 0,
AIF1_CAP,
AIF2_PB,
AIF2_CAP,
AIF3_PB,
AIF3_CAP,
NUM_CODEC_DAIS,
};
enum {
RX_MIX1_INP_SEL_ZERO = 0,
RX_MIX1_INP_SEL_SRC1,
RX_MIX1_INP_SEL_SRC2,
RX_MIX1_INP_SEL_IIR1,
RX_MIX1_INP_SEL_IIR2,
RX_MIX1_INP_SEL_RX1,
RX_MIX1_INP_SEL_RX2,
RX_MIX1_INP_SEL_RX3,
RX_MIX1_INP_SEL_RX4,
RX_MIX1_INP_SEL_RX5,
RX_MIX1_INP_SEL_AUXRX,
};
#define TAPAN_COMP_DIGITAL_GAIN_OFFSET 3
static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1);
static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
static struct snd_soc_dai_driver tapan_dai[];
static const DECLARE_TLV_DB_SCALE(aux_pga_gain, 0, 2, 0);
/* Codec supports 2 IIR filters */
enum {
IIR1 = 0,
IIR2,
IIR_MAX,
};
/* Codec supports 5 bands */
enum {
BAND1 = 0,
BAND2,
BAND3,
BAND4,
BAND5,
BAND_MAX,
};
enum {
COMPANDER_0,
COMPANDER_1,
COMPANDER_2,
COMPANDER_MAX,
};
enum {
COMPANDER_FS_8KHZ = 0,
COMPANDER_FS_16KHZ,
COMPANDER_FS_32KHZ,
COMPANDER_FS_48KHZ,
COMPANDER_FS_96KHZ,
COMPANDER_FS_192KHZ,
COMPANDER_FS_MAX,
};
struct comp_sample_dependent_params {
u32 peak_det_timeout;
u32 rms_meter_div_fact;
u32 rms_meter_resamp_fact;
};
struct hpf_work {
struct tapan_priv *tapan;
u32 decimator;
u8 tx_hpf_cut_of_freq;
struct delayed_work dwork;
};
static struct hpf_work tx_hpf_work[NUM_DECIMATORS];
static const struct wcd9xxx_ch tapan_rx_chs[TAPAN_RX_MAX] = {
WCD9XXX_CH(TAPAN_RX_PORT_START_NUMBER, 0),
WCD9XXX_CH(TAPAN_RX_PORT_START_NUMBER + 1, 1),
WCD9XXX_CH(TAPAN_RX_PORT_START_NUMBER + 2, 2),
WCD9XXX_CH(TAPAN_RX_PORT_START_NUMBER + 3, 3),
WCD9XXX_CH(TAPAN_RX_PORT_START_NUMBER + 4, 4),
};
static const struct wcd9xxx_ch tapan_tx_chs[TAPAN_TX_MAX] = {
WCD9XXX_CH(0, 0),
WCD9XXX_CH(1, 1),
WCD9XXX_CH(2, 2),
WCD9XXX_CH(3, 3),
WCD9XXX_CH(4, 4),
};
/* Per-DAI bitmask of the *other* capture DAIs whose shared virtual
 * ports must be checked for conflicts before this DAI may claim a port.
 * (Last two entry comments corrected: they are AIF3, not AIF2.) */
static const u32 vport_check_table[NUM_CODEC_DAIS] = {
	0,					/* AIF1_PB */
	(1 << AIF2_CAP) | (1 << AIF3_CAP),	/* AIF1_CAP */
	0,					/* AIF2_PB */
	(1 << AIF1_CAP) | (1 << AIF3_CAP),	/* AIF2_CAP */
	0,					/* AIF3_PB */
	(1 << AIF1_CAP) | (1 << AIF2_CAP),	/* AIF3_CAP */
};
/* I2S mode has a single capture path, so no cross-DAI conflicts; the
 * remaining NUM_CODEC_DAIS entries are implicitly zero. */
static const u32 vport_i2s_check_table[NUM_CODEC_DAIS] = {
	0,	/* AIF1_PB */
	0,	/* AIF1_CAP */
};
enum {
CP_REG_BUCK = 0,
CP_REG_BHELPER,
CP_REG_MAX,
};
struct tapan_priv {
struct snd_soc_codec *codec;
u32 adc_count;
u32 rx_bias_count;
s32 dmic_1_2_clk_cnt;
s32 dmic_3_4_clk_cnt;
s32 dmic_5_6_clk_cnt;
s32 ldo_h_users;
s32 micb_2_users;
u32 anc_slot;
bool anc_func;
/*track adie loopback mode*/
bool lb_mode;
/*track tapan interface type*/
u8 intf_type;
/* num of slim ports required */
struct wcd9xxx_codec_dai_data dai[NUM_CODEC_DAIS];
/*compander*/
int comp_enabled[COMPANDER_MAX];
u32 comp_fs[COMPANDER_MAX];
/* Maintain the status of AUX PGA */
int aux_pga_cnt;
u8 aux_l_gain;
u8 aux_r_gain;
bool dec_active[NUM_DECIMATORS];
bool spkr_pa_widget_on;
struct afe_param_cdc_slimbus_slave_cfg slimbus_slave_cfg;
/* resmgr module */
struct wcd9xxx_resmgr resmgr;
/* mbhc module */
struct wcd9xxx_mbhc mbhc;
/* class h specific data */
struct wcd9xxx_clsh_cdc_data clsh_d;
/* pointers to regulators required for chargepump */
struct regulator *cp_regulators[CP_REG_MAX];
/*
* list used to save/restore registers at start and
* end of impedance measurement
*/
struct list_head reg_save_restore;
int (*machine_codec_event_cb)(struct snd_soc_codec *codec,
enum wcd9xxx_codec_event);
};
static const u32 comp_shift[] = {
0,
1,
2,
};
static const int comp_rx_path[] = {
COMPANDER_1,
COMPANDER_1,
COMPANDER_2,
COMPANDER_2,
COMPANDER_MAX,
};
static const struct comp_sample_dependent_params comp_samp_params[] = {
{
/* 8 Khz */
.peak_det_timeout = 0x06,
.rms_meter_div_fact = 0x09,
.rms_meter_resamp_fact = 0x06,
},
{
/* 16 Khz */
.peak_det_timeout = 0x07,
.rms_meter_div_fact = 0x0A,
.rms_meter_resamp_fact = 0x0C,
},
{
/* 32 Khz */
.peak_det_timeout = 0x08,
.rms_meter_div_fact = 0x0B,
.rms_meter_resamp_fact = 0x1E,
},
{
/* 48 Khz */
.peak_det_timeout = 0x09,
.rms_meter_div_fact = 0x0B,
.rms_meter_resamp_fact = 0x28,
},
{
/* 96 Khz */
.peak_det_timeout = 0x0A,
.rms_meter_div_fact = 0x0C,
.rms_meter_resamp_fact = 0x50,
},
{
/* 192 Khz */
.peak_det_timeout = 0x0B,
.rms_meter_div_fact = 0xC,
.rms_meter_resamp_fact = 0xA0,
},
};
static unsigned short rx_digital_gain_reg[] = {
TAPAN_A_CDC_RX1_VOL_CTL_B2_CTL,
TAPAN_A_CDC_RX2_VOL_CTL_B2_CTL,
TAPAN_A_CDC_RX3_VOL_CTL_B2_CTL,
TAPAN_A_CDC_RX4_VOL_CTL_B2_CTL,
};
static unsigned short tx_digital_gain_reg[] = {
TAPAN_A_CDC_TX1_VOL_CTL_GAIN,
TAPAN_A_CDC_TX2_VOL_CTL_GAIN,
TAPAN_A_CDC_TX3_VOL_CTL_GAIN,
TAPAN_A_CDC_TX4_VOL_CTL_GAIN,
};
/*
 * module_param set hook for "spkr_drv_wrnd".
 *
 * Transitions the speaker-drive leakage workaround on (-1/0 -> 1) or
 * off (1 -> 0) under the codec mutex.  Enabling it takes an
 * audio-mode bandgap vote and forces the speaker driver enable bit;
 * disabling drops the vote and clears the bit unless the SPK PA DAPM
 * widget currently has it on.
 *
 * Returns 0, or the error from param_set_int().
 */
static int spkr_drv_wrnd_param_set(const char *val,
	const struct kernel_param *kp)
{
	struct snd_soc_codec *codec;
	int ret, old;
	struct tapan_priv *priv;

	/* NOTE(review): the codec's priv pointer is stored in an
	 * atomic_t, which is 32-bit -- this truncates pointers on 64-bit
	 * builds; confirm this driver only targets 32-bit. */
	priv = (struct tapan_priv *)atomic_read(&kp_tapan_priv);
	if (!priv) {
		/* Codec not probed yet; nothing to apply. */
		pr_debug("%s: codec isn't yet registered\n", __func__);
		return 0;
	}
	codec = priv->codec;
	mutex_lock(&codec->mutex);
	old = spkr_drv_wrnd;
	ret = param_set_int(val, kp);	/* updates spkr_drv_wrnd */
	if (ret) {
		mutex_unlock(&codec->mutex);
		return ret;
	}
	dev_dbg(codec->dev, "%s: spkr_drv_wrnd %d -> %d\n",
		__func__, old, spkr_drv_wrnd);
	if ((old == -1 || old == 0) && spkr_drv_wrnd == 1) {
		/* Workaround turned on: vote for the bandgap and keep the
		 * speaker driver powered. */
		WCD9XXX_BG_CLK_LOCK(&priv->resmgr);
		wcd9xxx_resmgr_get_bandgap(&priv->resmgr,
			WCD9XXX_BANDGAP_AUDIO_MODE);
		WCD9XXX_BG_CLK_UNLOCK(&priv->resmgr);
		snd_soc_update_bits(codec, TAPAN_A_SPKR_DRV_EN, 0x80, 0x80);
	} else if (old == 1 && spkr_drv_wrnd == 0) {
		/* Workaround turned off: release the bandgap vote; only
		 * clear the enable bit if DAPM doesn't own the PA. */
		WCD9XXX_BG_CLK_LOCK(&priv->resmgr);
		wcd9xxx_resmgr_put_bandgap(&priv->resmgr,
			WCD9XXX_BANDGAP_AUDIO_MODE);
		WCD9XXX_BG_CLK_UNLOCK(&priv->resmgr);
		if (!priv->spkr_pa_widget_on)
			snd_soc_update_bits(codec, TAPAN_A_SPKR_DRV_EN, 0x80,
				0x00);
	}
	mutex_unlock(&codec->mutex);
	return 0;
}
/* ALSA kcontrol get: report the currently selected ANC slot. */
static int tapan_get_anc_slot(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct tapan_priv *priv = snd_soc_codec_get_drvdata(codec);

	ucontrol->value.integer.value[0] = priv->anc_slot;
	return 0;
}
/* ALSA kcontrol put: select which ANC calibration slot to use. */
static int tapan_put_anc_slot(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct tapan_priv *priv = snd_soc_codec_get_drvdata(codec);

	priv->anc_slot = ucontrol->value.integer.value[0];
	return 0;
}
/* ALSA kcontrol get: report whether the ANC function is enabled. */
static int tapan_get_anc_func(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct tapan_priv *priv = snd_soc_codec_get_drvdata(codec);

	ucontrol->value.integer.value[0] = priv->anc_func ? 1 : 0;
	return 0;
}
/*
 * ALSA kcontrol put: switch between the ANC and the plain playback
 * DAPM paths.  Enabling ANC turns on the ANC virtual widgets and
 * disables the corresponding normal HPH/EAR widgets (and vice versa),
 * then resyncs DAPM.  Runs under the codec mutex.  Always returns 0.
 */
static int tapan_put_anc_func(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
	struct snd_soc_dapm_context *dapm = &codec->dapm;

	mutex_lock(&dapm->codec->mutex);
	tapan->anc_func = (!ucontrol->value.integer.value[0] ? false : true);
	/* NOTE(review): dev_err for a state-change trace looks like the
	 * wrong log level -- presumably should be dev_dbg; confirm. */
	dev_err(codec->dev, "%s: anc_func %x", __func__, tapan->anc_func);
	if (tapan->anc_func == true) {
		pr_info("enable anc virtual widgets");
		snd_soc_dapm_enable_pin(dapm, "ANC HPHR");
		snd_soc_dapm_enable_pin(dapm, "ANC HPHL");
		snd_soc_dapm_enable_pin(dapm, "ANC HEADPHONE");
		snd_soc_dapm_enable_pin(dapm, "ANC EAR PA");
		snd_soc_dapm_enable_pin(dapm, "ANC EAR");
		snd_soc_dapm_disable_pin(dapm, "HPHR");
		snd_soc_dapm_disable_pin(dapm, "HPHL");
		snd_soc_dapm_disable_pin(dapm, "HEADPHONE");
		snd_soc_dapm_disable_pin(dapm, "EAR PA");
		snd_soc_dapm_disable_pin(dapm, "EAR");
	} else {
		pr_info("disable anc virtual widgets");
		snd_soc_dapm_disable_pin(dapm, "ANC HPHR");
		snd_soc_dapm_disable_pin(dapm, "ANC HPHL");
		snd_soc_dapm_disable_pin(dapm, "ANC HEADPHONE");
		snd_soc_dapm_disable_pin(dapm, "ANC EAR PA");
		snd_soc_dapm_disable_pin(dapm, "ANC EAR");
		snd_soc_dapm_enable_pin(dapm, "HPHR");
		snd_soc_dapm_enable_pin(dapm, "HPHL");
		snd_soc_dapm_enable_pin(dapm, "HEADPHONE");
		snd_soc_dapm_enable_pin(dapm, "EAR PA");
		snd_soc_dapm_enable_pin(dapm, "EAR");
	}
	/* Apply the pin changes to the running DAPM graph. */
	snd_soc_dapm_sync(dapm);
	mutex_unlock(&dapm->codec->mutex);
	return 0;
}
/* ALSA kcontrol get: report the adie loopback mode flag. */
static int tapan_loopback_mode_get(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct tapan_priv *priv = snd_soc_codec_get_drvdata(codec);

	ucontrol->value.integer.value[0] = priv->lb_mode;
	dev_dbg(codec->dev, "%s: lb_mode = %d\n",
		__func__, priv->lb_mode);
	return 0;
}
/*
 * ALSA kcontrol put: set the adie loopback mode flag.
 * Accepts exactly 0 (off) or 1 (on); anything else is -EINVAL.
 */
static int tapan_loopback_mode_put(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct tapan_priv *priv = snd_soc_codec_get_drvdata(codec);
	long mode = ucontrol->value.integer.value[0];

	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
		__func__, ucontrol->value.integer.value[0]);

	if (mode != 0 && mode != 1)
		return -EINVAL;
	priv->lb_mode = (mode == 1);
	return 0;
}
/*
 * ALSA kcontrol get for the EAR PA gain.  The hardware keeps the gain
 * code in bits 7:5 of TAPAN_A_RX_EAR_GAIN: codes 0-5 map straight to
 * control values 0-5, code 7 maps to control value 6, and code 6 is an
 * unsupported hole (-EINVAL).
 */
static int tapan_pa_gain_get(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	u8 ear_pa_gain;
	int rc = 0;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);

	ear_pa_gain = snd_soc_read(codec, TAPAN_A_RX_EAR_GAIN) >> 5;
	if (ear_pa_gain <= 5) {
		ucontrol->value.integer.value[0] = ear_pa_gain;
	} else if (ear_pa_gain == 7) {
		/* Code 7 is presented to userspace as value 6. */
		ucontrol->value.integer.value[0] = (ear_pa_gain - 1);
	} else {
		rc = -EINVAL;
		pr_err("%s: ERROR: Unsupported Ear Gain = 0x%x\n",
			__func__, ear_pa_gain);
	}
	dev_dbg(codec->dev, "%s: ear_pa_gain = 0x%x\n", __func__, ear_pa_gain);
	return rc;
}
/*
 * ALSA kcontrol put for the EAR PA gain.  Control values 0-5 program
 * hardware codes 0-5; value 6 programs code 7 (code 6 is a hardware
 * hole); anything else is -EINVAL.  The code is written to bits 7:5 of
 * TAPAN_A_RX_EAR_GAIN.
 */
static int tapan_pa_gain_put(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	u8 ear_pa_gain = 0;
	int rc = 0;
	long val = ucontrol->value.integer.value[0];
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);

	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
		__func__, ucontrol->value.integer.value[0]);

	if (val >= 0 && val <= 5)
		ear_pa_gain = val;
	else if (val == 6)
		ear_pa_gain = 0x07;	/* value 6 -> hardware code 7 */
	else
		rc = -EINVAL;

	if (!rc)
		snd_soc_update_bits(codec, TAPAN_A_RX_EAR_GAIN,
			0xE0, ear_pa_gain << 5);
	return rc;
}
static int tapan_get_iir_enable_audio_mixer(
struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
int iir_idx = ((struct soc_multi_mixer_control *)
kcontrol->private_value)->reg;
int band_idx = ((struct soc_multi_mixer_control *)
kcontrol->private_value)->shift;
ucontrol->value.integer.value[0] =
(snd_soc_read(codec, (TAPAN_A_CDC_IIR1_CTL + 16 * iir_idx)) &
(1 << band_idx)) != 0;
dev_dbg(codec->dev, "%s: IIR #%d band #%d enable %d\n", __func__,
iir_idx, band_idx,
(uint32_t)ucontrol->value.integer.value[0]);
return 0;
}
static int tapan_put_iir_enable_audio_mixer(
struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
int iir_idx = ((struct soc_multi_mixer_control *)
kcontrol->private_value)->reg;
int band_idx = ((struct soc_multi_mixer_control *)
kcontrol->private_value)->shift;
int value = ucontrol->value.integer.value[0];
/* Mask first 5 bits, 6-8 are reserved */
snd_soc_update_bits(codec, (TAPAN_A_CDC_IIR1_CTL + 16 * iir_idx),
(1 << band_idx), (value << band_idx));
pr_debug("%s: IIR #%d band #%d enable %d\n", __func__,
iir_idx, band_idx,
((snd_soc_read(codec, (TAPAN_A_CDC_IIR1_CTL + 16 * iir_idx)) &
(1 << band_idx)) != 0));
return 0;
}
/*
 * Read one 32-bit IIR band coefficient from the codec, LSB first.
 * The coefficient address register (B1_CTL) does not auto-increment on
 * reads, so it is re-pointed before each of the four byte fetches from
 * the data register (B2_CTL).  The top two bits of the most
 * significant byte are reserved and masked off.
 */
static uint32_t get_iir_band_coeff(struct snd_soc_codec *codec,
	int iir_idx, int band_idx,
	int coeff_idx)
{
	uint32_t value = 0;
	uint32_t byte;
	int i;

	for (i = 0; i < 4; i++) {
		snd_soc_write(codec,
			(TAPAN_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx),
			((band_idx * BAND_MAX + coeff_idx)
			* sizeof(uint32_t) + i) & 0x7F);
		byte = snd_soc_read(codec,
			(TAPAN_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx));
		if (i == 3)
			byte &= 0x3F;	/* bits 7:6 reserved */
		value |= byte << (8 * i);
	}
	return value;
}
static int tapan_get_iir_band_audio_mixer(
struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
int iir_idx = ((struct soc_multi_mixer_control *)
kcontrol->private_value)->reg;
int band_idx = ((struct soc_multi_mixer_control *)
kcontrol->private_value)->shift;
ucontrol->value.integer.value[0] =
get_iir_band_coeff(codec, iir_idx, band_idx, 0);
ucontrol->value.integer.value[1] =
get_iir_band_coeff(codec, iir_idx, band_idx, 1);
ucontrol->value.integer.value[2] =
get_iir_band_coeff(codec, iir_idx, band_idx, 2);
ucontrol->value.integer.value[3] =
get_iir_band_coeff(codec, iir_idx, band_idx, 3);
ucontrol->value.integer.value[4] =
get_iir_band_coeff(codec, iir_idx, band_idx, 4);
dev_dbg(codec->dev, "%s: IIR #%d band #%d b0 = 0x%x\n"
"%s: IIR #%d band #%d b1 = 0x%x\n"
"%s: IIR #%d band #%d b2 = 0x%x\n"
"%s: IIR #%d band #%d a1 = 0x%x\n"
"%s: IIR #%d band #%d a2 = 0x%x\n",
__func__, iir_idx, band_idx,
(uint32_t)ucontrol->value.integer.value[0],
__func__, iir_idx, band_idx,
(uint32_t)ucontrol->value.integer.value[1],
__func__, iir_idx, band_idx,
(uint32_t)ucontrol->value.integer.value[2],
__func__, iir_idx, band_idx,
(uint32_t)ucontrol->value.integer.value[3],
__func__, iir_idx, band_idx,
(uint32_t)ucontrol->value.integer.value[4]);
return 0;
}
/*
 * Push one 32-bit IIR band coefficient into the codec, LSB first.
 * B2_CTL writes auto-increment the coefficient address, so the caller
 * only positions the address register once per band.  The top two bits
 * of the most significant byte are reserved and masked off.
 */
static void set_iir_band_coeff(struct snd_soc_codec *codec,
	int iir_idx, int band_idx,
	uint32_t value)
{
	uint32_t byte;
	int i;

	for (i = 0; i < 4; i++) {
		byte = (value >> (8 * i)) & 0xFF;
		if (i == 3)
			byte &= 0x3F;	/* bits 7:6 reserved */
		snd_soc_write(codec,
			(TAPAN_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx),
			byte);
	}
}
static int tapan_put_iir_band_audio_mixer(
struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
int iir_idx = ((struct soc_multi_mixer_control *)
kcontrol->private_value)->reg;
int band_idx = ((struct soc_multi_mixer_control *)
kcontrol->private_value)->shift;
/* Mask top bit it is reserved */
/* Updates addr automatically for each B2 write */
snd_soc_write(codec,
(TAPAN_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx),
(band_idx * BAND_MAX * sizeof(uint32_t)) & 0x7F);
set_iir_band_coeff(codec, iir_idx, band_idx,
ucontrol->value.integer.value[0]);
set_iir_band_coeff(codec, iir_idx, band_idx,
ucontrol->value.integer.value[1]);
set_iir_band_coeff(codec, iir_idx, band_idx,
ucontrol->value.integer.value[2]);
set_iir_band_coeff(codec, iir_idx, band_idx,
ucontrol->value.integer.value[3]);
set_iir_band_coeff(codec, iir_idx, band_idx,
ucontrol->value.integer.value[4]);
dev_dbg(codec->dev, "%s: IIR #%d band #%d b0 = 0x%x\n"
"%s: IIR #%d band #%d b1 = 0x%x\n"
"%s: IIR #%d band #%d b2 = 0x%x\n"
"%s: IIR #%d band #%d a1 = 0x%x\n"
"%s: IIR #%d band #%d a2 = 0x%x\n",
__func__, iir_idx, band_idx,
get_iir_band_coeff(codec, iir_idx, band_idx, 0),
__func__, iir_idx, band_idx,
get_iir_band_coeff(codec, iir_idx, band_idx, 1),
__func__, iir_idx, band_idx,
get_iir_band_coeff(codec, iir_idx, band_idx, 2),
__func__, iir_idx, band_idx,
get_iir_band_coeff(codec, iir_idx, band_idx, 3),
__func__, iir_idx, band_idx,
get_iir_band_coeff(codec, iir_idx, band_idx, 4));
return 0;
}
static int tapan_get_compander(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
int comp = ((struct soc_multi_mixer_control *)
kcontrol->private_value)->shift;
struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
ucontrol->value.integer.value[0] = tapan->comp_enabled[comp];
return 0;
}
/*
 * Cache the requested compander enable state.  For the HPH compander
 * (COMPANDER_1) the wavegen timing and chopper block are additionally
 * reprogrammed immediately when the state flips to enabled/disabled.
 * Always returns 0.
 */
static int tapan_set_compander(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
	int comp = ((struct soc_multi_mixer_control *)
		    kcontrol->private_value)->shift;
	int value = ucontrol->value.integer.value[0];

	dev_dbg(codec->dev, "%s: Compander %d enable current %d, new %d\n",
		__func__, comp, tapan->comp_enabled[comp], value);
	tapan->comp_enabled[comp] = value;

	if (comp == COMPANDER_1) {
		if (tapan->comp_enabled[comp] == 1) {
			/* Wavegen to 5 msec */
			snd_soc_write(codec, TAPAN_A_RX_HPH_CNP_WG_CTL, 0xDA);
			snd_soc_write(codec, TAPAN_A_RX_HPH_CNP_WG_TIME, 0x15);
			snd_soc_write(codec, TAPAN_A_RX_HPH_BIAS_WG_OCP, 0x2A);
			/* Enable Chopper */
			snd_soc_update_bits(codec,
					TAPAN_A_RX_HPH_CHOP_CTL, 0x80, 0x80);
			snd_soc_write(codec, TAPAN_A_NCP_DTEST, 0x20);
			pr_debug("%s: Enabled Chopper and set wavegen to 5 msec\n",
				 __func__);
		} else if (tapan->comp_enabled[comp] == 0) {
			/* Wavegen to 20 msec */
			snd_soc_write(codec, TAPAN_A_RX_HPH_CNP_WG_CTL, 0xDB);
			snd_soc_write(codec, TAPAN_A_RX_HPH_CNP_WG_TIME, 0x58);
			snd_soc_write(codec, TAPAN_A_RX_HPH_BIAS_WG_OCP, 0x1A);
			/* Disable CHOPPER block */
			snd_soc_update_bits(codec,
					TAPAN_A_RX_HPH_CHOP_CTL, 0x80, 0x00);
			snd_soc_write(codec, TAPAN_A_NCP_DTEST, 0x10);
			pr_debug("%s: Disabled Chopper and set wavegen to 20 msec\n",
				 __func__);
		}
	}
	return 0;
}
/*
 * Select the gain source for the paths driven by one compander.
 * When @enable is true the compander supplies the gain (select bit
 * cleared); otherwise the register gain value applies (select bit set).
 * Returns 0 on success, -EINVAL for an unknown compander id.
 */
static int tapan_config_gain_compander(struct snd_soc_codec *codec,
				       int comp, bool enable)
{
	const u8 sel = enable ? 0 : 1;
	int ret = 0;

	switch (comp) {
	case COMPANDER_0:
		/* Speaker driver path uses bit 2 of its gain register. */
		snd_soc_update_bits(codec, TAPAN_A_SPKR_DRV_GAIN,
				    1 << 2, sel << 2);
		break;
	case COMPANDER_1:
		/* Both headphone channels switch together. */
		snd_soc_update_bits(codec, TAPAN_A_RX_HPH_L_GAIN,
				    1 << 5, sel << 5);
		snd_soc_update_bits(codec, TAPAN_A_RX_HPH_R_GAIN,
				    1 << 5, sel << 5);
		break;
	case COMPANDER_2:
		/* Both line-out channels switch together. */
		snd_soc_update_bits(codec, TAPAN_A_RX_LINE_1_GAIN,
				    1 << 5, sel << 5);
		snd_soc_update_bits(codec, TAPAN_A_RX_LINE_2_GAIN,
				    1 << 5, sel << 5);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
	}
	return ret;
}
/*
 * Run a quick discharge cycle on one compander's level/RMS meters before
 * the sample-rate dependent parameters are programmed.  Sleeps for the
 * worst-case compander CnP settle time, so must be called from a context
 * that may sleep.
 */
static void tapan_discharge_comp(struct snd_soc_codec *codec, int comp)
{
	/* Level meter DIV Factor to 5*/
	snd_soc_update_bits(codec, TAPAN_A_CDC_COMP0_B2_CTL + (comp * 8), 0xF0,
			    0x05 << 4);
	/* RMS meter Sampling to 0x01 */
	snd_soc_write(codec, TAPAN_A_CDC_COMP0_B3_CTL + (comp * 8), 0x01);
	/*
	 * Worst case timeout for compander CnP sleep timeout.
	 * usleep_range() with min == max gives the hrtimer subsystem no
	 * slack to coalesce wakeups (see Documentation/timers/timers-howto),
	 * so allow a small upper margin beyond the 3 ms minimum.
	 */
	usleep_range(3000, 3100);
}
/*
 * Look up the CDC buck supply in the platform regulator table and return
 * its configured voltage.  Only the two supported values
 * (WCD9XXX_CDC_BUCK_MV_1P8 / WCD9XXX_CDC_BUCK_MV_2P15) are accepted;
 * anything else — or a missing regulator — yields
 * WCD9XXX_CDC_BUCK_UNSUPPORTED.
 */
static enum wcd9xxx_buck_volt tapan_codec_get_buck_mv(
					struct snd_soc_codec *codec)
{
	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
	struct wcd9xxx_pdata *pdata = tapan->resmgr.pdata;
	int buck_volt = WCD9XXX_CDC_BUCK_UNSUPPORTED;
	bool found = false;
	int idx;

	/* Scan for the buck supply entry; stop at the first match. */
	for (idx = 0; idx < ARRAY_SIZE(pdata->regulator); idx++) {
		const char *name = pdata->regulator[idx].name;

		if (!name)
			continue;
		if (strncmp(name, WCD9XXX_SUPPLY_BUCK_NAME,
			    sizeof(WCD9XXX_SUPPLY_BUCK_NAME)))
			continue;
		found = true;
		if ((pdata->regulator[idx].min_uV ==
		     WCD9XXX_CDC_BUCK_MV_1P8) ||
		    (pdata->regulator[idx].min_uV ==
		     WCD9XXX_CDC_BUCK_MV_2P15))
			buck_volt = pdata->regulator[idx].min_uV;
		break;
	}

	if (!found)
		dev_err(codec->dev,
			"%s: Failed to find regulator for %s\n",
			__func__, WCD9XXX_SUPPLY_BUCK_NAME);
	else
		dev_dbg(codec->dev,
			"%s: S4 voltage requested is %d\n",
			__func__, buck_volt);
	return buck_volt;
}
/*
 * DAPM event handler that configures one compander (id = widget shift).
 *
 * On PRE_PMU: programs the compander sample rate, the HPH static gain
 * offset (which depends on the buck voltage), enables the compander
 * clocks, pulses the reset bits, routes the gain source to the
 * compander, enables it, runs a meter discharge cycle and finally loads
 * the sample-rate dependent meter parameters.
 * On PRE_PMD: the reverse — disable, reset pulse, clocks off, gain
 * source back to register.
 *
 * The register write ORDER within each event is significant; do not
 * reorder.  Returns 0, or -EINVAL for an invalid compander/mux state.
 * No-op when the compander is not enabled in the cached state.
 */
static int tapan_config_compander(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
int mask, enable_mask;
u8 rdac5_mux;
struct snd_soc_codec *codec = w->codec;
struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
const int comp = w->shift;
const u32 rate = tapan->comp_fs[comp];
const struct comp_sample_dependent_params *comp_params =
&comp_samp_params[rate];
enum wcd9xxx_buck_volt buck_mv;
dev_dbg(codec->dev, "%s: %s event %d compander %d, enabled %d",
__func__, w->name, event, comp, tapan->comp_enabled[comp]);
if (!tapan->comp_enabled[comp])
return 0;
/* Compander 0 has single channel */
mask = (comp == COMPANDER_0 ? 0x01 : 0x03);
/* Buck voltage decides the HPH static gain offset below. */
buck_mv = tapan_codec_get_buck_mv(codec);
/* RDAC5 mux (bit 2 of CONN_MISC) selects the LO topology. */
rdac5_mux = snd_soc_read(codec, TAPAN_A_CDC_CONN_MISC);
rdac5_mux = (rdac5_mux & 0x04) >> 2;
if (comp == COMPANDER_0) { /* SPK compander */
enable_mask = 0x02;
} else if (comp == COMPANDER_1) { /* HPH compander */
enable_mask = 0x03;
} else if (comp == COMPANDER_2) { /* LO compander */
if (rdac5_mux == 0) { /* DEM4 */
/* for LO Stereo SE, enable Compander 2 left
* channel on RX3 interpolator Path and Compander 2
* right channel on RX4 interpolator Path.
*/
enable_mask = 0x03;
} else if (rdac5_mux == 1) { /* DEM3_INV */
/* for LO mono differential only enable Compander 2
* left channel on RX3 interpolator Path.
*/
enable_mask = 0x02;
} else {
dev_err(codec->dev, "%s: invalid rdac5_mux val %d",
__func__, rdac5_mux);
return -EINVAL;
}
} else {
dev_err(codec->dev, "%s: invalid compander %d", __func__, comp);
return -EINVAL;
}
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
/* Set compander Sample rate */
snd_soc_update_bits(codec,
TAPAN_A_CDC_COMP0_FS_CFG + (comp * 8),
0x07, rate);
/* Set the static gain offset for HPH Path */
if (comp == COMPANDER_1) {
if (buck_mv == WCD9XXX_CDC_BUCK_MV_2P15)
snd_soc_update_bits(codec,
TAPAN_A_CDC_COMP0_B4_CTL + (comp * 8),
0x80, 0x00);
else
snd_soc_update_bits(codec,
TAPAN_A_CDC_COMP0_B4_CTL + (comp * 8),
0x80, 0x80);
}
/* Enable RX interpolation path compander clocks */
snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_RX_B2_CTL,
0x01 << comp_shift[comp],
0x01 << comp_shift[comp]);
/* Toggle compander reset bits */
snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_OTHR_RESET_B2_CTL,
0x01 << comp_shift[comp],
0x01 << comp_shift[comp]);
snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_OTHR_RESET_B2_CTL,
0x01 << comp_shift[comp], 0);
/* Set gain source to compander */
tapan_config_gain_compander(codec, comp, true);
/* Compander enable */
snd_soc_update_bits(codec, TAPAN_A_CDC_COMP0_B1_CTL +
(comp * 8), enable_mask, enable_mask);
/* Discharge the level/RMS meters before final programming. */
tapan_discharge_comp(codec, comp);
/* Set sample rate dependent parameter */
snd_soc_write(codec, TAPAN_A_CDC_COMP0_B3_CTL + (comp * 8),
comp_params->rms_meter_resamp_fact);
snd_soc_update_bits(codec,
TAPAN_A_CDC_COMP0_B2_CTL + (comp * 8),
0xF0, comp_params->rms_meter_div_fact << 4);
snd_soc_update_bits(codec,
TAPAN_A_CDC_COMP0_B2_CTL + (comp * 8),
0x0F, comp_params->peak_det_timeout);
break;
case SND_SOC_DAPM_PRE_PMD:
/* Disable compander */
snd_soc_update_bits(codec,
TAPAN_A_CDC_COMP0_B1_CTL + (comp * 8),
enable_mask, 0x00);
/* Toggle compander reset bits */
snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_OTHR_RESET_B2_CTL,
mask << comp_shift[comp],
mask << comp_shift[comp]);
snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_OTHR_RESET_B2_CTL,
mask << comp_shift[comp], 0);
/* Turn off the clock for compander in pair */
snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_RX_B2_CTL,
mask << comp_shift[comp], 0);
/* Set gain source to register */
tapan_config_gain_compander(codec, comp, false);
break;
}
return 0;
}
/* Text tables and soc_enum descriptors for the mixer controls below. */
static const char * const tapan_loopback_mode_ctrl_text[] = {
"DISABLE", "ENABLE"};
static const struct soc_enum tapan_loopback_mode_ctl_enum[] = {
SOC_ENUM_SINGLE_EXT(2, tapan_loopback_mode_ctrl_text),
};
/* EAR PA gain steps, +6 dB down to -12 dB. */
static const char * const tapan_ear_pa_gain_text[] = {"POS_6_DB", "POS_4P5_DB",
"POS_3_DB", "POS_1P5_DB",
"POS_0_DB", "NEG_2P5_DB",
"NEG_12_DB"};
static const struct soc_enum tapan_ear_pa_gain_enum[] = {
SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tapan_ear_pa_gain_text),
tapan_ear_pa_gain_text),
};
static const char *const tapan_anc_func_text[] = {"OFF", "ON"};
static const struct soc_enum tapan_anc_func_enum =
SOC_ENUM_SINGLE_EXT(2, tapan_anc_func_text);
/* Cut-off frequency choices for the high-pass filters. */
static const char * const cf_text[] = {
"MIN_3DB_4Hz", "MIN_3DB_75Hz", "MIN_3DB_150Hz"
};
/* Per-decimator (TX) HPF cut-off selectors, bits 4-5 of each MUX_CTL. */
static const struct soc_enum cf_dec1_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_TX1_MUX_CTL, 4, 3, cf_text);
static const struct soc_enum cf_dec2_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_TX2_MUX_CTL, 4, 3, cf_text);
static const struct soc_enum cf_dec3_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_TX3_MUX_CTL, 4, 3, cf_text);
static const struct soc_enum cf_dec4_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_TX4_MUX_CTL, 4, 3, cf_text);
/* Per-interpolator (RX) HPF cut-off selectors. */
static const struct soc_enum cf_rxmix1_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_RX1_B4_CTL, 0, 3, cf_text);
static const struct soc_enum cf_rxmix2_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_RX2_B4_CTL, 0, 3, cf_text);
static const struct soc_enum cf_rxmix3_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_RX3_B4_CTL, 0, 3, cf_text);
static const struct soc_enum cf_rxmix4_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_RX4_B4_CTL, 0, 3, cf_text);
/* Class-H DSM input selector. */
static const char * const class_h_dsm_text[] = {
"ZERO", "RX_HPHL", "RX_SPKR"
};
static const struct soc_enum class_h_dsm_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_CLSH_CTL, 2, 3, class_h_dsm_text);
static const struct snd_kcontrol_new class_h_dsm_mux =
SOC_DAPM_ENUM("CLASS_H_DSM MUX Mux", class_h_dsm_enum);
/* Virtual (register-less) muxes gating the RX1/RX2 interpolators. */
static const char * const rx1_interpolator_text[] = {
"ZERO", "RX1 MIX2"
};
static const struct soc_enum rx1_interpolator_enum =
SOC_ENUM_SINGLE(0, 0, 2, rx1_interpolator_text);
static const struct snd_kcontrol_new rx1_interpolator =
SOC_DAPM_ENUM_VIRT("RX1 INTERPOLATOR Mux", rx1_interpolator_enum);
static const char * const rx2_interpolator_text[] = {
"ZERO", "RX2 MIX2"
};
static const struct soc_enum rx2_interpolator_enum =
SOC_ENUM_SINGLE(0, 1, 2, rx2_interpolator_text);
static const struct snd_kcontrol_new rx2_interpolator =
SOC_DAPM_ENUM_VIRT("RX2 INTERPOLATOR Mux", rx2_interpolator_enum);
/*
 * Report the measured headphone impedance for one channel.  The shift
 * field of private_value selects the channel: 0 = left, 1 = right.
 * Always returns 0.
 */
static int tapan_hph_impedance_get(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	uint32_t zl, zr;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct tapan_priv *priv = snd_soc_codec_get_drvdata(codec);
	struct soc_multi_mixer_control *mc =
		(struct soc_multi_mixer_control *)(kcontrol->private_value);
	bool hphr = mc->shift;

	/* MBHC holds the most recent impedance measurement pair. */
	wcd9xxx_mbhc_get_impedance(&priv->mbhc, &zl, &zr);
	pr_debug("%s: zl %u, zr %u\n", __func__, zl, zr);

	ucontrol->value.integer.value[0] = hphr ? zr : zl;
	return 0;
}
/*
 * Mixer controls registered on all Tapan variants: analog/digital gains,
 * HPF switches and cut-offs, IIR band enables/coefficients and the HPH
 * impedance read-backs.
 */
static const struct snd_kcontrol_new tapan_common_snd_controls[] = {
SOC_ENUM_EXT("EAR PA Gain", tapan_ear_pa_gain_enum[0],
tapan_pa_gain_get, tapan_pa_gain_put),
SOC_ENUM_EXT("LOOPBACK Mode", tapan_loopback_mode_ctl_enum[0],
tapan_loopback_mode_get, tapan_loopback_mode_put),
/* Analog output volumes (inverted: larger register value = quieter). */
SOC_SINGLE_TLV("HPHL Volume", TAPAN_A_RX_HPH_L_GAIN, 0, 20, 1,
line_gain),
SOC_SINGLE_TLV("HPHR Volume", TAPAN_A_RX_HPH_R_GAIN, 0, 20, 1,
line_gain),
SOC_SINGLE_TLV("LINEOUT1 Volume", TAPAN_A_RX_LINE_1_GAIN, 0, 20, 1,
line_gain),
SOC_SINGLE_TLV("LINEOUT2 Volume", TAPAN_A_RX_LINE_2_GAIN, 0, 20, 1,
line_gain),
SOC_SINGLE_TLV("SPK DRV Volume", TAPAN_A_SPKR_DRV_GAIN, 3, 8, 1,
line_gain),
/* ADC analog input gains. */
SOC_SINGLE_TLV("ADC1 Volume", TAPAN_A_TX_1_EN, 2, 19, 0, analog_gain),
SOC_SINGLE_TLV("ADC2 Volume", TAPAN_A_TX_2_EN, 2, 19, 0, analog_gain),
SOC_SINGLE_TLV("ADC3 Volume", TAPAN_A_TX_3_EN, 2, 19, 0, analog_gain),
SOC_SINGLE_TLV("ADC4 Volume", TAPAN_A_TX_4_EN, 2, 19, 0, analog_gain),
/* Signed digital volumes, -84..+40 in codec steps. */
SOC_SINGLE_S8_TLV("RX1 Digital Volume", TAPAN_A_CDC_RX1_VOL_CTL_B2_CTL,
-84, 40, digital_gain),
SOC_SINGLE_S8_TLV("RX2 Digital Volume", TAPAN_A_CDC_RX2_VOL_CTL_B2_CTL,
-84, 40, digital_gain),
SOC_SINGLE_S8_TLV("RX3 Digital Volume", TAPAN_A_CDC_RX3_VOL_CTL_B2_CTL,
-84, 40, digital_gain),
SOC_SINGLE_S8_TLV("DEC1 Volume", TAPAN_A_CDC_TX1_VOL_CTL_GAIN, -84, 40,
digital_gain),
SOC_SINGLE_S8_TLV("DEC2 Volume", TAPAN_A_CDC_TX2_VOL_CTL_GAIN, -84, 40,
digital_gain),
SOC_SINGLE_S8_TLV("IIR1 INP1 Volume", TAPAN_A_CDC_IIR1_GAIN_B1_CTL, -84,
40, digital_gain),
SOC_SINGLE_S8_TLV("IIR1 INP2 Volume", TAPAN_A_CDC_IIR1_GAIN_B2_CTL, -84,
40, digital_gain),
SOC_SINGLE_S8_TLV("IIR1 INP3 Volume", TAPAN_A_CDC_IIR1_GAIN_B3_CTL, -84,
40, digital_gain),
SOC_SINGLE_S8_TLV("IIR1 INP4 Volume", TAPAN_A_CDC_IIR1_GAIN_B4_CTL, -84,
40, digital_gain),
SOC_SINGLE_S8_TLV("IIR2 INP1 Volume", TAPAN_A_CDC_IIR2_GAIN_B1_CTL, -84,
40, digital_gain),
SOC_SINGLE_S8_TLV("IIR2 INP2 Volume", TAPAN_A_CDC_IIR2_GAIN_B2_CTL, -84,
40, digital_gain),
SOC_SINGLE_S8_TLV("IIR2 INP3 Volume", TAPAN_A_CDC_IIR2_GAIN_B3_CTL, -84,
40, digital_gain),
SOC_SINGLE_S8_TLV("IIR2 INP4 Volume", TAPAN_A_CDC_IIR2_GAIN_B4_CTL, -84,
40, digital_gain),
/* High-pass filter cut-off selection and enable per TX/RX path. */
SOC_ENUM("TX1 HPF cut off", cf_dec1_enum),
SOC_ENUM("TX2 HPF cut off", cf_dec2_enum),
SOC_ENUM("TX3 HPF cut off", cf_dec3_enum),
SOC_ENUM("TX4 HPF cut off", cf_dec4_enum),
SOC_SINGLE("TX1 HPF Switch", TAPAN_A_CDC_TX1_MUX_CTL, 3, 1, 0),
SOC_SINGLE("TX2 HPF Switch", TAPAN_A_CDC_TX2_MUX_CTL, 3, 1, 0),
SOC_SINGLE("TX3 HPF Switch", TAPAN_A_CDC_TX3_MUX_CTL, 3, 1, 0),
SOC_SINGLE("TX4 HPF Switch", TAPAN_A_CDC_TX4_MUX_CTL, 3, 1, 0),
SOC_SINGLE("RX1 HPF Switch", TAPAN_A_CDC_RX1_B5_CTL, 2, 1, 0),
SOC_SINGLE("RX2 HPF Switch", TAPAN_A_CDC_RX2_B5_CTL, 2, 1, 0),
SOC_SINGLE("RX3 HPF Switch", TAPAN_A_CDC_RX3_B5_CTL, 2, 1, 0),
SOC_ENUM("RX1 HPF cut off", cf_rxmix1_enum),
SOC_ENUM("RX2 HPF cut off", cf_rxmix2_enum),
SOC_ENUM("RX3 HPF cut off", cf_rxmix3_enum),
/* Per-band IIR enables (two IIR blocks, five bands each). */
SOC_SINGLE_EXT("IIR1 Enable Band1", IIR1, BAND1, 1, 0,
tapan_get_iir_enable_audio_mixer, tapan_put_iir_enable_audio_mixer),
SOC_SINGLE_EXT("IIR1 Enable Band2", IIR1, BAND2, 1, 0,
tapan_get_iir_enable_audio_mixer, tapan_put_iir_enable_audio_mixer),
SOC_SINGLE_EXT("IIR1 Enable Band3", IIR1, BAND3, 1, 0,
tapan_get_iir_enable_audio_mixer, tapan_put_iir_enable_audio_mixer),
SOC_SINGLE_EXT("IIR1 Enable Band4", IIR1, BAND4, 1, 0,
tapan_get_iir_enable_audio_mixer, tapan_put_iir_enable_audio_mixer),
SOC_SINGLE_EXT("IIR1 Enable Band5", IIR1, BAND5, 1, 0,
tapan_get_iir_enable_audio_mixer, tapan_put_iir_enable_audio_mixer),
SOC_SINGLE_EXT("IIR2 Enable Band1", IIR2, BAND1, 1, 0,
tapan_get_iir_enable_audio_mixer, tapan_put_iir_enable_audio_mixer),
SOC_SINGLE_EXT("IIR2 Enable Band2", IIR2, BAND2, 1, 0,
tapan_get_iir_enable_audio_mixer, tapan_put_iir_enable_audio_mixer),
SOC_SINGLE_EXT("IIR2 Enable Band3", IIR2, BAND3, 1, 0,
tapan_get_iir_enable_audio_mixer, tapan_put_iir_enable_audio_mixer),
SOC_SINGLE_EXT("IIR2 Enable Band4", IIR2, BAND4, 1, 0,
tapan_get_iir_enable_audio_mixer, tapan_put_iir_enable_audio_mixer),
SOC_SINGLE_EXT("IIR2 Enable Band5", IIR2, BAND5, 1, 0,
tapan_get_iir_enable_audio_mixer, tapan_put_iir_enable_audio_mixer),
/* Five 32-bit coefficients per IIR band (b0,b1,b2,a1,a2). */
SOC_SINGLE_MULTI_EXT("IIR1 Band1", IIR1, BAND1, 255, 0, 5,
tapan_get_iir_band_audio_mixer, tapan_put_iir_band_audio_mixer),
SOC_SINGLE_MULTI_EXT("IIR1 Band2", IIR1, BAND2, 255, 0, 5,
tapan_get_iir_band_audio_mixer, tapan_put_iir_band_audio_mixer),
SOC_SINGLE_MULTI_EXT("IIR1 Band3", IIR1, BAND3, 255, 0, 5,
tapan_get_iir_band_audio_mixer, tapan_put_iir_band_audio_mixer),
SOC_SINGLE_MULTI_EXT("IIR1 Band4", IIR1, BAND4, 255, 0, 5,
tapan_get_iir_band_audio_mixer, tapan_put_iir_band_audio_mixer),
SOC_SINGLE_MULTI_EXT("IIR1 Band5", IIR1, BAND5, 255, 0, 5,
tapan_get_iir_band_audio_mixer, tapan_put_iir_band_audio_mixer),
SOC_SINGLE_MULTI_EXT("IIR2 Band1", IIR2, BAND1, 255, 0, 5,
tapan_get_iir_band_audio_mixer, tapan_put_iir_band_audio_mixer),
SOC_SINGLE_MULTI_EXT("IIR2 Band2", IIR2, BAND2, 255, 0, 5,
tapan_get_iir_band_audio_mixer, tapan_put_iir_band_audio_mixer),
SOC_SINGLE_MULTI_EXT("IIR2 Band3", IIR2, BAND3, 255, 0, 5,
tapan_get_iir_band_audio_mixer, tapan_put_iir_band_audio_mixer),
SOC_SINGLE_MULTI_EXT("IIR2 Band4", IIR2, BAND4, 255, 0, 5,
tapan_get_iir_band_audio_mixer, tapan_put_iir_band_audio_mixer),
SOC_SINGLE_MULTI_EXT("IIR2 Band5", IIR2, BAND5, 255, 0, 5,
tapan_get_iir_band_audio_mixer, tapan_put_iir_band_audio_mixer),
/* Read-only impedance values measured by MBHC (shift: 0=L, 1=R). */
SOC_SINGLE_EXT("HPHL Impedance", 0, 0, UINT_MAX, 0,
tapan_hph_impedance_get, NULL),
SOC_SINGLE_EXT("HPHR Impedance", 0, 1, UINT_MAX, 0,
tapan_hph_impedance_get, NULL),
};
/*
 * Mixer controls specific to the WCD9306 variant: the extra ADC5/DEC3/
 * DEC4/RX4 paths, ANC and the compander switches.
 */
static const struct snd_kcontrol_new tapan_9306_snd_controls[] = {
SOC_SINGLE_TLV("ADC5 Volume", TAPAN_A_TX_5_EN, 2, 19, 0, analog_gain),
SOC_SINGLE_S8_TLV("RX4 Digital Volume", TAPAN_A_CDC_RX4_VOL_CTL_B2_CTL,
-84, 40, digital_gain),
SOC_SINGLE_S8_TLV("DEC3 Volume", TAPAN_A_CDC_TX3_VOL_CTL_GAIN, -84, 40,
digital_gain),
SOC_SINGLE_S8_TLV("DEC4 Volume", TAPAN_A_CDC_TX4_VOL_CTL_GAIN, -84, 40,
digital_gain),
SOC_SINGLE_EXT("ANC Slot", SND_SOC_NOPM, 0, 100, 0, tapan_get_anc_slot,
tapan_put_anc_slot),
SOC_ENUM_EXT("ANC Function", tapan_anc_func_enum, tapan_get_anc_func,
tapan_put_anc_func),
SOC_SINGLE("RX4 HPF Switch", TAPAN_A_CDC_RX4_B5_CTL, 2, 1, 0),
SOC_ENUM("RX4 HPF cut off", cf_rxmix4_enum),
/* Compander enable switches (cached; applied on DAPM events). */
SOC_SINGLE_EXT("COMP0 Switch", SND_SOC_NOPM, COMPANDER_0, 1, 0,
tapan_get_compander, tapan_set_compander),
SOC_SINGLE_EXT("COMP1 Switch", SND_SOC_NOPM, COMPANDER_1, 1, 0,
tapan_get_compander, tapan_set_compander),
SOC_SINGLE_EXT("COMP2 Switch", SND_SOC_NOPM, COMPANDER_2, 1, 0,
tapan_get_compander, tapan_set_compander),
};
/*
 * Input-name tables for the routing muxes.  Each table's entry order
 * must match the corresponding register field encoding; "RSVD" entries
 * are placeholders keeping later values aligned with the hardware.
 */
static const char * const rx_1_2_mix1_text[] = {
"ZERO", "SRC1", "SRC2", "IIR1", "IIR2", "RX1", "RX2", "RX3", "RX4",
"RX5", "AUXRX", "AUXTX1"
};
static const char * const rx_3_4_mix1_text[] = {
"ZERO", "SRC1", "SRC2", "IIR1", "IIR2", "RX1", "RX2", "RX3", "RX4",
"RX5", "AUXRX", "AUXTX1", "AUXTX2"
};
static const char * const rx_mix2_text[] = {
"ZERO", "SRC1", "SRC2", "IIR1", "IIR2"
};
/* DAC (RDAC) source selectors. */
static const char * const rx_rdac3_text[] = {
"DEM1", "DEM2"
};
static const char * const rx_rdac4_text[] = {
"DEM3", "DEM2"
};
static const char * const rx_rdac5_text[] = {
"DEM4", "DEM3_INV"
};
/* SLIMbus TX port source selectors. */
static const char * const sb_tx_1_2_mux_text[] = {
"ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4",
"RSVD", "RSVD", "RSVD",
"DEC1", "DEC2", "DEC3", "DEC4"
};
static const char * const sb_tx3_mux_text[] = {
"ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4",
"RSVD", "RSVD", "RSVD", "RSVD", "RSVD",
"DEC3"
};
static const char * const sb_tx4_mux_text[] = {
"ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4",
"RSVD", "RSVD", "RSVD", "RSVD", "RSVD", "RSVD",
"DEC4"
};
static const char * const sb_tx5_mux_text[] = {
"ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4",
"RSVD", "RSVD", "RSVD",
"DEC1"
};
/* Decimator input selectors (ADCs and digital mics). */
static const char * const dec_1_2_mux_text[] = {
"ZERO", "ADC1", "ADC2", "ADC3", "ADC4", "ADCMB",
"DMIC1", "DMIC2", "DMIC3", "DMIC4"
};
static const char * const dec3_mux_text[] = {
"ZERO", "ADC1", "ADC2", "ADC3", "ADC4", "ADC5", "ADCMB",
"DMIC1", "DMIC2", "DMIC3", "DMIC4",
"ANCFBTUNE1"
};
static const char * const dec4_mux_text[] = {
"ZERO", "ADC1", "ADC2", "ADC3", "ADC4", "ADC5", "ADCMB",
"DMIC1", "DMIC2", "DMIC3", "DMIC4",
"ANCFBTUNE2"
};
/* ANC input and feedback selectors. */
static const char * const anc_mux_text[] = {
"ZERO", "ADC1", "ADC2", "ADC3", "ADC4", "ADC5",
"RSVD", "RSVD", "RSVD",
"DMIC1", "DMIC2", "DMIC3", "DMIC4",
"RSVD", "RSVD"
};
static const char * const anc1_fb_mux_text[] = {
"ZERO", "EAR_HPH_L", "EAR_LINE_1",
};
/* IIR filter input selectors. */
static const char * const iir_inp_text[] = {
"ZERO", "DEC1", "DEC2", "DEC3", "DEC4",
"RX1", "RX2", "RX3", "RX4", "RX5"
};
/*
 * soc_enum descriptors binding each mux text table to its routing
 * register and bit field (register, shift, item count, texts).
 */
static const struct soc_enum rx_mix1_inp1_chain_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_RX1_B1_CTL, 0, 12, rx_1_2_mix1_text);
static const struct soc_enum rx_mix1_inp2_chain_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_RX1_B1_CTL, 4, 12, rx_1_2_mix1_text);
static const struct soc_enum rx_mix1_inp3_chain_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_RX1_B2_CTL, 0, 12, rx_1_2_mix1_text);
static const struct soc_enum rx2_mix1_inp1_chain_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_RX2_B1_CTL, 0, 12, rx_1_2_mix1_text);
static const struct soc_enum rx2_mix1_inp2_chain_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_RX2_B1_CTL, 4, 12, rx_1_2_mix1_text);
static const struct soc_enum rx3_mix1_inp1_chain_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_RX3_B1_CTL, 0, 13, rx_3_4_mix1_text);
static const struct soc_enum rx3_mix1_inp2_chain_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_RX3_B1_CTL, 4, 13, rx_3_4_mix1_text);
static const struct soc_enum rx3_mix1_inp3_chain_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_RX3_B2_CTL, 0, 13, rx_3_4_mix1_text);
static const struct soc_enum rx4_mix1_inp1_chain_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_RX4_B1_CTL, 0, 13, rx_3_4_mix1_text);
static const struct soc_enum rx4_mix1_inp2_chain_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_RX4_B1_CTL, 4, 13, rx_3_4_mix1_text);
static const struct soc_enum rx4_mix1_inp3_chain_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_RX4_B2_CTL, 0, 13, rx_3_4_mix1_text);
static const struct soc_enum rx1_mix2_inp1_chain_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_RX1_B3_CTL, 0, 5, rx_mix2_text);
static const struct soc_enum rx1_mix2_inp2_chain_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_RX1_B3_CTL, 3, 5, rx_mix2_text);
static const struct soc_enum rx2_mix2_inp1_chain_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_RX2_B3_CTL, 0, 5, rx_mix2_text);
static const struct soc_enum rx2_mix2_inp2_chain_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_RX2_B3_CTL, 3, 5, rx_mix2_text);
static const struct soc_enum rx4_mix2_inp1_chain_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_RX4_B3_CTL, 0, 5, rx_mix2_text);
static const struct soc_enum rx4_mix2_inp2_chain_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_RX4_B3_CTL, 3, 5, rx_mix2_text);
static const struct soc_enum rx_rdac3_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_RX2_B2_CTL, 4, 2, rx_rdac3_text);
static const struct soc_enum rx_rdac4_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_MISC, 1, 2, rx_rdac4_text);
static const struct soc_enum rx_rdac5_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_MISC, 2, 2, rx_rdac5_text);
static const struct soc_enum sb_tx1_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_TX_SB_B1_CTL, 0, 12,
sb_tx_1_2_mux_text);
static const struct soc_enum sb_tx2_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_TX_SB_B2_CTL, 0, 12,
sb_tx_1_2_mux_text);
static const struct soc_enum sb_tx3_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_TX_SB_B3_CTL, 0, 11, sb_tx3_mux_text);
static const struct soc_enum sb_tx4_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_TX_SB_B4_CTL, 0, 12, sb_tx4_mux_text);
static const struct soc_enum sb_tx5_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_TX_SB_B5_CTL, 0, 9, sb_tx5_mux_text);
static const struct soc_enum dec1_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_TX_B1_CTL, 0, 10, dec_1_2_mux_text);
static const struct soc_enum dec2_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_TX_B1_CTL, 4, 10, dec_1_2_mux_text);
static const struct soc_enum dec3_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_TX_B2_CTL, 0, 12, dec3_mux_text);
static const struct soc_enum dec4_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_TX_B2_CTL, 4, 12, dec4_mux_text);
static const struct soc_enum anc1_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_ANC_B1_CTL, 0, 15, anc_mux_text);
static const struct soc_enum anc2_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_ANC_B1_CTL, 4, 15, anc_mux_text);
static const struct soc_enum anc1_fb_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_ANC_B2_CTL, 0, 3, anc1_fb_mux_text);
static const struct soc_enum iir1_inp1_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_EQ1_B1_CTL, 0, 10, iir_inp_text);
static const struct soc_enum iir1_inp2_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_EQ1_B2_CTL, 0, 10, iir_inp_text);
static const struct soc_enum iir1_inp3_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_EQ1_B3_CTL, 0, 10, iir_inp_text);
static const struct soc_enum iir1_inp4_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_EQ1_B4_CTL, 0, 10, iir_inp_text);
static const struct soc_enum iir2_inp1_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_EQ2_B1_CTL, 0, 10, iir_inp_text);
static const struct soc_enum iir2_inp2_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_EQ2_B2_CTL, 0, 10, iir_inp_text);
static const struct soc_enum iir2_inp3_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_EQ2_B3_CTL, 0, 10, iir_inp_text);
static const struct soc_enum iir2_inp4_mux_enum =
SOC_ENUM_SINGLE(TAPAN_A_CDC_CONN_EQ2_B4_CTL, 0, 10, iir_inp_text);
/* DAPM mux kcontrols built from the routing soc_enum descriptors. */
static const struct snd_kcontrol_new rx_mix1_inp1_mux =
SOC_DAPM_ENUM("RX1 MIX1 INP1 Mux", rx_mix1_inp1_chain_enum);
static const struct snd_kcontrol_new rx_mix1_inp2_mux =
SOC_DAPM_ENUM("RX1 MIX1 INP2 Mux", rx_mix1_inp2_chain_enum);
static const struct snd_kcontrol_new rx_mix1_inp3_mux =
SOC_DAPM_ENUM("RX1 MIX1 INP3 Mux", rx_mix1_inp3_chain_enum);
static const struct snd_kcontrol_new rx2_mix1_inp1_mux =
SOC_DAPM_ENUM("RX2 MIX1 INP1 Mux", rx2_mix1_inp1_chain_enum);
static const struct snd_kcontrol_new rx2_mix1_inp2_mux =
SOC_DAPM_ENUM("RX2 MIX1 INP2 Mux", rx2_mix1_inp2_chain_enum);
static const struct snd_kcontrol_new rx3_mix1_inp1_mux =
SOC_DAPM_ENUM("RX3 MIX1 INP1 Mux", rx3_mix1_inp1_chain_enum);
static const struct snd_kcontrol_new rx3_mix1_inp2_mux =
SOC_DAPM_ENUM("RX3 MIX1 INP2 Mux", rx3_mix1_inp2_chain_enum);
static const struct snd_kcontrol_new rx3_mix1_inp3_mux =
SOC_DAPM_ENUM("RX3 MIX1 INP3 Mux", rx3_mix1_inp3_chain_enum);
static const struct snd_kcontrol_new rx4_mix1_inp1_mux =
SOC_DAPM_ENUM("RX4 MIX1 INP1 Mux", rx4_mix1_inp1_chain_enum);
static const struct snd_kcontrol_new rx4_mix1_inp2_mux =
SOC_DAPM_ENUM("RX4 MIX1 INP2 Mux", rx4_mix1_inp2_chain_enum);
static const struct snd_kcontrol_new rx4_mix1_inp3_mux =
SOC_DAPM_ENUM("RX4 MIX1 INP3 Mux", rx4_mix1_inp3_chain_enum);
static const struct snd_kcontrol_new rx1_mix2_inp1_mux =
SOC_DAPM_ENUM("RX1 MIX2 INP1 Mux", rx1_mix2_inp1_chain_enum);
static const struct snd_kcontrol_new rx1_mix2_inp2_mux =
SOC_DAPM_ENUM("RX1 MIX2 INP2 Mux", rx1_mix2_inp2_chain_enum);
static const struct snd_kcontrol_new rx2_mix2_inp1_mux =
SOC_DAPM_ENUM("RX2 MIX2 INP1 Mux", rx2_mix2_inp1_chain_enum);
static const struct snd_kcontrol_new rx2_mix2_inp2_mux =
SOC_DAPM_ENUM("RX2 MIX2 INP2 Mux", rx2_mix2_inp2_chain_enum);
static const struct snd_kcontrol_new rx4_mix2_inp1_mux =
SOC_DAPM_ENUM("RX4 MIX2 INP1 Mux", rx4_mix2_inp1_chain_enum);
static const struct snd_kcontrol_new rx4_mix2_inp2_mux =
SOC_DAPM_ENUM("RX4 MIX2 INP2 Mux", rx4_mix2_inp2_chain_enum);
static const struct snd_kcontrol_new rx_dac3_mux =
SOC_DAPM_ENUM("RDAC3 MUX Mux", rx_rdac3_enum);
static const struct snd_kcontrol_new rx_dac4_mux =
SOC_DAPM_ENUM("RDAC4 MUX Mux", rx_rdac4_enum);
static const struct snd_kcontrol_new rx_dac5_mux =
SOC_DAPM_ENUM("RDAC5 MUX Mux", rx_rdac5_enum);
static const struct snd_kcontrol_new sb_tx1_mux =
SOC_DAPM_ENUM("SLIM TX1 MUX Mux", sb_tx1_mux_enum);
static const struct snd_kcontrol_new sb_tx2_mux =
SOC_DAPM_ENUM("SLIM TX2 MUX Mux", sb_tx2_mux_enum);
static const struct snd_kcontrol_new sb_tx3_mux =
SOC_DAPM_ENUM("SLIM TX3 MUX Mux", sb_tx3_mux_enum);
static const struct snd_kcontrol_new sb_tx4_mux =
SOC_DAPM_ENUM("SLIM TX4 MUX Mux", sb_tx4_mux_enum);
static const struct snd_kcontrol_new sb_tx5_mux =
SOC_DAPM_ENUM("SLIM TX5 MUX Mux", sb_tx5_mux_enum);
/*
 * Put handler for the DEC1..DEC4 input muxes.  Besides updating the mux
 * via snd_soc_dapm_put_enum_double(), it derives the decimator number
 * from the widget name and programs the ADC/DMIC select bit of the
 * matching TXn_MUX_CTL register (0 = ADC source, 1 = DMIC source; the
 * boundary between the two differs for DEC1/2 vs DEC3/4 because their
 * mux text tables differ).
 *
 * Returns 0 on success, -EINVAL for a malformed widget name or mux
 * value, -ENOMEM on allocation failure.
 *
 * Fix: the strpbrk() failure path used to `return -EINVAL` directly,
 * leaking the kstrndup()'d widget_name; it now goes through the common
 * cleanup label like every other error path.
 */
static int wcd9306_put_dec_enum(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
	struct snd_soc_dapm_widget *w = wlist->widgets[0];
	struct snd_soc_codec *codec = w->codec;
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	unsigned int dec_mux, decimator;
	char *dec_name = NULL;
	char *widget_name = NULL;
	char *temp;
	u16 tx_mux_ctl_reg;
	u8 adc_dmic_sel = 0x0;
	int ret = 0;
	char *srch = NULL;

	if (ucontrol->value.enumerated.item[0] > e->max - 1)
		return -EINVAL;
	dec_mux = ucontrol->value.enumerated.item[0];

	/* Widget name is "DECn MUX"; isolate the "DECn" token. */
	widget_name = kstrndup(w->name, 15, GFP_KERNEL);
	if (!widget_name)
		return -ENOMEM;
	temp = widget_name;

	dec_name = strsep(&widget_name, " ");
	widget_name = temp;
	if (!dec_name) {
		pr_err("%s: Invalid decimator = %s\n", __func__, w->name);
		ret = -EINVAL;
		goto out;
	}

	srch = strpbrk(dec_name, "1234");
	if (srch == NULL) {
		pr_err("%s: Invalid decimator name %s\n", __func__, dec_name);
		/* Was a bare return, leaking widget_name. */
		ret = -EINVAL;
		goto out;
	}
	ret = kstrtouint(srch, 10, &decimator);
	if (ret < 0) {
		pr_err("%s: Invalid decimator = %s\n", __func__, dec_name);
		ret = -EINVAL;
		goto out;
	}

	dev_dbg(w->dapm->dev, "%s(): widget = %s decimator = %u dec_mux = %u\n"
		, __func__, w->name, decimator, dec_mux);

	switch (decimator) {
	case 1:
	case 2:
		/* dec_1_2_mux_text: items 1-5 are ADCs, 6-9 are DMICs. */
		if ((dec_mux >= 1) && (dec_mux <= 5))
			adc_dmic_sel = 0x0;
		else if ((dec_mux >= 6) && (dec_mux <= 9))
			adc_dmic_sel = 0x1;
		break;
	case 3:
	case 4:
		/* dec3/4_mux_text: items 1-6 are ADCs, 7-10 are DMICs. */
		if ((dec_mux >= 1) && (dec_mux <= 6))
			adc_dmic_sel = 0x0;
		else if ((dec_mux >= 7) && (dec_mux <= 10))
			adc_dmic_sel = 0x1;
		break;
	default:
		pr_err("%s: Invalid Decimator = %u\n", __func__, decimator);
		ret = -EINVAL;
		goto out;
	}

	tx_mux_ctl_reg = TAPAN_A_CDC_TX1_MUX_CTL + 8 * (decimator - 1);
	snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x1, adc_dmic_sel);

	ret = snd_soc_dapm_put_enum_double(kcontrol, ucontrol);
out:
	kfree(widget_name);
	return ret;
}
/*
 * Enum-mux kcontrol whose put handler additionally programs the
 * ADC/DMIC select bit (see wcd9306_put_dec_enum()).
 */
#define WCD9306_DEC_ENUM(xname, xenum) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_enum_double, \
.get = snd_soc_dapm_get_enum_double, \
.put = wcd9306_put_dec_enum, \
.private_value = (unsigned long)&xenum }
static const struct snd_kcontrol_new dec1_mux =
WCD9306_DEC_ENUM("DEC1 MUX Mux", dec1_mux_enum);
static const struct snd_kcontrol_new dec2_mux =
WCD9306_DEC_ENUM("DEC2 MUX Mux", dec2_mux_enum);
static const struct snd_kcontrol_new dec3_mux =
WCD9306_DEC_ENUM("DEC3 MUX Mux", dec3_mux_enum);
static const struct snd_kcontrol_new dec4_mux =
WCD9306_DEC_ENUM("DEC4 MUX Mux", dec4_mux_enum);
/* IIR and ANC input muxes. */
static const struct snd_kcontrol_new iir1_inp1_mux =
SOC_DAPM_ENUM("IIR1 INP1 Mux", iir1_inp1_mux_enum);
static const struct snd_kcontrol_new iir1_inp2_mux =
SOC_DAPM_ENUM("IIR1 INP2 Mux", iir1_inp2_mux_enum);
static const struct snd_kcontrol_new iir1_inp3_mux =
SOC_DAPM_ENUM("IIR1 INP3 Mux", iir1_inp3_mux_enum);
static const struct snd_kcontrol_new iir1_inp4_mux =
SOC_DAPM_ENUM("IIR1 INP4 Mux", iir1_inp4_mux_enum);
static const struct snd_kcontrol_new iir2_inp1_mux =
SOC_DAPM_ENUM("IIR2 INP1 Mux", iir2_inp1_mux_enum);
static const struct snd_kcontrol_new iir2_inp2_mux =
SOC_DAPM_ENUM("IIR2 INP2 Mux", iir2_inp2_mux_enum);
static const struct snd_kcontrol_new iir2_inp3_mux =
SOC_DAPM_ENUM("IIR2 INP3 Mux", iir2_inp3_mux_enum);
static const struct snd_kcontrol_new iir2_inp4_mux =
SOC_DAPM_ENUM("IIR2 INP4 Mux", iir2_inp4_mux_enum);
static const struct snd_kcontrol_new anc1_mux =
SOC_DAPM_ENUM("ANC1 MUX Mux", anc1_mux_enum);
static const struct snd_kcontrol_new anc2_mux =
SOC_DAPM_ENUM("ANC2 MUX Mux", anc2_mux_enum);
static const struct snd_kcontrol_new anc1_fb_mux =
SOC_DAPM_ENUM("ANC1 FB MUX Mux", anc1_fb_mux_enum);
/* Single-bit DAC/PA switches and AUX-PGA mixer inputs. */
static const struct snd_kcontrol_new dac1_switch[] = {
SOC_DAPM_SINGLE("Switch", TAPAN_A_RX_EAR_EN, 5, 1, 0)
};
static const struct snd_kcontrol_new hphl_switch[] = {
SOC_DAPM_SINGLE("Switch", TAPAN_A_RX_HPH_L_DAC_CTL, 6, 1, 0)
};
static const struct snd_kcontrol_new spk_dac_switch[] = {
SOC_DAPM_SINGLE("Switch", TAPAN_A_SPKR_DRV_DAC_CTL, 2, 1, 0)
};
static const struct snd_kcontrol_new hphl_pa_mix[] = {
SOC_DAPM_SINGLE("AUX_PGA_L Switch", TAPAN_A_RX_PA_AUX_IN_CONN,
7, 1, 0),
};
static const struct snd_kcontrol_new hphr_pa_mix[] = {
SOC_DAPM_SINGLE("AUX_PGA_R Switch", TAPAN_A_RX_PA_AUX_IN_CONN,
6, 1, 0),
};
static const struct snd_kcontrol_new ear_pa_mix[] = {
SOC_DAPM_SINGLE("AUX_PGA_L Switch", TAPAN_A_RX_PA_AUX_IN_CONN,
5, 1, 0),
};
static const struct snd_kcontrol_new lineout1_pa_mix[] = {
SOC_DAPM_SINGLE("AUX_PGA_L Switch", TAPAN_A_RX_PA_AUX_IN_CONN,
4, 1, 0),
};
static const struct snd_kcontrol_new lineout2_pa_mix[] = {
SOC_DAPM_SINGLE("AUX_PGA_R Switch", TAPAN_A_RX_PA_AUX_IN_CONN,
3, 1, 0),
};
/* virtual port entries */
/*
 * slim_tx_mixer_get - report the cached TX-port routing bitmap.
 *
 * The DAPM widget's ->value holds one bit per SLIM TX port routed to
 * the capture AIF this mixer belongs to.
 */
static int slim_tx_mixer_get(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget_list *list = snd_kcontrol_chip(kcontrol);
	struct snd_soc_dapm_widget *w = list->widgets[0];

	ucontrol->value.integer.value[0] = w->value;

	return 0;
}
/*
 * slim_tx_mixer_put - attach/detach a SLIM TX port to/from a capture AIF.
 *
 * widget->shift identifies the capture AIF, mixer->shift the TX port.
 * The widget's ->value is a bitmap of ports already routed to this AIF.
 * On success the core tx channel is (un)linked from the DAI's channel
 * list and DAPM power is re-evaluated.  Returns 0, or -EINVAL for an
 * AIF that is invalid (non-AIF1 in I2C mode, or unknown dai_id).
 */
static int slim_tx_mixer_put(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
	struct snd_soc_codec *codec = widget->codec;
	struct tapan_priv *tapan_p = snd_soc_codec_get_drvdata(codec);
	struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
	struct soc_multi_mixer_control *mixer =
		((struct soc_multi_mixer_control *)kcontrol->private_value);
	u32 dai_id = widget->shift;	/* capture AIF index */
	u32 port_id = mixer->shift;	/* SLIM TX port index */
	u32 enable = ucontrol->value.integer.value[0];
	u32 vtable = vport_check_table[dai_id];
	dev_dbg(codec->dev, "%s: wname %s cname %s\n",
		__func__, widget->name, ucontrol->id.name);
	dev_dbg(codec->dev, "%s: value %u shift %d item %ld\n",
		__func__, widget->value, widget->shift,
		ucontrol->value.integer.value[0]);
	/* codec->mutex protects widget->value and the channel lists */
	mutex_lock(&codec->mutex);
	if (tapan_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
		/* I2C mode supports capture only on AIF1 */
		if (dai_id != AIF1_CAP) {
			dev_err(codec->dev, "%s: invalid AIF for I2C mode\n",
				__func__);
			mutex_unlock(&codec->mutex);
			return -EINVAL;
		}
	}
	switch (dai_id) {
	case AIF1_CAP:
	case AIF2_CAP:
	case AIF3_CAP:
		/* only add to the list if value not set
		 */
		if (enable && !(widget->value & 1 << port_id)) {
			/* pick the validation table matching the bus type */
			if (tapan_p->intf_type ==
				WCD9XXX_INTERFACE_TYPE_SLIMBUS)
				vtable = vport_check_table[dai_id];
			if (tapan_p->intf_type ==
				WCD9XXX_INTERFACE_TYPE_I2C)
				vtable = vport_i2s_check_table[dai_id];
			/* reject ports already claimed by another AIF */
			if (wcd9xxx_tx_vport_validation(
					vtable,
					port_id,
					tapan_p->dai, NUM_CODEC_DAIS)) {
				dev_dbg(codec->dev, "%s: TX%u is used by other virtual port\n",
					__func__, port_id + 1);
				mutex_unlock(&codec->mutex);
				return 0;
			}
			widget->value |= 1 << port_id;
			list_add_tail(&core->tx_chs[port_id].list,
			      &tapan_p->dai[dai_id].wcd9xxx_ch_list
					      );
		} else if (!enable && (widget->value & 1 << port_id)) {
			widget->value &= ~(1 << port_id);
			list_del_init(&core->tx_chs[port_id].list);
		} else {
			/* no state change requested; skip power update */
			if (enable)
				dev_dbg(codec->dev, "%s: TX%u port is used by\n"
					"this virtual port\n",
					__func__, port_id + 1);
			else
				dev_dbg(codec->dev, "%s: TX%u port is not used by\n"
					"this virtual port\n",
					__func__, port_id + 1);
			/* avoid update power function */
			mutex_unlock(&codec->mutex);
			return 0;
		}
		break;
	default:
		dev_err(codec->dev, "Unknown AIF %d\n", dai_id);
		mutex_unlock(&codec->mutex);
		return -EINVAL;
	}
	dev_dbg(codec->dev, "%s: name %s sname %s updated value %u shift %d\n",
		__func__, widget->name, widget->sname,
		widget->value, widget->shift);
	snd_soc_dapm_mixer_update_power(widget, kcontrol, enable);
	mutex_unlock(&codec->mutex);
	return 0;
}
/*
 * slim_rx_mux_get - report the cached AIF selection of a SLIM RX mux.
 *
 * The widget's ->value is the enum item last written by the put handler.
 */
static int slim_rx_mux_get(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget_list *list = snd_kcontrol_chip(kcontrol);
	struct snd_soc_dapm_widget *w = list->widgets[0];

	ucontrol->value.enumerated.item[0] = w->value;

	return 0;
}
static const char *const slim_rx_mux_text[] = {
"ZERO", "AIF1_PB", "AIF2_PB", "AIF3_PB"
};
static int slim_rx_mux_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
struct snd_soc_dapm_widget *widget = wlist->widgets[0];
struct snd_soc_codec *codec = widget->codec;
struct tapan_priv *tapan_p = snd_soc_codec_get_drvdata(codec);
struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
u32 port_id = widget->shift;
dev_dbg(codec->dev, "%s: wname %s cname %s value %u shift %d item %ld\n",
__func__, widget->name, ucontrol->id.name, widget->value,
widget->shift, ucontrol->value.integer.value[0]);
widget->value = ucontrol->value.enumerated.item[0];
mutex_lock(&codec->mutex);
if (tapan_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
if (widget->value > 1) {
dev_err(codec->dev, "%s: invalid AIF for I2C mode\n",
__func__);
goto err;
}
}
/* value need to match the Virtual port and AIF number
*/
switch (widget->value) {
case 0:
list_del_init(&core->rx_chs[port_id].list);
break;
case 1:
if (wcd9xxx_rx_vport_validation(port_id +
TAPAN_RX_PORT_START_NUMBER,
&tapan_p->dai[AIF1_PB].wcd9xxx_ch_list)) {
dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n",
__func__, port_id + 1);
goto rtn;
}
list_add_tail(&core->rx_chs[port_id].list,
&tapan_p->dai[AIF1_PB].wcd9xxx_ch_list);
break;
case 2:
if (wcd9xxx_rx_vport_validation(port_id +
TAPAN_RX_PORT_START_NUMBER,
&tapan_p->dai[AIF2_PB].wcd9xxx_ch_list)) {
dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n",
__func__, port_id + 1);
goto rtn;
}
list_add_tail(&core->rx_chs[port_id].list,
&tapan_p->dai[AIF2_PB].wcd9xxx_ch_list);
break;
case 3:
if (wcd9xxx_rx_vport_validation(port_id +
TAPAN_RX_PORT_START_NUMBER,
&tapan_p->dai[AIF3_PB].wcd9xxx_ch_list)) {
dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n",
__func__, port_id + 1);
goto rtn;
}
list_add_tail(&core->rx_chs[port_id].list,
&tapan_p->dai[AIF3_PB].wcd9xxx_ch_list);
break;
default:
pr_err("Unknown AIF %d\n", widget->value);
goto err;
}
rtn:
snd_soc_dapm_mux_update_power(widget, kcontrol, 1, widget->value, e);
mutex_unlock(&codec->mutex);
return 0;
err:
mutex_unlock(&codec->mutex);
return -EINVAL;
}
/* Shared enum descriptor for all SLIM RX port muxes. */
static const struct soc_enum slim_rx_mux_enum =
	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(slim_rx_mux_text), slim_rx_mux_text);
/* One virtual mux control per SLIM RX port (RX1..RX5). */
static const struct snd_kcontrol_new slim_rx_mux[TAPAN_RX_MAX] = {
	SOC_DAPM_ENUM_EXT("SLIM RX1 Mux", slim_rx_mux_enum,
			  slim_rx_mux_get, slim_rx_mux_put),
	SOC_DAPM_ENUM_EXT("SLIM RX2 Mux", slim_rx_mux_enum,
			  slim_rx_mux_get, slim_rx_mux_put),
	SOC_DAPM_ENUM_EXT("SLIM RX3 Mux", slim_rx_mux_enum,
			  slim_rx_mux_get, slim_rx_mux_put),
	SOC_DAPM_ENUM_EXT("SLIM RX4 Mux", slim_rx_mux_enum,
			  slim_rx_mux_get, slim_rx_mux_put),
	SOC_DAPM_ENUM_EXT("SLIM RX5 Mux", slim_rx_mux_enum,
			  slim_rx_mux_get, slim_rx_mux_put),
};
/* Virtual capture mixer: one switch per SLIM TX port feeding an AIF. */
static const struct snd_kcontrol_new aif_cap_mixer[] = {
	SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, TAPAN_TX1, 1, 0,
			slim_tx_mixer_get, slim_tx_mixer_put),
	SOC_SINGLE_EXT("SLIM TX2", SND_SOC_NOPM, TAPAN_TX2, 1, 0,
			slim_tx_mixer_get, slim_tx_mixer_put),
	SOC_SINGLE_EXT("SLIM TX3", SND_SOC_NOPM, TAPAN_TX3, 1, 0,
			slim_tx_mixer_get, slim_tx_mixer_put),
	SOC_SINGLE_EXT("SLIM TX4", SND_SOC_NOPM, TAPAN_TX4, 1, 0,
			slim_tx_mixer_get, slim_tx_mixer_put),
	SOC_SINGLE_EXT("SLIM TX5", SND_SOC_NOPM, TAPAN_TX5, 1, 0,
			slim_tx_mixer_get, slim_tx_mixer_put),
};
/*
 * tapan_codec_enable_adc - DAPM event handler for the TX ADC widgets.
 *
 * Pulses the TXFE init bit around power-up and notifies the resource
 * manager for TX1/TX3 transitions.
 */
static int tapan_codec_enable_adc(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
	u16 adc_reg;
	u8 init_bit_shift;

	dev_dbg(codec->dev, "%s(): %s %d\n", __func__, w->name, event);

	/* Map the ADC enable register to its TXFE test register/init bit. */
	switch (w->reg) {
	case TAPAN_A_TX_1_EN:
		adc_reg = TAPAN_A_TX_1_2_TEST_CTL;
		init_bit_shift = 7;
		break;
	case TAPAN_A_TX_2_EN:
	case TAPAN_A_TX_3_EN:
		adc_reg = TAPAN_A_TX_1_2_TEST_CTL;
		init_bit_shift = 6;
		break;
	case TAPAN_A_TX_4_EN:
		adc_reg = TAPAN_A_TX_4_5_TEST_CTL;
		init_bit_shift = 7;
		break;
	case TAPAN_A_TX_5_EN:
		adc_reg = TAPAN_A_TX_4_5_TEST_CTL;
		init_bit_shift = 6;
		break;
	default:
		pr_err("%s: Error, invalid adc register\n", __func__);
		return -EINVAL;
	}

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		if (w->reg == TAPAN_A_TX_3_EN ||
		    w->reg == TAPAN_A_TX_1_EN)
			wcd9xxx_resmgr_notifier_call(&tapan->resmgr,
						WCD9XXX_EVENT_PRE_TX_1_3_ON);
		/* assert the TXFE init bit before power-up */
		snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift,
				    1 << init_bit_shift);
		break;
	case SND_SOC_DAPM_POST_PMU:
		/* release the init bit after the ADC has settled */
		usleep_range(2000, 2010);
		snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift, 0x00);
		break;
	case SND_SOC_DAPM_POST_PMD:
		if (w->reg == TAPAN_A_TX_3_EN ||
		    w->reg == TAPAN_A_TX_1_EN)
			wcd9xxx_resmgr_notifier_call(&tapan->resmgr,
						WCD9XXX_EVENT_POST_TX_1_3_OFF);
		break;
	}

	return 0;
}
/*
 * tapan_codec_enable_aux_pga - DAPM event handler for the AUX PGA path.
 *
 * On power-up it takes bandgap + RCO clock references and enables the
 * shared RX bias; on power-down it releases them in reverse order.
 * Bandgap/clock operations must run under the BG_CLK lock.
 */
static int tapan_codec_enable_aux_pga(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
	dev_dbg(codec->dev, "%s: %d\n", __func__, event);
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		WCD9XXX_BG_CLK_LOCK(&tapan->resmgr);
		wcd9xxx_resmgr_get_bandgap(&tapan->resmgr,
					   WCD9XXX_BANDGAP_AUDIO_MODE);
		/* AUX PGA requires RCO or MCLK */
		wcd9xxx_resmgr_get_clk_block(&tapan->resmgr, WCD9XXX_CLK_RCO);
		WCD9XXX_BG_CLK_UNLOCK(&tapan->resmgr);
		wcd9xxx_resmgr_enable_rx_bias(&tapan->resmgr, 1);
		break;
	case SND_SOC_DAPM_POST_PMD:
		/* drop RX bias first, then release clock/bandgap refs */
		wcd9xxx_resmgr_enable_rx_bias(&tapan->resmgr, 0);
		WCD9XXX_BG_CLK_LOCK(&tapan->resmgr);
		wcd9xxx_resmgr_put_bandgap(&tapan->resmgr,
					   WCD9XXX_BANDGAP_AUDIO_MODE);
		wcd9xxx_resmgr_put_clk_block(&tapan->resmgr, WCD9XXX_CLK_RCO);
		WCD9XXX_BG_CLK_UNLOCK(&tapan->resmgr);
		break;
	}
	return 0;
}
/*
 * tapan_codec_enable_lineout - DAPM event handler for the LINEOUT PAs.
 *
 * Drives the class-H state machine around PA enable/disable and waits
 * the CnP (click-and-pop) settle time on both transitions.  w->shift
 * selects LINEOUT1 (0) or LINEOUT2 (1).
 *
 * Fix: the POST_PMD debug message previously said "PA turn on" on the
 * disable path; it now reports the transition correctly.
 */
static int tapan_codec_enable_lineout(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
	u16 lineout_gain_reg;

	dev_dbg(codec->dev, "%s %d %s\n", __func__, event, w->name);

	switch (w->shift) {
	case 0:
		lineout_gain_reg = TAPAN_A_RX_LINE_1_GAIN;
		break;
	case 1:
		lineout_gain_reg = TAPAN_A_RX_LINE_2_GAIN;
		break;
	default:
		pr_err("%s: Error, incorrect lineout register value\n",
			__func__);
		return -EINVAL;
	}

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		break;
	case SND_SOC_DAPM_POST_PMU:
		wcd9xxx_clsh_fsm(codec, &tapan->clsh_d,
						 WCD9XXX_CLSH_STATE_LO,
						 WCD9XXX_CLSH_REQ_ENABLE,
						 WCD9XXX_CLSH_EVENT_POST_PA);
		dev_dbg(codec->dev, "%s: sleeping 5 ms after %s PA turn on\n",
				__func__, w->name);
		/* Wait for CnP time after PA enable */
		usleep_range(5000, 5100);
		break;
	case SND_SOC_DAPM_POST_PMD:
		wcd9xxx_clsh_fsm(codec, &tapan->clsh_d,
						 WCD9XXX_CLSH_STATE_LO,
						 WCD9XXX_CLSH_REQ_DISABLE,
						 WCD9XXX_CLSH_EVENT_POST_PA);
		dev_dbg(codec->dev, "%s: sleeping 5 ms after %s PA turn off\n",
				__func__, w->name);
		/* Wait for CnP time after PA disable */
		usleep_range(5000, 5100);
		break;
	}
	return 0;
}
static int tapan_codec_enable_spk_pa(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
dev_dbg(codec->dev, "%s: %s %d\n", __func__, w->name, event);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
tapan->spkr_pa_widget_on = true;
snd_soc_update_bits(codec, TAPAN_A_SPKR_DRV_EN, 0x80, 0x80);
break;
case SND_SOC_DAPM_POST_PMD:
tapan->spkr_pa_widget_on = false;
snd_soc_update_bits(codec, TAPAN_A_SPKR_DRV_EN, 0x80, 0x00);
break;
}
return 0;
}
/*
 * tapan_codec_enable_dmic - DAPM event handler for the digital mics.
 *
 * DMIC1/2 and DMIC3/4 share a clock-enable bit each; a per-pair
 * reference count decides when the shared bit is set or cleared.
 * The DMIC number is parsed from the widget name.
 */
static int tapan_codec_enable_dmic(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
	s32 *cnt;
	u16 clk_reg;
	u8 clk_mask;
	unsigned int dmic;
	char *digit;
	int ret;

	/* widget names embed the DMIC number, e.g. "DMIC1" */
	digit = strpbrk(w->name, "1234");
	if (digit == NULL) {
		pr_err("%s: Invalid widget name %s\n", __func__, w->name);
		return -EINVAL;
	}
	ret = kstrtouint(digit, 10, &dmic);
	if (ret < 0) {
		pr_err("%s: Invalid DMIC line on the codec\n", __func__);
		return -EINVAL;
	}

	switch (dmic) {
	case 1:
	case 2:
		clk_mask = 0x01;
		cnt = &(tapan->dmic_1_2_clk_cnt);
		clk_reg = TAPAN_A_CDC_CLK_DMIC_B1_CTL;
		dev_dbg(codec->dev, "%s() event %d DMIC%d dmic_1_2_clk_cnt %d\n",
			__func__, event,  dmic, *cnt);
		break;
	case 3:
	case 4:
		clk_mask = 0x10;
		cnt = &(tapan->dmic_3_4_clk_cnt);
		clk_reg = TAPAN_A_CDC_CLK_DMIC_B1_CTL;
		dev_dbg(codec->dev, "%s() event %d DMIC%d dmic_3_4_clk_cnt %d\n",
			__func__, event,  dmic, *cnt);
		break;
	default:
		pr_err("%s: Invalid DMIC Selection\n", __func__);
		return -EINVAL;
	}

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		/* first user of the pair turns the shared clock on */
		if (++(*cnt) == 1)
			snd_soc_update_bits(codec, clk_reg,
					    clk_mask, clk_mask);
		break;
	case SND_SOC_DAPM_POST_PMD:
		/* last user of the pair turns the shared clock off */
		if (--(*cnt) == 0)
			snd_soc_update_bits(codec, clk_reg,
					    clk_mask, 0);
		break;
	}

	return 0;
}
static int tapan_codec_enable_anc(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
const char *filename;
const struct firmware *fw;
int i;
int ret;
int num_anc_slots;
struct wcd9xxx_anc_header *anc_head;
struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
u32 anc_writes_size = 0;
int anc_size_remaining;
u32 *anc_ptr;
u16 reg;
u8 mask, val, old_val;
dev_dbg(codec->dev, "%s %d\n", __func__, event);
if (tapan->anc_func == 0)
return 0;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
filename = "wcd9306/wcd9306_anc.bin";
ret = request_firmware(&fw, filename, codec->dev);
if (ret != 0) {
dev_err(codec->dev, "Failed to acquire ANC data: %d\n",
ret);
return -ENODEV;
}
if (fw->size < sizeof(struct wcd9xxx_anc_header)) {
dev_err(codec->dev, "Not enough data\n");
release_firmware(fw);
return -ENOMEM;
}
/* First number is the number of register writes */
anc_head = (struct wcd9xxx_anc_header *)(fw->data);
anc_ptr = (u32 *)((u32)fw->data +
sizeof(struct wcd9xxx_anc_header));
anc_size_remaining = fw->size -
sizeof(struct wcd9xxx_anc_header);
num_anc_slots = anc_head->num_anc_slots;
if (tapan->anc_slot >= num_anc_slots) {
dev_err(codec->dev, "Invalid ANC slot selected\n");
release_firmware(fw);
return -EINVAL;
}
for (i = 0; i < num_anc_slots; i++) {
if (anc_size_remaining < TAPAN_PACKED_REG_SIZE) {
dev_err(codec->dev, "Invalid register format\n");
release_firmware(fw);
return -EINVAL;
}
anc_writes_size = (u32)(*anc_ptr);
anc_size_remaining -= sizeof(u32);
anc_ptr += 1;
if (anc_writes_size * TAPAN_PACKED_REG_SIZE
> anc_size_remaining) {
dev_err(codec->dev, "Invalid register format\n");
release_firmware(fw);
return -ENOMEM;
}
if (tapan->anc_slot == i)
break;
anc_size_remaining -= (anc_writes_size *
TAPAN_PACKED_REG_SIZE);
anc_ptr += anc_writes_size;
}
if (i == num_anc_slots) {
dev_err(codec->dev, "Selected ANC slot not present\n");
release_firmware(fw);
return -ENOMEM;
}
for (i = 0; i < anc_writes_size; i++) {
TAPAN_CODEC_UNPACK_ENTRY(anc_ptr[i], reg,
mask, val);
old_val = snd_soc_read(codec, reg);
snd_soc_write(codec, reg, (old_val & ~mask) |
(val & mask));
}
release_firmware(fw);
break;
case SND_SOC_DAPM_PRE_PMD:
msleep(40);
snd_soc_update_bits(codec, TAPAN_A_CDC_ANC1_B1_CTL, 0x01, 0x00);
snd_soc_update_bits(codec, TAPAN_A_CDC_ANC2_B1_CTL, 0x02, 0x00);
msleep(20);
snd_soc_write(codec, TAPAN_A_CDC_CLK_ANC_RESET_CTL, 0x0F);
snd_soc_write(codec, TAPAN_A_CDC_CLK_ANC_CLK_EN_CTL, 0);
snd_soc_write(codec, TAPAN_A_CDC_CLK_ANC_RESET_CTL, 0xFF);
break;
}
return 0;
}
/*
 * tapan_codec_enable_micbias - DAPM event handler for MIC BIAS1/2/3.
 *
 * The bias is picked by matching the widget name.  MICBIAS2 is
 * reference-counted (micb_2_users) and driven through the resmgr
 * conditional-update mechanism so MBHC can share it; the others are
 * switched directly.  "InternalN" widget names additionally enable
 * the internal RBIAS resistor chains.
 */
static int tapan_codec_enable_micbias(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
	u16 micb_int_reg = 0, micb_ctl_reg = 0;
	u8 cfilt_sel_val = 0;
	char *internal1_text = "Internal1";
	char *internal2_text = "Internal2";
	char *internal3_text = "Internal3";
	enum wcd9xxx_notify_event e_post_off, e_pre_on, e_post_on;
	pr_debug("%s: w->name %s event %d\n", __func__, w->name, event);
	/* select ctl/int registers, cfilt and MBHC events by widget name */
	if (strnstr(w->name, "MIC BIAS1", sizeof("MIC BIAS1"))) {
		micb_ctl_reg = TAPAN_A_MICB_1_CTL;
		micb_int_reg = TAPAN_A_MICB_1_INT_RBIAS;
		cfilt_sel_val = tapan->resmgr.pdata->micbias.bias1_cfilt_sel;
		e_pre_on = WCD9XXX_EVENT_PRE_MICBIAS_1_ON;
		e_post_on = WCD9XXX_EVENT_POST_MICBIAS_1_ON;
		e_post_off = WCD9XXX_EVENT_POST_MICBIAS_1_OFF;
	} else if (strnstr(w->name, "MIC BIAS2", sizeof("MIC BIAS2"))) {
		micb_ctl_reg = TAPAN_A_MICB_2_CTL;
		micb_int_reg = TAPAN_A_MICB_2_INT_RBIAS;
		cfilt_sel_val = tapan->resmgr.pdata->micbias.bias2_cfilt_sel;
		e_pre_on = WCD9XXX_EVENT_PRE_MICBIAS_2_ON;
		e_post_on = WCD9XXX_EVENT_POST_MICBIAS_2_ON;
		e_post_off = WCD9XXX_EVENT_POST_MICBIAS_2_OFF;
	} else if (strnstr(w->name, "MIC BIAS3", sizeof("MIC BIAS3"))) {
		micb_ctl_reg = TAPAN_A_MICB_3_CTL;
		micb_int_reg = TAPAN_A_MICB_3_INT_RBIAS;
		cfilt_sel_val = tapan->resmgr.pdata->micbias.bias3_cfilt_sel;
		e_pre_on = WCD9XXX_EVENT_PRE_MICBIAS_3_ON;
		e_post_on = WCD9XXX_EVENT_POST_MICBIAS_3_ON;
		e_post_off = WCD9XXX_EVENT_POST_MICBIAS_3_OFF;
	} else {
		pr_err("%s: Error, invalid micbias %s\n", __func__, w->name);
		return -EINVAL;
	}
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		/* Let MBHC module know so micbias switch to be off */
		wcd9xxx_resmgr_notifier_call(&tapan->resmgr, e_pre_on);
		/* Get cfilt */
		wcd9xxx_resmgr_cfilt_get(&tapan->resmgr, cfilt_sel_val);
		/* enable the internal RBIAS path for internal biasing */
		if (strnstr(w->name, internal1_text, 30))
			snd_soc_update_bits(codec, micb_int_reg, 0xE0, 0xE0);
		else if (strnstr(w->name, internal2_text, 30))
			snd_soc_update_bits(codec, micb_int_reg, 0x1C, 0x1C);
		else if (strnstr(w->name, internal3_text, 30))
			snd_soc_update_bits(codec, micb_int_reg, 0x3, 0x3);
		/* MICBIAS2 is shared with MBHC: refcount + cond update */
		if (micb_ctl_reg == TAPAN_A_MICB_2_CTL) {
			if (++tapan->micb_2_users == 1)
				wcd9xxx_resmgr_add_cond_update_bits(
						&tapan->resmgr,
						WCD9XXX_COND_HPH_MIC,
						micb_ctl_reg, w->shift,
						false);
			pr_debug("%s: micb_2_users %d\n", __func__,
				 tapan->micb_2_users);
		} else
			snd_soc_update_bits(codec, micb_ctl_reg, 1 << w->shift,
					    1 << w->shift);
		break;
	case SND_SOC_DAPM_POST_PMU:
		/* bias needs ~20 ms to settle before it can be used */
		usleep_range(20000, 20000);
		/* Let MBHC module know so micbias is on */
		wcd9xxx_resmgr_notifier_call(&tapan->resmgr, e_post_on);
		break;
	case SND_SOC_DAPM_POST_PMD:
		if (micb_ctl_reg == TAPAN_A_MICB_2_CTL) {
			/*
			 * NOTE(review): the remove path passes a literal
			 * shift of 7 while the add path used w->shift —
			 * confirm these always match for MICBIAS2.
			 */
			if (--tapan->micb_2_users == 0)
				wcd9xxx_resmgr_rm_cond_update_bits(
						&tapan->resmgr,
						WCD9XXX_COND_HPH_MIC,
						micb_ctl_reg, 7,
						false);
			pr_debug("%s: micb_2_users %d\n", __func__,
				 tapan->micb_2_users);
			WARN(tapan->micb_2_users < 0,
			     "Unexpected micbias users %d\n",
			     tapan->micb_2_users);
		} else
			snd_soc_update_bits(codec, micb_ctl_reg, 1 << w->shift,
					    0);
		/* Let MBHC module know so micbias switch to be off */
		wcd9xxx_resmgr_notifier_call(&tapan->resmgr, e_post_off);
		/* tear down the internal RBIAS path */
		if (strnstr(w->name, internal1_text, 30))
			snd_soc_update_bits(codec, micb_int_reg, 0x80, 0x00);
		else if (strnstr(w->name, internal2_text, 30))
			snd_soc_update_bits(codec, micb_int_reg, 0x10, 0x00);
		else if (strnstr(w->name, internal3_text, 30))
			snd_soc_update_bits(codec, micb_int_reg, 0x2, 0x0);
		/* Put cfilt */
		wcd9xxx_resmgr_cfilt_put(&tapan->resmgr, cfilt_sel_val);
		break;
	}
	return 0;
}
/*
 * tapan_enable_mbhc_micbias - force MICBIAS2 on/off for MBHC.
 *
 * Called under codec_resource_lock acquisition.  Only MICBIAS2 has a
 * standalone DAPM pin on this codec; other bias numbers are rejected.
 */
static int tapan_enable_mbhc_micbias(struct snd_soc_codec *codec, bool enable,
				enum wcd9xxx_micbias_num micb_num)
{
	int rc;

	if (micb_num != MBHC_MICBIAS2)
		return -EINVAL;

	if (enable)
		rc = snd_soc_dapm_force_enable_pin(&codec->dapm,
				DAPM_MICBIAS2_EXTERNAL_STANDALONE);
	else
		rc = snd_soc_dapm_disable_pin(&codec->dapm,
				DAPM_MICBIAS2_EXTERNAL_STANDALONE);

	/* propagate the pin change through DAPM on success */
	if (!rc)
		snd_soc_dapm_sync(&codec->dapm);
	pr_debug("%s: leave ret %d\n", __func__, rc);

	return rc;
}
/*
 * tx_hpf_corner_freq_callback - deferred restore of a decimator HPF corner.
 *
 * Scheduled from the decimator power-up path; re-programs the stored
 * -3 dB corner into the TXn MUX_CTL register after the settle delay.
 */
static void tx_hpf_corner_freq_callback(struct work_struct *work)
{
	struct hpf_work *hpf_work =
		container_of(to_delayed_work(work), struct hpf_work, dwork);
	struct snd_soc_codec *codec = hpf_work->tapan->codec;
	u8 corner = hpf_work->tx_hpf_cut_of_freq;
	u16 mux_reg = TAPAN_A_CDC_TX1_MUX_CTL +
		      (hpf_work->decimator - 1) * 8;

	dev_dbg(codec->dev, "%s(): decimator %u hpf_cut_of_freq 0x%x\n",
		__func__, hpf_work->decimator, (unsigned int)corner);

	/* restore TXFE clock dividers, then the stored HPF corner bits */
	snd_soc_update_bits(codec, TAPAN_A_TX_1_2_TXFE_CLKDIV, 0x55, 0x55);
	snd_soc_update_bits(codec, mux_reg, 0x30, corner << 4);
}
/* TXn MUX_CTL bits [5:4] select the decimator HPF -3 dB corner. */
#define TX_MUX_CTL_CUT_OFF_FREQ_MASK 0x30
#define CF_MIN_3DB_4HZ 0x0
#define CF_MIN_3DB_75HZ 0x1
#define CF_MIN_3DB_150HZ 0x2
/*
 * tapan_codec_enable_dec - DAPM event handler for the TX decimators.
 *
 * Parses the decimator index from the widget name, pulses the
 * decimator reset, manages digital mute and the HPF corner (forced to
 * 150 Hz during power-up, restored via delayed work), and re-applies
 * the TX digital gain after enable.
 *
 * Fix: the strpbrk() failure path previously returned -EINVAL
 * directly, leaking the kstrndup()'d widget_name buffer; it now goes
 * through the common cleanup label.
 */
static int tapan_codec_enable_dec(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	unsigned int decimator;
	struct tapan_priv *tapan_p = snd_soc_codec_get_drvdata(codec);
	char *dec_name = NULL;
	char *widget_name = NULL;
	char *temp;
	int ret = 0, i;
	u16 dec_reset_reg, tx_vol_ctl_reg, tx_mux_ctl_reg;
	u8 dec_hpf_cut_of_freq;
	int offset;
	char *srch = NULL;

	dev_dbg(codec->dev, "%s %d\n", __func__, event);

	widget_name = kstrndup(w->name, 15, GFP_KERNEL);
	if (!widget_name)
		return -ENOMEM;
	temp = widget_name;

	dec_name = strsep(&widget_name, " ");
	widget_name = temp;
	if (!dec_name) {
		pr_err("%s: Invalid decimator = %s\n", __func__, w->name);
		ret = -EINVAL;
		goto out;
	}

	srch = strpbrk(dec_name, "123456789");
	if (srch == NULL) {
		pr_err("%s: Invalid decimator name %s\n", __func__, dec_name);
		/* previously returned directly and leaked widget_name */
		ret = -EINVAL;
		goto out;
	}
	ret = kstrtouint(srch, 10, &decimator);
	if (ret < 0) {
		pr_err("%s: Invalid decimator = %s\n", __func__, dec_name);
		ret = -EINVAL;
		goto out;
	}

	dev_dbg(codec->dev, "%s(): widget = %s dec_name = %s decimator = %u\n",
		__func__, w->name, dec_name, decimator);

	/* the two clock-enable banks use different reset registers */
	if (w->reg == TAPAN_A_CDC_CLK_TX_CLK_EN_B1_CTL) {
		dec_reset_reg = TAPAN_A_CDC_CLK_TX_RESET_B1_CTL;
		offset = 0;
	} else if (w->reg == TAPAN_A_CDC_CLK_TX_CLK_EN_B2_CTL) {
		dec_reset_reg = TAPAN_A_CDC_CLK_TX_RESET_B2_CTL;
		offset = 8;
	} else {
		pr_err("%s: Error, incorrect dec\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	tx_vol_ctl_reg = TAPAN_A_CDC_TX1_VOL_CTL_CFG + 8 * (decimator - 1);
	tx_mux_ctl_reg = TAPAN_A_CDC_TX1_MUX_CTL + 8 * (decimator - 1);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		for (i = 0; i < NUM_DECIMATORS; i++) {
			if (decimator == i + 1)
				tapan_p->dec_active[i] = true;
		}
		/* Enable TX digital mute */
		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x01);
		/* pulse the decimator reset */
		snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift,
			1 << w->shift);
		snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift, 0x0);
		/* remember the configured HPF corner, force 150 Hz for now */
		dec_hpf_cut_of_freq = snd_soc_read(codec, tx_mux_ctl_reg);
		dec_hpf_cut_of_freq = (dec_hpf_cut_of_freq & 0x30) >> 4;
		tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq =
			dec_hpf_cut_of_freq;
		if ((dec_hpf_cut_of_freq != CF_MIN_3DB_150HZ)) {
			/* set cut of freq to CF_MIN_3DB_150HZ (0x1); */
			snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x30,
					    CF_MIN_3DB_150HZ << 4);
		}
		/* enable HPF */
		snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x00);
		snd_soc_update_bits(codec, TAPAN_A_TX_1_2_TXFE_CLKDIV,
				    0x55, 0x44);
		break;
	case SND_SOC_DAPM_POST_PMU:
		if (tapan_p->lb_mode) {
			pr_debug("%s: loopback mode unmute the DEC\n",
				 __func__);
			snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x00);
		}
		/* restore the original HPF corner after the settle delay */
		if (tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq !=
				CF_MIN_3DB_150HZ) {
			schedule_delayed_work(&tx_hpf_work[decimator - 1].dwork,
					      msecs_to_jiffies(300));
		}
		/* apply the digital gain after the decimator is enabled*/
		if ((w->shift + offset) < ARRAY_SIZE(tx_digital_gain_reg))
			snd_soc_write(codec,
				      tx_digital_gain_reg[w->shift + offset],
				      snd_soc_read(codec,
				      tx_digital_gain_reg[w->shift + offset])
				      );
		break;
	case SND_SOC_DAPM_PRE_PMD:
		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x01);
		cancel_delayed_work_sync(&tx_hpf_work[decimator - 1].dwork);
		break;
	case SND_SOC_DAPM_POST_PMD:
		/* bypass HPF and restore the saved corner frequency */
		snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x08);
		snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x30,
			(tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq) << 4);
		for (i = 0; i < NUM_DECIMATORS; i++) {
			if (decimator == i + 1)
				tapan_p->dec_active[i] = false;
		}
		break;
	}
out:
	kfree(widget_name);
	return ret;
}
/*
 * tapan_codec_enable_vdd_spkr - DAPM event handler for the speaker supply.
 *
 * Applies the spkr_drv_wrnd workaround (temporarily dropping the PA
 * enable bit around the supply transition) and, on 1.0 silicon, the
 * DBG_PWRSTG adjustment.
 */
static int tapan_codec_enable_vdd_spkr(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);

	dev_dbg(codec->dev, "%s: %s %d\n", __func__, w->name, event);

	if (event == SND_SOC_DAPM_PRE_PMU) {
		if (spkr_drv_wrnd > 0) {
			/* the PA bit must be set before the supply comes up */
			WARN_ON(!(snd_soc_read(codec, TAPAN_A_SPKR_DRV_EN) &
				  0x80));
			snd_soc_update_bits(codec, TAPAN_A_SPKR_DRV_EN, 0x80,
					    0x00);
		}
		if (TAPAN_IS_1_0(core->version))
			snd_soc_update_bits(codec, TAPAN_A_SPKR_DRV_DBG_PWRSTG,
					    0x24, 0x00);
	} else if (event == SND_SOC_DAPM_POST_PMD) {
		if (TAPAN_IS_1_0(core->version))
			snd_soc_update_bits(codec, TAPAN_A_SPKR_DRV_DBG_PWRSTG,
					    0x24, 0x24);
		if (spkr_drv_wrnd > 0) {
			/* the PA bit must be clear after the supply goes down */
			WARN_ON(!!(snd_soc_read(codec, TAPAN_A_SPKR_DRV_EN) &
				   0x80));
			snd_soc_update_bits(codec, TAPAN_A_SPKR_DRV_EN, 0x80,
					    0x80);
		}
	}

	return 0;
}
/*
 * tapan_codec_rx_dem_select - toggle the RX2 DEM select bit (WCD9306 only).
 */
static int tapan_codec_rx_dem_select(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;

	pr_debug("%s %d %s\n", __func__, event, w->name);

	/* only the WCD9306 variant carries this control bit */
	if (codec_ver != WCD9306)
		return 0;

	if (event == SND_SOC_DAPM_PRE_PMU)
		snd_soc_update_bits(codec, TAPAN_A_CDC_RX2_B6_CTL,
				    1 << 5, 1 << 5);
	else if (event == SND_SOC_DAPM_POST_PMD)
		snd_soc_update_bits(codec, TAPAN_A_CDC_RX2_B6_CTL,
				    1 << 5, 0);

	return 0;
}
/*
 * tapan_codec_enable_interpolator - DAPM event handler for RX interpolators.
 *
 * Pulses the per-path RX reset bit before power-up and re-applies the
 * digital gain register after the interpolator is running.
 */
static int tapan_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	unsigned int rx_bit = 1 << w->shift;

	dev_dbg(codec->dev, "%s %d %s\n", __func__, event, w->name);

	if (event == SND_SOC_DAPM_PRE_PMU) {
		/* pulse the reset bit for this interpolator path */
		snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_RX_RESET_CTL,
				    rx_bit, rx_bit);
		snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_RX_RESET_CTL,
				    rx_bit, 0x0);
	} else if (event == SND_SOC_DAPM_POST_PMU) {
		/* apply the digital gain after the interpolator is enabled*/
		if ((w->shift) < ARRAY_SIZE(rx_digital_gain_reg))
			snd_soc_write(codec,
				      rx_digital_gain_reg[w->shift],
				      snd_soc_read(codec,
						   rx_digital_gain_reg[w->shift]));
	}

	return 0;
}
/* called under codec_resource_lock acquisition */
/*
 * __tapan_codec_enable_ldo_h - reference-counted LDO_H enable/disable.
 *
 * The first user takes bandgap + RCO references and sets the LDO_H
 * mode bit; the last user clears it and releases the references.
 * ldo_h_users is protected by codec->mutex (see inline comment).
 */
static int __tapan_codec_enable_ldo_h(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct tapan_priv *priv = snd_soc_codec_get_drvdata(codec);
	pr_debug("%s: enter\n", __func__);
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		/*
		 * ldo_h_users is protected by codec->mutex, don't need
		 * additional mutex
		 */
		if (++priv->ldo_h_users == 1) {
			WCD9XXX_BG_CLK_LOCK(&priv->resmgr);
			wcd9xxx_resmgr_get_bandgap(&priv->resmgr,
						   WCD9XXX_BANDGAP_AUDIO_MODE);
			/* RCO is held only while the mode bit is written */
			wcd9xxx_resmgr_get_clk_block(&priv->resmgr,
						     WCD9XXX_CLK_RCO);
			snd_soc_update_bits(codec, TAPAN_A_LDO_H_MODE_1, 1 << 7,
					    1 << 7);
			wcd9xxx_resmgr_put_clk_block(&priv->resmgr,
						     WCD9XXX_CLK_RCO);
			WCD9XXX_BG_CLK_UNLOCK(&priv->resmgr);
			pr_debug("%s: ldo_h_users %d\n", __func__,
				 priv->ldo_h_users);
			/* LDO enable requires 1ms to settle down */
			usleep_range(1000, 1010);
		}
		break;
	case SND_SOC_DAPM_POST_PMD:
		if (--priv->ldo_h_users == 0) {
			WCD9XXX_BG_CLK_LOCK(&priv->resmgr);
			wcd9xxx_resmgr_get_clk_block(&priv->resmgr,
						     WCD9XXX_CLK_RCO);
			snd_soc_update_bits(codec, TAPAN_A_LDO_H_MODE_1, 1 << 7,
					    0);
			wcd9xxx_resmgr_put_clk_block(&priv->resmgr,
						     WCD9XXX_CLK_RCO);
			wcd9xxx_resmgr_put_bandgap(&priv->resmgr,
						   WCD9XXX_BANDGAP_AUDIO_MODE);
			WCD9XXX_BG_CLK_UNLOCK(&priv->resmgr);
			pr_debug("%s: ldo_h_users %d\n", __func__,
				 priv->ldo_h_users);
		}
		WARN(priv->ldo_h_users < 0, "Unexpected ldo_h users %d\n",
		     priv->ldo_h_users);
		break;
	}
	pr_debug("%s: leave\n", __func__);
	return 0;
}
/* Thin DAPM wrapper around the reference-counted LDO_H helper. */
static int tapan_codec_enable_ldo_h(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	return __tapan_codec_enable_ldo_h(w, kcontrol, event);
}
/*
 * tapan_codec_enable_rx_bias - take/release the shared RX bias reference.
 */
static int tapan_codec_enable_rx_bias(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);

	dev_dbg(codec->dev, "%s %d\n", __func__, event);

	if (event == SND_SOC_DAPM_PRE_PMU)
		wcd9xxx_resmgr_enable_rx_bias(&tapan->resmgr, 1);
	else if (event == SND_SOC_DAPM_POST_PMD)
		wcd9xxx_resmgr_enable_rx_bias(&tapan->resmgr, 0);

	return 0;
}
/*
 * tapan_hphl_dac_event - DAPM event handler for the HPHL DAC.
 *
 * Gates the left RDAC clock and prepares the class-H controller
 * before power-up; ungates the clock after power-down.
 */
static int tapan_hphl_dac_event(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct tapan_priv *tapan_p = snd_soc_codec_get_drvdata(codec);

	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);

	if (event == SND_SOC_DAPM_PRE_PMU) {
		snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_RDAC_CLK_EN_CTL,
				    0x02, 0x02);
		wcd9xxx_clsh_fsm(codec, &tapan_p->clsh_d,
				 WCD9XXX_CLSH_STATE_HPHL,
				 WCD9XXX_CLSH_REQ_ENABLE,
				 WCD9XXX_CLSH_EVENT_PRE_DAC);
	} else if (event == SND_SOC_DAPM_POST_PMD) {
		snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_RDAC_CLK_EN_CTL,
				    0x02, 0x00);
	}

	return 0;
}
/*
 * tapan_hphr_dac_event - DAPM event handler for the HPHR DAC.
 *
 * Gates the right RDAC clock, sets bit 6 of the DAC control register
 * and prepares the class-H controller before power-up; undoes both
 * after power-down.
 */
static int tapan_hphr_dac_event(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct tapan_priv *tapan_p = snd_soc_codec_get_drvdata(codec);

	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);

	if (event == SND_SOC_DAPM_PRE_PMU) {
		snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_RDAC_CLK_EN_CTL,
				    0x04, 0x04);
		snd_soc_update_bits(codec, w->reg, 0x40, 0x40);
		wcd9xxx_clsh_fsm(codec, &tapan_p->clsh_d,
				 WCD9XXX_CLSH_STATE_HPHR,
				 WCD9XXX_CLSH_REQ_ENABLE,
				 WCD9XXX_CLSH_EVENT_PRE_DAC);
	} else if (event == SND_SOC_DAPM_POST_PMD) {
		snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_RDAC_CLK_EN_CTL,
				    0x04, 0x00);
		snd_soc_update_bits(codec, w->reg, 0x40, 0x00);
	}

	return 0;
}
/*
 * tapan_hph_pa_event - DAPM event handler for the HPH PAs.
 *
 * Notifies MBHC around PA transitions, waits the PA settle time
 * (longer when compander 1 is active) and drives the class-H state
 * machine after power-up/power-down.  w->shift 5 = HPHL PA, 4 = HPHR.
 */
static int tapan_hph_pa_event(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
	enum wcd9xxx_notify_event e_pre_on, e_post_off;
	u8 req_clsh_state;
	u32 pa_settle_time = TAPAN_HPH_PA_SETTLE_COMP_OFF;
	dev_dbg(codec->dev, "%s: %s event = %d\n", __func__, w->name, event);
	/*
	 * NOTE(review): shift 5 pairs the HPHL PA notifier events with
	 * CLSH_STATE_HPHR (and shift 4 HPHR events with CLSH_STATE_HPHL).
	 * This cross-pairing looks intentional for the class-H controller
	 * but should be confirmed before changing.
	 */
	if (w->shift == 5) {
		e_pre_on = WCD9XXX_EVENT_PRE_HPHL_PA_ON;
		e_post_off = WCD9XXX_EVENT_POST_HPHL_PA_OFF;
		req_clsh_state = WCD9XXX_CLSH_STATE_HPHR;
	} else if (w->shift == 4) {
		e_pre_on = WCD9XXX_EVENT_PRE_HPHR_PA_ON;
		e_post_off = WCD9XXX_EVENT_POST_HPHR_PA_OFF;
		req_clsh_state = WCD9XXX_CLSH_STATE_HPHL;
	} else {
		pr_err("%s: Invalid w->shift %d\n", __func__, w->shift);
		return -EINVAL;
	}
	/* compander adds to the time the PA needs to settle */
	if (tapan->comp_enabled[COMPANDER_1])
		pa_settle_time = TAPAN_HPH_PA_SETTLE_COMP_ON;
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		/* Let MBHC module know PA is turning on */
		wcd9xxx_resmgr_notifier_call(&tapan->resmgr, e_pre_on);
		break;
	case SND_SOC_DAPM_POST_PMU:
		dev_dbg(codec->dev, "%s: sleep %d ms after %s PA enable.\n",
			__func__, pa_settle_time / 1000, w->name);
		/* Time needed for PA to settle */
		usleep_range(pa_settle_time, pa_settle_time + 1000);
		wcd9xxx_clsh_fsm(codec, &tapan->clsh_d,
						 req_clsh_state,
						 WCD9XXX_CLSH_REQ_ENABLE,
						 WCD9XXX_CLSH_EVENT_POST_PA);
		break;
	case SND_SOC_DAPM_POST_PMD:
		dev_dbg(codec->dev, "%s: sleep %d ms after %s PA disable.\n",
			__func__, pa_settle_time / 1000, w->name);
		/* Time needed for PA to settle */
		usleep_range(pa_settle_time, pa_settle_time + 1000);
		/* Let MBHC module know PA turned off */
		wcd9xxx_resmgr_notifier_call(&tapan->resmgr, e_post_off);
		wcd9xxx_clsh_fsm(codec, &tapan->clsh_d,
						 req_clsh_state,
						 WCD9XXX_CLSH_REQ_DISABLE,
						 WCD9XXX_CLSH_EVENT_POST_PA);
		break;
	}
	return 0;
}
/*
 * tapan_codec_enable_anc_hph - DAPM event handler for the ANC HPH PAs.
 *
 * Wraps tapan_hph_pa_event() and additionally loads/unloads the ANC
 * firmware and toggles the HPH CNP enable bits.  The shift checks
 * (4 on power-up, 5 on power-down) run the shared ANC/CNP work only
 * once per L/R pair; the msleep() delays pace the ANC/CNP sequencing.
 */
static int tapan_codec_enable_anc_hph(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	int ret = 0;
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		ret = tapan_hph_pa_event(w, kcontrol, event);
		if (w->shift == 4) {
			/* load ANC firmware once, on the HPHR widget */
			ret |= tapan_codec_enable_anc(w, kcontrol, event);
			msleep(50);
		}
		break;
	case SND_SOC_DAPM_POST_PMU:
		if (w->shift == 4) {
			snd_soc_update_bits(codec,
					TAPAN_A_RX_HPH_CNP_EN, 0x30, 0x30);
			msleep(30);
		}
		ret = tapan_hph_pa_event(w, kcontrol, event);
		break;
	case SND_SOC_DAPM_PRE_PMD:
		if (w->shift == 5) {
			/* disable CNP and MBHC TX7 before unloading ANC */
			snd_soc_update_bits(codec,
					TAPAN_A_RX_HPH_CNP_EN, 0x30, 0x00);
			msleep(40);
			snd_soc_update_bits(codec,
					TAPAN_A_TX_7_MBHC_EN, 0x80, 00);
			ret |= tapan_codec_enable_anc(w, kcontrol, event);
		}
		break;
	case SND_SOC_DAPM_POST_PMD:
		ret = tapan_hph_pa_event(w, kcontrol, event);
		break;
	}
	return ret;
}
/* Extra DAPM widgets registered only for the I2S interface: a supply
 * widget gating the I2S clock via bit 4 of TAPAN_A_CDC_CLK_I2S_CTL. */
static const struct snd_soc_dapm_widget tapan_dapm_i2s_widgets[] = {
    SND_SOC_DAPM_SUPPLY("I2S_CLK", TAPAN_A_CDC_CLK_I2S_CTL,
    4, 0, NULL, 0),
};
/*
 * DAPM event handler for the LINEOUT DAC widgets.
 *
 * On pre-power-up, requests the class-H controller to enter line-out
 * state before the DAC is enabled, then sets bit 6 of the widget's
 * register; on post-power-down the same bit is cleared again.
 */
static int tapan_lineout_dac_event(struct snd_soc_dapm_widget *w,
    struct snd_kcontrol *kcontrol, int event)
{
    struct snd_soc_codec *codec = w->codec;
    struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);

    dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);

    if (event == SND_SOC_DAPM_PRE_PMU) {
        wcd9xxx_clsh_fsm(codec, &tapan->clsh_d,
                 WCD9XXX_CLSH_STATE_LO,
                 WCD9XXX_CLSH_REQ_ENABLE,
                 WCD9XXX_CLSH_EVENT_PRE_DAC);
        snd_soc_update_bits(codec, w->reg, 0x40, 0x40);
    } else if (event == SND_SOC_DAPM_POST_PMD) {
        snd_soc_update_bits(codec, w->reg, 0x40, 0x00);
    }

    return 0;
}
/* DAPM event handler for the speaker DAC widget.  Currently only logs
 * the event; no hardware programming is required here. */
static int tapan_spk_dac_event(struct snd_soc_dapm_widget *w,
    struct snd_kcontrol *kcontrol, int event)
{
    struct snd_soc_codec *codec = w->codec;
    dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
    return 0;
}
/* Extra DAPM routes for the I2S interface: tie the SLIM RX/TX endpoints
 * to the I2S clock supply so the clock is enabled whenever they are. */
static const struct snd_soc_dapm_route audio_i2s_map[] = {
    {"I2S_CLK", NULL, "CDC_CONN"},
    {"SLIM RX1", NULL, "I2S_CLK"},
    {"SLIM RX2", NULL, "I2S_CLK"},
    {"SLIM TX1 MUX", NULL, "I2S_CLK"},
    {"SLIM TX2 MUX", NULL, "I2S_CLK"},
};
/* DAPM routes specific to the WCD9306 variant: RX4 mixer paths, the ANC
 * (active noise cancellation) EAR/headphone chains, DEC3/DEC4 decimator
 * inputs, ADC5/AMIC5 and MIC BIAS3 supplies.  Applied on top of the
 * common audio_map table. */
static const struct snd_soc_dapm_route wcd9306_map[] = {
    /* SLIM TX capture sources from the RX4 mixer and DEC3/DEC4 */
    {"SLIM TX1 MUX", "RMIX4", "RX4 MIX1"},
    {"SLIM TX2 MUX", "RMIX4", "RX4 MIX1"},
    {"SLIM TX3 MUX", "RMIX4", "RX4 MIX1"},
    {"SLIM TX4 MUX", "RMIX4", "RX4 MIX1"},
    {"SLIM TX5 MUX", "RMIX4", "RX4 MIX1"},
    {"SLIM TX1 MUX", "DEC3", "DEC3 MUX"},
    {"SLIM TX1 MUX", "DEC4", "DEC4 MUX"},
    {"SLIM TX2 MUX", "DEC3", "DEC3 MUX"},
    {"SLIM TX2 MUX", "DEC4", "DEC4 MUX"},
    {"SLIM TX3 MUX", "DEC3", "DEC3 MUX"},
    {"SLIM TX4 MUX", "DEC4", "DEC4 MUX"},
    /* ANC earpiece and headphone chains */
    {"ANC EAR", NULL, "ANC EAR PA"},
    {"ANC EAR PA", NULL, "EAR_PA_MIXER"},
    {"ANC1 FB MUX", "EAR_HPH_L", "RX1 MIX2"},
    {"ANC1 FB MUX", "EAR_LINE_1", "RX2 MIX2"},
    {"ANC HEADPHONE", NULL, "ANC HPHL"},
    {"ANC HEADPHONE", NULL, "ANC HPHR"},
    {"ANC HPHL", NULL, "HPHL_PA_MIXER"},
    {"ANC HPHR", NULL, "HPHR_PA_MIXER"},
    /* ANC reference inputs: any ADC or DMIC */
    {"ANC1 MUX", "ADC1", "ADC1"},
    {"ANC1 MUX", "ADC2", "ADC2"},
    {"ANC1 MUX", "ADC3", "ADC3"},
    {"ANC1 MUX", "ADC4", "ADC4"},
    {"ANC1 MUX", "ADC5", "ADC5"},
    {"ANC1 MUX", "DMIC1", "DMIC1"},
    {"ANC1 MUX", "DMIC2", "DMIC2"},
    {"ANC1 MUX", "DMIC3", "DMIC3"},
    {"ANC1 MUX", "DMIC4", "DMIC4"},
    {"ANC2 MUX", "ADC1", "ADC1"},
    {"ANC2 MUX", "ADC2", "ADC2"},
    {"ANC2 MUX", "ADC3", "ADC3"},
    {"ANC2 MUX", "ADC4", "ADC4"},
    {"ANC2 MUX", "ADC5", "ADC5"},
    {"ANC2 MUX", "DMIC1", "DMIC1"},
    {"ANC2 MUX", "DMIC2", "DMIC2"},
    {"ANC2 MUX", "DMIC3", "DMIC3"},
    {"ANC2 MUX", "DMIC4", "DMIC4"},
    {"ANC HPHR", NULL, "CDC_CONN"},
    /* RX4 mixer / speaker DAC paths */
    {"RDAC5 MUX", "DEM4", "RX4 MIX2"},
    {"SPK DAC", "Switch", "RX4 MIX2"},
    {"RX1 MIX2", NULL, "ANC1 MUX"},
    {"RX2 MIX2", NULL, "ANC2 MUX"},
    {"RX1 MIX1", NULL, "COMP1_CLK"},
    {"RX2 MIX1", NULL, "COMP1_CLK"},
    {"RX3 MIX1", NULL, "COMP2_CLK"},
    {"RX4 MIX1", NULL, "COMP0_CLK"},
    {"RX4 MIX1", NULL, "RX4 MIX1 INP1"},
    {"RX4 MIX1", NULL, "RX4 MIX1 INP2"},
    {"RX4 MIX2", NULL, "RX4 MIX1"},
    {"RX4 MIX2", NULL, "RX4 MIX2 INP1"},
    {"RX4 MIX2", NULL, "RX4 MIX2 INP2"},
    {"RX4 MIX1 INP1", "RX1", "SLIM RX1"},
    {"RX4 MIX1 INP1", "RX2", "SLIM RX2"},
    {"RX4 MIX1 INP1", "RX3", "SLIM RX3"},
    {"RX4 MIX1 INP1", "RX4", "SLIM RX4"},
    {"RX4 MIX1 INP1", "RX5", "SLIM RX5"},
    {"RX4 MIX1 INP1", "IIR1", "IIR1"},
    {"RX4 MIX1 INP2", "RX1", "SLIM RX1"},
    {"RX4 MIX1 INP2", "RX2", "SLIM RX2"},
    {"RX4 MIX1 INP2", "RX3", "SLIM RX3"},
    {"RX4 MIX1 INP2", "RX5", "SLIM RX5"},
    {"RX4 MIX1 INP2", "RX4", "SLIM RX4"},
    {"RX4 MIX1 INP2", "IIR1", "IIR1"},
    {"RX4 MIX2 INP1", "IIR1", "IIR1"},
    {"RX4 MIX2 INP2", "IIR1", "IIR1"},
    /* DEC1-DEC4 decimator sources */
    {"DEC1 MUX", "DMIC3", "DMIC3"},
    {"DEC1 MUX", "DMIC4", "DMIC4"},
    {"DEC2 MUX", "DMIC3", "DMIC3"},
    {"DEC2 MUX", "DMIC4", "DMIC4"},
    {"DEC3 MUX", "ADC1", "ADC1"},
    {"DEC3 MUX", "ADC2", "ADC2"},
    {"DEC3 MUX", "ADC3", "ADC3"},
    {"DEC3 MUX", "ADC4", "ADC4"},
    {"DEC3 MUX", "ADC5", "ADC5"},
    {"DEC3 MUX", "DMIC1", "DMIC1"},
    {"DEC3 MUX", "DMIC2", "DMIC2"},
    {"DEC3 MUX", "DMIC3", "DMIC3"},
    {"DEC3 MUX", "DMIC4", "DMIC4"},
    {"DEC3 MUX", NULL, "CDC_CONN"},
    {"DEC4 MUX", "ADC1", "ADC1"},
    {"DEC4 MUX", "ADC2", "ADC2"},
    {"DEC4 MUX", "ADC3", "ADC3"},
    {"DEC4 MUX", "ADC4", "ADC4"},
    {"DEC4 MUX", "ADC5", "ADC5"},
    {"DEC4 MUX", "DMIC1", "DMIC1"},
    {"DEC4 MUX", "DMIC2", "DMIC2"},
    {"DEC4 MUX", "DMIC3", "DMIC3"},
    {"DEC4 MUX", "DMIC4", "DMIC4"},
    {"DEC4 MUX", NULL, "CDC_CONN"},
    /* fifth analog mic and sidetone sources */
    {"ADC5", NULL, "AMIC5"},
    {"AUX_PGA_Left", NULL, "AMIC5"},
    {"IIR1 INP1 MUX", "DEC3", "DEC3 MUX"},
    {"IIR1 INP1 MUX", "DEC4", "DEC4 MUX"},
    {"MIC BIAS3 Internal1", NULL, "LDO_H"},
    {"MIC BIAS3 Internal2", NULL, "LDO_H"},
    {"MIC BIAS3 External", NULL, "LDO_H"},
};
/* Common DAPM routing table shared by all Tapan variants: SLIMbus AIF
 * capture mixers and playback muxes, the earpiece/headphone/lineout/
 * speaker analog chains, RX mixer interconnect, decimator and ADC
 * inputs, AUX PGA paths, mic bias supplies and IIR sidetone sources. */
static const struct snd_soc_dapm_route audio_map[] = {
    /* SLIMBUS Connections */
    {"AIF1 CAP", NULL, "AIF1_CAP Mixer"},
    {"AIF2 CAP", NULL, "AIF2_CAP Mixer"},
    {"AIF3 CAP", NULL, "AIF3_CAP Mixer"},
    /* SLIM_MIXER("AIF1_CAP Mixer"),*/
    {"AIF1_CAP Mixer", "SLIM TX1", "SLIM TX1 MUX"},
    {"AIF1_CAP Mixer", "SLIM TX2", "SLIM TX2 MUX"},
    {"AIF1_CAP Mixer", "SLIM TX3", "SLIM TX3 MUX"},
    {"AIF1_CAP Mixer", "SLIM TX4", "SLIM TX4 MUX"},
    {"AIF1_CAP Mixer", "SLIM TX5", "SLIM TX5 MUX"},
    /* SLIM_MIXER("AIF2_CAP Mixer"),*/
    {"AIF2_CAP Mixer", "SLIM TX1", "SLIM TX1 MUX"},
    {"AIF2_CAP Mixer", "SLIM TX2", "SLIM TX2 MUX"},
    {"AIF2_CAP Mixer", "SLIM TX3", "SLIM TX3 MUX"},
    {"AIF2_CAP Mixer", "SLIM TX4", "SLIM TX4 MUX"},
    {"AIF2_CAP Mixer", "SLIM TX5", "SLIM TX5 MUX"},
    /* SLIM_MIXER("AIF3_CAP Mixer"),*/
    {"AIF3_CAP Mixer", "SLIM TX1", "SLIM TX1 MUX"},
    {"AIF3_CAP Mixer", "SLIM TX2", "SLIM TX2 MUX"},
    {"AIF3_CAP Mixer", "SLIM TX3", "SLIM TX3 MUX"},
    {"AIF3_CAP Mixer", "SLIM TX4", "SLIM TX4 MUX"},
    {"AIF3_CAP Mixer", "SLIM TX5", "SLIM TX5 MUX"},
    /* SLIM TX mux sources: decimators or RX mixer loopback */
    {"SLIM TX1 MUX", "DEC1", "DEC1 MUX"},
    {"SLIM TX1 MUX", "DEC2", "DEC2 MUX"},
    {"SLIM TX1 MUX", "RMIX1", "RX1 MIX1"},
    {"SLIM TX1 MUX", "RMIX2", "RX2 MIX1"},
    {"SLIM TX1 MUX", "RMIX3", "RX3 MIX1"},
    {"SLIM TX2 MUX", "DEC1", "DEC1 MUX"},
    {"SLIM TX2 MUX", "DEC2", "DEC2 MUX"},
    {"SLIM TX2 MUX", "RMIX1", "RX1 MIX1"},
    {"SLIM TX2 MUX", "RMIX2", "RX2 MIX1"},
    {"SLIM TX2 MUX", "RMIX3", "RX3 MIX1"},
    {"SLIM TX3 MUX", "RMIX1", "RX1 MIX1"},
    {"SLIM TX3 MUX", "RMIX2", "RX2 MIX1"},
    {"SLIM TX3 MUX", "RMIX3", "RX3 MIX1"},
    {"SLIM TX4 MUX", "RMIX1", "RX1 MIX1"},
    {"SLIM TX4 MUX", "RMIX2", "RX2 MIX1"},
    {"SLIM TX4 MUX", "RMIX3", "RX3 MIX1"},
    {"SLIM TX5 MUX", "DEC1", "DEC1 MUX"},
    {"SLIM TX5 MUX", "RMIX1", "RX1 MIX1"},
    {"SLIM TX5 MUX", "RMIX2", "RX2 MIX1"},
    {"SLIM TX5 MUX", "RMIX3", "RX3 MIX1"},
    /* Earpiece (RX MIX1) */
    {"EAR", NULL, "EAR PA"},
    {"EAR PA", NULL, "EAR_PA_MIXER"},
    {"EAR_PA_MIXER", NULL, "DAC1"},
    {"DAC1", NULL, "RX_BIAS"},
    {"DAC1", NULL, "CDC_CP_VDD"},
    /* Headset (RX MIX1 and RX MIX2) */
    {"HEADPHONE", NULL, "HPHL"},
    {"HEADPHONE", NULL, "HPHR"},
    {"HPHL", NULL, "HPHL_PA_MIXER"},
    {"HPHL_PA_MIXER", NULL, "HPHL DAC"},
    {"HPHL DAC", NULL, "RX_BIAS"},
    {"HPHL DAC", NULL, "CDC_CP_VDD"},
    {"HPHR", NULL, "HPHR_PA_MIXER"},
    {"HPHR_PA_MIXER", NULL, "HPHR DAC"},
    {"HPHR DAC", NULL, "RX_BIAS"},
    {"HPHR DAC", NULL, "CDC_CP_VDD"},
    {"DAC1", "Switch", "CLASS_H_DSM MUX"},
    {"HPHL DAC", "Switch", "CLASS_H_DSM MUX"},
    {"HPHR DAC", NULL, "RDAC3 MUX"},
    /* Lineout and speaker paths */
    {"LINEOUT1", NULL, "LINEOUT1 PA"},
    {"LINEOUT2", NULL, "LINEOUT2 PA"},
    {"SPK_OUT", NULL, "SPK PA"},
    {"LINEOUT1 PA", NULL, "LINEOUT1_PA_MIXER"},
    {"LINEOUT1_PA_MIXER", NULL, "LINEOUT1 DAC"},
    {"LINEOUT2 PA", NULL, "LINEOUT2_PA_MIXER"},
    {"LINEOUT2_PA_MIXER", NULL, "LINEOUT2 DAC"},
    {"RDAC5 MUX", "DEM3_INV", "RX3 MIX1"},
    {"LINEOUT2 DAC", NULL, "RDAC5 MUX"},
    {"RDAC4 MUX", "DEM3", "RX3 MIX1"},
    {"RDAC4 MUX", "DEM2", "RX2 CHAIN"},
    {"LINEOUT1 DAC", NULL, "RDAC4 MUX"},
    {"SPK PA", NULL, "SPK DAC"},
    {"SPK DAC", NULL, "VDD_SPKDRV"},
    /* RX interpolator chains */
    {"RX1 INTERPOLATOR", NULL, "RX1 MIX2"},
    {"RX1 CHAIN", NULL, "RX1 INTERPOLATOR"},
    {"RX2 INTERPOLATOR", NULL, "RX2 MIX2"},
    {"RX2 CHAIN", NULL, "RX2 INTERPOLATOR"},
    {"CLASS_H_DSM MUX", "RX_HPHL", "RX1 CHAIN"},
    {"LINEOUT1 DAC", NULL, "RX_BIAS"},
    {"LINEOUT2 DAC", NULL, "RX_BIAS"},
    {"LINEOUT1 DAC", NULL, "CDC_CP_VDD"},
    {"LINEOUT2 DAC", NULL, "CDC_CP_VDD"},
    {"RDAC3 MUX", "DEM2", "RX2 CHAIN"},
    {"RDAC3 MUX", "DEM1", "RX1 CHAIN"},
    /* RX mixer input stages */
    {"RX1 MIX1", NULL, "RX1 MIX1 INP1"},
    {"RX1 MIX1", NULL, "RX1 MIX1 INP2"},
    {"RX1 MIX1", NULL, "RX1 MIX1 INP3"},
    {"RX2 MIX1", NULL, "RX2 MIX1 INP1"},
    {"RX2 MIX1", NULL, "RX2 MIX1 INP2"},
    {"RX3 MIX1", NULL, "RX3 MIX1 INP1"},
    {"RX3 MIX1", NULL, "RX3 MIX1 INP2"},
    {"RX1 MIX2", NULL, "RX1 MIX1"},
    {"RX1 MIX2", NULL, "RX1 MIX2 INP1"},
    {"RX1 MIX2", NULL, "RX1 MIX2 INP2"},
    {"RX2 MIX2", NULL, "RX2 MIX1"},
    {"RX2 MIX2", NULL, "RX2 MIX2 INP1"},
    {"RX2 MIX2", NULL, "RX2 MIX2 INP2"},
    /* SLIM_MUX("AIF1_PB", "AIF1 PB"),*/
    {"SLIM RX1 MUX", "AIF1_PB", "AIF1 PB"},
    {"SLIM RX2 MUX", "AIF1_PB", "AIF1 PB"},
    {"SLIM RX3 MUX", "AIF1_PB", "AIF1 PB"},
    {"SLIM RX4 MUX", "AIF1_PB", "AIF1 PB"},
    {"SLIM RX5 MUX", "AIF1_PB", "AIF1 PB"},
    /* SLIM_MUX("AIF2_PB", "AIF2 PB"),*/
    {"SLIM RX1 MUX", "AIF2_PB", "AIF2 PB"},
    {"SLIM RX2 MUX", "AIF2_PB", "AIF2 PB"},
    {"SLIM RX3 MUX", "AIF2_PB", "AIF2 PB"},
    {"SLIM RX4 MUX", "AIF2_PB", "AIF2 PB"},
    {"SLIM RX5 MUX", "AIF2_PB", "AIF2 PB"},
    /* SLIM_MUX("AIF3_PB", "AIF3 PB"),*/
    {"SLIM RX1 MUX", "AIF3_PB", "AIF3 PB"},
    {"SLIM RX2 MUX", "AIF3_PB", "AIF3 PB"},
    {"SLIM RX3 MUX", "AIF3_PB", "AIF3 PB"},
    {"SLIM RX4 MUX", "AIF3_PB", "AIF3 PB"},
    {"SLIM RX5 MUX", "AIF3_PB", "AIF3 PB"},
    {"SLIM RX1", NULL, "SLIM RX1 MUX"},
    {"SLIM RX2", NULL, "SLIM RX2 MUX"},
    {"SLIM RX3", NULL, "SLIM RX3 MUX"},
    {"SLIM RX4", NULL, "SLIM RX4 MUX"},
    {"SLIM RX5", NULL, "SLIM RX5 MUX"},
    /* RX mixer input selections */
    {"RX1 MIX1 INP1", "RX1", "SLIM RX1"},
    {"RX1 MIX1 INP1", "RX2", "SLIM RX2"},
    {"RX1 MIX1 INP1", "RX3", "SLIM RX3"},
    {"RX1 MIX1 INP1", "RX4", "SLIM RX4"},
    {"RX1 MIX1 INP1", "RX5", "SLIM RX5"},
    {"RX1 MIX1 INP1", "IIR1", "IIR1"},
    {"RX1 MIX1 INP1", "IIR2", "IIR2"},
    {"RX1 MIX1 INP2", "RX1", "SLIM RX1"},
    {"RX1 MIX1 INP2", "RX2", "SLIM RX2"},
    {"RX1 MIX1 INP2", "RX3", "SLIM RX3"},
    {"RX1 MIX1 INP2", "RX4", "SLIM RX4"},
    {"RX1 MIX1 INP2", "RX5", "SLIM RX5"},
    {"RX1 MIX1 INP2", "IIR1", "IIR1"},
    {"RX1 MIX1 INP2", "IIR2", "IIR2"},
    {"RX1 MIX1 INP3", "RX1", "SLIM RX1"},
    {"RX1 MIX1 INP3", "RX2", "SLIM RX2"},
    {"RX1 MIX1 INP3", "RX3", "SLIM RX3"},
    {"RX1 MIX1 INP3", "RX4", "SLIM RX4"},
    {"RX1 MIX1 INP3", "RX5", "SLIM RX5"},
    {"RX2 MIX1 INP1", "RX1", "SLIM RX1"},
    {"RX2 MIX1 INP1", "RX2", "SLIM RX2"},
    {"RX2 MIX1 INP1", "RX3", "SLIM RX3"},
    {"RX2 MIX1 INP1", "RX4", "SLIM RX4"},
    {"RX2 MIX1 INP1", "RX5", "SLIM RX5"},
    {"RX2 MIX1 INP1", "IIR1", "IIR1"},
    {"RX2 MIX1 INP1", "IIR2", "IIR2"},
    {"RX2 MIX1 INP2", "RX1", "SLIM RX1"},
    {"RX2 MIX1 INP2", "RX2", "SLIM RX2"},
    {"RX2 MIX1 INP2", "RX3", "SLIM RX3"},
    {"RX2 MIX1 INP2", "RX4", "SLIM RX4"},
    {"RX2 MIX1 INP2", "RX5", "SLIM RX5"},
    {"RX2 MIX1 INP2", "IIR1", "IIR1"},
    {"RX2 MIX1 INP2", "IIR2", "IIR2"},
    {"RX3 MIX1 INP1", "RX1", "SLIM RX1"},
    {"RX3 MIX1 INP1", "RX2", "SLIM RX2"},
    {"RX3 MIX1 INP1", "RX3", "SLIM RX3"},
    {"RX3 MIX1 INP1", "RX4", "SLIM RX4"},
    {"RX3 MIX1 INP1", "RX5", "SLIM RX5"},
    {"RX3 MIX1 INP1", "IIR1", "IIR1"},
    {"RX3 MIX1 INP1", "IIR2", "IIR2"},
    {"RX3 MIX1 INP2", "RX1", "SLIM RX1"},
    {"RX3 MIX1 INP2", "RX2", "SLIM RX2"},
    {"RX3 MIX1 INP2", "RX3", "SLIM RX3"},
    {"RX3 MIX1 INP2", "RX4", "SLIM RX4"},
    {"RX3 MIX1 INP2", "RX5", "SLIM RX5"},
    {"RX3 MIX1 INP2", "IIR1", "IIR1"},
    {"RX3 MIX1 INP2", "IIR2", "IIR2"},
    {"RX1 MIX2 INP1", "IIR1", "IIR1"},
    {"RX1 MIX2 INP2", "IIR1", "IIR1"},
    {"RX2 MIX2 INP1", "IIR1", "IIR1"},
    {"RX2 MIX2 INP2", "IIR1", "IIR1"},
    {"RX1 MIX2 INP1", "IIR2", "IIR2"},
    {"RX1 MIX2 INP2", "IIR2", "IIR2"},
    {"RX2 MIX2 INP1", "IIR2", "IIR2"},
    {"RX2 MIX2 INP2", "IIR2", "IIR2"},
    /* Decimator Inputs */
    {"DEC1 MUX", "ADC1", "ADC1"},
    {"DEC1 MUX", "ADC2", "ADC2"},
    {"DEC1 MUX", "ADC3", "ADC3"},
    {"DEC1 MUX", "ADC4", "ADC4"},
    {"DEC1 MUX", "DMIC1", "DMIC1"},
    {"DEC1 MUX", "DMIC2", "DMIC2"},
    {"DEC1 MUX", NULL, "CDC_CONN"},
    {"DEC2 MUX", "ADC1", "ADC1"},
    {"DEC2 MUX", "ADC2", "ADC2"},
    {"DEC2 MUX", "ADC3", "ADC3"},
    {"DEC2 MUX", "ADC4", "ADC4"},
    {"DEC2 MUX", "DMIC1", "DMIC1"},
    {"DEC2 MUX", "DMIC2", "DMIC2"},
    {"DEC2 MUX", NULL, "CDC_CONN"},
    /* ADC Connections */
    {"ADC1", NULL, "AMIC1"},
    {"ADC2", NULL, "AMIC2"},
    {"ADC3", NULL, "AMIC3"},
    {"ADC4", NULL, "AMIC4"},
    /* AUX PGA Connections */
    {"EAR_PA_MIXER", "AUX_PGA_L Switch", "AUX_PGA_Left"},
    {"HPHL_PA_MIXER", "AUX_PGA_L Switch", "AUX_PGA_Left"},
    {"HPHR_PA_MIXER", "AUX_PGA_R Switch", "AUX_PGA_Right"},
    {"LINEOUT1_PA_MIXER", "AUX_PGA_L Switch", "AUX_PGA_Left"},
    {"LINEOUT2_PA_MIXER", "AUX_PGA_R Switch", "AUX_PGA_Right"},
    /* Mic bias supplies */
    {"MIC BIAS1 Internal1", NULL, "LDO_H"},
    {"MIC BIAS1 Internal2", NULL, "LDO_H"},
    {"MIC BIAS1 External", NULL, "LDO_H"},
    {"MIC BIAS2 Internal1", NULL, "LDO_H"},
    {"MIC BIAS2 Internal2", NULL, "LDO_H"},
    {"MIC BIAS2 Internal3", NULL, "LDO_H"},
    {"MIC BIAS2 External", NULL, "LDO_H"},
    {DAPM_MICBIAS2_EXTERNAL_STANDALONE, NULL, "LDO_H Standalone"},
    /*sidetone path enable*/
    {"IIR1", NULL, "IIR1 INP1 MUX"},
    {"IIR1 INP1 MUX", "DEC1", "DEC1 MUX"},
    {"IIR1 INP1 MUX", "DEC2", "DEC2 MUX"},
    {"IIR1 INP1 MUX", "DEC3", "DEC3 MUX"},
    {"IIR1 INP1 MUX", "DEC4", "DEC4 MUX"},
    {"IIR1 INP1 MUX", "RX1", "SLIM RX1"},
    {"IIR1 INP1 MUX", "RX2", "SLIM RX2"},
    {"IIR1 INP1 MUX", "RX3", "SLIM RX3"},
    {"IIR1 INP1 MUX", "RX4", "SLIM RX4"},
    {"IIR1 INP1 MUX", "RX5", "SLIM RX5"},
    {"IIR1", NULL, "IIR1 INP2 MUX"},
    {"IIR1 INP2 MUX", "DEC1", "DEC1 MUX"},
    {"IIR1 INP2 MUX", "DEC2", "DEC2 MUX"},
    {"IIR1 INP2 MUX", "DEC3", "DEC3 MUX"},
    {"IIR1 INP2 MUX", "DEC4", "DEC4 MUX"},
    {"IIR1 INP2 MUX", "RX1", "SLIM RX1"},
    {"IIR1 INP2 MUX", "RX2", "SLIM RX2"},
    {"IIR1 INP2 MUX", "RX3", "SLIM RX3"},
    {"IIR1 INP2 MUX", "RX4", "SLIM RX4"},
    {"IIR1 INP2 MUX", "RX5", "SLIM RX5"},
    {"IIR1", NULL, "IIR1 INP3 MUX"},
    {"IIR1 INP3 MUX", "DEC1", "DEC1 MUX"},
    {"IIR1 INP3 MUX", "DEC2", "DEC2 MUX"},
    {"IIR1 INP3 MUX", "DEC3", "DEC3 MUX"},
    {"IIR1 INP3 MUX", "DEC4", "DEC4 MUX"},
    {"IIR1 INP3 MUX", "RX1", "SLIM RX1"},
    {"IIR1 INP3 MUX", "RX2", "SLIM RX2"},
    {"IIR1 INP3 MUX", "RX3", "SLIM RX3"},
    {"IIR1 INP3 MUX", "RX4", "SLIM RX4"},
    {"IIR1 INP3 MUX", "RX5", "SLIM RX5"},
    {"IIR1", NULL, "IIR1 INP4 MUX"},
    {"IIR1 INP4 MUX", "DEC1", "DEC1 MUX"},
    {"IIR1 INP4 MUX", "DEC2", "DEC2 MUX"},
    {"IIR1 INP4 MUX", "DEC3", "DEC3 MUX"},
    {"IIR1 INP4 MUX", "DEC4", "DEC4 MUX"},
    {"IIR1 INP4 MUX", "RX1", "SLIM RX1"},
    {"IIR1 INP4 MUX", "RX2", "SLIM RX2"},
    {"IIR1 INP4 MUX", "RX3", "SLIM RX3"},
    {"IIR1 INP4 MUX", "RX4", "SLIM RX4"},
    {"IIR1 INP4 MUX", "RX5", "SLIM RX5"},
    {"IIR2", NULL, "IIR2 INP1 MUX"},
    {"IIR2 INP1 MUX", "DEC1", "DEC1 MUX"},
    {"IIR2 INP1 MUX", "DEC2", "DEC2 MUX"},
    {"IIR2 INP1 MUX", "DEC3", "DEC3 MUX"},
    {"IIR2 INP1 MUX", "DEC4", "DEC4 MUX"},
    {"IIR2 INP1 MUX", "RX1", "SLIM RX1"},
    {"IIR2 INP1 MUX", "RX2", "SLIM RX2"},
    {"IIR2 INP1 MUX", "RX3", "SLIM RX3"},
    {"IIR2 INP1 MUX", "RX4", "SLIM RX4"},
    {"IIR2 INP1 MUX", "RX5", "SLIM RX5"},
    {"IIR2", NULL, "IIR2 INP2 MUX"},
    {"IIR2 INP2 MUX", "DEC1", "DEC1 MUX"},
    {"IIR2 INP2 MUX", "DEC2", "DEC2 MUX"},
    {"IIR2 INP2 MUX", "DEC3", "DEC3 MUX"},
    {"IIR2 INP2 MUX", "DEC4", "DEC4 MUX"},
    {"IIR2 INP2 MUX", "RX1", "SLIM RX1"},
    {"IIR2 INP2 MUX", "RX2", "SLIM RX2"},
    {"IIR2 INP2 MUX", "RX3", "SLIM RX3"},
    {"IIR2 INP2 MUX", "RX4", "SLIM RX4"},
    {"IIR2 INP2 MUX", "RX5", "SLIM RX5"},
    {"IIR2", NULL, "IIR2 INP3 MUX"},
    {"IIR2 INP3 MUX", "DEC1", "DEC1 MUX"},
    {"IIR2 INP3 MUX", "DEC2", "DEC2 MUX"},
    {"IIR2 INP3 MUX", "DEC3", "DEC3 MUX"},
    {"IIR2 INP3 MUX", "DEC4", "DEC4 MUX"},
    {"IIR2 INP3 MUX", "RX1", "SLIM RX1"},
    {"IIR2 INP3 MUX", "RX2", "SLIM RX2"},
    {"IIR2 INP3 MUX", "RX3", "SLIM RX3"},
    {"IIR2 INP3 MUX", "RX4", "SLIM RX4"},
    {"IIR2 INP3 MUX", "RX5", "SLIM RX5"},
    {"IIR2", NULL, "IIR2 INP4 MUX"},
    {"IIR2 INP4 MUX", "DEC1", "DEC1 MUX"},
    {"IIR2 INP4 MUX", "DEC2", "DEC2 MUX"},
    {"IIR2 INP4 MUX", "DEC3", "DEC3 MUX"},
    {"IIR2 INP4 MUX", "DEC4", "DEC4 MUX"},
    {"IIR2 INP4 MUX", "RX1", "SLIM RX1"},
    {"IIR2 INP4 MUX", "RX2", "SLIM RX2"},
    {"IIR2 INP4 MUX", "RX3", "SLIM RX3"},
    {"IIR2 INP4 MUX", "RX4", "SLIM RX4"},
    {"IIR2 INP4 MUX", "RX5", "SLIM RX5"},
};
/* DAPM routes specific to the WCD9302 variant, which has no RX4 path:
 * the speaker DAC and RDAC5 are fed from RX3 MIX1 instead. */
static const struct snd_soc_dapm_route wcd9302_map[] = {
    {"SPK DAC", "Switch", "RX3 MIX1"},
    {"RDAC5 MUX", "DEM4", "RX3 MIX1"},
    {"RDAC5 MUX", "DEM3_INV", "RDAC4 MUX"},
};
/* Return non-zero if @reg is a readable codec register, per the
 * tapan_reg_readable[] lookup table.
 * NOTE(review): no bounds check on @reg here -- presumably callers
 * guarantee reg <= TAPAN_MAX_REGISTER (tapan_read/tapan_write BUG_ON
 * out-of-range values before calling); verify for any new caller. */
static int tapan_readable(struct snd_soc_codec *ssc, unsigned int reg)
{
    return tapan_reg_readable[reg];
}
/*
 * Return true if @reg is one of the digital gain registers (RX volume
 * control B2 or TX gain).  These are treated as volatile because the
 * hardware requires the write even when the value is unchanged.
 */
static bool tapan_is_digital_gain_register(unsigned int reg)
{
    switch (reg) {
    case TAPAN_A_CDC_RX1_VOL_CTL_B2_CTL:
    case TAPAN_A_CDC_RX2_VOL_CTL_B2_CTL:
    case TAPAN_A_CDC_RX3_VOL_CTL_B2_CTL:
    case TAPAN_A_CDC_RX4_VOL_CTL_B2_CTL:
    case TAPAN_A_CDC_TX1_VOL_CTL_GAIN:
    case TAPAN_A_CDC_TX2_VOL_CTL_GAIN:
    case TAPAN_A_CDC_TX3_VOL_CTL_GAIN:
    case TAPAN_A_CDC_TX4_VOL_CTL_GAIN:
        return true;
    default:
        return false;
    }
}
/* Return 1 if @reg must bypass the register cache (always read from /
 * written to hardware), 0 if it may be cached. */
static int tapan_volatile(struct snd_soc_codec *ssc, unsigned int reg)
{
    int i = 0;
    /* Registers lower than 0x100 are top level registers which can be
     * written by the Tapan core driver.
     */
    if ((reg >= TAPAN_A_CDC_MBHC_EN_CTL) || (reg < 0x100))
        return 1;
    /* IIR Coeff registers are not cacheable */
    if ((reg >= TAPAN_A_CDC_IIR1_COEF_B1_CTL) &&
        (reg <= TAPAN_A_CDC_IIR2_COEF_B2_CTL))
        return 1;
    /* ANC filter registers are not cacheable */
    if ((reg >= TAPAN_A_CDC_ANC1_IIR_B1_CTL) &&
        (reg <= TAPAN_A_CDC_ANC1_LPF_B2_CTL))
        return 1;
    if ((reg >= TAPAN_A_CDC_ANC2_IIR_B1_CTL) &&
        (reg <= TAPAN_A_CDC_ANC2_LPF_B2_CTL))
        return 1;
    /* Digital gain register is not cacheable so we have to write
     * the setting even it is the same
     */
    if (tapan_is_digital_gain_register(reg))
        return 1;
    /* HPH status registers */
    if (reg == TAPAN_A_RX_HPH_L_STATUS || reg == TAPAN_A_RX_HPH_R_STATUS)
        return 1;
    if (reg == TAPAN_A_MBHC_INSERT_DET_STATUS)
        return 1;
    /* Registers claimed by the audio register config table (e.g. used
     * by the ADSP) must also go straight to hardware. */
    for (i = 0; i < ARRAY_SIZE(audio_reg_cfg); i++)
        if (audio_reg_cfg[i].reg_logical_addr -
            TAPAN_REGISTER_START_OFFSET == reg)
            return 1;
    return 0;
}
#define TAPAN_FORMATS (SNDRV_PCM_FMTBIT_S16_LE)
/*
 * Fix: the S24 entry previously used SNDRV_PCM_FORMAT_S24_LE (the
 * format enum value) instead of SNDRV_PCM_FMTBIT_S24_LE (the bit mask,
 * 1 << SNDRV_PCM_FORMAT_S24_LE).  ORing the raw enum value corrupted
 * the advertised format mask, so DAIs using this macro did not actually
 * declare 24-bit support.
 */
#define TAPAN_FORMATS_S16_S24_LE (SNDRV_PCM_FMTBIT_S16_LE | \
                  SNDRV_PCM_FMTBIT_S24_LE)
/*
 * Codec register write path: mirror cacheable registers into the ASoC
 * register cache, then always write the value to the hardware via the
 * wcd9xxx core.  Returns the hardware write result.
 */
static int tapan_write(struct snd_soc_codec *codec, unsigned int reg,
    unsigned int value)
{
    struct wcd9xxx *wcd9xxx = codec->control_data;
    int err;

    if (reg == SND_SOC_NOPM)
        return 0;

    BUG_ON(reg > TAPAN_MAX_REGISTER);

    /* Keep the cache coherent for non-volatile registers. */
    if (!tapan_volatile(codec, reg)) {
        err = snd_soc_cache_write(codec, reg, value);
        if (err != 0)
            dev_err(codec->dev, "Cache write to %x failed: %d\n",
                reg, err);
    }

    return wcd9xxx_reg_write(&wcd9xxx->core_res, reg, value);
}
/*
 * Codec register read path: serve cacheable, readable registers from
 * the ASoC register cache and fall back to a hardware read for
 * volatile registers or on cache failure.
 */
static unsigned int tapan_read(struct snd_soc_codec *codec,
    unsigned int reg)
{
    struct wcd9xxx *wcd9xxx = codec->control_data;
    unsigned int val;
    int err;

    if (reg == SND_SOC_NOPM)
        return 0;

    BUG_ON(reg > TAPAN_MAX_REGISTER);

    if (!tapan_volatile(codec, reg) && tapan_readable(codec, reg) &&
        reg < codec->driver->reg_cache_size) {
        err = snd_soc_cache_read(codec, reg, &val);
        if (err >= 0)
            return val;
        dev_err(codec->dev, "Cache read from %x failed: %d\n",
            reg, err);
    }

    /* Volatile register or cache miss: go to the hardware. */
    return wcd9xxx_reg_read(&wcd9xxx->core_res, reg);
}
/*
 * PCM startup callback: take a runtime-PM vote on the parent bus device
 * so it stays powered for the duration of the stream.  The matching
 * unvote happens in tapan_shutdown().
 */
static int tapan_startup(struct snd_pcm_substream *substream,
    struct snd_soc_dai *dai)
{
    struct wcd9xxx *core = dev_get_drvdata(dai->codec->dev->parent);

    dev_dbg(dai->codec->dev, "%s(): substream = %s stream = %d\n",
        __func__, substream->name, substream->stream);

    if (core && core->dev && core->dev->parent)
        pm_runtime_get_sync(core->dev->parent);

    return 0;
}
/*
 * PCM shutdown callback: release the runtime-PM vote taken in
 * tapan_startup(), but only when the DAI has no channels still active
 * (a non-zero ch_mask means a SLIMbus channel is still in use).
 */
static void tapan_shutdown(struct snd_pcm_substream *substream,
    struct snd_soc_dai *dai)
{
    struct wcd9xxx *core = dev_get_drvdata(dai->codec->dev->parent);
    struct tapan_priv *tapan = snd_soc_codec_get_drvdata(dai->codec);
    u32 active = 0;

    dev_dbg(dai->codec->dev, "%s(): substream = %s stream = %d\n",
        __func__, substream->name, substream->stream);

    if (dai->id < NUM_CODEC_DAIS && tapan->dai[dai->id].ch_mask) {
        active = 1;
        dev_dbg(dai->codec->dev, "%s(): Codec DAI: chmask[%d] = 0x%lx\n",
            __func__, dai->id,
            tapan->dai[dai->id].ch_mask);
    }

    if (!active && core && core->dev && core->dev->parent) {
        pm_runtime_mark_last_busy(core->dev->parent);
        pm_runtime_put(core->dev->parent);
        dev_dbg(dai->codec->dev, "%s: unvote requested", __func__);
    }
}
/*
 * Request a new load current (in uA) on the cdc-vdd-cx power rail so
 * the regulator framework can pick an appropriate operating mode.
 * Errors are logged but not propagated (best effort).
 */
static void tapan_set_vdd_cx_current(struct snd_soc_codec *codec,
    int current_uA)
{
    struct regulator *cx_reg;
    int err;

    cx_reg = tapan_codec_find_regulator(codec, "cdc-vdd-cx");
    if (!cx_reg) {
        dev_err(codec->dev, "%s: Regulator %s not defined\n",
            __func__, "cdc-vdd-cx-supply");
        return;
    }

    err = regulator_set_optimum_mode(cx_reg, current_uA);
    if (err < 0)
        dev_err(codec->dev,
            "%s: Failed to set vdd_cx current to %d\n",
            __func__, current_uA);
}
/* Enable or disable the codec MCLK through the shared resource manager.
 * On enable: raise the VDD_CX rail to its active current, then acquire
 * bandgap and MCLK references.  On disable: release them in reverse
 * order and drop the rail back to its sleep current.  The whole
 * sequence runs under the bandgap/clock lock.  @dapm is currently only
 * logged.  Always returns 0. */
int tapan_mclk_enable(struct snd_soc_codec *codec, int mclk_enable, bool dapm)
{
    struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
    dev_dbg(codec->dev, "%s: mclk_enable = %u, dapm = %d\n", __func__,
        mclk_enable, dapm);
    WCD9XXX_BG_CLK_LOCK(&tapan->resmgr);
    if (mclk_enable) {
        /* Raise the rail before drawing clock/bandgap current. */
        tapan_set_vdd_cx_current(codec, TAPAN_VDD_CX_OPTIMAL_UA);
        wcd9xxx_resmgr_get_bandgap(&tapan->resmgr,
                WCD9XXX_BANDGAP_AUDIO_MODE);
        wcd9xxx_resmgr_get_clk_block(&tapan->resmgr, WCD9XXX_CLK_MCLK);
    } else {
        /* Put clock and BG */
        wcd9xxx_resmgr_put_clk_block(&tapan->resmgr, WCD9XXX_CLK_MCLK);
        wcd9xxx_resmgr_put_bandgap(&tapan->resmgr,
                WCD9XXX_BANDGAP_AUDIO_MODE);
        /* Set the vdd cx power rail sleep mode current */
        tapan_set_vdd_cx_current(codec, TAPAN_VDD_CX_SLEEP_UA);
    }
    WCD9XXX_BG_CLK_UNLOCK(&tapan->resmgr);
    return 0;
}
/* DAI sysclk callback.  The codec clock is managed elsewhere (see
 * tapan_mclk_enable), so this only logs and reports success. */
static int tapan_set_dai_sysclk(struct snd_soc_dai *dai,
    int clk_id, unsigned int freq, int dir)
{
    dev_dbg(dai->codec->dev, "%s\n", __func__);
    return 0;
}
/*
 * tapan_set_dai_fmt - configure the codec master/slave role for I2S.
 * @dai: the DAI being configured (only AIF1_CAP/AIF1_PB matter here)
 * @fmt: SND_SOC_DAIFMT_* flags from the machine driver
 *
 * Only the master mask is honored, and only on the I2C (I2S) control
 * interface; SLIMbus configurations ignore this call.  The original
 * code had identical snd_soc_update_bits() calls duplicated across the
 * AIF1_CAP and AIF1_PB branches; they are consolidated here with no
 * behavior change.
 *
 * Return: 0 on success, -EINVAL for an unsupported master mode.
 */
static int tapan_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
    u8 val = 0;
    struct snd_soc_codec *codec = dai->codec;
    struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);

    dev_dbg(codec->dev, "%s\n", __func__);

    switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
    case SND_SOC_DAIFMT_CBS_CFS:
        /* CPU is master: clear the codec master-mode bit. */
        if (tapan->intf_type == WCD9XXX_INTERFACE_TYPE_I2C &&
            (dai->id == AIF1_CAP || dai->id == AIF1_PB))
            snd_soc_update_bits(codec,
                    TAPAN_A_CDC_CLK_I2S_CTL,
                    TAPAN_I2S_MASTER_MODE_MASK, 0);
        break;
    case SND_SOC_DAIFMT_CBM_CFM:
        /* CPU is slave: codec drives the I2S clocks. */
        if (tapan->intf_type == WCD9XXX_INTERFACE_TYPE_I2C &&
            (dai->id == AIF1_CAP || dai->id == AIF1_PB)) {
            val = TAPAN_I2S_MASTER_MODE_MASK;
            snd_soc_update_bits(codec,
                    TAPAN_A_CDC_CLK_I2S_CTL, val, val);
        }
        break;
    default:
        return -EINVAL;
    }
    return 0;
}
/*
 * tapan_set_channel_map - program the SLIMbus TX/RX slot mapping.
 * @dai:     codec DAI
 * @tx_num:  number of TX slots in @tx_slot
 * @tx_slot: TX slot numbers (must be non-NULL)
 * @rx_num:  number of RX slots in @rx_slot
 * @rx_slot: RX slot numbers (must be non-NULL)
 *
 * Only meaningful on the SLIMbus control interface; a no-op otherwise.
 * Fix: the return value of wcd9xxx_init_slimslave() was previously
 * discarded, masking slave-port setup failures; it is now propagated.
 *
 * Return: 0 on success or a negative error code.
 */
static int tapan_set_channel_map(struct snd_soc_dai *dai,
    unsigned int tx_num, unsigned int *tx_slot,
    unsigned int rx_num, unsigned int *rx_slot)
{
    struct tapan_priv *tapan = snd_soc_codec_get_drvdata(dai->codec);
    struct wcd9xxx *core = dev_get_drvdata(dai->codec->dev->parent);
    int ret = 0;

    if (!tx_slot || !rx_slot) {
        pr_err("%s: Invalid\n", __func__);
        return -EINVAL;
    }
    dev_dbg(dai->codec->dev, "%s(): dai_name = %s DAI-ID %x\n",
        __func__, dai->name, dai->id);
    dev_dbg(dai->codec->dev, "%s(): tx_ch %d rx_ch %d\n intf_type %d\n",
        __func__, tx_num, rx_num, tapan->intf_type);

    if (tapan->intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS)
        ret = wcd9xxx_init_slimslave(core, core->slim->laddr,
                tx_num, tx_slot, rx_num, rx_slot);
    return ret;
}
/*
 * tapan_get_channel_map - report the current SLIMbus slot mapping.
 * @dai:     codec DAI
 * @tx_num:  out: number of TX slots written (capture DAIs)
 * @tx_slot: out: TX slot numbers
 * @rx_num:  out: number of RX slots written (playback DAIs)
 * @rx_slot: out: RX slot numbers
 *
 * Walks the DAI's wcd9xxx channel list and copies each channel number
 * into the caller's slot array.  Fix: the error messages previously
 * cast the pointers to u32 for %d, which truncates on 64-bit kernels
 * and triggers -Wpointer-to-int-cast; they now use %pK.
 *
 * Return: 0, or -EINVAL when the output pointers are missing.
 */
static int tapan_get_channel_map(struct snd_soc_dai *dai,
    unsigned int *tx_num, unsigned int *tx_slot,
    unsigned int *rx_num, unsigned int *rx_slot)
{
    struct tapan_priv *tapan_p = snd_soc_codec_get_drvdata(dai->codec);
    u32 i = 0;
    struct wcd9xxx_ch *ch;

    switch (dai->id) {
    case AIF1_PB:
    case AIF2_PB:
    case AIF3_PB:
        if (!rx_slot || !rx_num) {
            pr_err("%s: Invalid rx_slot %pK or rx_num %pK\n",
                __func__, rx_slot, rx_num);
            return -EINVAL;
        }
        list_for_each_entry(ch, &tapan_p->dai[dai->id].wcd9xxx_ch_list,
                list) {
            dev_dbg(dai->codec->dev, "%s: rx_slot[%d] %d, ch->ch_num %d\n",
                __func__, i, rx_slot[i], ch->ch_num);
            rx_slot[i++] = ch->ch_num;
        }
        dev_dbg(dai->codec->dev, "%s: rx_num %d\n", __func__, i);
        *rx_num = i;
        break;
    case AIF1_CAP:
    case AIF2_CAP:
    case AIF3_CAP:
        if (!tx_slot || !tx_num) {
            pr_err("%s: Invalid tx_slot %pK or tx_num %pK\n",
                __func__, tx_slot, tx_num);
            return -EINVAL;
        }
        list_for_each_entry(ch, &tapan_p->dai[dai->id].wcd9xxx_ch_list,
                list) {
            dev_dbg(dai->codec->dev, "%s: tx_slot[%d] %d, ch->ch_num %d\n",
                __func__, i, tx_slot[i], ch->ch_num);
            tx_slot[i++] = ch->ch_num;
        }
        dev_dbg(dai->codec->dev, "%s: tx_num %d\n", __func__, i);
        *tx_num = i;
        break;
    default:
        pr_err("%s: Invalid DAI ID %x\n", __func__, dai->id);
        break;
    }
    return 0;
}
/* Program the RX interpolator sample-rate field for every interpolator
 * fed (directly or via its mixer inputs) by a SLIMbus RX port of this
 * DAI, and record the matching compander sample rate.
 * @rx_fs_rate_reg_val: value for the rate field (bits 7:5) of the
 *                      RXn_B5_CTL register
 * @compander_fs:       compander rate enum matching @sample_rate
 * @sample_rate:        rate in Hz, for logging only
 * Returns 0, or -EINVAL for an out-of-range RX port. */
static int tapan_set_interpolator_rate(struct snd_soc_dai *dai,
    u8 rx_fs_rate_reg_val, u32 compander_fs, u32 sample_rate)
{
    u32 j;
    u8 rx_mix1_inp;
    u16 rx_mix_1_reg_1, rx_mix_1_reg_2;
    u16 rx_fs_reg;
    u8 rx_mix_1_reg_1_val, rx_mix_1_reg_2_val;
    u8 rdac5_mux;
    struct snd_soc_codec *codec = dai->codec;
    struct wcd9xxx_ch *ch;
    struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);

    list_for_each_entry(ch, &tapan->dai[dai->id].wcd9xxx_ch_list, list) {
        /* for RX port starting from 16 instead of 10 like tabla */
        /* Translate the SLIM port number into the mixer input
         * selector code used in the CONN_RXn registers. */
        rx_mix1_inp = ch->port + RX_MIX1_INP_SEL_RX1 -
                  TAPAN_TX_PORT_NUMBER;
        if ((rx_mix1_inp < RX_MIX1_INP_SEL_RX1) ||
            (rx_mix1_inp > RX_MIX1_INP_SEL_RX5)) {
            pr_err("%s: Invalid TAPAN_RX%u port. Dai ID is %d\n",
                __func__, rx_mix1_inp - 5 , dai->id);
            return -EINVAL;
        }

        rx_mix_1_reg_1 = TAPAN_A_CDC_CONN_RX1_B1_CTL;

        /* bit 2 of CONN_MISC selects RDAC5's source (RX4 vs RX3) */
        rdac5_mux = snd_soc_read(codec, TAPAN_A_CDC_CONN_MISC);
        rdac5_mux = (rdac5_mux & 0x04) >> 2;

        /* Scan each interpolator's two mixer-connection registers
         * for this input selector; program the rate where found. */
        for (j = 0; j < NUM_INTERPOLATORS; j++) {
            rx_mix_1_reg_2 = rx_mix_1_reg_1 + 1;

            rx_mix_1_reg_1_val = snd_soc_read(codec,
                        rx_mix_1_reg_1);
            rx_mix_1_reg_2_val = snd_soc_read(codec,
                        rx_mix_1_reg_2);

            /* B1_CTL holds two 4-bit input selectors, B2_CTL
             * holds the third. */
            if (((rx_mix_1_reg_1_val & 0x0F) == rx_mix1_inp) ||
                (((rx_mix_1_reg_1_val >> 4) & 0x0F)
                == rx_mix1_inp) ||
                ((rx_mix_1_reg_2_val & 0x0F) == rx_mix1_inp)) {

                rx_fs_reg = TAPAN_A_CDC_RX1_B5_CTL + 8 * j;

                dev_dbg(codec->dev, "%s: AIF_PB DAI(%d) connected to RX%u\n",
                    __func__, dai->id, j + 1);

                dev_dbg(codec->dev, "%s: set RX%u sample rate to %u\n",
                    __func__, j + 1, sample_rate);

                snd_soc_update_bits(codec, rx_fs_reg,
                        0xE0, rx_fs_rate_reg_val);

                /* RX4 routed through RDAC5 uses compander 0
                 * instead of its default compander. */
                if (comp_rx_path[j] < COMPANDER_MAX) {
                    if ((j == 3) && (rdac5_mux == 1))
                        tapan->comp_fs[COMPANDER_0] =
                            compander_fs;
                    else
                        tapan->comp_fs[comp_rx_path[j]]
                            = compander_fs;
                }
            }
            /* CONN_RX register blocks are 3 registers wide for
             * RX1/RX2 and 2 wide afterwards. */
            if (j <= 1)
                rx_mix_1_reg_1 += 3;
            else
                rx_mix_1_reg_1 += 2;
        }
    }
    return 0;
}
/* Program the TX decimator sample-rate field for every SLIM TX port of
 * this DAI that is fed by a decimator.  Ports fed directly by an RX
 * mixer loopback need no rate programming; any other non-zero source
 * is rejected.
 * @tx_fs_rate_reg_val: value for the rate field (bits 2:0) of
 *                      TXn_CLK_FS_CTL
 * @sample_rate:        rate in Hz, for logging only
 * Returns 0, or -EINVAL on an invalid port or routing. */
static int tapan_set_decimator_rate(struct snd_soc_dai *dai,
    u8 tx_fs_rate_reg_val, u32 sample_rate)
{
    struct snd_soc_codec *codec = dai->codec;
    struct wcd9xxx_ch *ch;
    struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
    u32 tx_port;
    u16 tx_port_reg, tx_fs_reg;
    u8 tx_port_reg_val;
    s8 decimator;

    list_for_each_entry(ch, &tapan->dai[dai->id].wcd9xxx_ch_list, list) {
        /* ch->port is 0-based; SLIM TX ports are numbered from 1 */
        tx_port = ch->port + 1;
        dev_dbg(codec->dev, "%s: dai->id = %d, tx_port = %d",
            __func__, dai->id, tx_port);

        if ((tx_port < 1) || (tx_port > TAPAN_SLIM_CODEC_TX_PORTS)) {
            pr_err("%s: Invalid SLIM TX%u port. DAI ID is %d\n",
                __func__, tx_port, dai->id);
            return -EINVAL;
        }

        tx_port_reg = TAPAN_A_CDC_CONN_TX_SB_B1_CTL + (tx_port - 1);
        tx_port_reg_val =  snd_soc_read(codec, tx_port_reg);

        decimator = 0;

        /* Low nibble selects the port's source: 0 = none,
         * 0x1-0x4 = RX mixer loopback, 0x8+ = decimators.
         * NOTE(review): after masking with 0x0F the value cannot
         * exceed 0xF, so the 0x11 upper bound below is effectively
         * 0xF -- presumably inherited from a codec with more
         * decimators; confirm against the register spec. */
        tx_port_reg_val =  tx_port_reg_val & 0x0F;

        if ((tx_port_reg_val >= 0x8) &&
            (tx_port_reg_val <= 0x11)) {
            decimator = (tx_port_reg_val - 0x8) + 1;
        }

        if (decimator) { /* SLIM_TX port has a DEC as input */

            tx_fs_reg = TAPAN_A_CDC_TX1_CLK_FS_CTL +
                    8 * (decimator - 1);

            dev_dbg(codec->dev, "%s: set DEC%u (-> SLIM_TX%u) rate to %u\n",
                __func__, decimator, tx_port, sample_rate);

            snd_soc_update_bits(codec, tx_fs_reg, 0x07,
                    tx_fs_rate_reg_val);

        } else {
            if ((tx_port_reg_val >= 0x1) &&
                (tx_port_reg_val <= 0x4)) {

                /* RX mixer loopback: no decimator rate */
                dev_dbg(codec->dev, "%s: RMIX%u going to SLIM TX%u\n",
                    __func__, tx_port_reg_val, tx_port);

            } else if ((tx_port_reg_val >= 0x8) &&
                   (tx_port_reg_val <= 0x11)) {

                /* unreachable: the decimator range was
                 * handled above */
                pr_err("%s: ERROR: Should not be here\n",
                    __func__);
                pr_err("%s: ERROR: DEC connected to SLIM TX%u\n",
                    __func__, tx_port);
                return -EINVAL;

            } else if (tx_port_reg_val == 0) {
                dev_dbg(codec->dev, "%s: no signal to SLIM TX%u\n",
                    __func__, tx_port);
            } else {
                pr_err("%s: ERROR: wrong signal to SLIM TX%u\n",
                    __func__, tx_port);
                pr_err("%s: ERROR: wrong signal = %u\n",
                    __func__, tx_port_reg_val);
                return -EINVAL;
            }
        }
    }
    return 0;
}
/* Program the per-port SLIMbus RX sample width (16 or 24 bit) for every
 * channel of the DAI, based on the hw_params PCM format.  Each port has
 * a 2-bit field packed into the RX_SB_B1/B2_CTL registers.  Errors are
 * logged and abort the remaining ports (void return). */
static void tapan_set_rxsb_port_format(struct snd_pcm_hw_params *params,
    struct snd_soc_dai *dai)
{
    struct snd_soc_codec *codec = dai->codec;
    struct tapan_priv *tapan_p = snd_soc_codec_get_drvdata(codec);
    struct wcd9xxx_codec_dai_data *cdc_dai;
    struct wcd9xxx_ch *ch;
    int port;
    u8 bit_sel;
    u16 sb_ctl_reg, field_shift;

    /* map PCM format to the hardware's 2-bit width selector */
    switch (params_format(params)) {
    case SNDRV_PCM_FORMAT_S16_LE:
        bit_sel = 0x2;
        tapan_p->dai[dai->id].bit_width = 16;
        break;
    case SNDRV_PCM_FORMAT_S24_LE:
        bit_sel = 0x0;
        tapan_p->dai[dai->id].bit_width = 24;
        break;
    default:
        dev_err(codec->dev, "Invalid format %x\n",
            params_format(params));
        return;
    }

    cdc_dai = &tapan_p->dai[dai->id];

    list_for_each_entry(ch, &cdc_dai->wcd9xxx_ch_list, list) {
        port = wcd9xxx_get_slave_port(ch->ch_num);

        if (IS_ERR_VALUE(port) ||
            !TAPAN_VALIDATE_RX_SBPORT_RANGE(port)) {
            dev_warn(codec->dev,
                 "%s: invalid port ID %d returned for RX DAI\n",
                 __func__, port);
            return;
        }

        port = TAPAN_CONVERT_RX_SBPORT_ID(port);

        /* ports 0-3 live in B1_CTL, port 4 in B2_CTL; each takes
         * two bits */
        if (port <= 3) {
            sb_ctl_reg = TAPAN_A_CDC_CONN_RX_SB_B1_CTL;
            field_shift = port << 1;
        } else if (port <= 4) {
            sb_ctl_reg = TAPAN_A_CDC_CONN_RX_SB_B2_CTL;
            field_shift = (port - 4) << 1;
        } else { /* should not happen */
            dev_warn(codec->dev,
                 "%s: bad port ID %d\n", __func__, port);
            return;
        }

        dev_dbg(codec->dev, "%s: sb_ctl_reg %x field_shift %x\n"
            "bit_sel %x\n", __func__, sb_ctl_reg, field_shift,
            bit_sel);
        snd_soc_update_bits(codec, sb_ctl_reg, 0x3 << field_shift,
                bit_sel << field_shift);
    }
}
static int tapan_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
struct tapan_priv *tapan = snd_soc_codec_get_drvdata(dai->codec);
u8 tx_fs_rate, rx_fs_rate;
u32 compander_fs;
int ret;
dev_dbg(dai->codec->dev, "%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n",
__func__, dai->name, dai->id,
params_rate(params), params_channels(params));
switch (params_rate(params)) {
case 8000:
tx_fs_rate = 0x00;
rx_fs_rate = 0x00;
compander_fs = COMPANDER_FS_8KHZ;
break;
case 16000:
tx_fs_rate = 0x01;
rx_fs_rate = 0x20;
compander_fs = COMPANDER_FS_16KHZ;
break;
case 32000:
tx_fs_rate = 0x02;
rx_fs_rate = 0x40;
compander_fs = COMPANDER_FS_32KHZ;
break;
case 48000:
tx_fs_rate = 0x03;
rx_fs_rate = 0x60;
compander_fs = COMPANDER_FS_48KHZ;
break;
case 96000:
tx_fs_rate = 0x04;
rx_fs_rate = 0x80;
compander_fs = COMPANDER_FS_96KHZ;
break;
case 192000:
tx_fs_rate = 0x05;
rx_fs_rate = 0xA0;
compander_fs = COMPANDER_FS_192KHZ;
break;
default:
pr_err("%s: Invalid sampling rate %d\n", __func__,
params_rate(params));
return -EINVAL;
}
switch (substream->stream) {
case SNDRV_PCM_STREAM_CAPTURE:
ret = tapan_set_decimator_rate(dai, tx_fs_rate,
params_rate(params));
if (ret < 0) {
pr_err("%s: set decimator rate failed %d\n", __func__,
ret);
return ret;
}
if (tapan->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
snd_soc_update_bits(codec,
TAPAN_A_CDC_CLK_I2S_CTL,
0x20, 0x20);
break;
case SNDRV_PCM_FORMAT_S32_LE:
snd_soc_update_bits(codec,
TAPAN_A_CDC_CLK_I2S_CTL,
0x20, 0x00);
break;
default:
pr_err("invalid format\n");
break;
}
snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_I2S_CTL,
0x07, tx_fs_rate);
} else {
tapan->dai[dai->id].rate = params_rate(params);
}
break;
case SNDRV_PCM_STREAM_PLAYBACK:
ret = tapan_set_interpolator_rate(dai, rx_fs_rate,
compander_fs,
params_rate(params));
if (ret < 0) {
dev_err(codec->dev, "%s: set decimator rate failed %d\n",
__func__, ret);
return ret;
}
if (tapan->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
snd_soc_update_bits(codec,
TAPAN_A_CDC_CLK_I2S_CTL,
0x20, 0x20);
break;
case SNDRV_PCM_FORMAT_S32_LE:
snd_soc_update_bits(codec,
TAPAN_A_CDC_CLK_I2S_CTL,
0x20, 0x00);
break;
default:
dev_err(codec->dev, "invalid format\n");
break;
}
snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_I2S_CTL,
0x03, (rx_fs_rate >> 0x05));
} else {
tapan_set_rxsb_port_format(params, dai);
tapan->dai[dai->id].rate = params_rate(params);
}
break;
default:
dev_err(codec->dev, "%s: Invalid stream type %d\n", __func__,
substream->stream);
return -EINVAL;
}
return 0;
}
/*
 * Digital mute callback: toggle the mute bit of every active
 * decimator's TX volume control register.  Only the AIF1 capture DAI is
 * handled; other DAIs return success without touching hardware.  On
 * unmute, a short settle delay keeps the transition pop-free.
 */
int tapan_digital_mute(struct snd_soc_dai *dai, int mute)
{
    struct snd_soc_codec *codec = NULL;
    u16 tx_vol_ctl_reg = 0;
    u8 decimator = 0, i;
    struct tapan_priv *tapan_p;

    pr_debug("%s: Digital Mute val = %d\n", __func__, mute);

    if (!dai || !dai->codec) {
        pr_err("%s: Invalid params\n", __func__);
        return -EINVAL;
    }
    codec = dai->codec;
    tapan_p = snd_soc_codec_get_drvdata(codec);

    if (dai->id != AIF1_CAP) {
        dev_dbg(codec->dev, "%s: Not capture use case skip\n",
            __func__);
        return 0;
    }

    mute = !!mute;
    if (!mute) {
        /*
         * 5 ms is an emperical value for the mute time
         * that was arrived by checking the pop level
         * to be inaudible
         */
        usleep_range(5000, 5010);
    }

    for (i = 0; i < NUM_DECIMATORS; i++) {
        if (!tapan_p->dec_active[i])
            continue;
        decimator = i + 1;
        pr_debug("%s: Mute = %d Decimator = %d", __func__,
                mute, decimator);
        tx_vol_ctl_reg = TAPAN_A_CDC_TX1_VOL_CTL_CFG +
                 8 * (decimator - 1);
        snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, mute);
        decimator = 0;
    }
    return 0;
}
/* Shared DAI callbacks for every Tapan DAI (slimbus and I2S variants). */
static struct snd_soc_dai_ops tapan_dai_ops = {
	.startup = tapan_startup,
	.shutdown = tapan_shutdown,
	.hw_params = tapan_hw_params,
	.set_sysclk = tapan_set_dai_sysclk,
	.set_fmt = tapan_set_dai_fmt,
	.set_channel_map = tapan_set_channel_map,
	.get_channel_map = tapan_get_channel_map,
	.digital_mute = tapan_digital_mute,
};
/*
 * WCD9302 DAI set: three playback and three capture DAIs, all limited to
 * 8-48 kHz (WCD9302_RATES) — the WCD9302 variant does not support the
 * higher sample rates of the WCD9306 table below.
 */
static struct snd_soc_dai_driver tapan9302_dai[] = {
	{
		.name = "tapan9302_rx1",
		.id = AIF1_PB,
		.playback = {
			.stream_name = "AIF1 Playback",
			.rates = WCD9302_RATES,
			.formats = TAPAN_FORMATS,
			.rate_max = 48000,
			.rate_min = 8000,
			.channels_min = 1,
			.channels_max = 2,
		},
		.ops = &tapan_dai_ops,
	},
	{
		.name = "tapan9302_tx1",
		.id = AIF1_CAP,
		.capture = {
			.stream_name = "AIF1 Capture",
			.rates = WCD9302_RATES,
			.formats = TAPAN_FORMATS,
			.rate_max = 48000,
			.rate_min = 8000,
			.channels_min = 1,
			.channels_max = 4,
		},
		.ops = &tapan_dai_ops,
	},
	{
		.name = "tapan9302_rx2",
		.id = AIF2_PB,
		.playback = {
			.stream_name = "AIF2 Playback",
			.rates = WCD9302_RATES,
			.formats = TAPAN_FORMATS,
			.rate_min = 8000,
			.rate_max = 48000,
			.channels_min = 1,
			.channels_max = 2,
		},
		.ops = &tapan_dai_ops,
	},
	{
		.name = "tapan9302_tx2",
		.id = AIF2_CAP,
		.capture = {
			.stream_name = "AIF2 Capture",
			.rates = WCD9302_RATES,
			.formats = TAPAN_FORMATS,
			.rate_max = 48000,
			.rate_min = 8000,
			.channels_min = 1,
			.channels_max = 4,
		},
		.ops = &tapan_dai_ops,
	},
	{
		.name = "tapan9302_tx3",
		.id = AIF3_CAP,
		.capture = {
			.stream_name = "AIF3 Capture",
			.rates = WCD9302_RATES,
			.formats = TAPAN_FORMATS,
			.rate_max = 48000,
			.rate_min = 8000,
			.channels_min = 1,
			.channels_max = 2,
		},
		.ops = &tapan_dai_ops,
	},
	{
		.name = "tapan9302_rx3",
		.id = AIF3_PB,
		.playback = {
			.stream_name = "AIF3 Playback",
			.rates = WCD9302_RATES,
			.formats = TAPAN_FORMATS,
			.rate_min = 8000,
			.rate_max = 48000,
			.channels_min = 1,
			.channels_max = 2,
		},
		.ops = &tapan_dai_ops,
	},
};
/*
 * WCD9306 slimbus DAI set.  AIF1/AIF2 support up to 192 kHz; AIF3 capture
 * is capped at 48 kHz.  Playback DAIs additionally allow S24_LE
 * (TAPAN_FORMATS_S16_S24_LE) while capture DAIs use TAPAN_FORMATS.
 */
static struct snd_soc_dai_driver tapan_dai[] = {
	{
		.name = "tapan_rx1",
		.id = AIF1_PB,
		.playback = {
			.stream_name = "AIF1 Playback",
			.rates = WCD9306_RATES,
			.formats = TAPAN_FORMATS_S16_S24_LE,
			.rate_max = 192000,
			.rate_min = 8000,
			.channels_min = 1,
			.channels_max = 2,
		},
		.ops = &tapan_dai_ops,
	},
	{
		.name = "tapan_tx1",
		.id = AIF1_CAP,
		.capture = {
			.stream_name = "AIF1 Capture",
			.rates = WCD9306_RATES,
			.formats = TAPAN_FORMATS,
			.rate_max = 192000,
			.rate_min = 8000,
			.channels_min = 1,
			.channels_max = 4,
		},
		.ops = &tapan_dai_ops,
	},
	{
		.name = "tapan_rx2",
		.id = AIF2_PB,
		.playback = {
			.stream_name = "AIF2 Playback",
			.rates = WCD9306_RATES,
			.formats = TAPAN_FORMATS_S16_S24_LE,
			.rate_min = 8000,
			.rate_max = 192000,
			.channels_min = 1,
			.channels_max = 2,
		},
		.ops = &tapan_dai_ops,
	},
	{
		.name = "tapan_tx2",
		.id = AIF2_CAP,
		.capture = {
			.stream_name = "AIF2 Capture",
			.rates = WCD9306_RATES,
			.formats = TAPAN_FORMATS,
			.rate_max = 192000,
			.rate_min = 8000,
			.channels_min = 1,
			.channels_max = 4,
		},
		.ops = &tapan_dai_ops,
	},
	{
		.name = "tapan_tx3",
		.id = AIF3_CAP,
		.capture = {
			.stream_name = "AIF3 Capture",
			.rates = WCD9306_RATES,
			.formats = TAPAN_FORMATS,
			.rate_max = 48000,
			.rate_min = 8000,
			.channels_min = 1,
			.channels_max = 2,
		},
		.ops = &tapan_dai_ops,
	},
	{
		.name = "tapan_rx3",
		.id = AIF3_PB,
		.playback = {
			.stream_name = "AIF3 Playback",
			.rates = WCD9306_RATES,
			.formats = TAPAN_FORMATS_S16_S24_LE,
			.rate_min = 8000,
			.rate_max = 192000,
			.channels_min = 1,
			.channels_max = 2,
		},
		.ops = &tapan_dai_ops,
	},
};
/*
 * I2S DAI set: a single playback and a single capture DAI on AIF1,
 * used when the codec is wired over I2C/I2S instead of slimbus.
 */
static struct snd_soc_dai_driver tapan_i2s_dai[] = {
	{
		.name = "tapan_i2s_rx1",
		.id = AIF1_PB,
		.playback = {
			.stream_name = "AIF1 Playback",
			.rates = WCD9306_RATES,
			.formats = TAPAN_FORMATS,
			.rate_max = 192000,
			.rate_min = 8000,
			.channels_min = 1,
			.channels_max = 4,
		},
		.ops = &tapan_dai_ops,
	},
	{
		.name = "tapan_i2s_tx1",
		.id = AIF1_CAP,
		.capture = {
			.stream_name = "AIF1 Capture",
			.rates = WCD9306_RATES,
			.formats = TAPAN_FORMATS,
			.rate_max = 192000,
			.rate_min = 8000,
			.channels_min = 1,
			.channels_max = 4,
		},
		.ops = &tapan_dai_ops,
	},
};
/*
 * Build up or tear down the slimbus channel mask for one codec DAI.
 *
 * up == true:  set a bit in dai->ch_mask for each channel's slave port.
 *              Returns the last slave-port number looked up (>= 0) on
 *              success, or -EINVAL if any lookup failed.
 * up == false: wait for the port-closed interrupts to clear ch_mask;
 *              returns 0 on success or -ETIMEDOUT.
 */
static int tapan_codec_enable_slim_chmask(struct wcd9xxx_codec_dai_data *dai,
					  bool up)
{
	struct wcd9xxx_ch *ch;
	int ret = 0;

	if (!up) {
		ret = wait_event_timeout(dai->dai_wait, (dai->ch_mask == 0),
					 msecs_to_jiffies(
						TAPAN_SLIM_CLOSE_TIMEOUT));
		if (!ret) {
			pr_debug("%s: Slim close tx/rx wait timeout\n",
				 __func__);
			return -ETIMEDOUT;
		}
		return 0;
	}

	list_for_each_entry(ch, &dai->wcd9xxx_ch_list, list) {
		ret = wcd9xxx_get_slave_port(ch->ch_num);
		if (ret < 0) {
			pr_debug("%s: Invalid slave port ID: %d\n",
				 __func__, ret);
			ret = -EINVAL;
		} else {
			set_bit(ret, &dai->ch_mask);
		}
	}
	return ret;
}
/*
 * DAPM event handler for the slimbus RX (playback) AIF widgets.
 *
 * POST_PMU: populate the channel mask and configure the slimbus RX
 * channels for this DAI.  POST_PMD: close the channels, wait for the
 * port-closed interrupts to drain the mask (skipped during bus
 * recovery), force-disconnect the ports on failure/timeout, then drop
 * the runtime-PM vote on the bus master device.
 *
 * No-op (returns 0) unless the interface type is slimbus.
 */
static int tapan_codec_enable_slimrx(struct snd_soc_dapm_widget *w,
				     struct snd_kcontrol *kcontrol,
				     int event)
{
	struct wcd9xxx *core;
	struct snd_soc_codec *codec = w->codec;
	struct tapan_priv *tapan_p = snd_soc_codec_get_drvdata(codec);
	int ret = 0;
	struct wcd9xxx_codec_dai_data *dai;

	core = dev_get_drvdata(codec->dev->parent);
	if (core == NULL) {
		dev_err(codec->dev, "%s: core is null\n",
			__func__);
		return -EINVAL;
	}
	dev_dbg(codec->dev, "%s: event called! codec name %s\n",
		__func__, w->codec->name);
	dev_dbg(codec->dev, "%s: num_dai %d stream name %s event %d\n",
		__func__, w->codec->num_dai, w->sname, event);

	/* Execute the callback only if interface type is slimbus */
	if (tapan_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS)
		return 0;

	/* w->shift carries the DAI index for the AIF widgets. */
	dai = &tapan_p->dai[w->shift];
	dev_dbg(codec->dev, "%s: w->name %s w->shift %d event %d\n",
		__func__, w->name, w->shift, event);

	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
		dai->bus_down_in_recovery = false;
		(void) tapan_codec_enable_slim_chmask(dai, true);
		ret = wcd9xxx_cfg_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
					      dai->rate, dai->bit_width,
					      &dai->grph);
		break;
	case SND_SOC_DAPM_POST_PMD:
		ret = wcd9xxx_close_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
						dai->grph);
		if (!dai->bus_down_in_recovery)
			ret = tapan_codec_enable_slim_chmask(dai, false);
		if (ret < 0) {
			/* Ports did not close cleanly; force-disconnect. */
			ret = wcd9xxx_disconnect_port(core,
						      &dai->wcd9xxx_ch_list,
						      dai->grph);
			dev_dbg(codec->dev, "%s: Disconnect RX port, ret = %d\n",
				__func__, ret);
		}
		if ((core != NULL) &&
		    (core->dev != NULL) &&
		    (core->dev->parent != NULL)) {
			pm_runtime_mark_last_busy(core->dev->parent);
			pm_runtime_put(core->dev->parent);
			dev_dbg(codec->dev, "%s: unvote requested", __func__);
		}
		dai->bus_down_in_recovery = false;
		break;
	default:
		break;
	}
	return ret;
}
static int tapan_codec_enable_slimtx(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
{
struct wcd9xxx *core;
struct snd_soc_codec *codec = w->codec;
struct tapan_priv *tapan_p = snd_soc_codec_get_drvdata(codec);
u32 ret = 0;
struct wcd9xxx_codec_dai_data *dai;
core = dev_get_drvdata(codec->dev->parent);
dev_dbg(codec->dev, "%s: event called! codec name %s\n",
__func__, w->codec->name);
dev_dbg(codec->dev, "%s: num_dai %d stream name %s\n",
__func__, w->codec->num_dai, w->sname);
/* Execute the callback only if interface type is slimbus */
if (tapan_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS)
return 0;
dev_dbg(codec->dev, "%s(): w->name %s event %d w->shift %d\n",
__func__, w->name, event, w->shift);
dai = &tapan_p->dai[w->shift];
switch (event) {
case SND_SOC_DAPM_POST_PMU:
dai->bus_down_in_recovery = false;
(void) tapan_codec_enable_slim_chmask(dai, true);
ret = wcd9xxx_cfg_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
dai->rate, dai->bit_width,
&dai->grph);
break;
case SND_SOC_DAPM_POST_PMD:
ret = wcd9xxx_close_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
dai->grph);
if (!dai->bus_down_in_recovery)
ret = tapan_codec_enable_slim_chmask(dai, false);
if (ret < 0) {
ret = wcd9xxx_disconnect_port(core,
&dai->wcd9xxx_ch_list,
dai->grph);
dev_dbg(codec->dev, "%s: Disconnect RX port, ret = %d\n",
__func__, ret);
}
if ((core != NULL) &&
(core->dev != NULL) &&
(core->dev->parent != NULL)) {
pm_runtime_mark_last_busy(core->dev->parent);
pm_runtime_put(core->dev->parent);
dev_dbg(codec->dev, "%s: unvote requested", __func__);
}
dai->bus_down_in_recovery = false;
break;
}
return ret;
}
/*
 * DAPM handler for the EAR PA widget: drive the class-H state machine
 * around PA power-up/down, with a 5 ms settle delay on each transition.
 */
static int tapan_codec_enable_ear_pa(struct snd_soc_dapm_widget *w,
				     struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct tapan_priv *priv = snd_soc_codec_get_drvdata(codec);

	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);

	if (event == SND_SOC_DAPM_POST_PMU) {
		wcd9xxx_clsh_fsm(codec, &priv->clsh_d,
				 WCD9XXX_CLSH_STATE_EAR,
				 WCD9XXX_CLSH_REQ_ENABLE,
				 WCD9XXX_CLSH_EVENT_POST_PA);
		usleep_range(5000, 5010);
	} else if (event == SND_SOC_DAPM_POST_PMD) {
		usleep_range(5000, 5010);
		snd_soc_update_bits(codec, TAPAN_A_RX_EAR_EN, 0x40, 0x00);
		wcd9xxx_clsh_fsm(codec, &priv->clsh_d,
				 WCD9XXX_CLSH_STATE_EAR,
				 WCD9XXX_CLSH_REQ_DISABLE,
				 WCD9XXX_CLSH_EVENT_POST_PA);
	}
	return 0;
}
/*
 * DAPM handler for the EAR DAC widget: program the class-H controller
 * for the EAR path before the DAC powers up; other events are no-ops.
 */
static int tapan_codec_ear_dac_event(struct snd_soc_dapm_widget *w,
				     struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct tapan_priv *priv = snd_soc_codec_get_drvdata(codec);

	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);

	if (event == SND_SOC_DAPM_PRE_PMU)
		wcd9xxx_clsh_fsm(codec, &priv->clsh_d,
				 WCD9XXX_CLSH_STATE_EAR,
				 WCD9XXX_CLSH_REQ_ENABLE,
				 WCD9XXX_CLSH_EVENT_PRE_DAC);
	return 0;
}
/*
 * DAPM handler for the IIR input muxes: after either power transition,
 * read the mux register and write the same value back (a read-modify-
 * write that re-latches the current selection).
 */
static int tapan_codec_iir_mux_event(struct snd_soc_dapm_widget *w,
				     struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;

	pr_debug("%s: event = %d\n", __func__, event);

	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
	case SND_SOC_DAPM_POST_PMD:
		snd_soc_write(codec, w->reg, snd_soc_read(codec, w->reg));
		break;
	}
	return 0;
}
/*
 * DAPM handler for the CLASS_H_DSM mux.
 *
 * On power-up, map the DSM input selection (bits 5:4 of
 * TAPAN_A_CDC_CONN_CLSH_CTL) onto the ZOH mux field (bits 3:2) in the
 * same register — presumably so the zero-order-hold path tracks the
 * selected RX chain; confirm against the WCD9306 register spec.  On
 * power-down the ZOH mux field is cleared.
 */
static int tapan_codec_dsm_mux_event(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	u8 reg_val, zoh_mux_val = 0x00;

	dev_dbg(codec->dev, "%s: event = %d\n", __func__, event);
	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
		reg_val = snd_soc_read(codec, TAPAN_A_CDC_CONN_CLSH_CTL);
		if ((reg_val & 0x30) == 0x10)
			zoh_mux_val = 0x04;
		else if ((reg_val & 0x30) == 0x20)
			zoh_mux_val = 0x08;
		/* For any other selection the ZOH mux is left untouched. */
		if (zoh_mux_val != 0x00)
			snd_soc_update_bits(codec,
					    TAPAN_A_CDC_CONN_CLSH_CTL,
					    0x0C, zoh_mux_val);
		break;
	case SND_SOC_DAPM_POST_PMD:
		snd_soc_update_bits(codec, TAPAN_A_CDC_CONN_CLSH_CTL,
				    0x0C, 0x00);
		break;
	}
	return 0;
}
/*
 * DAPM handler for the "ANC EAR PA" widget: sequences ANC enable/disable
 * around the EAR PA power events.
 *
 * PRE_PMU:  enable ANC, wait 50 ms (presumably for the ANC path to
 *           settle — confirm), then route ANC into EAR (bit 4 of
 *           TAPAN_A_RX_EAR_EN).
 * POST_PMU: run the normal EAR PA power-up sequence.
 * PRE_PMD:  un-route ANC from EAR, wait 40 ms, then disable ANC
 *           (OR-ing the return so an earlier error is not lost).
 * POST_PMD: run the normal EAR PA power-down sequence.
 */
static int tapan_codec_enable_anc_ear(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	int ret = 0;

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		ret = tapan_codec_enable_anc(w, kcontrol, event);
		msleep(50);
		snd_soc_update_bits(codec, TAPAN_A_RX_EAR_EN, 0x10, 0x10);
		break;
	case SND_SOC_DAPM_POST_PMU:
		ret = tapan_codec_enable_ear_pa(w, kcontrol, event);
		break;
	case SND_SOC_DAPM_PRE_PMD:
		snd_soc_update_bits(codec, TAPAN_A_RX_EAR_EN, 0x10, 0x00);
		msleep(40);
		ret |= tapan_codec_enable_anc(w, kcontrol, event);
		break;
	case SND_SOC_DAPM_POST_PMD:
		ret = tapan_codec_enable_ear_pa(w, kcontrol, event);
		break;
	}
	return ret;
}
/*
 * DAPM supply handler for the charge-pump VDD: enables all defined CP
 * regulators before power-up and disables them after power-down.
 *
 * Error handling is deliberately asymmetric: enable failures are logged
 * and skipped (best-effort, and the function still returns 0), whereas
 * a disable failure aborts immediately with the error.
 *
 * NOTE(review): pr_info on every DAPM event is noisy for a power path;
 * consider pr_debug.  Also, enable failures are silently reported as
 * success to DAPM — confirm that is intended.
 */
static int tapan_codec_chargepump_vdd_event(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct tapan_priv *priv = snd_soc_codec_get_drvdata(codec);
	int ret = 0, i;

	pr_info("%s: event = %d\n", __func__, event);

	/* At least one of the buck / buck-helper supplies must exist. */
	if (!priv->cp_regulators[CP_REG_BUCK]
			&& !priv->cp_regulators[CP_REG_BHELPER]) {
		pr_err("%s: No power supply defined for ChargePump\n",
		       __func__);
		return -EINVAL;
	}

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		for (i = 0; i < CP_REG_MAX ; i++) {
			if (!priv->cp_regulators[i])
				continue;
			ret = regulator_enable(priv->cp_regulators[i]);
			if (ret) {
				pr_err("%s: CP Regulator enable failed, index = %d\n",
				       __func__, i);
				continue;
			} else {
				pr_debug("%s: Enabled CP regulator, index %d\n",
					 __func__, i);
			}
		}
		break;
	case SND_SOC_DAPM_POST_PMD:
		for (i = 0; i < CP_REG_MAX; i++) {
			if (!priv->cp_regulators[i])
				continue;
			ret = regulator_disable(priv->cp_regulators[i]);
			if (ret) {
				pr_err("%s: CP Regulator disable failed, index = %d\n",
				       __func__, i);
				return ret;
			} else {
				pr_debug("%s: Disabled CP regulator %d\n",
					 __func__, i);
			}
		}
		break;
	}
	return 0;
}
/*
 * DAPM handler for the IIR1 PGA: on power-up, rewrite the IIR1 gain
 * register with its current value to re-latch it; any other event is
 * unexpected and only logged.
 */
static int tapan_codec_set_iir_gain(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;

	if (event != SND_SOC_DAPM_POST_PMU) {
		pr_err("%s: event = %d not expected\n", __func__, event);
		return 0;
	}
	snd_soc_write(codec, TAPAN_A_CDC_IIR1_GAIN_B1_CTL,
		      snd_soc_read(codec, TAPAN_A_CDC_IIR1_GAIN_B1_CTL));
	return 0;
}
/*
 * DAPM widgets present only on the WCD9306 (not the WCD9302): the RX4
 * mixer chain, DEC3/DEC4 decimators, compander clocks, ADC5/AMIC5,
 * the ANC paths, MIC BIAS3, and DMIC3/DMIC4.
 */
static const struct snd_soc_dapm_widget tapan_9306_dapm_widgets[] = {
	/* RX4 MIX1 mux inputs */
	SND_SOC_DAPM_MUX("RX4 MIX1 INP1", SND_SOC_NOPM, 0, 0,
		&rx4_mix1_inp1_mux),
	SND_SOC_DAPM_MUX("RX4 MIX1 INP2", SND_SOC_NOPM, 0, 0,
		&rx4_mix1_inp2_mux),
	SND_SOC_DAPM_MUX("RX4 MIX1 INP3", SND_SOC_NOPM, 0, 0,
		&rx4_mix1_inp3_mux),
	/* RX4 MIX2 mux inputs */
	SND_SOC_DAPM_MUX("RX4 MIX2 INP1", SND_SOC_NOPM, 0, 0,
		&rx4_mix2_inp1_mux),
	SND_SOC_DAPM_MUX("RX4 MIX2 INP2", SND_SOC_NOPM, 0, 0,
		&rx4_mix2_inp2_mux),
	SND_SOC_DAPM_MIXER("RX4 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
	SND_SOC_DAPM_MIXER_E("RX4 MIX2", TAPAN_A_CDC_CLK_RX_B1_CTL, 3, 0, NULL,
		0, tapan_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU),
	SND_SOC_DAPM_MUX_E("DEC3 MUX", TAPAN_A_CDC_CLK_TX_CLK_EN_B1_CTL, 2, 0,
		&dec3_mux, tapan_codec_enable_dec,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MUX_E("DEC4 MUX", TAPAN_A_CDC_CLK_TX_CLK_EN_B1_CTL, 3, 0,
		&dec4_mux, tapan_codec_enable_dec,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
	/* Compander clock supplies (shift encodes the compander index). */
	SND_SOC_DAPM_SUPPLY("COMP0_CLK", SND_SOC_NOPM, 0, 0,
		tapan_config_compander, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_PRE_PMD),
	SND_SOC_DAPM_SUPPLY("COMP1_CLK", SND_SOC_NOPM, 1, 0,
		tapan_config_compander, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_PRE_PMD),
	SND_SOC_DAPM_SUPPLY("COMP2_CLK", SND_SOC_NOPM, 2, 0,
		tapan_config_compander, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_PRE_PMD),
	SND_SOC_DAPM_INPUT("AMIC5"),
	SND_SOC_DAPM_ADC_E("ADC5", NULL, TAPAN_A_TX_5_EN, 7, 0,
		tapan_codec_enable_adc, SND_SOC_DAPM_POST_PMU),
	/* ANC (active noise cancellation) routing and outputs. */
	SND_SOC_DAPM_MUX("ANC1 MUX", SND_SOC_NOPM, 0, 0, &anc1_mux),
	SND_SOC_DAPM_MUX("ANC2 MUX", SND_SOC_NOPM, 0, 0, &anc2_mux),
	SND_SOC_DAPM_OUTPUT("ANC HEADPHONE"),
	SND_SOC_DAPM_PGA_E("ANC HPHL", SND_SOC_NOPM, 5, 0, NULL, 0,
		tapan_codec_enable_anc_hph,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD |
		SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_POST_PMU),
	SND_SOC_DAPM_PGA_E("ANC HPHR", SND_SOC_NOPM, 4, 0, NULL, 0,
		tapan_codec_enable_anc_hph, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
		SND_SOC_DAPM_POST_PMU),
	SND_SOC_DAPM_OUTPUT("ANC EAR"),
	SND_SOC_DAPM_PGA_E("ANC EAR PA", SND_SOC_NOPM, 0, 0, NULL, 0,
		tapan_codec_enable_anc_ear,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD |
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MUX("ANC1 FB MUX", SND_SOC_NOPM, 0, 0, &anc1_fb_mux),
	SND_SOC_DAPM_MICBIAS_E("MIC BIAS3 External", SND_SOC_NOPM, 7, 0,
		tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MICBIAS_E("MIC BIAS3 Internal1", SND_SOC_NOPM, 7, 0,
		tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MICBIAS_E("MIC BIAS3 Internal2", SND_SOC_NOPM, 7, 0,
		tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_ADC_E("DMIC3", NULL, SND_SOC_NOPM, 0, 0,
		tapan_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_ADC_E("DMIC4", NULL, SND_SOC_NOPM, 0, 0,
		tapan_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMD),
};
/* TODO: Have separate DAPM widgets for I2S and Slimbus.
 * Callbacks might need to be registered only for Slimbus.
 */
/*
 * DAPM widgets common to both WCD9302 and WCD9306: AIF endpoints, the
 * slimbus RX/TX muxes and mixers, the RX1-RX3 mixer chains, the analog
 * output stages (EAR/HPH/LINEOUT/SPK), the TX path (ADCs, DMICs, mic
 * biases, DEC1/DEC2), the IIR sidetone paths, and the AUX PGAs.
 */
static const struct snd_soc_dapm_widget tapan_common_dapm_widgets[] = {
	/* AIF playback endpoints; the shift field carries the DAI index. */
	SND_SOC_DAPM_AIF_IN_E("AIF1 PB", "AIF1 Playback", 0, SND_SOC_NOPM,
		AIF1_PB, 0, tapan_codec_enable_slimrx,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_AIF_IN_E("AIF2 PB", "AIF2 Playback", 0, SND_SOC_NOPM,
		AIF2_PB, 0, tapan_codec_enable_slimrx,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_AIF_IN_E("AIF3 PB", "AIF3 Playback", 0, SND_SOC_NOPM,
		AIF3_PB, 0, tapan_codec_enable_slimrx,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MUX("SLIM RX1 MUX", SND_SOC_NOPM, TAPAN_RX1, 0,
		&slim_rx_mux[TAPAN_RX1]),
	SND_SOC_DAPM_MUX("SLIM RX2 MUX", SND_SOC_NOPM, TAPAN_RX2, 0,
		&slim_rx_mux[TAPAN_RX2]),
	SND_SOC_DAPM_MUX("SLIM RX3 MUX", SND_SOC_NOPM, TAPAN_RX3, 0,
		&slim_rx_mux[TAPAN_RX3]),
	SND_SOC_DAPM_MUX("SLIM RX4 MUX", SND_SOC_NOPM, TAPAN_RX4, 0,
		&slim_rx_mux[TAPAN_RX4]),
	SND_SOC_DAPM_MUX("SLIM RX5 MUX", SND_SOC_NOPM, TAPAN_RX5, 0,
		&slim_rx_mux[TAPAN_RX5]),
	SND_SOC_DAPM_MIXER("SLIM RX1", SND_SOC_NOPM, 0, 0, NULL, 0),
	SND_SOC_DAPM_MIXER("SLIM RX2", SND_SOC_NOPM, 0, 0, NULL, 0),
	SND_SOC_DAPM_MIXER("SLIM RX3", SND_SOC_NOPM, 0, 0, NULL, 0),
	SND_SOC_DAPM_MIXER("SLIM RX4", SND_SOC_NOPM, 0, 0, NULL, 0),
	SND_SOC_DAPM_MIXER("SLIM RX5", SND_SOC_NOPM, 0, 0, NULL, 0),
	/* RX1 MIX1 mux inputs */
	SND_SOC_DAPM_MUX("RX1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
		&rx_mix1_inp1_mux),
	SND_SOC_DAPM_MUX("RX1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
		&rx_mix1_inp2_mux),
	SND_SOC_DAPM_MUX("RX1 MIX1 INP3", SND_SOC_NOPM, 0, 0,
		&rx_mix1_inp3_mux),
	/* RX2 MIX1 mux inputs */
	SND_SOC_DAPM_MUX("RX2 MIX1 INP1", SND_SOC_NOPM, 0, 0,
		&rx2_mix1_inp1_mux),
	SND_SOC_DAPM_MUX("RX2 MIX1 INP2", SND_SOC_NOPM, 0, 0,
		&rx2_mix1_inp2_mux),
	/*
	 * NOTE(review): INP3 is wired to rx2_mix1_inp2_mux — looks like a
	 * copy-paste from INP2; confirm whether rx2_mix1_inp3_mux exists
	 * and should be used here.
	 */
	SND_SOC_DAPM_MUX("RX2 MIX1 INP3", SND_SOC_NOPM, 0, 0,
		&rx2_mix1_inp2_mux),
	/* RX3 MIX1 mux inputs */
	SND_SOC_DAPM_MUX("RX3 MIX1 INP1", SND_SOC_NOPM, 0, 0,
		&rx3_mix1_inp1_mux),
	SND_SOC_DAPM_MUX("RX3 MIX1 INP2", SND_SOC_NOPM, 0, 0,
		&rx3_mix1_inp2_mux),
	SND_SOC_DAPM_MUX("RX3 MIX1 INP3", SND_SOC_NOPM, 0, 0,
		&rx3_mix1_inp3_mux),
	/* RX1 MIX2 mux inputs */
	SND_SOC_DAPM_MUX("RX1 MIX2 INP1", SND_SOC_NOPM, 0, 0,
		&rx1_mix2_inp1_mux),
	SND_SOC_DAPM_MUX("RX1 MIX2 INP2", SND_SOC_NOPM, 0, 0,
		&rx1_mix2_inp2_mux),
	/* RX2 MIX2 mux inputs */
	SND_SOC_DAPM_MUX("RX2 MIX2 INP1", SND_SOC_NOPM, 0, 0,
		&rx2_mix2_inp1_mux),
	SND_SOC_DAPM_MUX("RX2 MIX2 INP2", SND_SOC_NOPM, 0, 0,
		&rx2_mix2_inp2_mux),
	SND_SOC_DAPM_MIXER("RX1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
	SND_SOC_DAPM_MIXER("RX2 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
	SND_SOC_DAPM_MIXER("RX1 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
	SND_SOC_DAPM_MIXER("RX2 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
	SND_SOC_DAPM_MIXER_E("RX3 MIX1", TAPAN_A_CDC_CLK_RX_B1_CTL, 2, 0, NULL,
		0, tapan_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU),
	SND_SOC_DAPM_VIRT_MUX_E("RX1 INTERPOLATOR",
		TAPAN_A_CDC_CLK_RX_B1_CTL, 0, 0,
		&rx1_interpolator, tapan_codec_enable_interpolator,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
	SND_SOC_DAPM_VIRT_MUX_E("RX2 INTERPOLATOR",
		TAPAN_A_CDC_CLK_RX_B1_CTL, 1, 0,
		&rx2_interpolator, tapan_codec_enable_interpolator,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
	SND_SOC_DAPM_MIXER("RX1 CHAIN", TAPAN_A_CDC_RX1_B6_CTL, 5, 0,
		NULL, 0),
	SND_SOC_DAPM_MIXER_E("RX2 CHAIN", SND_SOC_NOPM, 0, 0, NULL,
		0, tapan_codec_rx_dem_select, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MUX_E("CLASS_H_DSM MUX", SND_SOC_NOPM, 0, 0,
		&class_h_dsm_mux, tapan_codec_dsm_mux_event,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	/* RX Bias */
	SND_SOC_DAPM_SUPPLY("RX_BIAS", SND_SOC_NOPM, 0, 0,
		tapan_codec_enable_rx_bias, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMD),
	/* CDC_CP_VDD */
	SND_SOC_DAPM_SUPPLY("CDC_CP_VDD", SND_SOC_NOPM, 0, 0,
		tapan_codec_chargepump_vdd_event, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMD),
	/*EAR */
	SND_SOC_DAPM_PGA_E("EAR PA", TAPAN_A_RX_EAR_EN, 4, 0, NULL, 0,
		tapan_codec_enable_ear_pa, SND_SOC_DAPM_POST_PMU |
		SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MIXER_E("DAC1", TAPAN_A_RX_EAR_EN, 6, 0, dac1_switch,
		ARRAY_SIZE(dac1_switch), tapan_codec_ear_dac_event,
		SND_SOC_DAPM_PRE_PMU),
	/* Headphone Left */
	SND_SOC_DAPM_PGA_E("HPHL", TAPAN_A_RX_HPH_CNP_EN, 5, 0, NULL, 0,
		tapan_hph_pa_event, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MIXER_E("HPHL DAC", TAPAN_A_RX_HPH_L_DAC_CTL, 7, 0,
		hphl_switch, ARRAY_SIZE(hphl_switch), tapan_hphl_dac_event,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	/* Headphone Right */
	SND_SOC_DAPM_PGA_E("HPHR", TAPAN_A_RX_HPH_CNP_EN, 4, 0, NULL, 0,
		tapan_hph_pa_event, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_DAC_E("HPHR DAC", NULL, TAPAN_A_RX_HPH_R_DAC_CTL, 7, 0,
		tapan_hphr_dac_event,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	/* LINEOUT1*/
	SND_SOC_DAPM_DAC_E("LINEOUT1 DAC", NULL, TAPAN_A_RX_LINE_1_DAC_CTL, 7, 0
		, tapan_lineout_dac_event,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_PGA_E("LINEOUT1 PA", TAPAN_A_RX_LINE_CNP_EN, 0, 0, NULL,
		0, tapan_codec_enable_lineout, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	/* LINEOUT2*/
	SND_SOC_DAPM_MUX("RDAC5 MUX", SND_SOC_NOPM, 0, 0,
		&rx_dac5_mux),
	/* LINEOUT1*/
	SND_SOC_DAPM_MUX("RDAC4 MUX", SND_SOC_NOPM, 0, 0,
		&rx_dac4_mux),
	SND_SOC_DAPM_MUX("RDAC3 MUX", SND_SOC_NOPM, 0, 0,
		&rx_dac3_mux),
	SND_SOC_DAPM_DAC_E("LINEOUT2 DAC", NULL, TAPAN_A_RX_LINE_2_DAC_CTL, 7, 0
		, tapan_lineout_dac_event,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_PGA_E("LINEOUT2 PA", TAPAN_A_RX_LINE_CNP_EN, 1, 0, NULL,
		0, tapan_codec_enable_lineout, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	/* CLASS-D SPK */
	SND_SOC_DAPM_MIXER_E("SPK DAC", SND_SOC_NOPM, 0, 0,
		spk_dac_switch, ARRAY_SIZE(spk_dac_switch), tapan_spk_dac_event,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_PGA_E("SPK PA", SND_SOC_NOPM, 0, 0 , NULL,
		0, tapan_codec_enable_spk_pa,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_SUPPLY("VDD_SPKDRV", SND_SOC_NOPM, 0, 0,
		tapan_codec_enable_vdd_spkr,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_OUTPUT("EAR"),
	SND_SOC_DAPM_OUTPUT("HEADPHONE"),
	SND_SOC_DAPM_OUTPUT("LINEOUT1"),
	SND_SOC_DAPM_OUTPUT("LINEOUT2"),
	SND_SOC_DAPM_OUTPUT("SPK_OUT"),
	/* TX Path*/
	SND_SOC_DAPM_MIXER("AIF1_CAP Mixer", SND_SOC_NOPM, AIF1_CAP, 0,
		aif_cap_mixer, ARRAY_SIZE(aif_cap_mixer)),
	SND_SOC_DAPM_MIXER("AIF2_CAP Mixer", SND_SOC_NOPM, AIF2_CAP, 0,
		aif_cap_mixer, ARRAY_SIZE(aif_cap_mixer)),
	SND_SOC_DAPM_MIXER("AIF3_CAP Mixer", SND_SOC_NOPM, AIF3_CAP, 0,
		aif_cap_mixer, ARRAY_SIZE(aif_cap_mixer)),
	SND_SOC_DAPM_MUX("SLIM TX1 MUX", SND_SOC_NOPM, TAPAN_TX1, 0,
		&sb_tx1_mux),
	SND_SOC_DAPM_MUX("SLIM TX2 MUX", SND_SOC_NOPM, TAPAN_TX2, 0,
		&sb_tx2_mux),
	SND_SOC_DAPM_MUX("SLIM TX3 MUX", SND_SOC_NOPM, TAPAN_TX3, 0,
		&sb_tx3_mux),
	SND_SOC_DAPM_MUX("SLIM TX4 MUX", SND_SOC_NOPM, TAPAN_TX4, 0,
		&sb_tx4_mux),
	SND_SOC_DAPM_MUX("SLIM TX5 MUX", SND_SOC_NOPM, TAPAN_TX5, 0,
		&sb_tx5_mux),
	SND_SOC_DAPM_SUPPLY("CDC_CONN", WCD9XXX_A_CDC_CLK_OTHR_CTL, 2, 0, NULL,
		0),
	/* Decimator MUX */
	SND_SOC_DAPM_MUX_E("DEC1 MUX", TAPAN_A_CDC_CLK_TX_CLK_EN_B1_CTL, 0, 0,
		&dec1_mux, tapan_codec_enable_dec,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MUX_E("DEC2 MUX", TAPAN_A_CDC_CLK_TX_CLK_EN_B1_CTL, 1, 0,
		&dec2_mux, tapan_codec_enable_dec,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_SUPPLY("LDO_H", SND_SOC_NOPM, 7, 0,
		tapan_codec_enable_ldo_h,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	/*
	 * DAPM 'LDO_H Standalone' is to be powered by mbhc driver after
	 * acquiring codec_resource lock.
	 * So call __tapan_codec_enable_ldo_h instead and avoid deadlock.
	 */
	SND_SOC_DAPM_SUPPLY("LDO_H Standalone", SND_SOC_NOPM, 7, 0,
		__tapan_codec_enable_ldo_h,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_INPUT("AMIC1"),
	SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 External", SND_SOC_NOPM, 7, 0,
		tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 Internal1", SND_SOC_NOPM, 7, 0,
		tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 Internal2", SND_SOC_NOPM, 7, 0,
		tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_ADC_E("ADC1", NULL, TAPAN_A_TX_1_EN, 7, 0,
		tapan_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_ADC_E("ADC2", NULL, TAPAN_A_TX_2_EN, 7, 0,
		tapan_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_INPUT("AMIC3"),
	SND_SOC_DAPM_ADC_E("ADC3", NULL, TAPAN_A_TX_3_EN, 7, 0,
		tapan_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_INPUT("AMIC4"),
	SND_SOC_DAPM_ADC_E("ADC4", NULL, TAPAN_A_TX_4_EN, 7, 0,
		tapan_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_INPUT("AMIC2"),
	SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 External", SND_SOC_NOPM, 7, 0,
		tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 Internal1", SND_SOC_NOPM, 7, 0,
		tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 Internal2", SND_SOC_NOPM, 7, 0,
		tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 Internal3", SND_SOC_NOPM, 7, 0,
		tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MICBIAS_E(DAPM_MICBIAS2_EXTERNAL_STANDALONE, SND_SOC_NOPM,
		7, 0, tapan_codec_enable_micbias,
		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
		SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_AIF_OUT_E("AIF1 CAP", "AIF1 Capture", 0, SND_SOC_NOPM,
		AIF1_CAP, 0, tapan_codec_enable_slimtx,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_AIF_OUT_E("AIF2 CAP", "AIF2 Capture", 0, SND_SOC_NOPM,
		AIF2_CAP, 0, tapan_codec_enable_slimtx,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_AIF_OUT_E("AIF3 CAP", "AIF3 Capture", 0, SND_SOC_NOPM,
		AIF3_CAP, 0, tapan_codec_enable_slimtx,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	/* Digital Mic Inputs */
	SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0,
		tapan_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_ADC_E("DMIC2", NULL, SND_SOC_NOPM, 0, 0,
		tapan_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMD),
	/* Sidetone */
	SND_SOC_DAPM_MUX_E("IIR1 INP1 MUX", TAPAN_A_CDC_IIR1_GAIN_B1_CTL, 0, 0,
		&iir1_inp1_mux, tapan_codec_iir_mux_event,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_PGA_E("IIR1", TAPAN_A_CDC_CLK_SD_CTL, 0, 0, NULL, 0,
		tapan_codec_set_iir_gain, SND_SOC_DAPM_POST_PMU),
	SND_SOC_DAPM_MUX_E("IIR1 INP2 MUX", TAPAN_A_CDC_IIR1_GAIN_B2_CTL, 0, 0,
		&iir1_inp2_mux, tapan_codec_iir_mux_event,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MUX_E("IIR1 INP3 MUX", TAPAN_A_CDC_IIR1_GAIN_B3_CTL, 0, 0,
		&iir1_inp3_mux, tapan_codec_iir_mux_event,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MUX_E("IIR1 INP4 MUX", TAPAN_A_CDC_IIR1_GAIN_B4_CTL, 0, 0,
		&iir1_inp4_mux, tapan_codec_iir_mux_event,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MUX_E("IIR2 INP1 MUX", TAPAN_A_CDC_IIR2_GAIN_B1_CTL, 0, 0,
		&iir2_inp1_mux, tapan_codec_iir_mux_event,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MUX_E("IIR2 INP2 MUX", TAPAN_A_CDC_IIR2_GAIN_B2_CTL, 0, 0,
		&iir2_inp2_mux, tapan_codec_iir_mux_event,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MUX_E("IIR2 INP3 MUX", TAPAN_A_CDC_IIR2_GAIN_B3_CTL, 0, 0,
		&iir2_inp3_mux, tapan_codec_iir_mux_event,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MUX_E("IIR2 INP4 MUX", TAPAN_A_CDC_IIR2_GAIN_B4_CTL, 0, 0,
		&iir2_inp4_mux, tapan_codec_iir_mux_event,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_PGA("IIR2", TAPAN_A_CDC_CLK_SD_CTL, 1, 0, NULL, 0),
	/* AUX PGA */
	SND_SOC_DAPM_ADC_E("AUX_PGA_Left", NULL, TAPAN_A_RX_AUX_SW_CTL, 7, 0,
		tapan_codec_enable_aux_pga, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_ADC_E("AUX_PGA_Right", NULL, TAPAN_A_RX_AUX_SW_CTL, 6, 0,
		tapan_codec_enable_aux_pga, SND_SOC_DAPM_PRE_PMU |
		SND_SOC_DAPM_POST_PMD),
	/* Lineout, ear and HPH PA Mixers */
	SND_SOC_DAPM_MIXER("EAR_PA_MIXER", SND_SOC_NOPM, 0, 0,
		ear_pa_mix, ARRAY_SIZE(ear_pa_mix)),
	SND_SOC_DAPM_MIXER("HPHL_PA_MIXER", SND_SOC_NOPM, 0, 0,
		hphl_pa_mix, ARRAY_SIZE(hphl_pa_mix)),
	SND_SOC_DAPM_MIXER("HPHR_PA_MIXER", SND_SOC_NOPM, 0, 0,
		hphr_pa_mix, ARRAY_SIZE(hphr_pa_mix)),
	SND_SOC_DAPM_MIXER("LINEOUT1_PA_MIXER", SND_SOC_NOPM, 0, 0,
		lineout1_pa_mix, ARRAY_SIZE(lineout1_pa_mix)),
	SND_SOC_DAPM_MIXER("LINEOUT2_PA_MIXER", SND_SOC_NOPM, 0, 0,
		lineout2_pa_mix, ARRAY_SIZE(lineout2_pa_mix)),
};
/*
 * Slimbus port interrupt handler.
 *
 * Reads the four 8-bit port-interrupt status registers (RX_0..TX_1)
 * into one 32-bit word: bits 0-15 are RX ports, bits 16-31 are TX
 * ports.  For each raised bit it logs overflow/underflow conditions
 * and, on a port-closed interrupt, clears the matching bit in every
 * DAI's ch_mask (waking waiters in tapan_codec_enable_slim_chmask()
 * once a mask drains to zero).  Each serviced bit is acknowledged via
 * the per-byte INT_CLR registers.
 */
static irqreturn_t tapan_slimbus_irq(int irq, void *data)
{
	struct tapan_priv *priv = data;
	struct snd_soc_codec *codec = priv->codec;
	unsigned long status = 0;
	int i, j, port_id, k;
	u32 bit;
	u8 val;
	bool tx, cleared;

	/* Pack the 4 status bytes into 'status' (low byte = RX_0). */
	for (i = TAPAN_SLIM_PGD_PORT_INT_STATUS_RX_0, j = 0;
	     i <= TAPAN_SLIM_PGD_PORT_INT_STATUS_TX_1; i++, j++) {
		val = wcd9xxx_interface_reg_read(codec->control_data, i);
		status |= ((u32)val << (8 * j));
	}

	for_each_set_bit(j, &status, 32) {
		/* Bits 16..31 are TX ports, 0..15 are RX ports. */
		tx = (j >= 16 ? true : false);
		port_id = (tx ? j - 16 : j);
		val = wcd9xxx_interface_reg_read(codec->control_data,
				TAPAN_SLIM_PGD_PORT_INT_RX_SOURCE0 + j);
		if (val & TAPAN_SLIM_IRQ_OVERFLOW)
			pr_err_ratelimited(
			    "%s: overflow error on %s port %d, value %x\n",
			    __func__, (tx ? "TX" : "RX"), port_id, val);
		if (val & TAPAN_SLIM_IRQ_UNDERFLOW)
			pr_err_ratelimited(
			    "%s: underflow error on %s port %d, value %x\n",
			    __func__, (tx ? "TX" : "RX"), port_id, val);
		if (val & TAPAN_SLIM_IRQ_PORT_CLOSED) {
			/*
			 * INT SOURCE register starts from RX to TX
			 * but port number in the ch_mask is in opposite way
			 */
			bit = (tx ? j - 16 : j + 16);
			dev_dbg(codec->dev, "%s: %s port %d closed value %x, bit %u\n",
				__func__, (tx ? "TX" : "RX"), port_id, val,
				bit);
			for (k = 0, cleared = false; k < NUM_CODEC_DAIS; k++) {
				dev_dbg(codec->dev, "%s: priv->dai[%d].ch_mask = 0x%lx\n",
					__func__, k, priv->dai[k].ch_mask);
				if (test_and_clear_bit(bit,
						       &priv->dai[k].ch_mask)) {
					cleared = true;
					if (!priv->dai[k].ch_mask)
						wake_up(&priv->dai[k].dai_wait);
					/*
					 * There are cases when multiple DAIs
					 * might be using the same slimbus
					 * channel. Hence don't break here.
					 */
				}
			}
			WARN(!cleared,
			     "Couldn't find slimbus %s port %d for closing\n",
			     (tx ? "TX" : "RX"), port_id);
		}
		/* Acknowledge this port's interrupt (one clear reg per 8). */
		wcd9xxx_interface_reg_write(codec->control_data,
					    TAPAN_SLIM_PGD_PORT_INT_CLR_RX_0 +
					    (j / 8),
					    1 << (j % 8));
	}
	return IRQ_HANDLED;
}
/*
 * Apply board-specific platform data to the codec: micbias LDO voltage and
 * cfilt K values, analog-mic TXFE bypass/buffer settings, headphone OCP
 * limits, micbias capless mode, and the DMIC clock divider derived from the
 * MCLK rate.
 *
 * Returns 0 on success, -ENODEV when no pdata is present, or -EINVAL for
 * out-of-range pdata values.
 */
static int tapan_handle_pdata(struct tapan_priv *tapan)
{
	struct snd_soc_codec *codec = tapan->codec;
	struct wcd9xxx_pdata *pdata = tapan->resmgr.pdata;
	int k1, k2, k3, rc = 0;
	u8 txfe_bypass;
	u8 txfe_buff;
	u8 flag;
	u8 i = 0, j = 0;
	u8 val_txfe = 0, value = 0;
	u8 dmic_sample_rate_value = 0;
	u8 dmic_b1_ctl_value = 0;
	u8 anc_ctl_value = 0;

	if (!pdata) {
		dev_err(codec->dev, "%s: NULL pdata\n", __func__);
		rc = -ENODEV;
		goto done;
	}

	txfe_bypass = pdata->amic_settings.txfe_enable;
	txfe_buff = pdata->amic_settings.txfe_buff;
	flag = pdata->amic_settings.use_pdata;

	/* Make sure settings are correct */
	if ((pdata->micbias.ldoh_v > WCD9XXX_LDOH_3P0_V) ||
	    (pdata->micbias.bias1_cfilt_sel > WCD9XXX_CFILT3_SEL) ||
	    (pdata->micbias.bias2_cfilt_sel > WCD9XXX_CFILT3_SEL) ||
	    (pdata->micbias.bias3_cfilt_sel > WCD9XXX_CFILT3_SEL)) {
		dev_err(codec->dev, "%s: Invalid ldoh voltage or bias cfilt\n",
			__func__);
		rc = -EINVAL;
		goto done;
	}

	/* figure out k value */
	k1 = wcd9xxx_resmgr_get_k_val(&tapan->resmgr, pdata->micbias.cfilt1_mv);
	k2 = wcd9xxx_resmgr_get_k_val(&tapan->resmgr, pdata->micbias.cfilt2_mv);
	k3 = wcd9xxx_resmgr_get_k_val(&tapan->resmgr, pdata->micbias.cfilt3_mv);
	if (IS_ERR_VALUE(k1) || IS_ERR_VALUE(k2) || IS_ERR_VALUE(k3)) {
		dev_err(codec->dev,
			"%s: could not get K value. k1 = %d k2 = %d k3 = %d\n",
			__func__, k1, k2, k3);
		rc = -EINVAL;
		goto done;
	}

	/* Set voltage level and always use LDO */
	snd_soc_update_bits(codec, TAPAN_A_LDO_H_MODE_1, 0x0C,
			    (pdata->micbias.ldoh_v << 2));
	snd_soc_update_bits(codec, TAPAN_A_MICB_CFILT_1_VAL, 0xFC, (k1 << 2));
	snd_soc_update_bits(codec, TAPAN_A_MICB_CFILT_2_VAL, 0xFC, (k2 << 2));
	snd_soc_update_bits(codec, TAPAN_A_MICB_CFILT_3_VAL, 0xFC, (k3 << 2));

	/*
	 * Program TXFE bypass/buffer bits for the analog mic pairs.
	 * Tapan only has two TXFE test registers: TAPAN_A_TX_1_2_TEST_EN
	 * (mics 1/2, j == 0) and TAPAN_A_TX_4_5_TEST_EN (mics 4/5, j == 1),
	 * so the loop visits only i == 0 and i == 3.
	 */
	i = 0;
	while (i < 5) {
		if (flag & (0x01 << i)) {
			val_txfe = (txfe_bypass & (0x01 << i)) ? 0x20 : 0x00;
			val_txfe = val_txfe |
				((txfe_buff & (0x01 << i)) ? 0x10 : 0x00);
			snd_soc_update_bits(codec,
					    TAPAN_A_TX_1_2_TEST_EN + j * 10,
					    0x30, val_txfe);
		}
		if (flag & (0x01 << (i + 1))) {
			val_txfe = (txfe_bypass &
					(0x01 << (i + 1))) ? 0x02 : 0x00;
			val_txfe |= (txfe_buff &
					(0x01 << (i + 1))) ? 0x01 : 0x00;
			snd_soc_update_bits(codec,
					    TAPAN_A_TX_1_2_TEST_EN + j * 10,
					    0x03, val_txfe);
		}
		if (i == 0) {
			/*
			 * Advance to the 4/5 mic pair and bump the register
			 * index so the second pass targets
			 * TAPAN_A_TX_4_5_TEST_EN instead of rewriting
			 * TAPAN_A_TX_1_2_TEST_EN (previously j was never
			 * updated here).
			 */
			i = 3;
			j = 1;
			continue;
		} else if (i == 3) {
			break;
		}
	}

	if (pdata->ocp.use_pdata) {
		/* not defined in CODEC specification */
		if (pdata->ocp.hph_ocp_limit == 1 ||
		    pdata->ocp.hph_ocp_limit == 5) {
			rc = -EINVAL;
			goto done;
		}
		snd_soc_update_bits(codec, TAPAN_A_RX_COM_OCP_CTL,
				    0x0F, pdata->ocp.num_attempts);
		snd_soc_write(codec, TAPAN_A_RX_COM_OCP_COUNT,
			      ((pdata->ocp.run_time << 4) |
			       pdata->ocp.wait_time));
		snd_soc_update_bits(codec, TAPAN_A_RX_HPH_OCP_CTL,
				    0xE0, (pdata->ocp.hph_ocp_limit << 5));
	}

	/* Set micbias capless mode with tail current */
	value = (pdata->micbias.bias1_cap_mode == MICBIAS_EXT_BYP_CAP ?
		 0x00 : 0x10);
	snd_soc_update_bits(codec, TAPAN_A_MICB_1_CTL, 0x10, value);
	value = (pdata->micbias.bias2_cap_mode == MICBIAS_EXT_BYP_CAP ?
		 0x00 : 0x10);
	snd_soc_update_bits(codec, TAPAN_A_MICB_2_CTL, 0x10, value);
	value = (pdata->micbias.bias3_cap_mode == MICBIAS_EXT_BYP_CAP ?
		 0x00 : 0x10);
	snd_soc_update_bits(codec, TAPAN_A_MICB_3_CTL, 0x10, value);

	/* Set the DMIC sample rate */
	if (pdata->mclk_rate == TAPAN_MCLK_CLK_9P6MHZ) {
		switch (pdata->dmic_sample_rate) {
		case WCD9XXX_DMIC_SAMPLE_RATE_2P4MHZ:
			dmic_sample_rate_value = WCD9XXX_DMIC_SAMPLE_RATE_DIV_4;
			dmic_b1_ctl_value = WCD9XXX_DMIC_B1_CTL_DIV_4;
			anc_ctl_value = WCD9XXX_ANC_DMIC_X2_OFF;
			break;
		case WCD9XXX_DMIC_SAMPLE_RATE_4P8MHZ:
			dmic_sample_rate_value = WCD9XXX_DMIC_SAMPLE_RATE_DIV_2;
			dmic_b1_ctl_value = WCD9XXX_DMIC_B1_CTL_DIV_2;
			anc_ctl_value = WCD9XXX_ANC_DMIC_X2_ON;
			break;
		case WCD9XXX_DMIC_SAMPLE_RATE_3P2MHZ:
		case WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED:
			dmic_sample_rate_value = WCD9XXX_DMIC_SAMPLE_RATE_DIV_3;
			dmic_b1_ctl_value = WCD9XXX_DMIC_B1_CTL_DIV_3;
			anc_ctl_value = WCD9XXX_ANC_DMIC_X2_OFF;
			break;
		default:
			dev_err(codec->dev,
				"%s Invalid sample rate %d for mclk %d\n",
				__func__, pdata->dmic_sample_rate,
				pdata->mclk_rate);
			rc = -EINVAL;
			goto done;
		}
	} else if (pdata->mclk_rate == TAPAN_MCLK_CLK_12P288MHZ) {
		switch (pdata->dmic_sample_rate) {
		case WCD9XXX_DMIC_SAMPLE_RATE_3P072MHZ:
			dmic_sample_rate_value = WCD9XXX_DMIC_SAMPLE_RATE_DIV_4;
			dmic_b1_ctl_value = WCD9XXX_DMIC_B1_CTL_DIV_4;
			anc_ctl_value = WCD9XXX_ANC_DMIC_X2_OFF;
			break;
		case WCD9XXX_DMIC_SAMPLE_RATE_6P144MHZ:
			dmic_sample_rate_value = WCD9XXX_DMIC_SAMPLE_RATE_DIV_2;
			dmic_b1_ctl_value = WCD9XXX_DMIC_B1_CTL_DIV_2;
			anc_ctl_value = WCD9XXX_ANC_DMIC_X2_ON;
			break;
		case WCD9XXX_DMIC_SAMPLE_RATE_4P096MHZ:
		case WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED:
			dmic_sample_rate_value = WCD9XXX_DMIC_SAMPLE_RATE_DIV_3;
			dmic_b1_ctl_value = WCD9XXX_DMIC_B1_CTL_DIV_3;
			anc_ctl_value = WCD9XXX_ANC_DMIC_X2_OFF;
			break;
		default:
			dev_err(codec->dev,
				"%s Invalid sample rate %d for mclk %d\n",
				__func__, pdata->dmic_sample_rate,
				pdata->mclk_rate);
			rc = -EINVAL;
			goto done;
		}
	} else {
		dev_err(codec->dev, "%s MCLK is not set!\n", __func__);
		rc = -EINVAL;
		goto done;
	}

	snd_soc_update_bits(codec, TAPAN_A_CDC_TX1_DMIC_CTL,
			    0x7, dmic_sample_rate_value);
	snd_soc_update_bits(codec, TAPAN_A_CDC_TX2_DMIC_CTL,
			    0x7, dmic_sample_rate_value);
	snd_soc_update_bits(codec, TAPAN_A_CDC_TX3_DMIC_CTL,
			    0x7, dmic_sample_rate_value);
	snd_soc_update_bits(codec, TAPAN_A_CDC_TX4_DMIC_CTL,
			    0x7, dmic_sample_rate_value);
	snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_DMIC_B1_CTL,
			    0xEE, dmic_b1_ctl_value);
	snd_soc_update_bits(codec, TAPAN_A_CDC_ANC1_B2_CTL,
			    0x1, anc_ctl_value);
done:
	return rc;
}
/*
 * Register overrides applied on every probe/SSR recovery, for all chip
 * versions (see tapan_update_reg_defaults()).
 */
static const struct tapan_reg_mask_val tapan_reg_defaults[] = {
	/* enable QFUSE for wcd9306 */
	TAPAN_REG_VAL(TAPAN_A_QFUSE_CTL, 0x03),
	/* PROGRAM_THE_0P85V_VBG_REFERENCE = V_0P858V */
	TAPAN_REG_VAL(TAPAN_A_BIAS_CURR_CTL_2, 0x04),
	TAPAN_REG_VAL(TAPAN_A_CDC_CLK_POWER_CTL, 0x03),
	/* EAR PA defaults */
	TAPAN_REG_VAL(TAPAN_A_RX_EAR_CMBUFF, 0x05),
	/* RX1 and RX2 defaults */
	TAPAN_REG_VAL(TAPAN_A_CDC_RX1_B6_CTL, 0xA0),
	TAPAN_REG_VAL(TAPAN_A_CDC_RX2_B6_CTL, 0x80),
	/* Headset set Right from RX2 */
	TAPAN_REG_VAL(TAPAN_A_CDC_CONN_RX2_B2_CTL, 0x10),
	/*
	 * The following only need to be written for Tapan 1.0 parts.
	 * Tapan 2.0 will have appropriate defaults for these registers.
	 */
	/* Required defaults for class H operation */
	/* borrowed from Taiko class-h */
	TAPAN_REG_VAL(TAPAN_A_RX_HPH_CHOP_CTL, 0xF4),
	TAPAN_REG_VAL(TAPAN_A_BIAS_CURR_CTL_2, 0x08),
	TAPAN_REG_VAL(WCD9XXX_A_BUCK_CTRL_CCL_1, 0x5B),
	TAPAN_REG_VAL(WCD9XXX_A_BUCK_CTRL_CCL_3, 0x6F),
	/* TODO: Check below reg writes conflict with above */
	/* PROGRAM_THE_0P85V_VBG_REFERENCE = V_0P858V */
	TAPAN_REG_VAL(TAPAN_A_BIAS_CURR_CTL_2, 0x04),
	TAPAN_REG_VAL(TAPAN_A_RX_HPH_CHOP_CTL, 0x74),
	TAPAN_REG_VAL(TAPAN_A_RX_BUCK_BIAS1, 0x62),
	/* Choose max non-overlap time for NCP */
	TAPAN_REG_VAL(TAPAN_A_NCP_CLK, 0xFC),
	/* Use 25mV/50mV for deltap/m to reduce ripple */
	TAPAN_REG_VAL(WCD9XXX_A_BUCK_CTRL_VCL_1, 0x08),
	/*
	 * Set DISABLE_MODE_SEL<1:0> to 0b10 (disable PWM in auto mode).
	 * Note that the other bits of this register will be changed during
	 * Rx PA bring up.
	 */
	TAPAN_REG_VAL(WCD9XXX_A_BUCK_MODE_3, 0xCE),
	/* Reduce HPH DAC bias to 70% */
	TAPAN_REG_VAL(TAPAN_A_RX_HPH_BIAS_PA, 0x7A),
	/* Reduce EAR DAC bias to 70% */
	TAPAN_REG_VAL(TAPAN_A_RX_EAR_BIAS_PA, 0x76),
	/* Reduce LINE DAC bias to 70% */
	TAPAN_REG_VAL(TAPAN_A_RX_LINE_BIAS_PA, 0x78),
	/*
	 * There is a diode to pull down the micbias while doing
	 * insertion detection. This diode can cause leakage.
	 * Set bit 0 to 1 to prevent leakage.
	 * Setting this bit of micbias 2 prevents leakage for all other micbias.
	 */
	TAPAN_REG_VAL(TAPAN_A_MICB_2_MBHC, 0x41),
	/*
	 * Default register settings to support dynamic change of
	 * vdd_buck between 1.8 volts and 2.15 volts.
	 */
	TAPAN_REG_VAL(TAPAN_A_BUCK_MODE_2, 0xAA),
};
/*
 * Hardware-reset values re-asserted on Tapan 2.x parts only, before the
 * common tapan_reg_defaults table is written (see
 * tapan_update_reg_defaults()).
 */
static const struct tapan_reg_mask_val tapan_2_x_reg_reset_values[] = {
	TAPAN_REG_VAL(TAPAN_A_TX_7_MBHC_EN, 0x6C),
	TAPAN_REG_VAL(TAPAN_A_BUCK_CTRL_CCL_4, 0x51),
	TAPAN_REG_VAL(TAPAN_A_RX_HPH_CNP_WG_CTL, 0xDA),
	TAPAN_REG_VAL(TAPAN_A_RX_EAR_CNP, 0xC0),
	TAPAN_REG_VAL(TAPAN_A_RX_LINE_1_TEST, 0x02),
	TAPAN_REG_VAL(TAPAN_A_RX_LINE_2_TEST, 0x02),
	TAPAN_REG_VAL(TAPAN_A_SPKR_DRV_OCP_CTL, 0x97),
	TAPAN_REG_VAL(TAPAN_A_SPKR_DRV_CLIP_DET, 0x01),
	TAPAN_REG_VAL(TAPAN_A_SPKR_DRV_IEC, 0x00),
	TAPAN_REG_VAL(TAPAN_A_CDC_CLSH_B1_CTL, 0xE4),
	TAPAN_REG_VAL(TAPAN_A_CDC_CLSH_B2_CTL, 0x00),
	TAPAN_REG_VAL(TAPAN_A_CDC_CLSH_B3_CTL, 0x00),
	TAPAN_REG_VAL(TAPAN_A_CDC_CLSH_BUCK_NCP_VARS, 0x00),
	TAPAN_REG_VAL(TAPAN_A_CDC_CLSH_V_PA_HD_EAR, 0x00),
	TAPAN_REG_VAL(TAPAN_A_CDC_CLSH_V_PA_HD_HPH, 0x00),
	TAPAN_REG_VAL(TAPAN_A_CDC_CLSH_V_PA_MIN_EAR, 0x00),
	TAPAN_REG_VAL(TAPAN_A_CDC_CLSH_V_PA_MIN_HPH, 0x00),
};
/* Extra defaults written only on Tapan 1.0 silicon. */
static const struct tapan_reg_mask_val tapan_1_0_reg_defaults[] = {
	/* Close leakage on the spkdrv */
	TAPAN_REG_VAL(TAPAN_A_SPKR_DRV_DBG_PWRSTG, 0x24),
	TAPAN_REG_VAL(TAPAN_A_SPKR_DRV_DBG_DAC, 0xE5),
};
static void tapan_update_reg_defaults(struct snd_soc_codec *codec)
{
u32 i;
struct wcd9xxx *tapan_core = dev_get_drvdata(codec->dev->parent);
if (!TAPAN_IS_1_0(tapan_core->version)) {
for (i = 0; i < ARRAY_SIZE(tapan_2_x_reg_reset_values); i++)
snd_soc_write(codec, tapan_2_x_reg_reset_values[i].reg,
tapan_2_x_reg_reset_values[i].val);
}
for (i = 0; i < ARRAY_SIZE(tapan_reg_defaults); i++)
snd_soc_write(codec, tapan_reg_defaults[i].reg,
tapan_reg_defaults[i].val);
if (TAPAN_IS_1_0(tapan_core->version)) {
for (i = 0; i < ARRAY_SIZE(tapan_1_0_reg_defaults); i++)
snd_soc_write(codec, tapan_1_0_reg_defaults[i].reg,
tapan_1_0_reg_defaults[i].val);
}
if (!TAPAN_IS_1_0(tapan_core->version))
spkr_drv_wrnd = -1;
else if (spkr_drv_wrnd == 1)
snd_soc_write(codec, TAPAN_A_SPKR_DRV_EN, 0xEF);
}
/*
 * Program the chip clock dividers according to the MCLK rate supplied by
 * the board (12.288 MHz or 9.6 MHz); other rates are left untouched.
 */
static void tapan_update_reg_mclk_rate(struct wcd9xxx *wcd9xxx)
{
	struct snd_soc_codec *codec = (struct snd_soc_codec *)wcd9xxx->ssr_priv;

	dev_dbg(codec->dev, "%s: MCLK Rate = %x\n",
		__func__, wcd9xxx->mclk_rate);

	switch (wcd9xxx->mclk_rate) {
	case TAPAN_MCLK_CLK_12P288MHZ:
		snd_soc_update_bits(codec, TAPAN_A_CHIP_CTL, 0x06, 0x0);
		snd_soc_update_bits(codec, TAPAN_A_RX_COM_TIMER_DIV, 0x01,
				    0x01);
		break;
	case TAPAN_MCLK_CLK_9P6MHZ:
		snd_soc_update_bits(codec, TAPAN_A_CHIP_CTL, 0x06, 0x2);
		break;
	default:
		/* Unexpected rate: leave the divider registers alone. */
		break;
	}
}
/*
 * Masked register writes applied once at codec init
 * (see tapan_codec_init_reg()); each entry is {reg, mask, value}.
 */
static const struct tapan_reg_mask_val tapan_codec_reg_init_val[] = {
	/* Initialize current threshold to 365MA
	 * number of wait and run cycles to 4096
	 */
	{TAPAN_A_RX_HPH_OCP_CTL, 0xE9, 0x69},
	{TAPAN_A_RX_COM_OCP_COUNT, 0xFF, 0xFF},
	{TAPAN_A_RX_HPH_L_TEST, 0x01, 0x01},
	{TAPAN_A_RX_HPH_R_TEST, 0x01, 0x01},
	/* Initialize gain registers to use register gain */
	{TAPAN_A_RX_HPH_L_GAIN, 0x20, 0x20},
	{TAPAN_A_RX_HPH_R_GAIN, 0x20, 0x20},
	{TAPAN_A_RX_LINE_1_GAIN, 0x20, 0x20},
	{TAPAN_A_RX_LINE_2_GAIN, 0x20, 0x20},
	{TAPAN_A_SPKR_DRV_GAIN, 0x04, 0x04},
	/* Set RDAC5 MUX to take input from DEM3_INV.
	 * This sets LO2 DAC to get input from DEM3_INV
	 * for LO1 and LO2 to work as differential outputs.
	 */
	{TAPAN_A_CDC_CONN_MISC, 0x04, 0x04},
	/* CLASS H config */
	{TAPAN_A_CDC_CONN_CLSH_CTL, 0x3C, 0x14},
	/* Use 16 bit sample size for TX1 to TX5 */
	{TAPAN_A_CDC_CONN_TX_SB_B1_CTL, 0x30, 0x20},
	{TAPAN_A_CDC_CONN_TX_SB_B2_CTL, 0x30, 0x20},
	{TAPAN_A_CDC_CONN_TX_SB_B3_CTL, 0x30, 0x20},
	{TAPAN_A_CDC_CONN_TX_SB_B4_CTL, 0x30, 0x20},
	{TAPAN_A_CDC_CONN_TX_SB_B5_CTL, 0x30, 0x20},
	/* Disable SPK SWITCH */
	{TAPAN_A_SPKR_DRV_DAC_CTL, 0x04, 0x00},
	/* Use 16 bit sample size for RX */
	{TAPAN_A_CDC_CONN_RX_SB_B1_CTL, 0xFF, 0xAA},
	{TAPAN_A_CDC_CONN_RX_SB_B2_CTL, 0xFF, 0x2A},
	/* enable HPF filter for TX paths */
	{TAPAN_A_CDC_TX1_MUX_CTL, 0x8, 0x0},
	{TAPAN_A_CDC_TX2_MUX_CTL, 0x8, 0x0},
	{TAPAN_A_CDC_TX3_MUX_CTL, 0x8, 0x0},
	{TAPAN_A_CDC_TX4_MUX_CTL, 0x8, 0x0},
	/* Compander zone selection */
	{TAPAN_A_CDC_COMP0_B4_CTL, 0x3F, 0x37},
	{TAPAN_A_CDC_COMP1_B4_CTL, 0x3F, 0x37},
	{TAPAN_A_CDC_COMP2_B4_CTL, 0x3F, 0x37},
	{TAPAN_A_CDC_COMP0_B5_CTL, 0x7F, 0x7F},
	{TAPAN_A_CDC_COMP1_B5_CTL, 0x7F, 0x7F},
	{TAPAN_A_CDC_COMP2_B5_CTL, 0x7F, 0x7F},
	/*
	 * Setup wavegen timer to 20msec and disable chopper
	 * as default. This corresponds to Compander OFF
	 */
	{TAPAN_A_RX_HPH_CNP_WG_CTL, 0xFF, 0xDB},
	{TAPAN_A_RX_HPH_CNP_WG_TIME, 0xFF, 0x58},
	{TAPAN_A_RX_HPH_BIAS_WG_OCP, 0xFF, 0x1A},
	{TAPAN_A_RX_HPH_CHOP_CTL, 0xFF, 0x24},
};
/*
 * Hand the AFE driver a pointer to the requested configuration blob.
 * Returns NULL (with an error log) for config types this codec does not
 * provide.
 */
void *tapan_get_afe_config(struct snd_soc_codec *codec,
			   enum afe_config_type config_type)
{
	struct tapan_priv *priv = snd_soc_codec_get_drvdata(codec);

	if (config_type == AFE_SLIMBUS_SLAVE_CONFIG)
		return &priv->slimbus_slave_cfg;
	if (config_type == AFE_CDC_REGISTERS_CONFIG)
		return &tapan_audio_reg_cfg;
	if (config_type == AFE_AANC_VERSION)
		return &tapan_cdc_aanc_version;

	pr_err("%s: Unknown config_type 0x%x\n", __func__, config_type);
	return NULL;
}
/*
 * Fill in the AFE SLIMbus slave configuration handed out via
 * tapan_get_afe_config(): TX ports at offset 0, RX ports at offset 16,
 * plus the device's 6-byte SLIMbus elemental address split into two
 * 32-bit words.
 */
static void tapan_init_slim_slave_cfg(struct snd_soc_codec *codec)
{
	struct tapan_priv *priv = snd_soc_codec_get_drvdata(codec);
	struct afe_param_cdc_slimbus_slave_cfg *cfg;
	struct wcd9xxx *wcd9xxx = codec->control_data;
	uint64_t eaddr = 0;

	pr_debug("%s\n", __func__);
	cfg = &priv->slimbus_slave_cfg;
	cfg->minor_version = 1;
	cfg->tx_slave_port_offset = 0;
	cfg->rx_slave_port_offset = 16;

	/*
	 * NOTE(review): copies the raw 6 bytes into the low bytes of a
	 * uint64_t, so the lsw/msw split below depends on host byte order --
	 * presumably little-endian targets; confirm if ever ported.
	 */
	memcpy(&eaddr, &wcd9xxx->slim->e_addr, sizeof(wcd9xxx->slim->e_addr));
	/* e-addr is 6-byte elemental address of the device */
	WARN_ON(sizeof(wcd9xxx->slim->e_addr) != 6);
	cfg->device_enum_addr_lsw = eaddr & 0xFFFFFFFF;
	cfg->device_enum_addr_msw = eaddr >> 32;

	pr_debug("%s: slimbus logical address 0x%llx\n", __func__, eaddr);
}
static void tapan_codec_init_reg(struct snd_soc_codec *codec)
{
u32 i;
for (i = 0; i < ARRAY_SIZE(tapan_codec_reg_init_val); i++)
snd_soc_update_bits(codec, tapan_codec_reg_init_val[i].reg,
tapan_codec_reg_init_val[i].mask,
tapan_codec_reg_init_val[i].val);
}
/* Unmask (0xFF) every SLIMbus slave-port interrupt enable register. */
static void tapan_slim_interface_init_reg(struct snd_soc_codec *codec)
{
	int reg = 0;

	while (reg < WCD9XXX_SLIM_NUM_PORT_REG) {
		wcd9xxx_interface_reg_write(codec->control_data,
					    TAPAN_SLIM_PGD_PORT_INT_EN0 + reg,
					    0xFF);
		reg++;
	}
}
/*
 * Request the shared SLIMbus IRQ and, on success, unmask the per-port
 * interrupt enables.  Returns 0 on success or the request_irq error code.
 */
static int tapan_setup_irqs(struct tapan_priv *tapan)
{
	struct snd_soc_codec *codec = tapan->codec;
	struct wcd9xxx *wcd9xxx = codec->control_data;
	struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;
	int ret;

	ret = wcd9xxx_request_irq(core_res, WCD9XXX_IRQ_SLIMBUS,
				  tapan_slimbus_irq, "SLIMBUS Slave", tapan);
	if (ret) {
		pr_err("%s: Failed to request irq %d\n", __func__,
		       WCD9XXX_IRQ_SLIMBUS);
		return ret;
	}

	tapan_slim_interface_init_reg(codec);
	return 0;
}
/* Release the SLIMbus IRQ acquired in tapan_setup_irqs(). */
static void tapan_cleanup_irqs(struct tapan_priv *tapan)
{
	struct snd_soc_codec *codec = tapan->codec;
	struct wcd9xxx *wcd9xxx = codec->control_data;
	struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;

	wcd9xxx_free_irq(core_res, WCD9XXX_IRQ_SLIMBUS, tapan);
}
/*
 * MBHC callback: clear bit 7 of the MBHC scaling mux register.
 * NOTE(review): despite the "enable" name this only clears the bit --
 * presumably that is what enables the mux bias block on Tapan; confirm
 * against the WCD9306 register spec.
 */
static void tapan_enable_mux_bias_block(struct snd_soc_codec *codec)
{
	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
			    0x80, 0x00);
}
/*
 * MBHC callback: set bits [5:4] in this headset's cfilt control register,
 * putting the cfilt into fast (external precharge) mode.
 */
static void tapan_put_cfilt_fast_mode(struct snd_soc_codec *codec,
				      struct wcd9xxx_mbhc *mbhc)
{
	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl,
			    0x30, 0x30);
}
/*
 * MBHC callback: Tapan-specific setup before MBHC calibration -- set bit 2
 * of MBHC_B1_CTL and bits [7:5] of TX_7_MBHC_EN.
 */
static void tapan_codec_specific_cal_setup(struct snd_soc_codec *codec,
					   struct wcd9xxx_mbhc *mbhc)
{
	snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
			    0x04, 0x04);
	snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_EN, 0xE0, 0xE0);
}
/*
 * MBHC callback: build the cfilt mode descriptor for switching between
 * fast (external precharge enabled) and slow mode.  cur_mode_val captures
 * the current bits [5:4] so the caller can detect whether a change is
 * required.
 */
static struct wcd9xxx_cfilt_mode tapan_codec_switch_cfilt_mode(
	struct wcd9xxx_mbhc *mbhc,
	bool fast)
{
	struct snd_soc_codec *codec = mbhc->codec;
	struct wcd9xxx_cfilt_mode mode;

	mode.reg_mode_val = fast ? WCD9XXX_CFILT_EXT_PRCHG_EN
				 : WCD9XXX_CFILT_EXT_PRCHG_DSBL;
	mode.cur_mode_val =
		snd_soc_read(codec, mbhc->mbhc_bias_regs.cfilt_ctl) & 0x30;
	mode.reg_mask = 0x30;
	return mode;
}
/*
 * MBHC callback: clear bits [6:5] of the micbias control register,
 * selecting the cfilt for this headset's micbias.
 */
static void tapan_select_cfilt(struct snd_soc_codec *codec,
			       struct wcd9xxx_mbhc *mbhc)
{
	snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x60, 0x00);
}
/* MBHC callback: identify this codec family to the common MBHC code. */
enum wcd9xxx_cdc_type tapan_get_cdc_type(void)
{
	return WCD9XXX_CDC_TYPE_TAPAN;
}
/*
 * Bring the headphone PAs into a known state for impedance detection,
 * pushing each previous register value onto @lh so it can be restored by
 * wcd9xxx_restore_registers() afterwards.  The write order of the table is
 * significant (buck/NCP/CnP bring-up sequence); do not reorder entries.
 */
static void wcd9xxx_prepare_hph_pa(struct wcd9xxx_mbhc *mbhc,
				   struct list_head *lh)
{
	int i;
	struct snd_soc_codec *codec = mbhc->codec;
	u32 delay;

	const struct wcd9xxx_reg_mask_val reg_set_paon[] = {
		{WCD9XXX_A_CDC_CLSH_B1_CTL, 0x0F, 0x00},
		{WCD9XXX_A_RX_HPH_CHOP_CTL, 0xFF, 0xA4},
		{WCD9XXX_A_RX_HPH_OCP_CTL, 0xFF, 0x67},
		{WCD9XXX_A_RX_HPH_L_TEST, 0x1, 0x0},
		{WCD9XXX_A_RX_HPH_R_TEST, 0x1, 0x0},
		{WCD9XXX_A_RX_HPH_BIAS_WG_OCP, 0xFF, 0x1A},
		{WCD9XXX_A_RX_HPH_CNP_WG_CTL, 0xFF, 0xDB},
		{WCD9XXX_A_RX_HPH_CNP_WG_TIME, 0xFF, 0x2A},
		{TAPAN_A_CDC_CONN_RX2_B2_CTL, 0xFF, 0x10},
		{WCD9XXX_A_CDC_CLK_OTHR_CTL, 0xFF, 0x05},
		{WCD9XXX_A_CDC_RX1_B6_CTL, 0xFF, 0x81},
		{WCD9XXX_A_CDC_CLK_RX_B1_CTL, 0x03, 0x03},
		{WCD9XXX_A_RX_HPH_L_GAIN, 0xFF, 0x2C},
		{WCD9XXX_A_CDC_RX2_B6_CTL, 0xFF, 0x81},
		{WCD9XXX_A_RX_HPH_R_GAIN, 0xFF, 0x2C},
		{WCD9XXX_A_BUCK_CTRL_CCL_4, 0xFF, 0x50},
		{WCD9XXX_A_BUCK_CTRL_VCL_1, 0xFF, 0x08},
		{WCD9XXX_A_BUCK_CTRL_CCL_1, 0xFF, 0x5B},
		{WCD9XXX_A_NCP_CLK, 0xFF, 0x9C},
		{WCD9XXX_A_NCP_CLK, 0xFF, 0xFC},
		{WCD9XXX_A_BUCK_MODE_3, 0xFF, 0xCE},
		{WCD9XXX_A_BUCK_CTRL_CCL_3, 0xFF, 0x6B},
		{WCD9XXX_A_BUCK_CTRL_CCL_3, 0xFF, 0x6F},
		{TAPAN_A_RX_BUCK_BIAS1, 0xFF, 0x62},
		{TAPAN_A_RX_HPH_BIAS_PA, 0xFF, 0x7A},
		{TAPAN_A_CDC_CLK_RDAC_CLK_EN_CTL, 0xFF, 0x02},
		{TAPAN_A_CDC_CLK_RDAC_CLK_EN_CTL, 0xFF, 0x06},
		{WCD9XXX_A_RX_COM_BIAS, 0xFF, 0x80},
		{WCD9XXX_A_BUCK_MODE_3, 0xFF, 0xC6},
		{WCD9XXX_A_BUCK_MODE_4, 0xFF, 0xE6},
		{WCD9XXX_A_BUCK_MODE_5, 0xFF, 0x02},
		{WCD9XXX_A_BUCK_MODE_1, 0xFF, 0xA1},
		/* Delay 1ms */
		{WCD9XXX_A_NCP_EN, 0xFF, 0xFF},
		/* Delay 1ms */
		{WCD9XXX_A_BUCK_MODE_5, 0xFF, 0x03},
		{WCD9XXX_A_BUCK_MODE_5, 0xFF, 0x7B},
		{WCD9XXX_A_CDC_CLSH_B1_CTL, 0xFF, 0xE6},
		{WCD9XXX_A_RX_HPH_L_DAC_CTL, 0xFF, 0x40},
		{WCD9XXX_A_RX_HPH_L_DAC_CTL, 0xFF, 0xC0},
		{WCD9XXX_A_RX_HPH_R_DAC_CTL, 0xFF, 0x40},
		{WCD9XXX_A_RX_HPH_R_DAC_CTL, 0xFF, 0xC0},
		{WCD9XXX_A_NCP_STATIC, 0xFF, 0x08},
		{WCD9XXX_A_RX_HPH_L_DAC_CTL, 0x03, 0x01},
		{WCD9XXX_A_RX_HPH_R_DAC_CTL, 0x03, 0x01},
	};

	/*
	 * Configure PA in class-AB, -18dB gain,
	 * companding off, OCP off, Chopping ON
	 */
	for (i = 0; i < ARRAY_SIZE(reg_set_paon); i++) {
		/*
		 * Some of the codec registers like BUCK_MODE_1
		 * and NCP_EN requires 1ms wait time for them
		 * to take effect. Other register writes for
		 * PA configuration do not require any wait time.
		 */
		if (reg_set_paon[i].reg == WCD9XXX_A_BUCK_MODE_1 ||
		    reg_set_paon[i].reg == WCD9XXX_A_NCP_EN)
			delay = 1000;
		else
			delay = 0;
		wcd9xxx_soc_update_bits_push(codec, lh,
					     reg_set_paon[i].reg,
					     reg_set_paon[i].mask,
					     reg_set_paon[i].val, delay);
	}
	pr_debug("%s: PAs are prepared\n", __func__);
	return;
}
/*
 * Switch both headphone PAs (bits [5:4] of CNP_EN) on or off in "static"
 * mode and block until the CnP wave generator has ramped, so the change
 * does not pop.  Always returns 0.
 */
static int wcd9xxx_enable_static_pa(struct wcd9xxx_mbhc *mbhc, bool enable)
{
	struct snd_soc_codec *codec = mbhc->codec;
	u8 pa_bits;
	int wg_time;

	wg_time = snd_soc_read(codec, WCD9XXX_A_RX_HPH_CNP_WG_TIME) *
		  TAPAN_WG_TIME_FACTOR_US;
	/*
	 * Tapan requires additional time to enable PA.
	 * It is observed during experiments that we need
	 * an additional wait time about 0.35 times of
	 * the WG_TIME
	 */
	wg_time += (int) (wg_time * 35) / 100;

	if (enable)
		pa_bits = 0x30;
	else
		pa_bits = 0x0;
	snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CNP_EN, 0x30, pa_bits);

	/* Wait for wave gen time to avoid pop noise */
	usleep_range(wg_time, wg_time + WCD9XXX_USLEEP_RANGE_MARGIN_US);
	pr_debug("%s: PAs are %s as static mode (wg_time %d)\n", __func__,
		 enable ? "enabled" : "disabled", wg_time);
	return 0;
}
/*
 * MBHC impedance-detection state machine hook.  Each stage performs a
 * strictly ordered register sequence:
 *  - PRE_MEASURE: save/override PA, MBHC and RX-path registers (via the
 *    local __wr macro, which pushes old values for later restore), feed 0V
 *    static data to both RX channels and enable the PAs.
 *  - POST_MEASURE: reprogram the CnP wave generator, feed -10mV static
 *    data and re-enable the PAs for the second measurement.
 *  - PA_DISABLE: drop the PAs (unless the DAC state says they must stay)
 *    and restore every register saved during PRE_MEASURE.
 * Returns 0 or a negative error from a failed register push.
 */
static int tapan_setup_zdet(struct wcd9xxx_mbhc *mbhc,
			    enum mbhc_impedance_detect_stages stage)
{
	int ret = 0;
	struct snd_soc_codec *codec = mbhc->codec;
	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
	const int mux_wait_us = 25;

	switch (stage) {
	case MBHC_ZDET_PRE_MEASURE:
		INIT_LIST_HEAD(&tapan->reg_save_restore);
		/* Configure PA */
		wcd9xxx_prepare_hph_pa(mbhc, &tapan->reg_save_restore);

/* Write with save/restore: old value is pushed onto reg_save_restore. */
#define __wr(reg, mask, value)						\
	do {								\
		ret = wcd9xxx_soc_update_bits_push(codec,		\
					&tapan->reg_save_restore,	\
					reg, mask, value, 0);		\
		if (ret < 0)						\
			return ret;					\
	} while (0)

		/* Setup MBHC */
		__wr(WCD9XXX_A_MBHC_SCALING_MUX_1, 0x7F, 0x40);
		__wr(WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF0);
		__wr(WCD9XXX_A_TX_7_MBHC_TEST_CTL, 0xFF, 0x78);
		__wr(WCD9XXX_A_TX_7_MBHC_EN, 0xFF, 0xEC);
		__wr(WCD9XXX_A_CDC_MBHC_TIMER_B4_CTL, 0xFF, 0x45);
		__wr(WCD9XXX_A_CDC_MBHC_TIMER_B5_CTL, 0xFF, 0x80);
		__wr(WCD9XXX_A_CDC_MBHC_CLK_CTL, 0xFF, 0x0A);
		snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x2);
		__wr(WCD9XXX_A_CDC_MBHC_CLK_CTL, 0xFF, 0x02);
		/* Enable Impedance Detection */
		__wr(WCD9XXX_A_MBHC_HPH, 0xFF, 0xC8);
		/*
		 * CnP setup for 0mV
		 * Route static data as input to noise shaper
		 */
		__wr(TAPAN_A_CDC_RX1_B3_CTL, 0xFF, 0x02);
		__wr(TAPAN_A_CDC_RX2_B3_CTL, 0xFF, 0x02);
		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_L_TEST,
				    0x02, 0x00);
		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_R_TEST,
				    0x02, 0x00);
		/* Reset the HPHL static data pointer */
		__wr(TAPAN_A_CDC_RX1_B2_CTL, 0xFF, 0x00);
		/* Four consecutive writes to set 0V as static data input */
		snd_soc_write(codec, TAPAN_A_CDC_RX1_B1_CTL, 0x00);
		snd_soc_write(codec, TAPAN_A_CDC_RX1_B1_CTL, 0x00);
		snd_soc_write(codec, TAPAN_A_CDC_RX1_B1_CTL, 0x00);
		snd_soc_write(codec, TAPAN_A_CDC_RX1_B1_CTL, 0x00);
		/* Reset the HPHR static data pointer */
		__wr(TAPAN_A_CDC_RX2_B2_CTL, 0xFF, 0x00);
		/* Four consecutive writes to set 0V as static data input */
		snd_soc_write(codec, TAPAN_A_CDC_RX2_B1_CTL, 0x00);
		snd_soc_write(codec, TAPAN_A_CDC_RX2_B1_CTL, 0x00);
		snd_soc_write(codec, TAPAN_A_CDC_RX2_B1_CTL, 0x00);
		snd_soc_write(codec, TAPAN_A_CDC_RX2_B1_CTL, 0x00);
		/* Enable the HPHL and HPHR PA */
		wcd9xxx_enable_static_pa(mbhc, true);
		break;
	case MBHC_ZDET_POST_MEASURE:
		/* Turn off ICAL */
		snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_2, 0xF0);
		wcd9xxx_enable_static_pa(mbhc, false);
		/*
		 * Setup CnP wavegen to ramp to the desired
		 * output using a 40ms ramp
		 */
		/* CnP wavegen current to 0.5uA */
		snd_soc_write(codec, WCD9XXX_A_RX_HPH_BIAS_WG_OCP, 0x1A);
		/* Set the current division ratio to 2000 */
		snd_soc_write(codec, WCD9XXX_A_RX_HPH_CNP_WG_CTL, 0xDF);
		/* Set the wavegen timer to max (60msec) */
		snd_soc_write(codec, WCD9XXX_A_RX_HPH_CNP_WG_TIME, 0xA0);
		/* Set the CnP reference current to sc_bias */
		snd_soc_write(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x6D);
		snd_soc_write(codec, TAPAN_A_CDC_RX1_B2_CTL, 0x00);
		/* Four consecutive writes to set -10mV as static data input */
		snd_soc_write(codec, TAPAN_A_CDC_RX1_B1_CTL, 0x00);
		snd_soc_write(codec, TAPAN_A_CDC_RX1_B1_CTL, 0x1F);
		snd_soc_write(codec, TAPAN_A_CDC_RX1_B1_CTL, 0x19);
		snd_soc_write(codec, TAPAN_A_CDC_RX1_B1_CTL, 0xAA);
		snd_soc_write(codec, TAPAN_A_CDC_RX2_B2_CTL, 0x00);
		/* Four consecutive writes to set -10mV as static data input */
		snd_soc_write(codec, TAPAN_A_CDC_RX2_B1_CTL, 0x00);
		snd_soc_write(codec, TAPAN_A_CDC_RX2_B1_CTL, 0x1F);
		snd_soc_write(codec, TAPAN_A_CDC_RX2_B1_CTL, 0x19);
		snd_soc_write(codec, TAPAN_A_CDC_RX2_B1_CTL, 0xAA);
		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_L_TEST,
				    0x02, 0x02);
		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_R_TEST,
				    0x02, 0x02);
		/* Enable the HPHL and HPHR PA and wait for 60mS */
		wcd9xxx_enable_static_pa(mbhc, true);
		snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
				    0x7F, 0x40);
		/* Give the scaling mux time to settle. */
		usleep_range(mux_wait_us,
			     mux_wait_us + WCD9XXX_USLEEP_RANGE_MARGIN_US);
		break;
	case MBHC_ZDET_PA_DISABLE:
		if (!mbhc->hph_pa_dac_state)
			wcd9xxx_enable_static_pa(mbhc, false);
		wcd9xxx_restore_registers(codec, &tapan->reg_save_restore);
		break;
	default:
		dev_dbg(codec->dev, "%s: Case %d not supported\n",
			__func__, stage);
		break;
	}
#undef __wr

	return ret;
}
/*
 * Convert the three-point MBHC impedance-detection measurements into
 * left (*zl) and right (*zr) impedance estimates.
 *
 * l/r each hold three samples -- presumably baseline, biased and -10mV
 * readings from tapan_setup_zdet() (TODO confirm sample ordering).  The
 * impedance is the ratio of the two deltas scaled by
 * TAPAN_ZDET_MUL_FACTOR; a zero denominator yields 0 instead of a
 * divide-by-zero.
 *
 * Previously only @mbhc was NULL-checked while l/r/zl/zr were dereferenced
 * unconditionally; guard them all.
 */
static void tapan_compute_impedance(struct wcd9xxx_mbhc *mbhc, s16 *l, s16 *r,
				    uint32_t *zl, uint32_t *zr)
{
	int zln, zld;
	int zrn, zrd;
	int rl = 0, rr = 0;

	if (!mbhc || !l || !r || !zl || !zr) {
		pr_err("%s: NULL pointer for MBHC", __func__);
		return;
	}

	zln = (l[1] - l[0]) * TAPAN_ZDET_MUL_FACTOR;
	zld = (l[2] - l[0]);
	if (zld)
		rl = zln / zld;

	zrn = (r[1] - r[0]) * TAPAN_ZDET_MUL_FACTOR;
	zrd = (r[2] - r[0]);
	if (zrd)
		rr = zrn / zrd;

	*zl = rl;
	*zr = rr;
}
/* Tapan-specific callbacks handed to the common wcd9xxx MBHC core. */
static const struct wcd9xxx_mbhc_cb mbhc_cb = {
	.enable_mux_bias_block = tapan_enable_mux_bias_block,
	.cfilt_fast_mode = tapan_put_cfilt_fast_mode,
	.codec_specific_cal = tapan_codec_specific_cal_setup,
	.switch_cfilt_mode = tapan_codec_switch_cfilt_mode,
	.select_cfilt = tapan_select_cfilt,
	.get_cdc_type = tapan_get_cdc_type,
	.setup_zdet = tapan_setup_zdet,
	.compute_impedance = tapan_compute_impedance,
};
/*
 * Public entry point for machine drivers: start MBHC headset detection
 * with the supplied configuration.  Returns the wcd9xxx_mbhc_start()
 * result.
 */
int tapan_hs_detect(struct snd_soc_codec *codec,
		    struct wcd9xxx_mbhc_config *mbhc_cfg)
{
	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);

	return wcd9xxx_mbhc_start(&tapan->mbhc, mbhc_cfg);
}
EXPORT_SYMBOL(tapan_hs_detect);
/* Public entry point for machine drivers: stop MBHC headset detection. */
void tapan_hs_detect_exit(struct snd_soc_codec *codec)
{
	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);

	wcd9xxx_mbhc_stop(&tapan->mbhc);
}
EXPORT_SYMBOL(tapan_hs_detect_exit);
/*
 * Register the machine driver's codec-event callback; it is invoked by
 * this driver, e.g. with WCD9XXX_CODEC_EVENT_CODEC_UP after SSR recovery
 * (see tapan_post_reset_cb()).
 */
void tapan_event_register(
	int (*machine_event_cb)(struct snd_soc_codec *codec,
				enum wcd9xxx_codec_event),
	struct snd_soc_codec *codec)
{
	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);

	tapan->machine_codec_event_cb = machine_event_cb;
}
EXPORT_SYMBOL(tapan_event_register);
/*
 * SSR "device down" callback (installed via wcd9xxx_ssr_register()):
 * mark the sound card offline.  Always returns 0.
 */
static int tapan_device_down(struct wcd9xxx *wcd9xxx)
{
	struct snd_soc_codec *codec;

	codec = (struct snd_soc_codec *)(wcd9xxx->ssr_priv);
	snd_soc_card_change_online_state(codec->card, 0);
	return 0;
}
/* Mapping of MBHC logical interrupts to this chip's IRQ numbers. */
static const struct wcd9xxx_mbhc_intr cdc_intr_ids = {
	.poll_plug_rem = WCD9XXX_IRQ_MBHC_REMOVAL,
	.shortavg_complete = WCD9XXX_IRQ_MBHC_SHORT_TERM,
	.potential_button_press = WCD9XXX_IRQ_MBHC_PRESS,
	.button_release = WCD9XXX_IRQ_MBHC_RELEASE,
	.dce_est_complete = WCD9XXX_IRQ_MBHC_POTENTIAL,
	.insertion = WCD9XXX_IRQ_MBHC_INSERTION,
	.hph_left_ocp = WCD9306_IRQ_HPH_PA_OCPL_FAULT,
	.hph_right_ocp = WCD9306_IRQ_HPH_PA_OCPR_FAULT,
	.hs_jack_switch = WCD9306_IRQ_MBHC_JACK_SWITCH,
};
/*
 * SSR "device up" callback: re-initialize the codec after a subsystem
 * restart.  Rebuilds the ASoC register cache from the pristine copy,
 * replays defaults/pdata/init tables, reinitializes resmgr and MBHC,
 * re-requests IRQs, notifies the machine driver, and flags every DAI as
 * having gone through bus recovery.  Runs under codec->mutex.
 * Returns 0 or a negative error.
 */
static int tapan_post_reset_cb(struct wcd9xxx *wcd9xxx)
{
	int ret = 0;
	int rco_clk_rate;
	struct snd_soc_codec *codec;
	struct tapan_priv *tapan;
	int count;

	codec = (struct snd_soc_codec *)(wcd9xxx->ssr_priv);
	tapan = snd_soc_codec_get_drvdata(codec);

	snd_soc_card_change_online_state(codec->card, 1);

	mutex_lock(&codec->mutex);

	/* Reset the register cache to hardware defaults before replaying. */
	if (codec->reg_def_copy) {
		pr_debug("%s: Update ASOC cache", __func__);
		kfree(codec->reg_cache);
		codec->reg_cache = kmemdup(codec->reg_def_copy,
					   codec->reg_size, GFP_KERNEL);
		if (!codec->reg_cache) {
			pr_err("%s: Cache update failed!\n", __func__);
			mutex_unlock(&codec->mutex);
			return -ENOMEM;
		}
	}

	if (spkr_drv_wrnd == 1)
		snd_soc_update_bits(codec, TAPAN_A_SPKR_DRV_EN, 0x80, 0x80);

	tapan_update_reg_defaults(codec);
	tapan_update_reg_mclk_rate(wcd9xxx);
	tapan_codec_init_reg(codec);
	ret = tapan_handle_pdata(tapan);
	if (IS_ERR_VALUE(ret))
		pr_err("%s: bad pdata\n", __func__);

	tapan_slim_interface_init_reg(codec);
	wcd9xxx_resmgr_post_ssr(&tapan->resmgr);

	/* MBHC must be torn down and rebuilt against the fresh hardware. */
	wcd9xxx_mbhc_deinit(&tapan->mbhc);
	/* 1.0 parts run the RC oscillator path off 12.288 MHz, later 9.6. */
	if (TAPAN_IS_1_0(wcd9xxx->version))
		rco_clk_rate = TAPAN_MCLK_CLK_12P288MHZ;
	else
		rco_clk_rate = TAPAN_MCLK_CLK_9P6MHZ;
	ret = wcd9xxx_mbhc_init(&tapan->mbhc, &tapan->resmgr, codec,
				tapan_enable_mbhc_micbias,
				&mbhc_cb, &cdc_intr_ids, rco_clk_rate,
				TAPAN_CDC_ZDET_SUPPORTED);
	if (ret)
		pr_err("%s: mbhc init failed %d\n", __func__, ret);
	else
		wcd9xxx_mbhc_start(&tapan->mbhc, tapan->mbhc.mbhc_cfg);

	tapan_cleanup_irqs(tapan);
	ret = tapan_setup_irqs(tapan);
	if (ret)
		pr_err("%s: Failed to setup irq: %d\n", __func__, ret);

	tapan->machine_codec_event_cb(codec, WCD9XXX_CODEC_EVENT_CODEC_UP);

	/* Tell every DAI its bus went down so streams recover cleanly. */
	for (count = 0; count < NUM_CODEC_DAIS; count++)
		tapan->dai[count].bus_down_in_recovery = true;

	mutex_unlock(&codec->mutex);
	return ret;
}
/*
 * No Tapan-specific resmgr register overrides: wcd9xxx_resmgr_init()
 * receives an all-zero (default) address table.
 */
static struct wcd9xxx_reg_address tapan_reg_address = {
};
/*
 * Install the SSR (subsystem restart) hooks on the wcd9xxx core:
 * @device_down_cb runs when the DSP/bus goes down, @device_up_cb after it
 * comes back, and @priv is passed through via control->ssr_priv.
 * Always returns 0.
 */
static int wcd9xxx_ssr_register(struct wcd9xxx *control,
				int (*device_down_cb)(struct wcd9xxx *wcd9xxx),
				int (*device_up_cb)(struct wcd9xxx *wcd9xxx),
				void *priv)
{
	control->dev_down = device_down_cb;
	control->post_reset = device_up_cb;
	control->ssr_priv = priv;
	return 0;
}
/*
 * Look up the regulator consumer matching @name among the supplies the
 * wcd9xxx core already acquired.  Returns the regulator, or NULL when the
 * codec/core is not initialized or no supply with that name exists.
 */
static struct regulator *tapan_codec_find_regulator(
	struct snd_soc_codec *codec,
	const char *name)
{
	int i;
	struct wcd9xxx *core = NULL;

	if (codec == NULL) {
		/*
		 * Can't use dev_err() here: codec itself is NULL, and the
		 * old dev_err(codec->dev, ...) dereferenced it.
		 */
		pr_err("%s: codec not initialized\n", __func__);
		return NULL;
	}
	core = dev_get_drvdata(codec->dev->parent);
	if (core == NULL) {
		dev_err(codec->dev, "%s: core not initialized\n", __func__);
		return NULL;
	}

	for (i = 0; i < core->num_of_supplies; i++) {
		if (core->supplies[i].supply &&
		    !strcmp(core->supplies[i].supply, name))
			return core->supplies[i].consumer;
	}

	return NULL;
}
/*
 * Bring the RC oscillator and clock buffers up (@enable true) or down
 * (@enable false) for out-of-band register access such as QFUSE reads.
 * The write order and the usleep_range() settling delays follow the chip
 * bring-up sequence and must not be reordered.
 */
static void tapan_enable_config_rco(struct wcd9xxx *core, bool enable)
{
	struct wcd9xxx_core_resource *core_res = &core->core_res;

	if (enable) {
		/* Power up the bandgap reference first. */
		wcd9xxx_reg_update(core, WCD9XXX_A_BIAS_CENTRAL_BG_CTL,
				   0x80, 0x80);
		wcd9xxx_reg_update(core, WCD9XXX_A_BIAS_CENTRAL_BG_CTL,
				   0x04, 0x04);
		wcd9xxx_reg_update(core, WCD9XXX_A_BIAS_CENTRAL_BG_CTL,
				   0x01, 0x01);
		usleep_range(1000, 1000);
		wcd9xxx_reg_update(core, WCD9XXX_A_BIAS_CENTRAL_BG_CTL,
				   0x80, 0x00);

		/* Enable RC Oscillator */
		wcd9xxx_reg_update(core, WCD9XXX_A_RC_OSC_FREQ, 0x10, 0x00);
		wcd9xxx_reg_write(core_res, WCD9XXX_A_BIAS_OSC_BG_CTL, 0x17);
		usleep_range(5, 5);
		wcd9xxx_reg_update(core, WCD9XXX_A_RC_OSC_FREQ, 0x80, 0x80);
		wcd9xxx_reg_update(core, WCD9XXX_A_RC_OSC_TEST, 0x80, 0x80);
		usleep_range(10, 10);
		wcd9xxx_reg_update(core, WCD9XXX_A_RC_OSC_TEST, 0x80, 0x00);
		usleep_range(20, 20);
		wcd9xxx_reg_update(core, WCD9XXX_A_CLK_BUFF_EN1, 0x08, 0x08);
		/* Enable MCLK and wait 1ms till it gets enabled */
		wcd9xxx_reg_write(core_res, WCD9XXX_A_CLK_BUFF_EN2, 0x02);
		usleep_range(1000, 1000);
		/* Enable CLK BUFF and wait for 1.2ms */
		wcd9xxx_reg_update(core, WCD9XXX_A_CLK_BUFF_EN1, 0x01, 0x01);
		usleep_range(1000, 1200);

		wcd9xxx_reg_update(core, WCD9XXX_A_CLK_BUFF_EN2, 0x02, 0x00);
		wcd9xxx_reg_update(core, WCD9XXX_A_CLK_BUFF_EN2, 0x04, 0x04);
		wcd9xxx_reg_update(core, WCD9XXX_A_CDC_CLK_MCLK_CTL,
				   0x01, 0x01);
		usleep_range(50, 50);
	} else {
		/* Tear down in reverse: clocks first, then osc and bandgap. */
		wcd9xxx_reg_update(core, WCD9XXX_A_CLK_BUFF_EN2, 0x04, 0x00);
		usleep_range(50, 50);
		wcd9xxx_reg_update(core, WCD9XXX_A_CLK_BUFF_EN2, 0x02, 0x02);
		wcd9xxx_reg_update(core, WCD9XXX_A_CLK_BUFF_EN1, 0x05, 0x00);
		usleep_range(50, 50);
		wcd9xxx_reg_update(core, WCD9XXX_A_RC_OSC_FREQ, 0x80, 0x00);
		usleep_range(10, 10);
		wcd9xxx_reg_write(core_res, WCD9XXX_A_BIAS_OSC_BG_CTL, 0x16);
		wcd9xxx_reg_update(core, WCD9XXX_A_BIAS_CENTRAL_BG_CTL,
				   0x03, 0x00);
		usleep_range(100, 100);
	}
}
/*
 * Probe the QFUSE block to distinguish WCD9306 from the cut-down WCD9302.
 * Brings the RC oscillator up, triggers a QFUSE sense when @sensed is
 * false, polls the status for up to one second, then reads the fuse data:
 * any non-zero byte means wcd9302.
 *
 * Returns true for wcd9306, false for wcd9302.  On an uninitialized core
 * it returns true, matching the old "return -EINVAL" which implicitly
 * converted to true in this bool function.
 */
static bool tapan_check_wcd9306(struct device *cdc_dev, bool sensed)
{
	struct wcd9xxx *core = dev_get_drvdata(cdc_dev->parent);
	struct wcd9xxx_core_resource *core_res;
	u8 reg_val;
	bool ret = true;
	unsigned long timeout;
	bool timedout;

	/* Check core before deriving core_res from it (was reversed). */
	if (!core) {
		dev_err(cdc_dev, "%s: core not initialized\n", __func__);
		return true;
	}
	core_res = &core->core_res;

	tapan_enable_config_rco(core, true);

	if (sensed == false) {
		reg_val = wcd9xxx_reg_read(core_res, TAPAN_A_QFUSE_CTL);
		wcd9xxx_reg_write(core_res, TAPAN_A_QFUSE_CTL,
				  (reg_val | 0x03));
	}

	/* Poll QFUSE status for up to one second. */
	timeout = jiffies + HZ;
	do {
		if ((wcd9xxx_reg_read(core_res, TAPAN_A_QFUSE_STATUS)))
			break;
	} while (!(timedout = time_after(jiffies, timeout)));

	if (wcd9xxx_reg_read(core_res, TAPAN_A_QFUSE_DATA_OUT1) ||
	    wcd9xxx_reg_read(core_res, TAPAN_A_QFUSE_DATA_OUT2)) {
		dev_info(cdc_dev, "%s: wcd9302 detected\n", __func__);
		ret = false;
	} else {
		dev_info(cdc_dev, "%s: wcd9306 detected\n", __func__);
	}

	tapan_enable_config_rco(core, false);
	return ret;
}
static int tapan_codec_probe(struct snd_soc_codec *codec)
{
struct wcd9xxx *control;
struct tapan_priv *tapan;
struct wcd9xxx_pdata *pdata;
struct wcd9xxx *wcd9xxx;
struct snd_soc_dapm_context *dapm = &codec->dapm;
int ret = 0;
int i, rco_clk_rate;
void *ptr = NULL;
struct wcd9xxx_core_resource *core_res;
codec->control_data = dev_get_drvdata(codec->dev->parent);
control = codec->control_data;
wcd9xxx_ssr_register(control, tapan_device_down,
tapan_post_reset_cb, (void *)codec);
dev_info(codec->dev, "%s()\n", __func__);
tapan = kzalloc(sizeof(struct tapan_priv), GFP_KERNEL);
if (!tapan) {
dev_err(codec->dev, "Failed to allocate private data\n");
return -ENOMEM;
}
for (i = 0 ; i < NUM_DECIMATORS; i++) {
tx_hpf_work[i].tapan = tapan;
tx_hpf_work[i].decimator = i + 1;
INIT_DELAYED_WORK(&tx_hpf_work[i].dwork,
tx_hpf_corner_freq_callback);
}
snd_soc_codec_set_drvdata(codec, tapan);
/* codec resmgr module init */
wcd9xxx = codec->control_data;
core_res = &wcd9xxx->core_res;
pdata = dev_get_platdata(codec->dev->parent);
ret = wcd9xxx_resmgr_init(&tapan->resmgr, codec, core_res, pdata,
&pdata->micbias, &tapan_reg_address,
WCD9XXX_CDC_TYPE_TAPAN);
if (ret) {
pr_err("%s: wcd9xxx init failed %d\n", __func__, ret);
return ret;
}
tapan->cp_regulators[CP_REG_BUCK] = tapan_codec_find_regulator(codec,
WCD9XXX_SUPPLY_BUCK_NAME);
tapan->cp_regulators[CP_REG_BHELPER] = tapan_codec_find_regulator(codec,
"cdc-vdd-buckhelper");
tapan->clsh_d.buck_mv = tapan_codec_get_buck_mv(codec);
/*
* If 1.8 volts is requested on the vdd_cp line, then
* assume that S4 is in a dynamically switchable state
* and can switch between 1.8 volts and 2.15 volts
*/
if (tapan->clsh_d.buck_mv == WCD9XXX_CDC_BUCK_MV_1P8)
tapan->clsh_d.is_dynamic_vdd_cp = true;
wcd9xxx_clsh_init(&tapan->clsh_d, &tapan->resmgr);
if (TAPAN_IS_1_0(control->version))
rco_clk_rate = TAPAN_MCLK_CLK_12P288MHZ;
else
rco_clk_rate = TAPAN_MCLK_CLK_9P6MHZ;
ret = wcd9xxx_mbhc_init(&tapan->mbhc, &tapan->resmgr, codec,
tapan_enable_mbhc_micbias,
&mbhc_cb, &cdc_intr_ids, rco_clk_rate,
TAPAN_CDC_ZDET_SUPPORTED);
if (ret) {
pr_err("%s: mbhc init failed %d\n", __func__, ret);
return ret;
}
tapan->codec = codec;
for (i = 0; i < COMPANDER_MAX; i++) {
tapan->comp_enabled[i] = 0;
tapan->comp_fs[i] = COMPANDER_FS_48KHZ;
}
tapan->intf_type = wcd9xxx_get_intf_type();
tapan->aux_pga_cnt = 0;
tapan->aux_l_gain = 0x1F;
tapan->aux_r_gain = 0x1F;
tapan->ldo_h_users = 0;
tapan->micb_2_users = 0;
tapan->lb_mode = false;
tapan_update_reg_defaults(codec);
tapan_update_reg_mclk_rate(wcd9xxx);
tapan_codec_init_reg(codec);
ret = tapan_handle_pdata(tapan);
if (IS_ERR_VALUE(ret)) {
dev_err(codec->dev, "%s: bad pdata\n", __func__);
goto err_pdata;
}
if (spkr_drv_wrnd > 0) {
WCD9XXX_BG_CLK_LOCK(&tapan->resmgr);
wcd9xxx_resmgr_get_bandgap(&tapan->resmgr,
WCD9XXX_BANDGAP_AUDIO_MODE);
WCD9XXX_BG_CLK_UNLOCK(&tapan->resmgr);
}
ptr = kmalloc((sizeof(tapan_rx_chs) +
sizeof(tapan_tx_chs)), GFP_KERNEL);
if (!ptr) {
pr_err("%s: no mem for slim chan ctl data\n", __func__);
ret = -ENOMEM;
goto err_nomem_slimch;
}
if (tapan->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
snd_soc_dapm_new_controls(dapm, tapan_dapm_i2s_widgets,
ARRAY_SIZE(tapan_dapm_i2s_widgets));
snd_soc_dapm_add_routes(dapm, audio_i2s_map,
ARRAY_SIZE(audio_i2s_map));
for (i = 0; i < ARRAY_SIZE(tapan_i2s_dai); i++)
INIT_LIST_HEAD(&tapan->dai[i].wcd9xxx_ch_list);
} else if (tapan->intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
for (i = 0; i < NUM_CODEC_DAIS; i++) {
INIT_LIST_HEAD(&tapan->dai[i].wcd9xxx_ch_list);
init_waitqueue_head(&tapan->dai[i].dai_wait);
}
tapan_init_slim_slave_cfg(codec);
}
if (codec_ver == WCD9306) {
snd_soc_add_codec_controls(codec, tapan_9306_snd_controls,
ARRAY_SIZE(tapan_9306_snd_controls));
snd_soc_dapm_new_controls(dapm, tapan_9306_dapm_widgets,
ARRAY_SIZE(tapan_9306_dapm_widgets));
snd_soc_dapm_add_routes(dapm, wcd9306_map,
ARRAY_SIZE(wcd9306_map));
} else {
snd_soc_dapm_add_routes(dapm, wcd9302_map,
ARRAY_SIZE(wcd9302_map));
}
control->num_rx_port = TAPAN_RX_MAX;
control->rx_chs = ptr;
memcpy(control->rx_chs, tapan_rx_chs, sizeof(tapan_rx_chs));
control->num_tx_port = TAPAN_TX_MAX;
control->tx_chs = ptr + sizeof(tapan_rx_chs);
memcpy(control->tx_chs, tapan_tx_chs, sizeof(tapan_tx_chs));
snd_soc_dapm_sync(dapm);
(void) tapan_setup_irqs(tapan);
atomic_set(&kp_tapan_priv, (unsigned long)tapan);
mutex_lock(&dapm->codec->mutex);
if (codec_ver == WCD9306) {
snd_soc_dapm_disable_pin(dapm, "ANC HPHL");
snd_soc_dapm_disable_pin(dapm, "ANC HPHR");
snd_soc_dapm_disable_pin(dapm, "ANC HEADPHONE");
snd_soc_dapm_disable_pin(dapm, "ANC EAR PA");
snd_soc_dapm_disable_pin(dapm, "ANC EAR");
}
snd_soc_dapm_sync(dapm);
mutex_unlock(&dapm->codec->mutex);
codec->ignore_pmdown_time = 1;
if (ret)
tapan_cleanup_irqs(tapan);
return ret;
err_pdata:
kfree(ptr);
err_nomem_slimch:
kfree(tapan);
return ret;
}
/*
 * tapan_codec_remove() - tear down everything tapan_codec_probe() set up:
 * release the speaker-workaround bandgap vote, free IRQs, deinit MBHC and
 * the resource manager, then free the private data.
 */
static int tapan_codec_remove(struct snd_soc_codec *codec)
{
	struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
	int i;

	WCD9XXX_BG_CLK_LOCK(&tapan->resmgr);
	atomic_set(&kp_tapan_priv, 0);
	if (spkr_drv_wrnd > 0)
		wcd9xxx_resmgr_put_bandgap(&tapan->resmgr,
					   WCD9XXX_BANDGAP_AUDIO_MODE);
	WCD9XXX_BG_CLK_UNLOCK(&tapan->resmgr);

	tapan_cleanup_irqs(tapan);

	/* cleanup MBHC */
	wcd9xxx_mbhc_deinit(&tapan->mbhc);
	/* cleanup resmgr */
	wcd9xxx_resmgr_deinit(&tapan->resmgr);

	/* Drop the cached regulator handles before freeing. */
	for (i = 0; i < CP_REG_MAX; i++)
		tapan->cp_regulators[i] = NULL;

	kfree(tapan);
	return 0;
}
/*
 * ASoC codec driver descriptor for Tapan.  Registered from tapan_probe()
 * with the DAI table matching the detected variant/interface; the common
 * controls, widgets and routes here are shared by WCD9306 and WCD9302.
 */
static struct snd_soc_codec_driver soc_codec_dev_tapan = {
	.probe = tapan_codec_probe,
	.remove = tapan_codec_remove,
	.read = tapan_read,
	.write = tapan_write,
	.readable_register = tapan_readable,
	.volatile_register = tapan_volatile,
	.reg_cache_size = TAPAN_CACHE_SIZE,
	.reg_cache_default = tapan_reset_reg_defaults,
	.reg_word_size = 1,	/* 8-bit registers */
	.controls = tapan_common_snd_controls,
	.num_controls = ARRAY_SIZE(tapan_common_snd_controls),
	.dapm_widgets = tapan_common_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(tapan_common_dapm_widgets),
	.dapm_routes = audio_map,
	.num_dapm_routes = ARRAY_SIZE(audio_map),
};
#ifdef CONFIG_PM
/* System suspend hook: nothing to save here, just a trace message. */
static int tapan_suspend(struct device *dev)
{
	dev_dbg(dev, "%s: system suspend\n", __func__);
	return 0;
}

/* System resume hook: notify resource-manager listeners of the resume. */
static int tapan_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	/* NOTE(review): assumes platform drvdata was set to tapan_priv
	 * somewhere outside this chunk -- confirm before relying on it. */
	struct tapan_priv *tapan = platform_get_drvdata(pdev);
	dev_dbg(dev, "%s: system resume\n", __func__);
	/* Notify */
	wcd9xxx_resmgr_notifier_call(&tapan->resmgr, WCD9XXX_EVENT_POST_RESUME);
	return 0;
}

/* Wired into tapan_codec_driver when CONFIG_PM is enabled. */
static const struct dev_pm_ops tapan_pm_ops = {
	.suspend = tapan_suspend,
	.resume = tapan_resume,
};
#endif
/*
 * tapan_probe() - platform probe; detect WCD9302 vs WCD9306 and register
 * the ASoC codec with the matching DAI table.
 *
 * Note: tapan_check_wcd9306() returns bool, so the original
 * "is_wcd9306 < 0" error check could never fire (a bool is never
 * negative); that dead branch is removed.  Both variants share the I2S
 * DAI table — only the SLIMbus table differs — so the duplicated
 * register calls are collapsed into one.
 */
static int __devinit tapan_probe(struct platform_device *pdev)
{
	struct snd_soc_dai_driver *dai_drv;
	int num_dai;
	bool is_wcd9306;

	is_wcd9306 = tapan_check_wcd9306(&pdev->dev, false);
	codec_ver = is_wcd9306 ? WCD9306 : WCD9302;

	if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
		if (is_wcd9306) {
			dai_drv = tapan_dai;
			num_dai = ARRAY_SIZE(tapan_dai);
		} else {
			dai_drv = tapan9302_dai;
			num_dai = ARRAY_SIZE(tapan9302_dai);
		}
	} else if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C) {
		dai_drv = tapan_i2s_dai;
		num_dai = ARRAY_SIZE(tapan_i2s_dai);
	} else {
		/* Unknown interface: original registered nothing, returned 0. */
		return 0;
	}

	return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_tapan,
				      dai_drv, num_dai);
}
/* Platform remove: unregister the ASoC codec registered in tapan_probe(). */
static int __devexit tapan_remove(struct platform_device *pdev)
{
	snd_soc_unregister_codec(&pdev->dev);
	return 0;
}
/* Platform driver binding "tapan_codec" devices to probe/remove above. */
static struct platform_driver tapan_codec_driver = {
	.probe = tapan_probe,
	.remove = tapan_remove,
	.driver = {
		.name = "tapan_codec",
		.owner = THIS_MODULE,
#ifdef CONFIG_PM
		.pm = &tapan_pm_ops,
#endif
	},
};
/* Module init: register the tapan_codec platform driver. */
static int __init tapan_codec_init(void)
{
	return platform_driver_register(&tapan_codec_driver);
}
/* Module exit: unregister the tapan_codec platform driver. */
static void __exit tapan_codec_exit(void)
{
	platform_driver_unregister(&tapan_codec_driver);
}
module_init(tapan_codec_init);
module_exit(tapan_codec_exit);
MODULE_DESCRIPTION("Tapan codec driver");
MODULE_LICENSE("GPL v2");
|
smac0628/caf-LA.BF.1.1.2.1
|
sound/soc/codecs/wcd9306.c
|
C
|
gpl-2.0
| 197,279
|
/* linux/drivers/media/video/samsung/tv20/s5pv210/hdcp_s5pv210.c
*
* hdcp raw ftn file for Samsung TVOut driver
*
* Copyright (c) 2010 Samsung Electronics
* http://www.samsungsemi.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <plat/gpio-cfg.h>
#include <mach/regs-gpio.h>
#include <mach/gpio.h>
#include <plat/regs-hdmi.h>
#include "../ddc.h"
#include "tv_out_s5pv210.h"
/* for Operation check */
#ifdef CONFIG_TVOUT_RAW_DBG
#define S5P_HDCP_DEBUG 1
#define S5P_HDCP_AUTH_DEBUG 1
#endif
#ifdef S5P_HDCP_DEBUG
#define HDCPPRINTK(fmt, args...) \
printk(KERN_INFO "\t\t[HDCP] %s: " fmt, __func__ , ## args)
#else
#define HDCPPRINTK(fmt, args...)
#endif
/* for authentication key check */
#ifdef S5P_HDCP_AUTH_DEBUG
#define AUTHPRINTK(fmt, args...) \
printk("\t\t\t[AUTHKEY] %s: " fmt, __func__ , ## args)
#else
#define AUTHPRINTK(fmt, args...)
#endif
/* HDMI transmitter operating mode (DVI vs full HDMI signalling). */
enum hdmi_run_mode {
	DVI_MODE = 0,
	HDMI_MODE
};
/* Output timing selection; names suggest SD/HD video modes — not used
 * within this chunk, presumably consumed by the timing setup code. */
enum hdmi_resolution {
	SD480P = 0,
	SD480I,
	WWSD480P,
	HD720P,
	SD576P,
	WWSD576P,
	HD1080I
};
/* Test-pattern color-bar orientation. */
enum hdmi_color_bar_type {
	HORIZONTAL = 0,
	VERTICAL
};
/*
 * Events handed from the HDMI interrupt path to the hdcp_work() bottom
 * half.  Stored in hdcp_info.event as a bitmask: (1 << event).
 */
enum hdcp_event {
	/* Stop HDCP */
	HDCP_EVENT_STOP = 0,
	/* Start HDCP*/
	HDCP_EVENT_START,
	/* Start to read Bksv, Bcaps */
	HDCP_EVENT_READ_BKSV_START,
	/* Start to write Aksv, An */
	HDCP_EVENT_WRITE_AKSV_START,
	/* Start to check if Ri is equal to Rj */
	HDCP_EVENT_CHECK_RI_START,
	/* Start 2nd authentication process */
	HDCP_EVENT_SECOND_AUTH_START
};
/*
 * Progress of the HDCP authentication state machine, kept in
 * hdcp_info.auth_status.  Ordering matters: compare_r_val() uses
 * "< AKSV_WRITE_DONE" to detect a reset mid-comparison.
 */
enum hdcp_state {
	NOT_AUTHENTICATED = 0,
	RECEIVER_READ_READY,
	BCAPS_READ_DONE,
	BKSV_READ_DONE,
	AN_WRITE_DONE,
	AKSV_WRITE_DONE,
	FIRST_AUTHENTICATION_DONE,
	SECOND_AUTHENTICATION_RDY,
	RECEIVER_FIFOLSIT_READY,
	SECOND_AUTHENTICATION_DONE,
};
/*
 * Below CSC_TYPE is temporary. CSC_TYPE enum.
 * may be included in SetSD480pVars_60Hz etc.
 *
 * LR : Limited Range (16~235)
 * FR : Full Range (0~255)
 *
 * (NOTE(review): the CSC note above looks left over from an earlier
 * revision; the enum below is the HDMI/HDCP interrupt source list.)
 */
enum hdmi_intr_src {
	WAIT_FOR_ACTIVE_RX = 0,
	WDT_FOR_REPEATER,
	EXCHANGE_KSV,
	UPDATE_P_VAL,
	UPDATE_R_VAL,
	AUDIO_OVERFLOW,
	AUTHEN_ACK,
	UNKNOWN_INT
};
/* Driver-wide HDCP state, shared between the IRQ path and the workqueue. */
struct s5p_hdcp_info {
	bool is_repeater;	/* downstream device advertised REPEATER_SET */
	bool hpd_status;
	u32 time_out;
	u32 hdcp_enable;
	spinlock_t lock;
	spinlock_t reset_lock;	/* serializes reset_authentication() */
	struct i2c_client *client;
	wait_queue_head_t waitq;
	enum hdcp_event event;		/* pending-event bitmask for hdcp_work() */
	enum hdcp_state auth_status;	/* authentication state machine */
	struct work_struct work;
};
/*
 * Single global HDCP context.  Members not listed (locks, waitq, work,
 * hpd_status) are zero-initialized; the locks and work item are
 * presumably set up in init code outside this chunk — TODO confirm.
 */
static struct s5p_hdcp_info hdcp_info = {
	.is_repeater = false,
	.time_out = 0,
	.hdcp_enable = false,
	.client = NULL,
	.event = HDCP_EVENT_STOP,
	.auth_status = NOT_AUTHENTICATED,
};
/* Misc constants used by the authentication steps below. */
#define HDCP_RI_OFFSET 0x08
#define INFINITE 0xffffffff
#define HDMI_SYS_ENABLE (1 << 0)
#define HDMI_ASP_ENABLE (1 << 2)
#define HDMI_ASP_DISABLE (~HDMI_ASP_ENABLE)
/* BStatus topology-limit flags (status[0]/status[1] in check_repeater()). */
#define MAX_DEVS_EXCEEDED (0x1 << 7)
#define MAX_CASCADE_EXCEEDED (0x1 << 3)
/* Negative error codes returned by check_repeater(). */
#define MAX_CASCADE_EXCEEDED_ERROR (-1)
#define MAX_DEVS_EXCEEDED_ERROR (-2)
#define REPEATER_ILLEGAL_DEVICE_ERROR (-3)
#define REPEATER_TIMEOUT_ERROR (-4)
/* DDC transfer sizes (bytes). */
#define AINFO_SIZE 1
#define BCAPS_SIZE 1
#define BSTATUS_SIZE 2
#define SHA_1_HASH_SIZE 20
#define KSV_FIFO_READY (0x1 << 5)
/* spmoon for test : it's not in manual */
/* Bits of S5P_HDCP_RX_KSV_LIST_CTRL used while pushing the KSV list. */
#define SET_HDCP_KSV_WRITE_DONE (0x1 << 3)
#define CLEAR_HDCP_KSV_WRITE_DONE (~SET_HDCP_KSV_WRITE_DONE)
#define SET_HDCP_KSV_LIST_EMPTY (0x1 << 2)
#define CLEAR_HDCP_KSV_LIST_EMPTY (~SET_HDCP_KSV_LIST_EMPTY)
#define SET_HDCP_KSV_END (0x1 << 1)
#define CLEAR_HDCP_KSV_END (~SET_HDCP_KSV_END)
#define SET_HDCP_KSV_READ (0x1 << 0)
#define CLEAR_HDCP_KSV_READ (~SET_HDCP_KSV_READ)
/* Bits of S5P_HDCP_AUTH_STATUS checked after the SHA-1 compare. */
#define SET_HDCP_SHA_VALID_READY (0x1 << 1)
#define CLEAR_HDCP_SHA_VALID_READY (~SET_HDCP_SHA_VALID_READY)
#define SET_HDCP_SHA_VALID (0x1 << 0)
#define CLEAR_HDCP_SHA_VALID (~SET_HDCP_SHA_VALID)
#define TRANSMIT_EVERY_VSYNC (0x1 << 1)
/* must be checked */
static bool g_sw_reset;	/* true while reset_authentication() toggles HPD */
static bool g_is_dvi;
static bool g_av_mute;
static bool g_audio_en;
/*
 * 1st authentication step: send the Ainfo byte (value 0) to the receiver
 * over DDC.  Returns false on I2C failure.
 */
static bool write_ainfo(void)
{
	u8 ainfo[2] = { HDCP_Ainfo, 0 };
	int ret = ddc_write(ainfo, 2);

	if (ret < 0)
		pr_err("%s::Can't write ainfo data through i2c bus\n",
			__func__);

	return ret >= 0;
}
/*
 * Write the An data to Rx
 *
 * Latches the 8-byte An session value from the HDMI IP registers and
 * sends it to the receiver over DDC.  Returns false on I2C failure.
 */
static bool write_an(void)
{
	int ret = 0;
	u8 an[AN_SIZE+1];
	/* an[0] carries the DDC register offset; an[1..8] the An bytes. */
	an[0] = HDCP_An;
	an[1] = readb(g_hdmi_base + S5P_HDCP_An_0_0);
	an[2] = readb(g_hdmi_base + S5P_HDCP_An_0_1);
	an[3] = readb(g_hdmi_base + S5P_HDCP_An_0_2);
	an[4] = readb(g_hdmi_base + S5P_HDCP_An_0_3);
	an[5] = readb(g_hdmi_base + S5P_HDCP_An_1_0);
	an[6] = readb(g_hdmi_base + S5P_HDCP_An_1_1);
	an[7] = readb(g_hdmi_base + S5P_HDCP_An_1_2);
	an[8] = readb(g_hdmi_base + S5P_HDCP_An_1_3);
	ret = ddc_write(an, AN_SIZE + 1);
	if (ret < 0) {
		pr_err("%s::Can't write an data through i2c bus\n",
			__func__);
	}
#ifdef S5P_HDCP_AUTH_DEBUG
	{
		u16 i = 0;
		for (i = 1; i < AN_SIZE + 1; i++)
			AUTHPRINTK("HDCPAn[%d]: 0x%x\n", i, an[i]);
	}
#endif
	return (ret < 0) ? false : true;
}
/*
 * Write the Aksv data to Rx
 *
 * Latches the 5-byte Aksv from the HDMI IP registers and sends it over
 * DDC.  An all-zero Aksv is rejected (key not loaded into the IP yet).
 * Returns false on all-zero Aksv or I2C failure.
 */
static bool write_aksv(void)
{
	int ret = 0;
	u8 aksv[AKSV_SIZE+1];
	/* aksv[0] carries the DDC register offset; aksv[1..5] the Aksv. */
	aksv[0] = HDCP_Aksv;
	aksv[1] = readb(g_hdmi_base + S5P_HDCP_AKSV_0_0);
	aksv[2] = readb(g_hdmi_base + S5P_HDCP_AKSV_0_1);
	aksv[3] = readb(g_hdmi_base + S5P_HDCP_AKSV_0_2);
	aksv[4] = readb(g_hdmi_base + S5P_HDCP_AKSV_0_3);
	aksv[5] = readb(g_hdmi_base + S5P_HDCP_AKSV_1);
	if (aksv[1] == 0 &&
	    aksv[2] == 0 &&
	    aksv[3] == 0 &&
	    aksv[4] == 0 &&
	    aksv[5] == 0)
		return false;
	ret = ddc_write(aksv, AKSV_SIZE + 1);
	if (ret < 0) {
		pr_err("%s::Can't write aksv data through i2c bus\n",
			__func__);
	}
#ifdef S5P_HDCP_AUTH_DEBUG
	{
		u16 i = 0;
		for (i = 1; i < AKSV_SIZE + 1; i++)
			AUTHPRINTK("HDCPAksv[%d]: 0x%x\n", i, aksv[i]);
	}
#endif
	return (ret < 0) ? false : true;
}
/*
 * Read Bcaps over DDC, mirror it into the BCAPS register and record
 * whether the attached device is a repeater or a plain sink.
 */
static bool read_bcaps(void)
{
	u8 bcaps[BCAPS_SIZE] = {0};

	if (ddc_read(HDCP_Bcaps, bcaps, BCAPS_SIZE) < 0) {
		pr_err("%s::Can't read bcaps data from i2c bus\n",
			__func__);
		return false;
	}

	writel(bcaps[0], g_hdmi_base + S5P_HDCP_BCAPS);
	HDCPPRINTK("BCAPS(from i2c) : 0x%08x\n", bcaps[0]);

	hdcp_info.is_repeater = (bcaps[0] & REPEATER_SET) ? true : false;

	HDCPPRINTK("attached device type : %s !!\n",
		hdcp_info.is_repeater ? "REPEATER" : "SINK");
	HDCPPRINTK("BCAPS(from sfr) = 0x%08x\n",
		readl(g_hdmi_base + S5P_HDCP_BCAPS));

	return true;
}
/*
 * Re-read Bksv over DDC after a failed validity check.  A well-formed
 * KSV has exactly 20 ones and 20 zeros across its 40 bits; when valid,
 * it is latched into the HDMI IP registers.
 *
 * Fixes the "Suucess" typo in the debug message and drops a dead
 * commented-out writel.
 */
static bool read_again_bksv(void)
{
	u8 bk_sv[BKSV_SIZE] = {0, 0, 0, 0, 0};
	u8 i = 0;
	u8 j = 0;
	u32 no_one = 0;
	u32 no_zero = 0;
	u32 result = 0;
	int ret = 0;

	ret = ddc_read(HDCP_Bksv, bk_sv, BKSV_SIZE);
	if (ret < 0) {
		pr_err("%s::Can't read bk_sv data from i2c bus\n",
			__func__);
		return false;
	}

#ifdef S5P_HDCP_AUTH_DEBUG
	for (i = 0; i < BKSV_SIZE; i++)
		AUTHPRINTK("i2c read : Bksv[%d]: 0x%x\n", i, bk_sv[i]);
#endif

	/* Count set and clear bits of the 40-bit KSV. */
	for (i = 0; i < BKSV_SIZE; i++) {
		for (j = 0; j < 8; j++) {
			result = bk_sv[i] & (0x1 << j);
			if (result == 0)
				no_zero += 1;
			else
				no_one += 1;
		}
	}

	if ((no_zero == 20) && (no_one == 20)) {
		HDCPPRINTK("Success: no_zero, and no_one is 20\n");
		writel(bk_sv[0], g_hdmi_base + S5P_HDCP_BKSV_0_0);
		writel(bk_sv[1], g_hdmi_base + S5P_HDCP_BKSV_0_1);
		writel(bk_sv[2], g_hdmi_base + S5P_HDCP_BKSV_0_2);
		writel(bk_sv[3], g_hdmi_base + S5P_HDCP_BKSV_0_3);
		writel(bk_sv[4], g_hdmi_base + S5P_HDCP_BKSV_1);
#ifdef S5P_HDCP_AUTH_DEBUG
		for (i = 0; i < BKSV_SIZE; i++)
			AUTHPRINTK("set reg : Bksv[%d]: 0x%x\n", i, bk_sv[i]);
#endif
		return true;
	}

	pr_err("%s::no_zero or no_one is NOT 20\n", __func__);
	return false;
}
/*
 * Read and validate the receiver's Bksv (a valid KSV has exactly 20 ones
 * and 20 zeros across its 40 bits), then latch it into the HDMI IP.  On
 * an invalid read, retries via read_again_bksv() up to 14 times with
 * 200ms busy-waits (~2.8s of mdelay — heavy, but kept as-is).
 */
static bool read_bksv(void)
{
	u8 bk_sv[BKSV_SIZE] = {0, 0, 0, 0, 0};
	int i = 0;
	int j = 0;
	u32 no_one = 0;
	u32 no_zero = 0;
	u32 result = 0;
	u32 count = 0;
	int ret = 0;
	ret = ddc_read(HDCP_Bksv, bk_sv, BKSV_SIZE);
	if (ret < 0) {
		pr_err("%s::Can't read bk_sv data from i2c bus\n",
			__func__);
		return false;
	}
#ifdef S5P_HDCP_AUTH_DEBUG
	for (i = 0; i < BKSV_SIZE; i++)
		AUTHPRINTK("i2c read : Bksv[%d]: 0x%x\n", i, bk_sv[i]);
#endif
	/* Count set/clear bits of the 40-bit KSV. */
	for (i = 0; i < BKSV_SIZE; i++) {
		for (j = 0; j < 8; j++) {
			result = bk_sv[i] & (0x1 << j);
			if (result == 0)
				no_zero++;
			else
				no_one++;
		}
	}
	if ((no_zero == 20) && (no_one == 20)) {
		writel(bk_sv[0], g_hdmi_base + S5P_HDCP_BKSV_0_0);
		writel(bk_sv[1], g_hdmi_base + S5P_HDCP_BKSV_0_1);
		writel(bk_sv[2], g_hdmi_base + S5P_HDCP_BKSV_0_2);
		writel(bk_sv[3], g_hdmi_base + S5P_HDCP_BKSV_0_3);
		writel(bk_sv[4], g_hdmi_base + S5P_HDCP_BKSV_1);
#ifdef S5P_HDCP_AUTH_DEBUG
		for (i = 0; i < BKSV_SIZE; i++)
			AUTHPRINTK("set reg : Bksv[%d]: 0x%x\n", i, bk_sv[i]);
#endif
		HDCPPRINTK("Success: no_zero, and no_one is 20\n");
	} else {
		HDCPPRINTK("Failed: no_zero or no_one is NOT 20\n");
		/* Retry loop: up to 14 attempts, 200ms apart. */
		while (!read_again_bksv()) {
			count++;
			mdelay(200);
			if (count == 14)
				return false;
		}
	}
	return true;
}
/*
 * Compare the R value of Tx with that of Rx
 *
 * Reads Ri from the transmitter registers and Ri' from the receiver over
 * DDC, retrying up to R_VAL_RETRY_CNT times.  A match additionally
 * requires Ri to be non-zero; the result is reported to the IP via
 * S5P_HDCP_CHECK_RESULT.  On mismatch/abort, the event and auth state
 * are reset.  Bails out early if the state machine fell back below
 * AKSV_WRITE_DONE (authentication was reset concurrently).
 */
static bool compare_r_val(void)
{
	int ret = 0;
	u8 ri[2] = {0, 0};
	u8 rj[2] = {0, 0};
	u16 i;
	for (i = 0; i < R_VAL_RETRY_CNT; i++) {
		if (hdcp_info.auth_status < AKSV_WRITE_DONE) {
			ret = false;
			break;
		}
		/* Read R value from Tx */
		ri[0] = readl(g_hdmi_base + S5P_HDCP_Ri_0);
		ri[1] = readl(g_hdmi_base + S5P_HDCP_Ri_1);
		/* Read R value from Rx */
		ret = ddc_read(HDCP_Ri, rj, 2);
		if (ret < 0) {
			pr_err("%s::Can't read r data from i2c bus\n",
				__func__);
			return false;
		}
#ifdef S5P_HDCP_AUTH_DEBUG
		AUTHPRINTK("retries :: %d\n", i);
		printk("\t\t\t Rx(ddc)\t ->");
		printk("rj[0]: 0x%02x, rj[1]: 0x%02x\n", rj[0], rj[1]);
		printk("\t\t\t Tx(register)\t ->");
		printk("ri[0]: 0x%02x, ri[1]: 0x%02x\n", ri[0], ri[1]);
#endif
		/* Compare R value */
		if ((ri[0] == rj[0]) && (ri[1] == rj[1]) && (ri[0] | ri[1])) {
			writel(Ri_MATCH_RESULT__YES,
				g_hdmi_base + S5P_HDCP_CHECK_RESULT);
			HDCPPRINTK("R0, R0' is matched!!\n");
			/* for simplay test */
			mdelay(1);
			ret = true;
			break;
		} else {
			writel(Ri_MATCH_RESULT__NO,
				g_hdmi_base + S5P_HDCP_CHECK_RESULT);
			HDCPPRINTK("R0, R0' is not matched!!\n");
			ret = false;
		}
		ri[0] = 0;
		ri[1] = 0;
		rj[0] = 0;
		rj[1] = 0;
	}
	if (!ret) {
		hdcp_info.event = HDCP_EVENT_STOP;
		hdcp_info.auth_status = NOT_AUTHENTICATED;
	}
	return ret ? true : false;
}
/* Enable or disable software control of the HPD line. */
static void sw_hpd_enable(bool enable)
{
	u8 reg = readb(g_hdmi_base + S5P_HPD) & ~HPD_SW_ENABLE;

	if (enable)
		reg |= HPD_SW_ENABLE;
	writeb(reg, g_hdmi_base + S5P_HPD);
}
/*
 * Drive the software HPD level.
 *
 * @param level [in] if 0 - low; otherwise, high
 */
static void set_sw_hpd(bool level)
{
	u8 reg = readb(g_hdmi_base + S5P_HPD) & ~HPD_ON;

	if (level)
		reg |= HPD_ON;
	writeb(reg, g_hdmi_base + S5P_HPD);
}
/*
 * Reset Authentication
 *
 * Fully restarts the HDCP handshake: disables HDCP and encryption, mutes
 * A/V, clears results, pulses the software HPD line (with the real HPD
 * interrupts masked so the pulse isn't seen as a cable event), then
 * re-arms the HDCP status interrupts and re-enables HDCP.  The whole
 * sequence runs under reset_lock with IRQs off; the HPD toggle order
 * (enable SW HPD, low, high, disable SW HPD) is deliberate — keep it.
 */
static void reset_authentication(void)
{
	u8 reg;
	spin_lock_irq(&hdcp_info.reset_lock);
	hdcp_info.time_out = INFINITE;
	hdcp_info.event = HDCP_EVENT_STOP;
	hdcp_info.auth_status = NOT_AUTHENTICATED;
	/* Disable hdcp */
	writeb(0x0, g_hdmi_base + S5P_HDCP_CTRL1);
	writeb(0x0, g_hdmi_base + S5P_HDCP_CTRL2);
	s5p_hdmi_mute_en(true);
	/* Disable encryption */
	HDCPPRINTK("Stop Encryption by reset!!\n");
	writeb(HDCP_ENC_DIS, g_hdmi_base + S5P_ENC_EN);
	HDCPPRINTK("Now reset authentication\n");
	/* disable hdmi status enable reg. */
	reg = readb(g_hdmi_base + S5P_STATUS_EN);
	reg &= HDCP_STATUS_DIS_ALL;
	writeb(reg, g_hdmi_base + S5P_STATUS_EN);
	/* clear all result */
	writeb(CLEAR_ALL_RESULTS, g_hdmi_base + S5P_HDCP_CHECK_RESULT);
	/*
	 * 1. Mask HPD plug and unplug interrupt
	 * disable HPD INT
	 */
	g_sw_reset = true;	/* tells the IRQ path this HPD pulse is ours */
	reg = s5p_hdmi_get_enabled_interrupt();
	s5p_hdmi_disable_interrupts(HDMI_IRQ_HPD_PLUG);
	s5p_hdmi_disable_interrupts(HDMI_IRQ_HPD_UNPLUG);
	/* for simplay test */
	mdelay(50);
	/* 2. Enable software HPD */
	sw_hpd_enable(true);
	/* 3. Make software HPD logical 0 */
	set_sw_hpd(false);
	/* 4. Make software HPD logical 1 */
	set_sw_hpd(true);
	/* 5. Disable software HPD */
	sw_hpd_enable(false);
	/* 6. Unmask HPD plug and unplug interrupt */
	if (reg & 1<<HDMI_IRQ_HPD_PLUG)
		s5p_hdmi_enable_interrupts(HDMI_IRQ_HPD_PLUG);
	if (reg & 1<<HDMI_IRQ_HPD_UNPLUG)
		s5p_hdmi_enable_interrupts(HDMI_IRQ_HPD_UNPLUG);
	g_sw_reset = false;
	/* clear result */
#if 0
	writel(Ri_MATCH_RESULT__NO, g_hdmi_base + S5P_HDCP_CHECK_RESULT);
	writel(readl(g_hdmi_base + S5P_HDMI_CON_0) & HDMI_DIS,
		g_hdmi_base + S5P_HDMI_CON_0);
	writel(readl(g_hdmi_base + S5P_HDMI_CON_0) | HDMI_EN,
		g_hdmi_base + S5P_HDMI_CON_0);
#endif
	writel(CLEAR_ALL_RESULTS, g_hdmi_base + S5P_HDCP_CHECK_RESULT);
	/* set hdcp_int enable */
	reg = readb(g_hdmi_base + S5P_STATUS_EN);
	reg |= WTFORACTIVERX_INT_OCCURRED |
		WATCHDOG_INT_OCCURRED |
		EXCHANGEKSV_INT_OCCURRED |
		UPDATE_RI_INT_OCCURRED;
	writeb(reg, g_hdmi_base + S5P_STATUS_EN);
	/* HDCP Enable */
	writeb(CP_DESIRED_EN, g_hdmi_base + S5P_HDCP_CTRL1);
	spin_unlock_irq(&hdcp_info.reset_lock);
}
/*
* Set the timing parameter for load e-fuse key.
*/
/* TODO: must use clk_get for pclk rate */
#define PCLK_D_RATE_FOR_HDCP 166000000
/*
 * Integer ceiling of val / time, written without "val + time - 1" so it
 * cannot overflow for large val (matches the original's semantics).
 */
static u32 efuse_ceil(u32 val, u32 time)
{
	return val / time + ((val % time) ? 1 : 0);
}
#if 0
/*
 * Program the e-fuse timing registers from the (hard-coded) PCLK rate.
 * Currently compiled out — hdcp_loadkey() also keeps its call under
 * "#if 0"; the TODO above says clk_get should supply the real rate.
 */
static void hdcp_efuse_timing(void)
{
	u32 time, val;
	/* TODO: must use clk_get for pclk rate */
	time = 1000000000/PCLK_D_RATE_FOR_HDCP;	/* pclk period in ns */
	val = efuse_ceil(EFUSE_ADDR_WIDTH, time);
	writeb(val, g_hdmi_base + S5P_EFUSE_ADDR_WIDTH);
	val = efuse_ceil(EFUSE_SIGDEV_ASSERT, time);
	writeb(val, g_hdmi_base + S5P_EFUSE_SIGDEV_ASSERT);
	val = efuse_ceil(EFUSE_SIGDEV_DEASSERT, time);
	writeb(val, g_hdmi_base + S5P_EFUSE_SIGDEV_DEASSERT);
	val = efuse_ceil(EFUSE_PRCHG_ASSERT, time);
	writeb(val, g_hdmi_base + S5P_EFUSE_PRCHG_ASSERT);
	val = efuse_ceil(EFUSE_PRCHG_DEASSERT, time);
	writeb(val, g_hdmi_base + S5P_EFUSE_PRCHG_DEASSERT);
	val = efuse_ceil(EFUSE_FSET_ASSERT, time);
	writeb(val, g_hdmi_base + S5P_EFUSE_FSET_ASSERT);
	val = efuse_ceil(EFUSE_FSET_DEASSERT, time);
	writeb(val, g_hdmi_base + S5P_EFUSE_FSET_DEASSERT);
	val = efuse_ceil(EFUSE_SENSING, time);
	writeb(val, g_hdmi_base + S5P_EFUSE_SENSING);
	val = efuse_ceil(EFUSE_SCK_ASSERT, time);
	writeb(val, g_hdmi_base + S5P_EFUSE_SCK_ASSERT);
	val = efuse_ceil(EFUSE_SCK_DEASSERT, time);
	writeb(val, g_hdmi_base + S5P_EFUSE_SCK_DEASSERT);
	val = efuse_ceil(EFUSE_SDOUT_OFFSET, time);
	writeb(val, g_hdmi_base + S5P_EFUSE_SDOUT_OFFSET);
	val = efuse_ceil(EFUSE_READ_OFFSET, time);
	writeb(val, g_hdmi_base + S5P_EFUSE_READ_OFFSET);
}
#endif
/*
 * Load the HDCP device key from e-fuse memory.
 *
 * Returns 0 on success, -EINVAL when the ECC check fails or the
 * controller never reports completion.  The original ignored the poll
 * timeout: if EFUSE_ECC_DONE never rose, it fell through to the ECC_FAIL
 * check and could report success with no key loaded — now an explicit
 * timeout error.
 */
static int hdcp_loadkey(void)
{
	u8 status;
	int time_out = HDMI_TIME_OUT;
#if 0
	hdcp_efuse_timing();
#endif
	/* read HDCP key from E-Fuse */
	writeb(EFUSE_CTRL_ACTIVATE, g_hdmi_base + S5P_EFUSE_CTRL);
	do {
		status = readb(g_hdmi_base + S5P_EFUSE_STATUS);
		time_out--;
	} while (!(status & EFUSE_ECC_DONE) && time_out);

	if (!(status & EFUSE_ECC_DONE)) {
		pr_err("%s::timed out waiting for EFUSE_ECC_DONE\n",
			__func__);
		return -EINVAL;
	}

	if (readb(g_hdmi_base + S5P_EFUSE_STATUS) & EFUSE_ECC_FAIL) {
		pr_err("%s::Can't load key from fuse ctrl.\n", __func__);
		return -EINVAL;
	}

	HDCPPRINTK("%s::readb S5P_EFUSE_STATUS for EFUSE_ECC_FAIL: 0\n",
		__func__);
	return 0;
}
/*
 * Start encryption
 *
 * If the last Ri comparison matched, waits (up to HDMI_TIME_OUT ms) for
 * the IP to report AUTHENTICATED, then enables link encryption and
 * unmutes A/V.  Otherwise encryption is disabled and A/V stays muted.
 */
static void start_encryption(void)
{
	int time_out = HDMI_TIME_OUT;
	if (readl(g_hdmi_base + S5P_HDCP_CHECK_RESULT) ==
	    Ri_MATCH_RESULT__YES) {
		do {
			if (readl(g_hdmi_base + S5P_STATUS) & AUTHENTICATED) {
				writel(HDCP_ENC_ENABLE, g_hdmi_base + S5P_ENC_EN);
				HDCPPRINTK("Encryption start!!\n");
				s5p_hdmi_mute_en(false);
				break;
			} else {
				time_out--;
				mdelay(1);	/* 1ms per poll */
			}
		} while (time_out);
		if (time_out <= 0)
			pr_err("%s::readl S5P_STATUS for AUTHENTICATED fail!!\n",
				__func__);
	} else {
		writel(HDCP_ENC_DISABLE, g_hdmi_base + S5P_ENC_EN);
		s5p_hdmi_mute_en(true);
		HDCPPRINTK("Encryption stop!!\n");
	}
}
/*
 * Check whether Rx is repeater or not
 *
 * 2nd authentication stage: wait for the repeater's KSV FIFO, validate
 * the topology limits from BStatus, feed the downstream KSV list to the
 * HDMI IP one 5-byte KSV at a time, then compare the receiver's SHA-1
 * (V') against the hardware's.
 *
 * Mixed return convention, kept for the caller (second_auth_start_bh):
 *   true (1)  - SHA-1 valid, repeater authenticated
 *   false (0) - DDC/register failure
 *   negative  - MAX_CASCADE/MAX_DEVS/REPEATER_TIMEOUT error codes
 */
static int check_repeater(void)
{
	int ret = 0;
	u8 i = 0;
	u16 j = 0;
	u8 bcaps[BCAPS_SIZE] = {0};
	u8 status[BSTATUS_SIZE] = {0, 0};
	u8 rx_v[SHA_1_HASH_SIZE] = {0};
	u8 ksv_list[HDCP_MAX_DEVS*HDCP_KSV_SIZE] = {0};
	u32 dev_cnt;
	u32 stat;
	bool ksv_fifo_ready = false;
	memset(rx_v, 0x0, SHA_1_HASH_SIZE);
	memset(ksv_list, 0x0, HDCP_MAX_DEVS * HDCP_KSV_SIZE);
	/* Poll Bcaps for KSV_FIFO_READY, up to ~5s (50 x 100ms). */
	while (j <= 50) {
		ret = ddc_read(HDCP_Bcaps, bcaps, BCAPS_SIZE);
		if (ret < 0) {
			pr_err("%s::Can't read bcaps data from i2c bus\n",
				__func__);
			return false;
		}
		if (bcaps[0] & KSV_FIFO_READY) {
			HDCPPRINTK("ksv fifo is ready\n");
			ksv_fifo_ready = true;
			writel(bcaps[0], g_hdmi_base + S5P_HDCP_BCAPS);
			break;
		} else {
			HDCPPRINTK("ksv fifo is not ready\n");
			ksv_fifo_ready = false;
			mdelay(100);
			j++;
		}
		bcaps[0] = 0;
	}
	if (!ksv_fifo_ready)
		return REPEATER_TIMEOUT_ERROR;
	/*
	 * Check MAX_CASCADE_EXCEEDED
	 * or MAX_DEVS_EXCEEDED indicator
	 */
	ret = ddc_read(HDCP_BStatus, status, BSTATUS_SIZE);
	if (ret < 0) {
		pr_err("%s::Can't read status data from i2c bus\n",
			__func__);
		return false;
	}
	/* MAX_CASCADE_EXCEEDED || MAX_DEVS_EXCEEDED */
	if (status[1] & MAX_CASCADE_EXCEEDED) {
		HDCPPRINTK("MAX_CASCADE_EXCEEDED\n");
		return MAX_CASCADE_EXCEEDED_ERROR;
	} else if (status[0] & MAX_DEVS_EXCEEDED) {
		HDCPPRINTK("MAX_CASCADE_EXCEEDED\n");
		return MAX_DEVS_EXCEEDED_ERROR;
	}
	writel(status[0], g_hdmi_base + S5P_HDCP_BSTATUS_0);
	writel(status[1], g_hdmi_base + S5P_HDCP_BSTATUS_1);
	/* Read KSV list */
	dev_cnt = status[0] & 0x7f;	/* low 7 bits = downstream device count */
	HDCPPRINTK("status[0] :0x%08x, status[1] :0x%08x!!\n",
		status[0], status[1]);
	if (dev_cnt) {
		u32 val = 0;
		/* read ksv */
		ret = ddc_read(HDCP_KSVFIFO, ksv_list,
			dev_cnt * HDCP_KSV_SIZE);
		if (ret < 0) {
			pr_err("%s::Can't read ksv fifo!!\n", __func__);
			return false;
		}
		/* write ksv: push all but the last KSV, each followed by a
		 * WRITE_DONE handshake with the IP. */
		for (i = 0; i < dev_cnt - 1; i++) {
			writel(ksv_list[(i*5) + 0],
				g_hdmi_base + S5P_HDCP_RX_KSV_0_0);
			writel(ksv_list[(i*5) + 1],
				g_hdmi_base + S5P_HDCP_RX_KSV_0_1);
			writel(ksv_list[(i*5) + 2],
				g_hdmi_base + S5P_HDCP_RX_KSV_0_2);
			writel(ksv_list[(i*5) + 3],
				g_hdmi_base + S5P_HDCP_RX_KSV_0_3);
			writel(ksv_list[(i*5) + 4],
				g_hdmi_base + S5P_HDCP_RX_KSV_0_4);
			mdelay(1);
			writel(SET_HDCP_KSV_WRITE_DONE,
				g_hdmi_base + S5P_HDCP_RX_KSV_LIST_CTRL);
			mdelay(1);
			/* IP must acknowledge the KSV before the next one. */
			stat = readl(g_hdmi_base + S5P_HDCP_RX_KSV_LIST_CTRL);
			if (!(stat & SET_HDCP_KSV_READ))
				return false;
			HDCPPRINTK("HDCP_RX_KSV_1 = 0x%x\n",
				readl(g_hdmi_base + S5P_HDCP_RX_KSV_LIST_CTRL));
			HDCPPRINTK("i : %d, dev_cnt : %d, val = 0x%08x\n",
				i, dev_cnt, val);
		}
		/* Last KSV is flagged with KSV_END as well as WRITE_DONE. */
		writel(ksv_list[(i*5) + 0], g_hdmi_base + S5P_HDCP_RX_KSV_0_0);
		writel(ksv_list[(i*5) + 1], g_hdmi_base + S5P_HDCP_RX_KSV_0_1);
		writel(ksv_list[(i*5) + 2], g_hdmi_base + S5P_HDCP_RX_KSV_0_2);
		writel(ksv_list[(i*5) + 3], g_hdmi_base + S5P_HDCP_RX_KSV_0_3);
		writel(ksv_list[(i*5) + 4], g_hdmi_base + S5P_HDCP_RX_KSV_0_4);
		mdelay(1);
		/* end of ksv */
		val = SET_HDCP_KSV_END|SET_HDCP_KSV_WRITE_DONE;
		writel(val, g_hdmi_base + S5P_HDCP_RX_KSV_LIST_CTRL);
		HDCPPRINTK("HDCP_RX_KSV_1 = 0x%x\n",
			readl(g_hdmi_base + S5P_HDCP_RX_KSV_LIST_CTRL));
		HDCPPRINTK("i : %d, dev_cnt : %d, val = 0x%08x\n",
			i, dev_cnt, val);
	} else {
		/*
		mdelay(200);
		*/
		writel(SET_HDCP_KSV_LIST_EMPTY,
			g_hdmi_base + S5P_HDCP_RX_KSV_LIST_CTRL);
	}
	/* Read SHA-1 from receiver */
	ret = ddc_read(HDCP_SHA1, rx_v, SHA_1_HASH_SIZE);
	if (ret < 0) {
		pr_err("%s::Can't read sha_1_hash data from i2c bus\n",
			__func__);
		return false;
	}
#ifdef S5P_HDCP_DEBUG
	for (i = 0; i < SHA_1_HASH_SIZE; i++)
		HDCPPRINTK("SHA_1 rx :: %x\n", rx_v[i]);
#endif
	/* write SHA-1 to register */
	writeb(rx_v[0], g_hdmi_base + S5P_HDCP_RX_SHA1_0_0);
	writeb(rx_v[1], g_hdmi_base + S5P_HDCP_RX_SHA1_0_1);
	writeb(rx_v[2], g_hdmi_base + S5P_HDCP_RX_SHA1_0_2);
	writeb(rx_v[3], g_hdmi_base + S5P_HDCP_RX_SHA1_0_3);
	writeb(rx_v[4], g_hdmi_base + S5P_HDCP_RX_SHA1_1_0);
	writeb(rx_v[5], g_hdmi_base + S5P_HDCP_RX_SHA1_1_1);
	writeb(rx_v[6], g_hdmi_base + S5P_HDCP_RX_SHA1_1_2);
	writeb(rx_v[7], g_hdmi_base + S5P_HDCP_RX_SHA1_1_3);
	writeb(rx_v[8], g_hdmi_base + S5P_HDCP_RX_SHA1_2_0);
	writeb(rx_v[9], g_hdmi_base + S5P_HDCP_RX_SHA1_2_1);
	writeb(rx_v[10], g_hdmi_base + S5P_HDCP_RX_SHA1_2_2);
	writeb(rx_v[11], g_hdmi_base + S5P_HDCP_RX_SHA1_2_3);
	writeb(rx_v[12], g_hdmi_base + S5P_HDCP_RX_SHA1_3_0);
	writeb(rx_v[13], g_hdmi_base + S5P_HDCP_RX_SHA1_3_1);
	writeb(rx_v[14], g_hdmi_base + S5P_HDCP_RX_SHA1_3_2);
	writeb(rx_v[15], g_hdmi_base + S5P_HDCP_RX_SHA1_3_3);
	writeb(rx_v[16], g_hdmi_base + S5P_HDCP_RX_SHA1_4_0);
	writeb(rx_v[17], g_hdmi_base + S5P_HDCP_RX_SHA1_4_1);
	writeb(rx_v[18], g_hdmi_base + S5P_HDCP_RX_SHA1_4_2);
	writeb(rx_v[19], g_hdmi_base + S5P_HDCP_RX_SHA1_4_3);
	/* SHA write done, and wait for SHA computation being done */
	mdelay(1);
	/* check authentication success or not */
	stat = readb(g_hdmi_base + S5P_HDCP_AUTH_STATUS);
	HDCPPRINTK("auth status %d\n", stat);
	if (stat & SET_HDCP_SHA_VALID_READY) {
		stat = readb(g_hdmi_base + S5P_HDCP_AUTH_STATUS);
		if (stat & SET_HDCP_SHA_VALID)
			ret = true;
		else
			ret = false;
	} else {
		pr_err("%s::SHA not ready 0x%x\n", __func__, stat);
		ret = false;
	}
	/* clear all validate bit */
	writeb(0x0, g_hdmi_base + S5P_HDCP_AUTH_STATUS);
	return ret;
}
/*
 * Retry reading the receiver's Bcaps after a failure: up to 400 attempts
 * 250ms apart (~100s), with A/V muted while retrying.  Bails out early
 * if the state machine moved away from RECEIVER_READ_READY.
 */
static bool try_read_receiver(void)
{
	u16 i = 0;
	bool ret = false;
	s5p_hdmi_mute_en(true);
	for (i = 0; i < 400; i++) {
		msleep(250);
		if (hdcp_info.auth_status != RECEIVER_READ_READY) {
			pr_err("%s::hdcp stat. changed!!\
failed attempt no = %d\n",
				__func__, i);
			return false;
		}
		ret = read_bcaps();
		if (ret) {
			HDCPPRINTK("succeeded at attempt no= %d\n", i);
			return true;
		} else
			pr_err("%s::can't read bcaps!! \
failed attempt no=%d\n",
				__func__, i);
	}
	return false;
}
/* Stop HDCP and mark the protocol status flag.
 * NOTE(review): the meaning of '2' for g_hdcp_protocol_status is defined
 * outside this chunk — confirm before changing. */
static void s5p_hdcp_reset(void)
{
	s5p_stop_hdcp();
	g_hdcp_protocol_status = 2;
	HDCPPRINTK("HDCP ftn. reset!!\n");
}
/*
 * Bottom half for HDCP_EVENT_READ_BKSV_START: read Bcaps (with the long
 * try_read_receiver() retry on failure), then read and validate Bksv.
 * On any unrecoverable failure the whole authentication is reset.
 */
static void bksv_start_bh(void)
{
	bool ret = false;
	HDCPPRINTK("HDCP_EVENT_READ_BKSV_START bh\n");
	hdcp_info.auth_status = RECEIVER_READ_READY;
	ret = read_bcaps();
	if (!ret) {
		ret = try_read_receiver();
		if (!ret) {
			pr_err("%s::Can't read bcaps!! retry failed!!\
hdcp ftn. will be stopped\n", __func__);
			reset_authentication();
			return;
		}
	}
	hdcp_info.auth_status = BCAPS_READ_DONE;
	ret = read_bksv();
	if (!ret) {
		pr_err("%s::Can't read bksv!!\
hdcp ftn. will be reset\n", __func__);
		reset_authentication();
		return;
	}
	hdcp_info.auth_status = BKSV_READ_DONE;
	HDCPPRINTK("authentication status : bksv is done (0x%08x)\n",
		hdcp_info.auth_status);
}
/*
 * Bottom half for HDCP_EVENT_SECOND_AUTH_START (repeater watchdog):
 * wait for the repeater's KSV FIFO, run check_repeater(), and start
 * encryption on success; otherwise report the specific failure to the
 * IP and reset or drop authentication.
 *
 * Fixes over the original: in the retry loop the "return" sat OUTSIDE
 * the inner "if (!ret)" check, so a single transient Bcaps read failure
 * aborted the whole 2nd authentication even when try_read_receiver()
 * recovered; it now only bails out when the retry also failed.  The
 * doubled "not not" in the debug message is also corrected.
 */
static void second_auth_start_bh(void)
{
	u8 count = 0;
	int reg;
	bool ret = false;
	int ret_err;
	u32 bcaps;
	HDCPPRINTK("HDCP_EVENT_SECOND_AUTH_START bh\n");
	ret = read_bcaps();
	if (!ret) {
		ret = try_read_receiver();
		if (!ret) {
			pr_err("%s::Can't read bcaps!! retry failed!!\
hdcp ftn. will be stopped\n", __func__);
			reset_authentication();
			return;
		}
	}
	bcaps = readl(g_hdmi_base + S5P_HDCP_BCAPS);
	bcaps &= (KSV_FIFO_READY);
	if (!bcaps) {
		HDCPPRINTK("ksv fifo is not ready\n");
		/* Poll Bcaps for KSV_FIFO_READY, up to ~5s (50 x 100ms). */
		do {
			count++;
			ret = read_bcaps();
			if (!ret) {
				ret = try_read_receiver();
				if (!ret) {
					/* bugfix: only abort when the retry
					 * also failed (original returned
					 * unconditionally here). */
					reset_authentication();
					return;
				}
			}
			bcaps = readl(g_hdmi_base + S5P_HDCP_BCAPS);
			bcaps &= (KSV_FIFO_READY);
			if (bcaps) {
				HDCPPRINTK("bcaps retries : %d\n", count);
				break;
			}
			mdelay(100);
			if (!hdcp_info.hdcp_enable) {
				reset_authentication();
				return;
			}
		} while (count <= 50);
		/* wait times exceeded 5 seconds */
		if (count > 50) {
			hdcp_info.time_out = INFINITE;
			/*
			 * time-out (This bit is only available in a REPEATER)
			 */
			writel(readl(g_hdmi_base + S5P_HDCP_CTRL1) | 0x1 << 2,
				g_hdmi_base + S5P_HDCP_CTRL1);
			reset_authentication();
			return;
		}
	}
	HDCPPRINTK("ksv fifo ready\n");
	ret_err = check_repeater();
	if (ret_err == true) {
		u32 flag;
		hdcp_info.auth_status = SECOND_AUTHENTICATION_DONE;
		HDCPPRINTK("second authentication done!!\n");
		flag = readb(g_hdmi_base + S5P_STATUS);
		HDCPPRINTK("hdcp state : %s authenticated!!\n",
			flag & AUTHENTICATED ? "" : "not");
		start_encryption();
	} else if (ret_err == false) {
		/* i2c error */
		pr_err("%s::repeater check error!!\n", __func__);
		reset_authentication();
	} else {
		if (ret_err == REPEATER_ILLEGAL_DEVICE_ERROR) {
			/*
			 * No need to start the HDCP
			 * in case of invalid KSV (revocation case).
			 * Pulse HDCP_CTRL2 to signal the illegal device.
			 */
			pr_err("%s::illegal dev. error!!\n", __func__);
			reg = readl(g_hdmi_base + S5P_HDCP_CTRL2);
			reg = 0x1;
			writel(reg, g_hdmi_base + S5P_HDCP_CTRL2);
			reg = 0x0;
			writel(reg, g_hdmi_base + S5P_HDCP_CTRL2);
			hdcp_info.auth_status = NOT_AUTHENTICATED;
		} else if (ret_err == REPEATER_TIMEOUT_ERROR) {
			/* Pulse the repeater-timeout bit in HDCP_CTRL1. */
			reg = readl(g_hdmi_base + S5P_HDCP_CTRL1);
			reg |= SET_REPEATER_TIMEOUT;
			writel(reg, g_hdmi_base + S5P_HDCP_CTRL1);
			reg &= ~SET_REPEATER_TIMEOUT;
			writel(reg, g_hdmi_base + S5P_HDCP_CTRL1);
			hdcp_info.auth_status = NOT_AUTHENTICATED;
		} else {
			/*
			 * MAX_CASCADE_EXCEEDED_ERROR
			 * MAX_DEVS_EXCEEDED_ERROR
			 */
			pr_err("%s::repeater check error(MAX_EXCEEDED)!!\n",
				__func__);
			reset_authentication();
		}
	}
}
static bool write_aksv_start_bh(void)
{
bool ret = false;
HDCPPRINTK("HDCP_EVENT_WRITE_AKSV_START bh\n");
if (hdcp_info.auth_status != BKSV_READ_DONE) {
pr_err("%s::bksv is not ready!!\n", __func__);
return false;
}
ret = write_an();
if (!ret)
return false;
hdcp_info.auth_status = AN_WRITE_DONE;
HDCPPRINTK("an write done!!\n");
ret = write_aksv();
if (!ret)
return false;
/*
* Wait for 100ms. Transmitter must not read
* Ro' value sooner than 100ms after writing
* Aksv
*/
mdelay(100);
hdcp_info.auth_status = AKSV_WRITE_DONE;
HDCPPRINTK("aksv write done!!\n");
return ret;
}
/*
 * Bottom half for the Ri-update interrupt: compare the transmitter's Ri
 * against the sink's Ri'.  On the first successful match after Aksv was
 * written, either start encryption (simple receiver) or move to the
 * second (repeater) authentication phase.  On mismatch or wrong state,
 * the whole authentication is reset.
 *
 * Fix: the original ended with a brace-less "else" followed by two more
 * statements at the same indentation, which read as if the printk and
 * "return false" belonged to the else branch.  They are now explicitly
 * braced into that branch; behavior is unchanged because the if branch
 * returns true before reaching them.
 */
static bool check_ri_start_bh(void)
{
    bool ret = false;

    HDCPPRINTK("HDCP_EVENT_CHECK_RI_START bh\n");

    if (hdcp_info.auth_status == AKSV_WRITE_DONE ||
        hdcp_info.auth_status == FIRST_AUTHENTICATION_DONE ||
        hdcp_info.auth_status == SECOND_AUTHENTICATION_DONE) {
        ret = compare_r_val();
        if (ret) {
            if (hdcp_info.auth_status == AKSV_WRITE_DONE) {
                /*
                 * Check whether HDMI receiver is
                 * repeater or not
                 */
                if (hdcp_info.is_repeater)
                    hdcp_info.auth_status
                        = SECOND_AUTHENTICATION_RDY;
                else {
                    hdcp_info.auth_status
                        = FIRST_AUTHENTICATION_DONE;
                    start_encryption();
                }
            }
        } else {
            HDCPPRINTK("authentication reset\n");
            reset_authentication();
        }
        HDCPPRINTK("auth_status = 0x%08x\n",
                   hdcp_info.auth_status);
        return true;
    } else {
        reset_authentication();
        HDCPPRINTK("aksv_write or first/second"
                   " authentication is not done\n");
        return false;
    }
}
/*
 * Bottom half for the HDCP interrupt.
 *
 * Runs from the shared workqueue (scheduled by s5p_hdcp_irq_handler).
 * Dispatches every event bit set in hdcp_info.event to its handler and
 * clears the bit afterwards.
 *
 * NOTE(review): the event bits are set under hdcp_info.lock in the IRQ
 * handler but cleared here with the locking commented out - verify the
 * read-modify-write on hdcp_info.event cannot race with the top half.
 */
static void hdcp_work(void *arg)
{
    /*
    HDCPPRINTK("event : 0x%08x\n", hdcp_info.event);
    */
    /*
     * I2C int. was occurred
     * for reading Bksv and Bcaps
     */
    if (hdcp_info.event & (1 << HDCP_EVENT_READ_BKSV_START)) {
        bksv_start_bh();
        /* clear event */
        /* spin_lock_bh(&hdcp_info.lock); */
        hdcp_info.event &= ~(1 << HDCP_EVENT_READ_BKSV_START);
        /* spin_unlock_bh(&hdcp_info.lock); */
    }
    /*
     * Watchdog timer int. was occurred
     * for checking repeater
     */
    if (hdcp_info.event & (1 << HDCP_EVENT_SECOND_AUTH_START)) {
        second_auth_start_bh();
        /* clear event */
        /* spin_lock_bh(&hdcp_info.lock); */
        hdcp_info.event &= ~(1 << HDCP_EVENT_SECOND_AUTH_START);
        /* spin_unlock_bh(&hdcp_info.lock); */
    }
    /*
     * An_Write int. was occurred
     * for writing Ainfo, An and Aksv
     */
    if (hdcp_info.event & (1 << HDCP_EVENT_WRITE_AKSV_START)) {
        write_aksv_start_bh();
        /* clear event */
        /* spin_lock_bh(&hdcp_info.lock); */
        hdcp_info.event &= ~(1 << HDCP_EVENT_WRITE_AKSV_START);
        /* spin_unlock_bh(&hdcp_info.lock); */
    }
    /*
     * Ri int. was occurred
     * for comparing Ri and Ri'(from HDMI sink)
     */
    if (hdcp_info.event & (1 << HDCP_EVENT_CHECK_RI_START)) {
        check_ri_start_bh();
        /* clear event */
        /* spin_lock_bh(&hdcp_info.lock); */
        hdcp_info.event &= ~(1 << HDCP_EVENT_CHECK_RI_START);
        /* spin_unlock_bh(&hdcp_info.lock); */
    }
}
/*
 * Top half for the HDCP interrupt (demuxed from the HDMI IRQ).
 *
 * Reads the HDCP status register, turns each pending condition into an
 * event bit, acknowledges the per-source pending registers, then kicks
 * the hdcp_work() bottom half.  Event accumulation happens under
 * hdcp_info.lock.  Always returns IRQ_HANDLED, even for an unknown
 * status, so the line is not reported as spurious.
 */
irqreturn_t s5p_hdcp_irq_handler(int irq, void *dev_id)
{
    u32 event = 0;
    u8 flag;
    unsigned long spin_flags;
    event = 0;
    /* check HDCP Status */
    flag = readb(g_hdmi_base + S5P_STATUS);
    HDCPPRINTK("irq_status : 0x%08x\n", readb(g_hdmi_base + S5P_STATUS));
    HDCPPRINTK("hdcp state : %s authenticated!!\n",
               flag & AUTHENTICATED ? "" : "not");
    spin_lock_irqsave(&hdcp_info.lock, spin_flags);
    /*
     * processing interrupt
     * interrupt processing seq. is firstly set event for workqueue,
     * and interrupt pending clear. 'flag|' was used for preventing
     * to clear AUTHEN_ACK.- it caused many problem. be careful.
     */
    /* I2C INT: sink became active, Bksv/Bcaps can be read */
    if (flag & WTFORACTIVERX_INT_OCCURRED) {
        event |= (1 << HDCP_EVENT_READ_BKSV_START);
        writeb(flag | WTFORACTIVERX_INT_OCCURRED,
               g_hdmi_base + S5P_STATUS);
        writeb(0x0, g_hdmi_base + S5P_HDCP_I2C_INT);
    }
    /* AN INT: hardware generated An, start the Aksv exchange */
    if (flag & EXCHANGEKSV_INT_OCCURRED) {
        event |= (1 << HDCP_EVENT_WRITE_AKSV_START);
        writeb(flag | EXCHANGEKSV_INT_OCCURRED,
               g_hdmi_base + S5P_STATUS);
        writeb(0x0, g_hdmi_base + S5P_HDCP_AN_INT);
    }
    /* RI INT: time to compare Ri with the sink's Ri' */
    if (flag & UPDATE_RI_INT_OCCURRED) {
        event |= (1 << HDCP_EVENT_CHECK_RI_START);
        writeb(flag | UPDATE_RI_INT_OCCURRED,
               g_hdmi_base + S5P_STATUS);
        writeb(0x0, g_hdmi_base + S5P_HDCP_RI_INT);
    }
    /* WATCHDOG INT: repeater KSV-list timer fired */
    if (flag & WATCHDOG_INT_OCCURRED) {
        event |= (1 << HDCP_EVENT_SECOND_AUTH_START);
        writeb(flag | WATCHDOG_INT_OCCURRED,
               g_hdmi_base + S5P_STATUS);
        writeb(0x0, g_hdmi_base + S5P_HDCP_WDT_INT);
    }
    if (!event) {
        pr_err("%s::unknown irq.\n", __func__);
        spin_unlock_irqrestore(&hdcp_info.lock, spin_flags);
        return IRQ_HANDLED;
    }
    hdcp_info.event |= event;
    schedule_work(&hdcp_info.work);
    spin_unlock_irqrestore(&hdcp_info.lock, spin_flags);
    return IRQ_HANDLED;
}
/*
 * Returns non-zero while an authentication reset is in progress,
 * i.e. while reset_lock is held; otherwise 0.
 */
static int s5p_hdcp_is_reset(void)
{
    return spin_is_locked(&hdcp_info.reset_lock) ? 1 : 0;
}
/*
 * Program the software HPD register to reflect cable state.
 * Only 'detection' is used; 'hdcp_enabled' and 'client' are accepted
 * for interface compatibility but ignored here.  Always returns true.
 */
static bool s5p_set_hpd_detection(bool detection, bool hdcp_enabled,
                                  struct i2c_client *client)
{
    u32 hpd_reg_val = detection ? CABLE_PLUGGED : CABLE_UNPLUGGED;

    writel(hpd_reg_val, g_hdmi_base + S5P_HPD);

    HDCPPRINTK("HPD status :: 0x%08x\n",
               readl(g_hdmi_base + S5P_HPD));
    return true;
}
/*
 * One-time HDCP subsystem initialization: reset global flags, set up
 * the bottom-half work item and wait queue, initialize both spinlocks,
 * and register the HDCP top half with the HDMI interrupt demultiplexer.
 * Always returns 0.
 *
 * NOTE(review): hdcp_work has the legacy void(*)(void *) prototype and
 * is cast to work_func_t here - confirm the argument is never used.
 */
int s5p_hdcp_init(void)
{
    HDCPPRINTK("HDCP ftn. Init!!\n");
    g_is_dvi = false;
    g_av_mute = false;
    g_audio_en = true;
    /* for bh */
    INIT_WORK(&hdcp_info.work, (work_func_t)hdcp_work);
    init_waitqueue_head(&hdcp_info.waitq);
    /* for dev_dbg err. */
    spin_lock_init(&hdcp_info.lock);
    spin_lock_init(&hdcp_info.reset_lock);
    s5p_hdmi_register_isr((hdmi_isr)s5p_hdcp_irq_handler,
                          (u8)HDMI_IRQ_HDCP);
    return 0;
}
/*
 * start - start functions are only called under stopping HDCP
 *
 * Brings the link into a clean pre-authentication state: pulses the
 * software HPD line (with the real HPD interrupts masked so the pulse
 * is not observed), disables encryption, loads the AES-decrypted HDCP
 * key set, enables the HDCP status interrupts and triggers the first
 * Bcaps read over DDC.  Returns false only if the key load fails.
 * The sequence of register writes below is order-sensitive.
 */
bool s5p_start_hdcp(void)
{
    u8 reg;
    u32 sfr_val;
    hdcp_info.event = HDCP_EVENT_STOP;
    hdcp_info.time_out = INFINITE;
    hdcp_info.auth_status = NOT_AUTHENTICATED;
    HDCPPRINTK("HDCP ftn. Start!!\n");
    /* suppress HPD-triggered teardown while we toggle sw HPD */
    g_sw_reset = true;
    reg = s5p_hdmi_get_enabled_interrupt();
    s5p_hdmi_disable_interrupts(HDMI_IRQ_HPD_PLUG);
    s5p_hdmi_disable_interrupts(HDMI_IRQ_HPD_UNPLUG);
    /* 2. Enable software HPD */
    sw_hpd_enable(true);
    /* 3. Drive software HPD low ... */
    set_sw_hpd(false);
    /* 4. ... then high, emulating a replug */
    set_sw_hpd(true);
    /* 5. Disable software HPD */
    sw_hpd_enable(false);
    set_sw_hpd(false);
    /* 6. Unmask HPD plug and unplug interrupt (only those that were on) */
    if (reg & 1<<HDMI_IRQ_HPD_PLUG)
        s5p_hdmi_enable_interrupts(HDMI_IRQ_HPD_PLUG);
    if (reg & 1<<HDMI_IRQ_HPD_UNPLUG)
        s5p_hdmi_enable_interrupts(HDMI_IRQ_HPD_UNPLUG);
    g_sw_reset = false;
    HDCPPRINTK("Stop Encryption by Start!!\n");
    writel(HDCP_ENC_DISABLE, g_hdmi_base + S5P_ENC_EN);
    s5p_hdmi_mute_en(true);
    g_hdcp_protocol_status = 1;
    if (hdcp_loadkey() < 0)
        return false;
    /* for av mute */
    writel(DO_NOT_TRANSMIT, g_hdmi_base + S5P_GCP_CON);
    /*
     * 1-1. set hdmi status enable reg.
     * Update_Ri_int_en should be enabled after
     * s/w gets ExchangeKSV_int.
     */
    writel(HDCP_STATUS_EN_ALL, g_hdmi_base + S5P_STATUS_EN);
    /*
     * 3. set hdcp control reg.
     * Disable advance cipher option, Enable CP(Content Protection),
     * Disable time-out (This bit is only available in a REPEATER)
     * Disable XOR shift, Disable Pj port update, Use external key
     */
    sfr_val = 0;
    sfr_val |= CP_DESIRED_EN;
    writel(sfr_val, g_hdmi_base + S5P_HDCP_CTRL1);
    s5p_hdmi_enable_interrupts(HDMI_IRQ_HDCP);
    /* prime the authentication: DDC failure here resets but does not fail */
    if (!read_bcaps()) {
        pr_err("%s::can't read ddc port!\n", __func__);
        reset_authentication();
    }
    hdcp_info.hdcp_enable = true;
    /* register dump for debugging */
    HDCPPRINTK("\tSTATUS \t0x%08x\n",
               readl(g_hdmi_base + S5P_STATUS));
    HDCPPRINTK("\tSTATUS_EN \t0x%08x\n",
               readl(g_hdmi_base + S5P_STATUS_EN));
    HDCPPRINTK("\tHPD \t0x%08x\n", readl(g_hdmi_base + S5P_HPD));
    HDCPPRINTK("\tHDCP_CTRL \t0x%08x\n",
               readl(g_hdmi_base + S5P_HDCP_CTRL1));
    HDCPPRINTK("\tMODE_SEL \t0x%08x\n",
               readl(g_hdmi_base + S5P_MODE_SEL));
    HDCPPRINTK("\tENC_EN \t0x%08x\n",
               readl(g_hdmi_base + S5P_ENC_EN));
    HDCPPRINTK("\tHDMI_CON_0 \t0x%08x\n",
               readl(g_hdmi_base + S5P_HDMI_CON_0));
    return true;
}
/*
 * stop - stop functions are only called under running HDCP
 *
 * Tears HDCP down: masks the HDCP interrupt, resets the software state
 * machine, clears the CP-desired/repeater bits in CTRL1, disables the
 * status-enable bits, acknowledges all pending status flags, disables
 * encryption (with AV mute) and clears the Ri check result registers.
 * Always returns true.
 */
bool s5p_stop_hdcp(void)
{
    u32 sfr_val = 0;
    HDCPPRINTK("HDCP ftn. Stop!!\n");
    /*
    s5p_hdmi_disable_interrupts(HDMI_IRQ_HPD_PLUG);
    s5p_hdmi_disable_interrupts(HDMI_IRQ_HPD_UNPLUG);
    */
    s5p_hdmi_disable_interrupts(HDMI_IRQ_HDCP);
    g_hdcp_protocol_status = 0;
    hdcp_info.time_out = INFINITE;
    hdcp_info.event = HDCP_EVENT_STOP;
    hdcp_info.auth_status = NOT_AUTHENTICATED;
    hdcp_info.hdcp_enable = false;
    /* hdcp_info.client = NULL; */
    /* 3. disable hdcp control reg. (the *_DIS names are AND masks) */
    sfr_val = readl(g_hdmi_base + S5P_HDCP_CTRL1);
    sfr_val &= (ENABLE_1_DOT_1_FEATURE_DIS
                & CLEAR_REPEATER_TIMEOUT
                & EN_PJ_DIS
                & CP_DESIRED_DIS);
    writel(sfr_val, g_hdmi_base + S5P_HDCP_CTRL1);
    /* 1-3. disable hdmi hpd reg. */
    sw_hpd_enable(false);
    /* 1-2. disable hdmi status enable reg. */
    sfr_val = readl(g_hdmi_base + S5P_STATUS_EN);
    sfr_val &= HDCP_STATUS_DIS_ALL;
    writel(sfr_val, g_hdmi_base + S5P_STATUS_EN);
    /* 1-1. clear all status pending (write-1-to-clear) */
    sfr_val = readl(g_hdmi_base + S5P_STATUS);
    sfr_val |= HDCP_STATUS_EN_ALL;
    writel(sfr_val, g_hdmi_base + S5P_STATUS);
    /* disable encryption */
    HDCPPRINTK("Stop Encryption by Stop!!\n");
    writel(HDCP_ENC_DISABLE, g_hdmi_base + S5P_ENC_EN);
    s5p_hdmi_mute_en(true);
    /* clear result */
    writel(Ri_MATCH_RESULT__NO, g_hdmi_base + S5P_HDCP_CHECK_RESULT);
    writel(CLEAR_ALL_RESULTS, g_hdmi_base + S5P_HDCP_CHECK_RESULT);
/*
 * Everything from here to #endif is compiled out (note: the block also
 * contains a stray close-comment token and debug dumps, kept verbatim).
 */
#if 0
    /* hdmi disable */
    sfr_val = readl(g_hdmi_base + S5P_HDMI_CON_0);
    sfr_val &= ~(PWDN_ENB_NORMAL | HDMI_EN | ASP_EN);
    writel(sfr_val, g_hdmi_base + S5P_HDMI_CON_0);
    */
    HDCPPRINTK("\tSTATUS \t0x%08x\n", readl(g_hdmi_base + S5P_STATUS));
    HDCPPRINTK("\tSTATUS_EN \t0x%08x\n",
               readl(g_hdmi_base + S5P_STATUS_EN));
    HDCPPRINTK("\tHPD \t0x%08x\n", readl(g_hdmi_base + S5P_HPD));
    HDCPPRINTK("\tHDCP_CTRL \t0x%08x\n",
               readl(g_hdmi_base + S5P_HDCP_CTRL1));
    HDCPPRINTK("\tMODE_SEL \t0x%08x\n",
               readl(g_hdmi_base + S5P_MODE_SEL));
    HDCPPRINTK("\tENC_EN \t0x%08x\n", readl(g_hdmi_base + S5P_ENC_EN));
    HDCPPRINTK("\tHDMI_CON_0 \t0x%08x\n",
               readl(g_hdmi_base + S5P_HDMI_CON_0));
    writel(sfr_val, g_hdmi_base + S5P_HDMI_CON_0);
#endif
    return true;
}
/*
 * Called from the HPD (hot-plug) path while HDCP is enabled.
 * Clears all pending HDCP interrupt sources, disables encryption and
 * mutes AV.  Unless a software-driven HPD pulse is in progress
 * (g_sw_reset), 'on' selects whether content protection is re-armed
 * (plug) or fully torn down (unplug).  Always returns 0.
 */
int s5p_hdcp_encrypt_stop(bool on)
{
    u32 reg;
    if (hdcp_info.hdcp_enable) {
        /* clear interrupt pending all */
        writeb(0x0, g_hdmi_base + S5P_HDCP_I2C_INT);
        writeb(0x0, g_hdmi_base + S5P_HDCP_AN_INT);
        writeb(0x0, g_hdmi_base + S5P_HDCP_RI_INT);
        writeb(0x0, g_hdmi_base + S5P_HDCP_WDT_INT);
        writel(HDCP_ENC_DISABLE, g_hdmi_base + S5P_ENC_EN);
        s5p_hdmi_mute_en(true);
        if (!g_sw_reset) {
            reg = readl(g_hdmi_base + S5P_HDCP_CTRL1);
            if (on) {
                /* cable plugged: request CP again and unmask HDCP irq */
                writel(reg | CP_DESIRED_EN,
                       g_hdmi_base + S5P_HDCP_CTRL1);
                s5p_hdmi_enable_interrupts(HDMI_IRQ_HDCP);
            } else {
                /* cable pulled: drop auth state and mask HDCP irq */
                hdcp_info.event
                    = HDCP_EVENT_STOP;
                hdcp_info.auth_status
                    = NOT_AUTHENTICATED;
                writel(reg & ~CP_DESIRED_EN,
                       g_hdmi_base + S5P_HDCP_CTRL1);
                s5p_hdmi_disable_interrupts(HDMI_IRQ_HDCP);
            }
        }
        HDCPPRINTK("Stop Encryption by HPD Event!!\n");
    }
    return 0;
}
/* Record whether the sink is a DVI (rather than HDMI) device. */
int s5p_hdmi_set_dvi(bool en)
{
    g_is_dvi = en;
    return 0;
}
/* Latch the global audio-enable flag consulted by the mute path. */
void s5p_hdmi_set_audio(bool en)
{
    g_audio_en = en;
}
/*
 * Gate the Audio Sample Packet (ASP) output: toggle ASP_EN in
 * HDMI_CON_0 and set the AUI packet transmission mode accordingly.
 * No-op when the sink is DVI (g_is_dvi), which carries no audio.
 * Always returns 0.
 *
 * Fix: 'reg' was declared u8, which truncated the 32-bit value read
 * from S5P_HDMI_CON_0 and silently cleared any control bits above
 * bit 7 when the value was written back.  Use u32 to preserve them.
 */
int s5p_hdmi_audio_enable(bool en)
{
    u32 reg;
    if (!g_is_dvi) {
        reg = readl(g_hdmi_base + S5P_HDMI_CON_0);
        if (en) {
            reg |= ASP_EN;
            writel(HDMI_TRANS_EVERY_SYNC , g_hdmi_base + S5P_AUI_CON);
        } else {
            reg &= ~ASP_EN;
            writel(HDMI_DO_NOT_TANS , g_hdmi_base + S5P_AUI_CON);
        }
        writel(reg, g_hdmi_base + S5P_HDMI_CON_0);
    }
    return 0;
}
/* Latch the user-requested AV mute flag. */
int s5p_hdmi_set_mute(bool en)
{
    g_av_mute = en;
    return 0;
}
/* Report the user-requested AV mute flag (1 = muted, 0 = not). */
int s5p_hdmi_get_mute(void)
{
    return g_av_mute;
}
/*
 * Internal mute control used by the HDCP state machine: blue-screen
 * plus audio off when muting, restore picture (and audio, if globally
 * enabled) when unmuting.  Skipped entirely while the user has AV mute
 * asserted (g_av_mute), so we never fight the explicit user setting.
 */
void s5p_hdmi_mute_en(bool en)
{
    if (g_av_mute)
        return;

    if (en) {
        s5p_hdmi_video_set_bluescreen(true, 128, 0, 128);
        s5p_hdmi_audio_enable(false);
        return;
    }

    s5p_hdmi_video_set_bluescreen(false, 128, 0, 128);
    if (g_audio_en)
        s5p_hdmi_audio_enable(true);
}
|
Jolocotroco/android_kernel_samsung_smdkv210
|
drivers/media/video/samsung/tv20/s5pv210/hdcp_s5pv210.c
|
C
|
gpl-2.0
| 38,054
|
/******************************************************************************
* File : velocity_tria3.c *
* Author : Carlos Rosales Fernandez (carlos@ihpc.a-star.edu.sg) *
* Date : 01-09-2006 *
* Revision : 1.0 *
*******************************************************************************
* DESCRIPTION *
* Calculates the three components of the flow speed at a given point Xin[] *
* and returns the values in array U[]. *
* Works for linear interpolation in triangular elements (3-noded triangles). *
******************************************************************************/
/******************************************************************************
* COPYRIGHT & LICENSE INFORMATION *
* *
* Copyright 2006 Carlos Rosales Fernandez and The Institute of High *
* Performance Computing (A*STAR) *
* *
* This file is part of stkSolver. *
* *
* stkSolver is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* stkSolver is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with stkSolver; if not, write to the Free Software *
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *
******************************************************************************/
#include "constants.h"
#include "velocity_tria3.h"
/*
 * Evaluate the three velocity components U[0..2] at point Xin[0..2] by
 * summing the single-layer (Stokeslet) kernel integrals over all
 * 3-node triangular boundary elements, weighted by the solved surface
 * densities in vB (stored as x-, y-, z-blocks of nNodes entries each).
 * Uses globals nElems / nNodes and helpers intGStk_tria3 /
 * intSingularGStk_tria3.  Always returns 0.
 *
 * assumes vProbParam[0] is the fluid viscosity (factor = 1/(8*pi*mu))
 * - TODO confirm against the solver setup code.
 */
int velocity_tria3(double *Xin, double **mNodes, unsigned int **mElems,
                   double *vProbParam, double *vB, double *U)
{
    const unsigned int ELEMS = nElems, NODES_IN_ELEM = 3;
    unsigned int currentNode, i, j, SinNode, test, xNode, yNode, zNode;
    double dx, dy, dz, factor;
    double X[3][3];
    double Int[3][3][3];
    /* Initialize */
    U[0] = U[1] = U[2] = 0.0;
    factor = 1.0/(8.0*pi*vProbParam[0]);
    for(i = 0; i < ELEMS; i++){
        /* gather the element's node coordinates (connectivity is 1-based) */
        for(j = 0; j < NODES_IN_ELEM; j++){
            currentNode = mElems[i][j] - 1;
            X[j][0] = mNodes[currentNode][0];
            X[j][1] = mNodes[currentNode][1];
            X[j][2] = mNodes[currentNode][2];
        }
        /* Check for singular case: Xin coincides with one of the nodes.
           SinNode is only assigned (and only read) when test == 1. */
        test = 0;
        for(j = 0; j < NODES_IN_ELEM; j++){
            dx = X[j][0] - Xin[0];
            dy = X[j][1] - Xin[1];
            dz = X[j][2] - Xin[2];
            if(dx == 0.0 && dy == 0.0 && dz == 0.0){
                test = 1;
                SinNode = j+1;
                break;
            }
        }
        /* regular vs singular quadrature of the Stokeslet kernel */
        if(test == 0) intGStk_tria3(X,Xin,Int);
        else intSingularGStk_tria3(SinNode,X,Xin,Int);
        /* Add contribution from each node j in element i */
        for(j = 0; j < NODES_IN_ELEM; j++){
            xNode = mElems[i][j] - 1;
            yNode = xNode + nNodes;
            zNode = yNode + nNodes;
            U[0] -= Int[0][0][j]*vB[xNode] + Int[0][1][j]*vB[yNode] + /* Ux */
                    Int[0][2][j]*vB[zNode];
            U[1] -= Int[1][0][j]*vB[xNode] + Int[1][1][j]*vB[yNode] + /* Uy */
                    Int[1][2][j]*vB[zNode];
            U[2] -= Int[2][0][j]*vB[xNode] + Int[2][1][j]*vB[yNode] + /* Uz */
                    Int[2][2][j]*vB[zNode];
        }
    }
    /* scale by 1/(8*pi*mu) */
    U[0] = U[0]*factor;
    U[1] = U[1]*factor;
    U[2] = U[2]*factor;
    return 0;
}
|
carlosrosales/stksolver
|
src/velocity_tria3.c
|
C
|
gpl-2.0
| 4,564
|
/***********************************************************************
*
* avra - Assembler for the Atmel AVR microcontroller series
*
* Copyright (C) 1998-2004 Jon Anders Haugum, TObias Weber
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
*
* Authors of avra can be reached at:
* email: jonah@omegav.ntnu.no, tobiw@suprafluid.com
* www: http://sourceforge.net/projects/avra
*/
/*
* In append_type: added generic register names support
* Alexey Pavluchenko, 16.Nov.2005
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include "misc.h"
#include "args.h"
#include "avra.h"
#include "device.h"
/* Only Windows LIBC does support itoa, so we add this
function for other systems here manually. Thank you
Peter Hettkamp for your work. */
#ifndef WIN32
/*
 * Convert num to a NUL-terminated string in base number_format.
 * Digits >= 10 use uppercase letters ('A'...).  Returns str.
 *
 * Fix: the original produced garbage for negative inputs (the digit
 * loop never ran, and C99 '%' on a negative operand yields a negative
 * remainder).  Negative values now get a leading '-'; magnitude math
 * is done in unsigned arithmetic so even INT_MIN is handled.
 * num == 0 still yields "0".
 */
char * itoa(int num, char *str, const int number_format)
{
	unsigned int base = (unsigned int)number_format;
	unsigned int mag;
	unsigned int scan;
	char *out = str;
	int num_chars = 0;
	int pos;

	if (num < 0)
	{
		*out++ = '-';
		mag = 0u - (unsigned int)num; /* magnitude; safe for INT_MIN */
	}
	else
	{
		mag = (unsigned int)num;
	}

	/* count digits */
	scan = mag;
	while (scan > 0)
	{
		num_chars++;
		scan /= base;
	}
	if (num_chars == 0)
		num_chars = 1;

	/* emit digits right-to-left */
	out[num_chars] = '\0';
	for (pos = num_chars - 1; pos >= 0; pos--)
	{
		unsigned int digit = mag % base;
		if (digit < 10)
			out[pos] = (char)('0' + digit);
		else
			out[pos] = (char)('A' + digit - 10);
		mag /= base;
	}
	return(str);
}
#endif
/*
 * Read a .MACRO body up to .ENDM/.ENDMACRO from the current source file.
 *
 * PASS_1: validates and records the macro name, appends the macro to
 * pi's macro list, stores every body line in a macro_line list, and
 * collects "%name:" macro-local labels into macro->first_label.
 * PASS_2: the body is already stored; re-locate the macro, reset the
 * per-expansion label numbering, and echo lines to the list file.
 *
 * Returns True on success or recoverable error, False on hard failure.
 *
 * Fix: two statements had been corrupted by HTML-entity mangling
 * ("&macr;" + "o" in place of "&macro"); the address-of expressions
 * that link the macro_line list are restored below.
 */
int read_macro(struct prog_info *pi, char *name)
{
	int loopok;
	int i;
	int start;
	struct macro *macro;
	struct macro_line *macro_line;
	struct macro_line **last_macro_line = NULL;
	struct macro_label *macro_label;
	if (pi->pass == PASS_1)
	{
		if (!name)
		{
			print_msg(pi, MSGTYPE_ERROR, "missing macro name");
			return(True);
		}
		get_next_token(name, TERM_END);
		for (i = 0; !IS_END_OR_COMMENT(name[i]); i++)
		{
			if (!IS_LABEL(name[i]))
			{
				print_msg(pi, MSGTYPE_ERROR, "illegal characters used in macro name '%s'",name);
				return(False);
			}
		}
		macro = calloc(1, sizeof(struct macro));
		if (!macro)
		{
			print_msg(pi, MSGTYPE_OUT_OF_MEM, NULL);
			return(False);
		}
		if (pi->last_macro)
			pi->last_macro->next = macro;
		else
			pi->first_macro = macro;
		pi->last_macro = macro;
		macro->name = malloc(strlen(name) + 1);
		if (!macro->name)
		{
			print_msg(pi, MSGTYPE_OUT_OF_MEM, NULL);
			return(False);
		}
		strcpy(macro->name, name);
		macro->include_file = pi->fi->include_file;
		macro->first_line_number = pi->fi->line_number;
		/* restored from mojibake: tail pointer for appending body lines */
		last_macro_line = &macro->first_macro_line;
	}
	else /* pi->pass == PASS_2 */
	{
		if (pi->list_line && pi->list_on)
		{
			fprintf(pi->list_file, " %s\n", pi->list_line);
			pi->list_line = NULL;
		}
		/* reset macro label running numbers */
		get_next_token(name, TERM_END);
		macro = get_macro(pi, name);
		if (!macro)
		{
			print_msg(pi, MSGTYPE_ERROR, "macro inconsistency in '%s'", name);
			return(True);
		}
		for (macro_label = macro->first_label; macro_label; macro_label = macro_label->next)
		{
			macro_label->running_number = 0;
		}
	}
	loopok = True;
	while (loopok)
	{
		if (fgets_new(pi,pi->fi->buff, LINEBUFFER_LENGTH, pi->fi->fp))
		{
			pi->fi->line_number++;
			i = 0;
			while (IS_HOR_SPACE(pi->fi->buff[i]) && !IS_END_OR_COMMENT(pi->fi->buff[i]))
				i++;
			/* directive? check for the end-of-macro markers */
			if (pi->fi->buff[i] == '.')
			{
				i++;
				if (!nocase_strncmp(&pi->fi->buff[i], "endm", 4))
					loopok = False;
				if (!nocase_strncmp(&pi->fi->buff[i], "endmacro", 8))
					loopok = False;
			}
			if (pi->pass == PASS_1)
			{
				if (loopok)
				{
					i = 0; /* find start of line */
					while (IS_HOR_SPACE(pi->fi->buff[i]) && !IS_END_OR_COMMENT(pi->fi->buff[i]))
					{
						i++;
					}
					start = i;
					/* find end of line */
					while (!IS_END_OR_COMMENT(pi->fi->buff[i]) && (IS_LABEL(pi->fi->buff[i]) || pi->fi->buff[i] == ':'))
					{
						i++;
					}
					/* "%label:" at line start declares a macro-local label */
					if (pi->fi->buff[i-1] == ':' && (pi->fi->buff[i-2] == '%'
					    && (IS_HOR_SPACE(pi->fi->buff[i]) || IS_END_OR_COMMENT(pi->fi->buff[i]))))
					{
						if (macro->first_label)
						{
							for (macro_label = macro->first_label; macro_label->next; macro_label=macro_label->next)
							{
							}
							macro_label->next = calloc(1,sizeof(struct macro_label));
							macro_label = macro_label->next;
						}
						else
						{
							macro_label = calloc(1,sizeof(struct macro_label));
							macro->first_label = macro_label;
						}
						/* NOTE(review): these calloc/malloc results are
						   used unchecked, as in the original */
						macro_label->label = malloc(strlen(&pi->fi->buff[start])+1);
						pi->fi->buff[i-1] = '\0';
						strcpy(macro_label->label, &pi->fi->buff[start]);
						pi->fi->buff[i-1] = ':';
						macro_label->running_number = 0;
					}
					macro_line = calloc(1, sizeof(struct macro_line));
					if (!macro_line)
					{
						print_msg(pi, MSGTYPE_OUT_OF_MEM, NULL);
						return(False);
					}
					/* restored from mojibake: append to the body list */
					*last_macro_line = macro_line;
					last_macro_line = &macro_line->next;
					macro_line->line = malloc(strlen(pi->fi->buff) + 1);
					if (!macro_line->line)
					{
						print_msg(pi, MSGTYPE_OUT_OF_MEM, NULL);
						return(False);
					}
					strcpy(macro_line->line, &pi->fi->buff[start]);
				}
			}
			else if (pi->fi->buff && pi->list_file && pi->list_on)
			{
				/* NOTE(review): both branches currently emit the same
				   format; kept as-is pending clarification */
				if (pi->fi->buff[i] == ';')
					fprintf(pi->list_file, " %s\n", pi->fi->buff);
				else
					fprintf(pi->list_file, " %s\n", pi->fi->buff);
			}
		}
		else
		{
			if (feof(pi->fi->fp))
			{
				print_msg(pi, MSGTYPE_ERROR, "Found no closing .ENDMACRO");
				return(True);
			}
			else
			{
				perror(pi->fi->include_file->name);
				return(False);
			}
		}
	}
	return(True);
}
/* Look up a macro by (case-insensitive) name; NULL if not defined. */
struct macro *get_macro(struct prog_info *pi, char *name)
{
	struct macro *entry = pi->first_macro;

	while (entry)
	{
		if (nocase_strcmp(entry->name, name) == 0)
			return(entry);
		entry = entry->next;
	}
	return(NULL);
}
/*
 * Append an argument-type suffix to a mangled macro name:
 *   "_v"   when there is no argument (c == 0),
 *   "_<c*8>" when the argument is a register (generic Rn/Rnn name or a
 *            name introduced via .def), where c counts the registers
 *            joined with ':',
 *   "_i"   otherwise (immediate operand).
 */
void append_type(struct prog_info *pi, char *name, int c, char *value)
{
	struct def *d;
	int pos = strlen(name);
	int len;

	name[pos++] = '_';

	if (c == 0)
	{
		/* no-argument variant */
		name[pos++] = 'v';
		name[pos] = '\0';
		return;
	}

	/* generic register name: r0..r31 (Alexey Pavluchenko, 16.Nov.2005) */
	len = strlen(value);
	if ((len == 2 || len == 3)
	    && tolower(value[0]) == 'r'
	    && isdigit(value[1])
	    && (len == 2 || isdigit(value[2]))
	    && atoi(&value[1]) < 32)
	{
		itoa(c * 8, &name[pos], 10);
		return;
	}

	/* a .def alias also counts as a register */
	for (d = pi->first_def; d; d = d->next)
	{
		if (nocase_strcmp(d->name, value) == 0)
		{
			itoa(c * 8, &name[pos], 10);
			return;
		}
	}

	/* anything else is treated as an immediate */
	name[pos++] = 'i';
	name[pos] = '\0';
}
/*********************************************************
 * This routine replaces the macro call with mnemonics.  *
 *                                                       *
 * Builds a working copy of the argument list (expanding *
 * the amca words 'src'/'dst'), resolves extended        *
 * "[reg:reg, imm]" parameter syntax to a type-mangled   *
 * macro name, tracks the call in the macro_call list,   *
 * then feeds each stored body line (with @n argument    *
 * and %label substitution applied) back to parse_line().*
 *                                                       *
 * Fixes: the plain-argument branch malloc'd a second    *
 * buffer over 'line', leaking the first one - it now    *
 * reuses the existing (larger) buffer; 'line'/'nmn' are *
 * freed on the early error returns; the intentional     *
 * ']' -> ',' switch fallthrough is now marked.          *
 *********************************************************/
int expand_macro(struct prog_info *pi, struct macro *macro, char *rest_line)
{
	int ok = True, macro_arg_count = 0, off, a, b = 0, c, i = 0, j = 0;
	char *line = NULL;
	char *temp;
	char *macro_args[MAX_MACRO_ARGS];
	char tmp[7];
	char buff[LINEBUFFER_LENGTH];
	char arg = False;
	char *nmn; /* string buffer for 'n'ew 'm'acro 'n'ame */
	struct macro_line *old_macro_line;
	struct macro_call *macro_call;
	struct macro_label *macro_label;
	if (rest_line)
	{
		/* we reserve some extra space for extended macro parameters */
		line = malloc(strlen(rest_line) + 20);
		if (!line)
		{
			print_msg(pi, MSGTYPE_OUT_OF_MEM, NULL);
			return(False);
		}
		/* exchange amca word 'src' with YH:YL and 'dst' with ZH:ZL */
		for (c = 0, a = strlen(rest_line); c < a; c++)
		{
			switch (tolower(rest_line[c]))
			{
			case 's':
				if (IS_SEPARATOR(rest_line[c-1]) && (rest_line[c+1] == 'r') && (rest_line[c+2] == 'c') && IS_SEPARATOR(rest_line[c+3]))
				{
					strcpy(&line[b],"YH:YL");
					b += 5;
					c += 2;
				}
				else
				{
					line[b++] = rest_line[c];
				}
				break;
			case 'd':
				if (IS_SEPARATOR(rest_line[c-1]) && (rest_line[c+1] == 's') && (rest_line[c+2] == 't') && IS_SEPARATOR(rest_line[c+3]))
				{
					strcpy(&line[b],"ZH:ZL");
					b += 5;
					c += 2;
				}
				else
				{
					line[b++] = rest_line[c];
				}
				break;
			default:
				line[b++] = rest_line[c];
			}
		}
		strcpy(&line[b],"\n"); /* set CR/LF at the end of the line */
		/* here we split up the macro arguments into "macro_args"
		 * Extended macro code interpreter added by TW 2002
		 * NOTE(review): macro_arg_count is never checked against
		 * MAX_MACRO_ARGS - verify callers cannot overflow it.
		 */
		temp = line;
		/* test for advanced parameters */
		if ( temp[0] == '[' ) /* there must be "[" then "]", else it is garbage */
		{
			if (!strchr(temp, ']'))
			{
				print_msg(pi, MSGTYPE_ERROR, "found no closing ']'");
				free(line); /* fix: do not leak the work buffer */
				return(False);
			}
			/* Okay now we are within the advanced code interpreter */
			temp++; /* skip the first bracket */
			nmn = malloc(LINEBUFFER_LENGTH);
			if (!nmn)
			{
				print_msg(pi, MSGTYPE_OUT_OF_MEM, NULL);
				free(line);
				return(False);
			}
			strcpy(nmn,macro->name); /* create a new macro name buffer */
			c = 1;      /* byte counter */
			arg = True; /* loop flag */
			while (arg)
			{
				while (IS_HOR_SPACE(temp[0])) /* skip leading spaces */
				{
					temp++;
				}
				off = 0; /* pointer offset */
				do
				{
					switch (temp[off]) /* test current character code */
					{
					case ':':
						temp[off] = '\0';
						if (off > 0)
						{
							c++;
							macro_args[macro_arg_count++] = temp;
						}
						else
						{
							print_msg(pi, MSGTYPE_ERROR, "missing register before ':'",nmn);
							free(nmn);
							free(line);
							return(False);
						}
						break;
					case ']':
						arg = False;
						/* fallthrough */
					case ',':
						a = off;
						do
							temp[a--] = '\0';
						while ( IS_HOR_SPACE(temp[a]) );
						if (off > 0)
						{
							macro_args[macro_arg_count++] = temp;
							append_type(pi, nmn, c, temp);
							c = 1;
						}
						else
						{
							append_type(pi, nmn, 0, temp);
							c = 1;
						}
						break;
					default:
						off++;
					}
				}
				while (temp[off] != '\0');
				if (arg)
					temp = &temp[off+1];
				else
					break;
			}
			macro = get_macro(pi,nmn);
			if (macro == NULL)
			{
				print_msg(pi, MSGTYPE_ERROR, "Macro %s is not defined !",nmn);
				free(nmn);
				free(line);
				return(False);
			}
			free(nmn);
		}
		/* or else, we handle the macro as normal macro */
		else
		{
			/* fix: reuse the existing buffer (it is large enough)
			 * instead of a second malloc that leaked the first one */
			strcpy(line, rest_line);
			temp = line;
			while (temp)
			{
				macro_args[macro_arg_count++] = temp;
				temp = get_next_token(temp, TERM_COMMA);
			}
		}
	}
	if (pi->pass == PASS_1)
	{
		/* record this call site for matching during PASS_2 */
		macro_call = calloc(1, sizeof(struct macro_call));
		if (!macro_call)
		{
			print_msg(pi, MSGTYPE_OUT_OF_MEM, NULL);
			return(False);
		}
		if (pi->last_macro_call)
			pi->last_macro_call->next = macro_call;
		else
			pi->first_macro_call = macro_call;
		pi->last_macro_call = macro_call;
		macro_call->line_number = pi->fi->line_number;
		macro_call->include_file = pi->fi->include_file;
		macro_call->macro = macro;
		macro_call->prev_on_stack = pi->macro_call;
		if (macro_call->prev_on_stack)
		{
			macro_call->nest_level = macro_call->prev_on_stack->nest_level + 1;
			macro_call->prev_line_index = macro_call->prev_on_stack->line_index;
		}
	}
	else
	{
		for (macro_call = pi->first_macro_call; macro_call; macro_call = macro_call->next)
		{
			if ((macro_call->include_file->num == pi->fi->include_file->num) && (macro_call->line_number == pi->fi->line_number))
			{
				if (pi->macro_call)
				{
					/* Find correct macro_call when using recursion and nesting */
					if (macro_call->prev_on_stack == pi->macro_call)
						if ((macro_call->nest_level == (pi->macro_call->nest_level + 1)) && (macro_call->prev_line_index == pi->macro_call->line_index))
							break;
				}
				else
					break;
			}
		}
		/* NOTE(review): if no PASS_1 record matches, macro_call is NULL
		 * here and the dereference below would crash - the passes are
		 * assumed to stay consistent, as in the original. */
		if (pi->list_line && pi->list_on)
		{
			fprintf(pi->list_file, "C:%06x + %s\n", pi->cseg_addr, pi->list_line);
			pi->list_line = NULL;
		}
	}
	macro_call->line_index = 0;
	pi->macro_call = macro_call;
	old_macro_line = pi->macro_line;
	for (pi->macro_line = macro->first_macro_line; pi->macro_line && ok; pi->macro_line = pi->macro_line->next)
	{
		macro_call->line_index++;
		if (GET_ARG(pi->args, ARG_LISTMAC))
			pi->list_line = buff;
		else
			pi->list_line = NULL;
		/* here we change jumps/calls within macro that corresponds to macro labels.
		   Only in case there is an entry in macro_label list */
		strcpy(buff,"\0");
		macro_label = get_macro_label(pi->macro_line->line,macro);
		if (macro_label)
		{
			/* test if the right macro label has been found */
			temp = strstr(pi->macro_line->line,macro_label->label);
			c = strlen(macro_label->label);
			if (temp[c] == ':') /* it is a label definition */
			{
				macro_label->running_number++;
				strncpy(buff, macro_label->label, c - 1);
				buff[c - 1] = 0;
				i = strlen(buff) + 2; /* we set the process index after label */
				/* add running number to it */
				strcpy(&buff[c-1],itoa(macro_label->running_number, tmp, 10));
				strcat(buff, ":\0");
			}
			else if (IS_HOR_SPACE(temp[c]) || IS_END_OR_COMMENT(temp[c])) /* it is a jump to a macro defined label */
			{
				strcpy(buff,pi->macro_line->line);
				temp = strstr(buff, macro_label->label);
				i = temp - buff + strlen(macro_label->label);
				strncpy(temp, macro_label->label, c - 1);
				strcpy(&temp[c-1], itoa(macro_label->running_number, tmp, 10));
			}
		}
		else
		{
			i = 0;
		}
		/* here we check every character of current line */
		for (j = i; pi->macro_line->line[i] != '\0'; i++)
		{
			/* check for register place holders */
			if (pi->macro_line->line[i] == '@')
			{
				i++;
				if (!isdigit(pi->macro_line->line[i]))
					print_msg(pi, MSGTYPE_ERROR, "@ must be followed by a number");
				else if ((pi->macro_line->line[i] - '0') >= macro_arg_count)
					print_msg(pi, MSGTYPE_ERROR, "Missing macro argument (for @%c)", pi->macro_line->line[i]);
				else
				{
					/* and replace them with given registers */
					strcat(&buff[j], macro_args[pi->macro_line->line[i] - '0']);
					j += strlen(macro_args[pi->macro_line->line[i] - '0']);
				}
			}
			else if (pi->macro_line->line[i] == ';')
			{
				strncat(buff, "\n", 1);
				break;
			}
			else
			{
				strncat(buff, &pi->macro_line->line[i], 1);
			}
		}
		ok = parse_line(pi, buff);
		if (ok)
		{
			if ((pi->pass == PASS_2) && pi->list_line && pi->list_on)
				fprintf(pi->list_file, "          %s\n", pi->list_line);
			if (pi->error_count >= pi->max_errors)
			{
				print_msg(pi, MSGTYPE_MESSAGE, "Maximum error count reached. Exiting...");
				ok = False;
				break;
			}
		}
	}
	pi->macro_line = old_macro_line;
	pi->macro_call = macro_call->prev_on_stack;
	if (rest_line)
		free(line);
	return(ok);
}
/*
 * Return the first macro-local label whose text occurs as a substring
 * of the given line, or NULL when none of the macro's labels match.
 */
struct macro_label *get_macro_label(char *line, struct macro *macro)
{
	struct macro_label *ml;

	for (ml = macro->first_label; ml != NULL; ml = ml->next)
	{
		if (strstr(line, ml->label) != NULL)
			return ml;
	}
	return NULL;
}
/* end of macro.c */
|
pearsonalan/avra
|
src/macro.c
|
C
|
gpl-2.0
| 15,499
|
/*
* linux/init/main.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* GK 2/5/95 - Changed to support mounting root fs via NFS
* Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96
* Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96
* Simplified starting of init: Michael A. Griffith <grif@acm.org>
*/
#define DEBUG
#include <linux/types.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/stackprotector.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/tty.h>
#include <linux/percpu.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/kernel_stat.h>
#include <linux/start_kernel.h>
#include <linux/security.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/rcupdate.h>
#include <linux/moduleparam.h>
#include <linux/kallsyms.h>
#include <linux/writeback.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/cgroup.h>
#include <linux/efi.h>
#include <linux/tick.h>
#include <linux/interrupt.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/rmap.h>
#include <linux/mempolicy.h>
#include <linux/key.h>
#include <linux/buffer_head.h>
#include <linux/page_cgroup.h>
#include <linux/debug_locks.h>
#include <linux/debugobjects.h>
#include <linux/lockdep.h>
#include <linux/kmemleak.h>
#include <linux/pid_namespace.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/idr.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/kmemcheck.h>
#include <linux/sfi.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/file.h>
#include <linux/ptrace.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/sched_clock.h>
#include <linux/random.h>
#include <asm/io.h>
#include <asm/bugs.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/smp.h>
#endif
#ifdef CONFIG_HTC_EARLY_RTB
#include <linux/msm_rtb.h>
#endif
/* First user-context kernel thread; defined later in this file. */
static int kernel_init(void *);

/* Early-init entry points implemented elsewhere in the kernel. */
extern void init_IRQ(void);
extern void fork_init(unsigned long);
extern void mca_init(void);
extern void sbus_init(void);
extern void radix_tree_init(void);
#ifndef CONFIG_DEBUG_RODATA
/* No rodata write-protection configured: make the call a no-op. */
static inline void mark_rodata_ro(void) { }
#endif
#ifdef CONFIG_TC
extern void tc_init(void);
#endif
/* True between early boot and local_irq_enable() in start_kernel(). */
bool early_boot_irqs_disabled __read_mostly;
/* Global boot progress state (SYSTEM_BOOTING, SYSTEM_RUNNING, ...). */
enum system_states system_state __read_mostly;
EXPORT_SYMBOL(system_state);
/* Caps on the argument/environment slots handed to init. */
#define MAX_INIT_ARGS CONFIG_INIT_ENV_ARG_LIMIT
#define MAX_INIT_ENVS CONFIG_INIT_ENV_ARG_LIMIT
extern void time_init(void);
/* Set by arch code when timer init must run after other late setup. */
void (*__initdata late_time_init)(void);
extern void softirq_init(void);
/* Untouched command line saved for future reference (boot copy). */
char __initdata boot_command_line[COMMAND_LINE_SIZE];
/* Untouched saved command line (e.g. for /proc/cmdline). */
char *saved_command_line;
/* NOTE(review): non-upstream field; presumably holds a hashed variant
 * of the command line - confirm against this tree's users. */
char *hashed_command_line;
/* Command line for parameter parsing (parse_args scratch copy). */
static char *static_command_line;
/* "init=" override and "rdinit=" ramdisk override, if given. */
static char *execute_command;
static char *ramdisk_execute_command;
/* If set, request device resets during driver probe (see below). */
unsigned int reset_devices;
EXPORT_SYMBOL(reset_devices);
/*
 * "reset_devices" boot flag: ask drivers to reset the underlying
 * hardware during probe.  Returns 1 to mark the option as consumed.
 */
static int __init set_reset_devices(char *str)
{
	reset_devices = 1;
	return 1;
}
__setup("reset_devices", set_reset_devices);
/* argv/envp passed to the init process; filled from unknown boot options. */
static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
/* Deferred panic: message format and offending parameter, reported
 * once the console is up. */
static const char *panic_later, *panic_param;
/* Linker-provided bounds of the __setup() parameter table. */
extern const struct obs_kernel_param __setup_start[], __setup_end[];
/*
 * Check @line against parameters registered with __setup(). Returns
 * non-zero if the line was handled here (or already consumed as an
 * early param), so the caller stops treating it as unknown.
 */
static int __init obsolete_checksetup(char *line)
{
	const struct obs_kernel_param *p;
	int had_early_param = 0;

	p = __setup_start;
	do {
		int n = strlen(p->str);
		if (parameqn(line, p->str, n)) {
			if (p->early) {
				/* Already handled in parse_early_param?
				 * Only an exact "name" or "name=" match
				 * counts; keep iterating since an early
				 * param and a __setup may share a name. */
				if (line[n] == '\0' || line[n] == '=')
					had_early_param = 1;
			} else if (!p->setup_func) {
				pr_warn("Parameter %s is obsolete, ignored\n",
					p->str);
				return 1;
			} else if (p->setup_func(line + n))
				return 1;
		}
		p++;
	} while (p < __setup_end);
	return had_early_param;
}
unsigned long loops_per_jiffy = (1<<12);
EXPORT_SYMBOL(loops_per_jiffy);
/* "debug": raise the console loglevel to 10 so everything is printed. */
static int __init debug_kernel(char *str)
{
	console_loglevel = 10;
	return 0;
}
/* "quiet": loglevel 4 — only warnings and worse reach the console. */
static int __init quiet_kernel(char *str)
{
	console_loglevel = 4;
	return 0;
}
early_param("debug", debug_kernel);
early_param("quiet", quiet_kernel);
/* "loglevel=N": set the console log level explicitly at boot. */
static int __init loglevel(char *str)
{
	int newlevel;

	/* Guard clause: reject anything that doesn't parse as an int. */
	if (!get_option(&str, &newlevel))
		return -EINVAL;

	console_loglevel = newlevel;
	return 0;
}
early_param("loglevel", loglevel);
/*
 * parse_args() NUL-terminates the "param" part and may strip quotes,
 * leaving @val pointing just past it. Re-join the pair into a single
 * "param=value" string in place so it can be handed to init verbatim.
 */
static int __init repair_env_string(char *param, char *val, const char *unused)
{
	if (val) {
		/* param=val: the '=' was replaced by '\0' */
		if (val == param+strlen(param)+1)
			val[-1] = '=';
		/* param="val": both '=' and the opening quote were dropped */
		else if (val == param+strlen(param)+2) {
			val[-2] = '=';
			memmove(val-1, val, strlen(val)+1);
			val--;
		} else
			BUG();
	}
	return 0;
}
/*
 * Unknown boot options get handed to init, unless they look like
 * unused module parameters (modprobe finds them in /proc/cmdline).
 */
static int __init unknown_bootoption(char *param, char *val, const char *unused)
{
	repair_env_string(param, val, unused);

	/* Handle obsolete-style (__setup) parameters */
	if (obsolete_checksetup(param))
		return 0;

	/* "mod.opt=" style: a module parameter, not ours to consume */
	if (strchr(param, '.') && (!val || strchr(param, '.') < val))
		return 0;

	if (panic_later)
		return 0;

	if (val) {
		/* Environment option: append to envp_init (or replace an
		 * existing setting of the same variable name). */
		unsigned int i;
		for (i = 0; envp_init[i]; i++) {
			if (i == MAX_INIT_ENVS) {
				panic_later = "Too many boot env vars at `%s'";
				panic_param = param;
			}
			if (!strncmp(param, envp_init[i], val - param))
				break;
		}
		envp_init[i] = param;
	} else {
		/* Command line option: append to argv_init. */
		unsigned int i;
		for (i = 0; argv_init[i]; i++) {
			if (i == MAX_INIT_ARGS) {
				panic_later = "Too many boot init vars at `%s'";
				panic_param = param;
			}
		}
		argv_init[i] = param;
	}
	return 0;
}
/* "init=/path": override the binary exec'd as PID 1. */
static int __init init_setup(char *str)
{
	unsigned int i;
	execute_command = str;
	/* Discard any arguments collected before "init=": some boot
	 * loaders prepend words (e.g. "auto") that would otherwise be
	 * passed through to init as bogus argv entries. */
	for (i = 1; i < MAX_INIT_ARGS; i++)
		argv_init[i] = NULL;
	return 1;
}
__setup("init=", init_setup);
/* "rdinit=/path": override the binary run from the initramfs. */
static int __init rdinit_setup(char *str)
{
	unsigned int i;
	ramdisk_execute_command = str;
	/* Same argument reset as init_setup above. */
	for (i = 1; i < MAX_INIT_ARGS; i++)
		argv_init[i] = NULL;
	return 1;
}
__setup("rdinit=", rdinit_setup);
#ifndef CONFIG_SMP
/* UP build: the single CPU is the limit. */
static const unsigned int setup_max_cpus = NR_CPUS;
#ifdef CONFIG_X86_LOCAL_APIC
/* UP x86 with a local APIC still needs the APIC brought up. */
static void __init smp_init(void)
{
	APIC_init_uniprocessor();
}
#else
#define smp_init() do { } while (0)
#endif
/* No-ops on UP: nothing to count or prepare. */
static inline void setup_nr_cpu_ids(void) { }
static inline void smp_prepare_cpus(unsigned int maxcpus) { }
#endif
/*
 * Keep two permanent copies of the command line: saved_command_line
 * (the untouched boot line) and static_command_line (a scratch copy
 * that is re-parsed per initcall level). Allocated from bootmem since
 * the slab allocator is not up yet.
 */
static void __init setup_command_line(char *command_line)
{
	saved_command_line = alloc_bootmem(strlen (boot_command_line)+1);
	static_command_line = alloc_bootmem(strlen (command_line)+1);
	strcpy (saved_command_line, boot_command_line);
	strcpy (static_command_line, command_line);
}
#define RAW_SN_LEN 4	/* trailing serial-number chars left readable */

/*
 * Build hashed_command_line: a copy of saved_command_line in which the
 * value of "androidboot.serialno=" is masked with '*' (all but the last
 * RAW_SN_LEN characters) whenever a "td.sf=" flag with a non-'0' value
 * is present. The masked copy is what start_kernel() prints, keeping
 * the device serial number out of the kernel log.
 *
 * Fixes vs. the previous version:
 *  - guard the size_t subtractions in the scan bounds: a command line
 *    shorter than a tag used to wrap (size_t is unsigned) and scan far
 *    past the buffer;
 *  - an empty serial value (sn_len == 0) no longer underflows the
 *    unsigned mask counter, which used to overwrite the whole buffer
 *    (and beyond) with '*';
 *  - a tag ending exactly at the end of the string is now found (the
 *    old '<' bound stopped one position short).
 */
static void __init hash_sn(void)
{
	static const char sf_tag[] = "td.sf=";
	static const char sn_tag[] = "androidboot.serialno=";
	const size_t sf_len = sizeof(sf_tag) - 1;
	const size_t sn_tag_len = sizeof(sn_tag) - 1;
	size_t cmdline_len = strlen(saved_command_line);
	bool td_sf = false;
	char *p;

	hashed_command_line = alloc_bootmem(cmdline_len + 1);
	memcpy(hashed_command_line, saved_command_line, cmdline_len + 1);

	if (cmdline_len < sf_len)	/* avoid unsigned wrap below */
		return;

	/* Does the command line carry "td.sf=<non-zero>"? */
	for (p = saved_command_line;
	     p <= saved_command_line + cmdline_len - sf_len; p++) {
		if (!strncmp(p, sf_tag, sf_len)) {
			if (p[sf_len] != '0')
				td_sf = true;
			break;
		}
	}

	if (!td_sf || cmdline_len < sn_tag_len)
		return;

	/* Mask the serial number in the copy only. */
	for (p = hashed_command_line;
	     p <= hashed_command_line + cmdline_len - sn_tag_len; p++) {
		size_t sn_len = 0;

		if (strncmp(p, sn_tag, sn_tag_len))
			continue;

		p += sn_tag_len;
		while (p[sn_len] != ' ' && p[sn_len] != '\0')
			sn_len++;
		/* '>' (not '>=') keeps sn_len == 0 from underflowing */
		while (sn_len > RAW_SN_LEN) {
			*p++ = '*';
			sn_len--;
		}
		break;
	}
}
static __initdata DECLARE_COMPLETION(kthreadd_done);
/*
 * Spawn PID 1 (kernel_init) and PID 2 (kthreadd), then turn the boot
 * thread into the idle task. Not __init itself: the idle loop entered
 * at the bottom never returns.
 *
 * Fix: the sched_setscheduler_nocheck() call had its "&param" argument
 * corrupted by an HTML-entity mangling (rendered as a pilcrow + "m"),
 * which cannot compile; the address-of expression is restored.
 */
static noinline void __init_refok rest_init(void)
{
	int pid;
	const struct sched_param param = { .sched_priority = 1 };

	rcu_scheduler_starting();
	/*
	 * kernel_init() is spawned first so it gets PID 1; it blocks on
	 * kthreadd_done before creating kthreads, avoiding a race with
	 * kthreadd's creation below.
	 */
	kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
	numa_default_policy();
	pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
	rcu_read_lock();
	kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
	rcu_read_unlock();
	/* Vendor change: run kthreadd as SCHED_FIFO prio 1. */
	sched_setscheduler_nocheck(kthreadd_task, SCHED_FIFO, &param);
	complete(&kthreadd_done);

	/*
	 * The boot idle thread must execute schedule() at least once to
	 * get things moving, then enter the idle loop for good.
	 */
	init_idle_bootup_task(current);
	schedule_preempt_disabled();
	cpu_startup_entry(CPUHP_ONLINE);
}
/*
 * parse_args() callback running handlers registered via early_param().
 * Also routes "console=" to an "earlycon" handler if one is registered.
 * Malformed options are reported but never fatal this early.
 */
static int __init do_early_param(char *param, char *val, const char *unused)
{
	const struct obs_kernel_param *p;

	for (p = __setup_start; p < __setup_end; p++) {
		if ((p->early && parameq(param, p->str)) ||
		    (strcmp(param, "console") == 0 &&
		     strcmp(p->str, "earlycon") == 0)
		) {
			if (p->setup_func(val) != 0)
				pr_warn("Malformed early option '%s'\n", param);
		}
	}
	/* We accept everything at this stage. */
	return 0;
}
/* Run the early_param() handlers against an arbitrary command line. */
void __init parse_early_options(char *cmdline)
{
	parse_args("early options", cmdline, NULL, 0, 0, 0, do_early_param);
}

/*
 * Scan the boot command line for early params. Idempotent — arch code
 * may call it several times but the work happens once. Parses a copy
 * because parse_args() modifies the string in place.
 */
void __init parse_early_param(void)
{
#if defined(CONFIG_EARLY_PRINTK)
	void deferred_early_console_init(void);
#endif
	static __initdata int done = 0;
	static __initdata char tmp_cmdline[COMMAND_LINE_SIZE];

	if (done)
		return;

	strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
	parse_early_options(tmp_cmdline);
	done = 1;
#if defined(CONFIG_EARLY_PRINTK)
	/* Vendor hook — presumably brings up the early console once the
	 * early params (e.g. its address) are parsed; confirm in the
	 * early-printk implementation. */
	deferred_early_console_init();
#endif
}
/*
 * Activate the first processor: mark the booting CPU in every CPU mask
 * so both SMP and UP code paths see it as present and online.
 */
static void __init boot_cpu_init(void)
{
	int me = smp_processor_id();

	set_cpu_online(me, true);
	set_cpu_active(me, true);
	set_cpu_present(me, true);
	set_cpu_possible(me, true);
}
/* Weak default: arches without CPU numbering needs do nothing here. */
void __init __weak smp_setup_processor_id(void)
{
}

# if THREAD_SIZE >= PAGE_SIZE
/* Weak default: no dedicated thread_info cache needed at page size. */
void __init __weak thread_info_cache_init(void)
{
}
#endif
/*
 * Set up kernel memory allocators. Ordering is required: page_cgroup's
 * flat-memory setup must precede mem_init() releasing bootmem to the
 * buddy allocator, and the slab must exist before vmalloc_init().
 */
static void __init mm_init(void)
{
	page_cgroup_init_flatmem();
	mem_init();
	kmem_cache_init();
	percpu_init_late();
	pgtable_cache_init();
	vmalloc_init();
}
/*
 * Architecture-independent kernel entry point: brings the kernel up
 * step by step. The ordering below is load-bearing — nearly every call
 * depends on state established by the ones before it.
 */
asmlinkage void __init start_kernel(void)
{
	char * command_line;
	extern const struct kernel_param __start___param[], __stop___param[];

	lockdep_init();
	smp_setup_processor_id();
	debug_objects_early_init();

	cgroup_init_early();

	local_irq_disable();
	early_boot_irqs_disabled = true;

	boot_cpu_init();
	page_address_init();
	pr_notice("%s", linux_banner);
	setup_arch(&command_line);
	/* Canary must exist before any stack-protected function runs. */
	boot_init_stack_canary();
	mm_init_owner(&init_mm, &init_task);
	mm_init_cpumask(&init_mm);
	setup_command_line(command_line);
	/* Vendor addition: build the serial-number-masked log copy. */
	hash_sn();
	setup_nr_cpu_ids();
	setup_per_cpu_areas();
	smp_prepare_boot_cpu();

	build_all_zonelists(NULL, NULL);
	page_alloc_init();

	/* Deliberately prints the masked copy, not saved_command_line. */
	pr_notice("Kernel command line: %s\n", hashed_command_line);
	parse_early_param();
	parse_args("Booting kernel", static_command_line, __start___param,
		   __stop___param - __start___param,
		   -1, -1, &unknown_bootoption);

	jump_label_init();

	setup_log_buf(0);
	pidhash_init();
	vfs_caches_init_early();
	sort_main_extable();
	trap_init();
	mm_init();

	sched_init();
	/* Preemption stays off until the first schedule() in rest_init. */
	preempt_disable();
	if (WARN(!irqs_disabled(), "Interrupts were enabled *very* early, fixing it\n"))
		local_irq_disable();
	idr_init_cache();
	perf_event_init();
	rcu_init();
	tick_nohz_init();
	radix_tree_init();
	/* Init some structs, then enable interrupts once timers work. */
	early_irq_init();
	init_IRQ();
	tick_init();
	init_timers();
	hrtimers_init();
	softirq_init();
	timekeeping_init();
	time_init();
	sched_clock_postinit();
	profile_init();
	call_function_init();
	WARN(!irqs_disabled(), "Interrupts were enabled early\n");
	early_boot_irqs_disabled = false;
	local_irq_enable();

	kmem_cache_init_late();

	/* Console output only becomes possible from this point on. */
	console_init();
	if (panic_later)
		panic(panic_later, panic_param);

	lockdep_info();
	locking_selftest();

#ifdef CONFIG_BLK_DEV_INITRD
	/* Drop an initrd the bootloader placed over kernel memory. */
	if (initrd_start && !initrd_below_start_ok &&
	    page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
		pr_crit("initrd overwritten (0x%08lx < 0x%08lx) - disabling it.\n",
		    page_to_pfn(virt_to_page((void *)initrd_start)),
		    min_low_pfn);
		initrd_start = 0;
	}
#endif
	page_cgroup_init();
	debug_objects_mem_init();
	kmemleak_init();
	setup_per_cpu_pageset();
	numa_policy_init();
	if (late_time_init)
		late_time_init();
	sched_clock_init();
	calibrate_delay();
	pidmap_init();
	anon_vma_init();
#ifdef CONFIG_X86
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_enter_virtual_mode();
#endif
#ifdef CONFIG_X86_ESPFIX64
	init_espfix_bsp();
#endif
	thread_info_cache_init();
	cred_init();
	fork_init(totalram_pages);
	proc_caches_init();
	buffer_init();
	key_init();
	security_init();
	dbg_late_init();
	vfs_caches_init(totalram_pages);
	signals_init();
	page_writeback_init();
#ifdef CONFIG_PROC_FS
	proc_root_init();
#endif
	cgroup_init();
	cpuset_init();
	taskstats_init_early();
	delayacct_init();

	check_bugs();

	acpi_early_init();
	sfi_init_late();

	if (efi_enabled(EFI_RUNTIME_SERVICES)) {
		efi_late_init();
		efi_free_boot_services();
	}

	ftrace_init();

	/* Do the rest non-__init'ed; we are now alive. */
	rest_init();
}
/* Run the compiler-emitted (constructor-attribute) initializers. */
static void __init do_ctors(void)
{
#ifdef CONFIG_CONSTRUCTORS
	ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;

	for (; fn < (ctor_fn_t *) __ctors_end; fn++)
		(*fn)();
#endif
}
bool initcall_debug;
core_param(initcall_debug, initcall_debug, bool, 0644);
static char msgbuf[64];
/*
 * Timed wrapper around a single initcall, used when initcall_debug is
 * set: logs entry, the return value and the elapsed time.
 */
static int __init_or_module do_one_initcall_debug(initcall_t fn)
{
	ktime_t calltime, delta, rettime;
	unsigned long long duration;
	int ret;

	pr_debug("calling %pF @ %i\n", fn, task_pid_nr(current));
	calltime = ktime_get();
	ret = fn();
	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	/* ns >> 10 — approximate microseconds, cheap to compute */
	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
	pr_debug("initcall %pF returned %d after %lld usecs\n",
		 fn, ret, duration);

	return ret;
}
/*
 * Run one initcall and sanity-check the state it leaves behind: a
 * changed preempt count or disabled interrupts indicate a buggy
 * initcall; both are repaired and reported via WARN.
 */
int __init_or_module do_one_initcall(initcall_t fn)
{
	int count = preempt_count();
	int ret;

#ifdef CONFIG_HTC_EARLY_RTB
	/* Vendor trace hook bracketing the initcall in the RTB log. */
	uncached_logk_pc(LOGK_INITCALL, (void *)fn, (void *)(0x00000000));
#endif
	if (initcall_debug)
		ret = do_one_initcall_debug(fn);
	else
		ret = fn();
#ifdef CONFIG_HTC_EARLY_RTB
	uncached_logk_pc(LOGK_INITCALL, (void *)fn, (void *)(0xffffffff));
#endif

	msgbuf[0] = 0;

	if (preempt_count() != count) {
		sprintf(msgbuf, "preemption imbalance ");
		/* preempt_count() was an assignable lvalue in this
		 * kernel generation; restore the saved count. */
		preempt_count() = count;
	}
	if (irqs_disabled()) {
		strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
		local_irq_enable();
	}
	WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);

	return ret;
}
extern initcall_t __initcall_start[];
extern initcall_t __initcall0_start[];
extern initcall_t __initcall1_start[];
extern initcall_t __initcall2_start[];
extern initcall_t __initcall3_start[];
extern initcall_t __initcall4_start[];
extern initcall_t __initcall5_start[];
extern initcall_t __initcall6_start[];
extern initcall_t __initcall7_start[];
extern initcall_t __initcall_end[];
static initcall_t *initcall_levels[] __initdata = {
__initcall0_start,
__initcall1_start,
__initcall2_start,
__initcall3_start,
__initcall4_start,
__initcall5_start,
__initcall6_start,
__initcall7_start,
__initcall_end,
};
static char *initcall_level_names[] __initdata = {
"early",
"core",
"postcore",
"arch",
"subsys",
"fs",
"device",
"late",
};
/*
 * Run every initcall in one level. The command line is re-parsed first
 * so module parameters destined for this level are applied (and the
 * "param=value" strings repaired) before the calls run.
 */
static void __init do_initcall_level(int level)
{
	extern const struct kernel_param __start___param[], __stop___param[];
	initcall_t *fn;

	strcpy(static_command_line, saved_command_line);
	parse_args(initcall_level_names[level],
		   static_command_line, __start___param,
		   __stop___param - __start___param,
		   level, level,
		   &repair_env_string);

	for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
		do_one_initcall(*fn);
}
/* Walk every initcall level in order (last table entry is a sentinel). */
static void __init do_initcalls(void)
{
	int lvl;

	for (lvl = 0; lvl < ARRAY_SIZE(initcall_levels) - 1; lvl++)
		do_initcall_level(lvl);
}
/*
 * The machine is initialized and the CPU subsystem is up; no devices
 * have been touched yet. Bring up drivers and run all initcalls.
 */
static void __init do_basic_setup(void)
{
	cpuset_init_smp();
	usermodehelper_init();
	shmem_init();
	driver_init();
	init_irq_proc();
	do_ctors();
	usermodehelper_enable();
	do_initcalls();
	random_int_secret_init();
}
/* Run the "early" initcalls — those that must precede SMP bring-up. */
static void __init do_pre_smp_initcalls(void)
{
	initcall_t *fn;

	for (fn = __initcall_start; fn < __initcall0_start; fn++)
		do_one_initcall(*fn);
}
/* Load built-in default modules (currently only the I/O elevator). */
void __init load_default_modules(void)
{
	load_default_elevator_module();
}

/*
 * Try to exec @init_filename as PID 1. Returns only on failure; on
 * success do_execve() replaces this kernel thread's image.
 */
static int run_init_process(const char *init_filename)
{
	argv_init[0] = init_filename;
	return do_execve(init_filename,
		(const char __user *const __user *)argv_init,
		(const char __user *const __user *)envp_init);
}
static noinline void __init kernel_init_freeable(void);
/*
 * PID 1's kernel-side body: finish freeable init work, drop __init
 * memory, then exec userspace init — ramdisk init first, then "init=",
 * then the conventional fallback locations.
 */
static int __ref kernel_init(void *unused)
{
	kernel_init_freeable();
	/* All async __init code must finish before freeing init memory. */
	async_synchronize_full();
	free_initmem();
	mark_rodata_ro();
	system_state = SYSTEM_RUNNING;
	numa_default_policy();
	flush_delayed_fput();

	if (ramdisk_execute_command) {
		if (!run_init_process(ramdisk_execute_command))
			return 0;
		pr_err("Failed to execute %s\n", ramdisk_execute_command);
	}

	if (execute_command) {
		if (!run_init_process(execute_command))
			return 0;
		pr_err("Failed to execute %s. Attempting defaults...\n",
			execute_command);
	}
	if (!run_init_process("/sbin/init") ||
	    !run_init_process("/etc/init") ||
	    !run_init_process("/bin/init") ||
	    !run_init_process("/bin/sh"))
		return 0;

	panic("No init found. Try passing init= option to kernel. "
	      "See Linux Documentation/init.txt for guidance.");
}
/*
 * The part of PID 1 setup that lives in __init memory: waits for
 * kthreadd, brings up secondary CPUs, runs all initcalls, opens the
 * initial console and decides which init binary will be exec'd.
 */
static noinline void __init kernel_init_freeable(void)
{
	/* Wait until kthreadd is fully set up (see rest_init()). */
	wait_for_completion(&kthreadd_done);

	/* The scheduler is up; blocking allocations are allowed now. */
	gfp_allowed_mask = __GFP_BITS_MASK;

	/* init can allocate pages on any node and run on any CPU. */
	set_mems_allowed(node_states[N_MEMORY]);
	set_cpus_allowed_ptr(current, cpu_all_mask);

	cad_pid = task_pid(current);

	smp_prepare_cpus(setup_max_cpus);

	do_pre_smp_initcalls();
	lockup_detector_init();

	smp_init();
#ifdef CONFIG_HTC_EARLY_RTB
	/* Vendor (HTC) RTB trace hook — exact purpose not visible here. */
	htc_early_rtb_init();
#endif
	sched_init_smp();

	do_basic_setup();

	/* Open /dev/console on the rootfs; fds 0/1/2 become the console. */
	if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
		pr_err("Warning: unable to open an initial console.\n");

	(void) sys_dup(0);	/* stdout */
	(void) sys_dup(0);	/* stderr */

	if (!ramdisk_execute_command)
		ramdisk_execute_command = "/init";

	/* No ramdisk init present: mount the real root instead. */
	if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
		ramdisk_execute_command = NULL;
		prepare_namespace();
	}

	load_default_modules();
}
|
flar2/ElementalX-m9
|
init/main.c
|
C
|
gpl-2.0
| 18,082
|
/* Festalon - NSF Player
* Copyright (C) 2004 Xodnizel
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <string.h>
#include <stdlib.h>
#include "../types.h"
#include "x6502.h"
#include "cart.h"
#include "memory.h"
/* 16 are (sort of) reserved for UNIF/iNES and 16 to map other stuff. */
/*
 * Map `s` KiB of CPU address space starting at A onto memory at p, or
 * unmap it when p is NULL. Page[] entries store p biased by -A so a
 * later Page[A>>11][A] lands on the right byte (see CartBR); PRGIsRAM
 * flags whether writes are allowed.
 */
static INLINE void setpageptr(NESCART *ca, int s, uint32 A, uint8 *p, int ram)
{
	uint32 base = A >> 11;
	int i;

	for (i = (s >> 1) - 1; i >= 0; i--) {
		if (p) {
			ca->PRGIsRAM[base + i] = ram;
			ca->Page[base + i] = p - A;
		} else {
			ca->PRGIsRAM[base + i] = 0;
			ca->Page[base + i] = 0;
		}
	}
}
static uint8 nothing[8192];
/* Release a cart context created by FESTAC_Init(). */
void FESTAC_Kill(NESCART *ca)
{
	free(ca);
}
/*
 * Allocate and zero a cart context, pointing every 2KiB CPU bank at
 * the dummy `nothing` page. Returns NULL on allocation failure.
 */
NESCART *FESTAC_Init(void)
{
	NESCART *ca = calloc(1, sizeof(*ca));	/* zeroed, like malloc+memset */
	int i;

	if (!ca)
		return 0;

	for (i = 0; i < 32; i++) {
		/* The -i*2048 bias makes Page[i][A] hit `nothing` for
		   any address A that falls inside bank i. */
		ca->Page[i] = nothing - i * 2048;
		ca->PRGptr[i] = 0;
		ca->PRGsize[i] = 0;
	}
	return ca;
}
/*
 * Register PRG chip @chip: its backing memory, size, RAM flag, and
 * precomputed bank-number masks for each supported bank granularity.
 */
void FESTAC_SetupPRG(NESCART *ca, int chip, uint8 *p, uint32 size, int ram)
{
	ca->PRGptr[chip] = p;
	ca->PRGsize[chip] = size;

	/* (size >> n) banks of 2^n bytes -> mask = bank count - 1 */
	ca->PRGmask2[chip] = (size >> 11) - 1;
	ca->PRGmask4[chip] = (size >> 12) - 1;
	ca->PRGmask8[chip] = (size >> 13) - 1;
	ca->PRGmask16[chip] = (size >> 14) - 1;
	ca->PRGmask32[chip] = (size >> 15) - 1;

	ca->PRGram[chip] = ram != 0;
}
/*
 * CPU bus handlers. DECLFR/DECLFW expand to read/write handler
 * signatures; judging by use, `private` is the NESCART bound to the
 * range, `A` the address, `V` the byte written and `DB` a fallback
 * data byte — confirm against x6502.h. Page[] holds biased pointers,
 * so Page[A>>11][A] indexes the mapped 2KiB bank directly.
 */
DECLFR(CartBR)
{
	NESCART *ca = private;
	return ca->Page[A>>11][A];
}

/* Write handler: lands only if the bank is mapped and flagged as RAM. */
DECLFW(CartBW)
{
	NESCART *ca = private;
	if(ca->PRGIsRAM[A>>11] && ca->Page[A>>11])
		ca->Page[A>>11][A]=V;
}

/* Read handler returning DB ("open bus") when the bank is unmapped. */
DECLFR(CartBROB)
{
	NESCART *ca = private;
	if(!ca->Page[A>>11]) return(DB);
	return ca->Page[A>>11][A];
}
/* Map 2KiB bank V of chip r at address A (V wrapped by the chip mask). */
void FASTAPASS(3) setprg2r(NESCART *ca, int r, unsigned int A, unsigned int V)
{
	V&=ca->PRGmask2[r];
	setpageptr(ca,2,A,ca->PRGptr[r]?(&ca->PRGptr[r][V<<11]):0,ca->PRGram[r]);
}

/* Convenience wrapper: 2KiB bank from chip 0. */
void FASTAPASS(2) setprg2(NESCART *ca, uint32 A, uint32 V)
{
	setprg2r(ca,0,A,V);
}

/* Map 4KiB bank V of chip r at address A. */
void FASTAPASS(3) setprg4r(NESCART *ca, int r, unsigned int A, unsigned int V)
{
	V&=ca->PRGmask4[r];
	setpageptr(ca,4,A,ca->PRGptr[r]?(&ca->PRGptr[r][V<<12]):0,ca->PRGram[r]);
}

/* Convenience wrapper: 4KiB bank from chip 0. */
void FASTAPASS(2) setprg4(NESCART *ca, uint32 A, uint32 V)
{
	setprg4r(ca,0,A,V);
}
/*
 * Map an 8KiB bank V of chip r at A. Chips smaller than 8KiB are
 * mirrored by composing the mapping out of mask-wrapped 2KiB sub-banks.
 */
void FASTAPASS(3) setprg8r(NESCART *ca, int r, unsigned int A, unsigned int V)
{
	if(ca->PRGsize[r]>=8192)
	{
		V&=ca->PRGmask8[r];
		setpageptr(ca,8,A,ca->PRGptr[r]?(&ca->PRGptr[r][V<<13]):0,ca->PRGram[r]);
	}
	else
	{
		/* VA = index of the first 2KiB sub-bank (4 per 8KiB) */
		uint32 VA=V<<2;
		int x;
		for(x=0;x<4;x++)
			setpageptr(ca,2,A+(x<<11),ca->PRGptr[r]?(&ca->PRGptr[r][((VA+x)&ca->PRGmask2[r])<<11]):0,ca->PRGram[r]);
	}
}

/* Convenience wrapper: 8KiB bank from chip 0. */
void FASTAPASS(2) setprg8(NESCART *ca, uint32 A, uint32 V)
{
	setprg8r(ca,0,A,V);
}
/*
 * Map a 16KiB bank V of chip r at A; small chips are mirrored via
 * eight mask-wrapped 2KiB sub-banks (same scheme as setprg8r).
 */
void FASTAPASS(3) setprg16r(NESCART *ca, int r, unsigned int A, unsigned int V)
{
	if(ca->PRGsize[r]>=16384)
	{
		V&=ca->PRGmask16[r];
		setpageptr(ca,16,A,ca->PRGptr[r]?(&ca->PRGptr[r][V<<14]):0,ca->PRGram[r]);
	}
	else
	{
		/* VA = index of the first 2KiB sub-bank (8 per 16KiB) */
		uint32 VA=V<<3;
		int x;
		for(x=0;x<8;x++)
			setpageptr(ca,2,A+(x<<11),ca->PRGptr[r]?(&ca->PRGptr[r][((VA+x)&ca->PRGmask2[r])<<11]):0,ca->PRGram[r]);
	}
}

/* Convenience wrapper: 16KiB bank from chip 0. */
void FASTAPASS(2) setprg16(NESCART *ca, uint32 A, uint32 V)
{
	setprg16r(ca,0,A,V);
}
/*
 * Map a 32KiB bank V of chip r at A; small chips are mirrored via
 * sixteen mask-wrapped 2KiB sub-banks (same scheme as setprg8r).
 */
void FASTAPASS(3) setprg32r(NESCART *ca, int r,unsigned int A, unsigned int V)
{
	if(ca->PRGsize[r]>=32768)
	{
		V&=ca->PRGmask32[r];
		setpageptr(ca,32,A,ca->PRGptr[r]?(&ca->PRGptr[r][V<<15]):0,ca->PRGram[r]);
	}
	else
	{
		/* VA = index of the first 2KiB sub-bank (16 per 32KiB) */
		uint32 VA=V<<4;
		int x;
		for(x=0;x<16;x++)
			setpageptr(ca,2,A+(x<<11),ca->PRGptr[r]?(&ca->PRGptr[r][((VA+x)&ca->PRGmask2[r])<<11]):0,ca->PRGram[r]);
	}
}

/* Convenience wrapper: 32KiB bank from chip 0. */
void FASTAPASS(2) setprg32(NESCART *ca, uint32 A, uint32 V)
{
	setprg32r(ca,0,A,V);
}
|
ahefner/festalon
|
src/nes/cart.c
|
C
|
gpl-2.0
| 4,179
|
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/fs.h>
#include <net/cfg80211.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/addrconf.h>
#include "iwl-modparams.h"
#include "fw-api.h"
#include "mvm.h"
/*
 * mac80211 callback: stash the GTK rekey material (KEK, KCK, replay
 * counter) so the WoWLAN firmware can rekey while the host sleeps.
 * Ignored when hardware crypto is disabled via the sw_crypto modparam.
 */
void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct cfg80211_gtk_rekey_data *data)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (iwlwifi_mod_params.sw_crypto)
		return;

	mutex_lock(&mvm->mutex);
	memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
	memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
	/* cfg80211 hands the counter over big-endian; store it LE for FW */
	mvmvif->rekey_data.replay_ctr =
		cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
	mvmvif->rekey_data.valid = true;
	mutex_unlock(&mvm->mutex);
}
#if IS_ENABLED(CONFIG_IPV6)
/*
 * Record (up to the firmware limit) the interface's IPv6 addresses so
 * they can be fed to the NS-offload configuration before suspending.
 */
void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct inet6_dev *idev)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct inet6_ifaddr *ifa;
	int idx = 0;

	read_lock_bh(&idev->lock);
	list_for_each_entry(ifa, &idev->addr_list, if_list) {
		mvmvif->target_ipv6_addrs[idx] = ifa->addr;
		idx++;
		if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
			break;
	}
	read_unlock_bh(&idev->lock);

	mvmvif->num_target_ipv6_addrs = idx;
}
#endif
/* mac80211 callback: remember which WEP key index is used for TX. */
void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif, int idx)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	mvmvif->tx_key_idx = idx;
}

/* Convert a host-endian TKIP phase-1 key into the LE firmware layout. */
static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out)
{
	int i;

	for (i = 0; i < IWL_P1K_SIZE; i++)
		out[i] = cpu_to_le16(p1k[i]);
}
/* Accumulator threaded through the key-programming iterator below. */
struct wowlan_key_data {
	struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;	/* RX/TX seq counters */
	struct iwl_wowlan_tkip_params_cmd *tkip;	/* TKIP p1k / MIC keys */
	bool error, use_rsc_tsc, use_tkip;
	int wep_key_idx;	/* next free HW offset for group WEP keys */
};
/*
 * Per-key worker (shape matches a mac80211 key-iterator callback —
 * confirm against the caller): translate each installed key into the
 * firmware's WoWLAN structures.
 *  - WEP keys are uploaded immediately via a WEP_KEY command;
 *  - TKIP/CCMP keys contribute their TX/RX sequence counters (plus,
 *    for TKIP, phase-1 and MIC keys) into *data for later upload;
 *  - CMAC (IGTK) keys are deliberately skipped;
 *  - any other cipher sets data->error.
 * Pairwise keys are re-installed at HW offset 0 and group keys at
 * offset 1, since the D3 firmware hardcodes those slots.
 */
static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta,
					struct ieee80211_key_conf *key,
					void *_data)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct wowlan_key_data *data = _data;
	struct aes_sc *aes_sc, *aes_tx_sc = NULL;
	struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
	struct iwl_p1k_cache *rx_p1ks;
	u8 *rx_mic_key;
	struct ieee80211_key_seq seq;
	u32 cur_rx_iv32 = 0;
	u16 p1k[IWL_P1K_SIZE];
	int ret, i;

	mutex_lock(&mvm->mutex);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */
		struct {
			struct iwl_mvm_wep_key_cmd wep_key_cmd;
			struct iwl_mvm_wep_key wep_key;
		} __packed wkc = {
			.wep_key_cmd.mac_id_n_color =
				cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
								mvmvif->color)),
			.wep_key_cmd.num_keys = 1,
			/* firmware sets STA_KEY_FLG_WEP_13BYTES */
			.wep_key_cmd.decryption_type = STA_KEY_FLG_WEP,
			.wep_key.key_index = key->keyidx,
			.wep_key.key_size = key->keylen,
		};

		/*
		 * This will fail -- the key functions don't set support
		 * pairwise WEP keys. However, that's better than silently
		 * failing WoWLAN. Or maybe not?
		 */
		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
			break;

		/* key material starts at offset 3 within the FW struct */
		memcpy(&wkc.wep_key.key[3], key->key, key->keylen);
		if (key->keyidx == mvmvif->tx_key_idx) {
			/* TX key must be at offset 0 */
			wkc.wep_key.key_offset = 0;
		} else {
			/* others start at 1 */
			data->wep_key_idx++;
			wkc.wep_key.key_offset = data->wep_key_idx;
		}

		ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC,
					   sizeof(wkc), &wkc);
		data->error = ret != 0;

		mvm->ptk_ivlen = key->iv_len;
		mvm->ptk_icvlen = key->icv_len;
		mvm->gtk_ivlen = key->iv_len;
		mvm->gtk_icvlen = key->icv_len;

		/* don't upload key again */
		goto out_unlock;
	}
	default:
		data->error = true;
		goto out_unlock;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		/*
		 * Ignore CMAC keys -- the WoWLAN firmware doesn't support them
		 * but we also shouldn't abort suspend due to that. It does have
		 * support for the IGTK key renewal, but doesn't really use the
		 * IGTK for anything. This means we could spuriously wake up or
		 * be deauthenticated, but that was considered acceptable.
		 */
		goto out_unlock;
	case WLAN_CIPHER_SUITE_TKIP:
		if (sta) {
			tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
			tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;

			rx_p1ks = data->tkip->rx_uni;

			ieee80211_get_key_tx_seq(key, &seq);
			tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
			tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);

			ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
			iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);

			memcpy(data->tkip->mic_keys.tx,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			rx_mic_key = data->tkip->mic_keys.rx_unicast;
		} else {
			tkip_sc =
				data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
			rx_p1ks = data->tkip->rx_multi;
			rx_mic_key = data->tkip->mic_keys.rx_mcast;
		}

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211 use TID 0 (as they need to to avoid replay attacks)
		 * for checking the IV in the frames.
		 */
		for (i = 0; i < IWL_NUM_RSC; i++) {
			ieee80211_get_key_rx_seq(key, i, &seq);
			tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
			tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
			/* wrapping isn't allowed, AP must rekey */
			if (seq.tkip.iv32 > cur_rx_iv32)
				cur_rx_iv32 = seq.tkip.iv32;
		}

		/* phase-1 keys for the current and the next iv32 */
		ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
					  cur_rx_iv32, p1k);
		iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
		ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
					  cur_rx_iv32 + 1, p1k);
		iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);

		memcpy(rx_mic_key,
		       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
		       IWL_MIC_KEY_SIZE);

		data->use_tkip = true;
		data->use_rsc_tsc = true;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		if (sta) {
			/* pn aliases seq.ccmp.pn, filled by the call below */
			u8 *pn = seq.ccmp.pn;

			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
			aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;

			ieee80211_get_key_tx_seq(key, &seq);
			aes_tx_sc->pn = cpu_to_le64((u64)pn[5] |
						    ((u64)pn[4] << 8) |
						    ((u64)pn[3] << 16) |
						    ((u64)pn[2] << 24) |
						    ((u64)pn[1] << 32) |
						    ((u64)pn[0] << 40));
		} else {
			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
		}

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211 use TID 0 for checking the IV in the frames.
		 *
		 * NOTE(review): every iteration writes aes_sc->pn, i.e.
		 * element 0, unlike the TKIP loop above which indexes
		 * tkip_sc[i]. Should this be aes_sc[i].pn? All entries
		 * past the first stay zero as written — compare upstream.
		 */
		for (i = 0; i < IWL_NUM_RSC; i++) {
			u8 *pn = seq.ccmp.pn;

			ieee80211_get_key_rx_seq(key, i, &seq);
			aes_sc->pn = cpu_to_le64((u64)pn[5] |
						 ((u64)pn[4] << 8) |
						 ((u64)pn[3] << 16) |
						 ((u64)pn[2] << 24) |
						 ((u64)pn[1] << 32) |
						 ((u64)pn[0] << 40));
		}
		data->use_rsc_tsc = true;
		break;
	}

	/*
	 * The D3 firmware hardcodes the key offset 0 as the key it uses
	 * to transmit packets to the AP, i.e. the PTK.
	 */
	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		key->hw_key_idx = 0;
		mvm->ptk_ivlen = key->iv_len;
		mvm->ptk_icvlen = key->icv_len;
	} else {
		/*
		 * firmware only supports TSC/RSC for a single key,
		 * so if there are multiple keep overwriting them
		 * with new ones -- this relies on mac80211 doing
		 * list_add_tail().
		 */
		key->hw_key_idx = 1;
		mvm->gtk_ivlen = key->iv_len;
		mvm->gtk_icvlen = key->icv_len;
	}

	ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true);
	data->error = ret != 0;
out_unlock:
	mutex_unlock(&mvm->mutex);
}
/*
 * Upload the user's WoWLAN wakeup patterns to the firmware. The
 * variable-sized command is allocated here, handed to the transport
 * with NOCOPY, and freed after the synchronous send completes.
 */
static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
				 struct cfg80211_wowlan *wowlan)
{
	struct iwl_wowlan_patterns_cmd *pattern_cmd;
	struct iwl_host_cmd cmd = {
		.id = WOWLAN_PATTERNS,
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
		.flags = CMD_SYNC,
	};
	int i, err;

	if (!wowlan->n_patterns)
		return 0;

	cmd.len[0] = sizeof(*pattern_cmd) +
		wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern);

	pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
	if (!pattern_cmd)
		return -ENOMEM;

	pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);

	for (i = 0; i < wowlan->n_patterns; i++) {
		/* the mask carries one bit per pattern byte */
		int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);

		memcpy(&pattern_cmd->patterns[i].mask,
		       wowlan->patterns[i].mask, mask_len);
		memcpy(&pattern_cmd->patterns[i].pattern,
		       wowlan->patterns[i].pattern,
		       wowlan->patterns[i].pattern_len);
		pattern_cmd->patterns[i].mask_size = mask_len;
		pattern_cmd->patterns[i].pattern_size =
			wowlan->patterns[i].pattern_len;
	}

	cmd.data[0] = pattern_cmd;
	err = iwl_mvm_send_cmd(mvm, &cmd);
	kfree(pattern_cmd);
	return err;
}
/*
 * Build and send PROT_OFFLOAD_CONFIG_CMD: ARP and IPv6 neighbour
 * solicitation offload for the firmware, choosing among four command
 * layouts (v1/v2/v3-small/v3-large) based on uCode capability flags.
 * Returns 0 without sending when nothing would be enabled.
 */
static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
				      struct ieee80211_vif *vif)
{
	union {
		struct iwl_proto_offload_cmd_v1 v1;
		struct iwl_proto_offload_cmd_v2 v2;
		struct iwl_proto_offload_cmd_v3_small v3s;
		struct iwl_proto_offload_cmd_v3_large v3l;
	} cmd = {};
	struct iwl_host_cmd hcmd = {
		.id = PROT_OFFLOAD_CONFIG_CMD,
		.flags = CMD_SYNC,
		.data[0] = &cmd,
		.dataflags[0] = IWL_HCMD_DFL_DUP,
	};
	struct iwl_proto_offload_cmd_common *common;
	u32 enabled = 0, size;
	u32 capa_flags = mvm->fw->ucode_capa.flags;
#if IS_ENABLED(CONFIG_IPV6)
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int i;

	if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
	    capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
		/* v3 layouts: NS configs plus a target-address list */
		struct iwl_ns_config *nsc;
		struct iwl_targ_addr *addrs;
		int n_nsc, n_addrs;
		int c;

		if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
			nsc = cmd.v3s.ns_config;
			n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
			addrs = cmd.v3s.targ_addrs;
			n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
		} else {
			nsc = cmd.v3l.ns_config;
			n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
			addrs = cmd.v3l.targ_addrs;
			n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
		}

		if (mvmvif->num_target_ipv6_addrs)
			enabled |= IWL_D3_PROTO_OFFLOAD_NS;

		/*
		 * For each address we have (and that will fit) fill a target
		 * address struct and combine for NS offload structs with the
		 * solicited node addresses.
		 */
		for (i = 0, c = 0;
		     i < mvmvif->num_target_ipv6_addrs &&
		     i < n_addrs && c < n_nsc; i++) {
			struct in6_addr solicited_addr;
			int j;

			addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
						  &solicited_addr);
			/* NS configs are deduplicated on the
			 * solicited-node address */
			for (j = 0; j < c; j++)
				if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
						  &solicited_addr) == 0)
					break;
			if (j == c)
				c++;
			addrs[i].addr = mvmvif->target_ipv6_addrs[i];
			addrs[i].config_num = cpu_to_le32(j);
			nsc[j].dest_ipv6_addr = solicited_addr;
			memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
		}

		if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
			cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
		else
			cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
		/* v2 layout: flat list of up to the V2 address limit */
		if (mvmvif->num_target_ipv6_addrs) {
			enabled |= IWL_D3_PROTO_OFFLOAD_NS;
			memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
		}

		BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
			     sizeof(mvmvif->target_ipv6_addrs[0]));

		for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
				    IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
			memcpy(cmd.v2.target_ipv6_addr[i],
			       &mvmvif->target_ipv6_addrs[i],
			       sizeof(cmd.v2.target_ipv6_addr[i]));
	} else {
		/* v1 layout (oldest firmware) */
		if (mvmvif->num_target_ipv6_addrs) {
			enabled |= IWL_D3_PROTO_OFFLOAD_NS;
			memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
		}

		BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
			     sizeof(mvmvif->target_ipv6_addrs[0]));

		for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
				    IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
			memcpy(cmd.v1.target_ipv6_addr[i],
			       &mvmvif->target_ipv6_addrs[i],
			       sizeof(cmd.v1.target_ipv6_addr[i]));
	}
#endif

	if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
		common = &cmd.v3s.common;
		size = sizeof(cmd.v3s);
	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
		common = &cmd.v3l.common;
		size = sizeof(cmd.v3l);
	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
		common = &cmd.v2.common;
		size = sizeof(cmd.v2);
	} else {
		common = &cmd.v1.common;
		size = sizeof(cmd.v1);
	}

	/* ARP offload: only the first registered IPv4 address is used */
	if (vif->bss_conf.arp_addr_cnt) {
		enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
		common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
		memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
	}

	if (!enabled)
		return 0;

	common->enabled = cpu_to_le32(enabled);

	hcmd.len[0] = size;
	return iwl_mvm_send_cmd(mvm, &hcmd);
}
/*
 * Roles of the template packets programmed into the firmware's
 * remote-wake TCP engine (TX = sent by us, RX = matched on receive).
 */
enum iwl_mvm_tcp_packet_type {
	MVM_TCP_TX_SYN,		/* SYN we transmit to open the connection */
	MVM_TCP_RX_SYNACK,	/* SYN/ACK expected back from the server */
	MVM_TCP_TX_DATA,	/* keepalive data packet we transmit */
	MVM_TCP_RX_ACK,		/* ACK expected for the keepalive */
	MVM_TCP_RX_WAKE,	/* incoming packet that should wake the host */
	MVM_TCP_TX_FIN,		/* FIN we transmit to tear the session down */
};
/*
 * Compute the TCP pseudo-header checksum over (saddr, daddr, len).
 * tcp_v4_check() returns a __sum16 (big-endian on the wire); the
 * firmware interface wants the same 16-bit value as little-endian,
 * hence the __force reinterpretation followed by the byte swap.
 */
static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr)
{
	__sum16 check = tcp_v4_check(len, saddr, daddr, 0);
	return cpu_to_le16(be16_to_cpu((__force __be16)check));
}
/*
 * Build one Ethernet+IPv4+TCP template packet for the firmware's
 * remote-wake TCP engine, and (for RX packet types) the byte-match
 * mask the firmware applies to incoming frames.  Also computes the
 * TCP pseudo-header checksum for the packet.
 *
 * @vif:    interface whose MAC address is the local address
 * @tcp:    cfg80211 remote-wake TCP configuration
 * @_pkt:   destination buffer for the packet template
 * @mask:   RX match-mask buffer (1 bit per packet byte); only written
 *          for MVM_TCP_RX_* types, may be NULL for TX types
 * @pseudo_hdr_csum: out: pseudo-header checksum for this packet
 * @ptype:  which packet of the session to build
 */
static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif,
				     struct cfg80211_wowlan_tcp *tcp,
				     void *_pkt, u8 *mask,
				     __le16 *pseudo_hdr_csum,
				     enum iwl_mvm_tcp_packet_type ptype)
{
	struct {
		struct ethhdr eth;
		struct iphdr ip;
		struct tcphdr tcp;
		u8 data[];
	} __packed *pkt = _pkt;
	/* header-only length; payload/options are added per type below */
	u16 ip_tot_len = sizeof(struct iphdr) + sizeof(struct tcphdr);
	int i;

	pkt->eth.h_proto = cpu_to_be16(ETH_P_IP),
	pkt->ip.version = 4;
	pkt->ip.ihl = 5;
	pkt->ip.protocol = IPPROTO_TCP;

	/* first pass: addresses and ports, direction-dependent */
	switch (ptype) {
	case MVM_TCP_TX_SYN:
	case MVM_TCP_TX_DATA:
	case MVM_TCP_TX_FIN:
		memcpy(pkt->eth.h_dest, tcp->dst_mac, ETH_ALEN);
		memcpy(pkt->eth.h_source, vif->addr, ETH_ALEN);
		pkt->ip.ttl = 128;
		pkt->ip.saddr = tcp->src;
		pkt->ip.daddr = tcp->dst;
		pkt->tcp.source = cpu_to_be16(tcp->src_port);
		pkt->tcp.dest = cpu_to_be16(tcp->dst_port);
		/* overwritten for TX SYN later */
		pkt->tcp.doff = sizeof(struct tcphdr) / 4;
		pkt->tcp.window = cpu_to_be16(65000);
		break;
	case MVM_TCP_RX_SYNACK:
	case MVM_TCP_RX_ACK:
	case MVM_TCP_RX_WAKE:
		/* RX templates come from the server: everything swapped */
		memcpy(pkt->eth.h_dest, vif->addr, ETH_ALEN);
		memcpy(pkt->eth.h_source, tcp->dst_mac, ETH_ALEN);
		pkt->ip.saddr = tcp->dst;
		pkt->ip.daddr = tcp->src;
		pkt->tcp.source = cpu_to_be16(tcp->dst_port);
		pkt->tcp.dest = cpu_to_be16(tcp->src_port);
		break;
	default:
		WARN_ON(1);
		return;
	}

	/* second pass: TCP flags, options and payload, per packet type */
	switch (ptype) {
	case MVM_TCP_TX_SYN:
		/* firmware assumes 8 option bytes - 8 NOPs for now */
		memset(pkt->data, 0x01, 8);
		ip_tot_len += 8;
		pkt->tcp.doff = (sizeof(struct tcphdr) + 8) / 4;
		pkt->tcp.syn = 1;
		break;
	case MVM_TCP_TX_DATA:
		ip_tot_len += tcp->payload_len;
		memcpy(pkt->data, tcp->payload, tcp->payload_len);
		pkt->tcp.psh = 1;
		pkt->tcp.ack = 1;
		break;
	case MVM_TCP_TX_FIN:
		pkt->tcp.fin = 1;
		pkt->tcp.ack = 1;
		break;
	case MVM_TCP_RX_SYNACK:
		pkt->tcp.syn = 1;
		pkt->tcp.ack = 1;
		break;
	case MVM_TCP_RX_ACK:
		pkt->tcp.ack = 1;
		break;
	case MVM_TCP_RX_WAKE:
		ip_tot_len += tcp->wake_len;
		pkt->tcp.psh = 1;
		pkt->tcp.ack = 1;
		memcpy(pkt->data, tcp->wake_data, tcp->wake_len);
		break;
	}

	/* third pass: checksums (TX) or the RX byte-match mask */
	switch (ptype) {
	case MVM_TCP_TX_SYN:
	case MVM_TCP_TX_DATA:
	case MVM_TCP_TX_FIN:
		pkt->ip.tot_len = cpu_to_be16(ip_tot_len);
		pkt->ip.check = ip_fast_csum(&pkt->ip, pkt->ip.ihl);
		break;
	case MVM_TCP_RX_WAKE:
		/*
		 * The payload starts 50 bytes (= 6*8 + 2) into the frame,
		 * so the user's wake mask has to be shifted into the mask
		 * bitmap by 6 bytes and 2 bits.
		 */
		for (i = 0; i < DIV_ROUND_UP(tcp->wake_len, 8); i++) {
			u8 tmp = tcp->wake_mask[i];
			mask[i + 6] |= tmp << 6;
			if (i + 1 < DIV_ROUND_UP(tcp->wake_len, 8))
				mask[i + 7] = tmp >> 2;
		}
		/* fall through for ethernet/IP/TCP headers mask */
	case MVM_TCP_RX_SYNACK:
	case MVM_TCP_RX_ACK:
		mask[0] = 0xff; /* match ethernet */
		/*
		 * match ethernet, ip.version, ip.ihl
		 * the ip.ihl half byte is really masked out by firmware
		 */
		mask[1] = 0x7f;
		mask[2] = 0x80; /* match ip.protocol */
		mask[3] = 0xfc; /* match ip.saddr, ip.daddr */
		mask[4] = 0x3f; /* match ip.daddr, tcp.source, tcp.dest */
		mask[5] = 0x80; /* match tcp flags */
		/* leave rest (0 or set for MVM_TCP_RX_WAKE) */
		break;
	};

	*pseudo_hdr_csum = pseudo_hdr_check(ip_tot_len - sizeof(struct iphdr),
					    pkt->ip.saddr, pkt->ip.daddr);
}
static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_wowlan_tcp *tcp)
{
struct iwl_wowlan_remote_wake_config *cfg;
struct iwl_host_cmd cmd = {
.id = REMOTE_WAKE_CONFIG_CMD,
.len = { sizeof(*cfg), },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
.flags = CMD_SYNC,
};
int ret;
if (!tcp)
return 0;
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg)
return -ENOMEM;
cmd.data[0] = cfg;
cfg->max_syn_retries = 10;
cfg->max_data_retries = 10;
cfg->tcp_syn_ack_timeout = 1; /* seconds */
cfg->tcp_ack_timeout = 1; /* seconds */
/* SYN (TX) */
iwl_mvm_build_tcp_packet(
vif, tcp, cfg->syn_tx.data, NULL,
&cfg->syn_tx.info.tcp_pseudo_header_checksum,
MVM_TCP_TX_SYN);
cfg->syn_tx.info.tcp_payload_length = 0;
/* SYN/ACK (RX) */
iwl_mvm_build_tcp_packet(
vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
&cfg->synack_rx.info.tcp_pseudo_header_checksum,
MVM_TCP_RX_SYNACK);
cfg->synack_rx.info.tcp_payload_length = 0;
/* KEEPALIVE/ACK (TX) */
iwl_mvm_build_tcp_packet(
vif, tcp, cfg->keepalive_tx.data, NULL,
&cfg->keepalive_tx.info.tcp_pseudo_header_checksum,
MVM_TCP_TX_DATA);
cfg->keepalive_tx.info.tcp_payload_length =
cpu_to_le16(tcp->payload_len);
cfg->sequence_number_offset = tcp->payload_seq.offset;
/* length must be 0..4, the field is little endian */
cfg->sequence_number_length = tcp->payload_seq.len;
cfg->initial_sequence_number = cpu_to_le32(tcp->payload_seq.start);
cfg->keepalive_interval = cpu_to_le16(tcp->data_interval);
if (tcp->payload_tok.len) {
cfg->token_offset = tcp->payload_tok.offset;
cfg->token_length = tcp->payload_tok.len;
cfg->num_tokens =
cpu_to_le16(tcp->tokens_size % tcp->payload_tok.len);
memcpy(cfg->tokens, tcp->payload_tok.token_stream,
tcp->tokens_size);
} else {
/* set tokens to max value to almost never run out */
cfg->num_tokens = cpu_to_le16(65535);
}
/* ACK (RX) */
iwl_mvm_build_tcp_packet(
vif, tcp, cfg->keepalive_ack_rx.data,
cfg->keepalive_ack_rx.rx_mask,
&cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum,
MVM_TCP_RX_ACK);
cfg->keepalive_ack_rx.info.tcp_payload_length = 0;
/* WAKEUP (RX) */
iwl_mvm_build_tcp_packet(
vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
&cfg->wake_rx.info.tcp_pseudo_header_checksum,
MVM_TCP_RX_WAKE);
cfg->wake_rx.info.tcp_payload_length =
cpu_to_le16(tcp->wake_len);
/* FIN */
iwl_mvm_build_tcp_packet(
vif, tcp, cfg->fin_tx.data, NULL,
&cfg->fin_tx.info.tcp_pseudo_header_checksum,
MVM_TCP_TX_FIN);
cfg->fin_tx.info.tcp_payload_length = 0;
ret = iwl_mvm_send_cmd(mvm, &cmd);
kfree(cfg);
return ret;
}
/* Context for the D3 interface iterators (suspend/resume vif lookup) */
struct iwl_d3_iter_data {
	struct iwl_mvm *mvm;
	/* the single associated station vif found, NULL if none */
	struct ieee80211_vif *vif;
	/* set when more than one suitable interface was found */
	bool error;
};
/*
 * Interface iterator: find the single associated, non-P2P station
 * interface.  Flags an error if more than one candidate exists.
 */
static void iwl_mvm_d3_iface_iterator(void *_data, u8 *mac,
				      struct ieee80211_vif *vif)
{
	struct iwl_d3_iter_data *iter = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	/* only client-mode (non-P2P) interfaces are of interest */
	if (vif->type != NL80211_IFTYPE_STATION)
		return;
	if (vif->p2p)
		return;

	/* skip interfaces without an AP station, i.e. not associated */
	if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
		return;

	/* there must be exactly one such interface */
	if (iter->vif) {
		IWL_ERR(iter->mvm, "More than one managed interface active!\n");
		iter->error = true;
		return;
	}

	iter->vif = vif;
}
/*
 * Manually re-add the PHY context, MAC context, binding, AP station
 * and time quota after switching to the D3 firmware image (which
 * starts with a clean context state).
 *
 * Returns 0 on success or a negative error code; a quota-send failure
 * is only logged, not propagated.
 */
static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct ieee80211_sta *ap_sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct ieee80211_chanctx_conf *ctx;
	u8 chains_static, chains_dynamic;
	struct cfg80211_chan_def chandef;
	int ret, i;
	struct iwl_binding_cmd binding_cmd = {};
	struct iwl_time_quota_cmd quota_cmd = {};
	u32 status;

	/* add back the PHY */
	if (WARN_ON(!mvmvif->phy_ctxt))
		return -EINVAL;

	/* copy the channel context under RCU, then drop the lock */
	rcu_read_lock();
	ctx = rcu_dereference(vif->chanctx_conf);
	if (WARN_ON(!ctx)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	chandef = ctx->def;
	chains_static = ctx->rx_chains_static;
	chains_dynamic = ctx->rx_chains_dynamic;
	rcu_read_unlock();

	ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef,
				   chains_static, chains_dynamic);
	if (ret)
		return ret;

	/* add back the MAC */
	mvmvif->uploaded = false;

	if (WARN_ON(!vif->bss_conf.assoc))
		return -EINVAL;
	/* hack: temporarily clear assoc so the MAC is added, not changed */
	vif->bss_conf.assoc = false;
	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
	vif->bss_conf.assoc = true;
	if (ret)
		return ret;

	/* add back binding - XXX refactor? */
	binding_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
						mvmvif->phy_ctxt->color));
	binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	binding_cmd.phy =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
						mvmvif->phy_ctxt->color));
	binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							      mvmvif->color));
	/* only one MAC in this binding; invalidate the other slots */
	for (i = 1; i < MAX_MACS_IN_BINDING; i++)
		binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);

	status = 0;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
					  sizeof(binding_cmd), &binding_cmd,
					  &status);
	if (ret) {
		IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
		return ret;
	}

	if (status) {
		IWL_ERR(mvm, "Binding command failed: %u\n", status);
		return -EIO;
	}

	ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false);
	if (ret)
		return ret;
	rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);

	ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
	if (ret)
		return ret;

	/* and some quota */
	quota_cmd.quotas[0].id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
						mvmvif->phy_ctxt->color));
	quota_cmd.quotas[0].quota = cpu_to_le32(100);
	quota_cmd.quotas[0].max_duration = cpu_to_le32(1000);

	for (i = 1; i < MAX_BINDINGS; i++)
		quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);

	/*
	 * Fixed mangled source: the second argument of sizeof()'s sibling
	 * was corrupted to `"a_cmd` (HTML-escaped `&quota_cmd`).
	 */
	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
				   sizeof(quota_cmd), &quota_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send quota: %d\n", ret);

	return 0;
}
/*
 * Query the firmware for the non-QoS TX sequence counter.
 *
 * Returns the sequence number (>= 0) on success or a negative error.
 * With the D3 continuity API the firmware reports the *next* value to
 * use, so 0x10 (one full sequence-number step) is subtracted to match
 * the last-used convention of the older API.
 */
static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_nonqos_seq_query_cmd query_cmd = {
		.get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
		.mac_id_n_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							mvmvif->color)),
	};
	struct iwl_host_cmd cmd = {
		.id = NON_QOS_TX_COUNTER_CMD,
		.flags = CMD_SYNC | CMD_WANT_SKB,
	};
	int err;
	u32 size;

	/* the query payload only exists in the continuity API */
	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
		cmd.data[0] = &query_cmd;
		cmd.len[0] = sizeof(query_cmd);
	}

	err = iwl_mvm_send_cmd(mvm, &cmd);
	if (err)
		return err;

	size = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
	size -= sizeof(cmd.resp_pkt->hdr);
	if (size < sizeof(__le16)) {
		/* response too short to contain the counter */
		err = -EINVAL;
	} else {
		err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
		/* new API returns next, not last-used seqno */
		if (mvm->fw->ucode_capa.flags &
		    IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
			err = (u16) (err - 0x10);
	}

	iwl_free_resp(&cmd);
	return err;
}
/*
 * Push the saved non-QoS TX sequence counter back to the firmware
 * after resuming from D3.  No-op when there is no valid saved value
 * (e.g. during a plain firmware restart) or when the firmware lacks
 * the D3 continuity API.
 */
void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_nonqos_seq_query_cmd query_cmd = {
		.get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
		.mac_id_n_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							mvmvif->color)),
		.value = cpu_to_le16(mvmvif->seqno),
	};
	int err;

	/* return if called during restart, not resume from D3 */
	if (!mvmvif->seqno_valid)
		return;

	/* consume the saved value regardless of firmware capability */
	mvmvif->seqno_valid = false;

	if (!(mvm->fw->ucode_capa.flags &
	      IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API))
		return;

	err = iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, CMD_SYNC,
				   sizeof(query_cmd), &query_cmd);
	if (err)
		IWL_ERR(mvm, "failed to set non-QoS seqno\n");
}
/*
 * Configure the device for WoWLAN and enter D3.
 *
 * Finds the single associated station interface, switches to the D3
 * firmware image, manually reprograms PHY/MAC/binding/station (the D3
 * firmware hardcodes the AP station at ID 0), uploads keys, wakeup
 * filters, protocol offloads and the remote-wake TCP configuration,
 * and finally sends D3_CONFIG_CMD to switch the firmware state.
 *
 * Returns 0 on success, 1 when no suitable vif exists (caller treats
 * this as "suspend without WoWLAN"), or a negative error code (in
 * which case the hardware is restarted).
 */
static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
			     struct cfg80211_wowlan *wowlan,
			     bool test)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_d3_iter_data suspend_iter_data = {
		.mvm = mvm,
	};
	struct ieee80211_vif *vif;
	struct iwl_mvm_vif *mvmvif;
	struct ieee80211_sta *ap_sta;
	struct iwl_mvm_sta *mvm_ap_sta;
	struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
	struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
	struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
	struct iwl_d3_manager_config d3_cfg_cmd_data = {
		/*
		 * Program the minimum sleep time to 10 seconds, as many
		 * platforms have issues processing a wakeup signal while
		 * still being in the process of suspending.
		 */
		.min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
	};
	struct iwl_host_cmd d3_cfg_cmd = {
		.id = D3_CONFIG_CMD,
		.flags = CMD_SYNC | CMD_WANT_SKB,
		.data[0] = &d3_cfg_cmd_data,
		.len[0] = sizeof(d3_cfg_cmd_data),
	};
	struct wowlan_key_data key_data = {
		.use_rsc_tsc = false,
		.tkip = &tkip_cmd,
		.use_tkip = false,
	};
	int ret, i;
	int len __maybe_unused;
	u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;

	if (!wowlan) {
		/*
		 * mac80211 shouldn't get here, but for D3 test
		 * it doesn't warrant a warning
		 */
		WARN_ON(!test);
		return -EINVAL;
	}

	key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
	if (!key_data.rsc_tsc)
		return -ENOMEM;

	mutex_lock(&mvm->mutex);

	old_aux_sta_id = mvm->aux_sta.sta_id;

	/* see if there's only a single BSS vif and it's associated */
	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_d3_iface_iterator, &suspend_iter_data);

	if (suspend_iter_data.error || !suspend_iter_data.vif) {
		ret = 1;
		goto out_noreset;
	}

	vif = suspend_iter_data.vif;
	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	ap_sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
			lockdep_is_held(&mvm->mutex));
	if (IS_ERR_OR_NULL(ap_sta)) {
		ret = -EINVAL;
		goto out_noreset;
	}

	mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;

	/* TODO: wowlan_config_cmd.wowlan_ba_teardown_tids */

	wowlan_config_cmd.is_11n_connection = ap_sta->ht_cap.ht_supported;

	/* Query the last used seqno and set it */
	ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
	if (ret < 0)
		goto out_noreset;
	wowlan_config_cmd.non_qos_seq = cpu_to_le16(ret);

	/*
	 * For QoS counters, we store the one to use next, so subtract 0x10
	 * since the uCode will add 0x10 *before* using the value while we
	 * increment after using the value (i.e. store the next value to use).
	 */
	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_ap_sta->tid_data[i].seq_number;
		seq -= 0x10;
		wowlan_config_cmd.qos_seq[i] = cpu_to_le16(seq);
	}

	/* translate cfg80211 wakeup triggers into firmware filter bits */
	if (wowlan->disconnect)
		wowlan_config_cmd.wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
				    IWL_WOWLAN_WAKEUP_LINK_CHANGE);
	if (wowlan->magic_pkt)
		wowlan_config_cmd.wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
	if (wowlan->gtk_rekey_failure)
		wowlan_config_cmd.wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
	if (wowlan->eap_identity_req)
		wowlan_config_cmd.wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
	if (wowlan->four_way_handshake)
		wowlan_config_cmd.wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
	if (wowlan->n_patterns)
		wowlan_config_cmd.wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);

	if (wowlan->rfkill_release)
		wowlan_config_cmd.wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);

	if (wowlan->tcp) {
		/*
		 * Set the "link change" (really "link lost") flag as well
		 * since that implies losing the TCP connection.
		 */
		wowlan_config_cmd.wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
				    IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
				    IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
				    IWL_WOWLAN_WAKEUP_LINK_CHANGE);
	}

	iwl_mvm_cancel_scan(mvm);

	iwl_trans_stop_device(mvm->trans);

	/*
	 * The D3 firmware still hardcodes the AP station ID for the
	 * BSS we're associated with as 0. Store the real STA ID here
	 * and assign 0. When we leave this function, we'll restore
	 * the original value for the resume code.
	 */
	old_ap_sta_id = mvm_ap_sta->sta_id;
	mvm_ap_sta->sta_id = 0;
	mvmvif->ap_sta_id = 0;

	/*
	 * Set the HW restart bit -- this is mostly true as we're
	 * going to load new firmware and reprogram that, though
	 * the reprogramming is going to be manual to avoid adding
	 * all the MACs that aren't support.
	 * We don't have to clear up everything though because the
	 * reprogramming is manual. When we resume, we'll actually
	 * go through a proper restart sequence again to switch
	 * back to the runtime firmware image.
	 */
	set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);

	/* We reprogram keys and shouldn't allocate new key indices */
	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));

	/*
	 * Reset both pairwise and group key IV/ICV lengths; the key
	 * programming below fills them in again.  (Fixed: the original
	 * zeroed ptk_ivlen/ptk_icvlen twice and never reset the GTK
	 * lengths, which the resume wake-packet parsing reads.)
	 */
	mvm->ptk_ivlen = 0;
	mvm->ptk_icvlen = 0;
	mvm->gtk_ivlen = 0;
	mvm->gtk_icvlen = 0;

	/*
	 * The D3 firmware still hardcodes the AP station ID for the
	 * BSS we're associated with as 0. As a result, we have to move
	 * the auxiliary station to ID 1 so the ID 0 remains free for
	 * the AP station for later.
	 * We set the sta_id to 1 here, and reset it to its previous
	 * value (that we stored above) later.
	 */
	mvm->aux_sta.sta_id = 1;

	ret = iwl_mvm_load_d3_fw(mvm);
	if (ret)
		goto out;

	ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
	if (ret)
		goto out;

	if (!iwlwifi_mod_params.sw_crypto) {
		/*
		 * This needs to be unlocked due to lock ordering
		 * constraints. Since we're in the suspend path
		 * that isn't really a problem though.
		 */
		mutex_unlock(&mvm->mutex);
		ieee80211_iter_keys(mvm->hw, vif,
				    iwl_mvm_wowlan_program_keys,
				    &key_data);
		mutex_lock(&mvm->mutex);
		if (key_data.error) {
			ret = -EIO;
			goto out;
		}

		if (key_data.use_rsc_tsc) {
			struct iwl_host_cmd rsc_tsc_cmd = {
				.id = WOWLAN_TSC_RSC_PARAM,
				.flags = CMD_SYNC,
				.data[0] = key_data.rsc_tsc,
				.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
				.len[0] = sizeof(*key_data.rsc_tsc),
			};

			ret = iwl_mvm_send_cmd(mvm, &rsc_tsc_cmd);
			if (ret)
				goto out;
		}

		if (key_data.use_tkip) {
			ret = iwl_mvm_send_cmd_pdu(mvm,
						   WOWLAN_TKIP_PARAM,
						   CMD_SYNC, sizeof(tkip_cmd),
						   &tkip_cmd);
			if (ret)
				goto out;
		}

		if (mvmvif->rekey_data.valid) {
			memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
			memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
			       NL80211_KCK_LEN);
			kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
			memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
			       NL80211_KEK_LEN);
			kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
			kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;

			ret = iwl_mvm_send_cmd_pdu(mvm,
						   WOWLAN_KEK_KCK_MATERIAL,
						   CMD_SYNC,
						   sizeof(kek_kck_cmd),
						   &kek_kck_cmd);
			if (ret)
				goto out;
		}
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION,
				   CMD_SYNC, sizeof(wowlan_config_cmd),
				   &wowlan_config_cmd);
	if (ret)
		goto out;

	ret = iwl_mvm_send_patterns(mvm, wowlan);
	if (ret)
		goto out;

	ret = iwl_mvm_send_proto_offload(mvm, vif);
	if (ret)
		goto out;

	ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp);
	if (ret)
		goto out;

	ret = iwl_mvm_power_update_device_mode(mvm);
	if (ret)
		goto out;

	ret = iwl_mvm_power_update_mode(mvm, vif);
	if (ret)
		goto out;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (mvm->d3_wake_sysassert)
		d3_cfg_cmd_data.wakeup_flags |=
			cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
#endif

	/* must be last -- this switches firmware state */
	ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
	if (ret)
		goto out;
#ifdef CONFIG_IWLWIFI_DEBUGFS
	len = le32_to_cpu(d3_cfg_cmd.resp_pkt->len_n_flags) &
		FH_RSCSR_FRAME_SIZE_MSK;
	if (len >= sizeof(u32) * 2) {
		mvm->d3_test_pme_ptr =
			le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
	}
#endif
	iwl_free_resp(&d3_cfg_cmd);

	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);

	iwl_trans_d3_suspend(mvm->trans, test);
 out:
	/* restore the station IDs we temporarily forced to 0/1 above */
	mvm->aux_sta.sta_id = old_aux_sta_id;
	mvm_ap_sta->sta_id = old_ap_sta_id;
	mvmvif->ap_sta_id = old_ap_sta_id;
	if (ret < 0)
		ieee80211_restart_hw(mvm->hw);
 out_noreset:
	kfree(key_data.rsc_tsc);
	mutex_unlock(&mvm->mutex);

	return ret;
}
/* mac80211 suspend callback: real (non-test) WoWLAN entry point */
int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
	return __iwl_mvm_suspend(hw, wowlan, false);
}
/* converted data from the different status responses */
struct iwl_wowlan_status_data {
	u16 pattern_number;		/* index of the matched pattern */
	u16 qos_seq_ctr[8];		/* per-TID QoS sequence counters */
	u32 wakeup_reasons;		/* IWL_WOWLAN_WAKEUP_BY_* bitmap */
	u32 wake_packet_length;		/* original length of wake packet */
	u32 wake_packet_bufsize;	/* bytes actually stored (may be less) */
	const u8 *wake_packet;		/* points into the firmware response */
};
/*
 * Translate the firmware wakeup-reason bitmap into a cfg80211 wakeup
 * report and, when a wake packet is present, convert it (stripping
 * IV/ICV/FCS for data frames) before reporting to mac80211.
 */
static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  struct iwl_wowlan_status_data *status)
{
	struct sk_buff *pkt = NULL;
	struct cfg80211_wowlan_wakeup wakeup = {
		.pattern_idx = -1,
	};
	struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
	u32 reasons = status->wakeup_reasons;

	/* non-wireless wakeup: report "woke up, no wireless reason" */
	if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
		wakeup_report = NULL;
		goto report;
	}

	if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
		wakeup.magic_pkt = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
		wakeup.pattern_idx =
			status->pattern_number;

	if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
		       IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
		wakeup.disconnect = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
		wakeup.gtk_rekey_failure = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
		wakeup.rfkill_release = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
		wakeup.eap_identity_req = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
		wakeup.four_way_handshake = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS)
		wakeup.tcp_connlost = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
		wakeup.tcp_nomoretokens = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
		wakeup.tcp_match = true;

	if (status->wake_packet_bufsize) {
		int pktsize = status->wake_packet_bufsize;
		int pktlen = status->wake_packet_length;
		const u8 *pktdata = status->wake_packet;
		struct ieee80211_hdr *hdr = (void *)pktdata;
		/* bytes of the original packet the firmware did not store */
		int truncated = pktlen - pktsize;

		/* this would be a firmware bug */
		if (WARN_ON_ONCE(truncated < 0))
			truncated = 0;

		if (ieee80211_is_data(hdr->frame_control)) {
			int hdrlen = ieee80211_hdrlen(hdr->frame_control);
			int ivlen = 0, icvlen = 4; /* also FCS */

			pkt = alloc_skb(pktsize, GFP_KERNEL);
			if (!pkt)
				goto report;

			memcpy(skb_put(pkt, hdrlen), pktdata, hdrlen);
			pktdata += hdrlen;
			pktsize -= hdrlen;

			if (ieee80211_has_protected(hdr->frame_control)) {
				/*
				 * This is unlocked and using gtk_i(c)vlen,
				 * but since everything is under RTNL still
				 * that's not really a problem - changing
				 * it would be difficult.
				 */
				if (is_multicast_ether_addr(hdr->addr1)) {
					ivlen = mvm->gtk_ivlen;
					icvlen += mvm->gtk_icvlen;
				} else {
					ivlen = mvm->ptk_ivlen;
					icvlen += mvm->ptk_icvlen;
				}
			}

			/*
			 * If truncated, FCS/ICV is (partially) gone.
			 * Fixed ordering: subtract the ICV length from
			 * the truncation count *before* zeroing it (the
			 * original zeroed first, making the subtraction
			 * a no-op), mirroring the FCS branch below.
			 */
			if (truncated >= icvlen) {
				truncated -= icvlen;
				icvlen = 0;
			} else {
				icvlen -= truncated;
				truncated = 0;
			}

			pktsize -= ivlen + icvlen;
			pktdata += ivlen;

			memcpy(skb_put(pkt, pktsize), pktdata, pktsize);

			if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
				goto report;
			wakeup.packet = pkt->data;
			wakeup.packet_present_len = pkt->len;
			wakeup.packet_len = pkt->len - truncated;
			wakeup.packet_80211 = false;
		} else {
			int fcslen = 4;

			if (truncated >= 4) {
				truncated -= 4;
				fcslen = 0;
			} else {
				fcslen -= truncated;
				truncated = 0;
			}
			pktsize -= fcslen;
			wakeup.packet = status->wake_packet;
			wakeup.packet_present_len = pktsize;
			wakeup.packet_len = pktlen - truncated;
			wakeup.packet_80211 = true;
		}
	}

 report:
	ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
	kfree_skb(pkt);
}
/*
 * Convert a firmware AES (CCMP) security counter into a mac80211
 * key sequence: unpack the 48-bit PN, most significant byte first.
 */
static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
				  struct ieee80211_key_seq *seq)
{
	u64 pn = le64_to_cpu(sc->pn);
	int i;

	for (i = 0; i < 6; i++)
		seq->ccmp.pn[i] = pn >> (40 - i * 8);
}
/* Convert a firmware TKIP security counter into a mac80211 key sequence */
static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
				   struct ieee80211_key_seq *seq)
{
	seq->tkip.iv32 = le32_to_cpu(sc->iv32);
	seq->tkip.iv16 = le16_to_cpu(sc->iv16);
}
/*
 * Program the firmware-reported per-TID CCMP RX packet numbers into
 * mac80211 for the given key.
 */
static void iwl_mvm_set_aes_rx_seq(struct aes_sc *scs,
				   struct ieee80211_key_conf *key)
{
	int i;

	BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);

	for (i = 0; i < IWL_NUM_RSC; i++) {
		struct ieee80211_key_seq seq = {};

		iwl_mvm_aes_sc_to_seq(&scs[i], &seq);
		ieee80211_set_key_rx_seq(key, i, &seq);
	}
}
/*
 * Program the firmware-reported per-TID TKIP RX IVs into mac80211
 * for the given key.
 */
static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
				    struct ieee80211_key_conf *key)
{
	int i;

	BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);

	for (i = 0; i < IWL_NUM_RSC; i++) {
		struct ieee80211_key_seq seq = {};

		iwl_mvm_tkip_sc_to_seq(&scs[i], &seq);
		ieee80211_set_key_rx_seq(key, i, &seq);
	}
}
/*
 * Update mac80211's RX sequence counters for a group key from the
 * multicast counters in the firmware's WoWLAN status response.
 */
static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
				   struct iwl_wowlan_status_v6 *status)
{
	union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		iwl_mvm_set_aes_rx_seq(rsc->aes.multicast_rsc, key);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
		break;
	default:
		/* callers only pass CCMP/TKIP keys; anything else is a bug */
		WARN_ON(1);
	}
}
/* Context for the two-phase GTK key iteration after D3 resume */
struct iwl_mvm_d3_gtk_iter_data {
	struct iwl_wowlan_status_v6 *status;	/* firmware WoWLAN status */
	void *last_gtk;		/* most recently installed GTK (phase 1) */
	u32 cipher;		/* cipher of last_gtk */
	/* find_phase: phase 1 only records keys; phase 2 updates them.
	 * unhandled_cipher: a non-CCMP/TKIP key was found - give up. */
	bool find_phase, unhandled_cipher;
	int num_keys;		/* number of CCMP/TKIP keys seen */
};
/*
 * Key iterator used twice after D3 resume (see find_phase in the
 * iter data): phase 1 records the last GTK and its cipher; phase 2
 * updates sequence counters from the firmware status and removes
 * GTKs that were rekeyed while we slept.
 */
static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_sta *sta,
				   struct ieee80211_key_conf *key,
				   void *_data)
{
	struct iwl_mvm_d3_gtk_iter_data *data = _data;

	if (data->unhandled_cipher)
		return;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		/* ignore WEP completely, nothing to do */
		return;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_TKIP:
		/* we support these */
		break;
	default:
		/* everything else (even CMAC for MFP) - disconnect from AP */
		data->unhandled_cipher = true;
		return;
	}

	data->num_keys++;

	/*
	 * pairwise key - update sequence counters only;
	 * note that this assumes no TDLS sessions are active
	 */
	if (sta) {
		struct ieee80211_key_seq seq = {};
		union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc;

		/* pairwise keys are only touched in phase 2 */
		if (data->find_phase)
			return;

		switch (key->cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
			iwl_mvm_aes_sc_to_seq(&sc->aes.tsc, &seq);
			iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key);
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
			iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
			break;
		}
		ieee80211_set_key_tx_seq(key, &seq);

		/* that's it for this key */
		return;
	}

	/* group key, phase 1: remember it as the latest GTK candidate */
	if (data->find_phase) {
		data->last_gtk = key;
		data->cipher = key->cipher;
		return;
	}

	/* group key, phase 2: rekeyed GTKs are stale, otherwise resync */
	if (data->status->num_of_gtk_rekeys)
		ieee80211_remove_key(key);
	else if (data->last_gtk == key)
		iwl_mvm_set_key_rx_seq(key, data->status);
}
/*
 * Decide whether the connection can be kept after D3 resume and, if
 * so, resynchronize key material and sequence counters with what the
 * firmware did while the host slept (including installing a rekeyed
 * GTK).  Returns true when the connection is kept.
 */
static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  struct iwl_wowlan_status_v6 *status)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_d3_gtk_iter_data gtkdata = {
		.status = status,
	};
	u32 disconnection_reasons =
		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;

	if (!status || !vif->bss_conf.bssid)
		return false;

	if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons)
		return false;

	/*
	 * find last GTK that we used initially, if any
	 * (fixed mangled source: the iterator argument was corrupted to
	 * `>kdata`, i.e. an HTML-escaped `&gtkdata`, in both calls)
	 */
	gtkdata.find_phase = true;
	ieee80211_iter_keys(mvm->hw, vif,
			    iwl_mvm_d3_update_gtks, &gtkdata);
	/* not trying to keep connections with MFP/unhandled ciphers */
	if (gtkdata.unhandled_cipher)
		return false;
	if (!gtkdata.num_keys)
		goto out;
	if (!gtkdata.last_gtk)
		return false;

	/*
	 * invalidate all other GTKs that might still exist and update
	 * the one that we used
	 */
	gtkdata.find_phase = false;
	ieee80211_iter_keys(mvm->hw, vif,
			    iwl_mvm_d3_update_gtks, &gtkdata);

	if (status->num_of_gtk_rekeys) {
		struct ieee80211_key_conf *key;
		struct {
			struct ieee80211_key_conf conf;
			u8 key[32];
		} conf = {
			.conf.cipher = gtkdata.cipher,
			.conf.keyidx = status->gtk.key_index,
		};

		switch (gtkdata.cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
			conf.conf.keylen = WLAN_KEY_LEN_CCMP;
			memcpy(conf.conf.key, status->gtk.decrypt_key,
			       WLAN_KEY_LEN_CCMP);
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			conf.conf.keylen = WLAN_KEY_LEN_TKIP;
			memcpy(conf.conf.key, status->gtk.decrypt_key, 16);
			/* leave TX MIC key zeroed, we don't use it anyway */
			memcpy(conf.conf.key +
			       NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
			       status->gtk.tkip_mic_key, 8);
			break;
		}

		key = ieee80211_gtk_rekey_add(vif, &conf.conf);
		if (IS_ERR(key))
			return false;
		iwl_mvm_set_key_rx_seq(key, status);
	}

	if (status->num_of_gtk_rekeys) {
		__be64 replay_ctr =
			cpu_to_be64(le64_to_cpu(status->replay_ctr));
		ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
					   (void *)&replay_ctr, GFP_KERNEL);
	}

out:
	mvmvif->seqno_valid = true;
	/* +0x10 because the set API expects next-to-use, not last-used */
	mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;

	return true;
}
/*
 * Query the D3 firmware for wakeup reasons and status, report them to
 * mac80211, and decide whether the connection can be kept.
 *
 * Releases the MVM mutex in all paths (required because the mac80211
 * calls made after parsing the status must run unlocked).
 * Returns true when the connection should be kept after resume.
 */
static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif)
{
	u32 base = mvm->error_event_table;
	struct error_table_start {
		/* cf. struct iwl_error_event_table */
		u32 valid;
		u32 error_id;
	} err_info;
	struct iwl_host_cmd cmd = {
		.id = WOWLAN_GET_STATUSES,
		.flags = CMD_SYNC | CMD_WANT_SKB,
	};
	struct iwl_wowlan_status_data status;
	struct iwl_wowlan_status_v6 *status_v6;
	int ret, len, status_size, i;
	bool keep;
	struct ieee80211_sta *ap_sta;
	struct iwl_mvm_sta *mvm_ap_sta;

	/* check the error table first: a firmware error ends the game */
	iwl_trans_read_mem_bytes(mvm->trans, base,
				 &err_info, sizeof(err_info));

	if (err_info.valid) {
		IWL_INFO(mvm, "error table is valid (%d)\n",
			 err_info.valid);
		if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
			struct cfg80211_wowlan_wakeup wakeup = {
				.rfkill_release = true,
			};
			ieee80211_report_wowlan_wakeup(vif, &wakeup,
						       GFP_KERNEL);
		}
		goto out_unlock;
	}

	/* only for tracing for now */
	ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
	if (ret)
		IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm, "failed to query status (%d)\n", ret);
		goto out_unlock;
	}

	/* RF-kill already asserted again... */
	if (!cmd.resp_pkt)
		goto out_unlock;

	/* the response layout depends on the firmware API version */
	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
		status_size = sizeof(struct iwl_wowlan_status_v6);
	else
		status_size = sizeof(struct iwl_wowlan_status_v4);

	len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
	if (len - sizeof(struct iwl_cmd_header) < status_size) {
		IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
		goto out_free_resp;
	}

	/* convert either response version into the common status struct */
	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
		status_v6 = (void *)cmd.resp_pkt->data;

		status.pattern_number = le16_to_cpu(status_v6->pattern_number);
		for (i = 0; i < 8; i++)
			status.qos_seq_ctr[i] =
				le16_to_cpu(status_v6->qos_seq_ctr[i]);
		status.wakeup_reasons = le32_to_cpu(status_v6->wakeup_reasons);
		status.wake_packet_length =
			le32_to_cpu(status_v6->wake_packet_length);
		status.wake_packet_bufsize =
			le32_to_cpu(status_v6->wake_packet_bufsize);
		status.wake_packet = status_v6->wake_packet;
	} else {
		struct iwl_wowlan_status_v4 *status_v4;
		status_v6 = NULL;
		status_v4 = (void *)cmd.resp_pkt->data;

		status.pattern_number = le16_to_cpu(status_v4->pattern_number);
		for (i = 0; i < 8; i++)
			status.qos_seq_ctr[i] =
				le16_to_cpu(status_v4->qos_seq_ctr[i]);
		status.wakeup_reasons = le32_to_cpu(status_v4->wakeup_reasons);
		status.wake_packet_length =
			le32_to_cpu(status_v4->wake_packet_length);
		status.wake_packet_bufsize =
			le32_to_cpu(status_v4->wake_packet_bufsize);
		status.wake_packet = status_v4->wake_packet;
	}

	/* response must exactly hold the status plus the wake packet */
	if (len - sizeof(struct iwl_cmd_header) !=
	    status_size + ALIGN(status.wake_packet_bufsize, 4)) {
		IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
		goto out_free_resp;
	}

	/* still at hard-coded place 0 for D3 image */
	ap_sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[0],
			lockdep_is_held(&mvm->mutex));
	if (IS_ERR_OR_NULL(ap_sta))
		goto out_free_resp;

	mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
		u16 seq = status.qos_seq_ctr[i];
		/* firmware stores last-used value, we store next value */
		seq += 0x10;
		mvm_ap_sta->tid_data[i].seq_number = seq;
	}

	/* now we have all the data we need, unlock to avoid mac80211 issues */
	mutex_unlock(&mvm->mutex);

	iwl_mvm_report_wakeup_reasons(mvm, vif, &status);

	keep = iwl_mvm_setup_connection_keep(mvm, vif, status_v6);

	iwl_free_resp(&cmd);
	return keep;

out_free_resp:
	iwl_free_resp(&cmd);
out_unlock:
	mutex_unlock(&mvm->mutex);
	return false;
}
/*
 * Snapshot the D3 firmware's data SRAM for debugfs inspection.
 * No-op unless the store_d3_resume_sram debugfs knob is set (or when
 * debugfs support is compiled out entirely).
 */
static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
{
#ifdef CONFIG_IWLWIFI_DEBUGFS
	const struct fw_img *wowlan_img = &mvm->fw->img[IWL_UCODE_WOWLAN];
	u32 data_len = wowlan_img->sec[IWL_UCODE_SECTION_DATA].len;
	u32 data_offs = wowlan_img->sec[IWL_UCODE_SECTION_DATA].offset;

	if (!mvm->store_d3_resume_sram)
		return;

	/* allocate the snapshot buffer lazily; reuse it across resumes */
	if (!mvm->d3_resume_sram) {
		mvm->d3_resume_sram = kzalloc(data_len, GFP_KERNEL);
		if (!mvm->d3_resume_sram)
			return;
	}

	iwl_trans_read_mem_bytes(mvm->trans, data_offs,
				 mvm->d3_resume_sram, data_len);
#endif
}
/*
 * Interface iterator run after resume: queue a disconnect on every
 * station interface except the one whose connection was kept (passed
 * as @data, may be NULL to disconnect all).
 */
static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	if (vif->type == NL80211_IFTYPE_STATION && data != vif)
		ieee80211_resume_disconnect(vif);
}
/*
 * Resume from D3 (or the debugfs D3 test): bring the transport back,
 * query the firmware wakeup reasons and disconnect interfaces whose
 * connection was not kept.  Always returns 1 so mac80211 performs a
 * full reconfiguration (switching back to the runtime firmware).
 */
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
	struct iwl_d3_iter_data resume_iter_data = {
		.mvm = mvm,
	};
	struct ieee80211_vif *vif = NULL;
	int ret;
	enum iwl_d3_status d3_status;
	bool keep = false;

	mutex_lock(&mvm->mutex);

	/* get the BSS vif pointer again */
	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_d3_iface_iterator, &resume_iter_data);

	if (WARN_ON(resume_iter_data.error || !resume_iter_data.vif))
		goto out_unlock;

	vif = resume_iter_data.vif;

	ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
	if (ret)
		goto out_unlock;

	if (d3_status != IWL_D3_STATUS_ALIVE) {
		IWL_INFO(mvm, "Device was reset during suspend\n");
		goto out_unlock;
	}

	/* query SRAM first in case we want event logging */
	iwl_mvm_read_d3_sram(mvm);

	/* note: iwl_mvm_query_wakeup_reasons() releases mvm->mutex */
	keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (keep)
		mvm->keep_vif = vif;
#endif
	/* has unlocked the mutex, so skip that */
	goto out;

out_unlock:
	mutex_unlock(&mvm->mutex);

out:
	if (!test)
		ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
			IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);

	/* return 1 to reconfigure the device */
	set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
	return 1;
}
/* mac80211 resume hook: run the full (non-test) D3 resume sequence. */
int iwl_mvm_resume(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	return __iwl_mvm_resume(mvm, false);
}
/* mac80211 hook: propagate the WoWLAN wakeup-enable flag to the device. */
void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	device_set_wakeup_enable(mvm->trans->dev, enabled);
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* debugfs open: enter a simulated (pseudo-D3) suspend for testing. */
static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
{
	struct iwl_mvm *mvm = inode->i_private;
	int ret;

	/* only one simulated D3 cycle at a time */
	if (mvm->d3_test_active)
		return -EBUSY;

	file->private_data = inode->i_private;

	/* quiesce tx before entering pseudo-D3 */
	ieee80211_stop_queues(mvm->hw);
	synchronize_net();

	/* start pseudo D3 */
	rtnl_lock();
	ret = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
	rtnl_unlock();

	/* positive return values are mapped to a generic error code */
	if (ret > 0)
		ret = -EINVAL;
	if (ret) {
		ieee80211_wake_queues(mvm->hw);
		return ret;
	}

	mvm->d3_test_active = true;
	mvm->keep_vif = NULL;
	return 0;
}
/* debugfs read: block (polling) until the firmware asserts its PME word
 * or the caller is interrupted by a signal. Always returns 0 bytes. */
static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct iwl_mvm *mvm = file->private_data;
	u32 pme_asserted;

	for (;;) {
		/* read pme_ptr if available */
		if (mvm->d3_test_pme_ptr) {
			pme_asserted = iwl_trans_read_mem32(mvm->trans,
							    mvm->d3_test_pme_ptr);
			if (pme_asserted)
				break;
		}

		/* non-zero return means a signal interrupted the sleep */
		if (msleep_interruptible(100))
			break;
	}

	return 0;
}
/* Interface iterator: signal connection loss on every station vif other
 * than the one we kept connected (passed via @_data, may be NULL). */
static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
					      struct ieee80211_vif *vif)
{
	/* skip the one we keep connection on */
	if (_data == vif)
		return;

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	ieee80211_connection_loss(vif);
}
/* debugfs release: leave pseudo-D3, restart the hardware, wait for the
 * restart to complete and disconnect all other station interfaces. */
static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
{
	struct iwl_mvm *mvm = inode->i_private;
	int remaining_time = 10;

	mvm->d3_test_active = false;
	__iwl_mvm_resume(mvm, true);
	iwl_abort_notification_waits(&mvm->notif_wait);
	ieee80211_restart_hw(mvm->hw);

	/* wait for restart and disconnect all interfaces */
	while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
	       remaining_time > 0) {
		remaining_time--;
		msleep(1000);
	}

	/*
	 * Report a timeout only if the restart bit is genuinely still set.
	 * Checking remaining_time == 0 (as before) produced a false error
	 * when the restart completed during the final one-second sleep.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n");

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);

	ieee80211_wake_queues(mvm->hw);

	return 0;
}
/* debugfs file implementing a simulated suspend/resume cycle:
 * open enters pseudo-D3, read blocks until wakeup, release resumes. */
const struct file_operations iwl_dbgfs_d3_test_ops = {
	.llseek = no_llseek,
	.open = iwl_mvm_d3_test_open,
	.read = iwl_mvm_d3_test_read,
	.release = iwl_mvm_d3_test_release,
};
#endif
|
freedesktop-unofficial-mirror/tegra__linux
|
drivers/net/wireless/iwlwifi/mvm/d3.c
|
C
|
gpl-2.0
| 53,080
|
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: t; c-basic-offset: 8 -*- */
/*
* Copyright © 2000-2004 Marco Pesenti Gritti
* Copyright © 2009 Collabora Ltd.
* Copyright © 2011 Igalia S.L.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
#include "config.h"
#include "window-commands.h"
#include "ephy-bookmarks-editor.h"
#include "ephy-bookmarks-ui.h"
#include "ephy-debug.h"
#include "ephy-dialog.h"
#include "ephy-embed-container.h"
#include "ephy-embed-prefs.h"
#include "ephy-embed-shell.h"
#include "ephy-embed-single.h"
#include "ephy-embed-utils.h"
#include "ephy-embed.h"
#include "ephy-file-chooser.h"
#include "ephy-file-helpers.h"
#include "ephy-find-toolbar.h"
#include "ephy-gui.h"
#include "ephy-history-window.h"
#include "ephy-link.h"
#include "ephy-location-entry.h"
#include "ephy-notebook.h"
#include "ephy-prefs.h"
#include "ephy-private.h"
#include "ephy-settings.h"
#include "ephy-shell.h"
#include "ephy-state.h"
#include "ephy-string.h"
#include "ephy-web-app-utils.h"
#include "ephy-zoom.h"
#include "pdm-dialog.h"
#include <gio/gio.h>
#include <glib.h>
#include <glib/gi18n.h>
#include <gtk/gtk.h>
#include <libnotify/notify.h>
#include <libsoup/soup.h>
#include <string.h>
#ifdef HAVE_WEBKIT2
#include <webkit2/webkit2.h>
#else
#include <webkit/webkit.h>
#endif
/* "Print" command: print the page shown in the window's active tab. */
void
window_cmd_file_print (GtkAction *action,
		       EphyWindow *window)
{
	EphyEmbed *active_embed;

	active_embed = ephy_embed_container_get_active_child
		(EPHY_EMBED_CONTAINER (window));
	g_return_if_fail (EPHY_IS_EMBED (active_embed));

	ephy_web_view_print (ephy_embed_get_web_view (active_embed));
}
/* "Send link by email" command: build a mailto: URI with the page title
 * as subject and its address as body, and hand it to the URI handler. */
void
window_cmd_file_send_to (GtkAction *action,
			 EphyWindow *window)
{
	EphyEmbed *embed;
	char *mailto_uri, *escaped_title, *escaped_location;
	const char *location, *title;
	GdkScreen *screen;
	GError *error = NULL;

	embed = ephy_embed_container_get_active_child
		(EPHY_EMBED_CONTAINER (window));
	g_return_if_fail (embed != NULL);

	location = ephy_web_view_get_address (ephy_embed_get_web_view (embed));
	title = ephy_web_view_get_title (ephy_embed_get_web_view (embed));

	/* percent-escape both pieces before splicing them into the URI */
	escaped_title = g_uri_escape_string (title, NULL, TRUE);
	escaped_location = g_uri_escape_string (location, NULL, TRUE);
	mailto_uri = g_strconcat ("mailto:",
				  "?Subject=", escaped_title,
				  "&Body=", escaped_location, NULL);
	g_free (escaped_title);
	g_free (escaped_location);

	screen = window ? gtk_widget_get_screen (GTK_WIDGET (window))
			: gdk_screen_get_default ();

	if (!gtk_show_uri (screen, mailto_uri, gtk_get_current_event_time(), &error))
	{
		g_warning ("Unable to send link by email: %s\n", error->message);
		g_error_free (error);
	}

	g_free (mailto_uri);
}
static gboolean
event_with_shift (void)
{
GdkEvent *event;
GdkEventType type = 0;
guint state = 0;
event = gtk_get_current_event ();
if (event)
{
type = event->type;
if (type == GDK_BUTTON_RELEASE)
{
state = event->button.state;
}
else if (type == GDK_KEY_PRESS || type == GDK_KEY_RELEASE)
{
state = event->key.state;
}
gdk_event_free (event);
}
return (state & GDK_SHIFT_MASK) != 0;
}
/* Give keyboard focus to the location (address) entry. */
void
window_cmd_go_location (GtkAction *action,
			EphyWindow *window)
{
	ephy_window_activate_location (window);
}
/* "Stop" command: abort the active tab's current page load. */
void
window_cmd_view_stop (GtkAction *action,
		      EphyWindow *window)
{
	EphyEmbed *active_embed;

	active_embed = ephy_embed_container_get_active_child
		(EPHY_EMBED_CONTAINER (window));
	g_return_if_fail (active_embed != NULL);

	/* keep focus on the view, then cancel the load */
	gtk_widget_grab_focus (GTK_WIDGET (active_embed));

	webkit_web_view_stop_loading (EPHY_GET_WEBKIT_WEB_VIEW_FROM_EMBED (active_embed));
}
/* "Reload" command: reload the active tab; with Shift held, bypass the
 * cache. */
void
window_cmd_view_reload (GtkAction *action,
			EphyWindow *window)
{
	EphyEmbed *embed;
	WebKitWebView *web_view;

	embed = ephy_embed_container_get_active_child
		(EPHY_EMBED_CONTAINER (window));
	g_return_if_fail (embed != NULL);

	gtk_widget_grab_focus (GTK_WIDGET (embed));

	web_view = EPHY_GET_WEBKIT_WEB_VIEW_FROM_EMBED (embed);
	if (!event_with_shift ())
		webkit_web_view_reload (web_view);
	else
		webkit_web_view_reload_bypass_cache (web_view);
}
/* "Bookmark Page" command: bookmark the active tab by its current
 * address and title. */
void
window_cmd_file_bookmark_page (GtkAction *action,
			       EphyWindow *window)
{
	EphyEmbed *embed;
	EphyWebView *web_view;

	embed = ephy_embed_container_get_active_child
		(EPHY_EMBED_CONTAINER (window));
	g_return_if_fail (embed != NULL);

	web_view = ephy_embed_get_web_view (embed);
	ephy_bookmarks_ui_add_bookmark (GTK_WINDOW (window),
					ephy_web_view_get_address (web_view),
					ephy_web_view_get_title (web_view));
}
/* File-chooser response for "Open": on accept, load the chosen file's
 * URI (converted to UTF-8) in the window, then destroy the dialog. */
static void
open_response_cb (GtkDialog *dialog, int response, EphyWindow *window)
{
	if (response == GTK_RESPONSE_ACCEPT)
	{
		char *chosen_uri;

		chosen_uri = gtk_file_chooser_get_uri (GTK_FILE_CHOOSER (dialog));
		if (chosen_uri != NULL)
		{
			char *utf8_uri;

			utf8_uri = g_filename_to_utf8 (chosen_uri, -1, NULL, NULL, NULL);
			if (utf8_uri != NULL)
				ephy_window_load_url (window, utf8_uri);

			g_free (utf8_uri);
			g_free (chosen_uri);
		}
	}

	gtk_widget_destroy (GTK_WIDGET (dialog));
}
/* File-chooser response for "Save As": on accept, save the embed's web
 * view to the chosen destination, then destroy the dialog. */
static void
save_response_cb (GtkDialog *dialog, int response, EphyEmbed *embed)
{
	if (response == GTK_RESPONSE_ACCEPT)
	{
		char *chosen_uri;

		chosen_uri = gtk_file_chooser_get_uri (GTK_FILE_CHOOSER (dialog));
		if (chosen_uri != NULL)
		{
			char *utf8_uri;

			utf8_uri = g_filename_to_utf8 (chosen_uri, -1, NULL, NULL, NULL);
			if (utf8_uri != NULL)
			{
				EphyWebView *web_view = ephy_embed_get_web_view (embed);
				ephy_web_view_save (web_view, utf8_uri);
			}

			g_free (utf8_uri);
			g_free (chosen_uri);
		}
	}

	gtk_widget_destroy (GTK_WIDGET (dialog));
}
/* "Open" command: show a file chooser; the actual load happens in
 * open_response_cb. */
void
window_cmd_file_open (GtkAction *action,
		      EphyWindow *window)
{
	EphyFileChooser *chooser;

	chooser = ephy_file_chooser_new (_("Open"),
					 GTK_WIDGET (window),
					 GTK_FILE_CHOOSER_ACTION_OPEN,
					 EPHY_PREFS_STATE_OPEN_DIR,
					 EPHY_FILE_FILTER_ALL_SUPPORTED);

	g_signal_connect (chooser, "response",
			  G_CALLBACK (open_response_cb), window);

	gtk_widget_show (GTK_WIDGET (chooser));
}
/*
 * Build a default file name for "Save As": "<page title>.html" for HTML
 * documents, otherwise the basename of the main resource's URI.
 * Returns a newly allocated string owned by the caller.
 */
static char *
get_suggested_filename (EphyWebView *view)
{
	char *suggested_filename;
	const char *mimetype;
#ifdef HAVE_WEBKIT2
	WebKitURIResponse *response;
#else
	WebKitWebFrame *frame;
	WebKitWebDataSource *data_source;
#endif
	WebKitWebResource *web_resource;

#ifdef HAVE_WEBKIT2
	/* WebKit2: the MIME type comes from the main resource's response */
	web_resource = webkit_web_view_get_main_resource (WEBKIT_WEB_VIEW (view));
	response = webkit_web_resource_get_response (web_resource);
	mimetype = webkit_uri_response_get_mime_type (response);
#else
	/* WebKit1: reach the main resource via the frame's data source */
	frame = webkit_web_view_get_main_frame (WEBKIT_WEB_VIEW (view));
	data_source = webkit_web_frame_get_data_source (frame);
	web_resource = webkit_web_data_source_get_main_resource (data_source);
	mimetype = webkit_web_resource_get_mime_type (web_resource);
#endif

	if ((g_ascii_strncasecmp (mimetype, "text/html", 9)) == 0)
	{
		/* Web Title will be used as suggested filename*/
		suggested_filename = g_strconcat (ephy_web_view_get_title (view), ".html", NULL);
	}
	else
	{
		/* NOTE(review): assumes the resource URI parses and has a
		 * non-empty path component — verify for exotic URI schemes. */
		SoupURI *soup_uri = soup_uri_new (webkit_web_resource_get_uri (web_resource));
		suggested_filename = g_path_get_basename (soup_uri->path);
		soup_uri_free (soup_uri);
	}

	return suggested_filename;
}
/* "Save As" command: show a save chooser pre-filled with a suggested
 * file name; the actual save happens in save_response_cb. */
void
window_cmd_file_save_as (GtkAction *action,
			 EphyWindow *window)
{
	EphyEmbed *embed;
	EphyFileChooser *chooser;
	char *default_name;

	embed = ephy_embed_container_get_active_child (EPHY_EMBED_CONTAINER (window));
	g_return_if_fail (embed != NULL);

	chooser = ephy_file_chooser_new (_("Save"),
					 GTK_WIDGET (window),
					 GTK_FILE_CHOOSER_ACTION_SAVE,
					 EPHY_PREFS_STATE_SAVE_DIR,
					 EPHY_FILE_FILTER_NONE);

	gtk_file_chooser_set_do_overwrite_confirmation (GTK_FILE_CHOOSER (chooser), TRUE);

	default_name = get_suggested_filename (ephy_embed_get_web_view (embed));
	gtk_file_chooser_set_current_name (GTK_FILE_CHOOSER (chooser), default_name);
	g_free (default_name);

	g_signal_connect (chooser, "response",
			  G_CALLBACK (save_response_cb), embed);

	gtk_widget_show (GTK_WIDGET (chooser));
}
/* State shared by the "Save as Web Application" dialog callbacks. */
typedef struct {
	EphyWebView *view;	/* page being turned into an application */
	GtkWidget *image;	/* icon preview widget */
	GtkWidget *entry;	/* application-name entry */
	GtkWidget *spinner;
	GtkWidget *box;
	char *icon_href;	/* URL of the page's touch icon, if any (owned) */
} EphyApplicationDialogData;
/* Free an EphyApplicationDialogData and the icon URL string it owns. */
static void
ephy_application_dialog_data_free (EphyApplicationDialogData *data)
{
	g_free (data->icon_href);
	g_slice_free (EphyApplicationDialogData, data);
}
/* Fall-back icon: render the top-left 128x128 corner of the page and
 * show it in the dialog's image widget. */
static void
take_page_snapshot_and_set_image (EphyApplicationDialogData *data)
{
	GdkPixbuf *snapshot;
	const int icon_size = 128; /* GNOME hi-res icon size. */

	snapshot = ephy_web_view_get_snapshot (data->view, 0, 0,
					       icon_size, icon_size);
	gtk_image_set_from_pixbuf (GTK_IMAGE (data->image), snapshot);
	g_object_unref (snapshot);
}
#ifdef HAVE_WEBKIT2
/* WebKit2: the icon download finished — display the local file. */
static void
download_finished_cb (WebKitDownload *download,
		      EphyApplicationDialogData *data)
{
	char *local_path;

	local_path = g_filename_from_uri (webkit_download_get_destination (download),
					  NULL, NULL);
	gtk_image_set_from_file (GTK_IMAGE (data->image), local_path);
	g_free (local_path);
}
/* WebKit2: the icon download failed — stop listening for "finished"
 * and fall back to a snapshot of the page. */
static void
download_failed_cb (WebKitDownload *download,
		    GError *error,
		    EphyApplicationDialogData *data)
{
	g_signal_handlers_disconnect_by_func (download, download_finished_cb, data);

	/* Something happened, default to a page snapshot. */
	take_page_snapshot_and_set_image (data);
}
#else
/* WebKit1: watch the icon download's status; show the file on success,
 * fall back to a page snapshot on error or cancellation. */
static void
download_status_changed_cb (WebKitDownload *download,
			    GParamSpec *spec,
			    EphyApplicationDialogData *data)
{
	WebKitDownloadStatus status = webkit_download_get_status (download);

	if (status == WEBKIT_DOWNLOAD_STATUS_FINISHED)
	{
		char *local_path;

		local_path = g_filename_from_uri (webkit_download_get_destination_uri (download),
						  NULL, NULL);
		gtk_image_set_from_file (GTK_IMAGE (data->image), local_path);
		g_free (local_path);
	}
	else if (status == WEBKIT_DOWNLOAD_STATUS_ERROR ||
		 status == WEBKIT_DOWNLOAD_STATUS_CANCELLED)
	{
		/* Something happened, default to a page snapshot. */
		take_page_snapshot_and_set_image (data);
	}
}
#endif
/*
 * Download the page's touch icon (data->icon_href) into a temp file and
 * show it in the dialog when it arrives. Completion is handled by
 * download_finished_cb/download_failed_cb (WebKit2) or by
 * download_status_changed_cb (WebKit1).
 */
static void
download_icon_and_set_image (EphyApplicationDialogData *data)
{
#ifndef HAVE_WEBKIT2
	WebKitNetworkRequest *request;
#endif
	WebKitDownload *download;
	char *destination, *destination_uri, *tmp_filename;

#ifdef HAVE_WEBKIT2
	download = webkit_web_context_download_uri (webkit_web_context_get_default (),
						    data->icon_href);
#else
	request = webkit_network_request_new (data->icon_href);
	download = webkit_download_new (request);
	g_object_unref (request);
#endif

	/* download into a uniquely-named file under the ephemeral tmp dir */
	tmp_filename = ephy_file_tmp_filename ("ephy-download-XXXXXX", NULL);
	destination = g_build_filename (ephy_file_tmp_dir (), tmp_filename, NULL);
	destination_uri = g_filename_to_uri (destination, NULL, NULL);
#ifdef HAVE_WEBKIT2
	webkit_download_set_destination (download, destination_uri);
#else
	webkit_download_set_destination_uri (download, destination_uri);
#endif
	g_free (destination);
	g_free (destination_uri);
	g_free (tmp_filename);

#ifdef HAVE_WEBKIT2
	g_signal_connect (download, "finished",
			  G_CALLBACK (download_finished_cb), data);
	g_signal_connect (download, "failed",
			  G_CALLBACK (download_failed_cb), data);
#else
	g_signal_connect (download, "notify::status",
			  G_CALLBACK (download_status_changed_cb), data);
	/* WebKit1 downloads must be started explicitly */
	webkit_download_start (download);
#endif
}
/*
 * Choose the dialog's application icon: prefer an apple-touch-icon
 * <link> from the page (WebKit1 DOM bindings only); otherwise fall back
 * to a snapshot of the rendered page.
 */
static void
fill_default_application_image (EphyApplicationDialogData *data)
{
#ifdef HAVE_WEBKIT2
	/* TODO: DOM Bindindgs */
#else
	WebKitDOMDocument *document;
	WebKitDOMNodeList *links;
	gulong length, i;

	document = webkit_web_view_get_dom_document (WEBKIT_WEB_VIEW (data->view));
	links = webkit_dom_document_get_elements_by_tag_name (document, "link");
	length = webkit_dom_node_list_get_length (links);

	for (i = 0; i < length; i++)
	{
		char *rel;
		WebKitDOMNode *node = webkit_dom_node_list_item (links, i);
		rel = webkit_dom_html_link_element_get_rel (WEBKIT_DOM_HTML_LINK_ELEMENT (node));
		/* TODO: support more than one possible icon. */
		if (g_strcmp0 (rel, "apple-touch-icon") == 0 ||
		    g_strcmp0 (rel, "apple-touch-icon-precomposed") == 0)
		{
			g_free (rel);
			/* icon_href is owned by data; freed in
			 * ephy_application_dialog_data_free() */
			data->icon_href = webkit_dom_html_link_element_get_href (WEBKIT_DOM_HTML_LINK_ELEMENT (node));
			download_icon_and_set_image (data);
			return;
		}
		/* was only freed in the match branch before, leaking one
		 * string per non-matching <link> element */
		g_free (rel);
	}
#endif

	/* If we make it here, no "apple-touch-icon" link was
	 * found. Take a snapshot of the page. */
	take_page_snapshot_and_set_image (data);
}
/* Pre-fill the application-name entry with the page title.
 * gtk_entry_set_text() requires a non-NULL string, so fall back to the
 * empty string when the view has no title yet. */
static void
fill_default_application_title (EphyApplicationDialogData *data)
{
	const char *title = ephy_web_view_get_title (data->view);

	gtk_entry_set_text (GTK_ENTRY (data->entry), title != NULL ? title : "");
}
/* Notification "Launch" action: start the freshly created web app.
 * @user_data is the .desktop file basename; we own and free it. */
static void
notify_launch_cb (NotifyNotification *notification,
		  char *action,
		  gpointer user_data)
{
	char * desktop_file = user_data;
	/* A gross hack to be able to launch epiphany from within
	 * Epiphany. Might be a good idea to figure out a better
	 * solution... */
	g_unsetenv (EPHY_UUID_ENVVAR);
	ephy_file_launch_desktop_file (desktop_file, NULL, 0, NULL);
	g_free (desktop_file);
}
/* Modal question: should the existing web application named @title be
 * replaced? Cancel is the default answer; returns TRUE on Replace. */
static gboolean
confirm_web_application_overwrite (GtkWindow *parent, const char *title)
{
	GtkWidget *confirm_dialog;
	GtkResponseType answer;

	confirm_dialog = gtk_message_dialog_new (parent, 0,
						 GTK_MESSAGE_QUESTION,
						 GTK_BUTTONS_NONE,
						 _("A web application named '%s' already exists. Do you want to replace it?"),
						 title);
	gtk_dialog_add_buttons (GTK_DIALOG (confirm_dialog),
				_("Cancel"),
				GTK_RESPONSE_CANCEL,
				_("Replace"),
				GTK_RESPONSE_OK,
				NULL);
	gtk_message_dialog_format_secondary_text (GTK_MESSAGE_DIALOG (confirm_dialog),
						  _("An application with the same name already exists. Replacing it will "
						    "overwrite it."));
	gtk_dialog_set_default_response (GTK_DIALOG (confirm_dialog), GTK_RESPONSE_CANCEL);

	answer = gtk_dialog_run (GTK_DIALOG (confirm_dialog));
	gtk_widget_destroy (confirm_dialog);

	return answer == GTK_RESPONSE_OK;
}
/*
 * "Create Web Application" dialog response handler. On OK, creates the
 * application (profile + .desktop file) and shows a notification with a
 * Launch action. If the user declines to overwrite an existing app of
 * the same name, we return early so the dialog stays open for renaming.
 */
static void
dialog_save_as_application_response_cb (GtkDialog *dialog,
					gint response,
					EphyApplicationDialogData *data)
{
	const char *app_name;
	char *desktop_file;
	char *message;
	NotifyNotification *notification;

	if (response == GTK_RESPONSE_OK) {
		app_name = gtk_entry_get_text (GTK_ENTRY (data->entry));

		if (ephy_web_application_exists (app_name))
		{
			if (confirm_web_application_overwrite (GTK_WINDOW (dialog), app_name))
				ephy_web_application_delete (app_name);
			else
				/* keep dialog and data alive for another try */
				return;
		}

		/* Create Web Application, including a new profile and .desktop file. */
		desktop_file = ephy_web_application_create (webkit_web_view_get_uri (WEBKIT_WEB_VIEW (data->view)),
							    app_name,
							    gtk_image_get_pixbuf (GTK_IMAGE (data->image)));
		if (desktop_file)
			message = g_strdup_printf (_("The application '%s' is ready to be used"),
						   app_name);
		else
			message = g_strdup_printf (_("The application '%s' could not be created"),
						   app_name);

		notification = notify_notification_new (message,
							NULL, NULL);
		g_free (message);

		if (desktop_file) {
			/* notify_launch_cb takes ownership of the basename */
			notify_notification_add_action (notification, "launch", _("Launch"),
							(NotifyActionCallback)notify_launch_cb,
							g_path_get_basename (desktop_file),
							NULL);
			notify_notification_set_icon_from_pixbuf (notification, gtk_image_get_pixbuf (GTK_IMAGE (data->image)));
			g_free (desktop_file);
		}

		notify_notification_set_timeout (notification, NOTIFY_EXPIRES_DEFAULT);
		notify_notification_set_urgency (notification, NOTIFY_URGENCY_LOW);
		notify_notification_set_hint (notification, "transient", g_variant_new_boolean (TRUE));
		notify_notification_show (notification, NULL);
	}

	ephy_application_dialog_data_free (data);
	gtk_widget_destroy (GTK_WIDGET (dialog));
}
/*
 * "Save as Web Application" command: show a dialog with an icon preview
 * and a name entry pre-filled from the page; creation happens in
 * dialog_save_as_application_response_cb.
 */
void
window_cmd_file_save_as_application (GtkAction *action,
				     EphyWindow *window)
{
	EphyEmbed *embed;
	GtkWidget *dialog, *box, *image, *entry, *content_area;
	EphyWebView *view;
	EphyApplicationDialogData *data;

	embed = ephy_embed_container_get_active_child (EPHY_EMBED_CONTAINER (window));
	g_return_if_fail (embed != NULL);

	view = EPHY_WEB_VIEW (EPHY_GET_WEBKIT_WEB_VIEW_FROM_EMBED (embed));

	/* Show dialog with icon, title. */
	dialog = gtk_dialog_new_with_buttons (_("Create Web Application"),
					      GTK_WINDOW (window),
					      0,
					      GTK_STOCK_CANCEL,
					      GTK_RESPONSE_CANCEL,
					      _("C_reate"),
					      GTK_RESPONSE_OK,
					      NULL);

	content_area = gtk_dialog_get_content_area (GTK_DIALOG (dialog));
	gtk_container_set_border_width (GTK_CONTAINER (dialog), 5);
	gtk_box_set_spacing (GTK_BOX (content_area), 14); /* 14 + 2 * 5 = 24 */

	box = gtk_box_new (GTK_ORIENTATION_HORIZONTAL, 5);
	gtk_container_add (GTK_CONTAINER (content_area), box);

	image = gtk_image_new ();
	gtk_widget_set_size_request (image, 128, 128);
	gtk_container_add (GTK_CONTAINER (box), image);

	entry = gtk_entry_new ();
	gtk_entry_set_activates_default (GTK_ENTRY (entry), TRUE);
	gtk_box_pack_end (GTK_BOX (box), entry, FALSE, FALSE, 0);

	data = g_slice_new0 (EphyApplicationDialogData);
	data->view = view;
	data->image = image;
	data->entry = entry;

	fill_default_application_image (data);
	fill_default_application_title (data);

	gtk_dialog_set_default_response (GTK_DIALOG (dialog), GTK_RESPONSE_OK);
	g_signal_connect (dialog, "response",
			  G_CALLBACK (dialog_save_as_application_response_cb),
			  data);
	/* The original called gtk_widget_show_all() twice (before and after
	 * connecting the handler); once is enough. */
	gtk_widget_show_all (dialog);
}
/* Toggle handler for "Work Offline". Currently a no-op: the body is
 * disabled because WebKitGTK+ exposes no offline-status API to set. */
void
window_cmd_file_work_offline (GtkAction *action,
			      EphyWindow *window)
{
	/* TODO: WebKitGTK+ does not currently support offline status. */
#if 0
	EphyEmbedSingle *single;
	gboolean offline;
	single = EPHY_EMBED_SINGLE (ephy_embed_shell_get_embed_single (embed_shell));
	offline = gtk_toggle_action_get_active (GTK_TOGGLE_ACTION (action));
	ephy_embed_single_set_network_status (single, !offline);
#endif
}
/* "Close Tab" command: ask the notebook to close the active tab, unless
 * lockdown forbids quitting and this is the window's last tab. */
void
window_cmd_file_close_window (GtkAction *action,
			      EphyWindow *window)
{
	GtkWidget *notebook;
	EphyEmbed *embed;
	gboolean quit_locked;

	notebook = ephy_window_get_notebook (window);

	quit_locked = g_settings_get_boolean (EPHY_SETTINGS_LOCKDOWN,
					      EPHY_PREFS_LOCKDOWN_QUIT);
	if (quit_locked &&
	    gtk_notebook_get_n_pages (GTK_NOTEBOOK (notebook)) == 1)
	{
		return;
	}

	embed = ephy_embed_container_get_active_child (EPHY_EMBED_CONTAINER (window));
	g_return_if_fail (embed != NULL);

	g_signal_emit_by_name (notebook, "tab-close-request", embed);
}
/* "Undo" command: reset the location entry when it has focus, otherwise
 * undo the last edit in the focused web view. */
void
window_cmd_edit_undo (GtkAction *action,
		      EphyWindow *window)
{
	GtkWidget *focused;
	GtkWidget *entry_ancestor;
	GtkWidget *embed_ancestor;

	focused = gtk_window_get_focus (GTK_WINDOW (window));

	entry_ancestor = gtk_widget_get_ancestor (focused, EPHY_TYPE_LOCATION_ENTRY);
	if (entry_ancestor != NULL)
	{
		ephy_location_entry_reset (EPHY_LOCATION_ENTRY (entry_ancestor));
		return;
	}

	embed_ancestor = gtk_widget_get_ancestor (focused, EPHY_TYPE_EMBED);
	if (embed_ancestor != NULL)
	{
#ifdef HAVE_WEBKIT2
		webkit_web_view_execute_editing_command (EPHY_GET_WEBKIT_WEB_VIEW_FROM_EMBED (EPHY_EMBED (embed_ancestor)), "Undo");
#else
		webkit_web_view_undo (EPHY_GET_WEBKIT_WEB_VIEW_FROM_EMBED (EPHY_EMBED (embed_ancestor)));
#endif
	}
}
/* "Redo" command: undo the location entry's reset when it has focus,
 * otherwise redo the last edit in the focused web view. */
void
window_cmd_edit_redo (GtkAction *action,
		      EphyWindow *window)
{
	GtkWidget *focused;
	GtkWidget *entry_ancestor;
	GtkWidget *embed_ancestor;

	focused = gtk_window_get_focus (GTK_WINDOW (window));

	entry_ancestor = gtk_widget_get_ancestor (focused, EPHY_TYPE_LOCATION_ENTRY);
	if (entry_ancestor != NULL)
	{
		ephy_location_entry_undo_reset (EPHY_LOCATION_ENTRY (entry_ancestor));
		return;
	}

	embed_ancestor = gtk_widget_get_ancestor (focused, EPHY_TYPE_EMBED);
	if (embed_ancestor != NULL)
	{
#ifdef HAVE_WEBKIT2
		webkit_web_view_execute_editing_command (EPHY_GET_WEBKIT_WEB_VIEW_FROM_EMBED (EPHY_EMBED (embed_ancestor)), "Redo");
#else
		webkit_web_view_redo (EPHY_GET_WEBKIT_WEB_VIEW_FROM_EMBED (EPHY_EMBED (embed_ancestor)));
#endif
	}
}
/* "Cut" command: editable widgets (e.g. the location bar) cut their own
 * selection; everything else is routed to the active web view. */
void
window_cmd_edit_cut (GtkAction *action,
		     EphyWindow *window)
{
	GtkWidget *focused = gtk_window_get_focus (GTK_WINDOW (window));

	if (!GTK_IS_EDITABLE (focused))
	{
		EphyEmbed *embed;

		embed = ephy_embed_container_get_active_child (EPHY_EMBED_CONTAINER (window));
		g_return_if_fail (embed != NULL);

#ifdef HAVE_WEBKIT2
		webkit_web_view_execute_editing_command (EPHY_GET_WEBKIT_WEB_VIEW_FROM_EMBED (embed), WEBKIT_EDITING_COMMAND_CUT);
#else
		webkit_web_view_cut_clipboard (EPHY_GET_WEBKIT_WEB_VIEW_FROM_EMBED (embed));
#endif
	}
	else
	{
		gtk_editable_cut_clipboard (GTK_EDITABLE (focused));
	}
}
/* "Copy" command: editable widgets copy their own selection; everything
 * else is routed to the active web view. */
void
window_cmd_edit_copy (GtkAction *action,
		      EphyWindow *window)
{
	GtkWidget *focused = gtk_window_get_focus (GTK_WINDOW (window));

	if (!GTK_IS_EDITABLE (focused))
	{
		EphyEmbed *embed;

		embed = ephy_embed_container_get_active_child (EPHY_EMBED_CONTAINER (window));
		g_return_if_fail (embed != NULL);

#ifdef HAVE_WEBKIT2
		webkit_web_view_execute_editing_command (EPHY_GET_WEBKIT_WEB_VIEW_FROM_EMBED (embed), WEBKIT_EDITING_COMMAND_COPY);
#else
		webkit_web_view_copy_clipboard (EPHY_GET_WEBKIT_WEB_VIEW_FROM_EMBED (embed));
#endif
	}
	else
	{
		gtk_editable_copy_clipboard (GTK_EDITABLE (focused));
	}
}
/* "Paste" command: editable widgets paste into themselves; everything
 * else is routed to the active web view. */
void
window_cmd_edit_paste (GtkAction *action,
		       EphyWindow *window)
{
	GtkWidget *focused = gtk_window_get_focus (GTK_WINDOW (window));

	if (!GTK_IS_EDITABLE (focused))
	{
		EphyEmbed *embed;

		embed = ephy_embed_container_get_active_child (EPHY_EMBED_CONTAINER (window));
		g_return_if_fail (embed != NULL);

#ifdef HAVE_WEBKIT2
		webkit_web_view_execute_editing_command (EPHY_GET_WEBKIT_WEB_VIEW_FROM_EMBED (embed), WEBKIT_EDITING_COMMAND_PASTE);
#else
		webkit_web_view_paste_clipboard (EPHY_GET_WEBKIT_WEB_VIEW_FROM_EMBED (embed));
#endif
	}
	else
	{
		gtk_editable_paste_clipboard (GTK_EDITABLE (focused));
	}
}
/* "Delete" command: editable widgets clear their text; the web-view
 * branch is not implemented yet. */
void
window_cmd_edit_delete (GtkAction *action,
			EphyWindow *window)
{
	GtkWidget *focused = gtk_window_get_focus (GTK_WINDOW (window));

	if (!GTK_IS_EDITABLE (focused))
	{
		EphyEmbed *embed;

		embed = ephy_embed_container_get_active_child (EPHY_EMBED_CONTAINER (window));
		g_return_if_fail (embed != NULL);

		/* FIXME: TODO */
#if 0
		ephy_command_manager_do_command (EPHY_COMMAND_MANAGER (embed),
						 "cmd_delete");
#endif
	}
	else
	{
		gtk_editable_delete_text (GTK_EDITABLE (focused), 0, -1);
	}
}
/* "Select All" command: editable widgets select their own text;
 * everything else is routed to the active web view. */
void
window_cmd_edit_select_all (GtkAction *action,
			    EphyWindow *window)
{
	GtkWidget *focused = gtk_window_get_focus (GTK_WINDOW (window));

	if (!GTK_IS_EDITABLE (focused))
	{
		EphyEmbed *embed;

		embed = ephy_embed_container_get_active_child
			(EPHY_EMBED_CONTAINER (window));
		g_return_if_fail (embed != NULL);

#ifdef HAVE_WEBKIT2
		webkit_web_view_execute_editing_command (EPHY_GET_WEBKIT_WEB_VIEW_FROM_EMBED (embed), "SelectAll");
#else
		webkit_web_view_select_all (EPHY_GET_WEBKIT_WEB_VIEW_FROM_EMBED (embed));
#endif
	}
	else
	{
		gtk_editable_select_region (GTK_EDITABLE (focused), 0, -1);
	}
}
/* "Find" command: open the in-page find toolbar. */
void
window_cmd_edit_find (GtkAction *action,
		      EphyWindow *window)
{
	EphyFindToolbar *toolbar;

	toolbar = EPHY_FIND_TOOLBAR (ephy_window_get_find_toolbar (window));
	ephy_find_toolbar_open (toolbar, FALSE, FALSE);
}
/* "Find Next" command: jump to the next match of the current search. */
void
window_cmd_edit_find_next (GtkAction *action,
			   EphyWindow *window)
{
	EphyFindToolbar *toolbar;

	toolbar = EPHY_FIND_TOOLBAR (ephy_window_get_find_toolbar (window));
	ephy_find_toolbar_find_next (toolbar);
}
/* "Find Previous" command: jump to the previous match of the search. */
void
window_cmd_edit_find_prev (GtkAction *action,
			   EphyWindow *window)
{
	EphyFindToolbar *toolbar;

	toolbar = EPHY_FIND_TOOLBAR (ephy_window_get_find_toolbar (window));
	ephy_find_toolbar_find_previous (toolbar);
}
/* "Fullscreen" toggle: mirror the action's state onto the window. */
void
window_cmd_view_fullscreen (GtkAction *action,
			    EphyWindow *window)
{
	gboolean go_fullscreen;

	go_fullscreen = gtk_toggle_action_get_active (GTK_TOGGLE_ACTION (action));
	if (go_fullscreen)
		gtk_window_fullscreen (GTK_WINDOW (window));
	else
		gtk_window_unfullscreen (GTK_WINDOW (window));
}
/* "Zoom In" command: step the window's zoom level up. */
void
window_cmd_view_zoom_in	(GtkAction *action,
			 EphyWindow *window)
{
	ephy_window_set_zoom (window, ZOOM_IN);
}
/* "Zoom Out" command: step the window's zoom level down. */
void
window_cmd_view_zoom_out (GtkAction *action,
			  EphyWindow *window)
{
	ephy_window_set_zoom (window, ZOOM_OUT);
}
/* "Normal Size" command: reset the window's zoom level to 100%. */
void
window_cmd_view_zoom_normal (GtkAction *action,
			     EphyWindow *window)
{
	ephy_window_set_zoom (window, 1.0);
}
/*
 * Show the source of @uri in a new tab appended right after @embed.
 * WebKit1 only: the new view is switched to view-source mode before the
 * URI is loaded; not implemented for WebKit2 yet.
 */
static void
view_source_embedded (const char *uri, EphyEmbed *embed)
{
	EphyEmbed *new_embed;

	new_embed = ephy_shell_new_tab
		(ephy_shell_get_default (),
		 EPHY_WINDOW (gtk_widget_get_toplevel (GTK_WIDGET (embed))),
		 embed,
		 NULL,
		 EPHY_NEW_TAB_JUMP | EPHY_NEW_TAB_IN_EXISTING_WINDOW | EPHY_NEW_TAB_APPEND_AFTER);

#ifdef HAVE_WEBKIT2
	/* TODO: View Source */
#else
	webkit_web_view_set_view_source_mode
		(EPHY_GET_WEBKIT_WEB_VIEW_FROM_EMBED (new_embed), TRUE);
	webkit_web_view_load_uri
		(EPHY_GET_WEBKIT_WEB_VIEW_FROM_EMBED (new_embed), uri);
#endif
}
/*
 * The temp file holding the page source has been closed: launch the
 * user's text/plain handler on it, falling back to an in-browser
 * view-source tab when no handler is available. The URIs and the embed
 * were attached to @ostream by save_temp_source_replace_cb.
 */
static void
save_temp_source_close_cb (GOutputStream *ostream, GAsyncResult *result, gpointer data)
{
	char *temp_uri;
	GFile *file;
	GError *error = NULL;

	g_output_stream_close_finish (ostream, result, &error);
	if (error)
	{
		g_warning ("Unable to close file: %s", error->message);
		g_error_free (error);
		return;
	}

	/* owned by ostream (set with g_free as destroy notify) — don't free */
	temp_uri = (char*)g_object_get_data (G_OBJECT (ostream), "ephy-save-temp-source-uri");

	file = g_file_new_for_uri (temp_uri);
	if (!ephy_file_launch_handler ("text/plain", file, gtk_get_current_event_time ()))
	{
		/* Fallback to view the source inside the browser */
		const char *original_uri; /* was 'uri', shadowing the outer variable */
		EphyEmbed *embed;

		original_uri = (const char*) g_object_get_data (G_OBJECT (ostream),
								"ephy-original-source-uri");
		embed = (EphyEmbed*)g_object_get_data (G_OBJECT (ostream),
						       "ephy-save-temp-source-embed");
		view_source_embedded (original_uri, embed);
	}
	g_object_unref (ostream);
	g_object_unref (file);
}
/*
 * Async write of the page source to the temp file. Retries until all of
 * @data has been written, then closes the stream; @data is freed on
 * completion or error.
 */
static void
save_temp_source_write_cb (GOutputStream *ostream, GAsyncResult *result, GString *data)
{
	GError *error = NULL;
	gssize written;

	written = g_output_stream_write_finish (ostream, result, &error);
	if (error)
	{
		g_string_free (data, TRUE);
		g_warning ("Unable to write to file: %s", error->message);
		g_error_free (error);

		g_output_stream_close_async (ostream, G_PRIORITY_DEFAULT, NULL,
					     (GAsyncReadyCallback)save_temp_source_close_cb,
					     NULL);

		return;
	}

	if ((gsize)written == data->len)
	{
		g_string_free (data, TRUE);
		g_output_stream_close_async (ostream, G_PRIORITY_DEFAULT, NULL,
					     (GAsyncReadyCallback)save_temp_source_close_cb,
					     NULL);

		return;
	}

	/*
	 * Partial write: drop the bytes already written and retry with the
	 * remainder. The previous code advanced data->str and data->len by
	 * hand, which corrupted the GString — a later g_string_free(data,
	 * TRUE) would free a pointer into the middle of the allocation.
	 */
	g_string_erase (data, 0, written);

	g_output_stream_write_async (ostream,
				     data->str, data->len,
				     G_PRIORITY_DEFAULT, NULL,
				     (GAsyncReadyCallback)save_temp_source_write_cb,
				     data);
}
#ifdef HAVE_WEBKIT2
/* WebKit2: async result of fetching the main resource's raw bytes;
 * starts streaming them into the temp file via save_temp_source_write_cb. */
static void
get_main_resource_data_cb (WebKitWebResource *resource, GAsyncResult *result, GOutputStream *ostream)
{
	guchar *data;
	gsize data_length;
	GString *data_str;
	GError *error = NULL;

	data = webkit_web_resource_get_data_finish (resource, result, &data_length, &error);
	if (error) {
		/* NOTE(review): ostream is neither closed nor unreffed on this
		 * path — looks like a leak; confirm stream ownership. */
		g_warning ("Unable to get main resource data: %s", error->message);
		g_error_free (error);
		return;
	}

	/* We create a new GString here because we need to make sure
	 * we keep writing in case of partial writes */
	data_str = g_string_new_len ((gchar *)data, data_length);
	g_free (data);

	g_output_stream_write_async (ostream,
				     data_str->str, data_str->len,
				     G_PRIORITY_DEFAULT, NULL,
				     (GAsyncReadyCallback)save_temp_source_write_cb,
				     data_str);
}
#endif
/*
 * The temp file has been created/replaced: attach bookkeeping data to
 * the output stream (temp-file URI, original page URI, owning embed —
 * all read back in save_temp_source_close_cb) and start streaming the
 * page source into it.
 */
static void
save_temp_source_replace_cb (GFile *file, GAsyncResult *result, EphyEmbed *embed)
{
	EphyWebView *view;
#ifdef HAVE_WEBKIT2
	WebKitWebResource *resource;
#else
	WebKitWebFrame *frame;
	WebKitWebDataSource *data_source;
	GString *const_data;
	GString *data;
#endif
	GFileOutputStream *ostream;
	GError *error = NULL;

	ostream = g_file_replace_finish (file, result, &error);
	if (error)
	{
		g_warning ("Unable to replace file: %s", error->message);
		g_error_free (error);
		return;
	}

	g_object_set_data_full (G_OBJECT (ostream),
				"ephy-save-temp-source-uri",
				g_file_get_uri (file),
				g_free);

	view = ephy_embed_get_web_view (embed);
	/* this call was previously terminated with a stray comma operator
	 * ("g_free)," ) instead of a semicolon — same semantics, fixed */
	g_object_set_data_full (G_OBJECT (ostream),
				"ephy-original-source-uri",
				g_strdup (webkit_web_view_get_uri (WEBKIT_WEB_VIEW (view))),
				g_free);

	g_object_set_data_full (G_OBJECT (ostream),
				"ephy-save-temp-source-embed",
				g_object_ref (embed),
				g_object_unref);

#ifdef HAVE_WEBKIT2
	resource = webkit_web_view_get_main_resource (WEBKIT_WEB_VIEW (view));
	webkit_web_resource_get_data (resource, NULL,
				      (GAsyncReadyCallback)get_main_resource_data_cb,
				      ostream);
#else
	frame = webkit_web_view_get_main_frame (WEBKIT_WEB_VIEW (view));
	data_source = webkit_web_frame_get_data_source (frame);
	const_data = webkit_web_data_source_get_data (data_source);

	/* We create a new GString here because we need to make sure
	 * we keep writing in case of partial writes */
	if (const_data)
		data = g_string_new_len (const_data->str, const_data->len);
	else
		data = g_string_new_len ("", 0);

	g_output_stream_write_async (G_OUTPUT_STREAM (ostream),
				     data->str, data->len,
				     G_PRIORITY_DEFAULT, NULL,
				     (GAsyncReadyCallback)save_temp_source_write_cb,
				     data);
#endif
}
/* Write the page source of @embed to a private temp file, then (async,
 * via save_temp_source_replace_cb) hand it to the user's text editor.
 * @user_time is currently unused. */
static void
save_temp_source (EphyEmbed *embed,
		  guint32 user_time)
{
	GFile *file;
	char *tmp_path, *name_template;
	const char *tmp_dir;

	tmp_dir = ephy_file_tmp_dir ();
	if (tmp_dir == NULL)
		return;

	name_template = g_build_filename (tmp_dir, "viewsourceXXXXXX", NULL);
	tmp_path = ephy_file_tmp_filename (name_template, "html");
	g_free (name_template);
	if (tmp_path == NULL)
		return;

	file = g_file_new_for_path (tmp_path);
	g_file_replace_async (file, NULL, FALSE,
			      G_FILE_CREATE_REPLACE_DESTINATION|G_FILE_CREATE_PRIVATE,
			      G_PRIORITY_DEFAULT, NULL,
			      (GAsyncReadyCallback)save_temp_source_replace_cb,
			      embed);

	g_object_unref (file);
	g_free (tmp_path);
}
/*
 * "Page Source" command. Local file:// pages are handed straight to the
 * text/plain handler; remote pages are first saved to a temp file (or,
 * with the internal-view-source pref on WebKit1, shown in an in-browser
 * view-source tab).
 */
void
window_cmd_view_page_source (GtkAction *action,
			     EphyWindow *window)
{
	EphyEmbed *embed;
	const char *address;
	guint32 user_time;

	embed = ephy_embed_container_get_active_child
		(EPHY_EMBED_CONTAINER (window));
	g_return_if_fail (embed != NULL);

	address = ephy_web_view_get_address (ephy_embed_get_web_view (embed));

#ifdef HAVE_WEBKIT2
	/* TODO: View Source */
#else
	if (g_settings_get_boolean (EPHY_SETTINGS_MAIN,
				    EPHY_PREFS_INTERNAL_VIEW_SOURCE))
	{
		view_source_embedded (address, embed);
		return;
	}
#endif
	user_time = gtk_get_current_event_time ();

	if (g_str_has_prefix (address, "file://"))
	{
		GFile *file;
		file = g_file_new_for_uri (address);
		ephy_file_launch_handler ("text/plain", file, user_time);
		g_object_unref (file);
	}
	else
	{
		save_temp_source (embed, user_time);
	}
}
#define ABOUT_GROUP "About"

/*
 * "About" action handler: assemble the credits lists from the about.ini
 * file shipped in DATADIR and show the standard GTK+ about dialog.
 * All heap data handed to gtk_show_about_dialog() is released before
 * returning.
 *
 * Fix: the GKeyFile was previously leaked when about.ini failed to
 * load (early return without g_key_file_free).
 */
void
window_cmd_help_about (GtkAction *action,
		       GtkWidget *window)
{
	const char *licence_part[] = {
		N_("Web is free software; you can redistribute it and/or modify "
		   "it under the terms of the GNU General Public License as published by "
		   "the Free Software Foundation; either version 2 of the License, or "
		   "(at your option) any later version."),
		N_("The GNOME Web Browser is distributed in the hope that it will be useful, "
		   "but WITHOUT ANY WARRANTY; without even the implied warranty of "
		   "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the "
		   "GNU General Public License for more details."),
		N_("You should have received a copy of the GNU General Public License "
		   "along with the GNOME Web Browser; if not, write to the Free Software Foundation, Inc., "
		   "51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA")
	};

	char *licence = NULL, *comments = NULL;
	GKeyFile *key_file;
	GError *error = NULL;
	char **list, **authors, **contributors, **past_authors, **artists, **documenters;
	gsize n_authors, n_contributors, n_past_authors, n_artists, n_documenters, i, j;

	key_file = g_key_file_new ();
	if (!g_key_file_load_from_file (key_file, DATADIR G_DIR_SEPARATOR_S "about.ini",
					0, &error))
	{
		g_warning ("Couldn't load about data: %s\n", error->message);
		g_error_free (error);
		g_key_file_free (key_file); /* was leaked on this path */
		return;
	}

	list = g_key_file_get_string_list (key_file, ABOUT_GROUP, "Authors",
					   &n_authors, NULL);
	contributors = g_key_file_get_string_list (key_file, ABOUT_GROUP, "Contributors",
						   &n_contributors, NULL);
	past_authors = g_key_file_get_string_list (key_file, ABOUT_GROUP, "PastAuthors",
						   &n_past_authors, NULL);

	/* Both helpers rely on the local counters 'i'/'j'.  APPEND copies
	 * its argument; APPEND_STRV_AND_FREE steals the strv's elements
	 * and frees only the array itself, so the final strvs can be
	 * released with g_strfreev(). */
#define APPEND(_to,_from) \
	_to[i++] = g_strdup (_from);

#define APPEND_STRV_AND_FREE(_to,_from) \
	if (_from)\
	{\
		for (j = 0; _from[j] != NULL; ++j)\
		{\
			_to[i++] = _from[j];\
		}\
		g_free (_from);\
	}

	/* 7 fixed entries (separators, headings, contact address) + NULL */
	authors = g_new (char *, (list ? n_authors : 0) +
			 (contributors ? n_contributors : 0) +
			 (past_authors ? n_past_authors : 0) + 7 + 1);
	i = 0;
	APPEND_STRV_AND_FREE (authors, list);
	APPEND (authors, "");
	APPEND (authors, _("Contact us at:"));
	APPEND (authors, "<epiphany-list@gnome.org>");
	APPEND (authors, "");
	APPEND (authors, _("Contributors:"));
	APPEND_STRV_AND_FREE (authors, contributors);
	APPEND (authors, "");
	APPEND (authors, _("Past developers:"));
	APPEND_STRV_AND_FREE (authors, past_authors);
	authors[i++] = NULL;

	/* 4 fixed entries + NULL */
	list = g_key_file_get_string_list (key_file, ABOUT_GROUP, "Artists", &n_artists, NULL);
	artists = g_new (char *, (list ? n_artists : 0) + 4 + 1);
	i = 0;
	APPEND_STRV_AND_FREE (artists, list);
	APPEND (artists, "");
	APPEND (artists, _("Contact us at:"));
	APPEND (artists, "<gnome-art-list@gnome.org>");
	APPEND (artists, "<gnome-themes-list@gnome.org>");
	artists[i++] = NULL;

	/* 3 fixed entries + NULL */
	list = g_key_file_get_string_list (key_file, ABOUT_GROUP, "Documenters", &n_documenters, NULL);
	documenters = g_new (char *, (list ? n_documenters : 0) + 3 + 1);
	i = 0;
	APPEND_STRV_AND_FREE (documenters, list);
	APPEND (documenters, "");
	APPEND (documenters, _("Contact us at:"));
	APPEND (documenters, "<gnome-doc-list@gnome.org>");
	documenters[i++] = NULL;

#undef APPEND
#undef APPEND_STRV_AND_FREE

	g_key_file_free (key_file);

#ifdef HAVE_WEBKIT2
	comments = g_strdup_printf (_("Lets you view web pages and find information on the internet.\n"
				      "Powered by WebKit %d.%d.%d"),
				    webkit_get_major_version (),
				    webkit_get_minor_version (),
				    webkit_get_micro_version ());
#else
	comments = g_strdup_printf (_("Lets you view web pages and find information on the internet.\n"
				      "Powered by WebKit %d.%d.%d"),
				    webkit_major_version (),
				    webkit_minor_version (),
				    webkit_micro_version ());
#endif

	licence = g_strjoin ("\n\n",
			     _(licence_part[0]),
			     _(licence_part[1]),
			     _(licence_part[2]),
			     NULL);

	gtk_show_about_dialog (window ? GTK_WINDOW (window) : NULL,
			       "program-name", _("Web"),
			       "version", VERSION,
			       "copyright", "Copyright © 2002–2004 Marco Pesenti Gritti\n"
			       "Copyright © 2003–2012 The Web Developers",
			       "artists", artists,
			       "authors", authors,
			       "comments", comments,
			       "documenters", documenters,
			       /* Translators: This is a special message that shouldn't be translated
				* literally. It is used in the about box to give credits to
				* the translators.
				* Thus, you should translate it to your name and email address.
				* You should also include other translators who have contributed to
				* this translation; in that case, please write each of them on a separate
				* line separated by newlines (\n).
				*/
			       "translator-credits", _("translator-credits"),
			       "logo-icon-name", "web-browser",
			       "website", "http://www.gnome.org/projects/epiphany",
			       "website-label", _("Web Website"),
			       "license", licence,
			       "wrap-license", TRUE,
			       NULL);

	g_free (comments);
	g_free (licence);
	g_strfreev (artists);
	g_strfreev (authors);
	g_strfreev (documenters);
}
/* Action handler: switch to the tab after the current one. */
void
window_cmd_tabs_next (GtkAction *action,
		      EphyWindow *window)
{
	GtkWidget *notebook = ephy_window_get_notebook (window);

	g_return_if_fail (notebook != NULL);

	ephy_notebook_next_page (EPHY_NOTEBOOK (notebook));
}
/* Action handler: switch to the tab before the current one. */
void
window_cmd_tabs_previous (GtkAction *action,
			  EphyWindow *window)
{
	GtkWidget *notebook = ephy_window_get_notebook (window);

	g_return_if_fail (notebook != NULL);

	ephy_notebook_prev_page (EPHY_NOTEBOOK (notebook));
}
/* Action handler: move the current tab one position to the left. */
void
window_cmd_tabs_move_left (GtkAction *action,
			   EphyWindow *window)
{
	GtkNotebook *notebook = GTK_NOTEBOOK (ephy_window_get_notebook (window));
	int current = gtk_notebook_get_current_page (notebook);

	/* Leftmost tab (or no tab at all): nothing to do. */
	if (current < 1)
		return;

	gtk_notebook_reorder_child (notebook,
				    gtk_notebook_get_nth_page (notebook, current),
				    current - 1);
}
/* Action handler: move the current tab one position to the right. */
void window_cmd_tabs_move_right (GtkAction *action,
				 EphyWindow *window)
{
	GtkNotebook *notebook = GTK_NOTEBOOK (ephy_window_get_notebook (window));
	int current = gtk_notebook_get_current_page (notebook);
	int last = gtk_notebook_get_n_pages (notebook) - 1;

	/* Rightmost tab (or no tab at all): nothing to do.
	 * (Equivalent to the original "page > n_pages - 1" test.) */
	if (current >= last)
		return;

	gtk_notebook_reorder_child (notebook,
				    gtk_notebook_get_nth_page (notebook, current),
				    current + 1);
}
/*
 * Action handler: detach the current tab into a brand new window.
 * A no-op when the window holds only a single tab.
 */
void
window_cmd_tabs_detach (GtkAction *action,
			EphyWindow *window)
{
	EphyEmbed *embed;
	GtkNotebook *notebook;
	EphyWindow *new_window;

	notebook = GTK_NOTEBOOK (ephy_window_get_notebook (window));
	if (gtk_notebook_get_n_pages (notebook) <= 1)
		return;

	embed = ephy_embed_container_get_active_child (EPHY_EMBED_CONTAINER (window));

	/* Keep the embed alive across the re-parenting: removing the
	 * notebook page would otherwise drop its last reference. */
	g_object_ref_sink (embed);
	gtk_notebook_remove_page (notebook, gtk_notebook_page_num (notebook, GTK_WIDGET (embed)));

	new_window = ephy_window_new ();
	ephy_embed_container_add_child (EPHY_EMBED_CONTAINER (new_window), embed, 0, FALSE);
	/* The new container now owns the embed; drop our temporary ref. */
	g_object_unref (embed);

	gtk_window_present (GTK_WINDOW (new_window));
}
/*
 * Action handler: open the address currently shown in the location
 * entry, first resolving smart-bookmark shortcuts through the
 * bookmarks engine.
 *
 * Fix: the resolved address returned by
 * ephy_bookmarks_resolve_address() is newly allocated and was leaked.
 */
void
window_cmd_load_location (GtkAction *action,
			  EphyWindow *window)
{
	const char *location;

	location = ephy_window_get_location (window);

	if (location)
	{
		EphyBookmarks *bookmarks;
		char *address;

		bookmarks = ephy_shell_get_bookmarks (ephy_shell_get_default ());
		address = ephy_bookmarks_resolve_address (bookmarks, location, NULL);
		g_return_if_fail (address != NULL);

		ephy_link_open (EPHY_LINK (window), address,
				ephy_embed_container_get_active_child (EPHY_EMBED_CONTAINER (window)),
				ephy_link_flags_from_current_event ());
		/* resolve_address transfers ownership of the string to us */
		g_free (address);
	}
}
/*
 * Toggle handler for caret ("F7") browsing mode.  When the user turns
 * the mode on interactively, first ask for confirmation; on cancel the
 * toggle is reverted and the GSetting is left untouched.
 */
void
window_cmd_browse_with_caret (GtkAction *action,
			      EphyWindow *window)
{
	gboolean active;
	EphyEmbed *embed;

	embed = ephy_embed_container_get_active_child
		(EPHY_EMBED_CONTAINER (window));

	active = gtk_toggle_action_get_active (GTK_TOGGLE_ACTION (action));

	/* FIXME: perhaps a bit of a kludge; we check if there's an
	 * active embed because we don't want to show the dialog on
	 * startup when we sync the GtkAction with our GConf
	 * preference */
	if (active && embed)
	{
		GtkWidget *dialog;
		int response;

		dialog = gtk_message_dialog_new (GTK_WINDOW (window),
						 GTK_DIALOG_DESTROY_WITH_PARENT,
						 GTK_MESSAGE_QUESTION, GTK_BUTTONS_CANCEL,
						 _("Enable caret browsing mode?"));

		gtk_message_dialog_format_secondary_text (GTK_MESSAGE_DIALOG (dialog),
							  _("Pressing F7 turns caret browsing on or off. This feature "
							    "places a moveable cursor in web pages, allowing you to move "
							    "around with your keyboard. Do you want to enable caret browsing on?"));
		gtk_dialog_add_button (GTK_DIALOG (dialog), _("_Enable"), GTK_RESPONSE_ACCEPT);
		gtk_dialog_set_default_response (GTK_DIALOG (dialog), GTK_RESPONSE_CANCEL);

		/* Modal confirmation; runs a nested main loop. */
		response = gtk_dialog_run (GTK_DIALOG (dialog));
		gtk_widget_destroy (dialog);

		if (response == GTK_RESPONSE_CANCEL)
		{
			/* Revert the toggle without touching the setting. */
			gtk_toggle_action_set_active (GTK_TOGGLE_ACTION (action), FALSE);
			return;
		}
	}

	g_settings_set_boolean (EPHY_SETTINGS_MAIN,
				EPHY_PREFS_ENABLE_CARET_BROWSING, active);
}
|
jdapena/epiphany
|
src/window-commands.c
|
C
|
gpl-2.0
| 40,646
|
/*
* Copyright (C) 2013 Andreas Steffen
* HSR Hochschule fuer Technik Rapperswil
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include "tcg_swid_attr_tag_inv.h"
#include <pa_tnc/pa_tnc_msg.h>
#include <bio/bio_writer.h>
#include <bio/bio_reader.h>
#include <utils/debug.h>
typedef struct private_tcg_swid_attr_tag_inv_t private_tcg_swid_attr_tag_inv_t;
/**
* SWID Tag Inventory
* see section 4.10 of TCG TNC SWID Message and Attributes for IF-M
*
* 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Reserved | Tag ID Count |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Request ID Copy |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | EID Epoch |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Last EID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Unique Sequence ID Length |Unique Sequence ID (var length)|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Tag Length |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Tag (Variable) |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
#define SWID_TAG_INV_SIZE 16
#define SWID_TAG_INV_RESERVED 0x00
/**
 * Private data of an tcg_swid_attr_tag_inv_t object.
 */
struct private_tcg_swid_attr_tag_inv_t {

	/**
	 * Public members of tcg_swid_attr_tag_inv_t
	 */
	tcg_swid_attr_tag_inv_t public;

	/**
	 * Vendor-specific attribute type
	 */
	pen_type_t type;

	/**
	 * Attribute value (serialized wire format, built lazily by build())
	 */
	chunk_t value;

	/**
	 * Noskip flag
	 */
	bool noskip_flag;

	/**
	 * Request ID (copy of the ID from the triggering request)
	 */
	u_int32_t request_id;

	/**
	 * Event ID Epoch
	 */
	u_int32_t eid_epoch;

	/**
	 * Last Event ID
	 */
	u_int32_t last_eid;

	/**
	 * SWID Tag Inventory (list of full tag encodings)
	 */
	swid_inventory_t *inventory;

	/**
	 * Reference count (object freed when it drops to zero)
	 */
	refcount_t ref;
};
/* pa_tnc_attr_t interface: return the vendor-specific attribute type. */
METHOD(pa_tnc_attr_t, get_type, pen_type_t,
	private_tcg_swid_attr_tag_inv_t *this)
{
	return this->type;
}

/* pa_tnc_attr_t interface: return the serialized attribute value
 * (empty chunk until build() has run or data was supplied). */
METHOD(pa_tnc_attr_t, get_value, chunk_t,
	private_tcg_swid_attr_tag_inv_t *this)
{
	return this->value;
}

/* pa_tnc_attr_t interface: return the PA-TNC noskip flag. */
METHOD(pa_tnc_attr_t, get_noskip_flag, bool,
	private_tcg_swid_attr_tag_inv_t *this)
{
	return this->noskip_flag;
}

/* pa_tnc_attr_t interface: set the PA-TNC noskip flag. */
METHOD(pa_tnc_attr_t, set_noskip_flag, void,
	private_tcg_swid_attr_tag_inv_t *this, bool noskip)
{
	this->noskip_flag = noskip;
}
/*
 * pa_tnc_attr_t interface: serialize the attribute into this->value
 * following the wire format documented at the top of this file.
 * Idempotent: a previously built value is kept as-is.
 */
METHOD(pa_tnc_attr_t, build, void,
	private_tcg_swid_attr_tag_inv_t *this)
{
	bio_writer_t *writer;
	swid_tag_t *tag;
	enumerator_t *enumerator;

	/* already serialized - nothing to do */
	if (this->value.ptr)
	{
		return;
	}

	writer = bio_writer_create(SWID_TAG_INV_SIZE);
	/* fixed 16-byte header: reserved, tag count, request ID, epoch, EID */
	writer->write_uint8 (writer, SWID_TAG_INV_RESERVED);
	writer->write_uint24(writer, this->inventory->get_count(this->inventory));
	writer->write_uint32(writer, this->request_id);
	writer->write_uint32(writer, this->eid_epoch);
	writer->write_uint32(writer, this->last_eid);

	/* per tag: 16-bit length + Unique Sequence ID, 32-bit length + tag */
	enumerator = this->inventory->create_enumerator(this->inventory);
	while (enumerator->enumerate(enumerator, &tag))
	{
		writer->write_data16(writer, tag->get_unique_seq_id(tag));
		writer->write_data32(writer, tag->get_encoding(tag));
	}
	enumerator->destroy(enumerator);

	this->value = writer->extract_buf(writer);
	writer->destroy(writer);
}
/*
 * pa_tnc_attr_t interface: parse this->value into header fields and the
 * SWID tag inventory.  On error, *offset points at the failing position
 * for the PA-TNC error attribute.
 *
 * Fix: the bio_reader_t was leaked on both mid-loop failure paths
 * (early return FAILED without reader->destroy()).
 */
METHOD(pa_tnc_attr_t, process, status_t,
	private_tcg_swid_attr_tag_inv_t *this, u_int32_t *offset)
{
	bio_reader_t *reader;
	u_int32_t tag_count;
	u_int8_t reserved;
	chunk_t tag_encoding, unique_seq_id;
	swid_tag_t *tag;

	if (this->value.len < SWID_TAG_INV_SIZE)
	{
		DBG1(DBG_TNC, "insufficient data for SWID Tag Inventory");
		*offset = 0;
		return FAILED;
	}

	reader = bio_reader_create(this->value);
	/* fixed 16-byte header, mirrors build() */
	reader->read_uint8 (reader, &reserved);
	reader->read_uint24(reader, &tag_count);
	reader->read_uint32(reader, &this->request_id);
	reader->read_uint32(reader, &this->eid_epoch);
	reader->read_uint32(reader, &this->last_eid);
	*offset = SWID_TAG_INV_SIZE;

	while (tag_count--)
	{
		if (!reader->read_data16(reader, &unique_seq_id))
		{
			DBG1(DBG_TNC, "insufficient data for Unique Sequence ID");
			reader->destroy(reader);	/* was leaked here */
			return FAILED;
		}
		*offset += 2 + unique_seq_id.len;

		if (!reader->read_data32(reader, &tag_encoding))
		{
			DBG1(DBG_TNC, "insufficient data for Tag");
			reader->destroy(reader);	/* was leaked here */
			return FAILED;
		}
		*offset += 4 + tag_encoding.len;

		/* inventory takes ownership of the created tag object */
		tag = swid_tag_create(tag_encoding, unique_seq_id);
		this->inventory->add(this->inventory, tag);
	}
	reader->destroy(reader);

	return SUCCESS;
}
/* pa_tnc_attr_t interface: take an additional reference. */
METHOD(pa_tnc_attr_t, get_ref, pa_tnc_attr_t*,
	private_tcg_swid_attr_tag_inv_t *this)
{
	ref_get(&this->ref);
	return &this->public.pa_tnc_attribute;
}

/* pa_tnc_attr_t interface: drop a reference; frees the object (and its
 * inventory and serialized value) when the last one is released. */
METHOD(pa_tnc_attr_t, destroy, void,
	private_tcg_swid_attr_tag_inv_t *this)
{
	if (ref_put(&this->ref))
	{
		this->inventory->destroy(this->inventory);
		free(this->value.ptr);
		free(this);
	}
}

/* Return the Request ID copied from the triggering request. */
METHOD(tcg_swid_attr_tag_inv_t, get_request_id, u_int32_t,
	private_tcg_swid_attr_tag_inv_t *this)
{
	return this->request_id;
}

/* Return the Last Event ID; optionally also the EID Epoch via out-param. */
METHOD(tcg_swid_attr_tag_inv_t, get_last_eid, u_int32_t,
	private_tcg_swid_attr_tag_inv_t *this, u_int32_t *eid_epoch)
{
	if (eid_epoch)
	{
		*eid_epoch = this->eid_epoch;
	}
	return this->last_eid;
}

/* Return the SWID tag inventory (no ownership transfer). */
METHOD(tcg_swid_attr_tag_inv_t, get_inventory, swid_inventory_t*,
	private_tcg_swid_attr_tag_inv_t *this)
{
	return this->inventory;
}
/**
 * Described in header.
 *
 * Constructor used on the sending side: the attribute is created from
 * live values and a populated inventory; serialization happens later
 * in build().  Takes ownership of @inventory.
 */
pa_tnc_attr_t *tcg_swid_attr_tag_inv_create(u_int32_t request_id,
											u_int32_t eid_epoch, u_int32_t eid,
											swid_inventory_t *inventory)
{
	private_tcg_swid_attr_tag_inv_t *this;

	INIT(this,
		.public = {
			.pa_tnc_attribute = {
				.get_type = _get_type,
				.get_value = _get_value,
				.get_noskip_flag = _get_noskip_flag,
				.set_noskip_flag = _set_noskip_flag,
				.build = _build,
				.process = _process,
				.get_ref = _get_ref,
				.destroy = _destroy,
			},
			.get_request_id = _get_request_id,
			.get_last_eid = _get_last_eid,
			.get_inventory = _get_inventory,
		},
		.type = { PEN_TCG, TCG_SWID_TAG_INVENTORY },
		.request_id = request_id,
		.eid_epoch = eid_epoch,
		.last_eid = eid,
		.inventory = inventory,
		.ref = 1,
	);

	return &this->public.pa_tnc_attribute;
}
/**
 * Described in header.
 *
 * Constructor used on the receiving side: the attribute starts from the
 * raw serialized data (cloned) with an empty inventory; header fields
 * and tags are filled in by process().
 */
pa_tnc_attr_t *tcg_swid_attr_tag_inv_create_from_data(chunk_t data)
{
	private_tcg_swid_attr_tag_inv_t *this;

	INIT(this,
		.public = {
			.pa_tnc_attribute = {
				.get_type = _get_type,
				.get_value = _get_value,
				.get_noskip_flag = _get_noskip_flag,
				.set_noskip_flag = _set_noskip_flag,
				.build = _build,
				.process = _process,
				.get_ref = _get_ref,
				.destroy = _destroy,
			},
			.get_request_id = _get_request_id,
			.get_last_eid = _get_last_eid,
			.get_inventory = _get_inventory,
		},
		.type = { PEN_TCG, TCG_SWID_TAG_INVENTORY },
		.value = chunk_clone(data),
		.inventory = swid_inventory_create(TRUE),
		.ref = 1,
	);

	return &this->public.pa_tnc_attribute;
}
|
maire/strongswan
|
src/libpts/tcg/swid/tcg_swid_attr_tag_inv.c
|
C
|
gpl-2.0
| 7,725
|
/* HTVMS_WAISProt.c
**
** Adaptation for Lynx by F.Macrides (macrides@sci.wfeb.edu)
**
** 31-May-1994 FM Initial version.
**
**----------------------------------------------------------------------*/
/*
** Routines originally from WProt.c -- FM
**
**----------------------------------------------------------------------*/
/* WIDE AREA INFORMATION SERVER SOFTWARE:
No guarantees or restrictions. See the readme file for the full standard
disclaimer.
3.26.90 Harry Morris, morris@think.com
3.30.90 Harry Morris
- removed chunk code from WAISSearchAPDU,
- added makeWAISQueryType1Query() and readWAISType1Query() which replace
makeWAISQueryTerms() and makeWAISQueryDocs().
4.11.90 HWM - generalized conditional includes (see c-dialect.h)
- renamed makeWAISType1Query() to makeWAISTextQuery()
renamed readWAISType1Query() to readWAISTextQuery()
5.29.90 TS - fixed bug in makeWAISQueryDocs
added CSTFreeWAISFoo functions
*/
#define _C_WAIS_protocol_
/* This file implements the Z39.50 extensions required for WAIS
*/
#include <HTUtils.h>
#include <HTVMS_WaisUI.h>
#include <HTVMS_WaisProt.h>
#include <LYLeaks.h>
/* very rough estimates of the size of an object */
#define DefWAISInitResponseSize (size_t)200
#define DefWAISSearchSize (size_t)3000
#define DefWAISSearchResponseSize (size_t)6000
#define DefWAISPresentSize (size_t)1000
#define DefWAISPresentResponseSize (size_t)6000
#define DefWAISDocHeaderSize (size_t)500
#define DefWAISShortHeaderSize (size_t)200
#define DefWAISLongHeaderSize (size_t)800
#define DefWAISDocTextSize (size_t)6000
#define DefWAISDocHeadlineSize (size_t)500
#define DefWAISDocCodeSize (size_t)500
#define RESERVE_SPACE_FOR_WAIS_HEADER(len) \
if (*len > 0) \
*len -= header_len;
/*----------------------------------------------------------------------*/
static unsigned long userInfoTagSize PARAMS((data_tag tag,
					     unsigned long length));

static unsigned long
userInfoTagSize(tag,length)
data_tag tag;
unsigned long length;
/* return the number of bytes required to write the user info tag and
   length
*/
{
  unsigned long size;

  /* calculate bytes required to represent tag. max tag is 16K */
  size = writtenCompressedIntSize(tag);
  /* plus the bytes needed for the compressed length value itself */
  size += writtenCompressedIntSize(length);

  return(size);
}
/*----------------------------------------------------------------------*/
static char* writeUserInfoHeader PARAMS((data_tag tag,long infoSize,
					 long estHeaderSize,char* buffer,
					 long* len));

static char*
writeUserInfoHeader(tag,infoSize,estHeaderSize,buffer,len)
data_tag tag;
long infoSize;
long estHeaderSize;
char* buffer;
long* len;
/* write the tag and size, making sure the info fits. return the true end
   of the info (after adjustment) note that the argument infoSize includes
   estHeaderSize. Note that the argument len is the number of bytes remaining
   in the buffer. Since we write the tag and size at the begining of the
   buffer (in space that we reserved) we don't want to pass len the calls which
   do that writing.
*/
{
  long dummyLen = 100;		/* plenty of space for a tag and size */
  char* buf = buffer;
  long realSize = infoSize - estHeaderSize;	/* payload without header */
  long realHeaderSize = userInfoTagSize(tag,realSize);

  if (buffer == NULL || *len == 0)
    return(NULL);

  /* write the tag */
  buf = writeTag(tag,buf,&dummyLen);

  /* see if the if the header size was correct. if not,
     we have to shift the info to fit the real header size */
  if (estHeaderSize != realHeaderSize)
  { /* make sure there is enough space */
    CHECK_FOR_SPACE_LEFT(realHeaderSize - estHeaderSize,len);
    /* memmove: source and destination overlap */
    memmove(buffer + realHeaderSize,buffer + estHeaderSize,(size_t)(realSize));
  }

  /* write the size */
  writeCompressedInteger(realSize,buf,&dummyLen);

  /* return the true end of buffer */
  return(buffer + realHeaderSize + realSize);
}
/*----------------------------------------------------------------------*/
static char* readUserInfoHeader PARAMS((data_tag* tag,unsigned long* num,
					char* buffer));

static char*
readUserInfoHeader(tag,num,buffer)
data_tag* tag;
unsigned long* num;
char* buffer;
/* read the tag and size; returns a pointer just past the header,
   i.e. at the start of the user info payload */
{
  char* buf = buffer;

  buf = readTag(tag,buf);
  buf = readCompressedInteger(num,buf);

  return(buf);
}
/*----------------------------------------------------------------------*/
WAISInitResponse*
makeWAISInitResponse(chunkCode,
		     chunkIDLen,
		     chunkMarker,
		     highlightMarker,
		     deHighlightMarker,
		     newLineChars)
long chunkCode;
long chunkIDLen;
char* chunkMarker;
char* highlightMarker;
char* deHighlightMarker;
char* newLineChars;
/* create a WAIS init response object.  All string arguments are
   adopted (stored by pointer, freed by freeWAISInitResponse). */
{
  WAISInitResponse* init = (WAISInitResponse*)s_malloc((size_t)sizeof(WAISInitResponse));

  init->ChunkCode = chunkCode;	/* note: none are copied! */
  init->ChunkIDLength = chunkIDLen;
  init->ChunkMarker = chunkMarker;
  init->HighlightMarker = highlightMarker;
  init->DeHighlightMarker = deHighlightMarker;
  init->NewlineCharacters = newLineChars;

  return(init);
}
/*----------------------------------------------------------------------*/
void
freeWAISInitResponse(init)
WAISInitResponse* init;
/* free an object made with makeWAISInitResponse, including all the
   string members it adopted */
{
  s_free(init->ChunkMarker);
  s_free(init->HighlightMarker);
  s_free(init->DeHighlightMarker);
  s_free(init->NewlineCharacters);
  s_free(init);
}
/*----------------------------------------------------------------------*/
char*
writeInitResponseInfo(init,buffer,len)
InitResponseAPDU* init;
char* buffer;
long* len;
/* write an init response object; returns a pointer past the written
   data, or NULL on failure (propagated from writeUserInfoHeader) */
{
  /* reserve an estimated header in front of the payload; the real
     header is written (and the payload shifted if needed) at the end */
  unsigned long header_len = userInfoTagSize(DT_UserInformationLength,
					     DefWAISInitResponseSize);
  char* buf = buffer + header_len;
  WAISInitResponse* info = (WAISInitResponse*)init->UserInformationField;
  unsigned long size;

  RESERVE_SPACE_FOR_WAIS_HEADER(len);

  buf = writeNum(info->ChunkCode,DT_ChunkCode,buf,len);
  buf = writeNum(info->ChunkIDLength,DT_ChunkIDLength,buf,len);
  buf = writeString(info->ChunkMarker,DT_ChunkMarker,buf,len);
  buf = writeString(info->HighlightMarker,DT_HighlightMarker,buf,len);
  buf = writeString(info->DeHighlightMarker,DT_DeHighlightMarker,buf,len);
  buf = writeString(info->NewlineCharacters,DT_NewlineCharacters,buf,len);

  /* now write the header and size */
  size = buf - buffer;
  buf = writeUserInfoHeader(DT_UserInformationLength,size,header_len,buffer,len);

  return(buf);
}
/*----------------------------------------------------------------------*/
char*
readInitResponseInfo(info,buffer)
void** info;
char* buffer;
/* read an init response object.  On success *info receives a newly
   allocated WAISInitResponse; on a malformed tag the partial strings
   are released and NULL is returned via REPORT_READ_ERROR.
   Fix: chunkMarker was leaked on the error path (only the other three
   strings were freed). */
{
  char* buf = buffer;
  unsigned long size;
  unsigned long headerSize;
  long chunkCode,chunkIDLen;
  data_tag tag1;
  char* chunkMarker = NULL;
  char* highlightMarker = NULL;
  char* deHighlightMarker = NULL;
  char* newLineChars = NULL;

  chunkCode = chunkIDLen = UNUSED;

  buf = readUserInfoHeader(&tag1,&size,buf);
  headerSize = buf - buffer;

  /* consume tagged fields until the advertised payload size is used up */
  while (buf < (buffer + size + headerSize))
  { data_tag tag = peekTag(buf);
    switch (tag)
    { case DT_ChunkCode:
	buf = readNum(&chunkCode,buf);
	break;
      case DT_ChunkIDLength:
	buf = readNum(&chunkIDLen,buf);
	break;
      case DT_ChunkMarker:
	buf = readString(&chunkMarker,buf);
	break;
      case DT_HighlightMarker:
	buf = readString(&highlightMarker,buf);
	break;
      case DT_DeHighlightMarker:
	buf = readString(&deHighlightMarker,buf);
	break;
      case DT_NewlineCharacters:
	buf = readString(&newLineChars,buf);
	break;
      default:			/* unknown tag: clean up and bail out */
	s_free(chunkMarker);	/* was leaked on this path */
	s_free(highlightMarker);
	s_free(deHighlightMarker);
	s_free(newLineChars);
	REPORT_READ_ERROR(buf);
	break;
    }
  }

  /* ownership of all strings passes to the new response object */
  *info = (void *)makeWAISInitResponse(chunkCode,chunkIDLen,chunkMarker,
				       highlightMarker,deHighlightMarker,
				       newLineChars);
  return(buf);
}
/*----------------------------------------------------------------------*/
WAISSearch*
makeWAISSearch(seedWords,
	       docs,
	       textList,
	       dateFactor,
	       beginDateRange,
	       endDateRange,
	       maxDocsRetrieved)
char* seedWords;
DocObj** docs;
char** textList;
long dateFactor;
char* beginDateRange;
char* endDateRange;
long maxDocsRetrieved;
/* create a type 3 query object.  All pointer arguments are adopted
   (stored by pointer, freed by freeWAISSearch). */
{
  WAISSearch* query = (WAISSearch*)s_malloc((size_t)sizeof(WAISSearch));

  query->SeedWords = seedWords;	/* not copied! */
  query->Docs = docs;		/* not copied! */
  query->TextList = textList;	/* not copied! */
  query->DateFactor = dateFactor;
  query->BeginDateRange = beginDateRange;
  query->EndDateRange = endDateRange;
  query->MaxDocumentsRetrieved = maxDocsRetrieved;

  return(query);
}
/*----------------------------------------------------------------------*/
void
freeWAISSearch(query)
WAISSearch* query;
/* destroy an object made with makeWAISSearch(), including the adopted
   strings, the NULL-terminated doc list and the NULL-terminated text
   list */
{
  void* ptr = NULL;
  long i;

  s_free(query->SeedWords);

  if (query->Docs != NULL)
    for (i = 0,ptr = (void *)query->Docs[i]; ptr != NULL; ptr = (void *)query->Docs[++i])
      freeDocObj((DocObj*)ptr);
  s_free(query->Docs);

  if (query->TextList != NULL)	/* XXX revisit when textlist is fully defined */
    for (i = 0,ptr = (void *)query->TextList[i]; ptr != NULL; ptr = (void *)query->TextList[++i])
      s_free(ptr);
  s_free(query->TextList);

  s_free(query->BeginDateRange);
  s_free(query->EndDateRange);
  s_free(query);
}
/*----------------------------------------------------------------------*/
DocObj*
makeDocObjUsingWholeDocument(docID,type)
any* docID;
char* type;
/* construct a document object referring to the whole document (no
   chunking) - only for use by servers.  docID and type are adopted,
   not copied. */
{
  DocObj* doc = (DocObj*)s_malloc((size_t)sizeof(DocObj));

  doc->DocumentID = docID;	/* not copied! */
  doc->Type = type;		/* not copied! */
  doc->ChunkCode = CT_document;

  return(doc);
}
/*----------------------------------------------------------------------*/
DocObj*
makeDocObjUsingLines(docID,type,start,end)
any* docID;
char* type;
long start;
long end;
/* construct a document object using line chunks - only for use by
   servers.  docID and type are adopted, not copied. */
{
  DocObj* doc = (DocObj*)s_malloc((size_t)sizeof(DocObj));

  doc->ChunkCode = CT_line;
  doc->DocumentID = docID;	/* not copied */
  doc->Type = type;		/* not copied! */
  doc->ChunkStart.Pos = start;
  doc->ChunkEnd.Pos = end;

  return(doc);
}
/*----------------------------------------------------------------------*/
DocObj*
makeDocObjUsingBytes(docID,type,start,end)
any* docID;
char* type;
long start;
long end;
/* construct a document object using byte chunks - only for use by
   servers.  docID and type are adopted, not copied. */
{
  DocObj* doc = (DocObj*)s_malloc((size_t)sizeof(DocObj));

  doc->ChunkCode = CT_byte;
  doc->DocumentID = docID;	/* not copied */
  doc->Type = type;		/* not copied! */
  doc->ChunkStart.Pos = start;
  doc->ChunkEnd.Pos = end;

  return(doc);
}
/*----------------------------------------------------------------------*/
DocObj*
makeDocObjUsingParagraphs(docID,type,start,end)
any* docID;
char* type;
any* start;
any* end;
/* construct a document object using paragraph chunks - only for use by
   servers.  docID, type, start and end are adopted, not copied.
   (The original comment said "byte chunks" - copy/paste slip.) */
{
  DocObj* doc = (DocObj*)s_malloc((size_t)sizeof(DocObj));

  doc->ChunkCode = CT_paragraph;
  doc->DocumentID = docID;	/* not copied */
  doc->Type = type;		/* not copied! */
  doc->ChunkStart.ID = start;
  doc->ChunkEnd.ID = end;

  return(doc);
}
/*----------------------------------------------------------------------*/
void
freeDocObj(doc)
DocObj* doc;
/* free a docObj and the members it adopted.  The ChunkStart/ChunkEnd
   'any' values only exist for paragraph chunks; for byte/line chunks
   they are plain longs inside the union. */
{
  freeAny(doc->DocumentID);
  s_free(doc->Type);
  if (doc->ChunkCode == CT_paragraph)
  { freeAny(doc->ChunkStart.ID);
    freeAny(doc->ChunkEnd.ID);
  }
  s_free(doc);
}
/*----------------------------------------------------------------------*/
static char* writeDocObj PARAMS((DocObj* doc,char* buffer,long* len));
static char*
writeDocObj(doc,buffer,len)
DocObj* doc;
char* buffer;
long* len;
/* write as little as we can about the doc obj */
{
char* buf = buffer;
/* we alwasy have to write the id, but its tag depends on if its a chunk */
if (doc->ChunkCode == CT_document)
buf = writeAny(doc->DocumentID,DT_DocumentID,buf,len);
else
buf = writeAny(doc->DocumentID,DT_DocumentIDChunk,buf,len);
if (doc->Type != NULL)
buf = writeString(doc->Type,DT_TYPE,buf,len);
switch (doc->ChunkCode)
{ case CT_document:
/* do nothing - there is no chunk data */
break;
case CT_byte:
case CT_line:
buf = writeNum(doc->ChunkCode,DT_ChunkCode,buf,len);
buf = writeNum(doc->ChunkStart.Pos,DT_ChunkStartID,buf,len);
buf = writeNum(doc->ChunkEnd.Pos,DT_ChunkEndID,buf,len);
break;
case CT_paragraph:
buf = writeNum(doc->ChunkCode,DT_ChunkCode,buf,len);
buf = writeAny(doc->ChunkStart.ID,DT_ChunkStartID,buf,len);
buf = writeAny(doc->ChunkEnd.ID,DT_ChunkEndID,buf,len);
break;
default:
panic("Implementation error: unknown chuck type %ld",
doc->ChunkCode);
break;
}
return(buf);
}
/*----------------------------------------------------------------------*/
static char* readDocObj PARAMS((DocObj** doc,char* buffer));

static char*
readDocObj(doc,buffer)
DocObj** doc;
char* buffer;
/* read whatever we have about the new document.  On error the partially
   built DocObj is released and NULL is returned via REPORT_READ_ERROR.
   Fix: the unknown-chunk-type path called s_free(doc) - freeing the
   caller's DocObj** (typically a stack address) instead of the
   allocated object *doc. */
{
  char* buf = buffer;
  data_tag tag;

  *doc = (DocObj*)s_malloc((size_t)sizeof(DocObj));

  tag = peekTag(buf);
  buf = readAny(&((*doc)->DocumentID),buf);

  if (tag == DT_DocumentID)	/* whole-document reference */
  { (*doc)->ChunkCode = CT_document;
    tag = peekTag(buf);
    if (tag == DT_TYPE)		/* XXX depends on DT_TYPE != what comes next */
      buf = readString(&((*doc)->Type),buf);
    /* ChunkStart and ChunkEnd are undefined */
  }
  else if (tag == DT_DocumentIDChunk)	/* chunked reference */
  { boolean readParagraphs = false;	/* for cleanup */
    tag = peekTag(buf);
    if (tag == DT_TYPE)		/* XXX depends on DT_TYPE != CT_FOO */
      buf = readString(&((*doc)->Type),buf);
    buf = readNum(&((*doc)->ChunkCode),buf);
    switch ((*doc)->ChunkCode)
    { case CT_byte:
      case CT_line:
	buf = readNum(&((*doc)->ChunkStart.Pos),buf);
	buf = readNum(&((*doc)->ChunkEnd.Pos),buf);
	break;
      case CT_paragraph:
	readParagraphs = true;
	buf = readAny(&((*doc)->ChunkStart.ID),buf);
	buf = readAny(&((*doc)->ChunkEnd.ID),buf);
	break;
      default:			/* unknown chunk type: clean up */
	freeAny((*doc)->DocumentID);
	if (readParagraphs)
	{ freeAny((*doc)->ChunkStart.ID);
	  freeAny((*doc)->ChunkEnd.ID);
	}
	s_free(*doc);		/* was s_free(doc): freed the caller's pointer */
	REPORT_READ_ERROR(buf);
	break;
    }
  }
  else				/* unexpected leading tag */
  { freeAny((*doc)->DocumentID);
    s_free(*doc);
    REPORT_READ_ERROR(buf);
  }

  return(buf);
}
/*----------------------------------------------------------------------*/
char*
writeSearchInfo(query,buffer,len)
SearchAPDU* query;
char* buffer;
long* len;
/* write out a WAIS query (type 1 or 3).  Type 1 queries are already
   serialized and copied out verbatim; type 3 queries are encoded
   field-by-field behind a user-info header. */
{
  if (strcmp(query->QueryType,QT_TextRetrievalQuery) == 0)
  { return(writeAny((any*)query->Query,DT_Query,buffer,len));
  }
  else
  { /* reserve an estimated header; the real one is written at the end */
    unsigned long header_len = userInfoTagSize(DT_UserInformationLength,
					       DefWAISSearchSize);
    char* buf = buffer + header_len;
    WAISSearch* info = (WAISSearch*)query->Query;
    unsigned long size;
    long i;

    RESERVE_SPACE_FOR_WAIS_HEADER(len);

    buf = writeString(info->SeedWords,DT_SeedWords,buf,len);

    if (info->Docs != NULL)	/* NULL-terminated doc list */
    { for (i = 0; info->Docs[i] != NULL; i++)
      { buf = writeDocObj(info->Docs[i],buf,len);
      }
    }

    /* XXX text list */

    buf = writeNum(info->DateFactor,DT_DateFactor,buf,len);
    buf = writeString(info->BeginDateRange,DT_BeginDateRange,buf,len);
    buf = writeString(info->EndDateRange,DT_EndDateRange,buf,len);
    buf = writeNum(info->MaxDocumentsRetrieved,DT_MaxDocumentsRetrieved,buf,len);

    /* now write the header and size */
    size = buf - buffer;
    buf = writeUserInfoHeader(DT_UserInformationLength,size,header_len,buffer,len);

    return(buf);
  }
}
/*----------------------------------------------------------------------*/
char*
readSearchInfo(info,buffer)
void** info;
char* buffer;
/* read a WAIS query (type 1 or 3).  For type 1 the raw 'any' is handed
   back; for type 3 the tagged fields are parsed into a WAISSearch.
   On a read error all partially built data is released and NULL is
   returned via REPORT_READ_ERROR / RETURN_ON_NULL. */
{
  data_tag type = peekTag(buffer);

  if (type == DT_Query)		/* this is a type 1 query */
  { char* buf = buffer;
    any* query = NULL;

    buf = readAny(&query,buf);
    *info = (void *)query;
    return(buf);
  }
  else				/* a type 3 query */
  { char* buf = buffer;
    unsigned long size;
    unsigned long headerSize;
    data_tag tag1;
    char* seedWords = NULL;
    char* beginDateRange = NULL;
    char* endDateRange = NULL;
    long dateFactor,maxDocsRetrieved;
    char** textList = NULL;
    DocObj** docIDs = NULL;
    DocObj* doc = NULL;
    long docs = 0;		/* number of docs read so far */
    long i;
    void* ptr = NULL;

    dateFactor = maxDocsRetrieved = UNUSED;

    buf = readUserInfoHeader(&tag1,&size,buf);
    headerSize = buf - buffer;

    /* consume tagged fields until the advertised payload is used up */
    while (buf < (buffer + size + headerSize))
    { data_tag tag = peekTag(buf);
      switch (tag)
      { case DT_SeedWords:
	  buf = readString(&seedWords,buf);
	  break;
	case DT_DocumentID:
	case DT_DocumentIDChunk:
	  if (docIDs == NULL)	/* create a new doc list */
	  { docIDs = (DocObj**)s_malloc((size_t)sizeof(DocObj*) * 2);
	  }
	  else			/* grow the doc list */
	  { docIDs = (DocObj**)s_realloc((char*)docIDs,(size_t)(sizeof(DocObj*) * (docs + 2)));
	  }
	  buf = readDocObj(&doc,buf);
	  if (buf == NULL)	/* free everything gathered so far */
	  { s_free(seedWords);
	    s_free(beginDateRange);
	    s_free(endDateRange);
	    if (docIDs != NULL)
	      for (i = 0,ptr = (void *)docIDs[i]; ptr != NULL; ptr = (void *)docIDs[++i])
		freeDocObj((DocObj*)ptr);
	    s_free(docIDs);
	    /* XXX should also free textlist when it is fully defined */
	  }
	  RETURN_ON_NULL(buf);
	  docIDs[docs++] = doc;	/* put it in the list */
	  docIDs[docs] = NULL;	/* keep the list NULL-terminated */
	  break;
	case DT_TextList:
	  /* XXX */
	  break;
	case DT_DateFactor:
	  buf = readNum(&dateFactor,buf);
	  break;
	case DT_BeginDateRange:
	  buf = readString(&beginDateRange,buf);
	  break;
	case DT_EndDateRange:
	  buf = readString(&endDateRange,buf);
	  break;
	case DT_MaxDocumentsRetrieved:
	  buf = readNum(&maxDocsRetrieved,buf);
	  break;
	default:		/* unknown tag: clean up and bail out */
	  s_free(seedWords);
	  s_free(beginDateRange);
	  s_free(endDateRange);
	  if (docIDs != NULL)
	    for (i = 0,ptr = (void *)docIDs[i]; ptr != NULL; ptr = (void *)docIDs[++i])
	      freeDocObj((DocObj*)ptr);
	  s_free(docIDs);
	  /* XXX should also free textlist when it is fully defined */
	  REPORT_READ_ERROR(buf);
	  break;
      }
    }

    /* ownership of all gathered data passes to the query object */
    *info = (void *)makeWAISSearch(seedWords,docIDs,textList,
				   dateFactor,beginDateRange,endDateRange,
				   maxDocsRetrieved);
    return(buf);
  }
}
/*----------------------------------------------------------------------*/
WAISDocumentHeader*
makeWAISDocumentHeader(docID,versionNumber,score,bestMatch,docLen,lines,
                       types,source,date,headline,originCity)
any* docID;
long versionNumber;
long score;
long bestMatch;
long docLen;
long lines;
char** types;
char* source;
char* date;
char* headline;
char* originCity;
/* Build a full document header from the given fields.  Ownership note:
 * nothing is copied - the new header adopts the caller's pointers, so a
 * caller that wants to keep a field must copy it, or NULL the field out
 * before freeing the header.
 */
{
  WAISDocumentHeader* hdr;

  hdr = (WAISDocumentHeader*)s_malloc((size_t)sizeof(WAISDocumentHeader));
  hdr->DocumentID = docID;
  hdr->VersionNumber = versionNumber;
  hdr->Score = score;
  hdr->BestMatch = bestMatch;
  hdr->DocumentLength = docLen;
  hdr->Lines = lines;
  hdr->Types = types;
  hdr->Source = source;
  hdr->Date = date;
  hdr->Headline = headline;
  hdr->OriginCity = originCity;
  return(hdr);
}
/*----------------------------------------------------------------------*/
void
freeWAISDocumentHeader(header)
WAISDocumentHeader* header;
/* Dispose of a full document header together with everything it owns:
 * the document id, every string in the NULL-terminated Types list, the
 * list itself, and the five descriptive strings.
 */
{
  freeAny(header->DocumentID);
  doList((void**)header->Types,fs_free); /* can't use the macro here ! */
  s_free(header->Types);
  s_free(header->OriginCity);
  s_free(header->Headline);
  s_free(header->Date);
  s_free(header->Source);
  s_free(header);
}
/*----------------------------------------------------------------------*/
char*
writeWAISDocumentHeader(header,buffer,len)
WAISDocumentHeader* header;
char* buffer;
long* len;
/* Serialize a full document header as a DT_DocumentHeaderGroup.  The
 * fixed fields are written after a gap reserved for the group header;
 * the optional Types list is encoded as DT_TYPE_BLOCK + compressed
 * total byte length + one DT_TYPE string per entry; finally the group
 * header and total size are back-patched at the front.  Returns the
 * advanced write pointer; *len tracks space left in buffer.
 */
{
unsigned long header_len = userInfoTagSize(DT_DocumentHeaderGroup ,
DefWAISDocHeaderSize);
char* buf = buffer + header_len;
unsigned long size1;
RESERVE_SPACE_FOR_WAIS_HEADER(len);
buf = writeAny(header->DocumentID,DT_DocumentID,buf,len);
buf = writeNum(header->VersionNumber,DT_VersionNumber,buf,len);
buf = writeNum(header->Score,DT_Score,buf,len);
buf = writeNum(header->BestMatch,DT_BestMatch,buf,len);
buf = writeNum(header->DocumentLength,DT_DocumentLength,buf,len);
buf = writeNum(header->Lines,DT_Lines,buf,len);
if (header->Types != NULL)
{ long size;
char* ptr = NULL;
long i;
buf = writeTag(DT_TYPE_BLOCK,buf,len);
/* first pass: compute the encoded byte length of all type strings */
for (i = 0,size = 0,ptr = header->Types[i]; ptr != NULL; ptr = header->Types[++i])
{ long typeSize = strlen(ptr);
size += writtenTagSize(DT_TYPE);
size += writtenCompressedIntSize(typeSize);
size += typeSize;
}
buf = writeCompressedInteger((unsigned long)size,buf,len);
/* second pass: emit the strings themselves */
for (i = 0,ptr = header->Types[i]; ptr != NULL; ptr = header->Types[++i])
buf = writeString(ptr,DT_TYPE,buf,len);
}
buf = writeString(header->Source,DT_Source,buf,len);
buf = writeString(header->Date,DT_Date,buf,len);
buf = writeString(header->Headline,DT_Headline,buf,len);
buf = writeString(header->OriginCity,DT_OriginCity,buf,len);
/* now write the header and size */
size1 = buf - buffer;
buf = writeUserInfoHeader(DT_DocumentHeaderGroup,size1,header_len,buffer,len);
return(buf);
}
/*----------------------------------------------------------------------*/
char*
readWAISDocumentHeader(header,buffer)
WAISDocumentHeader** header;
char* buffer;
{
char* buf = buffer;
unsigned long size1;
unsigned long headerSize;
data_tag tag1;
any* docID = NULL;
long versionNumber,score,bestMatch,docLength,lines;
char** types = NULL;
char *source = NULL;
char *date = NULL;
char *headline = NULL;
char *originCity = NULL;
versionNumber = score = bestMatch = docLength = lines = UNUSED;
buf = readUserInfoHeader(&tag1,&size1,buf);
headerSize = buf - buffer;
while (buf < (buffer + size1 + headerSize))
{ data_tag tag = peekTag(buf);
switch (tag)
{ case DT_DocumentID:
buf = readAny(&docID,buf);
break;
case DT_VersionNumber:
buf = readNum(&versionNumber,buf);
break;
case DT_Score:
buf = readNum(&score,buf);
break;
case DT_BestMatch:
buf = readNum(&bestMatch,buf);
break;
case DT_DocumentLength:
buf = readNum(&docLength,buf);
break;
case DT_Lines:
buf = readNum(&lines,buf);
break;
case DT_TYPE_BLOCK:
{ unsigned long size = -1;
long numTypes = 0;
buf = readTag(&tag,buf);
buf = readCompressedInteger(&size,buf);
while (size > 0)
{ char* type = NULL;
char* originalBuf = buf;
buf = readString(&type,buf);
types = (char**)s_realloc(types,(size_t)(sizeof(char*) * (numTypes + 2)));
types[numTypes++] = type;
types[numTypes] = NULL;
size -= (buf - originalBuf);
}
}
/* FALLTHRU */
case DT_Source:
buf = readString(&source,buf);
break;
case DT_Date:
buf = readString(&date,buf);
break;
case DT_Headline:
buf = readString(&headline,buf);
break;
case DT_OriginCity:
buf = readString(&originCity,buf);
break;
default:
freeAny(docID);
s_free(source);
s_free(date);
s_free(headline);
s_free(originCity);
REPORT_READ_ERROR(buf);
break;
}
}
*header = makeWAISDocumentHeader(docID,versionNumber,score,bestMatch,
docLength,lines,types,source,date,headline,
originCity);
return(buf);
}
/*----------------------------------------------------------------------*/
WAISDocumentShortHeader*
makeWAISDocumentShortHeader(docID,versionNumber,score,bestMatch,docLen,lines)
any* docID;
long versionNumber;
long score;
long bestMatch;
long docLen;
long lines;
/* Build a short document header.  Ownership note: the document id is
 * adopted, not copied - copy it first (or NULL the field before
 * freeing) if the caller needs to keep it.
 */
{
  WAISDocumentShortHeader* hdr;

  hdr = (WAISDocumentShortHeader*)s_malloc((size_t)sizeof(WAISDocumentShortHeader));
  hdr->DocumentID = docID;
  hdr->VersionNumber = versionNumber;
  hdr->Score = score;
  hdr->BestMatch = bestMatch;
  hdr->DocumentLength = docLen;
  hdr->Lines = lines;
  return(hdr);
}
/*----------------------------------------------------------------------*/
void
freeWAISDocumentShortHeader(header)
WAISDocumentShortHeader* header;
/* Dispose of a short document header; the document id it adopted is
 * released along with it.
 */
{
  freeAny(header->DocumentID);
  s_free(header);
}
/*----------------------------------------------------------------------*/
char*
writeWAISDocumentShortHeader(header,buffer,len)
WAISDocumentShortHeader* header;
char* buffer;
long* len;
/* Serialize a short document header as a DT_DocumentShortHeaderGroup:
 * the six fixed fields are written after a gap reserved for the group
 * header, which is then back-patched with the total size.  Returns the
 * advanced write pointer; *len tracks space left in buffer.
 */
{
  unsigned long hdr_len = userInfoTagSize(DT_DocumentShortHeaderGroup ,
                                          DefWAISShortHeaderSize);
  char* out = buffer + hdr_len;
  unsigned long total;

  RESERVE_SPACE_FOR_WAIS_HEADER(len);
  out = writeAny(header->DocumentID,DT_DocumentID,out,len);
  out = writeNum(header->VersionNumber,DT_VersionNumber,out,len);
  out = writeNum(header->Score,DT_Score,out,len);
  out = writeNum(header->BestMatch,DT_BestMatch,out,len);
  out = writeNum(header->DocumentLength,DT_DocumentLength,out,len);
  out = writeNum(header->Lines,DT_Lines,out,len);
  /* back-patch the group header now that the payload size is known */
  total = out - buffer;
  return(writeUserInfoHeader(DT_DocumentShortHeaderGroup,total,hdr_len,buffer,len));
}
/*----------------------------------------------------------------------*/
char*
readWAISDocumentShortHeader(header,buffer)
WAISDocumentShortHeader** header;
char* buffer;
/* Parse a DT_DocumentShortHeaderGroup from buffer into a freshly
 * allocated WAISDocumentShortHeader (missing fields keep UNUSED / NULL
 * defaults).  Returns the advanced read pointer, or NULL via
 * REPORT_READ_ERROR (after freeing the partial document id) on an
 * unexpected tag.
 */
{
char* buf = buffer;
unsigned long size;
unsigned long headerSize;
data_tag tag1;
any* docID = NULL;
long versionNumber,score,bestMatch,docLength,lines;
versionNumber = score = bestMatch = docLength = lines = UNUSED;
buf = readUserInfoHeader(&tag1,&size,buf);
headerSize = buf - buffer;
/* size covers the payload only; add the header bytes for the bound */
while (buf < (buffer + size + headerSize))
{ data_tag tag = peekTag(buf);
switch (tag)
{ case DT_DocumentID:
buf = readAny(&docID,buf);
break;
case DT_VersionNumber:
buf = readNum(&versionNumber,buf);
break;
case DT_Score:
buf = readNum(&score,buf);
break;
case DT_BestMatch:
buf = readNum(&bestMatch,buf);
break;
case DT_DocumentLength:
buf = readNum(&docLength,buf);
break;
case DT_Lines:
buf = readNum(&lines,buf);
break;
default:
freeAny(docID);
REPORT_READ_ERROR(buf);
break;
}
}
*header = makeWAISDocumentShortHeader(docID,versionNumber,score,bestMatch,
docLength,lines);
return(buf);
}
/*----------------------------------------------------------------------*/
WAISDocumentLongHeader*
makeWAISDocumentLongHeader(docID,versionNumber,score,bestMatch,docLen,lines,
                           types,source,date,headline,originCity,
                           stockCodes,companyCodes,industryCodes)
any* docID;
long versionNumber;
long score;
long bestMatch;
long docLen;
long lines;
char** types;
char* source;
char* date;
char* headline;
char* originCity;
char* stockCodes;
char* companyCodes;
char* industryCodes;
/* Build a long document header from the given fields.  Ownership note:
 * nothing is copied - the header adopts every pointer, so callers that
 * need to keep a field must copy it, or NULL the field out before
 * freeing the header.
 */
{
  WAISDocumentLongHeader* hdr;

  hdr = (WAISDocumentLongHeader*)s_malloc((size_t)sizeof(WAISDocumentLongHeader));
  hdr->DocumentID = docID;
  hdr->VersionNumber = versionNumber;
  hdr->Score = score;
  hdr->BestMatch = bestMatch;
  hdr->DocumentLength = docLen;
  hdr->Lines = lines;
  hdr->Types = types;
  hdr->Source = source;
  hdr->Date = date;
  hdr->Headline = headline;
  hdr->OriginCity = originCity;
  hdr->StockCodes = stockCodes;
  hdr->CompanyCodes = companyCodes;
  hdr->IndustryCodes = industryCodes;
  return(hdr);
}
/*----------------------------------------------------------------------*/
void
freeWAISDocumentLongHeader(header)
WAISDocumentLongHeader* header;
/* Dispose of a long document header and everything it owns: the
 * document id, every string in the NULL-terminated Types list plus the
 * list itself, and the eight descriptive strings.
 */
{
  freeAny(header->DocumentID);
  doList((void**)header->Types,fs_free); /* can't use the macro here! */
  s_free(header->Types); /* BUG FIX: the list itself leaked; matches
                            freeWAISDocumentHeader() */
  s_free(header->Source);
  s_free(header->Date);
  s_free(header->Headline);
  s_free(header->OriginCity);
  s_free(header->StockCodes);
  s_free(header->CompanyCodes);
  s_free(header->IndustryCodes);
  s_free(header);
}
/*----------------------------------------------------------------------*/
char*
writeWAISDocumentLongHeader(header,buffer,len)
WAISDocumentLongHeader* header;
char* buffer;
long* len;
/* Serialize a long document header as a DT_DocumentLongHeaderGroup.
 * Layout mirrors writeWAISDocumentHeader() with three extra code
 * strings at the end; the optional Types list is a DT_TYPE_BLOCK with
 * a compressed total byte length followed by DT_TYPE strings, and the
 * group header plus total size are back-patched at the front.  Returns
 * the advanced write pointer; *len tracks space left in buffer.
 */
{
unsigned long header_len = userInfoTagSize(DT_DocumentLongHeaderGroup ,
DefWAISLongHeaderSize);
char* buf = buffer + header_len;
unsigned long size1;
RESERVE_SPACE_FOR_WAIS_HEADER(len);
buf = writeAny(header->DocumentID,DT_DocumentID,buf,len);
buf = writeNum(header->VersionNumber,DT_VersionNumber,buf,len);
buf = writeNum(header->Score,DT_Score,buf,len);
buf = writeNum(header->BestMatch,DT_BestMatch,buf,len);
buf = writeNum(header->DocumentLength,DT_DocumentLength,buf,len);
buf = writeNum(header->Lines,DT_Lines,buf,len);
if (header->Types != NULL)
{ long size;
char* ptr = NULL;
long i;
buf = writeTag(DT_TYPE_BLOCK,buf,len);
/* first pass: total encoded length of the type strings */
for (i = 0,size = 0,ptr = header->Types[i]; ptr != NULL; ptr = header->Types[++i])
{ long typeSize = strlen(ptr);
size += writtenTagSize(DT_TYPE);
size += writtenCompressedIntSize(typeSize);
size += typeSize;
}
buf = writeCompressedInteger((unsigned long)size,buf,len);
/* second pass: emit the strings */
for (i = 0,ptr = header->Types[i]; ptr != NULL; ptr = header->Types[++i])
buf = writeString(ptr,DT_TYPE,buf,len);
}
buf = writeString(header->Source,DT_Source,buf,len);
buf = writeString(header->Date,DT_Date,buf,len);
buf = writeString(header->Headline,DT_Headline,buf,len);
buf = writeString(header->OriginCity,DT_OriginCity,buf,len);
buf = writeString(header->StockCodes,DT_StockCodes,buf,len);
buf = writeString(header->CompanyCodes,DT_CompanyCodes,buf,len);
buf = writeString(header->IndustryCodes,DT_IndustryCodes,buf,len);
/* now write the header and size */
size1 = buf - buffer;
buf = writeUserInfoHeader(DT_DocumentLongHeaderGroup,size1,header_len,buffer,len);
return(buf);
}
/*----------------------------------------------------------------------*/
char*
readWAISDocumentLongHeader(header,buffer)
WAISDocumentLongHeader** header;
char* buffer;
/* Parse a DT_DocumentLongHeaderGroup from buffer into a freshly
 * allocated WAISDocumentLongHeader (fields absent from the stream keep
 * UNUSED / NULL defaults).  Returns the advanced read pointer, or NULL
 * via REPORT_READ_ERROR after releasing all partial results on a
 * malformed tag.
 */
{
  char* buf = buffer;
  unsigned long size1;
  unsigned long headerSize;
  data_tag tag1;
  any* docID;
  long versionNumber,score,bestMatch,docLength,lines;
  char **types;
  char *source,*date,*headline,*originCity,*stockCodes,*companyCodes,*industryCodes;

  docID = NULL;
  versionNumber = score = bestMatch = docLength = lines = UNUSED;
  types = NULL;
  source = date = headline = originCity = stockCodes = companyCodes = industryCodes = NULL;
  buf = readUserInfoHeader(&tag1,&size1,buf);
  headerSize = buf - buffer;
  while (buf < (buffer + size1 + headerSize))
  { data_tag tag = peekTag(buf);
    switch (tag)
    { case DT_DocumentID:
        buf = readAny(&docID,buf);
        break;
      case DT_VersionNumber:
        buf = readNum(&versionNumber,buf);
        break;
      case DT_Score:
        buf = readNum(&score,buf);
        break;
      case DT_BestMatch:
        buf = readNum(&bestMatch,buf);
        break;
      case DT_DocumentLength:
        buf = readNum(&docLength,buf);
        break;
      case DT_Lines:
        buf = readNum(&lines,buf);
        break;
      case DT_TYPE_BLOCK:
      { unsigned long size = -1; /* sentinel, overwritten below */
        long numTypes = 0;
        buf = readTag(&tag,buf);
        /* BUG FIX: the return value of readCompressedInteger() was
           discarded here (compare readWAISDocumentHeader()), so buf
           still pointed at the length bytes and the type strings were
           decoded from the wrong offset. */
        buf = readCompressedInteger(&size,buf);
        while (size > 0)
        { char* type = NULL;
          char* originalBuf = buf;
          buf = readString(&type,buf);
          types = (char**)s_realloc(types,(size_t)(sizeof(char*) * (numTypes + 2)));
          types[numTypes++] = type;
          types[numTypes] = NULL;
          size -= (buf - originalBuf);
        }
      }
      /* FALLTHRU */ /* NOTE(review): historic behavior - after a type
                        block the next field is read as Source without
                        checking its tag; preserved as-is */
      case DT_Source:
        buf = readString(&source,buf);
        break;
      case DT_Date:
        buf = readString(&date,buf);
        break;
      case DT_Headline:
        buf = readString(&headline,buf);
        break;
      case DT_OriginCity:
        buf = readString(&originCity,buf);
        break;
      case DT_StockCodes:
        buf = readString(&stockCodes,buf);
        break;
      case DT_CompanyCodes:
        buf = readString(&companyCodes,buf);
        break;
      case DT_IndustryCodes:
        buf = readString(&industryCodes,buf);
        break;
      default:
        freeAny(docID);
        s_free(source);
        s_free(date);
        s_free(headline);
        s_free(originCity);
        s_free(stockCodes);
        s_free(companyCodes);
        s_free(industryCodes);
        if (types != NULL) /* BUG FIX: the type list used to leak here */
        { doList((void**)types,fs_free);
          s_free(types);
        }
        REPORT_READ_ERROR(buf);
        break;
    }
  }
  *header = makeWAISDocumentLongHeader(docID,versionNumber,score,bestMatch,
                                       docLength,lines,types,source,date,headline,
                                       originCity,stockCodes,companyCodes,
                                       industryCodes);
  return(buf);
}
/*----------------------------------------------------------------------*/
WAISSearchResponse*
makeWAISSearchResponse(seedWordsUsed,docHeaders,shortHeaders,longHeaders,
                       text,headlines,codes,diagnostics)
char* seedWordsUsed;
WAISDocumentHeader** docHeaders;
WAISDocumentShortHeader** shortHeaders;
WAISDocumentLongHeader** longHeaders;
WAISDocumentText** text;
WAISDocumentHeadlines** headlines;
WAISDocumentCodes** codes;
diagnosticRecord** diagnostics;
/* Bundle the pieces of a search response into a newly allocated
 * WAISSearchResponse.  All pointers (including the NULL-terminated
 * lists) are adopted, not copied.
 */
{
  WAISSearchResponse* resp;

  resp = (WAISSearchResponse*)s_malloc((size_t)sizeof(WAISSearchResponse));
  resp->SeedWordsUsed = seedWordsUsed;
  resp->DocHeaders = docHeaders;
  resp->ShortHeaders = shortHeaders;
  resp->LongHeaders = longHeaders;
  resp->Text = text;
  resp->Headlines = headlines;
  resp->Codes = codes;
  resp->Diagnostics = diagnostics;
  return(resp);
}
/*----------------------------------------------------------------------*/
void
freeWAISSearchResponse(response)
WAISSearchResponse* response;
/* Dispose of a search response and everything it owns: the seed-word
 * string and each NULL-terminated list (headers, text, headlines,
 * codes, diagnostics), freeing every element with its matching
 * destructor before freeing the list itself.
 */
{
void* ptr = NULL;
long i;
s_free(response->SeedWordsUsed);
if (response->DocHeaders != NULL)
for (i = 0,ptr = (void *)response->DocHeaders[i]; ptr != NULL; ptr = (void *)response->DocHeaders[++i])
freeWAISDocumentHeader((WAISDocumentHeader*)ptr);
s_free(response->DocHeaders);
if (response->ShortHeaders != NULL)
for (i = 0,ptr = (void *)response->ShortHeaders[i]; ptr != NULL; ptr = (void *)response->ShortHeaders[++i])
freeWAISDocumentShortHeader((WAISDocumentShortHeader*)ptr);
s_free(response->ShortHeaders);
if (response->LongHeaders != NULL)
for (i = 0,ptr = (void *)response->LongHeaders[i]; ptr != NULL; ptr = (void *)response->LongHeaders[++i])
freeWAISDocumentLongHeader((WAISDocumentLongHeader*)ptr);
s_free(response->LongHeaders);
if (response->Text != NULL)
for (i = 0,ptr = (void *)response->Text[i]; ptr != NULL; ptr = (void *)response->Text[++i])
freeWAISDocumentText((WAISDocumentText*)ptr);
s_free(response->Text);
if (response->Headlines != NULL)
for (i = 0,ptr = (void *)response->Headlines[i]; ptr != NULL; ptr = (void *)response->Headlines[++i])
freeWAISDocumentHeadlines((WAISDocumentHeadlines*)ptr);
s_free(response->Headlines);
if (response->Codes != NULL)
for (i = 0,ptr = (void *)response->Codes[i]; ptr != NULL; ptr = (void *)response->Codes[++i])
freeWAISDocumentCodes((WAISDocumentCodes*)ptr);
s_free(response->Codes);
if (response->Diagnostics != NULL)
for (i = 0,ptr = (void *)response->Diagnostics[i]; ptr != NULL; ptr = (void *)response->Diagnostics[++i])
freeDiag((diagnosticRecord*)ptr);
s_free(response->Diagnostics);
s_free(response);
}
/*----------------------------------------------------------------------*/
char*
writeSearchResponseInfo(query,buffer,len)
SearchResponseAPDU* query;
char* buffer;
long* len;
/* Serialize the WAISSearchResponse carried in
 * query->DatabaseDiagnosticRecords as a user-information group: the
 * seed words followed by every non-NULL list (each element written by
 * its own writer), with the group header and total size back-patched
 * at the front.  Returns the advanced write pointer; *len tracks the
 * space left in buffer.
 */
{
unsigned long header_len = userInfoTagSize(DT_UserInformationLength,
DefWAISSearchResponseSize);
char* buf = buffer + header_len;
WAISSearchResponse* info = (WAISSearchResponse*)query->DatabaseDiagnosticRecords;
unsigned long size;
void* header = NULL;
long i;
RESERVE_SPACE_FOR_WAIS_HEADER(len);
buf = writeString(info->SeedWordsUsed,DT_SeedWordsUsed,buf,len);
/* write out all the headers */
if (info->DocHeaders != NULL)
{ for (i = 0,header = (void *)info->DocHeaders[i]; header != NULL; header = (void *)info->DocHeaders[++i])
buf = writeWAISDocumentHeader((WAISDocumentHeader*)header,buf,len);
}
if (info->ShortHeaders != NULL)
{ for (i = 0,header = (void *)info->ShortHeaders[i]; header != NULL; header = (void *)info->ShortHeaders[++i])
buf = writeWAISDocumentShortHeader((WAISDocumentShortHeader*)header,buf,len);
}
if (info->LongHeaders != NULL)
{ for (i = 0,header = (void *)info->LongHeaders[i]; header != NULL; header = (void *)info->LongHeaders[++i])
buf = writeWAISDocumentLongHeader((WAISDocumentLongHeader*)header,buf,len);
}
if (info->Text != NULL)
{ for (i = 0,header = (void *)info->Text[i]; header != NULL; header = (void *)info->Text[++i])
buf = writeWAISDocumentText((WAISDocumentText*)header,buf,len);
}
if (info->Headlines != NULL)
{ for (i = 0,header = (void *)info->Headlines[i]; header != NULL; header = (void *)info->Headlines[++i])
buf = writeWAISDocumentHeadlines((WAISDocumentHeadlines*)header,buf,len);
}
if (info->Codes != NULL)
{ for (i = 0,header = (void *)info->Codes[i]; header != NULL;header = (void *)info->Codes[++i])
buf = writeWAISDocumentCodes((WAISDocumentCodes*)header,buf,len);
}
if (info->Diagnostics != NULL)
{ for (i = 0, header = (void *)info->Diagnostics[i]; header != NULL; header = (void *)info->Diagnostics[++i])
buf = writeDiag((diagnosticRecord*)header,buf,len);
}
/* now write the header and size */
size = buf - buffer;
buf = writeUserInfoHeader(DT_UserInformationLength,size,header_len,buffer,len);
return(buf);
}
/*----------------------------------------------------------------------*/
static void
cleanUpWaisSearchResponse PARAMS((char* buf,char* seedWordsUsed,
				  WAISDocumentHeader** docHeaders,
				  WAISDocumentShortHeader** shortHeaders,
				  WAISDocumentLongHeader** longHeaders,
				  WAISDocumentText** text,
				  WAISDocumentHeadlines** headlines,
				  WAISDocumentCodes** codes,
				  diagnosticRecord**diags));
static void
cleanUpWaisSearchResponse (buf,seedWordsUsed,docHeaders,shortHeaders,
			   longHeaders,text,headlines,codes,diags)
char* buf;
char* seedWordsUsed;
WAISDocumentHeader** docHeaders;
WAISDocumentShortHeader** shortHeaders;
WAISDocumentLongHeader** longHeaders;
WAISDocumentText** text;
WAISDocumentHeadlines** headlines;
WAISDocumentCodes** codes;
diagnosticRecord** diags;
/* if buf is NULL, we have just gotten a read error, and need to clean up
   any state we have built. If not, then everything is going fine, and
   we should just hang loose
*/
/* Error-path helper for readSearchResponseInfo(): a no-op while buf is
 * non-NULL; once a reader has returned NULL it releases every
 * partially built NULL-terminated list (elements via their matching
 * destructors, then the list arrays) plus the seed-word string.
 */
{
void* ptr = NULL;
long i;
if (buf == NULL)
{ s_free(seedWordsUsed);
if (docHeaders != NULL)
for (i = 0,ptr = (void *)docHeaders[i]; ptr != NULL;
ptr = (void *)docHeaders[++i])
freeWAISDocumentHeader((WAISDocumentHeader*)ptr);
s_free(docHeaders);
if (shortHeaders != NULL)
for (i = 0,ptr = (void *)shortHeaders[i]; ptr != NULL;
ptr = (void *)shortHeaders[++i])
freeWAISDocumentShortHeader((WAISDocumentShortHeader*)ptr);
s_free(shortHeaders);
if (longHeaders != NULL)
for (i = 0,ptr = (void *)longHeaders[i]; ptr != NULL;
ptr = (void *)longHeaders[++i])
freeWAISDocumentLongHeader((WAISDocumentLongHeader*)ptr);
s_free(longHeaders);
if (text != NULL)
for (i = 0,ptr = (void *)text[i]; ptr != NULL; ptr = (void *)text[++i])
freeWAISDocumentText((WAISDocumentText*)ptr);
s_free(text);
if (headlines != NULL)
for (i = 0,ptr = (void *)headlines[i]; ptr != NULL;
ptr = (void *)headlines[++i])
freeWAISDocumentHeadlines((WAISDocumentHeadlines*)ptr);
s_free(headlines);
if (codes != NULL)
for (i = 0,ptr = (void *)codes[i]; ptr != NULL;
ptr = (void *)codes[++i])
freeWAISDocumentCodes((WAISDocumentCodes*)ptr);
s_free(codes);
if (diags != NULL)
for (i = 0,ptr = (void *)diags[i]; ptr != NULL;
ptr = (void *)diags[++i])
freeDiag((diagnosticRecord*)ptr);
s_free(diags);
}
}
/*----------------------------------------------------------------------*/
char*
readSearchResponseInfo(info,buffer)
void** info;
char* buffer;
/* Decode a search-response user-information group from buffer into a
 * WAISSearchResponse stored through *info.  Each tagged record is read
 * by its own reader and appended to the matching NULL-terminated list,
 * growing the list as needed.  After every read,
 * cleanUpWaisSearchResponse() is called; it is a no-op on success and
 * releases all partial lists when the reader returned NULL, after
 * which RETURN_ON_NULL / REPORT_READ_ERROR bail out with NULL.
 */
{
char* buf = buffer;
unsigned long size;
unsigned long headerSize;
data_tag tag1;
void* header = NULL;
WAISDocumentHeader** docHeaders = NULL;
WAISDocumentShortHeader** shortHeaders = NULL;
WAISDocumentLongHeader** longHeaders = NULL;
WAISDocumentText** text = NULL;
WAISDocumentHeadlines** headlines = NULL;
WAISDocumentCodes** codes = NULL;
long numDocHeaders,numLongHeaders,numShortHeaders,numText,numHeadlines;
long numCodes;
char* seedWordsUsed = NULL;
diagnosticRecord** diags = NULL;
diagnosticRecord* diag = NULL;
long numDiags = 0;
numDocHeaders = numLongHeaders = numShortHeaders = numText = numHeadlines = numCodes = 0;
buf = readUserInfoHeader(&tag1,&size,buf);
headerSize = buf - buffer;
/* size covers the payload only; add the header bytes for the bound */
while (buf < (buffer + size + headerSize))
{ data_tag tag = peekTag(buf);
switch (tag)
{ case DT_SeedWordsUsed:
buf = readString(&seedWordsUsed,buf);
break;
case DT_DatabaseDiagnosticRecords:
if (diags == NULL) /* create a new diag list */
{ diags = (diagnosticRecord**)s_malloc((size_t)sizeof(diagnosticRecord*) * 2);
}
else /* grow the diag list */
{ diags = (diagnosticRecord**)s_realloc((char*)diags,(size_t)(sizeof(diagnosticRecord*) * (numDiags + 2)));
}
buf = readDiag(&diag,buf);
diags[numDiags++] = diag; /* put it in the list */
diags[numDiags] = NULL;
break;
case DT_DocumentHeaderGroup:
if (docHeaders == NULL) /* create a new header list */
{ docHeaders = (WAISDocumentHeader**)s_malloc((size_t)sizeof(WAISDocumentHeader*) * 2);
}
else /* grow the doc list */
{ docHeaders = (WAISDocumentHeader**)s_realloc((char*)docHeaders,(size_t)(sizeof(WAISDocumentHeader*) * (numDocHeaders + 2)));
}
buf = readWAISDocumentHeader((WAISDocumentHeader**)&header,buf);
cleanUpWaisSearchResponse(buf,seedWordsUsed,docHeaders,shortHeaders,longHeaders,text,headlines,codes,diags);
RETURN_ON_NULL(buf);
docHeaders[numDocHeaders++] =
(WAISDocumentHeader*)header; /* put it in the list */
docHeaders[numDocHeaders] = NULL;
break;
case DT_DocumentShortHeaderGroup:
if (shortHeaders == NULL) /* create a new header list */
{ shortHeaders = (WAISDocumentShortHeader**)s_malloc((size_t)sizeof(WAISDocumentShortHeader*) * 2);
}
else /* grow the doc list */
{ shortHeaders = (WAISDocumentShortHeader**)s_realloc((char*)shortHeaders,(size_t)(sizeof(WAISDocumentShortHeader*) * (numShortHeaders + 2)));
}
buf = readWAISDocumentShortHeader((WAISDocumentShortHeader**)&header,buf);
cleanUpWaisSearchResponse(buf,seedWordsUsed,docHeaders,shortHeaders,longHeaders,text,headlines,codes,diags);
RETURN_ON_NULL(buf);
shortHeaders[numShortHeaders++] =
(WAISDocumentShortHeader*)header; /* put it in the list */
shortHeaders[numShortHeaders] = NULL;
break;
case DT_DocumentLongHeaderGroup:
if (longHeaders == NULL) /* create a new header list */
{ longHeaders = (WAISDocumentLongHeader**)s_malloc((size_t)sizeof(WAISDocumentLongHeader*) * 2);
}
else /* grow the doc list */
{ longHeaders = (WAISDocumentLongHeader**)s_realloc((char*)longHeaders,(size_t)(sizeof(WAISDocumentLongHeader*) * (numLongHeaders + 2)));
}
buf = readWAISDocumentLongHeader((WAISDocumentLongHeader**)&header,buf);
cleanUpWaisSearchResponse(buf,seedWordsUsed,docHeaders,shortHeaders,longHeaders,text,headlines,codes,diags);
RETURN_ON_NULL(buf);
longHeaders[numLongHeaders++] =
(WAISDocumentLongHeader*)header; /* put it in the list */
longHeaders[numLongHeaders] = NULL;
break;
case DT_DocumentTextGroup:
if (text == NULL) /* create a new list */
{ text = (WAISDocumentText**)s_malloc((size_t)sizeof(WAISDocumentText*) * 2);
}
else /* grow the list */
{ text = (WAISDocumentText**)s_realloc((char*)text,(size_t)(sizeof(WAISDocumentText*) * (numText + 2)));
}
buf = readWAISDocumentText((WAISDocumentText**)&header,buf);
cleanUpWaisSearchResponse(buf,seedWordsUsed,docHeaders,shortHeaders,longHeaders,text,headlines,codes,diags);
RETURN_ON_NULL(buf);
text[numText++] =
(WAISDocumentText*)header; /* put it in the list */
text[numText] = NULL;
break;
case DT_DocumentHeadlineGroup:
if (headlines == NULL) /* create a new list */
{ headlines = (WAISDocumentHeadlines**)s_malloc((size_t)sizeof(WAISDocumentHeadlines*) * 2);
}
else /* grow the list */
{ headlines = (WAISDocumentHeadlines**)s_realloc((char*)headlines,(size_t)(sizeof(WAISDocumentHeadlines*) * (numHeadlines + 2)));
}
buf = readWAISDocumentHeadlines((WAISDocumentHeadlines**)&header,buf);
cleanUpWaisSearchResponse(buf,seedWordsUsed,docHeaders,shortHeaders,longHeaders,text,headlines,codes,diags);
RETURN_ON_NULL(buf);
headlines[numHeadlines++] =
(WAISDocumentHeadlines*)header; /* put it in the list */
headlines[numHeadlines] = NULL;
break;
case DT_DocumentCodeGroup:
if (codes == NULL) /* create a new list */
{ codes = (WAISDocumentCodes**)s_malloc((size_t)sizeof(WAISDocumentCodes*) * 2);
}
else /* grow the list */
{ codes = (WAISDocumentCodes**)s_realloc((char*)codes,(size_t)(sizeof(WAISDocumentCodes*) * (numCodes + 2)));
}
buf = readWAISDocumentCodes((WAISDocumentCodes**)&header,buf);
cleanUpWaisSearchResponse(buf,seedWordsUsed,docHeaders,shortHeaders,longHeaders,text,headlines,codes,diags);
RETURN_ON_NULL(buf);
codes[numCodes++] =
(WAISDocumentCodes*)header; /* put it in the list */
codes[numCodes] = NULL;
break;
default:
/* unknown tag: force the cleanup path and report the error */
cleanUpWaisSearchResponse(buf,seedWordsUsed,docHeaders,shortHeaders,longHeaders,text,headlines,codes,diags);
REPORT_READ_ERROR(buf);
break;
}
}
*info = (void *)makeWAISSearchResponse(seedWordsUsed,docHeaders,shortHeaders,
longHeaders,text,headlines,codes,diags);
return(buf);
}
/*----------------------------------------------------------------------*/
WAISDocumentText*
makeWAISDocumentText(docID,versionNumber,documentText)
any* docID;
long versionNumber;
any* documentText;
/* Package a document id, version number and text block into a newly
 * allocated WAISDocumentText.  Pointers are adopted, not copied.
 */
{
  WAISDocumentText* txt;

  txt = (WAISDocumentText*)s_malloc((size_t)sizeof(WAISDocumentText));
  txt->DocumentID = docID;
  txt->VersionNumber = versionNumber;
  txt->DocumentText = documentText;
  return(txt);
}
/*----------------------------------------------------------------------*/
void
freeWAISDocumentText(docText)
WAISDocumentText* docText;
/* Release a document-text record plus the two "any" blocks it owns. */
{
  freeAny(docText->DocumentText);
  freeAny(docText->DocumentID);
  s_free(docText);
}
/*----------------------------------------------------------------------*/
char*
writeWAISDocumentText(docText,buffer,len)
WAISDocumentText* docText;
char* buffer;
long* len;
/* Serialize a document-text record as a DT_DocumentTextGroup: id,
 * version and text written after a gap reserved for the group header,
 * which is then back-patched with the total size.  Returns the
 * advanced write pointer; *len tracks space left in buffer.
 */
{
  unsigned long hdr_len = userInfoTagSize(DT_DocumentTextGroup,
                                          DefWAISDocTextSize);
  char* out = buffer + hdr_len;
  unsigned long total;

  RESERVE_SPACE_FOR_WAIS_HEADER(len);
  out = writeAny(docText->DocumentID,DT_DocumentID,out,len);
  out = writeNum(docText->VersionNumber,DT_VersionNumber,out,len);
  out = writeAny(docText->DocumentText,DT_DocumentText,out,len);
  /* back-patch the group header now that the payload size is known */
  total = out - buffer;
  return(writeUserInfoHeader(DT_DocumentTextGroup,total,hdr_len,buffer,len));
}
/*----------------------------------------------------------------------*/
char*
readWAISDocumentText(docText,buffer)
WAISDocumentText** docText;
char* buffer;
/* Parse a DT_DocumentTextGroup from buffer into a freshly allocated
 * WAISDocumentText (missing fields keep UNUSED / NULL defaults).
 * Returns the advanced read pointer, or NULL via REPORT_READ_ERROR
 * (after freeing the partial "any" blocks) on an unexpected tag.
 */
{
char* buf = buffer;
unsigned long size;
unsigned long headerSize;
data_tag tag1;
any *docID,*documentText;
long versionNumber;
docID = documentText = NULL;
versionNumber = UNUSED;
buf = readUserInfoHeader(&tag1,&size,buf);
headerSize = buf - buffer;
/* size covers the payload only; add the header bytes for the bound */
while (buf < (buffer + size + headerSize))
{ data_tag tag = peekTag(buf);
switch (tag)
{ case DT_DocumentID:
buf = readAny(&docID,buf);
break;
case DT_VersionNumber:
buf = readNum(&versionNumber,buf);
break;
case DT_DocumentText:
buf = readAny(&documentText,buf);
break;
default:
freeAny(docID);
freeAny(documentText);
REPORT_READ_ERROR(buf);
break;
}
}
*docText = makeWAISDocumentText(docID,versionNumber,documentText);
return(buf);
}
/*----------------------------------------------------------------------*/
WAISDocumentHeadlines*
makeWAISDocumentHeadlines(docID,versionNumber,source,date,headline,originCity)
any* docID;
long versionNumber;
char* source;
char* date;
char* headline;
char* originCity;
/* Build a headline record from the given fields.  Ownership note:
 * nothing is copied - all pointers are adopted by the new record.
 */
{
  WAISDocumentHeadlines* hdl;

  hdl = (WAISDocumentHeadlines*)s_malloc((size_t)sizeof(WAISDocumentHeadlines));
  hdl->DocumentID = docID;
  hdl->VersionNumber = versionNumber;
  hdl->Source = source;
  hdl->Date = date;
  hdl->Headline = headline;
  hdl->OriginCity = originCity;
  return(hdl);
}
/*----------------------------------------------------------------------*/
void
freeWAISDocumentHeadlines(docHeadline)
WAISDocumentHeadlines* docHeadline;
/* Dispose of a headline record along with the document id and the four
 * strings it owns.
 */
{
  freeAny(docHeadline->DocumentID);
  s_free(docHeadline->OriginCity);
  s_free(docHeadline->Headline);
  s_free(docHeadline->Date);
  s_free(docHeadline->Source);
  s_free(docHeadline);
}
/*----------------------------------------------------------------------*/
char*
writeWAISDocumentHeadlines(docHeadline,buffer,len)
WAISDocumentHeadlines* docHeadline;
char* buffer;
long* len;
/* Serialize a headline record as a DT_DocumentHeadlineGroup: the six
 * fields are written after a gap reserved for the group header, which
 * is then back-patched with the total size.  Returns the advanced
 * write pointer; *len tracks space left in buffer.
 */
{
  unsigned long hdr_len = userInfoTagSize(DT_DocumentHeadlineGroup,
                                          DefWAISDocHeadlineSize);
  char* out = buffer + hdr_len;
  unsigned long total;

  RESERVE_SPACE_FOR_WAIS_HEADER(len);
  out = writeAny(docHeadline->DocumentID,DT_DocumentID,out,len);
  out = writeNum(docHeadline->VersionNumber,DT_VersionNumber,out,len);
  out = writeString(docHeadline->Source,DT_Source,out,len);
  out = writeString(docHeadline->Date,DT_Date,out,len);
  out = writeString(docHeadline->Headline,DT_Headline,out,len);
  out = writeString(docHeadline->OriginCity,DT_OriginCity,out,len);
  /* back-patch the group header now that the payload size is known */
  total = out - buffer;
  return(writeUserInfoHeader(DT_DocumentHeadlineGroup,total,hdr_len,buffer,len));
}
/*----------------------------------------------------------------------*/
char*
readWAISDocumentHeadlines(docHeadline,buffer)
WAISDocumentHeadlines** docHeadline;
char* buffer;
/* Parse a DT_DocumentHeadlineGroup from buffer into a freshly
 * allocated WAISDocumentHeadlines (missing fields keep UNUSED / NULL
 * defaults).  Returns the advanced read pointer, or NULL via
 * REPORT_READ_ERROR (after freeing all partial fields) on an
 * unexpected tag.
 */
{
char* buf = buffer;
unsigned long size;
unsigned long headerSize;
data_tag tag1;
any* docID;
long versionNumber;
char *source,*date,*headline,*originCity;
docID = NULL;
versionNumber = UNUSED;
source = date = headline = originCity = NULL;
buf = readUserInfoHeader(&tag1,&size,buf);
headerSize = buf - buffer;
/* size covers the payload only; add the header bytes for the bound */
while (buf < (buffer + size + headerSize))
{ data_tag tag = peekTag(buf);
switch (tag)
{ case DT_DocumentID:
buf = readAny(&docID,buf);
break;
case DT_VersionNumber:
buf = readNum(&versionNumber,buf);
break;
case DT_Source:
buf = readString(&source,buf);
break;
case DT_Date:
buf = readString(&date,buf);
break;
case DT_Headline:
buf = readString(&headline,buf);
break;
case DT_OriginCity:
buf = readString(&originCity,buf);
break;
default:
freeAny(docID);
s_free(source);
s_free(date);
s_free(headline);
s_free(originCity);
REPORT_READ_ERROR(buf);
break;
}
}
*docHeadline = makeWAISDocumentHeadlines(docID,versionNumber,source,date,
headline,originCity);
return(buf);
}
/*----------------------------------------------------------------------*/
/* Allocate and populate a WAISDocumentCodes record.  All pointer
 * arguments are adopted by the record and released later by
 * freeWAISDocumentCodes().
 */
WAISDocumentCodes*
makeWAISDocumentCodes(docID,
                      versionNumber,
                      stockCodes,
                      companyCodes,
                      industryCodes)
any* docID;
long versionNumber;
char* stockCodes;
char* companyCodes;
char* industryCodes;
{
    WAISDocumentCodes* result;

    result = (WAISDocumentCodes*)s_malloc((size_t)sizeof(WAISDocumentCodes));
    result->DocumentID = docID;
    result->VersionNumber = versionNumber;
    result->StockCodes = stockCodes;
    result->CompanyCodes = companyCodes;
    result->IndustryCodes = industryCodes;
    return(result);
}
/*----------------------------------------------------------------------*/
/* Release a WAISDocumentCodes and all owned fields. */
void
freeWAISDocumentCodes(docCodes)
WAISDocumentCodes* docCodes;
{
    freeAny(docCodes->DocumentID);
    s_free(docCodes->StockCodes);
    s_free(docCodes->CompanyCodes);
    s_free(docCodes->IndustryCodes);
    s_free(docCodes);
}
/*----------------------------------------------------------------------*/
/* Serialize docCodes into buffer as a DocumentCodeGroup; the group
 * header is back-patched after the payload is written.  len tracks
 * remaining space.  Returns the advanced buffer pointer.
 */
char*
writeWAISDocumentCodes(docCodes,buffer,len)
WAISDocumentCodes* docCodes;
char* buffer;
long* len;
{
    unsigned long header_len = userInfoTagSize(DT_DocumentCodeGroup ,
                                               DefWAISDocCodeSize);
    char* buf = buffer + header_len;  /* leave room for the group header */
    unsigned long size;
    RESERVE_SPACE_FOR_WAIS_HEADER(len);
    buf = writeAny(docCodes->DocumentID,DT_DocumentID,buf,len);
    buf = writeNum(docCodes->VersionNumber,DT_VersionNumber,buf,len);
    buf = writeString(docCodes->StockCodes,DT_StockCodes,buf,len);
    buf = writeString(docCodes->CompanyCodes,DT_CompanyCodes,buf,len);
    buf = writeString(docCodes->IndustryCodes,DT_IndustryCodes,buf,len);
    /* now write the header and size */
    size = buf - buffer;
    buf = writeUserInfoHeader(DT_DocumentCodeGroup,size,header_len,buffer,len);
    return(buf);
}
/*----------------------------------------------------------------------*/
/* Parse a DocumentCodeGroup from buffer into a newly allocated
 * WAISDocumentCodes (stored through docCodes).  Missing fields stay
 * NULL / UNUSED.  On an unrecognized tag, frees partial results and
 * defers to REPORT_READ_ERROR (macro defined elsewhere).
 */
char*
readWAISDocumentCodes(docCodes,buffer)
WAISDocumentCodes** docCodes;
char* buffer;
{
    char* buf = buffer;
    unsigned long size;
    unsigned long headerSize;
    data_tag tag1;
    any* docID;
    long versionNumber;
    char *stockCodes,*companyCodes,*industryCodes;

    docID = NULL;
    versionNumber = UNUSED;
    stockCodes = companyCodes = industryCodes = NULL;

    buf = readUserInfoHeader(&tag1,&size,buf);
    headerSize = buf - buffer;
    while (buf < (buffer + size + headerSize))
    {   data_tag tag = peekTag(buf);
        switch (tag)
        {   case DT_DocumentID:
              buf = readAny(&docID,buf);
              break;
            case DT_VersionNumber:
              buf = readNum(&versionNumber,buf);
              break;
            case DT_StockCodes:
              buf = readString(&stockCodes,buf);
              break;
            case DT_CompanyCodes:
              buf = readString(&companyCodes,buf);
              break;
            case DT_IndustryCodes:
              buf = readString(&industryCodes,buf);
              break;
            default:
              /* unknown tag: release everything parsed so far */
              freeAny(docID);
              s_free(stockCodes);
              s_free(companyCodes);
              s_free(industryCodes);
              REPORT_READ_ERROR(buf);
              break;
        }
    }
    *docCodes = makeWAISDocumentCodes(docID,versionNumber,stockCodes,
                                      companyCodes,industryCodes);
    return(buf);
}
/*----------------------------------------------------------------------*/
/* No-op serializer: WAIS does not carry Present info, so the buffer is
 * returned unchanged. */
char*
writePresentInfo(present,buffer,len)
PresentAPDU* present GCC_UNUSED;
char* buffer;
long* len GCC_UNUSED;
{
    /* The WAIS protocol doesn't use present info */
    return(buffer);
}
/*----------------------------------------------------------------------*/
/* No-op parser: WAIS does not carry Present info; *info is set NULL
 * and the buffer is returned unchanged. */
char*
readPresentInfo(info,buffer)
void** info;
char* buffer;
{
    /* The WAIS protocol doesn't use present info */
    *info = NULL;
    return(buffer);
}
/*----------------------------------------------------------------------*/
/* No-op serializer: WAIS does not carry PresentResponse info. */
char*
writePresentResponseInfo(response,buffer,len)
PresentResponseAPDU* response GCC_UNUSED;
char* buffer;
long* len GCC_UNUSED;
{
    /* The WAIS protocol doesn't use presentResponse info */
    return(buffer);
}
/*----------------------------------------------------------------------*/
/* No-op parser: WAIS does not carry PresentResponse info; *info is set
 * NULL and the buffer is returned unchanged. */
char*
readPresentResponseInfo(info,buffer)
void** info;
char* buffer;
{
    /* The WAIS protocol doesn't use presentResponse info */
    *info = NULL;
    return(buffer);
}
/*----------------------------------------------------------------------*/
/* support for type 1 queries */
/* new use values (for the chunk types) */
#define BYTE "wb"
#define LINE "wl"
#define PARAGRAPH "wp"
#define DATA_TYPE "wt"
/* WAIS supports the following semantics for type 1 queries:
1. retrieve the header/codes from a document:
System_Control_Number = docID
Data Type = type (optional)
And
2. retrieve a fragment of the text of a document:
System_Control_Number = docID
Data Type = type (optional)
And
Chunk >= start
And
Chunk < end
And
Information from multiple documents may be requested by using
groups of the above joined by:
OR
( XXX does an OR come after every group but the first, or do they
all come at the end? )
( XXX return type could be in the element set)
*/
static query_term** makeWAISQueryTerms PARAMS((DocObj** docs));
static query_term**
makeWAISQueryTerms(docs)
DocObj** docs;
/* given a null terminated list of docObjs, construct the appropriate
   query of the form given above (postfix: operand terms followed by
   their operator).  Returns a NULL-terminated, heap-allocated array of
   query_term*; NULL if docs is NULL.
 */
{
    query_term** terms = NULL;
    long numTerms = 0;
    DocObj* doc = NULL;
    long i;

    if (docs == NULL)
      return((query_term**)NULL);

    terms = (query_term**)s_malloc((size_t)(sizeof(query_term*) * 1));
    terms[numTerms] = NULL;  /* keep the list NULL-terminated at all times */

    /* loop through the docs making terms for them all */
    for (i = 0,doc = docs[i]; doc != NULL; doc = docs[++i])
    {   any* type = NULL;

        if (doc->Type != NULL)
          type = stringToAny(doc->Type);

        if (doc->ChunkCode == CT_document) /* a whole document */
        {   /* at most 3 new terms: control number, type, AND */
            terms = (query_term**)s_realloc((char*)terms,
                                            (size_t)(sizeof(query_term*) *
                                                     (numTerms + 3 + 1)));
            terms[numTerms++] = makeAttributeTerm(SYSTEM_CONTROL_NUMBER,
                                                  EQUAL,IGNORE,IGNORE,
                                                  IGNORE,IGNORE,doc->DocumentID);
            if (type != NULL)
            {   terms[numTerms++] = makeAttributeTerm(DATA_TYPE,EQUAL,
                                                      IGNORE,IGNORE,IGNORE,
                                                      IGNORE,type);
                terms[numTerms++] = makeOperatorTerm(AND);
            }
            terms[numTerms] = NULL;
        }
        else /* a document fragment */
        {   char chunk_att[ATTRIBUTE_SIZE];
            any* startChunk = NULL;
            any* endChunk = NULL;
            /* at most 7 new terms: control number, type, AND,
               chunk>=start, AND, chunk<end, AND */
            terms = (query_term**)s_realloc((char*)terms,
                                            (size_t)(sizeof(query_term*) *
                                                     (numTerms + 7 + 1)));
            switch (doc->ChunkCode)
            {   case CT_byte:
                case CT_line:
                {   char start[20],end[20];
                    /* byte/line positions are numeric; render as strings */
                    (doc->ChunkCode == CT_byte) ?
                      strncpy(chunk_att,BYTE,ATTRIBUTE_SIZE) :
                      strncpy(chunk_att,LINE,ATTRIBUTE_SIZE);
                    sprintf(start,"%ld",doc->ChunkStart.Pos);
                    startChunk = stringToAny(start);
                    sprintf(end,"%ld",doc->ChunkEnd.Pos);
                    endChunk = stringToAny(end);
                }
                break;
                case CT_paragraph:
                  /* paragraph chunks are identified by IDs owned by doc */
                  strncpy(chunk_att,PARAGRAPH,ATTRIBUTE_SIZE);
                  startChunk = doc->ChunkStart.ID;
                  endChunk = doc->ChunkEnd.ID;
                  break;
                default:
                  /* error */
                  /* NOTE(review): chunk_att, startChunk and endChunk are
                   * left uninitialized/NULL here yet still used below —
                   * confirm callers never pass an unknown ChunkCode. */
                  break;
            }
            terms[numTerms++] = makeAttributeTerm(SYSTEM_CONTROL_NUMBER,
                                                  EQUAL,IGNORE,IGNORE,
                                                  IGNORE,
                                                  IGNORE,doc->DocumentID);
            if (type != NULL)
            {   terms[numTerms++] = makeAttributeTerm(DATA_TYPE,EQUAL,IGNORE,
                                                      IGNORE,IGNORE,IGNORE,
                                                      type);
                terms[numTerms++] = makeOperatorTerm(AND);
            }
            terms[numTerms++] = makeAttributeTerm(chunk_att,
                                                  GREATER_THAN_OR_EQUAL,
                                                  IGNORE,IGNORE,IGNORE,
                                                  IGNORE,
                                                  startChunk);
            terms[numTerms++] = makeOperatorTerm(AND);
            terms[numTerms++] = makeAttributeTerm(chunk_att,LESS_THAN,
                                                  IGNORE,IGNORE,IGNORE,
                                                  IGNORE,
                                                  endChunk);
            terms[numTerms++] = makeOperatorTerm(AND);
            terms[numTerms] = NULL;
            /* byte/line chunks own their anys; paragraph IDs belong to doc */
            if (doc->ChunkCode == CT_byte || doc->ChunkCode == CT_line)
            {   freeAny(startChunk);
                freeAny(endChunk);
            }
        }
        freeAny(type);
        if (i != 0) /* multiple independent queries, need a disjunction */
        {   terms = (query_term**)s_realloc((char*)terms,
                                            (size_t)(sizeof(query_term*) *
                                                     (numTerms + 1 + 1)));
            terms[numTerms++] = makeOperatorTerm(OR);
            terms[numTerms] = NULL;
        }
    }
    return(terms);
}
/*----------------------------------------------------------------------*/
static DocObj** makeWAISQueryDocs PARAMS((query_term** terms));
static DocObj**
makeWAISQueryDocs(terms)
query_term** terms;
/* given a list of terms in the form given above, convert them to
   DocObjs.  Returns a NULL-terminated, heap-allocated DocObj* array.
 */
{
    query_term* docTerm = NULL;
    query_term* fragmentTerm = NULL;
    DocObj** docs = NULL;
    DocObj* doc = NULL;
    long docNum,termNum;

    docNum = termNum = 0;

    docs = (DocObj**)s_malloc((size_t)(sizeof(DocObj*) * 1));
    docs[docNum] = NULL;  /* keep the list NULL-terminated at all times */

    /* translate the terms into DocObjs */
    while (true)
    {
        query_term* typeTerm = NULL;
        char* type = NULL;
        long startTermOffset;

        docTerm = terms[termNum];
        if (docTerm == NULL)
          break; /* we're done converting */

        typeTerm = terms[termNum + 1]; /* get the lead Term if it exists */
        /* FIX: typeTerm is NULL when docTerm is the last term in the
         * list (e.g. a single whole-document query); the original code
         * dereferenced it unconditionally in strcmp(). */
        if (typeTerm != NULL && strcmp(typeTerm->Use,DATA_TYPE) == 0) /* we do have a type */
        {   startTermOffset = 3;
            type = anyToString(typeTerm->Term);
        }
        else /* no type */
        {   startTermOffset = 1;
            typeTerm = NULL;
            type = NULL;
        }

        /* grow the doc list */
        docs = (DocObj**)s_realloc((char*)docs,(size_t)(sizeof(DocObj*) *
                                                        (docNum + 1 + 1)));

        /* figure out what kind of docObj to build - and build it */
        fragmentTerm = terms[termNum + startTermOffset];
        if (fragmentTerm != NULL && fragmentTerm->TermType == TT_Attribute)
        {   /* build a document fragment: chunk>=start AND chunk<end */
            query_term* startTerm = fragmentTerm;
            query_term* endTerm = terms[termNum + startTermOffset + 2];
            if (strcmp(startTerm->Use,BYTE) == 0){ /* a byte chunk */
                doc = makeDocObjUsingBytes(duplicateAny(docTerm->Term),
                                           type,
                                           anyToLong(startTerm->Term),
                                           anyToLong(endTerm->Term));
                log_write("byte");
            }else if (strcmp(startTerm->Use,LINE) == 0){ /* a line chunk */
                doc = makeDocObjUsingLines(duplicateAny(docTerm->Term),
                                           type,
                                           anyToLong(startTerm->Term),
                                           anyToLong(endTerm->Term));
                log_write("line");
            }else{
                log_write("chunk"); /* a paragraph chunk */
                doc = makeDocObjUsingParagraphs(duplicateAny(docTerm->Term),
                                                type,
                                                duplicateAny(startTerm->Term),
                                                duplicateAny(endTerm->Term));
            }
            termNum += (startTermOffset + 4); /* point to next term */
        }
        else /* build a full document */
        {
            doc = makeDocObjUsingWholeDocument(duplicateAny(docTerm->Term),
                                               type);
            log_write("whole doc");
            termNum += startTermOffset; /* point to next term */
        }
        docs[docNum++] = doc; /* insert the new document */
        docs[docNum] = NULL; /* keep the doc list terminated */
        if (terms[termNum] != NULL)
          termNum++; /* skip the OR operator if necessary */
        else
          break; /* we are done */
    }
    return(docs);
}
/*----------------------------------------------------------------------*/
/* Convert a NULL-terminated DocObj list into the serialized type 1
 * query, returned as a freshly allocated any (caller frees with
 * freeAny / CSTFreeWAISTextQuery).  Intermediate terms are released
 * before returning.
 */
any*
makeWAISTextQuery(docs)
DocObj** docs;
/* given a list of DocObjs, return an any whose contents is the corresponding
   type 1 query
 */
{
    any *buf = NULL;
    query_term** terms = NULL;
    terms = makeWAISQueryTerms(docs);
    buf = writeQuery(terms);
    doList((void**)terms,freeTerm);  /* free each term, then the array */
    s_free(terms);
    return(buf);
}
/*----------------------------------------------------------------------*/
/* Inverse of makeWAISTextQuery(): parse a serialized type 1 query into
 * a NULL-terminated DocObj* array (heap-allocated; caller owns it).
 * Intermediate terms are released before returning.
 */
DocObj**
readWAISTextQuery(buf)
any* buf;
/* given an any whose contents are type 1 queries of the WAIS sort,
   construct a list of the corresponding DocObjs
 */
{
    query_term** terms = NULL;
    DocObj** docs = NULL;
    terms = readQuery(buf);
    docs = makeWAISQueryDocs(terms);
    doList((void**)terms,freeTerm);  /* free each term, then the array */
    s_free(terms);
    return(docs);
}
/*----------------------------------------------------------------------*/
/* Customized free WAIS object routines: */
/* */
/* This set of procedures is for applications to free a WAIS object */
/* which was made with makeWAISFOO. */
/* Each procedure frees only the memory that was allocated in its */
/* associated makeWAISFOO routine, thus it's not necessary for the */
/* caller to assign nulls to the pointer fields of the WAIS object. */
/*----------------------------------------------------------------------*/
/* Free only the top-level struct made by makeWAISInitResponse();
 * fields are not touched (see the block comment above). */
void
CSTFreeWAISInitResponse(init)
WAISInitResponse* init;
/* free an object made with makeWAISInitResponse */
{
    s_free(init);
}
/*----------------------------------------------------------------------*/
/* Free only the top-level struct made by makeWAISSearch(); fields are
 * not touched. */
void
CSTFreeWAISSearch(query)
WAISSearch* query;
/* destroy an object made with makeWAISSearch() */
{
    s_free(query);
}
/*----------------------------------------------------------------------*/
/* Free only the DocObj struct itself; fields are not touched. */
void
CSTFreeDocObj(doc)
DocObj* doc;
/* free a docObj */
{
    s_free(doc);
}
/*----------------------------------------------------------------------*/
/* Free only the WAISDocumentHeader struct itself; fields untouched. */
void
CSTFreeWAISDocumentHeader(header)
WAISDocumentHeader* header;
{
    s_free(header);
}
/*----------------------------------------------------------------------*/
/* Free only the WAISDocumentShortHeader struct itself; fields untouched. */
void
CSTFreeWAISDocumentShortHeader(header)
WAISDocumentShortHeader* header;
{
    s_free(header);
}
/*----------------------------------------------------------------------*/
/* Free only the WAISDocumentLongHeader struct itself; fields untouched. */
void
CSTFreeWAISDocumentLongHeader(header)
WAISDocumentLongHeader* header;
{
    s_free(header);
}
/*----------------------------------------------------------------------*/
/* Free only the WAISSearchResponse struct itself; fields untouched. */
void
CSTFreeWAISSearchResponse(response)
WAISSearchResponse* response;
{
    s_free(response);
}
/*----------------------------------------------------------------------*/
/* Free only the WAISDocumentText struct itself; fields untouched. */
void
CSTFreeWAISDocumentText(docText)
WAISDocumentText* docText;
{
    s_free(docText);
}
/*----------------------------------------------------------------------*/
/* Free only the WAISDocumentHeadlines struct itself; unlike
 * freeWAISDocumentHeadlines(), fields are not touched. */
void
CSTFreeWAISDocHeadlines(docHeadline)
WAISDocumentHeadlines* docHeadline;
{
    s_free(docHeadline);
}
/*----------------------------------------------------------------------*/
/* Free only the WAISDocumentCodes struct itself; unlike
 * freeWAISDocumentCodes(), fields are not touched. */
void
CSTFreeWAISDocumentCodes(docCodes)
WAISDocumentCodes* docCodes;
{
    s_free(docCodes);
}
/*----------------------------------------------------------------------*/
/* Free a serialized text query made by makeWAISTextQuery(). */
void
CSTFreeWAISTextQuery(query)
any* query;
{
    freeAny(query);
}
/*----------------------------------------------------------------------*/
/*
** Routines originally from WMessage.c -- FM
**
**----------------------------------------------------------------------*/
/* WIDE AREA INFORMATION SERVER SOFTWARE
No guarantees or restrictions. See the readme file for the full standard
disclaimer.
3.26.90
*/
/* This file is for reading and writing the wais packet header.
* Morris@think.com
*/
/* to do:
* add check sum
* what do you do when checksum is wrong?
*/
/*---------------------------------------------------------------------*/
/* Unpack the fixed-layout WAIS packet header from msgBuffer into
 * header_struct.  Layout: [0..9] msg_len, [10] msg_type,
 * [11] hdr_vers, [12..21] server, [22] compression, [23] encoding,
 * [24] msg_checksum.  Single-byte fields pass through char_downcase().
 */
void
readWAISPacketHeader(msgBuffer,header_struct)
char* msgBuffer;
WAISMessage *header_struct;
{
    /* msgBuffer is a string containing at least HEADER_LENGTH bytes. */
    memmove(header_struct->msg_len,msgBuffer,(size_t)10);
    header_struct->msg_type = char_downcase((unsigned long)msgBuffer[10]);
    header_struct->hdr_vers = char_downcase((unsigned long)msgBuffer[11]);
    memmove(header_struct->server,(void*)(msgBuffer + 12),(size_t)10);
    header_struct->compression = char_downcase((unsigned long)msgBuffer[22]);
    header_struct->encoding = char_downcase((unsigned long)msgBuffer[23]);
    header_struct->msg_checksum = char_downcase((unsigned long)msgBuffer[24]);
}
/*---------------------------------------------------------------------*/
/* this modifies the header argument. See wais-message.h for the different
* options for the arguments.
*/
void
writeWAISPacketHeader(header,
                      dataLen,
                      type,
                      server,
                      compression,
                      encoding,
                      version)
char* header;
long dataLen;
long type;
char* server;
long compression;
long encoding;
long version;
/* Puts together the new wais before-the-z39-packet header.
 * Layout mirrors readWAISPacketHeader(): [0..9] zero-padded decimal
 * length, [10] type, [11] version, [12..21] server name (truncated to
 * 10 bytes), [22] compression, [23] encoding, [24] checksum.
 */
{
    char lengthBuf[11];
    char serverBuf[11];
    long serverLen = strlen(server);
    if (serverLen > 10)
      serverLen = 10;  /* server field is exactly 10 bytes wide */
    sprintf(lengthBuf, "%010ld", dataLen);
    strncpy(header,lengthBuf,10);
    header[10] = type & 0xFF;
    header[11] = version & 0xFF;
    strncpy(serverBuf,server,serverLen);
    strncpy((char*)(header + 12),serverBuf,serverLen);
    /* NOTE(review): when serverLen < 10, bytes 12+serverLen..21 are
     * left unwritten here — presumably the caller pre-fills/zeroes the
     * header buffer; confirm before relying on the wire contents. */
    header[22] = compression & 0xFF;
    header[23] = encoding & 0xFF;
    header[24] = '0'; /* checkSum(header + HEADER_LENGTH,dataLen); XXX the result must be ascii */
}
/*---------------------------------------------------------------------*/
|
avsm/openbsd-lynx
|
WWW/Library/Implementation/HTVMS_WaisProt.c
|
C
|
gpl-2.0
| 69,533
|
/*******************************************************************************
* This file contains iSCSI extentions for RDMA (iSER) Verbs
*
* (c) Copyright 2013 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
****************************************************************************/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include "isert_proto.h"
#include "ib_isert.h"
#define ISERT_MAX_CONN 8
#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;
static struct kmem_cache *isert_cmd_cache;
static void isert_release_work(struct work_struct *work);
/* QP async event handler registered via ib_qp_init_attr.event_handler.
 * COMM_EST is forwarded to the RDMA CM; LAST_WQE_REACHED is logged;
 * everything else is ignored.
 */
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = (struct isert_conn *)context;

	pr_err("isert_qp_event_callback event: %d\n", e->event);

	if (e->event == IB_EVENT_COMM_EST)
		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
	else if (e->event == IB_EVENT_QP_LAST_WQE_REACHED)
		pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
}
/* Query device attributes into *devattr, logging max_sge limits on
 * success.  Returns 0 or the ib_query_device() error code.
 */
static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret = ib_query_device(ib_dev, devattr);

	if (ret) {
		pr_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}

	pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
	pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}
/* Create the RC QP for a new connection, binding it to the
 * least-loaded CQ pair of the device.  Returns 0 or a negative errno.
 *
 * FIX: the original bumped cq_active_qps[min_index] but never undid it
 * when rdma_create_qp() failed, permanently leaking a QP slot on that
 * CQ (matches the upstream ib_isert error-path fix).
 */
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_qp_init_attr attr;
	struct ib_device_attr devattr;
	int ret, index, min_index = 0;

	memset(&devattr, 0, sizeof(struct ib_device_attr));
	ret = isert_query_device(cma_id->device, &devattr);
	if (ret)
		return ret;

	/* Pick the CQ pair currently serving the fewest QPs. */
	mutex_lock(&device_list_mutex);
	for (index = 0; index < device->cqs_used; index++)
		if (device->cq_active_qps[index] <
		    device->cq_active_qps[min_index])
			min_index = index;
	device->cq_active_qps[min_index]++;
	pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
	mutex_unlock(&device_list_mutex);

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = device->dev_tx_cq[min_index];
	attr.recv_cq = device->dev_rx_cq[min_index];
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READ..
	 */
	attr.cap.max_send_sge = devattr.max_sge - 2;
	isert_conn->max_sge = attr.cap.max_send_sge;
	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;

	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
		 cma_id->device);
	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
		 isert_conn->conn_pd->device);

	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
	if (ret) {
		pr_err("rdma_create_qp failed for cma_id %d\n", ret);
		goto err;
	}
	isert_conn->conn_qp = cma_id->qp;
	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
err:
	/* Undo the accounting above so the CQ slot is not leaked. */
	mutex_lock(&device_list_mutex);
	device->cq_active_qps[min_index]--;
	mutex_unlock(&device_list_mutex);

	return ret;
}
/* CQ async event handler (passed to ib_create_cq); log-only. */
static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	pr_debug("isert_cq_event_callback event: %d\n", e->event);
}
/* Allocate and DMA-map the fixed ring of RX descriptors for a
 * connection.  On a mapping failure, unmaps the descriptors mapped so
 * far and frees the array.  Returns 0 or -ENOMEM.
 */
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->conn_rx_descs)
		goto fail;

	rx_desc = isert_conn->conn_rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		/* pre-build the SGE used when posting this descriptor */
		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = isert_conn->conn_mr->lkey;
	}

	isert_conn->conn_rx_desc_head = 0;
	return 0;

dma_map_fail:
	/* unwind only the j < i descriptors that were mapped */
	rx_desc = isert_conn->conn_rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
fail:
	return -ENOMEM;
}
/* Unmap and free the RX descriptor ring; no-op when it was never
 * allocated. */
static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_device->ib_device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->conn_rx_descs)
		return;

	rx_desc = isert_conn->conn_rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
}
static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_callback(struct ib_cq *, void *);
/* Allocate per-device IB resources: a PD, a DMA MR, and one RX/TX CQ
 * pair per used completion vector (capped by online CPUs and
 * ISERT_MAX_CQ).  On failure, unwinds everything created so far.
 * Returns 0 or a negative errno.
 */
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	struct isert_cq_desc *cq_desc;
	int ret = 0, i, j;

	device->cqs_used = min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors);
	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
	pr_debug("Using %d CQs, device %s supports %d vectors\n",
		 device->cqs_used, device->ib_device->name,
		 device->ib_device->num_comp_vectors);
	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
				device->cqs_used, GFP_KERNEL);
	if (!device->cq_desc) {
		pr_err("Unable to allocate device->cq_desc\n");
		return -ENOMEM;
	}
	cq_desc = device->cq_desc;

	device->dev_pd = ib_alloc_pd(ib_dev);
	if (IS_ERR(device->dev_pd)) {
		ret = PTR_ERR(device->dev_pd);
		pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
		goto out_cq_desc;
	}

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device = device;
		cq_desc[i].cq_index = i;

		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_rx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_RX_CQ_LEN, i);
		if (IS_ERR(device->dev_rx_cq[i])) {
			ret = PTR_ERR(device->dev_rx_cq[i]);
			/* NULL the slot so the unwind below can tell
			 * created CQs from failed ones */
			device->dev_rx_cq[i] = NULL;
			goto out_cq;
		}

		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_tx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_TX_CQ_LEN, i);
		if (IS_ERR(device->dev_tx_cq[i])) {
			ret = PTR_ERR(device->dev_tx_cq[i]);
			device->dev_tx_cq[i] = NULL;
			goto out_cq;
		}

		/* arm both CQs for their first completion notification */
		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;

		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(device->dev_mr)) {
		ret = PTR_ERR(device->dev_mr);
		pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
		goto out_cq;
	}

	return 0;

out_cq:
	/* destroy every CQ pair created before the failure; the
	 * cq_{rx,tx}_work items are initialized elsewhere (not visible
	 * here), hence the cancel_work_sync before each destroy */
	for (j = 0; j < i; j++) {
		cq_desc = &device->cq_desc[j];

		if (device->dev_rx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_rx_work);
			ib_destroy_cq(device->dev_rx_cq[j]);
		}
		if (device->dev_tx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_tx_work);
			ib_destroy_cq(device->dev_tx_cq[j]);
		}
	}
	ib_dealloc_pd(device->dev_pd);

out_cq_desc:
	kfree(device->cq_desc);

	return ret;
}
/* Tear down everything created by isert_create_device_ib_res():
 * flush pending CQ work, destroy all CQ pairs, then the MR, PD and
 * descriptor array. */
static void
isert_free_device_ib_res(struct isert_device *device)
{
	struct isert_cq_desc *cq_desc;
	int i;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc = &device->cq_desc[i];

		cancel_work_sync(&cq_desc->cq_rx_work);
		cancel_work_sync(&cq_desc->cq_tx_work);
		ib_destroy_cq(device->dev_rx_cq[i]);
		ib_destroy_cq(device->dev_tx_cq[i]);
		device->dev_rx_cq[i] = NULL;
		device->dev_tx_cq[i] = NULL;
	}

	ib_dereg_mr(device->dev_mr);
	ib_dealloc_pd(device->dev_pd);
	kfree(device->cq_desc);
}
/* Drop one reference on device (under device_list_mutex); on the last
 * reference, free its IB resources and remove it from device_list. */
static void
isert_device_try_release(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}
/* Find (by node GUID) or create the isert_device for cma_id's IB
 * device, taking a reference either way.  Serialized by
 * device_list_mutex.  Returns the device or an ERR_PTR. */
static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	mutex_unlock(&device_list_mutex);

	return device;
}
/* RDMA_CM_EVENT_CONNECT_REQUEST handler: allocate and initialize an
 * isert_conn, map its login buffers, bind it to a device and QP, and
 * queue it on the np accept list for the login thread.  Returns 0 or
 * a negative errno (unwinding everything acquired).
 *
 * FIX: list_add_tail() takes (new_entry, list_head); the original had
 * the arguments swapped, splicing the np accept list head onto the
 * connection node instead of queueing the connection.
 */
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	struct ib_device *ib_dev = cma_id->device;
	int ret = 0;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_debug("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn) {
		pr_err("Unable to allocate isert_conn\n");
		return -ENOMEM;
	}
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
	init_completion(&isert_conn->conn_login_comp);
	init_completion(&isert_conn->conn_wait);
	init_completion(&isert_conn->conn_wait_comp_err);
	kref_init(&isert_conn->conn_kref);
	/* extra reference dropped by the disconnect path */
	kref_get(&isert_conn->conn_kref);
	mutex_init(&isert_conn->conn_mutex);
	INIT_WORK(&isert_conn->release_work, isert_release_work);

	cma_id->context = isert_conn;
	isert_conn->conn_cm_id = cma_id;
	isert_conn->responder_resources = event->param.conn.responder_resources;
	isert_conn->initiator_depth = event->param.conn.initiator_depth;
	pr_debug("Using responder_resources: %u initiator_depth: %u\n",
		 isert_conn->responder_resources, isert_conn->initiator_depth);

	/* single buffer holding the RX login request + TX login response */
	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		pr_err("Unable to allocate isert_conn->login_buf\n");
		ret = -ENOMEM;
		goto out;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;
	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		 isert_conn->login_buf, isert_conn->login_req_buf,
		 isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
		       ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_rsp_buf,
				ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
		       ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	device = isert_device_find_by_ib_dev(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}

	isert_conn->conn_device = device;
	isert_conn->conn_pd = device->dev_pd;
	isert_conn->conn_mr = device->dev_mr;

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->np_accept_mutex);
	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
	wake_up(&isert_np->np_accept_wq);
	return 0;

out_conn_dev:
	isert_device_try_release(device);
out_rsp_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
out:
	kfree(isert_conn);
	return ret;
}
/* Final teardown of a connection (called from the last kref put):
 * release the QP and its CQ accounting, RX descriptors, CM id, login
 * buffers, the conn itself, and finally the device reference. */
static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_device *device = isert_conn->conn_device;
	int cq_index;

	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

	if (isert_conn->conn_qp) {
		/* recover which CQ pair this QP was accounted against */
		cq_index = ((struct isert_cq_desc *)
			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
		isert_conn->conn_device->cq_active_qps[cq_index]--;

		rdma_destroy_qp(isert_conn->conn_cm_id);
	}

	isert_free_rx_descriptors(isert_conn);
	rdma_destroy_id(isert_conn->conn_cm_id);

	if (isert_conn->login_buf) {
		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
		ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN,
				    DMA_FROM_DEVICE);
		kfree(isert_conn->login_buf);
	}
	kfree(isert_conn);

	if (device)
		isert_device_try_release(device);

	pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}
/* RDMA_CM_EVENT_ESTABLISHED handler: take a connection reference held
 * for the lifetime of the established connection. */
static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->context;

	kref_get(&isert_conn->conn_kref);
}
/* kref release callback: runs isert_connect_release() when the last
 * connection reference is dropped. */
static void
isert_release_conn_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, conn_kref);

	pr_debug("Calling isert_connect_release for final kref %s/%d\n",
		 current->comm, current->pid);

	isert_connect_release(isert_conn);
}
/* Drop one connection reference; frees the conn on the last one. */
static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}
/* Deferred disconnect handling (scheduled from
 * isert_disconnected_handler()): move the connection to TERMINATING,
 * and either complete conn_wait immediately when no buffers are in
 * flight, or issue rdma_disconnect() towards the initiator. */
static void
isert_disconnect_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
				struct isert_conn, conn_logout_work);

	pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
	mutex_lock(&isert_conn->conn_mutex);
	if (isert_conn->state == ISER_CONN_UP)
		isert_conn->state = ISER_CONN_TERMINATING;

	/* nothing posted: teardown can proceed without a disconnect */
	if (isert_conn->post_recv_buf_count == 0 &&
	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
		mutex_unlock(&isert_conn->conn_mutex);
		goto wake_up;
	}
	if (!isert_conn->conn_cm_id) {
		mutex_unlock(&isert_conn->conn_mutex);
		isert_put_conn(isert_conn);
		return;
	}

	if (isert_conn->disconnect) {
		/* Send DREQ/DREP towards our initiator */
		rdma_disconnect(isert_conn->conn_cm_id);
	}

	mutex_unlock(&isert_conn->conn_mutex);

wake_up:
	complete(&isert_conn->conn_wait);
}
/* Disconnect-side CM event handler.  When the cma_id has no QP it is
 * the listener id (np context): just clear np_cm_id.  Otherwise record
 * whether an explicit disconnect is required and defer the real work
 * to isert_disconnect_work().
 *
 * FIX: the original block was corrupted — it contained stray `break;`,
 * `case RDMA_CM_EVENT_ADDR_CHANGE:` and `default:` labels with no
 * enclosing switch, referencing `event` and isert_setup_id() which do
 * not exist in this function's scope (they belong to a separate np
 * CMA handler).  Reconstructed to match the coherent upstream handler.
 */
static int
isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
{
	struct isert_conn *isert_conn;

	if (!cma_id->qp) {
		struct isert_np *isert_np = cma_id->context;

		isert_np->np_cm_id = NULL;
		return -1;
	}

	isert_conn = (struct isert_conn *)cma_id->context;
	isert_conn->disconnect = disconnect;

	INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
	schedule_work(&isert_conn->conn_logout_work);

	return 0;
}
/*
 * Central RDMA CM event dispatcher for the listener and per-connection
 * cm_ids.  ADDR_CHANGE/DISCONNECTED/DEVICE_REMOVAL set disconnect = true and
 * then deliberately fall through into TIMEWAIT_EXIT so all four events share
 * the isert_disconnected_handler() path (TIMEWAIT_EXIT with false).
 */
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;
	bool disconnect = false;

	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
		 event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
			       event->event, ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
		disconnect = true;
		/* intentional fallthrough into TIMEWAIT_EXIT handling */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id, disconnect);
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
	default:
		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}
/*
 * Post @count chained receive work requests starting at the connection's
 * circular RX descriptor head.  On success advances conn_rx_desc_head; on
 * failure rolls the posted-buffer accounting back.
 */
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	unsigned int rx_head = isert_conn->conn_rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->conn_rx_descs[rx_head];
		rx_wr->wr_id = (unsigned long)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		/* mask-based wrap assumes ISERT_QP_MAX_RECV_DTOS is a power of two */
		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
			   &rx_wr_failed);
	if (ret) {
		pr_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	} else {
		pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
		isert_conn->conn_rx_desc_head = rx_head;
	}
	return ret;
}
/*
 * Post a single signaled SEND for @tx_desc (header-only payload).  Bumps
 * post_send_buf_count before posting and undoes it on failure.
 */
static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	/* flush CPU writes to the header region before the HCA reads it */
	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next = NULL;
	send_wr.wr_id = (unsigned long)tx_desc;
	send_wr.sg_list = tx_desc->tx_sg;
	send_wr.num_sge = tx_desc->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
	if (ret) {
		pr_err("ib_post_send() failed, ret: %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
	}

	return ret;
}
/*
 * Reset a TX descriptor for reuse: zero the iSER header, set the version
 * flag, and re-stamp the header SGE's lkey in case it went stale.
 */
static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	/* make the header region CPU-writable before we touch it */
	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;
	tx_desc->isert_cmd = isert_cmd;

	if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
		tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
		pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}
/*
 * DMA-map the descriptor's header area and initialize tx_sg[0] to cover
 * it.  Returns 0 on success or -ENOMEM if the mapping failed.
 */
static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		pr_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;

	pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
		 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
		 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

	return 0;
}
/* Fill a send WR for a signaled SEND of the command's TX descriptor SGEs. */
static void
isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr)
{
	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	send_wr->opcode = IB_WR_SEND;
	send_wr->send_flags = IB_SEND_SIGNALED;
	send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
}
/*
 * Post the single receive used for login-phase PDUs into login_req_buf.
 * Returns the ib_post_recv() result; rolls the posted-buffer count back on
 * failure.
 *
 * Fix: the "returned success" debug line was previously emitted even when
 * ib_post_recv() failed; it is now printed only on the success path.
 */
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->conn_mr->lkey;

	pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
		 sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		pr_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	} else {
		pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
	}

	return ret;
}
/*
 * Send a login response PDU of @length payload bytes.  On the final
 * (login_complete) response the full RX descriptor ring is allocated and
 * pre-posted and the connection goes to ISER_CONN_UP; earlier responses
 * just re-post the single login receive.  On login failure neither receive
 * is posted and control falls straight through to the send.
 */
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		/* attach login payload as second SGE */
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
			if (ret)
				return ret;

			isert_conn->state = ISER_CONN_UP;
			goto post_send;
		}

		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}
/*
 * Copy an incoming login request PDU into the iscsi_login state and wake
 * the login thread via conn_login_comp.  On the first request the leading
 * PDU fields (ISID, CID, TSIH, stage, versions, sequence numbers) seed the
 * negotiation state.
 */
static void
isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
		   struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	if (!login) {
		pr_err("conn->conn_login is NULL\n");
		dump_stack();
		return;
	}

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	/* clamp the key=value payload to the negotiation buffer size */
	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
		 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	complete(&isert_conn->conn_login_comp);
}
/*
 * iscsi_cmd release callback: free per-command buffers and return the
 * containing isert_cmd to its slab cache.
 */
static void
isert_release_cmd(struct iscsi_cmd *cmd)
{
	struct isert_cmd *icmd =
		container_of(cmd, struct isert_cmd, iscsi_cmd);

	pr_debug("Entering isert_release_cmd %p >>>>>>>>>>>>>>>.\n", icmd);

	kfree(cmd->buf_ptr);
	kfree(cmd->tmr_req);

	kmem_cache_free(isert_cmd_cache, icmd);
}
/*
 * iscsi_cmd allocation callback: zero-allocate an isert_cmd from the slab
 * cache, bind it to this connection, and install the release hook.
 * Returns the embedded iscsi_cmd, or NULL on allocation failure.
 */
static struct iscsi_cmd
*isert_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp)
{
	struct isert_conn *iconn = (struct isert_conn *)conn->context;
	struct isert_cmd *icmd = kmem_cache_zalloc(isert_cmd_cache, gfp);

	if (!icmd) {
		pr_err("Unable to allocate isert_cmd\n");
		return NULL;
	}

	icmd->conn = iconn;
	icmd->iscsi_cmd.release_cmd = &isert_release_cmd;

	return &icmd->iscsi_cmd;
}
/*
 * Process a received SCSI command PDU: run the core setup/process steps,
 * copy any immediate data from the RX descriptor into the command's
 * scatterlist, then sequence the command.  Always returns 0 to the caller
 * once core setup succeeded; core-level failures are handled in-band.
 */
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iser_rx_desc *rx_desc,
		      unsigned char *buf)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	struct scatterlist *sg;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	/* snapshot before iscsit_process_scsi_cmd() may change cmd state */
	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		/* core asks us to discard the payload but still sequence */
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	/* copy immediate data following the headers into the TCM SGL */
	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

	pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		 sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);

	return 0;
}
/*
 * Process an unsolicited Data-Out PDU: locate the owning command, copy the
 * payload from the RX descriptor into its scatterlist at the current write
 * offset, and hand the result to the core Data-Out payload check.
 * Solicited or non-page-aligned data is rejected with -1 (FIXME cases).
 */
static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		pr_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
		 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		pr_err("Received unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}

	pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
		 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}
/*
 * Dispatch a received iSCSI PDU by opcode.  For command-carrying opcodes a
 * new iscsi_cmd is allocated and, for SCSI commands, the iSER read/write
 * STags and VAs from the iSER header are stashed in the isert_cmd for later
 * RDMA.  Returns the sub-handler's result, or -EINVAL for unknown opcodes
 * and failed allocations.
 */
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd,
					    rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_nop_out(conn, cmd, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		/* positive return: block until the logout response is sent */
		if (ret > 0)
			wait_for_completion_timeout(&conn->conn_logout_comp,
						    SECONDS_FOR_LOGOUT_COMP *
						    HZ);
		break;
	default:
		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}
/*
 * Decode the iSER header of a received descriptor, extracting any remote
 * STag/VA advertisements (ISER_RSV for reads, ISER_WSV for writes), then
 * pass the PDU on to isert_rx_opcode() for iSCSI-level dispatch.
 */
static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;
	int rc;

	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
				 read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n",
				 write_stag, (unsigned long long)write_va);
		}

		pr_debug("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		pr_err("iSER Hello message\n");
		break;
	default:
		pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

	/* rc intentionally ignored: errors are handled inside the dispatch */
	rc = isert_rx_opcode(isert_conn, rx_desc,
			     read_stag, read_va, write_stag, write_va);
}
/*
 * Receive completion handler.  Distinguishes the dedicated login buffer
 * from ring descriptors, syncs the buffer for CPU access, dispatches the
 * PDU, and tops up the RX ring with a batch of new receives once the
 * number of outstanding buffers drops low enough.
 */
static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
		    unsigned long xfer_len)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf)
		isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
				   isert_conn);
	else
		isert_rx_do_work(desc, isert_conn);

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
		 isert_conn->post_recv_buf_count);

	/* the login buffer is re-posted elsewhere; no ring refill for it */
	if ((char *)desc == isert_conn->login_req_buf)
		return;

	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			pr_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
}
/*
 * Release a command's RDMA resources: DMA-unmap the data scatterlist (if
 * mapped) and free the send WR array and SGE array, clearing the pointers
 * so a second call is harmless.
 */
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct ib_device *dev = isert_conn->conn_cm_id->device;
	struct isert_rdma_wr *rdma_wr = &isert_cmd->rdma_wr;

	pr_debug("isert_unmap_cmd >>>>>>>>>>>>>>>>>>>>>>>\n");

	if (rdma_wr->sge) {
		ib_dma_unmap_sg(dev, rdma_wr->sge, rdma_wr->num_sge,
				DMA_TO_DEVICE);
		rdma_wr->sge = NULL;
	}

	kfree(rdma_wr->send_wr);
	rdma_wr->send_wr = NULL;

	kfree(isert_cmd->ib_sge);
	isert_cmd->ib_sge = NULL;
}
/*
 * Final put path for a command, keyed on its iSCSI opcode.  Unlinks the
 * command from the connection list, releases RDMA resources where they
 * exist, and frees via transport_generic_free_cmd() or isert_release_cmd().
 * @comp_err: true when called from a completion-error path.
 */
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;

	pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd->se_sess, se_cmd);
			}
		}

		isert_unmap_cmd(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			pr_debug("Calling transport_generic_free_cmd from"
				 " isert_put_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		isert_release_cmd(cmd);
		break;
	}
}
/*
 * DMA-unmap a TX descriptor's header region, if it was mapped, and clear
 * the recorded address so repeat calls are no-ops.
 */
static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (!tx_desc->dma_addr)
		return;

	pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
	ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
			    ISER_HEADERS_LEN, DMA_TO_DEVICE);
	tx_desc->dma_addr = 0;
}
/*
 * Completion-side teardown of a response: unmap any sense-data buffer and
 * the TX header, then drop the command via isert_put_cmd().
 */
static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
{
	u64 sense_dma = isert_cmd->sense_buf_dma;

	if (sense_dma != 0) {
		pr_debug("Calling ib_dma_unmap_single for isert_cmd->sense_buf_dma\n");
		ib_dma_unmap_single(ib_dev, sense_dma,
				    isert_cmd->sense_buf_len, DMA_TO_DEVICE);
		isert_cmd->sense_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd, comp_err);
}
/*
 * Completion handler for an RDMA_READ that fetched WRITE data from the
 * initiator: release the RDMA mapping/SGEs, mark the data phase done, and
 * kick the command into backend execution via target_execute_cmd().
 */
static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct ib_device *ib_dev = isert_cmd->conn->conn_cm_id->device;

	iscsit_stop_dataout_timer(cmd);

	if (wr->sge) {
		pr_debug("isert_do_rdma_read_comp: Unmapping wr->sge from t_data_sg\n");
		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
		wr->sge = NULL;
	}

	if (isert_cmd->ib_sge) {
		pr_debug("isert_do_rdma_read_comp: Freeing isert_cmd->ib_sge\n");
		kfree(isert_cmd->ib_sge);
		isert_cmd->ib_sge = NULL;
	}

	cmd->write_data_done = se_cmd->data_length;
	wr->send_wr_num = 0;

	pr_debug("isert_do_rdma_read_comp, calling target_execute_cmd\n");
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	target_execute_cmd(se_cmd);
}
/*
 * Workqueue handler for control-PDU send completions (TMR, Reject,
 * Logout responses).  Runs the relevant post-handler and releases the
 * response resources; each branch also drops its post_send accounting.
 */
static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		iscsit_tmr_post_handler(cmd, cmd->conn);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_REJECT:
		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
		atomic_dec(&isert_conn->post_send_buf_count);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
		/*
		 * The logout post-handler tears the connection down, so no
		 * isert_completion_put() here.
		 */
		atomic_dec(&isert_conn->post_send_buf_count);
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	default:
		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}
/*
 * SEND completion for a response PDU.  Control responses (TMR, Logout,
 * Reject) are deferred to isert_do_control_comp() on the completion
 * workqueue; ordinary status responses are finalized inline, subtracting
 * the response send plus its preceding RDMA WRs from the send count.
 */
static void
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}
	/* +1 accounts for the response SEND itself on top of the RDMA WRs */
	atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}
/*
 * TX completion dispatcher, keyed on the iSER IB op recorded when the WR
 * was built.  Descriptors without an isert_cmd (e.g. login responses) are
 * simply unmapped and their send count decremented.
 */
static void
isert_send_completion(struct iser_tx_desc *tx_desc,
		      struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

	if (!isert_cmd) {
		atomic_dec(&isert_conn->post_send_buf_count);
		isert_unmap_tx_desc(tx_desc, ib_dev);
		return;
	}
	wr = &isert_cmd->rdma_wr;

	switch (wr->iser_ib_op) {
	case ISER_IB_RECV:
		/* a RECV op on the TX CQ indicates a driver bug */
		pr_err("isert_send_completion: Got ISER_IB_RECV\n");
		dump_stack();
		break;
	case ISER_IB_SEND:
		pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
		pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
		dump_stack();
		break;
	case ISER_IB_RDMA_READ:
		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");

		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
		pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
		dump_stack();
		break;
	}
}
/*
 * TX completion-error path: descriptors bound to a command go through the
 * full error put; bare descriptors are merely unmapped.
 */
static void
isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;

	if (isert_cmd)
		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
	else
		isert_unmap_tx_desc(tx_desc, ib_dev);
}
/*
 * RX completion-error path.  Once the last posted receive has drained,
 * waits out all session commands and in-flight sends, drops the connection
 * to ISER_CONN_DOWN, triggers reinstatement, and signals conn_wait_comp_err.
 */
static void
isert_cq_rx_comp_err(struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;

	/* more receives still outstanding: defer until the last one flushes */
	if (isert_conn->post_recv_buf_count)
		return;

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}

	/* poll until all posted sends have completed or flushed */
	while (atomic_read(&isert_conn->post_send_buf_count))
		msleep(3000);

	mutex_lock(&isert_conn->conn_mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->conn_mutex);

	iscsit_cause_connection_reinstatement(isert_conn->conn, 0);

	complete(&isert_conn->conn_wait_comp_err);
}
/*
 * Workqueue drain of one TX completion queue: poll until empty, route
 * successes to isert_send_completion() and failures to the error path,
 * then re-arm CQ notification.
 */
static void
isert_cq_tx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
				struct isert_cq_desc, cq_tx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_tx_desc *tx_desc;
	struct ib_wc wc;

	while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
		tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			isert_send_completion(tx_desc, isert_conn);
		} else {
			pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			pr_debug("TX wc.status: 0x%08x\n", wc.status);
			atomic_dec(&isert_conn->post_send_buf_count);
			isert_cq_tx_comp_err(tx_desc, isert_conn);
		}
	}

	ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
}
/* TX CQ event callback: defer the actual drain to the completion workqueue. */
static void
isert_cq_tx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *desc = context;

	INIT_WORK(&desc->cq_tx_work, isert_cq_tx_work);
	queue_work(isert_comp_wq, &desc->cq_tx_work);
}
/*
 * Workqueue drain of one RX completion queue: poll until empty, route
 * successes to isert_rx_completion() and failures to the error path, then
 * re-arm CQ notification.
 */
static void
isert_cq_rx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
			struct isert_cq_desc, cq_rx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_rx_desc *rx_desc;
	struct ib_wc wc;
	unsigned long xfer_len;

	while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
		rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			xfer_len = (unsigned long)wc.byte_len;
			isert_rx_completion(rx_desc, isert_conn, xfer_len);
		} else {
			pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			/* flush errors are expected during teardown: stay quiet */
			if (wc.status != IB_WC_WR_FLUSH_ERR)
				pr_debug("RX wc.status: 0x%08x\n", wc.status);

			isert_conn->post_recv_buf_count--;
			isert_cq_rx_comp_err(isert_conn);
		}
	}

	ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
}
/* RX CQ event callback: defer the actual drain to the RX workqueue. */
static void
isert_cq_rx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *desc = context;

	INIT_WORK(&desc->cq_rx_work, isert_cq_rx_work);
	queue_work(isert_rx_wq, &desc->cq_rx_work);
}
/*
 * Post a command's prepared response send WR.  The send count is bumped
 * optimistically and rolled back if ib_post_send() fails.
 */
static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	struct ib_send_wr *bad_wr;
	int rc;

	atomic_inc(&isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
			  &bad_wr);
	if (rc) {
		pr_err("ib_post_send failed with %d\n", rc);
		atomic_dec(&isert_conn->post_send_buf_count);
	}

	return rc;
}
/*
 * Build and post a SCSI Response PDU, attaching sense data as a second
 * SGE when the command carries it.  The 2-byte SenseLength prefix is
 * prepended per RFC 3720 and the payload padded to a 4-byte boundary.
 */
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, sense_len;

		/* write the 2-byte SenseLength header into the buffer */
		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		sense_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->sense_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, sense_len,
				DMA_TO_DEVICE);

		isert_cmd->sense_buf_len = sense_len;
		tx_dsg->addr = isert_cmd->sense_buf_dma;
		tx_dsg->length = sense_len;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
/*
 * Build and post a NOPIN PDU, either as a response to an initiator
 * NOPOUT or as an unsolicited target NOPIN (@nopout_response).
 *
 * Fix: corrected the "Reponse" typo in the debug message.
 */
static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct isert_cmd *isert_cmd = container_of(cmd,
struct isert_cmd, iscsi_cmd);
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
isert_init_send_wr(isert_cmd, send_wr);
pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct isert_cmd *isert_cmd = container_of(cmd,
struct isert_cmd, iscsi_cmd);
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
isert_init_send_wr(isert_cmd, send_wr);
pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
return isert_post_response(isert_conn, isert_cmd);
}
/*
 * Build and post a Reject PDU.  The rejected PDU's header (cmd->buf_ptr)
 * rides as the data segment in a second SGE; its DMA mapping reuses the
 * sense_buf_dma/len fields so the common completion path unmaps it.
 */
static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->sense_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->sense_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr = isert_cmd->sense_buf_dma;
	tx_dsg->length = ISCSI_HDR_LEN;
	tx_dsg->lkey = isert_conn->conn_mr->lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
/*
 * Populate one RDMA send WR with SGEs covering up to max_sge entries of
 * the command's TCM scatterlist, starting at byte @offset, transferring at
 * most @data_left bytes.  Returns the number of SGEs consumed.
 */
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	/* cap this WR at the device's max SGE count */
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	send_wr->sg_list = ib_sge;
	send_wr->num_sge = sg_nents;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	/*
	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
			 (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = isert_conn->conn_mr->lkey;

		pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u\n",
			 ib_sge->addr, ib_sge->length);
		/* only the first SGE starts at a page offset */
		page_off = 0;
		data_left -= ib_sge->length;
		ib_sge++;
		pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		 send_wr->sg_list, send_wr->num_sge);

	return sg_nents;
}
/*
 * isert_put_datain() - post RDMA_WRITE work requests carrying Data-IN
 * payload for @cmd, chained with the final iSCSI SCSI response PDU.
 *
 * Maps the command's TCM scatterlist for DMA, builds one RDMA_WRITE
 * send_wr per max_sge-sized chunk via isert_build_rdma_wr(), links the
 * response PDU send_wr at the tail, and posts the whole chain on the
 * connection QP.
 *
 * Returns 1 when the chain was posted (response completes asynchronously),
 * or a negative errno on setup failure.
 *
 * Fix: the original error path for a failed wr->send_wr allocation jumped
 * straight to unmap_sg and leaked the just-allocated ib_sge array; it is
 * now freed (and isert_cmd->ib_sge cleared) via the free_sge label.
 */
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *wr_failed, *send_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_sge *ib_sge;
	struct scatterlist *sg;
	u32 offset = 0, data_len, data_left, rdma_write_max;
	int rc, ret = 0, count, sg_nents, i, ib_sge_cnt;

	pr_debug("RDMA_WRITE: data_length: %u\n", se_cmd->data_length);

	sg = &se_cmd->t_data_sg[0];
	sg_nents = se_cmd->t_data_nents;

	count = ib_dma_map_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
	if (unlikely(!count)) {
		pr_err("Unable to map put_datain SGs\n");
		return -EINVAL;
	}
	wr->sge = sg;
	wr->num_sge = sg_nents;
	pr_debug("Mapped IB count: %u sg: %p sg_nents: %u for RDMA_WRITE\n",
		 count, sg, sg_nents);

	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
	if (!ib_sge) {
		pr_warn("Unable to allocate datain ib_sge\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	isert_cmd->ib_sge = ib_sge;

	pr_debug("Allocated ib_sge: %p from t_data_ents: %d for RDMA_WRITE\n",
		 ib_sge, se_cmd->t_data_nents);

	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
			      GFP_KERNEL);
	if (!wr->send_wr) {
		pr_err("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto free_sge;	/* was unmap_sg, which leaked ib_sge */
	}
	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
		 wr->send_wr, wr->send_wr_num);

	iscsit_increment_maxcmdsn(cmd, conn->sess);
	cmd->stat_sn = conn->stat_sn++;

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
	data_left = se_cmd->data_length;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->send_flags = 0;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
					send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		/* Chain the response PDU send_wr after the final RDMA_WRITE. */
		if (i + 1 == wr->send_wr_num)
			send_wr->next = &isert_cmd->tx_desc.send_wr;
		else
			send_wr->next = &wr->send_wr[i + 1];

		offset += data_len;
		data_left -= data_len;
	}
	/*
	 * Build isert_conn->tx_desc for iSCSI response PDU and attach
	 */
	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
			     &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);

	/* +1 accounts for the response PDU send_wr chained at the tail. */
	atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
	}
	pr_debug("Posted RDMA_WRITE + Response for iSER Data READ\n");
	return 1;

free_sge:
	kfree(isert_cmd->ib_sge);
	isert_cmd->ib_sge = NULL;
unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
	return ret;
}
/*
 * isert_get_dataout() - post RDMA_READ work requests to pull Data-OUT
 * payload for @cmd from the initiator, resuming at cmd->write_data_done.
 *
 * Maps the remaining portion of the TCM scatterlist for DMA, builds one
 * RDMA_READ send_wr per max_sge-sized chunk, marks only the last one
 * signaled, and posts the chain on the connection QP.
 *
 * Returns 0 when the chain was posted, or a negative errno on failure.
 *
 * Fix: the original error path for a failed wr->send_wr allocation jumped
 * straight to unmap_sg and leaked the just-allocated ib_sge array; it is
 * now freed (and isert_cmd->ib_sge cleared) via the free_sge label.
 */
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *wr_failed, *send_wr;
	struct ib_sge *ib_sge;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct scatterlist *sg_start;
	u32 sg_off, sg_nents, page_off, va_offset = 0;
	u32 offset = 0, data_len, data_left, rdma_write_max;
	int rc, ret = 0, count, i, ib_sge_cnt;

	pr_debug("RDMA_READ: data_length: %u write_data_done: %u\n",
		 se_cmd->data_length, cmd->write_data_done);

	/* Resume mid-transfer: skip the SG entries already received. */
	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	page_off = cmd->write_data_done % PAGE_SIZE;

	pr_debug("RDMA_READ: sg_off: %d, sg_start: %p page_off: %d\n",
		 sg_off, sg_start, page_off);

	data_left = se_cmd->data_length - cmd->write_data_done;
	sg_nents = se_cmd->t_data_nents - sg_off;

	pr_debug("RDMA_READ: data_left: %d, sg_nents: %d\n",
		 data_left, sg_nents);

	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
	if (unlikely(!count)) {
		pr_err("Unable to map get_dataout SGs\n");
		return -EINVAL;
	}
	wr->sge = sg_start;
	wr->num_sge = sg_nents;
	pr_debug("Mapped IB count: %u sg_start: %p sg_nents: %u for RDMA_READ\n",
		 count, sg_start, sg_nents);

	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
	if (!ib_sge) {
		pr_warn("Unable to allocate dataout ib_sge\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	isert_cmd->ib_sge = ib_sge;

	pr_debug("Using ib_sge: %p from sg_ents: %d for RDMA_READ\n",
		 ib_sge, sg_nents);

	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
			      GFP_KERNEL);
	if (!wr->send_wr) {
		pr_debug("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto free_sge;	/* was unmap_sg, which leaked ib_sge */
	}
	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
		 wr->send_wr, wr->send_wr_num);

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	wr->iser_ib_op = ISER_IB_RDMA_READ;
	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
	offset = cmd->write_data_done;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->opcode = IB_WR_RDMA_READ;
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
					send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		/* Only the last READ generates a completion. */
		if (i + 1 == wr->send_wr_num)
			send_wr->send_flags = IB_SEND_SIGNALED;
		else
			send_wr->next = &wr->send_wr[i + 1];

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
	}
	pr_debug("Posted RDMA_READ memory for ISER Data WRITE\n");
	return 0;

free_sge:
	kfree(isert_cmd->ib_sge);
	isert_cmd->ib_sge = NULL;
unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
	return ret;
}
/*
 * isert_immediate_queue() - dispatch an immediate-queue state for @cmd.
 *
 * Only ISTATE_SEND_NOPIN_WANT_RESPONSE is handled; anything else is an
 * error and yields -EINVAL.
 */
static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	if (state == ISTATE_SEND_NOPIN_WANT_RESPONSE)
		return isert_put_nopin(cmd, conn, false);

	pr_err("Unknown immediate state: 0x%02x\n", state);
	return -EINVAL;
}
/*
 * isert_response_queue() - dispatch a response-queue state for @cmd to the
 * matching isert_put_*() transmit helper.
 *
 * A successful logout response is converted to -EAGAIN so the caller
 * knows the connection is going away. Unknown states yield -EINVAL.
 */
static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int rc;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		rc = isert_put_logout_rsp(cmd, conn);
		if (rc)
			return rc;
		pr_debug("Returning iSER Logout -EAGAIN\n");
		return -EAGAIN;
	case ISTATE_SEND_NOPIN:
		return isert_put_nopin(cmd, conn, true);
	case ISTATE_SEND_TASKMGTRSP:
		return isert_put_tm_rsp(cmd, conn);
	case ISTATE_SEND_REJECT:
		return isert_put_reject(cmd, conn);
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non GOOD SCSI status from TX thread
		 * context during pre se_cmd excecution failure.
		 */
		return isert_put_response(conn, cmd);
	default:
		pr_err("Unknown response state: 0x%02x\n", state);
		return -EINVAL;
	}
}
/*
 * isert_setup_id() - create, bind, and listen on an RDMA CM identifier
 * for the network portal described by @isert_np.
 *
 * Returns the listening rdma_cm_id on success, or an ERR_PTR() on failure
 * (the partially constructed id is destroyed).
 */
struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct sockaddr *addr = (struct sockaddr *)&np->np_sockaddr;
	struct rdma_cm_id *cm_id;
	int rc;

	pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, addr);

	cm_id = rdma_create_id(isert_cma_handler, isert_np,
			       RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(cm_id));
		return ERR_PTR(PTR_ERR(cm_id));
	}
	pr_debug("id %p context %p\n", cm_id, cm_id->context);

	rc = rdma_bind_addr(cm_id, addr);
	if (rc) {
		pr_err("rdma_bind_addr() failed: %d\n", rc);
		goto destroy_id;
	}

	rc = rdma_listen(cm_id, ISERT_RDMA_LISTEN_BACKLOG);
	if (rc) {
		pr_err("rdma_listen() failed: %d\n", rc);
		goto destroy_id;
	}

	return cm_id;

destroy_id:
	rdma_destroy_id(cm_id);
	return ERR_PTR(rc);
}
/*
 * isert_setup_np() - allocate and initialize the iSER-specific network
 * portal state for @np and start listening via isert_setup_id().
 *
 * Returns 0 on success, negative errno on failure (isert_np is freed).
 */
static int
isert_setup_np(struct iscsi_np *np,
	       struct __kernel_sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *listen_id;
	int rc;

	isert_np = kzalloc(sizeof(*isert_np), GFP_KERNEL);
	if (!isert_np) {
		pr_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}

	init_waitqueue_head(&isert_np->np_accept_wq);
	mutex_init(&isert_np->np_accept_mutex);
	INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);
	isert_np->np = np;

	/*
	 * Setup the np->np_sockaddr from the passed sockaddr setup
	 * in iscsi_target_configfs.c code..
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct __kernel_sockaddr_storage));

	listen_id = isert_setup_id(isert_np);
	if (IS_ERR(listen_id)) {
		rc = PTR_ERR(listen_id);
		kfree(isert_np);
		return rc;
	}

	isert_np->np_cm_id = listen_id;
	np->np_context = isert_np;

	return 0;
}
/*
 * isert_check_accept_queue() - return non-zero when the portal's accept
 * list is empty, taking np_accept_mutex around the list check.
 */
static int
isert_check_accept_queue(struct isert_np *isert_np)
{
	int ret;

	mutex_lock(&isert_np->np_accept_mutex);
	ret = list_empty(&isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	return ret;
}
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
struct rdma_conn_param cp;
int ret;
memset(&cp, 0, sizeof(struct rdma_conn_param));
cp.responder_resources = isert_conn->responder_resources;
cp.initiator_depth = isert_conn->initiator_depth;
cp.retry_count = 7;
cp.rnr_retry_count = 7;
pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
ret = rdma_accept(cm_id, &cp);
if (ret) {
pr_err("rdma_accept() failed with: %d\n", ret);
return ret;
}
pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
return 0;
}
/*
 * isert_get_login_rx() - block until the login request PDU has been
 * received on the connection (conn_login_comp completed by the RX path).
 *
 * Returns 0 when the login request is ready in login->req, or the
 * interrupted-wait error code.
 */
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = conn->context;
	int rc;

	pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);

	rc = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
	if (rc)
		return rc;

	pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
	return 0;
}
/*
 * isert_set_conn_info() - populate the iSCSI connection's login/local
 * address, port, and family fields from the resolved RDMA CM route.
 *
 * Fix: the IPv4 branch used unbounded sprintf() while the IPv6 branch
 * already used snprintf(); both now use bounded formatting into the
 * fixed-size login_ip/local_ip buffers.
 */
static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_route *cm_route = &cm_id->route;
	struct sockaddr_in *sock_in;
	struct sockaddr_in6 *sock_in6;

	conn->login_family = np->np_sockaddr.ss_family;

	if (np->np_sockaddr.ss_family == AF_INET6) {
		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->login_port = ntohs(sock_in6->sin6_port);

		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->local_port = ntohs(sock_in6->sin6_port);
	} else {
		sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI4",
			 &sock_in->sin_addr.s_addr);
		conn->login_port = ntohs(sock_in->sin_port);

		sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI4",
			 &sock_in->sin_addr.s_addr);
		conn->local_port = ntohs(sock_in->sin_port);
	}
}
/*
 * isert_accept_np() - wait for and claim the next pending iSER connection
 * on the network portal, then post the login receive and accept the RDMA
 * CM connection.
 *
 * Returns 0 with conn->context set, -ENODEV when the np thread is being
 * reset/shut down or after repeated spurious wakeups, or a negative errno
 * from the recv-post/accept steps.
 */
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;
	struct isert_conn *isert_conn;
	int max_accept = 0, ret;

accept_wait:
	/* NOTE(review): the interrupted-wait return value is ignored here;
	 * a signal falls through to the state checks below — confirm this
	 * is intended. */
	ret = wait_event_interruptible(isert_np->np_accept_wq,
		   !isert_check_accept_queue(isert_np) ||
		   np->np_thread_state == ISCSI_NP_THREAD_RESET);
	/* Bail out after too many wakeups that found an empty list. */
	if (max_accept > 5)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_debug("np_thread_state %d for isert_accept_np\n",
			 np->np_thread_state);
		/**
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 **/
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	/* Re-check under the mutex; another accept may have raced us. */
	mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		mutex_unlock(&isert_np->np_accept_mutex);
		max_accept++;
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->np_accept_list,
			struct isert_conn, conn_accept_node);
	list_del_init(&isert_conn->conn_accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	/* Cross-link the iSCSI and iSER connection objects. */
	conn->context = isert_conn;
	isert_conn->conn = conn;
	max_accept = 0;

	/* Post the initial login receive before accepting, so the first
	 * PDU from the initiator has a buffer waiting. */
	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		return ret;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		return ret;

	isert_set_conn_info(np, conn, isert_conn);

	pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
	return 0;
}
/*
 * isert_free_np() - tear down the iSER network portal: destroy the
 * listening RDMA CM id (if any) and free the isert_np allocated in
 * isert_setup_np().
 */
static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = np->np_context;

	if (isert_np->np_cm_id)
		rdma_destroy_id(isert_np->np_cm_id);

	np->np_context = NULL;
	kfree(isert_np);
}
/*
 * isert_wait_conn() - initiate disconnect and wait for the connection's
 * completion-error and final teardown completions before dropping the
 * frontend reference.
 */
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	pr_debug("isert_wait_conn: Starting \n");

	mutex_lock(&isert_conn->conn_mutex);
	if (isert_conn->conn_cm_id) {
		pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
		rdma_disconnect(isert_conn->conn_cm_id);
	}
	/*
	 * Only wait for conn_wait_comp_err if the isert_conn made it
	 * into full feature phase..
	 */
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->conn_mutex);
		return;
	}
	/* Transition UP -> TERMINATING under conn_mutex so completion
	 * handlers observe a consistent state. */
	if (isert_conn->state == ISER_CONN_UP)
		isert_conn->state = ISER_CONN_TERMINATING;
	mutex_unlock(&isert_conn->conn_mutex);

	wait_for_completion(&isert_conn->conn_wait_comp_err);

	wait_for_completion(&isert_conn->conn_wait);
	isert_put_conn(isert_conn);
}
/*
 * isert_free_conn() - release the iSCSI frontend's reference on the
 * iSER connection; the final put frees it.
 */
static void isert_free_conn(struct iscsi_conn *conn)
{
	isert_put_conn(conn->context);
}
/* iSCSI transport ops implemented by the iSER target driver; registered
 * with the iSCSI target core in isert_init(). */
static struct iscsit_transport iser_target_transport = {
	.name = "IB/iSER",
	.transport_type = ISCSI_INFINIBAND,
	.owner = THIS_MODULE,
	/* Network portal lifecycle */
	.iscsit_setup_np = isert_setup_np,
	.iscsit_accept_np = isert_accept_np,
	.iscsit_free_np = isert_free_np,
	/* Connection lifecycle */
	.iscsit_wait_conn = isert_wait_conn,
	.iscsit_free_conn = isert_free_conn,
	.iscsit_alloc_cmd = isert_alloc_cmd,
	/* Login path */
	.iscsit_get_login_rx = isert_get_login_rx,
	.iscsit_put_login_tx = isert_put_login_tx,
	/* Full-feature-phase I/O */
	.iscsit_immediate_queue = isert_immediate_queue,
	.iscsit_response_queue = isert_response_queue,
	.iscsit_get_dataout = isert_get_dataout,
	.iscsit_queue_data_in = isert_put_datain,
	.iscsit_queue_status = isert_put_response,
};
/*
 * isert_init() - module init: create the RX and completion workqueues and
 * the isert_cmd slab cache, then register the iSER transport with the
 * iSCSI target core. Unwinds in reverse order on failure.
 */
static int __init isert_init(void)
{
	int ret;

	isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
	if (!isert_rx_wq) {
		pr_err("Unable to allocate isert_rx_wq\n");
		return -ENOMEM;
	}

	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
	if (!isert_comp_wq) {
		pr_err("Unable to allocate isert_comp_wq\n");
		ret = -ENOMEM;
		goto out_destroy_rx_wq;
	}

	isert_cmd_cache = kmem_cache_create("isert_cmd_cache",
			sizeof(struct isert_cmd), __alignof__(struct isert_cmd),
			0, NULL);
	if (!isert_cmd_cache) {
		pr_err("Unable to create isert_cmd_cache\n");
		ret = -ENOMEM;
		goto out_destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
	return 0;

out_destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);
out_destroy_rx_wq:
	destroy_workqueue(isert_rx_wq);
	return ret;
}
/*
 * isert_exit() - module exit: drain queued work, tear down the slab cache
 * and workqueues, and unregister the transport.
 */
static void __exit isert_exit(void)
{
	flush_scheduled_work();
	/* NOTE(review): the cmd cache is destroyed before the driver's own
	 * workqueues are destroyed and before the transport is unregistered;
	 * this is only safe if nothing queued on those workqueues can still
	 * touch isert_cmd objects — confirm the teardown ordering. */
	kmem_cache_destroy(isert_cmd_cache);
	destroy_workqueue(isert_comp_wq);
	destroy_workqueue(isert_rx_wq);
	iscsit_unregister_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
}
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("0.1");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");
module_init(isert_init);
module_exit(isert_exit);
|
crimsonthunder/kernel_samsung_trlte_5.1.1
|
drivers/infiniband/ulp/isert/ib_isert.c
|
C
|
gpl-2.0
| 67,213
|
/* $Id: tstCAPIGlue.c 109358 2016-07-31 17:11:31Z bird $ */
/** @file tstCAPIGlue.c
* Demonstrator program to illustrate use of C bindings of Main API.
*
* It has sample code showing how to retrieve all available error information,
* and how to handle active (event delivery through callbacks) or passive
* (event delivery through a polling mechanism) event listeners.
*/
/*
* Copyright (C) 2009-2016 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/** @todo
* Our appologies for the 256+ missing return code checks in this sample file.
*
* We strongly recomment users of the VBoxCAPI to check all return codes!
*/
/*********************************************************************************************************************************
* Header Files *
*********************************************************************************************************************************/
#include "VBoxCAPIGlue.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#ifndef WIN32
# include <signal.h>
# include <unistd.h>
# include <sys/poll.h>
#endif
#ifdef ___iprt_cdefs_h
# error "not supposed to involve any IPRT or VBox headers here."
#endif
/**
* Select between active event listener (defined) and passive event listener
* (undefined). The active event listener case needs much more code, and
* additionally requires a lot more platform dependent code.
*/
#undef USE_ACTIVE_EVENT_LISTENER
/*********************************************************************************************************************************
* Global Variables *
*********************************************************************************************************************************/
/** Set by Ctrl+C handler. */
static volatile int g_fStop = 0;
#ifdef USE_ACTIVE_EVENT_LISTENER
# ifdef WIN32
/** The COM type information for IEventListener, for implementing IDispatch. */
static ITypeInfo *g_pTInfoIEventListener = NULL;
# endif /* WIN32 */
#endif /* USE_ACTIVE_EVENT_LISTENER */
/** Map a MachineState_T value to a human-readable name for log output. */
static const char *GetStateName(MachineState_T machineState)
{
    switch (machineState)
    {
        case MachineState_Null: return "<null>";
        case MachineState_PoweredOff: return "PoweredOff";
        case MachineState_Saved: return "Saved";
        case MachineState_Teleported: return "Teleported";
        case MachineState_Aborted: return "Aborted";
        case MachineState_Running: return "Running";
        case MachineState_Paused: return "Paused";
        case MachineState_Stuck: return "Stuck";
        case MachineState_Teleporting: return "Teleporting";
        case MachineState_LiveSnapshotting: return "LiveSnapshotting";
        case MachineState_Starting: return "Starting";
        case MachineState_Stopping: return "Stopping";
        case MachineState_Saving: return "Saving";
        case MachineState_Restoring: return "Restoring";
        case MachineState_TeleportingPausedVM: return "TeleportingPausedVM";
        case MachineState_TeleportingIn: return "TeleportingIn";
        case MachineState_FaultTolerantSyncing: return "FaultTolerantSyncing";
        case MachineState_DeletingSnapshotOnline: return "DeletingSnapshotOnline";
        case MachineState_DeletingSnapshotPaused: return "DeletingSnapshotPaused";
        case MachineState_RestoringSnapshot: return "RestoringSnapshot";
        case MachineState_DeletingSnapshot: return "DeletingSnapshot";
        case MachineState_SettingUp: return "SettingUp";
        /* Unknown/future states fall through to a generic label. */
        default: return "no idea";
    }
}
/**
* Ctrl+C handler, terminate event listener.
*
* Remember that most function calls are not allowed in this context (including
* printf!), so make sure that this does as little as possible.
*
* @param iInfo Platform dependent detail info (ignored).
*/
static BOOL VBOX_WINAPI ctrlCHandler(DWORD iInfo)
{
    (void)iInfo;
    /* Signal-context safe: only raise the flag polled by the event loops. */
    g_fStop = 1;
    return TRUE;
}
/**
* Sample event processing function, dumping some event information.
* Shared between active and passive event demo, to highlight that this part
* is identical between the two.
*/
/**
 * Dump one-line diagnostics for a received IEvent and, for state-change
 * events, request loop termination once the machine reaches a terminal
 * state (PoweredOff/Saved/Teleported/Aborted).
 *
 * Shared between the active and passive listener demos. Always returns
 * S_OK: event delivery must not be failed over a logging problem.
 *
 * Fix: the VBoxEventType_OnKeyboardLedsChanged case printed
 * "OnMouseCapabilityChanged" (copy-paste error); it now prints its own
 * event name.
 */
static HRESULT EventListenerDemoProcessEvent(IEvent *event)
{
    VBoxEventType_T evType;
    HRESULT rc;

    if (!event)
    {
        printf("event null\n");
        return S_OK;
    }

    evType = VBoxEventType_Invalid;
    rc = IEvent_get_Type(event, &evType);
    if (FAILED(rc))
    {
        printf("cannot get event type, rc=%#x\n", rc);
        return S_OK;
    }

    switch (evType)
    {
        case VBoxEventType_OnMousePointerShapeChanged:
            printf("OnMousePointerShapeChanged\n");
            break;

        case VBoxEventType_OnMouseCapabilityChanged:
            printf("OnMouseCapabilityChanged\n");
            break;

        case VBoxEventType_OnKeyboardLedsChanged:
            printf("OnKeyboardLedsChanged\n");
            break;

        case VBoxEventType_OnStateChanged:
        {
            IStateChangedEvent *ev = NULL;
            enum MachineState state;

            rc = IEvent_QueryInterface(event, &IID_IStateChangedEvent, (void **)&ev);
            if (FAILED(rc))
            {
                printf("cannot get StateChangedEvent interface, rc=%#x\n", rc);
                return S_OK;
            }
            if (!ev)
            {
                printf("StateChangedEvent reference null\n");
                return S_OK;
            }
            rc = IStateChangedEvent_get_State(ev, &state);
            if (FAILED(rc))
                printf("warning: cannot get state, rc=%#x\n", rc);
            IStateChangedEvent_Release(ev);

            printf("OnStateChanged: %s\n", GetStateName(state));
            fflush(stdout);
            /* Terminal machine states end the demo's event loop. */
            if (   state == MachineState_PoweredOff
                || state == MachineState_Saved
                || state == MachineState_Teleported
                || state == MachineState_Aborted
               )
                g_fStop = 1;
            break;
        }

        case VBoxEventType_OnAdditionsStateChanged:
            printf("OnAdditionsStateChanged\n");
            break;

        case VBoxEventType_OnNetworkAdapterChanged:
            printf("OnNetworkAdapterChanged\n");
            break;

        case VBoxEventType_OnSerialPortChanged:
            printf("OnSerialPortChanged\n");
            break;

        case VBoxEventType_OnParallelPortChanged:
            printf("OnParallelPortChanged\n");
            break;

        case VBoxEventType_OnStorageControllerChanged:
            printf("OnStorageControllerChanged\n");
            break;

        case VBoxEventType_OnMediumChanged:
            printf("OnMediumChanged\n");
            break;

        case VBoxEventType_OnVRDEServerChanged:
            printf("OnVRDEServerChanged\n");
            break;

        case VBoxEventType_OnUSBControllerChanged:
            printf("OnUSBControllerChanged\n");
            break;

        case VBoxEventType_OnUSBDeviceStateChanged:
            printf("OnUSBDeviceStateChanged\n");
            break;

        case VBoxEventType_OnSharedFolderChanged:
            printf("OnSharedFolderChanged\n");
            break;

        case VBoxEventType_OnRuntimeError:
            printf("OnRuntimeError\n");
            break;

        case VBoxEventType_OnCanShowWindow:
            printf("OnCanShowWindow\n");
            break;

        case VBoxEventType_OnShowWindow:
            printf("OnShowWindow\n");
            break;

        default:
            printf("unknown event: %d\n", evType);
    }

    return S_OK;
}
#ifdef USE_ACTIVE_EVENT_LISTENER
struct IEventListenerDemo;
typedef struct IEventListenerDemo IEventListenerDemo;
typedef struct IEventListenerDemoVtbl
{
HRESULT (*QueryInterface)(IEventListenerDemo *pThis, REFIID riid, void **ppvObject);
ULONG (*AddRef)(IEventListenerDemo *pThis);
ULONG (*Release)(IEventListenerDemo *pThis);
#ifdef WIN32
HRESULT (*GetTypeInfoCount)(IEventListenerDemo *pThis, UINT *pctinfo);
HRESULT (*GetTypeInfo)(IEventListenerDemo *pThis, UINT iTInfo, LCID lcid, ITypeInfo **ppTInfo);
HRESULT (*GetIDsOfNames)(IEventListenerDemo *pThis, REFIID riid, LPOLESTR *rgszNames, UINT cNames, LCID lcid, DISPID *rgDispId);
HRESULT (*Invoke)(IEventListenerDemo *pThis, DISPID dispIdMember, REFIID riid, LCID lcid, WORD wFlags, DISPPARAMS *pDispParams, VARIANT *pVarResult, EXCEPINFO *pExcepInfo, UINT *puArgErr);
#endif
HRESULT (*HandleEvent)(IEventListenerDemo *pThis, IEvent *aEvent);
} IEventListenerDemoVtbl;
typedef struct IEventListenerDemo
{
struct IEventListenerDemoVtbl *lpVtbl;
int cRef;
#ifdef WIN32
/* Active event delivery needs a free threaded marshaler, as the default
* proxy marshaling cannot deal correctly with this case. */
IUnknown *pUnkMarshaler;
#endif
} IEventListenerDemo;
/* Defines for easily calling IEventListenerDemo functions. */
/* IUnknown functions. */
#define IEventListenerDemo_QueryInterface(This,riid,ppvObject) \
( (This)->lpVtbl->QueryInterface(This,riid,ppvObject) )
#define IEventListenerDemo_AddRef(This) \
( (This)->lpVtbl->AddRef(This) )
#define IEventListenerDemo_Release(This) \
( (This)->lpVtbl->Release(This) )
#ifdef WIN32
/* IDispatch functions. */
#define IEventListenerDemo_GetTypeInfoCount(This,pctinfo) \
( (This)->lpVtbl->GetTypeInfoCount(This,pctinfo) )
#define IEventListenerDemo_GetTypeInfo(This,iTInfo,lcid,ppTInfo) \
( (This)->lpVtbl->GetTypeInfo(This,iTInfo,lcid,ppTInfo) )
#define IEventListenerDemo_GetIDsOfNames(This,riid,rgszNames,cNames,lcid,rgDispId) \
( (This)->lpVtbl->GetIDsOfNames(This,riid,rgszNames,cNames,lcid,rgDispId) )
#define IEventListenerDemo_Invoke(This,dispIdMember,riid,lcid,wFlags,pDispParams,pVarResult,pExcepInfo,puArgErr) \
( (This)->lpVtbl->Invoke(This,dispIdMember,riid,lcid,wFlags,pDispParams,pVarResult,pExcepInfo,puArgErr) )
#endif
/* IEventListener functions. */
#define IEventListenerDemo_HandleEvent(This,aEvent) \
( (This)->lpVtbl->HandleEvent(This,aEvent) )
/**
* Event handler function, for active event processing.
*/
/* IEventListener::HandleEvent - forward the event to the shared demo
 * processing routine; listener instance state is not used. */
static HRESULT IEventListenerDemoImpl_HandleEvent(IEventListenerDemo *pThis, IEvent *event)
{
    return EventListenerDemoProcessEvent(event);
}
/*
 * IUnknown::QueryInterface for the demo listener.
 *
 * Fix: per COM rules the out pointer must be set to NULL when no
 * interface is returned; the original left *resultp untouched on
 * E_NOINTERFACE.
 */
static HRESULT IEventListenerDemoImpl_QueryInterface(IEventListenerDemo *pThis, const IID *iid, void **resultp)
{
    *resultp = NULL;

    /* match iid */
    if (    !memcmp(iid, &IID_IEventListener, sizeof(IID))
        ||  !memcmp(iid, &IID_IDispatch, sizeof(IID))
        ||  !memcmp(iid, &IID_IUnknown, sizeof(IID)))
    {
        IEventListenerDemo_AddRef(pThis);
        *resultp = pThis;
        return S_OK;
    }
#ifdef WIN32
    /* Marshaling requests are delegated to the free-threaded marshaler. */
    if (!memcmp(iid, &IID_IMarshal, sizeof(IID)))
        return IUnknown_QueryInterface(pThis->pUnkMarshaler, iid, resultp);
#endif

    return E_NOINTERFACE;
}
static HRESULT IEventListenerDemoImpl_AddRef(IEventListenerDemo *pThis)
{
return ++(pThis->cRef);
}
static HRESULT IEventListenerDemoImpl_Release(IEventListenerDemo *pThis)
{
HRESULT c;
c = --(pThis->cRef);
if (!c)
free(pThis);
return c;
}
#ifdef WIN32
/* IDispatch::GetTypeInfoCount - this object exposes exactly one type info
 * (the IEventListener description loaded in LoadTypeInfo()). */
static HRESULT IEventListenerDemoImpl_GetTypeInfoCount(IEventListenerDemo *pThis, UINT *pctinfo)
{
    if (!pctinfo)
        return E_POINTER;
    *pctinfo = 1;
    return S_OK;
}
/* IDispatch::GetTypeInfo - hand out the cached IEventListener type info.
 * NOTE(review): iTInfo is ignored (non-zero indices should arguably fail)
 * and g_pTInfoIEventListener is not NULL-checked — confirm callers always
 * run after a successful LoadTypeInfo(). */
static HRESULT IEventListenerDemoImpl_GetTypeInfo(IEventListenerDemo *pThis, UINT iTInfo, LCID lcid, ITypeInfo **ppTInfo)
{
    if (!ppTInfo)
        return E_POINTER;
    ITypeInfo_AddRef(g_pTInfoIEventListener);
    *ppTInfo = g_pTInfoIEventListener;
    return S_OK;
}
/* IDispatch::GetIDsOfNames - delegate name-to-DISPID lookup to the cached
 * IEventListener type info. */
static HRESULT IEventListenerDemoImpl_GetIDsOfNames(IEventListenerDemo *pThis, REFIID riid, LPOLESTR *rgszNames, UINT cNames, LCID lcid, DISPID *rgDispId)
{
    return ITypeInfo_GetIDsOfNames(g_pTInfoIEventListener, rgszNames, cNames, rgDispId);
}
/* IDispatch::Invoke - let the type info dispatch the call onto this
 * object's vtbl (which is how HandleEvent gets invoked via IDispatch). */
static HRESULT IEventListenerDemoImpl_Invoke(IEventListenerDemo *pThis, DISPID dispIdMember, REFIID riid, LCID lcid, WORD wFlags, DISPPARAMS *pDispParams, VARIANT *pVarResult, EXCEPINFO *pExcepInfo, UINT *puArgErr)
{
    return ITypeInfo_Invoke(g_pTInfoIEventListener, (IDispatch *)pThis, dispIdMember, wFlags, pDispParams, pVarResult, pExcepInfo, puArgErr);
}
/* Load the ITypeInfo for @riid from the registered VirtualBox type
 * library (version 1.0). The caller owns the returned type info; the
 * type library itself is released before returning. */
static HRESULT LoadTypeInfo(REFIID riid, ITypeInfo **pTInfo)
{
    HRESULT rc;
    ITypeLib *pTypeLib;
    rc = LoadRegTypeLib(&LIBID_VirtualBox, 1 /* major */, 0 /* minor */, 0 /* lcid */, &pTypeLib);
    if (FAILED(rc))
        return rc;
    rc = ITypeLib_GetTypeInfoOfGuid(pTypeLib, riid, pTInfo);
    /* No longer need access to the type lib, release it. */
    ITypeLib_Release(pTypeLib);
    return rc;
}
#endif
#ifdef __GNUC__
/* Presumably mirrors the GCC/Itanium C++ vtable prefix (offset-to-top and
 * RTTI slots precede the function pointers) so COM clients built against
 * that ABI find the methods where they expect them — TODO confirm. */
typedef struct IEventListenerDemoVtblInt
{
    ptrdiff_t offset_to_top;
    void *typeinfo;
    IEventListenerDemoVtbl lpVtbl;
} IEventListenerDemoVtblInt;

/* Single shared vtable instance; every listener's lpVtbl points at its
 * embedded lpVtbl member. */
static IEventListenerDemoVtblInt g_IEventListenerDemoVtblInt =
{
    0,      /* offset_to_top */
    NULL,   /* typeinfo, not vital */
    {
        IEventListenerDemoImpl_QueryInterface,
        IEventListenerDemoImpl_AddRef,
        IEventListenerDemoImpl_Release,
#ifdef WIN32
        IEventListenerDemoImpl_GetTypeInfoCount,
        IEventListenerDemoImpl_GetTypeInfo,
        IEventListenerDemoImpl_GetIDsOfNames,
        IEventListenerDemoImpl_Invoke,
#endif
        IEventListenerDemoImpl_HandleEvent
    }
};
#elif defined(_MSC_VER)
/* MSVC vtables carry no extra prefix slots. */
typedef struct IEventListenerDemoVtblInt
{
    IEventListenerDemoVtbl lpVtbl;
} IEventListenerDemoVtblInt;

static IEventListenerDemoVtblInt g_IEventListenerDemoVtblInt =
{
    {
        IEventListenerDemoImpl_QueryInterface,
        IEventListenerDemoImpl_AddRef,
        IEventListenerDemoImpl_Release,
#ifdef WIN32
        IEventListenerDemoImpl_GetTypeInfoCount,
        IEventListenerDemoImpl_GetTypeInfo,
        IEventListenerDemoImpl_GetIDsOfNames,
        IEventListenerDemoImpl_Invoke,
#endif
        IEventListenerDemoImpl_HandleEvent
    }
};
#else
# error Port me!
#endif
/**
* Register active event listener for the selected VM.
*
* @param virtualBox ptr to IVirtualBox object
* @param session ptr to ISession object
*/
/**
 * Register active event listener for the selected VM.
 *
 * Builds an in-process COM listener object, registers it as an *active*
 * listener on the console's event source, and pumps the platform event
 * queue until a terminal machine state or Ctrl-C sets g_fStop.
 *
 * @param virtualBox ptr to IVirtualBox object
 * @param session    ptr to ISession object
 */
static void registerActiveEventListener(IVirtualBox *virtualBox, ISession *session)
{
    IConsole *console = NULL;
    HRESULT rc;

    rc = ISession_get_Console(session, &console);
    if ((SUCCEEDED(rc)) && console)
    {
        IEventSource *es = NULL;
        rc = IConsole_get_EventSource(console, &es);
        if (SUCCEEDED(rc) && es)
        {
            /* Event types this demo subscribes to. */
            static const ULONG s_auInterestingEvents[] =
            {
                VBoxEventType_OnMousePointerShapeChanged,
                VBoxEventType_OnMouseCapabilityChanged,
                VBoxEventType_OnKeyboardLedsChanged,
                VBoxEventType_OnStateChanged,
                VBoxEventType_OnAdditionsStateChanged,
                VBoxEventType_OnNetworkAdapterChanged,
                VBoxEventType_OnSerialPortChanged,
                VBoxEventType_OnParallelPortChanged,
                VBoxEventType_OnStorageControllerChanged,
                VBoxEventType_OnMediumChanged,
                VBoxEventType_OnVRDEServerChanged,
                VBoxEventType_OnUSBControllerChanged,
                VBoxEventType_OnUSBDeviceStateChanged,
                VBoxEventType_OnSharedFolderChanged,
                VBoxEventType_OnRuntimeError,
                VBoxEventType_OnCanShowWindow,
                VBoxEventType_OnShowWindow
            };
            SAFEARRAY *interestingEventsSA = NULL;
            IEventListenerDemo *consoleListener = NULL;

            /* The VirtualBox API expects enum values as VT_I4, which in the
             * future can be hopefully relaxed. */
            interestingEventsSA = g_pVBoxFuncs->pfnSafeArrayCreateVector(VT_I4, 0,
                                                                         sizeof(s_auInterestingEvents)
                                                                       / sizeof(s_auInterestingEvents[0]));
            g_pVBoxFuncs->pfnSafeArrayCopyInParamHelper(interestingEventsSA, &s_auInterestingEvents,
                                                        sizeof(s_auInterestingEvents));

            /* Hand-rolled COM object: zeroed struct + shared vtbl. */
            consoleListener = calloc(1, sizeof(IEventListenerDemo));
            if (consoleListener)
            {
                consoleListener->lpVtbl = &(g_IEventListenerDemoVtblInt.lpVtbl);
#ifdef WIN32
                CoCreateFreeThreadedMarshaler((IUnknown *)consoleListener, &consoleListener->pUnkMarshaler);
#endif
                IEventListenerDemo_AddRef(consoleListener);

                rc = IEventSource_RegisterListener(es, (IEventListener *)consoleListener,
                                                   ComSafeArrayAsInParam(interestingEventsSA),
                                                   1 /* active */);
                if (SUCCEEDED(rc))
                {
                    /* Just wait here for events, no easy way to do this better
                     * as there's not much to do after this completes. */
                    printf("Entering event loop, PowerOff the machine to exit or press Ctrl-C to terminate\n");
                    fflush(stdout);
#ifdef WIN32
                    SetConsoleCtrlHandler(ctrlCHandler, TRUE);
#else
                    signal(SIGINT, (void (*)(int))ctrlCHandler);
#endif

                    /* Pump COM/XPCOM callbacks until a callback sets g_fStop. */
                    while (!g_fStop)
                        g_pVBoxFuncs->pfnProcessEventQueue(250);

#ifdef WIN32
                    SetConsoleCtrlHandler(ctrlCHandler, FALSE);
#else
                    signal(SIGINT, SIG_DFL);
#endif
                }
                else
                    printf("Failed to register event listener.\n");
                IEventSource_UnregisterListener(es, (IEventListener *)consoleListener);
#ifdef WIN32
                if (consoleListener->pUnkMarshaler)
                    IUnknown_Release(consoleListener->pUnkMarshaler);
#endif
                IEventListenerDemo_Release(consoleListener);
            }
            else
                printf("Failed while allocating memory for console event listener.\n");
            g_pVBoxFuncs->pfnSafeArrayDestroy(interestingEventsSA);
            IEventSource_Release(es);
        }
        else
            printf("Failed to get the event source instance.\n");
        IConsole_Release(console);
    }
}
#else /* !USE_ACTIVE_EVENT_LISTENER */
/**
* Register passive event listener for the selected VM.
*
* @param virtualBox ptr to IVirtualBox object
* @param session ptr to ISession object
*/
static void registerPassiveEventListener(ISession *session)
{
IConsole *console = NULL;
HRESULT rc;
/* A console is only available while the VM is (being) powered up. */
rc = ISession_get_Console(session, &console);
if (SUCCEEDED(rc) && console)
{
IEventSource *es = NULL;
rc = IConsole_get_EventSource(console, &es);
if (SUCCEEDED(rc) && es)
{
/* Event types this demo wants delivered; everything else is filtered
 * out by the event source. */
static const ULONG s_auInterestingEvents[] =
{
VBoxEventType_OnMousePointerShapeChanged,
VBoxEventType_OnMouseCapabilityChanged,
VBoxEventType_OnKeyboardLedsChanged,
VBoxEventType_OnStateChanged,
VBoxEventType_OnAdditionsStateChanged,
VBoxEventType_OnNetworkAdapterChanged,
VBoxEventType_OnSerialPortChanged,
VBoxEventType_OnParallelPortChanged,
VBoxEventType_OnStorageControllerChanged,
VBoxEventType_OnMediumChanged,
VBoxEventType_OnVRDEServerChanged,
VBoxEventType_OnUSBControllerChanged,
VBoxEventType_OnUSBDeviceStateChanged,
VBoxEventType_OnSharedFolderChanged,
VBoxEventType_OnRuntimeError,
VBoxEventType_OnCanShowWindow,
VBoxEventType_OnShowWindow
};
SAFEARRAY *interestingEventsSA = NULL;
IEventListener *consoleListener = NULL;
/* The VirtualBox API expects enum values as VT_I4, which in the
 * future can be hopefully relaxed. */
interestingEventsSA = g_pVBoxFuncs->pfnSafeArrayCreateVector(VT_I4, 0,
sizeof(s_auInterestingEvents)
/ sizeof(s_auInterestingEvents[0]));
g_pVBoxFuncs->pfnSafeArrayCopyInParamHelper(interestingEventsSA, &s_auInterestingEvents,
sizeof(s_auInterestingEvents));
/* Passive listeners are created by the event source itself. */
rc = IEventSource_CreateListener(es, &consoleListener);
if (SUCCEEDED(rc) && consoleListener)
{
rc = IEventSource_RegisterListener(es, consoleListener,
ComSafeArrayAsInParam(interestingEventsSA),
0 /* passive */);
if (SUCCEEDED(rc))
{
/* Just wait here for events, no easy way to do this better
 * as there's not much to do after this completes. */
printf("Entering event loop, PowerOff the machine to exit or press Ctrl-C to terminate\n");
fflush(stdout);
#ifdef WIN32
SetConsoleCtrlHandler(ctrlCHandler, TRUE);
#else
signal(SIGINT, (void (*)(int))ctrlCHandler);
#endif
/* Poll the event source; a 250 ms timeout yields NULL events,
 * which are skipped below. */
while (!g_fStop)
{
IEvent *ev = NULL;
rc = IEventSource_GetEvent(es, consoleListener, 250, &ev);
if (FAILED(rc))
{
printf("Failed getting event: %#x\n", rc);
g_fStop = 1;
continue;
}
/* handle timeouts, resulting in NULL events */
if (!ev)
continue;
rc = EventListenerDemoProcessEvent(ev);
if (FAILED(rc))
{
printf("Failed processing event: %#x\n", rc);
g_fStop = 1;
/* finish processing the event */
}
/* Passive listeners must acknowledge every event, even after a
 * processing failure, so the source can move on. */
rc = IEventSource_EventProcessed(es, consoleListener, ev);
if (FAILED(rc))
{
printf("Failed to mark event as processed: %#x\n", rc);
g_fStop = 1;
/* continue with event release */
}
if (ev)
{
IEvent_Release(ev);
ev = NULL;
}
}
#ifdef WIN32
SetConsoleCtrlHandler(ctrlCHandler, FALSE);
#else
signal(SIGINT, SIG_DFL);
#endif
}
else
printf("Failed to register event listener.\n");
/* NOTE(review): UnregisterListener is issued even when registration
 * failed above — presumably harmless, but confirm against the API. */
IEventSource_UnregisterListener(es, (IEventListener *)consoleListener);
IEventListener_Release(consoleListener);
}
else
printf("Failed to create an event listener instance.\n");
g_pVBoxFuncs->pfnSafeArrayDestroy(interestingEventsSA);
IEventSource_Release(es);
}
else
printf("Failed to get the event source instance.\n");
IConsole_Release(console);
}
}
#endif /* !USE_ACTIVE_EVENT_LISTENER */
/**
* Print detailed error information if available.
* @param pszExecutable string with the executable name
* @param pszErrorMsg string containing the code location specific error message
* @param rc COM/XPCOM result code
*/
/*
 * Print the caller-supplied error message plus, when the glue layer has a
 * pending exception, the full chain of IVirtualBoxErrorInfo records
 * (result code, component and text of each).
 */
static void PrintErrorInfo(const char *pszExecutable, const char *pszErrorMsg, HRESULT rc)
{
    IErrorInfo *pErrInfo;
    HRESULT hrc;

    fprintf(stderr, "%s: %s (rc=%#010x)\n", pszExecutable, pszErrorMsg, (unsigned)rc);
    hrc = g_pVBoxFuncs->pfnGetException(&pErrInfo);
    if (SUCCEEDED(hrc) && pErrInfo)
    {
        IVirtualBoxErrorInfo *pVBoxErrInfo;
        hrc = IErrorInfo_QueryInterface(pErrInfo, &IID_IVirtualBoxErrorInfo, (void **)&pVBoxErrInfo);
        if (SUCCEEDED(hrc) && pVBoxErrInfo != NULL)
        {
            /* Walk the (possibly multi-element) chain of extended infos. */
            while (pVBoxErrInfo)
            {
                LONG resultCode = S_OK;
                BSTR bstrComponent = NULL;
                char *pszComponent = NULL;
                BSTR bstrText = NULL;
                char *pszText = NULL;
                IVirtualBoxErrorInfo *pNext = NULL;

                fprintf(stderr, "Extended error info (IVirtualBoxErrorInfo):\n");
                IVirtualBoxErrorInfo_get_ResultCode(pVBoxErrInfo, &resultCode);
                fprintf(stderr, " resultCode=%#010x\n", (unsigned)resultCode);
                IVirtualBoxErrorInfo_get_Component(pVBoxErrInfo, &bstrComponent);
                g_pVBoxFuncs->pfnUtf16ToUtf8(bstrComponent, &pszComponent);
                g_pVBoxFuncs->pfnComUnallocString(bstrComponent);
                fprintf(stderr, " component=%s\n", pszComponent);
                g_pVBoxFuncs->pfnUtf8Free(pszComponent);
                IVirtualBoxErrorInfo_get_Text(pVBoxErrInfo, &bstrText);
                g_pVBoxFuncs->pfnUtf16ToUtf8(bstrText, &pszText);
                g_pVBoxFuncs->pfnComUnallocString(bstrText);
                fprintf(stderr, " text=%s\n", pszText);
                g_pVBoxFuncs->pfnUtf8Free(pszText);

                /* Advance to the next record, releasing the current one. */
                hrc = IVirtualBoxErrorInfo_get_Next(pVBoxErrInfo, &pNext);
                if (FAILED(hrc))
                    pNext = NULL;
                IVirtualBoxErrorInfo_Release(pVBoxErrInfo);
                pVBoxErrInfo = pNext;
            }
        }
        IErrorInfo_Release(pErrInfo);
        g_pVBoxFuncs->pfnClearException();
    }
}
/**
* Start a VM.
*
* @param argv0 executable name
* @param virtualBox ptr to IVirtualBox object
* @param session ptr to ISession object
* @param id identifies the machine to start
*/
/**
 * Start a VM.
 *
 * Looks up the machine by UUID, prints its group membership, launches a
 * "gui" VM process and waits for the launch progress to complete. On a
 * successful start the event listener demo is kicked off.
 *
 * @param argv0       executable name (for error reporting)
 * @param virtualBox  ptr to IVirtualBox object
 * @param session     ptr to ISession object
 * @param id          identifies the machine to start
 */
static void startVM(const char *argv0, IVirtualBox *virtualBox, ISession *session, BSTR id)
{
    HRESULT rc;
    IMachine *machine = NULL;
    IProgress *progress = NULL;
    BSTR env = NULL; /* no additional environment for the VM process */
    BSTR sessionType;
    SAFEARRAY *groupsSA = g_pVBoxFuncs->pfnSafeArrayOutParamAlloc();

    rc = IVirtualBox_FindMachine(virtualBox, id, &machine);
    if (FAILED(rc) || !machine)
    {
        PrintErrorInfo(argv0, "Error: Couldn't get the Machine reference", rc);
        /* Fix: the out-param safe array was leaked on this early return. */
        g_pVBoxFuncs->pfnSafeArrayDestroy(groupsSA);
        return;
    }
    rc = IMachine_get_Groups(machine, ComSafeArrayAsOutTypeParam(groupsSA, BSTR));
    if (SUCCEEDED(rc))
    {
        BSTR *groups = NULL;
        ULONG cbGroups = 0;
        ULONG i, cGroups;

        /* Copy the BSTRs out; cbGroups is a byte count, not an element count. */
        g_pVBoxFuncs->pfnSafeArrayCopyOutParamHelper((void **)&groups, &cbGroups, VT_BSTR, groupsSA);
        g_pVBoxFuncs->pfnSafeArrayDestroy(groupsSA);
        cGroups = cbGroups / sizeof(groups[0]);
        for (i = 0; i < cGroups; ++i)
        {
            /* Note that the use of %S might be tempting, but it is not
             * available on all platforms, and even where it is usable it
             * may depend on correct compiler options to make wchar_t a
             * 16 bit number. So better play safe and use UTF-8. */
            char *group;
            g_pVBoxFuncs->pfnUtf16ToUtf8(groups[i], &group);
            /* Fix: i is ULONG, so use %u with an explicit cast (was %d). */
            printf("Groups[%u]: %s\n", (unsigned)i, group);
            g_pVBoxFuncs->pfnUtf8Free(group);
        }
        for (i = 0; i < cGroups; ++i)
            g_pVBoxFuncs->pfnComUnallocString(groups[i]);
        g_pVBoxFuncs->pfnArrayOutFree(groups);
    }
    else
    {
        /* Fix: destroy the safe array on the failure path too (was leaked). */
        g_pVBoxFuncs->pfnSafeArrayDestroy(groupsSA);
    }
    g_pVBoxFuncs->pfnUtf8ToUtf16("gui", &sessionType);
    rc = IMachine_LaunchVMProcess(machine, session, sessionType, env, &progress);
    g_pVBoxFuncs->pfnUtf16Free(sessionType);
    if (SUCCEEDED(rc))
    {
        BOOL completed;
        LONG resultCode;

        printf("Waiting for the remote session to open...\n");
        IProgress_WaitForCompletion(progress, -1);
        rc = IProgress_get_Completed(progress, &completed);
        if (FAILED(rc))
            fprintf(stderr, "Error: GetCompleted status failed\n");
        IProgress_get_ResultCode(progress, &resultCode);
        if (FAILED(resultCode))
        {
            IVirtualBoxErrorInfo *errorInfo = NULL;
            BSTR textUtf16;
            char *text;

            IProgress_get_ErrorInfo(progress, &errorInfo);
            /* Fix: guard against a NULL error info before dereferencing. */
            if (errorInfo)
            {
                IVirtualBoxErrorInfo_get_Text(errorInfo, &textUtf16);
                g_pVBoxFuncs->pfnUtf16ToUtf8(textUtf16, &text);
                printf("Error: %s\n", text);
                g_pVBoxFuncs->pfnComUnallocString(textUtf16);
                g_pVBoxFuncs->pfnUtf8Free(text);
                IVirtualBoxErrorInfo_Release(errorInfo);
            }
        }
        else
        {
            fprintf(stderr, "VM process has been successfully started\n");
            /* Kick off the event listener demo part, which is quite separate.
             * Ignore it if you need a more basic sample. */
#ifdef USE_ACTIVE_EVENT_LISTENER
            registerActiveEventListener(virtualBox, session);
#else
            registerPassiveEventListener(session);
#endif
        }
        IProgress_Release(progress);
    }
    else
        PrintErrorInfo(argv0, "Error: LaunchVMProcess failed", rc);
    /* It's important to always release resources. */
    IMachine_Release(machine);
}
/**
* List the registered VMs.
*
* @param argv0 executable name
* @param virtualBox ptr to IVirtualBox object
* @param session ptr to ISession object
*/
/**
 * List the registered VMs and let the user pick one to start.
 *
 * Prints per-machine name, UUID and (for accessible machines) config file,
 * memory size and guest OS description, then reads a machine index from
 * stdin and calls startVM() for it.
 *
 * @param argv0       executable name (for error reporting)
 * @param virtualBox  ptr to IVirtualBox object
 * @param session     ptr to ISession object
 */
static void listVMs(const char *argv0, IVirtualBox *virtualBox, ISession *session)
{
    HRESULT rc;
    SAFEARRAY *machinesSA = g_pVBoxFuncs->pfnSafeArrayOutParamAlloc();
    IMachine **machines = NULL;
    ULONG machineCnt = 0;
    ULONG i;
    unsigned start_id;

    /*
     * Get the list of all registered VMs.
     */
    rc = IVirtualBox_get_Machines(virtualBox, ComSafeArrayAsOutIfaceParam(machinesSA, IMachine *));
    if (FAILED(rc))
    {
        PrintErrorInfo(argv0, "could not get list of machines", rc);
        /* Fix: the out-param safe array was leaked on this early return. */
        g_pVBoxFuncs->pfnSafeArrayDestroy(machinesSA);
        return;
    }
    /*
     * Extract interface pointers from machinesSA, and update the reference
     * counter of each object, as destroying machinesSA would call Release.
     */
    g_pVBoxFuncs->pfnSafeArrayCopyOutIfaceParamHelper((IUnknown ***)&machines, &machineCnt, machinesSA);
    g_pVBoxFuncs->pfnSafeArrayDestroy(machinesSA);
    if (!machineCnt)
    {
        g_pVBoxFuncs->pfnArrayOutFree(machines);
        printf("\tNo VMs\n");
        return;
    }
    printf("VM List:\n\n");
    /*
     * Iterate through the collection.
     */
    for (i = 0; i < machineCnt; ++i)
    {
        IMachine *machine = machines[i];
        BOOL isAccessible = FALSE;

        printf("\tMachine #%u\n", (unsigned)i);
        if (!machine)
        {
            printf("\t(skipped, NULL)\n");
            continue;
        }
        IMachine_get_Accessible(machine, &isAccessible);
        if (isAccessible)
        {
            BSTR machineNameUtf16;
            char *machineName;
            IMachine_get_Name(machine, &machineNameUtf16);
            g_pVBoxFuncs->pfnUtf16ToUtf8(machineNameUtf16,&machineName);
            g_pVBoxFuncs->pfnComUnallocString(machineNameUtf16);
            printf("\tName: %s\n", machineName);
            g_pVBoxFuncs->pfnUtf8Free(machineName);
        }
        else
            printf("\tName: <inaccessible>\n");
        /* The UUID is available even for inaccessible machines. */
        {
            BSTR uuidUtf16;
            char *uuidUtf8;
            IMachine_get_Id(machine, &uuidUtf16);
            g_pVBoxFuncs->pfnUtf16ToUtf8(uuidUtf16, &uuidUtf8);
            g_pVBoxFuncs->pfnComUnallocString(uuidUtf16);
            printf("\tUUID: %s\n", uuidUtf8);
            g_pVBoxFuncs->pfnUtf8Free(uuidUtf8);
        }
        if (isAccessible)
        {
            {
                BSTR configFileUtf16;
                char *configFileUtf8;
                IMachine_get_SettingsFilePath(machine, &configFileUtf16);
                g_pVBoxFuncs->pfnUtf16ToUtf8(configFileUtf16, &configFileUtf8);
                g_pVBoxFuncs->pfnComUnallocString(configFileUtf16);
                printf("\tConfig file: %s\n", configFileUtf8);
                g_pVBoxFuncs->pfnUtf8Free(configFileUtf8);
            }
            {
                ULONG memorySize;
                IMachine_get_MemorySize(machine, &memorySize);
                printf("\tMemory size: %uMB\n", memorySize);
            }
            {
                BSTR typeId;
                IGuestOSType *osType = NULL;
                IMachine_get_OSTypeId(machine, &typeId);
                IVirtualBox_GetGuestOSType(virtualBox, typeId, &osType);
                g_pVBoxFuncs->pfnComUnallocString(typeId);
                /* Fix: osType was dereferenced unconditionally; guard in case
                 * GetGuestOSType fails or returns no object. */
                if (osType)
                {
                    BSTR osNameUtf16;
                    char *osName;
                    IGuestOSType_get_Description(osType, &osNameUtf16);
                    g_pVBoxFuncs->pfnUtf16ToUtf8(osNameUtf16,&osName);
                    g_pVBoxFuncs->pfnComUnallocString(osNameUtf16);
                    printf("\tGuest OS: %s\n\n", osName);
                    g_pVBoxFuncs->pfnUtf8Free(osName);
                    IGuestOSType_Release(osType);
                }
                else
                    printf("\tGuest OS: <unknown>\n\n");
            }
        }
    }
    /*
     * Let the user chose a machine to start.
     */
    printf("Type Machine# to start (0 - %u) or 'quit' to do nothing: ",
        (unsigned)(machineCnt - 1));
    fflush(stdout);
    if (scanf("%u", &start_id) == 1 && start_id < machineCnt)
    {
        IMachine *machine = machines[start_id];
        if (machine)
        {
            BSTR uuidUtf16 = NULL;
            IMachine_get_Id(machine, &uuidUtf16);
            startVM(argv0, virtualBox, session, uuidUtf16);
            g_pVBoxFuncs->pfnComUnallocString(uuidUtf16);
        }
    }
    /*
     * Don't forget to release the objects in the array.
     */
    for (i = 0; i < machineCnt; ++i)
    {
        IMachine *machine = machines[i];
        if (machine)
            IMachine_Release(machine);
    }
    g_pVBoxFuncs->pfnArrayOutFree(machines);
}
/* Main - Start the ball rolling. */
/* Initializes the C glue/COM layer, prints VirtualBox version/revision/home
 * folder, runs the interactive VM list (listVMs), then releases everything
 * in reverse order of acquisition. Returns EXIT_FAILURE on any fatal
 * initialization error, 0 otherwise. */
int main(int argc, char **argv)
{
IVirtualBoxClient *vboxclient = NULL;
IVirtualBox *vbox = NULL;
ISession *session = NULL;
ULONG revision = 0;
BSTR versionUtf16 = NULL;
BSTR homefolderUtf16 = NULL;
HRESULT rc; /* Result code of various function (method) calls. */
(void)argc;
printf("Starting main()\n");
/* Load the VBoxCAPI shared library and resolve the function table. */
if (VBoxCGlueInit())
{
fprintf(stderr, "%s: FATAL: VBoxCGlueInit failed: %s\n",
argv[0], g_szVBoxErrMsg);
return EXIT_FAILURE;
}
{
/* Version numbers are packed decimally: MMmmmppp / MMMmmm. */
unsigned ver = g_pVBoxFuncs->pfnGetVersion();
printf("VirtualBox version: %u.%u.%u\n", ver / 1000000, ver / 1000 % 1000, ver % 1000);
ver = g_pVBoxFuncs->pfnGetAPIVersion();
printf("VirtualBox API version: %u.%u\n", ver / 1000, ver % 1000);
}
g_pVBoxFuncs->pfnClientInitialize(NULL, &vboxclient);
/* NOTE(review): the FATAL paths below return without releasing already
 * acquired references; acceptable for a sample since the process exits,
 * but not a pattern to copy. */
if (!vboxclient)
{
fprintf(stderr, "%s: FATAL: could not get VirtualBoxClient reference\n", argv[0]);
return EXIT_FAILURE;
}
printf("----------------------------------------------------\n");
rc = IVirtualBoxClient_get_VirtualBox(vboxclient, &vbox);
if (FAILED(rc) || !vbox)
{
PrintErrorInfo(argv[0], "FATAL: could not get VirtualBox reference", rc);
return EXIT_FAILURE;
}
rc = IVirtualBoxClient_get_Session(vboxclient, &session);
if (FAILED(rc) || !session)
{
PrintErrorInfo(argv[0], "FATAL: could not get Session reference", rc);
return EXIT_FAILURE;
}
#ifdef USE_ACTIVE_EVENT_LISTENER
# ifdef WIN32
rc = LoadTypeInfo(&IID_IEventListener, &g_pTInfoIEventListener);
if (FAILED(rc) || !g_pTInfoIEventListener)
{
PrintErrorInfo(argv[0], "FATAL: could not get type information for IEventListener", rc);
return EXIT_FAILURE;
}
# endif /* WIN32 */
#endif /* USE_ACTIVE_EVENT_LISTENER */
/*
* Now ask for revision, version and home folder information of
* this vbox. Were not using fancy macros here so it
* remains easy to see how we access C++'s vtable.
*/
/* 1. Revision */
rc = IVirtualBox_get_Revision(vbox, &revision);
if (SUCCEEDED(rc))
printf("\tRevision: %u\n", revision);
else
PrintErrorInfo(argv[0], "GetRevision() failed", rc);
/* 2. Version */
rc = IVirtualBox_get_Version(vbox, &versionUtf16);
if (SUCCEEDED(rc))
{
char *version = NULL;
g_pVBoxFuncs->pfnUtf16ToUtf8(versionUtf16, &version);
printf("\tVersion: %s\n", version);
g_pVBoxFuncs->pfnUtf8Free(version);
g_pVBoxFuncs->pfnComUnallocString(versionUtf16);
}
else
PrintErrorInfo(argv[0], "GetVersion() failed", rc);
/* 3. Home Folder */
rc = IVirtualBox_get_HomeFolder(vbox, &homefolderUtf16);
if (SUCCEEDED(rc))
{
char *homefolder = NULL;
g_pVBoxFuncs->pfnUtf16ToUtf8(homefolderUtf16, &homefolder);
printf("\tHomeFolder: %s\n", homefolder);
g_pVBoxFuncs->pfnUtf8Free(homefolder);
g_pVBoxFuncs->pfnComUnallocString(homefolderUtf16);
}
else
PrintErrorInfo(argv[0], "GetHomeFolder() failed", rc);
/* Interactive part: list VMs and possibly start one. */
listVMs(argv[0], vbox, session);
ISession_UnlockMachine(session);
printf("----------------------------------------------------\n");
/*
* Do as mom told us: always clean up after yourself.
*/
#ifdef USE_ACTIVE_EVENT_LISTENER
# ifdef WIN32
if (g_pTInfoIEventListener)
{
ITypeInfo_Release(g_pTInfoIEventListener);
g_pTInfoIEventListener = NULL;
}
# endif /* WIN32 */
#endif /* USE_ACTIVE_EVENT_LISTENER */
if (session)
{
ISession_Release(session);
session = NULL;
}
if (vbox)
{
IVirtualBox_Release(vbox);
vbox = NULL;
}
if (vboxclient)
{
IVirtualBoxClient_Release(vboxclient);
vboxclient = NULL;
}
g_pVBoxFuncs->pfnClientUninitialize();
VBoxCGlueTerm();
printf("Finished main()\n");
return 0;
}
/* vim: set ts=4 sw=4 et: */
|
ARL-UTEP-OC/emubox
|
workshop-manager/bin/VirtualBoxSDK-5.1.20-114628/sdk/bindings/c/samples/tstCAPIGlue.c
|
C
|
gpl-2.0
| 39,034
|
/*
This file is part of ffmpeg-php
Copyright (C) 2004-2008 Todd Kirby (ffmpeg.php AT gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
In addition, as a special exception, the copyright holders of ffmpeg-php
give you permission to combine ffmpeg-php with code included in the
standard release of PHP under the PHP license (or modified versions of
such code, with unchanged license). You may copy and distribute such a
system following the terms of the GNU GPL for ffmpeg-php and the licenses
of the other code concerned, provided that you include the source code of
that other code when and as the GNU GPL requires distribution of source code.
You must obey the GNU General Public License in all respects for all of the
code used other than standard release of PHP. If you modify this file, you
may extend this exception to your version of the file, but you are not
obligated to do so. If you do not wish to do so, delete this exception
statement from your version.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
//#include <php.h>
#include "ffmpeg_tools.h"
#if LIBAVCODEC_VERSION_MAJOR >= 52
#include <swscale.h>
/* {{{ ffmpeg_img_convert()
* wrapper around ffmpeg image conversion routines
*/
/*
 * Convert src into dst's pixel format (no scaling: source and destination
 * dimensions are identical). Returns 0 on success, 2 if the conversion
 * context could not be created.
 */
int img_convert(AVPicture *dst, int dst_pix_fmt,
    AVPicture *src, int src_pix_fmt, int src_width, int src_height)
{
    struct SwsContext *sws_ctx = NULL;
    /* Fix: the source pixel format was hard-coded to 0, leaving the
     * src_pix_fmt parameter unused and misinterpreting any non-0 source
     * format. Pass the caller-supplied format instead. */
    // TODO: Try to get cached sws_context first
    sws_ctx = sws_getContext(src_width, src_height, src_pix_fmt,
        src_width, src_height, dst_pix_fmt,
        SWS_BICUBIC, NULL, NULL, NULL);
    if (sws_ctx == NULL){
        return 2;
    }
    sws_scale(sws_ctx, src->data, src->linesize, 0, src_height, dst->data, dst->linesize);
    sws_freeContext(sws_ctx);
    return 0;
}
/* }}} */
/* Crop-aware scale: shifts the source plane pointers past the configured
 * top/left bands and scales the remaining region into pxOut using the
 * SwsContext cached in 'context'. No-op when the context (or its
 * SwsContext) is NULL. */
void img_resample(ImgReSampleContext * context, AVPicture * pxOut, const AVPicture * pxIn)
{
if (context != NULL && context->context != NULL) {
AVPicture shiftedInput; // = {0};
/* Luma plane: full-resolution band offsets. */
shiftedInput.data[0] = pxIn->data[0] + pxIn->linesize[0] *
context->bandTop + context->bandLeft;
/* Chroma planes: half-resolution offsets — assumes a 4:2:0 layout,
 * matching the PIX_FMT_YUV420P context built in img_resample_full_init. */
shiftedInput.data[1] = pxIn->data[1] + (pxIn->linesize[1] *
(context->bandTop / 2)) + (context->bandLeft+1) / 2;
shiftedInput.data[2] = pxIn->data[2] + (pxIn->linesize[2] *
(context->bandTop / 2)) + (context->bandLeft+1) / 2;
shiftedInput.linesize[0] = pxIn->linesize[0];
shiftedInput.linesize[1] = pxIn->linesize[1];
shiftedInput.linesize[2] = pxIn->linesize[2];
/* Source slice height excludes both vertical bands. */
sws_scale(context->context, (uint8_t**)shiftedInput.data,
(int*)shiftedInput.linesize, 0, context->height - context->bandBottom -
context->bandTop, pxOut->data, pxOut->linesize);
}
}
/*
 * Create a resampling context that crops the input by the given bands,
 * then scales the remainder to owidth x oheight (YUV420P in and out).
 * The pad* parameters are stored but not applied by the scaler setup.
 * Returns NULL on allocation or sws_getContext failure; free the result
 * with img_resample_close().
 */
ImgReSampleContext * img_resample_full_init (int owidth, int oheight, int iwidth, int iheight, int topBand, int bottomBand, int leftBand, int rightBand, int padtop, int padbottom, int padleft, int padright)
{
    ImgReSampleContext *ctx = (ImgReSampleContext *)av_malloc(sizeof(ImgReSampleContext));
    if (ctx == NULL)
        return NULL;

    /* Effective source dimensions after removing the crop bands. */
    int croppedWidth = iwidth - rightBand - leftBand;
    int croppedHeight = iheight - topBand - bottomBand;
    /* Fast bilinear for big sources (> ~1 MPixel), bicubic otherwise —
     * a speed/quality trade-off. */
    int algorithm = croppedWidth * croppedHeight > 1024000
        ? SWS_FAST_BILINEAR : SWS_BICUBIC;

    ctx->context = sws_getContext(croppedWidth, croppedHeight, PIX_FMT_YUV420P,
        owidth, oheight, PIX_FMT_YUV420P, algorithm,
        NULL, NULL, NULL);
    if (ctx->context == NULL)
    {
        av_free(ctx);
        return NULL;
    }

    ctx->bandLeft = leftBand;
    ctx->bandRight = rightBand;
    ctx->bandTop = topBand;
    ctx->bandBottom = bottomBand;
    ctx->padLeft = padleft;
    ctx->padRight = padright;
    ctx->padTop = padtop;
    ctx->padBottom = padbottom;
    ctx->width = iwidth;
    ctx->height = iheight;
    ctx->outWidth = owidth;
    ctx->outHeight = oheight;
    return ctx;
}
/* Convenience wrapper: plain resample with no crop bands and no padding. */
ImgReSampleContext * img_resample_init (int owidth, int oheight, int iwidth, int iheight)
{
return img_resample_full_init(owidth, oheight, iwidth, iheight, 0, 0, 0, 0, 0, 0, 0, 0);
}
/* Release a resampling context created by img_resample_full_init().
 * Safe to call with NULL. */
void img_resample_close(ImgReSampleContext * s)
{
    if (s != NULL)
    {
        sws_freeContext(s->context);
        av_free(s);
    }
}
#endif
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4
* vim<600: noet sw=4 ts=4
*/
|
lord2800/ffmpeg-php
|
ffmpeg_tools.c
|
C
|
gpl-2.0
| 5,212
|
// CHDK palette colors for the s90
// Define color values as needed in this file.
#include "palette.h"
#include "platform_palette.h"
// Playback mode colors
// Maps CHDK logical colors to s90 firmware palette indices (playback mode).
// The first two slots are placeholders consumed by script color handling.
unsigned char ply_colors[] =
{
COLOR_TRANSPARENT, // Placeholder for script colors
COLOR_BLACK, // Placeholder for script colors
0x01, // White
0x2B, // Red
0x29, // Dark Red
0x1E, // Light Red
0x99, // Green
0x25, // Dark Green
0x51, // Light Green
0xA1, // Blue
0xA1, // Dark Blue -- NOTE(review): same index as Blue; confirm no distinct dark blue exists in this firmware palette
0xA9, // Light Blue / Cyan
0x17, // Grey
0x61, // Dark Grey
0x16, // Light Grey
0x9A, // Yellow
0x6F, // Dark Yellow
0x66, // Light Yellow
0x61, // Transparent Dark Grey
COLOR_BLACK, // Magenta -- NOTE(review): falls back to black; confirm intended
};
// Record mode colors
// Maps CHDK logical colors to s90 firmware palette indices (record mode).
// Currently identical to ply_colors; kept separate because the two modes
// may use different firmware palettes on other cameras.
unsigned char rec_colors[] =
{
COLOR_TRANSPARENT, // Placeholder for script colors
COLOR_BLACK, // Placeholder for script colors
0x01, // White
0x2B, // Red
0x29, // Dark Red
0x1E, // Light Red
0x99, // Green
0x25, // Dark Green
0x51, // Light Green
0xA1, // Blue
0xA1, // Dark Blue -- NOTE(review): same index as Blue; confirm no distinct dark blue exists in this firmware palette
0xA9, // Light Blue / Cyan
0x17, // Grey
0x61, // Dark Grey
0x16, // Light Grey
0x9A, // Yellow
0x6F, // Dark Yellow
0x66, // Light Yellow
0x61, // Transparent Dark Grey
COLOR_BLACK, // Magenta -- NOTE(review): falls back to black; confirm intended
};
|
c10ud/CHDK
|
platform/s90/platform_palette.c
|
C
|
gpl-2.0
| 2,303
|
/*
* keybindings.c - this file is part of Geany, a fast and lightweight IDE
*
* Copyright 2006-2012 Enrico Tröger <enrico(dot)troeger(at)uvena(dot)de>
* Copyright 2006-2012 Nick Treleaven <nick(dot)treleaven(at)btinternet(dot)com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
* @file keybindings.h
* Configurable keyboard shortcuts.
* - keybindings_send_command() mimics a built-in keybinding action.
* - @ref GeanyKeyGroupID lists groups of built-in keybindings.
* @see plugin_set_key_group().
**/
#include "geany.h"
#include <gdk/gdkkeysyms.h>
#include <string.h>
#include "keybindings.h"
#include "support.h"
#include "utils.h"
#include "ui_utils.h"
#include "document.h"
#include "documentprivate.h"
#include "filetypes.h"
#include "callbacks.h"
#include "prefs.h"
#include "msgwindow.h"
#include "editor.h"
#include "sciwrappers.h"
#include "build.h"
#include "tools.h"
#include "navqueue.h"
#include "symbols.h"
#include "vte.h"
#include "toolbar.h"
#include "sidebar.h"
#include "notebook.h"
#include "geanywraplabel.h"
#include "main.h"
#include "search.h"
#ifdef HAVE_VTE
# include "vte.h"
#endif
GPtrArray *keybinding_groups; /* array of GeanyKeyGroup pointers, in visual order */
/* keyfile group name for non-plugin KB groups */
static const gchar keybindings_keyfile_group_name[] = "Bindings";
/* core keybindings, indexed by key id (see keybindings_get_item) */
static GeanyKeyBinding binding_ids[GEANY_KEYS_COUNT];
static GtkAccelGroup *kb_accel_group = NULL;
/* compile-time switch for tab-switching order; fixed FALSE here */
static const gboolean swap_alt_tab_order = FALSE;
/* central keypress event handler, almost all keypress events go to this function */
static gboolean on_key_press_event(GtkWidget *widget, GdkEventKey *event, gpointer user_data);
static gboolean check_current_word(GeanyDocument *doc, gboolean sci_word);
static gboolean read_current_word(GeanyDocument *doc, gboolean sci_word);
static gchar *get_current_word_or_sel(GeanyDocument *doc, gboolean sci_word);
static gboolean cb_func_file_action(guint key_id);
static gboolean cb_func_project_action(guint key_id);
static gboolean cb_func_editor_action(guint key_id);
static gboolean cb_func_select_action(guint key_id);
static gboolean cb_func_format_action(guint key_id);
static gboolean cb_func_insert_action(guint key_id);
static gboolean cb_func_search_action(guint key_id);
static gboolean cb_func_goto_action(guint key_id);
static gboolean cb_func_switch_action(guint key_id);
static gboolean cb_func_clipboard_action(guint key_id);
static gboolean cb_func_build_action(guint key_id);
static gboolean cb_func_document_action(guint key_id);
static gboolean cb_func_view_action(guint key_id);
/* note: new keybindings should normally use per group callbacks */
static void cb_func_menu_help(guint key_id);
static void cb_func_menu_preferences(guint key_id);
static void cb_func_menu_fullscreen(guint key_id);
static void cb_func_menu_messagewindow(guint key_id);
static void cb_func_menu_opencolorchooser(guint key_id);
static void cb_func_switch_tableft(guint key_id);
static void cb_func_switch_tabright(guint key_id);
static void cb_func_switch_tablastused(guint key_id);
static void cb_func_move_tab(guint key_id);
static void add_popup_menu_accels(void);
/** Looks up a keybinding item.
* @param group Group.
* @param key_id Keybinding index for the group.
* @return The keybinding.
* @since 0.19. */
GeanyKeyBinding *keybindings_get_item(GeanyKeyGroup *group, gsize key_id)
{
    /* Core groups all share the global binding_ids table; plugin groups
     * carry their own key array. */
    if (!group->plugin)
    {
        g_assert(key_id < GEANY_KEYS_COUNT);
        return &binding_ids[key_id];
    }
    g_assert(key_id < group->plugin_key_count);
    return &group->plugin_keys[key_id];
}
/* This is used to set default keybindings on startup.
* Menu accels are set in apply_kb_accel(). */
/** Fills a GeanyKeyBinding struct item.
* @note Always set @a key and @a mod to 0, otherwise you will likely
* cause conflicts with the user's custom, other plugin's keybindings or
* future default keybindings.
* @param group Group.
* @param key_id Keybinding index for the group.
* @param callback Function to call when activated, or @c NULL to use the group callback.
* Usually it's better to use the group callback instead - see plugin_set_key_group().
* @param key (Lower case) default key, e.g. @c GDK_j, but usually 0 for unset.
* @param mod Default modifier, e.g. @c GDK_CONTROL_MASK, but usually 0 for unset.
* @param kf_name Key name for the configuration file, such as @c "menu_new".
* @param label Label used in the preferences dialog keybindings tab. May contain
* underscores - these won't be displayed.
* @param menu_item Optional widget to set an accelerator for, or @c NULL.
* @return The keybinding - normally this is ignored. */
GeanyKeyBinding *keybindings_set_item(GeanyKeyGroup *group, gsize key_id,
    GeanyKeyCallback callback, guint key, GdkModifierType mod,
    const gchar *kf_name, const gchar *label, GtkWidget *menu_item)
{
    GeanyKeyBinding *kb;

    g_assert(group->name);
    kb = keybindings_get_item(group, key_id);
    /* Each slot may only be filled once. */
    g_assert(!kb->name);
    g_ptr_array_add(group->key_items, kb);

    if (!group->plugin)
    {
        /* core bindings: name/label are static strings, the const cast is safe */
        kb->name = (gchar *)kf_name;
        kb->label = (gchar *)label;
    }
    else
    {
        /* plugins (e.g. GeanyLua) need their own copies of these fields */
        SETPTR(kb->name, g_strdup(kf_name));
        SETPTR(kb->label, g_strdup(label));
    }

    /* Current binding starts out equal to the default. */
    kb->key = kb->default_key = key;
    kb->mods = kb->default_mods = mod;
    kb->callback = callback;
    kb->menu_item = menu_item;
    kb->id = key_id;
    return kb;
}
/* Initialize a key group's fields and append it to the global list
 * (which defines the visual order). */
static void add_kb_group(GeanyKeyGroup *group,
    const gchar *name, const gchar *label, GeanyKeyGroupCallback callback, gboolean plugin)
{
    group->name = name;
    group->label = label;
    group->callback = callback;
    group->plugin = plugin;
    group->key_items = g_ptr_array_new();
    g_ptr_array_add(keybinding_groups, group);
}
/* Return the statically allocated core group for the given id,
 * or NULL (with a warning) for an out-of-range id. */
GeanyKeyGroup *keybindings_get_core_group(guint id)
{
    static GeanyKeyGroup groups[GEANY_KEY_GROUP_COUNT];

    g_return_val_if_fail(id < GEANY_KEY_GROUP_COUNT, NULL);
    return groups + id;
}
/* Convenience wrapper around keybindings_set_item() that resolves an
 * optional menu widget by name from the main window. */
static void add_kb(GeanyKeyGroup *group, gsize key_id,
    GeanyKeyCallback callback, guint key, GdkModifierType mod,
    const gchar *kf_name, const gchar *label, const gchar *widget_name)
{
    GtkWidget *widget = NULL;

    if (widget_name)
        widget = ui_lookup_widget(main_widgets.window, widget_name);
    keybindings_set_item(group, key_id, callback, key, mod, kf_name, label, widget);
}
#define ADD_KB_GROUP(group_id, label, callback) \
add_kb_group(keybindings_get_core_group(group_id),\
keybindings_keyfile_group_name, label, callback, FALSE)
static void init_default_kb(void)
{
GeanyKeyGroup *group;
/* visual group order */
ADD_KB_GROUP(GEANY_KEY_GROUP_FILE, _("File"), cb_func_file_action);
ADD_KB_GROUP(GEANY_KEY_GROUP_EDITOR, _("Editor"), cb_func_editor_action);
ADD_KB_GROUP(GEANY_KEY_GROUP_CLIPBOARD, _("Clipboard"), cb_func_clipboard_action);
ADD_KB_GROUP(GEANY_KEY_GROUP_SELECT, _("Select"), cb_func_select_action);
ADD_KB_GROUP(GEANY_KEY_GROUP_FORMAT, _("Format"), cb_func_format_action);
ADD_KB_GROUP(GEANY_KEY_GROUP_INSERT, _("Insert"), cb_func_insert_action);
ADD_KB_GROUP(GEANY_KEY_GROUP_SETTINGS, _("Settings"), NULL);
ADD_KB_GROUP(GEANY_KEY_GROUP_SEARCH, _("Search"), cb_func_search_action);
ADD_KB_GROUP(GEANY_KEY_GROUP_GOTO, _("Go to"), cb_func_goto_action);
ADD_KB_GROUP(GEANY_KEY_GROUP_VIEW, _("View"), cb_func_view_action);
ADD_KB_GROUP(GEANY_KEY_GROUP_DOCUMENT, _("Document"), cb_func_document_action);
ADD_KB_GROUP(GEANY_KEY_GROUP_PROJECT, _("Project"), cb_func_project_action);
ADD_KB_GROUP(GEANY_KEY_GROUP_BUILD, _("Build"), cb_func_build_action);
ADD_KB_GROUP(GEANY_KEY_GROUP_TOOLS, _("Tools"), NULL);
ADD_KB_GROUP(GEANY_KEY_GROUP_HELP, _("Help"), NULL);
ADD_KB_GROUP(GEANY_KEY_GROUP_FOCUS, _("Focus"), cb_func_switch_action);
ADD_KB_GROUP(GEANY_KEY_GROUP_NOTEBOOK, _("Notebook tab"), NULL);
/* Init all fields of keys with default values.
* The menu_item field is always the main menu item, popup menu accelerators are
* set in add_popup_menu_accels(). */
group = keybindings_get_core_group(GEANY_KEY_GROUP_FILE);
add_kb(group, GEANY_KEYS_FILE_NEW, NULL,
GDK_n, GDK_CONTROL_MASK, "menu_new", _("New"), NULL);
add_kb(group, GEANY_KEYS_FILE_OPEN, NULL,
GDK_o, GDK_CONTROL_MASK, "menu_open", _("Open"), NULL);
add_kb(group, GEANY_KEYS_FILE_OPENSELECTED, NULL,
GDK_o, GDK_SHIFT_MASK | GDK_CONTROL_MASK, "menu_open_selected",
_("Open selected file"), "menu_open_selected_file1");
add_kb(group, GEANY_KEYS_FILE_SAVE, NULL,
GDK_s, GDK_CONTROL_MASK, "menu_save", _("Save"), NULL);
add_kb(group, GEANY_KEYS_FILE_SAVEAS, NULL,
0, 0, "menu_saveas", _("Save as"), "menu_save_as1");
add_kb(group, GEANY_KEYS_FILE_SAVEALL, NULL,
GDK_s, GDK_SHIFT_MASK | GDK_CONTROL_MASK, "menu_saveall", _("Save all"),
"menu_save_all1");
add_kb(group, GEANY_KEYS_FILE_PRINT, NULL,
GDK_p, GDK_CONTROL_MASK, "menu_print", _("Print"), "print1");
add_kb(group, GEANY_KEYS_FILE_CLOSE, NULL,
GDK_w, GDK_CONTROL_MASK, "menu_close", _("Close"), "menu_close1");
add_kb(group, GEANY_KEYS_FILE_CLOSEALL, NULL,
GDK_w, GDK_CONTROL_MASK | GDK_SHIFT_MASK, "menu_closeall", _("Close all"),
"menu_close_all1");
add_kb(group, GEANY_KEYS_FILE_RELOAD, NULL,
GDK_r, GDK_CONTROL_MASK, "menu_reloadfile", _("Reload file"), "menu_reload1");
add_kb(group, GEANY_KEYS_FILE_OPENLASTTAB, NULL,
0, 0, "file_openlasttab", _("Re-open last closed tab"), NULL);
group = keybindings_get_core_group(GEANY_KEY_GROUP_PROJECT);
add_kb(group, GEANY_KEYS_PROJECT_NEW, NULL,
0, 0, "project_new", _("New"), "project_new1");
add_kb(group, GEANY_KEYS_PROJECT_OPEN, NULL,
0, 0, "project_open", _("Open"), "project_open1");
add_kb(group, GEANY_KEYS_PROJECT_PROPERTIES, NULL,
0, 0, "project_properties",
ui_lookup_stock_label(GTK_STOCK_PROPERTIES), "project_properties1");
add_kb(group, GEANY_KEYS_PROJECT_CLOSE, NULL,
0, 0, "project_close", _("Close"), "project_close1");
group = keybindings_get_core_group(GEANY_KEY_GROUP_EDITOR);
add_kb(group, GEANY_KEYS_EDITOR_UNDO, NULL,
GDK_z, GDK_CONTROL_MASK, "menu_undo", _("Undo"), "menu_undo2");
add_kb(group, GEANY_KEYS_EDITOR_REDO, NULL,
GDK_y, GDK_CONTROL_MASK, "menu_redo", _("Redo"), "menu_redo2");
add_kb(group, GEANY_KEYS_EDITOR_DUPLICATELINE, NULL,
GDK_d, GDK_CONTROL_MASK, "edit_duplicateline", _("_Duplicate Line or Selection"),
"duplicate_line_or_selection1");
add_kb(group, GEANY_KEYS_EDITOR_DELETELINE, NULL,
GDK_k, GDK_CONTROL_MASK, "edit_deleteline", _("_Delete Current Line(s)"),
"delete_current_lines1");
add_kb(group, GEANY_KEYS_EDITOR_DELETELINETOEND, NULL,
GDK_Delete, GDK_SHIFT_MASK | GDK_CONTROL_MASK, "edit_deletelinetoend",
_("Delete to line end"), NULL);
/* transpose may fit better in format group */
add_kb(group, GEANY_KEYS_EDITOR_TRANSPOSELINE, NULL,
0, 0, "edit_transposeline", _("_Transpose Current Line"),
"transpose_current_line1");
add_kb(group, GEANY_KEYS_EDITOR_SCROLLTOLINE, NULL,
GDK_l, GDK_SHIFT_MASK | GDK_CONTROL_MASK, "edit_scrolltoline", _("Scroll to current line"), NULL);
add_kb(group, GEANY_KEYS_EDITOR_SCROLLLINEUP, NULL,
GDK_Up, GDK_MOD1_MASK, "edit_scrolllineup", _("Scroll up the view by one line"), NULL);
add_kb(group, GEANY_KEYS_EDITOR_SCROLLLINEDOWN, NULL,
GDK_Down, GDK_MOD1_MASK, "edit_scrolllinedown", _("Scroll down the view by one line"), NULL);
add_kb(group, GEANY_KEYS_EDITOR_COMPLETESNIPPET, NULL,
GDK_Tab, 0, "edit_completesnippet", _("Complete snippet"), NULL);
add_kb(group, GEANY_KEYS_EDITOR_SNIPPETNEXTCURSOR, NULL,
0, 0, "move_snippetnextcursor", _("Move cursor in snippet"), NULL);
add_kb(group, GEANY_KEYS_EDITOR_SUPPRESSSNIPPETCOMPLETION, NULL,
0, 0, "edit_suppresssnippetcompletion", _("Suppress snippet completion"), NULL);
add_kb(group, GEANY_KEYS_EDITOR_CONTEXTACTION, NULL,
0, 0, "popup_contextaction", _("Context Action"), NULL);
add_kb(group, GEANY_KEYS_EDITOR_AUTOCOMPLETE, NULL,
GDK_space, GDK_CONTROL_MASK, "edit_autocomplete", _("Complete word"), NULL);
add_kb(group, GEANY_KEYS_EDITOR_CALLTIP, NULL,
GDK_space, GDK_CONTROL_MASK | GDK_SHIFT_MASK, "edit_calltip", _("Show calltip"), NULL);
add_kb(group, GEANY_KEYS_EDITOR_MACROLIST, NULL,
GDK_Return, GDK_CONTROL_MASK, "edit_macrolist", _("Show macro list"), NULL);
add_kb(group, GEANY_KEYS_EDITOR_WORDPARTCOMPLETION, NULL,
GDK_Tab, 0, "edit_wordpartcompletion", _("Word part completion"), NULL);
add_kb(group, GEANY_KEYS_EDITOR_MOVELINEUP, NULL,
GDK_Page_Up, GDK_MOD1_MASK, "edit_movelineup", _("Move line(s) up"), NULL);
add_kb(group, GEANY_KEYS_EDITOR_MOVELINEDOWN, NULL,
GDK_Page_Down, GDK_MOD1_MASK, "edit_movelinedown", _("Move line(s) down"), NULL);
group = keybindings_get_core_group(GEANY_KEY_GROUP_CLIPBOARD);
add_kb(group, GEANY_KEYS_CLIPBOARD_CUT, NULL,
GDK_x, GDK_CONTROL_MASK, "menu_cut", _("Cut"), NULL);
add_kb(group, GEANY_KEYS_CLIPBOARD_COPY, NULL,
GDK_c, GDK_CONTROL_MASK, "menu_copy", _("Copy"), NULL);
add_kb(group, GEANY_KEYS_CLIPBOARD_PASTE, NULL,
GDK_v, GDK_CONTROL_MASK, "menu_paste", _("Paste"), NULL);
add_kb(group, GEANY_KEYS_CLIPBOARD_COPYLINE, NULL,
GDK_c, GDK_CONTROL_MASK | GDK_SHIFT_MASK, "edit_copyline", _("_Copy Current Line(s)"),
"copy_current_lines1");
add_kb(group, GEANY_KEYS_CLIPBOARD_CUTLINE, NULL,
GDK_x, GDK_CONTROL_MASK | GDK_SHIFT_MASK, "edit_cutline", _("_Cut Current Line(s)"),
"cut_current_lines1");
group = keybindings_get_core_group(GEANY_KEY_GROUP_SELECT);
add_kb(group, GEANY_KEYS_SELECT_ALL, NULL,
GDK_a, GDK_CONTROL_MASK, "menu_selectall", _("Select All"), "menu_select_all1");
add_kb(group, GEANY_KEYS_SELECT_WORD, NULL,
GDK_w, GDK_SHIFT_MASK | GDK_MOD1_MASK, "edit_selectword", _("Select current word"), NULL);
add_kb(group, GEANY_KEYS_SELECT_LINE, NULL,
GDK_l, GDK_SHIFT_MASK | GDK_MOD1_MASK, "edit_selectline", _("_Select Current Line(s)"),
"select_current_lines1");
add_kb(group, GEANY_KEYS_SELECT_PARAGRAPH, NULL,
GDK_p, GDK_SHIFT_MASK | GDK_MOD1_MASK, "edit_selectparagraph", _("_Select Current Paragraph"),
"select_current_paragraph1");
add_kb(group, GEANY_KEYS_SELECT_WORDPARTLEFT, NULL,
0, 0, "edit_selectwordpartleft", _("Select to previous word part"), NULL);
add_kb(group, GEANY_KEYS_SELECT_WORDPARTRIGHT, NULL,
0, 0, "edit_selectwordpartright", _("Select to next word part"), NULL);
group = keybindings_get_core_group(GEANY_KEY_GROUP_FORMAT);
add_kb(group, GEANY_KEYS_FORMAT_TOGGLECASE, NULL,
GDK_u, GDK_CONTROL_MASK | GDK_MOD1_MASK, "edit_togglecase",
_("T_oggle Case of Selection"), "menu_toggle_case2");
add_kb(group, GEANY_KEYS_FORMAT_COMMENTLINETOGGLE, NULL,
GDK_e, GDK_CONTROL_MASK, "edit_commentlinetoggle", _("Toggle line commentation"),
"menu_toggle_line_commentation1");
add_kb(group, GEANY_KEYS_FORMAT_COMMENTLINE, NULL,
0, 0, "edit_commentline", _("Comment line(s)"), "menu_comment_line1");
add_kb(group, GEANY_KEYS_FORMAT_UNCOMMENTLINE, NULL,
0, 0, "edit_uncommentline", _("Uncomment line(s)"), "menu_uncomment_line1");
add_kb(group, GEANY_KEYS_FORMAT_INCREASEINDENT, NULL,
GDK_i, GDK_CONTROL_MASK, "edit_increaseindent", _("Increase indent"),
"menu_increase_indent1");
add_kb(group, GEANY_KEYS_FORMAT_DECREASEINDENT, NULL,
GDK_u, GDK_CONTROL_MASK, "edit_decreaseindent", _("Decrease indent"),
"menu_decrease_indent1");
add_kb(group, GEANY_KEYS_FORMAT_INCREASEINDENTBYSPACE, NULL,
0, 0, "edit_increaseindentbyspace", _("Increase indent by one space"), NULL);
add_kb(group, GEANY_KEYS_FORMAT_DECREASEINDENTBYSPACE, NULL,
0, 0, "edit_decreaseindentbyspace", _("Decrease indent by one space"), NULL);
add_kb(group, GEANY_KEYS_FORMAT_AUTOINDENT, NULL,
0, 0, "edit_autoindent", _("_Smart Line Indent"), "smart_line_indent1");
add_kb(group, GEANY_KEYS_FORMAT_SENDTOCMD1, NULL,
GDK_1, GDK_CONTROL_MASK, "edit_sendtocmd1", _("Send to Custom Command 1"), NULL);
add_kb(group, GEANY_KEYS_FORMAT_SENDTOCMD2, NULL,
GDK_2, GDK_CONTROL_MASK, "edit_sendtocmd2", _("Send to Custom Command 2"), NULL);
add_kb(group, GEANY_KEYS_FORMAT_SENDTOCMD3, NULL,
GDK_3, GDK_CONTROL_MASK, "edit_sendtocmd3", _("Send to Custom Command 3"), NULL);
/* may fit better in editor group */
add_kb(group, GEANY_KEYS_FORMAT_SENDTOVTE, NULL,
0, 0, "edit_sendtovte", _("_Send Selection to Terminal"), "send_selection_to_vte1");
add_kb(group, GEANY_KEYS_FORMAT_REFLOWPARAGRAPH, NULL,
GDK_j, GDK_CONTROL_MASK, "format_reflowparagraph", _("_Reflow Lines/Block"),
"reflow_lines_block1");
keybindings_set_item(group, GEANY_KEYS_FORMAT_JOINLINES, NULL,
0, 0, "edit_joinlines", _("Join lines"), NULL);
group = keybindings_get_core_group(GEANY_KEY_GROUP_INSERT);
add_kb(group, GEANY_KEYS_INSERT_DATE, NULL,
GDK_d, GDK_SHIFT_MASK | GDK_MOD1_MASK, "menu_insert_date", _("Insert date"),
"insert_date_custom1");
add_kb(group, GEANY_KEYS_INSERT_ALTWHITESPACE, NULL,
0, 0, "edit_insertwhitespace", _("_Insert Alternative White Space"),
"insert_alternative_white_space1");
add_kb(group, GEANY_KEYS_INSERT_LINEBEFORE, NULL,
0, 0, "edit_insertlinebefore", _("Insert New Line Before Current"), NULL);
add_kb(group, GEANY_KEYS_INSERT_LINEAFTER, NULL,
0, 0, "edit_insertlineafter", _("Insert New Line After Current"), NULL);
group = keybindings_get_core_group(GEANY_KEY_GROUP_SETTINGS);
add_kb(group, GEANY_KEYS_SETTINGS_PREFERENCES, cb_func_menu_preferences,
GDK_p, GDK_CONTROL_MASK | GDK_MOD1_MASK, "menu_preferences", _("Preferences"),
"preferences1");
add_kb(group, GEANY_KEYS_SETTINGS_PLUGINPREFERENCES, cb_func_menu_preferences,
0, 0, "menu_pluginpreferences", _("P_lugin Preferences"), "plugin_preferences1");
group = keybindings_get_core_group(GEANY_KEY_GROUP_SEARCH);
add_kb(group, GEANY_KEYS_SEARCH_FIND, NULL,
GDK_f, GDK_CONTROL_MASK, "menu_find", _("Find"), "find1");
add_kb(group, GEANY_KEYS_SEARCH_FINDNEXT, NULL,
GDK_g, GDK_CONTROL_MASK, "menu_findnext", _("Find Next"), "find_next1");
add_kb(group, GEANY_KEYS_SEARCH_FINDPREVIOUS, NULL,
GDK_g, GDK_CONTROL_MASK | GDK_SHIFT_MASK, "menu_findprevious", _("Find Previous"),
"find_previous1");
add_kb(group, GEANY_KEYS_SEARCH_FINDNEXTSEL, NULL,
0, 0, "menu_findnextsel", _("Find Next _Selection"), "find_nextsel1");
add_kb(group, GEANY_KEYS_SEARCH_FINDPREVSEL, NULL,
0, 0, "menu_findprevsel", _("Find Pre_vious Selection"), "find_prevsel1");
add_kb(group, GEANY_KEYS_SEARCH_REPLACE, NULL,
GDK_h, GDK_CONTROL_MASK, "menu_replace", _("Replace"), "replace1");
add_kb(group, GEANY_KEYS_SEARCH_FINDINFILES, NULL, GDK_f,
GDK_CONTROL_MASK | GDK_SHIFT_MASK, "menu_findinfiles", _("Find in Files"),
"find_in_files1");
add_kb(group, GEANY_KEYS_SEARCH_NEXTMESSAGE, NULL,
0, 0, "menu_nextmessage", _("Next Message"), "next_message1");
add_kb(group, GEANY_KEYS_SEARCH_PREVIOUSMESSAGE, NULL,
0, 0, "menu_previousmessage", _("Previous Message"), "previous_message1");
add_kb(group, GEANY_KEYS_SEARCH_FINDUSAGE, NULL,
GDK_e, GDK_CONTROL_MASK | GDK_SHIFT_MASK, "popup_findusage",
_("Find Usage"), "find_usage1");
add_kb(group, GEANY_KEYS_SEARCH_FINDDOCUMENTUSAGE, NULL,
GDK_d, GDK_CONTROL_MASK | GDK_SHIFT_MASK, "popup_finddocumentusage",
_("Find Document Usage"), "find_document_usage1");
add_kb(group, GEANY_KEYS_SEARCH_MARKALL, NULL,
GDK_m, GDK_CONTROL_MASK | GDK_SHIFT_MASK, "find_markall", _("_Mark All"), "mark_all1");
group = keybindings_get_core_group(GEANY_KEY_GROUP_GOTO);
add_kb(group, GEANY_KEYS_GOTO_BACK, NULL,
GDK_Left, GDK_MOD1_MASK, "nav_back", _("Navigate back a location"), NULL);
add_kb(group, GEANY_KEYS_GOTO_FORWARD, NULL,
GDK_Right, GDK_MOD1_MASK, "nav_forward", _("Navigate forward a location"), NULL);
add_kb(group, GEANY_KEYS_GOTO_LINE, NULL,
GDK_l, GDK_CONTROL_MASK, "menu_gotoline", _("Go to Line"), "go_to_line1");
add_kb(group, GEANY_KEYS_GOTO_MATCHINGBRACE, NULL,
GDK_b, GDK_CONTROL_MASK, "edit_gotomatchingbrace",
_("Go to matching brace"), NULL);
add_kb(group, GEANY_KEYS_GOTO_TOGGLEMARKER, NULL,
GDK_m, GDK_CONTROL_MASK, "edit_togglemarker",
_("Toggle marker"), NULL);
add_kb(group, GEANY_KEYS_GOTO_NEXTMARKER, NULL,
GDK_period, GDK_CONTROL_MASK, "edit_gotonextmarker",
_("_Go to Next Marker"), "go_to_next_marker1");
add_kb(group, GEANY_KEYS_GOTO_PREVIOUSMARKER, NULL,
GDK_comma, GDK_CONTROL_MASK, "edit_gotopreviousmarker",
_("_Go to Previous Marker"), "go_to_previous_marker1");
add_kb(group, GEANY_KEYS_GOTO_TAGDEFINITION, NULL,
GDK_t, GDK_CONTROL_MASK, "popup_gototagdefinition",
_("Go to Tag Definition"), "goto_tag_definition1");
add_kb(group, GEANY_KEYS_GOTO_TAGDECLARATION, NULL,
GDK_t, GDK_CONTROL_MASK | GDK_SHIFT_MASK, "popup_gototagdeclaration",
_("Go to Tag Declaration"), "goto_tag_declaration1");
add_kb(group, GEANY_KEYS_GOTO_LINESTART, NULL,
GDK_Home, 0, "edit_gotolinestart", _("Go to Start of Line"), NULL);
add_kb(group, GEANY_KEYS_GOTO_LINEEND, NULL,
GDK_End, 0, "edit_gotolineend", _("Go to End of Line"), NULL);
add_kb(group, GEANY_KEYS_GOTO_LINEENDVISUAL, NULL,
GDK_End, GDK_MOD1_MASK, "edit_gotolineendvisual", _("Go to End of Display Line"), NULL);
add_kb(group, GEANY_KEYS_GOTO_PREVWORDPART, NULL,
GDK_slash, GDK_CONTROL_MASK, "edit_prevwordstart", _("Go to Previous Word Part"), NULL);
add_kb(group, GEANY_KEYS_GOTO_NEXTWORDPART, NULL,
GDK_backslash, GDK_CONTROL_MASK, "edit_nextwordstart", _("Go to Next Word Part"), NULL);
group = keybindings_get_core_group(GEANY_KEY_GROUP_VIEW);
add_kb(group, GEANY_KEYS_VIEW_TOGGLEALL, NULL,
0, 0, "menu_toggleall", _("Toggle All Additional Widgets"),
"menu_toggle_all_additional_widgets1");
add_kb(group, GEANY_KEYS_VIEW_FULLSCREEN, cb_func_menu_fullscreen,
GDK_F11, 0, "menu_fullscreen", _("Fullscreen"), "menu_fullscreen1");
add_kb(group, GEANY_KEYS_VIEW_MESSAGEWINDOW, cb_func_menu_messagewindow,
0, 0, "menu_messagewindow", _("Toggle Messages Window"),
"menu_show_messages_window1");
add_kb(group, GEANY_KEYS_VIEW_SIDEBAR, NULL,
0, 0, "toggle_sidebar", _("Toggle Sidebar"), "menu_show_sidebar1");
add_kb(group, GEANY_KEYS_VIEW_ZOOMIN, NULL,
GDK_plus, GDK_CONTROL_MASK, "menu_zoomin", _("Zoom In"), "menu_zoom_in1");
add_kb(group, GEANY_KEYS_VIEW_ZOOMOUT, NULL,
GDK_minus, GDK_CONTROL_MASK, "menu_zoomout", _("Zoom Out"), "menu_zoom_out1");
add_kb(group, GEANY_KEYS_VIEW_ZOOMRESET, NULL,
GDK_0, GDK_CONTROL_MASK, "normal_size", _("Zoom Reset"), "normal_size1");
group = keybindings_get_core_group(GEANY_KEY_GROUP_FOCUS);
add_kb(group, GEANY_KEYS_FOCUS_EDITOR, NULL,
GDK_F2, 0, "switch_editor", _("Switch to Editor"), NULL);
add_kb(group, GEANY_KEYS_FOCUS_SEARCHBAR, NULL,
GDK_F7, 0, "switch_search_bar", _("Switch to Search Bar"), NULL);
add_kb(group, GEANY_KEYS_FOCUS_MESSAGE_WINDOW, NULL,
0, 0, "switch_message_window", _("Switch to Message Window"), NULL);
add_kb(group, GEANY_KEYS_FOCUS_COMPILER, NULL,
0, 0, "switch_compiler", _("Switch to Compiler"), NULL);
add_kb(group, GEANY_KEYS_FOCUS_MESSAGES, NULL,
0, 0, "switch_messages", _("Switch to Messages"), NULL);
add_kb(group, GEANY_KEYS_FOCUS_SCRIBBLE, NULL,
GDK_F6, 0, "switch_scribble", _("Switch to Scribble"), NULL);
add_kb(group, GEANY_KEYS_FOCUS_VTE, NULL,
GDK_F4, 0, "switch_vte", _("Switch to VTE"), NULL);
add_kb(group, GEANY_KEYS_FOCUS_SIDEBAR, NULL,
0, 0, "switch_sidebar", _("Switch to Sidebar"), NULL);
add_kb(group, GEANY_KEYS_FOCUS_SIDEBAR_SYMBOL_LIST, NULL,
0, 0, "switch_sidebar_symbol_list", _("Switch to Sidebar Symbol List"), NULL);
add_kb(group, GEANY_KEYS_FOCUS_SIDEBAR_DOCUMENT_LIST, NULL,
0, 0, "switch_sidebar_doc_list", _("Switch to Sidebar Document List"), NULL);
group = keybindings_get_core_group(GEANY_KEY_GROUP_NOTEBOOK);
add_kb(group, GEANY_KEYS_NOTEBOOK_SWITCHTABLEFT, cb_func_switch_tableft,
GDK_Page_Up, GDK_CONTROL_MASK, "switch_tableft", _("Switch to left document"), NULL);
add_kb(group, GEANY_KEYS_NOTEBOOK_SWITCHTABRIGHT, cb_func_switch_tabright,
GDK_Page_Down, GDK_CONTROL_MASK, "switch_tabright", _("Switch to right document"), NULL);
add_kb(group, GEANY_KEYS_NOTEBOOK_SWITCHTABLASTUSED, cb_func_switch_tablastused,
GDK_Tab, GDK_CONTROL_MASK, "switch_tablastused", _("Switch to last used document"), NULL);
add_kb(group, GEANY_KEYS_NOTEBOOK_MOVETABLEFT, cb_func_move_tab,
GDK_Page_Up, GDK_CONTROL_MASK | GDK_SHIFT_MASK, "move_tableft",
_("Move document left"), NULL);
add_kb(group, GEANY_KEYS_NOTEBOOK_MOVETABRIGHT, cb_func_move_tab,
GDK_Page_Down, GDK_CONTROL_MASK | GDK_SHIFT_MASK, "move_tabright",
_("Move document right"), NULL);
add_kb(group, GEANY_KEYS_NOTEBOOK_MOVETABFIRST, cb_func_move_tab,
0, 0, "move_tabfirst", _("Move document first"), NULL);
add_kb(group, GEANY_KEYS_NOTEBOOK_MOVETABLAST, cb_func_move_tab,
0, 0, "move_tablast", _("Move document last"), NULL);
group = keybindings_get_core_group(GEANY_KEY_GROUP_DOCUMENT);
add_kb(group, GEANY_KEYS_DOCUMENT_LINEWRAP, NULL,
0, 0, "menu_linewrap", _("Toggle Line wrapping"), "menu_line_wrapping1");
add_kb(group, GEANY_KEYS_DOCUMENT_LINEBREAK, NULL,
0, 0, "menu_linebreak", _("Toggle Line breaking"), "line_breaking1");
add_kb(group, GEANY_KEYS_DOCUMENT_REPLACETABS, NULL,
0, 0, "menu_replacetabs", _("Replace tabs by space"), "menu_replace_tabs");
add_kb(group, GEANY_KEYS_DOCUMENT_REPLACESPACES, NULL,
0, 0, "menu_replacespaces", _("Replace spaces by tabs"), "menu_replace_spaces");
add_kb(group, GEANY_KEYS_DOCUMENT_TOGGLEFOLD, NULL,
0, 0, "menu_togglefold", _("Toggle current fold"), NULL);
add_kb(group, GEANY_KEYS_DOCUMENT_FOLDALL, NULL,
0, 0, "menu_foldall", _("Fold all"), "menu_fold_all1");
add_kb(group, GEANY_KEYS_DOCUMENT_UNFOLDALL, NULL,
0, 0, "menu_unfoldall", _("Unfold all"), "menu_unfold_all1");
add_kb(group, GEANY_KEYS_DOCUMENT_RELOADTAGLIST, NULL,
GDK_r, GDK_SHIFT_MASK | GDK_CONTROL_MASK, "reloadtaglist", _("Reload symbol list"), NULL);
add_kb(group, GEANY_KEYS_DOCUMENT_REMOVE_MARKERS, NULL,
0, 0, "remove_markers", _("Remove Markers"), "remove_markers1");
add_kb(group, GEANY_KEYS_DOCUMENT_REMOVE_ERROR_INDICATORS, NULL,
0, 0, "remove_error_indicators", _("Remove Error Indicators"), "menu_remove_indicators1");
add_kb(group, GEANY_KEYS_DOCUMENT_REMOVE_MARKERS_INDICATORS, NULL,
0, 0, "remove_markers_and_indicators", _("Remove Markers and Error Indicators"), NULL);
group = keybindings_get_core_group(GEANY_KEY_GROUP_BUILD);
add_kb(group, GEANY_KEYS_BUILD_COMPILE, NULL,
GDK_F8, 0, "build_compile", _("Compile"), NULL);
add_kb(group, GEANY_KEYS_BUILD_LINK, NULL,
GDK_F9, 0, "build_link", _("Build"), NULL);
add_kb(group, GEANY_KEYS_BUILD_MAKE, NULL,
GDK_F9, GDK_SHIFT_MASK, "build_make", _("Make all"), NULL);
add_kb(group, GEANY_KEYS_BUILD_MAKEOWNTARGET, NULL,
GDK_F9, GDK_SHIFT_MASK | GDK_CONTROL_MASK, "build_makeowntarget",
_("Make custom target"), NULL);
add_kb(group, GEANY_KEYS_BUILD_MAKEOBJECT, NULL,
GDK_F8, GDK_SHIFT_MASK, "build_makeobject", _("Make object"), NULL);
add_kb(group, GEANY_KEYS_BUILD_NEXTERROR, NULL,
0, 0, "build_nexterror", _("Next error"), NULL);
add_kb(group, GEANY_KEYS_BUILD_PREVIOUSERROR, NULL,
0, 0, "build_previouserror", _("Previous error"), NULL);
add_kb(group, GEANY_KEYS_BUILD_RUN, NULL,
GDK_F5, 0, "build_run", _("Run"), NULL);
add_kb(group, GEANY_KEYS_BUILD_OPTIONS, NULL,
0, 0, "build_options", _("Build options"), NULL);
group = keybindings_get_core_group(GEANY_KEY_GROUP_TOOLS);
add_kb(group, GEANY_KEYS_TOOLS_OPENCOLORCHOOSER, cb_func_menu_opencolorchooser,
0, 0, "menu_opencolorchooser", _("Show Color Chooser"), "menu_choose_color1");
group = keybindings_get_core_group(GEANY_KEY_GROUP_HELP);
add_kb(group, GEANY_KEYS_HELP_HELP, cb_func_menu_help,
GDK_F1, 0, "menu_help", _("Help"), "help1");
}
/* Initialize the keybinding subsystem: clear the binding id table, create the
 * group array and GTK accel group, register the default bindings, and hook
 * the global key-press handler on the main window.
 * Must be called after main_widgets.window exists. */
void keybindings_init(void)
{
	memset(binding_ids, 0, sizeof binding_ids);
	keybinding_groups = g_ptr_array_sized_new(GEANY_KEY_GROUP_COUNT);
	kb_accel_group = gtk_accel_group_new();
	init_default_kb();
	gtk_window_add_accel_group(GTK_WINDOW(main_widgets.window), kb_accel_group);
	g_signal_connect(main_widgets.window, "key-press-event", G_CALLBACK(on_key_press_event), NULL);
}
typedef void (*KBItemCallback) (GeanyKeyGroup *group, GeanyKeyBinding *kb, gpointer user_data);
/* Invoke cb once for every keybinding of every registered group. */
static void keybindings_foreach(KBItemCallback cb, gpointer user_data)
{
	guint group_idx, item_idx;

	for (group_idx = 0; group_idx < keybinding_groups->len; group_idx++)
	{
		GeanyKeyGroup *grp = g_ptr_array_index(keybinding_groups, group_idx);

		for (item_idx = 0; item_idx < grp->key_items->len; item_idx++)
			cb(grp, g_ptr_array_index(grp->key_items, item_idx), user_data);
	}
}
/* keybindings_foreach() callback: override kb's accelerator with the user's
 * setting from the keyfile (user_data), if one exists. */
static void load_kb(GeanyKeyGroup *group, GeanyKeyBinding *kb, gpointer user_data)
{
	GKeyFile *config = user_data;
	guint key;
	GdkModifierType mods;
	gchar *accel = g_key_file_get_string(config, group->name, kb->name, NULL);

	if (accel == NULL)
		return;	/* no user override for this binding */

	gtk_accelerator_parse(accel, &key, &mods);
	kb->key = key;
	kb->mods = mods;
	g_free(accel);
}
/* Read user-defined keybindings from keybindings.conf, creating the file on
 * first run (with Geany 0.21 compatible defaults for upgrading users). */
static void load_user_kb(void)
{
	GKeyFile *kf = g_key_file_new();
	gchar *kb_file = g_strconcat(app->configdir, G_DIR_SEPARATOR_S, "keybindings.conf", NULL);

	/* backwards compatibility with Geany 0.21 defaults */
	if (!g_file_test(kb_file, G_FILE_TEST_EXISTS))
	{
		static const gchar data[] = "[Bindings]\n"
			"popup_gototagdefinition=\n"
			"edit_transposeline=<Control>t\n"
			"edit_movelineup=\n"
			"edit_movelinedown=\n"
			"move_tableft=<Alt>Page_Up\n"
			"move_tabright=<Alt>Page_Down\n";
		gchar *old_conf = g_strconcat(app->configdir, G_DIR_SEPARATOR_S, "geany.conf", NULL);

		/* only write the old defaults when an existing geany.conf shows
		 * this is an upgrade rather than a fresh install */
		utils_write_file(kb_file, g_file_test(old_conf, G_FILE_TEST_EXISTS) ? data : "");
		g_free(old_conf);
	}
	/* now load user defined keys */
	if (g_key_file_load_from_file(kf, kb_file, G_KEY_FILE_KEEP_COMMENTS, NULL))
		keybindings_foreach(load_kb, kf);

	g_free(kb_file);
	g_key_file_free(kf);
}
/* keybindings_foreach() callback: attach the binding's accelerator to its
 * main-menu item so GTK displays the shortcut next to the label. */
static void apply_kb_accel(GeanyKeyGroup *group, GeanyKeyBinding *kb, gpointer user_data)
{
	if (kb->key == 0 || kb->menu_item == NULL)
		return;

	gtk_widget_add_accelerator(kb->menu_item, "activate", kb_accel_group,
		kb->key, kb->mods, GTK_ACCEL_VISIBLE);
}
/* Load the user's keybinding file and then apply accelerators to the menu
 * items. The keyfile must be read first so the shortcuts displayed in the
 * menus reflect any user customizations. */
void keybindings_load_keyfile(void)
{
	load_user_kb();
	add_popup_menu_accels();
	/* set menu accels now, after user keybindings have been read */
	keybindings_foreach(apply_kb_accel, NULL);
}
/* Display the accelerator of binding kb_id on the given (popup) menu item,
 * if the binding has a key set. */
static void add_menu_accel(GeanyKeyGroup *group, guint kb_id, GtkWidget *menuitem)
{
	GeanyKeyBinding *binding = keybindings_get_item(group, kb_id);

	if (binding->key == 0)
		return;

	gtk_widget_add_accelerator(menuitem, "activate", kb_accel_group,
		binding->key, binding->mods, GTK_ACCEL_VISIBLE);
}
/* Look up widget `wid` in the editor popup menu and display the accelerator
 * of binding `kb_id` on it; a `group` variable must be in scope at the call site. */
#define GEANY_ADD_POPUP_ACCEL(kb_id, wid) \
	add_menu_accel(group, kb_id, ui_lookup_widget(main_widgets.editor_menu, G_STRINGIFY(wid)))
/* set the menu item accelerator shortcuts (just for visibility, they are handled anyway) */
static void add_popup_menu_accels(void)
{
	GeanyKeyGroup *group;

	/* editor popup: undo/redo/context action */
	group = keybindings_get_core_group(GEANY_KEY_GROUP_EDITOR);
	GEANY_ADD_POPUP_ACCEL(GEANY_KEYS_EDITOR_UNDO, undo1);
	GEANY_ADD_POPUP_ACCEL(GEANY_KEYS_EDITOR_REDO, redo1);
	GEANY_ADD_POPUP_ACCEL(GEANY_KEYS_EDITOR_CONTEXTACTION, context_action1);

	group = keybindings_get_core_group(GEANY_KEY_GROUP_SELECT);
	GEANY_ADD_POPUP_ACCEL(GEANY_KEYS_SELECT_ALL, menu_select_all2);

	group = keybindings_get_core_group(GEANY_KEY_GROUP_INSERT);
	GEANY_ADD_POPUP_ACCEL(GEANY_KEYS_INSERT_DATE, insert_date_custom2);
	GEANY_ADD_POPUP_ACCEL(GEANY_KEYS_INSERT_ALTWHITESPACE, insert_alternative_white_space2);

	group = keybindings_get_core_group(GEANY_KEY_GROUP_FILE);
	GEANY_ADD_POPUP_ACCEL(GEANY_KEYS_FILE_OPENSELECTED, menu_open_selected_file2);

	group = keybindings_get_core_group(GEANY_KEY_GROUP_SEARCH);
	GEANY_ADD_POPUP_ACCEL(GEANY_KEYS_SEARCH_FINDUSAGE, find_usage2);
	GEANY_ADD_POPUP_ACCEL(GEANY_KEYS_SEARCH_FINDDOCUMENTUSAGE, find_document_usage2);

	group = keybindings_get_core_group(GEANY_KEY_GROUP_GOTO);
	GEANY_ADD_POPUP_ACCEL(GEANY_KEYS_GOTO_TAGDEFINITION, goto_tag_definition2);

	/* Format and Commands share the menu bar submenus */
	/* Build menu items are set if the build menus are created */
}
/* keybindings_foreach() callback: serialize one binding into the keyfile
 * (user_data) as "<group>/<name>=<accelerator>". */
static void set_keyfile_kb(GeanyKeyGroup *group, GeanyKeyBinding *kb, gpointer user_data)
{
	GKeyFile *config = user_data;
	gchar *accel_name = gtk_accelerator_name(kb->key, kb->mods);

	g_key_file_set_string(config, group->name, kb->name, accel_name);
	g_free(accel_name);
}
/* Write the current contents of the keybinding array to keybindings.conf. */
void keybindings_write_to_file(void)
{
	GKeyFile *kf = g_key_file_new();
	gchar *kb_file = g_strconcat(app->configdir, G_DIR_SEPARATOR_S, "keybindings.conf", NULL);
	gchar *contents;

	/* merge over any existing file; a load failure (e.g. the file does not
	 * exist yet) simply leaves us with an empty keyfile to fill */
	g_key_file_load_from_file(kf, kb_file, 0, NULL);
	keybindings_foreach(set_keyfile_kb, kf);

	/* write the file */
	contents = g_key_file_to_data(kf, NULL, NULL);
	utils_write_file(kb_file, contents);

	g_free(contents);
	g_free(kb_file);
	g_key_file_free(kf);
}
void keybindings_free(void)
{
GeanyKeyGroup *group;
gsize g;
foreach_ptr_array(group, g, keybinding_groups)
keybindings_free_group(group);
g_ptr_array_free(keybinding_groups, TRUE);
}
/* Return a newly allocated copy of the binding's label with mnemonic
 * underscores stripped; the caller owns the returned string. */
gchar *keybindings_get_label(GeanyKeyBinding *kb)
{
	gchar *label_copy = g_strdup(kb->label);

	return utils_str_remove_chars(label_copy, "_");
}
/* Populate the shortcuts-dialog tree view with all keybindings.
 * Store columns: 0 = label text, 1 = accelerator text, 2 = Pango font weight
 * (bold for group headers, normal for bindings). Groups are separated by an
 * empty row. */
static void fill_shortcut_labels_treeview(GtkWidget *tree)
{
	gsize g, i;
	GeanyKeyBinding *kb;
	GeanyKeyGroup *group;
	GtkListStore *store;
	GtkTreeIter iter;
	store = gtk_list_store_new(3, G_TYPE_STRING, G_TYPE_STRING, PANGO_TYPE_WEIGHT);
	foreach_ptr_array(group, g, keybinding_groups)
	{
		if (g > 0)
		{
			/* blank separator row between consecutive groups */
			gtk_list_store_append(store, &iter);
			gtk_list_store_set(store, &iter, -1);
		}
		/* bold header row with the group label */
		gtk_list_store_append(store, &iter);
		gtk_list_store_set(store, &iter, 0, group->label, 2, PANGO_WEIGHT_BOLD, -1);
		foreach_ptr_array(kb, i, group->key_items)
		{
			gchar *shortcut, *label;
			label = keybindings_get_label(kb);
			shortcut = gtk_accelerator_get_label(kb->key, kb->mods);
			gtk_list_store_append(store, &iter);
			gtk_list_store_set(store, &iter, 0, label, 1, shortcut, 2, PANGO_WEIGHT_NORMAL, -1);
			g_free(shortcut);
			g_free(label);
		}
	}
	gtk_tree_view_set_model(GTK_TREE_VIEW(tree), GTK_TREE_MODEL(store));
	/* the tree view now holds the only needed reference to the store */
	g_object_unref(store);
}
/* Build the (non-modal) "Keyboard Shortcuts" dialog: a scrolled two-column
 * tree view (label, accelerator) with Edit and Close buttons.
 * Returns the new dialog; the caller shows and owns it. */
static GtkWidget *create_dialog(void)
{
	GtkWidget *dialog, *tree, *label, *swin, *vbox;
	GtkCellRenderer *text_renderer;
	GtkTreeViewColumn *column;
	dialog = gtk_dialog_new_with_buttons(_("Keyboard Shortcuts"), GTK_WINDOW(main_widgets.window),
				GTK_DIALOG_DESTROY_WITH_PARENT,
				GTK_STOCK_EDIT, GTK_RESPONSE_APPLY,
				GTK_STOCK_CLOSE, GTK_RESPONSE_CANCEL, NULL);
	vbox = ui_dialog_vbox_new(GTK_DIALOG(dialog));
	gtk_box_set_spacing(GTK_BOX(vbox), 6);
	gtk_widget_set_name(dialog, "GeanyDialog");
	gtk_window_set_default_size(GTK_WINDOW(dialog), -1, GEANY_DEFAULT_DIALOG_HEIGHT);
	gtk_dialog_set_default_response(GTK_DIALOG(dialog), GTK_RESPONSE_CANCEL);
	label = gtk_label_new(_("The following keyboard shortcuts are configurable:"));
	gtk_misc_set_alignment(GTK_MISC(label), 0, 0.5);
	tree = gtk_tree_view_new();
	gtk_tree_view_set_rules_hint(GTK_TREE_VIEW(tree), TRUE);
	gtk_tree_view_set_headers_visible(GTK_TREE_VIEW(tree), FALSE);
	text_renderer = gtk_cell_renderer_text_new();
	/* we can't use "weight-set", see http://bugzilla.gnome.org/show_bug.cgi?id=355214 */
	column = gtk_tree_view_column_new_with_attributes(
		NULL, text_renderer, "text", 0, "weight", 2, NULL);
	gtk_tree_view_append_column(GTK_TREE_VIEW(tree), column);
	text_renderer = gtk_cell_renderer_text_new();
	column = gtk_tree_view_column_new_with_attributes(NULL, text_renderer, "text", 1, NULL);
	gtk_tree_view_append_column(GTK_TREE_VIEW(tree), column);
	fill_shortcut_labels_treeview(tree);
	swin = gtk_scrolled_window_new(NULL, NULL);
	gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(swin), GTK_POLICY_NEVER,
		GTK_POLICY_AUTOMATIC);
	gtk_scrolled_window_set_shadow_type(GTK_SCROLLED_WINDOW(swin), GTK_SHADOW_ETCHED_IN);
	gtk_container_add(GTK_CONTAINER(swin), tree);
	gtk_box_pack_start(GTK_BOX(vbox), label, FALSE, FALSE, 6);
	gtk_box_pack_start(GTK_BOX(vbox), swin, TRUE, TRUE, 0);
	return dialog;
}
/* non-modal keyboard shortcuts dialog, so user can edit whilst seeing the shortcuts */
static GtkWidget *key_dialog = NULL;
/* Shortcuts-dialog response handler: "Edit" opens the preferences dialog on
 * the keybindings tab; any response closes the shortcuts dialog. */
static void on_dialog_response(GtkWidget *dialog, gint response, gpointer user_data)
{
	if (response == GTK_RESPONSE_APPLY)
	{
		GtkWidget *kb_frame;

		prefs_show_dialog();
		/* select the keybindings page in the preferences notebook */
		kb_frame = ui_lookup_widget(ui_widgets.prefs_dialog, "frame22");
		if (kb_frame != NULL)
		{
			GtkNotebook *notebook =
				GTK_NOTEBOOK(ui_lookup_widget(ui_widgets.prefs_dialog, "notebook2"));

			if (notebook != NULL)
				gtk_notebook_set_current_page(notebook,
					gtk_notebook_page_num(notebook, kb_frame));
		}
	}
	gtk_widget_destroy(dialog);
	key_dialog = NULL;
}
/* Show the non-modal keyboard shortcuts dialog, replacing any instance that
 * is still open so only one copy is ever on screen. */
void keybindings_show_shortcuts(void)
{
	if (key_dialog != NULL)
		gtk_widget_destroy(key_dialog);

	key_dialog = create_dialog();
	g_signal_connect(key_dialog, "response", G_CALLBACK(on_dialog_response), NULL);
	gtk_widget_show_all(key_dialog);
}
/* Handle hard-coded (non-configurable) keybindings.
 * Returns TRUE when the key was consumed, FALSE to let other handlers run. */
static gboolean check_fixed_kb(guint keyval, guint state)
{
	/* check alt-0 to alt-9 for setting current notebook page */
	if (state == GDK_MOD1_MASK && keyval >= GDK_0 && keyval <= GDK_9)
	{
		/* Alt-1 selects page 0, Alt-2 page 1, ... */
		gint page = keyval - GDK_0 - 1;
		gint npages = gtk_notebook_get_n_pages(GTK_NOTEBOOK(main_widgets.notebook));
		/* alt-0 is for the rightmost tab */
		if (keyval == GDK_0)
			page = npages - 1;
		/* invert the order if tabs are added on the other side */
		if (swap_alt_tab_order && ! file_prefs.tab_order_ltr)
			page = (npages - 1) - page;
		gtk_notebook_set_current_page(GTK_NOTEBOOK(main_widgets.notebook), page);
		return TRUE;
	}
	/* note: these are now overridden by default with move tab bindings */
	if (keyval == GDK_Page_Up || keyval == GDK_Page_Down)
	{
		/* switch to first or last document */
		if (state == (GDK_CONTROL_MASK | GDK_SHIFT_MASK))
		{
			if (keyval == GDK_Page_Up)
				gtk_notebook_set_current_page(GTK_NOTEBOOK(main_widgets.notebook), 0);
			if (keyval == GDK_Page_Down)
				gtk_notebook_set_current_page(GTK_NOTEBOOK(main_widgets.notebook), -1);
			return TRUE;
		}
	}
	return FALSE;
}
/* Try snippet completion at the cursor. Only acts when the document's
 * Scintilla widget has keyboard focus and snippet completion is enabled;
 * returns TRUE if a snippet was completed. */
static gboolean check_snippet_completion(GeanyDocument *doc)
{
	GtkWidget *focus_widget = gtk_window_get_focus(GTK_WINDOW(main_widgets.window));

	g_return_val_if_fail(doc, FALSE);

	/* keybinding only valid when scintilla widget has focus */
	if (focus_widget != GTK_WIDGET(doc->editor->sci))
		return FALSE;
	if (!editor_prefs.complete_snippets)
		return FALSE;

	return editor_complete_snippet(doc->editor,
		sci_get_current_position(doc->editor->sci));
}
/* Synthesize a right-click (button 3) press+release on the given widget so
 * its context menu pops up in response to a keyboard event.
 * NOTE(review): the event is allocated with g_new0()/g_free() rather than
 * gdk_event_new()/gdk_event_free(), so handlers must not take ownership of
 * it — confirm this is intentional before changing. */
static void trigger_button_event(GtkWidget *widget, guint32 event_time)
{
	GdkEventButton *event;
	gboolean ret;
	event = g_new0(GdkEventButton, 1);
	/* text views draw into a sub-window; plain widgets use their own window */
	if (GTK_IS_TEXT_VIEW(widget))
		event->window = gtk_text_view_get_window(GTK_TEXT_VIEW(widget), GTK_TEXT_WINDOW_TEXT);
	else
		event->window = gtk_widget_get_window(widget);
	event->time = event_time;
	event->type = GDK_BUTTON_PRESS;
	event->button = 3;	/* right mouse button */
	g_signal_emit_by_name(widget, "button-press-event", event, &ret);
	g_signal_emit_by_name(widget, "button-release-event", event, &ret);
	g_free(event);
}
/* Special case for the Menu key and Shift-F10 to show the right-click popup menu for various
 * widgets. Without this special handling, the notebook tab list of the documents' notebook
 * would be shown. As a very special case, we differentiate between the Menu key and Shift-F10
 * if pressed in the editor widget: the Menu key opens the popup menu, Shift-F10 opens the
 * notebook tab list. Returns TRUE when the keypress was handled here. */
static gboolean check_menu_key(GeanyDocument *doc, guint keyval, guint state, guint32 event_time)
{
	GtkWidget *focusw;
	gboolean is_menu_key = (keyval == GDK_Menu && state == 0);
	gboolean is_shift_f10 = (keyval == GDK_F10 && state == GDK_SHIFT_MASK);

	if (! is_menu_key && ! is_shift_f10)
		return FALSE;

	focusw = gtk_window_get_focus(GTK_WINDOW(main_widgets.window));
	if (doc != NULL)
	{
		if (focusw == doc->priv->tag_tree)
		{
			trigger_button_event(focusw, event_time);
			return TRUE;
		}
		if (focusw == GTK_WIDGET(doc->editor->sci))
		{
			/* Menu key: editor popup menu; Shift-F10: notebook tab bar menu */
			trigger_button_event(is_menu_key ? focusw : main_widgets.notebook,
				event_time);
			return TRUE;
		}
	}
	if (focusw == tv.tree_openfiles
		|| focusw == msgwindow.tree_status
		|| focusw == msgwindow.tree_compiler
		|| focusw == msgwindow.tree_msg
		|| focusw == msgwindow.scribble
#ifdef HAVE_VTE
		|| (vte_info.have_vte && focusw == vc->vte)
#endif
		)
	{
		trigger_button_event(focusw, event_time);
		return TRUE;
	}
	return FALSE;
}
#ifdef HAVE_VTE
/* "expose-event" handler: re-enables the menubar as soon as it is about to be
 * redrawn (it was disabled by check_vte() to suppress menu accelerators). */
static gboolean on_menu_expose_event(GtkWidget *widget, GdkEventExpose *event,
		gpointer user_data)
{
	gboolean sensitive = GTK_WIDGET_SENSITIVE(widget);

	if (! sensitive)
		gtk_widget_set_sensitive(GTK_WIDGET(widget), TRUE);
	return FALSE;	/* let the expose event propagate further */
}
/* GSourceFunc idle callback: re-enables @widget.
 * Returns FALSE so the idle source runs exactly once (added via g_idle_add()
 * in check_vte() after the editor menu was temporarily disabled). */
static gboolean set_sensitive(gpointer widget)
{
	gtk_widget_set_sensitive(GTK_WIDGET(widget), TRUE);
	return FALSE;
}
/* Decides whether a keypress should be passed through to the VTE widget
 * instead of triggering Geany keybindings or menu accelerators.
 * Returns TRUE when the key should go to the VTE (caller then returns FALSE
 * from the key-press handler so GTK delivers the event to the terminal). */
static gboolean check_vte(GdkModifierType state, guint keyval)
{
	guint i;
	GeanyKeyBinding *kb;
	GeanyKeyGroup *group;
	GtkWidget *widget;

	if (gtk_window_get_focus(GTK_WINDOW(main_widgets.window)) != vc->vte)
		return FALSE;
	/* let VTE copy/paste override any user keybinding */
	if (state == (GDK_CONTROL_MASK | GDK_SHIFT_MASK) && (keyval == GDK_c || keyval == GDK_v))
		return TRUE;
	if (! vc->enable_bash_keys)
		return FALSE;
	/* prevent menubar flickering: */
	if (state == GDK_SHIFT_MASK && (keyval >= GDK_a && keyval <= GDK_z))
		return FALSE;
	if (state == 0 && (keyval < GDK_F1 || keyval > GDK_F35))	/* e.g. backspace */
		return FALSE;

	/* make focus commands override any bash commands */
	group = keybindings_get_core_group(GEANY_KEY_GROUP_FOCUS);
	foreach_ptr_array(kb, i, group->key_items)
	{
		if (state == kb->mods && keyval == kb->key)
			return FALSE;
	}

	/* Temporarily disable the menus to prevent conflicting menu accelerators
	 * from overriding the VTE bash shortcuts.
	 * Note: maybe there's a better way of doing this ;-) */
	widget = ui_lookup_widget(main_widgets.window, "menubar1");
	gtk_widget_set_sensitive(widget, FALSE);
	{
		/* make the menubar sensitive before it is redrawn */
		static gboolean connected = FALSE;

		if (! connected)
		{
			g_signal_connect(widget, "expose-event",
				G_CALLBACK(on_menu_expose_event), NULL);
			/* fix: remember the connection — previously this flag was never
			 * set, so a new duplicate handler was attached on every keypress
			 * inside the VTE */
			connected = TRUE;
		}
	}
	widget = main_widgets.editor_menu;
	gtk_widget_set_sensitive(widget, FALSE);
	g_idle_add(set_sensitive, widget);
	return TRUE;
}
#endif
/* Map the keypad keys to their equivalent functions (taken from ScintillaGTK.cxx) */
static guint key_kp_translate(guint key_in)
{
	/* keypad keyval -> plain keyval lookup table */
	static const struct
	{
		guint kp;
		guint plain;
	} kp_map[] = {
		{ GDK_KP_Down,      GDK_Down },
		{ GDK_KP_Up,        GDK_Up },
		{ GDK_KP_Left,      GDK_Left },
		{ GDK_KP_Right,     GDK_Right },
		{ GDK_KP_Home,      GDK_Home },
		{ GDK_KP_End,       GDK_End },
		{ GDK_KP_Page_Up,   GDK_Page_Up },
		{ GDK_KP_Page_Down, GDK_Page_Down },
		{ GDK_KP_Delete,    GDK_Delete },
		{ GDK_KP_Insert,    GDK_Insert }
	};
	guint i;

	for (i = 0; i < G_N_ELEMENTS(kp_map); i++)
	{
		if (kp_map[i].kp == key_in)
			return kp_map[i].plain;
	}
	return key_in;	/* not a keypad key we translate */
}
/* Check if event keypress matches keybinding combo */
gboolean keybindings_check_event(GdkEventKey *ev, GeanyKeyBinding *kb)
{
	guint keyval = ev->keyval;
	guint state = ev->state & gtk_accelerator_get_default_mod_mask();

	if (keyval == 0)
		return FALSE;

	/* hack to get around that CTRL+Shift+r results in GDK_R not GDK_r */
	if (ev->state & (GDK_SHIFT_MASK | GDK_LOCK_MASK))
	{
		if (keyval >= GDK_A && keyval <= GDK_Z)
			keyval += GDK_a - GDK_A;
	}
	/* normalize keypad keys to their plain equivalents */
	if (keyval >= GDK_KP_Space && keyval < GDK_KP_Equal)
		keyval = key_kp_translate(keyval);

	return (keyval == kb->key && state == kb->mods);
}
/* Debug helper: dumps the stored modifier mask of the clipboard-paste binding
 * to stderr. */
void keybindings_debug_it(void) {
	int idx;

	for (idx = 0; idx < GEANY_KEYS_COUNT; ++idx) {
		if (binding_ids[idx].id != GEANY_KEYS_CLIPBOARD_PASTE)
			continue;
		fprintf(stderr, "@@@ after %d\n", binding_ids[idx].mods);
	}
}
/* central keypress event handler, almost all keypress events go to this function */
static gboolean on_key_press_event(GtkWidget *widget, GdkEventKey *ev, gpointer user_data)
{
	guint state, keyval;
	gsize g, i;
	GeanyDocument *doc;
	GeanyKeyGroup *group;
	GeanyKeyBinding *kb;

	if (ev->keyval == 0)
		return FALSE;

	doc = document_get_current();
	if (doc)
		document_check_disk_status(doc, FALSE);

	keyval = ev->keyval;
	// state = ev->state & (gtk_accelerator_get_default_mod_mask() | 16); // meta bit on MacOSX
	state = ev->state & gtk_accelerator_get_default_mod_mask();
	// geany_debug("key pressed: %d %d %d %d\n", ev->keyval, ev->state, gtk_accelerator_get_default_mod_mask(), state);
	/* hack to get around that CTRL+Shift+r results in GDK_R not GDK_r */
	if ((ev->state & GDK_SHIFT_MASK) || (ev->state & GDK_LOCK_MASK))
		if (keyval >= GDK_A && keyval <= GDK_Z)
			keyval += GDK_a - GDK_A;
	/* normalize keypad keys to their plain equivalents */
	if (keyval >= GDK_KP_Space && keyval < GDK_KP_Equal)
		keyval = key_kp_translate(keyval);
	/*geany_debug("%d (%d) %d (%d)", keyval, ev->keyval, state, ev->state);*/
	/* special cases */
#ifdef HAVE_VTE
	/* returning FALSE (not consumed) lets GTK deliver the key to the VTE */
	if (vte_info.have_vte && check_vte(state, keyval))
		return FALSE;
#endif
	if (check_menu_key(doc, keyval, state, ev->time))
		return TRUE;
	/* walk all keybinding groups (core and plugin) looking for a match */
	foreach_ptr_array(group, g, keybinding_groups)
	{
		foreach_ptr_array(kb, i, group->key_items)
		{
			if (keyval == kb->key && state == kb->mods)
			{
				/* call the corresponding callback function for this shortcut */
				if (kb->callback)
				{
					kb->callback(kb->id);
					return TRUE;
				}
				else if (group->callback)
				{
					if (group->callback(kb->id))
						return TRUE;
					else
						continue; /* not handled, keep searching other groups */
				}
				g_warning("No callback for keybinding %s: %s!", group->name, kb->name);
			}
		}
	}
	/* fixed keybindings can be overridden by user bindings, so check them last */
	if (check_fixed_kb(keyval, state))
		return TRUE;
	return FALSE;
}
/* group_id must be a core group, e.g. GEANY_KEY_GROUP_EDITOR
 * key_id e.g. GEANY_KEYS_EDITOR_CALLTIP.
 * Returns the keybinding item, or NULL when the group id is out of range. */
GeanyKeyBinding *keybindings_lookup_item(guint group_id, guint key_id)
{
	GeanyKeyGroup *group;

	g_return_val_if_fail(group_id < GEANY_KEY_GROUP_COUNT, NULL); /* can't use this for plugin groups */
	group = keybindings_get_core_group(group_id);
	g_return_val_if_fail(group, NULL);
	return keybindings_get_item(group, key_id);
}
/** Mimics a (built-in only) keybinding action.
 * Example: @code keybindings_send_command(GEANY_KEY_GROUP_FILE, GEANY_KEYS_FILE_OPEN); @endcode
 * @param group_id @ref GeanyKeyGroupID keybinding group index that contains the @a key_id keybinding.
 * @param key_id @ref GeanyKeyBindingID keybinding index. */
void keybindings_send_command(guint group_id, guint key_id)
{
	GeanyKeyBinding *kb = keybindings_lookup_item(group_id, key_id);

	if (kb == NULL)
		return;

	/* prefer the per-binding callback, fall back to the group callback */
	if (kb->callback)
		kb->callback(key_id);
	else
	{
		GeanyKeyGroup *group = keybindings_get_core_group(group_id);

		if (group->callback)
			group->callback(key_id);
	}
}
/* These are the callback functions, either each group or each shortcut has its
 * own function. */
/* Handles the GEANY_KEY_GROUP_FILE keybindings. Always returns TRUE. */
static gboolean cb_func_file_action(guint key_id)
{
	switch (key_id)
	{
		case GEANY_KEYS_FILE_NEW:
			document_new_file(NULL, NULL, NULL);
			break;
		case GEANY_KEYS_FILE_OPEN:
			on_open1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_FILE_OPENSELECTED:
			on_menu_open_selected_file1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_FILE_OPENLASTTAB:
		{
			gchar *utf8_filename = g_queue_peek_head(ui_prefs.recent_queue);
			gchar *locale_filename;

			/* fix: the recent files queue can be empty (e.g. a fresh
			 * configuration); g_queue_peek_head() then returns NULL which
			 * must not be passed on for conversion/opening */
			if (utf8_filename == NULL)
				break;
			locale_filename = utils_get_locale_from_utf8(utf8_filename);
			document_open_file(locale_filename, FALSE, NULL, NULL);
			g_free(locale_filename);
			break;
		}
		case GEANY_KEYS_FILE_SAVE:
			on_save1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_FILE_SAVEAS:
			on_save_as1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_FILE_SAVEALL:
			on_save_all1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_FILE_CLOSE:
			on_close1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_FILE_CLOSEALL:
			on_close_all1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_FILE_RELOAD:
			on_toolbutton_reload_clicked(NULL, NULL);
			break;
		case GEANY_KEYS_FILE_PRINT:
			on_print1_activate(NULL, NULL);
			break;
	}
	return TRUE;
}
/* Handles the project keybindings; close/properties only act when a project
 * is currently open. Always returns TRUE. */
static gboolean cb_func_project_action(guint key_id)
{
	if (key_id == GEANY_KEYS_PROJECT_NEW)
		on_project_new1_activate(NULL, NULL);
	else if (key_id == GEANY_KEYS_PROJECT_OPEN)
		on_project_open1_activate(NULL, NULL);
	else if (app->project != NULL)
	{
		if (key_id == GEANY_KEYS_PROJECT_CLOSE)
			on_project_close1_activate(NULL, NULL);
		else if (key_id == GEANY_KEYS_PROJECT_PROPERTIES)
			on_project_properties1_activate(NULL, NULL);
	}
	return TRUE;
}
}
/* Opens the (plugin) preferences dialog depending on the pressed binding. */
static void cb_func_menu_preferences(guint key_id)
{
	if (key_id == GEANY_KEYS_SETTINGS_PREFERENCES)
		on_preferences1_activate(NULL, NULL);
	else if (key_id == GEANY_KEYS_SETTINGS_PLUGINPREFERENCES)
		on_plugin_preferences1_activate(NULL, NULL);
}
/* Keybinding callback: opens the help; the key id is unused as this group
 * only contains the single help action. */
static void cb_func_menu_help(G_GNUC_UNUSED guint key_id)
{
	on_help1_activate(NULL, NULL);
}
/* Handles the search keybindings; most of them simply activate the matching
 * menu item. Always returns TRUE. */
static gboolean cb_func_search_action(guint key_id)
{
	GeanyDocument *doc = document_get_current();
	ScintillaObject *sci;

	/* Find in Files works even without an open document */
	if (key_id == GEANY_KEYS_SEARCH_FINDINFILES)
	{
		on_find_in_files1_activate(NULL, NULL);
		return TRUE;
	}
	if (doc == NULL)
		return TRUE;
	sci = doc->editor->sci;

	switch (key_id)
	{
		case GEANY_KEYS_SEARCH_FIND:
			on_find1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_SEARCH_FINDNEXT:
			on_find_next1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_SEARCH_FINDPREVIOUS:
			on_find_previous1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_SEARCH_FINDPREVSEL:
			on_find_prevsel1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_SEARCH_FINDNEXTSEL:
			on_find_nextsel1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_SEARCH_REPLACE:
			on_replace1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_SEARCH_NEXTMESSAGE:
			on_next_message1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_SEARCH_PREVIOUSMESSAGE:
			on_previous_message1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_SEARCH_FINDUSAGE:
			on_find_usage1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_SEARCH_FINDDOCUMENTUSAGE:
			on_find_document_usage1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_SEARCH_MARKALL:
		{
			gchar *text = get_current_word_or_sel(doc, TRUE);
			gint flags = SCFIND_MATCHCASE;

			/* with no selection, match whole words only; a NULL text
			 * clears the existing markers */
			if (! sci_has_selection(sci))
				flags |= SCFIND_WHOLEWORD;
			search_mark_all(doc, text, flags);
			g_free(text);
			break;
		}
	}
	return TRUE;
}
/* Keybinding callback: opens the color chooser dialog; the key id is unused
 * as this group only contains the single action. */
static void cb_func_menu_opencolorchooser(G_GNUC_UNUSED guint key_id)
{
	on_show_color_chooser1_activate(NULL, NULL);
}
/* Handles the view keybindings (widget visibility and zoom).
 * Always returns TRUE. */
static gboolean cb_func_view_action(guint key_id)
{
	if (key_id == GEANY_KEYS_VIEW_TOGGLEALL)
		on_menu_toggle_all_additional_widgets1_activate(NULL, NULL);
	else if (key_id == GEANY_KEYS_VIEW_SIDEBAR)
		on_menu_show_sidebar1_toggled(NULL, NULL);
	else if (key_id == GEANY_KEYS_VIEW_ZOOMIN)
		on_zoom_in1_activate(NULL, NULL);
	else if (key_id == GEANY_KEYS_VIEW_ZOOMOUT)
		on_zoom_out1_activate(NULL, NULL);
	else if (key_id == GEANY_KEYS_VIEW_ZOOMRESET)
		on_normal_size1_activate(NULL, NULL);
	return TRUE;
}
/* Toggles fullscreen by flipping the check menu item; the item's own toggled
 * handler performs the actual work. */
static void cb_func_menu_fullscreen(G_GNUC_UNUSED guint key_id)
{
	GtkWidget *widget = ui_lookup_widget(main_widgets.window, "menu_fullscreen1");
	GtkCheckMenuItem *item = GTK_CHECK_MENU_ITEM(widget);

	gtk_check_menu_item_set_active(item, ! gtk_check_menu_item_get_active(item));
}
/* Toggles the message window by flipping the check menu item; the item's own
 * toggled handler performs the actual work. */
static void cb_func_menu_messagewindow(G_GNUC_UNUSED guint key_id)
{
	GtkWidget *widget = ui_lookup_widget(main_widgets.window, "menu_show_messages_window1");
	GtkCheckMenuItem *item = GTK_CHECK_MENU_ITEM(widget);

	gtk_check_menu_item_set_active(item, ! gtk_check_menu_item_get_active(item));
}
/* Handles the build keybindings by activating the corresponding build menu
 * item (so its handler runs with the usual menu logic). Does nothing when no
 * document is open or the Build menu is insensitive. Always returns TRUE. */
static gboolean cb_func_build_action(guint key_id)
{
	GtkWidget *item;
	BuildMenuItems *menu_items;
	GeanyDocument *doc = document_get_current();

	if (doc == NULL)
		return TRUE;
	/* the whole Build menu being insensitive disables all build shortcuts */
	if (!GTK_WIDGET_IS_SENSITIVE(ui_lookup_widget(main_widgets.window, "menu_build1")))
		return TRUE;
	menu_items = build_get_menu_items(doc->file_type->id);
	/* TODO make it a table??*/
	switch (key_id)
	{
		case GEANY_KEYS_BUILD_COMPILE:
			item = menu_items->menu_item[GEANY_GBG_FT][GBO_TO_CMD(GEANY_GBO_COMPILE)];
			break;
		case GEANY_KEYS_BUILD_LINK:
			item = menu_items->menu_item[GEANY_GBG_FT][GBO_TO_CMD(GEANY_GBO_BUILD)];
			break;
		case GEANY_KEYS_BUILD_MAKE:
			item = menu_items->menu_item[GEANY_GBG_NON_FT][GBO_TO_CMD(GEANY_GBO_MAKE_ALL)];
			break;
		case GEANY_KEYS_BUILD_MAKEOWNTARGET:
			item = menu_items->menu_item[GEANY_GBG_NON_FT][GBO_TO_CMD(GEANY_GBO_CUSTOM)];
			break;
		case GEANY_KEYS_BUILD_MAKEOBJECT:
			item = menu_items->menu_item[GEANY_GBG_NON_FT][GBO_TO_CMD(GEANY_GBO_MAKE_OBJECT)];
			break;
		case GEANY_KEYS_BUILD_NEXTERROR:
			item = menu_items->menu_item[GBG_FIXED][GBF_NEXT_ERROR];
			break;
		case GEANY_KEYS_BUILD_PREVIOUSERROR:
			item = menu_items->menu_item[GBG_FIXED][GBF_PREV_ERROR];
			break;
		case GEANY_KEYS_BUILD_RUN:
			item = menu_items->menu_item[GEANY_GBG_EXEC][GBO_TO_CMD(GEANY_GBO_EXEC)];
			break;
		case GEANY_KEYS_BUILD_OPTIONS:
			item = menu_items->menu_item[GBG_FIXED][GBF_COMMANDS];
			break;
		default:
			/* not a build keybinding id; fall through with no item */
			item = NULL;
	}
	/* Note: For Build menu items it's OK (at the moment) to assume they are in the correct
	 * sensitive state, but some other menus don't update the sensitive status until
	 * they are redrawn. */
	if (item && GTK_WIDGET_IS_SENSITIVE(item))
		gtk_menu_item_activate(GTK_MENU_ITEM(item));
	return TRUE;
}
/* Reads the word at the caret into editor_info.current_word, either with
 * Scintilla's word-chars rules (@sci_word TRUE) or Geany's own wordchars.
 * Returns TRUE when a non-empty word was found. */
static gboolean read_current_word(GeanyDocument *doc, gboolean sci_word)
{
	if (doc == NULL)
		return FALSE;

	if (sci_word)
		editor_find_current_word_sciwc(doc->editor, -1,
			editor_info.current_word, GEANY_MAX_WORD_LENGTH);
	else
		editor_find_current_word(doc->editor, -1,
			editor_info.current_word, GEANY_MAX_WORD_LENGTH, NULL);

	return editor_info.current_word[0] != '\0';
}
/* Like read_current_word() but beeps when no word could be read. */
static gboolean check_current_word(GeanyDocument *doc, gboolean sci_word)
{
	gboolean found = read_current_word(doc, sci_word);

	if (! found)
		utils_beep();
	return found;
}
/* Returns a newly allocated copy of the selection if any, otherwise of the
 * word at the caret; NULL when neither is available. Caller must g_free(). */
static gchar *get_current_word_or_sel(GeanyDocument *doc, gboolean sci_word)
{
	ScintillaObject *sci = doc->editor->sci;

	if (sci_has_selection(sci))
		return sci_get_selection_contents(sci);
	if (read_current_word(doc, sci_word))
		return g_strdup(editor_info.current_word);
	return NULL;
}
static void focus_sidebar(void)
{
if (ui_prefs.sidebar_visible)
{
gint page_num = gtk_notebook_get_current_page(GTK_NOTEBOOK(main_widgets.sidebar_notebook));
GtkWidget *page = gtk_notebook_get_nth_page(GTK_NOTEBOOK(main_widgets.sidebar_notebook), page_num);
/* gtk_widget_grab_focus() won't work because of the scrolled window containers */
gtk_widget_child_focus(page, GTK_DIR_TAB_FORWARD);
}
}
static void focus_msgwindow(void)
{
if (ui_prefs.msgwindow_visible)
{
gint page_num = gtk_notebook_get_current_page(GTK_NOTEBOOK(msgwindow.notebook));
GtkWidget *page = gtk_notebook_get_nth_page(GTK_NOTEBOOK(msgwindow.notebook), page_num);
gtk_widget_grab_focus(gtk_bin_get_child(GTK_BIN(page)));
}
}
/* Handles the GEANY_KEY_GROUP_FOCUS keybindings: moves the keyboard focus to
 * one of the main UI widgets. Always returns TRUE. */
static gboolean cb_func_switch_action(guint key_id)
{
	switch (key_id)
	{
		case GEANY_KEYS_FOCUS_EDITOR:
		{
			GeanyDocument *doc = document_get_current();
			if (doc != NULL)
			{
				GtkWidget *sci = GTK_WIDGET(doc->editor->sci);
				/* pressing the shortcut again while the editor already has
				 * focus refreshes the statusbar instead */
				if (GTK_WIDGET_HAS_FOCUS(sci))
					ui_update_statusbar(doc, -1);
				else
					gtk_widget_grab_focus(sci);
			}
			break;
		}
		case GEANY_KEYS_FOCUS_SCRIBBLE:
			msgwin_switch_tab(MSG_SCRATCH, TRUE);
			break;
		case GEANY_KEYS_FOCUS_SEARCHBAR:
			if (toolbar_prefs.visible)
			{
				GtkWidget *search_entry = toolbar_get_widget_child_by_name("SearchEntry");
				/* the search entry may have been removed from the toolbar */
				if (search_entry != NULL)
					gtk_widget_grab_focus(search_entry);
			}
			break;
		case GEANY_KEYS_FOCUS_SIDEBAR:
			focus_sidebar();
			break;
		case GEANY_KEYS_FOCUS_VTE:
			msgwin_switch_tab(MSG_VTE, TRUE);
			break;
		case GEANY_KEYS_FOCUS_COMPILER:
			msgwin_switch_tab(MSG_COMPILER, TRUE);
			break;
		case GEANY_KEYS_FOCUS_MESSAGES:
			msgwin_switch_tab(MSG_MESSAGE, TRUE);
			break;
		case GEANY_KEYS_FOCUS_MESSAGE_WINDOW:
			focus_msgwindow();
			break;
		case GEANY_KEYS_FOCUS_SIDEBAR_DOCUMENT_LIST:
			sidebar_focus_openfiles_tab();
			break;
		case GEANY_KEYS_FOCUS_SIDEBAR_SYMBOL_LIST:
			sidebar_focus_symbols_tab();
			break;
	}
	return TRUE;
}
/* Switches to the previous/next page of the notebook containing the focused
 * widget (falling back to the documents notebook), wrapping around at either
 * end. @direction is GTK_DIR_LEFT or GTK_DIR_RIGHT. */
static void switch_notebook_page(gint direction)
{
	gint page_count, cur_page;
	gboolean parent_is_notebook = FALSE;
	GtkNotebook *notebook;
	GtkWidget *focusw = gtk_window_get_focus(GTK_WINDOW(main_widgets.window));

	/* check whether the current widget is a GtkNotebook or a child of a GtkNotebook.
	 * fix: guard against a NULL focus widget — the old do/while called
	 * gtk_widget_get_parent(NULL) which triggers a GLib critical warning */
	while (focusw != NULL)
	{
		if (GTK_IS_NOTEBOOK(focusw))
		{
			parent_is_notebook = TRUE;
			break;
		}
		focusw = gtk_widget_get_parent(focusw);
	}
	/* if we found a GtkNotebook widget, use it. Otherwise fallback to the documents notebook */
	if (parent_is_notebook)
		notebook = GTK_NOTEBOOK(focusw);
	else
		notebook = GTK_NOTEBOOK(main_widgets.notebook);

	/* now switch pages, wrapping around at the first/last page */
	page_count = gtk_notebook_get_n_pages(notebook);
	cur_page = gtk_notebook_get_current_page(notebook);
	if (direction == GTK_DIR_LEFT)
	{
		gtk_notebook_set_current_page(notebook,
			cur_page > 0 ? cur_page - 1 : page_count - 1);
	}
	else if (direction == GTK_DIR_RIGHT)
	{
		gtk_notebook_set_current_page(notebook,
			cur_page < page_count - 1 ? cur_page + 1 : 0);
	}
}
/* Keybinding callback: switch to the notebook page left of the current one. */
static void cb_func_switch_tableft(G_GNUC_UNUSED guint key_id)
{
	switch_notebook_page(GTK_DIR_LEFT);
}
/* Keybinding callback: switch to the notebook page right of the current one. */
static void cb_func_switch_tabright(G_GNUC_UNUSED guint key_id)
{
	switch_notebook_page(GTK_DIR_RIGHT);
}
/* Keybinding callback: switch to the most recently used document tab. */
static void cb_func_switch_tablastused(G_GNUC_UNUSED guint key_id)
{
	notebook_switch_tablastused();
}
/* move document left/right/first/last */
static void cb_func_move_tab(guint key_id)
{
	GtkWidget *sci;
	GtkNotebook *nb = GTK_NOTEBOOK(main_widgets.notebook);
	gint cur_page = gtk_notebook_get_current_page(nb);
	gint new_page;
	GeanyDocument *doc = document_get_current();

	if (doc == NULL)
		return;
	sci = GTK_WIDGET(doc->editor->sci);

	switch (key_id)
	{
		case GEANY_KEYS_NOTEBOOK_MOVETABLEFT:
			new_page = cur_page - 1;	/* notebook wraps around by default */
			break;
		case GEANY_KEYS_NOTEBOOK_MOVETABRIGHT:
			new_page = cur_page + 1;
			if (new_page == gtk_notebook_get_n_pages(nb))
				new_page = 0;	/* wraparound */
			break;
		case GEANY_KEYS_NOTEBOOK_MOVETABFIRST:
			new_page = file_prefs.tab_order_ltr ? 0 : -1;
			break;
		case GEANY_KEYS_NOTEBOOK_MOVETABLAST:
			new_page = file_prefs.tab_order_ltr ? -1 : 0;
			break;
		default:
			return;	/* not a move-tab binding */
	}
	gtk_notebook_reorder_child(nb, sci, new_page);
}
/* Jumps the caret to the brace matching the one at (or just before) the
 * caret, keeping the caret on the same side of the matched brace. */
static void goto_matching_brace(GeanyDocument *doc)
{
	gint pos, match_pos, after_brace;

	if (doc == NULL)
		return;

	pos = sci_get_current_position(doc->editor->sci);
	/* when the caret sits just after a brace, operate on that brace */
	after_brace = (pos > 0 && utils_isbrace(sci_get_char_at(doc->editor->sci, pos - 1), TRUE));
	if (after_brace)
		pos--;

	match_pos = sci_find_matching_brace(doc->editor->sci, pos);
	if (match_pos == -1)
		return;	/* no matching brace found */

	sci_set_current_position(doc->editor->sci,
		after_brace ? match_pos : match_pos + 1, FALSE);
	editor_display_current_line(doc->editor, 0.5F);
}
/* Handles the clipboard keybindings. Always returns TRUE. */
static gboolean cb_func_clipboard_action(guint key_id)
{
	GeanyDocument *doc = document_get_current();

	if (doc == NULL)
		return TRUE;

	if (key_id == GEANY_KEYS_CLIPBOARD_CUT)
		on_cut1_activate(NULL, NULL);
	else if (key_id == GEANY_KEYS_CLIPBOARD_COPY)
		on_copy1_activate(NULL, NULL);
	else if (key_id == GEANY_KEYS_CLIPBOARD_PASTE)
		on_paste1_activate(NULL, NULL);
	else if (key_id == GEANY_KEYS_CLIPBOARD_COPYLINE)
		sci_send_command(doc->editor->sci, SCI_LINECOPY);
	else if (key_id == GEANY_KEYS_CLIPBOARD_CUTLINE)
		sci_send_command(doc->editor->sci, SCI_LINECUT);
	return TRUE;
}
/* Jumps to the definition (@definition TRUE) or declaration of the current
 * word/selection; beeps when there is no word at the caret. */
static void goto_tag(GeanyDocument *doc, gboolean definition)
{
	gchar *text = get_current_word_or_sel(doc, FALSE);

	if (text == NULL)
	{
		utils_beep();
		return;
	}
	symbols_goto_tag(text, definition);
	g_free(text);
}
/* Common function for goto keybindings, useful even when sci doesn't have focus. */
static gboolean cb_func_goto_action(guint key_id)
{
	gint cur_line;
	GeanyDocument *doc = document_get_current();

	if (doc == NULL)
		return TRUE;

	cur_line = sci_get_current_line(doc->editor->sci);
	/* these bindings work regardless of which widget has the focus */
	switch (key_id)
	{
		case GEANY_KEYS_GOTO_BACK:
			navqueue_go_back();
			return TRUE;
		case GEANY_KEYS_GOTO_FORWARD:
			navqueue_go_forward();
			return TRUE;
		case GEANY_KEYS_GOTO_LINE:
		{
			if (toolbar_prefs.visible)
			{
				GtkWidget *wid = toolbar_get_widget_child_by_name("GotoEntry");
				/* use toolbar item if shown & not in the drop down overflow menu */
				if (wid && GTK_WIDGET_MAPPED(wid))
				{
					gtk_widget_grab_focus(wid);
					return TRUE;
				}
			}
			/* otherwise fall back to the Go To Line dialog */
			on_go_to_line_activate(NULL, NULL);
			return TRUE;
		}
		case GEANY_KEYS_GOTO_MATCHINGBRACE:
			goto_matching_brace(doc);
			return TRUE;
		case GEANY_KEYS_GOTO_TOGGLEMARKER:
		{
			/* marker number 1 is used for the user-set bookmarks here */
			sci_toggle_marker_at_line(doc->editor->sci, cur_line, 1);
			return TRUE;
		}
		case GEANY_KEYS_GOTO_NEXTMARKER:
		{
			gint mline = sci_marker_next(doc->editor->sci, cur_line + 1, 1 << 1, TRUE);
			if (mline != -1)
			{
				sci_set_current_line(doc->editor->sci, mline);
				editor_display_current_line(doc->editor, 0.5F);
			}
			return TRUE;
		}
		case GEANY_KEYS_GOTO_PREVIOUSMARKER:
		{
			gint mline = sci_marker_previous(doc->editor->sci, cur_line - 1, 1 << 1, TRUE);
			if (mline != -1)
			{
				sci_set_current_line(doc->editor->sci, mline);
				editor_display_current_line(doc->editor, 0.5F);
			}
			return TRUE;
		}
		case GEANY_KEYS_GOTO_TAGDEFINITION:
			goto_tag(doc, TRUE);
			return TRUE;
		case GEANY_KEYS_GOTO_TAGDECLARATION:
			goto_tag(doc, FALSE);
			return TRUE;
	}
	/* only check editor-sensitive keybindings when editor has focus so home,end still
	 * work in other widgets */
	if (gtk_window_get_focus(GTK_WINDOW(main_widgets.window)) != GTK_WIDGET(doc->editor->sci))
		return FALSE;

	switch (key_id)
	{
		case GEANY_KEYS_GOTO_LINESTART:
			sci_send_command(doc->editor->sci, editor_prefs.smart_home_key ? SCI_VCHOME : SCI_HOME);
			break;
		case GEANY_KEYS_GOTO_LINEEND:
			sci_send_command(doc->editor->sci, SCI_LINEEND);
			break;
		case GEANY_KEYS_GOTO_LINEENDVISUAL:
			sci_send_command(doc->editor->sci, SCI_LINEENDDISPLAY);
			break;
		case GEANY_KEYS_GOTO_PREVWORDPART:
			sci_send_command(doc->editor->sci, SCI_WORDPARTLEFT);
			break;
		case GEANY_KEYS_GOTO_NEXTWORDPART:
			sci_send_command(doc->editor->sci, SCI_WORDPARTRIGHT);
			break;
	}
	return TRUE;
}
/* Duplicates the current selection, or the current line when nothing is
 * selected; multi-line selections are first expanded to whole lines. */
static void duplicate_lines(GeanyEditor *editor)
{
	ScintillaObject *sci = editor->sci;

	if (sci_get_lines_selected(sci) > 1)
	{
		/* ignore extra_line because of selecting lines from the line number column */
		editor_select_lines(editor, FALSE);
		sci_selection_duplicate(sci);
	}
	else if (sci_has_selection(sci))
		sci_selection_duplicate(sci);
	else
		sci_line_duplicate(sci);
}
/* Deletes all lines covered by the current selection (or the caret line). */
static void delete_lines(GeanyEditor *editor)
{
	editor_select_lines(editor, TRUE); /* include last line (like cut lines, copy lines do) */
	sci_clear(editor->sci); /* (SCI_LINEDELETE only does 1 line) */
}
/* common function for editor keybindings, only valid when scintilla has focus. */
static gboolean cb_func_editor_action(guint key_id)
{
	GeanyDocument *doc = document_get_current();
	GtkWidget *focusw = gtk_window_get_focus(GTK_WINDOW(main_widgets.window));

	/* edit keybindings only valid when scintilla widget has focus */
	if (doc == NULL || focusw != GTK_WIDGET(doc->editor->sci))
		return FALSE; /* also makes tab work outside editor */

	switch (key_id)
	{
		case GEANY_KEYS_EDITOR_UNDO:
			on_undo1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_EDITOR_REDO:
			on_redo1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_EDITOR_SCROLLTOLINE:
			/* -1 means the current line; 0.5F centers it vertically */
			editor_scroll_to_line(doc->editor, -1, 0.5F);
			break;
		case GEANY_KEYS_EDITOR_SCROLLLINEUP:
			sci_send_command(doc->editor->sci, SCI_LINESCROLLUP);
			break;
		case GEANY_KEYS_EDITOR_SCROLLLINEDOWN:
			sci_send_command(doc->editor->sci, SCI_LINESCROLLDOWN);
			break;
		case GEANY_KEYS_EDITOR_DUPLICATELINE:
			duplicate_lines(doc->editor);
			break;
		case GEANY_KEYS_EDITOR_SNIPPETNEXTCURSOR:
			editor_goto_next_snippet_cursor(doc->editor);
			break;
		case GEANY_KEYS_EDITOR_DELETELINE:
			delete_lines(doc->editor);
			break;
		case GEANY_KEYS_EDITOR_DELETELINETOEND:
			sci_send_command(doc->editor->sci, SCI_DELLINERIGHT);
			break;
		case GEANY_KEYS_EDITOR_TRANSPOSELINE:
			sci_send_command(doc->editor->sci, SCI_LINETRANSPOSE);
			break;
		case GEANY_KEYS_EDITOR_AUTOCOMPLETE:
			editor_start_auto_complete(doc->editor, sci_get_current_position(doc->editor->sci), TRUE);
			break;
		case GEANY_KEYS_EDITOR_CALLTIP:
			editor_show_calltip(doc->editor, -1);
			break;
		case GEANY_KEYS_EDITOR_MACROLIST:
			editor_show_macro_list(doc->editor);
			break;
		case GEANY_KEYS_EDITOR_CONTEXTACTION:
			/* only run the context action when there is a word at the caret */
			if (check_current_word(doc, FALSE))
				on_context_action1_activate(GTK_MENU_ITEM(ui_lookup_widget(main_widgets.editor_menu,
					"context_action1")), NULL);
			break;
		case GEANY_KEYS_EDITOR_COMPLETESNIPPET:
			/* allow tab to be overloaded */
			return check_snippet_completion(doc);
		case GEANY_KEYS_EDITOR_SUPPRESSSNIPPETCOMPLETION:
		{
			/* insert the key the snippet binding is bound to literally,
			 * bypassing snippet completion */
			GeanyKeyBinding *kb = keybindings_lookup_item(GEANY_KEY_GROUP_EDITOR,
				GEANY_KEYS_EDITOR_COMPLETESNIPPET);
			switch (kb->key)
			{
				case GDK_space:
					sci_add_text(doc->editor->sci, " ");
					break;
				case GDK_Tab:
					sci_send_command(doc->editor->sci, SCI_TAB);
					break;
				default:
					break;
			}
			break;
		}
		case GEANY_KEYS_EDITOR_WORDPARTCOMPLETION:
			return editor_complete_word_part(doc->editor);
		case GEANY_KEYS_EDITOR_MOVELINEUP:
			sci_move_selected_lines_up(doc->editor->sci);
			break;
		case GEANY_KEYS_EDITOR_MOVELINEDOWN:
			sci_move_selected_lines_down(doc->editor->sci);
			break;
	}
	return TRUE;
}
/* Joins the lines covered by the current selection into one line, stripping
 * the whitespace that would otherwise end up inside the joined text. */
static void join_lines(GeanyEditor *editor)
{
	gint start, end, i;

	/* first and last line touched by the selection */
	start = sci_get_line_from_position(editor->sci,
		sci_get_selection_start(editor->sci));
	end = sci_get_line_from_position(editor->sci,
		sci_get_selection_end(editor->sci));

	/* remove spaces surrounding the lines so that these spaces
	 * won't appear within text after joining */
	for (i = start; i < end; i++)
		editor_strip_line_trailing_spaces(editor, i);
	for (i = start + 1; i <= end; i++)
		sci_set_line_indentation(editor->sci, i, 0);

	/* target the whole span and let Scintilla perform the join */
	sci_set_target_start(editor->sci,
		sci_get_position_from_line(editor->sci, start));
	sci_set_target_end(editor->sci,
		sci_get_position_from_line(editor->sci, end));
	sci_lines_join(editor->sci);
}
/* Determines the column used for reflowing: the line-break column when line
 * breaking is enabled, otherwise the long-line marker column; -1 when no
 * usable column is configured (long_line_type == 2 means disabled here). */
static gint get_reflow_column(GeanyEditor *editor)
{
	const GeanyEditorPrefs *eprefs = editor_get_prefs(editor);

	if (editor->line_breaking)
		return eprefs->line_break_column;
	if (eprefs->long_line_type != 2)
		return eprefs->long_line_column;
	return -1;	/* do nothing */
}
/* Re-wraps the selected text to @column: joins a multi-line selection first,
 * then splits it again at the column, preserving the original indentation on
 * every resulting line. */
static void reflow_lines(GeanyEditor *editor, gint column)
{
	gint start, indent, linescount, i;

	start = sci_get_line_from_position(editor->sci,
		sci_get_selection_start(editor->sci));

	/* if several lines are selected, join them. */
	if (sci_get_lines_selected(editor->sci) > 1)
		join_lines(editor);

	/* if this line is short enough, do nothing */
	if (column > sci_get_line_end_position(editor->sci, start) -
		sci_get_position_from_line(editor->sci, start))
	{
		return;
	}

	/*
	 * We have to manipulate line indentation so that indentation
	 * of the resulting lines would be consistent. For example,
	 * the result of splitting "[TAB]very long content":
	 *
	 * +-------------+-------------+
	 * |   proper    |    wrong    |
	 * +-------------+-------------+
	 * | [TAB]very   | [TAB]very   |
	 * | [TAB]long   | long        |
	 * | [TAB]content| content     |
	 * +-------------+-------------+
	 */
	indent = sci_get_line_indentation(editor->sci, start);
	sci_set_line_indentation(editor->sci, start, 0);

	sci_target_from_selection(editor->sci);
	linescount = sci_get_line_count(editor->sci);
	/* split at the pixel width of (column - indent) space characters */
	sci_lines_split(editor->sci,
		(column - indent) * sci_text_width(editor->sci, STYLE_DEFAULT, " "));
	/* use lines count to determine how many lines appeared after splitting */
	linescount = sci_get_line_count(editor->sci) - linescount;

	/* Fix indentation. */
	for (i = start; i <= start + linescount; i++)
		sci_set_line_indentation(editor->sci, i, indent);

	/* Remove trailing spaces. */
	if (editor_prefs.newline_strip || file_prefs.strip_trailing_spaces)
	{
		for (i = start; i <= start + linescount; i++)
			editor_strip_line_trailing_spaces(editor, i);
	}
}
/* deselect last newline of selection, if any */
static void sci_deselect_last_newline(ScintillaObject *sci)
{
	gint sel_start = sci_get_selection_start(sci);
	gint sel_end = sci_get_selection_end(sci);

	/* a selection ending at column 0 includes a trailing newline */
	if (sel_end > sel_start && sci_get_col_from_position(sci, sel_end) == 0)
	{
		gint prev_line = sci_get_line_from_position(sci, sel_end - 1);

		sci_set_selection(sci, sel_start,
			sci_get_line_end_position(sci, prev_line));
	}
}
static void reflow_paragraph(GeanyEditor *editor)
{
ScintillaObject *sci = editor->sci;
gboolean sel;
gint column;
column = get_reflow_column(editor);
if (column == -1)
{
utils_beep();
return;
}
sci_start_undo_action(sci);
sel = sci_has_selection(sci);
if (!sel)
editor_select_indent_block(editor);
sci_deselect_last_newline(sci);
reflow_lines(editor, column);
if (!sel)
sci_set_anchor(sci, -1);
sci_end_undo_action(sci);
}
static void join_paragraph(GeanyEditor *editor)
{
ScintillaObject *sci = editor->sci;
gboolean sel;
gint column;
column = get_reflow_column(editor);
if (column == -1)
{
utils_beep();
return;
}
sci_start_undo_action(sci);
sel = sci_has_selection(sci);
if (!sel)
editor_select_indent_block(editor);
sci_deselect_last_newline(sci);
join_lines(editor);
if (!sel)
sci_set_anchor(sci, -1);
sci_end_undo_action(sci);
}
/* common function for format keybindings, only valid when scintilla has focus. */
static gboolean cb_func_format_action(guint key_id)
{
	GeanyDocument *doc = document_get_current();
	GtkWidget *focusw = gtk_window_get_focus(GTK_WINDOW(main_widgets.window));

	/* keybindings only valid when scintilla widget has focus */
	if (doc == NULL || focusw != GTK_WIDGET(doc->editor->sci))
		return TRUE;

	switch (key_id)
	{
		case GEANY_KEYS_FORMAT_COMMENTLINETOGGLE:
			on_menu_toggle_line_commentation1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_FORMAT_COMMENTLINE:
			on_menu_comment_line1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_FORMAT_UNCOMMENTLINE:
			on_menu_uncomment_line1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_FORMAT_INCREASEINDENT:
			on_menu_increase_indent1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_FORMAT_DECREASEINDENT:
			on_menu_decrease_indent1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_FORMAT_INCREASEINDENTBYSPACE:
			editor_indentation_by_one_space(doc->editor, -1, FALSE);
			break;
		case GEANY_KEYS_FORMAT_DECREASEINDENTBYSPACE:
			editor_indentation_by_one_space(doc->editor, -1, TRUE);
			break;
		case GEANY_KEYS_FORMAT_AUTOINDENT:
			editor_smart_line_indentation(doc->editor, -1);
			break;
		case GEANY_KEYS_FORMAT_TOGGLECASE:
			on_toggle_case1_activate(NULL, NULL);
			break;
		/* the custom command bindings guard against fewer commands being
		 * configured than the binding refers to */
		case GEANY_KEYS_FORMAT_SENDTOCMD1:
			if (ui_prefs.custom_commands && g_strv_length(ui_prefs.custom_commands) > 0)
				tools_execute_custom_command(doc, ui_prefs.custom_commands[0]);
			break;
		case GEANY_KEYS_FORMAT_SENDTOCMD2:
			if (ui_prefs.custom_commands && g_strv_length(ui_prefs.custom_commands) > 1)
				tools_execute_custom_command(doc, ui_prefs.custom_commands[1]);
			break;
		case GEANY_KEYS_FORMAT_SENDTOCMD3:
			if (ui_prefs.custom_commands && g_strv_length(ui_prefs.custom_commands) > 2)
				tools_execute_custom_command(doc, ui_prefs.custom_commands[2]);
			break;
		case GEANY_KEYS_FORMAT_SENDTOVTE:
			on_send_selection_to_vte1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_FORMAT_REFLOWPARAGRAPH:
			reflow_paragraph(doc->editor);
			break;
		case GEANY_KEYS_FORMAT_JOINLINES:
			join_paragraph(doc->editor);
			break;
	}
	return TRUE;
}
/* common function for select keybindings, only valid when scintilla has focus. */
static gboolean cb_func_select_action(guint key_id)
{
	GeanyDocument *doc;
	ScintillaObject *sci;
	GtkWidget *focusw = gtk_window_get_focus(GTK_WINDOW(main_widgets.window));
	GtkWidget *toolbar_search_entry = toolbar_get_widget_child_by_name("SearchEntry");
	GtkWidget *toolbar_goto_entry = toolbar_get_widget_child_by_name("GotoEntry");

	/* Select All is redirected to whichever non-editor widget has the focus */
	/* special case for Select All in the scribble widget */
	if (key_id == GEANY_KEYS_SELECT_ALL && focusw == msgwindow.scribble)
	{
		g_signal_emit_by_name(msgwindow.scribble, "select-all", TRUE);
		return TRUE;
	}
	/* special case for Select All in the VTE widget */
#ifdef HAVE_VTE
	else if (key_id == GEANY_KEYS_SELECT_ALL && vte_info.have_vte && focusw == vc->vte)
	{
		vte_select_all();
		return TRUE;
	}
#endif
	/* special case for Select All in the toolbar search widget */
	else if (key_id == GEANY_KEYS_SELECT_ALL && focusw == toolbar_search_entry)
	{
		gtk_editable_select_region(GTK_EDITABLE(toolbar_search_entry), 0, -1);
		return TRUE;
	}
	else if (key_id == GEANY_KEYS_SELECT_ALL && focusw == toolbar_goto_entry)
	{
		gtk_editable_select_region(GTK_EDITABLE(toolbar_goto_entry), 0, -1);
		return TRUE;
	}

	doc = document_get_current();
	/* keybindings only valid when scintilla widget has focus */
	if (doc == NULL || focusw != GTK_WIDGET(doc->editor->sci))
		return TRUE;
	sci = doc->editor->sci;

	switch (key_id)
	{
		case GEANY_KEYS_SELECT_ALL:
			on_menu_select_all1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_SELECT_WORD:
			editor_select_word(doc->editor);
			break;
		case GEANY_KEYS_SELECT_LINE:
			editor_select_lines(doc->editor, FALSE);
			break;
		case GEANY_KEYS_SELECT_PARAGRAPH:
			editor_select_paragraph(doc->editor);
			break;
		case GEANY_KEYS_SELECT_WORDPARTLEFT:
			sci_send_command(sci, SCI_WORDPARTLEFTEXTEND);
			break;
		case GEANY_KEYS_SELECT_WORDPARTRIGHT:
			sci_send_command(sci, SCI_WORDPARTRIGHTEXTEND);
			break;
	}
	return TRUE;
}
/* Common handler for the Document keybindings; applies the requested
 * operation to the current document.  Always returns TRUE so the key press
 * is swallowed even when nothing was done. */
static gboolean cb_func_document_action(guint key_id)
{
	GeanyDocument *doc = document_get_current();

	if (doc == NULL)
		return TRUE;

	switch (key_id)
	{
		case GEANY_KEYS_DOCUMENT_REPLACETABS:
			on_replace_tabs_activate(NULL, NULL);
			break;
		case GEANY_KEYS_DOCUMENT_REPLACESPACES:
			on_replace_spaces_activate(NULL, NULL);
			break;
		case GEANY_KEYS_DOCUMENT_LINEBREAK:
			on_line_breaking1_activate(NULL, NULL);
			ui_document_show_hide(doc);
			break;
		case GEANY_KEYS_DOCUMENT_LINEWRAP:
			on_line_wrapping1_toggled(NULL, NULL);
			ui_document_show_hide(doc);
			break;
		case GEANY_KEYS_DOCUMENT_RELOADTAGLIST:
			document_update_tags(doc);
			break;
		case GEANY_KEYS_DOCUMENT_FOLDALL:
			editor_fold_all(doc->editor);
			break;
		case GEANY_KEYS_DOCUMENT_UNFOLDALL:
			editor_unfold_all(doc->editor);
			break;
		case GEANY_KEYS_DOCUMENT_TOGGLEFOLD:
			if (editor_prefs.folding)
			{
				gint line = sci_get_current_line(doc->editor->sci);
				editor_toggle_fold(doc->editor, line, 0);
			}
			/* Bug fix: the break was previously inside the if-block, so with
			 * folding disabled this case fell through and removed markers. */
			break;
		case GEANY_KEYS_DOCUMENT_REMOVE_MARKERS:
			on_remove_markers1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_DOCUMENT_REMOVE_ERROR_INDICATORS:
			on_menu_remove_indicators1_activate(NULL, NULL);
			break;
		case GEANY_KEYS_DOCUMENT_REMOVE_MARKERS_INDICATORS:
			on_remove_markers1_activate(NULL, NULL);
			on_menu_remove_indicators1_activate(NULL, NULL);
			break;
	}
	return TRUE;
}
/* Open a new line below the caret's line and move the caret onto it. */
static void insert_line_after(GeanyEditor *editor)
{
	/* Jump to the end of the current line, then break it there. */
	sci_send_command(editor->sci, SCI_LINEEND);
	sci_send_command(editor->sci, SCI_NEWLINE);
}
/* Open a new line above the caret's line, preserving its indentation, and
 * move the caret onto the new empty line. */
static void insert_line_before(GeanyEditor *editor)
{
	ScintillaObject *view = editor->sci;
	gint cur_line = sci_get_current_line(view);

	/* Break the line right at the first non-indentation character, then
	 * step back up onto the freshly created line. */
	sci_set_current_position(view, sci_get_line_indent_position(view, cur_line), TRUE);
	sci_send_command(view, SCI_NEWLINE);
	sci_send_command(view, SCI_LINEUP);
}
/* common function for insert keybindings, only valid when scintilla has focus. */
/* Common handler for the Insert keybindings, only valid when the Scintilla
 * widget has focus.  Always returns TRUE so the key press is swallowed. */
static gboolean cb_func_insert_action(guint key_id)
{
	GeanyDocument *doc = document_get_current();
	GtkWidget *focusw = gtk_window_get_focus(GTK_WINDOW(main_widgets.window));

	/* keybindings only valid when scintilla widget has focus */
	if (doc == NULL || focusw != GTK_WIDGET(doc->editor->sci))
		return TRUE;

	switch (key_id)
	{
		case GEANY_KEYS_INSERT_ALTWHITESPACE:
			editor_insert_alternative_whitespace(doc->editor);
			break;
		case GEANY_KEYS_INSERT_DATE:
			/* trigger the menu item so its existing handler does the work */
			gtk_menu_item_activate(GTK_MENU_ITEM(
				ui_lookup_widget(main_widgets.window, "insert_date_custom1")));
			break;
		case GEANY_KEYS_INSERT_LINEAFTER:
			insert_line_after(doc->editor);
			break;
		case GEANY_KEYS_INSERT_LINEBEFORE:
			insert_line_before(doc->editor);
			break;
	}
	return TRUE;
}
/* update key combination */
/* Replace a keybinding's key/modifier combination, keeping the accelerator
 * shown on its menu item (if any) in sync. */
void keybindings_update_combo(GeanyKeyBinding *kb, guint key, GdkModifierType mods)
{
	GtkWidget *item = kb->menu_item;

	/* Drop the accelerator currently displayed for the old combination. */
	if (item != NULL && kb->key != 0)
		gtk_widget_remove_accelerator(item, kb_accel_group, kb->key, kb->mods);

	kb->key = key;
	kb->mods = mods;

	/* Attach the new combination so the menu shows it. */
	if (item != NULL && kb->key != 0)
		gtk_widget_add_accelerator(item, "activate", kb_accel_group,
			kb->key, kb->mods, GTK_ACCEL_VISIBLE);
}
/* used for plugins, can be called repeatedly. */
/* used for plugins, can be called repeatedly. */
/* Creates (on first call, when group is NULL) or resets a plugin keybinding
 * group.  @a count is the number of bindings the plugin will register; any
 * previously registered plugin bindings of the group are discarded.
 * Returns the group, or NULL on invalid arguments. */
GeanyKeyGroup *keybindings_set_group(GeanyKeyGroup *group, const gchar *section_name,
		const gchar *label, gsize count, GeanyKeyGroupCallback callback)
{
	g_return_val_if_fail(section_name, NULL);
	g_return_val_if_fail(count, NULL);
	/* prevent conflict with core bindings */
	g_return_val_if_fail(!g_str_equal(section_name, keybindings_keyfile_group_name), NULL);

	if (!group)
	{
		group = g_new0(GeanyKeyGroup, 1);
		add_kb_group(group, section_name, label, callback, TRUE);
	}
	/* throw away any old plugin bindings and start with a clean array */
	g_free(group->plugin_keys);
	group->plugin_keys = g_new0(GeanyKeyBinding, count);
	group->plugin_key_count = count;
	g_ptr_array_set_size(group->key_items, 0);
	return group;
}
/* Frees a keybinding group.  For plugin groups the binding names/labels, the
 * key array and the group itself are released and the group is removed from
 * the global list; for non-plugin (core) groups only the key_items array is
 * freed, since core groups are statically owned. */
void keybindings_free_group(GeanyKeyGroup *group)
{
	GeanyKeyBinding *kb;

	g_ptr_array_free(group->key_items, TRUE);

	if (group->plugin)
	{
		foreach_c_array(kb, group->plugin_keys, group->plugin_key_count)
		{
			g_free(kb->name);
			g_free(kb->label);
		}
		g_free(group->plugin_keys);
		g_ptr_array_remove_fast(keybinding_groups, group);
		g_free(group);
	}
}
|
ktuan89/geany-1.22
|
src/keybindings.c
|
C
|
gpl-2.0
| 79,266
|
/*
* Synopsys DesignWare 8250 driver.
*
* Copyright 2011 Picochip, Jamie Iles.
* Copyright 2013 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* The Synopsys DesignWare 8250 has an extra feature whereby it detects if the
* LCR is written whilst busy. If it is, then a busy detect interrupt is
* raised, the LCR needs to be rewritten and the uart status register read.
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/pm_runtime.h>
#include <asm/byteorder.h>
#include "8250.h"
/* Offsets for the DesignWare specific registers */
#define DW_UART_USR 0x1f /* UART Status Register */
#define DW_UART_CPR 0xf4 /* Component Parameter Register */
#define DW_UART_UCV 0xf8 /* UART Component Version */
/* Component Parameter Register bits */
#define DW_UART_CPR_ABP_DATA_WIDTH (3 << 0)
#define DW_UART_CPR_AFCE_MODE (1 << 4)
#define DW_UART_CPR_THRE_MODE (1 << 5)
#define DW_UART_CPR_SIR_MODE (1 << 6)
#define DW_UART_CPR_SIR_LP_MODE (1 << 7)
#define DW_UART_CPR_ADDITIONAL_FEATURES (1 << 8)
#define DW_UART_CPR_FIFO_ACCESS (1 << 9)
#define DW_UART_CPR_FIFO_STAT (1 << 10)
#define DW_UART_CPR_SHADOW (1 << 11)
#define DW_UART_CPR_ENCODED_PARMS (1 << 12)
#define DW_UART_CPR_DMA_EXTRA (1 << 13)
#define DW_UART_CPR_FIFO_MODE (0xff << 16)
/* Helper for fifo size calculation */
#define DW_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16)
/* Per-port driver state, attached via uart_port.private_data. */
struct dw8250_data {
	u8			usr_reg;	/* offset of the UART Status Register
					 * (0x27 on Octeon, DW_UART_USR otherwise) */
	int			last_mcr;	/* last value written to the MCR,
					 * used to fake CTS on MSR reads */
	int			line;		/* serial core line number */
	struct clk		*clk;
	struct reset_control	*rst;
	struct uart_8250_dma	dma;
};
/* Patch MSR reads: with auto flow control (MCR AFE) enabled, report CTS as
 * asserted and suppress the delta-CTS bit.  All other reads pass through. */
static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
{
	struct dw8250_data *d = p->private_data;

	if (offset != UART_MSR || !(d->last_mcr & UART_MCR_AFE))
		return value;

	return (value | UART_MSR_CTS) & ~UART_MSR_DCTS;
}
/* Force the UART out of the busy state: drain/re-init the FIFOs and discard
 * any pending receive byte. */
static void dw8250_force_idle(struct uart_port *p)
{
	struct uart_8250_port *up = container_of(p, struct uart_8250_port, port);

	serial8250_clear_and_reinit_fifos(up);
	(void)p->serial_in(p, UART_RX);
}
/* 8-bit register write.  LCR writes are verified: the DW UART silently
 * ignores LCR writes while busy, so retry (forcing the port idle each time)
 * until the value sticks or the retry budget runs out. */
static void dw8250_serial_out(struct uart_port *p, int offset, int value)
{
	struct dw8250_data *d = p->private_data;
	int attempt;

	if (offset == UART_MCR)
		d->last_mcr = value;

	writeb(value, p->membase + (offset << p->regshift));

	if (offset != UART_LCR)
		return;

	for (attempt = 0; attempt < 1000; attempt++) {
		unsigned int lcr = p->serial_in(p, UART_LCR);

		/* SPAR is ignored in the comparison */
		if ((lcr & ~UART_LCR_SPAR) == (value & ~UART_LCR_SPAR))
			return;
		dw8250_force_idle(p);
		writeb(value, p->membase + (UART_LCR << p->regshift));
	}
	dev_err(p->dev, "Couldn't set LCR to %d\n", value);
}
/* 8-bit register read; MSR reads may be adjusted for auto flow control. */
static unsigned int dw8250_serial_in(struct uart_port *p, int offset)
{
	return dw8250_modify_msr(p, offset,
				 readb(p->membase + (offset << p->regshift)));
}
/* Read Back (rb) version to ensure register access ordering. */
/* Write followed by an LCR read-back, forcing the write to be posted before
 * continuing (used on Octeon, where register write ordering matters). */
static void dw8250_serial_out_rb(struct uart_port *p, int offset, int value)
{
	dw8250_serial_out(p, offset, value);
	dw8250_serial_in(p, UART_LCR);
}
/* 32-bit variant of dw8250_serial_out(); identical LCR write-verification
 * logic, using writel instead of writeb. */
static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
{
	struct dw8250_data *d = p->private_data;
	int attempt;

	if (offset == UART_MCR)
		d->last_mcr = value;

	writel(value, p->membase + (offset << p->regshift));

	if (offset != UART_LCR)
		return;

	for (attempt = 0; attempt < 1000; attempt++) {
		unsigned int lcr = p->serial_in(p, UART_LCR);

		/* SPAR is ignored in the comparison */
		if ((lcr & ~UART_LCR_SPAR) == (value & ~UART_LCR_SPAR))
			return;
		dw8250_force_idle(p);
		writel(value, p->membase + (UART_LCR << p->regshift));
	}
	dev_err(p->dev, "Couldn't set LCR to %d\n", value);
}
/* 32-bit register read; MSR reads may be adjusted for auto flow control. */
static unsigned int dw8250_serial_in32(struct uart_port *p, int offset)
{
	return dw8250_modify_msr(p, offset,
				 readl(p->membase + (offset << p->regshift)));
}
/* Interrupt handler: let the generic 8250 code try first; otherwise handle
 * the DW-specific busy-detect interrupt, which is cleared by reading the
 * UART Status Register.  Returns 1 if the interrupt was ours. */
static int dw8250_handle_irq(struct uart_port *p)
{
	struct dw8250_data *d = p->private_data;
	unsigned int iir = p->serial_in(p, UART_IIR);

	if (serial8250_handle_irq(p, iir))
		return 1;

	if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
		/* Clear the USR */
		(void)p->serial_in(p, d->usr_reg);
		return 1;
	}

	return 0;
}
/* Power-management hook: grab a runtime-PM reference before the serial core
 * powers the port up (state == 0) and release it after powering down, so the
 * device is guaranteed active while in use.  Ordering around
 * serial8250_do_pm() is deliberate. */
static void
dw8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old)
{
	if (!state)
		pm_runtime_get_sync(port->dev);

	serial8250_do_pm(port, state, old);

	if (state)
		pm_runtime_put_sync_suspend(port->dev);
}
/* DMA channel filter: accept only the channels reserved for this port's
 * RX and TX transfers. */
static bool dw8250_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw8250_data *data = param;
	int id = chan->chan_id;

	return id == data->dma.tx_chan_id || id == data->dma.rx_chan_id;
}
/* Read the DesignWare UCV/CPR registers to discover synthesis-time features
 * (FIFO size, auto flow control) and configure the port accordingly.  A zero
 * UCV or CPR means the registers are not implemented, so nothing is done. */
static void dw8250_setup_port(struct uart_8250_port *up)
{
	struct uart_port	*p = &up->port;
	u32			reg = readl(p->membase + DW_UART_UCV);

	/*
	 * If the Component Version Register returns zero, we know that
	 * ADDITIONAL_FEATURES are not enabled. No need to go any further.
	 */
	if (!reg)
		return;

	/* version is encoded as three ASCII characters in the upper bytes */
	dev_dbg_ratelimited(p->dev, "Designware UART version %c.%c%c\n",
		(reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff);

	reg = readl(p->membase + DW_UART_CPR);
	if (!reg)
		return;

	/* Select the type based on fifo */
	if (reg & DW_UART_CPR_FIFO_MODE) {
		p->type = PORT_16550A;
		p->flags |= UPF_FIXED_TYPE;
		p->fifosize = DW_UART_CPR_FIFO_SIZE(reg);
		up->tx_loadsz = p->fifosize;
		up->capabilities = UART_CAP_FIFO;
	}

	if (reg & DW_UART_CPR_AFCE_MODE)
		up->capabilities |= UART_CAP_AFE;
}
/* Configure the port from devicetree: applies the Octeon quirks, selects
 * 8/32-bit register accessors from "reg-io-width", reads "reg-shift", grabs
 * an optional reset control, and falls back to "clock-frequency" when no
 * clock rate was set from the clk API.  Returns 0 or a negative errno. */
static int dw8250_probe_of(struct uart_port *p,
			   struct dw8250_data *data)
{
	struct device_node	*np = p->dev->of_node;
	u32			val;
	bool has_ucv = true;

	if (of_device_is_compatible(np, "cavium,octeon-3860-uart")) {
#ifdef __BIG_ENDIAN
		/*
		 * Low order bits of these 64-bit registers, when
		 * accessed as a byte, are 7 bytes further down in the
		 * address space in big endian mode.
		 */
		p->membase += 7;
#endif
		p->serial_out = dw8250_serial_out_rb;
		p->flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
		p->type = PORT_OCTEON;
		data->usr_reg = 0x27;
		has_ucv = false;
	} else if (!of_property_read_u32(np, "reg-io-width", &val)) {
		switch (val) {
		case 1:
			break;
		case 4:
			p->iotype = UPIO_MEM32;
			p->serial_in = dw8250_serial_in32;
			p->serial_out = dw8250_serial_out32;
			break;
		default:
			dev_err(p->dev, "unsupported reg-io-width (%u)\n", val);
			return -EINVAL;
		}
	}
	/* Octeon has no usable UCV register, so skip feature auto-detection */
	if (has_ucv)
		dw8250_setup_port(container_of(p, struct uart_8250_port, port));

	if (!of_property_read_u32(np, "reg-shift", &val))
		p->regshift = val;

	data->rst = devm_reset_control_get_optional(p->dev, NULL);

	/* clock got configured through clk api, all done */
	if (p->uartclk)
		return 0;

	/* try to find out clock frequency from DT as fallback */
	if (of_property_read_u32(np, "clock-frequency", &val)) {
		dev_err(p->dev, "clk or clock-frequency not defined\n");
		return -EINVAL;
	}
	p->uartclk = val;

	return 0;
}
/* Configure an ACPI-enumerated port: 32-bit MMIO registers with a shift of
 * 2, a fixed clock rate taken from the match table's driver_data, and DMA
 * burst sizes derived from the FIFO size.  Returns 0 or a negative errno. */
static int dw8250_probe_acpi(struct uart_8250_port *up,
			     struct dw8250_data *data)
{
	const struct acpi_device_id *id;
	struct uart_port *p = &up->port;

	/* must run first so p->fifosize is known for the burst sizes below */
	dw8250_setup_port(up);

	id = acpi_match_device(p->dev->driver->acpi_match_table, p->dev);
	if (!id)
		return -ENODEV;

	p->iotype = UPIO_MEM32;
	p->serial_in = dw8250_serial_in32;
	p->serial_out = dw8250_serial_out32;
	p->regshift = 2;

	if (!p->uartclk)
		p->uartclk = (unsigned int)id->driver_data;

	up->dma = &data->dma;

	up->dma->rxconf.src_maxburst = p->fifosize / 4;
	up->dma->txconf.dst_maxburst = p->fifosize / 4;

	return 0;
}
/* Platform probe: maps the registers, sets up driver state (clock, DMA
 * filter, accessors), delegates to the DT or ACPI configuration path,
 * deasserts the optional reset, registers the 8250 port and enables
 * runtime PM.  Returns 0 or a negative errno. */
static int dw8250_probe(struct platform_device *pdev)
{
	struct uart_8250_port uart = {};
	struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	struct dw8250_data *data;
	int err;

	if (!regs || !irq) {
		dev_err(&pdev->dev, "no registers/irq defined\n");
		return -EINVAL;
	}

	spin_lock_init(&uart.port.lock);
	uart.port.mapbase = regs->start;
	uart.port.irq = irq->start;
	uart.port.handle_irq = dw8250_handle_irq;
	uart.port.pm = dw8250_do_pm;
	uart.port.type = PORT_8250;
	uart.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_FIXED_PORT;
	uart.port.dev = &pdev->dev;

	uart.port.membase = devm_ioremap(&pdev->dev, regs->start,
					 resource_size(regs));
	if (!uart.port.membase)
		return -ENOMEM;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->usr_reg = DW_UART_USR;

	/* the clock is optional; DT may supply "clock-frequency" instead */
	data->clk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(data->clk)) {
		clk_prepare_enable(data->clk);
		uart.port.uartclk = clk_get_rate(data->clk);
	}

	/* -1 means "no channel reserved yet" for the DMA filter */
	data->dma.rx_chan_id = -1;
	data->dma.tx_chan_id = -1;
	data->dma.rx_param = data;
	data->dma.tx_param = data;
	data->dma.fn = dw8250_dma_filter;

	uart.port.iotype = UPIO_MEM;
	uart.port.serial_in = dw8250_serial_in;
	uart.port.serial_out = dw8250_serial_out;
	uart.port.private_data = data;

	if (pdev->dev.of_node) {
		err = dw8250_probe_of(&uart.port, data);
		if (err)
			return err;
	} else if (ACPI_HANDLE(&pdev->dev)) {
		err = dw8250_probe_acpi(&uart, data);
		if (err)
			return err;
	} else {
		return -ENODEV;
	}

	if (!IS_ERR_OR_NULL(data->rst))
		reset_control_deassert(data->rst);

	data->line = serial8250_register_8250_port(&uart);
	if (data->line < 0)
		return data->line;

	platform_set_drvdata(pdev, data);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return 0;
}
/* Platform remove: unregister the port, re-assert the optional reset,
 * disable the clock and tear down runtime PM.  The pm_runtime_get_sync()
 * ensures the device is powered while the port is being unregistered. */
static int dw8250_remove(struct platform_device *pdev)
{
	struct dw8250_data *data = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);

	serial8250_unregister_port(data->line);

	if (!IS_ERR_OR_NULL(data->rst))
		reset_control_assert(data->rst);

	if (!IS_ERR(data->clk))
		clk_disable_unprepare(data->clk);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
/* System-sleep suspend: hand the port to the serial core for suspension. */
static int dw8250_suspend(struct device *dev)
{
	struct dw8250_data *data = dev_get_drvdata(dev);

	serial8250_suspend_port(data->line);

	return 0;
}
/* System-sleep resume: let the serial core bring the port back up. */
static int dw8250_resume(struct device *dev)
{
	struct dw8250_data *data = dev_get_drvdata(dev);

	serial8250_resume_port(data->line);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_RUNTIME
/* Runtime suspend: gate the (optional) input clock while the port is idle. */
static int dw8250_runtime_suspend(struct device *dev)
{
	struct dw8250_data *data = dev_get_drvdata(dev);

	if (!IS_ERR(data->clk))
		clk_disable_unprepare(data->clk);

	return 0;
}
/* Runtime resume: re-enable the (optional) input clock before use. */
static int dw8250_runtime_resume(struct device *dev)
{
	struct dw8250_data *data = dev_get_drvdata(dev);

	if (!IS_ERR(data->clk))
		clk_prepare_enable(data->clk);

	return 0;
}
#endif
/* System-sleep and runtime PM callbacks. */
static const struct dev_pm_ops dw8250_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dw8250_suspend, dw8250_resume)
	SET_RUNTIME_PM_OPS(dw8250_runtime_suspend, dw8250_runtime_resume, NULL)
};

/* Devicetree compatible strings handled by this driver. */
static const struct of_device_id dw8250_of_match[] = {
	{ .compatible = "snps,dw-apb-uart" },
	{ .compatible = "cavium,octeon-3860-uart" },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, dw8250_of_match);

/* ACPI IDs; driver_data (here 0) may carry a fixed clock rate,
 * see dw8250_probe_acpi(). */
static const struct acpi_device_id dw8250_acpi_match[] = {
	{ "INT33C4", 0 },
	{ "INT33C5", 0 },
	{ "INT3434", 0 },
	{ "INT3435", 0 },
	{ "80860F0A", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);

static struct platform_driver dw8250_platform_driver = {
	.driver = {
		.name		= "dw-apb-uart",
		.owner		= THIS_MODULE,
		.pm		= &dw8250_pm_ops,
		.of_match_table	= dw8250_of_match,
		.acpi_match_table = ACPI_PTR(dw8250_acpi_match),
	},
	.probe			= dw8250_probe,
	.remove			= dw8250_remove,
};

module_platform_driver(dw8250_platform_driver);

MODULE_AUTHOR("Jamie Iles");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synopsys DesignWare 8250 serial port driver");
|
fredericgermain/linux-sunxi
|
drivers/tty/serial/8250/8250_dw.c
|
C
|
gpl-2.0
| 12,111
|
/*****************************************************************************
* m3u.c : M3U playlist format import
*****************************************************************************
* Copyright (C) 2004 VLC authors and VideoLAN
* $Id: 02a95984d5fe1968163cb435268a9874f0c65eb9 $
*
* Authors: Clément Stenac <zorglub@videolan.org>
* Sigmund Augdal Helberg <dnumgis@videolan.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
*****************************************************************************/
/*****************************************************************************
* Preamble
*****************************************************************************/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <vlc_common.h>
#include <vlc_demux.h>
#include <vlc_charset.h>
#include "playlist.h"
/* Per-demux state shared by Import_M3U(), Demux() and Close_M3U(). */
struct demux_sys_t
{
    char *psz_prefix;              /* base location used to resolve relative entries */
    char *(*pf_dup) (const char *); /* strategy converting a raw line to UTF-8 */
};
/*****************************************************************************
* Local prototypes
*****************************************************************************/
static int Demux( demux_t *p_demux);
static void parseEXTINF( char *psz_string, char **ppsz_artist, char **ppsz_name, int *pi_duration );
static bool ContainsURL( demux_t *p_demux );
/* Duplicate a line of unknown encoding: keep it if it already validates as
 * UTF-8, otherwise assume Latin-1 and convert.  Caller frees the result. */
static char *GuessEncoding (const char *str)
{
    return IsUTF8 (str) ? strdup (str) : FromLatin1 (str);
}
/* Duplicate a line only if it is valid UTF-8; returns NULL otherwise.  Used
 * for formats that mandate UTF-8 (BOM present, .m3u8, RTSPtext). */
static char *CheckUnicode (const char *str)
{
    return IsUTF8 (str) ? strdup (str): NULL;
}
/*****************************************************************************
* Import_M3U: main import function
*****************************************************************************/
int Import_M3U( vlc_object_t *p_this )
{
    demux_t *p_demux = (demux_t *)p_this;
    const uint8_t *p_peek;
    /* default: lines may be Latin-1 or UTF-8, guess per line */
    char *(*pf_dup) (const char *) = GuessEncoding;
    int offset = 0;

    /* A BOM guarantees UTF-8; remember to skip the marker itself */
    if( stream_Peek( p_demux->s, &p_peek, 3 ) == 3
     && !memcmp( p_peek, "\xef\xbb\xbf", 3) )
    {
        pf_dup = CheckUnicode; /* UTF-8 Byte Order Mark */
        offset = 3;
    }

    if( demux_IsPathExtension( p_demux, ".m3u8" )
     || demux_IsForced( p_demux, "m3u8" )
     || CheckContentType( p_demux->s, "application/vnd.apple.mpegurl" ) )
        pf_dup = CheckUnicode; /* UTF-8 file type */
    else
    if( demux_IsPathExtension( p_demux, ".m3u" )
     || demux_IsPathExtension( p_demux, ".vlc" )
     || demux_IsForced( p_demux, "m3u" )
     || ContainsURL( p_demux )
     || CheckContentType( p_demux->s, "audio/x-mpegurl") )
        ; /* Guess encoding */
    else
    {
        /* Not recognised by name/type: sniff the first bytes after the BOM */
        if( stream_Peek( p_demux->s, &p_peek, 8 + offset ) < (8 + offset) )
            return VLC_EGENERIC;

        p_peek += offset;

        if( !strncasecmp( (const char *)p_peek, "RTSPtext", 8 ) ) /* QuickTime */
            pf_dup = CheckUnicode; /* UTF-8 */
        else
        if( !memcmp( p_peek, "#EXTM3U", 7 ) )
            ; /* Guess encoding */
        else
            return VLC_EGENERIC;
    }

    stream_Seek( p_demux->s, offset );

    STANDARD_DEMUX_INIT_MSG( "found valid M3U playlist" );
    p_demux->p_sys->psz_prefix = FindPrefix( p_demux );
    p_demux->p_sys->pf_dup = pf_dup;

    return VLC_SUCCESS;
}
/* Heuristic: peek at the first KiB of the stream and report whether a line
 * begins with a known URL scheme.  Comments ('#') and blank lines are
 * skipped; any other non-URL line means "not a URL playlist". */
static bool ContainsURL( demux_t *p_demux )
{
    const uint8_t *p_peek, *p_peek_end;
    int i_peek;

    i_peek = stream_Peek( p_demux->s, &p_peek, 1024 );
    if( i_peek <= 0 ) return false;
    p_peek_end = p_peek + i_peek;

    while( p_peek + sizeof( "https://" ) < p_peek_end )
    {
        /* One line starting with a URL is enough */
        if( !strncasecmp( (const char *)p_peek, "http://", 7 ) ||
            !strncasecmp( (const char *)p_peek, "mms://", 6 ) ||
            !strncasecmp( (const char *)p_peek, "rtsp://", 7 ) ||
            !strncasecmp( (const char *)p_peek, "https://", 8 ) ||
            !strncasecmp( (const char *)p_peek, "ftp://", 6 ) ||
            !strncasecmp( (const char *)p_peek, "ftps://", 7 ) ||
            !strncasecmp( (const char *)p_peek, "ftpes://", 8 ) )
        {
            return true;
        }
        /* Comments and blank lines are ignored */
        else if( *p_peek != '#' && *p_peek != '\n' && *p_peek != '\r')
        {
            return false;
        }

        /* Advance to the start of the next line.  The bounds check on the
         * '\n' test fixes a one-byte read past the peeked area when the
         * last line is unterminated. */
        while( p_peek < p_peek_end && *p_peek != '\n' )
            p_peek++;
        if ( p_peek < p_peek_end && *p_peek == '\n' )
            p_peek++;
    }
    return false;
}
/*****************************************************************************
* Deactivate: frees unused data
*****************************************************************************/
void Close_M3U( vlc_object_t *p_this )
{
    demux_t *p_demux = (demux_t *)p_this;

    /* release the prefix computed at open time, then the state itself */
    free( p_demux->p_sys->psz_prefix );
    free( p_demux->p_sys );
}
/* Parse the playlist line by line.  #EXT* directives accumulate metadata
 * (name, artist, duration, VLC options, album art) that is applied to the
 * next plain entry line; b_cleanup then resets the accumulated state so it
 * never leaks onto a later entry.  All items are attached to the current
 * input as a subtree. */
static int Demux( demux_t *p_demux )
{
    char       *psz_line;
    char       *psz_name = NULL;
    char       *psz_artist = NULL;
    char       *psz_album_art = NULL;
    int        i_parsed_duration = 0;
    mtime_t    i_duration = -1;
    const char**ppsz_options = NULL;
    char *    (*pf_dup) (const char *) = p_demux->p_sys->pf_dup;
    int        i_options = 0;
    bool b_cleanup = false;
    input_item_t *p_input;

    input_item_t *p_current_input = GetCurrentItem(p_demux);

    input_item_node_t *p_subitems = input_item_node_Create( p_current_input );

    psz_line = stream_ReadLine( p_demux->s );
    while( psz_line )
    {
        char *psz_parse = psz_line;

        /* Skip leading tabs and spaces */
        while( *psz_parse == ' ' || *psz_parse == '\t' ||
               *psz_parse == '\n' || *psz_parse == '\r' ) psz_parse++;

        if( *psz_parse == '#' )
        {
            /* Parse extra info */

            /* Skip leading tabs and spaces */
            while( *psz_parse == ' ' || *psz_parse == '\t' ||
                   *psz_parse == '\n' || *psz_parse == '\r' ||
                   *psz_parse == '#' ) psz_parse++;

            if( !*psz_parse ) goto error;

            if( !strncasecmp( psz_parse, "EXTINF:", sizeof("EXTINF:") -1 ) )
            {
                /* Extended info */
                psz_parse += sizeof("EXTINF:") - 1;
                FREENULL( psz_name );
                FREENULL( psz_artist );
                parseEXTINF( psz_parse, &psz_artist, &psz_name, &i_parsed_duration );
                if( i_parsed_duration >= 0 )
                    i_duration = i_parsed_duration * INT64_C(1000000);
                /* pf_dup copies out of the line buffer, which is freed below */
                if( psz_name )
                    psz_name = pf_dup( psz_name );
                if( psz_artist )
                    psz_artist = pf_dup( psz_artist );
            }
            else if( !strncasecmp( psz_parse, "EXTVLCOPT:",
                                   sizeof("EXTVLCOPT:") -1 ) )
            {
                /* VLC Option */
                char *psz_option;
                psz_parse += sizeof("EXTVLCOPT:") -1;
                if( !*psz_parse ) goto error;

                psz_option = pf_dup( psz_parse );
                if( psz_option )
                    INSERT_ELEM( (const char **), ppsz_options, i_options, i_options, // sunqueen modify
                                 psz_option );
            }
            /* Special case for jamendo which provide the albumart */
            else if( !strncasecmp( psz_parse, "EXTALBUMARTURL:",
                                   sizeof( "EXTALBUMARTURL:" ) -1 ) )
            {
                psz_parse += sizeof( "EXTALBUMARTURL:" ) - 1;
                free( psz_album_art );
                psz_album_art = pf_dup( psz_parse );
            }
        }
        else if( !strncasecmp( psz_parse, "RTSPtext", sizeof("RTSPtext") -1 ) )
        {
            ;/* special case to handle QuickTime RTSPtext redirect files */
        }
        else if( *psz_parse )
        {
            /* Non-comment, non-empty line: this is an actual entry */
            char *psz_mrl;

            psz_parse = pf_dup( psz_parse );
            if( !psz_name && psz_parse )
                /* Use filename as name for relative entries */
                psz_name = strdup( psz_parse );

            psz_mrl = ProcessMRL( psz_parse, p_demux->p_sys->psz_prefix );

            b_cleanup = true;
            if( !psz_mrl )
            {
                free( psz_parse );
                goto error;
            }

            p_input = input_item_NewExt( psz_mrl, psz_name,
                                         i_options, ppsz_options, 0, i_duration );

            free( psz_parse );
            free( psz_mrl );

            if ( !EMPTY_STR(psz_artist) )
                input_item_SetArtist( p_input, psz_artist );
            if( psz_name ) input_item_SetTitle( p_input, psz_name );
            if( !EMPTY_STR(psz_album_art) )
                input_item_SetArtURL( p_input, psz_album_art );

            input_item_node_AppendItem( p_subitems, p_input );
            vlc_gc_decref( p_input );
        }

 error:

        /* Fetch another line */
        free( psz_line );
        psz_line = stream_ReadLine( p_demux->s );
        if( !psz_line ) b_cleanup = true;

        if( b_cleanup )
        {
            /* Cleanup state: drop metadata already consumed by an entry (or
             * left dangling at EOF) so it doesn't attach to the next one */
            while( i_options-- ) free( (char*)ppsz_options[i_options] );
            FREENULL( ppsz_options );
            i_options = 0;
            FREENULL( psz_name );
            FREENULL( psz_artist );
            FREENULL( psz_album_art );
            i_parsed_duration = 0;
            i_duration = -1;
            b_cleanup = false;
        }
    }
    input_item_node_PostAndDelete( p_subitems );
    vlc_gc_decref(p_current_input);
    var_Destroy( p_demux, "m3u-extvlcopt" );
    return 0; /* Needed for correct operation of go back */
}
/* Parse the payload of an "#EXTINF:" line (the part after the colon),
 * splitting it in place.  Recognised shapes:
 *   "time,artist - name"   (0.8.2+)
 *   "time,,name"           (empty artist)
 *   "time,artist,name"
 *   "time,name"
 * *pi_duration is only written when a comma is present; *ppsz_artist and
 * *ppsz_name, when set, point into the (mutated) input buffer. */
static void parseEXTINF(char *psz_string, char **ppsz_artist,
                        char **ppsz_name, int *pi_duration)
{
    char *p_end = psz_string + strlen( psz_string );
    char *p_sep;

    /* skip leading blanks before the duration field */
    while( psz_string < p_end && ( *psz_string == '\t' || *psz_string == ' ' ) )
        psz_string++;

    /* the duration runs up to the first comma; without one, give up */
    p_sep = strchr( psz_string, ',' );
    if( p_sep == NULL )
        return;
    *p_sep = '\0';
    *pi_duration = atoi( psz_string );

    psz_string = p_sep;
    if( psz_string < p_end ) /* continue parsing if possible */
        psz_string++;

    /* "time,artist - name" */
    p_sep = strstr( psz_string, " - " );
    if( p_sep != NULL )
    {
        *p_sep = '\0';
        *ppsz_artist = psz_string;
        *ppsz_name = p_sep + 3; /* points directly after ' - ' */
        return;
    }

    /* "time,,name" */
    if( *psz_string == ',' )
    {
        *ppsz_name = psz_string + 1;
        return;
    }

    /* "time,artist,name" or plain "time,name" */
    p_sep = strchr( psz_string, ',' );
    if( p_sep != NULL )
    {
        *p_sep = '\0';
        *ppsz_artist = psz_string;
        *ppsz_name = p_sep + 1;
    }
    else
    {
        *ppsz_name = psz_string;
    }
}
|
sunqueen/vlc-2.2.0-rc2.32-2013
|
modules/demux/playlist/m3u.c
|
C
|
gpl-2.0
| 11,910
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <CL/cl.h>
/*
 * Read the whole of 'file_name' into a freshly malloc'd, NUL-terminated
 * buffer.  On success, returns the buffer (caller frees) and stores the
 * file size (excluding the added NUL) in *size_ptr when non-NULL.
 * Returns NULL on open, allocation or read failure.
 */
unsigned char *read_buffer(char *file_name, size_t *size_ptr)
{
	FILE *f;
	unsigned char *buf;
	size_t size;

	/* Open file */
	f = fopen(file_name, "rb");
	if (!f)
		return NULL;

	/* Obtain file size */
	fseek(f, 0, SEEK_END);
	size = ftell(f);
	fseek(f, 0, SEEK_SET);

	/* Allocate and read buffer; bail out cleanly on failure */
	buf = malloc(size + 1);
	if (!buf)
	{
		fclose(f);
		return NULL;
	}
	if (fread(buf, 1, size, f) != size)
	{
		free(buf);
		fclose(f);
		return NULL;
	}
	buf[size] = '\0';

	/* The stream was previously leaked here; close it */
	fclose(f);

	/* Return size of buffer */
	if (size_ptr)
		*size_ptr = size;

	/* Return buffer */
	return buf;
}
/*
 * Write 'buffer_size' bytes of 'buffer' to 'file_name', truncating any
 * existing file.  A NULL buffer creates/truncates an empty file.  Prints a
 * diagnostic and returns if the file cannot be opened (previously an
 * unchecked fopen led to fclose(NULL)).
 */
void write_buffer(char *file_name, const char *buffer, size_t buffer_size)
{
	FILE *f;

	/* Open file */
	f = fopen(file_name, "w+");
	if (!f)
	{
		fprintf(stderr, "error: cannot open '%s' for writing\n", file_name);
		return;
	}

	/* Write buffer */
	if (buffer)
		fwrite(buffer, 1, buffer_size, f);

	/* Close file */
	fclose(f);
}
/*
 * Host-side driver for the sub_sat_char2char2 OpenCL unit test: selects the
 * first platform/GPU, builds the kernel from "sub_sat_char2char2.cl", runs
 * it once over two constant cl_char2 input vectors and dumps the result
 * buffer to "<argv[0]>.result".  Exits with status 1 on any failure.
 */
int main(int argc, char const *argv[])
{
	(void)argc; /* unused */

	/* Get platform */
	cl_platform_id platform;
	cl_uint num_platforms;
	cl_int ret = clGetPlatformIDs(1, &platform, &num_platforms);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clGetPlatformIDs' failed\n");
		exit(1);
	}

	printf("Number of platforms: %d\n", num_platforms);
	printf("platform=%p\n", platform);

	/* Get platform name */
	char platform_name[100];
	ret = clGetPlatformInfo(platform, CL_PLATFORM_NAME, sizeof(platform_name), platform_name, NULL);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clGetPlatformInfo' failed\n");
		exit(1);
	}

	printf("platform.name='%s'\n\n", platform_name);

	/* Get device */
	cl_device_id device;
	cl_uint num_devices;
	ret = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device, &num_devices);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clGetDeviceIDs' failed\n");
		exit(1);
	}

	printf("Number of devices: %d\n", num_devices);
	printf("device=%p\n", device);

	/* Get device name */
	char device_name[100];
	ret = clGetDeviceInfo(device, CL_DEVICE_NAME, sizeof(device_name),
		device_name, NULL);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clGetDeviceInfo' failed\n");
		exit(1);
	}

	printf("device.name='%s'\n", device_name);
	printf("\n");

	/* Create a Context Object */
	cl_context context;
	context = clCreateContext(NULL, 1, &device, NULL, NULL, &ret);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clCreateContext' failed\n");
		exit(1);
	}

	printf("context=%p\n", context);

	/* Create a Command Queue Object */
	cl_command_queue command_queue;
	command_queue = clCreateCommandQueue(context, device, 0, &ret);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clCreateCommandQueue' failed\n");
		exit(1);
	}

	printf("command_queue=%p\n", command_queue);
	printf("\n");

	/* Program source */
	unsigned char *source_code;
	size_t source_length;

	/* Read program from 'sub_sat_char2char2.cl'; previously an unchecked
	 * NULL here was passed straight into clCreateProgramWithSource */
	source_code = read_buffer("sub_sat_char2char2.cl", &source_length);
	if (!source_code)
	{
		printf("error: could not read 'sub_sat_char2char2.cl'\n");
		exit(1);
	}

	/* Create a program */
	cl_program program;
	program = clCreateProgramWithSource(context, 1, (const char **)&source_code, &source_length, &ret);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clCreateProgramWithSource' failed\n");
		exit(1);
	}
	free(source_code); /* the runtime keeps its own copy */
	printf("program=%p\n", program);

	/* Build program */
	ret = clBuildProgram(program, 1, &device, NULL, NULL, NULL);
	if (ret != CL_SUCCESS)
	{
		size_t size;
		char *log;

		/* Get log size */
		clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, 0, NULL, &size);

		/* Allocate log and print */
		log = malloc(size);
		clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, size, log, NULL);
		printf("error: call to 'clBuildProgram' failed:\n%s\n", log);

		/* Free log and exit */
		free(log);
		exit(1);
	}

	printf("program built\n");
	printf("\n");

	/* Create a Kernel Object */
	cl_kernel kernel;
	kernel = clCreateKernel(program, "sub_sat_char2char2", &ret);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clCreateKernel' failed\n");
		exit(1);
	}

	/* Create and allocate host buffers */
	size_t num_elem = 10;

	/* Create and init host side src buffer 0 */
	cl_char2 *src_0_host_buffer;
	src_0_host_buffer = malloc(num_elem * sizeof(cl_char2));
	if (!src_0_host_buffer)
	{
		printf("error: out of memory\n");
		exit(1);
	}
	for (size_t i = 0; i < num_elem; i++)
		src_0_host_buffer[i] = (cl_char2){{2, 2}};

	/* Create and init device side src buffer 0 */
	cl_mem src_0_device_buffer;
	src_0_device_buffer = clCreateBuffer(context, CL_MEM_READ_ONLY, num_elem * sizeof(cl_char2), NULL, &ret);
	if (ret != CL_SUCCESS)
	{
		printf("error: could not create source buffer\n");
		exit(1);
	}
	ret = clEnqueueWriteBuffer(command_queue, src_0_device_buffer, CL_TRUE, 0, num_elem * sizeof(cl_char2), src_0_host_buffer, 0, NULL, NULL);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clEnqueueWriteBuffer' failed\n");
		exit(1);
	}

	/* Create and init host side src buffer 1 */
	cl_char2 *src_1_host_buffer;
	src_1_host_buffer = malloc(num_elem * sizeof(cl_char2));
	if (!src_1_host_buffer)
	{
		printf("error: out of memory\n");
		exit(1);
	}
	for (size_t i = 0; i < num_elem; i++)
		src_1_host_buffer[i] = (cl_char2){{2, 2}};

	/* Create and init device side src buffer 1 */
	cl_mem src_1_device_buffer;
	src_1_device_buffer = clCreateBuffer(context, CL_MEM_READ_ONLY, num_elem * sizeof(cl_char2), NULL, &ret);
	if (ret != CL_SUCCESS)
	{
		printf("error: could not create source buffer\n");
		exit(1);
	}
	ret = clEnqueueWriteBuffer(command_queue, src_1_device_buffer, CL_TRUE, 0, num_elem * sizeof(cl_char2), src_1_host_buffer, 0, NULL, NULL);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clEnqueueWriteBuffer' failed\n");
		exit(1);
	}

	/* Create host dst buffer */
	cl_char2 *dst_host_buffer;
	dst_host_buffer = malloc(num_elem * sizeof(cl_char2));
	if (!dst_host_buffer)
	{
		printf("error: out of memory\n");
		exit(1);
	}
	memset((void *)dst_host_buffer, 1, num_elem * sizeof(cl_char2));

	/* Create device dst buffer */
	cl_mem dst_device_buffer;
	dst_device_buffer = clCreateBuffer(context, CL_MEM_WRITE_ONLY, num_elem * sizeof(cl_char2), NULL, &ret);
	if (ret != CL_SUCCESS)
	{
		printf("error: could not create dst buffer\n");
		exit(1);
	}

	/* Set kernel arguments */
	ret = CL_SUCCESS;
	ret |= clSetKernelArg(kernel, 0, sizeof(cl_mem), &src_0_device_buffer);
	ret |= clSetKernelArg(kernel, 1, sizeof(cl_mem), &src_1_device_buffer);
	ret |= clSetKernelArg(kernel, 2, sizeof(cl_mem), &dst_device_buffer);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clSetKernelArg' failed\n");
		exit(1);
	}

	/* Launch the kernel: one work-group covering all elements */
	size_t global_work_size = num_elem;
	size_t local_work_size = num_elem;
	ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, NULL);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clEnqueueNDRangeKernel' failed\n");
		exit(1);
	}

	/* Wait for it to finish */
	clFinish(command_queue);

	/* Read results from GPU */
	ret = clEnqueueReadBuffer(command_queue, dst_device_buffer, CL_TRUE, 0, num_elem * sizeof(cl_char2), dst_host_buffer, 0, NULL, NULL);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clEnqueueReadBuffer' failed\n");
		exit(1);
	}

	/* Dump dst buffer to file (bounded; sprintf could overflow) */
	char dump_file[100];
	snprintf(dump_file, sizeof(dump_file), "%s.result", argv[0]);
	write_buffer(dump_file, (const char *)dst_host_buffer, num_elem * sizeof(cl_char2));
	printf("Result dumped to %s\n", dump_file);

	/* Free host dst buffer */
	free(dst_host_buffer);

	/* Free device dst buffer */
	ret = clReleaseMemObject(dst_device_buffer);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clReleaseMemObject' failed\n");
		exit(1);
	}

	/* Free host side src buffer 0 */
	free(src_0_host_buffer);

	/* Free device side src buffer 0 */
	ret = clReleaseMemObject(src_0_device_buffer);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clReleaseMemObject' failed\n");
		exit(1);
	}

	/* Free host side src buffer 1 */
	free(src_1_host_buffer);

	/* Free device side src buffer 1 */
	ret = clReleaseMemObject(src_1_device_buffer);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clReleaseMemObject' failed\n");
		exit(1);
	}

	/* Release kernel */
	ret = clReleaseKernel(kernel);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clReleaseKernel' failed\n");
		exit(1);
	}

	/* Release program */
	ret = clReleaseProgram(program);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clReleaseProgram' failed\n");
		exit(1);
	}

	/* Release command queue */
	ret = clReleaseCommandQueue(command_queue);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clReleaseCommandQueue' failed\n");
		exit(1);
	}

	/* Release context */
	ret = clReleaseContext(context);
	if (ret != CL_SUCCESS)
	{
		printf("error: call to 'clReleaseContext' failed\n");
		exit(1);
	}

	return 0;
}
|
xianggong/m2c_unit_test
|
test/integer/sub_sat_char2char2/sub_sat_char2char2_src.c
|
C
|
gpl-2.0
| 10,934
|
/*
* arch/i386/mm/ioremap.c
*
* Re-map IO memory to kernel address space so that we can access it.
* This is needed for high PCI addresses that aren't mapped in the
* 640k-1MB IO memory area on PC's
*
* (C) Copyright 1995 1996 Linus Torvalds
*/
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
/*
 * Fill one page-table page with mappings for [address, address+size)
 * starting at physical address phys_addr, clipped to the current
 * PMD-sized region.  Every slot must currently be empty; the caller
 * holds init_mm.page_table_lock.
 */
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
 unsigned long phys_addr, unsigned long flags)
{
 unsigned long end;
 /* Keep only the offset within this pmd entry's range. */
 address &= ~PMD_MASK;
 end = address + size;
 if (end > PMD_SIZE)
  end = PMD_SIZE;
 if (address >= end)
  BUG();
 do {
  /* An already-present pte here means the caller double-mapped. */
  if (!pte_none(*pte)) {
   printk("remap_area_pte: page already exists\n");
   BUG();
  }
  /* Present + writable + dirty/accessed, plus caller-supplied flags. */
  set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
   _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
  address += PAGE_SIZE;
  phys_addr += PAGE_SIZE;
  pte++;
 } while (address && (address < end));
}
/*
 * Populate the pmd entries covering [address, address+size) with ptes
 * mapping phys_addr onward, clipped to the current PGDIR-sized region.
 * Returns 0 on success or -ENOMEM if a pte page cannot be allocated.
 */
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
 unsigned long phys_addr, unsigned long flags)
{
 unsigned long end;
 /* Keep only the offset within this pgd entry's range. */
 address &= ~PGDIR_MASK;
 end = address + size;
 if (end > PGDIR_SIZE)
  end = PGDIR_SIZE;
 /* Bias phys_addr so that (address + phys_addr) below is correct. */
 phys_addr -= address;
 if (address >= end)
  BUG();
 do {
  pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
  if (!pte)
   return -ENOMEM;
  remap_area_pte(pte, address, end - address, address + phys_addr, flags);
  /* Advance to the start of the next pmd-covered region. */
  address = (address + PMD_SIZE) & PMD_MASK;
  pmd++;
 } while (address && (address < end));
 return 0;
}
/*
 * Map the kernel virtual range [address, address+size) onto phys_addr
 * in init_mm, walking pgd -> pmd -> pte.  Flushes caches before and the
 * TLB after the update.  Returns 0 or -ENOMEM.
 */
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 unsigned long size, unsigned long flags)
{
 int error;
 pgd_t * dir;
 unsigned long end = address + size;
 /* Bias phys_addr so (phys_addr + address) stays correct as we walk. */
 phys_addr -= address;
 dir = pgd_offset(&init_mm, address);
 flush_cache_all();
 if (address >= end)
  BUG();
 spin_lock(&init_mm.page_table_lock);
 do {
  pmd_t *pmd;
  pmd = pmd_alloc(&init_mm, dir, address);
  error = -ENOMEM;
  if (!pmd)
   break;
  if (remap_area_pmd(pmd, address, end - address,
    phys_addr + address, flags))
   break;
  error = 0;
  address = (address + PGDIR_SIZE) & PGDIR_MASK;
  dir++;
 } while (address && (address < end));
 spin_unlock(&init_mm.page_table_lock);
 flush_tlb_all();
 return error;
}
/*
* Generic mapping function (not visible outside):
*/
/*
* Remap an arbitrary physical address space into the kernel virtual
* address space. Needed when the kernel wants to access high addresses
* directly.
*
* NOTE! We need to allow non-page-aligned mappings too: we will obviously
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
/*
 * Map a physical address range into kernel virtual space with the given
 * page-protection flags.  Returns the virtual address (offset within
 * the first page preserved) or NULL on failure.  The low PCI/ISA hole
 * is returned directly via phys_to_virt; normal RAM may only be
 * remapped if every page in the range is reserved.
 */
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
 void * addr;
 struct vm_struct * area;
 unsigned long offset, last_addr;
 /* Don't allow wraparound or zero size */
 last_addr = phys_addr + size - 1;
 if (!size || last_addr < phys_addr)
  return NULL;
 /*
  * Don't remap the low PCI/ISA area, it's always mapped..
  */
 if (phys_addr >= 0xA0000 && last_addr < 0x100000)
  return phys_to_virt(phys_addr);
 /*
  * Don't allow anybody to remap normal RAM that we're using..
  */
 if (phys_addr < virt_to_phys(high_memory)) {
  char *t_addr, *t_end;
  struct page *page;
  t_addr = __va(phys_addr);
  t_end = t_addr + (size - 1);
  for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
   if(!PageReserved(page))
    return NULL;
 }
 /*
  * Mappings have to be page-aligned
  */
 offset = phys_addr & ~PAGE_MASK;
 phys_addr &= PAGE_MASK;
 /*
  * Round the *end* of the range (last_addr + 1) up to a page boundary.
  * Using PAGE_ALIGN(last_addr) alone drops the final page whenever
  * last_addr is exactly page-aligned (e.g. size == PAGE_SIZE + 1).
  */
 size = PAGE_ALIGN(last_addr + 1) - phys_addr;
 /*
  * Ok, go for it..
  */
 area = get_vm_area(size, VM_IOREMAP);
 if (!area)
  return NULL;
 addr = area->addr;
 if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
  vfree(addr);
  return NULL;
 }
 return (void *) (offset + (char *)addr);
}
/*
 * Release a mapping obtained from __ioremap().  Addresses at or below
 * high_memory were handed out straight from phys_to_virt (the low
 * PCI/ISA hole) and were never vmalloc-mapped, so nothing is freed
 * for them.
 */
void iounmap(void *addr)
{
 if (addr <= high_memory)
  return;
 vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
/*
 * Boot-time ioremap: map a physical range using the fixed FIX_BTMAP
 * fixmap slots, for use before the normal vmalloc-based ioremap works.
 * Returns a virtual address inside the fixmap window or NULL if the
 * request is invalid or larger than NR_FIX_BTMAPS pages.
 */
void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
 unsigned long offset, last_addr;
 unsigned int nrpages;
 enum fixed_addresses idx;
 /* Don't allow wraparound or zero size */
 last_addr = phys_addr + size - 1;
 if (!size || last_addr < phys_addr)
  return NULL;
 /*
  * Don't remap the low PCI/ISA area, it's always mapped..
  */
 if (phys_addr >= 0xA0000 && last_addr < 0x100000)
  return phys_to_virt(phys_addr);
 /*
  * Mappings have to be page-aligned
  */
 offset = phys_addr & ~PAGE_MASK;
 phys_addr &= PAGE_MASK;
 /*
  * Round the end of the range (last_addr + 1) up to a page boundary;
  * PAGE_ALIGN(last_addr) would lose the final page when last_addr is
  * itself page-aligned.
  */
 size = PAGE_ALIGN(last_addr + 1) - phys_addr;
 /*
  * Mappings have to fit in the FIX_BTMAP area.
  */
 nrpages = size >> PAGE_SHIFT;
 if (nrpages > NR_FIX_BTMAPS)
  return NULL;
 /*
  * Ok, go for it..
  */
 idx = FIX_BTMAP_BEGIN;
 while (nrpages > 0) {
  set_fixmap(idx, phys_addr);
  phys_addr += PAGE_SIZE;
  /* Fixmap indices grow downward in virtual address space. */
  --idx;
  --nrpages;
 }
 return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}
/*
 * Tear down a boot-time mapping created by bt_ioremap() by clearing the
 * FIX_BTMAP fixmap slots it occupied.  Addresses below the fixmap
 * window (i.e. the phys_to_virt ISA case) are ignored.
 */
void __init bt_iounmap(void *addr, unsigned long size)
{
 unsigned long virt_addr;
 unsigned long offset;
 unsigned int nrpages;
 enum fixed_addresses idx;
 virt_addr = (unsigned long)addr;
 if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
  return;
 offset = virt_addr & ~PAGE_MASK;
 /*
  * NOTE(review): PAGE_ALIGN(offset + size - 1) under-counts by one
  * page when offset + size is an exact page multiple — verify against
  * how bt_ioremap sized the mapping.
  */
 nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
 idx = FIX_BTMAP_BEGIN;
 while (nrpages > 0) {
  /* Clear the fixmap pte (prot 0 == not present). */
  __set_fixmap(idx, 0, __pgprot(0));
  --idx;
  --nrpages;
 }
}
|
dduval/kernel-rhel3
|
arch/i386/mm/ioremap.c
|
C
|
gpl-2.0
| 5,360
|
/*
* linux/kernel/fork.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* 'fork.c' contains the help-routines for the 'fork' system call
* (see also entry.S and others).
* Fork is rather simple, once you get the hang of it, but the memory
* management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
*/
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/mnt_namespace.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks; /* Handle normal Linux uptimes. */
int nr_threads; /* The idle threads do not count.. */
int max_threads; /* tunable limit on nr_threads */
/* Per-CPU count of processes, summed by nr_processes(). */
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
int nr_processes(void)
{
int cpu;
int total = 0;
for_each_online_cpu(cpu)
total += per_cpu(process_counts, cpu);
return total;
}
/* Default task_struct allocator unless the arch supplies its own. */
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct() kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk) kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif
/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;
/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;
/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;
/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;
/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;
/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
/*
 * Final disposal of a task: free its kernel stack/thread_info and the
 * task_struct itself.  Called once the last reference is gone.
 */
void free_task(struct task_struct *tsk)
{
 free_thread_info(tsk->stack);
 rt_mutex_debug_task_free(tsk);
 free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
/*
 * Drop the last reference to a task_struct: release security state,
 * uid/group references and delay-accounting data, then free the task
 * unless a profiling handler has taken over ownership.
 */
void __put_task_struct(struct task_struct *tsk)
{
 /* Must already be dead, unreferenced, and not be the caller itself. */
 WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
 WARN_ON(atomic_read(&tsk->usage));
 WARN_ON(tsk == current);
 security_task_free(tsk);
 free_uid(tsk->user);
 put_group_info(tsk->group_info);
 delayacct_tsk_free(tsk);
 /* profile_handoff_task() returning nonzero means it now owns tsk. */
 if (!profile_handoff_task(tsk))
  free_task(tsk);
}
/*
 * Boot-time fork setup: create the task_struct slab (if the arch does
 * not provide its own allocator) and derive max_threads and the
 * corresponding RLIMIT_NPROC/RLIMIT_SIGPENDING defaults from the
 * amount of memory (mempages) available.
 */
void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN L1_CACHE_BYTES
#endif
 /* create a slab on which task_structs can be allocated */
 task_struct_cachep =
  kmem_cache_create("task_struct", sizeof(struct task_struct),
   ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL);
#endif
 /*
  * The default maximum number of threads is set to a safe
  * value: the thread structures can take up at most half
  * of memory.
  */
 max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
 /*
  * we need to allow at least 20 threads to boot a system
  */
 if(max_threads < 20)
  max_threads = 20;
 init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
 init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
 init_task.signal->rlim[RLIMIT_SIGPENDING] =
  init_task.signal->rlim[RLIMIT_NPROC];
}
/*
 * Allocate a new task_struct + thread_info pair and initialize them as
 * a copy of orig.  Returns the new task with a usage count of 2, or
 * NULL on allocation failure.
 */
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
 struct task_struct *tsk;
 struct thread_info *ti;
 /* Let the arch flush/park FPU state etc. before we copy orig. */
 prepare_to_copy(orig);
 tsk = alloc_task_struct();
 if (!tsk)
  return NULL;
 ti = alloc_thread_info(tsk);
 if (!ti) {
  free_task_struct(tsk);
  return NULL;
 }
 /* Struct copy of the whole task, then point it at its own stack. */
 *tsk = *orig;
 tsk->stack = ti;
 setup_thread_stack(tsk, orig);
#ifdef CONFIG_CC_STACKPROTECTOR
 /* Fresh stack canary; must not inherit the parent's. */
 tsk->stack_canary = get_random_int();
#endif
 /* One for us, one for whoever does the "release_task()" (usually parent) */
 atomic_set(&tsk->usage,2);
 atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
 tsk->btrace_seq = 0;
#endif
 tsk->splice_pipe = NULL;
 return tsk;
}
#ifdef CONFIG_MMU
/*
 * Duplicate oldmm's VMA list and page tables into the freshly created
 * mm.  Holds both mmap_sems for writing (mm's nested, since it is not
 * yet visible).  VM_DONTCOPY areas are skipped; file-backed VMAs are
 * linked into the file's prio tree.  Returns 0 or -ENOMEM.
 */
static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
 struct vm_area_struct *mpnt, *tmp, **pprev;
 struct rb_node **rb_link, *rb_parent;
 int retval;
 unsigned long charge;
 struct mempolicy *pol;
 down_write(&oldmm->mmap_sem);
 flush_cache_dup_mm(oldmm);
 /*
  * Not linked in yet - no deadlock potential:
  */
 down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
 /* Reset the new mm's mapping state before rebuilding it. */
 mm->locked_vm = 0;
 mm->mmap = NULL;
 mm->mmap_cache = NULL;
 mm->free_area_cache = oldmm->mmap_base;
 mm->cached_hole_size = ~0UL;
 mm->map_count = 0;
 cpus_clear(mm->cpu_vm_mask);
 mm->mm_rb = RB_ROOT;
 rb_link = &mm->mm_rb.rb_node;
 rb_parent = NULL;
 pprev = &mm->mmap;
 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
  struct file *file;
  if (mpnt->vm_flags & VM_DONTCOPY) {
   /* Skipped area: the child's VM totals must not include it. */
   long pages = vma_pages(mpnt);
   mm->total_vm -= pages;
   vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
     -pages);
   continue;
  }
  charge = 0;
  if (mpnt->vm_flags & VM_ACCOUNT) {
   /* Charge the accounted pages up front; undone on failure. */
   unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
   if (security_vm_enough_memory(len))
    goto fail_nomem;
   charge = len;
  }
  tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
  if (!tmp)
   goto fail_nomem;
  *tmp = *mpnt;
  pol = mpol_copy(vma_policy(mpnt));
  retval = PTR_ERR(pol);
  if (IS_ERR(pol))
   goto fail_nomem_policy;
  vma_set_policy(tmp, pol);
  /* mlock state is not inherited by the child. */
  tmp->vm_flags &= ~VM_LOCKED;
  tmp->vm_mm = mm;
  tmp->vm_next = NULL;
  anon_vma_link(tmp);
  file = tmp->vm_file;
  if (file) {
   struct inode *inode = file->f_path.dentry->d_inode;
   get_file(file);
   if (tmp->vm_flags & VM_DENYWRITE)
    atomic_dec(&inode->i_writecount);
   /* insert tmp into the share list, just after mpnt */
   spin_lock(&file->f_mapping->i_mmap_lock);
   tmp->vm_truncate_count = mpnt->vm_truncate_count;
   flush_dcache_mmap_lock(file->f_mapping);
   vma_prio_tree_add(tmp, mpnt);
   flush_dcache_mmap_unlock(file->f_mapping);
   spin_unlock(&file->f_mapping->i_mmap_lock);
  }
  /*
   * Link in the new vma and copy the page table entries.
   */
  *pprev = tmp;
  pprev = &tmp->vm_next;
  __vma_link_rb(mm, tmp, rb_link, rb_parent);
  rb_link = &tmp->vm_rb.rb_right;
  rb_parent = &tmp->vm_rb;
  mm->map_count++;
  retval = copy_page_range(mm, oldmm, mpnt);
  /* ->open is called even if copy_page_range failed: tmp is linked. */
  if (tmp->vm_ops && tmp->vm_ops->open)
   tmp->vm_ops->open(tmp);
  if (retval)
   goto out;
 }
 /* a new mm has just been created */
 arch_dup_mmap(oldmm, mm);
 retval = 0;
out:
 up_write(&mm->mmap_sem);
 flush_tlb_mm(oldmm);
 up_write(&oldmm->mmap_sem);
 return retval;
fail_nomem_policy:
 kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
 retval = -ENOMEM;
 vm_unacct_memory(charge);
 goto out;
}
/* Allocate the page directory for mm; 0 on success, -ENOMEM on failure. */
static inline int mm_alloc_pgd(struct mm_struct * mm)
{
 mm->pgd = pgd_alloc(mm);
 if (mm->pgd)
  return 0;
 return -ENOMEM;
}
/* Release the page directory allocated by mm_alloc_pgd(). */
static inline void mm_free_pgd(struct mm_struct * mm)
{
 pgd_free(mm->pgd);
}
#else
/* !CONFIG_MMU: no page tables to duplicate or free. */
#define dup_mmap(mm, oldmm) (0)
#define mm_alloc_pgd(mm) (0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
/* Protects the global list of mms that own swapped-out pages. */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
/* Raw mm_struct slab alloc/free; callers must pair with mm_init(). */
#define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
#include <linux/init_task.h>
/*
 * Initialize a raw mm_struct (counters, locks, lists) and allocate its
 * page directory.  Returns mm on success; frees mm and returns NULL if
 * the pgd allocation fails.
 */
static struct mm_struct * mm_init(struct mm_struct * mm)
{
 atomic_set(&mm->mm_users, 1);
 atomic_set(&mm->mm_count, 1);
 init_rwsem(&mm->mmap_sem);
 INIT_LIST_HEAD(&mm->mmlist);
 mm->core_waiters = 0;
 mm->nr_ptes = 0;
 set_mm_counter(mm, file_rss, 0);
 set_mm_counter(mm, anon_rss, 0);
 spin_lock_init(&mm->page_table_lock);
 rwlock_init(&mm->ioctx_list_lock);
 mm->ioctx_list = NULL;
 mm->free_area_cache = TASK_UNMAPPED_BASE;
 mm->cached_hole_size = ~0UL;
 if (likely(!mm_alloc_pgd(mm))) {
  mm->def_flags = 0;
  return mm;
 }
 /* pgd allocation failed: the half-initialized mm goes back to slab. */
 free_mm(mm);
 return NULL;
}
/*
* Allocate and initialize an mm_struct.
*/
struct mm_struct * mm_alloc(void)
{
struct mm_struct * mm;
mm = allocate_mm();
if (mm) {
memset(mm, 0, sizeof(*mm));
mm = mm_init(mm);
}
return mm;
}
/*
* Called when the last reference to the mm
* is dropped: either by a lazy thread or by
* mmput. Free the page directory and the mm.
*/
/*
 * Called when mm_count hits zero: free the page directory, tear down
 * the arch MMU context, and return the mm_struct to its slab.  Must
 * never be invoked on init_mm.
 */
void fastcall __mmdrop(struct mm_struct *mm)
{
 BUG_ON(mm == &init_mm);
 mm_free_pgd(mm);
 destroy_context(mm);
 free_mm(mm);
}
/*
* Decrement the use count and release all resources for an mm.
*/
/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
 might_sleep();
 if (atomic_dec_and_test(&mm->mm_users)) {
  /* Last user: tear down AIO contexts and all mappings. */
  exit_aio(mm);
  exit_mmap(mm);
  /* Unhook from the global mmlist if we were ever on it. */
  if (!list_empty(&mm->mmlist)) {
   spin_lock(&mmlist_lock);
   list_del(&mm->mmlist);
   spin_unlock(&mmlist_lock);
  }
  put_swap_token(mm);
  /* Drops mm_count; frees the mm via __mmdrop() if it hits zero. */
  mmdrop(mm);
 }
}
EXPORT_SYMBOL_GPL(mmput);
/**
* get_task_mm - acquire a reference to the task's mm
*
* Returns %NULL if the task has no mm. Checks PF_BORROWED_MM (meaning
* this kernel workthread has transiently adopted a user mm with use_mm,
* to do its AIO) is not set and if so returns a reference to it, after
* bumping up the use count. User must release the mm via mmput()
* after use. Typically used by /proc and ptrace.
*/
/*
 * See the kernel-doc comment above: returns task->mm with its user
 * count bumped, or NULL for kernel threads / borrowed mms.  task_lock
 * keeps ->mm stable while we take the reference.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
 struct mm_struct *mm;
 task_lock(task);
 mm = task->mm;
 if (mm) {
  /* A borrowed mm (kernel thread doing AIO) is not handed out. */
  if (task->flags & PF_BORROWED_MM)
   mm = NULL;
  else
   atomic_inc(&mm->mm_users);
 }
 task_unlock(task);
 return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
/* Please note the differences between mmput and mm_release.
* mmput is called whenever we stop holding onto a mm_struct,
* error success whatever.
*
* mm_release is called after a mm_struct has been removed
* from the current process.
*
* This difference is important for error handling, when we
* only half set up a mm_struct for a new process and need to restore
* the old one. Because we mmput the new mm_struct before
* restoring the old one. . .
* Eric Biederman 10 January 1998
*/
/*
 * Detach tsk from mm (see the long comment above on how this differs
 * from mmput): drop cached register state, wake a parent waiting in
 * vfork(), and honour CLONE_CHILD_CLEARTID on normal exit.
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
 struct completion *vfork_done = tsk->vfork_done;
 /* Get rid of any cached register state */
 deactivate_mm(tsk, mm);
 /* notify parent sleeping on vfork() */
 if (vfork_done) {
  tsk->vfork_done = NULL;
  complete(vfork_done);
 }
 /*
  * If we're exiting normally, clear a user-space tid field if
  * requested. We leave this alone when dying by signal, to leave
  * the value intact in a core dump, and to save the unnecessary
  * trouble otherwise. Userland only wants this done for a sys_exit.
  */
 if (tsk->clear_child_tid
  && !(tsk->flags & PF_SIGNALED)
  && atomic_read(&mm->mm_users) > 1) {
  u32 __user * tidptr = tsk->clear_child_tid;
  tsk->clear_child_tid = NULL;
  /*
   * We don't check the error code - if userspace has
   * not set up a proper pointer then tough luck.
   */
  put_user(0, tidptr);
  /* Wake any thread futex-waiting on the tid word (pthread_join). */
  sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
 }
}
/*
* Allocate a new mm structure and copy contents from the
* mm structure of the passed in task structure.
*/
/*
 * Clone current->mm for tsk: struct-copy the mm, re-run mm_init(), set
 * up a fresh MMU context, then duplicate all mappings via dup_mmap().
 * Returns the new mm or NULL on failure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
 struct mm_struct *mm, *oldmm = current->mm;
 int err;
 if (!oldmm)
  return NULL;
 mm = allocate_mm();
 if (!mm)
  goto fail_nomem;
 memcpy(mm, oldmm, sizeof(*mm));
 /* Initializing for Swap token stuff */
 mm->token_priority = 0;
 mm->last_interval = 0;
 if (!mm_init(mm))
  goto fail_nomem;
 if (init_new_context(tsk, mm))
  goto fail_nocontext;
 err = dup_mmap(mm, oldmm);
 if (err)
  goto free_pt;
 /* Start the high-water marks from the copied mm's current usage. */
 mm->hiwater_rss = get_mm_rss(mm);
 mm->hiwater_vm = mm->total_vm;
 return mm;
free_pt:
 mmput(mm);
fail_nomem:
 return NULL;
fail_nocontext:
 /*
  * If init_new_context() failed, we cannot use mmput() to free the mm
  * because it calls destroy_context()
  */
 mm_free_pgd(mm);
 free_mm(mm);
 return NULL;
}
/*
 * Set up tsk's mm for fork/clone: kernel threads get no mm, CLONE_VM
 * shares the parent's mm, otherwise the parent's mm is duplicated via
 * dup_mm().  Returns 0 or -ENOMEM.
 */
static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
 struct mm_struct * mm, *oldmm;
 int retval;
 tsk->min_flt = tsk->maj_flt = 0;
 tsk->nvcsw = tsk->nivcsw = 0;
 tsk->mm = NULL;
 tsk->active_mm = NULL;
 /*
  * Are we cloning a kernel thread?
  *
  * We need to steal a active VM for that..
  */
 oldmm = current->mm;
 if (!oldmm)
  return 0;
 if (clone_flags & CLONE_VM) {
  atomic_inc(&oldmm->mm_users);
  mm = oldmm;
  goto good_mm;
 }
 retval = -ENOMEM;
 mm = dup_mm(tsk);
 if (!mm)
  goto fail_nomem;
good_mm:
 /* Initializing for Swap token stuff */
 mm->token_priority = 0;
 mm->last_interval = 0;
 tsk->mm = mm;
 tsk->active_mm = mm;
 return 0;
fail_nomem:
 return retval;
}
/*
 * Allocate a new fs_struct as a copy of old: same umask, and fresh
 * references on root, pwd and (if set) altroot dentries/mounts.
 * Returns NULL on allocation failure.
 */
static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
 /* We don't need to lock fs - think why ;-) */
 if (fs) {
  atomic_set(&fs->count, 1);
  rwlock_init(&fs->lock);
  fs->umask = old->umask;
  /* old must be read-locked so root/pwd can't change under us. */
  read_lock(&old->lock);
  fs->rootmnt = mntget(old->rootmnt);
  fs->root = dget(old->root);
  fs->pwdmnt = mntget(old->pwdmnt);
  fs->pwd = dget(old->pwd);
  if (old->altroot) {
   fs->altrootmnt = mntget(old->altrootmnt);
   fs->altroot = dget(old->altroot);
  } else {
   fs->altrootmnt = NULL;
   fs->altroot = NULL;
  }
  read_unlock(&old->lock);
 }
 return fs;
}
/* Exported wrapper around __copy_fs_struct() for module use. */
struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
 return __copy_fs_struct(old);
}
EXPORT_SYMBOL_GPL(copy_fs_struct);
/*
 * Give tsk its filesystem context: CLONE_FS shares current's fs_struct
 * (reference bump only), otherwise a private copy is made.  Returns 0
 * or -ENOMEM.  (The '&current' below repairs a mis-encoded '¤t'
 * that had corrupted the address-of operator.)
 */
static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
{
 if (clone_flags & CLONE_FS) {
  atomic_inc(&current->fs->count);
  return 0;
 }
 tsk->fs = __copy_fs_struct(current->fs);
 if (!tsk->fs)
  return -ENOMEM;
 return 0;
}
/*
 * Return the number of fd slots in use, rounded up to a whole
 * long-sized word of the open-fd bitmap.
 */
static int count_open_files(struct fdtable *fdt)
{
 int i = fdt->max_fds / (8 * sizeof(long));

 /* Walk the open-fd bitmap backwards to the last non-empty word. */
 while (i > 0 && !fdt->open_fds->fds_bits[--i])
  ;
 return (i + 1) * 8 * sizeof(long);
}
/*
 * Allocate a files_struct with its embedded default-sized fdtable
 * (NR_OPEN_DEFAULT slots) pointing at the inline fd array and bitmaps.
 * Returns NULL on allocation failure.
 */
static struct files_struct *alloc_files(void)
{
 struct files_struct *newf;
 struct fdtable *fdt;
 newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
 if (!newf)
  goto out;
 atomic_set(&newf->count, 1);
 spin_lock_init(&newf->file_lock);
 newf->next_fd = 0;
 fdt = &newf->fdtab;
 fdt->max_fds = NR_OPEN_DEFAULT;
 /* The embedded table uses the inline bitmaps and fd array. */
 fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
 fdt->open_fds = (fd_set *)&newf->open_fds_init;
 fdt->fd = &newf->fd_array[0];
 INIT_RCU_HEAD(&fdt->rcu);
 fdt->next = NULL;
 /* Publish the fdtable pointer for RCU readers. */
 rcu_assign_pointer(newf->fdt, fdt);
out:
 return newf;
}
/*
* Allocate a new files structure and copy contents from the
* passed in files structure.
* errorp will be valid only when the returned files_struct is NULL.
*/
/*
 * Duplicate oldf into a new files_struct (see comment above): grows the
 * new fdtable if needed, takes a reference on every open file, and
 * clears any fd that was claimed in the bitmap but not yet installed.
 * Returns the new table or NULL with *errorp set.
 */
static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
 struct files_struct *newf;
 struct file **old_fds, **new_fds;
 int open_files, size, i;
 struct fdtable *old_fdt, *new_fdt;
 *errorp = -ENOMEM;
 newf = alloc_files();
 if (!newf)
  goto out;
 spin_lock(&oldf->file_lock);
 old_fdt = files_fdtable(oldf);
 new_fdt = files_fdtable(newf);
 open_files = count_open_files(old_fdt);
 /*
  * Check whether we need to allocate a larger fd array and fd set.
  * Note: we're not a clone task, so the open count won't change.
  */
 if (open_files > new_fdt->max_fds) {
  new_fdt->max_fds = 0;
  /* Drop oldf's lock while expanding: expand_files may sleep. */
  spin_unlock(&oldf->file_lock);
  spin_lock(&newf->file_lock);
  *errorp = expand_files(newf, open_files-1);
  spin_unlock(&newf->file_lock);
  if (*errorp < 0)
   goto out_release;
  new_fdt = files_fdtable(newf);
  /*
   * Reacquire the oldf lock and a pointer to its fd table
   * who knows it may have a new bigger fd table. We need
   * the latest pointer.
   */
  spin_lock(&oldf->file_lock);
  old_fdt = files_fdtable(oldf);
 }
 old_fds = old_fdt->fd;
 new_fds = new_fdt->fd;
 /* Copy the open and close-on-exec bitmaps for the used range. */
 memcpy(new_fdt->open_fds->fds_bits,
  old_fdt->open_fds->fds_bits, open_files/8);
 memcpy(new_fdt->close_on_exec->fds_bits,
  old_fdt->close_on_exec->fds_bits, open_files/8);
 for (i = open_files; i != 0; i--) {
  struct file *f = *old_fds++;
  if (f) {
   get_file(f);
  } else {
   /*
    * The fd may be claimed in the fd bitmap but not yet
    * instantiated in the files array if a sibling thread
    * is partway through open(). So make sure that this
    * fd is available to the new process.
    */
   FD_CLR(open_files - i, new_fdt->open_fds);
  }
  rcu_assign_pointer(*new_fds++, f);
 }
 spin_unlock(&oldf->file_lock);
 /* compute the remainder to be cleared */
 size = (new_fdt->max_fds - open_files) * sizeof(struct file *);
 /* This is long word aligned thus could use a optimized version */
 memset(new_fds, 0, size);
 if (new_fdt->max_fds > open_files) {
  int left = (new_fdt->max_fds-open_files)/8;
  int start = open_files / (8 * sizeof(unsigned long));
  memset(&new_fdt->open_fds->fds_bits[start], 0, left);
  memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
 }
 return newf;
out_release:
 kmem_cache_free(files_cachep, newf);
out:
 return NULL;
}
/*
 * Give tsk its file table: none for fd-less tasks, a shared reference
 * for CLONE_FILES, otherwise a full copy via dup_fd().  Returns 0 or a
 * negative errno.
 */
static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
 struct files_struct *oldf, *newf;
 int error = 0;
 /*
  * A background process may not have any files ...
  */
 oldf = current->files;
 if (!oldf)
  goto out;
 if (clone_flags & CLONE_FILES) {
  atomic_inc(&oldf->count);
  goto out;
 }
 /*
  * Note: we may be using current for both targets (See exec.c)
  * This works because we cache current->files (old) as oldf. Don't
  * break this.
  */
 tsk->files = NULL;
 newf = dup_fd(oldf, &error);
 if (!newf)
  goto out;
 tsk->files = newf;
 error = 0;
out:
 return error;
}
/*
* Helper to unshare the files of the current task.
* We don't want to expose copy_files internals to
* the exec layer of the kernel.
*/
/*
 * Un-share current's file table (see comment above): if we are the sole
 * user, just bump the count; otherwise copy the table via copy_files()
 * and restore the original on failure.  Returns 0 or a negative errno.
 */
int unshare_files(void)
{
 struct files_struct *files = current->files;
 int rc;
 BUG_ON(!files);
 /* This can race but the race causes us to copy when we don't
  need to and drop the copy */
 if(atomic_read(&files->count) == 1)
 {
  atomic_inc(&files->count);
  return 0;
 }
 rc = copy_files(0, current);
 if(rc)
  current->files = files;
 return rc;
}
EXPORT_SYMBOL(unshare_files);
/*
 * Give tsk its signal-handler table: CLONE_SIGHAND/CLONE_THREAD share
 * current's sighand_struct (reference bump), otherwise the handler
 * array is copied into a fresh allocation.  Returns 0 or -ENOMEM.
 * (The '&current' below repairs a mis-encoded '¤t' that had
 * corrupted the address-of operator.)
 */
static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
 struct sighand_struct *sig;
 if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
  atomic_inc(&current->sighand->count);
  return 0;
 }
 sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
 /* Published (possibly NULL) before the check, as RCU readers expect. */
 rcu_assign_pointer(tsk->sighand, sig);
 if (!sig)
  return -ENOMEM;
 atomic_set(&sig->count, 1);
 memcpy(sig->action, current->sighand->action, sizeof(sig->action));
 return 0;
}
/* Drop one reference on a sighand_struct; free it on the last one. */
void __cleanup_sighand(struct sighand_struct *sighand)
{
 if (atomic_dec_and_test(&sighand->count))
  kmem_cache_free(sighand_cachep, sighand);
}
static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
{
struct signal_struct *sig;
int ret;
if (clone_flags & CLONE_THREAD) {
atomic_inc(¤t->signal->count);
atomic_inc(¤t->signal->live);
return 0;
}
sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
tsk->signal = sig;
if (!sig)
return -ENOMEM;
ret = copy_thread_group_keys(tsk);
if (ret < 0) {
kmem_cache_free(signal_cachep, sig);
return ret;
}
atomic_set(&sig->count, 1);
atomic_set(&sig->live, 1);
init_waitqueue_head(&sig->wait_chldexit);
sig->flags = 0;
sig->group_exit_code = 0;
sig->group_exit_task = NULL;
sig->group_stop_count = 0;
sig->curr_target = NULL;
init_sigpending(&sig->shared_pending);
INIT_LIST_HEAD(&sig->posix_timers);
hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
sig->it_real_incr.tv64 = 0;
sig->real_timer.function = it_real_fn;
sig->tsk = tsk;
sig->it_virt_expires = cputime_zero;
sig->it_virt_incr = cputime_zero;
sig->it_prof_expires = cputime_zero;
sig->it_prof_incr = cputime_zero;
sig->leader = 0; /* session leadership doesn't inherit */
sig->tty_old_pgrp = NULL;
sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
sig->sched_time = 0;
INIT_LIST_HEAD(&sig->cpu_timers[0]);
INIT_LIST_HEAD(&sig->cpu_timers[1]);
INIT_LIST_HEAD(&sig->cpu_timers[2]);
taskstats_tgid_init(sig);
task_lock(current->group_leader);
memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
task_unlock(current->group_leader);
if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
/*
* New sole thread in the process gets an expiry time
* of the whole CPU time limit.
*/
tsk->it_prof_expires =
secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
}
acct_init_pacct(&sig->pacct);
return 0;
}
/* Free a signal_struct: drop its thread-group keys, then the slab. */
void __cleanup_signal(struct signal_struct *sig)
{
 exit_thread_group_keys(sig);
 kmem_cache_free(signal_cachep, sig);
}
/*
 * Drop tsk's live and count references on its signal_struct, freeing
 * it when the last count reference goes away.
 */
static inline void cleanup_signal(struct task_struct *tsk)
{
 struct signal_struct *sig = tsk->signal;
 atomic_dec(&sig->live);
 if (atomic_dec_and_test(&sig->count))
  __cleanup_signal(sig);
}
/*
 * Derive the child's task flags from the parent's: strip PF_SUPERPRIV
 * and PF_NOFREEZE, mark it PF_FORKNOEXEC, and clear ptrace state
 * unless CLONE_PTRACE was requested.
 */
static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
 unsigned long flags =
  (p->flags & ~(PF_SUPERPRIV | PF_NOFREEZE)) | PF_FORKNOEXEC;

 if (!(clone_flags & CLONE_PTRACE))
  p->ptrace = 0;
 p->flags = flags;
}
/*
 * set_tid_address(2): record the user-space address to be zeroed and
 * futex-woken when this task exits (see mm_release()); returns the pid.
 */
asmlinkage long sys_set_tid_address(int __user *tidptr)
{
 current->clear_child_tid = tidptr;
 return current->pid;
}
/*
 * Initialize the priority-inheritance state of a new task: the pi_lock
 * always, and the PI waiter list / blocked-on pointer when rt-mutexes
 * are configured.
 */
static inline void rt_mutex_init_task(struct task_struct *p)
{
 spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
 plist_head_init(&p->pi_waiters, &p->pi_lock);
 p->pi_blocked_on = NULL;
#endif
}
/*
* This creates a new process as a copy of the old one,
* but does not actually start it yet.
*
* It copies the registers, and all the appropriate
* parts of the process environment (as per the clone
* flags). The actual kick-off is left to the caller.
*/
static struct task_struct *copy_process(unsigned long clone_flags,
unsigned long stack_start,
struct pt_regs *regs,
unsigned long stack_size,
int __user *parent_tidptr,
int __user *child_tidptr,
struct pid *pid)
{
int retval;
struct task_struct *p = NULL;
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
return ERR_PTR(-EINVAL);
/*
* Thread groups must share signals as well, and detached threads
* can only be started up within the thread group.
*/
if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
return ERR_PTR(-EINVAL);
/*
* Shared signal handlers imply shared VM. By way of the above,
* thread groups also imply shared VM. Blocking this case allows
* for various simplifications in other code.
*/
if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
return ERR_PTR(-EINVAL);
retval = security_task_create(clone_flags);
if (retval)
goto fork_out;
retval = -ENOMEM;
p = dup_task_struct(current);
if (!p)
goto fork_out;
rt_mutex_init_task(p);
#ifdef CONFIG_TRACE_IRQFLAGS
DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
retval = -EAGAIN;
if (atomic_read(&p->user->processes) >=
p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
p->user != &root_user)
goto bad_fork_free;
}
atomic_inc(&p->user->__count);
atomic_inc(&p->user->processes);
get_group_info(p->group_info);
/*
* If multiple threads are within copy_process(), then this check
* triggers too late. This doesn't hurt, the check is only there
* to stop root fork bombs.
*/
if (nr_threads >= max_threads)
goto bad_fork_cleanup_count;
if (!try_module_get(task_thread_info(p)->exec_domain->module))
goto bad_fork_cleanup_count;
if (p->binfmt && !try_module_get(p->binfmt->module))
goto bad_fork_cleanup_put_domain;
p->did_exec = 0;
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
copy_flags(clone_flags, p);
p->pid = pid_nr(pid);
retval = -EFAULT;
if (clone_flags & CLONE_PARENT_SETTID)
if (put_user(p->pid, parent_tidptr))
goto bad_fork_cleanup_delays_binfmt;
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
p->vfork_done = NULL;
spin_lock_init(&p->alloc_lock);
clear_tsk_thread_flag(p, TIF_SIGPENDING);
init_sigpending(&p->pending);
p->utime = cputime_zero;
p->stime = cputime_zero;
p->sched_time = 0;
#ifdef CONFIG_TASK_XACCT
p->rchar = 0; /* I/O counter: bytes read */
p->wchar = 0; /* I/O counter: bytes written */
p->syscr = 0; /* I/O counter: read syscalls */
p->syscw = 0; /* I/O counter: write syscalls */
#endif
task_io_accounting_init(p);
acct_clear_integrals(p);
p->it_virt_expires = cputime_zero;
p->it_prof_expires = cputime_zero;
p->it_sched_expires = 0;
INIT_LIST_HEAD(&p->cpu_timers[0]);
INIT_LIST_HEAD(&p->cpu_timers[1]);
INIT_LIST_HEAD(&p->cpu_timers[2]);
p->lock_depth = -1; /* -1 = no lock */
do_posix_clock_monotonic_gettime(&p->start_time);
p->security = NULL;
p->io_context = NULL;
p->io_wait = NULL;
p->audit_context = NULL;
cpuset_fork(p);
#ifdef CONFIG_NUMA
p->mempolicy = mpol_copy(p->mempolicy);
if (IS_ERR(p->mempolicy)) {
retval = PTR_ERR(p->mempolicy);
p->mempolicy = NULL;
goto bad_fork_cleanup_cpuset;
}
mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
p->hardirqs_enabled = 1;
#else
p->hardirqs_enabled = 0;
#endif
p->hardirq_enable_ip = 0;
p->hardirq_enable_event = 0;
p->hardirq_disable_ip = _THIS_IP_;
p->hardirq_disable_event = 0;
p->softirqs_enabled = 1;
p->softirq_enable_ip = _THIS_IP_;
p->softirq_enable_event = 0;
p->softirq_disable_ip = 0;
p->softirq_disable_event = 0;
p->hardirq_context = 0;
p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
p->lockdep_depth = 0; /* no locks held yet */
p->curr_chain_key = 0;
p->lockdep_recursion = 0;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
p->blocked_on = NULL; /* not blocked yet */
#endif
p->tgid = p->pid;
if (clone_flags & CLONE_THREAD)
p->tgid = current->tgid;
if ((retval = security_task_alloc(p)))
goto bad_fork_cleanup_policy;
if ((retval = audit_alloc(p)))
goto bad_fork_cleanup_security;
/* copy all the process information */
if ((retval = copy_semundo(clone_flags, p)))
goto bad_fork_cleanup_audit;
if ((retval = copy_files(clone_flags, p)))
goto bad_fork_cleanup_semundo;
if ((retval = copy_fs(clone_flags, p)))
goto bad_fork_cleanup_files;
if ((retval = copy_sighand(clone_flags, p)))
goto bad_fork_cleanup_fs;
if ((retval = copy_signal(clone_flags, p)))
goto bad_fork_cleanup_sighand;
if ((retval = copy_mm(clone_flags, p)))
goto bad_fork_cleanup_signal;
if ((retval = copy_keys(clone_flags, p)))
goto bad_fork_cleanup_mm;
if ((retval = copy_namespaces(clone_flags, p)))
goto bad_fork_cleanup_keys;
retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
if (retval)
goto bad_fork_cleanup_namespaces;
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
/*
* Clear TID on mm_release()?
*/
p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
p->robust_list = NULL;
#ifdef CONFIG_COMPAT
p->compat_robust_list = NULL;
#endif
INIT_LIST_HEAD(&p->pi_state_list);
p->pi_state_cache = NULL;
/*
* sigaltstack should be cleared when sharing the same VM
*/
if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
p->sas_ss_sp = p->sas_ss_size = 0;
/*
* Syscall tracing should be turned off in the child regardless
* of CLONE_PTRACE.
*/
clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
/* Our parent execution domain becomes current domain
These must match for thread signalling to apply */
p->parent_exec_id = p->self_exec_id;
/* ok, now we should be set up.. */
p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
p->pdeath_signal = 0;
p->exit_state = 0;
/*
* Ok, make it visible to the rest of the system.
* We dont wake it up yet.
*/
p->group_leader = p;
INIT_LIST_HEAD(&p->thread_group);
INIT_LIST_HEAD(&p->ptrace_children);
INIT_LIST_HEAD(&p->ptrace_list);
/* Perform scheduler related setup. Assign this task to a CPU. */
sched_fork(p, clone_flags);
/* Need tasklist lock for parent etc handling! */
write_lock_irq(&tasklist_lock);
/* for sys_ioprio_set(IOPRIO_WHO_PGRP) */
p->ioprio = current->ioprio;
/*
* The task hasn't been attached yet, so its cpus_allowed mask will
* not be changed, nor will its assigned CPU.
*
* The cpus_allowed mask of the parent may have changed after it was
* copied first time - so re-copy it here, then check the child's CPU
* to ensure it is on a valid CPU (and if not, just force it back to
* parent's CPU). This avoids alot of nasty races.
*/
p->cpus_allowed = current->cpus_allowed;
if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
!cpu_online(task_cpu(p))))
set_task_cpu(p, smp_processor_id());
/* CLONE_PARENT re-uses the old parent */
if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
p->real_parent = current->real_parent;
else
p->real_parent = current;
p->parent = p->real_parent;
spin_lock(¤t->sighand->siglock);
/*
* Process group and session signals need to be delivered to just the
* parent before the fork or both the parent and the child after the
* fork. Restart if a signal comes in before we add the new process to
* it's process group.
* A fatal signal pending means that current will exit, so the new
* thread can't slip out of an OOM kill (or normal SIGKILL).
*/
recalc_sigpending();
if (signal_pending(current)) {
spin_unlock(¤t->sighand->siglock);
write_unlock_irq(&tasklist_lock);
retval = -ERESTARTNOINTR;
goto bad_fork_cleanup_namespaces;
}
if (clone_flags & CLONE_THREAD) {
p->group_leader = current->group_leader;
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
if (!cputime_eq(current->signal->it_virt_expires,
cputime_zero) ||
!cputime_eq(current->signal->it_prof_expires,
cputime_zero) ||
current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
!list_empty(¤t->signal->cpu_timers[0]) ||
!list_empty(¤t->signal->cpu_timers[1]) ||
!list_empty(¤t->signal->cpu_timers[2])) {
/*
* Have child wake up on its first tick to check
* for process CPU timers.
*/
p->it_prof_expires = jiffies_to_cputime(1);
}
}
if (likely(p->pid)) {
add_parent(p);
if (unlikely(p->ptrace & PT_PTRACED))
__ptrace_link(p, current->parent);
if (thread_group_leader(p)) {
p->signal->tty = current->signal->tty;
p->signal->pgrp = process_group(current);
set_signal_session(p->signal, process_session(current));
attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
attach_pid(p, PIDTYPE_SID, task_session(current));
list_add_tail_rcu(&p->tasks, &init_task.tasks);
__get_cpu_var(process_counts)++;
}
attach_pid(p, PIDTYPE_PID, pid);
nr_threads++;
}
total_forks++;
spin_unlock(¤t->sighand->siglock);
write_unlock_irq(&tasklist_lock);
proc_fork_connector(p);
/** Init the wait queue we added */
init_waitqueue_head(&p->join_queue);
return p;
bad_fork_cleanup_namespaces:
exit_task_namespaces(p);
bad_fork_cleanup_keys:
exit_keys(p);
bad_fork_cleanup_mm:
if (p->mm)
mmput(p->mm);
bad_fork_cleanup_signal:
cleanup_signal(p);
bad_fork_cleanup_sighand:
__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
exit_fs(p); /* blocking */
bad_fork_cleanup_files:
exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
exit_sem(p);
bad_fork_cleanup_audit:
audit_free(p);
bad_fork_cleanup_security:
security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
mpol_free(p->mempolicy);
bad_fork_cleanup_cpuset:
#endif
cpuset_exit(p);
bad_fork_cleanup_delays_binfmt:
delayacct_tsk_free(p);
if (p->binfmt)
module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
put_group_info(p->group_info);
atomic_dec(&p->user->processes);
free_uid(p->user);
bad_fork_free:
free_task(p);
fork_out:
return ERR_PTR(retval);
}
/*
 * Default (weak) implementation: produce a zeroed register frame for an
 * idle task.  Architectures may override this to seed arch-specific state.
 * Returns @regs for call-chaining convenience (see fork_idle()).
 */
noinline struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
    memset(regs, 0, sizeof(struct pt_regs));
    return regs;
}
/*
 * fork_idle - create the idle task for @cpu.
 *
 * Clones with CLONE_VM only (shares the caller's mm), using a zeroed
 * register frame, and attaches the task to the static init_struct_pid so
 * no pid is allocated.  On success the task is initialized as the idle
 * thread for @cpu via init_idle(); on failure the ERR_PTR from
 * copy_process() is returned unchanged.
 */
struct task_struct * __cpuinit fork_idle(int cpu)
{
    struct task_struct *task;
    struct pt_regs regs;

    /* Fixed mojibake: "idle_regs(®s)" was an HTML-entity corruption of
     * "idle_regs(&regs)" — the address of the local frame. */
    task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL,
                &init_struct_pid);
    if (!IS_ERR(task))
        init_idle(task, cpu);
    return task;
}
/*
 * Decide which ptrace event (if any) the tracer asked to be notified of
 * for this clone request.  Returns 0 when the child is explicitly
 * untraced or the tracer did not request the matching PT_TRACE_* event.
 */
static inline int fork_traceflag (unsigned clone_flags)
{
    if (clone_flags & CLONE_UNTRACED)
        return 0;

    /* vfork, non-fork clone, and plain fork each map to their own event */
    if (clone_flags & CLONE_VFORK)
        return (current->ptrace & PT_TRACE_VFORK) ? PTRACE_EVENT_VFORK : 0;

    if ((clone_flags & CSIGNAL) != SIGCHLD)
        return (current->ptrace & PT_TRACE_CLONE) ? PTRACE_EVENT_CLONE : 0;

    return (current->ptrace & PT_TRACE_FORK) ? PTRACE_EVENT_FORK : 0;
}
/*
* Ok, this is the main fork-routine.
*
* It copies the process, and if successful kick-starts
* it and waits for it to finish using the VM if required.
*/
/*
 * do_fork - common entry point for fork/vfork/clone.
 *
 * Allocates the child's pid up front, lets copy_process() build the new
 * task, then wakes it (or leaves it stopped for CLONE_STOPPED) and, for
 * vfork, blocks until the child releases its mm.
 *
 * Returns the child's pid number, or a negative errno.
 */
long do_fork(unsigned long clone_flags,
          unsigned long stack_start,
          struct pt_regs *regs,
          unsigned long stack_size,
          int __user *parent_tidptr,
          int __user *child_tidptr)
{
    struct task_struct *p;
    int trace = 0;
    struct pid *pid = alloc_pid();
    long nr;

    if (!pid)
        return -EAGAIN;
    /* Capture the numeric pid now: p may vanish once it is woken. */
    nr = pid->nr;
    if (unlikely(current->ptrace)) {
        /* Tracer may have asked to intercept this clone: force
         * CLONE_PTRACE so the child starts traced. */
        trace = fork_traceflag (clone_flags);
        if (trace)
            clone_flags |= CLONE_PTRACE;
    }
    p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, pid);
    /*
     * Do this prior waking up the new thread - the thread pointer
     * might get invalid after that point, if the thread exits quickly.
     */
    if (!IS_ERR(p)) {
        struct completion vfork;

        if (clone_flags & CLONE_VFORK) {
            /* Child completes this in mm_release(). */
            p->vfork_done = &vfork;
            init_completion(&vfork);
        }
        if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
            /*
             * We'll start up with an immediate SIGSTOP.
             */
            sigaddset(&p->pending.signal, SIGSTOP);
            set_tsk_thread_flag(p, TIF_SIGPENDING);
        }
        if (!(clone_flags & CLONE_STOPPED))
            wake_up_new_task(p, clone_flags);
        else
            p->state = TASK_STOPPED;
        if (unlikely (trace)) {
            /* Report the requested fork/vfork/clone event to the tracer. */
            current->ptrace_message = nr;
            ptrace_notify ((trace << 8) | SIGTRAP);
        }
        if (clone_flags & CLONE_VFORK) {
            /* Don't let the freezer count us as blocking a freeze
             * while we wait for the child's exec/exit. */
            freezer_do_not_count();
            wait_for_completion(&vfork);
            freezer_count();
            if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) {
                current->ptrace_message = nr;
                ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
            }
        }
    } else {
        /* copy_process() failed: it did not attach the pid, free it here. */
        free_pid(pid);
        nr = PTR_ERR(p);
    }
    return nr;
}
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
/*
 * Slab constructor for sighand_struct: initialize the embedded lock and
 * signalfd list.  Runs once per slab object; required because the cache
 * is SLAB_DESTROY_BY_RCU, so objects may be reused without re-init.
 */
static void sighand_ctor(void *data, struct kmem_cache *cachep,
        unsigned long flags)
{
    struct sighand_struct *sighand = data;

    spin_lock_init(&sighand->siglock);
    INIT_LIST_HEAD(&sighand->signalfd_list);
}
/*
 * Create the slab caches used by process creation (sighand, signal,
 * files, fs, vm_area, mm).  SLAB_PANIC: boot cannot proceed without
 * them, so failure panics rather than returning an error.
 */
void __init proc_caches_init(void)
{
    /* RCU-freed so lock-free signal delivery can race with exit. */
    sighand_cachep = kmem_cache_create("sighand_cache",
            sizeof(struct sighand_struct), 0,
            SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
            sighand_ctor, NULL);
    signal_cachep = kmem_cache_create("signal_cache",
            sizeof(struct signal_struct), 0,
            SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
    files_cachep = kmem_cache_create("files_cache",
            sizeof(struct files_struct), 0,
            SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
    fs_cachep = kmem_cache_create("fs_cache",
            sizeof(struct fs_struct), 0,
            SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
    vm_area_cachep = kmem_cache_create("vm_area_struct",
            sizeof(struct vm_area_struct), 0,
            SLAB_PANIC, NULL, NULL);
    /* mm_struct may need a stricter arch-specific alignment. */
    mm_cachep = kmem_cache_create("mm_struct",
            sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
            SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
}
/*
* Check constraints on flags passed to the unshare system call and
* force unsharing of additional process context as appropriate.
*/
static inline void check_unshare_flags(unsigned long *flags_ptr)
{
/*
* If unsharing a thread from a thread group, must also
* unshare vm.
*/
if (*flags_ptr & CLONE_THREAD)
*flags_ptr |= CLONE_VM;
/*
* If unsharing vm, must also unshare signal handlers.
*/
if (*flags_ptr & CLONE_VM)
*flags_ptr |= CLONE_SIGHAND;
/*
* If unsharing signal handlers and the task was created
* using CLONE_THREAD, then must unshare the thread
*/
if ((*flags_ptr & CLONE_SIGHAND) &&
(atomic_read(¤t->signal->count) > 1))
*flags_ptr |= CLONE_THREAD;
/*
* If unsharing namespace, must also unshare filesystem information.
*/
if (*flags_ptr & CLONE_NEWNS)
*flags_ptr |= CLONE_FS;
}
/*
* Unsharing of tasks created with CLONE_THREAD is not supported yet
*/
/*
 * Thread unsharing is not implemented: reject CLONE_THREAD outright.
 */
static int unshare_thread(unsigned long unshare_flags)
{
    return (unshare_flags & CLONE_THREAD) ? -EINVAL : 0;
}
/*
* Unshare the filesystem structure if it is being shared
*/
/*
 * Give the caller a private copy of its fs_struct when CLONE_FS was
 * requested and the current one is actually shared.  On success *new_fsp
 * holds the copy (or is untouched when no unsharing was needed).
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
    struct fs_struct *fs = current->fs;

    if (!(unshare_flags & CLONE_FS))
        return 0;
    /* Not shared with anyone else: nothing to do. */
    if (!fs || atomic_read(&fs->count) <= 1)
        return 0;

    *new_fsp = __copy_fs_struct(current->fs);
    if (!*new_fsp)
        return -ENOMEM;
    return 0;
}
/*
* Unsharing of sighand is not supported yet
*/
/*
 * Sighand unsharing is not implemented: fail only when the handlers are
 * genuinely shared with another task, otherwise report success.
 */
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
    struct sighand_struct *sigh = current->sighand;

    if (!(unshare_flags & CLONE_SIGHAND))
        return 0;
    return atomic_read(&sigh->count) > 1 ? -EINVAL : 0;
}
/*
* Unshare vm if it is being shared
*/
/*
 * VM unsharing is not implemented: fail only when the mm is genuinely
 * shared with another user, otherwise report success.
 */
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
    struct mm_struct *mm = current->mm;

    if (!(unshare_flags & CLONE_VM))
        return 0;
    return (mm && atomic_read(&mm->mm_users) > 1) ? -EINVAL : 0;
}
/*
* Unshare file descriptor table if it is being shared
*/
/*
 * Duplicate the file descriptor table when CLONE_FILES was requested and
 * the table is actually shared.  On success *new_fdp holds the copy (or
 * is untouched when no unsharing was needed).
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
    struct files_struct *fd = current->files;
    int error = 0;

    if (!(unshare_flags & CLONE_FILES))
        return 0;
    if (!fd || atomic_read(&fd->count) <= 1)
        return 0;

    /* dup_fd() reports failure through &error and returns NULL. */
    *new_fdp = dup_fd(fd, &error);
    return *new_fdp ? 0 : error;
}
/*
* Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
* supported yet
*/
/*
 * SysV semaphore undo unsharing is not implemented: reject CLONE_SYSVSEM.
 */
static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
{
    return (unshare_flags & CLONE_SYSVSEM) ? -EINVAL : 0;
}
/*
* unshare allows a process to 'unshare' part of the process
* context which was originally shared using clone. copy_*
* functions used by do_fork() cannot be used here directly
* because they modify an inactive task_struct that is being
* constructed. Here we are modifying the current, active,
* task_struct.
*/
/*
 * sys_unshare - detach parts of the caller's execution context that were
 * shared via clone().
 *
 * Strategy: build every requested replacement structure first; only when
 * all succeeded are they swapped into current under task_lock().  The
 * swap stores the OLD pointer back into each new_* variable, so the
 * shared fallthrough cleanup labels below release whichever copy is no
 * longer in use — on both the success and the error paths.
 */
asmlinkage long sys_unshare(unsigned long unshare_flags)
{
    int err = 0;
    struct fs_struct *fs, *new_fs = NULL;
    struct sighand_struct *new_sigh = NULL;
    struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
    struct files_struct *fd, *new_fd = NULL;
    struct sem_undo_list *new_ulist = NULL;
    struct nsproxy *new_nsproxy = NULL, *old_nsproxy = NULL;

    /* Apply implied-flag dependencies before validating. */
    check_unshare_flags(&unshare_flags);

    /* Return -EINVAL for all unsupported flags */
    err = -EINVAL;
    if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
                CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
                CLONE_NEWUTS|CLONE_NEWIPC))
        goto bad_unshare_out;

    if ((err = unshare_thread(unshare_flags)))
        goto bad_unshare_out;
    if ((err = unshare_fs(unshare_flags, &new_fs)))
        goto bad_unshare_cleanup_thread;
    if ((err = unshare_sighand(unshare_flags, &new_sigh)))
        goto bad_unshare_cleanup_fs;
    if ((err = unshare_vm(unshare_flags, &new_mm)))
        goto bad_unshare_cleanup_sigh;
    if ((err = unshare_fd(unshare_flags, &new_fd)))
        goto bad_unshare_cleanup_vm;
    if ((err = unshare_semundo(unshare_flags, &new_ulist)))
        goto bad_unshare_cleanup_fd;
    if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
            new_fs)))
        goto bad_unshare_cleanup_semundo;

    if (new_fs || new_mm || new_fd || new_ulist || new_nsproxy) {
        /* Install the replacements atomically w.r.t. task_lock()
         * readers; each new_* ends up holding the displaced old
         * structure for the cleanup code below to drop. */
        task_lock(current);

        if (new_nsproxy) {
            old_nsproxy = current->nsproxy;
            current->nsproxy = new_nsproxy;
            new_nsproxy = old_nsproxy;
        }

        if (new_fs) {
            fs = current->fs;
            current->fs = new_fs;
            new_fs = fs;
        }

        if (new_mm) {
            mm = current->mm;
            active_mm = current->active_mm;
            current->mm = new_mm;
            current->active_mm = new_mm;
            /* Switch the hardware address space as well. */
            activate_mm(active_mm, new_mm);
            new_mm = mm;
        }

        if (new_fd) {
            fd = current->files;
            current->files = new_fd;
            new_fd = fd;
        }

        task_unlock(current);
    }

    if (new_nsproxy)
        put_nsproxy(new_nsproxy);

/* Fallthrough cleanup: releases whichever copies are unused. */
bad_unshare_cleanup_semundo:
bad_unshare_cleanup_fd:
    if (new_fd)
        put_files_struct(new_fd);

bad_unshare_cleanup_vm:
    if (new_mm)
        mmput(new_mm);

bad_unshare_cleanup_sigh:
    if (new_sigh)
        if (atomic_dec_and_test(&new_sigh->count))
            kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_fs:
    if (new_fs)
        put_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
    return err;
}
|
j0n3lson/linux-2.6.22.19-cs370
|
kernel/fork.c
|
C
|
gpl-2.0
| 41,833
|
/*
* linux/fs/read_write.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/smp_lock.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <trace/fs.h>
#include "read_write.h"
#include <asm/uaccess.h>
#include <asm/unistd.h>
/*
 * Stock file_operations for read-only files: generic seek/read plus
 * read-only mmap and splice.  No .write, so writes fail with -EINVAL.
 */
const struct file_operations generic_ro_fops = {
    .llseek = generic_file_llseek,
    .read = do_sync_read,
    .aio_read = generic_file_aio_read,
    .mmap = generic_file_readonly_mmap,
    .splice_read = generic_file_splice_read,
};

EXPORT_SYMBOL(generic_ro_fops);
/**
* generic_file_llseek_unlocked - lockless generic llseek implementation
* @file: file structure to seek on
* @offset: file offset to seek to
* @origin: type of seek
*
* Updates the file offset to the value specified by @offset and @origin.
* Locking must be provided by the caller.
*/
/**
 * generic_file_llseek_unlocked - lockless generic llseek implementation
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @origin: type of seek
 *
 * Updates the file offset to the value specified by @offset and @origin.
 * Locking must be provided by the caller.
 */
loff_t
generic_file_llseek_unlocked(struct file *file, loff_t offset, int origin)
{
    struct inode *inode = file->f_mapping->host;

    /* Translate relative origins into an absolute offset. */
    if (origin == SEEK_END)
        offset += inode->i_size;
    else if (origin == SEEK_CUR)
        offset += file->f_pos;

    if (offset < 0 || offset > inode->i_sb->s_maxbytes)
        return -EINVAL;

    /* Only touch f_pos/f_version when the position actually moves. */
    if (offset != file->f_pos) {
        file->f_pos = offset;
        file->f_version = 0;
    }

    return offset;
}
EXPORT_SYMBOL(generic_file_llseek_unlocked);
/**
* generic_file_llseek - generic llseek implementation for regular files
* @file: file structure to seek on
* @offset: file offset to seek to
* @origin: type of seek
*
* This is a generic implemenation of ->llseek useable for all normal local
* filesystems. It just updates the file offset to the value specified by
* @offset and @origin under i_mutex.
*/
/**
 * generic_file_llseek - generic llseek implementation for regular files
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @origin: type of seek
 *
 * This is a generic implemenation of ->llseek useable for all normal local
 * filesystems. It just updates the file offset to the value specified by
 * @offset and @origin under i_mutex.
 *
 * Consistency fix: use file->f_path.dentry like the rest of this file
 * instead of the deprecated f_dentry alias.
 */
loff_t generic_file_llseek(struct file *file, loff_t offset, int origin)
{
    loff_t rval;
    struct inode *inode = file->f_path.dentry->d_inode;

    mutex_lock(&inode->i_mutex);
    rval = generic_file_llseek_unlocked(file, offset, origin);
    mutex_unlock(&inode->i_mutex);

    return rval;
}
EXPORT_SYMBOL(generic_file_llseek);
/*
 * ->llseek for unseekable objects (pipes, sockets, some char devices):
 * always fails with -ESPIPE, per POSIX lseek semantics.
 */
loff_t no_llseek(struct file *file, loff_t offset, int origin)
{
    return -ESPIPE;
}
EXPORT_SYMBOL(no_llseek);
/*
 * Fallback ->llseek used when a file has FMODE_LSEEK but no llseek op.
 * Runs under the Big Kernel Lock for legacy drivers.  Unlike the generic
 * variant it does not enforce s_maxbytes, only that the result is >= 0.
 */
loff_t default_llseek(struct file *file, loff_t offset, int origin)
{
    loff_t retval;

    lock_kernel();
    switch (origin) {
        case SEEK_END:
            offset += i_size_read(file->f_path.dentry->d_inode);
            break;
        case SEEK_CUR:
            offset += file->f_pos;
            /* no break needed: SEEK_CUR is the last case */
    }
    retval = -EINVAL;
    if (offset >= 0) {
        /* Only dirty f_pos/f_version on an actual move. */
        if (offset != file->f_pos) {
            file->f_pos = offset;
            file->f_version = 0;
        }
        retval = offset;
    }
    unlock_kernel();
    return retval;
}
EXPORT_SYMBOL(default_llseek);
/*
 * Dispatch a seek request: use the file's own ->llseek when present,
 * fall back to default_llseek for seekable files without one, and
 * no_llseek (-ESPIPE) for files not opened with FMODE_LSEEK.
 */
loff_t vfs_llseek(struct file *file, loff_t offset, int origin)
{
    loff_t (*seek_fn)(struct file *, loff_t, int) = no_llseek;

    if (file->f_mode & FMODE_LSEEK) {
        if (file->f_op && file->f_op->llseek)
            seek_fn = file->f_op->llseek;
        else
            seek_fn = default_llseek;
    }
    return seek_fn(file, offset, origin);
}
EXPORT_SYMBOL(vfs_llseek);
/*
 * lseek(2): reposition the file offset.  The vfs result is a loff_t but
 * the syscall returns off_t; a value that does not round-trip through
 * off_t is reported as -EOVERFLOW (32-bit userland with large files).
 */
SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, origin)
{
    off_t retval;
    struct file * file;
    int fput_needed;

    retval = -EBADF;
    file = fget_light(fd, &fput_needed);
    if (!file)
        goto bad;

    retval = -EINVAL;
    if (origin <= SEEK_MAX) {
        loff_t res = vfs_llseek(file, offset, origin);
        retval = res;
        if (res != (loff_t)retval)
            retval = -EOVERFLOW; /* LFS: should only happen on 32 bit platforms */
    }
    trace_fs_lseek(fd, offset, origin);
    fput_light(file, fput_needed);
bad:
    return retval;
}
#ifdef __ARCH_WANT_SYS_LLSEEK
/*
 * _llseek(2): 32-bit-arch variant that takes the 64-bit offset as two
 * 32-bit halves and writes the resulting position to *result instead of
 * returning it (the syscall return can only carry an int).
 */
SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
        unsigned long, offset_low, loff_t __user *, result,
        unsigned int, origin)
{
    int retval;
    struct file * file;
    loff_t offset;
    int fput_needed;

    retval = -EBADF;
    file = fget_light(fd, &fput_needed);
    if (!file)
        goto bad;

    retval = -EINVAL;
    if (origin > SEEK_MAX)
        goto out_putf;

    /* Reassemble the 64-bit offset from its two halves. */
    offset = vfs_llseek(file, ((loff_t) offset_high << 32) | offset_low,
            origin);
    trace_fs_llseek(fd, offset, origin);

    retval = (int)offset;
    if (offset >= 0) {
        retval = -EFAULT;
        if (!copy_to_user(result, &offset, sizeof(offset)))
            retval = 0;
    }
out_putf:
    fput_light(file, fput_needed);
bad:
    return retval;
}
#endif
/*
* rw_verify_area doesn't like huge counts. We limit
* them to something that fits in "int" so that others
* won't have to do range checks all the time.
*/
#define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK)
/*
 * Validate an I/O request before read/write: reject negative counts and
 * offsets that would wrap, honor mandatory locks, and ask the security
 * module for permission.
 *
 * Returns the (possibly clamped to MAX_RW_COUNT) byte count to use, or a
 * negative errno.
 */
int rw_verify_area(int read_write, struct file *file, loff_t *ppos, size_t count)
{
    struct inode *inode;
    loff_t pos;
    int retval = -EINVAL;

    inode = file->f_path.dentry->d_inode;
    if (unlikely((ssize_t) count < 0))
        return retval;
    pos = *ppos;
    /* pos + count must not wrap past the loff_t range. */
    if (unlikely((pos < 0) || (loff_t) (pos + count) < 0))
        return retval;

    if (unlikely(inode->i_flock && mandatory_lock(inode))) {
        retval = locks_mandatory_area(
            read_write == READ ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE,
            inode, file, pos, count);
        if (retval < 0)
            return retval;
    }
    retval = security_file_permission(file,
                read_write == READ ? MAY_READ : MAY_WRITE);
    if (retval)
        return retval;
    /* Clamp so callers never see counts that don't fit in an int. */
    return count > MAX_RW_COUNT ? MAX_RW_COUNT : count;
}
/*
 * Block until a kiocb that returned -EIOCBRETRY is kicked.  If the kick
 * already arrived, just consume it without sleeping.
 */
static void wait_on_retry_sync_kiocb(struct kiocb *iocb)
{
    /* Set state before the check so a concurrent kick wakes us. */
    set_current_state(TASK_UNINTERRUPTIBLE);
    if (!kiocbIsKicked(iocb))
        schedule();
    else
        kiocbClearKicked(iocb);
    __set_current_state(TASK_RUNNING);
}
/*
 * Synchronous read built on ->aio_read: drive the aio path to completion,
 * retrying on -EIOCBRETRY and sleeping on -EIOCBQUEUED.  Updates *ppos to
 * the final position.  Used as ->read by filesystems that only implement
 * aio_read.
 */
ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
    struct iovec iov = { .iov_base = buf, .iov_len = len };
    struct kiocb kiocb;
    ssize_t ret;

    init_sync_kiocb(&kiocb, filp);
    kiocb.ki_pos = *ppos;
    kiocb.ki_left = len;

    for (;;) {
        ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
        if (ret != -EIOCBRETRY)
            break;
        wait_on_retry_sync_kiocb(&kiocb);
    }

    if (-EIOCBQUEUED == ret)
        ret = wait_on_sync_kiocb(&kiocb);
    *ppos = kiocb.ki_pos;
    return ret;
}

EXPORT_SYMBOL(do_sync_read);
/*
 * Core read path: check mode/ops/user buffer, run rw_verify_area (which
 * may clamp count), then dispatch to ->read or the sync-over-aio helper.
 * On success emits an fsnotify access event and accounts the I/O.
 */
ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
{
    ssize_t ret;

    if (!(file->f_mode & FMODE_READ))
        return -EBADF;
    if (!file->f_op || (!file->f_op->read && !file->f_op->aio_read))
        return -EINVAL;
    /* Cheap sanity check on the destination buffer before any work. */
    if (unlikely(!access_ok(VERIFY_WRITE, buf, count)))
        return -EFAULT;

    ret = rw_verify_area(READ, file, pos, count);
    if (ret >= 0) {
        count = ret;
        if (file->f_op->read)
            ret = file->f_op->read(file, buf, count, pos);
        else
            ret = do_sync_read(file, buf, count, pos);
        if (ret > 0) {
            fsnotify_access(file->f_path.dentry);
            add_rchar(current, ret);
        }
        inc_syscr(current);
    }

    return ret;
}

EXPORT_SYMBOL(vfs_read);
/*
 * Synchronous write built on ->aio_write: mirror image of do_sync_read.
 * Drives the aio path to completion, retrying on -EIOCBRETRY and sleeping
 * on -EIOCBQUEUED, then updates *ppos.
 */
ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
    /* Cast drops const: the iovec type is shared with the read path. */
    struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
    struct kiocb kiocb;
    ssize_t ret;

    init_sync_kiocb(&kiocb, filp);
    kiocb.ki_pos = *ppos;
    kiocb.ki_left = len;

    for (;;) {
        ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
        if (ret != -EIOCBRETRY)
            break;
        wait_on_retry_sync_kiocb(&kiocb);
    }

    if (-EIOCBQUEUED == ret)
        ret = wait_on_sync_kiocb(&kiocb);
    *ppos = kiocb.ki_pos;
    return ret;
}

EXPORT_SYMBOL(do_sync_write);
/*
 * Core write path: mirror image of vfs_read.  Checks mode/ops/user
 * buffer, runs rw_verify_area, dispatches to ->write or do_sync_write,
 * then emits fsnotify modify and accounts the I/O.
 */
ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
    ssize_t ret;

    if (!(file->f_mode & FMODE_WRITE))
        return -EBADF;
    if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
        return -EINVAL;
    /* Source buffer must be readable user memory. */
    if (unlikely(!access_ok(VERIFY_READ, buf, count)))
        return -EFAULT;

    ret = rw_verify_area(WRITE, file, pos, count);
    if (ret >= 0) {
        count = ret;
        if (file->f_op->write)
            ret = file->f_op->write(file, buf, count, pos);
        else
            ret = do_sync_write(file, buf, count, pos);
        if (ret > 0) {
            fsnotify_modify(file->f_path.dentry);
            add_wchar(current, ret);
        }
        inc_syscw(current);
    }

    return ret;
}

EXPORT_SYMBOL(vfs_write);
/* Snapshot the file position for a positional-IO-free syscall. */
static inline loff_t file_pos_read(struct file *file)
{
    return file->f_pos;
}
/* Store back the (possibly advanced) file position after the I/O. */
static inline void file_pos_write(struct file *file, loff_t pos)
{
    file->f_pos = pos;
}
/*
 * read(2): read from the current file position and advance it.
 * Position is read into a local, passed by reference, and written back
 * so vfs_read never touches f_pos directly.
 */
SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
{
    struct file *file;
    ssize_t ret = -EBADF;
    int fput_needed;

    file = fget_light(fd, &fput_needed);
    if (file) {
        loff_t pos = file_pos_read(file);
        ret = vfs_read(file, buf, count, &pos);
        trace_fs_read(fd, buf, count, ret);
        file_pos_write(file, pos);
        fput_light(file, fput_needed);
    }

    return ret;
}
/*
 * write(2): write at the current file position and advance it.
 * Same pos-in-local pattern as read(2).
 */
SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
        size_t, count)
{
    struct file *file;
    ssize_t ret = -EBADF;
    int fput_needed;

    file = fget_light(fd, &fput_needed);
    if (file) {
        loff_t pos = file_pos_read(file);
        ret = vfs_write(file, buf, count, &pos);
        trace_fs_write(fd, buf, count, ret);
        file_pos_write(file, pos);
        fput_light(file, fput_needed);
    }

    return ret;
}
/*
 * pread64(2): positional read; the file's own f_pos is never touched.
 * Fails with -ESPIPE on files that do not allow positional reads.
 */
SYSCALL_DEFINE(pread64)(unsigned int fd, char __user *buf,
            size_t count, loff_t pos)
{
    struct file *file;
    ssize_t ret = -EBADF;
    int fput_needed;

    if (pos < 0)
        return -EINVAL;

    file = fget_light(fd, &fput_needed);
    if (file) {
        ret = -ESPIPE;
        if (file->f_mode & FMODE_PREAD) {
            ret = vfs_read(file, buf, count, &pos);
            trace_fs_pread64(fd, buf, count, pos, ret);
        }
        fput_light(file, fput_needed);
    }

    return ret;
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
/*
 * Sign-extension-safe wrapper: arches with syscall wrappers pass all
 * arguments as longs; narrow them back to the real types here.
 */
asmlinkage long SyS_pread64(long fd, long buf, long count, loff_t pos)
{
    return SYSC_pread64((unsigned int) fd, (char __user *) buf,
                (size_t) count, pos);
}
SYSCALL_ALIAS(sys_pread64, SyS_pread64);
#endif
/*
 * pwrite64(2): positional write; mirror of pread64, gated on FMODE_PWRITE.
 */
SYSCALL_DEFINE(pwrite64)(unsigned int fd, const char __user *buf,
             size_t count, loff_t pos)
{
    struct file *file;
    ssize_t ret = -EBADF;
    int fput_needed;

    if (pos < 0)
        return -EINVAL;

    file = fget_light(fd, &fput_needed);
    if (file) {
        ret = -ESPIPE;
        if (file->f_mode & FMODE_PWRITE) {
            ret = vfs_write(file, buf, count, &pos);
            trace_fs_pwrite64(fd, buf, count, pos, ret);
        }
        fput_light(file, fput_needed);
    }

    return ret;
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
/*
 * Sign-extension-safe wrapper for pwrite64; see SyS_pread64 pattern.
 */
asmlinkage long SyS_pwrite64(long fd, long buf, long count, loff_t pos)
{
    return SYSC_pwrite64((unsigned int) fd, (const char __user *) buf,
                 (size_t) count, pos);
}
SYSCALL_ALIAS(sys_pwrite64, SyS_pwrite64);
#endif
/*
* Reduce an iovec's length in-place. Return the resulting number of segments
*/
/*
 * Reduce an iovec's length in-place so the total covers at most @to
 * bytes: the segment that crosses the boundary is truncated and all
 * later segments are dropped.  Returns the resulting segment count.
 */
unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to)
{
    size_t used = 0;    /* bytes covered by fully-kept segments */
    unsigned long seg;

    for (seg = 0; seg < nr_segs; seg++) {
        if (used + iov[seg].iov_len >= to) {
            iov[seg].iov_len = to - used;
            return seg + 1;
        }
        used += iov[seg].iov_len;
    }
    return seg;
}
EXPORT_SYMBOL(iov_shorten);
/*
 * Synchronous vectored I/O over an aio method (@fn is ->aio_read or
 * ->aio_write): drive the kiocb to completion, retrying on -EIOCBRETRY
 * and sleeping on -EIOCBQUEUED, then update *ppos.
 */
ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
        unsigned long nr_segs, size_t len, loff_t *ppos, iov_fn_t fn)
{
    struct kiocb kiocb;
    ssize_t ret;

    init_sync_kiocb(&kiocb, filp);
    kiocb.ki_pos = *ppos;
    kiocb.ki_left = len;
    kiocb.ki_nbytes = len;

    for (;;) {
        ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos);
        if (ret != -EIOCBRETRY)
            break;
        wait_on_retry_sync_kiocb(&kiocb);
    }

    if (ret == -EIOCBQUEUED)
        ret = wait_on_sync_kiocb(&kiocb);
    *ppos = kiocb.ki_pos;
    return ret;
}
/* Do it by hand, with file-ops: emulate vectored I/O with one ->read or
 * ->write call per segment.  Stops at the first error (reporting it only
 * if nothing was transferred yet) or at the first short transfer. */
ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov,
        unsigned long nr_segs, loff_t *ppos, io_fn_t fn)
{
    struct iovec *vector = iov;
    ssize_t ret = 0;

    while (nr_segs > 0) {
        void __user *base;
        size_t len;
        ssize_t nr;

        base = vector->iov_base;
        len = vector->iov_len;
        vector++;
        nr_segs--;

        nr = fn(filp, base, len, ppos);

        if (nr < 0) {
            /* Partial success wins over a later error. */
            if (!ret)
                ret = nr;
            break;
        }
        ret += nr;
        /* Short transfer: don't attempt further segments. */
        if (nr != len)
            break;
    }

    return ret;
}
/* A write operation does a read from user space and vice versa */
#define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)
/*
 * Copy a user iovec array into the kernel and validate it.
 *
 * Uses @fast_pointer (caller's stack array of @fast_segs entries) when it
 * is big enough, otherwise kmalloc's a larger one.  *ret_pointer always
 * receives the array actually used — the caller must kfree it when it
 * differs from @fast_pointer, even on error.
 *
 * Returns the total byte count, 0 for zero segments, or a negative errno.
 */
ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
                  unsigned long nr_segs, unsigned long fast_segs,
                  struct iovec *fast_pointer,
                  struct iovec **ret_pointer)
{
    unsigned long seg;
    ssize_t ret;
    struct iovec *iov = fast_pointer;

    /*
     * SuS says "The readv() function *may* fail if the iovcnt argument
     * was less than or equal to 0, or greater than {IOV_MAX}. Linux has
     * traditionally returned zero for zero segments, so...
     */
    if (nr_segs == 0) {
        ret = 0;
        goto out;
    }

    /*
     * First get the "struct iovec" from user memory and
     * verify all the pointers
     */
    if (nr_segs > UIO_MAXIOV) {
        ret = -EINVAL;
        goto out;
    }
    if (nr_segs > fast_segs) {
        iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
        if (iov == NULL) {
            ret = -ENOMEM;
            goto out;
        }
    }
    if (copy_from_user(iov, uvector, nr_segs*sizeof(*uvector))) {
        ret = -EFAULT;
        goto out;
    }

    /*
     * According to the Single Unix Specification we should return EINVAL
     * if an element length is < 0 when cast to ssize_t or if the
     * total length would overflow the ssize_t return value of the
     * system call.
     */
    ret = 0;
    for (seg = 0; seg < nr_segs; seg++) {
        void __user *buf = iov[seg].iov_base;
        ssize_t len = (ssize_t)iov[seg].iov_len;

        /* see if we're about to use an invalid len or if
         * it's about to overflow ssize_t */
        if (len < 0 || (ret + len < ret)) {
            ret = -EINVAL;
            goto out;
        }
        if (unlikely(!access_ok(vrfy_dir(type), buf, len))) {
            ret = -EFAULT;
            goto out;
        }

        ret += len;
    }
out:
    /* Always hand back the array so the caller can free it. */
    *ret_pointer = iov;
    return ret;
}
/*
 * Common readv/writev engine: copy+validate the user iovec, verify the
 * I/O area, then run either the aio-based sync helper or the per-segment
 * fallback loop.  Emits fsnotify on any transfer (or an "empty" read).
 */
static ssize_t do_readv_writev(int type, struct file *file,
                   const struct iovec __user * uvector,
                   unsigned long nr_segs, loff_t *pos)
{
    size_t tot_len;
    struct iovec iovstack[UIO_FASTIOV];
    struct iovec *iov = iovstack;
    ssize_t ret;
    io_fn_t fn;
    iov_fn_t fnv;

    if (!file->f_op) {
        ret = -EINVAL;
        goto out;
    }

    ret = rw_copy_check_uvector(type, uvector, nr_segs,
            ARRAY_SIZE(iovstack), iovstack, &iov);
    if (ret <= 0)
        goto out;

    tot_len = ret;
    ret = rw_verify_area(type, file, pos, tot_len);
    if (ret < 0)
        goto out;

    /* Prefer the vectored aio method when the fs provides one. */
    fnv = NULL;
    if (type == READ) {
        fn = file->f_op->read;
        fnv = file->f_op->aio_read;
    } else {
        fn = (io_fn_t)file->f_op->write;
        fnv = file->f_op->aio_write;
    }

    if (fnv)
        ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
                        pos, fnv);
    else
        ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);

out:
    /* Free the iovec only if rw_copy_check_uvector kmalloc'd one. */
    if (iov != iovstack)
        kfree(iov);
    /* For READ, fire the event even on a zero-byte success. */
    if ((ret + (type == READ)) > 0) {
        if (type == READ)
            fsnotify_access(file->f_path.dentry);
        else
            fsnotify_modify(file->f_path.dentry);
    }
    return ret;
}
/*
 * Vectored read entry point: validate mode and presence of a read
 * method, then defer to the common readv/writev engine.
 */
ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
          unsigned long vlen, loff_t *pos)
{
    if (!(file->f_mode & FMODE_READ))
        return -EBADF;
    if (!file->f_op)
        return -EINVAL;
    if (!file->f_op->aio_read && !file->f_op->read)
        return -EINVAL;

    return do_readv_writev(READ, file, vec, vlen, pos);
}

EXPORT_SYMBOL(vfs_readv);
/*
 * Vectored write entry point: validate mode and presence of a write
 * method, then defer to the common readv/writev engine.
 */
ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
           unsigned long vlen, loff_t *pos)
{
    if (!(file->f_mode & FMODE_WRITE))
        return -EBADF;
    if (!file->f_op)
        return -EINVAL;
    if (!file->f_op->aio_write && !file->f_op->write)
        return -EINVAL;

    return do_readv_writev(WRITE, file, vec, vlen, pos);
}

EXPORT_SYMBOL(vfs_writev);
/*
 * readv(2): vectored read at the current position.  Accounting is done
 * here (not in vfs_readv) so in-kernel callers are not double-counted.
 */
SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
        unsigned long, vlen)
{
    struct file *file;
    ssize_t ret = -EBADF;
    int fput_needed;

    file = fget_light(fd, &fput_needed);
    if (file) {
        loff_t pos = file_pos_read(file);
        ret = vfs_readv(file, vec, vlen, &pos);
        trace_fs_readv(fd, vec, vlen, ret);
        file_pos_write(file, pos);
        fput_light(file, fput_needed);
    }

    if (ret > 0)
        add_rchar(current, ret);
    inc_syscr(current);
    return ret;
}
/*
 * writev(2): vectored write at the current position; mirror of readv(2).
 */
SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
        unsigned long, vlen)
{
    struct file *file;
    ssize_t ret = -EBADF;
    int fput_needed;

    file = fget_light(fd, &fput_needed);
    if (file) {
        loff_t pos = file_pos_read(file);
        ret = vfs_writev(file, vec, vlen, &pos);
        trace_fs_writev(fd, vec, vlen, ret);
        file_pos_write(file, pos);
        fput_light(file, fput_needed);
    }

    if (ret > 0)
        add_wchar(current, ret);
    inc_syscw(current);
    return ret;
}
/*
 * Engine behind sendfile(2)/sendfile64(2): validate both descriptors,
 * clamp the transfer to @max (or both filesystems' s_maxbytes), then
 * move data with do_splice_direct().  @ppos NULL means "use and advance
 * the input file's own f_pos".
 */
static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
               size_t count, loff_t max)
{
    struct file * in_file, * out_file;
    struct inode * in_inode, * out_inode;
    loff_t pos;
    ssize_t retval;
    int fput_needed_in, fput_needed_out, fl;

    /*
     * Get input file, and verify that it is ok..
     */
    retval = -EBADF;
    in_file = fget_light(in_fd, &fput_needed_in);
    if (!in_file)
        goto out;
    if (!(in_file->f_mode & FMODE_READ))
        goto fput_in;
    retval = -EINVAL;
    in_inode = in_file->f_path.dentry->d_inode;
    if (!in_inode)
        goto fput_in;
    /* The splice path is mandatory for the input side. */
    if (!in_file->f_op || !in_file->f_op->splice_read)
        goto fput_in;
    retval = -ESPIPE;
    if (!ppos)
        ppos = &in_file->f_pos;
    else
        /* Explicit offset requires positional-read capability. */
        if (!(in_file->f_mode & FMODE_PREAD))
            goto fput_in;
    retval = rw_verify_area(READ, in_file, ppos, count);
    if (retval < 0)
        goto fput_in;
    count = retval;

    /*
     * Get output file, and verify that it is ok..
     */
    retval = -EBADF;
    out_file = fget_light(out_fd, &fput_needed_out);
    if (!out_file)
        goto fput_in;
    if (!(out_file->f_mode & FMODE_WRITE))
        goto fput_out;
    retval = -EINVAL;
    if (!out_file->f_op || !out_file->f_op->sendpage)
        goto fput_out;
    out_inode = out_file->f_path.dentry->d_inode;
    retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count);
    if (retval < 0)
        goto fput_out;
    count = retval;

    if (!max)
        max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);

    pos = *ppos;
    retval = -EINVAL;
    if (unlikely(pos < 0))
        goto fput_out;
    if (unlikely(pos + count > max)) {
        retval = -EOVERFLOW;
        if (pos >= max)
            goto fput_out;
        /* Partial transfer up to the limit. */
        count = max - pos;
    }

    fl = 0;
#if 0
    /*
     * We need to debate whether we can enable this or not. The
     * man page documents EAGAIN return for the output at least,
     * and the application is arguably buggy if it doesn't expect
     * EAGAIN on a non-blocking file descriptor.
     */
    if (in_file->f_flags & O_NONBLOCK)
        fl = SPLICE_F_NONBLOCK;
#endif

    retval = do_splice_direct(in_file, ppos, out_file, count, fl);

    if (retval > 0) {
        add_rchar(current, retval);
        add_wchar(current, retval);
    }
    inc_syscr(current);
    inc_syscw(current);

    if (*ppos > max)
        retval = -EOVERFLOW;

fput_out:
    fput_light(out_file, fput_needed_out);
fput_in:
    fput_light(in_file, fput_needed_in);
out:
    return retval;
}
/*
 * sendfile(2), 32-bit offset flavour. Copies the user's off_t in, runs
 * the transfer capped at MAX_NON_LFS, and writes the updated offset back.
 * With a NULL offset pointer the input file's own position is used.
 */
SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd, off_t __user *, offset, size_t, count)
{
	loff_t pos;
	off_t off;
	ssize_t ret;

	if (!offset)
		return do_sendfile(out_fd, in_fd, NULL, count, 0);

	if (unlikely(get_user(off, offset)))
		return -EFAULT;
	pos = off;
	ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
	if (unlikely(put_user(pos, offset)))
		return -EFAULT;
	return ret;
}
/*
 * sendfile64(2): same as sendfile(2) but takes a full 64-bit loff_t and
 * imposes no MAX_NON_LFS cap (max = 0 lets do_sendfile() use s_maxbytes).
 */
SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd, loff_t __user *, offset, size_t, count)
{
	loff_t pos;
	ssize_t ret;

	if (!offset)
		return do_sendfile(out_fd, in_fd, NULL, count, 0);

	if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
		return -EFAULT;
	ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
	if (unlikely(put_user(pos, offset)))
		return -EFAULT;
	return ret;
}
|
kzlin129/tt-gpl
|
go12/linux-2.6.28.10/fs/read_write.c
|
C
|
gpl-2.0
| 19,284
|
/* Copyright Statement:
*
* This software/firmware and related documentation ("MediaTek Software") are
* protected under relevant copyright laws. The information contained herein is
* confidential and proprietary to MediaTek Inc. and/or its licensors. Without
* the prior written permission of MediaTek inc. and/or its licensors, any
* reproduction, modification, use or disclosure of MediaTek Software, and
* information contained herein, in whole or in part, shall be strictly
* prohibited.
*
* MediaTek Inc. (C) 2010. All rights reserved.
*
* BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
* THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
* RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
* AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. NEITHER
* DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE SOFTWARE OF
* ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE
* MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTY FOR
* ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES THAT IT
* IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER
* LICENSES CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE
* RESPONSIBLE FOR ANY MEDIATEK SOFTWARE RELEASES MADE TO RECEIVER'S
* SPECIFICATION OR TO CONFORM TO A PARTICULAR STANDARD OR OPEN FORUM.
* RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND CUMULATIVE
* LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
* AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
* OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
* MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
*/
/*
* Copyright (c) 2008, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <boot/boot.h>
#include <msm7k/gpio.h>
/* gross */
/* gross */
typedef struct gpioregs gpioregs;

/*
 * Register addresses for one MSM7k GPIO bank. Each field holds the
 * physical address (from msm7k/gpio.h) that readl()/writel() below use.
 */
struct gpioregs
{
	unsigned out;        /* output data register */
	unsigned in;         /* input data register */
	unsigned int_status; /* interrupt status */
	unsigned int_clear;  /* interrupt clear */
	unsigned int_en;     /* interrupt enable */
	unsigned int_edge;   /* edge vs. level trigger select */
	unsigned int_pos;    /* polarity select */
	unsigned oe;         /* output enable */
};
/*
 * The five GPIO banks of the MSM7k, indexed 0..4. find_gpio() maps a
 * flat GPIO number onto a bank entry plus a bit within that bank.
 */
static gpioregs GPIO_REGS[] = {
	{ /* bank 0 */
		.out =         GPIO_OUT_0,
		.in =          GPIO_IN_0,
		.int_status =  GPIO_INT_STATUS_0,
		.int_clear =   GPIO_INT_CLEAR_0,
		.int_en =      GPIO_INT_EN_0,
		.int_edge =    GPIO_INT_EDGE_0,
		.int_pos =     GPIO_INT_POS_0,
		.oe =          GPIO_OE_0,
	},
	{ /* bank 1 */
		.out =         GPIO_OUT_1,
		.in =          GPIO_IN_1,
		.int_status =  GPIO_INT_STATUS_1,
		.int_clear =   GPIO_INT_CLEAR_1,
		.int_en =      GPIO_INT_EN_1,
		.int_edge =    GPIO_INT_EDGE_1,
		.int_pos =     GPIO_INT_POS_1,
		.oe =          GPIO_OE_1,
	},
	{ /* bank 2 */
		.out =         GPIO_OUT_2,
		.in =          GPIO_IN_2,
		.int_status =  GPIO_INT_STATUS_2,
		.int_clear =   GPIO_INT_CLEAR_2,
		.int_en =      GPIO_INT_EN_2,
		.int_edge =    GPIO_INT_EDGE_2,
		.int_pos =     GPIO_INT_POS_2,
		.oe =          GPIO_OE_2,
	},
	{ /* bank 3 */
		.out =         GPIO_OUT_3,
		.in =          GPIO_IN_3,
		.int_status =  GPIO_INT_STATUS_3,
		.int_clear =   GPIO_INT_CLEAR_3,
		.int_en =      GPIO_INT_EN_3,
		.int_edge =    GPIO_INT_EDGE_3,
		.int_pos =     GPIO_INT_POS_3,
		.oe =          GPIO_OE_3,
	},
	{ /* bank 4 */
		.out =         GPIO_OUT_4,
		.in =          GPIO_IN_4,
		.int_status =  GPIO_INT_STATUS_4,
		.int_clear =   GPIO_INT_CLEAR_4,
		.int_en =      GPIO_INT_EN_4,
		.int_edge =    GPIO_INT_EDGE_4,
		.int_pos =     GPIO_INT_POS_4,
		.oe =          GPIO_OE_4,
	},
};
/*
 * Map a flat GPIO number (0..106) onto its bank and bit position.
 * Returns a pointer into GPIO_REGS and stores the single-bit mask in
 * *bit; returns 0 for out-of-range numbers. Bank boundaries: 0-15,
 * 16-42, 43-67, 68-94, 95-106 (same split as the original if-chain).
 */
static gpioregs *find_gpio(unsigned n, unsigned *bit)
{
	/* First GPIO number of each bank, highest bank first. */
	static const unsigned bank_base[] = { 95, 68, 43, 16, 0 };
	unsigned i;

	if (n > 106)
		return 0;

	for (i = 0; i < 5; i++) {
		if (n >= bank_base[i]) {
			*bit = 1 << (n - bank_base[i]);
			return GPIO_REGS + (4 - i);
		}
	}
	/* Unreachable: every n matches bank_base[4] == 0. */
	return 0;
}
/*
 * Switch GPIO @n between output (@out != 0) and input (@out == 0) by
 * read-modify-writing the bank's output-enable register. Out-of-range
 * GPIO numbers are silently ignored.
 */
void gpio_output_enable(unsigned n, unsigned out)
{
	gpioregs *bank;
	unsigned mask;
	unsigned oe;

	if ((bank = find_gpio(n, &mask)) == 0)
		return;

	oe = readl(bank->oe);
	writel(out ? (oe | mask) : (oe & ~mask), bank->oe);
}
/*
 * Drive GPIO @n high (@on != 0) or low (@on == 0) via a read-modify-write
 * of the bank's output register. Out-of-range numbers are ignored.
 */
void gpio_write(unsigned n, unsigned on)
{
	gpioregs *bank;
	unsigned mask;
	unsigned val;

	if ((bank = find_gpio(n, &mask)) == 0)
		return;

	val = readl(bank->out);
	writel(on ? (val | mask) : (val & ~mask), bank->out);
}
/*
 * Read the current level of GPIO @n. Returns 1 for high, 0 for low or
 * for an out-of-range GPIO number.
 */
int gpio_read(unsigned n)
{
	gpioregs *bank;
	unsigned mask;

	if ((bank = find_gpio(n, &mask)) == 0)
		return 0;
	return !!(readl(bank->in) & mask);
}
/* Compatibility alias: set GPIO direction (non-zero @out = output). */
void gpio_dir(int nr, int out)
{
	gpio_output_enable(nr, out);
}
/* Compatibility alias: drive GPIO @nr high when @set is non-zero. */
void gpio_set(int nr, int set)
{
	gpio_write(nr, set);
}
/* Compatibility alias: return the current level of GPIO @nr (0 or 1). */
int gpio_get(int nr)
{
	return gpio_read(nr);
}
|
luckasfb/OT_903D-kernel-2.6.35.7
|
bootable/bootloader/legacy/arch_msm7k/gpio.c
|
C
|
gpl-2.0
| 6,641
|
/*************************************************************************
*
* FILE NAME : ifxmips_pci.c
* PROJECT : IFX UEIP
* MODULES : PCI
*
* DATE : 29 June 2009
* AUTHOR : Lei Chuanhua
*
* DESCRIPTION : PCI Host Controller Driver
* COPYRIGHT : Copyright (c) 2009
* Infineon Technologies AG
* Am Campeon 1-12, 85579 Neubiberg, Germany
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* HISTORY
* $Version $Date $Author $Comment
* 1.0 29 Jun Lei Chuanhua First UEIP release
*************************************************************************/
/*!
\file ifxmips_pci.c
\ingroup IFX_PCI
\brief pci bus driver source file
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <asm/paccess.h>
#include <asm/addrspace.h>
#include <asm/ifx/ifx_types.h>
#include <asm/ifx/ifx_regs.h>
#include <asm/ifx/common_routines.h>
#include <asm/ifx/ifx_gpio.h>
#include <asm/ifx/irq.h>
#include <asm/ifx/ifx_rcu.h>
#include "ifxmips_pci_reg.h"
#include "ifxmips_pci.h"
#define IFX_PCI_VER_MAJOR 1
#define IFX_PCI_VER_MID 2
#define IFX_PCI_VER_MINOR 0
extern u32 max_pfn, max_low_pfn;
extern struct pci_controller ifx_pci_controller;
extern int ifx_pci_bus_status;
extern void __iomem *ifx_pci_cfg_space;
extern u32 pci_config_addr(u8 bus_num, u16 devfn, int where);
/* Used by ifxmips_interrupt.c to suppress bus exception */
int pci_bus_error_flag;
extern u32 ifx_pci_config_read(u32 addr);
extern void ifx_pci_config_write(u32 addr, u32 data);
#ifdef CONFIG_IFX_DUAL_MINI_PCI
#define IFX_PCI_REQ1 29
#define IFX_PCI_GNT1 30
#endif /* CONFIG_IFX_DUAL_MINI_PCI */
static const int pci_gpio_module_id = IFX_GPIO_MODULE_PCI;
#ifdef CONFIG_IFX_PCI_DANUBE_EBU_LED_RST
#include <asm/ifx/ifx_ebu_led.h>
/*
 * EBU-LED variant: prepare the PCI device-reset line. Line 9 of the EBU
 * LED interface appears to be wired to PCI reset on this board (TODO:
 * confirm against the board schematic); drive it high (deasserted).
 */
static inline void
ifx_pci_dev_reset_init(void)
{
	ifx_ebu_led_enable();
	ifx_ebu_led_set_data(9, 1);
}
/*
 * EBU-LED variant: pulse the PCI reset line low for 5 ms, release it,
 * then wait 1 ms before handing the EBU LED interface back.
 */
static inline void
ifx_pci_dev_reset(void)
{
	ifx_ebu_led_set_data(9, 0);
	mdelay(5);
	ifx_ebu_led_set_data(9, 1);
	mdelay(1);
	ifx_ebu_led_disable();
}
#else
/* GPIO in global view */
#define IFX_PCI_RST 21
/*
 * GPIO variant: claim GPIO21 as a plain output (ALT 0,0) so software
 * controls the PCI reset timing, and leave it driven high (deasserted).
 */
static inline void
ifx_pci_dev_reset_init(void)
{
	/*
	 * PCI_RST: P1.5 used as a general GPIO, instead of PCI_RST gpio.
	 * In Danube/AR9, it reset internal PCI core and external PCI device
	 * However, in VR9, it only resets external PCI device. Internal core
	 * reset by PCI software reset registers.
	 * GPIO21 if used as PCI_RST, software can't control reset time.
	 * Since it uses as a general GPIO, ALT should 0, 0.
	 */
	ifx_gpio_pin_reserve(IFX_PCI_RST, pci_gpio_module_id);
	ifx_gpio_output_set(IFX_PCI_RST, pci_gpio_module_id);
	ifx_gpio_dir_out_set(IFX_PCI_RST, pci_gpio_module_id);
	ifx_gpio_altsel0_clear(IFX_PCI_RST, pci_gpio_module_id);
	ifx_gpio_altsel1_clear(IFX_PCI_RST, pci_gpio_module_id);
	ifx_gpio_open_drain_set(IFX_PCI_RST, pci_gpio_module_id);
}
/*
 * GPIO variant: assert PCI reset low for 5 ms, then release it and
 * give the device 1 ms to come out of reset. smp_wmb() orders the GPIO
 * write before the delay begins.
 */
static inline void
ifx_pci_dev_reset(void)
{
	/* Reset external PCI device. */
	ifx_gpio_output_clear(IFX_PCI_RST, pci_gpio_module_id);
	smp_wmb();
	mdelay(5);
	ifx_gpio_output_set(IFX_PCI_RST, pci_gpio_module_id);
	smp_wmb();
	mdelay(1);
}
#endif /* CONFIG_IFX_PCI_DANUBE_EBU_LED_RST */
/*
 * Round @x up to the next power of two (x itself if already one).
 * Works by smearing the highest set bit of (x - 1) into all lower bit
 * positions, then adding 1. Notable edges: 0 maps to 0, and values
 * above 2^31 wrap to 0 — callers here pass memory sizes well below that.
 */
static u32 round_up_to_next_power_of_two(u32 x)
{
	u32 shift;

	x -= 1;
	/* Doubling the shift each pass covers all 32 bits in 5 steps. */
	for (shift = 1; shift < 32; shift <<= 1)
		x |= x >> shift;
	return x + 1;
}
/*
 * One-time bring-up of the IFX PCI host controller: reset the PCI
 * domain via the RCU, program the PCI clock source/delay in the CGU,
 * release the external device from reset, set up the arbiter, and
 * program the FPI<->PCI address windows. Register/bit semantics come
 * from the platform headers; the exact ordering below matters and is
 * preserved from the vendor sequence.
 */
static void
ifx_pci_startup(void)
{
	u32 reg;
	/* Choose reset from RCU first, then do reset */
	reg = IFX_REG_R32(IFX_CGU_PCI_CR);
	reg |= IFX_PCI_CLK_RESET_FROM_CGU; /* XXX, Should be RCU*/
	IFX_REG_W32(reg, IFX_CGU_PCI_CR);
	/* Secondly, RCU reset PCI domain including PCI CLK and PCI bridge */
	ifx_rcu_rst(IFX_RCU_DOMAIN_PCI, IFX_RCU_MODULE_PCI);
	reg = IFX_REG_R32(IFX_CGU_IF_CLK);
	reg &= ~ IFX_PCI_CLK_MASK;
	/*
	 * CGU PCI CLK is specific with platforms. Danube has four bits,
	 * AR9 and VR9 has five bits. Their definitions are in platform header
	 * file
	 */
#ifdef CONFIG_IFX_PCI_EXTERNAL_CLK_SRC
	reg &= ~IFX_PCI_INTERNAL_CLK_SRC;
	/* External clk has no need to set clk in register */
#else
	reg |= IFX_PCI_INTERNAL_CLK_SRC;
#ifdef CONFIG_IFX_PCI_INTERNAL_CLK_SRC_60
	/* PCI 62.5 MHz clock */
	reg |= IFX_PCI_60MHZ;
#else
	/* PCI 33.3 MHz clock */
	reg |= IFX_PCI_33MHZ;
#endif /* CONFIG_IFX_PCI_INTERNAL_CLK_SRC_60 */
#endif /* CONFIG_IFX_PCI_EXTERNAL_CLK_SRC */
	IFX_REG_W32(reg, IFX_CGU_IF_CLK);
	reg = IFX_REG_R32(IFX_CGU_PCI_CR);
#ifdef CONFIG_IFX_PCI_EXTERNAL_CLK_SRC
	reg &= ~IFX_PCI_CLK_FROM_CGU;
#else
	reg |= IFX_PCI_CLK_FROM_CGU;
#endif /* CONFIG_IFX_PCI_EXTERNAL_CLK_SRC */
	/* Board-tunable clock delay, validated at compile time below */
	reg &= ~IFX_PCI_DELAY_MASK;
#if !defined(CONFIG_IFX_PCI_CLOCK_DELAY_NANO_SECONDS) || CONFIG_IFX_PCI_CLOCK_DELAY_NANO_SECONDS < 0 || CONFIG_IFX_PCI_CLOCK_DELAY_NANO_SECONDS > 7
#error "please define CONFIG_IFX_PCI_CLOCK_DELAY_NANO_SECONDS properly"
#endif
	reg |= (CONFIG_IFX_PCI_CLOCK_DELAY_NANO_SECONDS << IFX_PCI_DELAY_SHIFT);
#if !defined(CONFIG_IFX_PCI_CLOCK_DELAY_TENTH_NANO_SECOND) || CONFIG_IFX_PCI_CLOCK_DELAY_TENTH_NANO_SECOND < 0 || CONFIG_IFX_PCI_CLOCK_DELAY_TENTH_NANO_SECOND > 5
#error "Please CONFIG_CONFIG_IFX_PCI_CLOCK_DELAY_TENTH_NANO_SECOND properly"
#endif
	reg |= (CONFIG_IFX_PCI_CLOCK_DELAY_TENTH_NANO_SECOND << 18);
	IFX_REG_W32(reg, IFX_CGU_PCI_CR);
	ifx_pci_dev_reset_init();
#ifdef CONFIG_IFX_DUAL_MINI_PCI
	/* PCI_REQ1: P1.13 ALT 01 */
	ifx_gpio_pin_reserve(IFX_PCI_REQ1, pci_gpio_module_id);
	ifx_gpio_dir_in_set(IFX_PCI_REQ1, pci_gpio_module_id);
	ifx_gpio_altsel0_set(IFX_PCI_REQ1, pci_gpio_module_id);
	ifx_gpio_altsel1_clear(IFX_PCI_REQ1, pci_gpio_module_id);
	/* PCI_GNT1: P1.14 ALT 01 */
	ifx_gpio_pin_reserve(IFX_PCI_GNT1, pci_gpio_module_id);
	ifx_gpio_dir_out_set(IFX_PCI_GNT1, pci_gpio_module_id);
	ifx_gpio_altsel0_set(IFX_PCI_GNT1, pci_gpio_module_id);
	ifx_gpio_altsel1_clear(IFX_PCI_GNT1, pci_gpio_module_id);
	ifx_gpio_open_drain_set(IFX_PCI_GNT1, pci_gpio_module_id);
#endif /* CONFIG_IFX_DUAL_MINI_PCI */
	/* Enable auto-switching between PCI and EBU, normal ack */
	reg = IFX_PCI_CLK_CTRL_EBU_PCI_SWITCH_EN | IFX_PCI_CLK_CTRL_FPI_NORMAL_ACK;
	IFX_REG_W32(reg, IFX_PCI_CLK_CTRL);
	/* Configuration mode, i.e. configuration is not done, PCI access has to be retried */
	IFX_REG_CLR_BIT(IFX_PCI_MOD_CFG_OK, IFX_PCI_MOD);
	smp_wmb();
	/* Enable bus master/IO/MEM access */
	reg = IFX_REG_R32(IFX_PCI_CMD);
	reg |= IFX_PCI_CMD_IO_EN | IFX_PCI_CMD_MEM_EN | IFX_PCI_CMD_MASTER_EN;
	IFX_REG_W32(reg, IFX_PCI_CMD);
	reg = IFX_REG_R32(IFX_PCI_ARB);
#ifdef CONFIG_IFX_DUAL_MINI_PCI
	/* Enable external 4 PCI masters */
	reg &= ~(SM(3, IFX_PCI_ARB_PCI_PORT_ARB));
#else
	/* Enable external 1 PCI masters */
	reg &= ~(SM(1, IFX_PCI_ARB_PCI_PORT_ARB));
#endif
	/* Enable internal arbiter */
	reg |= IFX_PCI_ARB_INTERNAL_EN;
	/* Enable internal PCI master reqest */
	reg &= ~(SM(3, IFX_PCI_ARB_PCI_MASTER_REQ0));
	/* Enable EBU reqest */
	reg &= ~(SM(3, IFX_PCI_ARB_PCI_MASTER_REQ1));
	/* Enable all external masters request */
	reg &= ~(SM(3, IFX_PCI_ARB_PCI_MASTER_REQ2));
	IFX_REG_W32(reg, IFX_PCI_ARB);
	smp_wmb();
	/*
	 * FPI ==> PCI MEM address mapping
	 * base: 0xb8000000 == > 0x18000000
	 * size: 8x4M = 32M
	 */
	reg = IFX_PCI_MEM_PHY_BASE;
	IFX_REG_W32(reg, IFX_PCI_FPI_ADDR_MAP0);
	reg += 0x400000;
	IFX_REG_W32(reg, IFX_PCI_FPI_ADDR_MAP1);
	reg += 0x400000;
	IFX_REG_W32(reg, IFX_PCI_FPI_ADDR_MAP2);
	reg += 0x400000;
	IFX_REG_W32(reg, IFX_PCI_FPI_ADDR_MAP3);
	reg += 0x400000;
	IFX_REG_W32(reg, IFX_PCI_FPI_ADDR_MAP4);
	reg += 0x400000;
	IFX_REG_W32(reg, IFX_PCI_FPI_ADDR_MAP5);
	reg += 0x400000;
	IFX_REG_W32(reg, IFX_PCI_FPI_ADDR_MAP6);
	reg += 0x400000;
	IFX_REG_W32(reg, IFX_PCI_FPI_ADDR_MAP7);
	/* FPI ==> PCI IO address mapping
	 * base: 0xbAE00000 == > 0xbAE00000
	 * size: 2M
	 */
	IFX_REG_W32(IFX_PCI_IO_PHY_BASE, IFX_PCI_FPI_ADDR_MAP11_HIGH);
	/* PCI ==> FPI address mapping
	 * base: 0x0 ==> 0x0
	 * size: 32M
	 */
	/* At least 16M. Otherwise there will be discontiguous memory region. */
	reg = IFX_PCI_BAR_PREFETCH;
	/* BAR mask sized to cover all of low memory, rounded to a power of two */
	reg |= ((-round_up_to_next_power_of_two((max_low_pfn << PAGE_SHIFT))) & 0x0F000000);
	IFX_REG_W32(reg, IFX_PCI_BAR11MASK);
	IFX_REG_W32(0x0, IFX_PCI_ADDR_MAP11);
	IFX_REG_W32(0x0, IFX_PCI_BAR1);
#ifdef CONFIG_IFX_PCI_HW_SWAP
	/* both TX and RX endian swap are enabled */
	reg = IFX_PCI_SWAP_RX | IFX_PCI_SWAP_TX;
	IFX_REG_W32(reg, IFX_PCI_SWAP);
	smp_wmb();
#endif
	reg = IFX_REG_R32(IFX_PCI_BAR12MASK);
	reg |= IFX_PCI_BAR_DECODING_EN;
	IFX_REG_W32(reg, IFX_PCI_BAR12MASK);
	reg = IFX_REG_R32(IFX_PCI_BAR13MASK);
	reg |= IFX_PCI_BAR_DECODING_EN;
	IFX_REG_W32(reg, IFX_PCI_BAR13MASK);
	reg = IFX_REG_R32(IFX_PCI_BAR14MASK);
	reg |= IFX_PCI_BAR_DECODING_EN;
	IFX_REG_W32(reg, IFX_PCI_BAR14MASK);
	reg = IFX_REG_R32(IFX_PCI_BAR15MASK);
	reg |= IFX_PCI_BAR_DECODING_EN;
	IFX_REG_W32(reg, IFX_PCI_BAR15MASK);
	reg = IFX_REG_R32(IFX_PCI_BAR16MASK);
	reg |= IFX_PCI_BAR_DECODING_EN;
	IFX_REG_W32(reg, IFX_PCI_BAR16MASK);
	/* Use 4 dw burse length, XXX 8 dw will cause PCI timeout */
	reg = SM(IFX_PCI_FPI_BURST_LEN4, IFX_PCI_FPI_RD_BURST_LEN) |
		SM(IFX_PCI_FPI_BURST_LEN4, IFX_PCI_FPI_WR_BURST_LEN);
	IFX_REG_W32(reg, IFX_PCI_FPI_BURST_LENGTH);
	/* Configuration OK. */
	IFX_REG_SET_BIT(IFX_PCI_MOD_CFG_OK, IFX_PCI_MOD);
	smp_wmb();
	mdelay(1);
	ifx_pci_dev_reset();
}
/* Brief: disable external pci aribtor request
* Details:
* blocking call, i.e. only return when there is no external PCI bus activities
*/
/* Brief: disable external pci aribtor request
 * Details:
 * blocking call, i.e. only return when there is no external PCI bus activities
 *
 * NOTE(review): the poll reads three status bits but compares against
 * only two (EBU_IDLE is in the mask, not in the expected value), so the
 * loop also waits for EBU_IDLE to drop — confirm against the arbiter
 * register spec that this asymmetry is intentional.
 */
void
ifx_disable_external_pci(void)
{
	/* Mask off external master request 2 in the arbiter */
	IFX_REG_RMW32_FILED(IFX_PCI_ARB_PCI_MASTER_REQ2, 3, IFX_PCI_ARB);
	smp_wmb();
	/* make sure EBUSY is low && Frame Ird is high) */
	while ((IFX_REG_R32(IFX_PCI_ARB) &
		(IFX_PCI_ARB_PCI_NOT_READY | IFX_PCI_ARB_PCI_NO_FRM | IFX_PCI_ARB_EBU_IDLE))
		!= (IFX_PCI_ARB_PCI_NOT_READY | IFX_PCI_ARB_PCI_NO_FRM));
}
/* Brief: enable external pci aribtor request
* Details:
* non-blocking call
*/
/* Brief: enable external pci aribtor request
 * Details:
 * non-blocking call
 */
void
ifx_enable_external_pci(void)
{
	/* Clear means enabling all external masters request */
	IFX_REG_CLR_BIT(IFX_PCI_ARB_PCI_MASTER_REQ2, IFX_PCI_ARB);
	smp_wmb();
}
#ifdef CONFIG_DANUBE_EBU_PCI_SW_ARBITOR
/*
 * Hand the shared bus over to the EBU exclusively: park all PCI
 * masters, wait for the bus to idle, disable PCI/EBU auto-switching,
 * and re-run the configuration-OK handshake with the cardbus frame
 * mask disabled. Pairs with ifx_disable_ebu() below.
 */
void
ifx_enable_ebu(void)
{
	u32 reg;
	/* Delay before enabling ebu ??? */
	/* Disable int/ext pci request */
	reg = IFX_REG_R32(IFX_PCI_ARB);
	reg &= ~(IFX_PCI_ARB_PCI_MASTER_REQ1);
	reg |= (IFX_PCI_ARB_PCI_MASTER_REQ0 | IFX_PCI_ARB_PCI_MASTER_REQ2);
	IFX_REG_W32(reg, IFX_PCI_ARB);
	/* Poll for pci bus idle */
	reg = IFX_REG_R32(IFX_PCI_ARB);
	while ((reg & IFX_PCI_ART_PCI_IDLE) != IFX_PCI_ART_PCI_IDLE) {
		reg = IFX_REG_R32(IFX_PCI_ARB);
	};
	/* EBU only, Arbitor fix*/
	IFX_REG_W32(0, IFX_PCI_CLK_CTRL);
	/*
	 * Unmask CFRAME_MASK changing PCI's Config Space via internal path
	 * might need to change to external path
	 */
	/* Start configuration, one burst read is allowed */
	reg = IFX_REG_R32(IFX_PCI_MOD);
	reg &= ~IFX_PCI_MOD_CFG_OK;
	reg |= IFX_PCI_MOD_TWO_IRQ_INTA_AND_INTB | SM(1, IFX_PCI_MOD_READ_BURST_THRESHOLD);
	IFX_REG_W32(reg, IFX_PCI_MOD);
	reg = IFX_REG_R32(IFX_PCI_CARDBUS_FRAME_MASK);
	reg &= ~IFX_PCI_CARDBUS_FRAME_MASK_EN;
	IFX_REG_W32(reg, IFX_PCI_CARDBUS_FRAME_MASK);
	IFX_REG_SET_BIT(IFX_PCI_MOD_CFG_OK, IFX_PCI_MOD);
	/* Read back to post the previous writes before returning */
	reg = IFX_REG_R32(IFX_PCI_CARDBUS_FRAME_MASK);
}
EXPORT_SYMBOL(ifx_enable_ebu);
/*
 * Undo ifx_enable_ebu(): restore PCI/EBU auto-switching, re-enable the
 * cardbus frame mask, re-run the configuration-OK handshake, and
 * re-enable the internal and external PCI master requests.
 */
void
ifx_disable_ebu(void)
{
	u32 reg;
	/* Delay before enabling ebu ??? */
	/* Restore EBU and PCI auto switch */
	IFX_REG_W32(IFX_PCI_CLK_CTRL_EBU_PCI_SWITCH_EN, IFX_PCI_CLK_CTRL);
	/*
	 * unmask CFRAME_MASK changing PCI's Config Space via internal path
	 * might need to change to external path
	 */
	reg = IFX_REG_R32(IFX_PCI_MOD);
	/* Start configuration, one burst read is allowed */
	reg &= ~IFX_PCI_MOD_CFG_OK;
	reg |= IFX_PCI_MOD_TWO_IRQ_INTA_AND_INTB | SM(1, IFX_PCI_MOD_READ_BURST_THRESHOLD);
	IFX_REG_W32(reg, IFX_PCI_MOD);
	reg = IFX_REG_R32(IFX_PCI_CARDBUS_FRAME_MASK);
	reg |= IFX_PCI_CARDBUS_FRAME_MASK_EN;
	IFX_REG_W32(reg, IFX_PCI_CARDBUS_FRAME_MASK);
	IFX_REG_SET_BIT(IFX_PCI_MOD_CFG_OK, IFX_PCI_MOD);
	/* Enable int/ext pci request */
	reg = IFX_REG_R32(IFX_PCI_ARB);
	reg &= ~(IFX_PCI_ARB_PCI_MASTER_REQ0 | IFX_PCI_ARB_PCI_MASTER_REQ2);
	reg |= IFX_PCI_ARB_PCI_MASTER_REQ1;
	IFX_REG_W32(reg, IFX_PCI_ARB);
}
EXPORT_SYMBOL(ifx_disable_ebu);
#endif /* CONFIG_DANUBE_EBU_PCI_SW_ARBITOR */
/*
 * Per-device resource fixup run early for every PCI device: mark
 * unassigned resources (end == 0xffffffff) for reallocation, translate
 * assigned resources by the host controller's mem/io offset, and
 * enable the standard command-register bits.
 */
static void __devinit
pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller* hose = (struct pci_controller *)dev->sysdata;
	int i;
	unsigned long offset;

	if (!hose) {
		printk(KERN_ERR "No hose for PCI dev %s!\n", pci_name(dev));
		return;
	}
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		if (!res->flags)
			continue;
		/* An all-ones end marks a BAR firmware never assigned */
		if (res->end == 0xffffffff) {
			printk(KERN_INFO "PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
				pci_name(dev), i, (u64)res->start, (u64)res->end);
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}
		offset = 0;
		if (res->flags & IORESOURCE_MEM) {
			offset = hose->mem_offset;
		}
		else if (res->flags & IORESOURCE_IO) {
			offset = hose->io_offset;
		}
		/* Shift bus addresses into the CPU's view of the window */
		if (offset != 0) {
			res->start += offset;
			res->end += offset;
			printk(KERN_INFO "Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
				i, res->flags, pci_name(dev),
				(u64)res->start - offset, (u64)res->start);
		}
	}
	IFX_PCI_PRINT("[%s %s %d]: %s\n", __FILE__, __func__, __LINE__, pci_name(dev));
	/* Enable I/O, MEM, Bus Master, Special Cycles SERR, Fast back-to-back */
	pci_write_config_word(dev, PCI_COMMAND,
		PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
		PCI_COMMAND_SPECIAL | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK);
}
DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
/**
* \fn int ifx_pci_bios_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
* \brief Map a PCI device to the appropriate interrupt line
*
* \param[in] dev The Linux PCI device structure for the device to map
* \param[in] slot The slot number for this device on __BUS 0__. Linux
* enumerates through all the bridges and figures out the
* slot on Bus 0 where this device eventually hooks to.
* \param[in] pin The PCI interrupt pin read from the device, then swizzled
* as it goes through each bridge.
* \return Interrupt number for the device
* \ingroup IFX_PCI_OS
*/
/*
 * Map a device's interrupt pin to a MIPS IRQ number. Only INTA (pin 1)
 * is wired on this platform — it is shared with the EBU and configured
 * low-level triggered here. Pins 2/3 and pin 0 (none) yield -1.
 */
int
ifx_pci_bios_map_irq(IFX_PCI_CONST struct pci_dev *dev, u8 slot, u8 pin)
{
	int irq = -1;
	IFX_PCI_PRINT("%s dev %s slot %d pin %d \n", __func__, pci_name(dev), slot, pin);
	switch (pin) {
	case 0:
		/* Device uses no interrupt pin */
		break;
	case 1:
		IFX_PCI_PRINT("%s dev %s: interrupt pin 1\n", __func__, pci_name(dev));
		/*
		 * PCI_INTA--shared with EBU
		 * falling edge level triggered:0x4, low level:0xc, rising edge:0x2
		 */
		IFX_REG_W32(IFX_EBU_PCC_CON_IREQ_LOW_LEVEL_DETECT, IFX_EBU_PCC_CON);
		/* enable interrupt only */
		IFX_REG_W32(IFX_EBU_PCC_IEN_PCI_EN, IFX_EBU_PCC_IEN);
		irq = INT_NUM_IM0_IRL22;
		break;
	case 2:
	case 3:
		/* INTB/INTC not routed on this board */
		break;
	default:
		printk(KERN_WARNING "WARNING: %s dev %s: invalid interrupt pin %d\n", __func__, pci_name(dev), pin);
		break;
	}
	return irq;
}
/**
* \fn int ifx_pci_bios_plat_dev_init(struct pci_dev *dev)
* \brief Called to perform platform specific PCI setup
*
* \param[in] dev The Linux PCI device structure for the device to map
* \return OK
* \ingroup IFX_PCI_OS
*/
/* Platform-specific per-device init hook: nothing to do here, always OK. */
int
ifx_pci_bios_plat_dev_init(struct pci_dev *dev)
{
	return 0;
}
/*
 * Soft-reset the PCI core after a bus timeout: quiesce external
 * masters, pulse the soft-reset request, wait for the acknowledge to
 * clear, then re-enable external masters. Runs with local IRQs off so
 * the sequence is not interleaved with other bus activity on this CPU.
 * The empty for-loops on a volatile counter are deliberate short
 * settling delays (vendor-chosen iteration counts).
 */
static void inline
ifx_pci_core_rst(void)
{
	u32 reg;
	unsigned long flags;
	volatile int i;

	local_irq_save(flags);
	/* Ack the interrupt */
	IFX_REG_W32(IFX_REG_R32(IFX_PCI_IRA), IFX_PCI_IRA);
	/* Disable external masters */
	ifx_disable_external_pci();
	/* PCI core reset start */
	reg = IFX_REG_R32(IFX_PCI_SFT_RST);
	reg |= IFX_PCI_SFT_RST_REQ;
	IFX_REG_W32(reg, IFX_PCI_SFT_RST);
	for (i = 0; i < 100; i++);
	/* Wait for PCI core reset to be finished */
	while ((IFX_REG_R32(IFX_PCI_SFT_RST) & IFX_PCI_SFT_RST_ACKING));
	/* Out of reset to normal operation */
	reg = IFX_REG_R32(IFX_PCI_SFT_RST);
	reg &= ~IFX_PCI_SFT_RST_REQ;
	IFX_REG_W32(reg, IFX_PCI_SFT_RST);
	for (i = 0; i < 50; i++);
	/* Enable external masters */
	ifx_enable_external_pci();
	local_irq_restore(flags);
}
/*
 * PCI core interrupt handler. The signature changed in 2.6.19 (pt_regs
 * dropped), hence the #if. Only the bus-timeout condition is handled:
 * it triggers a full soft reset of the PCI core.
 */
static irqreturn_t
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
ifx_pci_core_int_isr(int irq, void *dev_id)
#else
ifx_pci_core_int_isr(int irq, void *dev_id, struct pt_regs *regs)
#endif
{
	/* Only care about Timeout interrupt */
	if ((IFX_REG_R32(IFX_PCI_IRR) & IFX_PCI_IR_TIMEOUT) == IFX_PCI_IR_TIMEOUT) {
		printk(KERN_ERR "%s: PCI timeout occurred\n", __func__);
		ifx_pci_core_rst();
	}
	return IRQ_HANDLED;
}
static void
ifx_pci_ir_irq_init(void)
{
int ret;
/* Clear the interrupt first */
IFX_REG_W32(IFX_PCI_IR_TIMEOUT, IFX_PCI_IRA);
ret = request_irq(IFX_PCI_IR, ifx_pci_core_int_isr,
IRQF_DISABLED, "ifx_pci_ir", NULL);
if (ret) {
printk("%s:request irq %d failed with %d \n", __func__, IFX_PCI_IR, ret);
return;
}
/* Unmask Timeout interrupt */
IFX_REG_W32(IFX_PCI_IR_TIMEOUT, IFX_PCI_IRM);
}
/*!
* \fn static int __init ifx_pci_init(void)
* \brief Initialize the IFX PCI host controller, register with PCI
* bus subsystem.
*
* \return -ENOMEM configuration/io space mapping failed.
* \return -EIO pci bus not initialized
* \return 0 OK
* \ingroup IFX_PCI_OS
*/
/*
 * Initialize the IFX PCI host controller and register it with the PCI
 * bus subsystem: bring up the hardware, map config and I/O space,
 * sanity-check the bridge's COMMAND register, then hand the controller
 * to the MIPS PCI core and install the timeout-IRQ handler.
 *
 * Returns -ENOMEM if a mapping fails, -EIO if the bridge looks
 * uninitialized, 0 on success.
 */
static int __init
ifx_pci_init(void)
{
	u32 cmdreg;
	void __iomem *io_map_base;
	char ver_str[128] = {0};

	pci_bus_error_flag = 1;
	ifx_pci_startup();
	ifx_pci_cfg_space = ioremap_nocache(IFX_PCI_CFG_PHY_BASE, IFX_PCI_CFG_SIZE);
	if (ifx_pci_cfg_space == NULL) {
		printk(KERN_ERR "%s configuration space ioremap failed\n", __func__);
		return -ENOMEM;
	}
	IFX_PCI_PRINT("[%s %s %d]: ifx_pci_cfg_space %p\n", __FILE__, __func__, __LINE__, ifx_pci_cfg_space);
	/* Otherwise, warning will pop up */
	io_map_base = ioremap(IFX_PCI_IO_PHY_BASE, IFX_PCI_IO_SIZE);
	if (io_map_base == NULL) {
		iounmap(ifx_pci_cfg_space);
		IFX_PCI_PRINT("%s io space ioremap failed\n", __func__);
		return -ENOMEM;
	}
	ifx_pci_controller.io_map_base = (unsigned long)io_map_base;
	cmdreg = ifx_pci_config_read(pci_config_addr(0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0), PCI_COMMAND));
	if (!(cmdreg & PCI_COMMAND_MASTER)) {
		printk(KERN_INFO "PCI: Skipping PCI probe. Bus is not initialized.\n");
		/* Fix: io_map_base was leaked on this error path */
		iounmap(io_map_base);
		iounmap(ifx_pci_cfg_space);
		return -EIO;
	}
	ifx_pci_bus_status |= PCI_BUS_ENABLED;
	/* Turn on ExpMemEn */
	cmdreg = ifx_pci_config_read(pci_config_addr(0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0), 0x40));
	ifx_pci_config_write(pci_config_addr(0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0), 0x40),
		cmdreg | 0x10);
	/* Read back to post the write */
	cmdreg = ifx_pci_config_read(pci_config_addr(0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0), 0x40));
	/* Enable normal FPI bus exception after we configured everything */
	IFX_REG_CLR_BIT(IFX_PCI_CLK_CTRL_FPI_NORMAL_ACK, IFX_PCI_CLK_CTRL);
	IFX_PCI_PRINT("[%s %s %d]: mem_resource @%p, io_resource @%p\n", __FILE__, __func__, __LINE__,
		&ifx_pci_controller.mem_resource, &ifx_pci_controller.io_resource);
	register_pci_controller(&ifx_pci_controller);
	ifx_pci_ir_irq_init();
	ifx_drv_ver(ver_str, "PCI host controller", IFX_PCI_VER_MAJOR, IFX_PCI_VER_MID, IFX_PCI_VER_MINOR);
	printk(KERN_INFO "%s", ver_str);
	return 0;
}
arch_initcall(ifx_pci_init);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lei Chuanhua, chuanhua.lei@infineon.com");
MODULE_SUPPORTED_DEVICE("Infineon builtin PCI module for Danube AR9 and VR9");
MODULE_DESCRIPTION("Infineon builtin PCI host controller driver");
|
kbridgers/VOLTE4GFAX
|
target/linux/ltqcpe/files-2.6.32/arch/mips/pci/ifxmips_pci.c
|
C
|
gpl-2.0
| 21,354
|
/*
* Copyright (c) 2010, Google, Inc.
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* VP8 decoder support via libvpx
*/
#define VPX_CODEC_DISABLE_COMPAT 1
#include <vpx/vpx_decoder.h>
#include <vpx/vp8dx.h>
#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "internal.h"
/* Per-stream private data: just the libvpx decoder instance. */
typedef struct VP8DecoderContext {
    struct vpx_codec_ctx decoder;
} VP8Context;
/**
 * Common init for the VP8 and VP9 decoders: create a libvpx decoder
 * instance for the given interface and fix the output format to
 * YUV420P. Returns 0 on success or AVERROR(EINVAL) if libvpx refuses.
 */
static av_cold int vpx_init(AVCodecContext *avctx,
                            const struct vpx_codec_iface *iface)
{
    VP8Context *ctx = avctx->priv_data;
    struct vpx_codec_dec_cfg deccfg = {
        /* token partitions+1 would be a decent choice */
        .threads = FFMIN(avctx->thread_count, 16)
    };
    av_log(avctx, AV_LOG_INFO, "%s\n", vpx_codec_version_str());
    av_log(avctx, AV_LOG_VERBOSE, "%s\n", vpx_codec_build_config());
    if (vpx_codec_dec_init(&ctx->decoder, iface, &deccfg, 0) != VPX_CODEC_OK) {
        const char *error = vpx_codec_error(&ctx->decoder);
        av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder: %s\n",
               error);
        return AVERROR(EINVAL);
    }
    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    return 0;
}
/**
 * Decode one packet: feed it to libvpx, then fetch the decoded image
 * (if any) and copy it into an AVFrame. Sets *got_frame when a frame
 * was produced. Returns the consumed packet size or a negative error.
 */
static int vp8_decode(AVCodecContext *avctx,
                      void *data, int *got_frame, AVPacket *avpkt)
{
    VP8Context *ctx = avctx->priv_data;
    AVFrame *picture = data;
    const void *iter = NULL;
    struct vpx_image *img;
    int ret;

    if (vpx_codec_decode(&ctx->decoder, avpkt->data, avpkt->size, NULL, 0) !=
        VPX_CODEC_OK) {
        const char *error = vpx_codec_error(&ctx->decoder);
        const char *detail = vpx_codec_error_detail(&ctx->decoder);
        av_log(avctx, AV_LOG_ERROR, "Failed to decode frame: %s\n", error);
        if (detail)
            av_log(avctx, AV_LOG_ERROR, "  Additional information: %s\n",
                   detail);
        return AVERROR_INVALIDDATA;
    }
    /* A packet may legitimately yield no displayable frame (e.g. alt-ref) */
    if ((img = vpx_codec_get_frame(&ctx->decoder, &iter))) {
        if (img->fmt != VPX_IMG_FMT_I420) {
            av_log(avctx, AV_LOG_ERROR, "Unsupported output colorspace (%d)\n",
                   img->fmt);
            return AVERROR_INVALIDDATA;
        }
        /* Track mid-stream resolution changes before allocating the frame */
        if ((int) img->d_w != avctx->width || (int) img->d_h != avctx->height) {
            av_log(avctx, AV_LOG_INFO, "dimension change! %dx%d -> %dx%d\n",
                   avctx->width, avctx->height, img->d_w, img->d_h);
            if (av_image_check_size(img->d_w, img->d_h, 0, avctx))
                return AVERROR_INVALIDDATA;
            avcodec_set_dimensions(avctx, img->d_w, img->d_h);
        }
        if ((ret = ff_get_buffer(avctx, picture, 0)) < 0)
            return ret;
        av_image_copy(picture->data, picture->linesize, img->planes,
                      img->stride, avctx->pix_fmt, img->d_w, img->d_h);
        *got_frame = 1;
    }
    return avpkt->size;
}
/** Tear down the libvpx decoder instance. Shared by VP8 and VP9. */
static av_cold int vp8_free(AVCodecContext *avctx)
{
    VP8Context *ctx = avctx->priv_data;
    vpx_codec_destroy(&ctx->decoder);
    return 0;
}
#if CONFIG_LIBVPX_VP8_DECODER
/** VP8 entry point: bind the common init to the VP8 decoder interface. */
static av_cold int vp8_init(AVCodecContext *avctx)
{
    return vpx_init(avctx, &vpx_codec_vp8_dx_algo);
}
/* Codec registration for the libvpx-backed VP8 decoder. */
AVCodec ff_libvpx_vp8_decoder = {
    .name           = "libvpx",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VP8,
    .priv_data_size = sizeof(VP8Context),
    .init           = vp8_init,
    .close          = vp8_free,
    .decode         = vp8_decode,
    .capabilities   = CODEC_CAP_AUTO_THREADS | CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("libvpx VP8"),
};
#endif /* CONFIG_LIBVPX_VP8_DECODER */
#if CONFIG_LIBVPX_VP9_DECODER
/** VP9 entry point: bind the common init to the VP9 decoder interface. */
static av_cold int vp9_init(AVCodecContext *avctx)
{
    return vpx_init(avctx, &vpx_codec_vp9_dx_algo);
}
/* Codec registration for the libvpx-backed VP9 decoder (experimental:
 * reuses the VP8 context, free and decode callbacks). */
AVCodec ff_libvpx_vp9_decoder = {
    .name           = "libvpx-vp9",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VP9,
    .priv_data_size = sizeof(VP8Context),
    .init           = vp9_init,
    .close          = vp8_free,
    .decode         = vp8_decode,
    .capabilities   = CODEC_CAP_AUTO_THREADS | CODEC_CAP_EXPERIMENTAL,
    .long_name      = NULL_IF_CONFIG_SMALL("libvpx VP9"),
};
#endif /* CONFIG_LIBVPX_VP9_DECODER */
|
DDTChen/CookieVLC
|
vlc/contrib/android/ffmpeg/libavcodec/libvpxdec.c
|
C
|
gpl-2.0
| 4,941
|
/*
* Driver for the SGS-Thomson M48T35 Timekeeper RAM chip
*
* Real Time Clock interface for Linux
*
* TODO: Implement periodic interrupts.
*
* Copyright (C) 2000 Silicon Graphics, Inc.
* Written by Ulf Carlsson (ulfc@engr.sgi.com)
*
* Based on code written by Paul Gortmaker.
*
* This driver allows use of the real time clock (built into
* nearly all computers) from user space. It exports the /dev/rtc
* interface supporting various ioctl() and also the /proc/rtc
* pseudo-file for status information.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#define RTC_VERSION "1.09b"
#include <linux/bcd.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp_lock.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <asm/m48t35.h>
#include <asm/sn/ioc3.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/sn0/ip27.h>
#include <asm/sn/sn0/hub.h>
#include <asm/sn/sn_private.h>
static long rtc_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
static int rtc_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data);
static void get_rtc_time(struct rtc_time *rtc_tm);
/*
* Bits in rtc_status. (6 bits of room for future expansion)
*/
#define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */
#define RTC_TIMER_ON 0x02 /* missed irq timer active */
static unsigned char rtc_status; /* bitmapped status byte. */
static unsigned long rtc_freq; /* Current periodic IRQ rate */
static struct m48t35_rtc *rtc;
/*
* If this driver ever becomes modularised, it will be really nice
* to make the epoch retain its value across module reload...
*/
static unsigned long epoch = 1970; /* year corresponding to 0x00 */
static const unsigned char days_in_mo[] =
{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
/*
 * Handle /dev/rtc ioctls.
 *
 * RTC_RD_TIME reads the current time into a struct rtc_time and copies
 * it to user space.  RTC_SET_TIME (requires CAP_SYS_TIME) validates a
 * user-supplied struct rtc_time and writes it to the M48T35 chip.
 * Any other command returns -EINVAL.
 */
static long rtc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct rtc_time wtime;

	switch (cmd) {
	case RTC_RD_TIME:	/* Read the time/date from RTC	*/
	{
		get_rtc_time(&wtime);
		break;	/* falls through to the copy_to_user() at the end */
	}
	case RTC_SET_TIME:	/* Set the RTC */
	{
		struct rtc_time rtc_tm;
		unsigned char mon, day, hrs, min, sec, leap_yr;
		unsigned int yrs;

		if (!capable(CAP_SYS_TIME))
			return -EACCES;

		if (copy_from_user(&rtc_tm, (struct rtc_time*)arg,
				   sizeof(struct rtc_time)))
			return -EFAULT;

		yrs = rtc_tm.tm_year + 1900;
		mon = rtc_tm.tm_mon + 1;   /* tm_mon starts at zero */
		day = rtc_tm.tm_mday;
		hrs = rtc_tm.tm_hour;
		min = rtc_tm.tm_min;
		sec = rtc_tm.tm_sec;

		if (yrs < 1970)
			return -EINVAL;

		leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));

		/* mon == 0 is implicitly rejected below: days_in_mo[0] is 0,
		 * so any day > 0 fails the day-of-month check. */
		if ((mon > 12) || (day == 0))
			return -EINVAL;

		if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
			return -EINVAL;

		if ((hrs >= 24) || (min >= 60) || (sec >= 60))
			return -EINVAL;

		/* The chip stores only a two-digit BCD year relative to epoch. */
		if ((yrs -= epoch) > 255)    /* They are unsigned */
			return -EINVAL;

		if (yrs > 169)
			return -EINVAL;

		if (yrs >= 100)
			yrs -= 100;

		sec = bin2bcd(sec);
		min = bin2bcd(min);
		hrs = bin2bcd(hrs);
		day = bin2bcd(day);
		mon = bin2bcd(mon);
		yrs = bin2bcd(yrs);

		/* M48T35_RTC_SET freezes the user registers while we update them. */
		spin_lock_irq(&rtc_lock);
		rtc->control |= M48T35_RTC_SET;
		rtc->year = yrs;
		rtc->month = mon;
		rtc->date = day;
		rtc->hour = hrs;
		rtc->min = min;
		rtc->sec = sec;
		rtc->control &= ~M48T35_RTC_SET;
		spin_unlock_irq(&rtc_lock);

		return 0;
	}
	default:
		return -EINVAL;
	}
	/* Only RTC_RD_TIME reaches here; hand the read time back to user space. */
	return copy_to_user((void *)arg, &wtime, sizeof wtime) ? -EFAULT : 0;
}
/*
* We enforce only one user at a time here with the open/close.
* Also clear the previous interrupt data on an open, and clean
* up things on a close.
*/
/*
 * Enforce single-opener semantics for /dev/rtc: fail with -EBUSY if the
 * device is already open, otherwise mark it open.
 */
static int rtc_open(struct inode *inode, struct file *file)
{
	int rc = 0;

	lock_kernel();
	spin_lock_irq(&rtc_lock);
	if (rtc_status & RTC_IS_OPEN)
		rc = -EBUSY;
	else
		rtc_status |= RTC_IS_OPEN;
	spin_unlock_irq(&rtc_lock);
	unlock_kernel();
	return rc;
}
/*
 * Release /dev/rtc: clear the exclusive-open flag so another process
 * may claim the device.  No interrupts are implemented, so nothing
 * else needs tearing down.
 */
static int rtc_release(struct inode *inode, struct file *file)
{
	spin_lock_irq(&rtc_lock);
	rtc_status &= ~RTC_IS_OPEN;
	spin_unlock_irq(&rtc_lock);

	return 0;
}
/*
 * The various file operations we support.
 * Note: .unlocked_ioctl means rtc_ioctl() runs without the BKL and
 * must do its own locking (it takes rtc_lock around chip access).
 */
static const struct file_operations rtc_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= rtc_ioctl,
	.open		= rtc_open,
	.release	= rtc_release,
};
static struct miscdevice rtc_dev=
{
RTC_MINOR,
"rtc",
&rtc_fops
};
/*
 * Driver init: locate the M48T35 registers (behind the IOC3 bytebus on
 * the console node's klconfig info), register the /dev/rtc misc device
 * and create the /proc/driver/rtc status entry.
 * Returns 0 on success, -ENODEV / -ENOENT on registration failure.
 */
static int __init rtc_init(void)
{
	/* Direct-mapped chip registers; no ioremap is done here. */
	rtc = (struct m48t35_rtc *)
		(KL_CONFIG_CH_CONS_INFO(master_nasid)->memory_base + IOC3_BYTEBUS_DEV0);

	printk(KERN_INFO "Real Time Clock Driver v%s\n", RTC_VERSION);
	if (misc_register(&rtc_dev)) {
		printk(KERN_ERR "rtc: cannot register misc device.\n");
		return -ENODEV;
	}
	if (!create_proc_read_entry("driver/rtc", 0, NULL, rtc_read_proc, NULL)) {
		printk(KERN_ERR "rtc: cannot create /proc/rtc.\n");
		misc_deregister(&rtc_dev);
		return -ENOENT;
	}

	/* Nominal periodic rate; periodic interrupts are not implemented. */
	rtc_freq = 1024;
	return 0;
}
/*
 * Driver teardown: remove the proc entry and deregister /dev/rtc.
 *
 * Fix: rtc_init() creates the entry as "driver/rtc", but the original
 * code removed "rtc", leaving a stale /proc/driver/rtc entry behind on
 * module unload.  The path passed to remove_proc_entry() must match the
 * one used at create time.
 */
static void __exit rtc_exit(void)
{
	/* interrupts and timer disabled at this point by rtc_release */

	remove_proc_entry("driver/rtc", NULL);
	misc_deregister(&rtc_dev);
}
module_init(rtc_init);
module_exit(rtc_exit);
/*
 * Render the RTC status into 'buf' in the classic /proc/rtc layout.
 * Returns the number of characters written.
 */
static int rtc_get_status(char *buf)
{
	struct rtc_time tm;

	/* Emulate the standard /proc/rtc output. */
	get_rtc_time(&tm);

	/*
	 * There is no way to tell if the luser has the RTC set for local
	 * time or for Universal Standard Time (GMT). Probably local though.
	 */
	return sprintf(buf,
		       "rtc_time\t: %02d:%02d:%02d\n"
		       "rtc_date\t: %04d-%02d-%02d\n"
		       "rtc_epoch\t: %04lu\n"
		       "24hr\t\t: yes\n",
		       tm.tm_hour, tm.tm_min, tm.tm_sec,
		       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, epoch);
}
/*
 * /proc read callback: renders the whole status into the single 'page'
 * buffer, then applies the classic one-page proc_read windowing using
 * off/count (set *eof once the remaining data fits in this read).
 */
static int rtc_read_proc(char *page, char **start, off_t off,
                         int count, int *eof, void *data)
{
	int len = rtc_get_status(page);
	if (len <= off+count) *eof = 1;
	*start = page + off;
	len -= off;
	if (len>count) len = count;
	if (len<0) len = 0;	/* off past end of data: nothing left to return */
	return len;
}
/*
 * Read the current time from the M48T35 into *rtc_tm and convert the
 * chip's BCD fields to binary struct rtc_time conventions.
 */
static void get_rtc_time(struct rtc_time *rtc_tm)
{
	/*
	 * Do we need to wait for the last update to finish?
	 */

	/*
	 * Only the values that we read from the RTC are set. We leave
	 * tm_wday, tm_yday and tm_isdst untouched. Even though the
	 * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
	 * by the RTC when initially set to a non-zero value.
	 */
	/* M48T35_RTC_READ latches the registers so we get a coherent snapshot. */
	spin_lock_irq(&rtc_lock);
	rtc->control |= M48T35_RTC_READ;
	rtc_tm->tm_sec = rtc->sec;
	rtc_tm->tm_min = rtc->min;
	rtc_tm->tm_hour = rtc->hour;
	rtc_tm->tm_mday = rtc->date;
	rtc_tm->tm_mon = rtc->month;
	rtc_tm->tm_year = rtc->year;
	rtc->control &= ~M48T35_RTC_READ;
	spin_unlock_irq(&rtc_lock);

	/* Chip stores BCD; struct rtc_time wants binary. */
	rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
	rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
	rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
	rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
	rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
	rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);

	/*
	 * Account for differences between how the RTC uses the values
	 * and how they are defined in a struct rtc_time;
	 */
	if ((rtc_tm->tm_year += (epoch - 1900)) <= 69)
		rtc_tm->tm_year += 100;

	rtc_tm->tm_mon--;	/* struct rtc_time months are 0-based */
}
|
mpalmer/linux-2.6
|
drivers/char/ip27-rtc.c
|
C
|
gpl-2.0
| 7,743
|
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
int f1(int limite);
/*
 * Mutually-recursive CPU burner used to exercise gprof call-graph
 * attribution: calls f1() with every value below 'limite'.
 *
 * Fix: the function is declared to return int but fell off the end
 * without a return statement — undefined behavior if the value is
 * ever used.  Return 0 explicitly.
 */
int f2(int limite)
{
	int i;

	for (i = 1; i < limite; i++)
		f1(i);
	return 0;
}
/*
 * Counterpart of f2(): calls f2() with every value below 'limite'.
 *
 * Fix: same missing-return defect as f2() — a non-void function must
 * return a value on all paths.
 */
int f1(int limite)
{
	int i;

	for (i = 1; i < limite; i++)
		f2(i);
	return 0;
}
/*
 * Entry point: kick off the mutually-recursive f1/f2 workload so gprof
 * has something to profile.  Returns 0 explicitly for clarity (C99+
 * would imply it for main, but being explicit costs nothing).
 */
int main(void)
{
	f1(25);
	return 0;
}
|
Logilin/ils
|
exemples/chapitre-01/test_gprof.c
|
C
|
gpl-2.0
| 256
|
/* dnsmasq is Copyright (c) 2000-2015 Simon Kelley
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 dated June, 1991, or
(at your option) version 3 dated 29 June, 2007.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "dnsmasq.h"
#ifdef HAVE_LOOP
static ssize_t loop_make_probe(u32 uid);
/*
 * Send one uid-tagged probe query to every eligible upstream server.
 * If such a probe ever arrives back as an inbound query, detect_loop()
 * recognises the uid and marks the offending server SERV_LOOP.
 */
void loop_send_probes()
{
  struct server *serv;

  if (!option_bool(OPT_LOOP_DETECT))
    return;

  /* Loop through all upstream servers not for particular domains, and send a query to that server which is
     identifiable, via the uid. If we see that query back again, then the server is looping, and we should not use it. */
  for (serv = daemon->servers; serv; serv = serv->next)
    /* Skip servers that are domain-specific, unusable or already flagged. */
    if (!(serv->flags &
	  (SERV_LITERAL_ADDRESS | SERV_NO_ADDR | SERV_USE_RESOLV | SERV_NO_REBIND | SERV_HAS_DOMAIN | SERV_FOR_NODOTS | SERV_LOOP)))
      {
	ssize_t len = loop_make_probe(serv->uid);
	int fd;
	struct randfd *rfd = NULL;

	/* Use the server's bound socket if it has one, otherwise grab a
	   random-port fd for the probe. */
	if (serv->sfd)
	  fd = serv->sfd->fd;
	else
	  {
	    if (!(rfd = allocate_rfd(serv->addr.sa.sa_family)))
	      continue;
	    fd = rfd->fd;
	  }

	/* retry_send() loops on transient errors (EINTR/EAGAIN). */
	while (retry_send(sendto(fd, daemon->packet, len, 0,
				 &serv->addr.sa, sa_len(&serv->addr))));

	free_rfd(rfd);	/* no-op when rfd is NULL */
      }
}
/*
 * Build a loop-probe DNS query in daemon->packet and return its length.
 * The QNAME is "<8 hex digits of uid>.<LOOP_TEST_DOMAIN>", queried with
 * type LOOP_TEST_TYPE, class IN, RD set.
 * NOTE(review): the encoding writes LOOP_TEST_DOMAIN as one length-prefixed
 * label — assumes LOOP_TEST_DOMAIN contains no dots; confirm its definition.
 */
static ssize_t loop_make_probe(u32 uid)
{
  struct dns_header *header = (struct dns_header *)daemon->packet;
  unsigned char *p = (unsigned char *)(header+1);

  /* packet buffer overwritten */
  daemon->srv_save = NULL;

  header->id = rand16();
  header->ancount = header->nscount = header->arcount = htons(0);
  header->qdcount = htons(1);
  header->hb3 = HB3_RD;
  header->hb4 = 0;
  SET_OPCODE(header, QUERY);

  /* First label: exactly 8 hex characters encoding the server uid. */
  *p++ = 8;
  sprintf((char *)p, "%.8x", uid);
  p += 8;

  *p++ = strlen(LOOP_TEST_DOMAIN);
  strcpy((char *)p, LOOP_TEST_DOMAIN); /* Add terminating zero */
  p += strlen(LOOP_TEST_DOMAIN) + 1;

  PUTSHORT(LOOP_TEST_TYPE, p);
  PUTSHORT(C_IN, p);

  return p - (unsigned char *)header;
}
int detect_loop(char *query, int type)
{
int i;
u32 uid;
struct server *serv;
if (!option_bool(OPT_LOOP_DETECT))
return 0;
if (type != LOOP_TEST_TYPE ||
strlen(LOOP_TEST_DOMAIN) + 9 != strlen(query) ||
strstr(query, LOOP_TEST_DOMAIN) != query + 9)
return 0;
for (i = 0; i < 8; i++)
if (!isxdigit(query[i]))
return 0;
uid = strtol(query, NULL, 16);
for (serv = daemon->servers; serv; serv = serv->next)
if (!(serv->flags &
(SERV_LITERAL_ADDRESS | SERV_NO_ADDR | SERV_USE_RESOLV | SERV_NO_REBIND | SERV_HAS_DOMAIN | SERV_FOR_NODOTS | SERV_LOOP)) &&
uid == serv->uid)
{
serv->flags |= SERV_LOOP;
check_servers(); /* log new state */
return 1;
}
return 0;
}
#endif
|
ghmajx/asuswrt-merlin
|
release/src/router/dnsmasq/src/loop.c
|
C
|
gpl-2.0
| 3,165
|
/*
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2008 Luis R. Rodriguez <lrodriguz@atheros.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/**
* DOC: Wireless regulatory infrastructure
*
* The usual implementation is for a driver to read a device EEPROM to
* determine which regulatory domain it should be operating under, then
* looking up the allowable channels in a driver-local table and finally
* registering those channels in the wiphy structure.
*
* Another set of compliance enforcement is for drivers to use their
* own compliance limits which can be stored on the EEPROM. The host
* driver or firmware may ensure these are used.
*
* In addition to all this we provide an extra layer of regulatory
* conformance. For drivers which do not have any regulatory
* information CRDA provides the complete regulatory solution.
* For others it provides a community effort on further restrictions
* to enhance compliance.
*
* Note: When number of rules --> infinity we will not be able to
* index on alpha2 any more, instead we'll probably have to
* rely on some SHA1 checksum of the regdomain for example.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/ctype.h>
#include <linux/nl80211.h>
#include <linux/platform_device.h>
#include <net/cfg80211.h>
#include "core.h"
#include "reg.h"
#include "regdb.h"
#include "nl80211.h"
#ifdef CONFIG_CFG80211_REG_DEBUG
#define REG_DBG_PRINT(format, args...) \
do { \
printk(KERN_DEBUG pr_fmt(format), ##args); \
} while (0)
#else
#define REG_DBG_PRINT(args...)
#endif
/* Receipt of information from last regulatory request */
static struct regulatory_request *last_request;
/* To trigger userspace events */
static struct platform_device *reg_pdev;
static struct device_type reg_device_type = {
.uevent = reg_device_uevent,
};
/*
* Central wireless core regulatory domains, we only need two,
* the current one and a world regulatory domain in case we have no
* information to give us an alpha2
*/
const struct ieee80211_regdomain *cfg80211_regdomain;
/*
* Protects static reg.c components:
* - cfg80211_world_regdom
* - cfg80211_regdom
* - last_request
*/
static DEFINE_MUTEX(reg_mutex);
static inline void assert_reg_lock(void)
{
lockdep_assert_held(®_mutex);
}
/* Used to queue up regulatory hints */
static LIST_HEAD(reg_requests_list);
static spinlock_t reg_requests_lock;
/* Used to queue up beacon hints for review */
static LIST_HEAD(reg_pending_beacons);
static spinlock_t reg_pending_beacons_lock;
/* Used to keep track of processed beacon hints */
static LIST_HEAD(reg_beacon_list);
struct reg_beacon {
struct list_head list;
struct ieee80211_channel chan;
};
static void reg_todo(struct work_struct *work);
static DECLARE_WORK(reg_work, reg_todo);
static void reg_timeout_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work);
/* We keep a static world regulatory domain in case of the absence of CRDA */
static const struct ieee80211_regdomain world_regdom = {
	.n_reg_rules = 5,
	.alpha2 =  "00",
	.reg_rules = {
		/* IEEE 802.11b/g, channels 1..11 */
		REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
		/* IEEE 802.11b/g, channels 12..13. No HT40
		 * channel fits here. */
		REG_RULE(2467-10, 2472+10, 20, 6, 20,
			NL80211_RRF_PASSIVE_SCAN |
			NL80211_RRF_NO_IBSS),
		/* IEEE 802.11 channel 14 - Only JP enables
		 * this and for 802.11b only */
		REG_RULE(2484-10, 2484+10, 20, 6, 20,
			NL80211_RRF_PASSIVE_SCAN |
			NL80211_RRF_NO_IBSS |
			NL80211_RRF_NO_OFDM),
		/* IEEE 802.11a, channel 36..48 */
		REG_RULE(5180-10, 5240+10, 40, 6, 20,
			NL80211_RRF_PASSIVE_SCAN |
			NL80211_RRF_NO_IBSS),

		/* NB: 5260 MHz - 5700 MHz requires DFS */

		/* IEEE 802.11a, channel 149..165 */
		REG_RULE(5745-10, 5825+10, 40, 6, 20,
			NL80211_RRF_PASSIVE_SCAN |
			NL80211_RRF_NO_IBSS),
	}
};
static const struct ieee80211_regdomain *cfg80211_world_regdom =
&world_regdom;
static char *ieee80211_regdom = "00";
static char user_alpha2[2];
module_param(ieee80211_regdom, charp, 0444);
MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
/*
 * Free any dynamically-allocated regulatory domains and reset the
 * globals to a clean state (static world regdom, no current regdom).
 * The NULL-outs before kfree() guard against freeing the static
 * world_regdom or freeing the same allocation twice when the two
 * globals alias each other.
 */
static void reset_regdomains(void)
{
	/* avoid freeing static information or freeing something twice */
	if (cfg80211_regdomain == cfg80211_world_regdom)
		cfg80211_regdomain = NULL;
	if (cfg80211_world_regdom == &world_regdom)
		cfg80211_world_regdom = NULL;
	if (cfg80211_regdomain == &world_regdom)
		cfg80211_regdomain = NULL;

	kfree(cfg80211_regdomain);
	kfree(cfg80211_world_regdom);

	cfg80211_world_regdom = &world_regdom;
	cfg80211_regdomain = NULL;
}
/*
 * Dynamic world regulatory domain requested by the wireless
 * core upon initialization
 */
static void update_world_regdomain(const struct ieee80211_regdomain *rd)
{
	/* A world update must always follow an existing request. */
	BUG_ON(!last_request);

	/* Free old state first; both globals then point at the new rd. */
	reset_regdomains();

	cfg80211_world_regdom = rd;
	cfg80211_regdomain = rd;
}
/* Return true when 'alpha2' names the special world regulatory domain,
 * identified by the two-character code "00". */
bool is_world_regdom(const char *alpha2)
{
	return alpha2 && alpha2[0] == '0' && alpha2[1] == '0';
}
/* Return true when both characters of the alpha2 code are non-zero,
 * i.e. the code has actually been filled in. */
static bool is_alpha2_set(const char *alpha2)
{
	return alpha2 && alpha2[0] != 0 && alpha2[1] != 0;
}
/*
 * Return true for the sentinel code "99": a regulatory domain that was
 * built by a driver but whose specific alpha2 could not be determined.
 */
static bool is_unknown_alpha2(const char *alpha2)
{
	return alpha2 && alpha2[0] == '9' && alpha2[1] == '9';
}
/*
 * Return true for the sentinel code "98": a regulatory domain that is
 * the result of intersecting two other regulatory domains.
 */
static bool is_intersected_alpha2(const char *alpha2)
{
	return alpha2 && alpha2[0] == '9' && alpha2[1] == '8';
}
/* Return true when both characters of the code are alphabetic, i.e. it
 * looks like a real ISO-3166 alpha2 country code. */
static bool is_an_alpha2(const char *alpha2)
{
	return alpha2 && isalpha(alpha2[0]) && isalpha(alpha2[1]);
}
/* Compare two alpha2 codes character-by-character (first two chars
 * only).  NULL on either side compares unequal. */
static bool alpha2_equal(const char *alpha2_x, const char *alpha2_y)
{
	if (!alpha2_x || !alpha2_y)
		return false;
	return alpha2_x[0] == alpha2_y[0] && alpha2_x[1] == alpha2_y[1];
}
/* Return true when switching to 'alpha2' would actually change the
 * currently-installed regulatory domain. */
static bool regdom_changes(const char *alpha2)
{
	assert_cfg80211_lock();

	/* No regdomain installed yet: any hint is a change. */
	if (!cfg80211_regdomain)
		return true;
	return !alpha2_equal(cfg80211_regdomain->alpha2, alpha2);
}
/*
 * The NL80211_REGDOM_SET_BY_USER regdom alpha2 is cached, this lets
 * you know if a valid regulatory hint with NL80211_REGDOM_SET_BY_USER
 * has ever been issued.
 */
static bool is_user_regdom_saved(void)
{
	/* "97" is the initialized sentinel: no user hint has been saved. */
	if (user_alpha2[0] == '9' && user_alpha2[1] == '7')
		return false;

	/* This would indicate a mistake on the design */
	if (WARN((!is_world_regdom(user_alpha2) &&
		  !is_an_alpha2(user_alpha2)),
		 "Unexpected user alpha2: %c%c\n",
		 user_alpha2[0],
		 user_alpha2[1]))
		return false;

	return true;
}
static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd,
const struct ieee80211_regdomain *src_regd)
{
struct ieee80211_regdomain *regd;
int size_of_regd = 0;
unsigned int i;
size_of_regd = sizeof(struct ieee80211_regdomain) +
((src_regd->n_reg_rules + 1) * sizeof(struct ieee80211_reg_rule));
regd = kzalloc(size_of_regd, GFP_KERNEL);
if (!regd)
return -ENOMEM;
memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
for (i = 0; i < src_regd->n_reg_rules; i++)
memcpy(®d->reg_rules[i], &src_regd->reg_rules[i],
sizeof(struct ieee80211_reg_rule));
*dst_regd = regd;
return 0;
}
#ifdef CONFIG_CFG80211_INTERNAL_REGDB
struct reg_regdb_search_request {
char alpha2[2];
struct list_head list;
};
static LIST_HEAD(reg_regdb_search_list);
static DEFINE_MUTEX(reg_regdb_search_mutex);
static void reg_regdb_search(struct work_struct *work)
{
struct reg_regdb_search_request *request;
const struct ieee80211_regdomain *curdom, *regdom;
int i, r;
mutex_lock(®_regdb_search_mutex);
while (!list_empty(®_regdb_search_list)) {
request = list_first_entry(®_regdb_search_list,
struct reg_regdb_search_request,
list);
list_del(&request->list);
for (i=0; i<reg_regdb_size; i++) {
curdom = reg_regdb[i];
if (!memcmp(request->alpha2, curdom->alpha2, 2)) {
r = reg_copy_regd(®dom, curdom);
if (r)
break;
mutex_lock(&cfg80211_mutex);
set_regdom(regdom);
mutex_unlock(&cfg80211_mutex);
break;
}
}
kfree(request);
}
mutex_unlock(®_regdb_search_mutex);
}
static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
static void reg_regdb_query(const char *alpha2)
{
struct reg_regdb_search_request *request;
if (!alpha2)
return;
request = kzalloc(sizeof(struct reg_regdb_search_request), GFP_KERNEL);
if (!request)
return;
memcpy(request->alpha2, alpha2, 2);
mutex_lock(®_regdb_search_mutex);
list_add_tail(&request->list, ®_regdb_search_list);
mutex_unlock(®_regdb_search_mutex);
schedule_work(®_regdb_work);
}
#else
static inline void reg_regdb_query(const char *alpha2) {}
#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
/*
* This lets us keep regulatory code which is updated on a regulatory
* basis in userspace. Country information is filled in by
* reg_device_uevent
*/
static int call_crda(const char *alpha2)
{
if (!is_world_regdom((char *) alpha2))
pr_info("Calling CRDA for country: %c%c\n",
alpha2[0], alpha2[1]);
else
pr_info("Calling CRDA to update world regulatory domain\n");
/* query internal regulatory database (if it exists) */
reg_regdb_query(alpha2);
return kobject_uevent(®_pdev->dev.kobj, KOBJ_CHANGE);
}
/*
 * Used by nl80211 before kmalloc'ing our regulatory domain: a regdomain
 * push is only valid if it answers the currently pending request.
 */
bool reg_is_valid_request(const char *alpha2)
{
	assert_cfg80211_lock();

	return last_request ? alpha2_equal(last_request->alpha2, alpha2)
			    : false;
}
/* Sanity check on a regulatory rule */
static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule)
{
	const struct ieee80211_freq_range *freq_range = &rule->freq_range;
	u32 freq_diff;

	/* Frequencies are u32, so "<= 0" really means "== 0": unset. */
	if (freq_range->start_freq_khz <= 0 || freq_range->end_freq_khz <= 0)
		return false;

	if (freq_range->start_freq_khz > freq_range->end_freq_khz)
		return false;

	freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz;

	/* The "end <= start" clause also rejects zero-width ranges
	 * (start == end), which the check above lets through. */
	if (freq_range->end_freq_khz <= freq_range->start_freq_khz ||
			freq_range->max_bandwidth_khz > freq_diff)
		return false;

	return true;
}
/*
 * Sanity-check a whole regulatory domain: it must have at least one
 * rule, no more than NL80211_MAX_SUPP_REG_RULES, and every rule must
 * individually pass is_valid_reg_rule().
 */
static bool is_valid_rd(const struct ieee80211_regdomain *rd)
{
	unsigned int i;

	if (!rd->n_reg_rules)
		return false;

	if (WARN_ON(rd->n_reg_rules > NL80211_MAX_SUPP_REG_RULES))
		return false;

	for (i = 0; i < rd->n_reg_rules; i++)
		if (!is_valid_reg_rule(&rd->reg_rules[i]))
			return false;

	return true;
}
/*
 * Return true when a channel of width bw_khz centered at
 * center_freq_khz lies entirely within the rule's frequency range.
 */
static bool reg_does_bw_fit(const struct ieee80211_freq_range *freq_range,
			    u32 center_freq_khz,
			    u32 bw_khz)
{
	u32 half_bw = bw_khz / 2;

	return (center_freq_khz - half_bw) >= freq_range->start_freq_khz &&
	       (center_freq_khz + half_bw) <= freq_range->end_freq_khz;
}
/**
 * freq_in_rule_band - tells us if a frequency is in a frequency band
 * @freq_range: frequency rule we want to query
 * @freq_khz: frequency we are inquiring about
 *
 * This lets us know if a specific frequency rule is or is not relevant to
 * a specific frequency's band. Bands are device specific and artificial
 * definitions (the "2.4 GHz band" and the "5 GHz band"), however it is
 * safe for now to assume that a frequency rule should not be part of a
 * frequency's band if the start freq or end freq are off by more than 2 GHz.
 * This resolution can be lowered and should be considered as we add
 * regulatory rule support for other "bands".
 **/
static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
			      u32 freq_khz)
{
#define ONE_GHZ_IN_KHZ	1000000
	/* NOTE(review): the subtraction is done on u32 operands before
	 * abs() — relies on the kernel abs() macro handling the wrapped
	 * difference; confirm against the abs() definition in use. */
	if (abs(freq_khz - freq_range->start_freq_khz) <= (2 * ONE_GHZ_IN_KHZ))
		return true;
	if (abs(freq_khz - freq_range->end_freq_khz) <= (2 * ONE_GHZ_IN_KHZ))
		return true;
	return false;
#undef ONE_GHZ_IN_KHZ
}
/*
 * Helper for regdom_intersect(), this does the real
 * mathematical intersection fun
 *
 * Fills *intersected_rule with the intersection of rule1 and rule2:
 * the overlapping frequency span, the smaller bandwidth/power limits,
 * and the union of restriction flags.  Returns 0 on success or -EINVAL
 * when the resulting rule is not valid (e.g. the ranges do not overlap).
 */
static int reg_rules_intersect(
	const struct ieee80211_reg_rule *rule1,
	const struct ieee80211_reg_rule *rule2,
	struct ieee80211_reg_rule *intersected_rule)
{
	const struct ieee80211_freq_range *freq_range1, *freq_range2;
	struct ieee80211_freq_range *freq_range;
	const struct ieee80211_power_rule *power_rule1, *power_rule2;
	struct ieee80211_power_rule *power_rule;
	u32 freq_diff;

	freq_range1 = &rule1->freq_range;
	freq_range2 = &rule2->freq_range;
	freq_range = &intersected_rule->freq_range;

	power_rule1 = &rule1->power_rule;
	power_rule2 = &rule2->power_rule;
	power_rule = &intersected_rule->power_rule;

	/* Overlapping span: max of starts, min of ends. */
	freq_range->start_freq_khz = max(freq_range1->start_freq_khz,
		freq_range2->start_freq_khz);
	freq_range->end_freq_khz = min(freq_range1->end_freq_khz,
		freq_range2->end_freq_khz);
	freq_range->max_bandwidth_khz = min(freq_range1->max_bandwidth_khz,
		freq_range2->max_bandwidth_khz);

	/* Clamp bandwidth to the width of the overlap. */
	freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz;
	if (freq_range->max_bandwidth_khz > freq_diff)
		freq_range->max_bandwidth_khz = freq_diff;

	/* Power limits: the more restrictive of the two. */
	power_rule->max_eirp = min(power_rule1->max_eirp,
		power_rule2->max_eirp);
	power_rule->max_antenna_gain = min(power_rule1->max_antenna_gain,
		power_rule2->max_antenna_gain);

	/* Restrictions accumulate: union of both rules' flags. */
	intersected_rule->flags = (rule1->flags | rule2->flags);

	if (!is_valid_reg_rule(intersected_rule))
		return -EINVAL;

	return 0;
}
/**
 * regdom_intersect - do the intersection between two regulatory domains
 * @rd1: first regulatory domain
 * @rd2: second regulatory domain
 *
 * Use this function to get the intersection between two regulatory domains.
 * Once completed we will mark the alpha2 for the rd as intersected, "98",
 * as no one single alpha2 can represent this regulatory domain.
 *
 * Returns a pointer to the regulatory domain structure which will hold the
 * resulting intersection of rules between rd1 and rd2. We will
 * kzalloc() this structure for you.
 */
static struct ieee80211_regdomain *regdom_intersect(
	const struct ieee80211_regdomain *rd1,
	const struct ieee80211_regdomain *rd2)
{
	int r, size_of_regd;
	unsigned int x, y;
	unsigned int num_rules = 0, rule_idx = 0;
	const struct ieee80211_reg_rule *rule1, *rule2;
	struct ieee80211_reg_rule *intersected_rule;
	struct ieee80211_regdomain *rd;
	/* This is just a dummy holder to help us count */
	struct ieee80211_reg_rule irule;

	/* Uses the stack temporarily for counter arithmetic */
	intersected_rule = &irule;

	memset(intersected_rule, 0, sizeof(struct ieee80211_reg_rule));

	if (!rd1 || !rd2)
		return NULL;

	/*
	 * First we get a count of the rules we'll need, then we actually
	 * build them. This is to so we can malloc() and free() a
	 * regdomain once. The reason we use reg_rules_intersect() here
	 * is it will return -EINVAL if the rule computed makes no sense.
	 * All rules that do check out OK are valid.
	 */

	/* First pass: count how many pairwise intersections are valid. */
	for (x = 0; x < rd1->n_reg_rules; x++) {
		rule1 = &rd1->reg_rules[x];
		for (y = 0; y < rd2->n_reg_rules; y++) {
			rule2 = &rd2->reg_rules[y];
			if (!reg_rules_intersect(rule1, rule2,
					intersected_rule))
				num_rules++;
			memset(intersected_rule, 0,
					sizeof(struct ieee80211_reg_rule));
		}
	}

	if (!num_rules)
		return NULL;

	size_of_regd = sizeof(struct ieee80211_regdomain) +
		((num_rules + 1) * sizeof(struct ieee80211_reg_rule));

	rd = kzalloc(size_of_regd, GFP_KERNEL);
	if (!rd)
		return NULL;

	/* Second pass: recompute the same intersections into the target. */
	for (x = 0; x < rd1->n_reg_rules; x++) {
		rule1 = &rd1->reg_rules[x];
		for (y = 0; y < rd2->n_reg_rules; y++) {
			rule2 = &rd2->reg_rules[y];
			/*
			 * This time around instead of using the stack lets
			 * write to the target rule directly saving ourselves
			 * a memcpy()
			 */
			intersected_rule = &rd->reg_rules[rule_idx];
			r = reg_rules_intersect(rule1, rule2,
				intersected_rule);
			/*
			 * No need to memset here the intersected rule here as
			 * we're not using the stack anymore
			 */
			if (r)
				continue;
			rule_idx++;
		}
	}

	/* Both passes must agree, or something changed under us. */
	if (rule_idx != num_rules) {
		kfree(rd);
		return NULL;
	}

	rd->n_reg_rules = num_rules;
	rd->alpha2[0] = '9';
	rd->alpha2[1] = '8';

	return rd;
}
/*
 * XXX: add support for the rest of enum nl80211_reg_rule_flags, we may
 * want to just have the channel structure use these
 *
 * Translate regdomain rule flags (NL80211_RRF_*) into the equivalent
 * channel flags (IEEE80211_CHAN_*).
 */
static u32 map_regdom_flags(u32 rd_flags)
{
	u32 chan_flags = 0;

	chan_flags |= (rd_flags & NL80211_RRF_PASSIVE_SCAN) ?
			IEEE80211_CHAN_PASSIVE_SCAN : 0;
	chan_flags |= (rd_flags & NL80211_RRF_NO_IBSS) ?
			IEEE80211_CHAN_NO_IBSS : 0;
	chan_flags |= (rd_flags & NL80211_RRF_DFS) ?
			IEEE80211_CHAN_RADAR : 0;

	return chan_flags;
}
/*
 * Find the regulatory rule that covers center_freq with at least
 * desired_bw_khz of bandwidth, searching custom_regd if given,
 * otherwise the wiphy/driver/current regdomain.  On success stores the
 * rule in *reg_rule and returns 0.  Returns -ERANGE when no rule is
 * even in the frequency's band (the caller uses this to distinguish
 * "band not described" from "disallowed"), -EINVAL otherwise.
 *
 * Fix: restore "&regd->reg_rules[i]" — corrupted into ® by
 * HTML-entity mangling.
 */
static int freq_reg_info_regd(struct wiphy *wiphy,
			      u32 center_freq,
			      u32 desired_bw_khz,
			      const struct ieee80211_reg_rule **reg_rule,
			      const struct ieee80211_regdomain *custom_regd)
{
	int i;
	bool band_rule_found = false;
	const struct ieee80211_regdomain *regd;
	bool bw_fits = false;

	if (!desired_bw_khz)
		desired_bw_khz = MHZ_TO_KHZ(20);

	regd = custom_regd ? custom_regd : cfg80211_regdomain;

	/*
	 * Follow the driver's regulatory domain, if present, unless a country
	 * IE has been processed or a user wants to help compliance further
	 */
	if (!custom_regd &&
	    last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
	    last_request->initiator != NL80211_REGDOM_SET_BY_USER &&
	    wiphy->regd)
		regd = wiphy->regd;

	if (!regd)
		return -EINVAL;

	for (i = 0; i < regd->n_reg_rules; i++) {
		const struct ieee80211_reg_rule *rr;
		const struct ieee80211_freq_range *fr = NULL;

		rr = &regd->reg_rules[i];
		fr = &rr->freq_range;

		/*
		 * We only need to know if one frequency rule was
		 * was in center_freq's band, that's enough, so lets
		 * not overwrite it once found
		 */
		if (!band_rule_found)
			band_rule_found = freq_in_rule_band(fr, center_freq);

		bw_fits = reg_does_bw_fit(fr,
					  center_freq,
					  desired_bw_khz);

		if (band_rule_found && bw_fits) {
			*reg_rule = rr;
			return 0;
		}
	}

	if (!band_rule_found)
		return -ERANGE;

	return -EINVAL;
}
/*
 * Public wrapper around freq_reg_info_regd() that searches the
 * currently-installed regulatory domain (no custom regdomain).
 * Must be called with cfg80211_mutex held.
 */
int freq_reg_info(struct wiphy *wiphy,
		  u32 center_freq,
		  u32 desired_bw_khz,
		  const struct ieee80211_reg_rule **reg_rule)
{
	assert_cfg80211_lock();

	return freq_reg_info_regd(wiphy, center_freq, desired_bw_khz,
				  reg_rule, NULL);
}
EXPORT_SYMBOL(freq_reg_info);
#ifdef CONFIG_CFG80211_REG_DEBUG
/* Human-readable name of a regulatory request initiator, for debug
 * logging.  Unknown values trigger a WARN and a sentinel string. */
static const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
{
	if (initiator == NL80211_REGDOM_SET_BY_CORE)
		return "Set by core";
	if (initiator == NL80211_REGDOM_SET_BY_USER)
		return "Set by user";
	if (initiator == NL80211_REGDOM_SET_BY_DRIVER)
		return "Set by driver";
	if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)
		return "Set by country IE";

	WARN_ON(1);
	return "Set by bug";
}
/*
 * Debug-print the regulatory rule being applied to a channel.
 *
 * Fixes: (1) restore "&reg_rule->..." — corrupted into ® by
 * HTML-entity mangling; (2) the second format string was broken
 * ("%d KHz - %d KHz @ KHz)," — a dangling "@ KHz)" with no conversion
 * or argument for the bandwidth); print the rule's max bandwidth there.
 */
static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
				    u32 desired_bw_khz,
				    const struct ieee80211_reg_rule *reg_rule)
{
	const struct ieee80211_power_rule *power_rule;
	const struct ieee80211_freq_range *freq_range;
	char max_antenna_gain[32];

	power_rule = &reg_rule->power_rule;
	freq_range = &reg_rule->freq_range;

	/* A zero gain limit means "no limit specified". */
	if (!power_rule->max_antenna_gain)
		snprintf(max_antenna_gain, 32, "N/A");
	else
		snprintf(max_antenna_gain, 32, "%d", power_rule->max_antenna_gain);

	REG_DBG_PRINT("Updating information on frequency %d MHz "
		      "for a %d MHz width channel with regulatory rule:\n",
		      chan->center_freq,
		      KHZ_TO_MHZ(desired_bw_khz));

	REG_DBG_PRINT("(%d KHz - %d KHz @ %d KHz), (%s mBi, %d mBm)\n",
		      freq_range->start_freq_khz,
		      freq_range->end_freq_khz,
		      freq_range->max_bandwidth_khz,
		      max_antenna_gain,
		      power_rule->max_eirp);
}
#else
/* No-op stub: debug printing compiled out when
 * CONFIG_CFG80211_REG_DEBUG is not set. */
static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
				    u32 desired_bw_khz,
				    const struct ieee80211_reg_rule *reg_rule)
{
}
#endif
/*
 * Note that right now we assume the desired channel bandwidth
 * is always 20 MHz for each individual channel (HT40 uses 20 MHz
 * per channel, the primary and the extension channel). To support
 * smaller custom bandwidths such as 5 MHz or 10 MHz we'll need a
 * new ieee80211_channel.target_bw and re run the regulatory check
 * on the wiphy with the target_bw specified. Then we can simply use
 * that below for the desired_bw_khz below.
 *
 * Apply the current regulatory domain to one channel of one band:
 * look up the matching rule, then update the channel's flags and
 * power/gain limits accordingly (disabling the channel when no rule
 * allows it).
 *
 * Fix: restore the three "&reg_rule..." references corrupted into
 * the ® character by HTML-entity mangling.
 */
static void handle_channel(struct wiphy *wiphy,
			   enum nl80211_reg_initiator initiator,
			   enum ieee80211_band band,
			   unsigned int chan_idx)
{
	int r;
	u32 flags, bw_flags = 0;
	u32 desired_bw_khz = MHZ_TO_KHZ(20);
	const struct ieee80211_reg_rule *reg_rule = NULL;
	const struct ieee80211_power_rule *power_rule = NULL;
	const struct ieee80211_freq_range *freq_range = NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	struct wiphy *request_wiphy = NULL;

	assert_cfg80211_lock();

	request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);

	sband = wiphy->bands[band];
	BUG_ON(chan_idx >= sband->n_channels);
	chan = &sband->channels[chan_idx];

	flags = chan->orig_flags;

	r = freq_reg_info(wiphy,
			  MHZ_TO_KHZ(chan->center_freq),
			  desired_bw_khz,
			  &reg_rule);
	if (r) {
		/*
		 * We will disable all channels that do not match our
		 * received regulatory rule unless the hint is coming
		 * from a Country IE and the Country IE had no information
		 * about a band. The IEEE 802.11 spec allows for an AP
		 * to send only a subset of the regulatory rules allowed,
		 * so an AP in the US that only supports 2.4 GHz may only send
		 * a country IE with information for the 2.4 GHz band
		 * while 5 GHz is still supported.
		 */
		if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
		    r == -ERANGE)
			return;

		REG_DBG_PRINT("Disabling freq %d MHz\n", chan->center_freq);
		chan->flags = IEEE80211_CHAN_DISABLED;
		return;
	}

	chan_reg_rule_print_dbg(chan, desired_bw_khz, reg_rule);

	power_rule = &reg_rule->power_rule;
	freq_range = &reg_rule->freq_range;

	if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
		bw_flags = IEEE80211_CHAN_NO_HT40;

	if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
	    request_wiphy && request_wiphy == wiphy &&
	    request_wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
		/*
		 * This guarantees the driver's requested regulatory domain
		 * will always be used as a base for further regulatory
		 * settings
		 */
		chan->flags = chan->orig_flags =
			map_regdom_flags(reg_rule->flags) | bw_flags;
		chan->max_antenna_gain = chan->orig_mag =
			(int) MBI_TO_DBI(power_rule->max_antenna_gain);
		chan->max_power = chan->orig_mpwr =
			(int) MBM_TO_DBM(power_rule->max_eirp);
		return;
	}

	chan->beacon_found = false;
	chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
	chan->max_antenna_gain = min(chan->orig_mag,
		(int) MBI_TO_DBI(power_rule->max_antenna_gain));
	if (chan->orig_mpwr) {
		/*
		 * Devices that have their own custom regulatory domain
		 * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the
		 * passed country IE power settings.
		 */
		if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
		    wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
		    wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
			chan->max_power =
				MBM_TO_DBM(power_rule->max_eirp);
		} else {
			chan->max_power = min(chan->orig_mpwr,
				(int) MBM_TO_DBM(power_rule->max_eirp));
		}
	} else
		chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
}
/* Apply the current regulatory domain to every channel of one band. */
static void handle_band(struct wiphy *wiphy,
			enum ieee80211_band band,
			enum nl80211_reg_initiator initiator)
{
	struct ieee80211_supported_band *sband = wiphy->bands[band];
	unsigned int chan_idx;

	BUG_ON(!sband);

	for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++)
		handle_channel(wiphy, initiator, band, chan_idx);
}
/*
 * Decide whether a regulatory update should be skipped for this wiphy:
 * no request is pending, the driver uses a fully custom regdomain, or
 * a strict-regulatory driver has not yet supplied its own regdomain.
 *
 * Fixes in the debug messages only: "regulaotry" -> "regulatory", and
 * terminate both multi-line messages with '\n' (one ended with a
 * dangling space, the other with no newline at all).
 */
static bool ignore_reg_update(struct wiphy *wiphy,
			      enum nl80211_reg_initiator initiator)
{
	if (!last_request) {
		REG_DBG_PRINT("Ignoring regulatory request %s since "
			      "last_request is not set\n",
			      reg_initiator_name(initiator));
		return true;
	}

	if (initiator == NL80211_REGDOM_SET_BY_CORE &&
	    wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) {
		REG_DBG_PRINT("Ignoring regulatory request %s "
			      "since the driver uses its own custom "
			      "regulatory domain\n",
			      reg_initiator_name(initiator));
		return true;
	}

	/*
	 * wiphy->regd will be set once the device has its own
	 * desired regulatory domain set
	 */
	if (wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY && !wiphy->regd &&
	    initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
	    !is_world_regdom(last_request->alpha2)) {
		REG_DBG_PRINT("Ignoring regulatory request %s "
			      "since the driver requires its own regulatory "
			      "domain to be set first\n",
			      reg_initiator_name(initiator));
		return true;
	}

	return false;
}
/*
 * Re-apply the current regulatory settings to every registered wiphy.
 * NOTE(review): iterates cfg80211_rdev_list and reads last_request with
 * no local locking — presumably callers hold cfg80211_mutex/reg_mutex;
 * confirm against call sites.
 */
static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
{
	struct cfg80211_registered_device *rdev;
	struct wiphy *wiphy;

	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
		wiphy = &rdev->wiphy;
		wiphy_update_regulatory(wiphy, initiator);
		/*
		 * Regulatory updates set by CORE are ignored for custom
		 * regulatory cards. Let us notify the changes to the driver,
		 * as some drivers used this to restore its orig_* reg domain.
		 */
		if (initiator == NL80211_REGDOM_SET_BY_CORE &&
		    wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
		    wiphy->reg_notifier)
			wiphy->reg_notifier(wiphy, last_request);
	}
}
/*
 * Apply one remembered beacon hint to channel @chan_idx of the band the
 * beacon was seen on: once a beacon was found on a channel we may lift
 * its passive-scan and no-IBSS restrictions, unless the wiphy opted out
 * via WIPHY_FLAG_DISABLE_BEACON_HINTS. Emits a beacon-hint event to
 * userspace when any flag actually changed.
 */
static void handle_reg_beacon(struct wiphy *wiphy,
			      unsigned int chan_idx,
			      struct reg_beacon *reg_beacon)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	bool channel_changed = false;
	struct ieee80211_channel chan_before;

	assert_cfg80211_lock();

	sband = wiphy->bands[reg_beacon->chan.band];
	chan = &sband->channels[chan_idx];

	/* Only the exact channel the beacon was seen on is affected */
	if (likely(chan->center_freq != reg_beacon->chan.center_freq))
		return;

	/* A beacon hint for this channel was already processed */
	if (chan->beacon_found)
		return;

	chan->beacon_found = true;

	if (wiphy->flags & WIPHY_FLAG_DISABLE_BEACON_HINTS)
		return;

	/* Snapshot the pre-change state for the userspace event */
	chan_before.center_freq = chan->center_freq;
	chan_before.flags = chan->flags;

	if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) {
		chan->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
		channel_changed = true;
	}

	if (chan->flags & IEEE80211_CHAN_NO_IBSS) {
		chan->flags &= ~IEEE80211_CHAN_NO_IBSS;
		channel_changed = true;
	}

	if (channel_changed)
		nl80211_send_beacon_hint_event(wiphy, &chan_before, chan);
}
/*
 * Called when a scan on a wiphy finds a beacon on
 * new channel
 */
static void wiphy_update_new_beacon(struct wiphy *wiphy,
				    struct reg_beacon *reg_beacon)
{
	struct ieee80211_supported_band *sband;
	unsigned int chan_idx;

	assert_cfg80211_lock();

	sband = wiphy->bands[reg_beacon->chan.band];
	if (!sband)
		return;

	for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++)
		handle_reg_beacon(wiphy, chan_idx, reg_beacon);
}
/*
* Called upon reg changes or a new wiphy is added
*/
static void wiphy_update_beacon_reg(struct wiphy *wiphy)
{
unsigned int i;
struct ieee80211_supported_band *sband;
struct reg_beacon *reg_beacon;
assert_cfg80211_lock();
if (list_empty(®_beacon_list))
return;
list_for_each_entry(reg_beacon, ®_beacon_list, list) {
if (!wiphy->bands[reg_beacon->chan.band])
continue;
sband = wiphy->bands[reg_beacon->chan.band];
for (i = 0; i < sband->n_channels; i++)
handle_reg_beacon(wiphy, i, reg_beacon);
}
}
/* True when the device is effectively world-roaming and so should
 * honor beacon hints. */
static bool reg_is_world_roaming(struct wiphy *wiphy)
{
	bool world = is_world_regdom(cfg80211_regdomain->alpha2) ||
		     (wiphy->regd && is_world_regdom(wiphy->regd->alpha2));

	if (world)
		return true;

	return last_request &&
	       last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
	       (wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY);
}
/* Reap the advantages of previously found beacons */
static void reg_process_beacons(struct wiphy *wiphy)
{
	/*
	 * Means we are just firing up cfg80211, so no beacons would
	 * have been processed yet.
	 */
	if (!last_request || !reg_is_world_roaming(wiphy))
		return;

	wiphy_update_beacon_reg(wiphy);
}
/* True when @chan cannot take part in an HT40 channel pair. */
static bool is_ht40_not_allowed(struct ieee80211_channel *chan)
{
	if (!chan || (chan->flags & IEEE80211_CHAN_DISABLED))
		return true;
	/*
	 * IEEE80211_CHAN_NO_HT40 covers both the HT40- and HT40+ bits;
	 * only when BOTH are set have the regulatory rules disallowed
	 * HT40 completely.
	 */
	return (chan->flags & IEEE80211_CHAN_NO_HT40) ==
	       IEEE80211_CHAN_NO_HT40;
}
/*
 * Recompute the HT40-/HT40+ flags for one channel: each direction is
 * only permitted when the corresponding 20 MHz extension channel
 * exists in the band and is itself usable for HT40.
 */
static void reg_process_ht_flags_channel(struct wiphy *wiphy,
					 enum ieee80211_band band,
					 unsigned int chan_idx)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *channel;
	struct ieee80211_channel *channel_before = NULL, *channel_after = NULL;
	unsigned int i;

	assert_cfg80211_lock();

	sband = wiphy->bands[band];
	BUG_ON(chan_idx >= sband->n_channels);
	channel = &sband->channels[chan_idx];

	/* Channel itself unusable for HT40: mark both directions off */
	if (is_ht40_not_allowed(channel)) {
		channel->flags |= IEEE80211_CHAN_NO_HT40;
		return;
	}

	/*
	 * We need to ensure the extension channels exist to
	 * be able to use HT40- or HT40+, this finds them (or not)
	 */
	for (i = 0; i < sband->n_channels; i++) {
		struct ieee80211_channel *c = &sband->channels[i];
		if (c->center_freq == (channel->center_freq - 20))
			channel_before = c;
		if (c->center_freq == (channel->center_freq + 20))
			channel_after = c;
	}

	/*
	 * Please note that this assumes target bandwidth is 20 MHz,
	 * if that ever changes we also need to change the below logic
	 * to include that as well.
	 */
	if (is_ht40_not_allowed(channel_before))
		channel->flags |= IEEE80211_CHAN_NO_HT40MINUS;
	else
		channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;

	if (is_ht40_not_allowed(channel_after))
		channel->flags |= IEEE80211_CHAN_NO_HT40PLUS;
	else
		channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
}
/* Recompute the HT40 flags for every channel of @band. */
static void reg_process_ht_flags_band(struct wiphy *wiphy,
				      enum ieee80211_band band)
{
	struct ieee80211_supported_band *sband;
	unsigned int chan_idx;

	BUG_ON(!wiphy->bands[band]);
	sband = wiphy->bands[band];

	for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++)
		reg_process_ht_flags_channel(wiphy, band, chan_idx);
}
/* Recompute HT40 availability across all supported bands of @wiphy. */
static void reg_process_ht_flags(struct wiphy *wiphy)
{
	enum ieee80211_band band;

	if (!wiphy)
		return;

	for (band = 0; band < IEEE80211_NUM_BANDS; band++)
		if (wiphy->bands[band])
			reg_process_ht_flags_band(wiphy, band);
}
/*
 * Apply the current regulatory domain to all bands of @wiphy, then
 * replay beacon hints, recompute HT40 flags, and notify the driver.
 * Does nothing when ignore_reg_update() rules this initiator out for
 * this wiphy.
 */
void wiphy_update_regulatory(struct wiphy *wiphy,
			     enum nl80211_reg_initiator initiator)
{
	enum ieee80211_band band;

	if (ignore_reg_update(wiphy, initiator))
		return;

	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		if (wiphy->bands[band])
			handle_band(wiphy, band, initiator);
	}

	reg_process_beacons(wiphy);
	reg_process_ht_flags(wiphy);
	if (wiphy->reg_notifier)
		wiphy->reg_notifier(wiphy, last_request);
}
/*
 * Apply the matching rule of a driver-supplied custom regdomain @regd
 * to a single channel. The channel is disabled when no rule in @regd
 * covers a 20 MHz wide channel at its center frequency.
 */
static void handle_channel_custom(struct wiphy *wiphy,
				  enum ieee80211_band band,
				  unsigned int chan_idx,
				  const struct ieee80211_regdomain *regd)
{
	int r;
	u32 desired_bw_khz = MHZ_TO_KHZ(20);
	u32 bw_flags = 0;
	const struct ieee80211_reg_rule *reg_rule = NULL;
	const struct ieee80211_power_rule *power_rule = NULL;
	const struct ieee80211_freq_range *freq_range = NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;

	assert_reg_lock();

	sband = wiphy->bands[band];
	BUG_ON(chan_idx >= sband->n_channels);
	chan = &sband->channels[chan_idx];

	/* Look up the rule from the custom regd, not the global one */
	r = freq_reg_info_regd(wiphy,
			       MHZ_TO_KHZ(chan->center_freq),
			       desired_bw_khz,
			       &reg_rule,
			       regd);

	if (r) {
		REG_DBG_PRINT("Disabling freq %d MHz as custom "
			      "regd has no rule that fits a %d MHz "
			      "wide channel\n",
			      chan->center_freq,
			      KHZ_TO_MHZ(desired_bw_khz));
		chan->flags = IEEE80211_CHAN_DISABLED;
		return;
	}

	chan_reg_rule_print_dbg(chan, desired_bw_khz, reg_rule);

	power_rule = &reg_rule->power_rule;
	freq_range = &reg_rule->freq_range;

	/* A rule too narrow for 40 MHz operation forbids HT40 here */
	if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
		bw_flags = IEEE80211_CHAN_NO_HT40;

	chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
	chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain);
	chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
}
/* Apply a custom regdomain to every channel of @band on @wiphy. */
static void handle_band_custom(struct wiphy *wiphy, enum ieee80211_band band,
			       const struct ieee80211_regdomain *regd)
{
	struct ieee80211_supported_band *sband;
	unsigned int chan_idx;

	BUG_ON(!wiphy->bands[band]);
	sband = wiphy->bands[band];

	for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++)
		handle_channel_custom(wiphy, band, chan_idx, regd);
}
/* Used by drivers prior to wiphy registration */
void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
				   const struct ieee80211_regdomain *regd)
{
	enum ieee80211_band band;
	unsigned int bands_set = 0;

	mutex_lock(&reg_mutex);
	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		if (!wiphy->bands[band])
			continue;
		handle_band_custom(wiphy, band, regd);
		bands_set++;
	}
	mutex_unlock(&reg_mutex);

	/*
	 * no point in calling this if it won't have any effect
	 * on your device's supported bands.
	 */
	WARN_ON(!bands_set);
}
EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
/*
 * Return value which can be used by ignore_request() to indicate
 * it has been determined we should intersect two regulatory domains
 */
#define REG_INTERSECT 1

/* This has the logic which determines when a new request
 * should be ignored. */
static int ignore_request(struct wiphy *wiphy,
			  struct regulatory_request *pending_request)
{
	struct wiphy *last_wiphy = NULL;

	assert_cfg80211_lock();

	/* All initial requests are respected */
	if (!last_request)
		return 0;

	switch (pending_request->initiator) {
	case NL80211_REGDOM_SET_BY_CORE:
		/* Core hints are always accepted */
		return 0;
	case NL80211_REGDOM_SET_BY_COUNTRY_IE:
		last_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);

		if (unlikely(!is_an_alpha2(pending_request->alpha2)))
			return -EINVAL;
		if (last_request->initiator ==
		    NL80211_REGDOM_SET_BY_COUNTRY_IE) {
			if (last_wiphy != wiphy) {
				/*
				 * Two cards with two APs claiming different
				 * Country IE alpha2s. We could
				 * intersect them, but that seems unlikely
				 * to be correct. Reject second one for now.
				 */
				if (regdom_changes(pending_request->alpha2))
					return -EOPNOTSUPP;
				return -EALREADY;
			}
			/*
			 * Two consecutive Country IE hints on the same wiphy.
			 * This should be picked up early by the driver/stack
			 */
			if (WARN_ON(regdom_changes(pending_request->alpha2)))
				return 0;
			return -EALREADY;
		}
		return 0;
	case NL80211_REGDOM_SET_BY_DRIVER:
		/* First driver hint after the core hint is accepted */
		if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) {
			if (regdom_changes(pending_request->alpha2))
				return 0;
			return -EALREADY;
		}

		/*
		 * This would happen if you unplug and plug your card
		 * back in or if you add a new device for which the previously
		 * loaded card also agrees on the regulatory domain.
		 */
		if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
		    !regdom_changes(pending_request->alpha2))
			return -EALREADY;

		/* Conflicting driver hints get intersected */
		return REG_INTERSECT;
	case NL80211_REGDOM_SET_BY_USER:
		if (last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)
			return REG_INTERSECT;
		/*
		 * If the user knows better the user should set the regdom
		 * to their country before the IE is picked up
		 */
		if (last_request->initiator == NL80211_REGDOM_SET_BY_USER &&
		    last_request->intersect)
			return -EOPNOTSUPP;
		/*
		 * Process user requests only after previous user/driver/core
		 * requests have been processed
		 */
		if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE ||
		    last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
		    last_request->initiator == NL80211_REGDOM_SET_BY_USER) {
			if (regdom_changes(last_request->alpha2))
				return -EAGAIN;
		}

		if (!regdom_changes(pending_request->alpha2))
			return -EALREADY;

		return 0;
	}

	return -EINVAL;
}
/*
 * Mark the pending last_request as processed. Cancels the CRDA timeout
 * for user hints and, if more hints are queued, reschedules reg_work.
 */
static void reg_set_request_processed(void)
{
	bool need_more_processing = false;

	last_request->processed = true;

	spin_lock(&reg_requests_lock);
	if (!list_empty(&reg_requests_list))
		need_more_processing = true;
	spin_unlock(&reg_requests_lock);

	/* A reply arrived, no need to time the user hint out any more */
	if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
		cancel_delayed_work_sync(&reg_timeout);

	if (need_more_processing)
		schedule_work(&reg_work);
}
/**
 * __regulatory_hint - hint to the wireless core a regulatory domain
 * @wiphy: if the hint comes from country information from an AP, this
 *	is required to be set to the wiphy that received the information
 * @pending_request: the regulatory request currently being processed
 *
 * The Wireless subsystem can use this function to hint to the wireless core
 * what it believes should be the current regulatory domain.
 *
 * Returns zero if all went fine, %-EALREADY if a regulatory domain had
 * already been set or other standard error codes.
 *
 * Caller must hold &cfg80211_mutex and &reg_mutex
 *
 * Takes ownership of @pending_request: it is either installed as
 * last_request or freed before returning.
 */
static int __regulatory_hint(struct wiphy *wiphy,
			     struct regulatory_request *pending_request)
{
	bool intersect = false;
	int r = 0;

	assert_cfg80211_lock();

	r = ignore_request(wiphy, pending_request);

	if (r == REG_INTERSECT) {
		/* Give the driver a private copy of the current regdomain */
		if (pending_request->initiator ==
		    NL80211_REGDOM_SET_BY_DRIVER) {
			r = reg_copy_regd(&wiphy->regd, cfg80211_regdomain);
			if (r) {
				kfree(pending_request);
				return r;
			}
		}
		intersect = true;
	} else if (r) {
		/*
		 * If the regulatory domain being requested by the
		 * driver has already been set just copy it to the
		 * wiphy
		 */
		if (r == -EALREADY &&
		    pending_request->initiator ==
		    NL80211_REGDOM_SET_BY_DRIVER) {
			r = reg_copy_regd(&wiphy->regd, cfg80211_regdomain);
			if (r) {
				kfree(pending_request);
				return r;
			}
			r = -EALREADY;
			goto new_request;
		}
		kfree(pending_request);
		return r;
	}

new_request:
	/* The pending request now becomes the new last_request */
	kfree(last_request);

	last_request = pending_request;
	last_request->intersect = intersect;

	pending_request = NULL;

	/* Remember the user's choice so it survives a settings restore */
	if (last_request->initiator == NL80211_REGDOM_SET_BY_USER) {
		user_alpha2[0] = last_request->alpha2[0];
		user_alpha2[1] = last_request->alpha2[1];
	}

	/* When r == REG_INTERSECT we do need to call CRDA */
	if (r < 0) {
		/*
		 * Since CRDA will not be called in this case as we already
		 * have applied the requested regulatory domain before we just
		 * inform userspace we have processed the request
		 */
		if (r == -EALREADY) {
			nl80211_send_reg_change_event(last_request);
			reg_set_request_processed();
		}
		return r;
	}

	return call_crda(last_request->alpha2);
}
/* This processes *all* regulatory hints */
static void reg_process_hint(struct regulatory_request *reg_request)
{
int r = 0;
struct wiphy *wiphy = NULL;
enum nl80211_reg_initiator initiator = reg_request->initiator;
BUG_ON(!reg_request->alpha2);
if (wiphy_idx_valid(reg_request->wiphy_idx))
wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
if (reg_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
!wiphy) {
kfree(reg_request);
return;
}
r = __regulatory_hint(wiphy, reg_request);
/* This is required so that the orig_* parameters are saved */
if (r == -EALREADY && wiphy &&
wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
wiphy_update_regulatory(wiphy, initiator);
return;
}
/*
* We only time out user hints, given that they should be the only
* source of bogus requests.
*/
if (r != -EALREADY &&
reg_request->initiator == NL80211_REGDOM_SET_BY_USER)
schedule_delayed_work(®_timeout, msecs_to_jiffies(3142));
}
/*
 * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_*
 * Regulatory hints come on a first come first serve basis and we
 * must process each one atomically.
 */
static void reg_process_pending_hints(void)
{
	struct regulatory_request *reg_request;

	mutex_lock(&cfg80211_mutex);
	mutex_lock(&reg_mutex);

	/* When last_request->processed becomes true this will be rescheduled */
	if (last_request && !last_request->processed) {
		REG_DBG_PRINT("Pending regulatory request, waiting "
			      "for it to be processed...");
		goto out;
	}

	spin_lock(&reg_requests_lock);

	if (list_empty(&reg_requests_list)) {
		spin_unlock(&reg_requests_lock);
		goto out;
	}

	/* Dequeue one hint; ownership passes to reg_process_hint() */
	reg_request = list_first_entry(&reg_requests_list,
				       struct regulatory_request,
				       list);
	list_del_init(&reg_request->list);

	spin_unlock(&reg_requests_lock);

	reg_process_hint(reg_request);

out:
	mutex_unlock(&reg_mutex);
	mutex_unlock(&cfg80211_mutex);
}
/* Processes beacon hints -- this has nothing to do with country IEs */
static void reg_process_pending_beacon_hints(void)
{
	struct cfg80211_registered_device *rdev;
	struct reg_beacon *pending_beacon, *tmp;

	/*
	 * No need to hold the reg_mutex here as we just touch wiphys
	 * and do not read or access regulatory variables.
	 */
	mutex_lock(&cfg80211_mutex);

	/* This goes through the _pending_ beacon list */
	spin_lock_bh(&reg_pending_beacons_lock);

	if (list_empty(&reg_pending_beacons)) {
		spin_unlock_bh(&reg_pending_beacons_lock);
		goto out;
	}

	list_for_each_entry_safe(pending_beacon, tmp,
				 &reg_pending_beacons, list) {
		/* Move each hint off the pending list ... */
		list_del_init(&pending_beacon->list);

		/* Applies the beacon hint to current wiphys */
		list_for_each_entry(rdev, &cfg80211_rdev_list, list)
			wiphy_update_new_beacon(&rdev->wiphy, pending_beacon);

		/* Remembers the beacon hint for new wiphys or reg changes */
		list_add_tail(&pending_beacon->list, &reg_beacon_list);
	}

	spin_unlock_bh(&reg_pending_beacons_lock);
out:
	mutex_unlock(&cfg80211_mutex);
}
/* Workqueue callback: drain queued regulatory hints, then beacon hints. */
static void reg_todo(struct work_struct *w)
{
	reg_process_pending_hints();
	reg_process_pending_beacon_hints();
}
static void queue_regulatory_request(struct regulatory_request *request)
{
if (isalpha(request->alpha2[0]))
request->alpha2[0] = toupper(request->alpha2[0]);
if (isalpha(request->alpha2[1]))
request->alpha2[1] = toupper(request->alpha2[1]);
spin_lock(®_requests_lock);
list_add_tail(&request->list, ®_requests_list);
spin_unlock(®_requests_lock);
schedule_work(®_work);
}
/*
 * Core regulatory hint -- happens during cfg80211_init()
 * and when we restore regulatory settings.
 */
static int regulatory_hint_core(const char *alpha2)
{
	struct regulatory_request *request;

	/* The core hint supersedes whatever request was pending */
	kfree(last_request);
	last_request = NULL;

	request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->alpha2[0] = alpha2[0];
	request->alpha2[1] = alpha2[1];
	request->initiator = NL80211_REGDOM_SET_BY_CORE;

	queue_regulatory_request(request);

	return 0;
}
/* User hints */
int regulatory_hint_user(const char *alpha2)
{
	struct regulatory_request *request;

	BUG_ON(!alpha2);

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	/* A user hint is not tied to any particular wiphy */
	request->wiphy_idx = WIPHY_IDX_STALE;
	request->alpha2[0] = alpha2[0];
	request->alpha2[1] = alpha2[1];
	request->initiator = NL80211_REGDOM_SET_BY_USER;

	queue_regulatory_request(request);

	return 0;
}
/* Driver hints */
int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
{
	struct regulatory_request *request;

	BUG_ON(!alpha2);
	BUG_ON(!wiphy);

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->wiphy_idx = get_wiphy_idx(wiphy);

	/* Must have registered wiphy first */
	BUG_ON(!wiphy_idx_valid(request->wiphy_idx));

	request->alpha2[0] = alpha2[0];
	request->alpha2[1] = alpha2[1];
	request->initiator = NL80211_REGDOM_SET_BY_DRIVER;

	queue_regulatory_request(request);

	return 0;
}
EXPORT_SYMBOL(regulatory_hint);
/*
 * We hold wdev_lock() here so we cannot hold cfg80211_mutex() and
 * therefore cannot iterate over the rdev list here.
 */
void regulatory_hint_11d(struct wiphy *wiphy,
			 enum ieee80211_band band,
			 u8 *country_ie,
			 u8 country_ie_len)
{
	char alpha2[2];
	enum environment_cap env = ENVIRON_ANY;
	struct regulatory_request *request;

	mutex_lock(&reg_mutex);

	if (unlikely(!last_request))
		goto out;

	/* IE len must be evenly divisible by 2 */
	if (country_ie_len & 0x01)
		goto out;

	if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN)
		goto out;

	/* First two octets of the country IE are the country code */
	alpha2[0] = country_ie[0];
	alpha2[1] = country_ie[1];

	/* Third octet encodes the indoor/outdoor operating environment */
	if (country_ie[2] == 'I')
		env = ENVIRON_INDOOR;
	else if (country_ie[2] == 'O')
		env = ENVIRON_OUTDOOR;

	/*
	 * We will run this only upon a successful connection on cfg80211.
	 * We leave conflict resolution to the workqueue, where can hold
	 * cfg80211_mutex.
	 */
	if (likely(last_request->initiator ==
	    NL80211_REGDOM_SET_BY_COUNTRY_IE &&
	    wiphy_idx_valid(last_request->wiphy_idx)))
		goto out;

	request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
	if (!request)
		goto out;

	request->wiphy_idx = get_wiphy_idx(wiphy);
	request->alpha2[0] = alpha2[0];
	request->alpha2[1] = alpha2[1];
	request->initiator = NL80211_REGDOM_SET_BY_COUNTRY_IE;
	request->country_ie_env = env;

	/* Drop the lock before queueing: reg_work takes it again */
	mutex_unlock(&reg_mutex);

	queue_regulatory_request(request);

	return;

out:
	mutex_unlock(&reg_mutex);
}
/*
 * Choose the alpha2 to re-hint after a regulatory reset. "97" is an
 * intentionally invalid alpha2 used as a sentinel for "nothing to
 * restore". A saved user preference wins over the ieee80211_regdom
 * module parameter, unless @reset_user asks us to discard it.
 */
static void restore_alpha2(char *alpha2, bool reset_user)
{
	/* indicates there is no alpha2 to consider for restoration */
	alpha2[0] = '9';
	alpha2[1] = '7';

	/* The user setting has precedence over the module parameter */
	if (is_user_regdom_saved()) {
		/* Unless we're asked to ignore it and reset it */
		if (reset_user) {
			REG_DBG_PRINT("Restoring regulatory settings "
				      "including user preference\n");
			/* Clear the saved user preference as well */
			user_alpha2[0] = '9';
			user_alpha2[1] = '7';

			/*
			 * If we're ignoring user settings, we still need to
			 * check the module parameter to ensure we put things
			 * back as they were for a full restore.
			 */
			if (!is_world_regdom(ieee80211_regdom)) {
				REG_DBG_PRINT("Keeping preference on "
					      "module parameter ieee80211_regdom: %c%c\n",
					      ieee80211_regdom[0],
					      ieee80211_regdom[1]);
				alpha2[0] = ieee80211_regdom[0];
				alpha2[1] = ieee80211_regdom[1];
			}
		} else {
			REG_DBG_PRINT("Restoring regulatory settings "
				      "while preserving user preference for: %c%c\n",
				      user_alpha2[0],
				      user_alpha2[1]);
			alpha2[0] = user_alpha2[0];
			alpha2[1] = user_alpha2[1];
		}
	} else if (!is_world_regdom(ieee80211_regdom)) {
		REG_DBG_PRINT("Keeping preference on "
			      "module parameter ieee80211_regdom: %c%c\n",
			      ieee80211_regdom[0],
			      ieee80211_regdom[1]);
		alpha2[0] = ieee80211_regdom[0];
		alpha2[1] = ieee80211_regdom[1];
	} else
		REG_DBG_PRINT("Restoring regulatory settings\n");
}
static void restore_custom_reg_settings(struct wiphy *wiphy)
{
struct ieee80211_supported_band *sband;
enum ieee80211_band band;
struct ieee80211_channel *chan;
int i;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
sband = wiphy->bands[band];
if (!sband)
continue;
for (i = 0; i < sband->n_channels; i++) {
chan = &sband->channels[i];
chan->flags = chan->orig_flags;
chan->max_antenna_gain = chan->orig_mag;
chan->max_power = chan->orig_mpwr;
}
}
}
/*
 * Restoring regulatory settings involves ignoring any
 * possibly stale country IE information and user regulatory
 * settings if so desired, this includes any beacon hints
 * learned as we could have traveled outside to another country
 * after disconnection. To restore regulatory settings we do
 * exactly what we did at bootup:
 *
 *   - send a core regulatory hint
 *   - send a user regulatory hint if applicable
 *
 * Device drivers that send a regulatory hint for a specific country
 * keep their own regulatory domain on wiphy->regd so that does
 * not need to be remembered.
 */
static void restore_regulatory_settings(bool reset_user)
{
	char alpha2[2];
	struct reg_beacon *reg_beacon, *btmp;
	struct regulatory_request *reg_request, *tmp;
	LIST_HEAD(tmp_reg_req_list);
	struct cfg80211_registered_device *rdev;

	mutex_lock(&cfg80211_mutex);
	mutex_lock(&reg_mutex);

	reset_regdomains();
	restore_alpha2(alpha2, reset_user);

	/*
	 * If there's any pending requests we simply
	 * stash them to a temporary pending queue and
	 * add them back after we've restored regulatory
	 * settings.
	 */
	spin_lock(&reg_requests_lock);
	if (!list_empty(&reg_requests_list)) {
		list_for_each_entry_safe(reg_request, tmp,
					 &reg_requests_list, list) {
			/* Only user requests survive the restore */
			if (reg_request->initiator !=
			    NL80211_REGDOM_SET_BY_USER)
				continue;
			list_del(&reg_request->list);
			list_add_tail(&reg_request->list, &tmp_reg_req_list);
		}
	}
	spin_unlock(&reg_requests_lock);

	/* Clear beacon hints */
	spin_lock_bh(&reg_pending_beacons_lock);
	if (!list_empty(&reg_pending_beacons)) {
		list_for_each_entry_safe(reg_beacon, btmp,
					 &reg_pending_beacons, list) {
			list_del(&reg_beacon->list);
			kfree(reg_beacon);
		}
	}
	spin_unlock_bh(&reg_pending_beacons_lock);

	if (!list_empty(&reg_beacon_list)) {
		list_for_each_entry_safe(reg_beacon, btmp,
					 &reg_beacon_list, list) {
			list_del(&reg_beacon->list);
			kfree(reg_beacon);
		}
	}

	/* First restore to the basic regulatory settings */
	cfg80211_regdomain = cfg80211_world_regdom;

	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
		if (rdev->wiphy.flags & WIPHY_FLAG_CUSTOM_REGULATORY)
			restore_custom_reg_settings(&rdev->wiphy);
	}

	mutex_unlock(&reg_mutex);
	mutex_unlock(&cfg80211_mutex);

	regulatory_hint_core(cfg80211_regdomain->alpha2);

	/*
	 * This restores the ieee80211_regdom module parameter
	 * preference or the last user requested regulatory
	 * settings, user regulatory settings takes precedence.
	 *
	 * NOTE(review): when alpha2 came from the module parameter in the
	 * reset_user case, user_alpha2 holds the "97" sentinel here yet it
	 * is user_alpha2 that gets hinted — confirm this is intended.
	 */
	if (is_an_alpha2(alpha2))
		regulatory_hint_user(user_alpha2);

	if (list_empty(&tmp_reg_req_list))
		return;

	mutex_lock(&cfg80211_mutex);
	mutex_lock(&reg_mutex);

	/* Re-queue the stashed user requests behind the restore hints */
	spin_lock(&reg_requests_lock);
	list_for_each_entry_safe(reg_request, tmp, &tmp_reg_req_list, list) {
		REG_DBG_PRINT("Adding request for country %c%c back "
			      "into the queue\n",
			      reg_request->alpha2[0],
			      reg_request->alpha2[1]);
		list_del(&reg_request->list);
		list_add_tail(&reg_request->list, &reg_requests_list);
	}
	spin_unlock(&reg_requests_lock);

	mutex_unlock(&reg_mutex);
	mutex_unlock(&cfg80211_mutex);

	REG_DBG_PRINT("Kicking the queue\n");

	schedule_work(&reg_work);
}
/*
 * Called when all devices have disconnected: country IE and beacon
 * information learned while connected may be stale (we may have moved
 * to another country), so restore the boot-time regulatory settings
 * while preserving the user preference.
 */
void regulatory_hint_disconnect(void)
{
	REG_DBG_PRINT("All devices are disconnected, going to "
		      "restore regulatory settings\n");
	restore_regulatory_settings(false);
}
/* True when @freq is 2.4 GHz channel 12, 13 or 14. */
static bool freq_is_chan_12_13_14(u16 freq)
{
	int chan;

	for (chan = 12; chan <= 14; chan++)
		if (freq == ieee80211_channel_to_frequency(chan,
							   IEEE80211_BAND_2GHZ))
			return true;

	return false;
}
/*
 * Record a beacon seen on @beacon_chan so passive-scan/no-IBSS
 * restrictions can later be lifted. Channels already processed, radar
 * channels, and 2.4 GHz channels other than 12/13/14 carry no useful
 * regulatory information and are skipped. Returns 0 or -ENOMEM.
 */
int regulatory_hint_found_beacon(struct wiphy *wiphy,
				 struct ieee80211_channel *beacon_chan,
				 gfp_t gfp)
{
	struct reg_beacon *reg_beacon;

	if (likely((beacon_chan->beacon_found ||
	    (beacon_chan->flags & IEEE80211_CHAN_RADAR) ||
	    (beacon_chan->band == IEEE80211_BAND_2GHZ &&
	     !freq_is_chan_12_13_14(beacon_chan->center_freq)))))
		return 0;

	reg_beacon = kzalloc(sizeof(struct reg_beacon), gfp);
	if (!reg_beacon)
		return -ENOMEM;

	REG_DBG_PRINT("Found new beacon on "
		      "frequency: %d MHz (Ch %d) on %s\n",
		      beacon_chan->center_freq,
		      ieee80211_frequency_to_channel(beacon_chan->center_freq),
		      wiphy_name(wiphy));

	memcpy(&reg_beacon->chan, beacon_chan,
	       sizeof(struct ieee80211_channel));

	/*
	 * Since we can be called from BH or and non-BH context
	 * we must use spin_lock_bh()
	 */
	spin_lock_bh(&reg_pending_beacons_lock);
	list_add_tail(&reg_beacon->list, &reg_pending_beacons);
	spin_unlock_bh(&reg_pending_beacons_lock);

	/* reg_todo() will apply the hint to all current wiphys */
	schedule_work(&reg_work);

	return 0;
}
/* Log every frequency/power rule of @rd in human-readable form. */
static void print_rd_rules(const struct ieee80211_regdomain *rd)
{
	unsigned int i;
	const struct ieee80211_reg_rule *reg_rule = NULL;
	const struct ieee80211_freq_range *freq_range = NULL;
	const struct ieee80211_power_rule *power_rule = NULL;

	pr_info(" (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp)\n");

	for (i = 0; i < rd->n_reg_rules; i++) {
		reg_rule = &rd->reg_rules[i];
		freq_range = &reg_rule->freq_range;
		power_rule = &reg_rule->power_rule;

		/*
		 * There may not be documentation for max antenna gain
		 * in certain regions
		 */
		if (power_rule->max_antenna_gain)
			pr_info(" (%d KHz - %d KHz @ %d KHz), (%d mBi, %d mBm)\n",
				freq_range->start_freq_khz,
				freq_range->end_freq_khz,
				freq_range->max_bandwidth_khz,
				power_rule->max_antenna_gain,
				power_rule->max_eirp);
		else
			pr_info(" (%d KHz - %d KHz @ %d KHz), (N/A, %d mBm)\n",
				freq_range->start_freq_khz,
				freq_range->end_freq_khz,
				freq_range->max_bandwidth_khz,
				power_rule->max_eirp);
	}
}
/*
 * Log how the current regulatory domain came to be (updated by an AP's
 * country IE, intersected, world, or a specific country), then dump its
 * rules. NOTE(review): dereferences last_request without a NULL check —
 * presumably only called with a request pending; confirm callers.
 */
static void print_regdomain(const struct ieee80211_regdomain *rd)
{
	if (is_intersected_alpha2(rd->alpha2)) {
		if (last_request->initiator ==
		    NL80211_REGDOM_SET_BY_COUNTRY_IE) {
			struct cfg80211_registered_device *rdev;
			rdev = cfg80211_rdev_by_wiphy_idx(
				last_request->wiphy_idx);
			if (rdev) {
				pr_info("Current regulatory domain updated by AP to: %c%c\n",
					rdev->country_ie_alpha2[0],
					rdev->country_ie_alpha2[1]);
			} else
				pr_info("Current regulatory domain intersected:\n");
		} else
			pr_info("Current regulatory domain intersected:\n");
	} else if (is_world_regdom(rd->alpha2))
		pr_info("World regulatory domain updated:\n");
	else {
		if (is_unknown_alpha2(rd->alpha2))
			pr_info("Regulatory domain changed to driver built-in settings (unknown country)\n");
		else
			pr_info("Regulatory domain changed to country: %c%c\n",
				rd->alpha2[0], rd->alpha2[1]);
	}
	print_rd_rules(rd);
}
/* Log a regulatory domain's alpha2 followed by all of its rules. */
static void print_regdomain_info(const struct ieee80211_regdomain *rd)
{
	pr_info("Regulatory domain: %c%c\n", rd->alpha2[0], rd->alpha2[1]);
	print_rd_rules(rd);
}
/* Takes ownership of rd only if it doesn't fail */
static int __set_regdom(const struct ieee80211_regdomain *rd)
{
	const struct ieee80211_regdomain *intersected_rd = NULL;
	struct cfg80211_registered_device *rdev = NULL;
	struct wiphy *request_wiphy;

	/* Some basic sanity checks first */

	if (is_world_regdom(rd->alpha2)) {
		if (WARN_ON(!reg_is_valid_request(rd->alpha2)))
			return -EINVAL;
		update_world_regdomain(rd);
		return 0;
	}

	if (!is_alpha2_set(rd->alpha2) && !is_an_alpha2(rd->alpha2) &&
	    !is_unknown_alpha2(rd->alpha2))
		return -EINVAL;

	if (!last_request)
		return -EINVAL;

	/*
	 * Lets only bother proceeding on the same alpha2 if the current
	 * rd is non static (it means CRDA was present and was used last)
	 * and the pending request came in from a country IE
	 */
	if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
		/*
		 * If someone else asked us to change the rd lets only bother
		 * checking if the alpha2 changes if CRDA was already called
		 */
		if (!regdom_changes(rd->alpha2))
			return -EINVAL;
	}

	/*
	 * Now lets set the regulatory domain, update all driver channels
	 * and finally inform them of what we have done, in case they want
	 * to review or adjust their own settings based on their own
	 * internal EEPROM data
	 */

	if (WARN_ON(!reg_is_valid_request(rd->alpha2)))
		return -EINVAL;

	if (!is_valid_rd(rd)) {
		pr_err("Invalid regulatory domain detected:\n");
		print_regdomain_info(rd);
		return -EINVAL;
	}

	request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);

	if (!last_request->intersect) {
		int r;

		if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) {
			/* Plain replacement: rd becomes the global regdomain */
			reset_regdomains();
			cfg80211_regdomain = rd;
			return 0;
		}

		/*
		 * For a driver hint, lets copy the regulatory domain the
		 * driver wanted to the wiphy to deal with conflicts
		 */

		/*
		 * Userspace could have sent two replies with only
		 * one kernel request.
		 *
		 * NOTE(review): request_wiphy is dereferenced without a
		 * NULL check — confirm the hinting wiphy cannot disappear
		 * between the driver hint and the CRDA reply.
		 */
		if (request_wiphy->regd)
			return -EALREADY;

		r = reg_copy_regd(&request_wiphy->regd, rd);
		if (r)
			return r;

		reset_regdomains();
		cfg80211_regdomain = rd;
		return 0;
	}

	/* Intersection requires a bit more work */

	if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
		intersected_rd = regdom_intersect(rd, cfg80211_regdomain);
		if (!intersected_rd)
			return -EINVAL;

		/*
		 * We can trash what CRDA provided now.
		 * However if a driver requested this specific regulatory
		 * domain we keep it for its private use
		 */
		if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER)
			request_wiphy->regd = rd;
		else
			kfree(rd);

		rd = NULL;

		reset_regdomains();
		cfg80211_regdomain = intersected_rd;

		return 0;
	}

	/*
	 * Country IE intersection path: intersected_rd is still NULL here,
	 * so the request is always rejected below and the remainder of the
	 * function is unreachable. NOTE(review): looks deliberate (country
	 * IE intersections unsupported) — confirm against history.
	 */
	if (!intersected_rd)
		return -EINVAL;

	rdev = wiphy_to_dev(request_wiphy);

	rdev->country_ie_alpha2[0] = rd->alpha2[0];
	rdev->country_ie_alpha2[1] = rd->alpha2[1];
	rdev->env = last_request->country_ie_env;

	BUG_ON(intersected_rd == rd);

	kfree(rd);
	rd = NULL;

	reset_regdomains();
	cfg80211_regdomain = intersected_rd;

	return 0;
}
/*
 * Use this call to set the current regulatory domain. Conflicts with
 * multiple drivers can be ironed out later. Caller must've already
 * kmalloc'd the rd structure. Caller must hold cfg80211_mutex
 */
int set_regdom(const struct ieee80211_regdomain *rd)
{
	int r;

	assert_cfg80211_lock();

	mutex_lock(&reg_mutex);

	/* Note that this doesn't update the wiphys, this is done below */
	r = __set_regdom(rd);
	if (r) {
		/* __set_regdom() did not take ownership, free rd here */
		kfree(rd);
		mutex_unlock(&reg_mutex);
		return r;
	}

	/* This would make this whole thing pointless */
	if (!last_request->intersect)
		BUG_ON(rd != cfg80211_regdomain);

	/* update all wiphys now with the new established regulatory domain */
	update_all_wiphy_regulatory(last_request->initiator);

	print_regdomain(cfg80211_regdomain);

	nl80211_send_reg_change_event(last_request);

	reg_set_request_processed();

	mutex_unlock(&reg_mutex);

	return r;
}
#ifdef CONFIG_HOTPLUG
/*
 * uevent callback for the regulatory platform device: exposes the
 * country code of an unprocessed request as COUNTRY=XX so udev can
 * invoke CRDA. Returns -ENOMEM when the uevent env buffer is full.
 */
int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	if (last_request && !last_request->processed) {
		if (add_uevent_var(env, "COUNTRY=%c%c",
				   last_request->alpha2[0],
				   last_request->alpha2[1]))
			return -ENOMEM;
	}

	return 0;
}
#else
/* Without hotplug support there is no udev path to call CRDA */
int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	return -ENODEV;
}
#endif /* CONFIG_HOTPLUG */
/* Caller must hold cfg80211_mutex */
void reg_device_remove(struct wiphy *wiphy)
{
	struct wiphy *request_wiphy = NULL;

	assert_cfg80211_lock();

	mutex_lock(&reg_mutex);

	/* Drop the wiphy's private regulatory domain, if any */
	kfree(wiphy->regd);

	if (last_request)
		request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);

	if (!request_wiphy || request_wiphy != wiphy)
		goto out;

	/* The pending request's wiphy is going away: mark it stale */
	last_request->wiphy_idx = WIPHY_IDX_STALE;
	last_request->country_ie_env = ENVIRON_ANY;
out:
	mutex_unlock(&reg_mutex);
}
/*
 * Delayed-work handler: fires when CRDA has not answered a regulatory
 * request in time, and falls back to restoring the regulatory settings
 * (passing true — semantics of the flag defined in
 * restore_regulatory_settings(), outside this chunk).
 */
static void reg_timeout_work(struct work_struct *work)
{
	REG_DBG_PRINT("Timeout while waiting for CRDA to reply, "
		      "restoring regulatory settings");
	restore_regulatory_settings(true);
}
/*
 * Boot-time initialisation of the regulatory core: registers the
 * "regulatory" platform device used to call out to CRDA, initialises the
 * request/beacon locks, installs the static world regdomain and issues
 * the initial core hint (plus an optional user hint from the module
 * parameter).
 *
 * Mojibake fix throughout: "&reg_..." had been corrupted to "®_..."
 * by an HTML-entity decode of "&reg".
 */
int __init regulatory_init(void)
{
	int err = 0;

	reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0);
	if (IS_ERR(reg_pdev))
		return PTR_ERR(reg_pdev);

	reg_pdev->dev.type = &reg_device_type;

	spin_lock_init(&reg_requests_lock);
	spin_lock_init(&reg_pending_beacons_lock);

	cfg80211_regdomain = cfg80211_world_regdom;

	/* deliberately invalid alpha2: "97" can never match a country code */
	user_alpha2[0] = '9';
	user_alpha2[1] = '7';

	/* We always try to get an update for the static regdomain */
	err = regulatory_hint_core(cfg80211_regdomain->alpha2);
	if (err) {
		if (err == -ENOMEM)
			return err;
		/*
		 * N.B. kobject_uevent_env() can fail mainly for when we're out
		 * memory which is handled and propagated appropriately above
		 * but it can also fail during a netlink_broadcast() or during
		 * early boot for call_usermodehelper(). For now treat these
		 * errors as non-fatal.
		 */
		pr_err("kobject_uevent_env() was unable to call CRDA during init\n");
#ifdef CONFIG_CFG80211_REG_DEBUG
		/* We want to find out exactly why when debugging */
		WARN_ON(err);
#endif
	}

	/*
	 * Finally, if the user set the module parameter treat it
	 * as a user hint.
	 */
	if (!is_world_regdom(ieee80211_regdom))
		regulatory_hint_user(ieee80211_regdom);

	return 0;
}
void /* __init_or_exit */ regulatory_exit(void)
{
struct regulatory_request *reg_request, *tmp;
struct reg_beacon *reg_beacon, *btmp;
cancel_work_sync(®_work);
cancel_delayed_work_sync(®_timeout);
mutex_lock(&cfg80211_mutex);
mutex_lock(®_mutex);
reset_regdomains();
kfree(last_request);
platform_device_unregister(reg_pdev);
spin_lock_bh(®_pending_beacons_lock);
if (!list_empty(®_pending_beacons)) {
list_for_each_entry_safe(reg_beacon, btmp,
®_pending_beacons, list) {
list_del(®_beacon->list);
kfree(reg_beacon);
}
}
spin_unlock_bh(®_pending_beacons_lock);
if (!list_empty(®_beacon_list)) {
list_for_each_entry_safe(reg_beacon, btmp,
®_beacon_list, list) {
list_del(®_beacon->list);
kfree(reg_beacon);
}
}
spin_lock(®_requests_lock);
if (!list_empty(®_requests_list)) {
list_for_each_entry_safe(reg_request, tmp,
®_requests_list, list) {
list_del(®_request->list);
kfree(reg_request);
}
}
spin_unlock(®_requests_lock);
mutex_unlock(®_mutex);
mutex_unlock(&cfg80211_mutex);
}
|
vSlipenchuk/ac100hd
|
net/wireless/reg.c
|
C
|
gpl-2.0
| 61,693
|
/*
* pcm audio input device
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <mach/debug_audio_mm.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/msm_audio.h>
#include <asm/atomic.h>
#include <asm/ioctls.h>
#include <mach/msm_adsp.h>
#include <mach/qdsp5v2/qdsp5audreccmdi.h>
#include <mach/qdsp5v2/qdsp5audrecmsg.h>
#include <mach/qdsp5v2/audpreproc.h>
#include <mach/qdsp5v2/audio_dev_ctl.h>
/* FRAME_NUM must be a power of two */
#define FRAME_NUM (8)
#define FRAME_SIZE (2052 * 2)
#define MONO_DATA_SIZE (2048)
#define STEREO_DATA_SIZE (MONO_DATA_SIZE * 2)
#define DMASZ (FRAME_SIZE * FRAME_NUM)
/* One frame slot in the capture ring buffer. */
struct buffer {
	void *data;	/* payload start (past the per-frame DSP header) */
	uint32_t size;	/* bytes of valid data, from the DSP frame header */
	uint32_t read;	/* read offset bookkeeping (reset by flush) */
	uint32_t addr;
};
/*
 * Per-device state for the PCM capture driver (a single instance,
 * the_audio_in, backs /dev/msm_pcm_in).
 */
struct audio_in {
	struct buffer in[FRAME_NUM];	/* capture ring, FRAME_NUM slots */

	spinlock_t dsp_lock;		/* protects ring indices vs. DSP events */

	atomic_t in_bytes;		/* total bytes captured (statistics) */
	atomic_t in_samples;		/* total frames captured (statistics) */

	struct mutex lock;		/* serialises ioctl/open/release */
	struct mutex read_lock;		/* serialises readers */
	wait_queue_head_t wait;		/* readers wait here for data/stop/abort */
	wait_queue_head_t wait_enable;	/* waits for DSP enable/disable acks */

	struct msm_adsp_module *audrec;

	/* configuration to use on next enable */
	uint32_t samp_rate;
	uint32_t channel_mode;
	uint32_t buffer_size; /* 2048 for mono, 4096 for stereo */
	uint32_t enc_type;

	uint32_t dsp_cnt;
	uint32_t in_head; /* next buffer dsp will write */
	uint32_t in_tail; /* next buffer read() will read */
	uint32_t in_count; /* number of buffers available to read() */

	const char *module_name;
	unsigned queue_ids;		/* packed DSP queue ids (hi/lo 16 bits) */
	uint16_t enc_id; /* Session Id */

	uint16_t source; /* Encoding source bit mask */
	uint32_t device_events; /* device events interested in */
	uint32_t dev_cnt;		/* number of ready capture devices */
	spinlock_t dev_lock;		/* protects dev_cnt/source updates */

	/* data allocated for various buffers */
	char *data;			/* DMA-coherent buffer (DMASZ bytes) */
	dma_addr_t phys;		/* its bus address, given to the DSP */

	int opened;
	int enabled;
	int running;
	int stopped; /* set when stopped, cleared on flush */
	int abort; /* set when error, like sample rate mismatch */
};
/* Singleton device instance; the driver supports one opener at a time. */
static struct audio_in the_audio_in;

/* Per-frame header the DSP writes immediately before each frame's samples. */
struct audio_frame {
	uint16_t frame_count_lsw;
	uint16_t frame_count_msw;
	uint16_t frame_length;		/* bytes of sample data that follow */
	uint16_t erased_pcm;
	unsigned char raw_bitstream[]; /* samples */
} __attribute__((packed));
/* Audrec Queue command sent macro's */
/*
 * audpreproc_aenc_alloc() packs two DSP queue ids into audio->queue_ids:
 * bitstream queue id in the upper 16 bits, audrec command queue id in
 * the lower 16 bits (see the masks below).
 */
#define audrec_send_bitstreamqueue(audio, cmd, len) \
	msm_adsp_write(audio->audrec, ((audio->queue_ids & 0xFFFF0000) >> 16),\
			cmd, len)
#define audrec_send_audrecqueue(audio, cmd, len) \
	msm_adsp_write(audio->audrec, (audio->queue_ids & 0x0000FFFF),\
			cmd, len)
/* DSP command send functions */
static int audpcm_in_enc_config(struct audio_in *audio, int enable);
static int audpcm_in_param_config(struct audio_in *audio);
static int audpcm_in_mem_config(struct audio_in *audio);
static int audpcm_in_record_config(struct audio_in *audio, int enable);
static int audpcm_dsp_read_buffer(struct audio_in *audio, uint32_t read_cnt);
static void audpcm_in_get_dsp_frames(struct audio_in *audio);
static void audpcm_in_flush(struct audio_in *audio);
/*
 * Audio-device event listener registered in audpcm_in_open().
 * Tracks which capture devices are ready (dev_cnt / source mask) and
 * reacts to device ready/release and sample-rate change events.
 * Runs asynchronously to readers, hence dev_lock around the bookkeeping.
 */
static void pcm_in_listener(u32 evt_id, union auddev_evt_data *evt_payload,
				void *private_data)
{
	struct audio_in *audio = (struct audio_in *) private_data;
	unsigned long flags;

	MM_DBG("evt_id = 0x%8x\n", evt_id);
	switch (evt_id) {
	case AUDDEV_EVT_DEV_RDY: {
		MM_DBG("AUDDEV_EVT_DEV_RDY\n");
		spin_lock_irqsave(&audio->dev_lock, flags);
		audio->dev_cnt++;
		/* add this device to the encoding source bit mask */
		audio->source |= (0x1 << evt_payload->routing_id);
		spin_unlock_irqrestore(&audio->dev_lock, flags);

		/* only (re)configure routing once fully up and running */
		if ((audio->running == 1) && (audio->enabled == 1))
			audpcm_in_record_config(audio, 1);

		break;
	}
	case AUDDEV_EVT_DEV_RLS: {
		MM_DBG("AUDDEV_EVT_DEV_RLS\n");
		spin_lock_irqsave(&audio->dev_lock, flags);
		audio->dev_cnt--;
		audio->source &= ~(0x1 << evt_payload->routing_id);
		spin_unlock_irqrestore(&audio->dev_lock, flags);

		if (!audio->running || !audio->enabled)
			break;

		/* Turn of as per source */
		if (audio->source)
			audpcm_in_record_config(audio, 1);
		else
			/* Turn off all */
			audpcm_in_record_config(audio, 0);

		break;
	}
	case AUDDEV_EVT_FREQ_CHG: {
		MM_DBG("Encoder Driver got sample rate change event\n");
		MM_DBG("sample rate %d\n", evt_payload->freq_info.sample_rate);
		MM_DBG("dev_type %d\n", evt_payload->freq_info.dev_type);
		MM_DBG("acdb_dev_id %d\n", evt_payload->freq_info.acdb_dev_id);
		if (audio->running == 1) {
			/* Stop Recording sample rate does not match
			   with device sample rate */
			if (evt_payload->freq_info.sample_rate !=
				audio->samp_rate) {
				audpcm_in_record_config(audio, 0);
				/* abort wakes any blocked reader with -EPERM */
				audio->abort = 1;
				wake_up(&audio->wait);
			}
		}
		break;
	}
	default:
		MM_ERR("wrong event %d\n", evt_id);
		break;
	}
}
/* ------------------- dsp preproc event handler--------------------- */
/* ------------------- dsp preproc event handler--------------------- */
/*
 * Handles acknowledgement messages from the audio pre-processor and
 * drives the enable sequence one step per ack:
 *   ENC_CFG done -> param config -> param done -> mem config,
 * with the final RECORD_CFG done waking the AUDIO_START waiter.
 */
static void audpreproc_dsp_event(void *data, unsigned id, void *msg)
{
	struct audio_in *audio = data;

	switch (id) {
	case AUDPREPROC_ERROR_MSG: {
		struct audpreproc_err_msg *err_msg = msg;

		MM_ERR("ERROR_MSG: stream id %d err idx %d\n",
		err_msg->stream_id, err_msg->aud_preproc_err_idx);
		/* Error case */
		wake_up(&audio->wait_enable);
		break;
	}
	case AUDPREPROC_CMD_CFG_DONE_MSG: {
		MM_DBG("CMD_CFG_DONE_MSG \n");
		break;
	}
	case AUDPREPROC_CMD_ENC_CFG_DONE_MSG: {
		struct audpreproc_cmd_enc_cfg_done_msg *enc_cfg_msg = msg;

		MM_DBG("CMD_ENC_CFG_DONE_MSG: stream id %d enc type \
			0x%8x\n", enc_cfg_msg->stream_id,
			enc_cfg_msg->rec_enc_type);
		/* Encoder enable success */
		if (enc_cfg_msg->rec_enc_type & ENCODE_ENABLE)
			audpcm_in_param_config(audio);
		else { /* Encoder disable success */
			audio->running = 0;
			audpcm_in_record_config(audio, 0);
		}
		break;
	}
	case AUDPREPROC_CMD_ENC_PARAM_CFG_DONE_MSG: {
		MM_DBG("CMD_ENC_PARAM_CFG_DONE_MSG \n");
		audpcm_in_mem_config(audio);
		break;
	}
	case AUDPREPROC_AFE_CMD_AUDIO_RECORD_CFG_DONE_MSG: {
		MM_DBG("AFE_CMD_AUDIO_RECORD_CFG_DONE_MSG \n");
		/* record config acked: unblock the AUDIO_START path */
		wake_up(&audio->wait_enable);
		break;
	}
	default:
		MM_ERR("Unknown Event id %d\n", id);
	}
}
/* ------------------- dsp audrec event handler--------------------- */
/* ------------------- dsp audrec event handler--------------------- */
/*
 * Handles messages from the audrec DSP module. Events are pulled out
 * of the DSP queue via the supplied getevent() callback.
 */
static void audrec_dsp_event(void *data, unsigned id, size_t len,
			      void (*getevent)(void *ptr, size_t len))
{
	struct audio_in *audio = data;

	switch (id) {
	case AUDREC_CMD_MEM_CFG_DONE_MSG: {
		/* memory config acked: the capture pipeline is now live */
		MM_DBG("CMD_MEM_CFG_DONE MSG DONE\n");
		audio->running = 1;
		if (audio->dev_cnt > 0)
			audpcm_in_record_config(audio, 1);
		break;
	}
	case AUDREC_FATAL_ERR_MSG: {
		struct audrec_fatal_err_msg fatal_err_msg;

		getevent(&fatal_err_msg, AUDREC_FATAL_ERR_MSG_LEN);
		MM_ERR("FATAL_ERR_MSG: err id %d\n",
				fatal_err_msg.audrec_err_id);
		/* Error stop the encoder */
		audio->stopped = 1;
		wake_up(&audio->wait);
		break;
	}
	case AUDREC_UP_PACKET_READY_MSG: {
		/* a new frame landed in the DMA buffer */
		struct audrec_up_pkt_ready_msg pkt_ready_msg;

		getevent(&pkt_ready_msg, AUDREC_UP_PACKET_READY_MSG_LEN);
		MM_DBG("UP_PACKET_READY_MSG: write cnt lsw %d \
		write cnt msw %d read cnt lsw %d read cnt msw %d \n",\
		pkt_ready_msg.audrec_packet_write_cnt_lsw, \
		pkt_ready_msg.audrec_packet_write_cnt_msw, \
		pkt_ready_msg.audrec_up_prev_read_cnt_lsw, \
		pkt_ready_msg.audrec_up_prev_read_cnt_msw);

		audpcm_in_get_dsp_frames(audio);
		break;
	}
	default:
		MM_ERR("Unknown Event id %d\n", id);
	}
}
/*
 * Consume one DSP-written frame: record its length from the frame header
 * (which sits immediately before the payload), advance the ring head,
 * and acknowledge the read back to the DSP. On ring overflow the oldest
 * unread frame is dropped by advancing the tail. Wakes blocked readers.
 */
static void audpcm_in_get_dsp_frames(struct audio_in *audio)
{
	struct audio_frame *frame;
	uint32_t index;
	unsigned long flags;

	index = audio->in_head;

	/* header precedes the payload in the DMA buffer */
	frame = (void *) (((char *)audio->in[index].data) - \
			 sizeof(*frame));

	spin_lock_irqsave(&audio->dsp_lock, flags);
	audio->in[index].size = frame->frame_length;

	/* statistics of read */
	atomic_add(audio->in[index].size, &audio->in_bytes);
	atomic_add(1, &audio->in_samples);

	audio->in_head = (audio->in_head + 1) & (FRAME_NUM - 1);

	/* If overflow, move the tail index foward. */
	if (audio->in_head == audio->in_tail)
		audio->in_tail = (audio->in_tail + 1) & (FRAME_NUM - 1);
	else
		audio->in_count++;

	audpcm_dsp_read_buffer(audio, audio->dsp_cnt++);
	spin_unlock_irqrestore(&audio->dsp_lock, flags);

	wake_up(&audio->wait);
}
/* Callbacks handed to msm_adsp_get() for the audrec module. */
struct msm_adsp_ops audrec_adsp_ops = {
	.event = audrec_dsp_event,
};
/* Send AUDPREPROC_AUDREC_CMD_ENC_CFG to enable or disable the encoder. */
static int audpcm_in_enc_config(struct audio_in *audio, int enable)
{
	struct audpreproc_audrec_cmd_enc_cfg cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id = AUDPREPROC_AUDREC_CMD_ENC_CFG;
	cmd.stream_id = audio->enc_id;
	if (enable)
		cmd.audrec_enc_type = audio->enc_type | ENCODE_ENABLE;
	else
		/* NOTE(review): cmd was just memset to 0, so this leaves
		 * audrec_enc_type == 0 rather than (enc_type & ~ENABLE);
		 * presumably the DSP only checks the ENCODE_ENABLE bit on
		 * disable — confirm against the DSP interface before changing.
		 */
		cmd.audrec_enc_type &= ~(ENCODE_ENABLE);

	return audpreproc_send_audreccmdqueue(&cmd, sizeof(cmd));
}
/*
 * Send the WAV encoder parameters (sample rate index, mono/stereo mode)
 * for this session. memset keeps unset command bytes zeroed, which
 * matters because the whole struct goes over the DSP queue.
 */
static int audpcm_in_param_config(struct audio_in *audio)
{
	struct audpreproc_audrec_cmd_parm_cfg_wav cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.common.cmd_id = AUDPREPROC_AUDREC_CMD_PARAM_CFG;
	cmd.common.stream_id = audio->enc_id;

	cmd.aud_rec_samplerate_idx = audio->samp_rate;
	cmd.aud_rec_stereo_mode = audio->channel_mode;
	return audpreproc_send_audreccmdqueue(&cmd, sizeof(cmd));
}
/* To Do: msm_snddev_route_enc(audio->enc_id); */
/*
 * Turn recording on or off for this session, routing from the devices
 * currently set in audio->source (bit mask maintained by the listener).
 */
static int audpcm_in_record_config(struct audio_in *audio, int enable)
{
	struct audpreproc_afe_cmd_audio_record_cfg cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id = AUDPREPROC_AFE_CMD_AUDIO_RECORD_CFG;
	cmd.stream_id = audio->enc_id;
	if (enable)
		cmd.destination_activity = AUDIO_RECORDING_TURN_ON;
	else
		cmd.destination_activity = AUDIO_RECORDING_TURN_OFF;

	cmd.source_mix_mask = audio->source;

	return audpreproc_send_audreccmdqueue(&cmd, sizeof(cmd));
}
/*
 * Describe the shared DMA frame buffer to the DSP and compute the
 * per-frame payload pointers (payload starts 4 halfwords past each
 * frame's header — see struct audio_frame).
 */
static int audpcm_in_mem_config(struct audio_in *audio)
{
	struct audrec_cmd_arecmem_cfg cmd;
	uint16_t *data = (void *) audio->data;	/* walk buffer in halfwords */
	int n;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id = AUDREC_CMD_MEM_CFG_CMD;
	cmd.audrec_up_pkt_intm_count = 1;
	cmd.audrec_ext_pkt_start_addr_msw = audio->phys >> 16;
	cmd.audrec_ext_pkt_start_addr_lsw = audio->phys;
	cmd.audrec_ext_pkt_buf_number = FRAME_NUM;

	/* prepare buffer pointers:
	 * Mono: 1024 samples + 4 halfword header
	 * Stereo: 2048 samples + 4 halfword header
	 */
	for (n = 0; n < FRAME_NUM; n++) {
		audio->in[n].data = data + 4;	/* skip 4-halfword header */
		data += (4 + (audio->channel_mode ? 2048 : 1024));
		MM_DBG("0x%8x\n", (int)(audio->in[n].data - 8));
	}

	return audrec_send_audrecqueue(audio, &cmd, sizeof(cmd));
}
/*
 * Acknowledge consumption of frames back to the DSP by reporting the
 * running read count (split across two 16-bit fields).
 */
static int audpcm_dsp_read_buffer(struct audio_in *audio, uint32_t read_cnt)
{
	struct up_audrec_packet_ext_ptr cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id = UP_AUDREC_PACKET_EXT_PTR;
	cmd.audrec_up_curr_read_count_msw = read_cnt >> 16;
	cmd.audrec_up_curr_read_count_lsw = read_cnt;

	return audrec_send_bitstreamqueue(audio, &cmd, sizeof(cmd));
}
/* must be called with audio->lock held */
/*
 * Bring the capture pipeline up: enable the pre-processor session,
 * then the audrec module, then kick off the encoder config sequence
 * (the rest of the handshake is driven by DSP acks in
 * audpreproc_dsp_event/audrec_dsp_event). Idempotent when enabled.
 * Returns 0 on success or -ENODEV if either module fails to enable.
 */
static int audpcm_in_enable(struct audio_in *audio)
{
	if (audio->enabled)
		return 0;

	if (audpreproc_enable(audio->enc_id, &audpreproc_dsp_event, audio)) {
		MM_ERR("msm_adsp_enable(audpreproc) failed\n");
		return -ENODEV;
	}

	if (msm_adsp_enable(audio->audrec)) {
		MM_ERR("msm_adsp_enable(audrec) failed\n");
		/* roll back the pre-processor enable */
		audpreproc_disable(audio->enc_id, audio);
		return -ENODEV;
	}
	audio->enabled = 1;
	audpcm_in_enc_config(audio, 1);

	return 0;
}
/* must be called with audio->lock held */
/*
 * Tear the capture pipeline down: ask the DSP to stop the encoder, wait
 * (bounded) for the stop ack, then disable both DSP modules. A no-op if
 * the pipeline was never enabled. Always returns 0.
 */
static int audpcm_in_disable(struct audio_in *audio)
{
	if (!audio->enabled)
		return 0;

	audio->enabled = 0;
	audpcm_in_enc_config(audio, 0);
	wake_up(&audio->wait);
	/* give the DSP up to one second to acknowledge the stop */
	wait_event_interruptible_timeout(audio->wait_enable,
			audio->running == 0, 1*HZ);
	msm_adsp_disable(audio->audrec);
	audpreproc_disable(audio->enc_id, audio);

	return 0;
}
/*
 * Reset the capture ring to empty: clear the DSP read counter, the
 * head/tail/count indices and every slot, then (after logging the old
 * totals) zero the byte/sample statistics.
 */
static void audpcm_in_flush(struct audio_in *audio)
{
	int slot;

	audio->dsp_cnt = 0;
	audio->in_head = 0;
	audio->in_tail = 0;
	audio->in_count = 0;

	for (slot = 0; slot < FRAME_NUM; slot++) {
		audio->in[slot].size = 0;
		audio->in[slot].read = 0;
	}

	MM_DBG("in_bytes %d\n", atomic_read(&audio->in_bytes));
	MM_DBG("in_samples %d\n", atomic_read(&audio->in_samples));

	atomic_set(&audio->in_bytes, 0);
	atomic_set(&audio->in_samples, 0);
}
/* ------------------- device --------------------- */
/*
 * ioctl handler for /dev/msm_pcm_in.
 *
 * AUDIO_GET_STATS is served lock-free (atomics only); everything else
 * runs under audio->lock.
 */
static long audpcm_in_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	struct audio_in *audio = file->private_data;
	int rc = 0;

	if (cmd == AUDIO_GET_STATS) {
		struct msm_audio_stats stats;

		stats.byte_count = atomic_read(&audio->in_bytes);
		stats.sample_count = atomic_read(&audio->in_samples);
		if (copy_to_user((void *) arg, &stats, sizeof(stats)))
			return -EFAULT;
		return rc;
	}

	mutex_lock(&audio->lock);
	switch (cmd) {
	case AUDIO_START: {
		uint32_t freq;
		/* Poll at 48KHz always */
		freq = 48000;
		MM_DBG("AUDIO_START\n");
		/* reserve the device sample rate for this session first */
		rc = msm_snddev_request_freq(&freq, audio->enc_id,
					SNDDEV_CAP_TX, AUDDEV_CLNT_ENC);
		MM_DBG("sample rate configured %d sample rate requested %d\n",
				freq, audio->samp_rate);
		if (rc < 0) {
			MM_DBG("sample rate can not be set, return code %d\n",\
							rc);
			msm_snddev_withdraw_freq(audio->enc_id,
					SNDDEV_CAP_TX, AUDDEV_CLNT_ENC);
			MM_DBG("msm_snddev_withdraw_freq\n");
			break;
		}
		rc = audpcm_in_enable(audio);
		if (!rc) {
			/* the enable handshake completes asynchronously;
			 * wait (bounded) for the DSP to report running */
			rc =
			wait_event_interruptible_timeout(audio->wait_enable,
				audio->running != 0, 1*HZ);
			MM_DBG("state %d rc = %d\n", audio->running, rc);

			if (audio->running == 0)
				rc = -ENODEV;
			else
				rc = 0;
		}
		break;
	}
	case AUDIO_STOP: {
		rc = audpcm_in_disable(audio);
		rc = msm_snddev_withdraw_freq(audio->enc_id,
					SNDDEV_CAP_TX, AUDDEV_CLNT_ENC);
		MM_DBG("msm_snddev_withdraw_freq\n");
		audio->stopped = 1;
		audio->abort = 0;
		break;
	}
	case AUDIO_FLUSH: {
		if (audio->stopped) {
			/* Make sure we're stopped and we wake any threads
			 * that might be blocked holding the read_lock.
			 * While audio->stopped read threads will always
			 * exit immediately.
			 */
			wake_up(&audio->wait);
			mutex_lock(&audio->read_lock);
			audpcm_in_flush(audio);
			mutex_unlock(&audio->read_lock);
		}
		break;
	}
	case AUDIO_SET_CONFIG: {
		struct msm_audio_config cfg;
		if (copy_from_user(&cfg, (void *) arg, sizeof(cfg))) {
			rc = -EFAULT;
			break;
		}
		/* translate user channel count to the DSP mode constant */
		if (cfg.channel_count == 1) {
			cfg.channel_count = AUDREC_CMD_MODE_MONO;
		} else if (cfg.channel_count == 2) {
			cfg.channel_count = AUDREC_CMD_MODE_STEREO;
		} else {
			rc = -EINVAL;
			break;
		}
		audio->samp_rate = cfg.sample_rate;
		audio->channel_mode = cfg.channel_count;
		audio->buffer_size =
				audio->channel_mode ? STEREO_DATA_SIZE : \
							MONO_DATA_SIZE;
		break;
	}
	case AUDIO_GET_CONFIG: {
		struct msm_audio_config cfg;
		memset(&cfg, 0, sizeof(cfg));
		cfg.buffer_size = audio->buffer_size;
		cfg.buffer_count = FRAME_NUM;
		cfg.sample_rate = audio->samp_rate;
		if (audio->channel_mode == AUDREC_CMD_MODE_MONO)
			cfg.channel_count = 1;
		else
			cfg.channel_count = 2;

		if (copy_to_user((void *) arg, &cfg, sizeof(cfg)))
			rc = -EFAULT;
		break;
	}
	case AUDIO_GET_SESSION_ID: {
		if (copy_to_user((void *) arg, &audio->enc_id,
			sizeof(unsigned short))) {
			rc = -EFAULT;
		}
		break;
	}
	default:
		rc = -EINVAL;
	}
	mutex_unlock(&audio->lock);
	return rc;
}
/*
 * read() for the capture device: blocks until a full frame is available
 * (or stop/abort), copies whole frames to userspace, and retries a frame
 * if the DSP overran it mid-copy. Partial-frame reads are rejected
 * ("short read"). Returns bytes copied, 0 at end of stream, or -errno.
 */
static ssize_t audpcm_in_read(struct file *file,
				char __user *buf,
				size_t count, loff_t *pos)
{
	struct audio_in *audio = file->private_data;
	unsigned long flags;
	const char __user *start = buf;
	void *data;
	uint32_t index;
	uint32_t size;
	int rc = 0;

	mutex_lock(&audio->read_lock);
	while (count > 0) {
		rc = wait_event_interruptible(
			audio->wait, (audio->in_count > 0) || audio->stopped ||
			audio->abort);

		if (rc < 0)
			break;

		if (audio->stopped && !audio->in_count) {
			MM_DBG("Driver in stop state, No more buffer to read");
			rc = 0;/* End of File */
			break;
		}

		if (audio->abort) {
			rc = -EPERM; /* Not permitted due to abort */
			break;
		}

		index = audio->in_tail;
		data = (uint8_t *) audio->in[index].data;
		size = audio->in[index].size;
		if (count >= size) {
			/* copy outside the spinlock; validity re-checked below */
			if (copy_to_user(buf, data, size)) {
				rc = -EFAULT;
				break;
			}
			spin_lock_irqsave(&audio->dsp_lock, flags);
			if (index != audio->in_tail) {
				/* overrun -- data is
				 * invalid and we need to retry */
				spin_unlock_irqrestore(&audio->dsp_lock, flags);
				continue;
			}
			audio->in[index].size = 0;
			audio->in_tail = (audio->in_tail + 1) & (FRAME_NUM - 1);
			audio->in_count--;
			spin_unlock_irqrestore(&audio->dsp_lock, flags);
			count -= size;
			buf += size;
		} else {
			/* caller's buffer can't hold a whole frame */
			MM_ERR("short read\n");
			break;
		}
	}
	mutex_unlock(&audio->read_lock);
	/* report progress even if the loop ended with an error */
	if (buf > start)
		return buf - start;

	return rc;
}
/* This is a capture-only device: writes are always rejected. */
static ssize_t audpcm_in_write(struct file *file,
				const char __user *buf,
				size_t count, loff_t *pos)
{
	return -EINVAL;
}
/*
 * Final close: release the reserved sample rate, unregister the device
 * event listener, stop the pipeline, free the encoder session and the
 * audrec module handle, and mark the singleton available again.
 */
static int audpcm_in_release(struct inode *inode, struct file *file)
{
	struct audio_in *audio = file->private_data;

	mutex_lock(&audio->lock);
	/* with draw frequency for session
	   incase not stopped the driver */
	msm_snddev_withdraw_freq(audio->enc_id, SNDDEV_CAP_TX,
					AUDDEV_CLNT_ENC);
	auddev_unregister_evt_listner(AUDDEV_CLNT_ENC, audio->enc_id);
	audpcm_in_disable(audio);
	audpcm_in_flush(audio);
	msm_adsp_put(audio->audrec);
	audpreproc_aenc_free(audio->enc_id);
	audio->audrec = NULL;
	audio->opened = 0;
	mutex_unlock(&audio->lock);
	return 0;
}
/*
 * open() for the capture device. Enforces a single opener, installs the
 * default WAV/mono/8kHz configuration, allocates an encoder session and
 * the matching audrec module, and registers for device events.
 */
static int audpcm_in_open(struct inode *inode, struct file *file)
{
	struct audio_in *audio = &the_audio_in;
	int rc;
	int encid;

	mutex_lock(&audio->lock);
	if (audio->opened) {
		rc = -EBUSY;
		goto done;
	}

	/* Settings will be re-config at AUDIO_SET_CONFIG,
	 * but at least we need to have initial config
	 */
	audio->channel_mode = AUDREC_CMD_MODE_MONO;
	audio->buffer_size = MONO_DATA_SIZE;
	audio->samp_rate = 8000;
	audio->enc_type = ENC_TYPE_WAV;
	/* NOTE(review): this assignment is dead — audio->source is reset
	 * to 0 below before it is ever read; the mask is rebuilt by
	 * pcm_in_listener on AUDDEV_EVT_DEV_RDY events. */
	audio->source = INTERNAL_CODEC_TX_SOURCE_MIX_MASK;

	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto done;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			   &audrec_adsp_ops, audio);
	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	audio->stopped = 0;
	audio->source = 0;
	audio->abort = 0;
	audpcm_in_flush(audio);
	audio->device_events = AUDDEV_EVT_DEV_RDY | AUDDEV_EVT_DEV_RLS |
				AUDDEV_EVT_FREQ_CHG;

	rc = auddev_register_evt_listner(audio->device_events,
					AUDDEV_CLNT_ENC, audio->enc_id,
					pcm_in_listener, (void *) audio);
	if (rc) {
		MM_ERR("failed to register device event listener\n");
		goto evt_error;
	}
	file->private_data = audio;
	audio->opened = 1;
	rc = 0;
done:
	mutex_unlock(&audio->lock);
	return rc;
evt_error:
	/* undo the adsp/encoder allocations made above */
	msm_adsp_put(audio->audrec);
	audpreproc_aenc_free(audio->enc_id);
	mutex_unlock(&audio->lock);
	return rc;
}
/* File operations for the /dev/msm_pcm_in character device. */
static const struct file_operations audio_in_fops = {
	.owner		= THIS_MODULE,
	.open		= audpcm_in_open,
	.release	= audpcm_in_release,
	.read		= audpcm_in_read,
	.write		= audpcm_in_write,
	.unlocked_ioctl	= audpcm_in_ioctl,
};

/* Registered with the misc subsystem under a dynamically assigned minor. */
struct miscdevice audio_in_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "msm_pcm_in",
	.fops	= &audio_in_fops,
};
/*
 * Module init: allocate the DMA-coherent frame buffer shared with the
 * DSP, initialise the locks and wait queues of the singleton, and
 * register the misc device.
 */
static int __init audpcm_in_init(void)
{
	/* one contiguous buffer holding all FRAME_NUM frames (DMASZ bytes) */
	the_audio_in.data = dma_alloc_coherent(NULL, DMASZ,
					       &the_audio_in.phys, GFP_KERNEL);
	MM_DBG("Memory addr = 0x%8x  phy addr = 0x%8x ---- \n", \
		(int) the_audio_in.data, (int) the_audio_in.phys);

	if (!the_audio_in.data) {
		MM_ERR("Unable to allocate DMA buffer\n");
		return -ENOMEM;
	}

	mutex_init(&the_audio_in.lock);
	mutex_init(&the_audio_in.read_lock);
	spin_lock_init(&the_audio_in.dsp_lock);
	spin_lock_init(&the_audio_in.dev_lock);
	init_waitqueue_head(&the_audio_in.wait);
	init_waitqueue_head(&the_audio_in.wait_enable);

	return misc_register(&audio_in_misc);
}

device_initcall(audpcm_in_init);
|
marcOcram/Acer-Liquid-MT-Kernel
|
arch/arm/mach-msm/qdsp5v2/audio_pcm_in.c
|
C
|
gpl-2.0
| 20,064
|
/* linux/arch/arm/mach-s3c2410/mach-bast.c
*
* Copyright 2003-2008 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* http://www.simtec.co.uk/products/EB2410ITX/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/syscore_ops.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/dm9000.h>
#include <linux/ata_platform.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/serial_8250.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_data/asoc-s3c24xx_simtec.h>
#include <linux/platform_data/hwmon-s3c.h>
#include <linux/platform_data/i2c-s3c2410.h>
#include <linux/platform_data/mtd-nand-s3c2410.h>
#include <net/ax88796.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/mach-types.h>
#include <mach/fb.h>
#include <mach/hardware.h>
#include <mach/regs-gpio.h>
#include <mach/regs-lcd.h>
#include <mach/regs-mem.h>
#include <plat/clock.h>
#include <plat/cpu.h>
#include <plat/cpu-freq.h>
#include <plat/devs.h>
#include <plat/gpio-cfg.h>
#include <plat/regs-serial.h>
#include "bast.h"
#include "common.h"
#include "simtec.h"
#define COPYRIGHT ", Copyright 2004-2008 Simtec Electronics"
/* macros for virtual address mods for the io space entries */
#define VA_C5(item) ((unsigned long)(item) + BAST_VAM_CS5)
#define VA_C4(item) ((unsigned long)(item) + BAST_VAM_CS4)
#define VA_C3(item) ((unsigned long)(item) + BAST_VAM_CS3)
#define VA_C2(item) ((unsigned long)(item) + BAST_VAM_CS2)
/* macros to modify the physical addresses for io space */
#define PA_CS2(item) (__phys_to_pfn((item) + S3C2410_CS2))
#define PA_CS3(item) (__phys_to_pfn((item) + S3C2410_CS3))
#define PA_CS4(item) (__phys_to_pfn((item) + S3C2410_CS4))
#define PA_CS5(item) (__phys_to_pfn((item) + S3C2410_CS5))
/*
 * Static virtual->physical I/O mappings for the BAST board. The same
 * ISA/SuperIO regions are mapped four times, once per chip-select speed
 * and width variant (CS2/CS3 slow, CS4/CS5 fast; byte vs. word access).
 */
static struct map_desc bast_iodesc[] __initdata = {
	/* ISA IO areas */
	{
		.virtual	= (u32)S3C24XX_VA_ISA_BYTE,
		.pfn		= PA_CS2(BAST_PA_ISAIO),
		.length		= SZ_16M,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (u32)S3C24XX_VA_ISA_WORD,
		.pfn		= PA_CS3(BAST_PA_ISAIO),
		.length		= SZ_16M,
		.type		= MT_DEVICE,
	},
	/* bast CPLD control registers, and external interrupt controls */
	{
		.virtual	= (u32)BAST_VA_CTRL1,
		.pfn		= __phys_to_pfn(BAST_PA_CTRL1),
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (u32)BAST_VA_CTRL2,
		.pfn		= __phys_to_pfn(BAST_PA_CTRL2),
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (u32)BAST_VA_CTRL3,
		.pfn		= __phys_to_pfn(BAST_PA_CTRL3),
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (u32)BAST_VA_CTRL4,
		.pfn		= __phys_to_pfn(BAST_PA_CTRL4),
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	},
	/* PC104 IRQ mux */
	{
		.virtual	= (u32)BAST_VA_PC104_IRQREQ,
		.pfn		= __phys_to_pfn(BAST_PA_PC104_IRQREQ),
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (u32)BAST_VA_PC104_IRQRAW,
		.pfn		= __phys_to_pfn(BAST_PA_PC104_IRQRAW),
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (u32)BAST_VA_PC104_IRQMASK,
		.pfn		= __phys_to_pfn(BAST_PA_PC104_IRQMASK),
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	},

	/* peripheral space... one for each of fast/slow/byte/16bit */
	/* note, ide is only decoded in word space, even though some registers
	 * are only 8bit */

	/* slow, byte */
	{ VA_C2(BAST_VA_ISAIO),	 PA_CS2(BAST_PA_ISAIO),	 SZ_16M, MT_DEVICE },
	{ VA_C2(BAST_VA_ISAMEM), PA_CS2(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE },
	{ VA_C2(BAST_VA_SUPERIO), PA_CS2(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE },
	/* slow, word */
	{ VA_C3(BAST_VA_ISAIO),	 PA_CS3(BAST_PA_ISAIO),	 SZ_16M, MT_DEVICE },
	{ VA_C3(BAST_VA_ISAMEM), PA_CS3(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE },
	{ VA_C3(BAST_VA_SUPERIO), PA_CS3(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE },
	/* fast, byte */
	{ VA_C4(BAST_VA_ISAIO),	 PA_CS4(BAST_PA_ISAIO),	 SZ_16M, MT_DEVICE },
	{ VA_C4(BAST_VA_ISAMEM), PA_CS4(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE },
	{ VA_C4(BAST_VA_SUPERIO), PA_CS4(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE },
	/* fast, word */
	{ VA_C5(BAST_VA_ISAIO),	 PA_CS5(BAST_PA_ISAIO),	 SZ_16M, MT_DEVICE },
	{ VA_C5(BAST_VA_ISAMEM), PA_CS5(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE },
	{ VA_C5(BAST_VA_SUPERIO), PA_CS5(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE },
};
/*
 * Default UART register values for the on-SoC ports.
 *
 * The expansions are parenthesised so the macros compose safely in any
 * expression context; the originals expanded to a bare bitwise OR, which
 * is a precedence hazard outside a plain initializer.
 */
#define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)

static struct s3c2410_uartcfg bast_uartcfgs[] __initdata = {
	[0] = {
		.hwport	     = 0,
		.flags	     = 0,
		.ucon	     = UCON,
		.ulcon	     = ULCON,
		.ufcon	     = UFCON,
	},
	[1] = {
		.hwport	     = 1,
		.flags	     = 0,
		.ucon	     = UCON,
		.ulcon	     = ULCON,
		.ufcon	     = UFCON,
	},
	/* port 2 is not actually used */
	[2] = {
		.hwport	     = 2,
		.flags	     = 0,
		.ucon	     = UCON,
		.ulcon	     = ULCON,
		.ufcon	     = UFCON,
	}
};
/* NAND Flash on BAST board */
#ifdef CONFIG_PM
/* Drive GPA21 high over suspend so resume does not assert nRESET. */
static int bast_pm_suspend(void)
{
	/* ensure that an nRESET is not generated on resume. */
	gpio_direction_output(S3C2410_GPA(21), 1);
	return 0;
}

/* Restore GPA21 to its nRSTOUT special function after resume. */
static void bast_pm_resume(void)
{
	s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT);
}

#else
#define bast_pm_suspend NULL
#define bast_pm_resume NULL
#endif

static struct syscore_ops bast_pm_syscore_ops = {
	.suspend	= bast_pm_suspend,
	.resume		= bast_pm_resume,
};
/* Chip-select slot number for each NAND set (used by bast_nand_select). */
static int smartmedia_map[] = { 0 };
static int chip0_map[] = { 1 };
static int chip1_map[] = { 2 };
static int chip2_map[] = { 3 };

/* Default partition layout shared by all four NAND sets. */
static struct mtd_partition __initdata bast_default_nand_part[] = {
	[0] = {
		.name	= "Boot Agent",
		.size	= SZ_16K,
		.offset	= 0,
	},
	[1] = {
		.name	= "/boot",
		.size	= SZ_4M - SZ_16K,
		.offset	= SZ_16K,
	},
	[2] = {
		.name	= "user",
		.offset	= SZ_4M,
		.size	= MTDPART_SIZ_FULL,
	}
};
/* the bast has 4 selectable slots for nand-flash, the three
 * on-board chip areas, as well as the external SmartMedia
 * slot.
 *
 * Note, there is no current hot-plug support for the SmartMedia
 * socket.
*/

/*
 * NAND_SCAN_SILENT_NODEV suppresses probe errors for slots that may be
 * unpopulated; note chip0 (set [1]) is expected to be present and does
 * not carry the flag.
 */
static struct s3c2410_nand_set __initdata bast_nand_sets[] = {
	[0] = {
		.name		= "SmartMedia",
		.nr_chips	= 1,
		.nr_map		= smartmedia_map,
		.options	= NAND_SCAN_SILENT_NODEV,
		.nr_partitions	= ARRAY_SIZE(bast_default_nand_part),
		.partitions	= bast_default_nand_part,
	},
	[1] = {
		.name		= "chip0",
		.nr_chips	= 1,
		.nr_map		= chip0_map,
		.nr_partitions	= ARRAY_SIZE(bast_default_nand_part),
		.partitions	= bast_default_nand_part,
	},
	[2] = {
		.name		= "chip1",
		.nr_chips	= 1,
		.nr_map		= chip1_map,
		.options	= NAND_SCAN_SILENT_NODEV,
		.nr_partitions	= ARRAY_SIZE(bast_default_nand_part),
		.partitions	= bast_default_nand_part,
	},
	[3] = {
		.name		= "chip2",
		.nr_chips	= 1,
		.nr_map		= chip2_map,
		.options	= NAND_SCAN_SILENT_NODEV,
		.nr_partitions	= ARRAY_SIZE(bast_default_nand_part),
		.partitions	= bast_default_nand_part,
	}
};
/*
 * Route the NAND chip-select lines in the CPLD CTRL2 register to the
 * requested slot, preserving the IDE-reset bit and keeping write
 * enable (WNAND) asserted.
 */
static void bast_nand_select(struct s3c2410_nand_set *set, int slot)
{
	unsigned int ctrl2;

	slot = set->nr_map[slot] & 3;

	pr_debug("bast_nand: selecting slot %d (set %p,%p)\n",
		 slot, set, set->nr_map);

	/* read-modify-write: keep only the IDE reset bit, then set the
	 * slot number and the NAND write-enable bit */
	ctrl2 = __raw_readb(BAST_VA_CTRL2);
	ctrl2 &= BAST_CPLD_CTLR2_IDERST;
	ctrl2 |= slot;
	ctrl2 |= BAST_CPLD_CTRL2_WNAND;

	pr_debug("bast_nand: ctrl2 now %02x\n", ctrl2);

	__raw_writeb(ctrl2, BAST_VA_CTRL2);
}
/* NAND controller timings (ns) and the slot-select callback. */
static struct s3c2410_platform_nand __initdata bast_nand_info = {
	.tacls		= 30,
	.twrph0		= 60,
	.twrph1		= 60,
	.nr_sets	= ARRAY_SIZE(bast_nand_sets),
	.sets		= bast_nand_sets,
	.select_chip	= bast_nand_select,
};
/* DM9000 */
/* Address/data windows on CS5 plus a high-level-triggered IRQ. */
static struct resource bast_dm9k_resource[] = {
	[0] = DEFINE_RES_MEM(S3C2410_CS5 + BAST_PA_DM9000, 4),
	[1] = DEFINE_RES_MEM(S3C2410_CS5 + BAST_PA_DM9000 + 0x40, 0x40),
	[2] = DEFINE_RES_NAMED(BAST_IRQ_DM9000 , 1, NULL, IORESOURCE_IRQ \
					| IORESOURCE_IRQ_HIGHLEVEL),
};

/* for the moment we limit ourselves to 16bit IO until some
 * better IO routines can be written and tested
*/

static struct dm9000_plat_data bast_dm9k_platdata = {
	.flags		= DM9000_PLATF_16BITONLY,
};

static struct platform_device bast_device_dm9k = {
	.name		= "dm9000",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(bast_dm9k_resource),
	.resource	= bast_dm9k_resource,
	.dev		= {
		.platform_data = &bast_dm9k_platdata,
	}
};
/* serial devices */

/* Two PC-style 16550 UARTs behind the SuperIO on CS2. */
#define SERIAL_BASE  (S3C2410_CS2 + BAST_PA_SUPERIO)
#define SERIAL_FLAGS (UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_SHARE_IRQ)

#define SERIAL_CLK  (1843200)	/* standard 1.8432 MHz UART clock */

static struct plat_serial8250_port bast_sio_data[] = {
	[0] = {
		.mapbase	= SERIAL_BASE + 0x2f8,
		.irq		= BAST_IRQ_PCSERIAL1,
		.flags		= SERIAL_FLAGS,
		.iotype		= UPIO_MEM,
		.regshift	= 0,
		.uartclk	= SERIAL_CLK,
	},
	[1] = {
		.mapbase	= SERIAL_BASE + 0x3f8,
		.irq		= BAST_IRQ_PCSERIAL2,
		.flags		= SERIAL_FLAGS,
		.iotype		= UPIO_MEM,
		.regshift	= 0,
		.uartclk	= SERIAL_CLK,
	},
	{ }	/* sentinel terminating the 8250 port list */
};

static struct platform_device bast_sio = {
	.name			= "serial8250",
	.id			= PLAT8250_DEV_PLATFORM,
	.dev			= {
		.platform_data	= &bast_sio_data,
	},
};
/* we have devices on the bus which cannot work much over the
 * standard 100KHz i2c bus frequency
*/

static struct s3c2410_platform_i2c __initdata bast_i2c_info = {
	.flags		= 0,
	.slave_addr	= 0x10,
	.frequency	= 100*1000,	/* capped at 100 kHz, see above */
};
/* Asix AX88796 10/100 ethernet controller */

static struct ax_plat_data bast_asix_platdata = {
	.flags		= AXFLG_MAC_FROMDEV,	/* read MAC from the chip */
	.wordlength	= 2,
	.dcr_val	= 0x48,
	.rcr_val	= 0x40,
};

/* register window, data port and IRQ on CS5 (registers 0x20 apart) */
static struct resource bast_asix_resource[] = {
	[0] = DEFINE_RES_MEM(S3C2410_CS5 + BAST_PA_ASIXNET, 0x18 * 0x20),
	[1] = DEFINE_RES_MEM(S3C2410_CS5 + BAST_PA_ASIXNET + (0x1f * 0x20), 1),
	[2] = DEFINE_RES_IRQ(BAST_IRQ_ASIX),
};

static struct platform_device bast_device_asix = {
	.name		= "ax88796",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(bast_asix_resource),
	.resource	= bast_asix_resource,
	.dev		= {
		.platform_data = &bast_asix_platdata
	}
};

/* Asix AX88796 10/100 ethernet controller parallel port */

static struct resource bast_asixpp_resource[] = {
	[0] = DEFINE_RES_MEM(S3C2410_CS5 + BAST_PA_ASIXNET + (0x18 * 0x20), \
			     0x30 * 0x20),
};

static struct platform_device bast_device_axpp = {
	.name		= "ax88796-pp",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(bast_asixpp_resource),
	.resource	= bast_asixpp_resource,
};
/* LCD/VGA controller */

/* Three 640x480 TFT modes differing only in bits-per-pixel (4/8/16). */
static struct s3c2410fb_display __initdata bast_lcd_info[] = {
	{
		.type		= S3C2410_LCDCON1_TFT,
		.width		= 640,
		.height		= 480,

		.pixclock	= 33333,
		.xres		= 640,
		.yres		= 480,
		.bpp		= 4,
		.left_margin	= 40,
		.right_margin	= 20,
		.hsync_len	= 88,
		.upper_margin	= 30,
		.lower_margin	= 32,
		.vsync_len	= 3,

		.lcdcon5	= 0x00014b02,
	},
	{
		.type		= S3C2410_LCDCON1_TFT,
		.width		= 640,
		.height		= 480,

		.pixclock	= 33333,
		.xres		= 640,
		.yres		= 480,
		.bpp		= 8,
		.left_margin	= 40,
		.right_margin	= 20,
		.hsync_len	= 88,
		.upper_margin	= 30,
		.lower_margin	= 32,
		.vsync_len	= 3,

		.lcdcon5	= 0x00014b02,
	},
	{
		.type		= S3C2410_LCDCON1_TFT,
		.width		= 640,
		.height		= 480,

		.pixclock	= 33333,
		.xres		= 640,
		.yres		= 480,
		.bpp		= 16,
		.left_margin	= 40,
		.right_margin	= 20,
		.hsync_len	= 88,
		.upper_margin	= 30,
		.lower_margin	= 32,
		.vsync_len	= 3,

		.lcdcon5	= 0x00014b02,
	},
};

/* LCD/VGA controller */
static struct s3c2410fb_mach_info __initdata bast_fb_info = {

	.displays = bast_lcd_info,
	.num_displays = ARRAY_SIZE(bast_lcd_info),
	.default_display = 1,	/* boot with the 8bpp mode */
};
/* I2C devices fitted. */
static struct i2c_board_info bast_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("tlv320aic23", 0x1a), /* audio codec */
	}, {
		I2C_BOARD_INFO("simtec-pmu", 0x6b),  /* power management unit */
	}, {
		I2C_BOARD_INFO("ch7013", 0x75),      /* video encoder */
	},
};

/*
 * ADC channel -> hwmon mapping.  mult/div scale the raw reading to
 * millivolts against the 3.3V reference.
 */
static struct s3c_hwmon_pdata bast_hwmon_info = {
	/* LCD contrast (0-6.6V) */
	.in[0] = &(struct s3c_hwmon_chcfg) {
		.name = "lcd-contrast",
		.mult = 3300,
		.div = 512,
	},
	/* LED current feedback */
	.in[1] = &(struct s3c_hwmon_chcfg) {
		.name = "led-feedback",
		.mult = 3300,
		.div = 1024,
	},
	/* LCD feedback (0-6.6V) */
	.in[2] = &(struct s3c_hwmon_chcfg) {
		.name = "lcd-feedback",
		.mult = 3300,
		.div = 512,
	},
	/* Vcore (1.8-2.0V), Vref 3.3V */
	.in[3] = &(struct s3c_hwmon_chcfg) {
		.name = "vcore",
		.mult = 3300,
		.div = 1024,
	},
};
/* Standard BAST devices */

/*
 * Platform devices registered unconditionally by bast_init().  The ADC
 * channels above appear via hwmon, e.g.
 * /sys/devices/platform/s3c24xx-adc/s3c-hwmon/in_0
 */
static struct platform_device *bast_devices[] __initdata = {
	&s3c_device_ohci,
	&s3c_device_lcd,
	&s3c_device_wdt,
	&s3c_device_i2c0,
	&s3c_device_rtc,
	&s3c_device_nand,
	&s3c_device_adc,
	&s3c_device_hwmon,
	&bast_device_dm9k,
	&bast_device_asix,
	&bast_device_axpp,
	&bast_sio,
};

/* Board clocks whose parents/rates are fixed up in bast_map_io(). */
static struct clk *bast_clocks[] __initdata = {
	&s3c24xx_dclk0,
	&s3c24xx_dclk1,
	&s3c24xx_clkout0,
	&s3c24xx_clkout1,
	&s3c24xx_uclk,
};

static struct s3c_cpufreq_board __initdata bast_cpufreq = {
	.refresh = 7800, /* 7.8usec */
	.auto_io = 1,
	.need_io = 1,
};

/* Simtec audio glue: board has both a microphone and line-out. */
static struct s3c24xx_audio_simtec_pdata __initdata bast_audio = {
	.have_mic = 1,
	.have_lout = 1,
};
/*
 * bast_map_io - early machine setup.
 *
 * Wires up the DCLK/CLKOUT/UCLK clock tree, registers the hwmon
 * platform data, and establishes the static IO mappings, system clocks
 * and UART configuration.  Runs before bast_init().
 */
static void __init bast_map_io(void)
{
	/* initialise the clocks */
	s3c24xx_dclk0.parent = &clk_upll;
	s3c24xx_dclk0.rate = 12*1000*1000;
	s3c24xx_dclk1.parent = &clk_upll;
	s3c24xx_dclk1.rate = 24*1000*1000;
	/* CLKOUT pins mirror the DCLKs; UCLK is driven from CLKOUT1 */
	s3c24xx_clkout0.parent = &s3c24xx_dclk0;
	s3c24xx_clkout1.parent = &s3c24xx_dclk1;
	s3c24xx_uclk.parent = &s3c24xx_clkout1;
	s3c24xx_register_clocks(bast_clocks, ARRAY_SIZE(bast_clocks));
	s3c_hwmon_set_platdata(&bast_hwmon_info);
	/* IO mappings must be in place before clock/UART init */
	s3c24xx_init_io(bast_iodesc, ARRAY_SIZE(bast_iodesc));
	s3c24xx_init_clocks(0);
	s3c24xx_init_uarts(bast_uartcfgs, ARRAY_SIZE(bast_uartcfgs));
}
/*
 * bast_init - machine init callback.
 *
 * Registers PM hooks and per-device platform data, adds the standard
 * platform devices, announces the fitted I2C devices, and brings up the
 * Simtec USB/NOR/audio helpers and cpufreq board data.
 */
static void __init bast_init(void)
{
	register_syscore_ops(&bast_pm_syscore_ops);
	s3c_i2c0_set_platdata(&bast_i2c_info);
	s3c_nand_set_platdata(&bast_nand_info);
	s3c24xx_fb_set_platdata(&bast_fb_info);
	platform_add_devices(bast_devices, ARRAY_SIZE(bast_devices));
	i2c_register_board_info(0, bast_i2c_devs,
				ARRAY_SIZE(bast_i2c_devs));
	usb_simtec_init();
	nor_simtec_init();
	simtec_audio_add(NULL, true, &bast_audio);
	/* hold the nRESET GPIO so nothing else can claim/toggle it */
	WARN_ON(gpio_request(S3C2410_GPA(21), "bast nreset"));
	s3c_cpufreq_setboard(&bast_cpufreq);
}
/* Machine descriptor binding the BAST machine ID to its init hooks. */
MACHINE_START(BAST, "Simtec-BAST")
	/* Maintainer: Ben Dooks <ben@simtec.co.uk> */
	.atag_offset = 0x100,
	.map_io = bast_map_io,
	.init_irq = s3c24xx_init_irq,
	.init_machine = bast_init,
	.init_time = s3c24xx_timer_init,
	.restart = s3c2410_restart,
MACHINE_END
|
Oleh-Kravchenko/asusp535
|
arch/arm/mach-s3c24xx/mach-bast.c
|
C
|
gpl-2.0
| 14,795
|
/*
* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/regulator/consumer.h>
#include <asm/mach-types.h>
#include <asm/cpu.h>
#include <mach/board.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <mach/msm-krait-l2-accessors.h>
#include <mach/rpm-regulator.h>
#include <mach/rpm-regulator-smd.h>
#include <mach/msm_bus.h>
#include <mach/msm_dcvs.h>
#include "acpuclock.h"
#include "acpuclock-krait.h"
#include "avs.h"
#ifdef CONFIG_SEC_DEBUG_DCVS_LOG
#include <mach/sec_debug.h>
#endif
/* MUX source selects. */
#define PRI_SRC_SEL_SEC_SRC 0
#define PRI_SRC_SEL_HFPLL 1
#define PRI_SRC_SEL_HFPLL_DIV2 2
/* Secondary-source clock gate bit, toggled for the 8064 errata below. */
#define SECCLKAGD BIT(4)

#ifdef CONFIG_SEC_DEBUG_SUBSYS
/* Fuse/bin values exported for the Samsung debug subsystem. */
int boost_uv;
int speed_bin;
int pvs_bin;
#endif

/* Serialises full set_rate operations from cpufreq/hotplug contexts. */
static DEFINE_MUTEX(driver_lock);
/* Makes the L2 vote computation and L2 speed switch atomic together. */
static DEFINE_SPINLOCK(l2_lock);

/* All driver state, populated from acpuclk_krait_params in drv_data_init(). */
static struct drv_data {
	struct acpu_level *acpu_freq_tbl;      /* CPU freq/voltage levels */
	const struct l2_level *l2_freq_tbl;    /* L2 freq/voltage/bw levels */
	struct scalable *scalable;             /* per-CPU and L2 domains */
	struct hfpll_data *hfpll_data;         /* HFPLL register layout/limits */
	u32 bus_perf_client;                   /* bus-scaling client handle */
	struct msm_bus_scale_pdata *bus_scale;
	int boost_uv;                          /* per-chip PVS boost (uV) */
	struct device *dev;
} drv;
static unsigned long acpuclk_krait_get_rate(int cpu)
{
return drv.scalable[cpu].cur_speed->khz;
}
/*
 * Select a source on the primary MUX.
 *
 * The select lives in bits [1:0] of the per-domain CP15/L2 indirect
 * register; read-modify-write it and then wait for the switch to settle.
 */
static void set_pri_clk_src(struct scalable *sc, u32 pri_src_sel)
{
	u32 regval;

	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~0x3;
	regval |= (pri_src_sel & 0x3);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
	/* Wait for switch to complete. */
	mb();
	udelay(1);
}
/*
 * Select a source on the secondary MUX (select bits [3:2]).
 *
 * The sequence is deliberate: the 8064 errata requires the sec_src
 * clock gate to be disabled around the MUX change.
 */
static void __cpuinit set_sec_clk_src(struct scalable *sc, u32 sec_src_sel)
{
	u32 regval;

	/* 8064 Errata: disable sec_src clock gating during switch. */
	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval |= SECCLKAGD;
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
	/* Program the MUX */
	regval &= ~(0x3 << 2);
	regval |= ((sec_src_sel & 0x3) << 2);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
	/* 8064 Errata: re-enabled sec_src clock gating. */
	regval &= ~SECCLKAGD;
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
	/* Wait for switch to complete. */
	mb();
	udelay(1);
}
/*
 * Enable the RPM-managed supply behind @vreg, if one is attached.
 * Returns 0 on success (or when no supply exists), negative errno on
 * failure (also logged).
 */
static int enable_rpm_vreg(struct vreg *vreg)
{
	int rc;

	if (!vreg->rpm_reg)
		return 0;

	rc = rpm_regulator_enable(vreg->rpm_reg);
	if (rc)
		dev_err(drv.dev, "%s regulator enable failed (%d)\n",
			vreg->name, rc);
	return rc;
}
/* Disable the RPM-managed supply behind @vreg; failures are only logged. */
static void disable_rpm_vreg(struct vreg *vreg)
{
	int ret;

	if (!vreg->rpm_reg)
		return;

	ret = rpm_regulator_disable(vreg->rpm_reg);
	if (ret)
		dev_err(drv.dev, "%s regulator disable failed (%d)\n",
			vreg->name, ret);
}
/*
 * Enable an already-configured HFPLL.
 *
 * The bypass-disable -> reset-deassert -> output-enable sequence and the
 * delays between the steps are hardware requirements; do not reorder.
 * @skip_regulators: leave the HFPLL supplies alone (atomic/PC callers).
 */
static void hfpll_enable(struct scalable *sc, bool skip_regulators)
{
	if (!skip_regulators) {
		/* Enable regulators required by the HFPLL. */
		enable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
		enable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
	}

	/* Disable PLL bypass mode. */
	writel_relaxed(0x2, sc->hfpll_base + drv.hfpll_data->mode_offset);
	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);
	/* De-assert active-low PLL reset. */
	writel_relaxed(0x6, sc->hfpll_base + drv.hfpll_data->mode_offset);
	/* Wait for PLL to lock. */
	mb();
	udelay(60);
	/* Enable PLL output. */
	writel_relaxed(0x7, sc->hfpll_base + drv.hfpll_data->mode_offset);
}
/*
 * Disable a HFPLL for power-savings or while it's being reprogrammed.
 * @skip_regulators mirrors hfpll_enable(): supplies are dropped only on
 * non-atomic paths.
 */
static void hfpll_disable(struct scalable *sc, bool skip_regulators)
{
	/*
	 * Disable the PLL output, disable test mode, enable the bypass mode,
	 * and assert the reset.
	 */
	writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->mode_offset);

	if (!skip_regulators) {
		/* Remove voltage votes required by the HFPLL. */
		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
	}
}
/*
 * Program the HFPLL rate. Assumes HFPLL is already disabled.
 *
 * Writes the L-value, and on PLLs with a user register also selects the
 * low/high VCO range based on the low_vco_l_max threshold.
 */
static void hfpll_set_rate(struct scalable *sc, const struct core_speed *tgt_s)
{
	void __iomem *base = sc->hfpll_base;
	u32 regval;

	writel_relaxed(tgt_s->pll_l_val, base + drv.hfpll_data->l_offset);

	if (drv.hfpll_data->has_user_reg) {
		regval = readl_relaxed(base + drv.hfpll_data->user_offset);
		if (tgt_s->pll_l_val <= drv.hfpll_data->low_vco_l_max)
			regval &= ~drv.hfpll_data->user_vco_mask;
		else
			regval |= drv.hfpll_data->user_vco_mask;
		writel_relaxed(regval, base + drv.hfpll_data->user_offset);
	}
}
/* Return the L2 speed that should be applied. */
static unsigned int compute_l2_level(struct scalable *sc, unsigned int vote_l)
{
unsigned int new_l = 0;
int cpu;
/* Find max L2 speed vote. */
sc->l2_vote = vote_l;
for_each_present_cpu(cpu)
new_l = max(new_l, drv.scalable[cpu].l2_vote);
return new_l;
}
/* Update the bus bandwidth request. May sleep; failures are logged only. */
static void set_bus_bw(unsigned int bw)
{
	int rc;

	rc = msm_bus_scale_client_update_request(drv.bus_perf_client, bw);
	if (rc)
		dev_err(drv.dev, "bandwidth request failed (%d)\n", rc);
}
/*
 * Set the CPU or L2 clock speed.
 *
 * An HFPLL -> HFPLL move must first park the core on the always-on
 * secondary source, because the PLL cannot be reprogrammed while it is
 * driving the core.  @skip_regulators is forwarded to the HFPLL
 * enable/disable helpers for atomic callers.
 */
static void set_speed(struct scalable *sc, const struct core_speed *tgt_s,
		      bool skip_regulators)
{
	const struct core_speed *strt_s = sc->cur_speed;

	if (strt_s == tgt_s)
		return;

	if (strt_s->src == HFPLL && tgt_s->src == HFPLL) {
		/*
		 * Move to an always-on source running at a frequency
		 * that does not require an elevated CPU voltage.
		 */
		set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);

		/* Re-program HFPLL. */
		hfpll_disable(sc, true);
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc, true);

		/* Move to HFPLL. */
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
	} else if (strt_s->src == HFPLL && tgt_s->src != HFPLL) {
		/* Leave HFPLL: switch first, then power the PLL down. */
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
		hfpll_disable(sc, skip_regulators);
	} else if (strt_s->src != HFPLL && tgt_s->src == HFPLL) {
		/* Enter HFPLL: bring the PLL up before switching to it. */
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc, skip_regulators);
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
	}

	sc->cur_speed = tgt_s;
}
/* Voltage/current targets computed for one set_rate operation. */
struct vdd_data {
	int vdd_mem;  /* memory rail (uV) */
	int vdd_dig;  /* digital rail (uV) */
	int vdd_core; /* per-CPU core rail (uV) */
	int ua_core;  /* core current request (uA) */
};
/*
 * Apply any per-cpu voltage increases.
 *
 * Ordering matters: vdd_mem must always be >= vdd_dig, so mem is raised
 * first.  Returns 0 or a negative errno; on failure, increases already
 * applied are left in place.
 */
static int increase_vdd(int cpu, struct vdd_data *data,
			enum setrate_reason reason)
{
	struct scalable *sc = &drv.scalable[cpu];
	int rc;

	/*
	 * Increase vdd_mem active-set before vdd_dig.
	 * vdd_mem should be >= vdd_dig.
	 */
	if (data->vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
		rc = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
				data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_mem (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
	}

	/* Increase vdd_dig active-set vote. */
	if (data->vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
		rc = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
				data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_dig (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
	}

	/* Increase current request. */
	if (data->ua_core > sc->vreg[VREG_CORE].cur_ua) {
		rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						data->ua_core);
		if (rc < 0) {
			dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
				sc->vreg[VREG_CORE].name, rc);
			return rc;
		}
		sc->vreg[VREG_CORE].cur_ua = data->ua_core;
	}

	/*
	 * Update per-CPU core voltage. Don't do this for the hotplug path for
	 * which it should already be correct. Attempting to set it is bad
	 * because we don't know what CPU we are running on at this point, but
	 * the CPU regulator API requires we call it from the affected CPU.
	 */
	if (data->vdd_core > sc->vreg[VREG_CORE].cur_vdd
			&& reason != SETRATE_HOTPLUG) {
		rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
				data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_core (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
	}

	return 0;
}
/*
 * Apply any per-cpu voltage decreases.
 *
 * Mirror image of increase_vdd(): core first, then dig, then mem, so the
 * vdd_mem >= vdd_dig invariant holds throughout.  Failures abort the
 * remaining decreases (rails are merely left higher than needed).
 */
static void decrease_vdd(int cpu, struct vdd_data *data,
			 enum setrate_reason reason)
{
	struct scalable *sc = &drv.scalable[cpu];
	int ret;

	/*
	 * Update per-CPU core voltage. This must be called on the CPU
	 * that's being affected. Don't do this in the hotplug remove path,
	 * where the rail is off and we're executing on the other CPU.
	 */
	if (data->vdd_core < sc->vreg[VREG_CORE].cur_vdd
			&& reason != SETRATE_HOTPLUG) {
		ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
				data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_core (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
	}

	/* Decrease current request. */
	if (data->ua_core < sc->vreg[VREG_CORE].cur_ua) {
		ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						 data->ua_core);
		if (ret < 0) {
			dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
				sc->vreg[VREG_CORE].name, ret);
			return;
		}
		sc->vreg[VREG_CORE].cur_ua = data->ua_core;
	}

	/* Decrease vdd_dig active-set vote. */
	if (data->vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
		ret = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
				data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_dig (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
	}

	/*
	 * Decrease vdd_mem active-set after vdd_dig.
	 * vdd_mem should be >= vdd_dig.
	 */
	if (data->vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
		ret = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
				data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_mem (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
	}
}
static int calculate_vdd_mem(const struct acpu_level *tgt)
{
return drv.l2_freq_tbl[tgt->l2_level].vdd_mem;
}
/*
 * Digital-rail voltage a clock source needs: none for non-HFPLL sources,
 * otherwise tiered by the PLL L-value against the low/nominal thresholds.
 */
static int get_src_dig(const struct core_speed *s)
{
	const int *hfpll_vdd = drv.hfpll_data->vdd;

	if (s->src != HFPLL)
		return hfpll_vdd[HFPLL_VDD_NONE];
	if (s->pll_l_val > drv.hfpll_data->nom_vdd_l_max)
		return hfpll_vdd[HFPLL_VDD_HIGH];
	if (s->pll_l_val > drv.hfpll_data->low_vdd_l_max)
		return hfpll_vdd[HFPLL_VDD_NOM];
	return hfpll_vdd[HFPLL_VDD_LOW];
}
static int calculate_vdd_dig(const struct acpu_level *tgt)
{
int l2_pll_vdd_dig, cpu_pll_vdd_dig;
l2_pll_vdd_dig = get_src_dig(&drv.l2_freq_tbl[tgt->l2_level].speed);
cpu_pll_vdd_dig = get_src_dig(&tgt->speed);
return max(drv.l2_freq_tbl[tgt->l2_level].vdd_dig,
max(l2_pll_vdd_dig, cpu_pll_vdd_dig));
}
/* Runtime-togglable switch for the per-chip PVS boost voltage. */
static bool enable_boost = true;
module_param_named(boost, enable_boost, bool, S_IRUGO | S_IWUSR);

/* Core-rail voltage for @tgt, plus the PVS boost when enabled. */
static int calculate_vdd_core(const struct acpu_level *tgt)
{
	return tgt->vdd_core + (enable_boost ? drv.boost_uv : 0);
}
/* Protects the L2 HFPLL supply refcount below. */
static DEFINE_MUTEX(l2_regulator_lock);
/* Number of CPUs currently requiring the L2's HFPLL supplies. */
static int l2_vreg_count;

/*
 * Take a reference on the L2 HFPLL supplies, physically enabling them on
 * the 0 -> 1 transition.  Returns 0 on success; on failure the count is
 * left unchanged.
 */
static int enable_l2_regulators(void)
{
	int ret = 0;

	mutex_lock(&l2_regulator_lock);
	if (l2_vreg_count == 0) {
		ret = enable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
		if (ret)
			goto out;
		ret = enable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_B]);
		if (ret) {
			/* Roll back A so the pair stays balanced. */
			disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
			goto out;
		}
	}
	l2_vreg_count++;
out:
	mutex_unlock(&l2_regulator_lock);
	return ret;
}
/*
 * Drop a reference on the L2 HFPLL supplies, physically disabling them
 * on the 1 -> 0 transition.  Warns (and bails) on unbalanced calls.
 */
static void disable_l2_regulators(void)
{
	mutex_lock(&l2_regulator_lock);

	if (WARN(!l2_vreg_count, "L2 regulator votes are unbalanced!"))
		goto out;

	if (l2_vreg_count == 1) {
		disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_B]);
		disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
	}
	l2_vreg_count--;
out:
	mutex_unlock(&l2_regulator_lock);
}
/* Runtime-tunable offset (uV) added to every computed core voltage. */
static int minus_vc;
module_param_named(
	mclk, minus_vc, int, S_IRUGO | S_IWUSR | S_IWGRP
);
/* Set the CPU's clock rate and adjust the L2 rate, voltage and BW requests. */
static int acpuclk_krait_set_rate(int cpu, unsigned long rate,
enum setrate_reason reason)
{
const struct core_speed *strt_acpu_s, *tgt_acpu_s;
const struct acpu_level *tgt;
int tgt_l2_l;
enum src_id prev_l2_src = NUM_SRC_ID;
struct vdd_data vdd_data;
bool skip_regulators;
int rc = 0;
if (cpu > num_possible_cpus())
return -EINVAL;
if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
mutex_lock(&driver_lock);
strt_acpu_s = drv.scalable[cpu].cur_speed;
/* Return early if rate didn't change. */
if (rate == strt_acpu_s->khz)
goto out;
/* Find target frequency. */
for (tgt = drv.acpu_freq_tbl; tgt->speed.khz != 0; tgt++) {
if (tgt->speed.khz == rate) {
tgt_acpu_s = &tgt->speed;
break;
}
}
if (tgt->speed.khz == 0) {
rc = -EINVAL;
goto out;
}
/* Calculate voltage requirements for the current CPU. */
vdd_data.vdd_mem = calculate_vdd_mem(tgt);
vdd_data.vdd_dig = calculate_vdd_dig(tgt);
vdd_data.vdd_core = calculate_vdd_core(tgt) + minus_vc;
vdd_data.ua_core = tgt->ua_core;
/* Disable AVS before voltage switch */
if (reason == SETRATE_CPUFREQ && drv.scalable[cpu].avs_enabled) {
AVS_DISABLE(cpu);
drv.scalable[cpu].avs_enabled = false;
}
/* Increase VDD levels if needed. */
if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) {
rc = increase_vdd(cpu, &vdd_data, reason);
if (rc)
goto out;
prev_l2_src =
drv.l2_freq_tbl[drv.scalable[cpu].l2_vote].speed.src;
/* Vote for the L2 regulators here if necessary. */
if (drv.l2_freq_tbl[tgt->l2_level].speed.src == HFPLL) {
rc = enable_l2_regulators();
if (rc)
goto out;
}
}
dev_dbg(drv.dev, "Switching from ACPU%d rate %lu KHz -> %lu KHz\n",
cpu, strt_acpu_s->khz, tgt_acpu_s->khz);
/*
* If we are setting the rate as part of power collapse or in the resume
* path after power collapse, skip the vote for the HFPLL regulators,
* which are active-set-only votes that will be removed when apps enters
* its sleep set. This is needed to avoid voting for regulators with
* sleeping APIs from an atomic context.
*/
skip_regulators = (reason == SETRATE_PC);
#ifdef CONFIG_SEC_DEBUG_DCVS_LOG
sec_debug_dcvs_log(cpu, strt_acpu_s->khz, tgt_acpu_s->khz);
#endif
/* Set the new CPU speed. */
set_speed(&drv.scalable[cpu], tgt_acpu_s, skip_regulators);
/*
* Update the L2 vote and apply the rate change. A spinlock is
* necessary to ensure L2 rate is calculated and set atomically
* with the CPU frequency, even if acpuclk_krait_set_rate() is
* called from an atomic context and the driver_lock mutex is not
* acquired.
*/
spin_lock(&l2_lock);
tgt_l2_l = compute_l2_level(&drv.scalable[cpu], tgt->l2_level);
set_speed(&drv.scalable[L2],
&drv.l2_freq_tbl[tgt_l2_l].speed, true);
spin_unlock(&l2_lock);
/* Nothing else to do for power collapse or SWFI. */
if (reason == SETRATE_PC || reason == SETRATE_SWFI)
goto out;
/*
* Remove the vote for the L2 HFPLL regulators only if the L2
* was already on an HFPLL source.
*/
if (prev_l2_src == HFPLL)
disable_l2_regulators();
/* Update bus bandwith request. */
set_bus_bw(drv.l2_freq_tbl[tgt_l2_l].bw_level);
/* Drop VDD levels if we can. */
decrease_vdd(cpu, &vdd_data, reason);
/* Re-enable AVS */
if (reason == SETRATE_CPUFREQ && tgt->avsdscr_setting) {
AVS_ENABLE(cpu, tgt->avsdscr_setting);
drv.scalable[cpu].avs_enabled = true;
}
dev_dbg(drv.dev, "ACPU%d speed change complete\n", cpu);
out:
if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
mutex_unlock(&driver_lock);
return rc;
}
/* Ops + rates exposed to the generic acpuclock layer. */
static struct acpuclk_data acpuclk_krait_data = {
	.set_rate = acpuclk_krait_set_rate,
	.get_rate = acpuclk_krait_get_rate,
};
/*
 * Initialize a HFPLL at a given rate and enable it.
 *
 * The PLL is held disabled while its config/M/N (integer mode: M=0, N=1),
 * optional user and droop registers, and initial rate are programmed.
 * Note: despite the comment, this leaves the PLL programmed but not
 * re-enabled — callers enable it when switching onto it.
 */
static void __cpuinit hfpll_init(struct scalable *sc,
				 const struct core_speed *tgt_s)
{
	dev_dbg(drv.dev, "Initializing HFPLL%d\n", sc - drv.scalable);

	/* Disable the PLL for re-programming. */
	hfpll_disable(sc, true);

	/* Configure PLL parameters for integer mode. */
	writel_relaxed(drv.hfpll_data->config_val,
		       sc->hfpll_base + drv.hfpll_data->config_offset);
	writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->m_offset);
	writel_relaxed(1, sc->hfpll_base + drv.hfpll_data->n_offset);
	if (drv.hfpll_data->has_user_reg)
		writel_relaxed(drv.hfpll_data->user_val,
			       sc->hfpll_base + drv.hfpll_data->user_offset);

	/* Program droop controller, if supported */
	if (drv.hfpll_data->has_droop_ctl)
		writel_relaxed(drv.hfpll_data->droop_val,
			       sc->hfpll_base + drv.hfpll_data->droop_offset);

	/* Set an initial PLL rate. */
	hfpll_set_rate(sc, tgt_s);
}
/*
 * Acquire an RPM regulator for @vreg, set its initial voltage @vdd, and
 * optionally @enable it.  A vreg with no name is silently skipped
 * (returns 0).  On error the regulator handle is released again.
 */
static int __cpuinit rpm_regulator_init(struct scalable *sc, enum vregs vreg,
					int vdd, bool enable)
{
	int ret;

	if (!sc->vreg[vreg].name)
		return 0;

	sc->vreg[vreg].rpm_reg = rpm_regulator_get(drv.dev,
						   sc->vreg[vreg].name);
	if (IS_ERR(sc->vreg[vreg].rpm_reg)) {
		ret = PTR_ERR(sc->vreg[vreg].rpm_reg);
		dev_err(drv.dev, "rpm_regulator_get(%s) failed (%d)\n",
			sc->vreg[vreg].name, ret);
		goto err_get;
	}

	ret = rpm_regulator_set_voltage(sc->vreg[vreg].rpm_reg, vdd,
					sc->vreg[vreg].max_vdd);
	if (ret) {
		dev_err(drv.dev, "%s initialization failed (%d)\n",
			sc->vreg[vreg].name, ret);
		goto err_conf;
	}
	sc->vreg[vreg].cur_vdd = vdd;

	if (enable) {
		ret = enable_rpm_vreg(&sc->vreg[vreg]);
		if (ret)
			goto err_conf;
	}

	return 0;

err_conf:
	rpm_regulator_put(sc->vreg[vreg].rpm_reg);
err_get:
	return ret;
}
/* Release an RPM regulator acquired by rpm_regulator_init(); no-op if unset. */
static void __cpuinit rpm_regulator_cleanup(struct scalable *sc,
					    enum vregs vreg)
{
	struct vreg *v = &sc->vreg[vreg];

	if (!v->rpm_reg)
		return;

	disable_rpm_vreg(v);
	rpm_regulator_put(v->rpm_reg);
}
/*
 * Voltage regulator initialization for one CPU.
 *
 * Brings up MEM and DIG (enabled), the two HFPLL supplies (configured
 * but left off), and the per-CPU core regulator with the current/voltage
 * implied by @acpu_level.  Unwinds everything via the goto chain on
 * failure; returns 0 or a negative errno.
 */
static int __cpuinit regulator_init(struct scalable *sc,
				    const struct acpu_level *acpu_level)
{
	int ret, vdd_mem, vdd_dig, vdd_core;

	vdd_mem = calculate_vdd_mem(acpu_level);
	ret = rpm_regulator_init(sc, VREG_MEM, vdd_mem, true);
	if (ret)
		goto err_mem;

	vdd_dig = calculate_vdd_dig(acpu_level);
	ret = rpm_regulator_init(sc, VREG_DIG, vdd_dig, true);
	if (ret)
		goto err_dig;

	ret = rpm_regulator_init(sc, VREG_HFPLL_A,
				 sc->vreg[VREG_HFPLL_A].max_vdd, false);
	if (ret)
		goto err_hfpll_a;
	ret = rpm_regulator_init(sc, VREG_HFPLL_B,
				 sc->vreg[VREG_HFPLL_B].max_vdd, false);
	if (ret)
		goto err_hfpll_b;

	/* Setup Krait CPU regulators and initial core voltage. */
	sc->vreg[VREG_CORE].reg = regulator_get(drv.dev,
						sc->vreg[VREG_CORE].name);
	if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
		ret = PTR_ERR(sc->vreg[VREG_CORE].reg);
		dev_err(drv.dev, "regulator_get(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_get;
	}
	ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
					 acpu_level->ua_core);
	if (ret < 0) {
		dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}
	sc->vreg[VREG_CORE].cur_ua = acpu_level->ua_core;
	vdd_core = calculate_vdd_core(acpu_level);
	ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
				    sc->vreg[VREG_CORE].max_vdd);
	if (ret) {
		dev_err(drv.dev, "regulator_set_voltage(%s) (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}
	sc->vreg[VREG_CORE].cur_vdd = vdd_core;
	ret = regulator_enable(sc->vreg[VREG_CORE].reg);
	if (ret) {
		dev_err(drv.dev, "regulator_enable(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}

	/*
	 * Increment the L2 HFPLL regulator refcount if _this_ CPU's frequency
	 * requires a corresponding target L2 frequency that needs the L2 to
	 * run off of an HFPLL.
	 */
	if (drv.l2_freq_tbl[acpu_level->l2_level].speed.src == HFPLL)
		l2_vreg_count++;

	return 0;

err_core_conf:
	regulator_put(sc->vreg[VREG_CORE].reg);
err_core_get:
	rpm_regulator_cleanup(sc, VREG_HFPLL_B);
err_hfpll_b:
	rpm_regulator_cleanup(sc, VREG_HFPLL_A);
err_hfpll_a:
	rpm_regulator_cleanup(sc, VREG_DIG);
err_dig:
	rpm_regulator_cleanup(sc, VREG_MEM);
err_mem:
	return ret;
}
/* Undo regulator_init(): release the core regulator and all RPM votes. */
static void __cpuinit regulator_cleanup(struct scalable *sc)
{
	struct regulator *core = sc->vreg[VREG_CORE].reg;

	regulator_disable(core);
	regulator_put(core);
	rpm_regulator_cleanup(sc, VREG_HFPLL_B);
	rpm_regulator_cleanup(sc, VREG_HFPLL_A);
	rpm_regulator_cleanup(sc, VREG_DIG);
	rpm_regulator_cleanup(sc, VREG_MEM);
}
/*
 * Set initial rate for a given core.
 *
 * Programs the aux input, parks the core on the secondary source while
 * the HFPLL is (re)initialized, fixes the HFPLL/2 divider, then switches
 * onto the target source.  Returns 0 or -ENOMEM if the aux register
 * can't be mapped.
 */
static int __cpuinit init_clock_sources(struct scalable *sc,
					const struct core_speed *tgt_s)
{
	u32 regval;
	void __iomem *aux_reg;

	/* Program AUX source input to the secondary MUX. */
	if (sc->aux_clk_sel_phys) {
		aux_reg = ioremap(sc->aux_clk_sel_phys, 4);
		if (!aux_reg)
			return -ENOMEM;
		writel_relaxed(sc->aux_clk_sel, aux_reg);
		iounmap(aux_reg);
	}

	/* Switch away from the HFPLL while it's re-initialized. */
	set_sec_clk_src(sc, sc->sec_clk_sel);
	set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);
	hfpll_init(sc, tgt_s);

	/* Set PRI_SRC_SEL_HFPLL_DIV2 divider to div-2. */
	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~(0x3 << 6);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

	/* Enable and switch to the target clock source. */
	if (tgt_s->src == HFPLL)
		hfpll_enable(sc, false);
	set_pri_clk_src(sc, tgt_s->pri_src_sel);
	sc->cur_speed = tgt_s;

	return 0;
}
/* Snapshot the hardware's live source select and PLL L-value into @s. */
static void __cpuinit fill_cur_core_speed(struct core_speed *s,
					  struct scalable *sc)
{
	u32 cpmr = get_l2_indirect_reg(sc->l2cpmr_iaddr);

	s->pri_src_sel = cpmr & 0x3;
	s->pll_l_val = readl_relaxed(sc->hfpll_base + drv.hfpll_data->l_offset);
}
/* Two speeds match if both the source select and the PLL L-value agree. */
static bool __cpuinit speed_equal(const struct core_speed *s1,
				  const struct core_speed *s2)
{
	if (s1->pri_src_sel != s2->pri_src_sel)
		return false;
	return s1->pll_l_val == s2->pll_l_val;
}
/*
 * Match the CPU's live hardware speed against the frequency table.
 * Returns the matching level, or NULL if the firmware left the CPU at
 * an unknown rate.
 */
static const struct acpu_level __cpuinit *find_cur_acpu_level(int cpu)
{
	struct core_speed hw_speed;
	const struct acpu_level *l;

	fill_cur_core_speed(&hw_speed, &drv.scalable[cpu]);
	for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++) {
		if (speed_equal(&l->speed, &hw_speed))
			return l;
	}
	return NULL;
}
/* As find_cur_acpu_level(), but for the L2 domain and its table. */
static const struct l2_level __init *find_cur_l2_level(void)
{
	struct core_speed hw_speed;
	const struct l2_level *l;

	fill_cur_core_speed(&hw_speed, &drv.scalable[L2]);
	for (l = drv.l2_freq_tbl; l->speed.khz != 0; l++) {
		if (speed_equal(&l->speed, &hw_speed))
			return l;
	}
	return NULL;
}
/* Lowest table entry usable for scaling, or NULL if none is flagged. */
static const struct acpu_level __cpuinit *find_min_acpu_level(void)
{
	const struct acpu_level *l = drv.acpu_freq_tbl;

	for (; l->speed.khz != 0; l++) {
		if (l->use_for_scaling)
			return l;
	}
	return NULL;
}
/*
 * One-time bring-up for a single CPU: map its HFPLL, detect (or default)
 * its current level, initialize regulators and clock sources, and record
 * its L2 vote.  Returns 0 or a negative errno after unwinding.
 */
static int __cpuinit per_cpu_init(int cpu)
{
	struct scalable *sc = &drv.scalable[cpu];
	const struct acpu_level *acpu_level;
	int ret;

	sc->hfpll_base = ioremap(sc->hfpll_phys_base, SZ_32);
	if (!sc->hfpll_base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	acpu_level = find_cur_acpu_level(cpu);
	if (!acpu_level) {
		/* Unknown boot rate: fall back to the slowest usable level. */
		acpu_level = find_min_acpu_level();
		if (!acpu_level) {
			ret = -ENODEV;
			goto err_table;
		}
		dev_dbg(drv.dev, "CPU%d is running at an unknown rate. Defaulting to %lu KHz.\n",
			cpu, acpu_level->speed.khz);
	} else {
		dev_dbg(drv.dev, "CPU%d is running at %lu KHz\n", cpu,
			acpu_level->speed.khz);
	}

	ret = regulator_init(sc, acpu_level);
	if (ret)
		goto err_regulators;

	ret = init_clock_sources(sc, &acpu_level->speed);
	if (ret)
		goto err_clocks;

	sc->l2_vote = acpu_level->l2_level;
	sc->initialized = true;

	return 0;

err_clocks:
	regulator_cleanup(sc);
err_regulators:
err_table:
	iounmap(sc->hfpll_base);
err_ioremap:
	return ret;
}
/*
 * Register with bus driver and place the initial bandwidth request for
 * the detected L2 level.  Registration failure is fatal (BUG).
 */
static void __init bus_init(const struct l2_level *l2_level)
{
	int ret;

	drv.bus_perf_client = msm_bus_scale_register_client(drv.bus_scale);
	if (!drv.bus_perf_client) {
		dev_err(drv.dev, "unable to register bus client\n");
		BUG();
	}

	ret = msm_bus_scale_client_update_request(drv.bus_perf_client,
						  l2_level->bw_level);
	if (ret)
		dev_err(drv.dev, "initial bandwidth req failed (%d)\n", ret);
}
#ifdef CONFIG_CPU_FREQ_MSM
/* Per-CPU cpufreq tables; 35 slots must cover all levels + END marker. */
static struct cpufreq_frequency_table freq_table[NR_CPUS][35];
/* Battery state flag provided elsewhere (Samsung factory builds). */
extern int console_batt_stat;
/*
 * Build and register a cpufreq frequency table per possible CPU from the
 * usable entries of the ACPU table.  Factory builds may drop >1GHz
 * entries depending on console/battery state.
 */
static void __init cpufreq_table_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i, freq_cnt = 0;
		/* Construct the freq_table tables from acpu_freq_tbl. */
		for (i = 0; drv.acpu_freq_tbl[i].speed.khz != 0
				&& freq_cnt < ARRAY_SIZE(*freq_table); i++) {
			if (drv.acpu_freq_tbl[i].use_for_scaling) {
#ifdef CONFIG_SEC_FACTORY
// if factory_condition, set the core freq limit.
//QMCK
				if (console_set_on_cmdline && drv.acpu_freq_tbl[i].speed.khz > 1000000) {
					if(console_batt_stat == 1) {
						continue;
					}
				}
//QMCK
#endif
				freq_table[cpu][freq_cnt].index = freq_cnt;
				freq_table[cpu][freq_cnt].frequency
					= drv.acpu_freq_tbl[i].speed.khz;
				freq_cnt++;
			}
		}
		/* freq_table not big enough to store all usable freqs. */
		BUG_ON(drv.acpu_freq_tbl[i].speed.khz != 0);

		freq_table[cpu][freq_cnt].index = freq_cnt;
		freq_table[cpu][freq_cnt].frequency = CPUFREQ_TABLE_END;

		dev_info(drv.dev, "CPU%d: %d frequencies supported\n",
			 cpu, freq_cnt);

		/* Register table with CPUFreq. */
		cpufreq_frequency_table_get_attr(freq_table[cpu], cpu);
	}
}
#else
static void __init cpufreq_table_init(void) {}
#endif
/* Register each usable ACPU frequency (KHz) and voltage (mV) with DCVS. */
static void __init dcvs_freq_init(void)
{
	const struct acpu_level *l;

	for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++) {
		if (!l->use_for_scaling)
			continue;
		msm_dcvs_register_cpu_freq(l->speed.khz,
					   l->vdd_core / 1000);
	}
}
/*
 * CPU hotplug notifier: park a dying CPU's clock at the power-collapse
 * rate and drop its current vote; restore the saved rate (doing full
 * per-CPU init on first plug-in) when it comes back.
 */
static int __cpuinit acpuclk_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	static int prev_khz[NR_CPUS];
	/*
	 * hcpu carries the CPU number, not a real pointer.  Cast through
	 * long (kernel idiom) — a direct (int) cast of a pointer truncates
	 * on 64-bit builds and provokes -Wpointer-to-int-cast.
	 */
	int rc, cpu = (long)hcpu;
	struct scalable *sc = &drv.scalable[cpu];
	unsigned long hot_unplug_khz = acpuclk_krait_data.power_collapse_khz;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DEAD:
		prev_khz[cpu] = acpuclk_krait_get_rate(cpu);
		/* Fall through. */
	case CPU_UP_CANCELED:
		acpuclk_krait_set_rate(cpu, hot_unplug_khz, SETRATE_HOTPLUG);
		regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg, 0);
		break;
	case CPU_UP_PREPARE:
		if (!sc->initialized) {
			rc = per_cpu_init(cpu);
			if (rc)
				return NOTIFY_BAD;
			break;
		}
		if (WARN_ON(!prev_khz[cpu]))
			return NOTIFY_BAD;
		rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						sc->vreg[VREG_CORE].cur_ua);
		if (rc < 0)
			return NOTIFY_BAD;
		acpuclk_krait_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
/* Hotplug notifier registration glue for acpuclk_cpu_callback(). */
static struct notifier_block __cpuinitdata acpuclk_cpu_notifier = {
	.notifier_call = acpuclk_cpu_callback,
};
/*
 * Detect early Krait revisions (by MIDR) that need a core-voltage floor;
 * krait_apply_vmin() applies it when this returns nonzero.
 *
 * Cleanups vs. the original: the 'const' qualifier on the int return was
 * meaningless (triggers -Wignored-qualifiers), and the stray ';' after
 * the switch's closing brace was an empty statement.
 */
static int krait_needs_vmin(void)
{
	switch (read_cpuid_id()) {
	case 0x511F04D0: /* KR28M2A20 */
	case 0x511F04D1: /* KR28M2A21 */
	case 0x510F06F0: /* KR28M4A10 */
		return 1;
	default:
		return 0;
	}
}
/* Raise every level's core voltage to the 1.15V floor and disable AVS. */
static void krait_apply_vmin(struct acpu_level *tbl)
{
	struct acpu_level *l;

	for (l = tbl; l->speed.khz != 0; l++) {
		if (l->vdd_core < 1150000)
			l->vdd_core = 1150000;
		l->avsdscr_setting = 0;
	}
}
/*
 * Decode the speed bin from the PTE efuse: primary field in bits [3:0],
 * redundant copy in bits [7:4]; 0xF means "not blown", so fall back and
 * finally default to bin 0.
 */
static int __init get_speed_bin(u32 pte_efuse)
{
	uint32_t bin = pte_efuse & 0xF;

	if (bin == 0xF)
		bin = (pte_efuse >> 4) & 0xF;

	if (bin != 0xF) {
		dev_info(drv.dev, "SPEED BIN: %d\n", bin);
	} else {
		bin = 0;
		dev_warn(drv.dev, "SPEED BIN: Defaulting to %d\n", bin);
	}

	return bin;
}
/*
 * Decode the PVS bin from the PTE efuse: primary field in bits [12:10],
 * redundant copy in bits [15:13]; 0x7 means "not blown", so fall back
 * and finally default to bin 0.
 */
static int __init get_pvs_bin(u32 pte_efuse)
{
	uint32_t bin = (pte_efuse >> 10) & 0x7;

	if (bin == 0x7)
		bin = (pte_efuse >> 13) & 0x7;

	if (bin != 0x7) {
		dev_info(drv.dev, "ACPU PVS: %d\n", bin);
	} else {
		bin = 0;
		dev_warn(drv.dev, "ACPU PVS: Defaulting to %d\n", bin);
	}

	return bin;
}
/*
 * Read the PTE efuse and pick the PVS table indexed by [speed bin][pvs
 * bin].  Returns NULL only if the efuse cannot be mapped.
 */
static struct pvs_table * __init select_freq_plan(u32 pte_efuse_phys,
			struct pvs_table (*pvs_tables)[NUM_PVS])
{
	void __iomem *pte_efuse;
	u32 pte_efuse_val, tbl_idx, bin_idx;

	pte_efuse = ioremap(pte_efuse_phys, 4);
	if (!pte_efuse) {
		dev_err(drv.dev, "Unable to map QFPROM base\n");
		return NULL;
	}

	pte_efuse_val = readl_relaxed(pte_efuse);
	iounmap(pte_efuse);

	/* Select frequency tables. */
	bin_idx = get_speed_bin(pte_efuse_val);
	tbl_idx = get_pvs_bin(pte_efuse_val);

#ifdef CONFIG_SEC_DEBUG_SUBSYS
	speed_bin = bin_idx;
	pvs_bin = tbl_idx;
#endif

	return &pvs_tables[bin_idx][tbl_idx];
}
/*
 * Copy all platform-supplied tables into driver-owned memory (so PVS
 * tweaks like krait_apply_vmin() never touch the originals) and select
 * the fused frequency plan.  Allocation failure at boot is fatal
 * (BUG_ON) by design.
 */
static void __init drv_data_init(struct device *dev,
				 const struct acpuclk_krait_params *params)
{
	struct pvs_table *pvs;

	drv.dev = dev;
	drv.scalable = kmemdup(params->scalable, params->scalable_size,
			       GFP_KERNEL);
	BUG_ON(!drv.scalable);

	drv.hfpll_data = kmemdup(params->hfpll_data, sizeof(*drv.hfpll_data),
				 GFP_KERNEL);
	BUG_ON(!drv.hfpll_data);

	drv.l2_freq_tbl = kmemdup(params->l2_freq_tbl, params->l2_freq_tbl_size,
				  GFP_KERNEL);
	BUG_ON(!drv.l2_freq_tbl);

	drv.bus_scale = kmemdup(params->bus_scale, sizeof(*drv.bus_scale),
				GFP_KERNEL);
	BUG_ON(!drv.bus_scale);
	/* Deep-copy the usecase array the pdata copy still points into. */
	drv.bus_scale->usecase = kmemdup(drv.bus_scale->usecase,
		drv.bus_scale->num_usecases * sizeof(*drv.bus_scale->usecase),
		GFP_KERNEL);
	BUG_ON(!drv.bus_scale->usecase);

	pvs = select_freq_plan(params->pte_efuse_phys, params->pvs_tables);
	BUG_ON(!pvs->table);

	drv.acpu_freq_tbl = kmemdup(pvs->table, pvs->size, GFP_KERNEL);
	BUG_ON(!drv.acpu_freq_tbl);
	drv.boost_uv = pvs->boost_uv;
#ifdef CONFIG_SEC_DEBUG_SUBSYS
	boost_uv = drv.boost_uv;
#endif

	acpuclk_krait_data.power_collapse_khz = params->stby_khz;
	acpuclk_krait_data.wait_for_irq_khz = params->stby_khz;
}
/*
 * One-time hardware bring-up: apply the Krait vmin workaround if this
 * revision needs it, map and regulator-init the L2 scalable, sync the
 * clock sources to the rate the L2 is already running at, initialise
 * every online CPU, then the bus bandwidth driver.  Any failure here
 * is fatal at boot (BUG_ON).
 */
static void __init hw_init(void)
{
	struct scalable *l2 = &drv.scalable[L2];
	const struct l2_level *l2_level;
	int cpu, rc;
	if (krait_needs_vmin())
		krait_apply_vmin(drv.acpu_freq_tbl);
	l2->hfpll_base = ioremap(l2->hfpll_phys_base, SZ_32);
	BUG_ON(!l2->hfpll_base);
	rc = rpm_regulator_init(l2, VREG_HFPLL_A,
				l2->vreg[VREG_HFPLL_A].max_vdd, false);
	BUG_ON(rc);
	rc = rpm_regulator_init(l2, VREG_HFPLL_B,
				l2->vreg[VREG_HFPLL_B].max_vdd, false);
	BUG_ON(rc);
	/* Fall back to the first table entry when the bootloader left the
	 * L2 at a rate we cannot identify. */
	l2_level = find_cur_l2_level();
	if (!l2_level) {
		l2_level = drv.l2_freq_tbl;
		dev_dbg(drv.dev, "L2 is running at an unknown rate. Defaulting to %lu KHz.\n",
			l2_level->speed.khz);
	} else {
		dev_dbg(drv.dev, "L2 is running at %lu KHz\n",
			l2_level->speed.khz);
	}
	rc = init_clock_sources(l2, &l2_level->speed);
	BUG_ON(rc);
	for_each_online_cpu(cpu) {
		rc = per_cpu_init(cpu);
		BUG_ON(rc);
	}
	bus_init(l2_level);
}
/*
 * Driver entry point: copy platform data into driver state, program
 * the hardware, build the cpufreq table, init DCVS, then register
 * with the acpuclk framework and the CPU hotplug notifier chain.
 * Order matters: hw_init() depends on tables set up by
 * drv_data_init().  Always returns 0; unrecoverable errors BUG()
 * earlier.
 */
int __init acpuclk_krait_init(struct device *dev,
			      const struct acpuclk_krait_params *params)
{
	drv_data_init(dev, params);
	hw_init();
	cpufreq_table_init();
	dcvs_freq_init();
	acpuclk_register(&acpuclk_krait_data);
	register_hotcpu_notifier(&acpuclk_cpu_notifier);
	return 0;
}
|
cnexus/NexTKernel-d2spr
|
arch/arm/mach-msm/acpuclock-krait.c
|
C
|
gpl-2.0
| 31,611
|
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <sound/apr_audio-v2.h>
#include <linux/qdsp6v2/apr.h>
#include <sound/q6adm-v2.h>
#include <sound/q6audio-v2.h>
#include <sound/q6afe-v2.h>
#include <sound/hw_audio_log.h>
#include "audio_acdb.h"
/* How long (ms) to block waiting for a DSP ack via wait_event_timeout. */
#define TIMEOUT_MS 1000
/* Sentinel COPP id stored after an APR reset event. */
#define RESET_COPP_ID 99
/* COPP id the DSP returns when a device-open request failed. */
#define INVALID_COPP_ID 0xFF
/* Used for inband payload copy, max size is 4k */
/* 2 is to account for module & param ID in payload */
#define ADM_GET_PARAMETER_LENGTH (4096 - APR_HDR_SIZE - 2 * sizeof(uint32_t))
/* NOTE(review): presumably the only sample rate ULL COPPs accept; not
 * referenced in this chunk — confirm against the adm_open path. */
#define ULL_SUPPORTED_SAMPLE_RATE 48000
/* Calibration types; used as indices into
 * this_adm.mem_map_cal_handles[] (one shared-memory map handle per
 * type).  The first four line up with the audproc/audvol path indices
 * (acdb_path and acdb_path + MAX_AUDPROC_TYPES in send_adm_cal()). */
enum {
	ADM_RX_AUDPROC_CAL,
	ADM_TX_AUDPROC_CAL,
	ADM_RX_AUDVOL_CAL,
	ADM_TX_AUDVOL_CAL,
	ADM_CUSTOM_TOP_CAL,
	ADM_RTAC,
	ADM_MAX_CAL_TYPES
};
/* Global ADM driver state, indexed per AFE port where arrays appear. */
struct adm_ctl {
	void *apr;	/* APR service handle; NULL until registered / after reset */
	atomic_t copp_id[AFE_MAX_PORTS];	/* legacy-mode COPP id per port */
	atomic_t copp_cnt[AFE_MAX_PORTS];	/* legacy-mode open refcount */
	atomic_t copp_low_latency_id[AFE_MAX_PORTS];	/* low-latency COPP id */
	atomic_t copp_low_latency_cnt[AFE_MAX_PORTS];	/* low-latency refcount */
	atomic_t copp_perf_mode[AFE_MAX_PORTS];	/* nonzero selects low-latency id */
	atomic_t copp_stat[AFE_MAX_PORTS];	/* cmd-done flag; waiters sleep on 0 */
	wait_queue_head_t wait[AFE_MAX_PORTS];	/* paired with copp_stat */
	struct acdb_cal_block mem_addr_audproc[MAX_AUDPROC_TYPES];	/* mapped audproc cal */
	struct acdb_cal_block mem_addr_audvol[MAX_AUDPROC_TYPES];	/* mapped audvol cal */
	atomic_t mem_map_cal_handles[ADM_MAX_CAL_TYPES];	/* DSP map handles */
	atomic_t mem_map_cal_index;	/* which cal type the next map response is for */
	int set_custom_topology;	/* send custom topology once after (re)start */
	int ec_ref_rx;
};
static struct adm_ctl this_adm;
/* Cached PCM channel map, written by adm_set_multi_ch_map() and read
 * back by adm_get_multi_ch_map(). */
struct adm_multi_ch_map {
	bool set_channel_map;	/* true once a map has been cached */
	char channel_mapping[PCM_FORMAT_MAX_NUM_CHANNEL];
};
static struct adm_multi_ch_map multi_ch_map = { false,
	{0, 0, 0, 0, 0, 0, 0, 0}
};
/* [0] holds the parameter count (or -1 after a failed get); [1..] the
 * values copied out of ADM_CMDRSP_GET_PP_PARAMS_V5 in adm_callback(). */
static int adm_get_parameters[ADM_GET_PARAMETER_LENGTH];
/*
 * Send one SRS TruMedia parameter block (selected by @srs_tech_id) to
 * the COPP open on @port_id as an inband ADM_CMD_SET_PP_PARAMS_V5.
 * @srs_params must point at the matching srs_trumedia_params_* struct.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL on an
 * unknown tech id, a bad port, or a send/timeout failure.
 *
 * Fixes vs. the previous revision: an unknown srs_tech_id now returns
 * -EINVAL instead of 0 (false success), and the success path returns 0
 * instead of the positive jiffies value left over from
 * wait_event_timeout(), matching every sibling function's "rc = 0;"
 * convention.
 */
int srs_trumedia_open(int port_id, int srs_tech_id, void *srs_params)
{
	struct adm_cmd_set_pp_params_inband_v5 *adm_params = NULL;
	int ret = 0, sz = 0;
	int index;

	ad_logd("SRS - %s", __func__);
	/* Each case allocates header + its own payload struct, fills the
	 * payload-size/param-id fields, and copies the caller's params in
	 * after the fixed-size command structure. */
	switch (srs_tech_id) {
	case SRS_ID_GLOBAL: {
		struct srs_trumedia_params_GLOBAL *glb_params = NULL;
		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
			sizeof(struct srs_trumedia_params_GLOBAL);
		adm_params = kzalloc(sz, GFP_KERNEL);
		if (!adm_params) {
			ad_loge("%s, adm params memory alloc failed\n",
				__func__);
			return -ENOMEM;
		}
		adm_params->payload_size =
			sizeof(struct srs_trumedia_params_GLOBAL) +
			sizeof(struct adm_param_data_v5);
		adm_params->params.param_id = SRS_TRUMEDIA_PARAMS;
		adm_params->params.param_size =
			sizeof(struct srs_trumedia_params_GLOBAL);
		glb_params = (struct srs_trumedia_params_GLOBAL *)
			((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pp_params_inband_v5));
		memcpy(glb_params, srs_params,
			sizeof(struct srs_trumedia_params_GLOBAL));
		ad_logd("SRS - %s: Global params - 1 = %x, 2 = %x, 3 = %x, 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x\n",
			__func__, (int)glb_params->v1,
			(int)glb_params->v2, (int)glb_params->v3,
			(int)glb_params->v4, (int)glb_params->v5,
			(int)glb_params->v6, (int)glb_params->v7,
			(int)glb_params->v8);
		break;
	}
	case SRS_ID_WOWHD: {
		struct srs_trumedia_params_WOWHD *whd_params = NULL;
		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
			sizeof(struct srs_trumedia_params_WOWHD);
		adm_params = kzalloc(sz, GFP_KERNEL);
		if (!adm_params) {
			ad_loge("%s, adm params memory alloc failed\n",
				__func__);
			return -ENOMEM;
		}
		adm_params->payload_size =
			sizeof(struct srs_trumedia_params_WOWHD) +
			sizeof(struct adm_param_data_v5);
		adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_WOWHD;
		adm_params->params.param_size =
			sizeof(struct srs_trumedia_params_WOWHD);
		whd_params = (struct srs_trumedia_params_WOWHD *)
			((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pp_params_inband_v5));
		memcpy(whd_params, srs_params,
			sizeof(struct srs_trumedia_params_WOWHD));
		ad_logd("SRS - %s: WOWHD params - 1 = %x, 2 = %x, 3 = %x, 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x, 9 = %x, 10 = %x, 11 = %x\n",
			__func__, (int)whd_params->v1,
			(int)whd_params->v2, (int)whd_params->v3,
			(int)whd_params->v4, (int)whd_params->v5,
			(int)whd_params->v6, (int)whd_params->v7,
			(int)whd_params->v8, (int)whd_params->v9,
			(int)whd_params->v10, (int)whd_params->v11);
		break;
	}
	case SRS_ID_CSHP: {
		struct srs_trumedia_params_CSHP *chp_params = NULL;
		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
			sizeof(struct srs_trumedia_params_CSHP);
		adm_params = kzalloc(sz, GFP_KERNEL);
		if (!adm_params) {
			ad_loge("%s, adm params memory alloc failed\n",
				__func__);
			return -ENOMEM;
		}
		adm_params->payload_size =
			sizeof(struct srs_trumedia_params_CSHP) +
			sizeof(struct adm_param_data_v5);
		adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_CSHP;
		adm_params->params.param_size =
			sizeof(struct srs_trumedia_params_CSHP);
		chp_params = (struct srs_trumedia_params_CSHP *)
			((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pp_params_inband_v5));
		memcpy(chp_params, srs_params,
			sizeof(struct srs_trumedia_params_CSHP));
		ad_logd("SRS - %s: CSHP params - 1 = %x, 2 = %x, 3 = %x, 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x, 9 = %x\n",
			__func__, (int)chp_params->v1,
			(int)chp_params->v2, (int)chp_params->v3,
			(int)chp_params->v4, (int)chp_params->v5,
			(int)chp_params->v6, (int)chp_params->v7,
			(int)chp_params->v8, (int)chp_params->v9);
		break;
	}
	case SRS_ID_HPF: {
		struct srs_trumedia_params_HPF *hpf_params = NULL;
		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
			sizeof(struct srs_trumedia_params_HPF);
		adm_params = kzalloc(sz, GFP_KERNEL);
		if (!adm_params) {
			ad_loge("%s, adm params memory alloc failed\n",
				__func__);
			return -ENOMEM;
		}
		adm_params->payload_size =
			sizeof(struct srs_trumedia_params_HPF) +
			sizeof(struct adm_param_data_v5);
		adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_HPF;
		adm_params->params.param_size =
			sizeof(struct srs_trumedia_params_HPF);
		hpf_params = (struct srs_trumedia_params_HPF *)
			((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pp_params_inband_v5));
		memcpy(hpf_params, srs_params,
			sizeof(struct srs_trumedia_params_HPF));
		ad_logd("SRS - %s: HPF params - 1 = %x\n", __func__,
			(int)hpf_params->v1);
		break;
	}
	case SRS_ID_PEQ: {
		struct srs_trumedia_params_PEQ *peq_params = NULL;
		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
			sizeof(struct srs_trumedia_params_PEQ);
		adm_params = kzalloc(sz, GFP_KERNEL);
		if (!adm_params) {
			ad_loge("%s, adm params memory alloc failed\n",
				__func__);
			return -ENOMEM;
		}
		adm_params->payload_size =
			sizeof(struct srs_trumedia_params_PEQ) +
			sizeof(struct adm_param_data_v5);
		adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_PEQ;
		adm_params->params.param_size =
			sizeof(struct srs_trumedia_params_PEQ);
		peq_params = (struct srs_trumedia_params_PEQ *)
			((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pp_params_inband_v5));
		memcpy(peq_params, srs_params,
			sizeof(struct srs_trumedia_params_PEQ));
		ad_logd("SRS - %s: PEQ params - 1 = %x 2 = %x, 3 = %x, 4 = %x\n",
			__func__, (int)peq_params->v1,
			(int)peq_params->v2, (int)peq_params->v3,
			(int)peq_params->v4);
		break;
	}
	case SRS_ID_HL: {
		struct srs_trumedia_params_HL *hl_params = NULL;
		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
			sizeof(struct srs_trumedia_params_HL);
		adm_params = kzalloc(sz, GFP_KERNEL);
		if (!adm_params) {
			ad_loge("%s, adm params memory alloc failed\n",
				__func__);
			return -ENOMEM;
		}
		adm_params->payload_size =
			sizeof(struct srs_trumedia_params_HL) +
			sizeof(struct adm_param_data_v5);
		adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_HL;
		adm_params->params.param_size =
			sizeof(struct srs_trumedia_params_HL);
		hl_params = (struct srs_trumedia_params_HL *)
			((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pp_params_inband_v5));
		memcpy(hl_params, srs_params,
			sizeof(struct srs_trumedia_params_HL));
		ad_logd("SRS - %s: HL params - 1 = %x, 2 = %x, 3 = %x, 4 = %x, 5 = %x, 6 = %x, 7 = %x\n",
			__func__, (int)hl_params->v1,
			(int)hl_params->v2, (int)hl_params->v3,
			(int)hl_params->v4, (int)hl_params->v5,
			(int)hl_params->v6, (int)hl_params->v7);
		break;
	}
	default:
		/* Unknown tech id: report failure (previously fell through
		 * and returned 0, signalling bogus success). */
		ret = -EINVAL;
		goto fail_cmd;
	}
	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	adm_params->hdr.pkt_size = sz;
	adm_params->hdr.src_svc = APR_SVC_ADM;
	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
	adm_params->hdr.src_port = port_id;
	adm_params->hdr.dest_svc = APR_SVC_ADM;
	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
	index = afe_get_port_index(port_id);
	if (index < 0 || index >= AFE_MAX_PORTS) {
		ad_loge("%s: invalid port idx %d portid %#x\n",
			__func__, index, port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	adm_params->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	adm_params->hdr.token = port_id;
	adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
	adm_params->payload_addr_lsw = 0;
	adm_params->payload_addr_msw = 0;
	adm_params->mem_map_handle = 0;
	adm_params->params.module_id = SRS_TRUMEDIA_MODULE_ID;
	adm_params->params.reserved = 0;
	ad_logd("SRS - %s: Command was sent now check Q6 - port id = %d, size %d, module id %x, param id %x.\n",
		__func__, adm_params->hdr.dest_port,
		adm_params->payload_size, adm_params->params.module_id,
		adm_params->params.param_id);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
	if (ret < 0) {
		ad_loge("SRS - %s: ADM enable for port %d failed\n", __func__,
			port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	/* Wait for the callback with copp id */
	/* NOTE(review): the wait condition is the constant 1, so this
	 * returns immediately without actually waiting for the DSP ack;
	 * kept as-is — confirm intent before changing. */
	ret = wait_event_timeout(this_adm.wait[index], 1,
			msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		ad_loge("%s: SRS set params timed out port = %d\n",
			__func__, port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = 0;
fail_cmd:
	kfree(adm_params);
	return ret;
}
/*
 * Send a per-session PSPD matrix/stream-router parameter blob (e.g. a
 * stereo -> custom-stereo downmix) to the COPP open on @port_id.
 * @params/@params_length is the raw inband payload appended after the
 * command header.  Blocks up to TIMEOUT_MS for the DSP ack (signalled
 * via copp_stat by adm_callback()).  Returns 0 on success, -ENOMEM on
 * allocation failure, -EINVAL otherwise.
 */
int adm_set_stereo_to_custom_stereo(int port_id, unsigned int session_id,
				    char *params, uint32_t params_length)
{
	struct adm_cmd_set_pspd_mtmx_strtr_params_v5 *adm_params = NULL;
	int sz, rc = 0, index = afe_get_port_index(port_id);
	ad_logd("%s\n", __func__);
	if (index < 0 || index >= AFE_MAX_PORTS) {
		ad_loge("%s: invalid port idx %d port_id %#x\n", __func__, index,
			port_id);
		return -EINVAL;
	}
	sz = sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5) +
		params_length;
	adm_params = kzalloc(sz, GFP_KERNEL);
	if (!adm_params) {
		ad_loge("%s, adm params memory alloc failed\n", __func__);
		return -ENOMEM;
	}
	/* Inband payload sits directly after the fixed-size command. */
	memcpy(((u8 *)adm_params +
		sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5)),
		params, params_length);
	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	adm_params->hdr.pkt_size = sz;
	adm_params->hdr.src_svc = APR_SVC_ADM;
	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
	adm_params->hdr.src_port = port_id;
	adm_params->hdr.dest_svc = APR_SVC_ADM;
	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
	adm_params->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	adm_params->hdr.token = port_id;
	adm_params->hdr.opcode = ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V5;
	adm_params->payload_addr_lsw = 0;
	adm_params->payload_addr_msw = 0;
	adm_params->mem_map_handle = 0;
	adm_params->payload_size = params_length;
	/* direction RX as 0 */
	adm_params->direction = 0;
	/* session id for this cmd to be applied on */
	adm_params->sessionid = session_id;
	/* valid COPP id for LPCM */
	adm_params->deviceid = atomic_read(&this_adm.copp_id[index]);
	adm_params->reserved = 0;
	ad_logd("%s: deviceid %d, session_id %d, src_port %d, dest_port %d\n",
		__func__, adm_params->deviceid, adm_params->sessionid,
		adm_params->hdr.src_port, adm_params->hdr.dest_port);
	atomic_set(&this_adm.copp_stat[index], 0);
	rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
	if (rc < 0) {
		ad_loge("%s: Set params failed port = %#x\n",
			__func__, port_id);
		rc = -EINVAL;
		goto set_stereo_to_custom_stereo_return;
	}
	/* Wait for the callback */
	rc = wait_event_timeout(this_adm.wait[index],
		atomic_read(&this_adm.copp_stat[index]),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!rc) {
		ad_loge("%s: Set params timed out port = %#x\n", __func__,
			port_id);
		rc = -EINVAL;
		goto set_stereo_to_custom_stereo_return;
	}
	rc = 0;
set_stereo_to_custom_stereo_return:
	kfree(adm_params);
	return rc;
}
/*
 * Send a raw Dolby DAP parameter blob inband to the COPP open on
 * @port_id via ADM_CMD_SET_PP_PARAMS_V5 and wait up to TIMEOUT_MS for
 * the ack.  Returns 0 on success, -ENOMEM on allocation failure,
 * -EINVAL otherwise.
 */
int adm_dolby_dap_send_params(int port_id, char *params, uint32_t params_length)
{
	struct adm_cmd_set_pp_params_v5 *adm_params = NULL;
	int sz, rc = 0, index = afe_get_port_index(port_id);
	ad_logd("%s\n", __func__);
	if (index < 0 || index >= AFE_MAX_PORTS) {
		ad_loge("%s: invalid port idx %d portid %#x\n",
			__func__, index, port_id);
		return -EINVAL;
	}
	sz = sizeof(struct adm_cmd_set_pp_params_v5) + params_length;
	adm_params = kzalloc(sz, GFP_KERNEL);
	if (!adm_params) {
		ad_loge("%s, adm params memory alloc failed", __func__);
		return -ENOMEM;
	}
	/* Inband payload sits directly after the fixed-size command. */
	memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_set_pp_params_v5)),
			params, params_length);
	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	adm_params->hdr.pkt_size = sz;
	adm_params->hdr.src_svc = APR_SVC_ADM;
	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
	adm_params->hdr.src_port = port_id;
	adm_params->hdr.dest_svc = APR_SVC_ADM;
	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
	adm_params->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	adm_params->hdr.token = port_id;
	adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
	adm_params->payload_addr_lsw = 0;
	adm_params->payload_addr_msw = 0;
	adm_params->mem_map_handle = 0;
	adm_params->payload_size = params_length;
	atomic_set(&this_adm.copp_stat[index], 0);
	rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
	if (rc < 0) {
		ad_loge("%s: Set params failed port = %#x\n",
			__func__, port_id);
		rc = -EINVAL;
		goto dolby_dap_send_param_return;
	}
	/* Wait for the callback */
	rc = wait_event_timeout(this_adm.wait[index],
		atomic_read(&this_adm.copp_stat[index]),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!rc) {
		ad_loge("%s: Set params timed out port = %#x\n",
			__func__, port_id);
		rc = -EINVAL;
		goto dolby_dap_send_param_return;
	}
	rc = 0;
dolby_dap_send_param_return:
	kfree(adm_params);
	return rc;
}
/*
 * Query @module_id/@param_id from the COPP open on @port_id.  The DSP
 * response is staged into the global adm_get_parameters[] by
 * adm_callback() ([0] = count, [1..] = values) and copied into
 * @params here after the wait completes.  Returns 0 on success,
 * -ENOMEM on allocation failure, -EINVAL otherwise.
 */
int adm_get_params(int port_id, uint32_t module_id, uint32_t param_id,
		uint32_t params_length, char *params)
{
	struct adm_cmd_get_pp_params_v5 *adm_params = NULL;
	int sz, rc = 0, i = 0, index = afe_get_port_index(port_id);
	int *params_data = (int *)params;
	if (index < 0 || index >= AFE_MAX_PORTS) {
		ad_loge("%s: invalid port idx %d portid %#x\n",
			__func__, index, port_id);
		return -EINVAL;
	}
	sz = sizeof(struct adm_cmd_get_pp_params_v5) + params_length;
	adm_params = kzalloc(sz, GFP_KERNEL);
	if (!adm_params) {
		ad_loge("%s, adm params memory alloc failed", __func__);
		return -ENOMEM;
	}
	memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_get_pp_params_v5)),
		params, params_length);
	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
	APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	adm_params->hdr.pkt_size = sz;
	adm_params->hdr.src_svc = APR_SVC_ADM;
	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
	adm_params->hdr.src_port = port_id;
	adm_params->hdr.dest_svc = APR_SVC_ADM;
	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
	adm_params->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	adm_params->hdr.token = port_id;
	adm_params->hdr.opcode = ADM_CMD_GET_PP_PARAMS_V5;
	adm_params->data_payload_addr_lsw = 0;
	adm_params->data_payload_addr_msw = 0;
	adm_params->mem_map_handle = 0;
	adm_params->module_id = module_id;
	adm_params->param_id = param_id;
	adm_params->param_max_size = params_length;
	adm_params->reserved = 0;
	atomic_set(&this_adm.copp_stat[index], 0);
	rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
	if (rc < 0) {
		ad_loge("%s: Failed to Get Params on port %d\n", __func__,
			port_id);
		rc = -EINVAL;
		goto adm_get_param_return;
	}
	/* Wait for the callback with copp id */
	rc = wait_event_timeout(this_adm.wait[index],
	atomic_read(&this_adm.copp_stat[index]),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!rc) {
		ad_loge("%s: get params timed out port = %d\n", __func__,
			port_id);
		rc = -EINVAL;
		goto adm_get_param_return;
	}
	/* Bounds-check against both the staging array and the caller's
	 * buffer before copying the returned values out. */
	if ((params_data) && (ARRAY_SIZE(adm_get_parameters) >=
		(1+adm_get_parameters[0])) &&
		(params_length/sizeof(int) >=
		adm_get_parameters[0])) {
		for (i = 0; i < adm_get_parameters[0]; i++)
			params_data[i] = adm_get_parameters[1+i];
	} else {
		pr_err("%s: Get param data not copied! get_param array size %zd, index %d, params array size %zd, index %d\n",
			__func__, ARRAY_SIZE(adm_get_parameters),
			(1+adm_get_parameters[0]),
			params_length/sizeof(int),
			adm_get_parameters[0]);
	}
	rc = 0;
adm_get_param_return:
	kfree(adm_params);
	return rc;
}
/* Log the opcode and first one or two payload words of an APR message
 * at debug level, depending on how much payload is present. */
static void adm_callback_debug_print(struct apr_client_data *data)
{
	uint32_t *pl = data->payload;

	if (data->payload_size >= 8) {
		ad_logd("%s: code = 0x%x PL#0[%x], PL#1[%x], size = %d\n",
			__func__, data->opcode, pl[0], pl[1],
			data->payload_size);
	} else if (data->payload_size >= 4) {
		ad_logd("%s: code = 0x%x PL#0[%x], size = %d\n",
			__func__, data->opcode, pl[0],
			data->payload_size);
	} else {
		ad_logd("%s: code = 0x%x, size = %d\n",
			__func__, data->opcode, data->payload_size);
	}
}
/* Cache a PCM channel map for later retrieval via
 * adm_get_multi_ch_map().  @channel_map must hold at least
 * PCM_FORMAT_MAX_NUM_CHANNEL bytes. */
void adm_set_multi_ch_map(char *channel_map)
{
	memcpy(multi_ch_map.channel_mapping, channel_map,
		PCM_FORMAT_MAX_NUM_CHANNEL);
	multi_ch_map.set_channel_map = true;
}
/* Copy the cached channel map into @channel_map; a no-op when no map
 * has been set via adm_set_multi_ch_map(). */
void adm_get_multi_ch_map(char *channel_map)
{
	if (!multi_ch_map.set_channel_map)
		return;

	memcpy(channel_map, multi_ch_map.channel_mapping,
	       PCM_FORMAT_MAX_NUM_CHANNEL);
}
/*
 * APR callback for the ADM service.  Handles:
 *  - RESET_EVENTS: tears down all per-port state, cal handles and
 *    mapped calibration blocks;
 *  - APR_BASIC_RSP_RESULT: command acks — wakes the waiter blocked on
 *    copp_stat for the port encoded in the token;
 *  - ADM_CMDRSP_DEVICE_OPEN_V5: records the new COPP id (legacy or
 *    low-latency slot depending on copp_perf_mode);
 *  - ADM_CMDRSP_GET_PP_PARAMS_V5: stages returned values into
 *    adm_get_parameters[] for adm_get_params();
 *  - ADM_CMDRSP_SHARED_MEM_MAP_REGIONS: stores the map handle at the
 *    slot selected by mem_map_cal_index.
 * Always returns 0 except for a NULL @data.
 */
static int32_t adm_callback(struct apr_client_data *data, void *priv)
{
	uint32_t *payload;
	int i, index;
	if (data == NULL) {
		ad_loge("%s: data paramter is null\n", __func__);
		return -EINVAL;
	}
	payload = data->payload;
	if (data->opcode == RESET_EVENTS) {
		ad_logd("adm_callback: Reset event is received: %d %d apr[%p]\n",
			data->reset_event, data->reset_proc,
			this_adm.apr);
		if (this_adm.apr) {
			apr_reset(this_adm.apr);
			/* Invalidate every port's COPP bookkeeping. */
			for (i = 0; i < AFE_MAX_PORTS; i++) {
				atomic_set(&this_adm.copp_id[i],
					RESET_COPP_ID);
				atomic_set(&this_adm.copp_low_latency_id[i],
					RESET_COPP_ID);
				atomic_set(&this_adm.copp_cnt[i], 0);
				atomic_set(&this_adm.copp_low_latency_cnt[i],
					0);
				atomic_set(&this_adm.copp_perf_mode[i], 0);
				atomic_set(&this_adm.copp_stat[i], 0);
			}
			this_adm.apr = NULL;
			reset_custom_topology_flags();
			/* Force the custom topology to be re-sent after the
			 * DSP comes back. */
			this_adm.set_custom_topology = 1;
			for (i = 0; i < ADM_MAX_CAL_TYPES; i++)
				atomic_set(&this_adm.mem_map_cal_handles[i],
					0);
			rtac_clear_mapping(ADM_RTAC_CAL);
		}
		ad_logd("Resetting calibration blocks");
		for (i = 0; i < MAX_AUDPROC_TYPES; i++) {
			/* Device calibration */
			this_adm.mem_addr_audproc[i].cal_size = 0;
			this_adm.mem_addr_audproc[i].cal_kvaddr = 0;
			this_adm.mem_addr_audproc[i].cal_paddr = 0;
			/* Volume calibration */
			this_adm.mem_addr_audvol[i].cal_size = 0;
			this_adm.mem_addr_audvol[i].cal_kvaddr = 0;
			this_adm.mem_addr_audvol[i].cal_paddr = 0;
		}
		return 0;
	}
	adm_callback_debug_print(data);
	if (data->payload_size) {
		/* The sender stored the port id in the token. */
		index = q6audio_get_port_index(data->token);
		if (index < 0 || index >= AFE_MAX_PORTS) {
			ad_loge("%s: invalid port idx %d token %d\n",
				__func__, index, data->token);
			return 0;
		}
		if (data->opcode == APR_BASIC_RSP_RESULT) {
			ad_logd("APR_BASIC_RSP_RESULT id %x\n", payload[0]);
			if (payload[1] != 0) {
				ad_loge("%s: cmd = 0x%x returned error = 0x%x\n",
					__func__, payload[0], payload[1]);
			}
			switch (payload[0]) {
			case ADM_CMD_SET_PP_PARAMS_V5:
				ad_logd("%s: ADM_CMD_SET_PP_PARAMS_V5\n",
					__func__);
				if (rtac_make_adm_callback(
					payload, data->payload_size)) {
					break;
				}
				/* fallthrough: RTAC did not consume it, so
				 * treat like any other basic ack. */
			case ADM_CMD_DEVICE_CLOSE_V5:
			case ADM_CMD_SHARED_MEM_UNMAP_REGIONS:
			case ADM_CMD_MATRIX_MAP_ROUTINGS_V5:
			case ADM_CMD_ADD_TOPOLOGIES:
				ad_logd("%s: Basic callback received, wake up.\n",
					__func__);
				atomic_set(&this_adm.copp_stat[index], 1);
				wake_up(&this_adm.wait[index]);
				break;
			case ADM_CMD_SHARED_MEM_MAP_REGIONS:
				ad_logd("%s: ADM_CMD_SHARED_MEM_MAP_REGIONS\n",
					__func__);
				/* Should only come here if there is an APR */
				/* error or malformed APR packet. Otherwise */
				/* response will be returned as */
				if (payload[1] != 0) {
					ad_loge("%s: ADM map error, resuming\n",
						__func__);
					atomic_set(&this_adm.copp_stat[index],
						1);
					wake_up(&this_adm.wait[index]);
				}
				break;
			case ADM_CMD_GET_PP_PARAMS_V5:
				ad_logd("%s: ADM_CMD_GET_PP_PARAMS_V5\n",
					__func__);
				/* Should only come here if there is an APR */
				/* error or malformed APR packet. Otherwise */
				/* response will be returned as */
				/* ADM_CMDRSP_GET_PP_PARAMS_V5 */
				if (payload[1] != 0) {
					ad_loge("%s: ADM get param error = %d, resuming\n",
						__func__, payload[1]);
					rtac_make_adm_callback(payload,
						data->payload_size);
				}
				break;
			case ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V5:
				ad_logd("%s:ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V5\n",
					__func__);
				atomic_set(&this_adm.copp_stat[index], 1);
				wake_up(&this_adm.wait[index]);
				break;
			default:
				ad_loge("%s: Unknown Cmd: 0x%x\n", __func__,
					payload[0]);
				break;
			}
			return 0;
		}
		switch (data->opcode) {
		case ADM_CMDRSP_DEVICE_OPEN_V5: {
			struct adm_cmd_rsp_device_open_v5 *open =
			(struct adm_cmd_rsp_device_open_v5 *)data->payload;
			if (open->copp_id == INVALID_COPP_ID) {
				ad_loge("%s: invalid coppid rxed %d\n",
					__func__, open->copp_id);
				atomic_set(&this_adm.copp_stat[index], 1);
				wake_up(&this_adm.wait[index]);
				break;
			}
			/* Record the id in the slot matching the perf mode
			 * the open was issued with. */
			if (atomic_read(&this_adm.copp_perf_mode[index])) {
				atomic_set(&this_adm.copp_low_latency_id[index],
						open->copp_id);
			} else {
				atomic_set(&this_adm.copp_id[index],
					open->copp_id);
			}
			atomic_set(&this_adm.copp_stat[index], 1);
			ad_logd("%s: coppid rxed=%d\n", __func__,
				open->copp_id);
			wake_up(&this_adm.wait[index]);
			}
			break;
		case ADM_CMDRSP_GET_PP_PARAMS_V5:
			ad_logd("%s: ADM_CMDRSP_GET_PP_PARAMS_V5\n", __func__);
			if (payload[0] != 0)
				ad_loge("%s: ADM_CMDRSP_GET_PP_PARAMS_V5 returned error = 0x%x\n",
					__func__, payload[0]);
			if (rtac_make_adm_callback(payload,
					data->payload_size))
				break;
			/* payload[3] = value count; values start at [4].
			 * Check both source and destination bounds. */
			if ((payload[0] == 0) && (data->payload_size >
				(4 * sizeof(*payload))) &&
				(data->payload_size/sizeof(*payload)-4 >=
				payload[3]) &&
				(ARRAY_SIZE(adm_get_parameters)-1 >=
				payload[3])) {
				adm_get_parameters[0] = payload[3];
				pr_debug("%s: GET_PP PARAM:received parameter length: 0x%x\n",
					__func__, adm_get_parameters[0]);
				/* storing param size then params */
				for (i = 0; i < payload[3]; i++)
					adm_get_parameters[1+i] = payload[4+i];
			} else {
				adm_get_parameters[0] = -1;
				pr_err("%s: GET_PP_PARAMS failed, setting size to %d\n",
					__func__, adm_get_parameters[0]);
			}
			atomic_set(&this_adm.copp_stat[index], 1);
			wake_up(&this_adm.wait[index]);
			break;
		case ADM_CMDRSP_SHARED_MEM_MAP_REGIONS:
			ad_logd("%s: ADM_CMDRSP_SHARED_MEM_MAP_REGIONS\n",
				__func__);
			/* Store the handle in the slot the requester selected
			 * via mem_map_cal_index before sending. */
			atomic_set(&this_adm.mem_map_cal_handles[
				atomic_read(&this_adm.mem_map_cal_index)],
				*payload);
			atomic_set(&this_adm.copp_stat[index], 1);
			wake_up(&this_adm.wait[index]);
			break;
		default:
			ad_loge("%s: Unknown cmd:0x%x\n", __func__,
				data->opcode);
			break;
		}
	}
	return 0;
}
/*
 * Push the custom ADM topology blob from the ACDB to the DSP via
 * ADM_CMD_ADD_TOPOLOGIES.  The backing memory is mapped only once per
 * DSP lifetime (gated by this_adm.set_custom_topology, re-armed on
 * RESET_EVENTS).  Failures are logged and swallowed — this is a
 * best-effort call with no return value.
 */
void send_adm_custom_topology(int port_id)
{
	struct acdb_cal_block cal_block;
	struct cmd_set_topologies adm_top;
	int index;
	int result;
	int size = 4096;
	get_adm_custom_topology(&cal_block);
	if (cal_block.cal_size == 0) {
		ad_logd("%s: no cal to send addr= 0x%pa\n",
			__func__, &cal_block.cal_paddr);
		goto done;
	}
	index = afe_get_port_index(port_id);
	if (index < 0 || index >= AFE_MAX_PORTS) {
		ad_loge("%s: invalid port idx %d portid %#x\n",
			__func__, index, port_id);
		goto done;
	}
	if (this_adm.set_custom_topology) {
		/* specific index 4 for adm topology memory */
		atomic_set(&this_adm.mem_map_cal_index, ADM_CUSTOM_TOP_CAL);
		/* Only call this once */
		this_adm.set_custom_topology = 0;
		result = adm_memory_map_regions(port_id,
				&cal_block.cal_paddr, 0, &size, 1);
		if (result < 0) {
			ad_loge("%s: mmap did not work! addr = 0x%pa, size = %zd\n",
				__func__, &cal_block.cal_paddr,
				cal_block.cal_size);
			goto done;
		}
	}
	adm_top.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	adm_top.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
		sizeof(adm_top));
	adm_top.hdr.src_svc = APR_SVC_ADM;
	adm_top.hdr.src_domain = APR_DOMAIN_APPS;
	adm_top.hdr.src_port = port_id;
	adm_top.hdr.dest_svc = APR_SVC_ADM;
	adm_top.hdr.dest_domain = APR_DOMAIN_ADSP;
	adm_top.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	adm_top.hdr.token = port_id;
	adm_top.hdr.opcode = ADM_CMD_ADD_TOPOLOGIES;
	/* Out-of-band: pass the physical address + map handle. */
	adm_top.payload_addr_lsw = lower_32_bits(cal_block.cal_paddr);
	adm_top.payload_addr_msw = upper_32_bits(cal_block.cal_paddr);
	adm_top.mem_map_handle =
	atomic_read(&this_adm.mem_map_cal_handles[ADM_CUSTOM_TOP_CAL]);
	adm_top.payload_size = cal_block.cal_size;
	atomic_set(&this_adm.copp_stat[index], 0);
	ad_logd("%s: Sending ADM_CMD_ADD_TOPOLOGIES payload = 0x%x, size = %d\n",
		__func__, adm_top.payload_addr_lsw,
		adm_top.payload_size);
	result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_top);
	if (result < 0) {
		ad_loge("%s: Set topologies failed port = 0x%x payload = 0x%pa\n",
			__func__, port_id, &cal_block.cal_paddr);
		goto done;
	}
	/* Wait for the callback */
	result = wait_event_timeout(this_adm.wait[index],
		atomic_read(&this_adm.copp_stat[index]),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!result) {
		ad_loge("%s: Set topologies timed out port = 0x%x, payload = 0x%pa\n",
			__func__, port_id, &cal_block.cal_paddr);
		goto done;
	}
done:
	return;
}
/*
 * Send one pre-mapped calibration block out-of-band to the COPP open
 * on @port_id (legacy or low-latency COPP chosen by @perf_mode) via
 * ADM_CMD_SET_PP_PARAMS_V5, and wait up to TIMEOUT_MS for the ack.
 * Returns 0 on success or the ack/timeout; -EINVAL when there is no
 * cal to send or the command fails.  A bad port index returns 0
 * (treated as nothing to do).
 */
static int send_adm_cal_block(int port_id, struct acdb_cal_block *aud_cal,
			      int perf_mode)
{
	s32 result = 0;
	struct adm_cmd_set_pp_params_v5 adm_params;
	int index = afe_get_port_index(port_id);
	if (index < 0 || index >= AFE_MAX_PORTS) {
		ad_loge("%s: invalid port idx %d portid %#x\n",
			__func__, index, port_id);
		return 0;
	}
	ad_logd("%s: Port id %#x, index %d\n", __func__, port_id, index);
	if (!aud_cal || aud_cal->cal_size == 0) {
		ad_logd("%s: No ADM cal to send for port_id = %#x!\n",
			__func__, port_id);
		result = -EINVAL;
		goto done;
	}
	adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	adm_params.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
		sizeof(adm_params));
	adm_params.hdr.src_svc = APR_SVC_ADM;
	adm_params.hdr.src_domain = APR_DOMAIN_APPS;
	adm_params.hdr.src_port = port_id;
	adm_params.hdr.dest_svc = APR_SVC_ADM;
	adm_params.hdr.dest_domain = APR_DOMAIN_ADSP;
	if (perf_mode == LEGACY_PCM_MODE)
		adm_params.hdr.dest_port =
			atomic_read(&this_adm.copp_id[index]);
	else
		adm_params.hdr.dest_port =
			atomic_read(&this_adm.copp_low_latency_id[index]);
	adm_params.hdr.token = port_id;
	adm_params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
	/* Out-of-band payload: physical address + shared-mem handle. */
	adm_params.payload_addr_lsw = lower_32_bits(aud_cal->cal_paddr);
	adm_params.payload_addr_msw = upper_32_bits(aud_cal->cal_paddr);
	adm_params.mem_map_handle = atomic_read(&this_adm.mem_map_cal_handles[
		atomic_read(&this_adm.mem_map_cal_index)]);
	adm_params.payload_size = aud_cal->cal_size;
	atomic_set(&this_adm.copp_stat[index], 0);
	ad_logd("%s: Sending SET_PARAMS payload = 0x%x, size = %d\n",
		__func__, adm_params.payload_addr_lsw,
		adm_params.payload_size);
	result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params);
	if (result < 0) {
		ad_loge("%s: Set params failed port = %#x payload = 0x%pa\n",
			__func__, port_id, &aud_cal->cal_paddr);
		result = -EINVAL;
		goto done;
	}
	/* Wait for the callback */
	result = wait_event_timeout(this_adm.wait[index],
		atomic_read(&this_adm.copp_stat[index]),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!result) {
		ad_loge("%s: Set params timed out port = %#x, payload = 0x%pa\n",
			__func__, port_id, &aud_cal->cal_paddr);
		result = -EINVAL;
		goto done;
	}
	result = 0;
done:
	return result;
}
/*
 * Fetch and send both the audproc (device) and audvol (volume)
 * calibration for @path to the COPP on @port_id.  Each block is
 * (re)mapped into DSP shared memory when its address changed or it
 * grew past the previously mapped size; the old mapping is released
 * first.  Failures are logged and tolerated — calibration is
 * best-effort.
 */
static void send_adm_cal(int port_id, int path, int perf_mode)
{
	int result = 0;
	s32 acdb_path;
	struct acdb_cal_block aud_cal;
	int size;
	ad_logd("%s\n", __func__);
	/* Maps audio_dev_ctrl path definition to ACDB definition */
	acdb_path = path - 1;
	if (acdb_path == TX_CAL)
		size = 4096 * 4;
	else
		size = 4096;
	ad_logd("%s: Sending audproc cal\n", __func__);
	get_audproc_cal(acdb_path, &aud_cal);
	/* map & cache buffers used */
	atomic_set(&this_adm.mem_map_cal_index, acdb_path);
	if (((this_adm.mem_addr_audproc[acdb_path].cal_paddr !=
		aud_cal.cal_paddr) && (aud_cal.cal_size > 0)) ||
		(aud_cal.cal_size >
		this_adm.mem_addr_audproc[acdb_path].cal_size)) {
		if (this_adm.mem_addr_audproc[acdb_path].cal_paddr != 0)
			adm_memory_unmap_regions(port_id);
		result = adm_memory_map_regions(port_id, &aud_cal.cal_paddr,
						0, &size, 1);
		if (result < 0) {
			ad_loge("ADM audproc mmap did not work! path = %d, addr = 0x%pa, size = %zd\n",
				acdb_path, &aud_cal.cal_paddr,
				aud_cal.cal_size);
		} else {
			this_adm.mem_addr_audproc[acdb_path].cal_paddr =
				aud_cal.cal_paddr;
			this_adm.mem_addr_audproc[acdb_path].cal_size = size;
		}
	}
	if (!send_adm_cal_block(port_id, &aud_cal, perf_mode))
		ad_logd("%s: Audproc cal sent for port id: %#x, path %d\n",
			__func__, port_id, acdb_path);
	else
		ad_logd("%s: Audproc cal not sent for port id: %#x, path %d\n",
			__func__, port_id, acdb_path);
	ad_logd("%s: Sending audvol cal\n", __func__);
	get_audvol_cal(acdb_path, &aud_cal);
	/* map & cache buffers used; audvol handles live after the
	 * audproc slots in mem_map_cal_handles[]. */
	atomic_set(&this_adm.mem_map_cal_index,
		(acdb_path + MAX_AUDPROC_TYPES));
	if (((this_adm.mem_addr_audvol[acdb_path].cal_paddr !=
		aud_cal.cal_paddr) && (aud_cal.cal_size > 0)) ||
		(aud_cal.cal_size >
		this_adm.mem_addr_audvol[acdb_path].cal_size)) {
		if (this_adm.mem_addr_audvol[acdb_path].cal_paddr != 0)
			adm_memory_unmap_regions(port_id);
		result = adm_memory_map_regions(port_id, &aud_cal.cal_paddr,
						0, &size, 1);
		if (result < 0) {
			ad_loge("ADM audvol mmap did not work! path = %d, addr = 0x%pa, size = %zd\n",
				acdb_path, &aud_cal.cal_paddr,
				aud_cal.cal_size);
		} else {
			this_adm.mem_addr_audvol[acdb_path].cal_paddr =
				aud_cal.cal_paddr;
			this_adm.mem_addr_audvol[acdb_path].cal_size = size;
		}
	}
	if (!send_adm_cal_block(port_id, &aud_cal, perf_mode))
		ad_logd("%s: Audvol cal sent for port id: %#x, path %d\n",
			__func__, port_id, acdb_path);
	else
		ad_logd("%s: Audvol cal not sent for port id: %#x, path %d\n",
			__func__, port_id, acdb_path);
}
/*
 * Map an RTAC calibration buffer into DSP shared memory and record
 * the returned handle in @cal_block->map_data.map_handle.  Uses
 * PRIMARY_I2S_RX only because the map machinery needs some valid port
 * for its wait/wake bookkeeping.  Returns 0 on success, -EINVAL on
 * bad input, or the mapping error.
 */
int adm_map_rtac_block(struct rtac_cal_block_data *cal_block)
{
	int result = 0;
	ad_logd("%s\n", __func__);
	if (cal_block == NULL) {
		ad_loge("%s: cal_block is NULL!\n",
			__func__);
		result = -EINVAL;
		goto done;
	}
	if (cal_block->cal_data.paddr == 0) {
		ad_logd("%s: No address to map!\n",
			__func__);
		result = -EINVAL;
		goto done;
	}
	if (cal_block->map_data.map_size == 0) {
		ad_logd("%s: map size is 0!\n",
			__func__);
		result = -EINVAL;
		goto done;
	}
	/* valid port ID needed for callback use primary I2S */
	atomic_set(&this_adm.mem_map_cal_index, ADM_RTAC);
	result = adm_memory_map_regions(PRIMARY_I2S_RX,
			&cal_block->cal_data.paddr, 0,
			&cal_block->map_data.map_size, 1);
	if (result < 0) {
		ad_loge("%s: RTAC mmap did not work! addr = 0x%pa, size = %d\n",
			__func__, &cal_block->cal_data.paddr,
			cal_block->map_data.map_size);
		goto done;
	}
	/* adm_callback stored the handle at the ADM_RTAC slot. */
	cal_block->map_data.map_handle = atomic_read(
		&this_adm.mem_map_cal_handles[ADM_RTAC]);
done:
	return result;
}
/*
 * adm_unmap_rtac_block - unmap the RTAC calibration buffer.
 *
 * A NULL or zero handle is treated as "nothing mapped" and succeeds.
 * If the caller's handle disagrees with the cached one, the caller's
 * handle wins and is used for the unmap.  On success both the cached
 * handle and *mem_map_handle are cleared.
 */
int adm_unmap_rtac_block(uint32_t *mem_map_handle)
{
	int result = 0;

	ad_logd("%s\n", __func__);

	if (mem_map_handle == NULL) {
		ad_logd("%s: Map handle is NULL, nothing to unmap\n",
			__func__);
		return result;
	}

	if (*mem_map_handle == 0) {
		ad_logd("%s: Map handle is 0, nothing to unmap\n",
			__func__);
		return result;
	}

	if (*mem_map_handle != atomic_read(
			&this_adm.mem_map_cal_handles[ADM_RTAC])) {
		ad_loge("%s: Map handles do not match! Unmapping RTAC, RTAC map 0x%x, ADM map 0x%x\n",
			__func__, *mem_map_handle, atomic_read(
			&this_adm.mem_map_cal_handles[ADM_RTAC]));

		/* if mismatch use handle passed in to unmap */
		atomic_set(&this_adm.mem_map_cal_handles[ADM_RTAC],
			   *mem_map_handle);
	}

	/* valid port ID needed for callback use primary I2S */
	atomic_set(&this_adm.mem_map_cal_index, ADM_RTAC);
	result = adm_memory_unmap_regions(PRIMARY_I2S_RX);
	if (result < 0) {
		ad_logd("%s: adm_memory_unmap_regions failed, error %d\n",
			__func__, result);
		return result;
	}

	atomic_set(&this_adm.mem_map_cal_handles[ADM_RTAC], 0);
	*mem_map_handle = 0;
	return result;
}
/*
 * adm_unmap_cal_blocks - tear down every cached calibration mapping.
 *
 * Walks all ADM calibration slots; for each slot with a live map
 * handle, clears the corresponding cached address bookkeeping
 * (audproc, audvol or custom-topology flag) and unmaps the region.
 * Continues past individual failures and returns the last error seen
 * (0 when everything unmapped cleanly).
 */
int adm_unmap_cal_blocks(void)
{
	int i;
	int result = 0;

	for (i = 0; i < ADM_MAX_CAL_TYPES; i++) {
		int result2;

		if (atomic_read(&this_adm.mem_map_cal_handles[i]) == 0)
			continue;

		if (i <= ADM_TX_AUDPROC_CAL) {
			this_adm.mem_addr_audproc[i].cal_paddr = 0;
			this_adm.mem_addr_audproc[i].cal_size = 0;
		} else if (i <= ADM_TX_AUDVOL_CAL) {
			/* audvol slots are offset by the audproc types */
			this_adm.mem_addr_audvol
				[i - ADM_RX_AUDVOL_CAL].cal_paddr = 0;
			this_adm.mem_addr_audvol
				[i - ADM_RX_AUDVOL_CAL].cal_size = 0;
		} else if (i == ADM_CUSTOM_TOP_CAL) {
			/* force the custom topology to be re-sent later */
			this_adm.set_custom_topology = 1;
		} else {
			continue;
		}

		/* valid port ID needed for callback use primary I2S */
		atomic_set(&this_adm.mem_map_cal_index, i);
		result2 = adm_memory_unmap_regions(PRIMARY_I2S_RX);
		if (result2 < 0) {
			ad_loge("%s: adm_memory_unmap_regions failed, err %d\n",
				__func__, result2);
			result = result2;
		} else {
			atomic_set(&this_adm.mem_map_cal_handles[i], 0);
		}
	}
	return result;
}
/*
 * adm_connect_afe_port - connect an AFE port to a stream session via
 * ADM_CMD_CONNECT_AFE_PORT_V5 and wait for the ADSP acknowledgement.
 *
 * @mode:       connection mode, copied verbatim into the command payload
 * @session_id: session to attach the port to
 * @port_id:    virtual AFE port id; converted and validated below
 *
 * Returns 0 on success, -ENODEV for an invalid port or failed APR
 * registration, -EINVAL when sending the command or waiting for its
 * acknowledgement fails.  On success the per-port copp reference
 * count is incremented.
 */
int adm_connect_afe_port(int mode, int session_id, int port_id)
{
	struct adm_cmd_connect_afe_port_v5 cmd;
	int ret = 0;
	int index;

	ad_logd("%s: port %d session id:%d mode:%d\n", __func__,
		port_id, session_id, mode);

	port_id = afe_convert_virtual_to_portid(port_id);
	if (afe_validate_port(port_id) < 0) {
		ad_loge("%s port idi[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	/* Lazily register with APR on first use. */
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			ad_loge("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	index = afe_get_port_index(port_id);
	ad_logd("%s: Port ID %#x, index %d\n", __func__, port_id, index);

	cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	cmd.hdr.pkt_size = sizeof(cmd);
	cmd.hdr.src_svc = APR_SVC_ADM;
	cmd.hdr.src_domain = APR_DOMAIN_APPS;
	cmd.hdr.src_port = port_id;
	cmd.hdr.dest_svc = APR_SVC_ADM;
	cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
	cmd.hdr.dest_port = port_id;
	cmd.hdr.token = port_id;	/* echoed back in the APR response */
	cmd.hdr.opcode = ADM_CMD_CONNECT_AFE_PORT_V5;

	cmd.mode = mode;
	cmd.session_id = session_id;
	cmd.afe_port_id = port_id;

	/* copp_stat is cleared here; the APR callback is expected to set
	 * it when the response arrives — TODO confirm against adm_callback */
	atomic_set(&this_adm.copp_stat[index], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&cmd);
	if (ret < 0) {
		ad_loge("%s:ADM enable for port %#x failed\n",
				__func__, port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	/* Wait for the callback with copp id */
	ret = wait_event_timeout(this_adm.wait[index],
		atomic_read(&this_adm.copp_stat[index]),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		ad_loge("%s ADM connect AFE failed for port %#x\n", __func__,
			port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	atomic_inc(&this_adm.copp_cnt[index]);
	return 0;

fail_cmd:
	return ret;
}
/*
 * adm_open - open (or take a reference on) an ADM COPP for an AFE port.
 *
 * @port_id:         virtual AFE port id; converted and validated below
 * @path:            1 = RX path, other values select the TX matrix paths
 * @rate:            sample rate in Hz (forced to 16000 for the ECNS/
 *                   fluence voice topologies, and to
 *                   ULL_SUPPORTED_SAMPLE_RATE in ULL mode)
 * @channel_mode:    channel count (1-6 or 8); selects the default
 *                   channel map.  7 channels is rejected with -EINVAL.
 * @topology:        requested COPP topology (overridden per perf mode)
 * @perf_mode:       LEGACY_PCM_MODE, LOW_LATENCY_PCM_MODE or
 *                   ULTRA_LOW_LATENCY_PCM_MODE
 * @bits_per_sample: PCM bit width
 *
 * A new COPP is created (ADM_CMD_DEVICE_OPEN_V5) only when the
 * per-mode reference count for this port is zero; otherwise only the
 * reference count is incremented.
 *
 * Returns 0 on success, -ENODEV for invalid port / failed APR
 * registration, -EINVAL on command failure, timeout or an unsupported
 * channel count.
 */
int adm_open(int port_id, int path, int rate, int channel_mode, int topology,
				int perf_mode, uint16_t bits_per_sample)
{
	struct adm_cmd_device_open_v5 open;
	int ret = 0;
	int index;
	int tmp_port = q6audio_get_port_id(port_id);

	ad_logd("%s: port %#x path:%d rate:%d mode:%d perf_mode:%d\n",
		 __func__, port_id, path, rate, channel_mode, perf_mode);

	port_id = q6audio_convert_virtual_to_portid(port_id);
	if (q6audio_validate_port(port_id) < 0) {
		ad_loge("%s port idi[%#x] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = q6audio_get_port_index(port_id);
	ad_logd("%s: Port ID %#x, index %d\n", __func__, port_id, index);

	/* Lazily register with APR on first use. */
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			ad_loge("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	if (perf_mode == LEGACY_PCM_MODE) {
		atomic_set(&this_adm.copp_perf_mode[index], 0);
		send_adm_custom_topology(port_id);
	} else {
		atomic_set(&this_adm.copp_perf_mode[index], 1);
	}

	/* Create a COPP if port id are not enabled */
	if ((perf_mode == LEGACY_PCM_MODE &&
		(atomic_read(&this_adm.copp_cnt[index]) == 0)) ||
		(perf_mode != LEGACY_PCM_MODE &&
		(atomic_read(&this_adm.copp_low_latency_cnt[index]) == 0))) {
		ad_logd("%s:opening ADM: perf_mode: %d\n", __func__,
			perf_mode);
		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		open.hdr.pkt_size = sizeof(open);
		open.hdr.src_svc = APR_SVC_ADM;
		open.hdr.src_domain = APR_DOMAIN_APPS;
		open.hdr.src_port = tmp_port;
		open.hdr.dest_svc = APR_SVC_ADM;
		open.hdr.dest_domain = APR_DOMAIN_ADSP;
		open.hdr.dest_port = tmp_port;
		open.hdr.token = port_id;	/* echoed back in the callback */
		open.hdr.opcode = ADM_CMD_DEVICE_OPEN_V5;

		if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE)
			open.flags = ADM_ULTRA_LOW_LATENCY_DEVICE_SESSION;
		else if (perf_mode == LOW_LATENCY_PCM_MODE)
			open.flags = ADM_LOW_LATENCY_DEVICE_SESSION;
		else
			open.flags = ADM_LEGACY_DEVICE_SESSION;

		open.mode_of_operation = path;
		open.endpoint_id_1 = tmp_port;

		if (this_adm.ec_ref_rx == -1) {
			open.endpoint_id_2 = 0xFFFF;
		} else if (this_adm.ec_ref_rx && (path != 1)) {
			open.endpoint_id_2 = this_adm.ec_ref_rx;
			this_adm.ec_ref_rx = -1;
		} else {
			/*
			 * Bug fix: endpoint_id_2 was previously left
			 * uninitialized (stack garbage sent to the DSP)
			 * when an EC ref port was set but path == 1, or
			 * when ec_ref_rx == 0.  Default to the "no
			 * second endpoint" value instead.
			 */
			open.endpoint_id_2 = 0xFFFF;
		}

		open.topology_id = topology;
		/* The ECNS/fluence voice topologies only run at 16 kHz. */
		if ((open.topology_id == VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
			(open.topology_id == VPM_TX_DM_FLUENCE_COPP_TOPOLOGY) ||
			(open.topology_id == VPM_TX_DM_RFECNS_COPP_TOPOLOGY))
			rate = 16000;

		if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE) {
			open.topology_id = NULL_COPP_TOPOLOGY;
			rate = ULL_SUPPORTED_SAMPLE_RATE;
		} else if (perf_mode == LOW_LATENCY_PCM_MODE) {
			/* These topologies are not supported low-latency. */
			if ((open.topology_id == DOLBY_ADM_COPP_TOPOLOGY_ID) ||
			    (open.topology_id == SRS_TRUMEDIA_TOPOLOGY_ID))
				open.topology_id = DEFAULT_COPP_TOPOLOGY;
		}

		open.dev_num_channel = channel_mode & 0x00FF;
		open.bit_width = bits_per_sample;
		WARN_ON(perf_mode == ULTRA_LOW_LATENCY_PCM_MODE &&
			(rate != 48000));
		open.sample_rate = rate;
		memset(open.dev_channel_mapping, 0, 8);

		/* Default channel map for each supported channel count. */
		if (channel_mode == 1) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FC;
		} else if (channel_mode == 2) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
		} else if (channel_mode == 3) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_FC;
		} else if (channel_mode == 4) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_RB;
			open.dev_channel_mapping[3] = PCM_CHANNEL_LB;
		} else if (channel_mode == 5) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_FC;
			open.dev_channel_mapping[3] = PCM_CHANNEL_LB;
			open.dev_channel_mapping[4] = PCM_CHANNEL_RB;
		} else if (channel_mode == 6) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_LFE;
			open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
			open.dev_channel_mapping[4] = PCM_CHANNEL_LS;
			open.dev_channel_mapping[5] = PCM_CHANNEL_RS;
		} else if (channel_mode == 8) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_LFE;
			open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
			open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
			open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
			open.dev_channel_mapping[6] = PCM_CHANNEL_FLC;
			open.dev_channel_mapping[7] = PCM_CHANNEL_FRC;
		} else {
			ad_loge("%s invalid num_chan %d\n", __func__,
					channel_mode);
			return -EINVAL;
		}

		/* A caller-supplied multichannel map overrides the default. */
		if ((open.dev_num_channel > 2) &&
			multi_ch_map.set_channel_map)
			memcpy(open.dev_channel_mapping,
				multi_ch_map.channel_mapping,
				PCM_FORMAT_MAX_NUM_CHANNEL);

		ad_logd("%s: port_id=%#x rate=%d topology_id=0x%X\n",
			__func__, open.endpoint_id_1, open.sample_rate,
			open.topology_id);

		atomic_set(&this_adm.copp_stat[index], 0);

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
		if (ret < 0) {
			ad_loge("%s:ADM enable for port %#x for[%d] failed\n",
						__func__, tmp_port, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
		/* Wait for the callback with copp id */
		ret = wait_event_timeout(this_adm.wait[index],
			atomic_read(&this_adm.copp_stat[index]),
			msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			ad_loge("%s ADM open failed for port %#x for [%d]\n",
						__func__, tmp_port, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}

	if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE ||
			perf_mode == LOW_LATENCY_PCM_MODE) {
		atomic_inc(&this_adm.copp_low_latency_cnt[index]);
		ad_logd("%s: index: %d coppid: %d", __func__, index,
			atomic_read(&this_adm.copp_low_latency_id[index]));
	} else {
		atomic_inc(&this_adm.copp_cnt[index]);
		ad_logd("%s: index: %d coppid: %d", __func__, index,
			atomic_read(&this_adm.copp_id[index]));
	}
	return 0;

fail_cmd:
	return ret;
}
/*
 * adm_multi_ch_copp_open - legacy multi-channel entry point; simply
 * forwards to adm_open(), which now handles all channel counts itself.
 */
int adm_multi_ch_copp_open(int port_id, int path, int rate, int channel_mode,
			int topology, int perf_mode, uint16_t bits_per_sample)
{
	return adm_open(port_id, path, rate, channel_mode,
			topology, perf_mode, bits_per_sample);
}
/*
 * adm_matrix_map - route a session to a set of COPPs via
 * ADM_CMD_MATRIX_MAP_ROUTINGS_V5, then (outside ULL mode) send
 * calibration and register each COPP with RTAC.
 *
 * @session_id: session to route
 * @path:       0x1 selects the RX matrix, 0x2/0x3 the TX matrix
 * @num_copps:  number of entries in @port_id
 * @port_id:    array of virtual port ids; converted in place
 * @copp_id:    port whose index is used for status/wait bookkeeping
 * @perf_mode:  selects the legacy vs low-latency COPP id tables
 *
 * Returns negative on failure.  NOTE(review): on success this returns
 * the positive remaining-jiffies value from wait_event_timeout, not 0;
 * callers in this file only test for ret < 0.
 */
int adm_matrix_map(int session_id, int path, int num_copps,
			unsigned int *port_id, int copp_id, int perf_mode)
{
	struct adm_cmd_matrix_map_routings_v5 *route;
	struct adm_session_map_node_v5 *node;
	uint16_t *copps_list;
	int cmd_size = 0;
	int ret = 0, i = 0;
	void *payload = NULL;
	void *matrix_map = NULL;

	/* Assumes port_ids have already been validated during adm_open */
	int index = q6audio_get_port_index(copp_id);

	if (index < 0 || index >= AFE_MAX_PORTS) {
		ad_loge("%s: invalid port idx %d token %d\n",
			__func__, index, copp_id);
		return 0;
	}

	/* Routing header + one session node + one uint16 per COPP
	 * (allocated as uint32 per COPP for padding/alignment slack). */
	cmd_size = (sizeof(struct adm_cmd_matrix_map_routings_v5) +
			sizeof(struct adm_session_map_node_v5) +
			(sizeof(uint32_t) * num_copps));
	matrix_map = kzalloc(cmd_size, GFP_KERNEL);
	if (matrix_map == NULL) {
		ad_loge("%s: Mem alloc failed\n", __func__);
		ret = -EINVAL;
		return ret;
	}
	route = (struct adm_cmd_matrix_map_routings_v5 *)matrix_map;

	ad_logd("%s: session 0x%x path:%d num_copps:%d port_id[0]:%#x coppid[%d]\n",
		 __func__, session_id, path, num_copps, port_id[0], copp_id);

	route->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	route->hdr.pkt_size = cmd_size;
	route->hdr.src_svc = 0;
	route->hdr.src_domain = APR_DOMAIN_APPS;
	route->hdr.src_port = copp_id;
	route->hdr.dest_svc = APR_SVC_ADM;
	route->hdr.dest_domain = APR_DOMAIN_ADSP;
	if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE ||
		perf_mode == LOW_LATENCY_PCM_MODE) {
		route->hdr.dest_port =
			atomic_read(&this_adm.copp_low_latency_id[index]);
	} else {
		route->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	}
	route->hdr.token = copp_id;
	route->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
	route->num_sessions = 1;

	switch (path) {
	case 0x1:
		route->matrix_id = ADM_MATRIX_ID_AUDIO_RX;
		break;
	case 0x2:
	case 0x3:
		route->matrix_id = ADM_MATRIX_ID_AUDIO_TX;
		break;
	default:
		ad_loge("%s: Wrong path set[%d]\n", __func__, path);
		break;
	}

	/* The session node and the copp list follow the routing header. */
	payload = ((u8 *)matrix_map +
			sizeof(struct adm_cmd_matrix_map_routings_v5));
	node = (struct adm_session_map_node_v5 *)payload;
	node->session_id = session_id;
	node->num_copps = num_copps;

	payload = (u8 *)node + sizeof(struct adm_session_map_node_v5);
	copps_list = (uint16_t *)payload;
	for (i = 0; i < num_copps; i++) {
		int tmp;

		port_id[i] = q6audio_convert_virtual_to_portid(port_id[i]);
		tmp = q6audio_get_port_index(port_id[i]);
		if (tmp < 0 || tmp >= AFE_MAX_PORTS)
			continue;	/* entry stays 0 from kzalloc */

		if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE ||
			perf_mode == LOW_LATENCY_PCM_MODE)
			copps_list[i] =
			atomic_read(&this_adm.copp_low_latency_id[tmp]);
		else
			copps_list[i] =
				atomic_read(&this_adm.copp_id[tmp]);
		/* Bug fix: i and port_id[i] were passed in swapped order
		 * relative to the format string. */
		ad_logd("%s: port_id[%#x]: %d, index: %d act coppid[0x%x]\n",
			__func__, port_id[i], i, tmp, copps_list[i]);
	}
	atomic_set(&this_adm.copp_stat[index], 0);

	ret = apr_send_pkt(this_adm.apr, (uint32_t *)matrix_map);
	if (ret < 0) {
		ad_loge("%s: ADM routing for port %#x failed\n",
					__func__, port_id[0]);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = wait_event_timeout(this_adm.wait[index],
				atomic_read(&this_adm.copp_stat[index]),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		ad_loge("%s: ADM cmd Route failed for port %#x\n",
					__func__, port_id[0]);
		ret = -EINVAL;
		goto fail_cmd;
	}

	if (perf_mode != ULTRA_LOW_LATENCY_PCM_MODE) {
		/* Push calibration first, then register with RTAC. */
		for (i = 0; i < num_copps; i++)
			send_adm_cal(port_id[i], path, perf_mode);

		for (i = 0; i < num_copps; i++) {
			/* renamed from copp_id: no longer shadows the
			 * copp_id parameter */
			int tmp, port_copp_id;

			tmp = afe_get_port_index(port_id[i]);
			if (tmp >= 0 && tmp < AFE_MAX_PORTS) {
				if (perf_mode == LEGACY_PCM_MODE)
					port_copp_id = atomic_read(
						&this_adm.copp_id[tmp]);
				else
					port_copp_id = atomic_read(
					   &this_adm.copp_low_latency_id[tmp]);
				rtac_add_adm_device(port_id[i],
						    port_copp_id, path,
						    session_id);
				ad_logd("%s, copp_id: %d\n",
					__func__, port_copp_id);
			} else
				ad_logd("%s: Invalid port index %d",
					__func__, tmp);
		}
	}

fail_cmd:
	kfree(matrix_map);
	return ret;
}
/*
 * adm_memory_map_regions - map shared-memory buffers into the ADSP via
 * ADM_CMD_SHARED_MEM_MAP_REGIONS and wait for the acknowledgement.
 *
 * @port_id:    virtual port id; used for index/dest-port lookup and as
 *              the APR token
 * @buf_add:    array of @bufcnt physical buffer addresses
 * @mempool_id: unused here; the command always selects
 *              ADSP_MEMORY_MAP_SHMEM8_4K_POOL — TODO confirm intent
 * @bufsz:      array of @bufcnt buffer sizes in bytes
 * @bufcnt:     number of regions to map
 *
 * Returns negative on failure (-ENODEV/-ENOMEM/-EINVAL).
 * NOTE(review): on success this returns the positive remaining-jiffies
 * value from wait_event_timeout, not 0; callers only test for < 0.
 */
int adm_memory_map_regions(int port_id,
		phys_addr_t *buf_add, uint32_t mempool_id,
		uint32_t *bufsz, uint32_t bufcnt)
{
	struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
	struct avs_shared_map_region_payload *mregions = NULL;
	void *mmap_region_cmd = NULL;
	void *payload = NULL;
	int ret = 0;
	int i = 0;
	int cmd_size = 0;
	int index = 0;

	ad_logd("%s\n", __func__);

	/* Lazily register with APR on first use. */
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			ad_loge("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	port_id = q6audio_convert_virtual_to_portid(port_id);
	if (q6audio_validate_port(port_id) < 0) {
		ad_loge("%s port id[%#x] is invalid\n", __func__, port_id);
		return -ENODEV;
	}
	index = q6audio_get_port_index(port_id);

	/* Fixed command header plus one region descriptor per buffer. */
	cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
			+ sizeof(struct avs_shared_map_region_payload)
			* bufcnt;

	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (!mmap_region_cmd) {
		ad_loge("%s: allocate mmap_region_cmd failed\n", __func__);
		return -ENOMEM;
	}
	mmap_regions = (struct avs_cmd_shared_mem_map_regions *)mmap_region_cmd;
	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
								APR_PKT_VER);
	mmap_regions->hdr.pkt_size = cmd_size;
	mmap_regions->hdr.src_port = 0;
	mmap_regions->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	mmap_regions->hdr.token = port_id;
	mmap_regions->hdr.opcode = ADM_CMD_SHARED_MEM_MAP_REGIONS;
	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL & 0x00ff;
	mmap_regions->num_regions = bufcnt & 0x00ff;
	mmap_regions->property_flag = 0x00;

	ad_logd("%s: map_regions->num_regions = %d\n", __func__,
		mmap_regions->num_regions);

	/* Region descriptors start right after the fixed header. */
	payload = ((u8 *) mmap_region_cmd +
		sizeof(struct avs_cmd_shared_mem_map_regions));
	mregions = (struct avs_shared_map_region_payload *)payload;
	for (i = 0; i < bufcnt; i++) {
		mregions->shm_addr_lsw = lower_32_bits(buf_add[i]);
		mregions->shm_addr_msw = upper_32_bits(buf_add[i]);
		mregions->mem_size_bytes = bufsz[i];
		++mregions;
	}

	/* copp_stat is cleared here; the APR callback is expected to set
	 * it when the map response arrives — TODO confirm */
	atomic_set(&this_adm.copp_stat[index], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
	if (ret < 0) {
		ad_loge("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
					mmap_regions->hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait[index],
			atomic_read(&this_adm.copp_stat[index]), 5 * HZ);
	if (!ret) {
		ad_loge("%s: timeout. waited for memory_map\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
fail_cmd:
	kfree(mmap_region_cmd);
	return ret;
}
/*
 * adm_memory_unmap_regions - unmap a previously mapped shared-memory
 * region via ADM_CMD_SHARED_MEM_UNMAP_REGIONS.
 *
 * The map handle to unmap is selected indirectly through
 * this_adm.mem_map_cal_index, which callers must set before calling.
 *
 * @port_id: virtual port id; used for index/dest-port lookup and token
 *
 * Returns negative on failure (-EINVAL/-ENODEV).
 * NOTE(review): on success this returns the positive remaining-jiffies
 * value from wait_event_timeout, not 0; callers only test for < 0.
 */
int adm_memory_unmap_regions(int32_t port_id)
{
	struct avs_cmd_shared_mem_unmap_regions unmap_regions;
	int ret = 0;
	int index = 0;

	ad_logd("%s\n", __func__);

	if (this_adm.apr == NULL) {
		ad_loge("%s APR handle NULL\n", __func__);
		return -EINVAL;
	}

	port_id = q6audio_convert_virtual_to_portid(port_id);
	if (q6audio_validate_port(port_id) < 0) {
		ad_loge("%s port idi[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = q6audio_get_port_index(port_id);

	unmap_regions.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
							APR_PKT_VER);
	unmap_regions.hdr.pkt_size = sizeof(unmap_regions);
	unmap_regions.hdr.src_port = 0;
	unmap_regions.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	unmap_regions.hdr.token = port_id;
	unmap_regions.hdr.opcode = ADM_CMD_SHARED_MEM_UNMAP_REGIONS;
	/* Handle chosen via the caller-set mem_map_cal_index slot. */
	unmap_regions.mem_map_handle = atomic_read(&this_adm.
		mem_map_cal_handles[atomic_read(&this_adm.mem_map_cal_index)]);
	atomic_set(&this_adm.copp_stat[index], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) &unmap_regions);
	if (ret < 0) {
		ad_loge("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
					unmap_regions.hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait[index],
				 atomic_read(&this_adm.copp_stat[index]),
				 5 * HZ);
	if (!ret) {
		ad_loge("%s: timeout. waited for memory_unmap index %d\n",
			__func__, index);
		ret = -EINVAL;
		goto fail_cmd;
	} else {
		ad_logd("%s: Unmap handle 0x%x succeeded\n", __func__,
			unmap_regions.mem_map_handle);
	}
fail_cmd:
	return ret;
}
#ifdef CONFIG_RTAC
/*
 * adm_get_copp_id - return the COPP id cached for a port index,
 * falling back to the low-latency COPP when the legacy slot still
 * holds RESET_COPP_ID.  Returns -EINVAL for a negative index.
 */
int adm_get_copp_id(int port_index)
{
	int id;

	ad_logd("%s\n", __func__);

	if (port_index < 0) {
		ad_loge("%s: invalid port_id = %d\n", __func__, port_index);
		return -EINVAL;
	}

	id = atomic_read(&this_adm.copp_id[port_index]);
	if (id == RESET_COPP_ID)
		id = atomic_read(
			&this_adm.copp_low_latency_id[port_index]);
	return id;
}

/*
 * adm_get_lowlatency_copp_id - return the low-latency COPP id cached
 * for a port index, or -EINVAL for a negative index.
 */
int adm_get_lowlatency_copp_id(int port_index)
{
	ad_logd("%s\n", __func__);

	if (port_index < 0) {
		ad_loge("%s: invalid port_id = %d\n", __func__, port_index);
		return -EINVAL;
	}

	return atomic_read(&this_adm.copp_low_latency_id[port_index]);
}
#else
/* Without RTAC support these lookups are unavailable. */
int adm_get_copp_id(int port_index)
{
	return -EINVAL;
}

int adm_get_lowlatency_copp_id(int port_index)
{
	return -EINVAL;
}
#endif /* #ifdef CONFIG_RTAC */
/*
 * adm_ec_ref_rx_id - record the RX port to use as the echo-reference
 * endpoint; the stored id is consumed (and reset to -1) by the next
 * adm_open() that creates a TX COPP.
 */
void adm_ec_ref_rx_id(int port_id)
{
	this_adm.ec_ref_rx = port_id;
	ad_logd("%s ec_ref_rx:%d", __func__, port_id);
}
/*
 * adm_close - drop one reference on a port's COPP for the given perf
 * mode; when the last reference for that mode goes away, send
 * ADM_CMD_DEVICE_CLOSE_V5 and reset the cached COPP id.
 *
 * @port_id:   virtual port id; converted and validated below
 * @perf_mode: selects the legacy vs low-latency bookkeeping tables
 *
 * Returns 0 on success (including a no-op close with a zero refcount,
 * which only logs an error), -EINVAL on validation/command/timeout
 * failure.
 */
int adm_close(int port_id, int perf_mode)
{
	struct apr_hdr close;

	int ret = 0;
	int index = 0;
	int copp_id = RESET_COPP_ID;

	port_id = q6audio_convert_virtual_to_portid(port_id);

	index = q6audio_get_port_index(port_id);
	if (q6audio_validate_port(port_id) < 0)
		return -EINVAL;

	ad_logd("%s port_id=%#x index %d perf_mode: %d\n", __func__, port_id,
		index, perf_mode);

	/* Drop one reference from the table matching perf_mode. */
	if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE ||
				perf_mode == LOW_LATENCY_PCM_MODE) {
		if (!(atomic_read(&this_adm.copp_low_latency_cnt[index]))) {
			ad_loge("%s: copp count for port[%#x]is 0\n", __func__,
				port_id);
			goto fail_cmd;
		}
		atomic_dec(&this_adm.copp_low_latency_cnt[index]);
	} else {
		if (!(atomic_read(&this_adm.copp_cnt[index]))) {
			ad_loge("%s: copp count for port[%#x]is 0\n", __func__,
				port_id);
			goto fail_cmd;
		}
		atomic_dec(&this_adm.copp_cnt[index]);
	}

	/* Only actually close the COPP when the refcount hit zero. */
	if ((perf_mode == LEGACY_PCM_MODE &&
		!(atomic_read(&this_adm.copp_cnt[index]))) ||
		((perf_mode != LEGACY_PCM_MODE) &&
		!(atomic_read(&this_adm.copp_low_latency_cnt[index])))) {
		ad_logd("%s:Closing ADM: perf_mode: %d\n", __func__,
				perf_mode);
		close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		close.pkt_size = sizeof(close);
		close.src_svc = APR_SVC_ADM;
		close.src_domain = APR_DOMAIN_APPS;
		close.src_port = port_id;
		close.dest_svc = APR_SVC_ADM;
		close.dest_domain = APR_DOMAIN_ADSP;
		if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE ||
				perf_mode == LOW_LATENCY_PCM_MODE)
			close.dest_port =
			     atomic_read(&this_adm.copp_low_latency_id[index]);
		else
			close.dest_port = atomic_read(&this_adm.copp_id[index]);
		close.token = port_id;	/* echoed back in the callback */
		close.opcode = ADM_CMD_DEVICE_CLOSE_V5;

		atomic_set(&this_adm.copp_stat[index], 0);

		/* Remember the id for RTAC removal below, then reset it. */
		if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE ||
				perf_mode == LOW_LATENCY_PCM_MODE) {
			copp_id = atomic_read(
				&this_adm.copp_low_latency_id[index]);
			ad_logd("%s:coppid %d portid=%#x index=%d coppcnt=%d\n",
				__func__,
				copp_id,
				port_id, index,
				atomic_read(
					&this_adm.copp_low_latency_cnt[index]));
			atomic_set(&this_adm.copp_low_latency_id[index],
				RESET_COPP_ID);
		} else {
			copp_id = atomic_read(&this_adm.copp_id[index]);
			ad_logd("%s:coppid %d portid=%#x index=%d coppcnt=%d\n",
				__func__,
				copp_id,
				port_id, index,
				atomic_read(&this_adm.copp_cnt[index]));
			atomic_set(&this_adm.copp_id[index],
				RESET_COPP_ID);
		}

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close);
		if (ret < 0) {
			ad_loge("%s ADM close failed\n", __func__);
			ret = -EINVAL;
			goto fail_cmd;
		}

		ret = wait_event_timeout(this_adm.wait[index],
				atomic_read(&this_adm.copp_stat[index]),
				msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			ad_loge("%s: ADM cmd Route failed for port %#x\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}

	if (perf_mode != ULTRA_LOW_LATENCY_PCM_MODE) {
		ad_logd("%s: remove adm device from rtac\n", __func__);
		/* copp_id is RESET_COPP_ID if no close was sent above. */
		rtac_remove_adm_device(port_id, copp_id);
	}

fail_cmd:
	return ret;
}
/*
 * adm_init - boot-time initialization of the shared ADM driver state:
 * no APR handle yet, custom topology pending, no EC reference port,
 * and every per-port slot reset to its idle value.
 */
static int __init adm_init(void)
{
	int port;

	this_adm.apr = NULL;
	this_adm.set_custom_topology = 1;
	this_adm.ec_ref_rx = -1;

	for (port = 0; port < AFE_MAX_PORTS; port++) {
		atomic_set(&this_adm.copp_id[port], RESET_COPP_ID);
		atomic_set(&this_adm.copp_low_latency_id[port], RESET_COPP_ID);
		atomic_set(&this_adm.copp_cnt[port], 0);
		atomic_set(&this_adm.copp_low_latency_cnt[port], 0);
		atomic_set(&this_adm.copp_stat[port], 0);
		atomic_set(&this_adm.copp_perf_mode[port], 0);
		init_waitqueue_head(&this_adm.wait[port]);
	}
	return 0;
}

device_initcall(adm_init);
|
EloYGomeZ/test_kernel_g620s
|
sound/soc/msm/qdsp6v2/q6adm.c
|
C
|
gpl-2.0
| 55,954
|
/*
* PROJECT: ReactOS i8042 (ps/2 keyboard-mouse controller) driver
* LICENSE: GPL - See COPYING in the top level directory
* FILE: drivers/input/i8042prt/pnp.c
* PURPOSE: IRP_MJ_PNP operations
* PROGRAMMERS: Copyright 2006-2007 Hervé Poussineau (hpoussin@reactos.org)
* Copyright 2008 Colin Finck (mail@colinfinck.de)
*/
/* INCLUDES ******************************************************************/
#include "i8042prt.h"
#include <debug.h>
/* FUNCTIONS *****************************************************************/
/* This is all pretty confusing. There's more than one way to
* disable/enable the keyboard. You can send KBD_ENABLE to the
* keyboard, and it will start scanning keys. Sending KBD_DISABLE
* will disable the key scanning but also reset the parameters to
* defaults.
*
* You can also send 0xAE to the controller for enabling the
* keyboard clock line and 0xAD for disabling it. Then it'll
* automatically get turned on at the next command. The last
* way is by modifying the bit that drives the clock line in the
* 'command byte' of the controller. This is almost, but not quite,
* the same as the AE/AD thing. The difference can be used to detect
* some really old broken keyboard controllers which I hope won't be
* necessary.
*
* We change the command byte, sending KBD_ENABLE/DISABLE seems to confuse
* some kvm switches.
*/
/*
 * Read the i8042 'command byte', clear the bits in FlagsToDisable,
 * set the bits in FlagsToEnable and write the result back.
 *
 * Returns FALSE if any controller read/write fails, TRUE on success.
 */
BOOLEAN
i8042ChangeMode(
	IN PPORT_DEVICE_EXTENSION DeviceExtension,
	IN UCHAR FlagsToDisable,
	IN UCHAR FlagsToEnable)
{
	UCHAR ControlByte;
	NTSTATUS Status;

	if (!i8042Write(DeviceExtension, DeviceExtension->ControlPort, KBD_READ_MODE))
	{
		WARN_(I8042PRT, "Can't read i8042 mode\n");
		return FALSE;
	}

	Status = i8042ReadDataWait(DeviceExtension, &ControlByte);
	if (!NT_SUCCESS(Status))
	{
		WARN_(I8042PRT, "No response after read i8042 mode\n");
		return FALSE;
	}

	/* Apply both masks in one step. */
	ControlByte = (UCHAR)((ControlByte & ~FlagsToDisable) | FlagsToEnable);

	if (!i8042Write(DeviceExtension, DeviceExtension->ControlPort, KBD_WRITE_MODE))
	{
		WARN_(I8042PRT, "Can't set i8042 mode\n");
		return FALSE;
	}

	if (!i8042Write(DeviceExtension, DeviceExtension->DataPort, ControlByte))
	{
		WARN_(I8042PRT, "Can't send i8042 mode\n");
		return FALSE;
	}

	return TRUE;
}
/*
 * Verify that an i8042 controller is present: inhibit both devices and
 * their interrupts, then run the controller self-test (CTRL_SELF_TEST,
 * expecting 0x55), retrying on KBD_RESEND up to the configured number
 * of resend iterations.
 *
 * Returns STATUS_SUCCESS, STATUS_IO_TIMEOUT on write failure,
 * STATUS_IO_DEVICE_ERROR on a bad response, or the read failure status.
 */
static NTSTATUS
i8042BasicDetect(
	IN PPORT_DEVICE_EXTENSION DeviceExtension)
{
	NTSTATUS Status;
	ULONG ResendIterations;
	UCHAR Value = 0;

	/* Don't enable keyboard and mouse interrupts, disable keyboard/mouse */
	i8042Flush(DeviceExtension);
	if (!i8042ChangeMode(DeviceExtension, CCB_KBD_INT_ENAB | CCB_MOUSE_INT_ENAB, CCB_KBD_DISAB | CCB_MOUSE_DISAB))
		return STATUS_IO_DEVICE_ERROR;

	i8042Flush(DeviceExtension);

	/* Issue a CTRL_SELF_TEST command to check if this is really an i8042 controller */
	ResendIterations = DeviceExtension->Settings.ResendIterations + 1;
	while (ResendIterations--)
	{
		if (!i8042Write(DeviceExtension, DeviceExtension->ControlPort, CTRL_SELF_TEST))
		{
			WARN_(I8042PRT, "Writing CTRL_SELF_TEST command failed\n");
			return STATUS_IO_TIMEOUT;
		}

		Status = i8042ReadDataWait(DeviceExtension, &Value);
		if (!NT_SUCCESS(Status))
		{
			WARN_(I8042PRT, "Failed to read CTRL_SELF_TEST response, status 0x%08lx\n", Status);
			return Status;
		}

		if (Value == KBD_SELF_TEST_OK)
		{
			INFO_(I8042PRT, "CTRL_SELF_TEST completed successfully!\n");
			break;
		}
		else if (Value == KBD_RESEND)
		{
			TRACE_(I8042PRT, "Resending...\n");
			/* Give the controller a moment before retrying. */
			KeStallExecutionProcessor(50);
		}
		else
		{
			WARN_(I8042PRT, "Got 0x%02x instead of 0x55\n", Value);
			return STATUS_IO_DEVICE_ERROR;
		}
	}

	return STATUS_SUCCESS;
}
static VOID
i8042DetectKeyboard(
IN PPORT_DEVICE_EXTENSION DeviceExtension)
{
NTSTATUS Status;
/* Set LEDs (that is not fatal if some error occurs) */
Status = i8042SynchWritePort(DeviceExtension, 0, KBD_CMD_SET_LEDS, TRUE);
if (NT_SUCCESS(Status))
{
Status = i8042SynchWritePort(DeviceExtension, 0, 0, TRUE);
if (!NT_SUCCESS(Status))
{
WARN_(I8042PRT, "Can't finish SET_LEDS (0x%08lx)\n", Status);
return;
}
}
else
{
WARN_(I8042PRT, "Warning: can't write SET_LEDS (0x%08lx)\n", Status);
}
/* Turn on translation and SF (Some machines don't reboot if SF is not set, see ReactOS bug CORE-1713) */
if (!i8042ChangeMode(DeviceExtension, 0, CCB_TRANSLATE | CCB_SYSTEM_FLAG))
return;
/*
* We used to send a KBD_LINE_TEST (0xAB) command, but on at least HP
* Pavilion notebooks the response to that command was incorrect.
* So now we just assume that a keyboard is attached.
*/
DeviceExtension->Flags |= KEYBOARD_PRESENT;
INFO_(I8042PRT, "Keyboard detected\n");
}
/*
 * Probe for a PS/2 mouse: run the controller's mouse line test, then
 * reset the mouse and check for the ACK/0xAA/0x00 reply sequence
 * (tolerating chipset quirks documented inline).  Sets MOUSE_PRESENT
 * in DeviceExtension->Flags on success; on any failure, re-runs the
 * controller self-test to recover the keyboard.
 */
static VOID
i8042DetectMouse(
	IN PPORT_DEVICE_EXTENSION DeviceExtension)
{
	NTSTATUS Status;
	UCHAR Value;
	UCHAR ExpectedReply[] = { MOUSE_ACK, 0xAA };
	UCHAR ReplyByte;

	/* First do a mouse line test */
	if (i8042Write(DeviceExtension, DeviceExtension->ControlPort, MOUSE_LINE_TEST))
	{
		Status = i8042ReadDataWait(DeviceExtension, &Value);

		/* 0 means the line is fine; anything else is a wiring fault. */
		if (!NT_SUCCESS(Status) || Value != 0)
		{
			WARN_(I8042PRT, "Mouse line test failed\n");
			goto failure;
		}
	}

	/* Now reset the mouse */
	i8042Flush(DeviceExtension);

	if(!i8042IsrWritePort(DeviceExtension, MOU_CMD_RESET, CTRL_WRITE_MOUSE))
	{
		WARN_(I8042PRT, "Failed to write reset command to mouse\n");
		goto failure;
	}

	/* The implementation of the "Mouse Reset" command differs much from chip to chip.

	   By default, the first byte is an ACK, when the mouse is plugged in and working and NACK when it's not.
	   On success, the next bytes are 0xAA and 0x00.

	   But on some systems (like ECS K7S5A Pro, SiS 735 chipset), we always get an ACK and 0xAA.
	   Only the last byte indicates, whether a mouse is plugged in.
	   It is either sent or not, so there is no byte, which indicates a failure here.

	   After the Mouse Reset command was issued, it usually takes some time until we get a response.
	   So get the first two bytes in a loop. */
	for (ReplyByte = 0;
	     ReplyByte < sizeof(ExpectedReply) / sizeof(ExpectedReply[0]);
	     ReplyByte++)
	{
		ULONG Counter = 500;

		do
		{
			Status = i8042ReadDataWait(DeviceExtension, &Value);

			if(!NT_SUCCESS(Status))
			{
				/* Wait some time before trying again */
				KeStallExecutionProcessor(50);
			}
		} while (Status == STATUS_IO_TIMEOUT && Counter--);

		if (!NT_SUCCESS(Status))
		{
			WARN_(I8042PRT, "No ACK after mouse reset, status 0x%08lx\n", Status);
			goto failure;
		}
		else if (Value != ExpectedReply[ReplyByte])
		{
			WARN_(I8042PRT, "Unexpected reply: 0x%02x (expected 0x%02x)\n", Value, ExpectedReply[ReplyByte]);
			goto failure;
		}
	}

	/* Finally get the third byte, but only try it one time (see above).
	   Otherwise this takes around 45 seconds on a K7S5A Pro, when no mouse is plugged in. */
	Status = i8042ReadDataWait(DeviceExtension, &Value);

	if(!NT_SUCCESS(Status))
	{
		WARN_(I8042PRT, "Last byte was not transmitted after mouse reset, status 0x%08lx\n", Status);
		goto failure;
	}
	else if(Value != 0x00)
	{
		WARN_(I8042PRT, "Last byte after mouse reset was not 0x00, but 0x%02x\n", Value);
		goto failure;
	}

	DeviceExtension->Flags |= MOUSE_PRESENT;
	INFO_(I8042PRT, "Mouse detected\n");
	return;

failure:
	/* There is probably no mouse present. On some systems,
	   the probe locks the entire keyboard controller. Let's
	   try to get access to the keyboard again by sending a
	   reset */
	i8042Flush(DeviceExtension);
	i8042Write(DeviceExtension, DeviceExtension->ControlPort, CTRL_SELF_TEST);
	i8042ReadDataWait(DeviceExtension, &Value);
	i8042Flush(DeviceExtension);

	INFO_(I8042PRT, "Mouse not detected\n");
}
/*
 * Connect the keyboard interrupt service routine.  Both keyboard and
 * mouse ISRs share one spin lock, so the connection is made at the
 * higher of the two DIRQLs; if the keyboard's DIRQL is that maximum,
 * its interrupt object becomes the port's HighestDIRQLInterrupt.
 * Sets KEYBOARD_INITIALIZED on success.
 */
static NTSTATUS
i8042ConnectKeyboardInterrupt(
	IN PI8042_KEYBOARD_EXTENSION DeviceExtension)
{
	PPORT_DEVICE_EXTENSION PortDeviceExtension;
	KIRQL DirqlMax;
	NTSTATUS Status;

	TRACE_(I8042PRT, "i8042ConnectKeyboardInterrupt()\n");

	PortDeviceExtension = DeviceExtension->Common.PortDeviceExtension;
	DirqlMax = MAX(
		PortDeviceExtension->KeyboardInterrupt.Dirql,
		PortDeviceExtension->MouseInterrupt.Dirql);

	INFO_(I8042PRT, "KeyboardInterrupt.Vector %lu\n",
		PortDeviceExtension->KeyboardInterrupt.Vector);
	INFO_(I8042PRT, "KeyboardInterrupt.Dirql %lu\n",
		PortDeviceExtension->KeyboardInterrupt.Dirql);
	INFO_(I8042PRT, "KeyboardInterrupt.DirqlMax %lu\n",
		DirqlMax);
	INFO_(I8042PRT, "KeyboardInterrupt.InterruptMode %s\n",
		PortDeviceExtension->KeyboardInterrupt.InterruptMode == LevelSensitive ? "LevelSensitive" : "Latched");
	INFO_(I8042PRT, "KeyboardInterrupt.ShareInterrupt %s\n",
		PortDeviceExtension->KeyboardInterrupt.ShareInterrupt ? "yes" : "no");
	INFO_(I8042PRT, "KeyboardInterrupt.Affinity 0x%lx\n",
		PortDeviceExtension->KeyboardInterrupt.Affinity);
	Status = IoConnectInterrupt(
		&PortDeviceExtension->KeyboardInterrupt.Object,
		i8042KbdInterruptService,
		DeviceExtension, &PortDeviceExtension->SpinLock,
		PortDeviceExtension->KeyboardInterrupt.Vector, PortDeviceExtension->KeyboardInterrupt.Dirql, DirqlMax,
		PortDeviceExtension->KeyboardInterrupt.InterruptMode, PortDeviceExtension->KeyboardInterrupt.ShareInterrupt,
		PortDeviceExtension->KeyboardInterrupt.Affinity, FALSE);
	if (!NT_SUCCESS(Status))
	{
		WARN_(I8042PRT, "IoConnectInterrupt() failed with status 0x%08x\n", Status);
		return Status;
	}

	if (DirqlMax == PortDeviceExtension->KeyboardInterrupt.Dirql)
		PortDeviceExtension->HighestDIRQLInterrupt = PortDeviceExtension->KeyboardInterrupt.Object;

	PortDeviceExtension->Flags |= KEYBOARD_INITIALIZED;
	return STATUS_SUCCESS;
}
/* Initialize the mouse and connect its ISR to the interrupt resources PnP
 * assigned to the mouse FDO.
 * Mirrors i8042ConnectKeyboardInterrupt(): the synchronization IRQL is the
 * max of both device DIRQLs, and when the mouse IRQ is the higher one its
 * interrupt object becomes HighestDIRQLInterrupt.
 * On failure after a successful connect, the cleanup path disconnects the
 * interrupt and falls back to the keyboard interrupt object. */
static NTSTATUS
i8042ConnectMouseInterrupt(
    IN PI8042_MOUSE_EXTENSION DeviceExtension)
{
    PPORT_DEVICE_EXTENSION PortDeviceExtension;
    KIRQL DirqlMax;
    NTSTATUS Status;

    TRACE_(I8042PRT, "i8042ConnectMouseInterrupt()\n");

    Status = i8042MouInitialize(DeviceExtension);
    if (!NT_SUCCESS(Status))
        return Status;

    PortDeviceExtension = DeviceExtension->Common.PortDeviceExtension;
    /* Synchronize at the higher of the two device DIRQLs */
    DirqlMax = MAX(
        PortDeviceExtension->KeyboardInterrupt.Dirql,
        PortDeviceExtension->MouseInterrupt.Dirql);

    INFO_(I8042PRT, "MouseInterrupt.Vector %lu\n",
        PortDeviceExtension->MouseInterrupt.Vector);
    INFO_(I8042PRT, "MouseInterrupt.Dirql %lu\n",
        PortDeviceExtension->MouseInterrupt.Dirql);
    INFO_(I8042PRT, "MouseInterrupt.DirqlMax %lu\n",
        DirqlMax);
    INFO_(I8042PRT, "MouseInterrupt.InterruptMode %s\n",
        PortDeviceExtension->MouseInterrupt.InterruptMode == LevelSensitive ? "LevelSensitive" : "Latched");
    INFO_(I8042PRT, "MouseInterrupt.ShareInterrupt %s\n",
        PortDeviceExtension->MouseInterrupt.ShareInterrupt ? "yes" : "no");
    INFO_(I8042PRT, "MouseInterrupt.Affinity 0x%lx\n",
        PortDeviceExtension->MouseInterrupt.Affinity);
    Status = IoConnectInterrupt(
        &PortDeviceExtension->MouseInterrupt.Object,
        i8042MouInterruptService,
        DeviceExtension, &PortDeviceExtension->SpinLock,
        PortDeviceExtension->MouseInterrupt.Vector, PortDeviceExtension->MouseInterrupt.Dirql, DirqlMax,
        PortDeviceExtension->MouseInterrupt.InterruptMode, PortDeviceExtension->MouseInterrupt.ShareInterrupt,
        PortDeviceExtension->MouseInterrupt.Affinity, FALSE);
    if (!NT_SUCCESS(Status))
    {
        WARN_(I8042PRT, "IoConnectInterrupt() failed with status 0x%08x\n", Status);
        goto cleanup;
    }

    if (DirqlMax == PortDeviceExtension->MouseInterrupt.Dirql)
        PortDeviceExtension->HighestDIRQLInterrupt = PortDeviceExtension->MouseInterrupt.Object;

    PortDeviceExtension->Flags |= MOUSE_INITIALIZED;
    Status = STATUS_SUCCESS;

cleanup:
    if (!NT_SUCCESS(Status))
    {
        PortDeviceExtension->Flags &= ~MOUSE_INITIALIZED;
        if (PortDeviceExtension->MouseInterrupt.Object)
        {
            IoDisconnectInterrupt(PortDeviceExtension->MouseInterrupt.Object);
            /* Keyboard object may be NULL here if it was never connected;
               downstream users treat that as "no shared interrupt" */
            PortDeviceExtension->HighestDIRQLInterrupt = PortDeviceExtension->KeyboardInterrupt.Object;
        }
    }

    return Status;
}
/* Flush any pending controller output, then rewrite the i8042 command byte:
 * clear the bits in FlagsToDisable and set the bits in FlagsToEnable.
 * Returns STATUS_SUCCESS, or STATUS_UNSUCCESSFUL when the controller did
 * not accept the new mode. */
static NTSTATUS
EnableInterrupts(
    IN PPORT_DEVICE_EXTENSION DeviceExtension,
    IN UCHAR FlagsToDisable,
    IN UCHAR FlagsToEnable)
{
    BOOLEAN ModeChanged;

    /* Discard stale bytes before reprogramming the controller */
    i8042Flush(DeviceExtension);

    ModeChanged = i8042ChangeMode(DeviceExtension, FlagsToDisable, FlagsToEnable);
    return ModeChanged ? STATUS_SUCCESS : STATUS_UNSUCCESSFUL;
}
/* Central initialization state machine, re-run whenever a device is started.
 * 1) If neither device has been detected yet: probe the controller, detect
 *    mouse then keyboard, and enable the controller interrupts for whatever
 *    was found.
 * 2) Connect the keyboard and/or mouse interrupt for any device that is
 *    present, connected, and started but not yet initialized.
 * The function is idempotent: flags gate each step so repeated calls only
 * do outstanding work. */
static NTSTATUS
StartProcedure(
    IN PPORT_DEVICE_EXTENSION DeviceExtension)
{
    NTSTATUS Status = STATUS_UNSUCCESSFUL;
    UCHAR FlagsToDisable = 0;
    UCHAR FlagsToEnable = 0;
    KIRQL Irql;

    if (DeviceExtension->DataPort == 0)
    {
        /* Unable to do something at the moment */
        return STATUS_SUCCESS;
    }

    if (!(DeviceExtension->Flags & (KEYBOARD_PRESENT | MOUSE_PRESENT)))
    {
        /* Try to detect them */
        TRACE_(I8042PRT, "Check if the controller is really a i8042\n");
        Status = i8042BasicDetect(DeviceExtension);
        if (!NT_SUCCESS(Status))
        {
            WARN_(I8042PRT, "i8042BasicDetect() failed with status 0x%08lx\n", Status);
            return STATUS_UNSUCCESSFUL;
        }

        /* First detect the mouse and then the keyboard!
           If we do it the other way round, some systems throw away settings
           like the keyboard translation, when detecting the mouse. */
        TRACE_(I8042PRT, "Detecting mouse\n");
        i8042DetectMouse(DeviceExtension);
        TRACE_(I8042PRT, "Detecting keyboard\n");
        i8042DetectKeyboard(DeviceExtension);

        INFO_(I8042PRT, "Keyboard present: %s\n", DeviceExtension->Flags & KEYBOARD_PRESENT ? "YES" : "NO");
        INFO_(I8042PRT, "Mouse present   : %s\n", DeviceExtension->Flags & MOUSE_PRESENT ? "YES" : "NO");

        TRACE_(I8042PRT, "Enabling i8042 interrupts\n");
        if (DeviceExtension->Flags & KEYBOARD_PRESENT)
        {
            FlagsToDisable |= CCB_KBD_DISAB;
            FlagsToEnable |= CCB_KBD_INT_ENAB;
        }
        if (DeviceExtension->Flags & MOUSE_PRESENT)
        {
            FlagsToDisable |= CCB_MOUSE_DISAB;
            FlagsToEnable |= CCB_MOUSE_INT_ENAB;
        }

        Status = EnableInterrupts(DeviceExtension, FlagsToDisable, FlagsToEnable);
        if (!NT_SUCCESS(Status))
        {
            WARN_(I8042PRT, "EnableInterrupts failed: %lx\n", Status);
            DeviceExtension->Flags &= ~(KEYBOARD_PRESENT | MOUSE_PRESENT);
            return Status;
        }
    }

    /* Connect interrupts */
    if (DeviceExtension->Flags & KEYBOARD_PRESENT &&
        DeviceExtension->Flags & KEYBOARD_CONNECTED &&
        DeviceExtension->Flags & KEYBOARD_STARTED &&
        !(DeviceExtension->Flags & KEYBOARD_INITIALIZED))
    {
        /* Keyboard is ready to be initialized */
        Status = i8042ConnectKeyboardInterrupt(DeviceExtension->KeyboardExtension);
        if (NT_SUCCESS(Status))
        {
            /* Redundant with i8042ConnectKeyboardInterrupt(), but harmless */
            DeviceExtension->Flags |= KEYBOARD_INITIALIZED;
        }
        else
        {
            WARN_(I8042PRT, "i8042ConnectKeyboardInterrupt failed: %lx\n", Status);
        }
    }

    if (DeviceExtension->Flags & MOUSE_PRESENT &&
        DeviceExtension->Flags & MOUSE_CONNECTED &&
        DeviceExtension->Flags & MOUSE_STARTED &&
        !(DeviceExtension->Flags & MOUSE_INITIALIZED))
    {
        /* Mouse is ready to be initialized */
        Status = i8042ConnectMouseInterrupt(DeviceExtension->MouseExtension);
        if (NT_SUCCESS(Status))
        {
            DeviceExtension->Flags |= MOUSE_INITIALIZED;
        }
        else
        {
            WARN_(I8042PRT, "i8042ConnectMouseInterrupt failed: %lx\n", Status);
        }

        /* Start the mouse */
        /* NOTE(review): HighestDIRQLInterrupt could still be NULL here if
           i8042ConnectMouseInterrupt() failed and the keyboard interrupt was
           never connected — confirm this path cannot be reached in that state. */
        Irql = KeAcquireInterruptSpinLock(DeviceExtension->HighestDIRQLInterrupt);
        /* HACK: the mouse has already been reset in i8042DetectMouse. This second
           reset prevents some touchpads/mice from working (Dell D531, D600).
           See CORE-6901 */
        if (!(i8042HwFlags & FL_INITHACK))
        {
            i8042IsrWritePort(DeviceExtension, MOU_CMD_RESET, CTRL_WRITE_MOUSE);
        }
        KeReleaseInterruptSpinLock(DeviceExtension->HighestDIRQLInterrupt, Irql);
    }

    return Status;
}
/* Handle IRP_MN_START_DEVICE for a keyboard or mouse FDO.
 * Validates the allocated resource lists, extracts the interrupt (always
 * required) and, for the keyboard FDO, the control/data I/O ports, stores
 * them in the shared port extension, marks the device as started, and runs
 * StartProcedure() to finish whatever initialization is now possible.
 * Convention: the keyboard FDO owns both I/O ports; the mouse FDO must
 * provide only an interrupt. */
static NTSTATUS
i8042PnpStartDevice(
    IN PDEVICE_OBJECT DeviceObject,
    IN PCM_RESOURCE_LIST AllocatedResources,
    IN PCM_RESOURCE_LIST AllocatedResourcesTranslated)
{
    PFDO_DEVICE_EXTENSION DeviceExtension;
    PPORT_DEVICE_EXTENSION PortDeviceExtension;
    PCM_PARTIAL_RESOURCE_DESCRIPTOR ResourceDescriptor, ResourceDescriptorTranslated;
    INTERRUPT_DATA InterruptData = { NULL };
    BOOLEAN FoundDataPort = FALSE;
    BOOLEAN FoundControlPort = FALSE;
    BOOLEAN FoundIrq = FALSE;
    ULONG i;
    NTSTATUS Status;

    TRACE_(I8042PRT, "i8042PnpStartDevice(%p)\n", DeviceObject);
    DeviceExtension = (PFDO_DEVICE_EXTENSION)DeviceObject->DeviceExtension;
    PortDeviceExtension = DeviceExtension->PortDeviceExtension;

    ASSERT(DeviceExtension->PnpState == dsStopped);

    if (!AllocatedResources)
    {
        WARN_(I8042PRT, "No allocated resources sent to driver\n");
        return STATUS_INSUFFICIENT_RESOURCES;
    }
    if (AllocatedResources->Count != 1)
    {
        WARN_(I8042PRT, "Wrong number of allocated resources sent to driver\n");
        return STATUS_INSUFFICIENT_RESOURCES;
    }
    /* Only version 1.1 resource lists are understood */
    if (AllocatedResources->List[0].PartialResourceList.Version != 1
     || AllocatedResources->List[0].PartialResourceList.Revision != 1
     || AllocatedResourcesTranslated->List[0].PartialResourceList.Version != 1
     || AllocatedResourcesTranslated->List[0].PartialResourceList.Revision != 1)
    {
        WARN_(I8042PRT, "Revision mismatch: %u.%u != 1.1 or %u.%u != 1.1\n",
            AllocatedResources->List[0].PartialResourceList.Version,
            AllocatedResources->List[0].PartialResourceList.Revision,
            AllocatedResourcesTranslated->List[0].PartialResourceList.Version,
            AllocatedResourcesTranslated->List[0].PartialResourceList.Revision);
        return STATUS_REVISION_MISMATCH;
    }

    /* Get Irq and optionally control port and data port */
    for (i = 0; i < AllocatedResources->List[0].PartialResourceList.Count; i++)
    {
        ResourceDescriptor = &AllocatedResources->List[0].PartialResourceList.PartialDescriptors[i];
        ResourceDescriptorTranslated = &AllocatedResourcesTranslated->List[0].PartialResourceList.PartialDescriptors[i];
        switch (ResourceDescriptor->Type)
        {
            case CmResourceTypePort:
            {
                if (ResourceDescriptor->u.Port.Length == 1)
                {
                    /* We assume that the first resource will
                     * be the control port and the second one
                     * will be the data port...
                     */
                    if (!FoundDataPort)
                    {
                        PortDeviceExtension->DataPort = ULongToPtr(ResourceDescriptor->u.Port.Start.u.LowPart);
                        INFO_(I8042PRT, "Found data port: %p\n", PortDeviceExtension->DataPort);
                        FoundDataPort = TRUE;
                    }
                    else if (!FoundControlPort)
                    {
                        PortDeviceExtension->ControlPort = ULongToPtr(ResourceDescriptor->u.Port.Start.u.LowPart);
                        INFO_(I8042PRT, "Found control port: %p\n", PortDeviceExtension->ControlPort);
                        FoundControlPort = TRUE;
                    }
                    else
                    {
                        /* FIXME: implement PS/2 Active Multiplexing */
                        ERR_(I8042PRT, "Unhandled I/O ranges provided: 0x%lx\n", ResourceDescriptor->u.Port.Length);
                    }
                }
                else
                    WARN_(I8042PRT, "Invalid I/O range length: 0x%lx\n", ResourceDescriptor->u.Port.Length);
                break;
            }
            case CmResourceTypeInterrupt:
            {
                /* At most one interrupt resource per FDO */
                if (FoundIrq)
                    return STATUS_INVALID_PARAMETER;
                /* The *translated* descriptor carries the values IoConnectInterrupt needs */
                InterruptData.Dirql = (KIRQL)ResourceDescriptorTranslated->u.Interrupt.Level;
                InterruptData.Vector = ResourceDescriptorTranslated->u.Interrupt.Vector;
                InterruptData.Affinity = ResourceDescriptorTranslated->u.Interrupt.Affinity;
                if (ResourceDescriptorTranslated->Flags & CM_RESOURCE_INTERRUPT_LATCHED)
                    InterruptData.InterruptMode = Latched;
                else
                    InterruptData.InterruptMode = LevelSensitive;
                InterruptData.ShareInterrupt = (ResourceDescriptorTranslated->ShareDisposition == CmResourceShareShared);
                INFO_(I8042PRT, "Found irq resource: %lu\n", ResourceDescriptor->u.Interrupt.Level);
                FoundIrq = TRUE;
                break;
            }
            default:
                WARN_(I8042PRT, "Unknown resource descriptor type 0x%x\n", ResourceDescriptor->Type);
        }
    }

    if (!FoundIrq)
    {
        WARN_(I8042PRT, "Interrupt resource was not found in allocated resources list\n");
        return STATUS_INSUFFICIENT_RESOURCES;
    }
    else if (DeviceExtension->Type == Keyboard && (!FoundDataPort || !FoundControlPort))
    {
        WARN_(I8042PRT, "Some required resources were not found in allocated resources list\n");
        return STATUS_INSUFFICIENT_RESOURCES;
    }
    else if (DeviceExtension->Type == Mouse && (FoundDataPort || FoundControlPort))
    {
        WARN_(I8042PRT, "Too much resources were provided in allocated resources list\n");
        return STATUS_INVALID_PARAMETER;
    }

    switch (DeviceExtension->Type)
    {
        case Keyboard:
        {
            RtlCopyMemory(
                &PortDeviceExtension->KeyboardInterrupt,
                &InterruptData,
                sizeof(INTERRUPT_DATA));
            PortDeviceExtension->Flags |= KEYBOARD_STARTED;
            Status = StartProcedure(PortDeviceExtension);
            break;
        }
        case Mouse:
        {
            RtlCopyMemory(
                &PortDeviceExtension->MouseInterrupt,
                &InterruptData,
                sizeof(INTERRUPT_DATA));
            PortDeviceExtension->Flags |= MOUSE_STARTED;
            Status = StartProcedure(PortDeviceExtension);
            break;
        }
        default:
        {
            WARN_(I8042PRT, "Unknown FDO type %u\n", DeviceExtension->Type);
            ASSERT(!(PortDeviceExtension->Flags & KEYBOARD_CONNECTED) || !(PortDeviceExtension->Flags & MOUSE_CONNECTED));
            Status = STATUS_INVALID_DEVICE_REQUEST;
        }
    }

    if (NT_SUCCESS(Status))
        DeviceExtension->PnpState = dsStarted;

    return Status;
}
/* Tear down an FDO on device removal: unlink it from the driver-wide device
 * list (under the list spinlock), detach from the lower device object, and
 * delete the device object itself. */
static VOID
i8042RemoveDevice(
    IN PDEVICE_OBJECT DeviceObject)
{
    PFDO_DEVICE_EXTENSION Extension = (PFDO_DEVICE_EXTENSION)DeviceObject->DeviceExtension;
    PI8042_DRIVER_EXTENSION DriverExt;
    KIRQL Irql;

    /* The driver object pointer doubles as the key the extension was allocated under */
    DriverExt = (PI8042_DRIVER_EXTENSION)IoGetDriverObjectExtension(DeviceObject->DriverObject, DeviceObject->DriverObject);

    KeAcquireSpinLock(&DriverExt->DeviceListLock, &Irql);
    RemoveEntryList(&Extension->ListEntry);
    KeReleaseSpinLock(&DriverExt->DeviceListLock, Irql);

    IoDetachDevice(Extension->LowerDevice);
    IoDeleteDevice(DeviceObject);
}
/* IRP_MJ_PNP dispatch routine.
 * START_DEVICE is forwarded synchronously and, on success, completed locally
 * via i8042PnpStartDevice(); most other minors are simply forwarded down the
 * stack with ForwardIrpAndForget(). REMOVE_DEVICE forwards the IRP first and
 * then destroys the FDO. Only the START_DEVICE path (and the unknown-type
 * default at the bottom of the function) completes the IRP here. */
NTSTATUS NTAPI
i8042Pnp(
    IN PDEVICE_OBJECT DeviceObject,
    IN PIRP Irp)
{
    PIO_STACK_LOCATION Stack;
    ULONG MinorFunction;
    I8042_DEVICE_TYPE DeviceType;
    ULONG_PTR Information = 0;
    NTSTATUS Status;

    Stack = IoGetCurrentIrpStackLocation(Irp);
    MinorFunction = Stack->MinorFunction;
    DeviceType = ((PFDO_DEVICE_EXTENSION)DeviceObject->DeviceExtension)->Type;

    switch (MinorFunction)
    {
        case IRP_MN_START_DEVICE: /* 0x00 */
        {
            TRACE_(I8042PRT, "IRP_MJ_PNP / IRP_MN_START_DEVICE\n");
            /* Call lower driver (if any) */
            if (DeviceType != PhysicalDeviceObject)
            {
                /* Lower drivers must start before we touch the resources */
                Status = ForwardIrpAndWait(DeviceObject, Irp);
                if (NT_SUCCESS(Status))
                    Status = i8042PnpStartDevice(
                        DeviceObject,
                        Stack->Parameters.StartDevice.AllocatedResources,
                        Stack->Parameters.StartDevice.AllocatedResourcesTranslated);
            }
            else
                Status = STATUS_SUCCESS;
            break;
        }
        case IRP_MN_QUERY_DEVICE_RELATIONS: /* (optional) 0x07 */
        {
            switch (Stack->Parameters.QueryDeviceRelations.Type)
            {
                case BusRelations:
                {
                    TRACE_(I8042PRT, "IRP_MJ_PNP / IRP_MN_QUERY_DEVICE_RELATIONS / BusRelations\n");
                    return ForwardIrpAndForget(DeviceObject, Irp);
                }
                case RemovalRelations:
                {
                    TRACE_(I8042PRT, "IRP_MJ_PNP / IRP_MN_QUERY_DEVICE_RELATIONS / RemovalRelations\n");
                    return ForwardIrpAndForget(DeviceObject, Irp);
                }
                default:
                    ERR_(I8042PRT, "IRP_MJ_PNP / IRP_MN_QUERY_DEVICE_RELATIONS / Unknown type 0x%lx\n",
                        Stack->Parameters.QueryDeviceRelations.Type);
                    return ForwardIrpAndForget(DeviceObject, Irp);
            }
            break;
        }
        case IRP_MN_FILTER_RESOURCE_REQUIREMENTS: /* (optional) 0x0d */
        {
            TRACE_(I8042PRT, "IRP_MJ_PNP / IRP_MN_FILTER_RESOURCE_REQUIREMENTS\n");
            return ForwardIrpAndForget(DeviceObject, Irp);
        }
        case IRP_MN_QUERY_PNP_DEVICE_STATE: /* 0x14 */
        {
            TRACE_(I8042PRT, "IRP_MJ_PNP / IRP_MN_QUERY_PNP_DEVICE_STATE\n");
            return ForwardIrpAndForget(DeviceObject, Irp);
        }
        case IRP_MN_QUERY_REMOVE_DEVICE:
        {
            TRACE_(I8042PRT, "IRP_MJ_PNP / IRP_MN_QUERY_REMOVE_DEVICE\n");
            return ForwardIrpAndForget(DeviceObject, Irp);
        }
        case IRP_MN_CANCEL_REMOVE_DEVICE:
        {
            TRACE_(I8042PRT, "IRP_MJ_PNP / IRP_MN_CANCEL_REMOVE_DEVICE\n");
            return ForwardIrpAndForget(DeviceObject, Irp);
        }
        case IRP_MN_REMOVE_DEVICE:
        {
            TRACE_(I8042PRT, "IRP_MJ_PNP / IRP_MN_REMOVE_DEVICE\n");
            /* Forward first so lower drivers see the removal, then delete our FDO */
            Status = ForwardIrpAndForget(DeviceObject, Irp);
            i8042RemoveDevice(DeviceObject);
            return Status;
        }
        default:
        {
            ERR_(I8042PRT, "IRP_MJ_PNP / unknown minor function 0x%x\n", MinorFunction);
            return ForwardIrpAndForget(DeviceObject, Irp);
        }
    }

    Irp->IoStatus.Information = Information;
    Irp->IoStatus.Status = Status;
    IoCompleteRequest(Irp, IO_NO_INCREMENT);
    return Status;
}
|
sunnyden/reactos
|
drivers/input/i8042prt/pnp.c
|
C
|
gpl-2.0
| 27,818
|
/*
* Compiler error handler
* Copyright
* (C) 1992 Joseph H. Allen
*
* This file is part of JOE (Joe's Own Editor)
*/
#include "types.h"
/* Error database: a doubly-linked ring of parsed compiler/grep messages.
   'errors' is the ring anchor (self-linked when empty). */
typedef struct error ERROR;
struct error {
	LINK(ERROR) link;	/* Linked list of errors */
	long line;		/* Target line number (0-based, updated as the file is edited) */
	long org;		/* Original target line number (as parsed / last saved) */
	unsigned char *file;	/* Target file name */
	long src;		/* Error-file line number (row in the message buffer) */
	unsigned char *msg;	/* The message */
} errors = { { &errors, &errors} };
ERROR *errptr = &errors;	/* Current error row (anchor == "no current error") */
B *errbuf = NULL;		/* Buffer with error messages */
/* Function which allows stepping through all error buffers,
   for multi-file search and replace.  Give it a buffer.  It finds next
   buffer in error list.  Look at 'berror' for error information. */

/* This is made to work like bafter: it does not increment refcount of buffer */

B *beafter(B *b)
{
	struct error *e;
	unsigned char *name = b->name;

	if (!name) name = USTR "";

	/* Find first error record referring to this buffer's file */
	for (e = errors.link.next; e != &errors; e = e->link.next)
		if (!zcmp(name, e->file))
			break;

	if (e == &errors) {
		/* Given buffer is not in list?  Return first buffer in list. */
		e = errors.link.next;
	}

	/* Skip the run of records for the current file to reach the next file */
	while (e != &errors && !zcmp(name, e->file))
		e = e->link.next;

	berror = 0;
	if (e != &errors) {
		/* NOTE: inner 'b' intentionally shadows the parameter */
		B *b = bfind(e->file);
		/* bfind bumps refcount, so we have to unbump it */
		if (b->count == 1)
			b->orphan = 1; /* Oops */
		else
			--b->count;
		return b;
	}
	return 0;
}
/* Insert and delete notices */
void inserr(unsigned char *name, long int where, long int n, int bol)
{
ERROR *e;
if (!n)
return;
if (name) {
for (e = errors.link.next; e != &errors; e = e->link.next) {
if (!zcmp(e->file, name)) {
if (e->line > where)
e->line += n;
else if (e->line == where && bol)
e->line += n;
}
}
}
}
void delerr(unsigned char *name, long int where, long int n)
{
ERROR *e;
if (!n)
return;
if (name) {
for (e = errors.link.next; e != &errors; e = e->link.next) {
if (!zcmp(e->file, name)) {
if (e->line > where + n)
e->line -= n;
else if (e->line > where)
e->line = where;
}
}
}
}
/* Abort notice */
void abrerr(unsigned char *name)
{
ERROR *e;
if (name)
for (e = errors.link.next; e != &errors; e = e->link.next)
if (!zcmp(e->file, name))
e->line = e->org;
}
/* Save notice */
void saverr(unsigned char *name)
{
ERROR *e;
if (name)
for (e = errors.link.next; e != &errors; e = e->link.next)
if (!zcmp(e->file, name))
e->org = e->line;
}
/* Pool of free error nodes, recycled by freeerr()/parseit() to avoid
   per-message allocation; anchored as an empty circular list. */
ERROR errnodes = { {&errnodes, &errnodes} };
/* Free an error node: release its owned strings and return the node to
   the errnodes free pool (it is not handed back to the allocator). */
static void freeerr(ERROR *n)
{
	vsrm(n->file);
	vsrm(n->msg);
	enquef(ERROR, link, &errnodes, n);
}
/* Free all errors: drain the error ring into the free pool, reset the
   current-error cursor, and return how many records were released. */
static int freeall(void)
{
	int count = 0;
	while (!qempty(ERROR, link, &errors)) {
		freeerr(deque_f(ERROR, link, errors.link.next));
		++count;
	}
	errptr = &errors;
	return count;
}
/* Parse error messages into database */

/* From joe's joe 2.9 */

/* First word (allowing ., /, _ and -) with a . is the file name.  Next number
   is line number.  Then there should be a ':'
   Outputs: *rtn_name gets an allocated copy of the file name (or NULL),
   *rtn_line gets the 0-based line number, or -1 when the line did not parse
   as an error message.  Lines starting with "JOE:" are ignored. */
static void parseone(struct charmap *map,unsigned char *s,unsigned char **rtn_name,long *rtn_line)
{
	int x, y, flg;
	unsigned char *name = NULL;
	long line = -1;

	y=0;
	flg=0;

	if (s[0] == 'J' && s[1] == 'O' && s[2] == 'E' && s[3] == ':')
		goto bye;

	/* Scan word by word until a word containing '.' is found (or end of line) */
	do {
		/* Skip to first word */
		for (x = y; s[x] && !(joe_isalnum_(map,s[x]) || s[x] == '.' || s[x] == '/'); ++x) ;
		/* Skip to end of first word */
		for (y = x; joe_isalnum_(map,s[y]) || s[y] == '.' || s[y] == '/' || s[y]=='-'; ++y)
			if (s[y] == '.')
				flg = 1;
	} while (!flg && x!=y);

	/* Save file name */
	if (x != y)
		name = vsncpy(NULL, 0, s + x, y - x);

	/* Skip to first number */
	for (x = y; s[x] && (s[x] < '0' || s[x] > '9'); ++x) ;

	/* Skip to end of first number */
	for (y = x; s[y] >= '0' && s[y] <= '9'; ++y) ;

	/* Save line number (convert 1-based message to 0-based buffer line) */
	if (x != y)
		sscanf((char *)(s + x), "%ld", &line);
	if (line != -1)
		--line;

	/* Look for ':' — required for the line to count as an error message */
	flg = 0;
	while (s[y]) {
		/* Allow : anywhere on line: works for MIPS C compiler */
		/*
		for (y = 0; s[y];)
		*/
		if (s[y]==':') {
			flg = 1;
			break;
		}
		++y;
	}

bye:
	if (!flg)
		line = -1;

	*rtn_name = name;
	*rtn_line = line;
}
/* Parser for file name lists from grep, find and ls.
 *
 *   filename
 *   filename:*
 *   filename:line-number:*
 *
 * Outputs: *rtn_name gets an allocated copy of the file name (or NULL),
 * *rtn_line gets the 0-based line number, or 0 when no valid line number
 * followed (a number is only trusted when a second ':' follows it).
 * Lines starting with "JOE:" are ignored (name stays NULL, line -1). */
void parseone_grep(struct charmap *map,unsigned char *s,unsigned char **rtn_name,long *rtn_line)
{
	int y;
	unsigned char *name = NULL;
	long line = -1;

	if (s[0] == 'J' && s[1] == 'O' && s[2] == 'E' && s[3] == ':')
		goto bye;

	/* Skip to first : or end of line */
	for (y = 0;s[y] && s[y] != ':';++y);
	if (y) {
		/* This should be the file name */
		name = vsncpy(NULL,0,s,y);
		line = 0;
		if (s[y] == ':') {
			/* Maybe there's a line number */
			++y;
			while (s[y] >= '0' && s[y] <= '9')
				line = line * 10 + (s[y++] - '0');
			/* Convert 1-based to 0-based; no digits leaves line == -1 */
			--line;
			if (line < 0 || s[y] != ':') {
				/* Line number is only valid if there's a second : */
				line = 0;
			}
		}
	}

bye:
	*rtn_name = name;
	*rtn_line = line;
}
/* Try to parse one message line 's' (taken from buffer row 'row') with the
 * supplied single-line parser.  On success, allocate an ERROR record from
 * the free pool, qualify the file name with 'current_dir' when given, fill
 * in the line numbers and message text, and append it to the error ring.
 * Returns 1 when a record was added, 0 otherwise.
 * Fix: removed the dead store 'err->file = name;' that was unconditionally
 * overwritten by both branches below. */
static int parseit(struct charmap *map,unsigned char *s, long int row,
    void (*parseone)(struct charmap *map, unsigned char *s, unsigned char **rtn_name, long *rtn_line), unsigned char *current_dir)
{
	unsigned char *name = NULL;
	long line = -1;
	ERROR *err;

	parseone(map,s,&name,&line);

	if (name) {
		if (line != -1) {
			/* We have an error */
			err = (ERROR *) alitem(&errnodes, sizeof(ERROR));
			if (current_dir) {
				/* Prefix the parsed name with the directory the command ran in */
				err->file = vsncpy(NULL, 0, sv(current_dir));
				err->file = vsncpy(sv(err->file), sv(name));
				err->file = canonical(err->file);
				vsrm(name);	/* ownership moved into err->file */
			} else {
				err->file = name;
			}
			err->org = err->line = line;
			err->src = row;
			err->msg = vsncpy(NULL, 0, sc("\\i"));
			err->msg = vsncpy(sv(err->msg), sv(s));
			enqueb(ERROR, link, &errors, err);
			return 1;
		} else
			vsrm(name);
	}
	return 0;
}
/* Parse the error output contained in a buffer */

void kill_ansi(unsigned char *s);

/* Rebuild the error database from a buffer.
 * With an active block mark (markv), only the marked region of the marked
 * buffer is scanned; otherwise the whole of buffer 'b' is scanned.
 * Each line is stripped of ANSI escapes and fed to the buffer's parser
 * (or the default 'parseone').  Returns the number of records added. */
static long parserr(B *b)
{
	if (markv(1)) {
		P *p = pdup(markb, USTR "parserr1");
		P *q = pdup(markb, USTR "parserr2");
		long nerrs = 0;

		errbuf = markb->b;
		freeall();
		p_goto_bol(p);

		do {
			unsigned char *s;

			pset(q, p);
			p_goto_eol(p);
			s = brvs(q, (int) (p->byte - q->byte));
			if (s) {
				kill_ansi(s);
				nerrs += parseit(q->b->o.charmap, s, q->line, (q->b->parseone ? q->b->parseone : parseone),q->b->current_dir);
				vsrm(s);
			}
			pgetc(p);	/* step over the newline */
		} while (p->byte < markk->byte);

		prm(p);
		prm(q);
		return nerrs;
	} else {
		P *p = pdup(b->bof, USTR "parserr3");
		P *q = pdup(p, USTR "parserr4");
		long nerrs = 0;

		errbuf = b;
		freeall();

		do {
			unsigned char *s;

			pset(q, p);
			p_goto_eol(p);
			s = brvs(q, (int) (p->byte - q->byte));
			if (s) {
				kill_ansi(s);
				nerrs += parseit(q->b->o.charmap, s, q->line, (q->b->parseone ? q->b->parseone : parseone), q->b->current_dir);
				vsrm(s);
			}
		} while (pgetc(p) != NO_MORE_DATA);

		prm(p);
		prm(q);
		return nerrs;
	}
}
/* Pick a text window to show messages in.  Prefer the lowest on-screen
 * (y >= 0) text window that displays buffer b; otherwise fall back to the
 * lowest on-screen text window of any buffer.  Returns 0 if none exist. */
BW *find_a_good_bw(B *b)
{
	W *w = maint->topwin;
	BW *with_buf = 0;	/* last/lowest tw showing b */
	BW *any = 0;		/* last/lowest tw of any buffer */

	if (w != NULL) {
		do {
			if ((w->watom->what & TYPETW) && w->y >= 0) {
				any = (BW *)w->object;
				if (any->b == b)
					with_buf = any;
			}
			w = w->link.next;
		} while (w != maint->topwin);
	}

	return with_buf ? with_buf : any;
}
/* Parse buffer b for error messages (used by menu/shell integration),
 * report the count in the message line of a suitable window, and return 0. */
int parserrb(B *b)
{
	BW *bw;
	int n;

	/* parserr() calls freeall() itself; this early call is redundant but harmless */
	freeall();
	bw = find_a_good_bw(b);
	/* NOTE(review): find_a_good_bw() can return 0 when no text window is on
	   screen; unmark()/msgnw() below would then dereference NULL — confirm
	   callers guarantee a window exists. */
	unmark(bw);
	n = parserr(b);
	if (n)
		joe_snprintf_1(msgbuf, JOE_MSGBUFSIZE, joe_gettext(_("%d messages found")), n);
	else
		joe_snprintf_0(msgbuf, sizeof(msgbuf), joe_gettext(_("No messages found")));
	msgnw(bw->parent, msgbuf);
	return 0;
}
/* User command: clear the error database.  Detaches the per-buffer parser,
 * frees all records (reporting how many), forgets the message buffer, and
 * refreshes all windows.  Always returns 0. */
int urelease(BW *bw)
{
	bw->b->parseone = 0;

	if (!errbuf && qempty(ERROR, link, &errors)) {
		joe_snprintf_0(msgbuf, sizeof(msgbuf), joe_gettext(_("No messages")));
	} else {
		int freed = freeall();

		errbuf = NULL;
		joe_snprintf_1(msgbuf, sizeof(msgbuf), joe_gettext(_("%d messages cleared")), freed);
	}

	msgnw(bw->parent, msgbuf);
	updall();
	return 0;
}
/* User command: parse the current buffer as compiler error output.
 * Installs the compiler-style parser on the buffer, rebuilds the database,
 * and reports the number of messages found.  Always returns 0. */
int uparserr(BW *bw)
{
	int nfound;

	freeall();
	bw->b->parseone = parseone;	/* remember which parser this buffer uses */
	nfound = parserr(bw->b);

	if (nfound)
		joe_snprintf_1(msgbuf, JOE_MSGBUFSIZE, joe_gettext(_("%d messages found")), nfound);
	else
		joe_snprintf_0(msgbuf, sizeof(msgbuf), joe_gettext(_("No messages found")));
	msgnw(bw->parent, msgbuf);
	return 0;
}
/* User command: parse the current buffer as grep/find/ls output.
 * Installs the grep-style parser on the buffer, rebuilds the database,
 * and reports the number of messages found.  Always returns 0. */
int ugparse(BW *bw)
{
	int nfound;

	freeall();
	bw->b->parseone = parseone_grep;	/* grep-style lines: file[:line[:...]] */
	nfound = parserr(bw->b);

	if (nfound)
		joe_snprintf_1(msgbuf, JOE_MSGBUFSIZE, joe_gettext(_("%d messages found")), nfound);
	else
		joe_snprintf_0(msgbuf, sizeof(msgbuf), joe_gettext(_("No messages found")));
	msgnw(bw->parent, msgbuf);
	return 0;
}
/* Switch the current window to 'file' (loading it if necessary), move the
 * cursor to 'line' with the view centered, and show 'msg' (may be NULL) in
 * the message line.  Returns 0 on success, -1 if the file switch failed. */
int jump_to_file_line(BW *bw,unsigned char *file,int line,unsigned char *msg)
{
	int omid;

	/* Only switch buffers when the window shows a different file */
	if (!bw->b->name || zcmp(file, bw->b->name)) {
		if (doswitch(bw, vsdup(file), NULL, NULL))
			return -1;
		bw = (BW *) maint->curwin->object;
	}
	/* Temporarily force centered scrolling for the jump */
	omid = mid;
	mid = 1;
	pline(bw->cursor, line);
	dofollows();
	mid = omid;
	bw->cursor->xcol = piscol(bw->cursor);
	msgnw(bw->parent, msg);
	return 0;
}
/* Show current message: display the message text of the current error
   record, or complain when the cursor sits on the list anchor (no records).
   Returns 0 on success, -1 when there is nothing to show. */
int ucurrent_msg(BW *bw)
{
	if (errptr == &errors) {
		msgnw(bw->parent, joe_gettext(_("No messages")));
		return -1;
	}
	msgnw(bw->parent, errptr->msg);
	return 0;
}
/* Find line in error database: locate the record whose file name and
   original line number match, make it current, reposition the message
   buffer on its source row, and return it.  Returns 0 when not found. */
ERROR *srcherr(BW *bw,unsigned char *file,long line)
{
	ERROR *scan;

	for (scan = errors.link.next; scan != &errors; scan = scan->link.next) {
		if (zcmp(scan->file, file) || scan->org != line)
			continue;
		errptr = scan;
		setline(errbuf, errptr->src);
		return errptr;
	}
	return 0;
}
/* Delete ANSI formatting: strip CSI-style escape runs from s in place.
   On ESC, consume the run of ESC / '[' / digits / ';' characters plus the
   single terminating character (e.g. 'm'); everything else is copied
   through.  The string is rewritten in place and NUL-terminated. */
void kill_ansi(unsigned char *s)
{
	unsigned char *out = s;
	unsigned char *in = s;

	while (*in) {
		if (*in != 27) {
			*out++ = *in++;
			continue;
		}
		/* skip ESC plus the parameter characters of the sequence */
		do
			++in;
		while (*in == 27 || *in == '[' || (*in >= '0' && *in <= '9') || *in == ';');
		/* skip the final command character, if the string didn't end */
		if (*in)
			++in;
	}
	*out = 0;
}
/* User command: parse the line under the cursor as an error/grep message
 * and jump to the file and line it names.  Uses the buffer's installed
 * parser if any, otherwise the grep parser.  Returns 0 on success, -1
 * when the line does not name a jump target.
 * Fixes: kill_ansi() was called before the NULL check on brvs()'s result
 * (NULL dereference on failure — compare parserr(), which checks first),
 * and the directory-prefixing vsncpy() calls ran even when the parser
 * produced no file name. */
int ujump(BW *bw)
{
	int rtn = -1;
	P *p = pdup(bw->cursor, USTR "ujump");
	P *q = pdup(p, USTR "ujump");
	unsigned char *s;

	p_goto_bol(p);
	p_goto_eol(q);
	s = brvs(p, (int) (q->byte - p->byte));
	prm(p);
	prm(q);

	if (s) {
		unsigned char *name = NULL;
		unsigned char *fullname = NULL;
		unsigned char *curd = get_cd(bw->parent);
		long line = -1;

		/* Strip terminal escapes only after the NULL check above */
		kill_ansi(s);

		if (bw->b->parseone)
			bw->b->parseone(bw->b->o.charmap,s,&name,&line);
		else
			parseone_grep(bw->b->o.charmap,s,&name,&line);

		if (name) {
			/* Prepend current directory.. */
			fullname = vsncpy(NULL, 0, sv(curd));
			fullname = vsncpy(sv(fullname), sv(name));
			vsrm(name);
			name = canonical(fullname);
		}

		if (name && line != -1) {
			ERROR *p = srcherr(bw, name, line);
			uprevw((BASE *)bw);
			/* Check that we made it to a tw */
			if (p)
				rtn = jump_to_file_line(maint->curwin->object,name,p->line,NULL /* p->msg */);
			else
				rtn = jump_to_file_line(maint->curwin->object,name,line,NULL);
			vsrm(name);
		}
		vsrm(s);
	}
	return rtn;
}
/* User command: advance to the next error record and jump to its target.
   Returns -1 (with a message) when already at the last record. */
int unxterr(BW *bw)
{
	ERROR *next = errptr->link.next;

	if (next == &errors) {
		msgnw(bw->parent, joe_gettext(_("No more errors")));
		return -1;
	}
	errptr = next;
	setline(errbuf, errptr->src);
	return jump_to_file_line(bw, errptr->file, errptr->line, NULL /* errptr->msg */);
}
/* User command: step back to the previous error record and jump to its
   target.  Returns -1 (with a message) when already at the first record. */
int uprverr(BW *bw)
{
	ERROR *prev = errptr->link.prev;

	if (prev == &errors) {
		msgnw(bw->parent, joe_gettext(_("No more errors")));
		return -1;
	}
	errptr = prev;
	setline(errbuf, errptr->src);
	return jump_to_file_line(bw, errptr->file, errptr->line, NULL /* errptr->msg */);
}
|
jhallen/joe-editor
|
joe/uerror.c
|
C
|
gpl-2.0
| 11,705
|
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif
#endif
/*
* 1 Header File Including
*/
#include <linux/module.h>
#include <linux/netdevice.h>
#include "hwifi_tps.h"
#include "cfg80211_stru.h"
#include "hwifi_cfg80211.h"
#include "hwifi_wpa_ioctl.h" /* for wl_pwrm_set */
#include "hwifi_wl_config_ioctl.h"
#include <net/cfg80211.h> /* wdev_priv */
#include <linux/etherdevice.h> /* eth_type_trans */
#include "hwifi_utils.h"
#include "hwifi_hcc.h"
#include "hwifi_netdev.h"
#include "hwifi_cfgapi.h"
/*
* 2 Global Variable Definition
*/
/*
* 3 Function Definition
*/
/*****************************************************************************
 Function    : hwifi_test_freq_set
 Description : set the WiFi primary channel for test mode
               (header translated to English; the original GB2312 Chinese
               comment was mojibake in this file)
 Input       : struct cfg_struct *cfg, int32 freq  -- channel number, despite
               the parameter's name (it is written to WID_PRIMARY_CHANNEL)
 Return      : SUCC on success, -EFAIL on failure
*****************************************************************************/
int32 hwifi_test_freq_set(struct cfg_struct *cfg, int32 freq)
{
    struct sk_buff *skb;
    struct hwifi_msg_header *msghdr;
    struct char_wid *wid_freq;
    uint16 msg_size;
    int32 ret;

    /* Channel changes are rejected while associated */
    if(IS_CONNECTED(cfg))
    {
        HWIFI_WARNING("Current connected status not support wifreq param setting.");
        return -EFAIL;
    }

    msg_size = sizeof(struct hwifi_msg_header) + sizeof(struct char_wid);
    skb = hwifi_alloc_skb_for_cmd(msg_size);
    if (NULL == skb)
    {
        return -EFAIL;
    }

    /* fill the msg header */
    msghdr = (struct hwifi_msg_header*)skb_put(skb, sizeof(struct hwifi_msg_header));
    hwifi_fill_msg_hdr(msghdr, HOST_CMD_CONFIG, msg_size);

    /* fill the primary-channel WID */
    wid_freq = (struct char_wid *)skb_put(skb, sizeof(struct char_wid));
    hwifi_fill_char_wid(wid_freq, WID_PRIMARY_CHANNEL , freq);

    ret = hwifi_send_cmd(cfg, skb);
    if (SUCC != ret)
    {
        /* NOTE(review): it is unclear whether hwifi_send_cmd() consumes skb
           on failure — verify there is no skb leak on this path */
        HWIFI_WARNING("Failed to send wifi freq set msg!");
        return -EFAIL;
    }
    /* Cache the channel so later queries do not need a firmware round-trip */
    cfg->ap_info.curr_channel=freq;
    HWIFI_DEBUG("Succeed to set wifreq: %d", freq);
    return SUCC;
}
/*****************************************************************************
 Function    : hwifi_test_userpow_set
 Description : set the user-controlled TX power override flag/value
               (header translated to English; the original GB2312 Chinese
               comment was mojibake in this file)
 Input       : struct cfg_struct *cfg, int32 userpow (truncated to uint8
               when written to WID_USER_CONTROL_ON_TX_POWER)
 Return      : SUCC on success, -EFAIL on failure
*****************************************************************************/
int32 hwifi_test_userpow_set(struct cfg_struct *cfg, int32 userpow)
{
    struct sk_buff *skb;
    struct hwifi_msg_header *msghdr;
    struct char_wid *wid_userpow;
    uint16 msg_size;
    int32 ret;

    msg_size = sizeof(struct hwifi_msg_header) + sizeof(struct char_wid);
    skb = hwifi_alloc_skb_for_cmd(msg_size);
    if (NULL == skb)
    {
        return -EFAIL;
    }

    /* fill the msg header */
    msghdr = (struct hwifi_msg_header*)skb_put(skb, sizeof(struct hwifi_msg_header));
    hwifi_fill_msg_hdr(msghdr, HOST_CMD_CONFIG, msg_size);

    /* fill the user TX-power-control WID */
    wid_userpow = (struct char_wid *)skb_put(skb, sizeof(struct char_wid));
    hwifi_fill_char_wid(wid_userpow, WID_USER_CONTROL_ON_TX_POWER , (uint8)userpow);

    ret = hwifi_send_cmd(cfg, skb);
    if (SUCC != ret)
    {
        HWIFI_WARNING("Failed to send wifi userpow set msg!");
        return -EFAIL;
    }
    /* Cache the value for hwifi_test_userpow_get() */
    cfg->hi110x_dev->tps->userpow=userpow;
    HWIFI_DEBUG("succeed to set wiuserpow: %d", userpow);
    return SUCC;
}
/*****************************************************************************
 Function    : hwifi_test_userpow_get
 Description : return the cached user TX-power override value (the value
               last stored by hwifi_test_userpow_set(); no firmware query)
               (header translated to English; original was mojibake)
 Input       : struct cfg_struct *cfg
 Return      : cached userpow value
*****************************************************************************/
int32 hwifi_test_userpow_get(struct cfg_struct *cfg)
{
    return cfg->hi110x_dev->tps->userpow;
}
/*****************************************************************************
 Function    : hwifi_test_pow_set
 Description : set the current TX power for test mode, after validating the
               value against a band-dependent limit (<=180 on 5 GHz,
               <=200 on 2.4 GHz; units presumably 0.1 dBm — TODO confirm)
               (header translated to English; the original GB2312 Chinese
               comment was mojibake in this file)
 Input       : struct cfg_struct *cfg, int32 pow
 Return      : SUCC on success, -EFAIL on failure
*****************************************************************************/
int32 hwifi_test_pow_set(struct cfg_struct *cfg, int32 pow)
{
    struct sk_buff *skb;
    struct hwifi_msg_header *msghdr;
    struct char_wid *wid_pow;
    uint16 msg_size;
    int32 ret;
    /* Band selection flags: default assumes 2.4 GHz */
    int32 n_channels_2G = 1;
    int32 n_channels_5G = 0;

    /* Determine the active band from the connection or AP state.
       NOTE(review): the log strings below mention "scan" — they look
       copy-pasted from scan code and describe band selection here. */
    if (HWIFI_CONNECTED == cfg->conn.status)
    {
        if (cfg->conn.bss.freq <= HWIFI_AT_TEST_MAX_FREQ_2G)
        {
            HWIFI_INFO("scan: 2.4G connected scan, only scan 2.4G");
            n_channels_5G = 0;
            n_channels_2G = 1;
        }
        else
        {
            HWIFI_INFO("scan: 5G connected scan, only scan 5G");
            n_channels_5G = 1;
            n_channels_2G = 0;
        }
    }
    else if(IS_AP(cfg))
    {
        if(cfg->ap_info.channel_info & HWIFI_AT_TEST_5G_BAND)
        {
            HWIFI_INFO("ap operation on 5G, only scan 5G");
            n_channels_5G = 1;
            n_channels_2G = 0;
        }
        else
        {
            HWIFI_INFO("ap operation on 2.4G, only scan 2.4G");
            n_channels_5G = 0;
            n_channels_2G = 1;
        }
    }

    /* Range-check the requested power against the band limit */
    if(n_channels_5G > 0)
    {
        if(pow > 180 || pow <0)
        {
            HWIFI_WARNING("can not set the pow value %d",pow);
            return -EFAIL;
        }
    }
    else if(n_channels_2G > 0)
    {
        if(pow > 200 || pow <0)
        {
            HWIFI_WARNING("can not set the pow value %d",pow);
            return -EFAIL;
        }
    }

    msg_size = sizeof(struct hwifi_msg_header) + sizeof(struct char_wid);
    skb = hwifi_alloc_skb_for_cmd(msg_size);
    if (NULL == skb)
    {
        return -EFAIL;
    }

    /* fill the msg header */
    msghdr = (struct hwifi_msg_header*)skb_put(skb, sizeof(struct hwifi_msg_header));
    hwifi_fill_msg_hdr(msghdr, HOST_CMD_CONFIG, msg_size);

    /* fill the TX-power WID (cast differs from the uint8 casts used by the
       sibling setters — verify the expected WID payload width) */
    wid_pow = (struct char_wid *)skb_put(skb, sizeof(struct char_wid));
    hwifi_fill_char_wid(wid_pow, WID_CURRENT_TX_POW , (uint16)pow);

    ret = hwifi_send_cmd(cfg, skb);
    if (SUCC != ret)
    {
        HWIFI_WARNING("fail to send wifi pow set msg");
        return -EFAIL;
    }
    /* Cache the value for hwifi_test_pow_get() */
    cfg->hi110x_dev->tps->pow=pow;
    HWIFI_INFO("succeed to send wifi pow set msg %d",pow);
    return SUCC;
}
/*****************************************************************************
 Function    : hwifi_test_pow_get
 Description : return the cached TX power value (the value last stored by
               hwifi_test_pow_set(); no firmware query)
               (header translated to English; original was mojibake)
 Input       : struct cfg_struct *cfg
 Return      : cached pow value
*****************************************************************************/
int32 hwifi_test_pow_get(struct cfg_struct *cfg)
{
    return cfg->hi110x_dev->tps->pow;
}
/*****************************************************************************
 Function    : hwifi_oltpc_active_set
 Description : enable/disable the open-loop TX power control (OLTPC) active
               state via WID_OLTPC_ACTIVE
               (header translated to English; the original GB2312 Chinese
               comment was mojibake in this file)
 Input       : struct cfg_struct *cfg, int32 enabled (truncated to uint8)
 Return      : SUCC on success, -EFAIL on failure
*****************************************************************************/
int32 hwifi_oltpc_active_set(struct cfg_struct *cfg, int32 enabled)
{
    struct sk_buff *skb;
    struct hwifi_msg_header *msghdr;
    struct char_wid *wid_oltpc;
    uint16 msg_size;
    int32 ret;

    msg_size = sizeof(struct hwifi_msg_header) + sizeof(struct char_wid);
    skb = hwifi_alloc_skb_for_cmd(msg_size);
    if (NULL == skb)
    {
        return -EFAIL;
    }

    /* fill the msg header */
    msghdr = (struct hwifi_msg_header*)skb_put(skb, sizeof(struct hwifi_msg_header));
    hwifi_fill_msg_hdr(msghdr, HOST_CMD_CONFIG, msg_size);

    /* fill the OLTPC-active WID */
    wid_oltpc = (struct char_wid *)skb_put(skb, sizeof(struct char_wid));
    hwifi_fill_char_wid(wid_oltpc, WID_OLTPC_ACTIVE , (uint8)enabled);

    ret = hwifi_send_cmd(cfg, skb);
    if (SUCC != ret)
    {
        HWIFI_WARNING("fail to send wifi oltpc active set msg");
        return -EFAIL;
    }
    /* Cache the value for hwifi_oltpc_active_get() */
    cfg->hi110x_dev->tps->oltpc_active=enabled;
    HWIFI_DEBUG("succeed to send wifi oltpc active set msg");
    return SUCC;
}
/*****************************************************************************
 Function    : hwifi_oltpc_active_get
 Description : get the cached WiFi OLTPC active state
 Input       : struct cfg_struct *cfg
 Output      : none
 Return      :
 Calls       :
 Called By   :
 History     :
   1. Date   : 2013-03-12
      Author : hWX160629
      Change : newly created function
*****************************************************************************/
int32 hwifi_oltpc_active_get(struct cfg_struct *cfg)
{
    /* Return the OLTPC active state last cached by hwifi_oltpc_active_set(). */
    return cfg->hi110x_dev->tps->oltpc_active;
}
/*****************************************************************************
 Function    : hwifi_oltpc_switch_set
 Description : set the WiFi OLTPC switch state
 Input       : struct cfg_struct *cfg, int32 enabled
 Output      : none
 Return      :
 Calls       :
 Called By   :
 History     :
   1. Date   : 2013-03-12
      Author : hWX160629
      Change : newly created function
*****************************************************************************/
/*
 * Send a HOST_CMD_CONFIG message carrying WID_OLTPC_SWITCH to the firmware
 * and, on success, cache the new switch state in the tps context.
 */
int32 hwifi_oltpc_switch_set(struct cfg_struct *cfg, int32 enabled)
{
    struct sk_buff *cmd_skb;
    struct hwifi_msg_header *hdr;
    struct char_wid *wid;
    uint16 cmd_len;
    int32 ret;

    cmd_len = sizeof(struct hwifi_msg_header) + sizeof(struct char_wid);
    cmd_skb = hwifi_alloc_skb_for_cmd(cmd_len);
    if (!cmd_skb)
    {
        return -EFAIL;
    }
    /* message header first, then the single WID payload */
    hdr = (struct hwifi_msg_header *)skb_put(cmd_skb, sizeof(*hdr));
    hwifi_fill_msg_hdr(hdr, HOST_CMD_CONFIG, cmd_len);
    wid = (struct char_wid *)skb_put(cmd_skb, sizeof(*wid));
    hwifi_fill_char_wid(wid, WID_OLTPC_SWITCH, (uint8)enabled);
    ret = hwifi_send_cmd(cfg, cmd_skb);
    if (ret != SUCC)
    {
        HWIFI_WARNING("Failed to send wifi olptc active set msg!");
        return -EFAIL;
    }
    /* remember the state so hwifi_oltpc_switch_get() can answer locally */
    cfg->hi110x_dev->tps->oltpc_switch = enabled;
    HWIFI_DEBUG("succeed to send wifi oltpc switch set msg");
    return SUCC;
}
/*****************************************************************************
 Function    : hwifi_oltpc_switch_get
 Description : get the cached WiFi OLTPC switch state
 Input       : struct cfg_struct *cfg
 Output      : none
 Return      :
 Calls       :
 Called By   :
 History     :
   1. Date   : 2013-03-12
      Author : hWX160629
      Change : newly created function
*****************************************************************************/
int32 hwifi_oltpc_switch_get(struct cfg_struct *cfg)
{
    /* Return the OLTPC switch state last cached by hwifi_oltpc_switch_set(). */
    return cfg->hi110x_dev->tps->oltpc_switch;
}
/*
* Prototype : hwifi_test_mode_set
* Description : set burst rx/tx mode
* Input : struct cfg_struct *cfg, uint8 enabled
* Output : None
* Return Value :
* Calls :
* Called By :
*
* History :
* 1.Date : 2013/5/3
* Author : hWX160629
* Modification : Created function
*
*/
/*
 * Switch the device into the requested burst rx/tx test mode.
 * Only valid while in STA or AP state; caches the mode on success.
 *
 * Fix: the original allocated the command skb *before* validating the
 * device state and had to free it on the failure path; validate first so
 * no allocation is wasted and the explicit dev_kfree_skb_any disappears.
 */
int32 hwifi_test_mode_set(struct cfg_struct *cfg, uint8 mode)
{
    struct sk_buff *skb;
    struct hwifi_msg_header *msghdr;
    struct char_wid *test_mode;
    uint16 msg_size;
    int32 ret;

    /* validate state before allocating anything */
    if (!IS_STA(cfg) && !IS_AP(cfg))
    {
        HWIFI_WARNING("Current status can not support burst tx/rx mode set.");
        return -EFAIL;
    }
    msg_size = sizeof(struct hwifi_msg_header) + sizeof(struct char_wid);
    skb = hwifi_alloc_skb_for_cmd(msg_size);
    if (NULL == skb)
    {
        return -EFAIL;
    }
    /* fill the msg header */
    msghdr = (struct hwifi_msg_header*)skb_put(skb, sizeof(struct hwifi_msg_header));
    hwifi_fill_msg_hdr(msghdr, HOST_CMD_MODE, msg_size);
    /* fill the mode-change WID (the old "fill ps mode" comment was a copy-paste) */
    test_mode = (struct char_wid *)skb_put(skb, sizeof(struct char_wid));
    hwifi_fill_char_wid(test_mode, WID_MODE_CHANGE, mode);
    ret = hwifi_send_cmd(cfg, skb);
    if (SUCC != ret)
    {
        HWIFI_WARNING("Failed to send mode set msg!");
        return -EFAIL;
    }
    /* cache so hwifi_test_mode_get() answers without a firmware query */
    cfg->hi110x_dev->tps->mode = mode;
    HWIFI_DEBUG("Succeed to set mode param :%d", mode);
    return SUCC;
}
/*
* Prototype : hwifi_test_mode_get
* Description : get the setting of mode param
* Input : struct cfg_struct *cfg
* Output : None
* Return Value :
* Calls :
* Called By :
*
* History :
* 1.Date : 2013/5/3
* Author : hWX160629
* Modification : Created function
*
*/
int32 hwifi_test_mode_get(struct cfg_struct *cfg)
{
    /* Return the burst rx/tx mode last cached by hwifi_test_mode_set(). */
    return cfg->hi110x_dev->tps->mode;
}
/*
* Prototype : hwifi_test_datarate_set
* Description : set rate
* Input : struct cfg_struct *cfg,
* uint8 enabled
* Output : None
* Return Value : int32
* Calls :
* Called By :
*
* History :
* 1.Date : 2012/2/19
* Author : hWX160629
* Modification : Created function
*
*/
/*
 * Send a HOST_CMD_CONFIG message carrying WID_CURRENT_TX_RATE to select the
 * transmit data rate. Unlike the other setters, the value is not cached.
 */
int32 hwifi_test_datarate_set(struct cfg_struct *cfg, uint8 rate)
{
    struct sk_buff *cmd_skb;
    struct hwifi_msg_header *hdr;
    struct char_wid *wid;
    uint16 cmd_len;
    int32 ret;

    cmd_len = sizeof(struct hwifi_msg_header) + sizeof(struct char_wid);
    cmd_skb = hwifi_alloc_skb_for_cmd(cmd_len);
    if (!cmd_skb)
    {
        return -EFAIL;
    }
    /* message header first, then the single WID payload */
    hdr = (struct hwifi_msg_header *)skb_put(cmd_skb, sizeof(*hdr));
    hwifi_fill_msg_hdr(hdr, HOST_CMD_CONFIG, cmd_len);
    wid = (struct char_wid *)skb_put(cmd_skb, sizeof(*wid));
    hwifi_fill_char_wid(wid, WID_CURRENT_TX_RATE, rate);
    ret = hwifi_send_cmd(cfg, cmd_skb);
    if (ret != SUCC)
    {
        HWIFI_WARNING("fail to send rate set msg");
        return -EFAIL;
    }
    HWIFI_INFO("succeed to send rate set msg %d",rate);
    return SUCC;
}
/*
* Prototype : hwifi_band_set
* Description : enable/disable support for 40MHz operation
* Input : struct cfg_struct *cfg,uint8 enabled
* Output : None
* Return Value : int32
* Calls :
* Called By :
*
* History :
* 1.Date : 2013/4/26
* Author : hWX160629
* Modification : Created function
*
*/
/*
 * Enable/disable 40MHz (wide-band) operation by delegating to the STA
 * 20/40 coexistence control helper.
 */
int32 hwifi_band_set(struct cfg_struct *cfg, uint8 band)
{
    if (hwifi_sta_2040_enable_ctrl_set(cfg, band) != SUCC)
    {
        HWIFI_WARNING("Failed to send band param set msg!");
        return -EFAIL;
    }
    HWIFI_INFO("Succeed to set band param:%d", band);
    return SUCC;
}
/*
* Prototype : wifitest_protocol_gmode_set
* Description : set 11g operating mode
* Input : struct cfg_struct *cfg,uint8 enabled
* Output : None
* Return Value : int32
* Calls :
* Called By :
*
* History :
* 1.Date : 2013/4/26
* Author : hWX160629
* Modification : Created function
*
*/
/*
 * Configure the 11g operating mode via WID_11G_OPERATING_MODE.
 * Refused while a connection or P2P session is active; caches the value
 * in sta_info on success.
 */
int32 wifitest_protocol_gmode_set(struct cfg_struct *cfg, uint8 mode)
{
    struct sk_buff *cmd_skb;
    struct hwifi_msg_header *hdr;
    struct char_wid *wid;
    uint16 cmd_len;
    int32 ret;

    /* not allowed while connected or P2P is running */
    if (IS_CONNECTED(cfg) || (IS_P2P_ON(cfg)))
    {
        HWIFI_WARNING("current status can not support protocol gmode set.");
        return -EFAIL;
    }
    cmd_len = sizeof(struct hwifi_msg_header) + sizeof(struct char_wid);
    cmd_skb = hwifi_alloc_skb_for_cmd(cmd_len);
    if (!cmd_skb)
    {
        return -EFAIL;
    }
    /* message header first, then the gmode WID payload */
    hdr = (struct hwifi_msg_header *)skb_put(cmd_skb, sizeof(*hdr));
    hwifi_fill_msg_hdr(hdr, HOST_CMD_CONFIG, cmd_len);
    wid = (struct char_wid *)skb_put(cmd_skb, sizeof(*wid));
    hwifi_fill_char_wid(wid, WID_11G_OPERATING_MODE, mode);
    ret = hwifi_send_cmd(cfg, cmd_skb);
    if (ret != SUCC)
    {
        HWIFI_WARNING("fail to send protocol gmode set msg");
        return -EFAIL;
    }
    cfg->sta_info.gmode = mode;
    HWIFI_INFO("succeed to set protocol gmode:%d", mode);
    return SUCC;
}
/*
* Prototype : wifitest_protocol_nmode_set
* Description : set ht capability enabled
* Input : struct cfg_struct *cfg,uint8 enabled
* uint8 enabled
* Output : None
* Return Value : int32
* Calls :
* Called By :
*
* History :
* 1.Date : 2012/2/19
* Author : hWX160629
* Modification : Created function
*
*/
/*
 * Enable/disable 802.11n (HT) capability via WID_11N_ENABLE.
 * Refused while connected or operating as an AP.
 */
int32 wifitest_protocol_nmode_set(struct cfg_struct *cfg, uint8 mode)
{
    struct sk_buff *cmd_skb;
    struct hwifi_msg_header *hdr;
    struct char_wid *wid;
    uint16 cmd_len;
    int32 ret;

    /* not allowed while connected or running as AP */
    if (IS_CONNECTED(cfg) || IS_AP(cfg))
    {
        HWIFI_WARNING("current status can not support 11n mode set.");
        return -EFAIL;
    }
    cmd_len = sizeof(struct hwifi_msg_header) + sizeof(struct char_wid);
    cmd_skb = hwifi_alloc_skb_for_cmd(cmd_len);
    if (!cmd_skb)
    {
        return -EFAIL;
    }
    /* message header first, then the 11n-enable WID payload */
    hdr = (struct hwifi_msg_header *)skb_put(cmd_skb, sizeof(*hdr));
    hwifi_fill_msg_hdr(hdr, HOST_CMD_CONFIG, cmd_len);
    wid = (struct char_wid *)skb_put(cmd_skb, sizeof(*wid));
    hwifi_fill_char_wid(wid, WID_11N_ENABLE, mode);
    ret = hwifi_send_cmd(cfg, cmd_skb);
    if (ret != SUCC)
    {
        HWIFI_WARNING("fail to send 11n mode set msg");
        return -EFAIL;
    }
    HWIFI_INFO("succeed to send 11n mode set msg %d",mode);
    return SUCC;
}
/*
* Prototype : hwifi_dbb_get
* Description : get dbb of wifi
* Input : struct cfg_struct *cfg
* Output : None
* Return Value : int
* Calls :
* Called By :
*
* History :
* 1.Date : 2013/11/9
* Author : hWX160629
* Modification : Created function
*
*/
/*
 * Query the DBB version string from the firmware.
 *
 * Asks the firmware via wl_get_dbb_info(), then sleeps on cfg->wait_queue
 * until the event handler clears the 0xFF "no answer yet" sentinel or 5s
 * elapse. On success the version string is copied into @dbb.
 *
 * Fix: strncpy() does not NUL-terminate when the source fills the buffer
 * (CERT STR32-C); terminate explicitly. Assumes @dbb holds at least
 * HISI_WIFI_DBB_LEN bytes — TODO confirm against callers.
 */
int32 hwifi_dbb_get(struct cfg_struct *cfg,int8 *dbb)
{
    int32 ret;

    if (NULL == cfg)
    {
        HWIFI_WARNING("Invalid NULL cfg!");
        return -EFAIL;
    }
    /* 0xFF marks "answer not yet received"; cleared by the event path */
    cfg->hi110x_dev->tps->dbb_ver_got = 0xFF;
    ret = wl_get_dbb_info(cfg);
    if(SUCC != ret)
    {
        HWIFI_WARNING("Failed to get DBB number!");
        return -EFAIL;
    }
    /* block up to 5 seconds for the firmware reply */
    ret = wait_event_interruptible_timeout(cfg->wait_queue, (0xFF != cfg->hi110x_dev->tps->dbb_ver_got), 5 * HZ);
    if (0 == ret)
    {
        HWIFI_WARNING("wait for dbb version message time out(5s)!");
        return -EFAIL;
    }
    else if (ret < 0)
    {
        HWIFI_WARNING("wait for dbb version message error!");
        return -EFAIL;
    }
    strncpy(dbb,cfg->hi110x_dev->tps->dbb,HISI_WIFI_DBB_LEN);
    dbb[HISI_WIFI_DBB_LEN - 1] = '\0';  /* guarantee termination */
    HWIFI_DEBUG("DBB number is %s",cfg->hi110x_dev->tps->dbb);
    return SUCC;
}
/*
 * Query the UPC check flag from the firmware.
 *
 * Sends the request via wl_get_upc_info(), then sleeps on cfg->wait_queue
 * until the event handler replaces the -EFAIL "no answer yet" sentinel in
 * check_upc_ctrl, or 5 seconds elapse.
 *
 * Returns the reported check_upc_flag value on success, -EFAIL on send
 * failure, timeout, or interrupted wait.
 */
int32 hwifi_upc_get(struct cfg_struct *cfg)
{
    int32 ret;
    if (NULL == cfg)
    {
        HWIFI_WARNING("Invalid NULL cfg!");
        return -EFAIL;
    }
    /* -EFAIL acts as the "answer not yet received" sentinel */
    cfg->hi110x_dev->tps->check_upc_ctrl = -EFAIL;
    ret = wl_get_upc_info(cfg);
    if(SUCC != ret)
    {
        HWIFI_WARNING("Failed to get upc!");
        return -EFAIL;
    }
    /* block up to 5 seconds for the firmware reply */
    ret = wait_event_interruptible_timeout(cfg->wait_queue, (-EFAIL != cfg->hi110x_dev->tps->check_upc_ctrl), 5 * HZ);
    if (0 == ret)
    {
        HWIFI_WARNING("wait for upc info message time out(5s)!");
        return -EFAIL;
    }
    else if (ret < 0)
    {
        HWIFI_WARNING("wait for upc info message error!");
        return -EFAIL;
    }
    HWIFI_DEBUG("report upc info is %d",cfg->hi110x_dev->tps->check_upc_flag);
    return cfg->hi110x_dev->tps->check_upc_flag;
}
/*
 * Configure the device to emit a CW single tone (factory/RF test signal).
 *
 * Builds one HOST_CMD_CONFIG message containing eight PHY-active
 * register/value WID pairs plus one RF register WID and sends it in a
 * single command. The register addresses/values are opaque constants from
 * the project headers; the write order presumably matters to the PHY —
 * do not reorder without confirming against the firmware spec.
 */
int32 hwifi_gen_cw_single_tone_set(struct cfg_struct *cfg)
{
    int32 ret;
    uint16 msg_size;
    struct sk_buff *skb;
    struct hwifi_gen_cw_single_tone_msg *msg;
    HWIFI_ASSERT((NULL != cfg));
    msg_size = sizeof(struct hwifi_gen_cw_single_tone_msg);
    skb = hwifi_alloc_skb_for_cmd(msg_size);
    if (NULL == skb)
    {
        return -EFAIL;
    }
    /* the whole message struct is laid out contiguously in the skb */
    msg = (struct hwifi_gen_cw_single_tone_msg *)skb_put(skb, msg_size);
    /* set the msg header */
    hwifi_fill_msg_hdr(&msg->msg_hdr, HOST_CMD_CONFIG, msg_size);
    /* each pair selects a PHY-active register, then writes its value */
    hwifi_fill_char_wid(&msg->phy_active_reg_1, WID_PHY_ACTIVE_REG, WID_SIGNAL_TONE_PHY_ACTIVE_REG_1);
    hwifi_fill_int_wid(&msg->hw_11n_phy_active_reg_val_1, WID_11N_PHY_ACTIVE_REG_VAL, WID_SIGNAL_TONE_ACTIVE_REG_VAL_1);
    hwifi_fill_char_wid(&msg->phy_active_reg_2, WID_PHY_ACTIVE_REG, WID_SIGNAL_TONE_PHY_ACTIVE_REG_2);
    hwifi_fill_int_wid(&msg->hw_11n_phy_active_reg_val_2,WID_11N_PHY_ACTIVE_REG_VAL,WID_SIGNAL_TONE_ACTIVE_REG_VAL_2);
    hwifi_fill_char_wid(&msg->phy_active_reg_3, WID_PHY_ACTIVE_REG, WID_SIGNAL_TONE_PHY_ACTIVE_REG_3);
    hwifi_fill_int_wid(&msg->hw_11n_phy_active_reg_val_3,WID_11N_PHY_ACTIVE_REG_VAL,WID_SIGNAL_TONE_ACTIVE_REG_VAL_3);
    hwifi_fill_char_wid(&msg->phy_active_reg_4, WID_PHY_ACTIVE_REG, WID_SIGNAL_TONE_PHY_ACTIVE_REG_4);
    hwifi_fill_int_wid(&msg->hw_11n_phy_active_reg_val_4,WID_11N_PHY_ACTIVE_REG_VAL,WID_SIGNAL_TONE_ACTIVE_REG_VAL_4);
    hwifi_fill_char_wid(&msg->phy_active_reg_5, WID_PHY_ACTIVE_REG, WID_SIGNAL_TONE_PHY_ACTIVE_REG_5);
    hwifi_fill_int_wid(&msg->hw_11n_phy_active_reg_val_5,WID_11N_PHY_ACTIVE_REG_VAL,WID_SIGNAL_TONE_ACTIVE_REG_VAL_5);
    hwifi_fill_char_wid(&msg->phy_active_reg_6, WID_PHY_ACTIVE_REG, WID_SIGNAL_TONE_PHY_ACTIVE_REG_6);
    hwifi_fill_int_wid(&msg->hw_11n_phy_active_reg_val_6,WID_11N_PHY_ACTIVE_REG_VAL,WID_SIGNAL_TONE_ACTIVE_REG_VAL_6);
    hwifi_fill_char_wid(&msg->phy_active_reg_7, WID_PHY_ACTIVE_REG, WID_SIGNAL_TONE_PHY_ACTIVE_REG_7);
    hwifi_fill_int_wid(&msg->hw_11n_phy_active_reg_val_7,WID_11N_PHY_ACTIVE_REG_VAL,WID_SIGNAL_TONE_ACTIVE_REG_VAL_7);
    hwifi_fill_char_wid(&msg->phy_active_reg_8, WID_PHY_ACTIVE_REG, WID_SIGNAL_TONE_PHY_ACTIVE_REG_8);
    hwifi_fill_int_wid(&msg->hw_11n_phy_active_reg_val_8,WID_11N_PHY_ACTIVE_REG_VAL,WID_SIGNAL_TONE_ACTIVE_REG_VAL_8);
    /* finally the RF register configuration */
    hwifi_fill_int_wid(&msg->rf_reg_info, WID_RF_REG_VAL, WID_SIGNAL_TONE_RF_REG_INFO);
    ret = hwifi_send_cmd(cfg, skb);
    PRINT_SEND_CMD_RET("connect status,gen_cw_single_tone param set success", ret);
    return ret;
}
int32 hwifi_tps_ioctl_cmd(struct hi110x_device* hi110x_dev, struct ifreq *ifr, int32 cmd)
{
wifi_ioctl_test_data_struct ioctl_data;
int32 ret = SUCC;
if ((NULL == hi110x_dev) || (NULL == ifr) || (NULL == ifr->ifr_data))
{
HWIFI_WARNING("Invalid NULL params!");
return -EFAIL;
}
HWIFI_PRINT_ONCE(INFO, "sizeof wifi_ioctl_test_data_struct is %zu", sizeof(wifi_ioctl_test_data_struct));
if(copy_from_user(&ioctl_data,ifr->ifr_data,sizeof(wifi_ioctl_test_data_struct)))
{
HWIFI_WARNING("Failed to copy ifr->ifr_data from user!");
return -EFAIL;
}
if(ioctl_data.verify != VERIFY_CODE)
{
HWIFI_WARNING("ioctl verify failed,verify code is:%d(not equal %d)", ioctl_data.verify, VERIFY_CODE);
return -EFAIL;
}
switch(ioctl_data.cmd)
{
case HWIFI_IOCTL_CMD_WI_FREQ_SET:
ret = hwifi_test_freq_set(hi110x_dev->cfg,ioctl_data.pri_data.freq);
break;
case HWIFI_IOCTL_CMD_WI_USERPOW_SET:
ret = hwifi_test_userpow_set(hi110x_dev->cfg,ioctl_data.pri_data.userpow);
break;
case HWIFI_IOCTL_CMD_WI_USERPOW_GET:
ioctl_data.pri_data.userpow = hwifi_test_userpow_get(hi110x_dev->cfg);
if(copy_to_user(ifr->ifr_data,&ioctl_data,sizeof(wifi_ioctl_test_data_struct)))
{
HWIFI_WARNING("Failed to copy ioctl_data to user !");
ret = -EFAIL;
}
break;
case HWIFI_IOCTL_CMD_WI_POWER_SET:
ret = hwifi_test_pow_set(hi110x_dev->cfg,ioctl_data.pri_data.pow);
break;
case HWIFI_IOCTL_CMD_WI_POWER_GET:
ioctl_data.pri_data.pow = hwifi_test_pow_get(hi110x_dev->cfg);
if(copy_to_user(ifr->ifr_data,&ioctl_data,sizeof(wifi_ioctl_test_data_struct)))
{
HWIFI_WARNING("Failed to copy ioctl_data to user !");
ret = -EFAIL;
}
break;
case HWIFI_IOCTL_OLTPC_ACTIVE_SET:
ret = hwifi_oltpc_active_set(hi110x_dev->cfg,ioctl_data.pri_data.oltpc_active);
break;
case HWIFI_IOCTL_OLTPC_SWITCH_SET:
ret = hwifi_oltpc_switch_set(hi110x_dev->cfg,ioctl_data.pri_data.oltpc_switch);
break;
case HWIFI_IOCTL_OLTPC_ACTIVE_GET:
ioctl_data.pri_data.oltpc_active=hwifi_oltpc_active_get(hi110x_dev->cfg);
if(copy_to_user(ifr->ifr_data,&ioctl_data,sizeof(wifi_ioctl_test_data_struct)))
{
HWIFI_WARNING("Failed to copy ioctl_data to user !");
ret = -EFAIL;
}
break;
case HWIFI_IOCTL_OLTPC_SWITCH_GET:
ioctl_data.pri_data.oltpc_switch=hwifi_oltpc_switch_get(hi110x_dev->cfg);
if(copy_to_user(ifr->ifr_data,&ioctl_data,sizeof(wifi_ioctl_test_data_struct)))
{
HWIFI_WARNING("copy_to_user failed");
ret = -EFAIL;
}
break;
case HWIFI_IOCTL_CMD_MODE_SET:
ret = hwifi_test_mode_set(hi110x_dev->cfg,ioctl_data.pri_data.mode);
break;
case HWIFI_IOCTL_CMD_MODE_GET:
ioctl_data.pri_data.mode=hwifi_test_mode_get(hi110x_dev->cfg);
if(copy_to_user(ifr->ifr_data,&ioctl_data,sizeof(wifi_ioctl_test_data_struct)))
{
HWIFI_WARNING("Failed to copy ioctl_data to user !");
ret = -EFAIL;
}
break;
case HWIFI_IOCTL_CMD_DATARATE_SET:
ret = hwifi_test_datarate_set(hi110x_dev->cfg,(uint8)ioctl_data.pri_data.datarate);
break;
case HWIFI_IOCTL_CMD_BAND_SET:
ret = hwifi_band_set(hi110x_dev->cfg,ioctl_data.pri_data.band);
break;
case HWIFI_IOCTL_CMD_PROTOCOL_GMODE_SET:
ret = wifitest_protocol_gmode_set(hi110x_dev->cfg,ioctl_data.pri_data.protocol_gmode);
break;
case HWIFI_IOCTL_CMD_PROTOCOL_NMODE_SET:
ret = wifitest_protocol_nmode_set(hi110x_dev->cfg,ioctl_data.pri_data.protocol_nmode);
break;
case HWIFI_IOCTL_CMD_DBB_GET:
ret = hwifi_dbb_get(hi110x_dev->cfg,ioctl_data.pri_data.dbb);
if(copy_to_user(ifr->ifr_data,&ioctl_data,sizeof(wifi_ioctl_test_data_struct)))
{
HWIFI_WARNING("Failed to copy ioctl_data to user !");
ret = -EFAIL;
}
break;
case HWIFI_IOCTL_CMD_UPC_GET:
ioctl_data.pri_data.check_upc_flag = hwifi_upc_get(hi110x_dev->cfg);
if(copy_to_user(ifr->ifr_data,&ioctl_data,sizeof(wifi_ioctl_test_data_struct)))
{
HWIFI_WARNING("Failed to copy ioctl_data to user !");
ret = -EFAIL;
}
break;
case HWIFI_IOCTL_CMD_GEN_CW_SINGLE_TONE_SET:
ret = hwifi_gen_cw_single_tone_set(hi110x_dev->cfg);
break;
default:
HWIFI_WARNING("Invalid not support ioctl_data.cmd(%d)",ioctl_data.cmd);
ret = -EFAIL;
break;
}
return ret;
}
#ifdef __cplusplus
#if __cplusplus
}
#endif
#endif
|
gabry3795/android_kernel_huawei_mt7_l09
|
drivers/huawei_platform/connectivity/hisi/hisiwifi/hwifi_tps.c
|
C
|
gpl-2.0
| 28,729
|
#include <psp2/display.h>
#include <psp2/io/fcntl.h>
#include <psp2/kernel/processmgr.h>
#include <stdio.h> // sprintf()
#include <psp2/ctrl.h> // sceCtrl*()
#include "graphics.h"
#define VER_MAJOR 0
#define VER_MINOR 9
#define VER_BUILD ""
#define VAL_LENGTH 0x10
#define VAL_PUBLIC 0x0A
#define VAL_PRIVATE 0x06
#define printf psvDebugScreenPrintf
int _vshSblAimgrGetConsoleId(char CID[32]);
/*
Model: Proto, SKU: DEM-3000, MoBo: IRT-001/IRT-002;
Model: FatWF, SKU: PCH-1000, MoBo: IRS-002/IRS-1001;
Model: Fat3G, SKU: PCH-1100, MoBo: IRS-002/IRS-1001;
Model: Slim, SKU: PCH-2000, MoBo: USS-1001/USS-1002;
Model: TV, SKU: VTE-1000, MoBo: DOL-1001/DOL-1002.
No diff between FatWF and Fat3G.
No diff between Vita TV (Asian) and PSTV (Western).
*/
SceCtrlData pad;
/* Print a prompt, then poll the controller until Cross (X) is pressed,
   and exit the process. */
void ExitCross(char*text)
{
    printf("%s, press X to exit...\n", text);
    for (;;)
    {
        sceCtrlReadBufferPositive(0, &pad, 1);
        /* ~50ms between polls, same cadence as the original do/while */
        sceKernelDelayThread(0.05*1000*1000);
        if (pad.buttons & SCE_CTRL_CROSS)
            break;
    }
    sceKernelExitProcess(0);
}
/* Print a formatted error (text is a printf format consuming `error`),
   wait `delay` seconds, then exit the process. */
void ExitError(char*text, int delay, int error)
{
    printf(text, error);
    /* delay is in seconds; the kernel call takes microseconds */
    sceKernelDelayThread(delay * 1000000);
    sceKernelExitProcess(0);
}
/* Replace `file` with `size` bytes from `buf`.
   Returns the byte count written, or the negative sceIoOpen error. */
int WriteFile(char*file, void*buf, int size)
{
    SceUID fd;
    int written;

    sceIoRemove(file);
    fd = sceIoOpen(file, SCE_O_WRONLY | SCE_O_CREAT | SCE_O_TRUNC, 0777);
    if (fd < 0)
    {
        return fd;
    }
    written = sceIoWrite(fd, buf, size);
    sceIoClose(fd);
    return written;
}
int main(int argc, char *argv[])
{
int i = 0;
int paranoid = 0;
char idps_buffer[16];
unsigned char idps_text_char_tmp[1];
unsigned char idps_text_char_1st[1];
unsigned char idps_text_char_2nd[1];
char idps_text_buffer[32] = "";
for (i = 0; i < 1000; i++) {
sceCtrlReadBufferPositive(0, &pad, 1);
if (pad.buttons & SCE_CTRL_LTRIGGER)
paranoid = 1;
sceKernelDelayThread(1000);
}
psvDebugScreenInit();
psvDebugScreenClear(0);
printf("PSV IDPS Dumper v%i.%i%s by Yoti\n\n", VER_MAJOR, VER_MINOR, VER_BUILD);
#if (VAL_PUBLIC + VAL_PRIVATE != 0x10)
#error IDPS Lenght must be 16 bytes long!
#endif
_vshSblAimgrGetConsoleId(idps_buffer);
printf(" Your IDPS is: ");
for (i=0; i<VAL_PUBLIC; i++)
{
if (i == 0x04)
psvDebugScreenSetFgColor(0xFF0000FF); // red #1
else if (i == 0x05)
psvDebugScreenSetFgColor(0xFFFF0000); // blue #2
else if (i == 0x06)
psvDebugScreenSetFgColor(0xFF0000FF); // red #3
else if (i == 0x07)
psvDebugScreenSetFgColor(0xFF00FF00); // green #4
else
psvDebugScreenSetFgColor(0xFFFFFFFF); // white
printf("%02X", (u8)idps_buffer[i]);
}
if (paranoid == 1)
{
for (i=0; i<VAL_PRIVATE; i++)
{
psvDebugScreenSetFgColor(0xFF777777); // gray
printf("XX");
psvDebugScreenSetFgColor(0xFFFFFFFF); // white
}
}
else
{
for (i=0; i<VAL_PRIVATE; i++)
{
psvDebugScreenSetFgColor(0xFFFFFFFF); // white
printf("%02X", (u8)idps_buffer[VAL_PUBLIC+i]);
}
}
printf("\n\n");
printf(" It seems that you are using ");
psvDebugScreenSetFgColor(0xFF0000FF); // red
if (idps_buffer[0x04] == 0x00)
printf("PlayStation Portable");
else if (idps_buffer[0x04] == 0x01) // psv, vtv/pstv
{
if (idps_buffer[0x06] == 0x00)
printf("PlayStation Vita"); // fatWF/fat3G, slim
else if (idps_buffer[0x06] == 0x02)
printf("PlayStation/Vita TV"); // vtv, pstv
else if (idps_buffer[0x06] == 0x06)
printf("PlayStation/Vita TV"); // vtv, pstv (testkit)
else
printf("Unknown Vita 0x%02X", idps_buffer[0x06]);
}
else
printf("Unknown PS 0x%02X", idps_buffer[0x04]);
psvDebugScreenSetFgColor(0xFFFFFFFF); // white
printf("\n");
printf(" Your motherboard is ");
psvDebugScreenSetFgColor(0xFF00FF00); // green
if (idps_buffer[0x06] == 0x00) // portable
{
switch(idps_buffer[0x07])
{
case 0x01:
printf("TA-079/081 (PSP-1000)");
break;
case 0x02:
printf("TA-082/086 (PSP-1000)");
break;
case 0x03:
printf("TA-085/088 (PSP-2000)");
break;
case 0x04:
printf("TA-090/092 (PSP-3000)");
break;
case 0x05:
printf("TA-091 (PSP-N1000)");
break;
case 0x06:
printf("TA-093 (PSP-3000)");
break;
//case 0x07:
// printf("TA-094 (PSP-N1000)");
// break;
case 0x08:
printf("TA-095 (PSP-3000)");
break;
case 0x09:
printf("TA-096/097 (PSP-E1000)");
break;
case 0x10:
printf("IRS-002 (PCH-1000/1100)");
break;
case 0x11: // 3G?
case 0x12: // WF?
printf("IRS-1001 (PCH-1000/1100)");
break;
case 0x14:
printf("USS-1001 (PCH-2000)");
break;
case 0x18:
printf("USS-1002 (PCH-2000)");
break;
default:
printf("Unknown MoBo 0x%02X", idps_buffer[0x07]);
break;
}
}
else if ((idps_buffer[0x06] == 0x02) || (idps_buffer[0x06] == 0x06)) // home system
{
switch(idps_buffer[0x07])
{
case 0x01:
printf("DOL-1001 (VTE-1000)");
break;
case 0x02:
printf("DOL-1002 (VTE-1000)");
break;
default:
printf("Unknown MoBo 0x%02X", idps_buffer[0x07]);
break;
}
}
else
printf("Unknown type 0x%02X", idps_buffer[0x06]);
psvDebugScreenSetFgColor(0xFFFFFFFF); // white
printf("\n");
printf(" And your region is ");
psvDebugScreenSetFgColor(0xFFFF0000); // blue
switch(idps_buffer[0x05])
{
case 0x00:
printf("Proto");
break;
case 0x01:
printf("DevKit");
break;
case 0x02:
printf("TestKit");
break;
case 0x03:
printf("Japan");
break;
case 0x04:
printf("North America");
break;
case 0x05:
printf("Europe/East/Africa");
break;
case 0x06:
printf("Korea");
break;
case 0x07: // PCH-xx03 VTE-1016
printf("Great Britain/United Kingdom");
break;
case 0x08:
printf("Mexica/Latin America");
break;
case 0x09:
printf("Australia/New Zeland");
break;
case 0x0A:
printf("Hong Kong/Singapore");
break;
case 0x0B:
printf("Taiwan");
break;
case 0x0C:
printf("Russia");
break;
case 0x0D:
printf("China");
break;
default:
printf("Unknown region 0x%02X", idps_buffer[0x05]);
break;
}
psvDebugScreenSetFgColor(0xFFFFFFFF); // white
printf("\n\n");
// binary
printf(" Saving as ux0:data/idps.bin... ");
if (WriteFile("ux0:data/idps.bin", idps_buffer, 16) > 0)
printf("OK");
else
printf("NG");
printf("\n");
// text
for (i=0; i<0x10; i++)
{
idps_text_char_tmp[1]=idps_buffer[i];
idps_text_char_1st[1]=(idps_text_char_tmp[1] & 0xf0) >> 4;
idps_text_char_2nd[1]=(idps_text_char_tmp[1] & 0x0f);
// 1st half of byte
if (idps_text_char_1st[1] < 0xA) // digit
sprintf(idps_text_buffer, "%s%c", idps_text_buffer, idps_text_char_1st[1]+0x30);
else // char
sprintf(idps_text_buffer, "%s%c", idps_text_buffer, idps_text_char_1st[1]+0x37);
// 2nd half of byte
if (idps_text_char_2nd[1] < 0xA) // digit
sprintf(idps_text_buffer, "%s%c", idps_text_buffer, idps_text_char_2nd[1]+0x30);
else // char
sprintf(idps_text_buffer, "%s%c", idps_text_buffer, idps_text_char_2nd[1]+0x37);
}
printf(" Saving as ux0:data/idps.txt... ");
if (WriteFile("ux0:data/idps.txt", idps_text_buffer, 32) > 0)
printf("OK");
else
printf("NG");
printf("\n\n");
printf(" https://github.com/yoti/psv_idpsdump/\n");
ExitCross("\nDone");
return 0;
}
|
Yoti/psv_idpsdump
|
main.c
|
C
|
gpl-3.0
| 7,135
|
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* Copyright (C) 2015 Leslie Zhai <xiang.zhai@i-soft.com.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <glib.h>
#include <glib/gstdio.h>
/*
 * Demo: classify argv[1] with lstat() and g_file_test().
 *
 * Fix: the original dereferenced argv[1] without checking argc, crashing
 * with a NULL path when run without arguments.
 */
int main(int argc, char *argv[])
{
    char *path;
    struct stat buf;

    if (argc < 2) {
        printf("Usage: %s <path>\n", argv[0]);
        return 0;
    }
    path = argv[1];
    /* lstat (not stat) so symlinks are reported as S_IFLNK, not followed */
    if (lstat(path, &buf) == -1) {
        printf("ERROR: failed to get %s lstat\n", path);
        return 0;
    }
    switch (buf.st_mode & S_IFMT) {
    case S_IFDIR:
        printf("DEBUG: line %d %s is directory\n", __LINE__, path);
        break;
    case S_IFLNK:
        printf("DEBUG: line %d %s is symbolic link\n", __LINE__, path);
        break;
    case S_IFREG:
        printf("DEBUG: line %d %s is regular file\n", __LINE__, path);
        break;
    default:
        break;
    }
    /* cross-check with GLib's test (follows its own semantics) */
    if (g_file_test(path, G_FILE_TEST_EXISTS | G_FILE_TEST_IS_SYMLINK))
        printf("DEBUG: line %d %s is symbolic link\n", __LINE__, path);
    return 0;
}
|
LeetcodeCN/leetcodecn.github.io
|
src/stat/hello-stat.c
|
C
|
gpl-3.0
| 1,716
|
#include <includes.h>
#include <utils.h>
#include <methnum.h>
#include "s_param.h"
#include "integral.h"
/* Vacuum part of the momentum integral, evaluated in closed form with a
   3-momentum cutoff P->L (assumes P->L2 == P->L squared — TODO confirm).
   NOTE(review): the term "m2 + 2*P->L" mixes mass^2 with a single power of
   the cutoff; dimensionally one would expect 2*P->L2 — verify against the
   model's vacuum-energy formula. */
double Ivac ( Param * P , double m )
{
    double m2 = m*m ;
    double LE = sqrt ( m2 + P->L2 );   /* E at the cutoff: sqrt(m^2 + L^2) */
    return 0.5 * ONE_OVER_8PI_2 * ( P->L * LE * ( m2 + 2*P->L ) + m2*m2 * log ( m/(P->L + LE) ) ) ;
}
/* First derivative of Ivac with respect to the mass m (closed form). */
double dm_Ivac ( Param * P , double m )
{
    double m2 = m*m ;
    double LE = sqrt ( m2 + P->L2 );   /* E at the cutoff */
    return m * ONE_OVER_4PI_2 * ( P->L * LE + m2 * log ( m/(P->L + LE) ) );
}
/* Second derivative of Ivac with respect to the mass m (closed form). */
double dm2_Ivac ( Param * P , double m )
{
    double m2 = m*m ;
    double LE = sqrt ( m2 + P->L2 );   /* E at the cutoff */
    return ONE_OVER_4PI_2 * ( P->L*(3*m2 + P->L2)/LE + 3*m2 * log ( m / (P->L + LE) ) ) ;
}
/* Third derivative of Ivac with respect to the mass m (closed form). */
double dm3_Ivac ( Param * P , double m )
{
    double m2 = m*m ;
    double LE2 = m2 + P->L2 ;          /* E^2 at the cutoff */
    double LE = sqrt( LE2 );
    return 3 * m * ONE_OVER_2PI_2 * ( P->L*( 3*m2 + 4*P->L2 )/(3*LE2*LE) + log ( m / (P->L + LE) ) );
}
/*
 * Medium (finite T, mu) part of the integral:
 *   (1/2pi^2) * Int_0^L dp p^2 [ ln(1+e^{-(E-mu)/T}) + ln(1+e^{-(E+mu)/T}) ]
 * integrated numerically via the project's integ_dp with the `cutoff` scheme.
 *
 * Fix: the original declared `double b = log(...)` inside the integrand,
 * shadowing the outer inverse temperature `b` (-Wshadow hazard). Locals are
 * renamed; the evaluated expressions are unchanged.
 *
 * NOTE: the nested function is a GCC extension (required by integ_dp's
 * callback interface as used throughout this file).
 */
double Imed ( Param * P , double m , double T , double mu )
{
    double m2 = m*m ;
    double beta = 1./T ;   /* inverse temperature */
    /* integrand at momentum p */
    double integ ( double p )
    {
        double p2 = p*p ;
        double E2 = p2 + m2 ;
        double E = sqrt ( E2 );
        double x = -(E - mu) * beta ;   /* particle exponent */
        double y = -(E + mu) * beta ;   /* antiparticle exponent */
        double lnp = log ( 1 + exp ( x ) ) ;
        double lnm = log ( 1 + exp ( y ) ) ;
        return p2 * ( lnp + lnm );
    }
    double I = ONE_OVER_2PI_2 * integ_dp ( integ , 0. , P->L , cutoff );
    return I ;
}
/* Derivative of the medium part with respect to the mass m:
     m * Int_0^L dp p^2 * ( -f - fbar ) / E
   integrated numerically; f and fbar are Fermi-Dirac occupations for
   particles and antiparticles. Nested function: GCC extension. */
double dm_Imed ( Param * P , double m , double T , double mu )
{
    double m2 = m*m ;
    double b = 1./T ;   /* inverse temperature */
    /* integrand at momentum p */
    double integ ( double p )
    {
        double p2 = p*p ;
        double E2 = p2 + m2 ;
        double E = sqrt ( E2 );
        double x = (E - mu) * b ;
        double y = (E + mu) * b ;
        double ex = exp ( x ) ;
        double ey = exp ( y );
        /* Fermi-Dirac occupations: particles (f) and antiparticles (fb) */
        double f = 1. / ( 1 + ex );
        double fb = 1. / ( 1 + ey );
        return p2 * ( - f - fb ) / E ;
    }
    double I = integ_dp ( integ , 0. , P->L , cutoff );
    return m * I ;
}
/*
 * Derivative of the medium part with respect to T.
 *
 * Fix: the original body was completely empty — using the return value of
 * a non-void function that falls off the end is undefined behavior. Until
 * the formula is implemented, return an explicit 0 so callers get a
 * deterministic value.
 *
 * TODO: implement (differentiate the Imed integrand with respect to T).
 */
double dT_Imed ( Param * P , double m , double T , double mu )
{
    (void) P ; (void) m ; (void) T ; (void) mu ;   /* unused until implemented */
    return 0. ;
}
double dmu_Imed ( Param * P , double m , double T , double mu );
/* double dm2_Imed ( Param * P , double m , double T , double mu ); */
/* double dmT_Imed ( Param * P , double m , double T , double mu ); */
/* double dmmu_Imed ( Param * P , double m , double T , double mu ); */
/* double dT2_Imed ( Param * P , double m , double T , double mu ); */
/* double dTmu_Imed ( Param * P , double m , double T , double mu ); */
/* double dmu2_Imed ( Param * P , double m , double T , double mu ); */
|
AlexandreBiguet/NJLlikeModels
|
legacy/programs/njl-0/njl1-b/integral.c
|
C
|
gpl-3.0
| 2,487
|
#include <stdio.h>
#include "aeb.h"
#include <string.h>
#include <math.h>
/*
 * Read an infix expression from stdin, build the expression tree (aeb.h),
 * print the converted form and evaluate it.
 *
 * Fixes vs. original:
 *  - scanf("%s", s) was unbounded — a long input overflowed s[127];
 *    bounded to %126s (126 chars + NUL);
 *  - the scanf result is now checked (the original evaluated an
 *    uninitialized buffer on EOF — undefined behavior);
 *  - dead commented-out demo code and the unused raiz/esq/dir locals
 *    were removed; main now returns explicitly.
 */
int main() {
    Aeb * arvore;
    double r;
    char s[127];

    printf("\nExpressão: ");
    if (scanf("%126s", s) != 1) {
        /* no input (EOF/error): nothing to evaluate */
        return 1;
    }
    arvore = criaArvore(s);
    printf("Expressão após conversão: ");
    mostraArvore(arvore);
    puts("");
    r = resolveExpressao(arvore);
    printf("\nO resultado é= %g\n", r);
    return 0;
}
|
EltonBroering/Programacao_C
|
Projeto7/teste.c
|
C
|
gpl-3.0
| 636
|
#include <unistd.h>
#include <sys/stat.h>
/*
 * libc remove(): delete a name from the filesystem — rmdir() for
 * directories, unlink() for everything else.
 *
 * Fix: the original ignored stat()'s return value and then read the
 * uninitialized struct stat (undefined behavior) whenever the path did
 * not exist. Fail early with stat()'s errno instead.
 *
 * NOTE(review): stat() follows symlinks, so remove() on a symlink whose
 * target is a directory takes the rmdir() branch; POSIX remove() should
 * unlink the link itself — consider lstat() if this libc provides it.
 */
int remove(const char *pathname) {
    struct stat buf;
    if (stat(pathname, &buf) == -1) {
        return -1;   /* errno already set by stat() */
    }
    if (S_ISDIR(buf.st_mode)) {
        return rmdir(pathname);
    }
    return unlink(pathname);
}
|
TacOS-team/tacos
|
libs/libc/stdio/remove.c
|
C
|
gpl-3.0
| 216
|
/*
* Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
*
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* Change log:
* 06/29/95 - Modified to handle flow control for writing (Tuyen Nguyen)
* Modified for MP, 1996 by Tuyen Nguyen
* Modified, April 9, 1997 by Tuyen Nguyen for MacOSX.
*/
#define RESOLVE_DBG
#include <sys/errno.h>
#include <sys/types.h>
#include <sys/param.h>
#include <machine/spl.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <netat/sysglue.h>
#include <netat/appletalk.h>
#include <netat/at_pcb.h>
#include <netat/ddp.h>
#include <netat/adsp.h>
#include <netat/adsp_internal.h>
#ifdef notdefn
struct adsp_debug adsp_dtable[1025];
int ad_entry = 0;
#endif
/*
 * Allocate and attach a zeroed connection control block (CCB) to a stream.
 * The CCB lives inside a single gbuf message block whose pointer is stored
 * in gref->info; back-pointers to the gref and the message block are kept
 * in the CCB itself.
 * Returns 1 on success, 0 when the gbuf allocation fails.
 */
int
adspAllocateCCB(gref)
    register gref_t *gref; /* READ queue */
{
    gbuf_t *ccb_mp;
    register CCBPtr sp;

    if (!(ccb_mp = gbuf_alloc(sizeof(CCB), PRI_LO))) {
        return (0);
    }
    /* start from a fully zeroed CCB */
    bzero((caddr_t) gbuf_rptr(ccb_mp), sizeof(CCB));
    gbuf_wset(ccb_mp,sizeof(CCB));
    gref->info = (caddr_t) ccb_mp;
    sp = (CCBPtr)gbuf_rptr(((gbuf_t *)gref->info));
    sp->pid = gref->pid; /* save the caller process pointer */
    sp->gref = gref; /* save a back pointer to the WRITE queue */
    sp->sp_mp = ccb_mp; /* and its message block */
    return 1;
}
/*
 * Tear down the connection attached to a stream (stream close path).
 * For connections not yet open (passive/closed/opening/listening) the CCB
 * is aborted and removed immediately; for open/closing connections the
 * state machine is walked through sClosing, pending data is pushed out
 * with a close advice, and the CCB is finally removed.
 * Always returns 0.
 */
int
adspRelease(gref)
    register gref_t *gref; /* READ queue */
{
    register CCBPtr sp;

    if (gref->info) {
        sp = (CCBPtr)gbuf_rptr(((gbuf_t *)gref->info));
        /* Tells completion routine of close */
        /* packet to remove us. */
        if (sp->state == sPassive || sp->state == sClosed ||
            sp->state == sOpening || sp->state == sListening) {
            if (sp->state == sListening)
                CompleteQueue(&sp->opb, errAborted); /* flush pending listeners */
            sp->removing = 1; /* Prevent allowing another dspClose. */
            DoClose(sp, errAborted, 0); /* will remove CCB */
            return 0;
        } else { /* sClosing & sOpen */
            sp->state = sClosing;
        }
        if (CheckOkToClose(sp)) { /* going to close */
            sp->sendCtl = B_CTL_CLOSE; /* Send close advice */
        } else {
            CheckSend(sp); /* try one more time to send out data */
            if (sp->state != sClosed)
                sp->sendCtl = B_CTL_CLOSE; /* Setup to send close advice */
        }
        CheckSend(sp); /* and force out the close */
        sp->removing = 1; /* Prevent allowing another dspClose. */
        sp->state = sClosed;
        DoClose(sp, errAborted, 0); /* to closed and remove CCB */
    }
    return 0;
}
/*
 * adspWriteHandler
 *
 * WRITE-side put routine for the ADSP stream.  Handles two message
 * classes: MSG_DATA blocks carrying a struct adspcmd (dspWrite /
 * dspAttention requests) and MSG_IOCTL blocks carrying ADSP ioctls.
 * Returns STR_IGNORE when the message was consumed here, or
 * STR_PUTNEXT when it should be passed further down the stream.
 */
int
adspWriteHandler(gref, mp)
    gref_t *gref;		/* WRITE queue */
    gbuf_t *mp;
{
    register ioc_t *iocbp;
    register struct adspcmd *ap;
    int error, flag;
    void *sp;

    switch(gbuf_type(mp)) {
    case MSG_DATA:
        if (gref->info == 0) {	/* no CCB attached: drop the request */
            gbuf_freem(mp);
            return(STR_IGNORE);
        }
        /*
         * Fill in the global stuff
         */
        ap = (struct adspcmd *)gbuf_rptr(mp);
        ap->gref = gref;
        ap->ioc = 0;
        ap->mp = mp;
        sp = (void *)gbuf_rptr(((gbuf_t *)gref->info));	/* the CCB */
        switch(ap->csCode) {
        case dspWrite:
            if ((error = adspWrite(sp, ap)))
                gbuf_freem(mp);	/* request failed: reclaim the message */
            return(STR_IGNORE);
        case dspAttention:
            /* NOTE(review): ap is a struct adspcmd *, but it is cast to
               CCBPtr here — looks like a prototype mismatch; confirm
               adspAttention's expected second argument. */
            if ((error = adspAttention(sp, (CCBPtr)ap)))
                gbuf_freem(mp);
            return(STR_IGNORE);
        }
        /* NOTE(review): a MSG_DATA block whose csCode is neither dspWrite
           nor dspAttention falls through into the MSG_IOCTL case below —
           confirm this implicit fallthrough is intentional. */
    case MSG_IOCTL:
        if (gref->info == 0) {	/* no CCB: reject the ioctl */
            adspioc_ack(EPROTOTYPE, mp, gref);
            return(STR_IGNORE);
        }
        iocbp = (ioc_t *) gbuf_rptr(mp);
        if (ADSP_IOCTL(iocbp->ioc_cmd)) {
            iocbp->ioc_count = sizeof(*ap) - 1;
            if (gbuf_cont(mp) == 0) {	/* ioctl without a payload block */
                adspioc_ack(EINVAL, mp, gref);
                return(STR_IGNORE);
            }
            ap = (struct adspcmd *) gbuf_rptr(gbuf_cont(mp));
            ap->gref = gref;
            ap->ioc = (caddr_t) mp;
            ap->mp = gbuf_cont(mp);	/* request head */
            ap->ioResult = 0;
            /* NOTE(review): gref->info == 0 was already rejected above,
               so this condition can never be true here — dead code;
               confirm before removing. */
            if ((gref->info == 0) && ((iocbp->ioc_cmd != ADSPOPEN) &&
                (iocbp->ioc_cmd != ADSPCLLISTEN))) {
                ap->ioResult = errState;
                adspioc_ack(EINVAL, mp, gref);
                return(STR_IGNORE);
            }
        } else
            return(STR_PUTNEXT);	/* pass it on down */
        sp = (void *)gbuf_rptr(((gbuf_t *)gref->info));	/* the CCB */
        switch(iocbp->ioc_cmd) {
        case ADSPOPEN:
        case ADSPCLLISTEN:
            ap->socket = ((CCBPtr)sp)->localSocket;
            flag = (adspMode(ap) == ocAccept) ? 1 : 0;
            if (flag && ap->socket) {
                /* accept mode: release our socket so the request can
                   take over the listener's */
                if (adspDeassignSocket((CCBPtr)sp) >= 0)
                    ap->socket = 0;
            }
            if ((ap->socket == 0) &&
                ((ap->socket =
                  (at_socket)adspAssignSocket(gref, flag)) == 0)) {
                adspioc_ack(EADDRNOTAVAIL, mp, gref);
                return(STR_IGNORE);
            }
            ap->csCode = iocbp->ioc_cmd == ADSPOPEN ? dspInit : dspCLInit;
            if ((error = adspInit(sp, ap)) == 0) {
                switch(ap->csCode) {
                case dspInit:
                    /* and open the connection */
                    ap->csCode = dspOpen;
                    error = adspOpen(sp, ap);
                    break;
                case dspCLInit:
                    /* ADSPCLLISTEN */
                    ap->csCode = dspCLListen;
                    error = adspCLListen(sp, ap);
                    break;
                }
            }
            if (error)
                adspioc_ack(error, mp, gref);	/* if this failed req complete */
            return(STR_IGNORE);
        case ADSPCLOSE:
            ap->csCode = dspClose;
            if ((error = adspClose(sp, ap))) {
                adspioc_ack(error, mp, gref);
                break;
            }
            break;
        case ADSPCLREMOVE:
            ap->csCode = dspCLRemove;
            error = adspClose(sp, ap);
            adspioc_ack(error, mp, gref);
            return(STR_IGNORE);
        case ADSPCLDENY:
            ap->csCode = dspCLDeny;
            /* NOTE(review): same suspicious (CCBPtr)ap cast as above. */
            if ((error = adspCLDeny(sp, (CCBPtr)ap))) {
                adspioc_ack(error, mp, gref);
            }
            return(STR_IGNORE);
        case ADSPSTATUS:
            ap->csCode = dspStatus;
            if ((error = adspStatus(sp, ap))) {
                adspioc_ack(error, mp, gref);
            }
            return(STR_IGNORE);
        case ADSPREAD:
            ap->csCode = dspRead;
            if ((error = adspRead(sp, ap))) {
                adspioc_ack(error, mp, gref);
            }
            return(STR_IGNORE);
        case ADSPATTENTION:
            ap->csCode = dspAttention;
            if ((error = adspReadAttention((CCBPtr)sp, ap))) {
                adspioc_ack(error, mp, gref);
            }
            return(STR_IGNORE);
        case ADSPOPTIONS:
            ap->csCode = dspOptions;
            if ((error = adspOptions(sp, ap))) {
                adspioc_ack(error, mp, gref);
            }
            return(STR_IGNORE);
        case ADSPRESET:
            ap->csCode = dspReset;
            if ((error = adspReset(sp, ap))) {
                adspioc_ack(error, mp, gref);
            }
            return(STR_IGNORE);
        case ADSPNEWCID:
            ap->csCode = dspNewCID;
            if ((error = adspNewCID(sp, ap))) {
                adspioc_ack(error, mp, gref);
            }
            return(STR_IGNORE);
        default:
            return(STR_PUTNEXT);	/* pass it on down */
        }
        return(STR_IGNORE);
    case MSG_PROTO:
    default:
        gbuf_freem(mp);		/* unknown message type: discard it */
    }
    return(STR_IGNORE);
}
/*
 * READ-side put routine: hand inbound MSG_DATA blocks to the ADSP
 * packet processor (freeing them on error) and pass every other
 * message type on down the stream.
 */
int
adspReadHandler(gref, mp)
    gref_t *gref;
    gbuf_t *mp;
{
    int err;

    /* MSG_IOCTL and anything else we don't understand: pass along. */
    if (gbuf_type(mp) != MSG_DATA)
        return(STR_PUTNEXT);

    if ((err = adspPacket(gref, mp))) {
        gbuf_freem(mp);		/* packet rejected: reclaim the buffer */
    }
    return(STR_IGNORE);
}
/*
* adsp_sendddp()
*
* Description:
* This procedure formats a DDP datagram header and calls the
* DDP module to queue it for routing and transmission according to
* the DDP parameters. We always take control of the datagram;
* if there is an error we free it, otherwise we pass it to the next
* layer. We don't need to set the src address fields because the
* DDP layer fills these in for us.
*
* Calling Sequence:
* ret_status = adsp_sendddp(sp, mp, length, dstnetaddr, ddptype);
*
* Formal Parameters:
* sp Caller stream pointer
* mp gbuf_t chain containing the datagram to transmit
* The first mblk contains the ADSP header and space
* for the DDP header.
* length size of data portion of datagram
* dstnetaddr address of 4-byte destination internet address
* ddptype DDP protocol to assign to the datagram
*
* Completion Status:
* 0 Procedure successful completed.
* EMSGSIZE Specified datagram length is too big.
*
* Side Effects:
* NONE
*/
int
adsp_sendddp(sp, mp, length, dstnetaddr, ddptype)
    CCBPtr sp;
    gbuf_t *mp;
    int length;
    AddrUnion *dstnetaddr;
    int ddptype;
{
    DDPX_FRAME *ddp;
    gbuf_t *mlist = mp;		/* remember the chain head for free/output */

    if (mp == 0)
        return EINVAL;

    if (length > DDP_DATA_SIZE) {
        gbuf_freel(mlist);	/* too large for one DDP datagram: drop all */
        return EMSGSIZE;
    }

    /* Stamp a DDP header into the first mblk of each datagram on the
       chain.  The caller-supplied `length' applies only to the first
       datagram; for the rest it is recomputed from the message size. */
    while (mp) {
        if (length == 0)
            length = gbuf_msgsize(mp) - DDPL_FRAME_LEN;

        /* Set up the DDP header */
        ddp = (DDPX_FRAME *) gbuf_rptr(mp);
        UAS_ASSIGN_HTON(ddp->ddpx_length, (length + DDPL_FRAME_LEN));
        UAS_ASSIGN(ddp->ddpx_cksm, 0);
        if (sp) {
            if (sp->useCheckSum)
                UAS_ASSIGN_HTON(ddp->ddpx_cksm, 1);	/* request checksumming */
        }

        NET_ASSIGN(ddp->ddpx_dnet, dstnetaddr->a.net);
        ddp->ddpx_dnode = dstnetaddr->a.node;
        /* NOTE(review): with no CCB the source socket is read from the
           frame's previous ddpx_dest byte (before it is overwritten on
           the next line) — presumably replying from the socket the
           original datagram was addressed to; confirm. */
        ddp->ddpx_source = sp ? sp->localSocket : ddp->ddpx_dest;
        ddp->ddpx_dest = dstnetaddr->a.socket;

        ddp->ddpx_type = ddptype;
        length = 0;		/* force per-mblk recomputation from now on */
        mp = gbuf_next(mp);
    }

    DDP_OUTPUT(mlist);		/* DDP takes ownership of the whole chain */
    return 0;
}
/*
 * Notify the owning process that the connection needs attention.
 * The SIGIO delivery is disabled (commented out) in this build, so
 * this is currently a no-op stub.
 */
void NotifyUser(
    __unused CCBPtr sp)
{
    /*
    pidsig(sp->pid, SIGIO);
    */
}
/*
 * Signal the owning process that urgent (attention) data arrived.
 * The SIGURG delivery is disabled (commented out) in this build, so
 * this is currently a no-op stub.
 */
void UrgentUser(
    __unused CCBPtr sp)
{
    /*
    pidsig(sp->pid, SIGURG);
    */
}
|
p01arst0rm/decorum-linux
|
_resources/kernels/xnu-arm/bsd/netat/adsp.c
|
C
|
gpl-3.0
| 10,262
|
/*
**
** This file is part of BananaCam.
**
** BananaCam is free software: you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation, either version 3 of the License, or
** (at your option) any later version.
**
** BananaCam is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with BananaCam. If not, see <http://www.gnu.org/licenses/>.
**
*/
#include "camera_control.h"
/*
 * libgphoto2 error callback: print the formatted error message to
 * stderr, framed by a fixed banner.  Context and user data are unused.
 */
void error_func (GPContext *context, const char *format, va_list args, void *data) {
    (void)context;		/* unused */
    (void)data;			/* unused */
    fprintf(stderr, "*** Contexterror ***\n");
    vfprintf(stderr, format, args);
    fprintf(stderr, "\n");
}
/*
 * libgphoto2 message callback: echo the formatted message to stdout,
 * followed by a newline.  Context and user data are unused.
 */
void message_func (GPContext *context, const char *format, va_list args, void *data) {
    (void)context;		/* unused */
    (void)data;			/* unused */
    vprintf(format, args);
    printf("\n");
}
/* Minimal signal handler: just report which signal arrived. */
void signal_handler(int sig)
{
    fprintf(stdout, "Signal ==> %i\n", sig);
}
/*
 * Install signal_handler for every signal from 1 to 31 except 11
 * (SIGSEGV), so stray signals are logged instead of killing the
 * process.
 */
void signal_inib()
{
    struct sigaction act;
    int signum;

    act.sa_handler = signal_handler;
    sigemptyset(&act.sa_mask);
    act.sa_flags = 0;
    for (signum = 1; signum < 32; signum++)
    {
        if (signum != 11)	/* leave SIGSEGV at its default action */
            sigaction(signum, &act, NULL);
    }
}
/*
 * Initialize the camera state structure: default live-view settings,
 * synchronization primitives, the gphoto2 context/camera pair, and the
 * camera connection itself.
 *
 * Returns GP_OK on success, GP_ERROR if the camera cannot be opened.
 *
 * BUG FIX: the original code attached the error/message callbacks to
 * c->context BEFORE c->context was created (main() mallocs c without
 * zeroing it and calls init() immediately, so c->context was an
 * uninitialized pointer — undefined behavior).  The context is now
 * created first, then the callbacks are attached.
 */
int init(t_cam *c)
{
    c->liveview = 0;
    c->liveview_fps = 30;
    /* frame period in microseconds, derived from the default fps */
    c->liveview_fps_time = 1000000 / c->liveview_fps;
    pthread_mutex_init(&c->liveview_mutex, NULL);
    pthread_cond_init(&c->liveview_condvar, NULL);
    c->folder_path = strdup("/tmp/");
    c->camera_value_list = NULL;
    /* Create the context and camera before touching them. */
    c->context = gp_context_new();
    gp_context_set_error_func(c->context, (GPContextErrorFunc)error_func, NULL);
    gp_context_set_message_func(c->context, (GPContextMessageFunc)message_func, NULL);
    gp_camera_new(&c->camera);
    printf("Camera Init\n");
    c->ret = gp_camera_init(c->camera, c->context);
    if (c->ret != GP_OK) {
        printf("gp_camera_init: %d\n", c->ret);
        return (GP_ERROR);
    }
    /* get_initial_camera_values(t_cam *c); */
    return (GP_OK);
}
/*
 * Fallback dispatcher for commands without a dedicated handler.
 * "get_<name>" commands read the camera widget <name>; any other
 * command writes its first parameter as the new widget value.
 */
void generic_exec(t_cam *c, char *command, char **param)
{
    char *msg = NULL;

    /* Read request: strip the "get_" prefix and fetch the value. */
    if (command && strncmp(command, "get_", 4) == 0)
    {
        command = &command[4];
        get_config(command, c);
        return;
    }
    if (param)
    {
        if (param[0])
            set_config(command, param[0], c);
        /* NOTE(review): param non-NULL with param[0] == NULL is
           silently ignored — confirm that is intended rather than a
           BAD_PARAMETERS case. */
    }
    else
    {
        asprintf(&msg, "bad parameters for %s", command);
        /* NOTE(review): msg is never freed here — presumably
           creat_and_send_message() takes ownership; verify, otherwise
           this leaks on every bad command. */
        creat_and_send_message(BAD_PARAMETERS, NULL, NULL, msg, c);
    }
}
/*
 * Look up `command' in the registered handler list and run it, falling
 * back to generic_exec() for unknown commands.  Live view monopolizes
 * the camera, so it is paused while any other command runs and
 * restarted afterwards.  Always returns 0.
 */
int exec_command(t_cam *c, char *command, char **param)
{
    t_func *tmp = NULL;
    int flag = 0;

    /* Pause live view for any command other than "liveview" itself. */
    if (strcmp(command, "liveview") != 0 && c->liveview == 1)
    {
        printf("enter inside here\n");
        c->liveview = 0;
        flag = 1;
        sleep(1);	/* give the live-view thread time to notice and stop */
    }
    /* Linear search of the registered handler list. */
    tmp = c->first_func_ptr;
    while (tmp != NULL)
    {
        if (strcmp(command, tmp->name) == 0)
        {
            tmp->func_ptr(c, param);
            break;
        }
        tmp = tmp->next;
    }
    if (tmp == NULL)
        generic_exec(c, command, param);	/* no dedicated handler */
    if (flag == 1)
        liveview(c, NULL);	/* restart the paused live view */
    return (0);
}
/*
 * Append a named command handler to the camera's handler list.
 *
 * c        camera state owning the list (c->first_func_ptr)
 * name     command name; duplicated, the caller keeps ownership
 * func_ptr handler invoked by exec_command() for this name
 *
 * Improvements over the original: the node-construction code that was
 * duplicated in both branches is unified, and the malloc result is
 * checked before use (the original dereferenced it unconditionally).
 * On allocation failure the list is left unchanged.
 */
void add_func_ptr_list(t_cam *c, char *name, int (*func_ptr)(t_cam *c, char **param))
{
    t_func *node;
    t_func *tail;

    node = malloc(sizeof(*node));
    if (node == NULL)
        return;		/* out of memory: leave the list unchanged */
    node->next = NULL;
    node->func_ptr = func_ptr;
    node->name = strdup(name);
    if (c->first_func_ptr == NULL)
    {
        c->first_func_ptr = node;	/* first entry becomes the head */
    }
    else
    {
        tail = c->first_func_ptr;	/* otherwise append at the tail */
        while (tail->next != NULL)
            tail = tail->next;
        tail->next = node;
    }
}
/*
 * Program entry point: set up the camera, register the command
 * handlers, start the live-view thread, and serve commands over a
 * unix-domain socket until the connection loop returns.
 */
int main(int ac, char **av)
{
    t_cam *cam;

    (void)ac;	/* command-line arguments are unused */
    (void)av;
#ifdef __APPLE__
    //pthread_t thread;
    /* macOS grabs PTP devices automatically; kill its agent first. */
    printf("Killing PTPCamera process\n");
    system("killall PTPCamera");
    //pthread_create(&thread, NULL, initUSBDetect, (void *)cam);
#endif
    signal_inib();
    cam = malloc(sizeof(*cam));
    cam->first_func_ptr = NULL;
    init(cam);
    get_all_widget_and_choices(cam);
    /* Commands with dedicated handlers; everything else falls back to
       generic_exec() via exec_command(). */
    add_func_ptr_list(cam, "capture", trigger_capture);
    add_func_ptr_list(cam, "liveview", liveview);
    add_func_ptr_list(cam, "auto_focus", auto_focus);
    add_func_ptr_list(cam, "liveviewfps", liveviewfps);
    add_func_ptr_list(cam, "get_liveviewfps", get_liveviewfps);
    add_func_ptr_list(cam, "defaultpath", set_default_folder_path);
    add_func_ptr_list(cam, "get_defaultpath", get_default_folder_path);
    pthread_create(&cam->liveview_thread, NULL, liveview_launcher, (void*)cam);
    init_comm(cam, UNIX_SOCKET_PATH);
    gp_camera_exit(cam->camera, cam->context);
    return (0);
}
|
OliverNicolini/BananaCam
|
src/main.c
|
C
|
gpl-3.0
| 4,707
|
/*
Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Free
Software Foundation, Inc.
This file is part of GNU Inetutils.
GNU Inetutils is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at
your option) any later version.
GNU Inetutils is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see `http://www.gnu.org/licenses/'. */
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <config.h>
#if defined AUTHENTICATION || defined ENCRYPTION
# include <unistd.h>
# include <sys/types.h>
# include <arpa/telnet.h>
# include <libtelnet/encrypt.h>
# include <libtelnet/misc.h>
# include "general.h"
# include "ring.h"
# include "externs.h"
# include "defines.h"
# include "types.h"
/* Queue LEN bytes of STR for transmission to the network if the output
   ring has room; echo telnet suboptions to the debug trace.  Returns
   the number of bytes accepted (LEN) or 0 if there was no room.

   BUG FIX: the original read str[1] without checking that LEN >= 2,
   which is an out-of-bounds read for a one-byte write.  */
int
net_write (unsigned char *str, int len)
{
  if (NETROOM () > len)
    {
      ring_supply_data (&netoring, str, len);
      /* Suboption payloads start with IAC SE; trace them.  */
      if (len >= 2 && str[0] == IAC && str[1] == SE)
	printsub ('>', &str[2], len - 2);
      return (len);
    }
  return (0);
}
/* Encrypt (or declare as cleartext) everything currently buffered in
   the network output ring.  With ENCRYPTION compiled out this is a
   no-op.  */
void
net_encrypt ()
{
# ifdef ENCRYPTION
  if (encrypt_output)
    ring_encrypt (&netoring, encrypt_output);	/* cipher the pending bytes */
  else
    ring_clearto (&netoring);	/* no cipher active: mark data as clear */
# endif	/* ENCRYPTION */
}
/* Spin/poll hook used by the authentication machinery while waiting
   for input; this client has no asynchronous work to do here, so it
   always reports failure.  */
int
telnet_spin (void)
{
  return -1;
}
/* Look up VAL in telnet's own environment-variable list (not the
   process environment) and return whatever env_getvalue reports.  */
char *
telnet_getenv (char *val)
{
  char *value;

  value = (char *) env_getvalue (val);
  return value;
}
/* Prompt the user and read a reply into RESULT (capacity LENGTH).
   With ECHO non-zero the reply is read normally via fgets; otherwise
   it is read without echo via getpass and copied into RESULT, wiping
   getpass's static buffer afterwards.  Returns RESULT (or the fgets
   result) on success, NULL on failure.

   BUG FIX: strncpy does not NUL-terminate when the source fills the
   buffer, so a password of LENGTH-1 or more characters left RESULT
   unterminated; termination is now forced.  */
char *
telnet_gets (char *prompt, char *result, int length, int echo)
{
# if !HAVE_DECL_GETPASS
  extern char *getpass ();
# endif
  extern int globalmode;
  int om = globalmode;
  char *res;

  TerminalNewMode (-1);
  if (echo)
    {
      printf ("%s", prompt);
      res = fgets (result, length, stdin);
    }
  else
    {
      res = getpass (prompt);
      if (res)
	{
	  strncpy (result, res, length);
	  if (length > 0)
	    result[length - 1] = '\0';	/* guarantee termination */
	  /* NOTE(review): a plain memset of a secret can be optimized
	     away; explicit_bzero/memset_explicit would be safer where
	     available.  */
	  memset (res, 0, strlen (res));
	  res = result;
	}
    }
  TerminalNewMode (om);
  return (res);
}
#endif /* defined(AUTHENTICATION) || defined(ENCRYPTION) */
|
infoburp/inetutils
|
telnet/authenc.c
|
C
|
gpl-3.0
| 3,857
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.