repo_name
string
path
string
copies
string
size
string
content
string
license
string
bestwpw/Xiaomi_Kernel_OpenSource
drivers/ide/qd65xx.c
4927
11148
/* * Copyright (C) 1996-2001 Linus Torvalds & author (see below) */ /* * Version 0.03 Cleaned auto-tune, added probe * Version 0.04 Added second channel tuning * Version 0.05 Enhanced tuning ; added qd6500 support * Version 0.06 Added dos driver's list * Version 0.07 Second channel bug fix * * QDI QD6500/QD6580 EIDE controller fast support * * To activate controller support, use "ide0=qd65xx" */ /* * Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by * Samuel Thibault <samuel.thibault@ens-lyon.org> */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/ioport.h> #include <linux/blkdev.h> #include <linux/ide.h> #include <linux/init.h> #include <asm/io.h> #define DRV_NAME "qd65xx" #include "qd65xx.h" /* * I/O ports are 0x30-0x31 (and 0x32-0x33 for qd6580) * or 0xb0-0xb1 (and 0xb2-0xb3 for qd6580) * -- qd6500 is a single IDE interface * -- qd6580 is a dual IDE interface * * More research on qd6580 being done by willmore@cig.mot.com (David) * More Information given by Petr Soucek (petr@ryston.cz) * http://www.ryston.cz/petr/vlb */ /* * base: Timer1 * * * base+0x01: Config (R/O) * * bit 0: ide baseport: 1 = 0x1f0 ; 0 = 0x170 (only useful for qd6500) * bit 1: qd65xx baseport: 1 = 0xb0 ; 0 = 0x30 * bit 2: ID3: bus speed: 1 = <=33MHz ; 0 = >33MHz * bit 3: qd6500: 1 = disabled, 0 = enabled * qd6580: 1 * upper nibble: * qd6500: 1100 * qd6580: either 1010 or 0101 * * * base+0x02: Timer2 (qd6580 only) * * * base+0x03: Control (qd6580 only) * * bits 0-3 must always be set 1 * bit 4 must be set 1, but is set 0 by dos driver while measuring vlb clock * bit 0 : 1 = Only primary port enabled : channel 0 for hda, channel 1 for hdb * 0 = Primary and Secondary ports enabled : channel 0 for hda & hdb * channel 1 for hdc & hdd * bit 1 : 1 = only disks on primary port * 0 = disks & ATAPI devices on primary port * bit 2-4 : always 0 * bit 5 : status, but of what 
? * bit 6 : always set 1 by dos driver * bit 7 : set 1 for non-ATAPI devices on primary port * (maybe read-ahead and post-write buffer ?) */ static int timings[4]={-1,-1,-1,-1}; /* stores current timing for each timer */ /* * qd65xx_select: * * This routine is invoked to prepare for access to a given drive. */ static void qd65xx_dev_select(ide_drive_t *drive) { u8 index = (( (QD_TIMREG(drive)) & 0x80 ) >> 7) | (QD_TIMREG(drive) & 0x02); if (timings[index] != QD_TIMING(drive)) outb(timings[index] = QD_TIMING(drive), QD_TIMREG(drive)); outb(drive->select | ATA_DEVICE_OBS, drive->hwif->io_ports.device_addr); } /* * qd6500_compute_timing * * computes the timing value where * lower nibble represents active time, in count of VLB clocks * upper nibble represents recovery time, in count of VLB clocks */ static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery_time) { int clk = ide_vlb_clk ? ide_vlb_clk : 50; u8 act_cyc, rec_cyc; if (clk <= 33) { act_cyc = 9 - IDE_IN(active_time * clk / 1000 + 1, 2, 9); rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 0, 15); } else { act_cyc = 8 - IDE_IN(active_time * clk / 1000 + 1, 1, 8); rec_cyc = 18 - IDE_IN(recovery_time * clk / 1000 + 1, 3, 18); } return (rec_cyc << 4) | 0x08 | act_cyc; } /* * qd6580_compute_timing * * idem for qd6580 */ static u8 qd6580_compute_timing (int active_time, int recovery_time) { int clk = ide_vlb_clk ? 
ide_vlb_clk : 50; u8 act_cyc, rec_cyc; act_cyc = 17 - IDE_IN(active_time * clk / 1000 + 1, 2, 17); rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 2, 15); return (rec_cyc << 4) | act_cyc; } /* * qd_find_disk_type * * tries to find timing from dos driver's table */ static int qd_find_disk_type (ide_drive_t *drive, int *active_time, int *recovery_time) { struct qd65xx_timing_s *p; char *m = (char *)&drive->id[ATA_ID_PROD]; char model[ATA_ID_PROD_LEN]; if (*m == 0) return 0; strncpy(model, m, ATA_ID_PROD_LEN); ide_fixstring(model, ATA_ID_PROD_LEN, 1); /* byte-swap */ for (p = qd65xx_timing ; p->offset != -1 ; p++) { if (!strncmp(p->model, model+p->offset, 4)) { printk(KERN_DEBUG "%s: listed !\n", drive->name); *active_time = p->active; *recovery_time = p->recovery; return 1; } } return 0; } /* * qd_set_timing: * * records the timing */ static void qd_set_timing (ide_drive_t *drive, u8 timing) { unsigned long data = (unsigned long)ide_get_drivedata(drive); data &= 0xff00; data |= timing; ide_set_drivedata(drive, (void *)data); printk(KERN_DEBUG "%s: %#x\n", drive->name, timing); } static void qd6500_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { u16 *id = drive->id; int active_time = 175; int recovery_time = 415; /* worst case values from the dos driver */ /* FIXME: use drive->pio_mode value */ if (!qd_find_disk_type(drive, &active_time, &recovery_time) && (id[ATA_ID_OLD_PIO_MODES] & 0xff) && (id[ATA_ID_FIELD_VALID] & 2) && id[ATA_ID_EIDE_PIO] >= 240) { printk(KERN_INFO "%s: PIO mode%d\n", drive->name, id[ATA_ID_OLD_PIO_MODES] & 0xff); active_time = 110; recovery_time = drive->id[ATA_ID_EIDE_PIO] - 120; } qd_set_timing(drive, qd6500_compute_timing(drive->hwif, active_time, recovery_time)); } static void qd6580_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { const u8 pio = drive->pio_mode - XFER_PIO_0; struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); unsigned int cycle_time; int active_time = 175; int recovery_time = 415; /* worst case 
values from the dos driver */ u8 base = (hwif->config_data & 0xff00) >> 8; if (drive->id && !qd_find_disk_type(drive, &active_time, &recovery_time)) { cycle_time = ide_pio_cycle_time(drive, pio); switch (pio) { case 0: break; case 3: if (cycle_time >= 110) { active_time = 86; recovery_time = cycle_time - 102; } else printk(KERN_WARNING "%s: Strange recovery time !\n",drive->name); break; case 4: if (cycle_time >= 69) { active_time = 70; recovery_time = cycle_time - 61; } else printk(KERN_WARNING "%s: Strange recovery time !\n",drive->name); break; default: if (cycle_time >= 180) { active_time = 110; recovery_time = cycle_time - 120; } else { active_time = t->active; recovery_time = cycle_time - active_time; } } printk(KERN_INFO "%s: PIO mode%d\n", drive->name,pio); } if (!hwif->channel && drive->media != ide_disk) { outb(0x5f, QD_CONTROL_PORT); printk(KERN_WARNING "%s: ATAPI: disabled read-ahead FIFO " "and post-write buffer on %s.\n", drive->name, hwif->name); } qd_set_timing(drive, qd6580_compute_timing(active_time, recovery_time)); } /* * qd_testreg * * tests if the given port is a register */ static int __init qd_testreg(int port) { unsigned long flags; u8 savereg, readreg; local_irq_save(flags); savereg = inb_p(port); outb_p(QD_TESTVAL, port); /* safe value */ readreg = inb_p(port); outb(savereg, port); local_irq_restore(flags); if (savereg == QD_TESTVAL) { printk(KERN_ERR "Outch ! 
the probe for qd65xx isn't reliable !\n"); printk(KERN_ERR "Please contact maintainers to tell about your hardware\n"); printk(KERN_ERR "Assuming qd65xx is not present.\n"); return 1; } return (readreg != QD_TESTVAL); } static void __init qd6500_init_dev(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u8 base = (hwif->config_data & 0xff00) >> 8; u8 config = QD_CONFIG(hwif); ide_set_drivedata(drive, (void *)QD6500_DEF_DATA); } static void __init qd6580_init_dev(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u16 t1, t2; u8 base = (hwif->config_data & 0xff00) >> 8; u8 config = QD_CONFIG(hwif); if (hwif->host_flags & IDE_HFLAG_SINGLE) { t1 = QD6580_DEF_DATA; t2 = QD6580_DEF_DATA2; } else t2 = t1 = hwif->channel ? QD6580_DEF_DATA2 : QD6580_DEF_DATA; ide_set_drivedata(drive, (void *)((drive->dn & 1) ? t2 : t1)); } static const struct ide_tp_ops qd65xx_tp_ops = { .exec_command = ide_exec_command, .read_status = ide_read_status, .read_altstatus = ide_read_altstatus, .write_devctl = ide_write_devctl, .dev_select = qd65xx_dev_select, .tf_load = ide_tf_load, .tf_read = ide_tf_read, .input_data = ide_input_data, .output_data = ide_output_data, }; static const struct ide_port_ops qd6500_port_ops = { .init_dev = qd6500_init_dev, .set_pio_mode = qd6500_set_pio_mode, }; static const struct ide_port_ops qd6580_port_ops = { .init_dev = qd6580_init_dev, .set_pio_mode = qd6580_set_pio_mode, }; static const struct ide_port_info qd65xx_port_info __initdata = { .name = DRV_NAME, .tp_ops = &qd65xx_tp_ops, .chipset = ide_qd65xx, .host_flags = IDE_HFLAG_IO_32BIT | IDE_HFLAG_NO_DMA, .pio_mask = ATA_PIO4, }; /* * qd_probe: * * looks at the specified baseport, and if qd found, registers & initialises it * return 1 if another qd may be probed */ static int __init qd_probe(int base) { int rc; u8 config, unit, control; struct ide_port_info d = qd65xx_port_info; config = inb(QD_CONFIG_PORT); if (! ((config & QD_CONFIG_BASEPORT) >> 1 == (base == 0xb0)) ) return -ENODEV; unit = ! 
(config & QD_CONFIG_IDE_BASEPORT); if (unit) d.host_flags |= IDE_HFLAG_QD_2ND_PORT; switch (config & 0xf0) { case QD_CONFIG_QD6500: if (qd_testreg(base)) return -ENODEV; /* bad register */ if (config & QD_CONFIG_DISABLED) { printk(KERN_WARNING "qd6500 is disabled !\n"); return -ENODEV; } printk(KERN_NOTICE "qd6500 at %#x\n", base); printk(KERN_DEBUG "qd6500: config=%#x, ID3=%u\n", config, QD_ID3); d.port_ops = &qd6500_port_ops; d.host_flags |= IDE_HFLAG_SINGLE; break; case QD_CONFIG_QD6580_A: case QD_CONFIG_QD6580_B: if (qd_testreg(base) || qd_testreg(base + 0x02)) return -ENODEV; /* bad registers */ control = inb(QD_CONTROL_PORT); printk(KERN_NOTICE "qd6580 at %#x\n", base); printk(KERN_DEBUG "qd6580: config=%#x, control=%#x, ID3=%u\n", config, control, QD_ID3); outb(QD_DEF_CONTR, QD_CONTROL_PORT); d.port_ops = &qd6580_port_ops; if (control & QD_CONTR_SEC_DISABLED) d.host_flags |= IDE_HFLAG_SINGLE; printk(KERN_INFO "qd6580: %s IDE board\n", (control & QD_CONTR_SEC_DISABLED) ? "single" : "dual"); break; default: return -ENODEV; } rc = ide_legacy_device_add(&d, (base << 8) | config); if (d.host_flags & IDE_HFLAG_SINGLE) return (rc == 0) ? 1 : rc; return rc; } static bool probe_qd65xx; module_param_named(probe, probe_qd65xx, bool, 0); MODULE_PARM_DESC(probe, "probe for QD65xx chipsets"); static int __init qd65xx_init(void) { int rc1, rc2 = -ENODEV; if (probe_qd65xx == 0) return -ENODEV; rc1 = qd_probe(0x30); if (rc1) rc2 = qd_probe(0xb0); if (rc1 < 0 && rc2 < 0) return -ENODEV; return 0; } module_init(qd65xx_init); MODULE_AUTHOR("Samuel Thibault"); MODULE_DESCRIPTION("support of qd65xx vlb ide chipset"); MODULE_LICENSE("GPL");
gpl-2.0
sombree/android_kernel_samsung_jf
drivers/ide/qd65xx.c
4927
11148
/* * Copyright (C) 1996-2001 Linus Torvalds & author (see below) */ /* * Version 0.03 Cleaned auto-tune, added probe * Version 0.04 Added second channel tuning * Version 0.05 Enhanced tuning ; added qd6500 support * Version 0.06 Added dos driver's list * Version 0.07 Second channel bug fix * * QDI QD6500/QD6580 EIDE controller fast support * * To activate controller support, use "ide0=qd65xx" */ /* * Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by * Samuel Thibault <samuel.thibault@ens-lyon.org> */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/ioport.h> #include <linux/blkdev.h> #include <linux/ide.h> #include <linux/init.h> #include <asm/io.h> #define DRV_NAME "qd65xx" #include "qd65xx.h" /* * I/O ports are 0x30-0x31 (and 0x32-0x33 for qd6580) * or 0xb0-0xb1 (and 0xb2-0xb3 for qd6580) * -- qd6500 is a single IDE interface * -- qd6580 is a dual IDE interface * * More research on qd6580 being done by willmore@cig.mot.com (David) * More Information given by Petr Soucek (petr@ryston.cz) * http://www.ryston.cz/petr/vlb */ /* * base: Timer1 * * * base+0x01: Config (R/O) * * bit 0: ide baseport: 1 = 0x1f0 ; 0 = 0x170 (only useful for qd6500) * bit 1: qd65xx baseport: 1 = 0xb0 ; 0 = 0x30 * bit 2: ID3: bus speed: 1 = <=33MHz ; 0 = >33MHz * bit 3: qd6500: 1 = disabled, 0 = enabled * qd6580: 1 * upper nibble: * qd6500: 1100 * qd6580: either 1010 or 0101 * * * base+0x02: Timer2 (qd6580 only) * * * base+0x03: Control (qd6580 only) * * bits 0-3 must always be set 1 * bit 4 must be set 1, but is set 0 by dos driver while measuring vlb clock * bit 0 : 1 = Only primary port enabled : channel 0 for hda, channel 1 for hdb * 0 = Primary and Secondary ports enabled : channel 0 for hda & hdb * channel 1 for hdc & hdd * bit 1 : 1 = only disks on primary port * 0 = disks & ATAPI devices on primary port * bit 2-4 : always 0 * bit 5 : status, but of what 
? * bit 6 : always set 1 by dos driver * bit 7 : set 1 for non-ATAPI devices on primary port * (maybe read-ahead and post-write buffer ?) */ static int timings[4]={-1,-1,-1,-1}; /* stores current timing for each timer */ /* * qd65xx_select: * * This routine is invoked to prepare for access to a given drive. */ static void qd65xx_dev_select(ide_drive_t *drive) { u8 index = (( (QD_TIMREG(drive)) & 0x80 ) >> 7) | (QD_TIMREG(drive) & 0x02); if (timings[index] != QD_TIMING(drive)) outb(timings[index] = QD_TIMING(drive), QD_TIMREG(drive)); outb(drive->select | ATA_DEVICE_OBS, drive->hwif->io_ports.device_addr); } /* * qd6500_compute_timing * * computes the timing value where * lower nibble represents active time, in count of VLB clocks * upper nibble represents recovery time, in count of VLB clocks */ static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery_time) { int clk = ide_vlb_clk ? ide_vlb_clk : 50; u8 act_cyc, rec_cyc; if (clk <= 33) { act_cyc = 9 - IDE_IN(active_time * clk / 1000 + 1, 2, 9); rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 0, 15); } else { act_cyc = 8 - IDE_IN(active_time * clk / 1000 + 1, 1, 8); rec_cyc = 18 - IDE_IN(recovery_time * clk / 1000 + 1, 3, 18); } return (rec_cyc << 4) | 0x08 | act_cyc; } /* * qd6580_compute_timing * * idem for qd6580 */ static u8 qd6580_compute_timing (int active_time, int recovery_time) { int clk = ide_vlb_clk ? 
ide_vlb_clk : 50; u8 act_cyc, rec_cyc; act_cyc = 17 - IDE_IN(active_time * clk / 1000 + 1, 2, 17); rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 2, 15); return (rec_cyc << 4) | act_cyc; } /* * qd_find_disk_type * * tries to find timing from dos driver's table */ static int qd_find_disk_type (ide_drive_t *drive, int *active_time, int *recovery_time) { struct qd65xx_timing_s *p; char *m = (char *)&drive->id[ATA_ID_PROD]; char model[ATA_ID_PROD_LEN]; if (*m == 0) return 0; strncpy(model, m, ATA_ID_PROD_LEN); ide_fixstring(model, ATA_ID_PROD_LEN, 1); /* byte-swap */ for (p = qd65xx_timing ; p->offset != -1 ; p++) { if (!strncmp(p->model, model+p->offset, 4)) { printk(KERN_DEBUG "%s: listed !\n", drive->name); *active_time = p->active; *recovery_time = p->recovery; return 1; } } return 0; } /* * qd_set_timing: * * records the timing */ static void qd_set_timing (ide_drive_t *drive, u8 timing) { unsigned long data = (unsigned long)ide_get_drivedata(drive); data &= 0xff00; data |= timing; ide_set_drivedata(drive, (void *)data); printk(KERN_DEBUG "%s: %#x\n", drive->name, timing); } static void qd6500_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { u16 *id = drive->id; int active_time = 175; int recovery_time = 415; /* worst case values from the dos driver */ /* FIXME: use drive->pio_mode value */ if (!qd_find_disk_type(drive, &active_time, &recovery_time) && (id[ATA_ID_OLD_PIO_MODES] & 0xff) && (id[ATA_ID_FIELD_VALID] & 2) && id[ATA_ID_EIDE_PIO] >= 240) { printk(KERN_INFO "%s: PIO mode%d\n", drive->name, id[ATA_ID_OLD_PIO_MODES] & 0xff); active_time = 110; recovery_time = drive->id[ATA_ID_EIDE_PIO] - 120; } qd_set_timing(drive, qd6500_compute_timing(drive->hwif, active_time, recovery_time)); } static void qd6580_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { const u8 pio = drive->pio_mode - XFER_PIO_0; struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); unsigned int cycle_time; int active_time = 175; int recovery_time = 415; /* worst case 
values from the dos driver */ u8 base = (hwif->config_data & 0xff00) >> 8; if (drive->id && !qd_find_disk_type(drive, &active_time, &recovery_time)) { cycle_time = ide_pio_cycle_time(drive, pio); switch (pio) { case 0: break; case 3: if (cycle_time >= 110) { active_time = 86; recovery_time = cycle_time - 102; } else printk(KERN_WARNING "%s: Strange recovery time !\n",drive->name); break; case 4: if (cycle_time >= 69) { active_time = 70; recovery_time = cycle_time - 61; } else printk(KERN_WARNING "%s: Strange recovery time !\n",drive->name); break; default: if (cycle_time >= 180) { active_time = 110; recovery_time = cycle_time - 120; } else { active_time = t->active; recovery_time = cycle_time - active_time; } } printk(KERN_INFO "%s: PIO mode%d\n", drive->name,pio); } if (!hwif->channel && drive->media != ide_disk) { outb(0x5f, QD_CONTROL_PORT); printk(KERN_WARNING "%s: ATAPI: disabled read-ahead FIFO " "and post-write buffer on %s.\n", drive->name, hwif->name); } qd_set_timing(drive, qd6580_compute_timing(active_time, recovery_time)); } /* * qd_testreg * * tests if the given port is a register */ static int __init qd_testreg(int port) { unsigned long flags; u8 savereg, readreg; local_irq_save(flags); savereg = inb_p(port); outb_p(QD_TESTVAL, port); /* safe value */ readreg = inb_p(port); outb(savereg, port); local_irq_restore(flags); if (savereg == QD_TESTVAL) { printk(KERN_ERR "Outch ! 
the probe for qd65xx isn't reliable !\n"); printk(KERN_ERR "Please contact maintainers to tell about your hardware\n"); printk(KERN_ERR "Assuming qd65xx is not present.\n"); return 1; } return (readreg != QD_TESTVAL); } static void __init qd6500_init_dev(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u8 base = (hwif->config_data & 0xff00) >> 8; u8 config = QD_CONFIG(hwif); ide_set_drivedata(drive, (void *)QD6500_DEF_DATA); } static void __init qd6580_init_dev(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u16 t1, t2; u8 base = (hwif->config_data & 0xff00) >> 8; u8 config = QD_CONFIG(hwif); if (hwif->host_flags & IDE_HFLAG_SINGLE) { t1 = QD6580_DEF_DATA; t2 = QD6580_DEF_DATA2; } else t2 = t1 = hwif->channel ? QD6580_DEF_DATA2 : QD6580_DEF_DATA; ide_set_drivedata(drive, (void *)((drive->dn & 1) ? t2 : t1)); } static const struct ide_tp_ops qd65xx_tp_ops = { .exec_command = ide_exec_command, .read_status = ide_read_status, .read_altstatus = ide_read_altstatus, .write_devctl = ide_write_devctl, .dev_select = qd65xx_dev_select, .tf_load = ide_tf_load, .tf_read = ide_tf_read, .input_data = ide_input_data, .output_data = ide_output_data, }; static const struct ide_port_ops qd6500_port_ops = { .init_dev = qd6500_init_dev, .set_pio_mode = qd6500_set_pio_mode, }; static const struct ide_port_ops qd6580_port_ops = { .init_dev = qd6580_init_dev, .set_pio_mode = qd6580_set_pio_mode, }; static const struct ide_port_info qd65xx_port_info __initdata = { .name = DRV_NAME, .tp_ops = &qd65xx_tp_ops, .chipset = ide_qd65xx, .host_flags = IDE_HFLAG_IO_32BIT | IDE_HFLAG_NO_DMA, .pio_mask = ATA_PIO4, }; /* * qd_probe: * * looks at the specified baseport, and if qd found, registers & initialises it * return 1 if another qd may be probed */ static int __init qd_probe(int base) { int rc; u8 config, unit, control; struct ide_port_info d = qd65xx_port_info; config = inb(QD_CONFIG_PORT); if (! ((config & QD_CONFIG_BASEPORT) >> 1 == (base == 0xb0)) ) return -ENODEV; unit = ! 
(config & QD_CONFIG_IDE_BASEPORT); if (unit) d.host_flags |= IDE_HFLAG_QD_2ND_PORT; switch (config & 0xf0) { case QD_CONFIG_QD6500: if (qd_testreg(base)) return -ENODEV; /* bad register */ if (config & QD_CONFIG_DISABLED) { printk(KERN_WARNING "qd6500 is disabled !\n"); return -ENODEV; } printk(KERN_NOTICE "qd6500 at %#x\n", base); printk(KERN_DEBUG "qd6500: config=%#x, ID3=%u\n", config, QD_ID3); d.port_ops = &qd6500_port_ops; d.host_flags |= IDE_HFLAG_SINGLE; break; case QD_CONFIG_QD6580_A: case QD_CONFIG_QD6580_B: if (qd_testreg(base) || qd_testreg(base + 0x02)) return -ENODEV; /* bad registers */ control = inb(QD_CONTROL_PORT); printk(KERN_NOTICE "qd6580 at %#x\n", base); printk(KERN_DEBUG "qd6580: config=%#x, control=%#x, ID3=%u\n", config, control, QD_ID3); outb(QD_DEF_CONTR, QD_CONTROL_PORT); d.port_ops = &qd6580_port_ops; if (control & QD_CONTR_SEC_DISABLED) d.host_flags |= IDE_HFLAG_SINGLE; printk(KERN_INFO "qd6580: %s IDE board\n", (control & QD_CONTR_SEC_DISABLED) ? "single" : "dual"); break; default: return -ENODEV; } rc = ide_legacy_device_add(&d, (base << 8) | config); if (d.host_flags & IDE_HFLAG_SINGLE) return (rc == 0) ? 1 : rc; return rc; } static bool probe_qd65xx; module_param_named(probe, probe_qd65xx, bool, 0); MODULE_PARM_DESC(probe, "probe for QD65xx chipsets"); static int __init qd65xx_init(void) { int rc1, rc2 = -ENODEV; if (probe_qd65xx == 0) return -ENODEV; rc1 = qd_probe(0x30); if (rc1) rc2 = qd_probe(0xb0); if (rc1 < 0 && rc2 < 0) return -ENODEV; return 0; } module_init(qd65xx_init); MODULE_AUTHOR("Samuel Thibault"); MODULE_DESCRIPTION("support of qd65xx vlb ide chipset"); MODULE_LICENSE("GPL");
gpl-2.0
estiko/android_kernel_lenovo_a706_xtremeuv
drivers/ide/qd65xx.c
4927
11148
/* * Copyright (C) 1996-2001 Linus Torvalds & author (see below) */ /* * Version 0.03 Cleaned auto-tune, added probe * Version 0.04 Added second channel tuning * Version 0.05 Enhanced tuning ; added qd6500 support * Version 0.06 Added dos driver's list * Version 0.07 Second channel bug fix * * QDI QD6500/QD6580 EIDE controller fast support * * To activate controller support, use "ide0=qd65xx" */ /* * Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by * Samuel Thibault <samuel.thibault@ens-lyon.org> */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/ioport.h> #include <linux/blkdev.h> #include <linux/ide.h> #include <linux/init.h> #include <asm/io.h> #define DRV_NAME "qd65xx" #include "qd65xx.h" /* * I/O ports are 0x30-0x31 (and 0x32-0x33 for qd6580) * or 0xb0-0xb1 (and 0xb2-0xb3 for qd6580) * -- qd6500 is a single IDE interface * -- qd6580 is a dual IDE interface * * More research on qd6580 being done by willmore@cig.mot.com (David) * More Information given by Petr Soucek (petr@ryston.cz) * http://www.ryston.cz/petr/vlb */ /* * base: Timer1 * * * base+0x01: Config (R/O) * * bit 0: ide baseport: 1 = 0x1f0 ; 0 = 0x170 (only useful for qd6500) * bit 1: qd65xx baseport: 1 = 0xb0 ; 0 = 0x30 * bit 2: ID3: bus speed: 1 = <=33MHz ; 0 = >33MHz * bit 3: qd6500: 1 = disabled, 0 = enabled * qd6580: 1 * upper nibble: * qd6500: 1100 * qd6580: either 1010 or 0101 * * * base+0x02: Timer2 (qd6580 only) * * * base+0x03: Control (qd6580 only) * * bits 0-3 must always be set 1 * bit 4 must be set 1, but is set 0 by dos driver while measuring vlb clock * bit 0 : 1 = Only primary port enabled : channel 0 for hda, channel 1 for hdb * 0 = Primary and Secondary ports enabled : channel 0 for hda & hdb * channel 1 for hdc & hdd * bit 1 : 1 = only disks on primary port * 0 = disks & ATAPI devices on primary port * bit 2-4 : always 0 * bit 5 : status, but of what 
? * bit 6 : always set 1 by dos driver * bit 7 : set 1 for non-ATAPI devices on primary port * (maybe read-ahead and post-write buffer ?) */ static int timings[4]={-1,-1,-1,-1}; /* stores current timing for each timer */ /* * qd65xx_select: * * This routine is invoked to prepare for access to a given drive. */ static void qd65xx_dev_select(ide_drive_t *drive) { u8 index = (( (QD_TIMREG(drive)) & 0x80 ) >> 7) | (QD_TIMREG(drive) & 0x02); if (timings[index] != QD_TIMING(drive)) outb(timings[index] = QD_TIMING(drive), QD_TIMREG(drive)); outb(drive->select | ATA_DEVICE_OBS, drive->hwif->io_ports.device_addr); } /* * qd6500_compute_timing * * computes the timing value where * lower nibble represents active time, in count of VLB clocks * upper nibble represents recovery time, in count of VLB clocks */ static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery_time) { int clk = ide_vlb_clk ? ide_vlb_clk : 50; u8 act_cyc, rec_cyc; if (clk <= 33) { act_cyc = 9 - IDE_IN(active_time * clk / 1000 + 1, 2, 9); rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 0, 15); } else { act_cyc = 8 - IDE_IN(active_time * clk / 1000 + 1, 1, 8); rec_cyc = 18 - IDE_IN(recovery_time * clk / 1000 + 1, 3, 18); } return (rec_cyc << 4) | 0x08 | act_cyc; } /* * qd6580_compute_timing * * idem for qd6580 */ static u8 qd6580_compute_timing (int active_time, int recovery_time) { int clk = ide_vlb_clk ? 
ide_vlb_clk : 50; u8 act_cyc, rec_cyc; act_cyc = 17 - IDE_IN(active_time * clk / 1000 + 1, 2, 17); rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 2, 15); return (rec_cyc << 4) | act_cyc; } /* * qd_find_disk_type * * tries to find timing from dos driver's table */ static int qd_find_disk_type (ide_drive_t *drive, int *active_time, int *recovery_time) { struct qd65xx_timing_s *p; char *m = (char *)&drive->id[ATA_ID_PROD]; char model[ATA_ID_PROD_LEN]; if (*m == 0) return 0; strncpy(model, m, ATA_ID_PROD_LEN); ide_fixstring(model, ATA_ID_PROD_LEN, 1); /* byte-swap */ for (p = qd65xx_timing ; p->offset != -1 ; p++) { if (!strncmp(p->model, model+p->offset, 4)) { printk(KERN_DEBUG "%s: listed !\n", drive->name); *active_time = p->active; *recovery_time = p->recovery; return 1; } } return 0; } /* * qd_set_timing: * * records the timing */ static void qd_set_timing (ide_drive_t *drive, u8 timing) { unsigned long data = (unsigned long)ide_get_drivedata(drive); data &= 0xff00; data |= timing; ide_set_drivedata(drive, (void *)data); printk(KERN_DEBUG "%s: %#x\n", drive->name, timing); } static void qd6500_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { u16 *id = drive->id; int active_time = 175; int recovery_time = 415; /* worst case values from the dos driver */ /* FIXME: use drive->pio_mode value */ if (!qd_find_disk_type(drive, &active_time, &recovery_time) && (id[ATA_ID_OLD_PIO_MODES] & 0xff) && (id[ATA_ID_FIELD_VALID] & 2) && id[ATA_ID_EIDE_PIO] >= 240) { printk(KERN_INFO "%s: PIO mode%d\n", drive->name, id[ATA_ID_OLD_PIO_MODES] & 0xff); active_time = 110; recovery_time = drive->id[ATA_ID_EIDE_PIO] - 120; } qd_set_timing(drive, qd6500_compute_timing(drive->hwif, active_time, recovery_time)); } static void qd6580_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { const u8 pio = drive->pio_mode - XFER_PIO_0; struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); unsigned int cycle_time; int active_time = 175; int recovery_time = 415; /* worst case 
values from the dos driver */ u8 base = (hwif->config_data & 0xff00) >> 8; if (drive->id && !qd_find_disk_type(drive, &active_time, &recovery_time)) { cycle_time = ide_pio_cycle_time(drive, pio); switch (pio) { case 0: break; case 3: if (cycle_time >= 110) { active_time = 86; recovery_time = cycle_time - 102; } else printk(KERN_WARNING "%s: Strange recovery time !\n",drive->name); break; case 4: if (cycle_time >= 69) { active_time = 70; recovery_time = cycle_time - 61; } else printk(KERN_WARNING "%s: Strange recovery time !\n",drive->name); break; default: if (cycle_time >= 180) { active_time = 110; recovery_time = cycle_time - 120; } else { active_time = t->active; recovery_time = cycle_time - active_time; } } printk(KERN_INFO "%s: PIO mode%d\n", drive->name,pio); } if (!hwif->channel && drive->media != ide_disk) { outb(0x5f, QD_CONTROL_PORT); printk(KERN_WARNING "%s: ATAPI: disabled read-ahead FIFO " "and post-write buffer on %s.\n", drive->name, hwif->name); } qd_set_timing(drive, qd6580_compute_timing(active_time, recovery_time)); } /* * qd_testreg * * tests if the given port is a register */ static int __init qd_testreg(int port) { unsigned long flags; u8 savereg, readreg; local_irq_save(flags); savereg = inb_p(port); outb_p(QD_TESTVAL, port); /* safe value */ readreg = inb_p(port); outb(savereg, port); local_irq_restore(flags); if (savereg == QD_TESTVAL) { printk(KERN_ERR "Outch ! 
the probe for qd65xx isn't reliable !\n"); printk(KERN_ERR "Please contact maintainers to tell about your hardware\n"); printk(KERN_ERR "Assuming qd65xx is not present.\n"); return 1; } return (readreg != QD_TESTVAL); } static void __init qd6500_init_dev(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u8 base = (hwif->config_data & 0xff00) >> 8; u8 config = QD_CONFIG(hwif); ide_set_drivedata(drive, (void *)QD6500_DEF_DATA); } static void __init qd6580_init_dev(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u16 t1, t2; u8 base = (hwif->config_data & 0xff00) >> 8; u8 config = QD_CONFIG(hwif); if (hwif->host_flags & IDE_HFLAG_SINGLE) { t1 = QD6580_DEF_DATA; t2 = QD6580_DEF_DATA2; } else t2 = t1 = hwif->channel ? QD6580_DEF_DATA2 : QD6580_DEF_DATA; ide_set_drivedata(drive, (void *)((drive->dn & 1) ? t2 : t1)); } static const struct ide_tp_ops qd65xx_tp_ops = { .exec_command = ide_exec_command, .read_status = ide_read_status, .read_altstatus = ide_read_altstatus, .write_devctl = ide_write_devctl, .dev_select = qd65xx_dev_select, .tf_load = ide_tf_load, .tf_read = ide_tf_read, .input_data = ide_input_data, .output_data = ide_output_data, }; static const struct ide_port_ops qd6500_port_ops = { .init_dev = qd6500_init_dev, .set_pio_mode = qd6500_set_pio_mode, }; static const struct ide_port_ops qd6580_port_ops = { .init_dev = qd6580_init_dev, .set_pio_mode = qd6580_set_pio_mode, }; static const struct ide_port_info qd65xx_port_info __initdata = { .name = DRV_NAME, .tp_ops = &qd65xx_tp_ops, .chipset = ide_qd65xx, .host_flags = IDE_HFLAG_IO_32BIT | IDE_HFLAG_NO_DMA, .pio_mask = ATA_PIO4, }; /* * qd_probe: * * looks at the specified baseport, and if qd found, registers & initialises it * return 1 if another qd may be probed */ static int __init qd_probe(int base) { int rc; u8 config, unit, control; struct ide_port_info d = qd65xx_port_info; config = inb(QD_CONFIG_PORT); if (! ((config & QD_CONFIG_BASEPORT) >> 1 == (base == 0xb0)) ) return -ENODEV; unit = ! 
(config & QD_CONFIG_IDE_BASEPORT); if (unit) d.host_flags |= IDE_HFLAG_QD_2ND_PORT; switch (config & 0xf0) { case QD_CONFIG_QD6500: if (qd_testreg(base)) return -ENODEV; /* bad register */ if (config & QD_CONFIG_DISABLED) { printk(KERN_WARNING "qd6500 is disabled !\n"); return -ENODEV; } printk(KERN_NOTICE "qd6500 at %#x\n", base); printk(KERN_DEBUG "qd6500: config=%#x, ID3=%u\n", config, QD_ID3); d.port_ops = &qd6500_port_ops; d.host_flags |= IDE_HFLAG_SINGLE; break; case QD_CONFIG_QD6580_A: case QD_CONFIG_QD6580_B: if (qd_testreg(base) || qd_testreg(base + 0x02)) return -ENODEV; /* bad registers */ control = inb(QD_CONTROL_PORT); printk(KERN_NOTICE "qd6580 at %#x\n", base); printk(KERN_DEBUG "qd6580: config=%#x, control=%#x, ID3=%u\n", config, control, QD_ID3); outb(QD_DEF_CONTR, QD_CONTROL_PORT); d.port_ops = &qd6580_port_ops; if (control & QD_CONTR_SEC_DISABLED) d.host_flags |= IDE_HFLAG_SINGLE; printk(KERN_INFO "qd6580: %s IDE board\n", (control & QD_CONTR_SEC_DISABLED) ? "single" : "dual"); break; default: return -ENODEV; } rc = ide_legacy_device_add(&d, (base << 8) | config); if (d.host_flags & IDE_HFLAG_SINGLE) return (rc == 0) ? 1 : rc; return rc; } static bool probe_qd65xx; module_param_named(probe, probe_qd65xx, bool, 0); MODULE_PARM_DESC(probe, "probe for QD65xx chipsets"); static int __init qd65xx_init(void) { int rc1, rc2 = -ENODEV; if (probe_qd65xx == 0) return -ENODEV; rc1 = qd_probe(0x30); if (rc1) rc2 = qd_probe(0xb0); if (rc1 < 0 && rc2 < 0) return -ENODEV; return 0; } module_init(qd65xx_init); MODULE_AUTHOR("Samuel Thibault"); MODULE_DESCRIPTION("support of qd65xx vlb ide chipset"); MODULE_LICENSE("GPL");
gpl-2.0
SuperHanss/android_kernel_sony_apq8064
drivers/media/video/tlg2300/pd-alsa.c
9279
7941
#include <linux/kernel.h> #include <linux/usb.h> #include <linux/init.h> #include <linux/sound.h> #include <linux/spinlock.h> #include <linux/soundcard.h> #include <linux/vmalloc.h> #include <linux/proc_fs.h> #include <linux/module.h> #include <linux/gfp.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/info.h> #include <sound/initval.h> #include <sound/control.h> #include <media/v4l2-common.h> #include "pd-common.h" #include "vendorcmds.h" static void complete_handler_audio(struct urb *urb); #define AUDIO_EP (0x83) #define AUDIO_BUF_SIZE (512) #define PERIOD_SIZE (1024 * 8) #define PERIOD_MIN (4) #define PERIOD_MAX PERIOD_MIN static struct snd_pcm_hardware snd_pd_hw_capture = { .info = SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = PERIOD_SIZE * PERIOD_MIN, .period_bytes_min = PERIOD_SIZE, .period_bytes_max = PERIOD_SIZE, .periods_min = PERIOD_MIN, .periods_max = PERIOD_MAX, /* .buffer_bytes_max = 62720 * 8, .period_bytes_min = 64, .period_bytes_max = 12544, .periods_min = 2, .periods_max = 98 */ }; static int snd_pd_capture_open(struct snd_pcm_substream *substream) { struct poseidon *p = snd_pcm_substream_chip(substream); struct poseidon_audio *pa = &p->audio; struct snd_pcm_runtime *runtime = substream->runtime; if (!p) return -ENODEV; pa->users++; pa->card_close = 0; pa->capture_pcm_substream = substream; runtime->private_data = p; runtime->hw = snd_pd_hw_capture; snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); usb_autopm_get_interface(p->interface); kref_get(&p->kref); return 0; } static int snd_pd_pcm_close(struct snd_pcm_substream *substream) { struct poseidon *p = snd_pcm_substream_chip(substream); struct poseidon_audio *pa = &p->audio; pa->users--; pa->card_close = 1; 
usb_autopm_put_interface(p->interface); kref_put(&p->kref, poseidon_delete); return 0; } static int snd_pd_hw_capture_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_pcm_runtime *runtime = substream->runtime; unsigned int size; size = params_buffer_bytes(hw_params); if (runtime->dma_area) { if (runtime->dma_bytes > size) return 0; vfree(runtime->dma_area); } runtime->dma_area = vmalloc(size); if (!runtime->dma_area) return -ENOMEM; else runtime->dma_bytes = size; return 0; } static int audio_buf_free(struct poseidon *p) { struct poseidon_audio *pa = &p->audio; int i; for (i = 0; i < AUDIO_BUFS; i++) if (pa->urb_array[i]) usb_kill_urb(pa->urb_array[i]); free_all_urb_generic(pa->urb_array, AUDIO_BUFS); logpm(); return 0; } static int snd_pd_hw_capture_free(struct snd_pcm_substream *substream) { struct poseidon *p = snd_pcm_substream_chip(substream); logpm(); audio_buf_free(p); return 0; } static int snd_pd_prepare(struct snd_pcm_substream *substream) { return 0; } #define AUDIO_TRAILER_SIZE (16) static inline void handle_audio_data(struct urb *urb, int *period_elapsed) { struct poseidon_audio *pa = urb->context; struct snd_pcm_runtime *runtime = pa->capture_pcm_substream->runtime; int stride = runtime->frame_bits >> 3; int len = urb->actual_length / stride; unsigned char *cp = urb->transfer_buffer; unsigned int oldptr = pa->rcv_position; if (urb->actual_length == AUDIO_BUF_SIZE - 4) len -= (AUDIO_TRAILER_SIZE / stride); /* do the copy */ if (oldptr + len >= runtime->buffer_size) { unsigned int cnt = runtime->buffer_size - oldptr; memcpy(runtime->dma_area + oldptr * stride, cp, cnt * stride); memcpy(runtime->dma_area, (cp + cnt * stride), (len * stride - cnt * stride)); } else memcpy(runtime->dma_area + oldptr * stride, cp, len * stride); /* update the statas */ snd_pcm_stream_lock(pa->capture_pcm_substream); pa->rcv_position += len; if (pa->rcv_position >= runtime->buffer_size) pa->rcv_position -= runtime->buffer_size; 
pa->copied_position += (len); if (pa->copied_position >= runtime->period_size) { pa->copied_position -= runtime->period_size; *period_elapsed = 1; } snd_pcm_stream_unlock(pa->capture_pcm_substream); } static void complete_handler_audio(struct urb *urb) { struct poseidon_audio *pa = urb->context; struct snd_pcm_substream *substream = pa->capture_pcm_substream; int period_elapsed = 0; int ret; if (1 == pa->card_close || pa->capture_stream != STREAM_ON) return; if (urb->status != 0) { /*if (urb->status == -ESHUTDOWN)*/ return; } if (substream) { if (urb->actual_length) { handle_audio_data(urb, &period_elapsed); if (period_elapsed) snd_pcm_period_elapsed(substream); } } ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) log("audio urb failed (errcod = %i)", ret); return; } static int fire_audio_urb(struct poseidon *p) { int i, ret = 0; struct poseidon_audio *pa = &p->audio; alloc_bulk_urbs_generic(pa->urb_array, AUDIO_BUFS, p->udev, AUDIO_EP, AUDIO_BUF_SIZE, GFP_ATOMIC, complete_handler_audio, pa); for (i = 0; i < AUDIO_BUFS; i++) { ret = usb_submit_urb(pa->urb_array[i], GFP_KERNEL); if (ret) log("urb err : %d", ret); } log(); return ret; } static int snd_pd_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct poseidon *p = snd_pcm_substream_chip(substream); struct poseidon_audio *pa = &p->audio; if (debug_mode) log("cmd %d, audio stat : %d\n", cmd, pa->capture_stream); switch (cmd) { case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_START: if (pa->capture_stream == STREAM_ON) return 0; pa->rcv_position = pa->copied_position = 0; pa->capture_stream = STREAM_ON; if (in_hibernation(p)) return 0; fire_audio_urb(p); return 0; case SNDRV_PCM_TRIGGER_SUSPEND: pa->capture_stream = STREAM_SUSPEND; return 0; case SNDRV_PCM_TRIGGER_STOP: pa->capture_stream = STREAM_OFF; return 0; default: return -EINVAL; } } static snd_pcm_uframes_t snd_pd_capture_pointer(struct snd_pcm_substream *substream) { struct poseidon *p = snd_pcm_substream_chip(substream); struct 
poseidon_audio *pa = &p->audio; return pa->rcv_position; } static struct page *snd_pcm_pd_get_page(struct snd_pcm_substream *subs, unsigned long offset) { void *pageptr = subs->runtime->dma_area + offset; return vmalloc_to_page(pageptr); } static struct snd_pcm_ops pcm_capture_ops = { .open = snd_pd_capture_open, .close = snd_pd_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_pd_hw_capture_params, .hw_free = snd_pd_hw_capture_free, .prepare = snd_pd_prepare, .trigger = snd_pd_capture_trigger, .pointer = snd_pd_capture_pointer, .page = snd_pcm_pd_get_page, }; #ifdef CONFIG_PM int pm_alsa_suspend(struct poseidon *p) { logpm(p); audio_buf_free(p); return 0; } int pm_alsa_resume(struct poseidon *p) { logpm(p); fire_audio_urb(p); return 0; } #endif int poseidon_audio_init(struct poseidon *p) { struct poseidon_audio *pa = &p->audio; struct snd_card *card; struct snd_pcm *pcm; int ret; ret = snd_card_create(-1, "Telegent", THIS_MODULE, 0, &card); if (ret != 0) return ret; ret = snd_pcm_new(card, "poseidon audio", 0, 0, 1, &pcm); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_capture_ops); pcm->info_flags = 0; pcm->private_data = p; strcpy(pcm->name, "poseidon audio capture"); strcpy(card->driver, "ALSA driver"); strcpy(card->shortname, "poseidon Audio"); strcpy(card->longname, "poseidon ALSA Audio"); if (snd_card_register(card)) { snd_card_free(card); return -ENOMEM; } pa->card = card; return 0; } int poseidon_audio_free(struct poseidon *p) { struct poseidon_audio *pa = &p->audio; if (pa->card) snd_card_free(pa->card); return 0; }
gpl-2.0
zeusk/tiamat-leo
drivers/media/video/tlg2300/pd-alsa.c
9279
7941
#include <linux/kernel.h> #include <linux/usb.h> #include <linux/init.h> #include <linux/sound.h> #include <linux/spinlock.h> #include <linux/soundcard.h> #include <linux/vmalloc.h> #include <linux/proc_fs.h> #include <linux/module.h> #include <linux/gfp.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/info.h> #include <sound/initval.h> #include <sound/control.h> #include <media/v4l2-common.h> #include "pd-common.h" #include "vendorcmds.h" static void complete_handler_audio(struct urb *urb); #define AUDIO_EP (0x83) #define AUDIO_BUF_SIZE (512) #define PERIOD_SIZE (1024 * 8) #define PERIOD_MIN (4) #define PERIOD_MAX PERIOD_MIN static struct snd_pcm_hardware snd_pd_hw_capture = { .info = SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = PERIOD_SIZE * PERIOD_MIN, .period_bytes_min = PERIOD_SIZE, .period_bytes_max = PERIOD_SIZE, .periods_min = PERIOD_MIN, .periods_max = PERIOD_MAX, /* .buffer_bytes_max = 62720 * 8, .period_bytes_min = 64, .period_bytes_max = 12544, .periods_min = 2, .periods_max = 98 */ }; static int snd_pd_capture_open(struct snd_pcm_substream *substream) { struct poseidon *p = snd_pcm_substream_chip(substream); struct poseidon_audio *pa = &p->audio; struct snd_pcm_runtime *runtime = substream->runtime; if (!p) return -ENODEV; pa->users++; pa->card_close = 0; pa->capture_pcm_substream = substream; runtime->private_data = p; runtime->hw = snd_pd_hw_capture; snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); usb_autopm_get_interface(p->interface); kref_get(&p->kref); return 0; } static int snd_pd_pcm_close(struct snd_pcm_substream *substream) { struct poseidon *p = snd_pcm_substream_chip(substream); struct poseidon_audio *pa = &p->audio; pa->users--; pa->card_close = 1; 
usb_autopm_put_interface(p->interface); kref_put(&p->kref, poseidon_delete); return 0; } static int snd_pd_hw_capture_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_pcm_runtime *runtime = substream->runtime; unsigned int size; size = params_buffer_bytes(hw_params); if (runtime->dma_area) { if (runtime->dma_bytes > size) return 0; vfree(runtime->dma_area); } runtime->dma_area = vmalloc(size); if (!runtime->dma_area) return -ENOMEM; else runtime->dma_bytes = size; return 0; } static int audio_buf_free(struct poseidon *p) { struct poseidon_audio *pa = &p->audio; int i; for (i = 0; i < AUDIO_BUFS; i++) if (pa->urb_array[i]) usb_kill_urb(pa->urb_array[i]); free_all_urb_generic(pa->urb_array, AUDIO_BUFS); logpm(); return 0; } static int snd_pd_hw_capture_free(struct snd_pcm_substream *substream) { struct poseidon *p = snd_pcm_substream_chip(substream); logpm(); audio_buf_free(p); return 0; } static int snd_pd_prepare(struct snd_pcm_substream *substream) { return 0; } #define AUDIO_TRAILER_SIZE (16) static inline void handle_audio_data(struct urb *urb, int *period_elapsed) { struct poseidon_audio *pa = urb->context; struct snd_pcm_runtime *runtime = pa->capture_pcm_substream->runtime; int stride = runtime->frame_bits >> 3; int len = urb->actual_length / stride; unsigned char *cp = urb->transfer_buffer; unsigned int oldptr = pa->rcv_position; if (urb->actual_length == AUDIO_BUF_SIZE - 4) len -= (AUDIO_TRAILER_SIZE / stride); /* do the copy */ if (oldptr + len >= runtime->buffer_size) { unsigned int cnt = runtime->buffer_size - oldptr; memcpy(runtime->dma_area + oldptr * stride, cp, cnt * stride); memcpy(runtime->dma_area, (cp + cnt * stride), (len * stride - cnt * stride)); } else memcpy(runtime->dma_area + oldptr * stride, cp, len * stride); /* update the statas */ snd_pcm_stream_lock(pa->capture_pcm_substream); pa->rcv_position += len; if (pa->rcv_position >= runtime->buffer_size) pa->rcv_position -= runtime->buffer_size; 
pa->copied_position += (len); if (pa->copied_position >= runtime->period_size) { pa->copied_position -= runtime->period_size; *period_elapsed = 1; } snd_pcm_stream_unlock(pa->capture_pcm_substream); } static void complete_handler_audio(struct urb *urb) { struct poseidon_audio *pa = urb->context; struct snd_pcm_substream *substream = pa->capture_pcm_substream; int period_elapsed = 0; int ret; if (1 == pa->card_close || pa->capture_stream != STREAM_ON) return; if (urb->status != 0) { /*if (urb->status == -ESHUTDOWN)*/ return; } if (substream) { if (urb->actual_length) { handle_audio_data(urb, &period_elapsed); if (period_elapsed) snd_pcm_period_elapsed(substream); } } ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) log("audio urb failed (errcod = %i)", ret); return; } static int fire_audio_urb(struct poseidon *p) { int i, ret = 0; struct poseidon_audio *pa = &p->audio; alloc_bulk_urbs_generic(pa->urb_array, AUDIO_BUFS, p->udev, AUDIO_EP, AUDIO_BUF_SIZE, GFP_ATOMIC, complete_handler_audio, pa); for (i = 0; i < AUDIO_BUFS; i++) { ret = usb_submit_urb(pa->urb_array[i], GFP_KERNEL); if (ret) log("urb err : %d", ret); } log(); return ret; } static int snd_pd_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct poseidon *p = snd_pcm_substream_chip(substream); struct poseidon_audio *pa = &p->audio; if (debug_mode) log("cmd %d, audio stat : %d\n", cmd, pa->capture_stream); switch (cmd) { case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_START: if (pa->capture_stream == STREAM_ON) return 0; pa->rcv_position = pa->copied_position = 0; pa->capture_stream = STREAM_ON; if (in_hibernation(p)) return 0; fire_audio_urb(p); return 0; case SNDRV_PCM_TRIGGER_SUSPEND: pa->capture_stream = STREAM_SUSPEND; return 0; case SNDRV_PCM_TRIGGER_STOP: pa->capture_stream = STREAM_OFF; return 0; default: return -EINVAL; } } static snd_pcm_uframes_t snd_pd_capture_pointer(struct snd_pcm_substream *substream) { struct poseidon *p = snd_pcm_substream_chip(substream); struct 
poseidon_audio *pa = &p->audio; return pa->rcv_position; } static struct page *snd_pcm_pd_get_page(struct snd_pcm_substream *subs, unsigned long offset) { void *pageptr = subs->runtime->dma_area + offset; return vmalloc_to_page(pageptr); } static struct snd_pcm_ops pcm_capture_ops = { .open = snd_pd_capture_open, .close = snd_pd_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_pd_hw_capture_params, .hw_free = snd_pd_hw_capture_free, .prepare = snd_pd_prepare, .trigger = snd_pd_capture_trigger, .pointer = snd_pd_capture_pointer, .page = snd_pcm_pd_get_page, }; #ifdef CONFIG_PM int pm_alsa_suspend(struct poseidon *p) { logpm(p); audio_buf_free(p); return 0; } int pm_alsa_resume(struct poseidon *p) { logpm(p); fire_audio_urb(p); return 0; } #endif int poseidon_audio_init(struct poseidon *p) { struct poseidon_audio *pa = &p->audio; struct snd_card *card; struct snd_pcm *pcm; int ret; ret = snd_card_create(-1, "Telegent", THIS_MODULE, 0, &card); if (ret != 0) return ret; ret = snd_pcm_new(card, "poseidon audio", 0, 0, 1, &pcm); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_capture_ops); pcm->info_flags = 0; pcm->private_data = p; strcpy(pcm->name, "poseidon audio capture"); strcpy(card->driver, "ALSA driver"); strcpy(card->shortname, "poseidon Audio"); strcpy(card->longname, "poseidon ALSA Audio"); if (snd_card_register(card)) { snd_card_free(card); return -ENOMEM; } pa->card = card; return 0; } int poseidon_audio_free(struct poseidon *p) { struct poseidon_audio *pa = &p->audio; if (pa->card) snd_card_free(pa->card); return 0; }
gpl-2.0
daishi4u/J7_Afterburner
drivers/hwmon/pmbus/ucd9200.c
9535
5207
/*
 * Hardware monitoring driver for ucd9200 series Digital PWM System Controllers
 *
 * Copyright (C) 2011 Ericsson AB.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/i2c/pmbus.h>
#include "pmbus.h"

/* vendor-specific PMBus registers */
#define UCD9200_PHASE_INFO	0xd2
#define UCD9200_DEVICE_ID	0xfd

enum chips { ucd9200, ucd9220, ucd9222, ucd9224, ucd9240, ucd9244, ucd9246,
	     ucd9248 };

static const struct i2c_device_id ucd9200_id[] = {
	{"ucd9200", ucd9200},
	{"ucd9220", ucd9220},
	{"ucd9222", ucd9222},
	{"ucd9224", ucd9224},
	{"ucd9240", ucd9240},
	{"ucd9244", ucd9244},
	{"ucd9246", ucd9246},
	{"ucd9248", ucd9248},
	{}
};
MODULE_DEVICE_TABLE(i2c, ucd9200_id);

/*
 * Probe: identify the exact chip from its DEVICE_ID string, count the
 * configured rails from PHASE_INFO, initialize the PHASE registers and
 * hand off to the generic pmbus core.
 */
static int ucd9200_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
	struct pmbus_driver_info *info;
	const struct i2c_device_id *mid;
	int i, j, ret;

	if (!i2c_check_functionality(client->adapter,
				     I2C_FUNC_SMBUS_BYTE_DATA |
				     I2C_FUNC_SMBUS_BLOCK_DATA))
		return -ENODEV;

	ret = i2c_smbus_read_block_data(client, UCD9200_DEVICE_ID,
					block_buffer);
	if (ret < 0) {
		dev_err(&client->dev, "Failed to read device ID\n");
		return ret;
	}
	/* ret is the block length; terminate so it can be used as a string */
	block_buffer[ret] = '\0';
	dev_info(&client->dev, "Device ID %s\n", block_buffer);

	/* match the reported ID against the known chip names */
	for (mid = ucd9200_id; mid->name[0]; mid++) {
		if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
			break;
	}
	if (!mid->name[0]) {
		dev_err(&client->dev, "Unsupported device\n");
		return -ENODEV;
	}
	/* warn (but continue) if devicetree/board data named a different chip */
	if (id->driver_data != ucd9200 && id->driver_data != mid->driver_data)
		dev_notice(&client->dev,
			   "Device mismatch: Configured %s, detected %s\n",
			   id->name, mid->name);

	info = devm_kzalloc(&client->dev, sizeof(struct pmbus_driver_info),
			    GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	ret = i2c_smbus_read_block_data(client, UCD9200_PHASE_INFO,
					block_buffer);
	if (ret < 0) {
		dev_err(&client->dev, "Failed to read phase information\n");
		return ret;
	}

	/*
	 * Calculate number of configured pages (rails) from PHASE_INFO
	 * register.
	 * Rails have to be sequential, so we can abort after finding
	 * the first unconfigured rail.
	 */
	info->pages = 0;
	for (i = 0; i < ret; i++) {
		if (!block_buffer[i])
			break;
		info->pages++;
	}
	if (!info->pages) {
		dev_err(&client->dev, "No rails configured\n");
		return -ENODEV;
	}
	dev_info(&client->dev, "%d rails configured\n", info->pages);

	/*
	 * Set PHASE registers on all pages to 0xff to ensure that phase
	 * specific commands will apply to all phases of a given page (rail).
	 * This only affects the READ_IOUT and READ_TEMPERATURE2 registers.
	 * READ_IOUT will return the sum of currents of all phases of a rail,
	 * and READ_TEMPERATURE2 will return the maximum temperature detected
	 * for the phases of the rail.
	 */
	for (i = 0; i < info->pages; i++) {
		/*
		 * Setting PAGE & PHASE fails once in a while for no obvious
		 * reason, so we need to retry a couple of times.
		 */
		for (j = 0; j < 3; j++) {
			ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, i);
			if (ret < 0)
				continue;
			ret = i2c_smbus_write_byte_data(client, PMBUS_PHASE,
							0xff);
			if (ret < 0)
				continue;
			break;
		}
		if (ret < 0) {
			dev_err(&client->dev,
				"Failed to initialize PHASE registers\n");
			return ret;
		}
	}
	/* leave the chip pointing at page 0; best-effort, errors ignored */
	if (info->pages > 1)
		i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);

	/* page 0 carries the input-side sensors as well */
	info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT |
			PMBUS_HAVE_IIN | PMBUS_HAVE_PIN |
			PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
			PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
			PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP |
			PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;

	for (i = 1; i < info->pages; i++)
		info->func[i] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
				PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
				PMBUS_HAVE_POUT |
				PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;

	/* ucd9240 supports a single fan */
	if (mid->driver_data == ucd9240)
		info->func[0] |= PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12;

	return pmbus_do_probe(client, mid, info);
}

/* This is the driver that will be inserted */
static struct i2c_driver ucd9200_driver = {
	.driver = {
		.name = "ucd9200",
	},
	.probe = ucd9200_probe,
	.remove = pmbus_do_remove,
	.id_table = ucd9200_id,
};

module_i2c_driver(ucd9200_driver);

MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("PMBus driver for TI UCD922x, UCD924x");
MODULE_LICENSE("GPL");
gpl-2.0
Desterly/android_kernel_motorola_msm8994
net/ipv4/tcp_highspeed.c
10559
5018
/* * Sally Floyd's High Speed TCP (RFC 3649) congestion control * * See http://www.icir.org/floyd/hstcp.html * * John Heffner <jheffner@psc.edu> */ #include <linux/module.h> #include <net/tcp.h> /* From AIMD tables from RFC 3649 appendix B, * with fixed-point MD scaled <<8. */ static const struct hstcp_aimd_val { unsigned int cwnd; unsigned int md; } hstcp_aimd_vals[] = { { 38, 128, /* 0.50 */ }, { 118, 112, /* 0.44 */ }, { 221, 104, /* 0.41 */ }, { 347, 98, /* 0.38 */ }, { 495, 93, /* 0.37 */ }, { 663, 89, /* 0.35 */ }, { 851, 86, /* 0.34 */ }, { 1058, 83, /* 0.33 */ }, { 1284, 81, /* 0.32 */ }, { 1529, 78, /* 0.31 */ }, { 1793, 76, /* 0.30 */ }, { 2076, 74, /* 0.29 */ }, { 2378, 72, /* 0.28 */ }, { 2699, 71, /* 0.28 */ }, { 3039, 69, /* 0.27 */ }, { 3399, 68, /* 0.27 */ }, { 3778, 66, /* 0.26 */ }, { 4177, 65, /* 0.26 */ }, { 4596, 64, /* 0.25 */ }, { 5036, 62, /* 0.25 */ }, { 5497, 61, /* 0.24 */ }, { 5979, 60, /* 0.24 */ }, { 6483, 59, /* 0.23 */ }, { 7009, 58, /* 0.23 */ }, { 7558, 57, /* 0.22 */ }, { 8130, 56, /* 0.22 */ }, { 8726, 55, /* 0.22 */ }, { 9346, 54, /* 0.21 */ }, { 9991, 53, /* 0.21 */ }, { 10661, 52, /* 0.21 */ }, { 11358, 52, /* 0.20 */ }, { 12082, 51, /* 0.20 */ }, { 12834, 50, /* 0.20 */ }, { 13614, 49, /* 0.19 */ }, { 14424, 48, /* 0.19 */ }, { 15265, 48, /* 0.19 */ }, { 16137, 47, /* 0.19 */ }, { 17042, 46, /* 0.18 */ }, { 17981, 45, /* 0.18 */ }, { 18955, 45, /* 0.18 */ }, { 19965, 44, /* 0.17 */ }, { 21013, 43, /* 0.17 */ }, { 22101, 43, /* 0.17 */ }, { 23230, 42, /* 0.17 */ }, { 24402, 41, /* 0.16 */ }, { 25618, 41, /* 0.16 */ }, { 26881, 40, /* 0.16 */ }, { 28193, 39, /* 0.16 */ }, { 29557, 39, /* 0.15 */ }, { 30975, 38, /* 0.15 */ }, { 32450, 38, /* 0.15 */ }, { 33986, 37, /* 0.15 */ }, { 35586, 36, /* 0.14 */ }, { 37253, 36, /* 0.14 */ }, { 38992, 35, /* 0.14 */ }, { 40808, 35, /* 0.14 */ }, { 42707, 34, /* 0.13 */ }, { 44694, 33, /* 0.13 */ }, { 46776, 33, /* 0.13 */ }, { 48961, 32, /* 0.13 */ }, { 51258, 32, /* 0.13 */ }, { 53677, 
31, /* 0.12 */ }, { 56230, 30, /* 0.12 */ }, { 58932, 30, /* 0.12 */ }, { 61799, 29, /* 0.12 */ }, { 64851, 28, /* 0.11 */ }, { 68113, 28, /* 0.11 */ }, { 71617, 27, /* 0.11 */ }, { 75401, 26, /* 0.10 */ }, { 79517, 26, /* 0.10 */ }, { 84035, 25, /* 0.10 */ }, { 89053, 24, /* 0.10 */ }, }; #define HSTCP_AIMD_MAX ARRAY_SIZE(hstcp_aimd_vals) struct hstcp { u32 ai; }; static void hstcp_init(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct hstcp *ca = inet_csk_ca(sk); ca->ai = 0; /* Ensure the MD arithmetic works. This is somewhat pedantic, * since I don't think we will see a cwnd this large. :) */ tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128); } static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 in_flight) { struct tcp_sock *tp = tcp_sk(sk); struct hstcp *ca = inet_csk_ca(sk); if (!tcp_is_cwnd_limited(sk, in_flight)) return; if (tp->snd_cwnd <= tp->snd_ssthresh) tcp_slow_start(tp); else { /* Update AIMD parameters. * * We want to guarantee that: * hstcp_aimd_vals[ca->ai-1].cwnd < * snd_cwnd <= * hstcp_aimd_vals[ca->ai].cwnd */ if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) { while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd && ca->ai < HSTCP_AIMD_MAX - 1) ca->ai++; } else if (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) { while (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) ca->ai--; } /* Do additive increase */ if (tp->snd_cwnd < tp->snd_cwnd_clamp) { /* cwnd = cwnd + a(w) / cwnd */ tp->snd_cwnd_cnt += ca->ai + 1; if (tp->snd_cwnd_cnt >= tp->snd_cwnd) { tp->snd_cwnd_cnt -= tp->snd_cwnd; tp->snd_cwnd++; } } } } static u32 hstcp_ssthresh(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); const struct hstcp *ca = inet_csk_ca(sk); /* Do multiplicative decrease */ return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U); } static struct tcp_congestion_ops tcp_highspeed __read_mostly = { .init = hstcp_init, .ssthresh = hstcp_ssthresh, .cong_avoid = hstcp_cong_avoid, 
.min_cwnd = tcp_reno_min_cwnd, .owner = THIS_MODULE, .name = "highspeed" }; static int __init hstcp_register(void) { BUILD_BUG_ON(sizeof(struct hstcp) > ICSK_CA_PRIV_SIZE); return tcp_register_congestion_control(&tcp_highspeed); } static void __exit hstcp_unregister(void) { tcp_unregister_congestion_control(&tcp_highspeed); } module_init(hstcp_register); module_exit(hstcp_unregister); MODULE_AUTHOR("John Heffner"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("High Speed TCP");
gpl-2.0
aranb/linux
drivers/thermal/thermal_core.c
64
48601
/* * thermal.c - Generic Thermal Management Sysfs support. * * Copyright (C) 2008 Intel Corp * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com> * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/device.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/kdev_t.h> #include <linux/idr.h> #include <linux/thermal.h> #include <linux/reboot.h> #include <linux/string.h> #include <linux/of.h> #include <net/netlink.h> #include <net/genetlink.h> #define CREATE_TRACE_POINTS #include <trace/events/thermal.h> #include "thermal_core.h" #include "thermal_hwmon.h" MODULE_AUTHOR("Zhang Rui"); MODULE_DESCRIPTION("Generic thermal management sysfs support"); MODULE_LICENSE("GPL v2"); static DEFINE_IDR(thermal_tz_idr); static DEFINE_IDR(thermal_cdev_idr); static DEFINE_MUTEX(thermal_idr_lock); static LIST_HEAD(thermal_tz_list); static LIST_HEAD(thermal_cdev_list); static LIST_HEAD(thermal_governor_list); static DEFINE_MUTEX(thermal_list_lock); static DEFINE_MUTEX(thermal_governor_lock); static struct thermal_governor *def_governor; static struct thermal_governor 
*__find_governor(const char *name) { struct thermal_governor *pos; if (!name || !name[0]) return def_governor; list_for_each_entry(pos, &thermal_governor_list, governor_list) if (!strncasecmp(name, pos->name, THERMAL_NAME_LENGTH)) return pos; return NULL; } int thermal_register_governor(struct thermal_governor *governor) { int err; const char *name; struct thermal_zone_device *pos; if (!governor) return -EINVAL; mutex_lock(&thermal_governor_lock); err = -EBUSY; if (__find_governor(governor->name) == NULL) { err = 0; list_add(&governor->governor_list, &thermal_governor_list); if (!def_governor && !strncmp(governor->name, DEFAULT_THERMAL_GOVERNOR, THERMAL_NAME_LENGTH)) def_governor = governor; } mutex_lock(&thermal_list_lock); list_for_each_entry(pos, &thermal_tz_list, node) { /* * only thermal zones with specified tz->tzp->governor_name * may run with tz->govenor unset */ if (pos->governor) continue; name = pos->tzp->governor_name; if (!strncasecmp(name, governor->name, THERMAL_NAME_LENGTH)) pos->governor = governor; } mutex_unlock(&thermal_list_lock); mutex_unlock(&thermal_governor_lock); return err; } void thermal_unregister_governor(struct thermal_governor *governor) { struct thermal_zone_device *pos; if (!governor) return; mutex_lock(&thermal_governor_lock); if (__find_governor(governor->name) == NULL) goto exit; mutex_lock(&thermal_list_lock); list_for_each_entry(pos, &thermal_tz_list, node) { if (!strncasecmp(pos->governor->name, governor->name, THERMAL_NAME_LENGTH)) pos->governor = NULL; } mutex_unlock(&thermal_list_lock); list_del(&governor->governor_list); exit: mutex_unlock(&thermal_governor_lock); return; } static int get_idr(struct idr *idr, struct mutex *lock, int *id) { int ret; if (lock) mutex_lock(lock); ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL); if (lock) mutex_unlock(lock); if (unlikely(ret < 0)) return ret; *id = ret; return 0; } static void release_idr(struct idr *idr, struct mutex *lock, int id) { if (lock) mutex_lock(lock); idr_remove(idr, 
id); if (lock) mutex_unlock(lock); } int get_tz_trend(struct thermal_zone_device *tz, int trip) { enum thermal_trend trend; if (tz->emul_temperature || !tz->ops->get_trend || tz->ops->get_trend(tz, trip, &trend)) { if (tz->temperature > tz->last_temperature) trend = THERMAL_TREND_RAISING; else if (tz->temperature < tz->last_temperature) trend = THERMAL_TREND_DROPPING; else trend = THERMAL_TREND_STABLE; } return trend; } EXPORT_SYMBOL(get_tz_trend); struct thermal_instance *get_thermal_instance(struct thermal_zone_device *tz, struct thermal_cooling_device *cdev, int trip) { struct thermal_instance *pos = NULL; struct thermal_instance *target_instance = NULL; mutex_lock(&tz->lock); mutex_lock(&cdev->lock); list_for_each_entry(pos, &tz->thermal_instances, tz_node) { if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { target_instance = pos; break; } } mutex_unlock(&cdev->lock); mutex_unlock(&tz->lock); return target_instance; } EXPORT_SYMBOL(get_thermal_instance); static void print_bind_err_msg(struct thermal_zone_device *tz, struct thermal_cooling_device *cdev, int ret) { dev_err(&tz->device, "binding zone %s with cdev %s failed:%d\n", tz->type, cdev->type, ret); } static void __bind(struct thermal_zone_device *tz, int mask, struct thermal_cooling_device *cdev, unsigned long *limits) { int i, ret; for (i = 0; i < tz->trips; i++) { if (mask & (1 << i)) { unsigned long upper, lower; upper = THERMAL_NO_LIMIT; lower = THERMAL_NO_LIMIT; if (limits) { lower = limits[i * 2]; upper = limits[i * 2 + 1]; } ret = thermal_zone_bind_cooling_device(tz, i, cdev, upper, lower); if (ret) print_bind_err_msg(tz, cdev, ret); } } } static void __unbind(struct thermal_zone_device *tz, int mask, struct thermal_cooling_device *cdev) { int i; for (i = 0; i < tz->trips; i++) if (mask & (1 << i)) thermal_zone_unbind_cooling_device(tz, i, cdev); } static void bind_cdev(struct thermal_cooling_device *cdev) { int i, ret; const struct thermal_zone_params *tzp; struct thermal_zone_device 
*pos = NULL; mutex_lock(&thermal_list_lock); list_for_each_entry(pos, &thermal_tz_list, node) { if (!pos->tzp && !pos->ops->bind) continue; if (pos->ops->bind) { ret = pos->ops->bind(pos, cdev); if (ret) print_bind_err_msg(pos, cdev, ret); continue; } tzp = pos->tzp; if (!tzp || !tzp->tbp) continue; for (i = 0; i < tzp->num_tbps; i++) { if (tzp->tbp[i].cdev || !tzp->tbp[i].match) continue; if (tzp->tbp[i].match(pos, cdev)) continue; tzp->tbp[i].cdev = cdev; __bind(pos, tzp->tbp[i].trip_mask, cdev, tzp->tbp[i].binding_limits); } } mutex_unlock(&thermal_list_lock); } static void bind_tz(struct thermal_zone_device *tz) { int i, ret; struct thermal_cooling_device *pos = NULL; const struct thermal_zone_params *tzp = tz->tzp; if (!tzp && !tz->ops->bind) return; mutex_lock(&thermal_list_lock); /* If there is ops->bind, try to use ops->bind */ if (tz->ops->bind) { list_for_each_entry(pos, &thermal_cdev_list, node) { ret = tz->ops->bind(tz, pos); if (ret) print_bind_err_msg(tz, pos, ret); } goto exit; } if (!tzp || !tzp->tbp) goto exit; list_for_each_entry(pos, &thermal_cdev_list, node) { for (i = 0; i < tzp->num_tbps; i++) { if (tzp->tbp[i].cdev || !tzp->tbp[i].match) continue; if (tzp->tbp[i].match(tz, pos)) continue; tzp->tbp[i].cdev = pos; __bind(tz, tzp->tbp[i].trip_mask, pos, tzp->tbp[i].binding_limits); } } exit: mutex_unlock(&thermal_list_lock); } static void thermal_zone_device_set_polling(struct thermal_zone_device *tz, int delay) { if (delay > 1000) mod_delayed_work(system_freezable_wq, &tz->poll_queue, round_jiffies(msecs_to_jiffies(delay))); else if (delay) mod_delayed_work(system_freezable_wq, &tz->poll_queue, msecs_to_jiffies(delay)); else cancel_delayed_work(&tz->poll_queue); } static void monitor_thermal_zone(struct thermal_zone_device *tz) { mutex_lock(&tz->lock); if (tz->passive) thermal_zone_device_set_polling(tz, tz->passive_delay); else if (tz->polling_delay) thermal_zone_device_set_polling(tz, tz->polling_delay); else 
thermal_zone_device_set_polling(tz, 0); mutex_unlock(&tz->lock); } static void handle_non_critical_trips(struct thermal_zone_device *tz, int trip, enum thermal_trip_type trip_type) { tz->governor ? tz->governor->throttle(tz, trip) : def_governor->throttle(tz, trip); } static void handle_critical_trips(struct thermal_zone_device *tz, int trip, enum thermal_trip_type trip_type) { long trip_temp; tz->ops->get_trip_temp(tz, trip, &trip_temp); /* If we have not crossed the trip_temp, we do not care. */ if (trip_temp <= 0 || tz->temperature < trip_temp) return; trace_thermal_zone_trip(tz, trip, trip_type); if (tz->ops->notify) tz->ops->notify(tz, trip, trip_type); if (trip_type == THERMAL_TRIP_CRITICAL) { dev_emerg(&tz->device, "critical temperature reached(%d C),shutting down\n", tz->temperature / 1000); orderly_poweroff(true); } } static void handle_thermal_trip(struct thermal_zone_device *tz, int trip) { enum thermal_trip_type type; tz->ops->get_trip_type(tz, trip, &type); if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT) handle_critical_trips(tz, trip, type); else handle_non_critical_trips(tz, trip, type); /* * Alright, we handled this trip successfully. * So, start monitoring again. */ monitor_thermal_zone(tz); } /** * thermal_zone_get_temp() - returns its the temperature of thermal zone * @tz: a valid pointer to a struct thermal_zone_device * @temp: a valid pointer to where to store the resulting temperature. * * When a valid thermal zone reference is passed, it will fetch its * temperature and fill @temp. 
 *
 * Return: On success returns 0, an error code otherwise
 */
int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
{
	int ret = -EINVAL;
#ifdef CONFIG_THERMAL_EMULATION
	int count;
	unsigned long crit_temp = -1UL;
	enum thermal_trip_type type;
#endif

	if (!tz || IS_ERR(tz) || !tz->ops->get_temp)
		goto exit;

	mutex_lock(&tz->lock);

	ret = tz->ops->get_temp(tz, temp);
#ifdef CONFIG_THERMAL_EMULATION
	/*
	 * The emulated temperature overrides the real reading only while
	 * the zone stays below its critical trip temperature.
	 */
	if (!tz->emul_temperature)
		goto skip_emul;

	for (count = 0; count < tz->trips; count++) {
		ret = tz->ops->get_trip_type(tz, count, &type);
		if (!ret && type == THERMAL_TRIP_CRITICAL) {
			ret = tz->ops->get_trip_temp(tz, count, &crit_temp);
			break;
		}
	}

	if (ret)
		goto skip_emul;

	if (*temp < crit_temp)
		*temp = tz->emul_temperature;
skip_emul:
#endif
	mutex_unlock(&tz->lock);
exit:
	return ret;
}
EXPORT_SYMBOL_GPL(thermal_zone_get_temp);

/*
 * Refresh tz->temperature from the sensor, remembering the previous
 * reading in tz->last_temperature so trends can be derived.
 */
static void update_temperature(struct thermal_zone_device *tz)
{
	long temp;
	int ret;

	ret = thermal_zone_get_temp(tz, &temp);
	if (ret) {
		dev_warn(&tz->device, "failed to read out thermal zone %d\n",
			 tz->id);
		return;
	}

	mutex_lock(&tz->lock);
	tz->last_temperature = tz->temperature;
	tz->temperature = temp;
	mutex_unlock(&tz->lock);

	trace_thermal_temperature(tz);
	dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
		tz->last_temperature, tz->temperature);
}

/*
 * Re-read the zone temperature and run the trip handlers.  No-op for
 * zones without a ->get_temp() callback.
 */
void thermal_zone_device_update(struct thermal_zone_device *tz)
{
	int count;

	if (!tz->ops->get_temp)
		return;

	update_temperature(tz);

	for (count = 0; count < tz->trips; count++)
		handle_thermal_trip(tz, count);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_update);

/* Delayed-work callback driving periodic zone polling. */
static void thermal_zone_device_check(struct work_struct *work)
{
	struct thermal_zone_device *tz = container_of(work, struct
						      thermal_zone_device,
						      poll_queue.work);
	thermal_zone_device_update(tz);
}

/* sys I/F for thermal zone */

#define to_thermal_zone(_dev) \
	container_of(_dev, struct thermal_zone_device, device)

/* sysfs "type" attribute: report the zone's type string. */
static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct
thermal_zone_device *tz = to_thermal_zone(dev);

	return sprintf(buf, "%s\n", tz->type);
}

/* sysfs "temp" attribute: report the current temperature. */
static ssize_t
temp_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	long temperature;
	int ret;

	ret = thermal_zone_get_temp(tz, &temperature);

	if (ret)
		return ret;

	return sprintf(buf, "%ld\n", temperature);
}

/* sysfs "mode" attribute: report enabled/disabled via ->get_mode(). */
static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	enum thermal_device_mode mode;
	int result;

	if (!tz->ops->get_mode)
		return -EPERM;

	result = tz->ops->get_mode(tz, &mode);
	if (result)
		return result;

	return sprintf(buf, "%s\n", mode == THERMAL_DEVICE_ENABLED ? "enabled"
		       : "disabled");
}

/* sysfs "mode" store: accept "enabled"/"disabled" and forward to the driver. */
static ssize_t
mode_store(struct device *dev, struct device_attribute *attr,
	   const char *buf, size_t count)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	int result;

	if (!tz->ops->set_mode)
		return -EPERM;

	/* Prefix match, so a trailing newline from userspace is tolerated. */
	if (!strncmp(buf, "enabled", sizeof("enabled") - 1))
		result = tz->ops->set_mode(tz, THERMAL_DEVICE_ENABLED);
	else if (!strncmp(buf, "disabled", sizeof("disabled") - 1))
		result = tz->ops->set_mode(tz, THERMAL_DEVICE_DISABLED);
	else
		result = -EINVAL;

	if (result)
		return result;

	return count;
}

/*
 * sysfs "trip_point_%d_type" attribute: the trip index is recovered from
 * the attribute's own name.
 */
static ssize_t
trip_point_type_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	enum thermal_trip_type type;
	int trip, result;

	if (!tz->ops->get_trip_type)
		return -EPERM;

	if (!sscanf(attr->attr.name, "trip_point_%d_type", &trip))
		return -EINVAL;

	result = tz->ops->get_trip_type(tz, trip, &type);
	if (result)
		return result;

	switch (type) {
	case THERMAL_TRIP_CRITICAL:
		return sprintf(buf, "critical\n");
	case THERMAL_TRIP_HOT:
		return sprintf(buf, "hot\n");
	case THERMAL_TRIP_PASSIVE:
		return sprintf(buf, "passive\n");
	case THERMAL_TRIP_ACTIVE:
		return sprintf(buf, "active\n");
	default:
		return sprintf(buf, "unknown\n");
	}
}

static ssize_t
trip_point_temp_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	int trip, ret;
	unsigned long temperature;

	if (!tz->ops->set_trip_temp)
		return -EPERM;

	/* Trip index is encoded in the attribute name. */
	if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
		return -EINVAL;

	if (kstrtoul(buf, 10, &temperature))
		return -EINVAL;

	ret = tz->ops->set_trip_temp(tz, trip, temperature);

	return ret ? ret : count;
}

/* sysfs "trip_point_%d_temp" attribute: report a trip temperature. */
static ssize_t
trip_point_temp_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	int trip, ret;
	long temperature;

	if (!tz->ops->get_trip_temp)
		return -EPERM;

	if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
		return -EINVAL;

	ret = tz->ops->get_trip_temp(tz, trip, &temperature);

	if (ret)
		return ret;

	return sprintf(buf, "%ld\n", temperature);
}

/* sysfs "trip_point_%d_hyst" store: set a trip point's hysteresis. */
static ssize_t
trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	int trip, ret;
	unsigned long temperature;

	if (!tz->ops->set_trip_hyst)
		return -EPERM;

	if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip))
		return -EINVAL;

	if (kstrtoul(buf, 10, &temperature))
		return -EINVAL;

	/*
	 * We are not doing any check on the 'temperature' value
	 * here. The driver implementing 'set_trip_hyst' has to
	 * take care of this.
	 */
	ret = tz->ops->set_trip_hyst(tz, trip, temperature);

	return ret ? ret : count;
}

/* sysfs "trip_point_%d_hyst" attribute: report a trip point's hysteresis. */
static ssize_t
trip_point_hyst_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	int trip, ret;
	unsigned long temperature;

	if (!tz->ops->get_trip_hyst)
		return -EPERM;

	if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip))
		return -EINVAL;

	ret = tz->ops->get_trip_hyst(tz, trip, &temperature);

	return ret ?
ret : sprintf(buf, "%ld\n", temperature);
}

/*
 * sysfs "passive" store: force passive cooling at the given trip
 * temperature (in millicelsius) by binding every "Processor" cooling
 * device, or undo that when 0 is written.
 */
static ssize_t
passive_store(struct device *dev, struct device_attribute *attr,
	      const char *buf, size_t count)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	struct thermal_cooling_device *cdev = NULL;
	int state;

	if (!sscanf(buf, "%d\n", &state))
		return -EINVAL;

	/* sanity check: values below 1000 millicelsius don't make sense
	 * and can cause the system to go into a thermal heart attack
	 */
	if (state && state < 1000)
		return -EINVAL;

	if (state && !tz->forced_passive) {
		mutex_lock(&thermal_list_lock);
		list_for_each_entry(cdev, &thermal_cdev_list, node) {
			if (!strncmp("Processor", cdev->type,
				     sizeof("Processor")))
				thermal_zone_bind_cooling_device(tz,
						THERMAL_TRIPS_NONE, cdev,
						THERMAL_NO_LIMIT,
						THERMAL_NO_LIMIT);
		}
		mutex_unlock(&thermal_list_lock);
		if (!tz->passive_delay)
			tz->passive_delay = 1000;
	} else if (!state && tz->forced_passive) {
		mutex_lock(&thermal_list_lock);
		list_for_each_entry(cdev, &thermal_cdev_list, node) {
			if (!strncmp("Processor", cdev->type,
				     sizeof("Processor")))
				thermal_zone_unbind_cooling_device(tz,
						THERMAL_TRIPS_NONE,
						cdev);
		}
		mutex_unlock(&thermal_list_lock);
		tz->passive_delay = 0;
	}

	tz->forced_passive = state;

	thermal_zone_device_update(tz);

	return count;
}

/* sysfs "passive" attribute: report the forced-passive trip temperature. */
static ssize_t
passive_show(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);

	return sprintf(buf, "%d\n", tz->forced_passive);
}

/* sysfs "policy" store: switch the zone to a registered governor by name. */
static ssize_t
policy_store(struct device *dev, struct device_attribute *attr,
	     const char *buf, size_t count)
{
	int ret = -EINVAL;
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	struct thermal_governor *gov;
	char name[THERMAL_NAME_LENGTH];

	snprintf(name, sizeof(name), "%s", buf);

	mutex_lock(&thermal_governor_lock);
	mutex_lock(&tz->lock);

	/* strim() drops the trailing newline from the sysfs write. */
	gov = __find_governor(strim(name));
	if (!gov)
		goto exit;

	tz->governor = gov;
	ret = count;

exit:
	mutex_unlock(&tz->lock);
	mutex_unlock(&thermal_governor_lock);

	return ret;
}

static ssize_t
policy_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);

	return sprintf(buf, "%s\n", tz->governor->name);
}

#ifdef CONFIG_THERMAL_EMULATION
/*
 * sysfs "emul_temp" store: inject an emulated temperature, either through
 * the driver's ->set_emul_temp() or, absent that, in software via
 * tz->emul_temperature (consumed by thermal_zone_get_temp()).
 */
static ssize_t
emul_temp_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	int ret = 0;
	unsigned long temperature;

	if (kstrtoul(buf, 10, &temperature))
		return -EINVAL;

	if (!tz->ops->set_emul_temp) {
		mutex_lock(&tz->lock);
		tz->emul_temperature = temperature;
		mutex_unlock(&tz->lock);
	} else {
		ret = tz->ops->set_emul_temp(tz, temperature);
	}

	if (!ret)
		thermal_zone_device_update(tz);

	return ret ? ret : count;
}
static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
#endif/*CONFIG_THERMAL_EMULATION*/

static DEVICE_ATTR(type, 0444, type_show, NULL);
static DEVICE_ATTR(temp, 0444, temp_show, NULL);
static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store);
static DEVICE_ATTR(policy, S_IRUGO | S_IWUSR, policy_show, policy_store);

/* sys I/F for cooling device */
#define to_cooling_device(_dev)	\
	container_of(_dev, struct thermal_cooling_device, device)

/* sysfs "type" attribute of a cooling device. */
static ssize_t
thermal_cooling_device_type_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct thermal_cooling_device *cdev = to_cooling_device(dev);

	return sprintf(buf, "%s\n", cdev->type);
}

/* sysfs "max_state" attribute: report the deepest cooling state. */
static ssize_t
thermal_cooling_device_max_state_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct thermal_cooling_device *cdev = to_cooling_device(dev);
	unsigned long state;
	int ret;

	ret = cdev->ops->get_max_state(cdev, &state);
	if (ret)
		return ret;
	return sprintf(buf, "%ld\n", state);
}

/* sysfs "cur_state" attribute: report the current cooling state. */
static ssize_t
thermal_cooling_device_cur_state_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct thermal_cooling_device *cdev = to_cooling_device(dev);
	unsigned long state;
	int ret;

	ret =
cdev->ops->get_cur_state(cdev, &state);
	if (ret)
		return ret;
	return sprintf(buf, "%ld\n", state);
}

/* sysfs "cur_state" store: force a cooling state through the driver. */
static ssize_t
thermal_cooling_device_cur_state_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct thermal_cooling_device *cdev = to_cooling_device(dev);
	unsigned long state;
	int result;

	if (!sscanf(buf, "%ld\n", &state))
		return -EINVAL;

	/* %ld parses a sign; reject negative values explicitly. */
	if ((long)state < 0)
		return -EINVAL;

	result = cdev->ops->set_cur_state(cdev, state);
	if (result)
		return result;
	return count;
}

static struct device_attribute dev_attr_cdev_type =
__ATTR(type, 0444, thermal_cooling_device_type_show, NULL);
static DEVICE_ATTR(max_state, 0444,
		   thermal_cooling_device_max_state_show, NULL);
static DEVICE_ATTR(cur_state, 0644,
		   thermal_cooling_device_cur_state_show,
		   thermal_cooling_device_cur_state_store);

/*
 * Per-binding "cdev%d_trip_point" attribute: report which trip point a
 * cooling device is bound to (-1 for THERMAL_TRIPS_NONE).
 */
static ssize_t
thermal_cooling_device_trip_point_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct thermal_instance *instance;

	instance =
	    container_of(attr, struct thermal_instance, attr);

	if (instance->trip == THERMAL_TRIPS_NONE)
		return sprintf(buf, "-1\n");
	else
		return sprintf(buf, "%d\n", instance->trip);
}

/* Device management */

/**
 * thermal_zone_bind_cooling_device() - bind a cooling device to a thermal zone
 * @tz:		pointer to struct thermal_zone_device
 * @trip:	indicates which trip point the cooling devices is
 *		associated with in this thermal zone.
 * @cdev:	pointer to struct thermal_cooling_device
 * @upper:	the Maximum cooling state for this trip point.
 *		THERMAL_NO_LIMIT means no upper limit,
 *		and the cooling device can be in max_state.
 * @lower:	the Minimum cooling state can be used for this trip point.
 *		THERMAL_NO_LIMIT means no lower limit,
 *		and the cooling device can be in cooling state 0.
 *
 * This interface function bind a thermal cooling device to the certain trip
 * point of a thermal zone device.
 * This function is usually called in the thermal zone device .bind callback.
 *
 * Return: 0 on success, the proper error value otherwise.
 */
int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
				     int trip,
				     struct thermal_cooling_device *cdev,
				     unsigned long upper, unsigned long lower)
{
	struct thermal_instance *dev;
	struct thermal_instance *pos;
	struct thermal_zone_device *pos1;
	struct thermal_cooling_device *pos2;
	unsigned long max_state;
	int result, ret;

	if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE))
		return -EINVAL;

	/* Both @tz and @cdev must be currently registered. */
	list_for_each_entry(pos1, &thermal_tz_list, node) {
		if (pos1 == tz)
			break;
	}
	list_for_each_entry(pos2, &thermal_cdev_list, node) {
		if (pos2 == cdev)
			break;
	}

	if (tz != pos1 || cdev != pos2)
		return -EINVAL;

	ret = cdev->ops->get_max_state(cdev, &max_state);
	if (ret)
		return ret;

	/* lower default 0, upper default max_state */
	lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
	upper = upper == THERMAL_NO_LIMIT ? max_state : upper;

	if (lower > upper || upper > max_state)
		return -EINVAL;

	dev =
	    kzalloc(sizeof(struct thermal_instance), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->tz = tz;
	dev->cdev = cdev;
	dev->trip = trip;
	dev->upper = upper;
	dev->lower = lower;
	dev->target = THERMAL_NO_TARGET;

	result = get_idr(&tz->idr, &tz->lock, &dev->id);
	if (result)
		goto free_mem;

	/* Symlink from the zone to the cooling device... */
	sprintf(dev->name, "cdev%d", dev->id);
	result =
	    sysfs_create_link(&tz->device.kobj, &cdev->device.kobj, dev->name);
	if (result)
		goto release_idr;

	/* ...and a "cdev%d_trip_point" attribute for this binding. */
	sprintf(dev->attr_name, "cdev%d_trip_point", dev->id);
	sysfs_attr_init(&dev->attr.attr);
	dev->attr.attr.name = dev->attr_name;
	dev->attr.attr.mode = 0444;
	dev->attr.show = thermal_cooling_device_trip_point_show;
	result = device_create_file(&tz->device, &dev->attr);
	if (result)
		goto remove_symbol_link;

	mutex_lock(&tz->lock);
	mutex_lock(&cdev->lock);
	list_for_each_entry(pos, &tz->thermal_instances, tz_node)
	    if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
		result = -EEXIST;
		break;
	}
	if (!result) {
		list_add_tail(&dev->tz_node, &tz->thermal_instances);
		list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
	}
	mutex_unlock(&cdev->lock);
	mutex_unlock(&tz->lock);

	if (!result)
		return 0;

	/* goto-based unwind of everything acquired above. */
	device_remove_file(&tz->device, &dev->attr);
remove_symbol_link:
	sysfs_remove_link(&tz->device.kobj, dev->name);
release_idr:
	release_idr(&tz->idr, &tz->lock, dev->id);
free_mem:
	kfree(dev);
	return result;
}
EXPORT_SYMBOL_GPL(thermal_zone_bind_cooling_device);

/**
 * thermal_zone_unbind_cooling_device() - unbind a cooling device from a
 *					  thermal zone.
 * @tz:		pointer to a struct thermal_zone_device.
 * @trip:	indicates which trip point the cooling devices is
 *		associated with in this thermal zone.
 * @cdev:	pointer to a struct thermal_cooling_device.
 *
 * This interface function unbind a thermal cooling device from the certain
 * trip point of a thermal zone device.
 * This function is usually called in the thermal zone device .unbind callback.
 *
 * Return: 0 on success, the proper error value otherwise.
 */
int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
				       int trip,
				       struct thermal_cooling_device *cdev)
{
	struct thermal_instance *pos, *next;

	mutex_lock(&tz->lock);
	mutex_lock(&cdev->lock);
	list_for_each_entry_safe(pos, next, &tz->thermal_instances, tz_node) {
		if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
			list_del(&pos->tz_node);
			list_del(&pos->cdev_node);
			mutex_unlock(&cdev->lock);
			mutex_unlock(&tz->lock);
			goto unbind;
		}
	}
	mutex_unlock(&cdev->lock);
	mutex_unlock(&tz->lock);

	return -ENODEV;

unbind:
	/* sysfs teardown happens outside the locks. */
	device_remove_file(&tz->device, &pos->attr);
	sysfs_remove_link(&tz->device.kobj, pos->name);
	release_idr(&tz->idr, &tz->lock, pos->id);
	kfree(pos);
	return 0;
}
EXPORT_SYMBOL_GPL(thermal_zone_unbind_cooling_device);

/*
 * Device-model release callback shared by zones and cooling devices;
 * the device name prefix tells the two apart.
 */
static void thermal_release(struct device *dev)
{
	struct thermal_zone_device *tz;
	struct thermal_cooling_device *cdev;

	if (!strncmp(dev_name(dev), "thermal_zone",
		     sizeof("thermal_zone") - 1)) {
		tz = to_thermal_zone(dev);
		kfree(tz);
	} else if (!strncmp(dev_name(dev), "cooling_device",
			    sizeof("cooling_device") - 1)) {
		cdev =
to_cooling_device(dev);
		kfree(cdev);
	}
}

static struct class thermal_class = {
	.name = "thermal",
	.dev_release = thermal_release,
};

/**
 * __thermal_cooling_device_register() - register a new thermal cooling device
 * @np:		a pointer to a device tree node.
 * @type:	the thermal cooling device type.
 * @devdata:	device private data.
 * @ops:	standard thermal cooling devices callbacks.
 *
 * This interface function adds a new thermal cooling device (fan/processor/...)
 * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
 * to all the thermal zone devices registered at the same time.
 * It also gives the opportunity to link the cooling device to a device tree
 * node, so that it can be bound to a thermal zone created out of device tree.
 *
 * Return: a pointer to the created struct thermal_cooling_device or an
 * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
 */
static struct thermal_cooling_device *
__thermal_cooling_device_register(struct device_node *np,
				  char *type, void *devdata,
				  const struct thermal_cooling_device_ops *ops)
{
	struct thermal_cooling_device *cdev;
	int result;

	if (type && strlen(type) >= THERMAL_NAME_LENGTH)
		return ERR_PTR(-EINVAL);

	/* All three state callbacks are mandatory. */
	if (!ops || !ops->get_max_state || !ops->get_cur_state ||
	    !ops->set_cur_state)
		return ERR_PTR(-EINVAL);

	cdev = kzalloc(sizeof(struct thermal_cooling_device), GFP_KERNEL);
	if (!cdev)
		return ERR_PTR(-ENOMEM);

	result = get_idr(&thermal_cdev_idr, &thermal_idr_lock, &cdev->id);
	if (result) {
		kfree(cdev);
		return ERR_PTR(result);
	}

	strlcpy(cdev->type, type ? : "", sizeof(cdev->type));
	mutex_init(&cdev->lock);
	INIT_LIST_HEAD(&cdev->thermal_instances);
	cdev->np = np;
	cdev->ops = ops;
	cdev->updated = false;
	cdev->device.class = &thermal_class;
	cdev->devdata = devdata;
	dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
	result = device_register(&cdev->device);
	if (result) {
		release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
		kfree(cdev);
		return ERR_PTR(result);
	}

	/* sys I/F */
	if (type) {
		result = device_create_file(&cdev->device,
					    &dev_attr_cdev_type);
		if (result)
			goto unregister;
	}

	result = device_create_file(&cdev->device, &dev_attr_max_state);
	if (result)
		goto unregister;

	result = device_create_file(&cdev->device, &dev_attr_cur_state);
	if (result)
		goto unregister;

	/* Add 'this' new cdev to the global cdev list */
	mutex_lock(&thermal_list_lock);
	list_add(&cdev->node, &thermal_cdev_list);
	mutex_unlock(&thermal_list_lock);

	/* Update binding information for 'this' new cdev */
	bind_cdev(cdev);

	return cdev;

unregister:
	release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
	device_unregister(&cdev->device);
	return ERR_PTR(result);
}

/**
 * thermal_cooling_device_register() - register a new thermal cooling device
 * @type:	the thermal cooling device type.
 * @devdata:	device private data.
 * @ops:	standard thermal cooling devices callbacks.
 *
 * This interface function adds a new thermal cooling device (fan/processor/...)
 * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
 * to all the thermal zone devices registered at the same time.
 *
 * Return: a pointer to the created struct thermal_cooling_device or an
 * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
 */
struct thermal_cooling_device *
thermal_cooling_device_register(char *type, void *devdata,
				const struct thermal_cooling_device_ops *ops)
{
	/* No device-tree node: plain registration. */
	return __thermal_cooling_device_register(NULL, type, devdata, ops);
}
EXPORT_SYMBOL_GPL(thermal_cooling_device_register);

/**
 * thermal_of_cooling_device_register() - register an OF thermal cooling device
 * @np:		a pointer to a device tree node.
 * @type:	the thermal cooling device type.
 * @devdata:	device private data.
 * @ops:	standard thermal cooling devices callbacks.
 *
 * This function will register a cooling device with device tree node reference.
 * This interface function adds a new thermal cooling device (fan/processor/...)
 * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
 * to all the thermal zone devices registered at the same time.
 *
 * Return: a pointer to the created struct thermal_cooling_device or an
 * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
 */
struct thermal_cooling_device *
thermal_of_cooling_device_register(struct device_node *np,
				   char *type, void *devdata,
				   const struct thermal_cooling_device_ops *ops)
{
	return __thermal_cooling_device_register(np, type, devdata, ops);
}
EXPORT_SYMBOL_GPL(thermal_of_cooling_device_register);

/**
 * thermal_cooling_device_unregister - removes the registered thermal cooling device
 * @cdev:	the thermal cooling device to remove.
 *
 * thermal_cooling_device_unregister() must be called when the device is no
 * longer needed.
 */
void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
{
	int i;
	const struct thermal_zone_params *tzp;
	struct thermal_zone_device *tz;
	struct thermal_cooling_device *pos = NULL;

	if (!cdev)
		return;

	mutex_lock(&thermal_list_lock);
	list_for_each_entry(pos, &thermal_cdev_list, node)
	    if (pos == cdev)
		break;
	if (pos != cdev) {
		/* thermal cooling device not found */
		mutex_unlock(&thermal_list_lock);
		return;
	}
	list_del(&cdev->node);

	/* Unbind all thermal zones associated with 'this' cdev */
	list_for_each_entry(tz, &thermal_tz_list, node) {
		if (tz->ops->unbind) {
			tz->ops->unbind(tz, cdev);
			continue;
		}

		if (!tz->tzp || !tz->tzp->tbp)
			continue;

		tzp = tz->tzp;
		for (i = 0; i < tzp->num_tbps; i++) {
			if (tzp->tbp[i].cdev == cdev) {
				__unbind(tz, tzp->tbp[i].trip_mask, cdev);
				tzp->tbp[i].cdev = NULL;
			}
		}
	}

	mutex_unlock(&thermal_list_lock);

	if (cdev->type[0])
		device_remove_file(&cdev->device, &dev_attr_cdev_type);
	device_remove_file(&cdev->device, &dev_attr_max_state);
	device_remove_file(&cdev->device, &dev_attr_cur_state);

	release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
	device_unregister(&cdev->device);
	return;
}
EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister);

/*
 * Push the deepest cooling state requested by any bound zone to the
 * hardware; a no-op while cdev->updated is still true.
 */
void thermal_cdev_update(struct thermal_cooling_device *cdev)
{
	struct thermal_instance *instance;
	unsigned long target = 0;

	/* cooling device is updated*/
	if (cdev->updated)
		return;

	mutex_lock(&cdev->lock);
	/* Make sure cdev enters the deepest cooling state */
	list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) {
		dev_dbg(&cdev->device, "zone%d->target=%lu\n",
			instance->tz->id, instance->target);
		if (instance->target == THERMAL_NO_TARGET)
			continue;
		if (instance->target > target)
			target = instance->target;
	}
	mutex_unlock(&cdev->lock);
	cdev->ops->set_cur_state(cdev, target);
	cdev->updated = true;
	trace_cdev_update(cdev, target);
	dev_dbg(&cdev->device, "set to state %lu\n", target);
}
EXPORT_SYMBOL(thermal_cdev_update);

/**
 * thermal_notify_framework - Sensor drivers use
this API to notify framework
 * @tz:		thermal zone device
 * @trip:	indicates which trip point has been crossed
 *
 * This function handles the trip events from sensor drivers. It starts
 * throttling the cooling devices according to the policy configured.
 * For CRITICAL and HOT trip points, this notifies the respective drivers,
 * and does actual throttling for other trip points i.e ACTIVE and PASSIVE.
 * The throttling policy is based on the configured platform data; if no
 * platform data is provided, this uses the step_wise throttling policy.
 */
void thermal_notify_framework(struct thermal_zone_device *tz, int trip)
{
	handle_thermal_trip(tz, trip);
}
EXPORT_SYMBOL_GPL(thermal_notify_framework);

/**
 * create_trip_attrs() - create attributes for trip points
 * @tz:		the thermal zone device
 * @mask:	Writeable trip point bitmap.
 *
 * helper function to instantiate sysfs entries for every trip
 * point and its properties of a struct thermal_zone_device.
 *
 * Return: 0 on success, the proper error value otherwise.
 */
static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
{
	int indx;
	int size = sizeof(struct thermal_attr) * tz->trips;

	tz->trip_type_attrs = kzalloc(size, GFP_KERNEL);
	if (!tz->trip_type_attrs)
		return -ENOMEM;

	tz->trip_temp_attrs = kzalloc(size, GFP_KERNEL);
	if (!tz->trip_temp_attrs) {
		kfree(tz->trip_type_attrs);
		return -ENOMEM;
	}

	/* Hysteresis attributes only exist when the driver supports them. */
	if (tz->ops->get_trip_hyst) {
		tz->trip_hyst_attrs = kzalloc(size, GFP_KERNEL);
		if (!tz->trip_hyst_attrs) {
			kfree(tz->trip_type_attrs);
			kfree(tz->trip_temp_attrs);
			return -ENOMEM;
		}
	}

	for (indx = 0; indx < tz->trips; indx++) {
		/* create trip type attribute */
		snprintf(tz->trip_type_attrs[indx].name, THERMAL_NAME_LENGTH,
			 "trip_point_%d_type", indx);

		sysfs_attr_init(&tz->trip_type_attrs[indx].attr.attr);
		tz->trip_type_attrs[indx].attr.attr.name =
						tz->trip_type_attrs[indx].name;
		tz->trip_type_attrs[indx].attr.attr.mode = S_IRUGO;
		tz->trip_type_attrs[indx].attr.show = trip_point_type_show;

		device_create_file(&tz->device,
				   &tz->trip_type_attrs[indx].attr);

		/* create trip temp attribute */
		snprintf(tz->trip_temp_attrs[indx].name, THERMAL_NAME_LENGTH,
			 "trip_point_%d_temp", indx);

		sysfs_attr_init(&tz->trip_temp_attrs[indx].attr.attr);
		tz->trip_temp_attrs[indx].attr.attr.name =
						tz->trip_temp_attrs[indx].name;
		tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO;
		tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show;
		/* @mask selects which trips are writable from userspace. */
		if (mask & (1 << indx)) {
			tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR;
			tz->trip_temp_attrs[indx].attr.store =
							trip_point_temp_store;
		}

		device_create_file(&tz->device,
				   &tz->trip_temp_attrs[indx].attr);

		/* create Optional trip hyst attribute */
		if (!tz->ops->get_trip_hyst)
			continue;
		snprintf(tz->trip_hyst_attrs[indx].name, THERMAL_NAME_LENGTH,
			 "trip_point_%d_hyst", indx);

		sysfs_attr_init(&tz->trip_hyst_attrs[indx].attr.attr);
		tz->trip_hyst_attrs[indx].attr.attr.name =
					tz->trip_hyst_attrs[indx].name;
		tz->trip_hyst_attrs[indx].attr.attr.mode = S_IRUGO;
		tz->trip_hyst_attrs[indx].attr.show = trip_point_hyst_show;
		if (tz->ops->set_trip_hyst) {
			tz->trip_hyst_attrs[indx].attr.attr.mode |= S_IWUSR;
			tz->trip_hyst_attrs[indx].attr.store =
					trip_point_hyst_store;
		}

		device_create_file(&tz->device,
				   &tz->trip_hyst_attrs[indx].attr);
	}
	return 0;
}

/* Tear down everything create_trip_attrs() set up. */
static void remove_trip_attrs(struct thermal_zone_device *tz)
{
	int indx;

	for (indx = 0; indx < tz->trips; indx++) {
		device_remove_file(&tz->device,
				   &tz->trip_type_attrs[indx].attr);
		device_remove_file(&tz->device,
				   &tz->trip_temp_attrs[indx].attr);
		if (tz->ops->get_trip_hyst)
			device_remove_file(&tz->device,
					   &tz->trip_hyst_attrs[indx].attr);
	}
	/* trip_hyst_attrs stays NULL (zone is kzalloc'd) if never created. */
	kfree(tz->trip_type_attrs);
	kfree(tz->trip_temp_attrs);
	kfree(tz->trip_hyst_attrs);
}

/**
 * thermal_zone_device_register() - register a new thermal zone device
 * @type:	the thermal zone device type
 * @trips:	the number of trip points the thermal zone support
 * @mask:	a bit string indicating the writeablility of trip points
 * @devdata:	private device data
 * @ops:	standard thermal zone device callbacks
 * @tzp:	thermal zone platform parameters
 * @passive_delay: number of milliseconds to wait between polls when
 *		   performing passive cooling
 * @polling_delay: number of milliseconds to wait between polls when checking
 *		   whether trip points have been crossed (0 for interrupt
 *		   driven systems)
 *
 * This interface function adds a new thermal zone device (sensor) to
 * /sys/class/thermal folder as thermal_zone[0-*]. It tries to bind all the
 * thermal cooling devices registered at the same time.
 * thermal_zone_device_unregister() must be called when the device is no
 * longer needed. The passive cooling depends on the .get_trend() return value.
 *
 * Return: a pointer to the created struct thermal_zone_device or an
 * in case of error, an ERR_PTR. Caller must check return value with
 * IS_ERR*() helpers.
 */
struct thermal_zone_device *thermal_zone_device_register(const char *type,
	int trips, int mask, void *devdata,
	struct thermal_zone_device_ops *ops,
	const struct thermal_zone_params *tzp,
	int passive_delay, int polling_delay)
{
	struct thermal_zone_device *tz;
	enum thermal_trip_type trip_type;
	int result;
	int count;
	int passive = 0;

	if (type && strlen(type) >= THERMAL_NAME_LENGTH)
		return ERR_PTR(-EINVAL);

	/* mask must not select trips beyond the declared count. */
	if (trips > THERMAL_MAX_TRIPS || trips < 0 || mask >> trips)
		return ERR_PTR(-EINVAL);

	if (!ops)
		return ERR_PTR(-EINVAL);

	if (trips > 0 && (!ops->get_trip_type || !ops->get_trip_temp))
		return ERR_PTR(-EINVAL);

	tz = kzalloc(sizeof(struct thermal_zone_device), GFP_KERNEL);
	if (!tz)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&tz->thermal_instances);
	idr_init(&tz->idr);
	mutex_init(&tz->lock);
	result = get_idr(&thermal_tz_idr, &thermal_idr_lock, &tz->id);
	if (result) {
		kfree(tz);
		return ERR_PTR(result);
	}

	strlcpy(tz->type, type ? : "", sizeof(tz->type));
	tz->ops = ops;
	tz->tzp = tzp;
	tz->device.class = &thermal_class;
	tz->devdata = devdata;
	tz->trips = trips;
	tz->passive_delay = passive_delay;
	tz->polling_delay = polling_delay;

	dev_set_name(&tz->device, "thermal_zone%d", tz->id);
	result = device_register(&tz->device);
	if (result) {
		release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
		kfree(tz);
		return ERR_PTR(result);
	}

	/* sys I/F */
	if (type) {
		result = device_create_file(&tz->device, &dev_attr_type);
		if (result)
			goto unregister;
	}

	result = device_create_file(&tz->device, &dev_attr_temp);
	if (result)
		goto unregister;

	if (ops->get_mode) {
		result = device_create_file(&tz->device, &dev_attr_mode);
		if (result)
			goto unregister;
	}

	result = create_trip_attrs(tz, mask);
	if (result)
		goto unregister;

	/* Expose the "passive" knob only when no PASSIVE trip exists. */
	for (count = 0; count < trips; count++) {
		tz->ops->get_trip_type(tz, count, &trip_type);
		if (trip_type == THERMAL_TRIP_PASSIVE)
			passive = 1;
	}

	if (!passive) {
		result = device_create_file(&tz->device, &dev_attr_passive);
		if (result)
			goto unregister;
	}

#ifdef CONFIG_THERMAL_EMULATION
	result = device_create_file(&tz->device, &dev_attr_emul_temp);
	if (result)
		goto unregister;
#endif
	/* Create policy attribute */
	result = device_create_file(&tz->device, &dev_attr_policy);
	if (result)
		goto unregister;

	/* Update 'this' zone's governor information */
	mutex_lock(&thermal_governor_lock);

	if (tz->tzp)
		tz->governor = __find_governor(tz->tzp->governor_name);
	else
		tz->governor = def_governor;

	mutex_unlock(&thermal_governor_lock);

	if (!tz->tzp || !tz->tzp->no_hwmon) {
		result = thermal_add_hwmon_sysfs(tz);
		if (result)
			goto unregister;
	}

	mutex_lock(&thermal_list_lock);
	list_add_tail(&tz->node, &thermal_tz_list);
	mutex_unlock(&thermal_list_lock);

	/* Bind cooling devices for this zone */
	bind_tz(tz);

	INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);

	if (!tz->ops->get_temp)
		thermal_zone_device_set_polling(tz, 0);

	thermal_zone_device_update(tz);

	return tz;

unregister:
	release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
	device_unregister(&tz->device);
	return ERR_PTR(result);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_register);

/**
 * thermal_zone_device_unregister - removes the registered thermal zone device
 * @tz: the thermal zone device to remove
 */
void thermal_zone_device_unregister(struct thermal_zone_device *tz)
{
	int i;
	const struct thermal_zone_params *tzp;
	struct thermal_cooling_device *cdev;
	struct thermal_zone_device *pos = NULL;

	if (!tz)
		return;

	tzp = tz->tzp;

	mutex_lock(&thermal_list_lock);
	list_for_each_entry(pos, &thermal_tz_list, node)
	    if (pos == tz)
		break;
	if (pos != tz) {
		/* thermal zone device not found */
		mutex_unlock(&thermal_list_lock);
		return;
	}
	list_del(&tz->node);

	/* Unbind all cdevs associated with 'this' thermal zone */
	list_for_each_entry(cdev, &thermal_cdev_list, node) {
		if (tz->ops->unbind) {
			tz->ops->unbind(tz, cdev);
			continue;
		}

		if (!tzp || !tzp->tbp)
			break;

		for (i = 0; i < tzp->num_tbps; i++) {
			if (tzp->tbp[i].cdev == cdev) {
				__unbind(tz, tzp->tbp[i].trip_mask, cdev);
				tzp->tbp[i].cdev = NULL;
			}
} } mutex_unlock(&thermal_list_lock); thermal_zone_device_set_polling(tz, 0); if (tz->type[0]) device_remove_file(&tz->device, &dev_attr_type); device_remove_file(&tz->device, &dev_attr_temp); if (tz->ops->get_mode) device_remove_file(&tz->device, &dev_attr_mode); device_remove_file(&tz->device, &dev_attr_policy); remove_trip_attrs(tz); tz->governor = NULL; thermal_remove_hwmon_sysfs(tz); release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); idr_destroy(&tz->idr); mutex_destroy(&tz->lock); device_unregister(&tz->device); return; } EXPORT_SYMBOL_GPL(thermal_zone_device_unregister); /** * thermal_zone_get_zone_by_name() - search for a zone and returns its ref * @name: thermal zone name to fetch the temperature * * When only one zone is found with the passed name, returns a reference to it. * * Return: On success returns a reference to an unique thermal zone with * matching name equals to @name, an ERR_PTR otherwise (-EINVAL for invalid * paramenters, -ENODEV for not found and -EEXIST for multiple matches). 
*/ struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name) { struct thermal_zone_device *pos = NULL, *ref = ERR_PTR(-EINVAL); unsigned int found = 0; if (!name) goto exit; mutex_lock(&thermal_list_lock); list_for_each_entry(pos, &thermal_tz_list, node) if (!strncasecmp(name, pos->type, THERMAL_NAME_LENGTH)) { found++; ref = pos; } mutex_unlock(&thermal_list_lock); /* nothing has been found, thus an error code for it */ if (found == 0) ref = ERR_PTR(-ENODEV); else if (found > 1) /* Success only when an unique zone is found */ ref = ERR_PTR(-EEXIST); exit: return ref; } EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name); #ifdef CONFIG_NET static const struct genl_multicast_group thermal_event_mcgrps[] = { { .name = THERMAL_GENL_MCAST_GROUP_NAME, }, }; static struct genl_family thermal_event_genl_family = { .id = GENL_ID_GENERATE, .name = THERMAL_GENL_FAMILY_NAME, .version = THERMAL_GENL_VERSION, .maxattr = THERMAL_GENL_ATTR_MAX, .mcgrps = thermal_event_mcgrps, .n_mcgrps = ARRAY_SIZE(thermal_event_mcgrps), }; int thermal_generate_netlink_event(struct thermal_zone_device *tz, enum events event) { struct sk_buff *skb; struct nlattr *attr; struct thermal_genl_event *thermal_event; void *msg_header; int size; int result; static unsigned int thermal_event_seqnum; if (!tz) return -EINVAL; /* allocate memory */ size = nla_total_size(sizeof(struct thermal_genl_event)) + nla_total_size(0); skb = genlmsg_new(size, GFP_ATOMIC); if (!skb) return -ENOMEM; /* add the genetlink message header */ msg_header = genlmsg_put(skb, 0, thermal_event_seqnum++, &thermal_event_genl_family, 0, THERMAL_GENL_CMD_EVENT); if (!msg_header) { nlmsg_free(skb); return -ENOMEM; } /* fill the data */ attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT, sizeof(struct thermal_genl_event)); if (!attr) { nlmsg_free(skb); return -EINVAL; } thermal_event = nla_data(attr); if (!thermal_event) { nlmsg_free(skb); return -EINVAL; } memset(thermal_event, 0, sizeof(struct thermal_genl_event)); 
thermal_event->orig = tz->id; thermal_event->event = event; /* send multicast genetlink message */ result = genlmsg_end(skb, msg_header); if (result < 0) { nlmsg_free(skb); return result; } result = genlmsg_multicast(&thermal_event_genl_family, skb, 0, 0, GFP_ATOMIC); if (result) dev_err(&tz->device, "Failed to send netlink event:%d", result); return result; } EXPORT_SYMBOL_GPL(thermal_generate_netlink_event); static int genetlink_init(void) { return genl_register_family(&thermal_event_genl_family); } static void genetlink_exit(void) { genl_unregister_family(&thermal_event_genl_family); } #else /* !CONFIG_NET */ static inline int genetlink_init(void) { return 0; } static inline void genetlink_exit(void) {} #endif /* !CONFIG_NET */ static int __init thermal_register_governors(void) { int result; result = thermal_gov_step_wise_register(); if (result) return result; result = thermal_gov_fair_share_register(); if (result) return result; result = thermal_gov_bang_bang_register(); if (result) return result; return thermal_gov_user_space_register(); } static void thermal_unregister_governors(void) { thermal_gov_step_wise_unregister(); thermal_gov_fair_share_unregister(); thermal_gov_bang_bang_unregister(); thermal_gov_user_space_unregister(); } static int __init thermal_init(void) { int result; result = thermal_register_governors(); if (result) goto error; result = class_register(&thermal_class); if (result) goto unregister_governors; result = genetlink_init(); if (result) goto unregister_class; result = of_parse_thermal_zones(); if (result) goto exit_netlink; return 0; exit_netlink: genetlink_exit(); unregister_class: class_unregister(&thermal_class); unregister_governors: thermal_unregister_governors(); error: idr_destroy(&thermal_tz_idr); idr_destroy(&thermal_cdev_idr); mutex_destroy(&thermal_idr_lock); mutex_destroy(&thermal_list_lock); mutex_destroy(&thermal_governor_lock); return result; } static void __exit thermal_exit(void) { of_thermal_destroy_zones(); 
genetlink_exit(); class_unregister(&thermal_class); thermal_unregister_governors(); idr_destroy(&thermal_tz_idr); idr_destroy(&thermal_cdev_idr); mutex_destroy(&thermal_idr_lock); mutex_destroy(&thermal_list_lock); mutex_destroy(&thermal_governor_lock); } fs_initcall(thermal_init); module_exit(thermal_exit);
gpl-2.0
zidootech/zidoo-kodi-14.2
lib/cmyth/librefmem/alloc.c
64
12580
/* * Copyright (C) 2005-2006, Eric Lund, Jon Gettler * http://www.mvpmc.org/ * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * alloc.c - Memory management functions. The structures returned from * librefmem APIs are actually pointers to reference counted * blocks of memory. The functions provided here handle allocating * these blocks (strictly internally to the library), placing * holds on these blocks (publicly) and releasing holds (publicly). * * All pointer type return values, including strings are reference * counted. * * NOTE: Since reference counted pointers are used to move * these structures around, it is strictly forbidden to * modify the contents of a structure once its pointer has * been returned to a callerthrough an API function. This * means that all API functions that modify the contents * of a structure must copy the structure, modify the * copy, and return a pointer to the copy. It is safe to * copy pointers (as long as you hold them) everyone * follows this simple rule. There is no need for deep * copying of any structure. 
*/ #include <sys/types.h> #include <stdlib.h> #ifndef _MSC_VER #include <unistd.h> #endif #include <errno.h> #include <refmem/refmem.h> #include <refmem/atomic.h> #include <refmem_local.h> #include <string.h> #include <stdio.h> #ifdef DEBUG #include <assert.h> #define ALLOC_MAGIC 0xef37a45d #define GUARD_MAGIC 0xe3 #endif /* DEBUG */ /* Disable optimization on OSX ppc Compilation fails in release mode both with Apple gcc build 5490 and 5493 */ #if defined(__APPLE__) && defined(__ppc__) #pragma GCC optimization_level 0 #endif /* * struct refcounter * * Scope: PRIVATE (local to this module) * * Description: * * The structure used to manage references. One of these is prepended to every * allocation. It contains two key pieces of information: * * - The reference count on the structure or string attached to it * * - A pointer to a 'destroy' function (a destructor) to be called when * the last reference is released. This function facilitates tearing down * of any complex structures contained in the reference counted block. If * it is NULL, no function is called. * * NOTE: Make sure this has a word aligned length, as it will be placed * before each allocation and will affect the alignment of pointers. 
*/ typedef struct refcounter { #ifdef DEBUG unsigned int magic; struct refcounter *next; const char *file; const char *func; int line; #endif /* DEBUG */ mvp_atomic_t refcount; size_t length; ref_destroy_t destroy; } refcounter_t; #ifdef DEBUG typedef struct { unsigned char magic; } guard_t; #endif /* DEBUG */ #define REF_REFCNT(p) ((refcounter_t *)(((unsigned char *)(p)) - sizeof(refcounter_t))) #define REF_DATA(r) (((unsigned char *)(r)) + sizeof(refcounter_t)) #if defined(DEBUG) #define REF_ALLOC_BINS 101 static refcounter_t *ref_list[REF_ALLOC_BINS]; #endif /* DEBUG */ #if defined(DEBUG) static inline void ref_remove(refcounter_t *ref) { int bin; refcounter_t *r, *p; bin = ((uintptr_t)ref >> 2) % REF_ALLOC_BINS; r = ref_list[bin]; p = NULL; while (r && (r != ref)) { p = r; r = r->next; } assert(r == ref); if (p) { p->next = r->next; } else { ref_list[bin] = r->next; } } static inline void ref_add(refcounter_t *ref) { int bin; bin = ((uintptr_t)ref >> 2) % REF_ALLOC_BINS; ref->next = ref_list[bin]; ref_list[bin] = ref; } static struct alloc_type { const char *file; const char *func; int line; int count; } alloc_list[128]; void ref_alloc_show(void) { int i, j; int types = 0, bytes = 0, count = 0; refcounter_t *r; for (i=0; i<REF_ALLOC_BINS; i++) { r = ref_list[i]; while (r) { for (j=0; (j<types) && (j<128); j++) { if ((alloc_list[j].file == r->file) && (alloc_list[j].func == r->func) && (alloc_list[j].line == r->line)) { alloc_list[j].count++; break; } } if (j == types) { alloc_list[j].file = r->file; alloc_list[j].func = r->func; alloc_list[j].line = r->line; alloc_list[j].count = 1; types++; } bytes += r->length + sizeof(*r); count++; r = r->next; } } printf("refmem allocation count: %d\n", count); printf("refmem allocation bytes: %d\n", bytes); printf("refmem unique allocation types: %d\n", types); for (i=0; i<types; i++) { printf("ALLOC: %s %s():%d count %d\n", alloc_list[i].file, alloc_list[i].func, alloc_list[i].line, alloc_list[i].count); } } #else void 
ref_alloc_show(void) { } #endif /* DEBUG */ /* * ref_alloc(size_t len) * * Scope: PRIVATE (mapped to __ref_alloc) * * Description * * Allocate a reference counted block of data. * * Return Value: * * Success: A non-NULL pointer to a block of memory at least 'len' bytes long * and safely aligned. The block is reference counted and can be * released using ref_release(). * * Failure: A NULL pointer. */ void * __ref_alloc(size_t len, const char *file, const char *func, int line) { #ifdef DEBUG void *block = malloc(sizeof(refcounter_t) + len + sizeof(guard_t)); guard_t *guard; #else void *block = malloc(sizeof(refcounter_t) + len); #endif /* DEBUG */ void *ret = REF_DATA(block); refcounter_t *ref = (refcounter_t *)block; refmem_dbg(REF_DBG_DEBUG, "%s(%d, ret = %p, ref = %p) {\n", __FUNCTION__, len, ret, ref); if (block) { memset(block, 0, sizeof(refcounter_t) + len); mvp_atomic_set(&ref->refcount, 1); #ifdef DEBUG ref->magic = ALLOC_MAGIC; ref->file = file; ref->func = func; ref->line = line; guard = (guard_t*)((uintptr_t)block + sizeof(refcounter_t) + len); guard->magic = GUARD_MAGIC; ref_add(ref); #endif /* DEBUG */ ref->destroy = NULL; ref->length = len; refmem_dbg(REF_DBG_DEBUG, "%s(%d, ret = %p, ref = %p) }\n", __FUNCTION__, len, ret, ref); return ret; } refmem_dbg(REF_DBG_DEBUG, "%s(%d, ret = %p, ref = %p) !}\n", __FUNCTION__, len, ret, ref); return NULL; } /* * ref_realloc(void *p, size_t len) * * Scope: PRIVATE (mapped to __ref_realloc) * * Description * * Change the allocation size of a reference counted allocation. * * Return Value: * * Success: A non-NULL pointer to a block of memory at least 'len' bytes long * and safely aligned. The block is reference counted and can be * released using ref_release(). * * Failure: A NULL pointer. 
*/ void * ref_realloc(void *p, size_t len) { refcounter_t *ref = REF_REFCNT(p); void *ret = ref_alloc(len); refmem_dbg(REF_DBG_DEBUG, "%s(%d, ret = %p, ref = %p) {\n", __FUNCTION__, len, ret, ref); #ifdef DEBUG if(p) assert(ref->magic == ALLOC_MAGIC); #endif /* DEBUG */ if (p && ret) { memcpy(ret, p, ref->length); ref_set_destroy(ret, ref->destroy); } if (p) { ref_release(p); } refmem_dbg(REF_DBG_DEBUG, "%s(%d, ret = %p, ref = %p) }\n", __FUNCTION__, len, ret, ref); return ret; } /* * ref_set_destroy(void *block, ref_destroy_t func) * * Scope: PRIVATE (mapped to __ref_set_destroy) * * Description * * Set the destroy function for a block of data. The first argument * is a pointer to the data block (as returned by ref_alloc()). The * second argument is a pointer to the destroy function which, when * called, will be passed one argument, the pointer to the block (as * returned by ref_alloc()). The destroy function is * respsonsible for any cleanup needed prior to finally releasing the * memory holding the memory block. * * Return Value: NONE */ void ref_set_destroy(void *data, ref_destroy_t func) { void *block = REF_REFCNT(data); refcounter_t *ref = block; refmem_dbg(REF_DBG_DEBUG, "%s(%p, func = %p, ref = %p) {\n", __FUNCTION__, data, func, ref); #ifdef DEBUG assert(ref->magic == ALLOC_MAGIC); #endif /* DEBUG */ if (data) { ref->destroy = func; } refmem_dbg(REF_DBG_DEBUG, "%s(%p, func = %p, ref = %p) }\n", __FUNCTION__, data, func, ref); } /* * ref_strdup(char *str) * * Scope: PUBLIC * * Description * * Similar to the libc version of strdup() except that it returns a pointer * to a reference counted string. * * Return Value: * * Success: A non-NULL pointer to a reference counted string which can be * released using ref_release(). * * Failure: A NULL pointer. 
*/
char *
ref_strdup(char *str)
{
	size_t len;
	char *ret = NULL;

	refmem_dbg(REF_DBG_DEBUG, "%s(%p) {\n", __FUNCTION__, str);
	if (str) {
		/* 'len' includes room for the terminating NUL */
		len = strlen(str) + 1;
		ret = ref_alloc(len);
		if (ret) {
			strncpy(ret, str, len);
			/* strncpy copied the NUL already (len covers it);
			 * this is a defensive guarantee of termination */
			ret[len - 1] = '\0';
		}
		refmem_dbg(REF_DBG_DEBUG,
			   "%s str = %p[%s], len = %d, ret =%p\n",
			   __FUNCTION__, str, str, len, ret);
	}
	refmem_dbg(REF_DBG_DEBUG, "%s() }\n", __FUNCTION__);
	/* NULL input yields NULL output (no allocation attempted) */
	return ret;
}

/*
 * ref_hold(void *p)
 *
 * Scope: PUBLIC
 *
 * Description
 *
 * This is how holders of references to reference counted blocks take
 * additional references.  The argument is a pointer to a structure or
 * string returned from ref_alloc.  The structure's reference count
 * will be incremented and a pointer to that space returned.
 *
 * There is really no error condition possible, but if a NULL pointer
 * is passed in, a NULL is returned.
 *
 * NOTE: since this function operates outside of the space that is directly
 *       accessed by the pointer, if a pointer that was NOT allocated by
 *       ref_alloc() is provided, negative consequences are likely.
 *
 * Return Value: A pointer to the held space
 */
void *
ref_hold(void *p)
{
	/* NOTE(review): REF_REFCNT(p) is computed before the NULL check
	 * below; when p == NULL this is pointer arithmetic on NULL, which
	 * is formally undefined behavior even though 'block' is never
	 * dereferenced on that path — worth confirming/fixing upstream. */
	void *block = REF_REFCNT(p);
	refcounter_t *ref = block;
#ifdef DEBUG
	guard_t *guard;
#endif /* DEBUG */

	refmem_dbg(REF_DBG_DEBUG, "%s(%p) {\n", __FUNCTION__, p);
	if (p) {
#ifdef DEBUG
		/* verify header magic and trailing guard byte before use */
		assert(ref->magic == ALLOC_MAGIC);
		guard = (guard_t*)((uintptr_t)block +
				   sizeof(refcounter_t) + ref->length);
		assert(guard->magic == GUARD_MAGIC);
#endif /* DEBUG */
		mvp_atomic_inc(&ref->refcount);
	}
	refmem_dbg(REF_DBG_DEBUG, "%s(%p) }\n", __FUNCTION__, p);
	return p;
}

/*
 * ref_release(void *p)
 *
 * Scope: PUBLIC
 *
 * Description
 *
 * This is how holders of references to reference counted blocks release
 * those references.  The argument is a pointer to a structure or string
 * returned from a librefmem API function (or from ref_alloc).
The * structure's reference count will be decremented and, when it reaches zero * the structure's destroy function (if any) will be called and then the * memory block will be released. * * Return Value: NONE */ void ref_release(void *p) { void *block = REF_REFCNT(p); refcounter_t *ref = block; #ifdef DEBUG guard_t *guard; #endif /* DEBUG */ refmem_dbg(REF_DBG_DEBUG, "%s(%p) {\n", __FUNCTION__, p); if (p) { refmem_dbg(REF_DBG_DEBUG, "%s:%d %s(%p,ref = %p,refcount = %p,length = %d)\n", __FILE__, __LINE__, __FUNCTION__, p, ref, ref->refcount, ref->length); #ifdef DEBUG assert(ref->magic == ALLOC_MAGIC); guard = (guard_t*)((uintptr_t)block + sizeof(refcounter_t) + ref->length); assert(guard->magic == GUARD_MAGIC); #endif /* DEBUG */ if (mvp_atomic_dec_and_test(&ref->refcount)) { /* * Last reference, destroy the structure (if * there is a destroy function) and free the * block. */ if (ref->destroy) { ref->destroy(p); } refmem_dbg(REF_DBG_DEBUG, "%s:%d %s() -- free it\n", __FILE__, __LINE__, __FUNCTION__); #ifdef DEBUG ref->magic = 0; guard->magic = 0; #ifndef _WIN32 refmem_ref_remove(ref); #endif ref->next = NULL; #endif /* DEBUG */ free(block); } } refmem_dbg(REF_DBG_DEBUG, "%s(%p) }\n", __FUNCTION__, p); } #if defined(__APPLE__) && defined(__ppc__) #pragma GCC optimization_level reset #endif
gpl-2.0
fanatix/fanatix-core
dep/ACE_wrappers/ace/Module.cpp
64
7083
// $Id: Module.cpp 80826 2008-03-04 14:51:23Z wotte $ #ifndef ACE_MODULE_CPP #define ACE_MODULE_CPP #include "ace/Module.h" #if !defined (ACE_LACKS_PRAGMA_ONCE) # pragma once #endif /* ACE_LACKS_PRAGMA_ONCE */ #include "ace/Stream_Modules.h" #if !defined (__ACE_INLINE__) #include "ace/Module.inl" #endif /* __ACE_INLINE__ */ ACE_BEGIN_VERSIONED_NAMESPACE_DECL ACE_ALLOC_HOOK_DEFINE(ACE_Module) template <ACE_SYNCH_DECL> void ACE_Module<ACE_SYNCH_USE>::dump (void) const { #if defined (ACE_HAS_DUMP) ACE_TRACE ("ACE_Module<ACE_SYNCH_USE>::dump"); #endif /* ACE_HAS_DUMP */ } template <ACE_SYNCH_DECL> void ACE_Module<ACE_SYNCH_USE>::writer (ACE_Task<ACE_SYNCH_USE> *q, int flags /* = M_DELETE_WRITER */) { ACE_TRACE ("ACE_Module<ACE_SYNCH_USE>::writer"); // Close and maybe delete old writer this->close_i (1, flags); this->q_pair_[1] = q; if (q != 0) { ACE_CLR_BITS (q->flags_, ACE_Task_Flags::ACE_READER); // Set the q's module pointer to point to us. q->mod_ = this; } // Don't allow the caller to change the reader status. ACE_SET_BITS (flags_, (flags & M_DELETE_WRITER)); } template <ACE_SYNCH_DECL> void ACE_Module<ACE_SYNCH_USE>::reader (ACE_Task<ACE_SYNCH_USE> *q, int flags /* = M_DELETE_READER */) { ACE_TRACE ("ACE_Module<ACE_SYNCH_USE>::reader"); // Close and maybe delete old writer this->close_i (0, flags); this->q_pair_[0] = q; if (q != 0) { ACE_SET_BITS (q->flags_, ACE_Task_Flags::ACE_READER); // Set the q's module pointer to point to us. q->mod_ = this; } // don't allow the caller to change the reader status ACE_SET_BITS (flags_, (flags & M_DELETE_READER)); } // Link this ACE_Module on top of ACE_Module M. 
// Link this module in front of module 'm': chains the writer-side task
// of 'this' to m's writer, and m's reader-side task back to this reader.
template <ACE_SYNCH_DECL> void
ACE_Module<ACE_SYNCH_USE>::link (ACE_Module<ACE_SYNCH_USE> *m)
{
  ACE_TRACE ("ACE_Module<ACE_SYNCH_USE>::link");
  this->next (m);
  this->writer ()->next (m->writer ());
  m->reader ()->next (this->reader ());
}

// Initialize (or re-initialize) the module with a name, an optional
// writer task, an optional reader task, and caller-supplied argument.
// If either task pointer is 0 a pass-through ACE_Thru_Task is allocated
// in its place, and the corresponding M_DELETE_* flag is set so close()
// will delete it.  Returns 0 on success, -1 on allocation failure.
template <ACE_SYNCH_DECL> int
ACE_Module<ACE_SYNCH_USE>::open (const ACE_TCHAR *mod_name,
                                 ACE_Task<ACE_SYNCH_USE> *writer_q,
                                 ACE_Task<ACE_SYNCH_USE> *reader_q,
                                 void *arg,
                                 int flags /* = M_DELETE */)
{
  ACE_TRACE ("ACE_Module<ACE_SYNCH_USE>::open");
  this->name (mod_name);
  this->arg_ = arg;

  // We may already have readers and/or writers from a previous open();
  // close them (deleting per the stored delete-policy bits).
  if (this->reader ())
    this->close_i (0, M_DELETE_READER);
  if (this->writer ())
    this->close_i (1, M_DELETE_WRITER);

  // Substitute pass-through tasks for any missing endpoints.
  // NOTE(review): ACE_NEW_RETURN returns -1 from this function on
  // allocation failure, so the writer_q/reader_q NULL re-check further
  // down appears to be defensive only — confirm against ACE_NEW_RETURN's
  // definition before removing it.
  if (writer_q == 0)
    {
      ACE_NEW_RETURN (writer_q,
                      ACE_Thru_Task<ACE_SYNCH_USE>,
                      -1);
      ACE_SET_BITS (flags, M_DELETE_WRITER);
    }

  if (reader_q == 0)
    {
      ACE_NEW_RETURN (reader_q,
                      ACE_Thru_Task<ACE_SYNCH_USE>,
                      -1);
      ACE_SET_BITS (flags, M_DELETE_READER);
    }

  this->reader (reader_q);
  this->writer (writer_q);

  // Save the flags
  this->flags_ = flags;

  // Make sure that the memory is allocated before proceding.
  if (writer_q == 0 || reader_q == 0)
    {
      // These calls will delete writer_q and/or reader_q, if
      // necessary.
      this->close_i (0, M_DELETE_READER);
      this->close_i (1, M_DELETE_WRITER);
      errno = ENOMEM;
      return -1;
    }

  // Setup back pointers (this must come last, after we've made sure
  // there's memory allocated here.
  reader_q->mod_ = this;
  writer_q->mod_ = this;
  return 0;
}

// Set and get pointer to sibling ACE_Task in ACE_Module.
template <ACE_SYNCH_DECL> ACE_Task<ACE_SYNCH_USE> * ACE_Module<ACE_SYNCH_USE>::sibling (ACE_Task<ACE_SYNCH_USE> *orig) { ACE_TRACE ("ACE_Module<ACE_SYNCH_USE>::sibling"); if (this->q_pair_[0] == orig) return this->q_pair_[1]; else if (this->q_pair_[1] == orig) return this->q_pair_[0]; else return 0; } template <ACE_SYNCH_DECL> ACE_Module<ACE_SYNCH_USE>::ACE_Module (void) : flags_ (M_FLAGS_NOT_SET) { ACE_TRACE ("ACE_Module<ACE_SYNCH_USE>::ACE_Module"); this->name (ACE_TEXT ("<unknown>")); // Do nothing... this->q_pair_[0] = 0; this->q_pair_[1] = 0; } template <ACE_SYNCH_DECL> ACE_Module<ACE_SYNCH_USE>::~ACE_Module (void) { ACE_TRACE ("ACE_Module<ACE_SYNCH_USE>::~ACE_Module"); // Only close down if we haven't already done so. if (this->reader () || this->writer ()) this->close (); } template <ACE_SYNCH_DECL> ACE_Module<ACE_SYNCH_USE>::ACE_Module (const ACE_TCHAR *mod_name, ACE_Task<ACE_SYNCH_USE> *writer_q, ACE_Task<ACE_SYNCH_USE> *reader_q, void *args, int flags /* = M_DELETE */) : flags_ (M_FLAGS_NOT_SET) { ACE_TRACE ("ACE_Module<ACE_SYNCH_USE>::ACE_Module"); this->q_pair_[0] = 0; this->q_pair_[1] = 0; if (this->open (mod_name, writer_q, reader_q, args, flags) == -1) ACE_ERROR ((LM_ERROR, ACE_TEXT ("%p\n"), ACE_TEXT ("ACE_Module"))); } template <ACE_SYNCH_DECL> int ACE_Module<ACE_SYNCH_USE>::close (int flags /* = M_DELETE_NONE */) { ACE_TRACE ("ACE_Module<ACE_SYNCH_USE>::close"); int result = 0; // Only pay attention to the flags parameter if we haven't already // set the task delete policies. 
if (this->flags_ == M_FLAGS_NOT_SET) ACE_SET_BITS (flags_, flags); if (this->close_i (0, flags_) == -1) result = -1; if (this->close_i (1, flags_) == -1) result = -1; return result; } template <ACE_SYNCH_DECL> int ACE_Module<ACE_SYNCH_USE>::close_i (int which, int flags) { ACE_TRACE ("ACE_Module<ACE_SYNCH_USE>::close_i"); if (this->q_pair_[which] == 0) return 0; // Copy task pointer to prevent problems when ACE_Task::close // changes the task pointer ACE_Task<ACE_SYNCH_USE> *task = this->q_pair_[which]; // Change so that close doesn't get called again from the task base. // Now close the task. int result = 0; if (task->module_closed () == -1) result = -1; task->flush (); task->next (0); // Should we also delete it ? if (flags != M_DELETE_NONE && ACE_BIT_ENABLED (flags_, which + 1)) { // Only delete the Tasks if there aren't any more threads // running in them. task->wait (); // If this assert happens it is likely because the task was // activated with the THR_DETACHED flag, which means that we // can't join() with the thread. Not using THR_DETACHED should // solve this problem. ACE_ASSERT (task->thr_count () == 0); delete task; } // Set the tasks pointer to 0 so that we don't try to close() // this object again if the destructor gets called. this->q_pair_[which] = 0; // Finally remove the delete bit. ACE_CLR_BITS (flags_, which + 1); return result; } ACE_END_VERSIONED_NAMESPACE_DECL #endif /* ACE_MODULE_CPP */
gpl-2.0
StNick/android_kernel_samsung_lt03lte
drivers/input/touchscreen/cyttsp5/cyttsp5_device_access.c
320
14839
/* * cyttsp5_device_access.c * Cypress TrueTouch(TM) Standard Product V5 Device Access Module. * Configuration and Test command/status user interface. * For use with Cypress Txx5xx parts. * Supported parts include: * TMA5XX * * Copyright (C) 2012-2013 Cypress Semiconductor * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2, and only version 2, as published by the * Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Contact Cypress Semiconductor at www.cypress.com <ttdrivers@cypress.com> * */ #include "cyttsp5_regs.h" #define CYTTSP5_DEVICE_ACCESS_NAME "cyttsp5_device_access" #define CYTTSP5_INPUT_ELEM_SZ (sizeof("0xHH") + 1) #ifdef TTHE_TUNER_SUPPORT struct heatmap_param { bool scan_start; enum scan_data_type_list data_type; /* raw, base, diff */ int num_element; }; #endif #define CY_MAX_CONFIG_BYTES 256 #define CYTTSP5_TTHE_TUNER_GET_PANEL_DATA_FILE_NAME "get_panel_data" #define TTHE_TUNER_MAX_BUF (CY_MAX_PRBUF_SIZE * 3) struct cyttsp5_device_access_data { struct device *dev; struct cyttsp5_sysinfo *si; struct mutex sysfs_lock; u8 status; u16 response_length; bool sysfs_nodes_created; #ifdef TTHE_TUNER_SUPPORT struct heatmap_param heatmap; struct dentry *tthe_get_panel_data_debugfs; struct mutex debugfs_lock; u8 tthe_get_panel_data_buf[TTHE_TUNER_MAX_BUF]; u8 tthe_get_panel_data_is_open; #endif #ifdef VERBOSE_DEBUG u8 pr_buf[CY_MAX_PRBUF_SIZE]; #endif u8 ic_buf[CY_MAX_PRBUF_SIZE]; u8 response_buf[CY_MAX_PRBUF_SIZE]; }; static struct cyttsp5_core_commands *cmd; static inline struct cyttsp5_device_access_data *cyttsp5_get_device_access_data( struct device *dev) { return cyttsp5_get_dynamic_data(dev, CY_MODULE_DEVICE_ACCESS); } static ssize_t 
cyttsp5_status_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cyttsp5_device_access_data *dad = cyttsp5_get_device_access_data(dev); u8 val; mutex_lock(&dad->sysfs_lock); val = dad->status; mutex_unlock(&dad->sysfs_lock); return scnprintf(buf, CY_MAX_PRBUF_SIZE, "%d\n", val); } static DEVICE_ATTR(status, S_IRUSR | S_IWUSR, cyttsp5_status_show, NULL); static ssize_t cyttsp5_response_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cyttsp5_device_access_data *dad = cyttsp5_get_device_access_data(dev); int i; ssize_t num_read; int index; mutex_lock(&dad->sysfs_lock); index = scnprintf(buf, CY_MAX_PRBUF_SIZE, "Status %d\n", dad->status); if (!dad->status) goto error; num_read = dad->response_length; for (i = 0; i < num_read; i++) index += scnprintf(buf + index, CY_MAX_PRBUF_SIZE - index, "0x%02X\n", dad->response_buf[i]); index += scnprintf(buf + index, CY_MAX_PRBUF_SIZE - index, "(%d bytes)\n", num_read); error: mutex_unlock(&dad->sysfs_lock); return index; } static DEVICE_ATTR(response, S_IRUSR | S_IWUSR, cyttsp5_response_show, NULL); /* * Gets user input from sysfs and parse it * return size of parsed output buffer */ static int cyttsp5_ic_parse_input(struct device *dev, const char *buf, size_t buf_size, u8 *ic_buf, size_t ic_buf_size) { const char *pbuf = buf; unsigned long value; char scan_buf[CYTTSP5_INPUT_ELEM_SZ]; u32 i = 0; u32 j; int last = 0; int ret; dev_dbg(dev, "%s: pbuf=%p buf=%p size=%d %s=%d buf=%s\n", __func__, pbuf, buf, (int) buf_size, "scan buf size", CYTTSP5_INPUT_ELEM_SZ, buf); while (pbuf <= (buf + buf_size)) { if (i >= CY_MAX_CONFIG_BYTES) { dev_err(dev, "%s: %s size=%d max=%d\n", __func__, "Max cmd size exceeded", i, CY_MAX_CONFIG_BYTES); return -EINVAL; } if (i >= ic_buf_size) { dev_err(dev, "%s: %s size=%d buf_size=%d\n", __func__, "Buffer size exceeded", i, ic_buf_size); return -EINVAL; } while (((*pbuf == ' ') || (*pbuf == ',')) && (pbuf < (buf + buf_size))) { last = *pbuf; pbuf++; } 
if (pbuf >= (buf + buf_size)) break; memset(scan_buf, 0, CYTTSP5_INPUT_ELEM_SZ); if ((last == ',') && (*pbuf == ',')) { dev_err(dev, "%s: %s \",,\" not allowed.\n", __func__, "Invalid data format."); return -EINVAL; } for (j = 0; j < (CYTTSP5_INPUT_ELEM_SZ - 1) && (pbuf < (buf + buf_size)) && (*pbuf != ' ') && (*pbuf != ','); j++) { last = *pbuf; scan_buf[j] = *pbuf++; } ret = kstrtoul(scan_buf, 16, &value); if (ret < 0) { dev_err(dev, "%s: %s '%s' %s%s i=%d r=%d\n", __func__, "Invalid data format. ", scan_buf, "Use \"0xHH,...,0xHH\"", " instead.", i, ret); return ret; } ic_buf[i] = value; i++; } return i; } static ssize_t cyttsp5_command_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct cyttsp5_device_access_data *dad = cyttsp5_get_device_access_data(dev); ssize_t length; int rc; mutex_lock(&dad->sysfs_lock); dad->status = 0; dad->response_length = 0; length = cyttsp5_ic_parse_input(dev, buf, size, dad->ic_buf, CY_MAX_PRBUF_SIZE); if (length <= 0) { dev_err(dev, "%s: %s Group Data store\n", __func__, "Malformed input for"); goto exit; } /* write ic_buf to log */ cyttsp5_pr_buf(dev, dad->pr_buf, dad->ic_buf, length, "ic_buf"); /*pm_runtime_get_sync(dev);*/ rc = cmd->cmd->user_cmd(dev, 1, CY_MAX_PRBUF_SIZE, dad->response_buf, length, dad->ic_buf, &dad->response_length); /*pm_runtime_put(dev);*/ if (rc) { dad->response_length = 0; dev_err(dev, "%s: Failed to store command\n", __func__); } else { dad->status = 1; } exit: mutex_unlock(&dad->sysfs_lock); dev_vdbg(dev, "%s: return size=%d\n", __func__, size); return size; } static DEVICE_ATTR(command, S_IRUSR | S_IWUSR, NULL, cyttsp5_command_store); #ifdef TTHE_TUNER_SUPPORT /* * Execute scan command */ static int cyttsp5_exec_scan_cmd_(struct device *dev) { int rc; rc = cmd->cmd->exec_panel_scan(dev, 0); if (rc < 0) dev_err(dev, "%s: Heatmap start scan failed r=%d\n", __func__, rc); return rc; } /* * Retrieve panel data command */ static int cyttsp5_ret_scan_data_cmd_(struct 
device *dev, u16 read_offset, u16 read_count, u8 data_id, u8 *response, u8 *config, u16 *actual_read_len, u8 *return_buf) { int rc; rc = cmd->cmd->retrieve_panel_scan(dev, 0, read_offset, read_count, data_id, response, config, actual_read_len, return_buf); if (rc < 0) dev_err(dev, "%s: Retrieve scan data failed r=%d\n", __func__, rc); return rc; } static ssize_t tthe_get_panel_data_debugfs_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { struct cyttsp5_device_access_data *dad = filp->private_data; struct device *dev; u8 config; u16 actual_read_len; u16 length = 0; u8 element_size = 0; u8 *buf_offset; u8 *buf_out; int elem; int elem_offset = 0; int print_idx = 0; int rc; int rc1; int i; mutex_lock(&dad->debugfs_lock); dev = dad->dev; buf_out = dad->tthe_get_panel_data_buf; if (!buf_out) goto release_mutex; /*pm_runtime_get_sync(dev);*/ rc = cmd->request_exclusive(dev, CY_REQUEST_EXCLUSIVE_TIMEOUT); if (rc < 0) goto put_runtime; if (dad->heatmap.scan_start) { /* Start scan */ rc = cyttsp5_exec_scan_cmd_(dev); if (rc < 0) goto release_exclusive; } elem = dad->heatmap.num_element; rc = cyttsp5_ret_scan_data_cmd_(dev, elem_offset, elem, dad->heatmap.data_type, dad->ic_buf, &config, &actual_read_len, NULL); if (rc < 0) goto release_exclusive; length = get_unaligned_le16(&dad->ic_buf[0]); buf_offset = dad->ic_buf + length; element_size = config & CY_CMD_RET_PANEL_ELMNT_SZ_MASK; elem -= actual_read_len; elem_offset = actual_read_len; while (elem > 0) { rc = cyttsp5_ret_scan_data_cmd_(dev, elem_offset, elem, dad->heatmap.data_type, NULL, &config, &actual_read_len, buf_offset); if (rc < 0) goto release_exclusive; if (!actual_read_len) break; length += actual_read_len * element_size; buf_offset = dad->ic_buf + length; elem -= actual_read_len; elem_offset += actual_read_len; } /* Reconstruct cmd header */ put_unaligned_le16(length, &dad->ic_buf[0]); put_unaligned_le16(elem_offset, &dad->ic_buf[7]); release_exclusive: rc1 = cmd->release_exclusive(dev); 
put_runtime: /*pm_runtime_put(dev);*/ if (rc < 0) goto release_mutex; print_idx += scnprintf(buf_out, TTHE_TUNER_MAX_BUF, "CY_DATA:"); for (i = 0; i < length; i++) print_idx += scnprintf(buf_out + print_idx, TTHE_TUNER_MAX_BUF - print_idx, "%02X ", dad->ic_buf[i]); print_idx += scnprintf(buf_out + print_idx, TTHE_TUNER_MAX_BUF - print_idx, ":(%d bytes)\n", length); rc = simple_read_from_buffer(buf, count, ppos, buf_out, print_idx); print_idx = rc; release_mutex: mutex_unlock(&dad->debugfs_lock); return print_idx; } static ssize_t tthe_get_panel_data_debugfs_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { struct cyttsp5_device_access_data *dad = filp->private_data; struct device *dev = dad->dev; ssize_t length; int max_read; u8 *buf_in = dad->tthe_get_panel_data_buf; int ret; mutex_lock(&dad->debugfs_lock); ret = copy_from_user(buf_in + (*ppos), buf, count); if (ret) goto exit; buf_in[count] = 0; length = cyttsp5_ic_parse_input(dev, buf_in, count, dad->ic_buf, CY_MAX_PRBUF_SIZE); if (length <= 0) { dev_err(dev, "%s: %s Group Data store\n", __func__, "Malformed input for"); goto exit; } /* update parameter value */ dad->heatmap.num_element = get_unaligned_le16(&dad->ic_buf[3]); dad->heatmap.data_type = dad->ic_buf[5]; if (dad->ic_buf[6] > 0) dad->heatmap.scan_start = true; else dad->heatmap.scan_start = false; /* elem can not be bigger then buffer size */ max_read = CY_CMD_RET_PANEL_HDR; max_read += dad->heatmap.num_element * CY_CMD_RET_PANEL_ELMNT_SZ_MAX; if (max_read >= CY_MAX_PRBUF_SIZE) { dad->heatmap.num_element = (CY_MAX_PRBUF_SIZE - CY_CMD_RET_PANEL_HDR) / CY_CMD_RET_PANEL_ELMNT_SZ_MAX; dev_err(dev, "%s: Will get %d element\n", __func__, dad->heatmap.num_element); } exit: mutex_unlock(&dad->debugfs_lock); dev_vdbg(dev, "%s: return count=%d\n", __func__, count); return count; } static int tthe_get_panel_data_debugfs_open(struct inode *inode, struct file *filp) { struct cyttsp5_device_access_data *dad = inode->i_private; 
mutex_lock(&dad->debugfs_lock); if (dad->tthe_get_panel_data_is_open) { mutex_unlock(&dad->debugfs_lock); return -EBUSY; } filp->private_data = inode->i_private; dad->tthe_get_panel_data_is_open = 1; mutex_unlock(&dad->debugfs_lock); return 0; } static int tthe_get_panel_data_debugfs_close(struct inode *inode, struct file *filp) { struct cyttsp5_device_access_data *dad = filp->private_data; mutex_lock(&dad->debugfs_lock); filp->private_data = NULL; dad->tthe_get_panel_data_is_open = 0; mutex_unlock(&dad->debugfs_lock); return 0; } static const struct file_operations tthe_get_panel_data_fops = { .open = tthe_get_panel_data_debugfs_open, .release = tthe_get_panel_data_debugfs_close, .read = tthe_get_panel_data_debugfs_read, .write = tthe_get_panel_data_debugfs_write, }; #endif static int cyttsp5_setup_sysfs(struct device *dev) { struct cyttsp5_device_access_data *dad = cyttsp5_get_device_access_data(dev); int rc; rc = device_create_file(dev, &dev_attr_command); if (rc) { dev_err(dev, "%s: Error, could not create command\n", __func__); goto exit; } rc = device_create_file(dev, &dev_attr_status); if (rc) { dev_err(dev, "%s: Error, could not create status\n", __func__); goto unregister_command; } rc = device_create_file(dev, &dev_attr_response); if (rc) { dev_err(dev, "%s: Error, could not create response\n", __func__); goto unregister_status; } #ifdef TTHE_TUNER_SUPPORT dad->tthe_get_panel_data_debugfs = debugfs_create_file( CYTTSP5_TTHE_TUNER_GET_PANEL_DATA_FILE_NAME, 0644, NULL, dad, &tthe_get_panel_data_fops); if (IS_ERR_OR_NULL(dad->tthe_get_panel_data_debugfs)) { dev_err(dev, "%s: Error, could not create get_panel_data\n", __func__); dad->tthe_get_panel_data_debugfs = NULL; goto unregister_response; } #endif dad->sysfs_nodes_created = true; return rc; #ifdef TTHE_TUNER_SUPPORT unregister_response: device_remove_file(dev, &dev_attr_response); #endif unregister_status: device_remove_file(dev, &dev_attr_status); unregister_command: device_remove_file(dev, 
&dev_attr_command); exit: return rc; } static int cyttsp5_setup_sysfs_attention(struct device *dev) { struct cyttsp5_device_access_data *dad = cyttsp5_get_device_access_data(dev); int rc = 0; dad->si = cmd->request_sysinfo(dev); if (!dad->si) return -EINVAL; rc = cyttsp5_setup_sysfs(dev); cmd->unsubscribe_attention(dev, CY_ATTEN_STARTUP, CY_MODULE_DEVICE_ACCESS, cyttsp5_setup_sysfs_attention, 0); return rc; } int cyttsp5_device_access_probe(struct device *dev) { struct cyttsp5_core_data *cd = dev_get_drvdata(dev); struct cyttsp5_device_access_data *dad; int rc; cmd = cyttsp5_get_commands(); if (!cmd) return -EINVAL; dad = kzalloc(sizeof(*dad), GFP_KERNEL); if (!dad) { dev_err(dev, "%s: Error, kzalloc\n", __func__); rc = -ENOMEM; goto cyttsp5_device_access_probe_data_failed; } mutex_init(&dad->sysfs_lock); dad->dev = dev; #ifdef TTHE_TUNER_SUPPORT mutex_init(&dad->debugfs_lock); dad->heatmap.num_element = 200; #endif cd->cyttsp5_dynamic_data[CY_MODULE_DEVICE_ACCESS] = dad; /* get sysinfo */ dad->si = cmd->request_sysinfo(dev); if (dad->si) { rc = cyttsp5_setup_sysfs(dev); if (rc) goto cyttsp5_device_access_setup_sysfs_failed; } else { dev_err(dev, "%s: Fail get sysinfo pointer from core p=%p\n", __func__, dad->si); cmd->subscribe_attention(dev, CY_ATTEN_STARTUP, CY_MODULE_DEVICE_ACCESS, cyttsp5_setup_sysfs_attention, 0); } return 0; cyttsp5_device_access_setup_sysfs_failed: kfree(dad); cyttsp5_device_access_probe_data_failed: dev_err(dev, "%s failed.\n", __func__); return rc; } EXPORT_SYMBOL(cyttsp5_device_access_probe); int cyttsp5_device_access_release(struct device *dev) { struct cyttsp5_device_access_data *dad = cyttsp5_get_device_access_data(dev); if (dad->sysfs_nodes_created) { device_remove_file(dev, &dev_attr_command); device_remove_file(dev, &dev_attr_status); device_remove_file(dev, &dev_attr_response); #ifdef TTHE_TUNER_SUPPORT debugfs_remove(dad->tthe_get_panel_data_debugfs); #endif } else { cmd->unsubscribe_attention(dev, CY_ATTEN_STARTUP, 
CY_MODULE_DEVICE_ACCESS, cyttsp5_setup_sysfs_attention, 0); } kfree(dad); return 0; } EXPORT_SYMBOL(cyttsp5_device_access_release);
gpl-2.0
olegsvs/android_kernel_ark_benefit_m7_mm
drivers/block/virtio_blk.c
320
22513
//#define DEBUG #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/hdreg.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/virtio.h> #include <linux/virtio_blk.h> #include <linux/scatterlist.h> #include <linux/string_helpers.h> #include <scsi/scsi_cmnd.h> #include <linux/idr.h> #include <linux/blk-mq.h> #include <linux/numa.h> #define PART_BITS 4 #define VQ_NAME_LEN 16 static int major; static DEFINE_IDA(vd_index_ida); static struct workqueue_struct *virtblk_wq; struct virtio_blk_vq { struct virtqueue *vq; spinlock_t lock; char name[VQ_NAME_LEN]; } ____cacheline_aligned_in_smp; struct virtio_blk { struct virtio_device *vdev; /* The disk structure for the kernel. */ struct gendisk *disk; /* Block layer tags. */ struct blk_mq_tag_set tag_set; /* Process context for config space updates */ struct work_struct config_work; /* What host tells us, plus 2 for header & tailer. */ unsigned int sg_elems; /* Ida index - used to track minor number allocations. */ int index; /* num of vqs */ int num_vqs; struct virtio_blk_vq *vqs; }; struct virtblk_req { struct request *req; struct virtio_blk_outhdr out_hdr; struct virtio_scsi_inhdr in_hdr; u8 status; struct scatterlist sg[]; }; static inline int virtblk_result(struct virtblk_req *vbr) { switch (vbr->status) { case VIRTIO_BLK_S_OK: return 0; case VIRTIO_BLK_S_UNSUPP: return -ENOTTY; default: return -EIO; } } static int __virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr, struct scatterlist *data_sg, bool have_data) { struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6]; unsigned int num_out = 0, num_in = 0; int type = vbr->out_hdr.type & ~VIRTIO_BLK_T_OUT; sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr)); sgs[num_out++] = &hdr; /* * If this is a packet command we need a couple of additional headers. 
* Behind the normal outhdr we put a segment with the scsi command * block, and before the normal inhdr we put the sense data and the * inhdr with additional status information. */ if (type == VIRTIO_BLK_T_SCSI_CMD) { sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len); sgs[num_out++] = &cmd; } if (have_data) { if (vbr->out_hdr.type & VIRTIO_BLK_T_OUT) sgs[num_out++] = data_sg; else sgs[num_out + num_in++] = data_sg; } if (type == VIRTIO_BLK_T_SCSI_CMD) { sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE); sgs[num_out + num_in++] = &sense; sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr)); sgs[num_out + num_in++] = &inhdr; } sg_init_one(&status, &vbr->status, sizeof(vbr->status)); sgs[num_out + num_in++] = &status; return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC); } static inline void virtblk_request_done(struct request *req) { struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); int error = virtblk_result(vbr); if (req->cmd_type == REQ_TYPE_BLOCK_PC) { req->resid_len = vbr->in_hdr.residual; req->sense_len = vbr->in_hdr.sense_len; req->errors = vbr->in_hdr.errors; } else if (req->cmd_type == REQ_TYPE_SPECIAL) { req->errors = (error != 0); } blk_mq_end_request(req, error); } static void virtblk_done(struct virtqueue *vq) { struct virtio_blk *vblk = vq->vdev->priv; bool req_done = false; int qid = vq->index; struct virtblk_req *vbr; unsigned long flags; unsigned int len; spin_lock_irqsave(&vblk->vqs[qid].lock, flags); do { virtqueue_disable_cb(vq); while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { blk_mq_complete_request(vbr->req); req_done = true; } if (unlikely(virtqueue_is_broken(vq))) break; } while (!virtqueue_enable_cb(vq)); /* In case queue is stopped waiting for more buffers. 
*/ if (req_done) blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); } static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req, bool last) { struct virtio_blk *vblk = hctx->queue->queuedata; struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); unsigned long flags; unsigned int num; int qid = hctx->queue_num; int err; bool notify = false; BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); vbr->req = req; if (req->cmd_flags & REQ_FLUSH) { vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH; vbr->out_hdr.sector = 0; vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); } else { switch (req->cmd_type) { case REQ_TYPE_FS: vbr->out_hdr.type = 0; vbr->out_hdr.sector = blk_rq_pos(vbr->req); vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); break; case REQ_TYPE_BLOCK_PC: vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; vbr->out_hdr.sector = 0; vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); break; case REQ_TYPE_SPECIAL: vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID; vbr->out_hdr.sector = 0; vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); break; default: /* We don't put anything else in the queue. 
*/ BUG(); } } blk_mq_start_request(req); num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg); if (num) { if (rq_data_dir(vbr->req) == WRITE) vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; else vbr->out_hdr.type |= VIRTIO_BLK_T_IN; } spin_lock_irqsave(&vblk->vqs[qid].lock, flags); err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); if (err) { virtqueue_kick(vblk->vqs[qid].vq); blk_mq_stop_hw_queue(hctx); spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); /* Out of mem doesn't actually happen, since we fall back * to direct descriptors */ if (err == -ENOMEM || err == -ENOSPC) return BLK_MQ_RQ_QUEUE_BUSY; return BLK_MQ_RQ_QUEUE_ERROR; } if (last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) notify = true; spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); if (notify) virtqueue_notify(vblk->vqs[qid].vq); return BLK_MQ_RQ_QUEUE_OK; } /* return id (s/n) string for *disk to *id_str */ static int virtblk_get_id(struct gendisk *disk, char *id_str) { struct virtio_blk *vblk = disk->private_data; struct request *req; struct bio *bio; int err; bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL); if (IS_ERR(bio)) return PTR_ERR(bio); req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL); if (IS_ERR(req)) { bio_put(bio); return PTR_ERR(req); } req->cmd_type = REQ_TYPE_SPECIAL; err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); blk_put_request(req); return err; } static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long data) { struct gendisk *disk = bdev->bd_disk; struct virtio_blk *vblk = disk->private_data; /* * Only allow the generic SCSI ioctls if the host can support it. 
*/ if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI)) return -ENOTTY; return scsi_cmd_blk_ioctl(bdev, mode, cmd, (void __user *)data); } /* We provide getgeo only to please some old bootloader/partitioning tools */ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) { struct virtio_blk *vblk = bd->bd_disk->private_data; /* see if the host passed in geometry config */ if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) { virtio_cread(vblk->vdev, struct virtio_blk_config, geometry.cylinders, &geo->cylinders); virtio_cread(vblk->vdev, struct virtio_blk_config, geometry.heads, &geo->heads); virtio_cread(vblk->vdev, struct virtio_blk_config, geometry.sectors, &geo->sectors); } else { /* some standard values, similar to sd */ geo->heads = 1 << 6; geo->sectors = 1 << 5; geo->cylinders = get_capacity(bd->bd_disk) >> 11; } return 0; } static const struct block_device_operations virtblk_fops = { .ioctl = virtblk_ioctl, .owner = THIS_MODULE, .getgeo = virtblk_getgeo, }; static int index_to_minor(int index) { return index << PART_BITS; } static int minor_to_index(int minor) { return minor >> PART_BITS; } static ssize_t virtblk_serial_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); int err; /* sysfs gives us a PAGE_SIZE buffer */ BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES); buf[VIRTIO_BLK_ID_BYTES] = '\0'; err = virtblk_get_id(disk, buf); if (!err) return strlen(buf); if (err == -EIO) /* Unsupported? Make it empty. */ return 0; return err; } DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL); static void virtblk_config_changed_work(struct work_struct *work) { struct virtio_blk *vblk = container_of(work, struct virtio_blk, config_work); struct virtio_device *vdev = vblk->vdev; struct request_queue *q = vblk->disk->queue; char cap_str_2[10], cap_str_10[10]; char *envp[] = { "RESIZE=1", NULL }; u64 capacity, size; /* Host must always specify the capacity. 
*/ virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity); /* If capacity is too big, truncate with warning. */ if ((sector_t)capacity != capacity) { dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n", (unsigned long long)capacity); capacity = (sector_t)-1; } size = capacity * queue_logical_block_size(q); string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); dev_notice(&vdev->dev, "new size: %llu %d-byte logical blocks (%s/%s)\n", (unsigned long long)capacity, queue_logical_block_size(q), cap_str_10, cap_str_2); set_capacity(vblk->disk, capacity); revalidate_disk(vblk->disk); kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp); } static void virtblk_config_changed(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; queue_work(virtblk_wq, &vblk->config_work); } static int init_vq(struct virtio_blk *vblk) { int err = 0; int i; vq_callback_t **callbacks; const char **names; struct virtqueue **vqs; unsigned short num_vqs; struct virtio_device *vdev = vblk->vdev; err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ, struct virtio_blk_config, num_queues, &num_vqs); if (err) num_vqs = 1; vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL); if (!vblk->vqs) { err = -ENOMEM; goto out; } names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL); if (!names) goto err_names; callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL); if (!callbacks) goto err_callbacks; vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL); if (!vqs) goto err_vqs; for (i = 0; i < num_vqs; i++) { callbacks[i] = virtblk_done; snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i); names[i] = vblk->vqs[i].name; } /* Discover virtqueues and write information to configuration. 
*/ err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); if (err) goto err_find_vqs; for (i = 0; i < num_vqs; i++) { spin_lock_init(&vblk->vqs[i].lock); vblk->vqs[i].vq = vqs[i]; } vblk->num_vqs = num_vqs; err_find_vqs: kfree(vqs); err_vqs: kfree(callbacks); err_callbacks: kfree(names); err_names: if (err) kfree(vblk->vqs); out: return err; } /* * Legacy naming scheme used for virtio devices. We are stuck with it for * virtio blk but don't ever use it for any new driver. */ static int virtblk_name_format(char *prefix, int index, char *buf, int buflen) { const int base = 'z' - 'a' + 1; char *begin = buf + strlen(prefix); char *end = buf + buflen; char *p; int unit; p = end - 1; *p = '\0'; unit = base; do { if (p == begin) return -EINVAL; *--p = 'a' + (index % unit); index = (index / unit) - 1; } while (index >= 0); memmove(begin, p, end - p); memcpy(buf, prefix, strlen(prefix)); return 0; } static int virtblk_get_cache_mode(struct virtio_device *vdev) { u8 writeback; int err; err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE, struct virtio_blk_config, wce, &writeback); if (err) writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE); return writeback; } static void virtblk_update_cache_mode(struct virtio_device *vdev) { u8 writeback = virtblk_get_cache_mode(vdev); struct virtio_blk *vblk = vdev->priv; if (writeback) blk_queue_flush(vblk->disk->queue, REQ_FLUSH); else blk_queue_flush(vblk->disk->queue, 0); revalidate_disk(vblk->disk); } static const char *const virtblk_cache_types[] = { "write through", "write back" }; static ssize_t virtblk_cache_type_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct gendisk *disk = dev_to_disk(dev); struct virtio_blk *vblk = disk->private_data; struct virtio_device *vdev = vblk->vdev; int i; BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE)); for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; ) if (sysfs_streq(buf, virtblk_cache_types[i])) break; if 
(i < 0) return -EINVAL; virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i); virtblk_update_cache_mode(vdev); return count; } static ssize_t virtblk_cache_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); struct virtio_blk *vblk = disk->private_data; u8 writeback = virtblk_get_cache_mode(vblk->vdev); BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types)); return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]); } static const struct device_attribute dev_attr_cache_type_ro = __ATTR(cache_type, S_IRUGO, virtblk_cache_type_show, NULL); static const struct device_attribute dev_attr_cache_type_rw = __ATTR(cache_type, S_IRUGO|S_IWUSR, virtblk_cache_type_show, virtblk_cache_type_store); static int virtblk_init_request(void *data, struct request *rq, unsigned int hctx_idx, unsigned int request_idx, unsigned int numa_node) { struct virtio_blk *vblk = data; struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq); sg_init_table(vbr->sg, vblk->sg_elems); return 0; } static struct blk_mq_ops virtio_mq_ops = { .queue_rq = virtio_queue_rq, .map_queue = blk_mq_map_queue, .complete = virtblk_request_done, .init_request = virtblk_init_request, }; static unsigned int virtblk_queue_depth; module_param_named(queue_depth, virtblk_queue_depth, uint, 0444); static int virtblk_probe(struct virtio_device *vdev) { struct virtio_blk *vblk; struct request_queue *q; int err, index; u64 cap; u32 v, blk_size, sg_elems, opt_io_size; u16 min_io_size; u8 physical_block_exp, alignment_offset; err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS), GFP_KERNEL); if (err < 0) goto out; index = err; /* We need to know how many segments before we allocate. */ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX, struct virtio_blk_config, seg_max, &sg_elems); /* We need at least one SG element, whatever they say. */ if (err || !sg_elems) sg_elems = 1; /* We need an extra sg elements at head and tail. 
*/ sg_elems += 2; vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL); if (!vblk) { err = -ENOMEM; goto out_free_index; } vblk->vdev = vdev; vblk->sg_elems = sg_elems; INIT_WORK(&vblk->config_work, virtblk_config_changed_work); err = init_vq(vblk); if (err) goto out_free_vblk; /* FIXME: How many partitions? How long is a piece of string? */ vblk->disk = alloc_disk(1 << PART_BITS); if (!vblk->disk) { err = -ENOMEM; goto out_free_vq; } /* Default queue sizing is to fill the ring. */ if (!virtblk_queue_depth) { virtblk_queue_depth = vblk->vqs[0].vq->num_free; /* ... but without indirect descs, we use 2 descs per req */ if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC)) virtblk_queue_depth /= 2; } memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); vblk->tag_set.ops = &virtio_mq_ops; vblk->tag_set.queue_depth = virtblk_queue_depth; vblk->tag_set.numa_node = NUMA_NO_NODE; vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; vblk->tag_set.cmd_size = sizeof(struct virtblk_req) + sizeof(struct scatterlist) * sg_elems; vblk->tag_set.driver_data = vblk; vblk->tag_set.nr_hw_queues = vblk->num_vqs; err = blk_mq_alloc_tag_set(&vblk->tag_set); if (err) goto out_put_disk; q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set); if (!q) { err = -ENOMEM; goto out_free_tags; } q->queuedata = vblk; virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); vblk->disk->major = major; vblk->disk->first_minor = index_to_minor(index); vblk->disk->private_data = vblk; vblk->disk->fops = &virtblk_fops; vblk->disk->driverfs_dev = &vdev->dev; vblk->index = index; /* configure queue flush support */ virtblk_update_cache_mode(vdev); /* If disk is read-only in the host, the guest should obey */ if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) set_disk_ro(vblk->disk, 1); /* Host must always specify the capacity. */ virtio_cread(vdev, struct virtio_blk_config, capacity, &cap); /* If capacity is too big, truncate with warning. 
*/ if ((sector_t)cap != cap) { dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n", (unsigned long long)cap); cap = (sector_t)-1; } set_capacity(vblk->disk, cap); /* We can handle whatever the host told us to handle. */ blk_queue_max_segments(q, vblk->sg_elems-2); /* No need to bounce any requests */ blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); /* No real sector limit. */ blk_queue_max_hw_sectors(q, -1U); /* Host can optionally specify maximum segment size and number of * segments. */ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX, struct virtio_blk_config, size_max, &v); if (!err) blk_queue_max_segment_size(q, v); else blk_queue_max_segment_size(q, -1U); /* Host can optionally specify the block size of the device */ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE, struct virtio_blk_config, blk_size, &blk_size); if (!err) blk_queue_logical_block_size(q, blk_size); else blk_size = queue_logical_block_size(q); /* Use topology information if available */ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, struct virtio_blk_config, physical_block_exp, &physical_block_exp); if (!err && physical_block_exp) blk_queue_physical_block_size(q, blk_size * (1 << physical_block_exp)); err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, struct virtio_blk_config, alignment_offset, &alignment_offset); if (!err && alignment_offset) blk_queue_alignment_offset(q, blk_size * alignment_offset); err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, struct virtio_blk_config, min_io_size, &min_io_size); if (!err && min_io_size) blk_queue_io_min(q, blk_size * min_io_size); err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, struct virtio_blk_config, opt_io_size, &opt_io_size); if (!err && opt_io_size) blk_queue_io_opt(q, blk_size * opt_io_size); virtio_device_ready(vdev); add_disk(vblk->disk); err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial); if (err) goto out_del_disk; if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) err = 
device_create_file(disk_to_dev(vblk->disk), &dev_attr_cache_type_rw); else err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_cache_type_ro); if (err) goto out_del_disk; return 0; out_del_disk: del_gendisk(vblk->disk); blk_cleanup_queue(vblk->disk->queue); out_free_tags: blk_mq_free_tag_set(&vblk->tag_set); out_put_disk: put_disk(vblk->disk); out_free_vq: vdev->config->del_vqs(vdev); out_free_vblk: kfree(vblk); out_free_index: ida_simple_remove(&vd_index_ida, index); out: return err; } static void virtblk_remove(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; int index = vblk->index; int refc; /* Make sure no work handler is accessing the device. */ flush_work(&vblk->config_work); del_gendisk(vblk->disk); blk_cleanup_queue(vblk->disk->queue); blk_mq_free_tag_set(&vblk->tag_set); /* Stop all the virtqueues. */ vdev->config->reset(vdev); refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount); put_disk(vblk->disk); vdev->config->del_vqs(vdev); kfree(vblk->vqs); kfree(vblk); /* Only free device id if we don't have any users */ if (refc == 1) ida_simple_remove(&vd_index_ida, index); } #ifdef CONFIG_PM_SLEEP static int virtblk_freeze(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; /* Ensure we don't receive any more interrupts */ vdev->config->reset(vdev); /* Make sure no work handler is accessing the device. 
*/ flush_work(&vblk->config_work); blk_mq_stop_hw_queues(vblk->disk->queue); vdev->config->del_vqs(vdev); return 0; } static int virtblk_restore(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; int ret; ret = init_vq(vdev->priv); if (ret) return ret; virtio_device_ready(vdev); blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); return 0; } #endif static const struct virtio_device_id id_table[] = { { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID }, { 0 }, }; static unsigned int features[] = { VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, VIRTIO_BLK_F_MQ, }; static struct virtio_driver virtio_blk = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtblk_probe, .remove = virtblk_remove, .config_changed = virtblk_config_changed, #ifdef CONFIG_PM_SLEEP .freeze = virtblk_freeze, .restore = virtblk_restore, #endif }; static int __init init(void) { int error; virtblk_wq = alloc_workqueue("virtio-blk", 0, 0); if (!virtblk_wq) return -ENOMEM; major = register_blkdev(0, "virtblk"); if (major < 0) { error = major; goto out_destroy_workqueue; } error = register_virtio_driver(&virtio_blk); if (error) goto out_unregister_blkdev; return 0; out_unregister_blkdev: unregister_blkdev(major, "virtblk"); out_destroy_workqueue: destroy_workqueue(virtblk_wq); return error; } static void __exit fini(void) { unregister_blkdev(major, "virtblk"); unregister_virtio_driver(&virtio_blk); destroy_workqueue(virtblk_wq); } module_init(init); module_exit(fini); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio block driver"); MODULE_LICENSE("GPL");
gpl-2.0
turtlekiosk/coms4118
drivers/media/video/msm_wfd/mdp-4-subdev.c
320
6590
/* Copyright (c) 2011-2013, Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/msm_mdp.h> #include <mach/iommu_domains.h> #include <media/videobuf2-core.h> #include "enc-subdev.h" #include "mdp-subdev.h" #include "wfd-util.h" #include <linux/switch.h> struct mdp_instance { struct fb_info *mdp; u32 height; u32 width; bool secure; bool uses_iommu_split_domain; }; int mdp_init(struct v4l2_subdev *sd, u32 val) { return 0; } int mdp_open(struct v4l2_subdev *sd, void *arg) { struct mdp_instance *inst = kzalloc(sizeof(struct mdp_instance), GFP_KERNEL); struct mdp_msg_ops *mops = arg; int rc = 0; struct fb_info *fbi = NULL; if (!inst) { WFD_MSG_ERR("Out of memory\n"); rc = -ENOMEM; goto mdp_open_fail; } else if (!mops) { WFD_MSG_ERR("Invalid arguments\n"); rc = -EINVAL; goto mdp_open_fail; } fbi = msm_fb_get_writeback_fb(); if (!fbi) { WFD_MSG_ERR("Failed to acquire mdp instance\n"); rc = -ENODEV; goto mdp_open_fail; } msm_fb_writeback_init(fbi); inst->mdp = fbi; inst->secure = mops->secure; inst->uses_iommu_split_domain = mops->iommu_split_domain; mops->cookie = inst; return rc; mdp_open_fail: kfree(inst); return rc; } int mdp_start(struct v4l2_subdev *sd, void *arg) { struct mdp_instance *inst = arg; int rc = 0; struct fb_info *fbi = NULL; if (inst) { rc = msm_fb_writeback_start(inst->mdp); if (rc) { WFD_MSG_ERR("Failed to start MDP mode\n"); goto exit; } fbi = msm_fb_get_writeback_fb(); if (!fbi) { WFD_MSG_ERR("Failed to acquire mdp instance\n"); rc = -ENODEV; goto exit; } } exit: return rc; } int mdp_stop(struct 
v4l2_subdev *sd, void *arg) { struct mdp_instance *inst = arg; int rc = 0; struct fb_info *fbi = NULL; if (inst) { rc = msm_fb_writeback_stop(inst->mdp); if (rc) { WFD_MSG_ERR("Failed to stop writeback mode\n"); return rc; } fbi = (struct fb_info *)inst->mdp; } return 0; } int mdp_close(struct v4l2_subdev *sd, void *arg) { struct mdp_instance *inst = arg; struct fb_info *fbi = NULL; if (inst) { fbi = (struct fb_info *)inst->mdp; msm_fb_writeback_terminate(fbi); kfree(inst); } return 0; } int mdp_q_buffer(struct v4l2_subdev *sd, void *arg) { int rc = 0; struct mdp_buf_info *binfo = arg; struct msmfb_data fbdata; struct mdp_instance *inst; if (!binfo || !binfo->inst || !binfo->cookie) { WFD_MSG_ERR("Invalid argument\n"); return -EINVAL; } inst = binfo->inst; fbdata.offset = binfo->offset; fbdata.memory_id = binfo->fd; fbdata.iova = binfo->paddr; fbdata.id = 0; fbdata.flags = 0; fbdata.priv = (uint32_t)binfo->cookie; WFD_MSG_DBG("queue buffer to mdp with offset = %u, fd = %u, "\ "priv = %p, iova = %p\n", fbdata.offset, fbdata.memory_id, (void *)fbdata.priv, (void *)fbdata.iova); rc = msm_fb_writeback_queue_buffer(inst->mdp, &fbdata); if (rc) WFD_MSG_ERR("Failed to queue buffer\n"); return rc; } int mdp_dq_buffer(struct v4l2_subdev *sd, void *arg) { int rc = 0; struct mdp_buf_info *obuf = arg; struct msmfb_data fbdata; struct mdp_instance *inst; if (!arg) { WFD_MSG_ERR("Invalid argument\n"); return -EINVAL; } inst = obuf->inst; fbdata.flags = MSMFB_WRITEBACK_DEQUEUE_BLOCKING; rc = msm_fb_writeback_dequeue_buffer(inst->mdp, &fbdata); if (rc) { WFD_MSG_ERR("Failed to dequeue buffer\n"); return rc; } WFD_MSG_DBG("dequeue buf from mdp with priv = %u\n", fbdata.priv); obuf->cookie = (void *)fbdata.priv; return rc; } int mdp_set_prop(struct v4l2_subdev *sd, void *arg) { struct mdp_prop *prop = (struct mdp_prop *)arg; struct mdp_instance *inst = prop->inst; if (!prop || !inst) { WFD_MSG_ERR("Invalid arguments\n"); return -EINVAL; } inst->height = prop->height; inst->width = 
prop->width; return 0; } int mdp_mmap(struct v4l2_subdev *sd, void *arg) { int rc = 0, domain = -1; struct mem_region_map *mmap = arg; struct mem_region *mregion; bool use_iommu = true; struct mdp_instance *inst = NULL; if (!mmap || !mmap->mregion || !mmap->cookie) { WFD_MSG_ERR("Invalid argument\n"); return -EINVAL; } inst = mmap->cookie; mregion = mmap->mregion; if (mregion->size % SZ_4K != 0) { WFD_MSG_ERR("Memregion not aligned to %d\n", SZ_4K); return -EINVAL; } if (inst->uses_iommu_split_domain) { if (inst->secure) use_iommu = false; else domain = DISPLAY_WRITE_DOMAIN; } else { domain = DISPLAY_READ_DOMAIN; } if (use_iommu) { rc = ion_map_iommu(mmap->ion_client, mregion->ion_handle, domain, GEN_POOL, SZ_4K, 0, (unsigned long *)&mregion->paddr, (unsigned long *)&mregion->size, 0, 0); } else { rc = ion_phys(mmap->ion_client, mregion->ion_handle, (unsigned long *)&mregion->paddr, (size_t *)&mregion->size); } return rc; } int mdp_munmap(struct v4l2_subdev *sd, void *arg) { struct mem_region_map *mmap = arg; struct mem_region *mregion; bool use_iommu = false; int domain = -1; struct mdp_instance *inst = NULL; if (!mmap || !mmap->mregion || !mmap->cookie) { WFD_MSG_ERR("Invalid argument\n"); return -EINVAL; } inst = mmap->cookie; mregion = mmap->mregion; if (inst->uses_iommu_split_domain) { if (inst->secure) use_iommu = false; else domain = DISPLAY_WRITE_DOMAIN; } else { domain = DISPLAY_READ_DOMAIN; } if (use_iommu) ion_unmap_iommu(mmap->ion_client, mregion->ion_handle, domain, GEN_POOL); return 0; } long mdp_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { int rc = 0; if (!sd) { WFD_MSG_ERR("Invalid arguments\n"); return -EINVAL; } switch (cmd) { case MDP_Q_BUFFER: rc = mdp_q_buffer(sd, arg); break; case MDP_DQ_BUFFER: rc = mdp_dq_buffer(sd, arg); break; case MDP_OPEN: rc = mdp_open(sd, arg); break; case MDP_START: rc = mdp_start(sd, arg); break; case MDP_STOP: rc = mdp_stop(sd, arg); break; case MDP_SET_PROP: rc = mdp_set_prop(sd, arg); break; case 
MDP_CLOSE: rc = mdp_close(sd, arg); break; case MDP_MMAP: rc = mdp_mmap(sd, arg); break; case MDP_MUNMAP: rc = mdp_munmap(sd, arg); break; default: WFD_MSG_ERR("IOCTL: %u not supported\n", cmd); rc = -EINVAL; break; } return rc; }
gpl-2.0
chneukirchen/linux-jetson-tk1
drivers/iommu/iova.c
576
12915
/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>

/*
 * Initialise an iova domain: empty rbtree of allocated ranges, no cached
 * 32-bit allocation node, and the pfn that marks the 32-bit DMA boundary.
 */
void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->dma_32bit_pfn = pfn_32bit;
}

/*
 * Return the rbnode from which a backwards search should start.  For
 * 32-bit-limited allocations we resume from the cached node (and lower
 * *limit_pfn below its range); otherwise start from the rightmost node.
 */
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}

/* Remember the most recent 32-bit-limited allocation as the search hint. */
static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}

/*
 * On free, if the freed range is at or above the cached node, move the
 * cache hint to the next node (only while it stays below the 32-bit pfn).
 */
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo) {
		struct rb_node *node = rb_next(&free->node);
		struct iova *iova = container_of(node, struct iova, node);

		/* only cache if it's below 32bit pfn */
		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
			iovad->cached32_node = node;
		else
			iovad->cached32_node = NULL;
	}
}

/* Computes the padding size required, to make the
 * the start address naturally aligned on its size
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
	unsigned int pad_size = 0;
	unsigned int order = ilog2(size);

	if (order)
		pad_size = (limit_pfn + 1) % (1 << order);

	return pad_size;
}

/*
 * Walk the rbtree backwards from limit_pfn looking for a gap of 'size'
 * pfns (naturally aligned if size_aligned), then insert 'new' covering
 * that gap.  Returns 0 on success, -ENOMEM if no gap exists.
 */
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);

		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* Insert the new_iova into domain rbtree by holding writer lock */
	/* Add new node and rebalance tree. */
	{
		struct rb_node **entry, *parent = NULL;

		/* If we have 'prev', it's a valid place to start the
		   insertion. Otherwise, start from the root. */
		if (prev)
			entry = &prev;
		else
			entry = &iovad->rbroot.rb_node;

		/* Figure out where to put new node */
		while (*entry) {
			struct iova *this = container_of(*entry,
							struct iova, node);
			parent = *entry;

			if (new->pfn_lo < this->pfn_lo)
				entry = &((*entry)->rb_left);
			else if (new->pfn_lo > this->pfn_lo)
				entry = &((*entry)->rb_right);
			else
				BUG(); /* this should not happen */
		}

		/* Add new node and rebalance tree. */
		rb_link_node(&new->node, parent, entry);
		rb_insert_color(&new->node, &iovad->rbroot);
	}
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}

/*
 * Link 'iova' into 'root' keyed by pfn_lo.  Caller holds the rbtree lock.
 * Overlapping pfn_lo values are a logic error and BUG().
 */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);
		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range limit_pfn to IOVA_START_PFN
 * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	/* If size aligned is set then round the size to
	 * to next power of two.
	 */
	if (size_aligned)
		size = __roundup_pow_of_two(size);

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}

/**
 * find_iova - find's an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given doamin which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller as the same thread
			 * which called this function also calls __free_iova()
			 * and it is by design that only one thread can possibly
			 * reference a particular iova and hence no conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the giving domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This functions finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);
	if (iova)
		__free_iova(iovad, iova);

}

/**
 * put_iova_domain - destroys the iova doamin
 * @iovad: - iova domain in question.
 * All the iova's in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);
		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}

/* Nonzero iff [pfn_lo, pfn_hi] intersects the range held by 'node'. */
static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

/* Allocate an iova descriptor covering [pfn_lo, pfn_hi]; NULL on OOM. */
static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

/* Allocate and insert a range into the domain's rbtree; NULL on OOM. */
static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova);

	return iova;
}

/*
 * Grow 'iova' downwards to absorb the overlap, and advance *pfn_lo past
 * the end of 'iova' so the caller reserves only the remaining tail.
 */
static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi:- higher pfn adderss
 * This function allocates reserves the address range from pfn_lo to pfn_hi so
 * that this address is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
				break;
	}

	/* We are here either because this is the first reserver node
	 * or need to insert remaining non overlap addr range
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}

/**
 * copy_reserved_iova - copies the reserved between domains
 * @from: - source doamin from where to copy
 * @to: - destination domin where to copy
 * This function copies reserved iova's from one doamin to
 * other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;
		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			/*
			 * Fix: the original printed pfn_lo for both %lx
			 * fields; report the range as hi@lo instead.
			 */
			printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
				iova->pfn_hi, iova->pfn_lo);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}

/*
 * Shrink 'iova' to exactly [pfn_lo, pfn_hi], inserting new nodes for any
 * head/tail remainder.  Returns the (re-ranged) iova, or NULL on OOM with
 * the tree unchanged.  Caller must not hold the rbtree lock.
 */
struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
		      unsigned long pfn_lo, unsigned long pfn_hi)
{
	unsigned long flags;
	struct iova *prev = NULL, *next = NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (iova->pfn_lo < pfn_lo) {
		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
		if (prev == NULL)
			goto error;
	}
	if (iova->pfn_hi > pfn_hi) {
		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
		if (next == NULL)
			goto error;
	}

	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);

	if (prev) {
		iova_insert_rbtree(&iovad->rbroot, prev);
		iova->pfn_lo = pfn_lo;
	}
	if (next) {
		iova_insert_rbtree(&iovad->rbroot, next);
		iova->pfn_hi = pfn_hi;
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return iova;

error:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	if (prev)
		free_iova_mem(prev);
	return NULL;
}
gpl-2.0
visi0nary/mt6735-kernel-3.10.61
drivers/md/dm-cache-target.c
1344
66488
/* * Copyright (C) 2012 Red Hat. All rights reserved. * * This file is released under the GPL. */ #include "dm.h" #include "dm-bio-prison.h" #include "dm-bio-record.h" #include "dm-cache-metadata.h" #include <linux/dm-io.h> #include <linux/dm-kcopyd.h> #include <linux/init.h> #include <linux/mempool.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/vmalloc.h> #define DM_MSG_PREFIX "cache" DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle, "A percentage of time allocated for copying to and/or from cache"); /*----------------------------------------------------------------*/ /* * Glossary: * * oblock: index of an origin block * cblock: index of a cache block * promotion: movement of a block from origin to cache * demotion: movement of a block from cache to origin * migration: movement of a block between the origin and cache device, * either direction */ /*----------------------------------------------------------------*/ static size_t bitset_size_in_bytes(unsigned nr_entries) { return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG); } static unsigned long *alloc_bitset(unsigned nr_entries) { size_t s = bitset_size_in_bytes(nr_entries); return vzalloc(s); } static void clear_bitset(void *bitset, unsigned nr_entries) { size_t s = bitset_size_in_bytes(nr_entries); memset(bitset, 0, s); } static void free_bitset(unsigned long *bits) { vfree(bits); } /*----------------------------------------------------------------*/ #define PRISON_CELLS 1024 #define MIGRATION_POOL_SIZE 128 #define COMMIT_PERIOD HZ #define MIGRATION_COUNT_WINDOW 10 /* * The block size of the device holding cache data must be >= 32KB */ #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT) /* * FIXME: the cache is read/write for the time being. 
*/ enum cache_mode { CM_WRITE, /* metadata may be changed */ CM_READ_ONLY, /* metadata may not be changed */ }; struct cache_features { enum cache_mode mode; bool write_through:1; }; struct cache_stats { atomic_t read_hit; atomic_t read_miss; atomic_t write_hit; atomic_t write_miss; atomic_t demotion; atomic_t promotion; atomic_t copies_avoided; atomic_t cache_cell_clash; atomic_t commit_count; atomic_t discard_count; }; struct cache { struct dm_target *ti; struct dm_target_callbacks callbacks; /* * Metadata is written to this device. */ struct dm_dev *metadata_dev; /* * The slower of the two data devices. Typically a spindle. */ struct dm_dev *origin_dev; /* * The faster of the two data devices. Typically an SSD. */ struct dm_dev *cache_dev; /* * Cache features such as write-through. */ struct cache_features features; /* * Size of the origin device in _complete_ blocks and native sectors. */ dm_oblock_t origin_blocks; sector_t origin_sectors; /* * Size of the cache device in blocks. */ dm_cblock_t cache_size; /* * Fields for converting from sectors to blocks. */ uint32_t sectors_per_block; int sectors_per_block_shift; struct dm_cache_metadata *cmd; spinlock_t lock; struct bio_list deferred_bios; struct bio_list deferred_flush_bios; struct bio_list deferred_writethrough_bios; struct list_head quiesced_migrations; struct list_head completed_migrations; struct list_head need_commit_migrations; sector_t migration_threshold; atomic_t nr_migrations; wait_queue_head_t migration_wait; wait_queue_head_t quiescing_wait; atomic_t quiescing_ack; /* * cache_size entries, dirty if set */ dm_cblock_t nr_dirty; unsigned long *dirty_bitset; /* * origin_blocks entries, discarded if set. 
*/ uint32_t discard_block_size; /* a power of 2 times sectors per block */ dm_dblock_t discard_nr_blocks; unsigned long *discard_bitset; struct dm_kcopyd_client *copier; struct workqueue_struct *wq; struct work_struct worker; struct delayed_work waker; unsigned long last_commit_jiffies; struct dm_bio_prison *prison; struct dm_deferred_set *all_io_ds; mempool_t *migration_pool; struct dm_cache_migration *next_migration; struct dm_cache_policy *policy; unsigned policy_nr_args; bool need_tick_bio:1; bool sized:1; bool quiescing:1; bool commit_requested:1; bool loaded_mappings:1; bool loaded_discards:1; struct cache_stats stats; /* * Rather than reconstructing the table line for the status we just * save it and regurgitate. */ unsigned nr_ctr_args; const char **ctr_args; }; struct per_bio_data { bool tick:1; unsigned req_nr:2; struct dm_deferred_entry *all_io_entry; /* * writethrough fields. These MUST remain at the end of this * structure and the 'cache' member must be the first as it * is used to determine the offset of the writethrough fields. */ struct cache *cache; dm_cblock_t cblock; bio_end_io_t *saved_bi_end_io; struct dm_bio_details bio_details; }; struct dm_cache_migration { struct list_head list; struct cache *cache; unsigned long start_jiffies; dm_oblock_t old_oblock; dm_oblock_t new_oblock; dm_cblock_t cblock; bool err:1; bool writeback:1; bool demote:1; bool promote:1; struct dm_bio_prison_cell *old_ocell; struct dm_bio_prison_cell *new_ocell; }; /* * Processing a bio in the worker thread may require these memory * allocations. We prealloc to avoid deadlocks (the same worker thread * frees them back to the mempool). 
*/ struct prealloc { struct dm_cache_migration *mg; struct dm_bio_prison_cell *cell1; struct dm_bio_prison_cell *cell2; }; static void wake_worker(struct cache *cache) { queue_work(cache->wq, &cache->worker); } /*----------------------------------------------------------------*/ static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache) { /* FIXME: change to use a local slab. */ return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT); } static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell) { dm_bio_prison_free_cell(cache->prison, cell); } static int prealloc_data_structs(struct cache *cache, struct prealloc *p) { if (!p->mg) { p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT); if (!p->mg) return -ENOMEM; } if (!p->cell1) { p->cell1 = alloc_prison_cell(cache); if (!p->cell1) return -ENOMEM; } if (!p->cell2) { p->cell2 = alloc_prison_cell(cache); if (!p->cell2) return -ENOMEM; } return 0; } static void prealloc_free_structs(struct cache *cache, struct prealloc *p) { if (p->cell2) free_prison_cell(cache, p->cell2); if (p->cell1) free_prison_cell(cache, p->cell1); if (p->mg) mempool_free(p->mg, cache->migration_pool); } static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p) { struct dm_cache_migration *mg = p->mg; BUG_ON(!mg); p->mg = NULL; return mg; } /* * You must have a cell within the prealloc struct to return. If not this * function will BUG() rather than returning NULL. */ static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p) { struct dm_bio_prison_cell *r = NULL; if (p->cell1) { r = p->cell1; p->cell1 = NULL; } else if (p->cell2) { r = p->cell2; p->cell2 = NULL; } else BUG(); return r; } /* * You can't have more than two cells in a prealloc struct. BUG() will be * called if you try and overfill. 
*/ static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell) { if (!p->cell2) p->cell2 = cell; else if (!p->cell1) p->cell1 = cell; else BUG(); } /*----------------------------------------------------------------*/ static void build_key(dm_oblock_t oblock, struct dm_cell_key *key) { key->virtual = 0; key->dev = 0; key->block = from_oblock(oblock); } /* * The caller hands in a preallocated cell, and a free function for it. * The cell will be freed if there's an error, or if it wasn't used because * a cell with that key already exists. */ typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell); static int bio_detain(struct cache *cache, dm_oblock_t oblock, struct bio *bio, struct dm_bio_prison_cell *cell_prealloc, cell_free_fn free_fn, void *free_context, struct dm_bio_prison_cell **cell_result) { int r; struct dm_cell_key key; build_key(oblock, &key); r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result); if (r) free_fn(free_context, cell_prealloc); return r; } static int get_cell(struct cache *cache, dm_oblock_t oblock, struct prealloc *structs, struct dm_bio_prison_cell **cell_result) { int r; struct dm_cell_key key; struct dm_bio_prison_cell *cell_prealloc; cell_prealloc = prealloc_get_cell(structs); build_key(oblock, &key); r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result); if (r) prealloc_put_cell(structs, cell_prealloc); return r; } /*----------------------------------------------------------------*/ static bool is_dirty(struct cache *cache, dm_cblock_t b) { return test_bit(from_cblock(b), cache->dirty_bitset); } static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) { if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) { cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1); policy_set_dirty(cache->policy, oblock); } } static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) { if 
(test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) { policy_clear_dirty(cache->policy, oblock); cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1); if (!from_cblock(cache->nr_dirty)) dm_table_event(cache->ti->table); } } /*----------------------------------------------------------------*/ static bool block_size_is_power_of_two(struct cache *cache) { return cache->sectors_per_block_shift >= 0; } static dm_block_t block_div(dm_block_t b, uint32_t n) { do_div(b, n); return b; } static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) { uint32_t discard_blocks = cache->discard_block_size; dm_block_t b = from_oblock(oblock); if (!block_size_is_power_of_two(cache)) discard_blocks = discard_blocks / cache->sectors_per_block; else discard_blocks >>= cache->sectors_per_block_shift; b = block_div(b, discard_blocks); return to_dblock(b); } static void set_discard(struct cache *cache, dm_dblock_t b) { unsigned long flags; atomic_inc(&cache->stats.discard_count); spin_lock_irqsave(&cache->lock, flags); set_bit(from_dblock(b), cache->discard_bitset); spin_unlock_irqrestore(&cache->lock, flags); } static void clear_discard(struct cache *cache, dm_dblock_t b) { unsigned long flags; spin_lock_irqsave(&cache->lock, flags); clear_bit(from_dblock(b), cache->discard_bitset); spin_unlock_irqrestore(&cache->lock, flags); } static bool is_discarded(struct cache *cache, dm_dblock_t b) { int r; unsigned long flags; spin_lock_irqsave(&cache->lock, flags); r = test_bit(from_dblock(b), cache->discard_bitset); spin_unlock_irqrestore(&cache->lock, flags); return r; } static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b) { int r; unsigned long flags; spin_lock_irqsave(&cache->lock, flags); r = test_bit(from_dblock(oblock_to_dblock(cache, b)), cache->discard_bitset); spin_unlock_irqrestore(&cache->lock, flags); return r; } /*----------------------------------------------------------------*/ static void load_stats(struct cache *cache) { 
struct dm_cache_statistics stats; dm_cache_metadata_get_stats(cache->cmd, &stats); atomic_set(&cache->stats.read_hit, stats.read_hits); atomic_set(&cache->stats.read_miss, stats.read_misses); atomic_set(&cache->stats.write_hit, stats.write_hits); atomic_set(&cache->stats.write_miss, stats.write_misses); } static void save_stats(struct cache *cache) { struct dm_cache_statistics stats; stats.read_hits = atomic_read(&cache->stats.read_hit); stats.read_misses = atomic_read(&cache->stats.read_miss); stats.write_hits = atomic_read(&cache->stats.write_hit); stats.write_misses = atomic_read(&cache->stats.write_miss); dm_cache_metadata_set_stats(cache->cmd, &stats); } /*---------------------------------------------------------------- * Per bio data *--------------------------------------------------------------*/ /* * If using writeback, leave out struct per_bio_data's writethrough fields. */ #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache)) #define PB_DATA_SIZE_WT (sizeof(struct per_bio_data)) static size_t get_per_bio_data_size(struct cache *cache) { return cache->features.write_through ? 
PB_DATA_SIZE_WT : PB_DATA_SIZE_WB; } static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size) { struct per_bio_data *pb = dm_per_bio_data(bio, data_size); BUG_ON(!pb); return pb; } static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size) { struct per_bio_data *pb = get_per_bio_data(bio, data_size); pb->tick = false; pb->req_nr = dm_bio_get_target_bio_nr(bio); pb->all_io_entry = NULL; return pb; } /*---------------------------------------------------------------- * Remapping *--------------------------------------------------------------*/ static void remap_to_origin(struct cache *cache, struct bio *bio) { bio->bi_bdev = cache->origin_dev->bdev; } static void remap_to_cache(struct cache *cache, struct bio *bio, dm_cblock_t cblock) { sector_t bi_sector = bio->bi_sector; bio->bi_bdev = cache->cache_dev->bdev; if (!block_size_is_power_of_two(cache)) bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) + sector_div(bi_sector, cache->sectors_per_block); else bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) | (bi_sector & (cache->sectors_per_block - 1)); } static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) { unsigned long flags; size_t pb_data_size = get_per_bio_data_size(cache); struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); spin_lock_irqsave(&cache->lock, flags); if (cache->need_tick_bio && !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) { pb->tick = true; cache->need_tick_bio = false; } spin_unlock_irqrestore(&cache->lock, flags); } static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, dm_oblock_t oblock) { check_if_tick_bio_needed(cache, bio); remap_to_origin(cache, bio); if (bio_data_dir(bio) == WRITE) clear_discard(cache, oblock_to_dblock(cache, oblock)); } static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, dm_oblock_t oblock, dm_cblock_t cblock) { remap_to_cache(cache, bio, cblock); if 
(bio_data_dir(bio) == WRITE) { set_dirty(cache, oblock, cblock); clear_discard(cache, oblock_to_dblock(cache, oblock)); } } static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) { sector_t block_nr = bio->bi_sector; if (!block_size_is_power_of_two(cache)) (void) sector_div(block_nr, cache->sectors_per_block); else block_nr >>= cache->sectors_per_block_shift; return to_oblock(block_nr); } static int bio_triggers_commit(struct cache *cache, struct bio *bio) { return bio->bi_rw & (REQ_FLUSH | REQ_FUA); } static void issue(struct cache *cache, struct bio *bio) { unsigned long flags; if (!bio_triggers_commit(cache, bio)) { generic_make_request(bio); return; } /* * Batch together any bios that trigger commits and then issue a * single commit for them in do_worker(). */ spin_lock_irqsave(&cache->lock, flags); cache->commit_requested = true; bio_list_add(&cache->deferred_flush_bios, bio); spin_unlock_irqrestore(&cache->lock, flags); } static void defer_writethrough_bio(struct cache *cache, struct bio *bio) { unsigned long flags; spin_lock_irqsave(&cache->lock, flags); bio_list_add(&cache->deferred_writethrough_bios, bio); spin_unlock_irqrestore(&cache->lock, flags); wake_worker(cache); } static void writethrough_endio(struct bio *bio, int err) { struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); bio->bi_end_io = pb->saved_bi_end_io; if (err) { bio_endio(bio, err); return; } dm_bio_restore(&pb->bio_details, bio); remap_to_cache(pb->cache, bio, pb->cblock); /* * We can't issue this bio directly, since we're in interrupt * context. So it gets put on a bio list for processing by the * worker thread. */ defer_writethrough_bio(pb->cache, bio); } /* * When running in writethrough mode we need to send writes to clean blocks * to both the cache and origin devices. In future we'd like to clone the * bio and send them in parallel, but for now we're doing them in * series as this is easier. 
*/ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, dm_oblock_t oblock, dm_cblock_t cblock) { struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); pb->cache = cache; pb->cblock = cblock; pb->saved_bi_end_io = bio->bi_end_io; dm_bio_record(&pb->bio_details, bio); bio->bi_end_io = writethrough_endio; remap_to_origin_clear_discard(pb->cache, bio, oblock); } /*---------------------------------------------------------------- * Migration processing * * Migration covers moving data from the origin device to the cache, or * vice versa. *--------------------------------------------------------------*/ static void free_migration(struct dm_cache_migration *mg) { mempool_free(mg, mg->cache->migration_pool); } static void inc_nr_migrations(struct cache *cache) { atomic_inc(&cache->nr_migrations); } static void dec_nr_migrations(struct cache *cache) { atomic_dec(&cache->nr_migrations); /* * Wake the worker in case we're suspending the target. */ wake_up(&cache->migration_wait); } static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, bool holder) { (holder ? 
dm_cell_release : dm_cell_release_no_holder) (cache->prison, cell, &cache->deferred_bios); free_prison_cell(cache, cell); } static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, bool holder) { unsigned long flags; spin_lock_irqsave(&cache->lock, flags); __cell_defer(cache, cell, holder); spin_unlock_irqrestore(&cache->lock, flags); wake_worker(cache); } static void cleanup_migration(struct dm_cache_migration *mg) { struct cache *cache = mg->cache; free_migration(mg); dec_nr_migrations(cache); } static void migration_failure(struct dm_cache_migration *mg) { struct cache *cache = mg->cache; if (mg->writeback) { DMWARN_LIMIT("writeback failed; couldn't copy block"); set_dirty(cache, mg->old_oblock, mg->cblock); cell_defer(cache, mg->old_ocell, false); } else if (mg->demote) { DMWARN_LIMIT("demotion failed; couldn't copy block"); policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock); cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1); if (mg->promote) cell_defer(cache, mg->new_ocell, 1); } else { DMWARN_LIMIT("promotion failed; couldn't copy block"); policy_remove_mapping(cache->policy, mg->new_oblock); cell_defer(cache, mg->new_ocell, 1); } cleanup_migration(mg); } static void migration_success_pre_commit(struct dm_cache_migration *mg) { unsigned long flags; struct cache *cache = mg->cache; if (mg->writeback) { cell_defer(cache, mg->old_ocell, false); clear_dirty(cache, mg->old_oblock, mg->cblock); cleanup_migration(mg); return; } else if (mg->demote) { if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) { DMWARN_LIMIT("demotion failed; couldn't update on disk metadata"); policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock); if (mg->promote) cell_defer(cache, mg->new_ocell, true); cleanup_migration(mg); return; } } else { if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) { DMWARN_LIMIT("promotion failed; couldn't update on disk metadata"); policy_remove_mapping(cache->policy, mg->new_oblock); 
		/* tail of migration_success_pre_commit (starts above this chunk) */
			cleanup_migration(mg);
			return;
		}
	}

	/* Commit needed before the mapping change becomes visible. */
	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->need_commit_migrations);
	cache->commit_requested = true;
	spin_unlock_irqrestore(&cache->lock, flags);
}

/*
 * Second half of a successful migration, run after the metadata commit.
 * Releases the prison cell(s) and either finishes the migration or, for a
 * demote+promote pair, requeues it for its promote phase.
 */
static void migration_success_post_commit(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		/* Writebacks complete pre-commit; reaching here is a logic error. */
		DMWARN("writeback unexpectedly triggered commit");
		return;

	} else if (mg->demote) {
		/* Hold the old cell if a promote phase still needs it. */
		cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);

		if (mg->promote) {
			/* Switch the migration over to its promote phase. */
			mg->demote = false;

			spin_lock_irqsave(&cache->lock, flags);
			list_add_tail(&mg->list, &cache->quiesced_migrations);
			spin_unlock_irqrestore(&cache->lock, flags);

		} else
			cleanup_migration(mg);

	} else {
		/* Plain promote: newly copied block starts clean. */
		cell_defer(cache, mg->new_ocell, true);
		clear_dirty(cache, mg->new_oblock, mg->cblock);
		cleanup_migration(mg);
	}
}

/*
 * kcopyd completion callback: record any error and hand the migration to
 * the worker via the completed list.  Runs in interrupt-ish context, so it
 * only queues and wakes.
 */
static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
	struct cache *cache = mg->cache;

	if (read_err || write_err)
		mg->err = true;

	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->completed_migrations);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

/*
 * Kick off the actual data copy for a migration: cache->origin for
 * writeback/demote, origin->cache for promote.
 */
static void issue_copy_real(struct dm_cache_migration *mg)
{
	int r;
	struct dm_io_region o_region, c_region;
	struct cache *cache = mg->cache;
	sector_t cblock = from_cblock(mg->cblock);

	o_region.bdev = cache->origin_dev->bdev;
	o_region.count = cache->sectors_per_block;

	c_region.bdev = cache->cache_dev->bdev;
	c_region.sector = cblock * cache->sectors_per_block;
	c_region.count = cache->sectors_per_block;

	if (mg->writeback || mg->demote) {
		/* demote */
		o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
		r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
	} else {
		/* promote */
		o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
		r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
	}

	if (r < 0)
		migration_failure(mg);
}

/* Skip the data copy entirely and jump straight to the pre-commit path. */
static void avoid_copy(struct dm_cache_migration *mg)
{
	atomic_inc(&mg->cache->stats.copies_avoided);
	migration_success_pre_commit(mg);
}

/*
 * Decide whether a migration's copy can be elided (clean or discarded
 * source block) and dispatch accordingly.
 */
static void issue_copy(struct dm_cache_migration *mg)
{
	bool avoid;
	struct cache *cache = mg->cache;

	if (mg->writeback || mg->demote)
		avoid = !is_dirty(cache, mg->cblock) ||
			is_discarded_oblock(cache, mg->old_oblock);
	else
		avoid = is_discarded_oblock(cache, mg->new_oblock);

	avoid ? avoid_copy(mg) : issue_copy_real(mg);
}

/* Route a finished copy to failure or pre-commit success handling. */
static void complete_migration(struct dm_cache_migration *mg)
{
	if (mg->err)
		migration_failure(mg);
	else
		migration_success_pre_commit(mg);
}

/*
 * Splice a migration list out from under the lock, then run fn() on each
 * entry without the lock held.
 */
static void process_migrations(struct cache *cache, struct list_head *head,
			       void (*fn)(struct dm_cache_migration *))
{
	unsigned long flags;
	struct list_head list;
	struct dm_cache_migration *mg, *tmp;

	INIT_LIST_HEAD(&list);
	spin_lock_irqsave(&cache->lock, flags);
	list_splice_init(head, &list);
	spin_unlock_irqrestore(&cache->lock, flags);

	list_for_each_entry_safe(mg, tmp, &list, list)
		fn(mg);
}

/* Caller must hold cache->lock. */
static void __queue_quiesced_migration(struct dm_cache_migration *mg)
{
	list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
}

/* Queue a single quiesced migration and poke the worker. */
static void queue_quiesced_migration(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	spin_lock_irqsave(&cache->lock, flags);
	__queue_quiesced_migration(mg);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

/* Queue a whole list of quiesced migrations under one lock acquisition. */
static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
{
	unsigned long flags;
	struct dm_cache_migration *mg, *tmp;

	spin_lock_irqsave(&cache->lock, flags);
	list_for_each_entry_safe(mg, tmp, work, list)
		__queue_quiesced_migration(mg);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

/*
 * On bio completion, drop this bio's deferred-set entry; any migrations
 * whose quiesce period that completes get queued for copying.
 */
static void check_for_quiesced_migrations(struct cache *cache,
					  struct per_bio_data *pb)
{
	struct list_head work;

	if (!pb->all_io_entry)
		return;

	INIT_LIST_HEAD(&work);
	/* NOTE(review): this second check is redundant after the early return above. */
	if (pb->all_io_entry)
		dm_deferred_entry_dec(pb->all_io_entry, &work);

	if (!list_empty(&work))
		queue_quiesced_migrations(cache, &work);
}

/*
 * Wait for in-flight io covering this block to drain; if none, the
 * migration can proceed immediately.
 */
static void quiesce_migration(struct dm_cache_migration *mg)
{
	if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
		queue_quiesced_migration(mg);
}

/* Start a promotion (origin block -> cache block) migration. */
static void promote(struct cache *cache, struct prealloc *structs,
		    dm_oblock_t oblock, dm_cblock_t cblock,
		    struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->writeback = false;
	mg->demote = false;
	mg->promote = true;
	mg->cache = cache;
	mg->new_oblock = oblock;
	mg->cblock = cblock;
	mg->old_ocell = NULL;
	mg->new_ocell = cell;
	mg->start_jiffies = jiffies;

	inc_nr_migrations(cache);
	quiesce_migration(mg);
}

/* Start a writeback (dirty cache block -> origin) migration. */
static void writeback(struct cache *cache, struct prealloc *structs,
		      dm_oblock_t oblock, dm_cblock_t cblock,
		      struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->writeback = true;
	mg->demote = false;
	mg->promote = false;
	mg->cache = cache;
	mg->old_oblock = oblock;
	mg->cblock = cblock;
	mg->old_ocell = cell;
	mg->new_ocell = NULL;
	mg->start_jiffies = jiffies;

	inc_nr_migrations(cache);
	quiesce_migration(mg);
}

/*
 * Start a combined demote-then-promote migration: evict old_oblock from
 * the cache block, then install new_oblock in its place.
 */
static void demote_then_promote(struct cache *cache, struct prealloc *structs,
				dm_oblock_t old_oblock, dm_oblock_t new_oblock,
				dm_cblock_t cblock,
				struct dm_bio_prison_cell *old_ocell,
				struct dm_bio_prison_cell *new_ocell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->writeback = false;
	mg->demote = true;
	mg->promote = true;
	mg->cache = cache;
	mg->old_oblock = old_oblock;
	mg->new_oblock = new_oblock;
	mg->cblock = cblock;
	mg->old_ocell = old_ocell;
	mg->new_ocell = new_ocell;
	mg->start_jiffies = jiffies;

	inc_nr_migrations(cache);
	quiesce_migration(mg);
}

/*----------------------------------------------------------------
 * bio processing
 *--------------------------------------------------------------*/

/* Park a bio on the deferred list for the worker thread to process. */
static void defer_bio(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_add(&cache->deferred_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

/*
 * Flushes are duplicated per device (num_flush_bios == 2): req_nr 0 goes
 * to the origin, req_nr 1 to the cache device.
 */
static void process_flush_bio(struct cache *cache, struct bio *bio)
{
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	BUG_ON(bio->bi_size);
	if (!pb->req_nr)
		remap_to_origin(cache, bio);
	else
		remap_to_cache(cache, bio, 0);

	issue(cache, bio);
}

/*
 * People generally discard large parts of a device, eg, the whole device
 * when formatting.  Splitting these large discards up into cache block
 * sized ios and then quiescing (always necessary for discard) takes too
 * long.
 *
 * We keep it simple, and allow any size of discard to come in, and just
 * mark off blocks on the discard bitset.  No passdown occurs!
 *
 * To implement passdown we need to change the bio_prison such that a cell
 * can have a key that spans many blocks.
 */
static void process_discard_bio(struct cache *cache, struct bio *bio)
{
	/* Round inward so only fully-covered discard blocks are marked. */
	dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
						  cache->discard_block_size);
	dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
	dm_block_t b;

	end_block = block_div(end_block, cache->discard_block_size);

	for (b = start_block; b < end_block; b++)
		set_discard(cache, to_dblock(b));

	bio_endio(bio, 0);
}

/* True while migration io volume is below the configured threshold. */
static bool spare_migration_bandwidth(struct cache *cache)
{
	sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
		cache->sectors_per_block;
	return current_volume < cache->migration_threshold;
}

/* Writes to clean blocks in writethrough mode must hit both devices. */
static bool is_writethrough_io(struct cache *cache, struct bio *bio,
			       dm_cblock_t cblock)
{
	return bio_data_dir(bio) == WRITE &&
		cache->features.write_through && !is_dirty(cache, cblock);
}

static void inc_hit_counter(struct cache *cache, struct bio *bio)
{
	atomic_inc(bio_data_dir(bio) == READ ?
		   &cache->stats.read_hit : &cache->stats.write_hit);
}

static void inc_miss_counter(struct cache *cache, struct bio *bio)
{
	atomic_inc(bio_data_dir(bio) == READ ?
		   &cache->stats.read_miss : &cache->stats.write_miss);
}

/*
 * Worker-side handling of a deferred data bio: detain it in the bio
 * prison, ask the policy for a mapping, then remap/issue or kick off the
 * migration the policy asked for.
 */
static void process_bio(struct cache *cache, struct prealloc *structs,
			struct bio *bio)
{
	int r;
	bool release_cell = true;
	dm_oblock_t block = get_bio_block(cache, bio);
	struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
	struct policy_result lookup_result;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
	bool discarded_block = is_discarded_oblock(cache, block);
	bool can_migrate = discarded_block || spare_migration_bandwidth(cache);

	/*
	 * Check to see if that block is currently migrating.
	 */
	cell_prealloc = prealloc_get_cell(structs);
	r = bio_detain(cache, block, bio, cell_prealloc,
		       (cell_free_fn) prealloc_put_cell,
		       structs, &new_ocell);
	if (r > 0)
		return;

	r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
		       bio, &lookup_result);

	if (r == -EWOULDBLOCK)
		/* migration has been denied */
		lookup_result.op = POLICY_MISS;

	switch (lookup_result.op) {
	case POLICY_HIT:
		inc_hit_counter(cache, bio);
		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

		if (is_writethrough_io(cache, bio, lookup_result.cblock))
			remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
		else
			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);

		issue(cache, bio);
		break;

	case POLICY_MISS:
		inc_miss_counter(cache, bio);
		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
		remap_to_origin_clear_discard(cache, bio, block);
		issue(cache, bio);
		break;

	case POLICY_NEW:
		atomic_inc(&cache->stats.promotion);
		promote(cache, structs, block, lookup_result.cblock, new_ocell);
		release_cell = false;
		break;

	case POLICY_REPLACE:
		cell_prealloc = prealloc_get_cell(structs);
		r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
			       (cell_free_fn) prealloc_put_cell,
			       structs, &old_ocell);
		if (r > 0) {
			/*
			 * We have to be careful to avoid lock inversion of
			 * the cells.  So we back off, and wait for the
			 * old_ocell to become free.
			 */
			policy_force_mapping(cache->policy, block,
					     lookup_result.old_oblock);
			atomic_inc(&cache->stats.cache_cell_clash);
			break;
		}
		atomic_inc(&cache->stats.demotion);
		atomic_inc(&cache->stats.promotion);

		demote_then_promote(cache, structs, lookup_result.old_oblock,
				    block, lookup_result.cblock,
				    old_ocell, new_ocell);
		release_cell = false;
		break;

	default:
		DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
			    (unsigned) lookup_result.op);
		bio_io_error(bio);
	}

	if (release_cell)
		cell_defer(cache, new_ocell, false);
}

/* Also true when jiffies has wrapped below last_commit_jiffies. */
static int need_commit_due_to_time(struct cache *cache)
{
	return jiffies < cache->last_commit_jiffies ||
	       jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
}

/*
 * Commit metadata when something changed and either a commit was requested
 * or the periodic commit interval has elapsed.  Returns 0 or the commit
 * error.
 */
static int commit_if_needed(struct cache *cache)
{
	if (dm_cache_changed_this_transaction(cache->cmd) &&
	    (cache->commit_requested || need_commit_due_to_time(cache))) {
		atomic_inc(&cache->stats.commit_count);
		cache->last_commit_jiffies = jiffies;
		cache->commit_requested = false;
		return dm_cache_commit(cache->cmd, false);
	}

	return 0;
}

/* Drain the deferred bio list, dispatching by bio type. */
static void process_deferred_bios(struct cache *cache)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;
	struct prealloc structs;

	memset(&structs, 0, sizeof(structs));
	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_bios);
	bio_list_init(&cache->deferred_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	while (!bio_list_empty(&bios)) {
		/*
		 * If we've got no free migration structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		if (prealloc_data_structs(cache, &structs)) {
			spin_lock_irqsave(&cache->lock, flags);
			bio_list_merge(&cache->deferred_bios, &bios);
			spin_unlock_irqrestore(&cache->lock, flags);
			break;
		}

		bio = bio_list_pop(&bios);

		if (bio->bi_rw & REQ_FLUSH)
			process_flush_bio(cache, bio);
		else if (bio->bi_rw & REQ_DISCARD)
			process_discard_bio(cache, bio);
		else
			process_bio(cache, &structs, bio);
	}

	prealloc_free_structs(cache, &structs);
}

/*
 * Release (or error, if the commit failed) bios that were waiting on a
 * metadata commit.
 */
static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_flush_bios);
	bio_list_init(&cache->deferred_flush_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		submit_bios ? generic_make_request(bio) : bio_io_error(bio);
}

/* Resubmit writethrough bios whose cache-side write has been set up. */
static void process_deferred_writethrough_bios(struct cache *cache)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_writethrough_bios);
	bio_list_init(&cache->deferred_writethrough_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		generic_make_request(bio);
}

/*
 * Opportunistically write back dirty blocks nominated by the policy, as
 * long as there is spare migration bandwidth.
 */
static void writeback_some_dirty_blocks(struct cache *cache)
{
	int r = 0;
	dm_oblock_t oblock;
	dm_cblock_t cblock;
	struct prealloc structs;
	struct dm_bio_prison_cell *old_ocell;

	memset(&structs, 0, sizeof(structs));

	while (spare_migration_bandwidth(cache)) {
		if (prealloc_data_structs(cache, &structs))
			break;

		r = policy_writeback_work(cache->policy, &oblock, &cblock);
		if (r)
			break;

		r = get_cell(cache, oblock, &structs, &old_ocell);
		if (r) {
			/* Block is busy; re-mark it dirty and try later. */
			policy_set_dirty(cache->policy, oblock);
			break;
		}

		writeback(cache, &structs, oblock, cblock, old_ocell);
	}

	prealloc_free_structs(cache, &structs);
}

/*----------------------------------------------------------------
 * Main worker loop
 *--------------------------------------------------------------*/

/* Read the quiescing flag under the lock. */
static bool is_quiescing(struct cache *cache)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = cache->quiescing;
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}

/* Worker-side: acknowledge a quiesce request so the suspender can proceed. */
static void ack_quiescing(struct cache *cache)
{
	if (is_quiescing(cache)) {
		atomic_inc(&cache->quiescing_ack);
		wake_up(&cache->quiescing_wait);
	}
}

/* Suspender-side: block until the worker has acknowledged the quiesce. */
static void wait_for_quiescing_ack(struct cache *cache)
{
	wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
}

/* Request quiescing and wait for the worker to acknowledge it. */
static void start_quiescing(struct cache *cache)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	cache->quiescing = true;
	spin_unlock_irqrestore(&cache->lock, flags);

	wait_for_quiescing_ack(cache);
}

/* Clear the quiescing flag and reset the ack for the next suspend cycle. */
static void stop_quiescing(struct cache *cache)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	cache->quiescing = false;
	spin_unlock_irqrestore(&cache->lock, flags);

	atomic_set(&cache->quiescing_ack, 0);
}

/* Block until every in-flight migration has completed. */
static void wait_for_migrations(struct cache *cache)
{
	wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
}

/* Stop the periodic waker and drain the worker queue. */
static void stop_worker(struct cache *cache)
{
	cancel_delayed_work(&cache->waker);
	flush_workqueue(cache->wq);
}

/* End all deferred bios with DM_ENDIO_REQUEUE so dm core resubmits them. */
static void requeue_deferred_io(struct cache *cache)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, &cache->deferred_bios);
	bio_list_init(&cache->deferred_bios);

	while ((bio = bio_list_pop(&bios)))
		bio_endio(bio, DM_ENDIO_REQUEUE);
}

/*
 * Does the worker have anything left to do?  While quiescing only the
 * migration lists are drained; new bios stay deferred.
 */
static int more_work(struct cache *cache)
{
	if (is_quiescing(cache))
		return !list_empty(&cache->quiesced_migrations) ||
			!list_empty(&cache->completed_migrations) ||
			!list_empty(&cache->need_commit_migrations);
	else
		return !bio_list_empty(&cache->deferred_bios) ||
			!bio_list_empty(&cache->deferred_flush_bios) ||
			!bio_list_empty(&cache->deferred_writethrough_bios) ||
			!list_empty(&cache->quiesced_migrations) ||
			!list_empty(&cache->completed_migrations) ||
			!list_empty(&cache->need_commit_migrations);
}
/*
 * The single worker: writes back dirty blocks, processes deferred bios,
 * drives migrations through their phases, and commits metadata, looping
 * until no work remains.
 */
static void do_worker(struct work_struct *ws)
{
	struct cache *cache = container_of(ws, struct cache, worker);

	do {
		if (!is_quiescing(cache)) {
			writeback_some_dirty_blocks(cache);
			process_deferred_writethrough_bios(cache);
			process_deferred_bios(cache);
		}

		process_migrations(cache, &cache->quiesced_migrations, issue_copy);
		process_migrations(cache, &cache->completed_migrations, complete_migration);

		if (commit_if_needed(cache)) {
			process_deferred_flush_bios(cache, false);

			/*
			 * FIXME: rollback metadata or just go into a
			 * failure mode and error everything
			 */
		} else {
			process_deferred_flush_bios(cache, true);
			process_migrations(cache, &cache->need_commit_migrations,
					   migration_success_post_commit);
		}

		ack_quiescing(cache);

	} while (more_work(cache));
}

/*
 * We want to commit periodically so that not too much
 * unwritten metadata builds up.
 */
static void do_waker(struct work_struct *ws)
{
	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
	policy_tick(cache->policy);
	wake_worker(cache);
	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
}

/*----------------------------------------------------------------*/

/* Ask one underlying device's request queue about congestion. */
static int is_congested(struct dm_dev *dev, int bdi_bits)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);
	return bdi_congested(&q->backing_dev_info, bdi_bits);
}

/* dm callback: congested if either the origin or the cache device is. */
static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
	struct cache *cache = container_of(cb, struct cache, callbacks);

	return is_congested(cache->origin_dev, bdi_bits) ||
		is_congested(cache->cache_dev, bdi_bits);
}

/*----------------------------------------------------------------
 * Target methods
 *--------------------------------------------------------------*/

/*
 * This function gets called on the error paths of the constructor, so we
 * have to cope with a partially initialised struct.
 */
static void destroy(struct cache *cache)
{
	unsigned i;

	/* Tear down in reverse order of construction; each member may be NULL. */
	if (cache->next_migration)
		mempool_free(cache->next_migration, cache->migration_pool);

	if (cache->migration_pool)
		mempool_destroy(cache->migration_pool);

	if (cache->all_io_ds)
		dm_deferred_set_destroy(cache->all_io_ds);

	if (cache->prison)
		dm_bio_prison_destroy(cache->prison);

	if (cache->wq)
		destroy_workqueue(cache->wq);

	if (cache->dirty_bitset)
		free_bitset(cache->dirty_bitset);

	if (cache->discard_bitset)
		free_bitset(cache->discard_bitset);

	if (cache->copier)
		dm_kcopyd_client_destroy(cache->copier);

	if (cache->cmd)
		dm_cache_metadata_close(cache->cmd);

	if (cache->metadata_dev)
		dm_put_device(cache->ti, cache->metadata_dev);

	if (cache->origin_dev)
		dm_put_device(cache->ti, cache->origin_dev);

	if (cache->cache_dev)
		dm_put_device(cache->ti, cache->cache_dev);

	if (cache->policy)
		dm_cache_policy_destroy(cache->policy);

	for (i = 0; i < cache->nr_ctr_args ; i++)
		kfree(cache->ctr_args[i]);
	kfree(cache->ctr_args);

	kfree(cache);
}

/* dm target destructor. */
static void cache_dtr(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	destroy(cache);
}

/* Device size in 512-byte sectors. */
static sector_t get_dev_size(struct dm_dev *dev)
{
	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}

/*----------------------------------------------------------------*/

/*
 * Construct a cache device mapping.
 *
 * cache <metadata dev> <cache dev> <origin dev> <block size>
 *       <#feature args> [<feature arg>]*
 *       <policy> <#policy args> [<policy arg>]*
 *
 * metadata dev    : fast device holding the persistent metadata
 * cache dev       : fast device holding cached data blocks
 * origin dev      : slow device holding original data blocks
 * block size      : cache unit size in sectors
 *
 * #feature args   : number of feature arguments passed
 * feature args    : writethrough.  (The default is writeback.)
 *
 * policy          : the replacement policy to use
 * #policy args    : an even number of policy arguments corresponding
 *                   to key/value pairs passed to the policy
 * policy args     : key/value pairs passed to the policy
 *                   E.g.
 *                   'sequential_threshold 1024'
 * See cache-policies.txt for details.
 *
 * Optional feature arguments are:
 *   writethrough  : write through caching that prohibits cache block
 *                   content from being different from origin block content.
 *                   Without this argument, the default behaviour is to write
 *                   back cache block contents later for performance reasons,
 *                   so they may differ from the corresponding origin blocks.
 */
struct cache_args {
	struct dm_target *ti;

	struct dm_dev *metadata_dev;

	struct dm_dev *cache_dev;
	sector_t cache_sectors;

	struct dm_dev *origin_dev;
	sector_t origin_sectors;

	uint32_t block_size;	/* cache block size in sectors */

	const char *policy_name;
	int policy_argc;
	const char **policy_argv;

	struct cache_features features;
};

/* Release devices the args struct still owns (not handed to the cache). */
static void destroy_cache_args(struct cache_args *ca)
{
	if (ca->metadata_dev)
		dm_put_device(ca->ti, ca->metadata_dev);

	if (ca->cache_dev)
		dm_put_device(ca->ti, ca->cache_dev);

	if (ca->origin_dev)
		dm_put_device(ca->ti, ca->origin_dev);

	kfree(ca);
}

static bool at_least_one_arg(struct dm_arg_set *as, char **error)
{
	if (!as->argc) {
		*error = "Insufficient args";
		return false;
	}

	return true;
}

static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
			      char **error)
{
	int r;
	sector_t metadata_dev_size;
	char b[BDEVNAME_SIZE];

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->metadata_dev);
	if (r) {
		*error = "Error opening metadata device";
		return r;
	}

	metadata_dev_size = get_dev_size(ca->metadata_dev);
	if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
		       bdevname(ca->metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);

	return 0;
}

static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
			   char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->cache_dev);
	if (r) {
		*error = "Error opening cache device";
		return r;
	}
	ca->cache_sectors = get_dev_size(ca->cache_dev);

	return 0;
}

static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->origin_dev);
	if (r) {
		*error = "Error opening origin device";
		return r;
	}

	ca->origin_sectors = get_dev_size(ca->origin_dev);
	if (ca->ti->len > ca->origin_sectors) {
		*error = "Device size larger than cached device";
		return -EINVAL;
	}

	return 0;
}

static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{
	unsigned long tmp;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	/* Must be a non-zero multiple of the minimum block size. */
	if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp ||
	    tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
	    tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
		*error = "Invalid data block size";
		return -EINVAL;
	}

	if (tmp > ca->cache_sectors) {
		*error = "Data block size is larger than the cache device";
		return -EINVAL;
	}

	ca->block_size = tmp;

	return 0;
}

/* Defaults: writable metadata, writeback caching. */
static void init_features(struct cache_features *cf)
{
	cf->mode = CM_WRITE;
	cf->write_through = false;
}

static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
			  char **error)
{
	static struct dm_arg _args[] = {
		{0, 1, "Invalid number of cache feature arguments"},
	};

	int r;
	unsigned argc;
	const char *arg;
	struct cache_features *cf = &ca->features;

	init_features(cf);

	r = dm_read_arg_group(_args, as, &argc, error);
	if (r)
		return -EINVAL;

	while (argc--) {
		arg = dm_shift_arg(as);

		if (!strcasecmp(arg, "writeback"))
			cf->write_through = false;

		else if (!strcasecmp(arg, "writethrough"))
			cf->write_through = true;

		else {
			*error = "Unrecognised cache feature requested";
			return -EINVAL;
		}
	}

	return 0;
}

static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
			char **error)
{
	static struct dm_arg _args[] = {
		{0, 1024, "Invalid number of policy arguments"},
	};

	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	ca->policy_name = dm_shift_arg(as);

	/* NOTE(review): policy_argc is int but dm_read_arg_group writes an unsigned — confirm. */
	r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
	if (r)
		return -EINVAL;

	/* Keep a borrowed pointer into as->argv; args are consumed, not copied. */
	ca->policy_argv = (const char **)as->argv;
	dm_consume_args(as, ca->policy_argc);

	return 0;
}

/* Parse the full ctr line in positional order; fills *ca. */
static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
			    char **error)
{
	int r;
	struct dm_arg_set as;

	as.argc = argc;
	as.argv = argv;

	r = parse_metadata_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_cache_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_origin_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_block_size(ca, &as, error);
	if (r)
		return r;

	r = parse_features(ca, &as, error);
	if (r)
		return r;

	r = parse_policy(ca, &as, error);
	if (r)
		return r;

	return 0;
}

/*----------------------------------------------------------------*/

static struct kmem_cache *migration_cache;

#define NOT_CORE_OPTION 1

/* Handle core (non-policy) config keys; NOT_CORE_OPTION means "not mine". */
static int process_config_option(struct cache *cache, const char *key, const char *value)
{
	unsigned long tmp;

	if (!strcasecmp(key, "migration_threshold")) {
		if (kstrtoul(value, 10, &tmp))
			return -EINVAL;

		cache->migration_threshold = tmp;
		return 0;
	}

	return NOT_CORE_OPTION;
}

/* Try the core first, then fall through to the policy. */
static int set_config_value(struct cache *cache, const char *key, const char *value)
{
	int r = process_config_option(cache, key, value);

	if (r == NOT_CORE_OPTION)
		r = policy_set_config_value(cache->policy, key, value);

	if (r)
		DMWARN("bad config value for %s: %s", key, value);

	return r;
}

/* Apply <key> <value> pairs; stops at the first failure. */
static int set_config_values(struct cache *cache, int argc, const char **argv)
{
	int r = 0;

	if (argc & 1) {
		DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
		return -EINVAL;
	}

	while (argc) {
		r = set_config_value(cache, argv[0], argv[1]);
		if (r)
			break;

		argc -= 2;
		argv += 2;
	}

	return r;
}

static int create_cache_policy(struct cache *cache, struct cache_args *ca,
			       char **error)
{
	cache->policy =	dm_cache_policy_create(ca->policy_name,
					       cache->cache_size,
					       cache->origin_sectors,
					       cache->sectors_per_block);
	if (!cache->policy) {
		*error = "Error creating cache's policy";
		return -ENOMEM;
	}

	return 0;
}

/*
 * We want the discard block size to be a power of two, at least the size
 * of the cache block size, and have no more than 2^14 discard blocks
 * across the origin.
 */
#define MAX_DISCARD_BLOCKS (1 << 14)

static bool too_many_discard_blocks(sector_t discard_block_size,
				    sector_t origin_size)
{
	(void) sector_div(origin_size, discard_block_size);

	return origin_size > MAX_DISCARD_BLOCKS;
}

static sector_t calculate_discard_block_size(sector_t cache_block_size,
					     sector_t origin_size)
{
	sector_t discard_block_size;

	discard_block_size = roundup_pow_of_two(cache_block_size);

	if (origin_size)
		while (too_many_discard_blocks(discard_block_size, origin_size))
			discard_block_size *= 2;

	return discard_block_size;
}

#define DEFAULT_MIGRATION_THRESHOLD 2048

/*
 * Build the cache target from parsed args.  On any failure jumps to
 * "bad", where destroy() copes with the partially initialised struct.
 * Device references in *ca are transferred to the cache on success.
 */
static int cache_create(struct cache_args *ca, struct cache **result)
{
	int r = 0;
	char **error = &ca->ti->error;
	struct cache *cache;
	struct dm_target *ti = ca->ti;
	dm_block_t origin_blocks;
	struct dm_cache_metadata *cmd;
	bool may_format = ca->features.mode == CM_WRITE;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return -ENOMEM;

	cache->ti = ca->ti;
	ti->private = cache;
	ti->num_flush_bios = 2;
	ti->flush_supported = true;

	ti->num_discard_bios = 1;
	ti->discards_supported = true;
	ti->discard_zeroes_data_unsupported = true;
	/* Discard bios must be split on a block boundary */
	ti->split_discard_bios = true;

	cache->features = ca->features;
	ti->per_bio_data_size = get_per_bio_data_size(cache);

	cache->callbacks.congested_fn = cache_is_congested;
	dm_table_add_target_callbacks(ti->table, &cache->callbacks);

	cache->metadata_dev = ca->metadata_dev;
	cache->origin_dev = ca->origin_dev;
	cache->cache_dev = ca->cache_dev;

	/* Ownership transferred; stop destroy_cache_args() from dropping them. */
	ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;

	/* FIXME: factor out this whole section */
	origin_blocks = cache->origin_sectors = ca->origin_sectors;
	origin_blocks = block_div(origin_blocks, ca->block_size);
	cache->origin_blocks = to_oblock(origin_blocks);

	cache->sectors_per_block = ca->block_size;
	if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
		r = -EINVAL;
		goto bad;
	}

	if (ca->block_size & (ca->block_size - 1)) {
		/* Non-power-of-two block size: no shift optimisation. */
		dm_block_t cache_size = ca->cache_sectors;

		cache->sectors_per_block_shift = -1;
		cache_size = block_div(cache_size, ca->block_size);
		cache->cache_size = to_cblock(cache_size);
	} else {
		cache->sectors_per_block_shift = __ffs(ca->block_size);
		cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift);
	}

	r = create_cache_policy(cache, ca, error);
	if (r)
		goto bad;

	cache->policy_nr_args = ca->policy_argc;
	cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;

	r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
	if (r) {
		*error = "Error setting cache policy's config values";
		goto bad;
	}

	cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
				     ca->block_size, may_format,
				     dm_cache_policy_get_hint_size(cache->policy));
	if (IS_ERR(cmd)) {
		*error = "Error creating metadata object";
		r = PTR_ERR(cmd);
		goto bad;
	}
	cache->cmd = cmd;

	spin_lock_init(&cache->lock);
	bio_list_init(&cache->deferred_bios);
	bio_list_init(&cache->deferred_flush_bios);
	bio_list_init(&cache->deferred_writethrough_bios);
	INIT_LIST_HEAD(&cache->quiesced_migrations);
	INIT_LIST_HEAD(&cache->completed_migrations);
	INIT_LIST_HEAD(&cache->need_commit_migrations);
	atomic_set(&cache->nr_migrations, 0);
	init_waitqueue_head(&cache->migration_wait);

	init_waitqueue_head(&cache->quiescing_wait);
	atomic_set(&cache->quiescing_ack, 0);

	r = -ENOMEM;
	cache->nr_dirty = 0;
	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
	if (!cache->dirty_bitset) {
		*error = "could not allocate dirty bitset";
		goto bad;
	}
	clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));

	cache->discard_block_size =
		calculate_discard_block_size(cache->sectors_per_block,
					     cache->origin_sectors);
	cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
	cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
	if (!cache->discard_bitset) {
		*error = "could not allocate discard bitset";
		goto bad;
	}
	clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));

	cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(cache->copier)) {
		*error = "could not create kcopyd client";
		r = PTR_ERR(cache->copier);
		goto bad;
	}

	cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
	if (!cache->wq) {
		*error = "could not create workqueue for metadata object";
		goto bad;
	}
	INIT_WORK(&cache->worker, do_worker);
	INIT_DELAYED_WORK(&cache->waker, do_waker);
	cache->last_commit_jiffies = jiffies;

	cache->prison = dm_bio_prison_create(PRISON_CELLS);
	if (!cache->prison) {
		*error = "could not create bio prison";
		goto bad;
	}

	cache->all_io_ds = dm_deferred_set_create();
	if (!cache->all_io_ds) {
		*error = "could not create all_io deferred set";
		goto bad;
	}

	cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
							 migration_cache);
	if (!cache->migration_pool) {
		*error = "Error creating cache's migration mempool";
		goto bad;
	}

	cache->next_migration = NULL;

	cache->need_tick_bio = true;
	cache->sized = false;
	cache->quiescing = false;
	cache->commit_requested = false;
	cache->loaded_mappings = false;
	cache->loaded_discards = false;

	load_stats(cache);

	atomic_set(&cache->stats.demotion, 0);
	atomic_set(&cache->stats.promotion, 0);
	atomic_set(&cache->stats.copies_avoided, 0);
	atomic_set(&cache->stats.cache_cell_clash, 0);
	atomic_set(&cache->stats.commit_count, 0);
	atomic_set(&cache->stats.discard_count, 0);

	*result = cache;
	return 0;

bad:
	destroy(cache);
	return r;
}

/* Duplicate the ctr argv so status/table output can reproduce it later. */
static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
{
	unsigned i;
	const char **copy;

	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return -ENOMEM;
	for (i = 0; i < argc; i++) {
		copy[i] = kstrdup(argv[i], GFP_KERNEL);
		if (!copy[i]) {
			/* Unwind the strings duplicated so far. */
			while (i--)
				kfree(copy[i]);
			kfree(copy);
			return -ENOMEM;
		}
	}

	cache->nr_ctr_args = argc;
	cache->ctr_args = copy;

	return 0;
}

/* dm target constructor: parse args, build the cache, stash the argv copy. */
static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct cache_args *ca;
	struct cache *cache = NULL;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca) {
		ti->error = "Error allocating memory for cache";
		return -ENOMEM;
	}
	ca->ti = ti;

	r = parse_cache_args(ca, argc, argv, &ti->error);
	if (r)
		goto out;

	r = cache_create(ca, &cache);
	if (r)
		goto out;

	/* Skip the three device args; the rest is reproduced by status. */
	r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
	if (r) {
		destroy(cache);
		goto out;
	}

	ti->private = cache;

out:
	destroy_cache_args(ca);
	return r;
}

/*
 * Fast-path map function.  Unlike process_bio() this runs without the
 * worker, never migrates (can_migrate == false), and defers anything that
 * needs the slow path.
 */
static int cache_map(struct dm_target *ti, struct bio *bio)
{
	struct cache *cache = ti->private;

	int r;
	dm_oblock_t block = get_bio_block(cache, bio);
	size_t pb_data_size = get_per_bio_data_size(cache);
	bool can_migrate = false;
	bool discarded_block;
	struct dm_bio_prison_cell *cell;
	struct policy_result lookup_result;
	struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);

	if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
		/*
		 * This can only occur if the io goes to a partial block at
		 * the end of the origin device.  We don't cache these.
		 * Just remap to the origin and carry on.
		 */
		remap_to_origin(cache, bio);
		return DM_MAPIO_REMAPPED;
	}

	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
		defer_bio(cache, bio);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * Check to see if that block is currently migrating.
	 */
	cell = alloc_prison_cell(cache);
	if (!cell) {
		defer_bio(cache, bio);
		return DM_MAPIO_SUBMITTED;
	}

	r = bio_detain(cache, block, bio, cell,
		       (cell_free_fn) free_prison_cell,
		       cache, &cell);
	if (r) {
		if (r < 0)
			defer_bio(cache, bio);

		return DM_MAPIO_SUBMITTED;
	}

	discarded_block = is_discarded_oblock(cache, block);

	r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
		       bio, &lookup_result);
	if (r == -EWOULDBLOCK) {
		/* Policy wants a migration; hand off to the worker. */
		cell_defer(cache, cell, true);
		return DM_MAPIO_SUBMITTED;

	} else if (r) {
		DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
		bio_io_error(bio);
		return DM_MAPIO_SUBMITTED;
	}

	switch (lookup_result.op) {
	case POLICY_HIT:
		inc_hit_counter(cache, bio);
		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

		if (is_writethrough_io(cache, bio, lookup_result.cblock))
			remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
		else
			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);

		cell_defer(cache, cell, false);
		break;

	case POLICY_MISS:
		inc_miss_counter(cache, bio);
		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

		if (pb->req_nr != 0) {
			/*
			 * This is a duplicate writethrough io that is no
			 * longer needed because the block has been demoted.
			 */
			bio_endio(bio, 0);
			cell_defer(cache, cell, false);
			return DM_MAPIO_SUBMITTED;
		} else {
			remap_to_origin_clear_discard(cache, bio, block);
			cell_defer(cache, cell, false);
		}
		break;

	default:
		DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
			    (unsigned) lookup_result.op);
		bio_io_error(bio);
		return DM_MAPIO_SUBMITTED;
	}

	return DM_MAPIO_REMAPPED;
}

/* dm end_io hook: forward policy ticks and release quiesce references. */
static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct cache *cache = ti->private;
	unsigned long flags;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	if (pb->tick) {
		policy_tick(cache->policy);

		spin_lock_irqsave(&cache->lock, flags);
		cache->need_tick_bio = true;
		spin_unlock_irqrestore(&cache->lock, flags);
	}

	check_for_quiesced_migrations(cache, pb);

	return 0;
}

/* Persist the in-core dirty bitset to the metadata device. */
static int write_dirty_bitset(struct cache *cache)
{
	unsigned i, r;

	for (i = 0; i < from_cblock(cache->cache_size); i++) {
		r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
				       is_dirty(cache, to_cblock(i)));
		if (r)
			return r;
	}

	return 0;
}

/* Persist the in-core discard bitset, resizing the on-disk one first. */
static int write_discard_bitset(struct cache *cache)
{
	unsigned i, r;

	r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
					   cache->discard_nr_blocks);
	if (r) {
		DMERR("could not resize on-disk discard bitset");
		return r;
	}

	for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
		r = dm_cache_set_discard(cache->cmd, to_dblock(i),
					 is_discarded(cache, to_dblock(i)));
		if (r)
			return r;
	}

	return 0;
}

/* policy_walk_mappings callback: save one policy hint. */
static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock,
		     uint32_t hint)
{
	struct cache *cache = context;
	return dm_cache_save_hint(cache->cmd, cblock, hint);
}

/* Persist all policy hints via a mapping walk. */
static int write_hints(struct cache *cache)
{
	int r;

	r = dm_cache_begin_hints(cache->cmd, cache->policy);
	if (r) {
		DMERR("dm_cache_begin_hints failed");
		return r;
	}

	r = policy_walk_mappings(cache->policy, save_hint, cache);
	if (r)
		DMERR("policy_walk_mappings failed");

	return r;
}

/*
 * returns true on success
 */
static bool sync_metadata(struct cache *cache)
{
	int r1, r2, r3, r4;

	r1 = write_dirty_bitset(cache);
	if (r1)
		DMERR("could not write dirty bitset");

	r2 = write_discard_bitset(cache);
	if (r2)
		DMERR("could not write discard bitset");

	save_stats(cache);

	r3 = write_hints(cache);
	if (r3)
		DMERR("could not write hints");

	/*
	 * If writing the above metadata failed, we still commit, but don't
	 * set the clean shutdown flag.  This will effectively force every
	 * dirty bit to be set on reload.
	 */
	r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
	if (r4)
		DMERR("could not write cache metadata. Data loss may occur.");

	return !r1 && !r2 && !r3 && !r4;
}

/* Quiesce, drain migrations, stop the worker and sync metadata to disk. */
static void cache_postsuspend(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	start_quiescing(cache);
	wait_for_migrations(cache);
	stop_worker(cache);
	requeue_deferred_io(cache);
	stop_quiescing(cache);

	(void) sync_metadata(cache);
}

/* dm_cache_load_mappings callback: replay one mapping into the policy. */
static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
			bool dirty, uint32_t hint, bool hint_valid)
{
	int r;
	struct cache *cache = context;

	r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
	if (r)
		return r;

	if (dirty)
		set_dirty(cache, oblock, cblock);
	else
		clear_dirty(cache, oblock, cblock);

	return 0;
}

/* dm_cache_load_discards callback: replay one discard bit. */
static int load_discard(void *context, sector_t discard_block_size,
			dm_dblock_t dblock, bool discard)
{
	struct cache *cache = context;

	/* FIXME: handle mis-matched block size */

	if (discard)
		set_discard(cache, dblock);
	else
		clear_discard(cache, dblock);

	return 0;
}

/* Handle resize, then (once) load mappings and discards from metadata. */
static int cache_preresume(struct dm_target *ti)
{
	int r = 0;
	struct cache *cache = ti->private;
	sector_t actual_cache_size = get_dev_size(cache->cache_dev);
	(void) sector_div(actual_cache_size, cache->sectors_per_block);

	/*
	 * Check to see if the cache has resized.
	 */
	if (from_cblock(cache->cache_size) != actual_cache_size || !cache->sized) {
		cache->cache_size = to_cblock(actual_cache_size);

		r = dm_cache_resize(cache->cmd, cache->cache_size);
		if (r) {
			DMERR("could not resize cache metadata");
			return r;
		}

		cache->sized = true;
	}

	if (!cache->loaded_mappings) {
		r = dm_cache_load_mappings(cache->cmd, cache->policy,
					   load_mapping, cache);
		if (r) {
			DMERR("could not load cache mappings");
			return r;
		}

		cache->loaded_mappings = true;
	}

	if (!cache->loaded_discards) {
		r = dm_cache_load_discards(cache->cmd, load_discard, cache);
		if (r) {
			DMERR("could not load origin discards");
			return r;
		}

		cache->loaded_discards = true;
	}

	return r;
}

/* Restart ticking and kick the worker after a resume. */
static void cache_resume(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	cache->need_tick_bio = true;
	do_waker(&cache->waker.work);
}

/*
 * Status format:
 *
 * <#used metadata blocks>/<#total metadata blocks>
 * <#read hits> <#read misses> <#write hits> <#write misses>
 * <#demotions> <#promotions> <#blocks in cache> <#dirty>
 * <#features> <features>*
 * <#core args> <core args>
 * <#policy args> <policy args>*
 */
static void cache_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	int r = 0;
	unsigned i;
	ssize_t sz = 0;
	dm_block_t nr_free_blocks_metadata = 0;
	dm_block_t nr_blocks_metadata = 0;
	char buf[BDEVNAME_SIZE];
	struct cache *cache = ti->private;
	dm_cblock_t residency;

	switch (type) {
	case STATUSTYPE_INFO:
		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
			r = dm_cache_commit(cache->cmd, false);
			if (r)
				DMERR("could not commit metadata for accurate status");
		}

		r = dm_cache_get_free_metadata_block_count(cache->cmd,
							   &nr_free_blocks_metadata);
		if (r) {
			DMERR("could not get metadata free block count");
			goto err;
		}

		r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
		if (r) {
			DMERR("could not get metadata device size");
			goto err;
		}

		residency =
policy_residency(cache->policy); DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ", (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata), (unsigned long long)nr_blocks_metadata, (unsigned) atomic_read(&cache->stats.read_hit), (unsigned) atomic_read(&cache->stats.read_miss), (unsigned) atomic_read(&cache->stats.write_hit), (unsigned) atomic_read(&cache->stats.write_miss), (unsigned) atomic_read(&cache->stats.demotion), (unsigned) atomic_read(&cache->stats.promotion), (unsigned long long) from_cblock(residency), cache->nr_dirty); if (cache->features.write_through) DMEMIT("1 writethrough "); else DMEMIT("0 "); DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold); if (sz < maxlen) { r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz); if (r) DMERR("policy_emit_config_values returned %d", r); } break; case STATUSTYPE_TABLE: format_dev_t(buf, cache->metadata_dev->bdev->bd_dev); DMEMIT("%s ", buf); format_dev_t(buf, cache->cache_dev->bdev->bd_dev); DMEMIT("%s ", buf); format_dev_t(buf, cache->origin_dev->bdev->bd_dev); DMEMIT("%s", buf); for (i = 0; i < cache->nr_ctr_args - 1; i++) DMEMIT(" %s", cache->ctr_args[i]); if (cache->nr_ctr_args) DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]); } return; err: DMEMIT("Error"); } /* * Supports <key> <value>. * * The key migration_threshold is supported by the cache target core. */ static int cache_message(struct dm_target *ti, unsigned argc, char **argv) { struct cache *cache = ti->private; if (argc != 2) return -EINVAL; return set_config_value(cache, argv[0], argv[1]); } static int cache_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { int r = 0; struct cache *cache = ti->private; r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data); if (!r) r = fn(ti, cache->origin_dev, 0, ti->len, data); return r; } /* * We assume I/O is going to the origin (which is the volume * more likely to have restrictions e.g. 
by being striped). * (Looking up the exact location of the data would be expensive * and could always be out of date by the time the bio is submitted.) */ static int cache_bvec_merge(struct dm_target *ti, struct bvec_merge_data *bvm, struct bio_vec *biovec, int max_size) { struct cache *cache = ti->private; struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev); if (!q->merge_bvec_fn) return max_size; bvm->bi_bdev = cache->origin_dev->bdev; return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); } static void set_discard_limits(struct cache *cache, struct queue_limits *limits) { /* * FIXME: these limits may be incompatible with the cache device */ limits->max_discard_sectors = cache->discard_block_size * 1024; limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; } static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) { struct cache *cache = ti->private; blk_limits_io_min(limits, 0); blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT); set_discard_limits(cache, limits); } /*----------------------------------------------------------------*/ static struct target_type cache_target = { .name = "cache", .version = {1, 1, 1}, .module = THIS_MODULE, .ctr = cache_ctr, .dtr = cache_dtr, .map = cache_map, .end_io = cache_end_io, .postsuspend = cache_postsuspend, .preresume = cache_preresume, .resume = cache_resume, .status = cache_status, .message = cache_message, .iterate_devices = cache_iterate_devices, .merge = cache_bvec_merge, .io_hints = cache_io_hints, }; static int __init dm_cache_init(void) { int r; r = dm_register_target(&cache_target); if (r) { DMERR("cache target registration failed: %d", r); return r; } migration_cache = KMEM_CACHE(dm_cache_migration, 0); if (!migration_cache) { dm_unregister_target(&cache_target); return -ENOMEM; } return 0; } static void __exit dm_cache_exit(void) { dm_unregister_target(&cache_target); kmem_cache_destroy(migration_cache); } module_init(dm_cache_init); 
module_exit(dm_cache_exit); MODULE_DESCRIPTION(DM_NAME " cache target"); MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>"); MODULE_LICENSE("GPL");
gpl-2.0
Clouded/linux-rt-odroid-c1
backports/drivers/net/wireless/ath/ath6kl/htc_mbox.c
1344
76352
/* * Copyright (c) 2007-2011 Atheros Communications Inc. * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "core.h" #include "hif.h" #include "debug.h" #include "hif-ops.h" #include "trace.h" #include <asm/unaligned.h> #define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask)) static void ath6kl_htc_mbox_cleanup(struct htc_target *target); static void ath6kl_htc_mbox_stop(struct htc_target *target); static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target, struct list_head *pkt_queue); static void ath6kl_htc_set_credit_dist(struct htc_target *target, struct ath6kl_htc_credit_info *cred_info, u16 svc_pri_order[], int len); /* threshold to re-enable Tx bundling for an AC*/ #define TX_RESUME_BUNDLE_THRESHOLD 1500 /* Functions for Tx credit handling */ static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info, struct htc_endpoint_credit_dist *ep_dist, int credits) { ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n", ep_dist->endpoint, credits); ep_dist->credits += credits; ep_dist->cred_assngd += credits; cred_info->cur_free_credits -= credits; } static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info, struct list_head *ep_list, int tot_credits) { struct 
htc_endpoint_credit_dist *cur_ep_dist; int count; ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits); cred_info->cur_free_credits = tot_credits; cred_info->total_avail_credits = tot_credits; list_for_each_entry(cur_ep_dist, ep_list, list) { if (cur_ep_dist->endpoint == ENDPOINT_0) continue; cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg; if (tot_credits > 4) { if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) || (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) { ath6kl_credit_deposit(cred_info, cur_ep_dist, cur_ep_dist->cred_min); cur_ep_dist->dist_flags |= HTC_EP_ACTIVE; } } if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) { ath6kl_credit_deposit(cred_info, cur_ep_dist, cur_ep_dist->cred_min); /* * Control service is always marked active, it * never goes inactive EVER. */ cur_ep_dist->dist_flags |= HTC_EP_ACTIVE; } /* * Streams have to be created (explicit | implicit) for all * kinds of traffic. BE endpoints are also inactive in the * beginning. When BE traffic starts it creates implicit * streams that redistributes credits. * * Note: all other endpoints have minimums set but are * initially given NO credits. credits will be distributed * as traffic activity demands */ } /* * For ath6kl_credit_seek function, * it use list_for_each_entry_reverse to walk around the whole ep list. * Therefore assign this lowestpri_ep_dist after walk around the ep_list */ cred_info->lowestpri_ep_dist = cur_ep_dist->list; WARN_ON(cred_info->cur_free_credits <= 0); list_for_each_entry(cur_ep_dist, ep_list, list) { if (cur_ep_dist->endpoint == ENDPOINT_0) continue; if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) { cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg; } else { /* * For the remaining data endpoints, we assume that * each cred_per_msg are the same. We use a simple * calculation here, we take the remaining credits * and determine how many max messages this can * cover and then set each endpoint's normal value * equal to 3/4 this amount. 
*/ count = (cred_info->cur_free_credits / cur_ep_dist->cred_per_msg) * cur_ep_dist->cred_per_msg; count = (count * 3) >> 2; count = max(count, cur_ep_dist->cred_per_msg); cur_ep_dist->cred_norm = count; } ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n", cur_ep_dist->endpoint, cur_ep_dist->svc_id, cur_ep_dist->credits, cur_ep_dist->cred_per_msg, cur_ep_dist->cred_norm, cur_ep_dist->cred_min); } } /* initialize and setup credit distribution */ static int ath6kl_htc_mbox_credit_setup(struct htc_target *htc_target, struct ath6kl_htc_credit_info *cred_info) { u16 servicepriority[5]; memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info)); servicepriority[0] = WMI_CONTROL_SVC; /* highest */ servicepriority[1] = WMI_DATA_VO_SVC; servicepriority[2] = WMI_DATA_VI_SVC; servicepriority[3] = WMI_DATA_BE_SVC; servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */ /* set priority list */ ath6kl_htc_set_credit_dist(htc_target, cred_info, servicepriority, 5); return 0; } /* reduce an ep's credits back to a set limit */ static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info, struct htc_endpoint_credit_dist *ep_dist, int limit) { int credits; ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n", ep_dist->endpoint, limit); ep_dist->cred_assngd = limit; if (ep_dist->credits <= limit) return; credits = ep_dist->credits - limit; ep_dist->credits -= credits; cred_info->cur_free_credits += credits; } static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info, struct list_head *epdist_list) { struct htc_endpoint_credit_dist *cur_list; list_for_each_entry(cur_list, epdist_list, list) { if (cur_list->endpoint == ENDPOINT_0) continue; if (cur_list->cred_to_dist > 0) { cur_list->credits += cur_list->cred_to_dist; cur_list->cred_to_dist = 0; if (cur_list->credits > cur_list->cred_assngd) ath6kl_credit_reduce(cred_info, cur_list, cur_list->cred_assngd); if (cur_list->credits > cur_list->cred_norm) 
ath6kl_credit_reduce(cred_info, cur_list, cur_list->cred_norm); if (!(cur_list->dist_flags & HTC_EP_ACTIVE)) { if (cur_list->txq_depth == 0) ath6kl_credit_reduce(cred_info, cur_list, 0); } } } } /* * HTC has an endpoint that needs credits, ep_dist is the endpoint in * question. */ static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info, struct htc_endpoint_credit_dist *ep_dist) { struct htc_endpoint_credit_dist *curdist_list; int credits = 0; int need; if (ep_dist->svc_id == WMI_CONTROL_SVC) goto out; if ((ep_dist->svc_id == WMI_DATA_VI_SVC) || (ep_dist->svc_id == WMI_DATA_VO_SVC)) if ((ep_dist->cred_assngd >= ep_dist->cred_norm)) goto out; /* * For all other services, we follow a simple algorithm of: * * 1. checking the free pool for credits * 2. checking lower priority endpoints for credits to take */ credits = min(cred_info->cur_free_credits, ep_dist->seek_cred); if (credits >= ep_dist->seek_cred) goto out; /* * We don't have enough in the free pool, try taking away from * lower priority services The rule for taking away credits: * * 1. Only take from lower priority endpoints * 2. Only take what is allocated above the minimum (never * starve an endpoint completely) * 3. Only take what you need. */ list_for_each_entry_reverse(curdist_list, &cred_info->lowestpri_ep_dist, list) { if (curdist_list == ep_dist) break; need = ep_dist->seek_cred - cred_info->cur_free_credits; if ((curdist_list->cred_assngd - need) >= curdist_list->cred_min) { /* * The current one has been allocated more than * it's minimum and it has enough credits assigned * above it's minimum to fulfill our need try to * take away just enough to fulfill our need. */ ath6kl_credit_reduce(cred_info, curdist_list, curdist_list->cred_assngd - need); if (cred_info->cur_free_credits >= ep_dist->seek_cred) break; } if (curdist_list->endpoint == ENDPOINT_0) break; } credits = min(cred_info->cur_free_credits, ep_dist->seek_cred); out: /* did we find some credits? 
*/ if (credits) ath6kl_credit_deposit(cred_info, ep_dist, credits); ep_dist->seek_cred = 0; } /* redistribute credits based on activity change */ static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info, struct list_head *ep_dist_list) { struct htc_endpoint_credit_dist *curdist_list; list_for_each_entry(curdist_list, ep_dist_list, list) { if (curdist_list->endpoint == ENDPOINT_0) continue; if ((curdist_list->svc_id == WMI_DATA_BK_SVC) || (curdist_list->svc_id == WMI_DATA_BE_SVC)) curdist_list->dist_flags |= HTC_EP_ACTIVE; if ((curdist_list->svc_id != WMI_CONTROL_SVC) && !(curdist_list->dist_flags & HTC_EP_ACTIVE)) { if (curdist_list->txq_depth == 0) ath6kl_credit_reduce(info, curdist_list, 0); else ath6kl_credit_reduce(info, curdist_list, curdist_list->cred_min); } } } /* * * This function is invoked whenever endpoints require credit * distributions. A lock is held while this function is invoked, this * function shall NOT block. The ep_dist_list is a list of distribution * structures in prioritized order as defined by the call to the * htc_set_credit_dist() api. 
*/ static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info, struct list_head *ep_dist_list, enum htc_credit_dist_reason reason) { switch (reason) { case HTC_CREDIT_DIST_SEND_COMPLETE: ath6kl_credit_update(cred_info, ep_dist_list); break; case HTC_CREDIT_DIST_ACTIVITY_CHANGE: ath6kl_credit_redistribute(cred_info, ep_dist_list); break; default: break; } WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits); WARN_ON(cred_info->cur_free_credits < 0); } static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len) { u8 *align_addr; if (!IS_ALIGNED((unsigned long) *buf, 4)) { align_addr = PTR_ALIGN(*buf - 4, 4); memmove(align_addr, *buf, len); *buf = align_addr; } } static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags, int ctrl0, int ctrl1) { struct htc_frame_hdr *hdr; packet->buf -= HTC_HDR_LENGTH; hdr = (struct htc_frame_hdr *)packet->buf; /* Endianess? */ put_unaligned((u16)packet->act_len, &hdr->payld_len); hdr->flags = flags; hdr->eid = packet->endpoint; hdr->ctrl[0] = ctrl0; hdr->ctrl[1] = ctrl1; } static void htc_reclaim_txctrl_buf(struct htc_target *target, struct htc_packet *pkt) { spin_lock_bh(&target->htc_lock); list_add_tail(&pkt->list, &target->free_ctrl_txbuf); spin_unlock_bh(&target->htc_lock); } static struct htc_packet *htc_get_control_buf(struct htc_target *target, bool tx) { struct htc_packet *packet = NULL; struct list_head *buf_list; buf_list = tx ? 
&target->free_ctrl_txbuf : &target->free_ctrl_rxbuf; spin_lock_bh(&target->htc_lock); if (list_empty(buf_list)) { spin_unlock_bh(&target->htc_lock); return NULL; } packet = list_first_entry(buf_list, struct htc_packet, list); list_del(&packet->list); spin_unlock_bh(&target->htc_lock); if (tx) packet->buf = packet->buf_start + HTC_HDR_LENGTH; return packet; } static void htc_tx_comp_update(struct htc_target *target, struct htc_endpoint *endpoint, struct htc_packet *packet) { packet->completion = NULL; packet->buf += HTC_HDR_LENGTH; if (!packet->status) return; ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n", packet->status, packet->endpoint, packet->act_len, packet->info.tx.cred_used); /* on failure to submit, reclaim credits for this packet */ spin_lock_bh(&target->tx_lock); endpoint->cred_dist.cred_to_dist += packet->info.tx.cred_used; endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq); ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n", target->credit_info, &target->cred_dist_list); ath6kl_credit_distribute(target->credit_info, &target->cred_dist_list, HTC_CREDIT_DIST_SEND_COMPLETE); spin_unlock_bh(&target->tx_lock); } static void htc_tx_complete(struct htc_endpoint *endpoint, struct list_head *txq) { if (list_empty(txq)) return; ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete ep %d pkts %d\n", endpoint->eid, get_queue_depth(txq)); ath6kl_tx_complete(endpoint->target, txq); } static void htc_tx_comp_handler(struct htc_target *target, struct htc_packet *packet) { struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint]; struct list_head container; ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n", packet->info.tx.seqno); htc_tx_comp_update(target, endpoint, packet); INIT_LIST_HEAD(&container); list_add_tail(&packet->list, &container); /* do completion */ htc_tx_complete(endpoint, &container); } static void htc_async_tx_scat_complete(struct htc_target *target, struct hif_scatter_req *scat_req) { struct htc_endpoint 
*endpoint; struct htc_packet *packet; struct list_head tx_compq; int i; INIT_LIST_HEAD(&tx_compq); ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx scat complete len %d entries %d\n", scat_req->len, scat_req->scat_entries); if (scat_req->status) ath6kl_err("send scatter req failed: %d\n", scat_req->status); packet = scat_req->scat_list[0].packet; endpoint = &target->endpoint[packet->endpoint]; /* walk through the scatter list and process */ for (i = 0; i < scat_req->scat_entries; i++) { packet = scat_req->scat_list[i].packet; if (!packet) { WARN_ON(1); return; } packet->status = scat_req->status; htc_tx_comp_update(target, endpoint, packet); list_add_tail(&packet->list, &tx_compq); } /* free scatter request */ hif_scatter_req_add(target->dev->ar, scat_req); /* complete all packets */ htc_tx_complete(endpoint, &tx_compq); } static int ath6kl_htc_tx_issue(struct htc_target *target, struct htc_packet *packet) { int status; bool sync = false; u32 padded_len, send_len; if (!packet->completion) sync = true; send_len = packet->act_len + HTC_HDR_LENGTH; padded_len = CALC_TXRX_PADDED_LEN(target, send_len); ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n", send_len, packet->info.tx.seqno, padded_len, target->dev->ar->mbox_info.htc_addr, sync ? "sync" : "async"); if (sync) { status = hif_read_write_sync(target->dev->ar, target->dev->ar->mbox_info.htc_addr, packet->buf, padded_len, HIF_WR_SYNC_BLOCK_INC); packet->status = status; packet->buf += HTC_HDR_LENGTH; } else status = hif_write_async(target->dev->ar, target->dev->ar->mbox_info.htc_addr, packet->buf, padded_len, HIF_WR_ASYNC_BLOCK_INC, packet); trace_ath6kl_htc_tx(status, packet->endpoint, packet->buf, send_len); return status; } static int htc_check_credits(struct htc_target *target, struct htc_endpoint *ep, u8 *flags, enum htc_endpoint_id eid, unsigned int len, int *req_cred) { *req_cred = (len > target->tgt_cred_sz) ? 
DIV_ROUND_UP(len, target->tgt_cred_sz) : 1; ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n", *req_cred, ep->cred_dist.credits); if (ep->cred_dist.credits < *req_cred) { if (eid == ENDPOINT_0) return -EINVAL; /* Seek more credits */ ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits; ath6kl_credit_seek(target->credit_info, &ep->cred_dist); ep->cred_dist.seek_cred = 0; if (ep->cred_dist.credits < *req_cred) { ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit not found for ep %d\n", eid); return -EINVAL; } } ep->cred_dist.credits -= *req_cred; ep->ep_st.cred_cosumd += *req_cred; /* When we are getting low on credits, ask for more */ if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) { ep->cred_dist.seek_cred = ep->cred_dist.cred_per_msg - ep->cred_dist.credits; ath6kl_credit_seek(target->credit_info, &ep->cred_dist); /* see if we were successful in getting more */ if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) { /* tell the target we need credits ASAP! */ *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE; ep->ep_st.cred_low_indicate += 1; ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit we need credits asap\n"); } } return 0; } static void ath6kl_htc_tx_pkts_get(struct htc_target *target, struct htc_endpoint *endpoint, struct list_head *queue) { int req_cred; u8 flags; struct htc_packet *packet; unsigned int len; while (true) { flags = 0; if (list_empty(&endpoint->txq)) break; packet = list_first_entry(&endpoint->txq, struct htc_packet, list); ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx got packet 0x%p queue depth %d\n", packet, get_queue_depth(&endpoint->txq)); len = CALC_TXRX_PADDED_LEN(target, packet->act_len + HTC_HDR_LENGTH); if (htc_check_credits(target, endpoint, &flags, packet->endpoint, len, &req_cred)) break; /* now we can fully move onto caller's queue */ packet = list_first_entry(&endpoint->txq, struct htc_packet, list); list_move_tail(&packet->list, queue); /* save the number of credits this packet consumed */ packet->info.tx.cred_used = req_cred; /* all TX 
packets are handled asynchronously */ packet->completion = htc_tx_comp_handler; packet->context = target; endpoint->ep_st.tx_issued += 1; /* save send flags */ packet->info.tx.flags = flags; packet->info.tx.seqno = endpoint->seqno; endpoint->seqno++; } } /* See if the padded tx length falls on a credit boundary */ static int htc_get_credit_padding(unsigned int cred_sz, int *len, struct htc_endpoint *ep) { int rem_cred, cred_pad; rem_cred = *len % cred_sz; /* No padding needed */ if (!rem_cred) return 0; if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN)) return -1; /* * The transfer consumes a "partial" credit, this * packet cannot be bundled unless we add * additional "dummy" padding (max 255 bytes) to * consume the entire credit. */ cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred; if ((cred_pad > 0) && (cred_pad <= 255)) *len += cred_pad; else /* The amount of padding is too large, send as non-bundled */ return -1; return cred_pad; } static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target, struct htc_endpoint *endpoint, struct hif_scatter_req *scat_req, int n_scat, struct list_head *queue) { struct htc_packet *packet; int i, len, rem_scat, cred_pad; int status = 0; u8 flags; rem_scat = target->max_tx_bndl_sz; for (i = 0; i < n_scat; i++) { scat_req->scat_list[i].packet = NULL; if (list_empty(queue)) break; packet = list_first_entry(queue, struct htc_packet, list); len = CALC_TXRX_PADDED_LEN(target, packet->act_len + HTC_HDR_LENGTH); cred_pad = htc_get_credit_padding(target->tgt_cred_sz, &len, endpoint); if (cred_pad < 0 || rem_scat < len) { status = -ENOSPC; break; } rem_scat -= len; /* now remove it from the queue */ list_del(&packet->list); scat_req->scat_list[i].packet = packet; /* prepare packet and flag message as part of a send bundle */ flags = packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE; ath6kl_htc_tx_prep_pkt(packet, flags, cred_pad, packet->info.tx.seqno); /* Make sure the buffer is 4-byte aligned */ 
ath6kl_htc_tx_buf_align(&packet->buf, packet->act_len + HTC_HDR_LENGTH); scat_req->scat_list[i].buf = packet->buf; scat_req->scat_list[i].len = len; scat_req->len += len; scat_req->scat_entries++; ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n", i, packet, packet->info.tx.seqno, len, rem_scat); } /* Roll back scatter setup in case of any failure */ if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) { for (i = scat_req->scat_entries - 1; i >= 0; i--) { packet = scat_req->scat_list[i].packet; if (packet) { packet->buf += HTC_HDR_LENGTH; list_add(&packet->list, queue); } } return -EAGAIN; } return status; } /* * Drain a queue and send as bundles this function may return without fully * draining the queue when * * 1. scatter resources are exhausted * 2. a message that will consume a partial credit will stop the * bundling process early * 3. we drop below the minimum number of messages for a bundle */ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint, struct list_head *queue, int *sent_bundle, int *n_bundle_pkts) { struct htc_target *target = endpoint->target; struct hif_scatter_req *scat_req = NULL; int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0, i; struct htc_packet *packet; int status; u32 txb_mask; u8 ac = WMM_NUM_AC; if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) && (WMI_CONTROL_SVC != endpoint->svc_id)) ac = target->dev->ar->ep2ac_map[endpoint->eid]; while (true) { status = 0; n_scat = get_queue_depth(queue); n_scat = min(n_scat, target->msg_per_bndl_max); if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE) /* not enough to bundle */ break; scat_req = hif_scatter_req_get(target->dev->ar); if (!scat_req) { /* no scatter resources */ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx no more scatter resources\n"); break; } if ((ac < WMM_NUM_AC) && (ac != WMM_AC_BK)) { if (WMM_AC_BE == ac) /* * BE, BK have priorities and bit * positions reversed */ txb_mask = (1 << WMM_AC_BK); else /* * any AC with priority lower than * itself */ 
				txb_mask = ((1 << ac) - 1);

			/*
			 * when the scatter request resources drop below a
			 * certain threshold, disable Tx bundling for all
			 * AC's with priority lower than the current requesting
			 * AC. Otherwise re-enable Tx bundling for them
			 */
			if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
				target->tx_bndl_mask &= ~txb_mask;
			else
				target->tx_bndl_mask |= txb_mask;
		}

		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
			   n_scat);

		scat_req->len = 0;
		scat_req->scat_entries = 0;

		status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
						       scat_req, n_scat,
						       queue);
		if (status == -EAGAIN) {
			/* could not fill the scatter list; return the
			 * request and stop bundling for this round */
			hif_scatter_req_add(target->dev->ar, scat_req);
			break;
		}

		/* send path is always asynchronous */
		scat_req->complete = htc_async_tx_scat_complete;
		n_sent_bundle++;
		tot_pkts_bundle += scat_req->scat_entries;

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx scatter bytes %d entries %d\n",
			   scat_req->len, scat_req->scat_entries);

		for (i = 0; i < scat_req->scat_entries; i++) {
			packet = scat_req->scat_list[i].packet;
			trace_ath6kl_htc_tx(packet->status, packet->endpoint,
					    packet->buf, packet->act_len);
		}

		/* NOTE(review): submit return value is intentionally not
		 * checked here; completion is reported via the async
		 * scat_req->complete callback set above */
		ath6kl_hif_submit_scat_req(target->dev, scat_req, false);

		if (status)
			break;
	}

	*sent_bundle = n_sent_bundle;
	*n_bundle_pkts = tot_pkts_bundle;
	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
		   n_sent_bundle);

	return;
}

/*
 * Drain an endpoint's TX queue for transmission, bundling packets where
 * the per-AC bundle mask allows it.  tx_proc_cnt makes the drain
 * single-threaded per endpoint: a second concurrent caller just bumps the
 * counter and returns, leaving the work to the first.
 */
static void ath6kl_htc_tx_from_queue(struct htc_target *target,
				     struct htc_endpoint *endpoint)
{
	struct list_head txq;
	struct htc_packet *packet;
	int bundle_sent;
	int n_pkts_bundle;
	u8 ac = WMM_NUM_AC;
	int status;

	spin_lock_bh(&target->tx_lock);

	endpoint->tx_proc_cnt++;
	if (endpoint->tx_proc_cnt > 1) {
		/* another context is already draining this endpoint */
		endpoint->tx_proc_cnt--;
		spin_unlock_bh(&target->tx_lock);
		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
		return;
	}

	/*
	 * drain the endpoint TX queue for transmission as long
	 * as we have enough credits.
	 */
	INIT_LIST_HEAD(&txq);

	/* map the endpoint to its WMM access category, except for the
	 * control services which have no AC */
	if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
	    (WMI_CONTROL_SVC != endpoint->svc_id))
		ac = target->dev->ar->ep2ac_map[endpoint->eid];

	while (true) {
		if (list_empty(&endpoint->txq))
			break;

		ath6kl_htc_tx_pkts_get(target, endpoint, &txq);

		if (list_empty(&txq))
			break;

		/* drop the lock while issuing; packets are on the local
		 * txq list, no longer on the shared endpoint queue */
		spin_unlock_bh(&target->tx_lock);

		bundle_sent = 0;
		n_pkts_bundle = 0;

		while (true) {
			/* try to send a bundle on each pass */
			if ((target->tx_bndl_mask) &&
			    (get_queue_depth(&txq) >=
			     HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
				int temp1 = 0, temp2 = 0;

				/* check if bundling is enabled for an AC */
				if (target->tx_bndl_mask & (1 << ac)) {
					ath6kl_htc_tx_bundle(endpoint, &txq,
							     &temp1, &temp2);
					bundle_sent += temp1;
					n_pkts_bundle += temp2;
				}
			}

			if (list_empty(&txq))
				break;

			packet = list_first_entry(&txq, struct htc_packet,
						  list);
			list_del(&packet->list);

			ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
					       0, packet->info.tx.seqno);
			status = ath6kl_htc_tx_issue(target, packet);

			if (status) {
				packet->status = status;
				packet->completion(packet->context, packet);
			}
		}

		spin_lock_bh(&target->tx_lock);

		endpoint->ep_st.tx_bundles += bundle_sent;
		endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;

		/*
		 * if an AC has bundling disabled and no tx bundling
		 * has occured continously for a certain number of TX,
		 * enable tx bundling for this AC
		 */
		if (!bundle_sent) {
			if (!(target->tx_bndl_mask & (1 << ac)) &&
			    (ac < WMM_NUM_AC)) {
				if (++target->ac_tx_count[ac] >=
					TX_RESUME_BUNDLE_THRESHOLD) {
					target->ac_tx_count[ac] = 0;
					target->tx_bndl_mask |= (1 << ac);
				}
			}
		} else {
			/* tx bundling will reset the counter */
			if (ac < WMM_NUM_AC)
				target->ac_tx_count[ac] = 0;
		}
	}

	endpoint->tx_proc_cnt = 0;
	spin_unlock_bh(&target->tx_lock);
}

/*
 * Queue a packet on the endpoint's TX queue and kick the drain.
 * Returns false only when the queue is full AND the endpoint's tx_full
 * callback asks for the packet to be dropped.
 */
static bool ath6kl_htc_tx_try(struct htc_target *target,
			      struct htc_endpoint *endpoint,
			      struct htc_packet *tx_pkt)
{
	struct htc_ep_callbacks ep_cb;
	int txq_depth;
	bool overflow = false;

	ep_cb = endpoint->ep_cb;

	spin_lock_bh(&target->tx_lock);
	txq_depth = get_queue_depth(&endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	if (txq_depth >= endpoint->max_txq_depth)
		overflow = true;

	if (overflow)
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx overflow ep %d depth %d max %d\n",
			   endpoint->eid, txq_depth,
			   endpoint->max_txq_depth);

	if (overflow && ep_cb.tx_full) {
		/* let the endpoint owner decide: drop or queue anyway */
		if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
		    HTC_SEND_FULL_DROP) {
			endpoint->ep_st.tx_dropped += 1;
			return false;
		}
	}

	spin_lock_bh(&target->tx_lock);
	list_add_tail(&tx_pkt->list, &endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	ath6kl_htc_tx_from_queue(target, endpoint);

	return true;
}

/* Restart any endpoint TX queues that stalled waiting for credits. */
static void htc_chk_ep_txq(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_endpoint_credit_dist *cred_dist;

	/*
	 * Run through the credit distribution list to see if there are
	 * packets queued. NOTE: no locks need to be taken since the
	 * distribution list is not dynamic (cannot be re-ordered) and we
	 * are not modifying any state.
	 */
	list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
		endpoint = cred_dist->htc_ep;

		spin_lock_bh(&target->tx_lock);
		if (!list_empty(&endpoint->txq)) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc creds ep %d credits %d pkts %d\n",
				   cred_dist->endpoint,
				   endpoint->cred_dist.credits,
				   get_queue_depth(&endpoint->txq));
			spin_unlock_bh(&target->tx_lock);
			/*
			 * Try to start the stalled queue, this list is
			 * ordered by priority. If there are credits
			 * available the highest priority queue will get a
			 * chance to reclaim credits from lower priority
			 * ones.
			 */
			ath6kl_htc_tx_from_queue(target, endpoint);
			spin_lock_bh(&target->tx_lock);
		}
		spin_unlock_bh(&target->tx_lock);
	}
}

/*
 * Send the HTC "setup complete" message to the target.  Uses the
 * extended form (with RX-bundling advertisement) when the target speaks
 * HTC version 2.1 or newer.  Synchronous: the control buffer is reclaimed
 * before returning.
 */
static int htc_setup_tx_complete(struct htc_target *target)
{
	struct htc_packet *send_pkt = NULL;
	int status;

	send_pkt = htc_get_control_buf(target, true);

	if (!send_pkt)
		return -ENOMEM;

	if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
		struct htc_setup_comp_ext_msg *setup_comp_ext;
		u32 flags = 0;

		setup_comp_ext =
		    (struct htc_setup_comp_ext_msg *)send_pkt->buf;
		memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
		setup_comp_ext->msg_id =
			cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);

		if (target->msg_per_bndl_max > 0) {
			/* Indicate HTC bundling to the target */
			flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
			setup_comp_ext->msg_per_rxbndl =
						target->msg_per_bndl_max;
		}

		memcpy(&setup_comp_ext->flags, &flags,
		       sizeof(setup_comp_ext->flags));
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
				 sizeof(struct htc_setup_comp_ext_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

	} else {
		struct htc_setup_comp_msg *setup_comp;
		setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
		memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
		setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
				 sizeof(struct htc_setup_comp_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
	}

	/* we want synchronous operation */
	send_pkt->completion = NULL;
	ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
	status = ath6kl_htc_tx_issue(target, send_pkt);

	if (send_pkt != NULL)
		htc_reclaim_txctrl_buf(target, send_pkt);

	return status;
}

/*
 * Build the credit distribution list in service-priority order.
 * Endpoint 0 always heads the list; the remaining endpoints are appended
 * in the order given by srvc_pri_order[].
 */
static void ath6kl_htc_set_credit_dist(struct htc_target *target,
				       struct ath6kl_htc_credit_info *credit_info,
				       u16 srvc_pri_order[], int list_len)
{
	struct htc_endpoint *endpoint;
	int i, ep;

	target->credit_info = credit_info;

	list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
		      &target->cred_dist_list);

	for (i = 0; i < list_len; i++) {
		for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
			endpoint = &target->endpoint[ep];
			if (endpoint->svc_id == srvc_pri_order[i]) {
				list_add_tail(&endpoint->cred_dist.list,
					      &target->cred_dist_list);
				break;
			}
		}
		/* a requested service with no connected endpoint is a
		 * caller bug */
		if (ep >= ENDPOINT_MAX) {
			WARN_ON(1);
			return;
		}
	}
}

/*
 * HTC TX entry point.  If the packet cannot be queued (endpoint full and
 * the owner asked to drop), complete it immediately with -ECANCELED when
 * stopping, -ENOSPC otherwise.  Always returns 0 for a valid endpoint.
 */
static int ath6kl_htc_mbox_tx(struct htc_target *target,
			      struct htc_packet *packet)
{
	struct htc_endpoint *endpoint;
	struct list_head queue;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx ep id %d buf 0x%p len %d\n",
		   packet->endpoint, packet->buf, packet->act_len);

	if (packet->endpoint >= ENDPOINT_MAX) {
		WARN_ON(1);
		return -EINVAL;
	}
	endpoint = &target->endpoint[packet->endpoint];

	if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
		packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
				 -ECANCELED : -ENOSPC;
		INIT_LIST_HEAD(&queue);
		list_add(&packet->list, &queue);
		htc_tx_complete(endpoint, &queue);
	}

	return 0;
}

/* flush endpoint TX queue */
static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
			   enum htc_endpoint_id eid, u16 tag)
{
	struct htc_packet *packet, *tmp_pkt;
	struct list_head discard_q, container;
	struct htc_endpoint *endpoint = &target->endpoint[eid];

	if (!endpoint->svc_id) {
		WARN_ON(1);
		return;
	}

	/* initialize the discard queue */
	INIT_LIST_HEAD(&discard_q);

	spin_lock_bh(&target->tx_lock);

	/* move matching packets to the discard queue under the lock,
	 * complete them afterwards without the lock held */
	list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
		if ((tag == HTC_TX_PACKET_TAG_ALL) ||
		    (tag == packet->info.tx.tag))
			list_move_tail(&packet->list, &discard_q);
	}

	spin_unlock_bh(&target->tx_lock);

	list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
		packet->status = -ECANCELED;
		list_del(&packet->list);
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
			   packet, packet->act_len,
			   packet->endpoint, packet->info.tx.tag);

		INIT_LIST_HEAD(&container);
		list_add_tail(&packet->list, &container);
		htc_tx_complete(endpoint, &container);
	}
}

/* Flush the TX queues of every connected endpoint. */
static void ath6kl_htc_flush_txep_all(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	int i;

	dump_cred_dist_stats(target);

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint =
&target->endpoint[i];
		if (endpoint->svc_id == 0)
			/* not in use.. */
			continue;
		ath6kl_htc_mbox_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
	}
}

/*
 * Mark an endpoint active/inactive for credit distribution.  When the
 * flag actually changes, re-run the credit distribution; on deactivation
 * also restart any queues that may now have credits available.
 */
static void ath6kl_htc_mbox_activity_changed(struct htc_target *target,
					     enum htc_endpoint_id eid,
					     bool active)
{
	struct htc_endpoint *endpoint = &target->endpoint[eid];
	bool dist = false;

	if (endpoint->svc_id == 0) {
		WARN_ON(1);
		return;
	}

	spin_lock_bh(&target->tx_lock);

	if (active) {
		if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
			endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
			dist = true;
		}
	} else {
		if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
			endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
			dist = true;
		}
	}

	if (dist) {
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx activity ctxt 0x%p dist 0x%p\n",
			   target->credit_info, &target->cred_dist_list);

		ath6kl_credit_distribute(target->credit_info,
					 &target->cred_dist_list,
					 HTC_CREDIT_DIST_ACTIVITY_CHANGE);
	}

	spin_unlock_bh(&target->tx_lock);

	if (dist && !active)
		htc_chk_ep_txq(target);
}

/* HTC Rx */

/* Bump per-endpoint RX counters based on how many lookaheads came in. */
static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint,
					      int n_look_ahds)
{
	endpoint->ep_st.rx_pkts++;
	if (n_look_ahds == 1)
		endpoint->ep_st.rx_lkahds++;
	else if (n_look_ahds > 1)
		endpoint->ep_st.rx_bundle_lkahd++;
}

/*
 * Check a frame length against the endpoint's buffer budget: the control
 * endpoint uses ATH6KL_BUFFER_SIZE, all others the (larger) A-MSDU size.
 */
static inline bool htc_valid_rx_frame_len(struct htc_target *target,
					  enum htc_endpoint_id eid, int len)
{
	return (eid == target->dev->ar->ctrl_ep) ?
		len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
}

/* Return a single RX buffer to the pool via the multi-buffer API. */
static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
{
	struct list_head queue;

	INIT_LIST_HEAD(&queue);
	list_add_tail(&packet->list, &queue);
	return ath6kl_htc_mbox_add_rxbuf_multiple(target, &queue);
}

/*
 * Recycle an RX buffer, or — for NO_RECYCLE packets that were allocated
 * by the endpoint's rx_alloc callback — hand it back to the owner as
 * canceled so the owner can free it.
 */
static void htc_reclaim_rxbuf(struct htc_target *target,
			      struct htc_packet *packet,
			      struct htc_endpoint *ep)
{
	if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
		htc_rxpkt_reset(packet);
		packet->status = -ECANCELED;
		ep->ep_cb.rx(ep->target, packet);
	} else {
		htc_rxpkt_reset(packet);
		htc_add_rxbuf((void *)(target), packet);
	}
}

/* Return a control-endpoint RX buffer to the free pool. */
static void reclaim_rx_ctrl_buf(struct htc_target *target,
				struct htc_packet *packet)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
	spin_unlock_bh(&target->htc_lock);
}

/*
 * Synchronously read one padded HTC frame from the target's mailbox into
 * packet->buf.  Fails with -ENOMEM when the padded length would overflow
 * the buffer.
 */
static int ath6kl_htc_rx_packet(struct htc_target *target,
				struct htc_packet *packet,
				u32 rx_len)
{
	struct ath6kl_device *dev = target->dev;
	u32 padded_len;
	int status;

	padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);

	if (padded_len > packet->buf_len) {
		ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
			   padded_len, rx_len, packet->buf_len);
		return -ENOMEM;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx 0x%p hdr 0x%x len %d mbox 0x%x\n",
		   packet, packet->info.rx.exp_hdr,
		   padded_len, dev->ar->mbox_info.htc_addr);

	status = hif_read_write_sync(dev->ar,
				     dev->ar->mbox_info.htc_addr,
				     packet->buf, padded_len,
				     HIF_RD_SYNC_BLOCK_FIX);
	packet->status = status;

	return status;
}

/*
 * optimization for recv packets, we can indicate a
 * "hint" that there are more single-packets to fetch
 * on this endpoint.
 */
static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
				       struct htc_endpoint *endpoint,
				       struct htc_packet *packet)
{
	/* reinterpret the lookahead word as an HTC frame header */
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;

	if (htc_hdr->eid == packet->endpoint) {
		if (!list_empty(&endpoint->rx_bufq))
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;
	}
}

/*
 * If the endpoint's RX buffer pool dropped below its refill threshold,
 * call the owner's rx_refill callback (outside the rx_lock).
 */
static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint)
{
	struct htc_ep_callbacks ep_cb = endpoint->ep_cb;

	if (ep_cb.rx_refill_thresh > 0) {
		spin_lock_bh(&endpoint->target->rx_lock);
		if (get_queue_depth(&endpoint->rx_bufq)
		    < ep_cb.rx_refill_thresh) {
			spin_unlock_bh(&endpoint->target->rx_lock);
			ep_cb.rx_refill(endpoint->target, endpoint->eid);
			return;
		}
		spin_unlock_bh(&endpoint->target->rx_lock);
	}
}

/* This function is called with rx_lock held */
static int ath6kl_htc_rx_setup(struct htc_target *target,
			       struct htc_endpoint *ep,
			       u32 *lk_ahds, struct list_head *queue, int n_msg)
{
	struct htc_packet *packet;
	/* FIXME: type of lk_ahds can't be right */
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
	struct htc_ep_callbacks ep_cb;
	int status = 0, j, full_len;
	bool no_recycle;

	full_len = CALC_TXRX_PADDED_LEN(target,
					le16_to_cpu(htc_hdr->payld_len) +
					sizeof(*htc_hdr));

	if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
		ath6kl_warn("Rx buffer requested with invalid length htc_hdr:eid %d, flags 0x%x, len %d\n",
			    htc_hdr->eid, htc_hdr->flags,
			    le16_to_cpu(htc_hdr->payld_len));
		return -EINVAL;
	}

	ep_cb = ep->ep_cb;
	/* allocate (or dequeue) one RX buffer per expected message */
	for (j = 0; j < n_msg; j++) {
		/*
		 * Reset flag, any packets allocated using the
		 * rx_alloc() API cannot be recycled on
		 * cleanup,they must be explicitly returned.
		 */
		no_recycle = false;

		if (ep_cb.rx_allocthresh &&
		    (full_len > ep_cb.rx_alloc_thresh)) {
			ep->ep_st.rx_alloc_thresh_hit += 1;
			ep->ep_st.rxalloc_thresh_byte +=
				le16_to_cpu(htc_hdr->payld_len);

			/* callback may sleep/allocate: drop rx_lock around
			 * it and retake afterwards */
			spin_unlock_bh(&target->rx_lock);
			no_recycle = true;

			packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
						      full_len);
			spin_lock_bh(&target->rx_lock);
		} else {
			/* refill handler is being used */
			if (list_empty(&ep->rx_bufq)) {
				if (ep_cb.rx_refill) {
					spin_unlock_bh(&target->rx_lock);
					ep_cb.rx_refill(ep->target, ep->eid);
					spin_lock_bh(&target->rx_lock);
				}
			}

			if (list_empty(&ep->rx_bufq)) {
				packet = NULL;
			} else {
				packet = list_first_entry(&ep->rx_bufq,
						struct htc_packet, list);
				list_del(&packet->list);
			}
		}

		if (!packet) {
			/* out of buffers: remember who is waiting so the
			 * receiver can be unblocked on the next refill */
			target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ep->eid;
			return -ENOSPC;
		}

		/* clear flags */
		packet->info.rx.rx_flags = 0;
		packet->info.rx.indicat_flags = 0;
		packet->status = 0;

		if (no_recycle)
			/*
			 * flag that these packets cannot be
			 * recycled, they have to be returned to
			 * the user
			 */
			packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;

		/* Caller needs to free this upon any failure */
		list_add_tail(&packet->list, queue);

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			status = -ECANCELED;
			break;
		}

		if (j) {
			/* non-first packets of a bundle: real header is
			 * only known after the fetch */
			packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
			packet->info.rx.exp_hdr = 0xFFFFFFFF;
		} else
			/* set expected look ahead */
			packet->info.rx.exp_hdr = *lk_ahds;

		packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
			HTC_HDR_LENGTH;
	}

	return status;
}

/*
 * Validate each incoming lookahead and set up the RX packet buffers for
 * the messages (possibly a bundle) it announces.  Takes rx_lock for the
 * duration; on failure all already-queued buffers are reclaimed.
 */
static int ath6kl_htc_rx_alloc(struct htc_target *target,
			       u32 lk_ahds[], int msg,
			       struct htc_endpoint *endpoint,
			       struct list_head *queue)
{
	int status = 0;
	struct htc_packet *packet, *tmp_pkt;
	struct htc_frame_hdr *htc_hdr;
	int i, n_msg;

	spin_lock_bh(&target->rx_lock);

	for (i = 0; i < msg; i++) {
		htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];

		if (htc_hdr->eid >= ENDPOINT_MAX) {
			ath6kl_err("invalid ep in look-ahead: %d\n",
				   htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->eid != endpoint->eid) {
			ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
				   htc_hdr->eid, endpoint->eid, i);
			status = -ENOMEM;
			break;
		}

		if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
			/* NOTE(review): payld_len is printed raw here
			 * (without le16_to_cpu) — harmless on LE hosts,
			 * misleading on BE; verify before relying on it */
			ath6kl_err("payload len %d exceeds max htc : %d !\n",
				   htc_hdr->payld_len,
				   (u32) HTC_MAX_PAYLOAD_LENGTH);
			status = -ENOMEM;
			break;
		}

		if (endpoint->svc_id == 0) {
			ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
			/*
			 * HTC header indicates that every packet to follow
			 * has the same padded length so that it can be
			 * optimally fetched as a full bundle.
			 */
			n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
				HTC_FLG_RX_BNDL_CNT_S;

			/* the count doesn't include the starter frame */
			n_msg++;
			if (n_msg > target->msg_per_bndl_max) {
				status = -ENOMEM;
				break;
			}

			endpoint->ep_st.rx_bundle_from_hdr += 1;
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx bundle pkts %d\n",
				   n_msg);
		} else
			/* HTC header only indicates 1 message to fetch */
			n_msg = 1;

		/* Setup packet buffers for each message */
		status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
					     queue, n_msg);

		/*
		 * This is due to unavailabilty of buffers to rx entire data.
		 * Return no error so that free buffers from queue can be used
		 * to receive partial data.
		 */
		if (status == -ENOSPC) {
			spin_unlock_bh(&target->rx_lock);
			return 0;
		}

		if (status)
			break;
	}

	spin_unlock_bh(&target->rx_lock);

	if (status) {
		/* undo: give back every buffer queued so far */
		list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
			list_del(&packet->list);
			htc_reclaim_rxbuf(target, packet,
					  &target->endpoint[packet->endpoint]);
		}
	}

	return status;
}

/*
 * RX callback for endpoint 0.  No unsolicited messages are expected there
 * outside of initialization, so any payload is logged as unexpected and
 * the buffer is reclaimed.
 */
static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
{
	if (packets->endpoint != ENDPOINT_0) {
		WARN_ON(1);
		return;
	}

	if (packets->status == -ECANCELED) {
		reclaim_rx_ctrl_buf(context, packets);
		return;
	}

	if (packets->act_len > 0) {
		/* NOTE(review): %zu assumes the sum promotes to size_t;
		 * act_len looks like a plain integer field — verify the
		 * format specifier matches on all targets */
		ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
			   packets->act_len + HTC_HDR_LENGTH);

		ath6kl_dbg_dump(ATH6KL_DBG_HTC,
				"htc rx unexpected endpoint 0 message", "",
				packets->buf - HTC_HDR_LENGTH,
				packets->act_len + HTC_HDR_LENGTH);
	}

	htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
}

/*
 * Process a credit report record: credit the reported endpoints, refresh
 * their queue depths, run credit distribution if any non-EP0 credits came
 * back, and finally kick any stalled TX queues.
 */
static void htc_proc_cred_rpt(struct htc_target *target,
			      struct htc_credit_report *rpt,
			      int n_entries,
			      enum htc_endpoint_id from_ep)
{
	struct htc_endpoint *endpoint;
	int tot_credits = 0, i;
	bool dist = false;

	spin_lock_bh(&target->tx_lock);

	for (i = 0; i < n_entries; i++, rpt++) {
		if (rpt->eid >= ENDPOINT_MAX) {
			WARN_ON(1);
			spin_unlock_bh(&target->tx_lock);
			return;
		}

		endpoint = &target->endpoint[rpt->eid];

		ath6kl_dbg(ATH6KL_DBG_CREDIT,
			   "credit report ep %d credits %d\n",
			   rpt->eid, rpt->credits);

		endpoint->ep_st.tx_cred_rpt += 1;
		endpoint->ep_st.cred_retnd += rpt->credits;

		if (from_ep == rpt->eid) {
			/*
			 * This credit report arrived on the same endpoint
			 * indicating it arrived in an RX packet.
			 */
			endpoint->ep_st.cred_from_rx += rpt->credits;
			endpoint->ep_st.cred_rpt_from_rx += 1;
		} else if (from_ep == ENDPOINT_0) {
			/* credit arrived on endpoint 0 as a NULL message */
			endpoint->ep_st.cred_from_ep0 += rpt->credits;
			endpoint->ep_st.cred_rpt_ep0 += 1;
		} else {
			endpoint->ep_st.cred_from_other += rpt->credits;
			endpoint->ep_st.cred_rpt_from_other += 1;
		}

		if (rpt->eid == ENDPOINT_0)
			/* always give endpoint 0 credits back */
			endpoint->cred_dist.credits += rpt->credits;
		else {
			endpoint->cred_dist.cred_to_dist += rpt->credits;
			dist = true;
		}

		/*
		 * Refresh tx depth for distribution function that will
		 * recover these credits NOTE: this is only valid when
		 * there are credits to recover!
		 */
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		tot_credits += rpt->credits;
	}

	if (dist) {
		/*
		 * This was a credit return based on a completed send
		 * operations note, this is done with the lock held
		 */
		ath6kl_credit_distribute(target->credit_info,
					 &target->cred_dist_list,
					 HTC_CREDIT_DIST_SEND_COMPLETE);
	}

	spin_unlock_bh(&target->tx_lock);

	if (tot_credits)
		htc_chk_ep_txq(target);
}

/*
 * Dispatch a single trailer record: credit reports, single lookaheads
 * (validated by the pre/post complement check) and bundled lookaheads.
 * Unknown record ids are logged and skipped, not treated as errors.
 */
static int htc_parse_trailer(struct htc_target *target,
			     struct htc_record_hdr *record,
			     u8 *record_buf, u32 *next_lk_ahds,
			     enum htc_endpoint_id endpoint,
			     int *n_lk_ahds)
{
	struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
	struct htc_lookahead_report *lk_ahd;
	int len;

	switch (record->rec_id) {
	case HTC_RECORD_CREDITS:
		len = record->len / sizeof(struct htc_credit_report);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		htc_proc_cred_rpt(target,
				  (struct htc_credit_report *) record_buf,
				  len, endpoint);
		break;
	case HTC_RECORD_LOOKAHEAD:
		len = record->len / sizeof(*lk_ahd);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		lk_ahd = (struct htc_lookahead_report *) record_buf;
		/* pre_valid must be the bitwise complement of post_valid
		 * for the lookahead to be trusted */
		if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
		    next_lk_ahds) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
				   lk_ahd->pre_valid, lk_ahd->post_valid);

			/* look ahead bytes are valid, copy them over */
			memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);

			ath6kl_dbg_dump(ATH6KL_DBG_HTC,
					"htc rx next look ahead",
					"", next_lk_ahds, 4);

			*n_lk_ahds = 1;
		}
		break;
	case HTC_RECORD_LOOKAHEAD_BUNDLE:
		len = record->len / sizeof(*bundle_lkahd_rpt);
		if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
			WARN_ON(1);
			return -EINVAL;
		}

		if (next_lk_ahds) {
			int i;

			bundle_lkahd_rpt =
				(struct htc_bundle_lkahd_rpt *) record_buf;

			ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
					"", record_buf, record->len);

			for (i = 0; i < len; i++) {
				memcpy((u8 *)&next_lk_ahds[i],
				       bundle_lkahd_rpt->lk_ahd, 4);
				bundle_lkahd_rpt++;
			}

			*n_lk_ahds = i;
		}
		break;
	default:
		ath6kl_err("unhandled record: id:%d len:%d\n",
			   record->rec_id, record->len);
		break;
	}

	return 0;
}

/*
 * Walk the byte-aligned record list in an RX trailer and parse each
 * record.  Stops (and dumps the trailer) on the first malformed record.
 */
static int htc_proc_trailer(struct htc_target *target,
			    u8 *buf, int len, u32 *next_lk_ahds,
			    int *n_lk_ahds, enum htc_endpoint_id endpoint)
{
	struct htc_record_hdr *record;
	int orig_len;
	int status;
	u8 *record_buf;
	u8 *orig_buf;

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
	ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);

	orig_buf = buf;
	orig_len = len;
	status = 0;

	while (len > 0) {
		if (len < sizeof(struct htc_record_hdr)) {
			status = -ENOMEM;
			break;
		}
		/* these are byte aligned structs */
		record = (struct htc_record_hdr *) buf;
		len -= sizeof(struct htc_record_hdr);
		buf += sizeof(struct htc_record_hdr);

		if (record->len > len) {
			ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
				   record->len, record->rec_id, len);
			status = -ENOMEM;
			break;
		}
		record_buf = buf;

		status = htc_parse_trailer(target, record, record_buf,
					   next_lk_ahds, endpoint, n_lk_ahds);

		if (status)
			break;

		/* advance buffer past this record for next time around */
		buf += record->len;
		len -= record->len;
	}

	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
				"", orig_buf, orig_len);

	return status;
}

/*
 * Validate a received HTC header against the expected lookahead, process
 * any trailer records, then strip the HTC header from the packet.
 */
static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
				     struct htc_packet *packet,
				     u32 *next_lkahds, int *n_lkahds)
{
	int
status = 0;
	u16 payload_len;
	u32 lk_ahd;
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;

	if (n_lkahds != NULL)
		*n_lkahds = 0;

	/*
	 * NOTE: we cannot assume the alignment of buf, so we use the safe
	 * macros to retrieve 16 bit fields.
	 */
	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

	memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));

	if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
		/*
		 * Refresh the expected header and the actual length as it
		 * was unknown when this packet was grabbed as part of the
		 * bundle.
		 */
		packet->info.rx.exp_hdr = lk_ahd;
		packet->act_len = payload_len + HTC_HDR_LENGTH;

		/* validate the actual header that was refreshed */
		if (packet->act_len > packet->buf_len) {
			ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
				   payload_len, lk_ahd);
			/*
			 * Limit this to max buffer just to print out some
			 * of the buffer.
			 */
			packet->act_len = min(packet->act_len, packet->buf_len);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->endpoint != htc_hdr->eid) {
			ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
				   htc_hdr->eid, packet->endpoint);
			status = -ENOMEM;
			goto fail_rx;
		}
	}

	/* the first 4 header bytes must match the lookahead announced
	 * for this packet */
	if (lk_ahd != packet->info.rx.exp_hdr) {
		ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
			   __func__, packet, packet->info.rx.rx_flags);
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
				"", &packet->info.rx.exp_hdr, 4);
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
				"", (u8 *)&lk_ahd, sizeof(lk_ahd));
		status = -ENOMEM;
		goto fail_rx;
	}

	if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
		/* ctrl[0] carries the trailer length; it must fit inside
		 * the payload and hold at least one record header */
		if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
		    htc_hdr->ctrl[0] > payload_len) {
			ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
				   __func__, payload_len, htc_hdr->ctrl[0]);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
			next_lkahds = NULL;
			n_lkahds = NULL;
		}

		status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
					  + payload_len - htc_hdr->ctrl[0],
					  htc_hdr->ctrl[0], next_lkahds,
					  n_lkahds, packet->endpoint);

		if (status)
			goto fail_rx;

		packet->act_len -= htc_hdr->ctrl[0];
	}

	/* strip the HTC header before handing the payload upward */
	packet->buf += HTC_HDR_LENGTH;
	packet->act_len -= HTC_HDR_LENGTH;

fail_rx:
	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
				"", packet->buf, packet->act_len);

	return status;
}

/* Deliver a completed RX packet to the endpoint owner's rx callback. */
static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
				   struct htc_packet *packet)
{
	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx complete ep %d packet 0x%p\n",
		   endpoint->eid, packet);

	endpoint->ep_cb.rx(endpoint->target, packet);
}

/*
 * Fetch a bundle of RX packets with a single scatter request.  Packets
 * that fit move from rxq to sync_compq; *n_pkt_fetched reports how many
 * were actually fetched.
 */
static int ath6kl_htc_rx_bundle(struct htc_target *target,
				struct list_head *rxq,
				struct list_head *sync_compq,
				int *n_pkt_fetched, bool part_bundle)
{
	struct hif_scatter_req *scat_req;
	struct htc_packet *packet;
	int rem_space = target->max_rx_bndl_sz;
	int n_scat_pkt, status = 0, i, len;

	n_scat_pkt = get_queue_depth(rxq);
	n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);

	if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
		/*
		 * We were forced to split this bundle receive operation
		 * all packets in this partial bundle must have their
		 * lookaheads ignored.
		 */
		part_bundle = true;

		/*
		 * This would only happen if the target ignored our max
		 * bundle limit.
		 */
		ath6kl_warn("%s(): partial bundle detected num:%d , %d\n",
			    __func__, get_queue_depth(rxq), n_scat_pkt);
	}

	len = 0;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx bundle depth %d pkts %d\n",
		   get_queue_depth(rxq), n_scat_pkt);

	scat_req = hif_scatter_req_get(target->dev->ar);

	if (scat_req == NULL)
		goto fail_rx_pkt;

	for (i = 0; i < n_scat_pkt; i++) {
		int pad_len;

		packet = list_first_entry(rxq, struct htc_packet, list);
		list_del(&packet->list);

		pad_len = CALC_TXRX_PADDED_LEN(target,
					       packet->act_len);

		if ((rem_space - pad_len) < 0) {
			/* no room left in this bundle: put the packet
			 * back and stop filling the scatter list */
			list_add(&packet->list, rxq);
			break;
		}

		rem_space -= pad_len;

		if (part_bundle || (i < (n_scat_pkt - 1)))
			/*
			 * Packet 0..n-1 cannot be checked for look-aheads
			 * since we are fetching a bundle the last packet
			 * however can have it's lookahead used
			 */
			packet->info.rx.rx_flags |=
				HTC_RX_PKT_IGNORE_LOOKAHEAD;

		/* NOTE: 1 HTC packet per scatter entry */
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = pad_len;

		packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;

		list_add_tail(&packet->list, sync_compq);

		WARN_ON(!scat_req->scat_list[i].len);
		len += scat_req->scat_list[i].len;
	}

	scat_req->len = len;
	scat_req->scat_entries = i;

	status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);

	if (!status)
		*n_pkt_fetched = i;

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

fail_rx_pkt:

	return status;
}

/*
 * Post-process a queue of fetched RX packets: validate each header,
 * set the "more packets" hint, update stats and deliver to the owner.
 * Stops and returns on the first header-processing failure.
 */
static int ath6kl_htc_rx_process_packets(struct htc_target *target,
					 struct list_head *comp_pktq,
					 u32 lk_ahds[],
					 int *n_lk_ahd)
{
	struct htc_packet *packet, *tmp_pkt;
	struct htc_endpoint *ep;
	int status = 0;

	list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
		ep = &target->endpoint[packet->endpoint];

		trace_ath6kl_htc_rx(packet->status, packet->endpoint,
				    packet->buf, packet->act_len);

		/* process header for each of the recv packet */
		status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
						   n_lk_ahd);
		if (status)
			return status;

		list_del(&packet->list);

		if (list_empty(comp_pktq)) {
			/*
			 * Last packet's more packet flag is set
			 * based on the lookahead.
			 */
			if (*n_lk_ahd > 0)
				ath6kl_htc_rx_set_indicate(lk_ahds[0],
							   ep, packet);
		} else
			/*
			 * Packets in a bundle automatically have
			 * this flag set.
			 */
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;

		ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);

		if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
			ep->ep_st.rx_bundl += 1;

		ath6kl_htc_rx_complete(ep, packet);
	}

	return status;
}

/*
 * Fetch all packets on rx_pktq from the target — bundled via scatter
 * request when enabled and worthwhile, otherwise one synchronous read at
 * a time — moving completed packets to comp_pktq.
 */
static int ath6kl_htc_rx_fetch(struct htc_target *target,
			       struct list_head *rx_pktq,
			       struct list_head *comp_pktq)
{
	int fetched_pkts;
	bool part_bundle = false;
	int status = 0;
	struct list_head tmp_rxq;
	struct htc_packet *packet, *tmp_pkt;

	/* now go fetch the list of HTC packets */
	while (!list_empty(rx_pktq)) {
		fetched_pkts = 0;

		INIT_LIST_HEAD(&tmp_rxq);

		if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
			/*
			 * There are enough packets to attempt a
			 * bundle transfer and recv bundling is
			 * allowed.
			 */
			status = ath6kl_htc_rx_bundle(target, rx_pktq,
						      &tmp_rxq,
						      &fetched_pkts,
						      part_bundle);
			if (status)
				goto fail_rx;

			if (!list_empty(rx_pktq))
				part_bundle = true;

			list_splice_tail_init(&tmp_rxq, comp_pktq);
		}

		if (!fetched_pkts) {
			packet = list_first_entry(rx_pktq, struct htc_packet,
						  list);

			/* fully synchronous */
			packet->completion = NULL;

			if (!list_is_singular(rx_pktq))
				/*
				 * look_aheads in all packet
				 * except the last one in the
				 * bundle must be ignored
				 */
				packet->info.rx.rx_flags |=
					HTC_RX_PKT_IGNORE_LOOKAHEAD;

			/* go fetch the packet */
			status = ath6kl_htc_rx_packet(target, packet,
						      packet->act_len);

			list_move_tail(&packet->list, &tmp_rxq);

			if (status)
				goto fail_rx;

			list_splice_tail_init(&tmp_rxq, comp_pktq);
		}
	}

	return 0;

fail_rx:

	/*
	 * Cleanup any packets we allocated but didn't use to
	 * actually fetch any packets.
	 */
	list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
		list_del(&packet->list);
		htc_reclaim_rxbuf(target, packet,
				  &target->endpoint[packet->endpoint]);
	}

	list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
		list_del(&packet->list);
		htc_reclaim_rxbuf(target, packet,
				  &target->endpoint[packet->endpoint]);
	}

	return status;
}

/*
 * Main RX pump: starting from one lookahead, repeatedly allocate buffers,
 * fetch the announced packets (possibly bundles) and deliver them,
 * looping while trailer records keep yielding fresh lookaheads.
 * *num_pkts returns the total number of packets fetched.
 */
int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
				     u32 msg_look_ahead, int *num_pkts)
{
	struct htc_packet *packets, *tmp_pkt;
	struct htc_endpoint *endpoint;
	struct list_head rx_pktq, comp_pktq;
	int status = 0;
	u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
	int num_look_ahead = 1;
	enum htc_endpoint_id id;
	int n_fetched = 0;

	INIT_LIST_HEAD(&comp_pktq);
	*num_pkts = 0;

	/*
	 * On first entry copy the look_aheads into our temp array for
	 * processing
	 */
	look_aheads[0] = msg_look_ahead;

	while (true) {
		/*
		 * First lookahead sets the expected endpoint IDs for all
		 * packets in a bundle.
		 */
		id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
		endpoint = &target->endpoint[id];

		if (id >= ENDPOINT_MAX) {
			ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
				   id);
			status = -ENOMEM;
			break;
		}

		INIT_LIST_HEAD(&rx_pktq);
		INIT_LIST_HEAD(&comp_pktq);

		/*
		 * Try to allocate as many HTC RX packets indicated by the
		 * look_aheads.
		 */
		status = ath6kl_htc_rx_alloc(target, look_aheads,
					     num_look_ahead, endpoint,
					     &rx_pktq);
		if (status)
			break;

		if (get_queue_depth(&rx_pktq) >= 2)
			/*
			 * A recv bundle was detected, force IRQ status
			 * re-check again
			 */
			target->chk_irq_status_cnt = 1;

		n_fetched += get_queue_depth(&rx_pktq);

		num_look_ahead = 0;

		status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);

		if (!status)
			ath6kl_htc_rx_chk_water_mark(endpoint);

		/* Process fetched packets */
		status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
						       look_aheads,
						       &num_look_ahead);

		if (!num_look_ahead || status)
			break;

		/*
		 * For SYNCH processing, if we get here, we are running
		 * through the loop again due to a detected lookahead. Set
		 * flag that we should re-check IRQ status registers again
		 * before leaving IRQ processing, this can net better
		 * performance in high throughput situations.
		 */
		target->chk_irq_status_cnt = 1;
	}

	if (status) {
		ath6kl_err("failed to get pending recv messages: %d\n",
			   status);

		/* cleanup any packets in sync completion queue */
		list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
			list_del(&packets->list);
			htc_reclaim_rxbuf(target, packets,
					  &target->endpoint[packets->endpoint]);
		}

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
			ath6kl_hif_rx_control(target->dev, false);
		}
	}

	/*
	 * Before leaving, check to see if host ran out of buffers and
	 * needs to stop the receiver.
	 */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
		ath6kl_hif_rx_control(target->dev, false);
	}

	*num_pkts = n_fetched;

	return status;
}

/*
 * Synchronously wait for a control message from the target,
 * This function is used at initialization time ONLY. At init messages
 * on ENDPOINT 0 are expected.
 */
static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_frame_hdr *htc_hdr;
	u32 look_ahead;

	/* poll the mailbox until a lookahead shows up or we time out */
	if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
				       HTC_TARGET_RESPONSE_TIMEOUT))
		return NULL;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);

	htc_hdr = (struct htc_frame_hdr *)&look_ahead;

	if (htc_hdr->eid != ENDPOINT_0)
		return NULL;

	packet = htc_get_control_buf(target, false);

	if (!packet)
		return NULL;

	packet->info.rx.rx_flags = 0;
	packet->info.rx.exp_hdr = look_ahead;
	packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;

	if (packet->act_len > packet->buf_len)
		goto fail_ctrl_rx;

	/* we want synchronous operation */
	packet->completion = NULL;

	/* get the message from the device, this will block */
	if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
		goto fail_ctrl_rx;

	trace_ath6kl_htc_rx(packet->status, packet->endpoint,
			    packet->buf, packet->act_len);

	/* process receive header */
	packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);

	if (packet->status) {
		ath6kl_err("htc_wait_for_ctrl_msg, ath6kl_htc_rx_process_hdr failed (status = %d)\n",
			   packet->status);
		goto fail_ctrl_rx;
	}

	return packet;

fail_ctrl_rx:
	if (packet != NULL) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return NULL;
}

/*
 * Add a queue of RX buffers (all for the same endpoint — only the first
 * packet's endpoint is checked) to that endpoint's pool, unblocking the
 * receiver if it was stalled waiting for buffers on that endpoint.
 */
static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
					      struct list_head *pkt_queue)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *first_pkt;
	bool rx_unblock = false;
	int status = 0, depth;

	if (list_empty(pkt_queue))
		return -ENOMEM;

	first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);

	if (first_pkt->endpoint >= ENDPOINT_MAX)
		return status;

	depth = get_queue_depth(pkt_queue);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx add multiple ep id %d cnt %d len %d\n",
		   first_pkt->endpoint, depth, first_pkt->buf_len);

	endpoint = &target->endpoint[first_pkt->endpoint];

	if (target->htc_flags & HTC_OP_STATE_STOPPING) {
		struct htc_packet *packet, *tmp_pkt;

		/* walk through queue and mark each one canceled */
		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
			packet->status = -ECANCELED;
			list_del(&packet->list);
			ath6kl_htc_rx_complete(endpoint, packet);
		}

		return status;
	}

	spin_lock_bh(&target->rx_lock);

	list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);

	/* check if we are blocked waiting for a new buffer */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		if (target->ep_waiting == first_pkt->endpoint) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx blocked on ep %d, unblocking\n",
				   target->ep_waiting);
			target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ENDPOINT_MAX;
			rx_unblock = true;
		}
	}

	spin_unlock_bh(&target->rx_lock);

	if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
		/* TODO : implement a buffer threshold count? */
		ath6kl_hif_rx_control(target->dev, true);

	return status;
}

/*
 * Free every queued RX buffer on every connected endpoint.  The rx_lock
 * is dropped around each free since kfree/dev_kfree_skb must not be
 * called with it held in this path's style; the list iteration restarts
 * safely because entries are deleted before the unlock.
 */
static void ath6kl_htc_mbox_flush_rx_buf(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet, *tmp_pkt;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		if (!endpoint->svc_id)
			/* not in use.. */
			continue;

		spin_lock_bh(&target->rx_lock);
		list_for_each_entry_safe(packet, tmp_pkt,
					 &endpoint->rx_bufq, list) {
			list_del(&packet->list);
			spin_unlock_bh(&target->rx_lock);
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx flush pkt 0x%p len %d ep %d\n",
				   packet, packet->buf_len,
				   packet->endpoint);
			/*
			 * packets in rx_bufq of endpoint 0 have originally
			 * been queued from target->free_ctrl_rxbuf where
			 * packet and packet->buf_start are allocated
			 * separately using kmalloc(). For other endpoint
			 * rx_bufq, it is allocated as skb where packet is
			 * skb->head. Take care of this difference while freeing
			 * the memory.
			 */
			if (packet->endpoint == ENDPOINT_0) {
				kfree(packet->buf_start);
				kfree(packet);
			} else {
				dev_kfree_skb(packet->pkt_cntxt);
			}
			spin_lock_bh(&target->rx_lock);
		}
		spin_unlock_bh(&target->rx_lock);
	}
}

/*
 * Connect a service: endpoint 0 is assigned directly for the pseudo
 * control service; any other service is negotiated with the target via a
 * synchronous connect-request / connect-response exchange.
 */
static int ath6kl_htc_mbox_conn_service(struct htc_target *target,
					struct htc_service_connect_req *conn_req,
					struct htc_service_connect_resp *conn_resp)
{
	struct htc_packet *rx_pkt = NULL;
	struct htc_packet *tx_pkt = NULL;
	struct htc_conn_service_resp *resp_msg;
	struct htc_conn_service_msg *conn_msg;
	struct htc_endpoint *endpoint;
	enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
	unsigned int max_msg_sz = 0;
	int status = 0;
	u16 msg_id;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc connect service target 0x%p service id 0x%x\n",
		   target, conn_req->svc_id);

	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
		/* special case for pseudo control service */
		assigned_ep = ENDPOINT_0;
		max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
	} else {
		/* allocate a packet to send to the target */
		tx_pkt = htc_get_control_buf(target, true);

		if (!tx_pkt)
			return -ENOMEM;

		conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
		memset(conn_msg, 0, sizeof(*conn_msg));
		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);

		set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
				 sizeof(*conn_msg) + conn_msg->svc_meta_len,
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

		/* we want synchronous operation */
		tx_pkt->completion = NULL;
		ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0);
		status = ath6kl_htc_tx_issue(target, tx_pkt);

		if (status)
			goto fail_tx;

		/* wait for response */
		rx_pkt = htc_wait_for_ctrl_msg(target);

		if (!rx_pkt) {
			status = -ENOMEM;
			goto fail_tx;
		}

		resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;

		msg_id = le16_to_cpu(resp_msg->msg_id);

		if ((msg_id != HTC_MSG_CONN_SVC_RESP_ID) ||
		    (rx_pkt->act_len < sizeof(*resp_msg))) {
			status = -ENOMEM;
			goto fail_tx;
		}

		conn_resp->resp_code = resp_msg->status;
		/* check response status */
		if
(resp_msg->status != HTC_SERVICE_SUCCESS) { ath6kl_err("target failed service 0x%X connect request (status:%d)\n", resp_msg->svc_id, resp_msg->status); status = -ENOMEM; goto fail_tx; } assigned_ep = (enum htc_endpoint_id)resp_msg->eid; max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz); } if (WARN_ON_ONCE(assigned_ep == ENDPOINT_UNUSED || assigned_ep >= ENDPOINT_MAX || !max_msg_sz)) { status = -ENOMEM; goto fail_tx; } endpoint = &target->endpoint[assigned_ep]; endpoint->eid = assigned_ep; if (endpoint->svc_id) { status = -ENOMEM; goto fail_tx; } /* return assigned endpoint to caller */ conn_resp->endpoint = assigned_ep; conn_resp->len_max = max_msg_sz; /* setup the endpoint */ /* this marks the endpoint in use */ endpoint->svc_id = conn_req->svc_id; endpoint->max_txq_depth = conn_req->max_txq_depth; endpoint->len_max = max_msg_sz; endpoint->ep_cb = conn_req->ep_cb; endpoint->cred_dist.svc_id = conn_req->svc_id; endpoint->cred_dist.htc_ep = endpoint; endpoint->cred_dist.endpoint = assigned_ep; endpoint->cred_dist.cred_sz = target->tgt_cred_sz; switch (endpoint->svc_id) { case WMI_DATA_BK_SVC: endpoint->tx_drop_packet_threshold = MAX_DEF_COOKIE_NUM / 3; break; default: endpoint->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM; break; } if (conn_req->max_rxmsg_sz) { /* * Override cred_per_msg calculation, this optimizes * the credit-low indications since the host will actually * issue smaller messages in the Send path. 
*/ if (conn_req->max_rxmsg_sz > max_msg_sz) { status = -ENOMEM; goto fail_tx; } endpoint->cred_dist.cred_per_msg = conn_req->max_rxmsg_sz / target->tgt_cred_sz; } else endpoint->cred_dist.cred_per_msg = max_msg_sz / target->tgt_cred_sz; if (!endpoint->cred_dist.cred_per_msg) endpoint->cred_dist.cred_per_msg = 1; /* save local connection flags */ endpoint->conn_flags = conn_req->flags; fail_tx: if (tx_pkt) htc_reclaim_txctrl_buf(target, tx_pkt); if (rx_pkt) { htc_rxpkt_reset(rx_pkt); reclaim_rx_ctrl_buf(target, rx_pkt); } return status; } static void reset_ep_state(struct htc_target *target) { struct htc_endpoint *endpoint; int i; for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) { endpoint = &target->endpoint[i]; memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist)); endpoint->svc_id = 0; endpoint->len_max = 0; endpoint->max_txq_depth = 0; memset(&endpoint->ep_st, 0, sizeof(endpoint->ep_st)); INIT_LIST_HEAD(&endpoint->rx_bufq); INIT_LIST_HEAD(&endpoint->txq); endpoint->target = target; } /* reset distribution list */ /* FIXME: free existing entries */ INIT_LIST_HEAD(&target->cred_dist_list); } static int ath6kl_htc_mbox_get_rxbuf_num(struct htc_target *target, enum htc_endpoint_id endpoint) { int num; spin_lock_bh(&target->rx_lock); num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq)); spin_unlock_bh(&target->rx_lock); return num; } static void htc_setup_msg_bndl(struct htc_target *target) { /* limit what HTC can handle */ target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE, target->msg_per_bndl_max); if (ath6kl_hif_enable_scatter(target->dev->ar)) { target->msg_per_bndl_max = 0; return; } /* limit bundle what the device layer can handle */ target->msg_per_bndl_max = min(target->max_scat_entries, target->msg_per_bndl_max); ath6kl_dbg(ATH6KL_DBG_BOOT, "htc bundling allowed msg_per_bndl_max %d\n", target->msg_per_bndl_max); /* Max rx bundle size is limited by the max tx bundle size */ target->max_rx_bndl_sz = target->max_xfer_szper_scatreq; /* Max tx 
bundle size if limited by the extended mbox address range */ target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH, target->max_xfer_szper_scatreq); ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n", target->max_rx_bndl_sz, target->max_tx_bndl_sz); if (target->max_tx_bndl_sz) /* tx_bndl_mask is enabled per AC, each has 1 bit */ target->tx_bndl_mask = (1 << WMM_NUM_AC) - 1; if (target->max_rx_bndl_sz) target->rx_bndl_enable = true; if ((target->tgt_cred_sz % target->block_sz) != 0) { ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n", target->tgt_cred_sz); /* * Disallow send bundling since the credit size is * not aligned to a block size the I/O block * padding will spill into the next credit buffer * which is fatal. */ target->tx_bndl_mask = 0; } } static int ath6kl_htc_mbox_wait_target(struct htc_target *target) { struct htc_packet *packet = NULL; struct htc_ready_ext_msg *rdy_msg; struct htc_service_connect_req connect; struct htc_service_connect_resp resp; int status; /* we should be getting 1 control message that the target is ready */ packet = htc_wait_for_ctrl_msg(target); if (!packet) return -ENOMEM; /* we controlled the buffer creation so it's properly aligned */ rdy_msg = (struct htc_ready_ext_msg *)packet->buf; if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) || (packet->act_len < sizeof(struct htc_ready_msg))) { status = -ENOMEM; goto fail_wait_target; } if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) { status = -ENOMEM; goto fail_wait_target; } target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt); target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz); ath6kl_dbg(ATH6KL_DBG_BOOT, "htc target ready credits %d size %d\n", target->tgt_creds, target->tgt_cred_sz); /* check if this is an extended ready message */ if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) { /* this is an extended message */ target->htc_tgt_ver = rdy_msg->htc_ver; 
target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl; } else { /* legacy */ target->htc_tgt_ver = HTC_VERSION_2P0; target->msg_per_bndl_max = 0; } ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n", (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1", target->htc_tgt_ver); if (target->msg_per_bndl_max > 0) htc_setup_msg_bndl(target); /* setup our pseudo HTC control endpoint connection */ memset(&connect, 0, sizeof(connect)); memset(&resp, 0, sizeof(resp)); connect.ep_cb.rx = htc_ctrl_rx; connect.ep_cb.rx_refill = NULL; connect.ep_cb.tx_full = NULL; connect.max_txq_depth = NUM_CONTROL_BUFFERS; connect.svc_id = HTC_CTRL_RSVD_SVC; /* connect fake service */ status = ath6kl_htc_mbox_conn_service((void *)target, &connect, &resp); if (status) /* * FIXME: this call doesn't make sense, the caller should * call ath6kl_htc_mbox_cleanup() when it wants remove htc */ ath6kl_hif_cleanup_scatter(target->dev->ar); fail_wait_target: if (packet) { htc_rxpkt_reset(packet); reclaim_rx_ctrl_buf(target, packet); } return status; } /* * Start HTC, enable interrupts and let the target know * host has finished setup. 
*/ static int ath6kl_htc_mbox_start(struct htc_target *target) { struct htc_packet *packet; int status; memset(&target->dev->irq_proc_reg, 0, sizeof(target->dev->irq_proc_reg)); /* Disable interrupts at the chip level */ ath6kl_hif_disable_intrs(target->dev); target->htc_flags = 0; target->rx_st_flags = 0; /* Push control receive buffers into htc control endpoint */ while ((packet = htc_get_control_buf(target, false)) != NULL) { status = htc_add_rxbuf(target, packet); if (status) return status; } /* NOTE: the first entry in the distribution list is ENDPOINT_0 */ ath6kl_credit_init(target->credit_info, &target->cred_dist_list, target->tgt_creds); dump_cred_dist_stats(target); /* Indicate to the target of the setup completion */ status = htc_setup_tx_complete(target); if (status) return status; /* unmask interrupts */ status = ath6kl_hif_unmask_intrs(target->dev); if (status) ath6kl_htc_mbox_stop(target); return status; } static int ath6kl_htc_reset(struct htc_target *target) { u32 block_size, ctrl_bufsz; struct htc_packet *packet; int i; reset_ep_state(target); block_size = target->dev->ar->mbox_info.block_size; ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ? 
(block_size + HTC_HDR_LENGTH) : (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH); for (i = 0; i < NUM_CONTROL_BUFFERS; i++) { packet = kzalloc(sizeof(*packet), GFP_KERNEL); if (!packet) return -ENOMEM; packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL); if (!packet->buf_start) { kfree(packet); return -ENOMEM; } packet->buf_len = ctrl_bufsz; if (i < NUM_CONTROL_RX_BUFFERS) { packet->act_len = 0; packet->buf = packet->buf_start; packet->endpoint = ENDPOINT_0; list_add_tail(&packet->list, &target->free_ctrl_rxbuf); } else { list_add_tail(&packet->list, &target->free_ctrl_txbuf); } } return 0; } /* htc_stop: stop interrupt reception, and flush all queued buffers */ static void ath6kl_htc_mbox_stop(struct htc_target *target) { spin_lock_bh(&target->htc_lock); target->htc_flags |= HTC_OP_STATE_STOPPING; spin_unlock_bh(&target->htc_lock); /* * Masking interrupts is a synchronous operation, when this * function returns all pending HIF I/O has completed, we can * safely flush the queues. */ ath6kl_hif_mask_intrs(target->dev); ath6kl_htc_flush_txep_all(target); ath6kl_htc_mbox_flush_rx_buf(target); ath6kl_htc_reset(target); } static void *ath6kl_htc_mbox_create(struct ath6kl *ar) { struct htc_target *target = NULL; int status = 0; target = kzalloc(sizeof(*target), GFP_KERNEL); if (!target) { ath6kl_err("unable to allocate memory\n"); return NULL; } target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL); if (!target->dev) { ath6kl_err("unable to allocate memory\n"); status = -ENOMEM; goto err_htc_cleanup; } spin_lock_init(&target->htc_lock); spin_lock_init(&target->rx_lock); spin_lock_init(&target->tx_lock); INIT_LIST_HEAD(&target->free_ctrl_txbuf); INIT_LIST_HEAD(&target->free_ctrl_rxbuf); INIT_LIST_HEAD(&target->cred_dist_list); target->dev->ar = ar; target->dev->htc_cnxt = target; target->ep_waiting = ENDPOINT_MAX; status = ath6kl_hif_setup(target->dev); if (status) goto err_htc_cleanup; status = ath6kl_htc_reset(target); if (status) goto err_htc_cleanup; return target; 
err_htc_cleanup: ath6kl_htc_mbox_cleanup(target); return NULL; } /* cleanup the HTC instance */ static void ath6kl_htc_mbox_cleanup(struct htc_target *target) { struct htc_packet *packet, *tmp_packet; ath6kl_hif_cleanup_scatter(target->dev->ar); list_for_each_entry_safe(packet, tmp_packet, &target->free_ctrl_txbuf, list) { list_del(&packet->list); kfree(packet->buf_start); kfree(packet); } list_for_each_entry_safe(packet, tmp_packet, &target->free_ctrl_rxbuf, list) { list_del(&packet->list); kfree(packet->buf_start); kfree(packet); } kfree(target->dev); kfree(target); } static const struct ath6kl_htc_ops ath6kl_htc_mbox_ops = { .create = ath6kl_htc_mbox_create, .wait_target = ath6kl_htc_mbox_wait_target, .start = ath6kl_htc_mbox_start, .conn_service = ath6kl_htc_mbox_conn_service, .tx = ath6kl_htc_mbox_tx, .stop = ath6kl_htc_mbox_stop, .cleanup = ath6kl_htc_mbox_cleanup, .flush_txep = ath6kl_htc_mbox_flush_txep, .flush_rx_buf = ath6kl_htc_mbox_flush_rx_buf, .activity_changed = ath6kl_htc_mbox_activity_changed, .get_rxbuf_num = ath6kl_htc_mbox_get_rxbuf_num, .add_rxbuf_multiple = ath6kl_htc_mbox_add_rxbuf_multiple, .credit_setup = ath6kl_htc_mbox_credit_setup, }; void ath6kl_htc_mbox_attach(struct ath6kl *ar) { ar->htc_ops = &ath6kl_htc_mbox_ops; }
gpl-2.0
dmeadows013/furry-hipster
fs/xfs/xfs_vnodeops.c
1600
71797
/* * Copyright (c) 2000-2006 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_types.h" #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_inum.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_dir2.h" #include "xfs_mount.h" #include "xfs_da_btree.h" #include "xfs_bmap_btree.h" #include "xfs_ialloc_btree.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_inode_item.h" #include "xfs_itable.h" #include "xfs_ialloc.h" #include "xfs_alloc.h" #include "xfs_bmap.h" #include "xfs_acl.h" #include "xfs_attr.h" #include "xfs_rw.h" #include "xfs_error.h" #include "xfs_quota.h" #include "xfs_utils.h" #include "xfs_rtalloc.h" #include "xfs_trans_space.h" #include "xfs_log_priv.h" #include "xfs_filestream.h" #include "xfs_vnodeops.h" #include "xfs_trace.h" int xfs_setattr( struct xfs_inode *ip, struct iattr *iattr, int flags) { xfs_mount_t *mp = ip->i_mount; struct inode *inode = VFS_I(ip); int mask = iattr->ia_valid; xfs_trans_t *tp; int code; uint lock_flags; uint commit_flags=0; uid_t uid=0, iuid=0; gid_t gid=0, igid=0; struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2; int need_iolock = 1; trace_xfs_setattr(ip); if (mp->m_flags & XFS_MOUNT_RDONLY) return XFS_ERROR(EROFS); if (XFS_FORCED_SHUTDOWN(mp)) return XFS_ERROR(EIO); code = -inode_change_ok(inode, iattr); 
if (code) return code; olddquot1 = olddquot2 = NULL; udqp = gdqp = NULL; /* * If disk quotas is on, we make sure that the dquots do exist on disk, * before we start any other transactions. Trying to do this later * is messy. We don't care to take a readlock to look at the ids * in inode here, because we can't hold it across the trans_reserve. * If the IDs do change before we take the ilock, we're covered * because the i_*dquot fields will get updated anyway. */ if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) { uint qflags = 0; if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) { uid = iattr->ia_uid; qflags |= XFS_QMOPT_UQUOTA; } else { uid = ip->i_d.di_uid; } if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) { gid = iattr->ia_gid; qflags |= XFS_QMOPT_GQUOTA; } else { gid = ip->i_d.di_gid; } /* * We take a reference when we initialize udqp and gdqp, * so it is important that we never blindly double trip on * the same variable. See xfs_create() for an example. */ ASSERT(udqp == NULL); ASSERT(gdqp == NULL); code = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip), qflags, &udqp, &gdqp); if (code) return code; } /* * For the other attributes, we acquire the inode lock and * first do an error checking pass. */ tp = NULL; lock_flags = XFS_ILOCK_EXCL; if (flags & XFS_ATTR_NOLOCK) need_iolock = 0; if (!(mask & ATTR_SIZE)) { tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); commit_flags = 0; code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0); if (code) { lock_flags = 0; goto error_return; } } else { if (need_iolock) lock_flags |= XFS_IOLOCK_EXCL; } xfs_ilock(ip, lock_flags); /* * Change file ownership. Must be the owner or privileged. */ if (mask & (ATTR_UID|ATTR_GID)) { /* * These IDs could have changed since we last looked at them. * But, we're assured that if the ownership did change * while we didn't have the inode locked, inode's dquot(s) * would have changed also. */ iuid = ip->i_d.di_uid; igid = ip->i_d.di_gid; gid = (mask & ATTR_GID) ? 
iattr->ia_gid : igid; uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid; /* * Do a quota reservation only if uid/gid is actually * going to change. */ if (XFS_IS_QUOTA_RUNNING(mp) && ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) || (XFS_IS_GQUOTA_ON(mp) && igid != gid))) { ASSERT(tp); code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp, capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0); if (code) /* out of quota */ goto error_return; } } /* * Truncate file. Must have write permission and not be a directory. */ if (mask & ATTR_SIZE) { /* Short circuit the truncate case for zero length files */ if (iattr->ia_size == 0 && ip->i_size == 0 && ip->i_d.di_nextents == 0) { xfs_iunlock(ip, XFS_ILOCK_EXCL); lock_flags &= ~XFS_ILOCK_EXCL; if (mask & ATTR_CTIME) { inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb); xfs_mark_inode_dirty_sync(ip); } code = 0; goto error_return; } if (S_ISDIR(ip->i_d.di_mode)) { code = XFS_ERROR(EISDIR); goto error_return; } else if (!S_ISREG(ip->i_d.di_mode)) { code = XFS_ERROR(EINVAL); goto error_return; } /* * Make sure that the dquots are attached to the inode. */ code = xfs_qm_dqattach_locked(ip, 0); if (code) goto error_return; /* * Now we can make the changes. Before we join the inode * to the transaction, if ATTR_SIZE is set then take care of * the part of the truncation that must be done without the * inode lock. This needs to be done before joining the inode * to the transaction, because the inode cannot be unlocked * once it is a part of the transaction. */ if (iattr->ia_size > ip->i_size) { /* * Do the first part of growing a file: zero any data * in the last block that is beyond the old EOF. We * need to do this before the inode is joined to the * transaction to modify the i_size. 
*/ code = xfs_zero_eof(ip, iattr->ia_size, ip->i_size); if (code) goto error_return; } xfs_iunlock(ip, XFS_ILOCK_EXCL); lock_flags &= ~XFS_ILOCK_EXCL; /* * We are going to log the inode size change in this * transaction so any previous writes that are beyond the on * disk EOF and the new EOF that have not been written out need * to be written here. If we do not write the data out, we * expose ourselves to the null files problem. * * Only flush from the on disk size to the smaller of the in * memory file size or the new size as that's the range we * really care about here and prevents waiting for other data * not within the range we care about here. */ if (ip->i_size != ip->i_d.di_size && iattr->ia_size > ip->i_d.di_size) { code = xfs_flush_pages(ip, ip->i_d.di_size, iattr->ia_size, XBF_ASYNC, FI_NONE); if (code) goto error_return; } /* wait for all I/O to complete */ xfs_ioend_wait(ip); code = -block_truncate_page(inode->i_mapping, iattr->ia_size, xfs_get_blocks); if (code) goto error_return; tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE); code = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT); if (code) goto error_return; truncate_setsize(inode, iattr->ia_size); commit_flags = XFS_TRANS_RELEASE_LOG_RES; lock_flags |= XFS_ILOCK_EXCL; xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip); /* * Only change the c/mtime if we are changing the size * or we are explicitly asked to change it. This handles * the semantic difference between truncate() and ftruncate() * as implemented in the VFS. * * The regular truncate() case without ATTR_CTIME and ATTR_MTIME * is a special case where we need to update the times despite * not having these flags set. For all other operations the * VFS set these flags explicitly if it wants a timestamp * update. 
*/ if (iattr->ia_size != ip->i_size && (!(mask & (ATTR_CTIME | ATTR_MTIME)))) { iattr->ia_ctime = iattr->ia_mtime = current_fs_time(inode->i_sb); mask |= ATTR_CTIME | ATTR_MTIME; } if (iattr->ia_size > ip->i_size) { ip->i_d.di_size = iattr->ia_size; ip->i_size = iattr->ia_size; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); } else if (iattr->ia_size <= ip->i_size || (iattr->ia_size == 0 && ip->i_d.di_nextents)) { /* * signal a sync transaction unless * we're truncating an already unlinked * file on a wsync filesystem */ code = xfs_itruncate_finish(&tp, ip, iattr->ia_size, XFS_DATA_FORK, ((ip->i_d.di_nlink != 0 || !(mp->m_flags & XFS_MOUNT_WSYNC)) ? 1 : 0)); if (code) goto abort_return; /* * Truncated "down", so we're removing references * to old data here - if we now delay flushing for * a long time, we expose ourselves unduly to the * notorious NULL files problem. So, we mark this * vnode and flush it when the file is closed, and * do not wait the usual (long) time for writeout. */ xfs_iflags_set(ip, XFS_ITRUNCATED); } } else if (tp) { xfs_trans_ijoin(tp, ip); } /* * Change file ownership. Must be the owner or privileged. */ if (mask & (ATTR_UID|ATTR_GID)) { /* * CAP_FSETID overrides the following restrictions: * * The set-user-ID and set-group-ID bits of a file will be * cleared upon successful return from chown() */ if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) && !capable(CAP_FSETID)) { ip->i_d.di_mode &= ~(S_ISUID|S_ISGID); } /* * Change the ownerships and register quota modifications * in the transaction. 
*/ if (iuid != uid) { if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) { ASSERT(mask & ATTR_UID); ASSERT(udqp); olddquot1 = xfs_qm_vop_chown(tp, ip, &ip->i_udquot, udqp); } ip->i_d.di_uid = uid; inode->i_uid = uid; } if (igid != gid) { if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) { ASSERT(!XFS_IS_PQUOTA_ON(mp)); ASSERT(mask & ATTR_GID); ASSERT(gdqp); olddquot2 = xfs_qm_vop_chown(tp, ip, &ip->i_gdquot, gdqp); } ip->i_d.di_gid = gid; inode->i_gid = gid; } } /* * Change file access modes. */ if (mask & ATTR_MODE) { umode_t mode = iattr->ia_mode; if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) mode &= ~S_ISGID; ip->i_d.di_mode &= S_IFMT; ip->i_d.di_mode |= mode & ~S_IFMT; inode->i_mode &= S_IFMT; inode->i_mode |= mode & ~S_IFMT; } /* * Change file access or modified times. */ if (mask & ATTR_ATIME) { inode->i_atime = iattr->ia_atime; ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec; ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec; ip->i_update_core = 1; } if (mask & ATTR_CTIME) { inode->i_ctime = iattr->ia_ctime; ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; ip->i_update_core = 1; } if (mask & ATTR_MTIME) { inode->i_mtime = iattr->ia_mtime; ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; ip->i_update_core = 1; } /* * And finally, log the inode core if any attribute in it * has been changed. */ if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE| ATTR_ATIME|ATTR_CTIME|ATTR_MTIME)) xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); XFS_STATS_INC(xs_ig_attrchg); /* * If this is a synchronous mount, make sure that the * transaction goes to disk before returning to the user. * This is slightly sub-optimal in that truncates require * two sync transactions instead of one for wsync filesystems. * One for the truncate and one for the timestamps since we * don't want to change the timestamps unless we're sure the * truncate worked. 
Truncates are less than 1% of the laddis * mix so this probably isn't worth the trouble to optimize. */ code = 0; if (mp->m_flags & XFS_MOUNT_WSYNC) xfs_trans_set_sync(tp); code = xfs_trans_commit(tp, commit_flags); xfs_iunlock(ip, lock_flags); /* * Release any dquot(s) the inode had kept before chown. */ xfs_qm_dqrele(olddquot1); xfs_qm_dqrele(olddquot2); xfs_qm_dqrele(udqp); xfs_qm_dqrele(gdqp); if (code) return code; /* * XXX(hch): Updating the ACL entries is not atomic vs the i_mode * update. We could avoid this with linked transactions * and passing down the transaction pointer all the way * to attr_set. No previous user of the generic * Posix ACL code seems to care about this issue either. */ if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) { code = -xfs_acl_chmod(inode); if (code) return XFS_ERROR(code); } return 0; abort_return: commit_flags |= XFS_TRANS_ABORT; error_return: xfs_qm_dqrele(udqp); xfs_qm_dqrele(gdqp); if (tp) { xfs_trans_cancel(tp, commit_flags); } if (lock_flags != 0) { xfs_iunlock(ip, lock_flags); } return code; } /* * The maximum pathlen is 1024 bytes. Since the minimum file system * blocksize is 512 bytes, we can get a max of 2 extents back from * bmapi. 
*/ #define SYMLINK_MAPS 2 STATIC int xfs_readlink_bmap( xfs_inode_t *ip, char *link) { xfs_mount_t *mp = ip->i_mount; int pathlen = ip->i_d.di_size; int nmaps = SYMLINK_MAPS; xfs_bmbt_irec_t mval[SYMLINK_MAPS]; xfs_daddr_t d; int byte_cnt; int n; xfs_buf_t *bp; int error = 0; error = xfs_bmapi(NULL, ip, 0, XFS_B_TO_FSB(mp, pathlen), 0, NULL, 0, mval, &nmaps, NULL); if (error) goto out; for (n = 0; n < nmaps; n++) { d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock); byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount); bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), XBF_LOCK | XBF_MAPPED | XBF_DONT_BLOCK); error = XFS_BUF_GETERROR(bp); if (error) { xfs_ioerror_alert("xfs_readlink", ip->i_mount, bp, XFS_BUF_ADDR(bp)); xfs_buf_relse(bp); goto out; } if (pathlen < byte_cnt) byte_cnt = pathlen; pathlen -= byte_cnt; memcpy(link, XFS_BUF_PTR(bp), byte_cnt); xfs_buf_relse(bp); } link[ip->i_d.di_size] = '\0'; error = 0; out: return error; } int xfs_readlink( xfs_inode_t *ip, char *link) { xfs_mount_t *mp = ip->i_mount; xfs_fsize_t pathlen; int error = 0; trace_xfs_readlink(ip); if (XFS_FORCED_SHUTDOWN(mp)) return XFS_ERROR(EIO); xfs_ilock(ip, XFS_ILOCK_SHARED); pathlen = ip->i_d.di_size; if (!pathlen) goto out; if (pathlen < 0 || pathlen > MAXPATHLEN) { xfs_alert(mp, "%s: inode (%llu) bad symlink length (%lld)", __func__, (unsigned long long) ip->i_ino, (long long) pathlen); ASSERT(0); error = XFS_ERROR(EFSCORRUPTED); goto out; } if (ip->i_df.if_flags & XFS_IFINLINE) { memcpy(link, ip->i_df.if_u1.if_data, pathlen); link[pathlen] = '\0'; } else { error = xfs_readlink_bmap(ip, link); } out: xfs_iunlock(ip, XFS_ILOCK_SHARED); return error; } /* * Flags for xfs_free_eofblocks */ #define XFS_FREE_EOF_TRYLOCK (1<<0) /* * This is called by xfs_inactive to free any blocks beyond eof * when the link count isn't zero and by xfs_dm_punch_hole() when * punching a hole to EOF. 
*/ STATIC int xfs_free_eofblocks( xfs_mount_t *mp, xfs_inode_t *ip, int flags) { xfs_trans_t *tp; int error; xfs_fileoff_t end_fsb; xfs_fileoff_t last_fsb; xfs_filblks_t map_len; int nimaps; xfs_bmbt_irec_t imap; /* * Figure out if there are any blocks beyond the end * of the file. If not, then there is nothing to do. */ end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)ip->i_size)); last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); if (last_fsb <= end_fsb) return 0; map_len = last_fsb - end_fsb; nimaps = 1; xfs_ilock(ip, XFS_ILOCK_SHARED); error = xfs_bmapi(NULL, ip, end_fsb, map_len, 0, NULL, 0, &imap, &nimaps, NULL); xfs_iunlock(ip, XFS_ILOCK_SHARED); if (!error && (nimaps != 0) && (imap.br_startblock != HOLESTARTBLOCK || ip->i_delayed_blks)) { /* * Attach the dquots to the inode up front. */ error = xfs_qm_dqattach(ip, 0); if (error) return error; /* * There are blocks after the end of file. * Free them up now by truncating the file to * its current size. */ tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); /* * Do the xfs_itruncate_start() call before * reserving any log space because * itruncate_start will call into the buffer * cache and we can't * do that within a transaction. */ if (flags & XFS_FREE_EOF_TRYLOCK) { if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { xfs_trans_cancel(tp, 0); return 0; } } else { xfs_ilock(ip, XFS_IOLOCK_EXCL); } error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, ip->i_size); if (error) { xfs_trans_cancel(tp, 0); xfs_iunlock(ip, XFS_IOLOCK_EXCL); return error; } error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT); if (error) { ASSERT(XFS_FORCED_SHUTDOWN(mp)); xfs_trans_cancel(tp, 0); xfs_iunlock(ip, XFS_IOLOCK_EXCL); return error; } xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip); error = xfs_itruncate_finish(&tp, ip, ip->i_size, XFS_DATA_FORK, 0); /* * If we get an error at this point we * simply don't bother truncating the file. 
 */
	/*
	 * NOTE(review): this is the tail of a function whose start lies
	 * above this chunk: commit the transaction on success, cancel and
	 * abort it on error, then unlock the inode.
	 */
	if (error) {
		xfs_trans_cancel(tp,
			(XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
	} else {
		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	}
	xfs_iunlock(ip, XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL);
	}
	return error;
}

/*
 * Free a symlink that has blocks associated with it.
 *
 * On entry *tpp is an allocated, unreserved transaction and the inode
 * is unlocked.  On success we return 0 with the inode locked
 * (iolock + ilock) and *tpp replaced by a fresh transaction carrying an
 * itruncate log reservation; the inode is NOT joined to it.  On failure
 * the transaction has been cancelled and *tpp is set to NULL.
 */
STATIC int
xfs_inactive_symlink_rmt(
	xfs_inode_t	*ip,
	xfs_trans_t	**tpp)
{
	xfs_buf_t	*bp;
	int		committed;
	int		done;
	int		error;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	int		i;
	xfs_mount_t	*mp;
	xfs_bmbt_irec_t	mval[SYMLINK_MAPS];
	int		nmaps;
	xfs_trans_t	*ntp;
	int		size;
	xfs_trans_t	*tp;

	tp = *tpp;
	mp = ip->i_mount;
	ASSERT(ip->i_d.di_size > XFS_IFORK_DSIZE(ip));
	/*
	 * We're freeing a symlink that has some
	 * blocks allocated to it.  Free the
	 * blocks here.  We know that we've got
	 * either 1 or 2 extents and that we can
	 * free them all in one bunmapi call.
	 */
	ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2);
	if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
			XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT))) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_trans_cancel(tp, 0);
		*tpp = NULL;
		return error;
	}
	/*
	 * Lock the inode, fix the size, and join it to the transaction.
	 * Hold it so in the normal path, we still have it locked for
	 * the second transaction.  In the error paths we need it
	 * held so the cancel won't rele it, see below.
	 */
	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	size = (int)ip->i_d.di_size;
	ip->i_d.di_size = 0;
	xfs_trans_ijoin(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	/*
	 * Find the block(s) so we can inval and unmap them.
	 */
	done = 0;
	xfs_bmap_init(&free_list, &first_block);
	nmaps = ARRAY_SIZE(mval);
	if ((error = xfs_bmapi(tp, ip, 0, XFS_B_TO_FSB(mp, size),
			XFS_BMAPI_METADATA, &first_block, 0, mval, &nmaps,
			&free_list)))
		goto error0;
	/*
	 * Invalidate the block(s).
	 */
	for (i = 0; i < nmaps; i++) {
		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, mval[i].br_startblock),
			XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0);
		xfs_trans_binval(tp, bp);
	}
	/*
	 * Unmap the dead block(s) to the free_list.
	 */
	if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
			&first_block, &free_list, &done)))
		goto error1;
	ASSERT(done);
	/*
	 * Commit the first transaction.  This logs the EFI and the inode.
	 */
	if ((error = xfs_bmap_finish(&tp, &free_list, &committed)))
		goto error1;
	/*
	 * The transaction must have been committed, since there were
	 * actually extents freed by xfs_bunmapi.  See xfs_bmap_finish.
	 * The new tp has the extent freeing and EFDs.
	 */
	ASSERT(committed);
	/*
	 * The first xact was committed, so add the inode to the new one.
	 * Mark it dirty so it will be logged and moved forward in the log as
	 * part of every commit.
	 */
	xfs_trans_ijoin(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	/*
	 * Get a new, empty transaction to return to our caller.
	 */
	ntp = xfs_trans_dup(tp);
	/*
	 * Commit the transaction containing extent freeing and EFDs.
	 * If we get an error on the commit here or on the reserve below,
	 * we need to unlock the inode since the new transaction doesn't
	 * have the inode attached.
	 */
	error = xfs_trans_commit(tp, 0);
	tp = ntp;
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		goto error0;
	}
	/*
	 * transaction commit worked ok so we can drop the extra ticket
	 * reference that we gained in xfs_trans_dup()
	 */
	xfs_log_ticket_put(tp->t_ticket);

	/*
	 * Remove the memory for extent descriptions (just bookkeeping).
	 */
	if (ip->i_df.if_bytes)
		xfs_idata_realloc(ip, -ip->i_df.if_bytes, XFS_DATA_FORK);
	ASSERT(ip->i_df.if_bytes == 0);
	/*
	 * Put an itruncate log reservation in the new transaction
	 * for our caller.
	 */
	if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
			XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT))) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		goto error0;
	}
	/*
	 * Return with the inode locked but not joined to the transaction.
	 */
	*tpp = tp;
	return 0;

 error1:
	xfs_bmap_cancel(&free_list);
 error0:
	/*
	 * Have to come here with the inode locked and either
	 * (held and in the transaction) or (not in the transaction).
	 * If the inode isn't held then cancel would iput it, but
	 * that's wrong since this is inactive and the vnode ref
	 * count is 0 already.
	 * Cancel won't do anything to the inode if held, but it still
	 * needs to be locked until the cancel is done, if it was
	 * joined to the transaction.
	 */
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	*tpp = NULL;
	return error;
}

/*
 * Free a symlink whose target fits inline in the inode data fork
 * (local format).  Reserves an itruncate log reservation on *tpp and
 * returns with the inode locked (ilock + iolock); on failure the
 * transaction is cancelled and *tpp is set to NULL.
 */
STATIC int
xfs_inactive_symlink_local(
	xfs_inode_t	*ip,
	xfs_trans_t	**tpp)
{
	int		error;

	ASSERT(ip->i_d.di_size <= XFS_IFORK_DSIZE(ip));
	/*
	 * We're freeing a symlink which fit into
	 * the inode.  Just free the memory used
	 * to hold the old symlink.
	 */
	error = xfs_trans_reserve(*tpp, 0,
				  XFS_ITRUNCATE_LOG_RES(ip->i_mount),
				  0, XFS_TRANS_PERM_LOG_RES,
				  XFS_ITRUNCATE_LOG_COUNT);

	if (error) {
		xfs_trans_cancel(*tpp, 0);
		*tpp = NULL;
		return error;
	}
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	/*
	 * Zero length symlinks _can_ exist.
 */
	if (ip->i_df.if_bytes > 0)  {
		xfs_idata_realloc(ip,
				  -(ip->i_df.if_bytes),
				  XFS_DATA_FORK);
		ASSERT(ip->i_df.if_bytes == 0);
	}
	return 0;
}

/*
 * Tear down the attribute fork during inode inactivation.
 *
 * Commits the caller's transaction (releasing the ilock), runs
 * xfs_attr_inactive() outside transaction context, then hands back a
 * fresh reserved transaction in *tpp with the ilock retaken and the
 * inode joined.  The iolock must be held on entry; it stays held on
 * the success path and is dropped on error.
 */
STATIC int
xfs_inactive_attrs(
	xfs_inode_t	*ip,
	xfs_trans_t	**tpp)
{
	xfs_trans_t	*tp;
	int		error;
	xfs_mount_t	*mp;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	tp = *tpp;
	mp = ip->i_mount;
	ASSERT(ip->i_d.di_forkoff != 0);
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		goto error_unlock;

	error = xfs_attr_inactive(ip);
	if (error)
		goto error_unlock;

	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
	error = xfs_trans_reserve(tp, 0,
				  XFS_IFREE_LOG_RES(mp),
				  0, XFS_TRANS_PERM_LOG_RES,
				  XFS_INACTIVE_LOG_COUNT);
	if (error)
		goto error_cancel;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip);
	xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	ASSERT(ip->i_d.di_anextents == 0);

	*tpp = tp;
	return 0;

error_cancel:
	ASSERT(XFS_FORCED_SHUTDOWN(mp));
	xfs_trans_cancel(tp, 0);
error_unlock:
	*tpp = NULL;
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}

int
xfs_release(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error;

	/* Only regular, still-allocated inodes need release processing. */
	if (!S_ISREG(ip->i_d.di_mode) || (ip->i_d.di_mode == 0))
		return 0;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

	if (!XFS_FORCED_SHUTDOWN(mp)) {
		int truncated;

		/*
		 * If we are using filestreams, and we have an unlinked
		 * file that we are processing the last close on, then nothing
		 * will be able to reopen and write to this file. Purge this
		 * inode from the filestreams cache so that it doesn't delay
		 * teardown of the inode.
		 */
		if ((ip->i_d.di_nlink == 0) && xfs_inode_is_filestream(ip))
			xfs_filestream_deassociate(ip);

		/*
		 * If we previously truncated this file and removed old data
		 * in the process, we want to initiate "early" writeout on
		 * the last close.  This is an attempt to combat the notorious
		 * NULL files problem which is particularly noticeable from a
		 * truncate down, buffered (re-)write (delalloc), followed by
		 * a crash.  What we are effectively doing here is
		 * significantly reducing the time window where we'd otherwise
		 * be exposed to that problem.
		 */
		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
		if (truncated) {
			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
			if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
				xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
		}
	}

	if (ip->i_d.di_nlink == 0)
		return 0;

	/*
	 * Candidate for trimming blocks past EOF: a regular file with
	 * size, cached pages or delalloc blocks, extents read in, and no
	 * PREALLOC/APPEND flag protecting the speculative allocation.
	 */
	if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
	     ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
	       ip->i_delayed_blks > 0)) &&
	     (ip->i_df.if_flags & XFS_IFEXTENTS))  &&
	    (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {

		/*
		 * If we can't get the iolock just skip truncating the blocks
		 * past EOF because we could deadlock with the mmap_sem
		 * otherwise.  We'll get another chance to drop them once the
		 * last reference to the inode is dropped, so we'll never leak
		 * blocks permanently.
		 *
		 * Further, check if the inode is being opened, written and
		 * closed frequently and we have delayed allocation blocks
		 * outstanding (e.g. streaming writes from the NFS server),
		 * truncating the blocks past EOF will cause fragmentation to
		 * occur.
		 *
		 * In this case don't do the truncation, either, but we have to
		 * be careful how we detect this case. Blocks beyond EOF show
		 * up as i_delayed_blks even when the inode is clean, so we
		 * need to truncate them away first before checking for a dirty
		 * release. Hence on the first dirty close we will still remove
		 * the speculative allocation, but after that we will leave it
		 * in place.
		 */
		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
			return 0;

		error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_TRYLOCK);
		if (error)
			return error;

		/* delalloc blocks after truncation means it really is dirty */
		if (ip->i_delayed_blks)
			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
	}
	return 0;
}

/*
 * xfs_inactive
 *
 * This is called when the vnode reference count for the vnode
 * goes to zero.  If the file has been unlinked, then it must
 * now be truncated.  Also, we clear all of the read-ahead state
 * kept for the inode here since the file is now closed.
 */
int
xfs_inactive(
	xfs_inode_t	*ip)
{
	xfs_bmap_free_t	free_list;
	xfs_fsblock_t	first_block;
	int		committed;
	xfs_trans_t	*tp;
	xfs_mount_t	*mp;
	int		error;
	int		truncate;

	/*
	 * If the inode is already free, then there can be nothing
	 * to clean up here.
	 */
	if (ip->i_d.di_mode == 0 || is_bad_inode(VFS_I(ip))) {
		ASSERT(ip->i_df.if_real_bytes == 0);
		ASSERT(ip->i_df.if_broot_bytes == 0);
		return VN_INACTIVE_CACHE;
	}

	/*
	 * Only do a truncate if it's a regular file with
	 * some actual space in it.  It's OK to look at the
	 * inode's fields without the lock because we're the
	 * only one with a reference to the inode.
 */
	truncate = ((ip->i_d.di_nlink == 0) &&
	    ((ip->i_d.di_size != 0) || (ip->i_size != 0) ||
	     (ip->i_d.di_nextents > 0) || (ip->i_delayed_blks > 0)) &&
	    ((ip->i_d.di_mode & S_IFMT) == S_IFREG));

	mp = ip->i_mount;

	error = 0;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		goto out;

	/*
	 * Inode is still linked: only opportunistically trim blocks past
	 * EOF (same candidate test as xfs_release()), then we're done.
	 */
	if (ip->i_d.di_nlink != 0) {
		if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
		     ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
		       ip->i_delayed_blks > 0)) &&
		     (ip->i_df.if_flags & XFS_IFEXTENTS) &&
		     (!(ip->i_d.di_flags &
				(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
		      (ip->i_delayed_blks != 0)))) {
			error = xfs_free_eofblocks(mp, ip, 0);
			if (error)
				return VN_INACTIVE_CACHE;
		}
		goto out;
	}

	ASSERT(ip->i_d.di_nlink == 0);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return VN_INACTIVE_CACHE;

	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
	if (truncate) {
		/*
		 * Do the xfs_itruncate_start() call before
		 * reserving any log space because itruncate_start
		 * will call into the buffer cache and we can't
		 * do that within a transaction.
		 */
		xfs_ilock(ip, XFS_IOLOCK_EXCL);

		error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return VN_INACTIVE_CACHE;
		}

		error = xfs_trans_reserve(tp, 0,
					  XFS_ITRUNCATE_LOG_RES(mp),
					  0, XFS_TRANS_PERM_LOG_RES,
					  XFS_ITRUNCATE_LOG_COUNT);
		if (error) {
			/* Don't call itruncate_cleanup */
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp, 0);
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return VN_INACTIVE_CACHE;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip);

		/*
		 * normally, we have to run xfs_itruncate_finish sync.
		 * But if filesystem is wsync and we're in the inactive
		 * path, then we know that nlink == 0, and that the
		 * xaction that made nlink == 0 is permanently committed
		 * since xfs_remove runs as a synchronous transaction.
		 */
		error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK,
				(!(mp->m_flags & XFS_MOUNT_WSYNC) ? 1 : 0));
		if (error) {
			xfs_trans_cancel(tp,
				XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
			xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
			return VN_INACTIVE_CACHE;
		}
	} else if ((ip->i_d.di_mode & S_IFMT) == S_IFLNK) {
		/*
		 * If we get an error while cleaning up a
		 * symlink we bail out.
		 */
		error = (ip->i_d.di_size > XFS_IFORK_DSIZE(ip)) ?
			xfs_inactive_symlink_rmt(ip, &tp) :
			xfs_inactive_symlink_local(ip, &tp);

		if (error) {
			ASSERT(tp == NULL);
			return VN_INACTIVE_CACHE;
		}

		xfs_trans_ijoin(tp, ip);
	} else {
		error = xfs_trans_reserve(tp, 0,
					  XFS_IFREE_LOG_RES(mp),
					  0, XFS_TRANS_PERM_LOG_RES,
					  XFS_INACTIVE_LOG_COUNT);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp, 0);
			return VN_INACTIVE_CACHE;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
		xfs_trans_ijoin(tp, ip);
	}

	/*
	 * If there are attributes associated with the file
	 * then blow them away now.  The code calls a routine
	 * that recursively deconstructs the attribute fork.
	 * We need to just commit the current transaction
	 * because we can't use it for xfs_attr_inactive().
	 */
	if (ip->i_d.di_anextents > 0) {
		error = xfs_inactive_attrs(ip, &tp);
		/*
		 * If we got an error, the transaction is already
		 * cancelled, and the inode is unlocked. Just get out.
		 */
		if (error)
			return VN_INACTIVE_CACHE;
	} else if (ip->i_afp) {
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
	}

	/*
	 * Free the inode.
	 */
	xfs_bmap_init(&free_list, &first_block);
	error = xfs_ifree(tp, ip, &free_list);
	if (error) {
		/*
		 * If we fail to free the inode, shut down.  The cancel
		 * might do that, we need to make sure.  Otherwise the
		 * inode might be lost for a long time or forever.
		 */
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_notice(mp, "%s: xfs_ifree returned error %d",
				__func__, error);
			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		}
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
	} else {
		/*
		 * Credit the quota account(s). The inode is gone.
		 */
		xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);

		/*
		 * Just ignore errors at this point.  There is nothing we can
		 * do except to try to keep going. Make sure it's not a silent
		 * error.
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
				__func__, error);
		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		if (error)
			xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
				__func__, error);
	}

	/*
	 * Release the dquots held by inode, if any.
	 */
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);

out:
	return VN_INACTIVE_CACHE;
}

/*
 * Lookups up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to a the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 *
 * Returns 0 with a referenced inode in *ipp on success; on error *ipp
 * is set to NULL.
 */
int
xfs_lookup(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	xfs_inode_t		**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;
	uint			lock_mode;

	trace_xfs_lookup(dp, name);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return XFS_ERROR(EIO);

	lock_mode = xfs_ilock_map_shared(dp);
	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	xfs_iunlock_map_shared(dp, lock_mode);

	if (error)
		goto out;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out:
	*ipp = NULL;
	return error;
}

/*
 * Create a new file or directory named "name" in directory dp.
 * mode selects the inode type (S_ISDIR gives mkdir semantics); rdev is
 * used only for special files.  On success the new, referenced inode
 * is returned in *ipp.
 */
int
xfs_create(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	mode_t			mode,
	xfs_dev_t		rdev,
	xfs_inode_t		**ipp)
{
	int			is_dir = S_ISDIR(mode);
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	boolean_t		unlock_dp_on_error = B_FALSE;
	uint			cancel_flags;
	int			committed;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	uint			resblks;
	uint			log_res;
	uint			log_count;

	trace_xfs_create(dp, name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
		prid = xfs_get_projid(dp);
	else
		prid = XFS_PROJID_DEFAULT;

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
	if (error)
		return error;

	if (is_dir) {
		rdev = 0;	/* directories never carry a device number */
		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
		log_res = XFS_MKDIR_LOG_RES(mp);
		log_count = XFS_MKDIR_LOG_COUNT;
		tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
	} else {
		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
		log_res = XFS_CREATE_LOG_RES(mp);
		log_count = XFS_CREATE_LOG_COUNT;
		tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
	}

	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;

	/*
	 * Initially assume that the file does not exist and
	 * reserve the resources for that case.  If that is not
	 * the case we'll drop the one we have and get a more
	 * appropriate transaction later.
	 */
	error = xfs_trans_reserve(tp, resblks, log_res, 0,
			XFS_TRANS_PERM_LOG_RES, log_count);
	if (error == ENOSPC) {
		/* flush outstanding delalloc blocks and retry */
		xfs_flush_inodes(dp);
		error = xfs_trans_reserve(tp, resblks, log_res, 0,
				XFS_TRANS_PERM_LOG_RES, log_count);
	}
	if (error == ENOSPC) {
		/* No space at all so try a "no-allocation" reservation */
		resblks = 0;
		error = xfs_trans_reserve(tp, 0, log_res, 0,
				XFS_TRANS_PERM_LOG_RES, log_count);
	}
	if (error) {
		cancel_flags = 0;
		goto out_trans_cancel;
	}

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = B_TRUE;

	/*
	 * Check for directory link count overflow.
	 */
	if (is_dir && dp->i_d.di_nlink >= XFS_MAXLINK) {
		error = XFS_ERROR(EMLINK);
		goto out_trans_cancel;
	}

	xfs_bmap_init(&free_list, &first_block);

	/*
	 * Reserve disk quota and the inode.
	 */
	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, resblks, 1, 0);
	if (error)
		goto out_trans_cancel;

	error = xfs_dir_canenter(tp, dp, name, resblks);
	if (error)
		goto out_trans_cancel;

	/*
	 * A newly created regular or special file just has one directory
	 * entry pointing to them, but a directory also the "." entry
	 * pointing to itself.
	 */
	error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid,
			       resblks > 0, &ip, &committed);
	if (error) {
		if (error == ENOSPC)
			goto out_trans_cancel;
		goto out_trans_abort;
	}

	/*
	 * Now we join the directory inode to the transaction.  We do not do it
	 * earlier because xfs_dir_ialloc might commit the previous transaction
	 * (and release all the locks).  An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin_ref(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = B_FALSE;

	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
					&first_block, &free_list, resblks ?
					resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
	if (error) {
		ASSERT(error != ENOSPC);
		goto out_trans_abort;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	if (is_dir) {
		error = xfs_dir_init(tp, ip, dp);
		if (error)
			goto out_bmap_cancel;

		error = xfs_bumplink(tp, dp);
		if (error)
			goto out_bmap_cancel;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * create transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * These ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);

	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);

	*ipp = ip;
	return 0;

 out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
 out_trans_abort:
	cancel_flags |= XFS_TRANS_ABORT;
 out_trans_cancel:
	xfs_trans_cancel(tp, cancel_flags);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to
	 * release the inode.  This prevents recursive transactions
	 * and deadlocks from xfs_inactive.
	 */
	if (ip)
		IRELE(ip);

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
}

/* Retry statistics for the multi-inode locking loops below. */
#ifdef DEBUG
int xfs_locked_n;
int xfs_small_retries;
int xfs_middle_retries;
int xfs_lots_retries;
int xfs_lock_delays;
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with
 * a different value
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
		lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
		lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;

	return lock_mode;
}

/*
 * The following routine will lock n inodes in exclusive mode.
 * We assume the caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock
 * is in the AIL and we start waiting for another inode that is locked
 * by a thread in a long running transaction (such as truncate). This can
 * result in deadlock since the long running trans might need to wait
 * for the inode we just locked in order to push the tail and free space
 * in the log.
 */
void
xfs_lock_inodes(
	xfs_inode_t	**ips,
	int		inodes,
	uint		lock_mode)
{
	int		attempts = 0, i, j, try_lock;
	xfs_log_item_t	*lp;

	ASSERT(ips && (inodes >= 2)); /* we need at least two */

	try_lock = 0;
	i = 0;

again:
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i-1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes
		 * are not in the AIL.
		 * If any are, set try_lock to be used later.
		 */

		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = (xfs_log_item_t *)ips[j]->i_itemp;
				if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
					try_lock++;
				}
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */

		if (try_lock) {
			/* try_lock must be 0 if i is 0. */
			/*
			 * try_lock means we have an inode locked
			 * that is in the AIL.
			 */
			ASSERT(i != 0);
			if (!xfs_ilock_nowait(ips[i],
					xfs_lock_inumorder(lock_mode, i))) {
				attempts++;

				/*
				 * Unlock all previous guys and try again.
				 * xfs_iunlock will try to push the tail
				 * if the inode is in the AIL.
				 */

				for(j = i - 1; j >= 0; j--) {

					/*
					 * Check to see if we've already
					 * unlocked this one.
					 * Not the first one going back,
					 * and the inode ptr is the same.
					 */
					if ((j != (i - 1)) &&
					    ips[j] == ips[j+1])
						continue;

					xfs_iunlock(ips[j], lock_mode);
				}

				if ((attempts % 5) == 0) {
					delay(1); /* Don't just spin the CPU */
#ifdef DEBUG
					xfs_lock_delays++;
#endif
				}
				i = 0;
				try_lock = 0;
				goto again;
			}
		} else {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
		}
	}

#ifdef DEBUG
	if (attempts) {
		if (attempts < 5) xfs_small_retries++;
		else if (attempts < 100) xfs_middle_retries++;
		else xfs_lots_retries++;
	} else {
		xfs_locked_n++;
	}
#endif
}

/*
 * xfs_lock_two_inodes() can only be used to lock one type of lock
 * at a time - the iolock or the ilock, but not both at once. If
 * we lock both at once, lockdep will report false positives saying
 * we have violated locking orders.
 */
void
xfs_lock_two_inodes(
	xfs_inode_t	*ip0,
	xfs_inode_t	*ip1,
	uint		lock_mode)
{
	xfs_inode_t	*temp;
	int		attempts = 0;
	xfs_log_item_t	*lp;

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
		ASSERT((lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) == 0);
	ASSERT(ip0->i_ino != ip1->i_ino);

	/* Always lock in ascending inode number order. */
	if (ip0->i_ino > ip1->i_ino) {
		temp = ip0;
		ip0 = ip1;
		ip1 = temp;
	}

 again:
	xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock. If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = (xfs_log_item_t *)ip0->i_itemp;
	if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
			xfs_iunlock(ip0, lock_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
	}
}

/*
 * Remove the directory entry "name" referring to inode ip from
 * directory dp, dropping the associated link counts.  Handles both
 * regular files and (empty) directories.
 */
int
xfs_remove(
	xfs_inode_t             *dp,
	struct xfs_name		*name,
	xfs_inode_t		*ip)
{
	xfs_mount_t		*mp = dp->i_mount;
	xfs_trans_t             *tp = NULL;
	int			is_dir = S_ISDIR(ip->i_d.di_mode);
	int                     error = 0;
	xfs_bmap_free_t         free_list;
	xfs_fsblock_t           first_block;
	int			cancel_flags;
	int			committed;
	int			link_zero;
	uint			resblks;
	uint			log_count;

	trace_xfs_remove(dp, name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	error = xfs_qm_dqattach(dp, 0);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		goto std_return;

	if (is_dir) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
		log_count = XFS_DEFAULT_LOG_COUNT;
	} else {
		tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);
		log_count = XFS_REMOVE_LOG_COUNT;
	}
	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;

	/*
	 * We try to get the real space reservation first,
	 * allowing for directory btree deletion(s) implying
	 * possible bmap insert(s).  If we can't get the space
	 * reservation then we use 0 instead, and avoid the bmap
	 * btree insert(s) in the directory code by, if the bmap
	 * insert tries to happen, instead trimming the LAST
	 * block from the directory.
	 */
	resblks = XFS_REMOVE_SPACE_RES(mp);
	error = xfs_trans_reserve(tp, resblks, XFS_REMOVE_LOG_RES(mp), 0,
				  XFS_TRANS_PERM_LOG_RES, log_count);
	if (error == ENOSPC) {
		resblks = 0;
		error = xfs_trans_reserve(tp, 0, XFS_REMOVE_LOG_RES(mp), 0,
					  XFS_TRANS_PERM_LOG_RES, log_count);
	}
	if (error) {
		ASSERT(error != ENOSPC);
		cancel_flags = 0;
		goto out_trans_cancel;
	}

	xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin_ref(tp, dp, XFS_ILOCK_EXCL);
	xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);

	/*
	 * If we're removing a directory perform some additional validation.
	 */
	if (is_dir) {
		/* nlink == 2 means only "." and ".." remain. */
		ASSERT(ip->i_d.di_nlink >= 2);
		if (ip->i_d.di_nlink != 2) {
			error = XFS_ERROR(ENOTEMPTY);
			goto out_trans_cancel;
		}
		if (!xfs_dir_isempty(ip)) {
			error = XFS_ERROR(ENOTEMPTY);
			goto out_trans_cancel;
		}
	}

	xfs_bmap_init(&free_list, &first_block);
	error = xfs_dir_removename(tp, dp, name, ip->i_ino,
					&first_block, &free_list, resblks);
	if (error) {
		ASSERT(error != ENOENT);
		goto out_bmap_cancel;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

	if (is_dir) {
		/*
		 * Drop the link from ip's "..".
		 */
		error = xfs_droplink(tp, dp);
		if (error)
			goto out_bmap_cancel;

		/*
		 * Drop the "." link from ip to self.
		 */
		error = xfs_droplink(tp, ip);
		if (error)
			goto out_bmap_cancel;
	} else {
		/*
		 * When removing a non-directory we need to log the parent
		 * inode here.  For a directory this is done implicitly
		 * by the xfs_droplink call for the ".." entry.
		 */
		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
	}

	/*
	 * Drop the link from dp to ip.
	 */
	error = xfs_droplink(tp, ip);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Determine if this is the last link while
	 * we are in the transaction.
	 */
	link_zero = (ip->i_d.di_nlink == 0);

	/*
	 * If this is a synchronous mount, make sure that the
	 * remove transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto std_return;

	/*
	 * If we are using filestreams, kill the stream association.
	 * If the file is still open it may get a new one but that
	 * will get killed on last close in xfs_close() so we don't
	 * have to worry about that.
	 */
	if (!is_dir && link_zero && xfs_inode_is_filestream(ip))
		xfs_filestream_deassociate(ip);

	return 0;

 out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
	cancel_flags |= XFS_TRANS_ABORT;
 out_trans_cancel:
	xfs_trans_cancel(tp, cancel_flags);
 std_return:
	return error;
}

/*
 * Create a new hard link to the (non-directory) inode sip, named
 * target_name, in directory tdp.
 */
int
xfs_link(
	xfs_inode_t		*tdp,
	xfs_inode_t		*sip,
	struct xfs_name		*target_name)
{
	xfs_mount_t		*mp = tdp->i_mount;
	xfs_trans_t		*tp;
	int			error;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	int			cancel_flags;
	int			committed;
	int			resblks;

	trace_xfs_link(tdp, target_name);

	ASSERT(!S_ISDIR(sip->i_d.di_mode));

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	error = xfs_qm_dqattach(sip, 0);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(tdp, 0);
	if (error)
		goto std_return;

	tp = xfs_trans_alloc(mp, XFS_TRANS_LINK);
	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
	error = xfs_trans_reserve(tp, resblks, XFS_LINK_LOG_RES(mp), 0,
			XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT);
	if (error == ENOSPC) {
		resblks = 0;
		error = xfs_trans_reserve(tp, 0, XFS_LINK_LOG_RES(mp), 0,
				XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT);
	}
	if (error) {
		cancel_flags = 0;
		goto error_return;
	}

	xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);

	xfs_trans_ijoin_ref(tp, sip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin_ref(tp, tdp, XFS_ILOCK_EXCL);

	/*
	 * If the source has too many links, we can't make any more to it.
	 */
	if (sip->i_d.di_nlink >= XFS_MAXLINK) {
		error = XFS_ERROR(EMLINK);
		goto error_return;
	}

	/*
	 * If we are using project inheritance, we only allow hard link
	 * creation in our tree when the project IDs are the same; else
	 * the tree quota mechanism could be circumvented.
	 */
	if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
		     (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
		error = XFS_ERROR(EXDEV);
		goto error_return;
	}

	error = xfs_dir_canenter(tp, tdp, target_name, resblks);
	if (error)
		goto error_return;

	xfs_bmap_init(&free_list, &first_block);

	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
					&first_block, &free_list, resblks);
	if (error)
		goto abort_return;
	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);

	error = xfs_bumplink(tp, sip);
	if (error)
		goto abort_return;

	/*
	 * If this is a synchronous mount, make sure that the
	 * link transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
		xfs_trans_set_sync(tp);
	}

	error = xfs_bmap_finish (&tp, &free_list, &committed);
	if (error) {
		xfs_bmap_cancel(&free_list);
		goto abort_return;
	}

	return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);

 abort_return:
	cancel_flags |= XFS_TRANS_ABORT;
 error_return:
	xfs_trans_cancel(tp, cancel_flags);
 std_return:
	return error;
}

/*
 * Create a symbolic link named link_name in directory dp whose target
 * is target_path.  The link body is stored inline in the inode data
 * fork when it fits, otherwise in allocated ("remote") blocks.  On
 * success the new, referenced inode is returned in *ipp.
 */
int
xfs_symlink(
	xfs_inode_t		*dp,
	struct xfs_name		*link_name,
	const char		*target_path,
	mode_t			mode,
	xfs_inode_t		**ipp)
{
	xfs_mount_t		*mp = dp->i_mount;
	xfs_trans_t		*tp;
	xfs_inode_t		*ip;
	int			error;
	int			pathlen;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	boolean_t		unlock_dp_on_error = B_FALSE;
	uint			cancel_flags;
	int			committed;
	xfs_fileoff_t		first_fsb;
	xfs_filblks_t		fs_blocks;
	int			nmaps;
	xfs_bmbt_irec_t		mval[SYMLINK_MAPS];
	xfs_daddr_t		d;
	const char		*cur_chunk;
	int			byte_cnt;
	int			n;
	xfs_buf_t		*bp;
	prid_t			prid;
	struct xfs_dquot	*udqp, *gdqp;
	uint			resblks;

	*ipp = NULL;
	error = 0;
	ip = NULL;
	tp = NULL;

	trace_xfs_symlink(dp, link_name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	/*
	 * Check component lengths of the target path name.
	 */
	pathlen = strlen(target_path);
	if (pathlen >= MAXPATHLEN)      /* total string too long */
		return XFS_ERROR(ENAMETOOLONG);

	udqp = gdqp = NULL;
	if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
		prid = xfs_get_projid(dp);
	else
		prid = XFS_PROJID_DEFAULT;

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
	if (error)
		goto std_return;

	tp = xfs_trans_alloc(mp, XFS_TRANS_SYMLINK);
	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
	/*
	 * The symlink will fit into the inode data fork?
	 * There can't be any attributes so we get the whole variable part.
	 */
	if (pathlen <= XFS_LITINO(mp))
		fs_blocks = 0;
	else
		fs_blocks = XFS_B_TO_FSB(mp, pathlen);
	resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
	error = xfs_trans_reserve(tp, resblks, XFS_SYMLINK_LOG_RES(mp), 0,
			XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT);
	if (error == ENOSPC && fs_blocks == 0) {
		resblks = 0;
		error = xfs_trans_reserve(tp, 0, XFS_SYMLINK_LOG_RES(mp), 0,
				XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT);
	}
	if (error) {
		cancel_flags = 0;
		goto error_return;
	}

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = B_TRUE;

	/*
	 * Check whether the directory allows new symlinks or not.
	 */
	if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) {
		error = XFS_ERROR(EPERM);
		goto error_return;
	}

	/*
	 * Reserve disk quota : blocks and inode.
	 */
	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, resblks, 1, 0);
	if (error)
		goto error_return;

	/*
	 * Check for ability to enter directory entry, if no space reserved.
	 */
	error = xfs_dir_canenter(tp, dp, link_name, resblks);
	if (error)
		goto error_return;
	/*
	 * Initialize the bmap freelist prior to calling either
	 * bmapi or the directory create code.
	 */
	xfs_bmap_init(&free_list, &first_block);

	/*
	 * Allocate an inode for the symlink.
	 */
	error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
			       prid, resblks > 0, &ip, NULL);
	if (error) {
		if (error == ENOSPC)
			goto error_return;
		goto error1;
	}

	/*
	 * An error after we've joined dp to the transaction will result in the
	 * transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin_ref(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = B_FALSE;

	/*
	 * Also attach the dquot(s) to it, if applicable.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);

	if (resblks)
		resblks -= XFS_IALLOC_SPACE_RES(mp);
	/*
	 * If the symlink will fit into the inode, write it inline.
	 */
	if (pathlen <= XFS_IFORK_DSIZE(ip)) {
		xfs_idata_realloc(ip, pathlen, XFS_DATA_FORK);
		memcpy(ip->i_df.if_u1.if_data, target_path, pathlen);
		ip->i_d.di_size = pathlen;

		/*
		 * The inode was initially created in extent format.
		 */
		ip->i_df.if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT);
		ip->i_df.if_flags |= XFS_IFINLINE;

		ip->i_d.di_format = XFS_DINODE_FMT_LOCAL;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_DDATA | XFS_ILOG_CORE);

	} else {
		first_fsb = 0;
		nmaps = SYMLINK_MAPS;

		error = xfs_bmapi(tp, ip, first_fsb, fs_blocks,
				  XFS_BMAPI_WRITE | XFS_BMAPI_METADATA,
				  &first_block, resblks, mval, &nmaps,
				  &free_list);
		if (error)
			goto error2;

		if (resblks)
			resblks -= fs_blocks;
		ip->i_d.di_size = pathlen;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

		/* Copy the target path into the allocated blocks chunkwise. */
		cur_chunk = target_path;
		for (n = 0; n < nmaps; n++) {
			d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
			byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
			bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
					       BTOBB(byte_cnt), 0);
			ASSERT(bp && !XFS_BUF_GETERROR(bp));
			if (pathlen < byte_cnt) {
				byte_cnt = pathlen;
			}
			pathlen -= byte_cnt;

			memcpy(XFS_BUF_PTR(bp), cur_chunk, byte_cnt);
			cur_chunk += byte_cnt;

			xfs_trans_log_buf(tp, bp, 0, byte_cnt - 1);
		}
	}

	/*
	 * Create the directory entry for the symlink.
	 */
	error = xfs_dir_createname(tp, dp, link_name, ip->i_ino,
					&first_block, &free_list, resblks);
	if (error)
		goto error2;
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	/*
	 * If this is a synchronous mount, make sure that the
	 * symlink transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
		xfs_trans_set_sync(tp);
	}

	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error) {
		goto error2;
	}
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);

	*ipp = ip;
	return 0;

 error2:
	IRELE(ip);
 error1:
	xfs_bmap_cancel(&free_list);
	cancel_flags |= XFS_TRANS_ABORT;
 error_return:
	xfs_trans_cancel(tp, cancel_flags);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
 std_return:
	return error;
}

/*
 * Set the DMAPI event mask and state on an inode.  Requires
 * CAP_SYS_ADMIN.
 */
int
xfs_set_dmattrs(
	xfs_inode_t     *ip,
	u_int		evmask,
	u_int16_t	state)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_trans_t	*tp;
	int		error;

	if (!capable(CAP_SYS_ADMIN))
		return XFS_ERROR(EPERM);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	tp = xfs_trans_alloc(mp, XFS_TRANS_SET_DMATTRS);
	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES (mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);

	ip->i_d.di_dmevmask = evmask;
	ip->i_d.di_dmstate  = state;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_trans_commit(tp, 0);

	return error;
}

/*
 * xfs_alloc_file_space()
 *      This routine allocates disk space for the given file.
 *
 *	If alloc_type == 0, this request is for an ALLOCSP type
 *	request which will change the file size.  In this case, no
 *	DMAPI event will be generated by the call.  A TRUNCATE event
 *	will be generated later by xfs_setattr.
 *
 *	If alloc_type != 0, this request is for a RESVSP type
 *	request, and a DMAPI DM_EVENT_WRITE will be generated if the
 *	lower block boundary byte address is less than the file's
 *	length.
 *
 * RETURNS:
 *       0 on success
 *      errno on error
 *
 */
STATIC int
xfs_alloc_file_space(
	xfs_inode_t		*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type,
	int			attr_flags)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fsblock_t		firstfsb;
	int			nimaps;
	int			bmapi_flag;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	xfs_bmap_free_t		free_list;
	uint			qblocks, resblks, resrtextents;
	int			committed;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)
		return XFS_ERROR(EINVAL);

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	bmapi_flag = XFS_BMAPI_WRITE | alloc_type;
	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
	allocatesize_fsb = XFS_B_TO_FSB(mp, count);

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 * With an extent size hint, round [s, e) out to hint
		 * alignment so the reservation covers the aligned range.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			if ((temp = do_mod(startoffset_fsb, extsz)))
				e += temp;
			if ((temp = do_mod(e, extsz)))
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		error = xfs_trans_reserve(tp, resblks,
					  XFS_WRITE_LOG_RES(mp), resrtextents,
					  XFS_TRANS_PERM_LOG_RES,
					  XFS_WRITE_LOG_COUNT);
		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp, 0);
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip);

		/*
		 * Issue the xfs_bmapi() call to allocate the blocks
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		error = xfs_bmapi(tp, ip, startoffset_fsb,
				  allocatesize_fsb, bmapi_flag,
				  &firstfsb, 0, imapp, &nimaps,
				  &free_list);
		if (error) {
			goto error0;
		}

		/*
		 * Complete the transaction
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error) {
			goto error0;
		}

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error) {
			break;
		}

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = XFS_ERROR(ENOSPC);
			break;
		}

		/* Advance past what was just allocated and loop. */
		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Zero file bytes between startoff and endoff inclusive.
 * The iolock is held exclusive and no blocks are buffered.
* * This function is used by xfs_free_file_space() to zero * partial blocks when the range to free is not block aligned. * When unreserving space with boundaries that are not block * aligned we round up the start and round down the end * boundaries and then use this function to zero the parts of * the blocks that got dropped during the rounding. */ STATIC int xfs_zero_remaining_bytes( xfs_inode_t *ip, xfs_off_t startoff, xfs_off_t endoff) { xfs_bmbt_irec_t imap; xfs_fileoff_t offset_fsb; xfs_off_t lastoffset; xfs_off_t offset; xfs_buf_t *bp; xfs_mount_t *mp = ip->i_mount; int nimap; int error = 0; /* * Avoid doing I/O beyond eof - it's not necessary * since nothing can read beyond eof. The space will * be zeroed when the file is extended anyway. */ if (startoff >= ip->i_size) return 0; if (endoff > ip->i_size) endoff = ip->i_size; bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ? mp->m_rtdev_targp : mp->m_ddev_targp, mp->m_sb.sb_blocksize, XBF_DONT_BLOCK); if (!bp) return XFS_ERROR(ENOMEM); for (offset = startoff; offset <= endoff; offset = lastoffset + 1) { offset_fsb = XFS_B_TO_FSBT(mp, offset); nimap = 1; error = xfs_bmapi(NULL, ip, offset_fsb, 1, 0, NULL, 0, &imap, &nimap, NULL); if (error || nimap < 1) break; ASSERT(imap.br_blockcount >= 1); ASSERT(imap.br_startoff == offset_fsb); lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1; if (lastoffset > endoff) lastoffset = endoff; if (imap.br_startblock == HOLESTARTBLOCK) continue; ASSERT(imap.br_startblock != DELAYSTARTBLOCK); if (imap.br_state == XFS_EXT_UNWRITTEN) continue; XFS_BUF_UNDONE(bp); XFS_BUF_UNWRITE(bp); XFS_BUF_READ(bp); XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock)); xfsbdstrat(mp, bp); error = xfs_buf_iowait(bp); if (error) { xfs_ioerror_alert("xfs_zero_remaining_bytes(read)", mp, bp, XFS_BUF_ADDR(bp)); break; } memset(XFS_BUF_PTR(bp) + (offset - XFS_FSB_TO_B(mp, imap.br_startoff)), 0, lastoffset - offset + 1); XFS_BUF_UNDONE(bp); XFS_BUF_UNREAD(bp); XFS_BUF_WRITE(bp); 
xfsbdstrat(mp, bp); error = xfs_buf_iowait(bp); if (error) { xfs_ioerror_alert("xfs_zero_remaining_bytes(write)", mp, bp, XFS_BUF_ADDR(bp)); break; } } xfs_buf_free(bp); return error; } /* * xfs_free_file_space() * This routine frees disk space for the given file. * * This routine is only called by xfs_change_file_space * for an UNRESVSP type call. * * RETURNS: * 0 on success * errno on error * */ STATIC int xfs_free_file_space( xfs_inode_t *ip, xfs_off_t offset, xfs_off_t len, int attr_flags) { int committed; int done; xfs_fileoff_t endoffset_fsb; int error; xfs_fsblock_t firstfsb; xfs_bmap_free_t free_list; xfs_bmbt_irec_t imap; xfs_off_t ioffset; xfs_extlen_t mod=0; xfs_mount_t *mp; int nimap; uint resblks; uint rounding; int rt; xfs_fileoff_t startoffset_fsb; xfs_trans_t *tp; int need_iolock = 1; mp = ip->i_mount; trace_xfs_free_file_space(ip); error = xfs_qm_dqattach(ip, 0); if (error) return error; error = 0; if (len <= 0) /* if nothing being freed */ return error; rt = XFS_IS_REALTIME_INODE(ip); startoffset_fsb = XFS_B_TO_FSB(mp, offset); endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len); if (attr_flags & XFS_ATTR_NOLOCK) need_iolock = 0; if (need_iolock) { xfs_ilock(ip, XFS_IOLOCK_EXCL); /* wait for the completion of any pending DIOs */ xfs_ioend_wait(ip); } rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE); ioffset = offset & ~(rounding - 1); if (VN_CACHED(VFS_I(ip)) != 0) { error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED); if (error) goto out_unlock_iolock; } /* * Need to zero the stuff we're not freeing, on disk. * If it's a realtime file & can't use unwritten extents then we * actually need to zero the extent edges. Otherwise xfs_bunmapi * will take care of it for us. 
*/ if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) { nimap = 1; error = xfs_bmapi(NULL, ip, startoffset_fsb, 1, 0, NULL, 0, &imap, &nimap, NULL); if (error) goto out_unlock_iolock; ASSERT(nimap == 0 || nimap == 1); if (nimap && imap.br_startblock != HOLESTARTBLOCK) { xfs_daddr_t block; ASSERT(imap.br_startblock != DELAYSTARTBLOCK); block = imap.br_startblock; mod = do_div(block, mp->m_sb.sb_rextsize); if (mod) startoffset_fsb += mp->m_sb.sb_rextsize - mod; } nimap = 1; error = xfs_bmapi(NULL, ip, endoffset_fsb - 1, 1, 0, NULL, 0, &imap, &nimap, NULL); if (error) goto out_unlock_iolock; ASSERT(nimap == 0 || nimap == 1); if (nimap && imap.br_startblock != HOLESTARTBLOCK) { ASSERT(imap.br_startblock != DELAYSTARTBLOCK); mod++; if (mod && (mod != mp->m_sb.sb_rextsize)) endoffset_fsb -= mod; } } if ((done = (endoffset_fsb <= startoffset_fsb))) /* * One contiguous piece to clear */ error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1); else { /* * Some full blocks, possibly two pieces to clear */ if (offset < XFS_FSB_TO_B(mp, startoffset_fsb)) error = xfs_zero_remaining_bytes(ip, offset, XFS_FSB_TO_B(mp, startoffset_fsb) - 1); if (!error && XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len) error = xfs_zero_remaining_bytes(ip, XFS_FSB_TO_B(mp, endoffset_fsb), offset + len - 1); } /* * free file space until done or until there is an error */ resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); while (!error && !done) { /* * allocate and setup the transaction. Allow this * transaction to dip into the reserve blocks to ensure * the freeing of the space succeeds at ENOSPC. */ tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); tp->t_flags |= XFS_TRANS_RESERVE; error = xfs_trans_reserve(tp, resblks, XFS_WRITE_LOG_RES(mp), 0, XFS_TRANS_PERM_LOG_RES, XFS_WRITE_LOG_COUNT); /* * check for running out of space */ if (error) { /* * Free the transaction structure. 
*/ ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp)); xfs_trans_cancel(tp, 0); break; } xfs_ilock(ip, XFS_ILOCK_EXCL); error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS); if (error) goto error1; xfs_trans_ijoin(tp, ip); /* * issue the bunmapi() call to free the blocks */ xfs_bmap_init(&free_list, &firstfsb); error = xfs_bunmapi(tp, ip, startoffset_fsb, endoffset_fsb - startoffset_fsb, 0, 2, &firstfsb, &free_list, &done); if (error) { goto error0; } /* * complete the transaction */ error = xfs_bmap_finish(&tp, &free_list, &committed); if (error) { goto error0; } error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); xfs_iunlock(ip, XFS_ILOCK_EXCL); } out_unlock_iolock: if (need_iolock) xfs_iunlock(ip, XFS_IOLOCK_EXCL); return error; error0: xfs_bmap_cancel(&free_list); error1: xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); xfs_iunlock(ip, need_iolock ? (XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL) : XFS_ILOCK_EXCL); return error; } /* * xfs_change_file_space() * This routine allocates or frees disk space for the given file. * The user specified parameters are checked for alignment and size * limitations. * * RETURNS: * 0 on success * errno on error * */ int xfs_change_file_space( xfs_inode_t *ip, int cmd, xfs_flock64_t *bf, xfs_off_t offset, int attr_flags) { xfs_mount_t *mp = ip->i_mount; int clrprealloc; int error; xfs_fsize_t fsize; int setprealloc; xfs_off_t startoffset; xfs_off_t llen; xfs_trans_t *tp; struct iattr iattr; int prealloc_type; if (!S_ISREG(ip->i_d.di_mode)) return XFS_ERROR(EINVAL); switch (bf->l_whence) { case 0: /*SEEK_SET*/ break; case 1: /*SEEK_CUR*/ bf->l_start += offset; break; case 2: /*SEEK_END*/ bf->l_start += ip->i_size; break; default: return XFS_ERROR(EINVAL); } llen = bf->l_len > 0 ? 
bf->l_len - 1 : bf->l_len; if ( (bf->l_start < 0) || (bf->l_start > XFS_MAXIOFFSET(mp)) || (bf->l_start + llen < 0) || (bf->l_start + llen > XFS_MAXIOFFSET(mp))) return XFS_ERROR(EINVAL); bf->l_whence = 0; startoffset = bf->l_start; fsize = ip->i_size; /* * XFS_IOC_RESVSP and XFS_IOC_UNRESVSP will reserve or unreserve * file space. * These calls do NOT zero the data space allocated to the file, * nor do they change the file size. * * XFS_IOC_ALLOCSP and XFS_IOC_FREESP will allocate and free file * space. * These calls cause the new file data to be zeroed and the file * size to be changed. */ setprealloc = clrprealloc = 0; prealloc_type = XFS_BMAPI_PREALLOC; switch (cmd) { case XFS_IOC_ZERO_RANGE: prealloc_type |= XFS_BMAPI_CONVERT; xfs_tosspages(ip, startoffset, startoffset + bf->l_len, 0); /* FALLTHRU */ case XFS_IOC_RESVSP: case XFS_IOC_RESVSP64: error = xfs_alloc_file_space(ip, startoffset, bf->l_len, prealloc_type, attr_flags); if (error) return error; setprealloc = 1; break; case XFS_IOC_UNRESVSP: case XFS_IOC_UNRESVSP64: if ((error = xfs_free_file_space(ip, startoffset, bf->l_len, attr_flags))) return error; break; case XFS_IOC_ALLOCSP: case XFS_IOC_ALLOCSP64: case XFS_IOC_FREESP: case XFS_IOC_FREESP64: if (startoffset > fsize) { error = xfs_alloc_file_space(ip, fsize, startoffset - fsize, 0, attr_flags); if (error) break; } iattr.ia_valid = ATTR_SIZE; iattr.ia_size = startoffset; error = xfs_setattr(ip, &iattr, attr_flags); if (error) return error; clrprealloc = 1; break; default: ASSERT(0); return XFS_ERROR(EINVAL); } /* * update the inode timestamp, mode, and prealloc flag bits */ tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID); if ((error = xfs_trans_reserve(tp, 0, XFS_WRITEID_LOG_RES(mp), 0, 0, 0))) { /* ASSERT(0); */ xfs_trans_cancel(tp, 0); return error; } xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip); if ((attr_flags & XFS_ATTR_DMI) == 0) { ip->i_d.di_mode &= ~S_ISUID; /* * Note that we don't have to worry about mandatory * file locking being 
disabled here because we only * clear the S_ISGID bit if the Group execute bit is * on, but if it was on then mandatory locking wouldn't * have been enabled. */ if (ip->i_d.di_mode & S_IXGRP) ip->i_d.di_mode &= ~S_ISGID; xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); } if (setprealloc) ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC; else if (clrprealloc) ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); if (attr_flags & XFS_ATTR_SYNC) xfs_trans_set_sync(tp); error = xfs_trans_commit(tp, 0); xfs_iunlock(ip, XFS_ILOCK_EXCL); return error; }
gpl-2.0
HerroYou/android_kernel_samsung_msm7x27
arch/frv/kernel/futex.c
1856
6709
/* futex.c: futex operations * * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/futex.h> #include <linux/uaccess.h> #include <asm/futex.h> #include <asm/errno.h> /* * the various futex operations; MMU fault checking is ignored under no-MMU * conditions */ static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_oldval) { int oldval, ret; asm("0: \n" " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ " ckeq icc3,cc7 \n" "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ "2: cst.p %3,%M0 ,cc3,#1 \n" " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ " beq icc3,#0,0b \n" " setlos 0,%2 \n" "3: \n" ".subsection 2 \n" "4: setlos %5,%2 \n" " bra 3b \n" ".previous \n" ".section __ex_table,\"a\" \n" " .balign 8 \n" " .long 1b,4b \n" " .long 2b,4b \n" ".previous" : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg) : "3"(oparg), "i"(-EFAULT) : "memory", "cc7", "cc3", "icc3" ); *_oldval = oldval; return ret; } static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_oldval) { int oldval, ret; asm("0: \n" " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ " ckeq icc3,cc7 \n" "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ " add %1,%3,%3 \n" "2: cst.p %3,%M0 ,cc3,#1 \n" " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ " beq icc3,#0,0b \n" " setlos 0,%2 \n" "3: \n" ".subsection 2 \n" "4: setlos %5,%2 \n" " bra 3b \n" ".previous \n" ".section __ex_table,\"a\" \n" " .balign 8 \n" " .long 1b,4b \n" " .long 2b,4b \n" ".previous" : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg) : "3"(oparg), 
"i"(-EFAULT) : "memory", "cc7", "cc3", "icc3" ); *_oldval = oldval; return ret; } static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_oldval) { int oldval, ret; asm("0: \n" " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ " ckeq icc3,cc7 \n" "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ " or %1,%3,%3 \n" "2: cst.p %3,%M0 ,cc3,#1 \n" " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ " beq icc3,#0,0b \n" " setlos 0,%2 \n" "3: \n" ".subsection 2 \n" "4: setlos %5,%2 \n" " bra 3b \n" ".previous \n" ".section __ex_table,\"a\" \n" " .balign 8 \n" " .long 1b,4b \n" " .long 2b,4b \n" ".previous" : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg) : "3"(oparg), "i"(-EFAULT) : "memory", "cc7", "cc3", "icc3" ); *_oldval = oldval; return ret; } static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_oldval) { int oldval, ret; asm("0: \n" " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ " ckeq icc3,cc7 \n" "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ " and %1,%3,%3 \n" "2: cst.p %3,%M0 ,cc3,#1 \n" " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ " beq icc3,#0,0b \n" " setlos 0,%2 \n" "3: \n" ".subsection 2 \n" "4: setlos %5,%2 \n" " bra 3b \n" ".previous \n" ".section __ex_table,\"a\" \n" " .balign 8 \n" " .long 1b,4b \n" " .long 2b,4b \n" ".previous" : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg) : "3"(oparg), "i"(-EFAULT) : "memory", "cc7", "cc3", "icc3" ); *_oldval = oldval; return ret; } static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_oldval) { int oldval, ret; asm("0: \n" " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ " ckeq icc3,cc7 \n" "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ " xor %1,%3,%3 \n" "2: cst.p %3,%M0 ,cc3,#1 \n" " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ " beq 
icc3,#0,0b \n" " setlos 0,%2 \n" "3: \n" ".subsection 2 \n" "4: setlos %5,%2 \n" " bra 3b \n" ".previous \n" ".section __ex_table,\"a\" \n" " .balign 8 \n" " .long 1b,4b \n" " .long 2b,4b \n" ".previous" : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg) : "3"(oparg), "i"(-EFAULT) : "memory", "cc7", "cc3", "icc3" ); *_oldval = oldval; return ret; } /*****************************************************************************/ /* * do the futex operations */ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; int oparg = (encoded_op << 8) >> 20; int cmparg = (encoded_op << 20) >> 20; int oldval = 0, ret; if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; pagefault_disable(); switch (op) { case FUTEX_OP_SET: ret = atomic_futex_op_xchg_set(oparg, uaddr, &oldval); break; case FUTEX_OP_ADD: ret = atomic_futex_op_xchg_add(oparg, uaddr, &oldval); break; case FUTEX_OP_OR: ret = atomic_futex_op_xchg_or(oparg, uaddr, &oldval); break; case FUTEX_OP_ANDN: ret = atomic_futex_op_xchg_and(~oparg, uaddr, &oldval); break; case FUTEX_OP_XOR: ret = atomic_futex_op_xchg_xor(oparg, uaddr, &oldval); break; default: ret = -ENOSYS; break; } pagefault_enable(); if (!ret) { switch (cmp) { case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; default: ret = -ENOSYS; break; } } return ret; } /* end futex_atomic_op_inuser() */
gpl-2.0
AayushRd7/Xeski
drivers/platform/x86/chromeos_laptop.c
2368
10654
/* * chromeos_laptop.c - Driver to instantiate Chromebook i2c/smbus devices. * * Author : Benson Leung <bleung@chromium.org> * * Copyright (C) 2012 Google, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/dmi.h> #include <linux/i2c.h> #include <linux/i2c/atmel_mxt_ts.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/module.h> #define ATMEL_TP_I2C_ADDR 0x4b #define ATMEL_TP_I2C_BL_ADDR 0x25 #define ATMEL_TS_I2C_ADDR 0x4a #define ATMEL_TS_I2C_BL_ADDR 0x26 #define CYAPA_TP_I2C_ADDR 0x67 #define ISL_ALS_I2C_ADDR 0x44 #define TAOS_ALS_I2C_ADDR 0x29 static struct i2c_client *als; static struct i2c_client *tp; static struct i2c_client *ts; const char *i2c_adapter_names[] = { "SMBus I801 adapter", "i915 gmbus vga", "i915 gmbus panel", }; /* Keep this enum consistent with i2c_adapter_names */ enum i2c_adapter_type { I2C_ADAPTER_SMBUS = 0, I2C_ADAPTER_VGADDC, I2C_ADAPTER_PANEL, }; static struct i2c_board_info __initdata cyapa_device = { I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR), .flags = I2C_CLIENT_WAKE, }; static struct i2c_board_info __initdata isl_als_device = { I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR), }; static struct i2c_board_info __initdata tsl2583_als_device = { I2C_BOARD_INFO("tsl2583", TAOS_ALS_I2C_ADDR), }; static struct i2c_board_info __initdata tsl2563_als_device = { 
I2C_BOARD_INFO("tsl2563", TAOS_ALS_I2C_ADDR), }; static struct mxt_platform_data atmel_224s_tp_platform_data = { .x_line = 18, .y_line = 12, .x_size = 102*20, .y_size = 68*20, .blen = 0x80, /* Gain setting is in upper 4 bits */ .threshold = 0x32, .voltage = 0, /* 3.3V */ .orient = MXT_VERTICAL_FLIP, .irqflags = IRQF_TRIGGER_FALLING, .is_tp = true, .key_map = { KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, BTN_LEFT }, .config = NULL, .config_length = 0, }; static struct i2c_board_info __initdata atmel_224s_tp_device = { I2C_BOARD_INFO("atmel_mxt_tp", ATMEL_TP_I2C_ADDR), .platform_data = &atmel_224s_tp_platform_data, .flags = I2C_CLIENT_WAKE, }; static struct mxt_platform_data atmel_1664s_platform_data = { .x_line = 32, .y_line = 50, .x_size = 1700, .y_size = 2560, .blen = 0x89, /* Gain setting is in upper 4 bits */ .threshold = 0x28, .voltage = 0, /* 3.3V */ .orient = MXT_ROTATED_90_COUNTER, .irqflags = IRQF_TRIGGER_FALLING, .is_tp = false, .config = NULL, .config_length = 0, }; static struct i2c_board_info __initdata atmel_1664s_device = { I2C_BOARD_INFO("atmel_mxt_ts", ATMEL_TS_I2C_ADDR), .platform_data = &atmel_1664s_platform_data, .flags = I2C_CLIENT_WAKE, }; static struct i2c_client __init *__add_probed_i2c_device( const char *name, int bus, struct i2c_board_info *info, const unsigned short *addrs) { const struct dmi_device *dmi_dev; const struct dmi_dev_onboard *dev_data; struct i2c_adapter *adapter; struct i2c_client *client; if (bus < 0) return NULL; /* * If a name is specified, look for irq platform information stashed * in DMI_DEV_TYPE_DEV_ONBOARD by the Chrome OS custom system firmware. 
*/ if (name) { dmi_dev = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, name, NULL); if (!dmi_dev) { pr_err("%s failed to dmi find device %s.\n", __func__, name); return NULL; } dev_data = (struct dmi_dev_onboard *)dmi_dev->device_data; if (!dev_data) { pr_err("%s failed to get data from dmi for %s.\n", __func__, name); return NULL; } info->irq = dev_data->instance; } adapter = i2c_get_adapter(bus); if (!adapter) { pr_err("%s failed to get i2c adapter %d.\n", __func__, bus); return NULL; } /* add the i2c device */ client = i2c_new_probed_device(adapter, info, addrs, NULL); if (!client) pr_err("%s failed to register device %d-%02x\n", __func__, bus, info->addr); else pr_debug("%s added i2c device %d-%02x\n", __func__, bus, info->addr); i2c_put_adapter(adapter); return client; } static int __init __find_i2c_adap(struct device *dev, void *data) { const char *name = data; static const char *prefix = "i2c-"; struct i2c_adapter *adapter; if (strncmp(dev_name(dev), prefix, strlen(prefix)) != 0) return 0; adapter = to_i2c_adapter(dev); return (strncmp(adapter->name, name, strlen(name)) == 0); } static int __init find_i2c_adapter_num(enum i2c_adapter_type type) { struct device *dev = NULL; struct i2c_adapter *adapter; const char *name = i2c_adapter_names[type]; /* find the adapter by name */ dev = bus_find_device(&i2c_bus_type, NULL, (void *)name, __find_i2c_adap); if (!dev) { pr_err("%s: i2c adapter %s not found on system.\n", __func__, name); return -ENODEV; } adapter = to_i2c_adapter(dev); return adapter->nr; } /* * Takes a list of addresses in addrs as such : * { addr1, ... , addrn, I2C_CLIENT_END }; * add_probed_i2c_device will use i2c_new_probed_device * and probe for devices at all of the addresses listed. * Returns NULL if no devices found. * See Documentation/i2c/instantiating-devices for more information. 
*/ static __init struct i2c_client *add_probed_i2c_device( const char *name, enum i2c_adapter_type type, struct i2c_board_info *info, const unsigned short *addrs) { return __add_probed_i2c_device(name, find_i2c_adapter_num(type), info, addrs); } /* * Probes for a device at a single address, the one provided by * info->addr. * Returns NULL if no device found. */ static __init struct i2c_client *add_i2c_device(const char *name, enum i2c_adapter_type type, struct i2c_board_info *info) { const unsigned short addr_list[] = { info->addr, I2C_CLIENT_END }; return __add_probed_i2c_device(name, find_i2c_adapter_num(type), info, addr_list); } static struct i2c_client __init *add_smbus_device(const char *name, struct i2c_board_info *info) { return add_i2c_device(name, I2C_ADAPTER_SMBUS, info); } static int __init setup_cyapa_smbus_tp(const struct dmi_system_id *id) { /* add cyapa touchpad on smbus */ tp = add_smbus_device("trackpad", &cyapa_device); return 0; } static int __init setup_atmel_224s_tp(const struct dmi_system_id *id) { const unsigned short addr_list[] = { ATMEL_TP_I2C_BL_ADDR, ATMEL_TP_I2C_ADDR, I2C_CLIENT_END }; /* add atmel mxt touchpad on VGA DDC GMBus */ tp = add_probed_i2c_device("trackpad", I2C_ADAPTER_VGADDC, &atmel_224s_tp_device, addr_list); return 0; } static int __init setup_atmel_1664s_ts(const struct dmi_system_id *id) { const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR, ATMEL_TS_I2C_ADDR, I2C_CLIENT_END }; /* add atmel mxt touch device on PANEL GMBus */ ts = add_probed_i2c_device("touchscreen", I2C_ADAPTER_PANEL, &atmel_1664s_device, addr_list); return 0; } static int __init setup_isl29018_als(const struct dmi_system_id *id) { /* add isl29018 light sensor */ als = add_smbus_device("lightsensor", &isl_als_device); return 0; } static int __init setup_isl29023_als(const struct dmi_system_id *id) { /* add isl29023 light sensor on Panel GMBus */ als = add_i2c_device("lightsensor", I2C_ADAPTER_PANEL, &isl_als_device); return 0; } static int __init 
setup_tsl2583_als(const struct dmi_system_id *id) { /* add tsl2583 light sensor on smbus */ als = add_smbus_device(NULL, &tsl2583_als_device); return 0; } static int __init setup_tsl2563_als(const struct dmi_system_id *id) { /* add tsl2563 light sensor on smbus */ als = add_smbus_device(NULL, &tsl2563_als_device); return 0; } static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = { { .ident = "Samsung Series 5 550 - Touchpad", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"), DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"), }, .callback = setup_cyapa_smbus_tp, }, { .ident = "Chromebook Pixel - Touchscreen", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), DMI_MATCH(DMI_PRODUCT_NAME, "Link"), }, .callback = setup_atmel_1664s_ts, }, { .ident = "Chromebook Pixel - Touchpad", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), DMI_MATCH(DMI_PRODUCT_NAME, "Link"), }, .callback = setup_atmel_224s_tp, }, { .ident = "Samsung Series 5 550 - Light Sensor", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"), DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"), }, .callback = setup_isl29018_als, }, { .ident = "Chromebook Pixel - Light Sensor", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), DMI_MATCH(DMI_PRODUCT_NAME, "Link"), }, .callback = setup_isl29023_als, }, { .ident = "Acer C7 Chromebook - Touchpad", .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "Parrot"), }, .callback = setup_cyapa_smbus_tp, }, { .ident = "HP Pavilion 14 Chromebook - Touchpad", .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "Butterfly"), }, .callback = setup_cyapa_smbus_tp, }, { .ident = "Samsung Series 5 - Light Sensor", .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "Alex"), }, .callback = setup_tsl2583_als, }, { .ident = "Cr-48 - Light Sensor", .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "Mario"), }, .callback = setup_tsl2563_als, }, { .ident = "Acer AC700 - Light Sensor", .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"), }, .callback = setup_tsl2563_als, }, { } }; MODULE_DEVICE_TABLE(dmi, chromeos_laptop_dmi_table); static int 
__init chromeos_laptop_init(void) { if (!dmi_check_system(chromeos_laptop_dmi_table)) { pr_debug("%s unsupported system.\n", __func__); return -ENODEV; } return 0; } static void __exit chromeos_laptop_exit(void) { if (als) i2c_unregister_device(als); if (tp) i2c_unregister_device(tp); if (ts) i2c_unregister_device(ts); } module_init(chromeos_laptop_init); module_exit(chromeos_laptop_exit); MODULE_DESCRIPTION("Chrome OS Laptop driver"); MODULE_AUTHOR("Benson Leung <bleung@chromium.org>"); MODULE_LICENSE("GPL");
gpl-2.0
stevezilla/amherst-linux-imx6
drivers/block/osdblk.c
2368
15959
/* osdblk.c -- Export a single SCSI OSD object as a Linux block device Copyright 2009 Red Hat, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. Instructions for use -------------------- 1) Map a Linux block device to an existing OSD object. In this example, we will use partition id 1234, object id 5678, OSD device /dev/osd1. $ echo "1234 5678 /dev/osd1" > /sys/class/osdblk/add 2) List all active blkdev<->object mappings. In this example, we have performed step #1 twice, creating two blkdevs, mapped to two separate OSD objects. $ cat /sys/class/osdblk/list 0 174 1234 5678 /dev/osd1 1 179 1994 897123 /dev/osd0 The columns, in order, are: - blkdev unique id - blkdev assigned major - OSD object partition id - OSD object id - OSD device 3) Remove an active blkdev<->object mapping. In this example, we remove the mapping with blkdev unique id 1. $ echo 1 > /sys/class/osdblk/remove NOTE: The actual creation and deletion of OSD objects is outside the scope of this driver. */ #include <linux/kernel.h> #include <linux/device.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/slab.h> #include <scsi/osd_initiator.h> #include <scsi/osd_attributes.h> #include <scsi/osd_sec.h> #include <scsi/scsi_device.h> #define DRV_NAME "osdblk" #define PFX DRV_NAME ": " /* #define _OSDBLK_DEBUG */ #ifdef _OSDBLK_DEBUG #define OSDBLK_DEBUG(fmt, a...) 
\ printk(KERN_NOTICE "osdblk @%s:%d: " fmt, __func__, __LINE__, ##a) #else #define OSDBLK_DEBUG(fmt, a...) \ do { if (0) printk(fmt, ##a); } while (0) #endif MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>"); MODULE_DESCRIPTION("block device inside an OSD object osdblk.ko"); MODULE_LICENSE("GPL"); struct osdblk_device; enum { OSDBLK_MINORS_PER_MAJOR = 256, /* max minors per blkdev */ OSDBLK_MAX_REQ = 32, /* max parallel requests */ OSDBLK_OP_TIMEOUT = 4 * 60, /* sync OSD req timeout */ }; struct osdblk_request { struct request *rq; /* blk layer request */ struct bio *bio; /* cloned bio */ struct osdblk_device *osdev; /* associated blkdev */ }; struct osdblk_device { int id; /* blkdev unique id */ int major; /* blkdev assigned major */ struct gendisk *disk; /* blkdev's gendisk and rq */ struct request_queue *q; struct osd_dev *osd; /* associated OSD */ char name[32]; /* blkdev name, e.g. osdblk34 */ spinlock_t lock; /* queue lock */ struct osd_obj_id obj; /* OSD partition, obj id */ uint8_t obj_cred[OSD_CAP_LEN]; /* OSD cred */ struct osdblk_request req[OSDBLK_MAX_REQ]; /* request table */ struct list_head node; char osd_path[0]; /* OSD device path */ }; static struct class *class_osdblk; /* /sys/class/osdblk */ static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */ static LIST_HEAD(osdblkdev_list); static const struct block_device_operations osdblk_bd_ops = { .owner = THIS_MODULE, }; static const struct osd_attr g_attr_logical_length = ATTR_DEF( OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8); static void osdblk_make_credential(u8 cred_a[OSD_CAP_LEN], const struct osd_obj_id *obj) { osd_sec_init_nosec_doall_caps(cred_a, obj, false, true); } /* copied from exofs; move to libosd? */ /* * Perform a synchronous OSD operation. copied from exofs; move to libosd? 
*/ static int osd_sync_op(struct osd_request *or, int timeout, uint8_t *credential) { int ret; or->timeout = timeout; ret = osd_finalize_request(or, 0, credential, NULL); if (ret) return ret; ret = osd_execute_request(or); /* osd_req_decode_sense(or, ret); */ return ret; } /* * Perform an asynchronous OSD operation. copied from exofs; move to libosd? */ static int osd_async_op(struct osd_request *or, osd_req_done_fn *async_done, void *caller_context, u8 *cred) { int ret; ret = osd_finalize_request(or, 0, cred, NULL); if (ret) return ret; ret = osd_execute_request_async(or, async_done, caller_context); return ret; } /* copied from exofs; move to libosd? */ static int extract_attr_from_req(struct osd_request *or, struct osd_attr *attr) { struct osd_attr cur_attr = {.attr_page = 0}; /* start with zeros */ void *iter = NULL; int nelem; do { nelem = 1; osd_req_decode_get_attr_list(or, &cur_attr, &nelem, &iter); if ((cur_attr.attr_page == attr->attr_page) && (cur_attr.attr_id == attr->attr_id)) { attr->len = cur_attr.len; attr->val_ptr = cur_attr.val_ptr; return 0; } } while (iter); return -EIO; } static int osdblk_get_obj_size(struct osdblk_device *osdev, u64 *size_out) { struct osd_request *or; struct osd_attr attr; int ret; /* start request */ or = osd_start_request(osdev->osd, GFP_KERNEL); if (!or) return -ENOMEM; /* create a get-attributes(length) request */ osd_req_get_attributes(or, &osdev->obj); osd_req_add_get_attr_list(or, &g_attr_logical_length, 1); /* execute op synchronously */ ret = osd_sync_op(or, OSDBLK_OP_TIMEOUT, osdev->obj_cred); if (ret) goto out; /* extract length from returned attribute info */ attr = g_attr_logical_length; ret = extract_attr_from_req(or, &attr); if (ret) goto out; *size_out = get_unaligned_be64(attr.val_ptr); out: osd_end_request(or); return ret; } static void osdblk_osd_complete(struct osd_request *or, void *private) { struct osdblk_request *orq = private; struct osd_sense_info osi; int ret = osd_req_decode_sense(or, &osi); if 
(ret) { ret = -EIO; OSDBLK_DEBUG("osdblk_osd_complete with err=%d\n", ret); } /* complete OSD request */ osd_end_request(or); /* complete request passed to osdblk by block layer */ __blk_end_request_all(orq->rq, ret); } static void bio_chain_put(struct bio *chain) { struct bio *tmp; while (chain) { tmp = chain; chain = chain->bi_next; bio_put(tmp); } } static struct bio *bio_chain_clone(struct bio *old_chain, gfp_t gfpmask) { struct bio *tmp, *new_chain = NULL, *tail = NULL; while (old_chain) { tmp = bio_clone_kmalloc(old_chain, gfpmask); if (!tmp) goto err_out; tmp->bi_bdev = NULL; gfpmask &= ~__GFP_WAIT; tmp->bi_next = NULL; if (!new_chain) new_chain = tail = tmp; else { tail->bi_next = tmp; tail = tmp; } old_chain = old_chain->bi_next; } return new_chain; err_out: OSDBLK_DEBUG("bio_chain_clone with err\n"); bio_chain_put(new_chain); return NULL; } static void osdblk_rq_fn(struct request_queue *q) { struct osdblk_device *osdev = q->queuedata; while (1) { struct request *rq; struct osdblk_request *orq; struct osd_request *or; struct bio *bio; bool do_write, do_flush; /* peek at request from block layer */ rq = blk_fetch_request(q); if (!rq) break; /* filter out block requests we don't understand */ if (rq->cmd_type != REQ_TYPE_FS) { blk_end_request_all(rq, 0); continue; } /* deduce our operation (read, write, flush) */ /* I wish the block layer simplified cmd_type/cmd_flags/cmd[] * into a clearly defined set of RPC commands: * read, write, flush, scsi command, power mgmt req, * driver-specific, etc. 
*/ do_flush = rq->cmd_flags & REQ_FLUSH; do_write = (rq_data_dir(rq) == WRITE); if (!do_flush) { /* osd_flush does not use a bio */ /* a bio clone to be passed down to OSD request */ bio = bio_chain_clone(rq->bio, GFP_ATOMIC); if (!bio) break; } else bio = NULL; /* alloc internal OSD request, for OSD command execution */ or = osd_start_request(osdev->osd, GFP_ATOMIC); if (!or) { bio_chain_put(bio); OSDBLK_DEBUG("osd_start_request with err\n"); break; } orq = &osdev->req[rq->tag]; orq->rq = rq; orq->bio = bio; orq->osdev = osdev; /* init OSD command: flush, write or read */ if (do_flush) osd_req_flush_object(or, &osdev->obj, OSD_CDB_FLUSH_ALL, 0, 0); else if (do_write) osd_req_write(or, &osdev->obj, blk_rq_pos(rq) * 512ULL, bio, blk_rq_bytes(rq)); else osd_req_read(or, &osdev->obj, blk_rq_pos(rq) * 512ULL, bio, blk_rq_bytes(rq)); OSDBLK_DEBUG("%s 0x%x bytes at 0x%llx\n", do_flush ? "flush" : do_write ? "write" : "read", blk_rq_bytes(rq), blk_rq_pos(rq) * 512ULL); /* begin OSD command execution */ if (osd_async_op(or, osdblk_osd_complete, orq, osdev->obj_cred)) { osd_end_request(or); blk_requeue_request(q, rq); bio_chain_put(bio); OSDBLK_DEBUG("osd_execute_request_async with err\n"); break; } /* remove the special 'flush' marker, now that the command * is executing */ rq->special = NULL; } } static void osdblk_free_disk(struct osdblk_device *osdev) { struct gendisk *disk = osdev->disk; if (!disk) return; if (disk->flags & GENHD_FL_UP) del_gendisk(disk); if (disk->queue) blk_cleanup_queue(disk->queue); put_disk(disk); } static int osdblk_init_disk(struct osdblk_device *osdev) { struct gendisk *disk; struct request_queue *q; int rc; u64 obj_size = 0; /* contact OSD, request size info about the object being mapped */ rc = osdblk_get_obj_size(osdev, &obj_size); if (rc) return rc; /* create gendisk info */ disk = alloc_disk(OSDBLK_MINORS_PER_MAJOR); if (!disk) return -ENOMEM; sprintf(disk->disk_name, DRV_NAME "%d", osdev->id); disk->major = osdev->major; disk->first_minor 
= 0; disk->fops = &osdblk_bd_ops; disk->private_data = osdev; /* init rq */ q = blk_init_queue(osdblk_rq_fn, &osdev->lock); if (!q) { put_disk(disk); return -ENOMEM; } /* switch queue to TCQ mode; allocate tag map */ rc = blk_queue_init_tags(q, OSDBLK_MAX_REQ, NULL); if (rc) { blk_cleanup_queue(q); put_disk(disk); return rc; } /* Set our limits to the lower device limits, because osdblk cannot * sleep when allocating a lower-request and therefore cannot be * bouncing. */ blk_queue_stack_limits(q, osd_request_queue(osdev->osd)); blk_queue_prep_rq(q, blk_queue_start_tag); blk_queue_flush(q, REQ_FLUSH); disk->queue = q; q->queuedata = osdev; osdev->disk = disk; osdev->q = q; /* finally, announce the disk to the world */ set_capacity(disk, obj_size / 512ULL); add_disk(disk); printk(KERN_INFO "%s: Added of size 0x%llx\n", disk->disk_name, (unsigned long long)obj_size); return 0; } /******************************************************************** * /sys/class/osdblk/ * add map OSD object to blkdev * remove unmap OSD object * list show mappings *******************************************************************/ static void class_osdblk_release(struct class *cls) { kfree(cls); } static ssize_t class_osdblk_list(struct class *c, struct class_attribute *attr, char *data) { int n = 0; struct list_head *tmp; mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); list_for_each(tmp, &osdblkdev_list) { struct osdblk_device *osdev; osdev = list_entry(tmp, struct osdblk_device, node); n += sprintf(data+n, "%d %d %llu %llu %s\n", osdev->id, osdev->major, osdev->obj.partition, osdev->obj.id, osdev->osd_path); } mutex_unlock(&ctl_mutex); return n; } static ssize_t class_osdblk_add(struct class *c, struct class_attribute *attr, const char *buf, size_t count) { struct osdblk_device *osdev; ssize_t rc; int irc, new_id = 0; struct list_head *tmp; if (!try_module_get(THIS_MODULE)) return -ENODEV; /* new osdblk_device object */ osdev = kzalloc(sizeof(*osdev) + strlen(buf) + 1, 
GFP_KERNEL); if (!osdev) { rc = -ENOMEM; goto err_out_mod; } /* static osdblk_device initialization */ spin_lock_init(&osdev->lock); INIT_LIST_HEAD(&osdev->node); /* generate unique id: find highest unique id, add one */ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); list_for_each(tmp, &osdblkdev_list) { struct osdblk_device *osdev; osdev = list_entry(tmp, struct osdblk_device, node); if (osdev->id > new_id) new_id = osdev->id + 1; } osdev->id = new_id; /* add to global list */ list_add_tail(&osdev->node, &osdblkdev_list); mutex_unlock(&ctl_mutex); /* parse add command */ if (sscanf(buf, "%llu %llu %s", &osdev->obj.partition, &osdev->obj.id, osdev->osd_path) != 3) { rc = -EINVAL; goto err_out_slot; } /* initialize rest of new object */ sprintf(osdev->name, DRV_NAME "%d", osdev->id); /* contact requested OSD */ osdev->osd = osduld_path_lookup(osdev->osd_path); if (IS_ERR(osdev->osd)) { rc = PTR_ERR(osdev->osd); goto err_out_slot; } /* build OSD credential */ osdblk_make_credential(osdev->obj_cred, &osdev->obj); /* register our block device */ irc = register_blkdev(0, osdev->name); if (irc < 0) { rc = irc; goto err_out_osd; } osdev->major = irc; /* set up and announce blkdev mapping */ rc = osdblk_init_disk(osdev); if (rc) goto err_out_blkdev; return count; err_out_blkdev: unregister_blkdev(osdev->major, osdev->name); err_out_osd: osduld_put_device(osdev->osd); err_out_slot: mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); list_del_init(&osdev->node); mutex_unlock(&ctl_mutex); kfree(osdev); err_out_mod: OSDBLK_DEBUG("Error adding device %s\n", buf); module_put(THIS_MODULE); return rc; } static ssize_t class_osdblk_remove(struct class *c, struct class_attribute *attr, const char *buf, size_t count) { struct osdblk_device *osdev = NULL; int target_id, rc; unsigned long ul; struct list_head *tmp; rc = strict_strtoul(buf, 10, &ul); if (rc) return rc; /* convert to int; abort if we lost anything in the conversion */ target_id = (int) ul; if (target_id != ul) 
return -EINVAL; /* remove object from list immediately */ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); list_for_each(tmp, &osdblkdev_list) { osdev = list_entry(tmp, struct osdblk_device, node); if (osdev->id == target_id) { list_del_init(&osdev->node); break; } osdev = NULL; } mutex_unlock(&ctl_mutex); if (!osdev) return -ENOENT; /* clean up and free blkdev and associated OSD connection */ osdblk_free_disk(osdev); unregister_blkdev(osdev->major, osdev->name); osduld_put_device(osdev->osd); kfree(osdev); /* release module ref */ module_put(THIS_MODULE); return count; } static struct class_attribute class_osdblk_attrs[] = { __ATTR(add, 0200, NULL, class_osdblk_add), __ATTR(remove, 0200, NULL, class_osdblk_remove), __ATTR(list, 0444, class_osdblk_list, NULL), __ATTR_NULL }; static int osdblk_sysfs_init(void) { int ret = 0; /* * create control files in sysfs * /sys/class/osdblk/... */ class_osdblk = kzalloc(sizeof(*class_osdblk), GFP_KERNEL); if (!class_osdblk) return -ENOMEM; class_osdblk->name = DRV_NAME; class_osdblk->owner = THIS_MODULE; class_osdblk->class_release = class_osdblk_release; class_osdblk->class_attrs = class_osdblk_attrs; ret = class_register(class_osdblk); if (ret) { kfree(class_osdblk); class_osdblk = NULL; printk(PFX "failed to create class osdblk\n"); return ret; } return 0; } static void osdblk_sysfs_cleanup(void) { if (class_osdblk) class_destroy(class_osdblk); class_osdblk = NULL; } static int __init osdblk_init(void) { int rc; rc = osdblk_sysfs_init(); if (rc) return rc; return 0; } static void __exit osdblk_exit(void) { osdblk_sysfs_cleanup(); } module_init(osdblk_init); module_exit(osdblk_exit);
gpl-2.0
pio-masaki/kernel_at300
arch/x86/kernel/cpu/perf_event_p6.c
2880
3485
#ifdef CONFIG_CPU_SUP_INTEL /* * Not sure about some of these */ static const u64 p6_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = 0x0079, [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e, [PERF_COUNT_HW_CACHE_MISSES] = 0x012e, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, [PERF_COUNT_HW_BUS_CYCLES] = 0x0062, }; static u64 p6_pmu_event_map(int hw_event) { return p6_perfmon_event_map[hw_event]; } /* * Event setting that is specified not to count anything. * We use this to effectively disable a counter. * * L2_RQSTS with 0 MESI unit mask. */ #define P6_NOP_EVENT 0x0000002EULL static struct event_constraint p6_event_constraints[] = { INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */ INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ INTEL_EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */ INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ EVENT_CONSTRAINT_END }; static void p6_pmu_disable_all(void) { u64 val; /* p6 only has one enable register */ rdmsrl(MSR_P6_EVNTSEL0, val); val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; wrmsrl(MSR_P6_EVNTSEL0, val); } static void p6_pmu_enable_all(int added) { unsigned long val; /* p6 only has one enable register */ rdmsrl(MSR_P6_EVNTSEL0, val); val |= ARCH_PERFMON_EVENTSEL_ENABLE; wrmsrl(MSR_P6_EVNTSEL0, val); } static inline void p6_pmu_disable_event(struct perf_event *event) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; u64 val = P6_NOP_EVENT; if (cpuc->enabled) val |= ARCH_PERFMON_EVENTSEL_ENABLE; (void)checking_wrmsrl(hwc->config_base, val); } static void p6_pmu_enable_event(struct perf_event *event) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; u64 val; val = hwc->config; if (cpuc->enabled) val |= ARCH_PERFMON_EVENTSEL_ENABLE; 
(void)checking_wrmsrl(hwc->config_base, val); } static __initconst const struct x86_pmu p6_pmu = { .name = "p6", .handle_irq = x86_pmu_handle_irq, .disable_all = p6_pmu_disable_all, .enable_all = p6_pmu_enable_all, .enable = p6_pmu_enable_event, .disable = p6_pmu_disable_event, .hw_config = x86_pmu_hw_config, .schedule_events = x86_schedule_events, .eventsel = MSR_P6_EVNTSEL0, .perfctr = MSR_P6_PERFCTR0, .event_map = p6_pmu_event_map, .max_events = ARRAY_SIZE(p6_perfmon_event_map), .apic = 1, .max_period = (1ULL << 31) - 1, .version = 0, .num_counters = 2, /* * Events have 40 bits implemented. However they are designed such * that bits [32-39] are sign extensions of bit 31. As such the * effective width of a event for P6-like PMU is 32 bits only. * * See IA-32 Intel Architecture Software developer manual Vol 3B */ .cntval_bits = 32, .cntval_mask = (1ULL << 32) - 1, .get_event_constraints = x86_get_event_constraints, .event_constraints = p6_event_constraints, }; static __init int p6_pmu_init(void) { switch (boot_cpu_data.x86_model) { case 1: case 3: /* Pentium Pro */ case 5: case 6: /* Pentium II */ case 7: case 8: case 11: /* Pentium III */ case 9: case 13: /* Pentium M */ break; default: pr_cont("unsupported p6 CPU model %d ", boot_cpu_data.x86_model); return -ENODEV; } x86_pmu = p6_pmu; return 0; } #endif /* CONFIG_CPU_SUP_INTEL */
gpl-2.0
StelixROM/android_kernel_htc_msm8974
arch/mips/kernel/smp.c
3648
10268
/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) 2000, 2001 Kanoj Sarcar * Copyright (C) 2000, 2001 Ralf Baechle * Copyright (C) 2000, 2001 Silicon Graphics, Inc. * Copyright (C) 2000, 2001, 2003 Broadcom Corporation */ #include <linux/cache.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/smp.h> #include <linux/spinlock.h> #include <linux/threads.h> #include <linux/module.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/sched.h> #include <linux/cpumask.h> #include <linux/cpu.h> #include <linux/err.h> #include <linux/ftrace.h> #include <linux/atomic.h> #include <asm/cpu.h> #include <asm/processor.h> #include <asm/r4k-timer.h> #include <asm/mmu_context.h> #include <asm/time.h> #include <asm/setup.h> #ifdef CONFIG_MIPS_MT_SMTC #include <asm/mipsmtregs.h> #endif /* CONFIG_MIPS_MT_SMTC */ volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ EXPORT_SYMBOL(__cpu_number_map); int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ EXPORT_SYMBOL(__cpu_logical_map); /* Number of TCs (or siblings in Intel speak) per CPU core */ int smp_num_siblings = 1; EXPORT_SYMBOL(smp_num_siblings); /* representing the TCs (or siblings in Intel speak) of each logical CPU */ 
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_sibling_map); /* representing cpus for which sibling maps can be computed */ static cpumask_t cpu_sibling_setup_map; static inline void set_cpu_sibling_map(int cpu) { int i; cpu_set(cpu, cpu_sibling_setup_map); if (smp_num_siblings > 1) { for_each_cpu_mask(i, cpu_sibling_setup_map) { if (cpu_data[cpu].core == cpu_data[i].core) { cpu_set(i, cpu_sibling_map[cpu]); cpu_set(cpu, cpu_sibling_map[i]); } } } else cpu_set(cpu, cpu_sibling_map[cpu]); } struct plat_smp_ops *mp_ops; __cpuinit void register_smp_ops(struct plat_smp_ops *ops) { if (mp_ops) printk(KERN_WARNING "Overriding previously set SMP ops\n"); mp_ops = ops; } /* * First C code run on the secondary CPUs after being started up by * the master. */ asmlinkage __cpuinit void start_secondary(void) { unsigned int cpu; #ifdef CONFIG_MIPS_MT_SMTC /* Only do cpu_probe for first TC of CPU */ if ((read_c0_tcbind() & TCBIND_CURTC) == 0) #endif /* CONFIG_MIPS_MT_SMTC */ cpu_probe(); cpu_report(); per_cpu_trap_init(); mips_clockevent_init(); mp_ops->init_secondary(); /* * XXX parity protection should be folded in here when it's converted * to an option instead of something based on .cputype */ calibrate_delay(); preempt_disable(); cpu = smp_processor_id(); cpu_data[cpu].udelay_val = loops_per_jiffy; notify_cpu_starting(cpu); mp_ops->smp_finish(); set_cpu_sibling_map(cpu); cpu_set(cpu, cpu_callin_map); synchronise_count_slave(); cpu_idle(); } /* * Call into both interrupt handlers, as we share the IPI for them */ void __irq_entry smp_call_function_interrupt(void) { irq_enter(); generic_smp_call_function_single_interrupt(); generic_smp_call_function_interrupt(); irq_exit(); } static void stop_this_cpu(void *dummy) { /* * Remove this CPU: */ set_cpu_online(smp_processor_id(), false); for (;;) { if (cpu_wait) (*cpu_wait)(); /* Wait if available. 
*/ } } void smp_send_stop(void) { smp_call_function(stop_this_cpu, NULL, 0); } void __init smp_cpus_done(unsigned int max_cpus) { mp_ops->cpus_done(); synchronise_count_master(); } /* called from main before smp_init() */ void __init smp_prepare_cpus(unsigned int max_cpus) { init_new_context(current, &init_mm); current_thread_info()->cpu = 0; mp_ops->prepare_cpus(max_cpus); set_cpu_sibling_map(0); #ifndef CONFIG_HOTPLUG_CPU init_cpu_present(cpu_possible_mask); #endif } /* preload SMP state for boot cpu */ void __devinit smp_prepare_boot_cpu(void) { set_cpu_possible(0, true); set_cpu_online(0, true); cpu_set(0, cpu_callin_map); } /* * Called once for each "cpu_possible(cpu)". Needs to spin up the cpu * and keep control until "cpu_online(cpu)" is set. Note: cpu is * physical, not logical. */ static struct task_struct *cpu_idle_thread[NR_CPUS]; struct create_idle { struct work_struct work; struct task_struct *idle; struct completion done; int cpu; }; static void __cpuinit do_fork_idle(struct work_struct *work) { struct create_idle *c_idle = container_of(work, struct create_idle, work); c_idle->idle = fork_idle(c_idle->cpu); complete(&c_idle->done); } int __cpuinit __cpu_up(unsigned int cpu) { struct task_struct *idle; /* * Processor goes to start_secondary(), sets online flag * The following code is purely to make sure * Linux can schedule processes on this slave. */ if (!cpu_idle_thread[cpu]) { /* * Schedule work item to avoid forking user task * Ported from arch/x86/kernel/smpboot.c */ struct create_idle c_idle = { .cpu = cpu, .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), }; INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); schedule_work(&c_idle.work); wait_for_completion(&c_idle.done); idle = cpu_idle_thread[cpu] = c_idle.idle; if (IS_ERR(idle)) panic(KERN_ERR "Fork failed for CPU %d", cpu); } else { idle = cpu_idle_thread[cpu]; init_idle(idle, cpu); } mp_ops->boot_secondary(cpu, idle); /* * Trust is futile. We should really have timeouts ... 
*/ while (!cpu_isset(cpu, cpu_callin_map)) udelay(100); set_cpu_online(cpu, true); return 0; } /* Not really SMP stuff ... */ int setup_profiling_timer(unsigned int multiplier) { return 0; } static void flush_tlb_all_ipi(void *info) { local_flush_tlb_all(); } void flush_tlb_all(void) { on_each_cpu(flush_tlb_all_ipi, NULL, 1); } static void flush_tlb_mm_ipi(void *mm) { local_flush_tlb_mm((struct mm_struct *)mm); } /* * Special Variant of smp_call_function for use by TLB functions: * * o No return value * o collapses to normal function call on UP kernels * o collapses to normal function call on systems with a single shared * primary cache. * o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core. */ static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) { #ifndef CONFIG_MIPS_MT_SMTC smp_call_function(func, info, 1); #endif } static inline void smp_on_each_tlb(void (*func) (void *info), void *info) { preempt_disable(); smp_on_other_tlbs(func, info); func(info); preempt_enable(); } /* * The following tlb flush calls are invoked when old translations are * being torn down, or pte attributes are changing. For single threaded * address spaces, a new context is obtained on the current cpu, and tlb * context on other cpus are invalidated to force a new context allocation * at switch_mm time, should the mm ever be used on other cpus. For * multithreaded address spaces, intercpu interrupts have to be sent. * Another case where intercpu interrupts are required is when the target * mm might be active on another cpu (eg debuggers doing the flushes on * behalf of debugees, kswapd stealing pages from another process etc). * Kanoj 07/00. 
*/ void flush_tlb_mm(struct mm_struct *mm) { preempt_disable(); if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { smp_on_other_tlbs(flush_tlb_mm_ipi, mm); } else { unsigned int cpu; for_each_online_cpu(cpu) { if (cpu != smp_processor_id() && cpu_context(cpu, mm)) cpu_context(cpu, mm) = 0; } } local_flush_tlb_mm(mm); preempt_enable(); } struct flush_tlb_data { struct vm_area_struct *vma; unsigned long addr1; unsigned long addr2; }; static void flush_tlb_range_ipi(void *info) { struct flush_tlb_data *fd = info; local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); } void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; preempt_disable(); if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { struct flush_tlb_data fd = { .vma = vma, .addr1 = start, .addr2 = end, }; smp_on_other_tlbs(flush_tlb_range_ipi, &fd); } else { unsigned int cpu; for_each_online_cpu(cpu) { if (cpu != smp_processor_id() && cpu_context(cpu, mm)) cpu_context(cpu, mm) = 0; } } local_flush_tlb_range(vma, start, end); preempt_enable(); } static void flush_tlb_kernel_range_ipi(void *info) { struct flush_tlb_data *fd = info; local_flush_tlb_kernel_range(fd->addr1, fd->addr2); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) { struct flush_tlb_data fd = { .addr1 = start, .addr2 = end, }; on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1); } static void flush_tlb_page_ipi(void *info) { struct flush_tlb_data *fd = info; local_flush_tlb_page(fd->vma, fd->addr1); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { preempt_disable(); if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) { struct flush_tlb_data fd = { .vma = vma, .addr1 = page, }; smp_on_other_tlbs(flush_tlb_page_ipi, &fd); } else { unsigned int cpu; for_each_online_cpu(cpu) { if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm)) cpu_context(cpu, vma->vm_mm) = 0; } } 
local_flush_tlb_page(vma, page); preempt_enable(); } static void flush_tlb_one_ipi(void *info) { unsigned long vaddr = (unsigned long) info; local_flush_tlb_one(vaddr); } void flush_tlb_one(unsigned long vaddr) { smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr); } EXPORT_SYMBOL(flush_tlb_page); EXPORT_SYMBOL(flush_tlb_one);
gpl-2.0
thewisenerd/android_kernel_xiaomi_armani
drivers/mtd/nand/s3c2410.c
4928
29430
/* linux/drivers/mtd/nand/s3c2410.c * * Copyright © 2004-2008 Simtec Electronics * http://armlinux.simtec.co.uk/ * Ben Dooks <ben@simtec.co.uk> * * Samsung S3C2410/S3C2440/S3C2412 NAND driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifdef CONFIG_MTD_NAND_S3C2410_DEBUG #define DEBUG #endif #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/cpufreq.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <asm/io.h> #include <plat/regs-nand.h> #include <plat/nand.h> #ifdef CONFIG_MTD_NAND_S3C2410_HWECC static int hardware_ecc = 1; #else static int hardware_ecc = 0; #endif #ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP static const int clock_stop = 1; #else static const int clock_stop = 0; #endif /* new oob placement block for use with hardware ecc generation */ static struct nand_ecclayout nand_hw_eccoob = { .eccbytes = 3, .eccpos = {0, 1, 2}, .oobfree = {{8, 8}} }; /* controller and mtd information */ struct s3c2410_nand_info; /** * struct s3c2410_nand_mtd - driver MTD structure * @mtd: The MTD instance to 
pass to the MTD layer. * @chip: The NAND chip information. * @set: The platform information supplied for this set of NAND chips. * @info: Link back to the hardware information. * @scan_res: The result from calling nand_scan_ident(). */ struct s3c2410_nand_mtd { struct mtd_info mtd; struct nand_chip chip; struct s3c2410_nand_set *set; struct s3c2410_nand_info *info; int scan_res; }; enum s3c_cpu_type { TYPE_S3C2410, TYPE_S3C2412, TYPE_S3C2440, }; enum s3c_nand_clk_state { CLOCK_DISABLE = 0, CLOCK_ENABLE, CLOCK_SUSPEND, }; /* overview of the s3c2410 nand state */ /** * struct s3c2410_nand_info - NAND controller state. * @mtds: An array of MTD instances on this controoler. * @platform: The platform data for this board. * @device: The platform device we bound to. * @area: The IO area resource that came from request_mem_region(). * @clk: The clock resource for this controller. * @regs: The area mapped for the hardware registers described by @area. * @sel_reg: Pointer to the register controlling the NAND selection. * @sel_bit: The bit in @sel_reg to select the NAND chip. * @mtd_count: The number of MTDs created from this controller. * @save_sel: The contents of @sel_reg to be saved over suspend. * @clk_rate: The clock rate from @clk. * @clk_state: The current clock state. * @cpu_type: The exact type of this controller. 
*/ struct s3c2410_nand_info { /* mtd info */ struct nand_hw_control controller; struct s3c2410_nand_mtd *mtds; struct s3c2410_platform_nand *platform; /* device info */ struct device *device; struct resource *area; struct clk *clk; void __iomem *regs; void __iomem *sel_reg; int sel_bit; int mtd_count; unsigned long save_sel; unsigned long clk_rate; enum s3c_nand_clk_state clk_state; enum s3c_cpu_type cpu_type; #ifdef CONFIG_CPU_FREQ struct notifier_block freq_transition; #endif }; /* conversion functions */ static struct s3c2410_nand_mtd *s3c2410_nand_mtd_toours(struct mtd_info *mtd) { return container_of(mtd, struct s3c2410_nand_mtd, mtd); } static struct s3c2410_nand_info *s3c2410_nand_mtd_toinfo(struct mtd_info *mtd) { return s3c2410_nand_mtd_toours(mtd)->info; } static struct s3c2410_nand_info *to_nand_info(struct platform_device *dev) { return platform_get_drvdata(dev); } static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev) { return dev->dev.platform_data; } static inline int allow_clk_suspend(struct s3c2410_nand_info *info) { return clock_stop; } /** * s3c2410_nand_clk_set_state - Enable, disable or suspend NAND clock. * @info: The controller instance. * @new_state: State to which clock should be set. */ static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info, enum s3c_nand_clk_state new_state) { if (!allow_clk_suspend(info) && new_state == CLOCK_SUSPEND) return; if (info->clk_state == CLOCK_ENABLE) { if (new_state != CLOCK_ENABLE) clk_disable(info->clk); } else { if (new_state == CLOCK_ENABLE) clk_enable(info->clk); } info->clk_state = new_state; } /* timing calculations */ #define NS_IN_KHZ 1000000 /** * s3c_nand_calc_rate - calculate timing data. * @wanted: The cycle time in nanoseconds. * @clk: The clock rate in kHz. * @max: The maximum divider value. * * Calculate the timing value from the given parameters. 
*/ static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max) { int result; result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ); pr_debug("result %d from %ld, %d\n", result, clk, wanted); if (result > max) { printk("%d ns is too big for current clock rate %ld\n", wanted, clk); return -1; } if (result < 1) result = 1; return result; } #define to_ns(ticks,clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk)) /* controller setup */ /** * s3c2410_nand_setrate - setup controller timing information. * @info: The controller instance. * * Given the information supplied by the platform, calculate and set * the necessary timing registers in the hardware to generate the * necessary timing cycles to the hardware. */ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info) { struct s3c2410_platform_nand *plat = info->platform; int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4; int tacls, twrph0, twrph1; unsigned long clkrate = clk_get_rate(info->clk); unsigned long uninitialized_var(set), cfg, uninitialized_var(mask); unsigned long flags; /* calculate the timing information for the controller */ info->clk_rate = clkrate; clkrate /= 1000; /* turn clock into kHz for ease of use */ if (plat != NULL) { tacls = s3c_nand_calc_rate(plat->tacls, clkrate, tacls_max); twrph0 = s3c_nand_calc_rate(plat->twrph0, clkrate, 8); twrph1 = s3c_nand_calc_rate(plat->twrph1, clkrate, 8); } else { /* default timings */ tacls = tacls_max; twrph0 = 8; twrph1 = 8; } if (tacls < 0 || twrph0 < 0 || twrph1 < 0) { dev_err(info->device, "cannot get suitable timings\n"); return -EINVAL; } dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n", tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate)); switch (info->cpu_type) { case TYPE_S3C2410: mask = (S3C2410_NFCONF_TACLS(3) | S3C2410_NFCONF_TWRPH0(7) | S3C2410_NFCONF_TWRPH1(7)); set = S3C2410_NFCONF_EN; set |= S3C2410_NFCONF_TACLS(tacls - 1); set |= S3C2410_NFCONF_TWRPH0(twrph0 - 1); 
set |= S3C2410_NFCONF_TWRPH1(twrph1 - 1); break; case TYPE_S3C2440: case TYPE_S3C2412: mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) | S3C2440_NFCONF_TWRPH0(7) | S3C2440_NFCONF_TWRPH1(7)); set = S3C2440_NFCONF_TACLS(tacls - 1); set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1); set |= S3C2440_NFCONF_TWRPH1(twrph1 - 1); break; default: BUG(); } local_irq_save(flags); cfg = readl(info->regs + S3C2410_NFCONF); cfg &= ~mask; cfg |= set; writel(cfg, info->regs + S3C2410_NFCONF); local_irq_restore(flags); dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg); return 0; } /** * s3c2410_nand_inithw - basic hardware initialisation * @info: The hardware state. * * Do the basic initialisation of the hardware, using s3c2410_nand_setrate() * to setup the hardware access speeds and set the controller to be enabled. */ static int s3c2410_nand_inithw(struct s3c2410_nand_info *info) { int ret; ret = s3c2410_nand_setrate(info); if (ret < 0) return ret; switch (info->cpu_type) { case TYPE_S3C2410: default: break; case TYPE_S3C2440: case TYPE_S3C2412: /* enable the controller and de-assert nFCE */ writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT); } return 0; } /** * s3c2410_nand_select_chip - select the given nand chip * @mtd: The MTD instance for this chip. * @chip: The chip number. * * This is called by the MTD layer to either select a given chip for the * @mtd instance, or to indicate that the access has finished and the * chip can be de-selected. * * The routine ensures that the nFCE line is correctly setup, and any * platform specific selection code is called to route nFCE to the specific * chip. 
*/ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip) { struct s3c2410_nand_info *info; struct s3c2410_nand_mtd *nmtd; struct nand_chip *this = mtd->priv; unsigned long cur; nmtd = this->priv; info = nmtd->info; if (chip != -1) s3c2410_nand_clk_set_state(info, CLOCK_ENABLE); cur = readl(info->sel_reg); if (chip == -1) { cur |= info->sel_bit; } else { if (nmtd->set != NULL && chip > nmtd->set->nr_chips) { dev_err(info->device, "invalid chip %d\n", chip); return; } if (info->platform != NULL) { if (info->platform->select_chip != NULL) (info->platform->select_chip) (nmtd->set, chip); } cur &= ~info->sel_bit; } writel(cur, info->sel_reg); if (chip == -1) s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND); } /* s3c2410_nand_hwcontrol * * Issue command and address cycles to the chip */ static void s3c2410_nand_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); if (cmd == NAND_CMD_NONE) return; if (ctrl & NAND_CLE) writeb(cmd, info->regs + S3C2410_NFCMD); else writeb(cmd, info->regs + S3C2410_NFADDR); } /* command and control functions */ static void s3c2440_nand_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); if (cmd == NAND_CMD_NONE) return; if (ctrl & NAND_CLE) writeb(cmd, info->regs + S3C2440_NFCMD); else writeb(cmd, info->regs + S3C2440_NFADDR); } /* s3c2410_nand_devready() * * returns 0 if the nand is busy, 1 if it is ready */ static int s3c2410_nand_devready(struct mtd_info *mtd) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY; } static int s3c2440_nand_devready(struct mtd_info *mtd) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY; } static int s3c2412_nand_devready(struct mtd_info *mtd) { struct s3c2410_nand_info *info = 
s3c2410_nand_mtd_toinfo(mtd); return readb(info->regs + S3C2412_NFSTAT) & S3C2412_NFSTAT_READY; } /* ECC handling functions */ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); unsigned int diff0, diff1, diff2; unsigned int bit, byte; pr_debug("%s(%p,%p,%p,%p)\n", __func__, mtd, dat, read_ecc, calc_ecc); diff0 = read_ecc[0] ^ calc_ecc[0]; diff1 = read_ecc[1] ^ calc_ecc[1]; diff2 = read_ecc[2] ^ calc_ecc[2]; pr_debug("%s: rd %02x%02x%02x calc %02x%02x%02x diff %02x%02x%02x\n", __func__, read_ecc[0], read_ecc[1], read_ecc[2], calc_ecc[0], calc_ecc[1], calc_ecc[2], diff0, diff1, diff2); if (diff0 == 0 && diff1 == 0 && diff2 == 0) return 0; /* ECC is ok */ /* sometimes people do not think about using the ECC, so check * to see if we have an 0xff,0xff,0xff read ECC and then ignore * the error, on the assumption that this is an un-eccd page. */ if (read_ecc[0] == 0xff && read_ecc[1] == 0xff && read_ecc[2] == 0xff && info->platform->ignore_unset_ecc) return 0; /* Can we correct this ECC (ie, one row and column change). 
* Note, this is similar to the 256 error code on smartmedia */ if (((diff0 ^ (diff0 >> 1)) & 0x55) == 0x55 && ((diff1 ^ (diff1 >> 1)) & 0x55) == 0x55 && ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) { /* calculate the bit position of the error */ bit = ((diff2 >> 3) & 1) | ((diff2 >> 4) & 2) | ((diff2 >> 5) & 4); /* calculate the byte position of the error */ byte = ((diff2 << 7) & 0x100) | ((diff1 << 0) & 0x80) | ((diff1 << 1) & 0x40) | ((diff1 << 2) & 0x20) | ((diff1 << 3) & 0x10) | ((diff0 >> 4) & 0x08) | ((diff0 >> 3) & 0x04) | ((diff0 >> 2) & 0x02) | ((diff0 >> 1) & 0x01); dev_dbg(info->device, "correcting error bit %d, byte %d\n", bit, byte); dat[byte] ^= (1 << bit); return 1; } /* if there is only one bit difference in the ECC, then * one of only a row or column parity has changed, which * means the error is most probably in the ECC itself */ diff0 |= (diff1 << 8); diff0 |= (diff2 << 16); if ((diff0 & ~(1<<fls(diff0))) == 0) return 1; return -1; } /* ECC functions * * These allow the s3c2410 and s3c2440 to use the controller's ECC * generator block to ECC the data as it passes through] */ static void s3c2410_nand_enable_hwecc(struct mtd_info *mtd, int mode) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); unsigned long ctrl; ctrl = readl(info->regs + S3C2410_NFCONF); ctrl |= S3C2410_NFCONF_INITECC; writel(ctrl, info->regs + S3C2410_NFCONF); } static void s3c2412_nand_enable_hwecc(struct mtd_info *mtd, int mode) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); unsigned long ctrl; ctrl = readl(info->regs + S3C2440_NFCONT); writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC, info->regs + S3C2440_NFCONT); } static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); unsigned long ctrl; ctrl = readl(info->regs + S3C2440_NFCONT); writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT); } static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char 
*dat, u_char *ecc_code) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); ecc_code[0] = readb(info->regs + S3C2410_NFECC + 0); ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1); ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2); pr_debug("%s: returning ecc %02x%02x%02x\n", __func__, ecc_code[0], ecc_code[1], ecc_code[2]); return 0; } static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); unsigned long ecc = readl(info->regs + S3C2412_NFMECC0); ecc_code[0] = ecc; ecc_code[1] = ecc >> 8; ecc_code[2] = ecc >> 16; pr_debug("calculate_ecc: returning ecc %02x,%02x,%02x\n", ecc_code[0], ecc_code[1], ecc_code[2]); return 0; } static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); unsigned long ecc = readl(info->regs + S3C2440_NFMECC0); ecc_code[0] = ecc; ecc_code[1] = ecc >> 8; ecc_code[2] = ecc >> 16; pr_debug("%s: returning ecc %06lx\n", __func__, ecc & 0xffffff); return 0; } /* over-ride the standard functions for a little more speed. 
We can * use read/write block to move the data buffers to/from the controller */ static void s3c2410_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) { struct nand_chip *this = mtd->priv; readsb(this->IO_ADDR_R, buf, len); } static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); readsl(info->regs + S3C2440_NFDATA, buf, len >> 2); /* cleanup if we've got less than a word to do */ if (len & 3) { buf += len & ~3; for (; len & 3; len--) *buf++ = readb(info->regs + S3C2440_NFDATA); } } static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) { struct nand_chip *this = mtd->priv; writesb(this->IO_ADDR_W, buf, len); } static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); writesl(info->regs + S3C2440_NFDATA, buf, len >> 2); /* cleanup any fractional write */ if (len & 3) { buf += len & ~3; for (; len & 3; len--, buf++) writeb(*buf, info->regs + S3C2440_NFDATA); } } /* cpufreq driver support */ #ifdef CONFIG_CPU_FREQ static int s3c2410_nand_cpufreq_transition(struct notifier_block *nb, unsigned long val, void *data) { struct s3c2410_nand_info *info; unsigned long newclk; info = container_of(nb, struct s3c2410_nand_info, freq_transition); newclk = clk_get_rate(info->clk); if ((val == CPUFREQ_POSTCHANGE && newclk < info->clk_rate) || (val == CPUFREQ_PRECHANGE && newclk > info->clk_rate)) { s3c2410_nand_setrate(info); } return 0; } static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info) { info->freq_transition.notifier_call = s3c2410_nand_cpufreq_transition; return cpufreq_register_notifier(&info->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); } static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info) { cpufreq_unregister_notifier(&info->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); } #else static inline int 
s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info) { return 0; } static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info) { } #endif /* device management functions */ static int s3c24xx_nand_remove(struct platform_device *pdev) { struct s3c2410_nand_info *info = to_nand_info(pdev); platform_set_drvdata(pdev, NULL); if (info == NULL) return 0; s3c2410_nand_cpufreq_deregister(info); /* Release all our mtds and their partitions, then go through * freeing the resources used */ if (info->mtds != NULL) { struct s3c2410_nand_mtd *ptr = info->mtds; int mtdno; for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) { pr_debug("releasing mtd %d (%p)\n", mtdno, ptr); nand_release(&ptr->mtd); } kfree(info->mtds); } /* free the common resources */ if (!IS_ERR(info->clk)) { s3c2410_nand_clk_set_state(info, CLOCK_DISABLE); clk_put(info->clk); } if (info->regs != NULL) { iounmap(info->regs); info->regs = NULL; } if (info->area != NULL) { release_resource(info->area); kfree(info->area); info->area = NULL; } kfree(info); return 0; } static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, struct s3c2410_nand_mtd *mtd, struct s3c2410_nand_set *set) { if (set) mtd->mtd.name = set->name; return mtd_device_parse_register(&mtd->mtd, NULL, NULL, set->partitions, set->nr_partitions); } /** * s3c2410_nand_init_chip - initialise a single instance of an chip * @info: The base NAND controller the chip is on. * @nmtd: The new controller MTD instance to fill in. * @set: The information passed from the board specific platform data. * * Initialise the given @nmtd from the information in @info and @set. This * readies the structure for use with the MTD layer functions by ensuring * all pointers are setup and the necessary control routines selected. 
*/ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info, struct s3c2410_nand_mtd *nmtd, struct s3c2410_nand_set *set) { struct nand_chip *chip = &nmtd->chip; void __iomem *regs = info->regs; chip->write_buf = s3c2410_nand_write_buf; chip->read_buf = s3c2410_nand_read_buf; chip->select_chip = s3c2410_nand_select_chip; chip->chip_delay = 50; chip->priv = nmtd; chip->options = set->options; chip->controller = &info->controller; switch (info->cpu_type) { case TYPE_S3C2410: chip->IO_ADDR_W = regs + S3C2410_NFDATA; info->sel_reg = regs + S3C2410_NFCONF; info->sel_bit = S3C2410_NFCONF_nFCE; chip->cmd_ctrl = s3c2410_nand_hwcontrol; chip->dev_ready = s3c2410_nand_devready; break; case TYPE_S3C2440: chip->IO_ADDR_W = regs + S3C2440_NFDATA; info->sel_reg = regs + S3C2440_NFCONT; info->sel_bit = S3C2440_NFCONT_nFCE; chip->cmd_ctrl = s3c2440_nand_hwcontrol; chip->dev_ready = s3c2440_nand_devready; chip->read_buf = s3c2440_nand_read_buf; chip->write_buf = s3c2440_nand_write_buf; break; case TYPE_S3C2412: chip->IO_ADDR_W = regs + S3C2440_NFDATA; info->sel_reg = regs + S3C2440_NFCONT; info->sel_bit = S3C2412_NFCONT_nFCE0; chip->cmd_ctrl = s3c2440_nand_hwcontrol; chip->dev_ready = s3c2412_nand_devready; if (readl(regs + S3C2410_NFCONF) & S3C2412_NFCONF_NANDBOOT) dev_info(info->device, "System booted from NAND\n"); break; } chip->IO_ADDR_R = chip->IO_ADDR_W; nmtd->info = info; nmtd->mtd.priv = chip; nmtd->mtd.owner = THIS_MODULE; nmtd->set = set; if (hardware_ecc) { chip->ecc.calculate = s3c2410_nand_calculate_ecc; chip->ecc.correct = s3c2410_nand_correct_data; chip->ecc.mode = NAND_ECC_HW; chip->ecc.strength = 1; switch (info->cpu_type) { case TYPE_S3C2410: chip->ecc.hwctl = s3c2410_nand_enable_hwecc; chip->ecc.calculate = s3c2410_nand_calculate_ecc; break; case TYPE_S3C2412: chip->ecc.hwctl = s3c2412_nand_enable_hwecc; chip->ecc.calculate = s3c2412_nand_calculate_ecc; break; case TYPE_S3C2440: chip->ecc.hwctl = s3c2440_nand_enable_hwecc; chip->ecc.calculate = 
s3c2440_nand_calculate_ecc; break; } } else { chip->ecc.mode = NAND_ECC_SOFT; } if (set->ecc_layout != NULL) chip->ecc.layout = set->ecc_layout; if (set->disable_ecc) chip->ecc.mode = NAND_ECC_NONE; switch (chip->ecc.mode) { case NAND_ECC_NONE: dev_info(info->device, "NAND ECC disabled\n"); break; case NAND_ECC_SOFT: dev_info(info->device, "NAND soft ECC\n"); break; case NAND_ECC_HW: dev_info(info->device, "NAND hardware ECC\n"); break; default: dev_info(info->device, "NAND ECC UNKNOWN\n"); break; } /* If you use u-boot BBT creation code, specifying this flag will * let the kernel fish out the BBT from the NAND, and also skip the * full NAND scan that can take 1/2s or so. Little things... */ if (set->flash_bbt) { chip->bbt_options |= NAND_BBT_USE_FLASH; chip->options |= NAND_SKIP_BBTSCAN; } } /** * s3c2410_nand_update_chip - post probe update * @info: The controller instance. * @nmtd: The driver version of the MTD instance. * * This routine is called after the chip probe has successfully completed * and the relevant per-chip information updated. This call ensure that * we update the internal state accordingly. * * The internal state is currently limited to the ECC state information. */ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info, struct s3c2410_nand_mtd *nmtd) { struct nand_chip *chip = &nmtd->chip; dev_dbg(info->device, "chip %p => page shift %d\n", chip, chip->page_shift); if (chip->ecc.mode != NAND_ECC_HW) return; /* change the behaviour depending on wether we are using * the large or small page nand device */ if (chip->page_shift > 10) { chip->ecc.size = 256; chip->ecc.bytes = 3; } else { chip->ecc.size = 512; chip->ecc.bytes = 3; chip->ecc.layout = &nand_hw_eccoob; } } /* s3c24xx_nand_probe * * called by device layer when it finds a device matching * one our driver can handled. 
This code checks to see if * it can allocate all necessary resources then calls the * nand layer to look for devices */ static int s3c24xx_nand_probe(struct platform_device *pdev) { struct s3c2410_platform_nand *plat = to_nand_plat(pdev); enum s3c_cpu_type cpu_type; struct s3c2410_nand_info *info; struct s3c2410_nand_mtd *nmtd; struct s3c2410_nand_set *sets; struct resource *res; int err = 0; int size; int nr_sets; int setno; cpu_type = platform_get_device_id(pdev)->driver_data; pr_debug("s3c2410_nand_probe(%p)\n", pdev); info = kzalloc(sizeof(*info), GFP_KERNEL); if (info == NULL) { dev_err(&pdev->dev, "no memory for flash info\n"); err = -ENOMEM; goto exit_error; } platform_set_drvdata(pdev, info); spin_lock_init(&info->controller.lock); init_waitqueue_head(&info->controller.wq); /* get the clock source and enable it */ info->clk = clk_get(&pdev->dev, "nand"); if (IS_ERR(info->clk)) { dev_err(&pdev->dev, "failed to get clock\n"); err = -ENOENT; goto exit_error; } s3c2410_nand_clk_set_state(info, CLOCK_ENABLE); /* allocate and map the resource */ /* currently we assume we have the one resource */ res = pdev->resource; size = resource_size(res); info->area = request_mem_region(res->start, size, pdev->name); if (info->area == NULL) { dev_err(&pdev->dev, "cannot reserve register region\n"); err = -ENOENT; goto exit_error; } info->device = &pdev->dev; info->platform = plat; info->regs = ioremap(res->start, size); info->cpu_type = cpu_type; if (info->regs == NULL) { dev_err(&pdev->dev, "cannot reserve register region\n"); err = -EIO; goto exit_error; } dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs); /* initialise the hardware */ err = s3c2410_nand_inithw(info); if (err != 0) goto exit_error; sets = (plat != NULL) ? plat->sets : NULL; nr_sets = (plat != NULL) ? 
plat->nr_sets : 1; info->mtd_count = nr_sets; /* allocate our information */ size = nr_sets * sizeof(*info->mtds); info->mtds = kzalloc(size, GFP_KERNEL); if (info->mtds == NULL) { dev_err(&pdev->dev, "failed to allocate mtd storage\n"); err = -ENOMEM; goto exit_error; } /* initialise all possible chips */ nmtd = info->mtds; for (setno = 0; setno < nr_sets; setno++, nmtd++) { pr_debug("initialising set %d (%p, info %p)\n", setno, nmtd, info); s3c2410_nand_init_chip(info, nmtd, sets); nmtd->scan_res = nand_scan_ident(&nmtd->mtd, (sets) ? sets->nr_chips : 1, NULL); if (nmtd->scan_res == 0) { s3c2410_nand_update_chip(info, nmtd); nand_scan_tail(&nmtd->mtd); s3c2410_nand_add_partition(info, nmtd, sets); } if (sets != NULL) sets++; } err = s3c2410_nand_cpufreq_register(info); if (err < 0) { dev_err(&pdev->dev, "failed to init cpufreq support\n"); goto exit_error; } if (allow_clk_suspend(info)) { dev_info(&pdev->dev, "clock idle support enabled\n"); s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND); } pr_debug("initialised ok\n"); return 0; exit_error: s3c24xx_nand_remove(pdev); if (err == 0) err = -EINVAL; return err; } /* PM Support */ #ifdef CONFIG_PM static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm) { struct s3c2410_nand_info *info = platform_get_drvdata(dev); if (info) { info->save_sel = readl(info->sel_reg); /* For the moment, we must ensure nFCE is high during * the time we are suspended. This really should be * handled by suspending the MTDs we are using, but * that is currently not the case. */ writel(info->save_sel | info->sel_bit, info->sel_reg); s3c2410_nand_clk_set_state(info, CLOCK_DISABLE); } return 0; } static int s3c24xx_nand_resume(struct platform_device *dev) { struct s3c2410_nand_info *info = platform_get_drvdata(dev); unsigned long sel; if (info) { s3c2410_nand_clk_set_state(info, CLOCK_ENABLE); s3c2410_nand_inithw(info); /* Restore the state of the nFCE line. 
*/ sel = readl(info->sel_reg); sel &= ~info->sel_bit; sel |= info->save_sel & info->sel_bit; writel(sel, info->sel_reg); s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND); } return 0; } #else #define s3c24xx_nand_suspend NULL #define s3c24xx_nand_resume NULL #endif /* driver device registration */ static struct platform_device_id s3c24xx_driver_ids[] = { { .name = "s3c2410-nand", .driver_data = TYPE_S3C2410, }, { .name = "s3c2440-nand", .driver_data = TYPE_S3C2440, }, { .name = "s3c2412-nand", .driver_data = TYPE_S3C2412, }, { .name = "s3c6400-nand", .driver_data = TYPE_S3C2412, /* compatible with 2412 */ }, { } }; MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids); static struct platform_driver s3c24xx_nand_driver = { .probe = s3c24xx_nand_probe, .remove = s3c24xx_nand_remove, .suspend = s3c24xx_nand_suspend, .resume = s3c24xx_nand_resume, .id_table = s3c24xx_driver_ids, .driver = { .name = "s3c24xx-nand", .owner = THIS_MODULE, }, }; static int __init s3c2410_nand_init(void) { printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n"); return platform_driver_register(&s3c24xx_nand_driver); } static void __exit s3c2410_nand_exit(void) { platform_driver_unregister(&s3c24xx_nand_driver); } module_init(s3c2410_nand_init); module_exit(s3c2410_nand_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
gpl-2.0
adityaxavier/MSM8610-Kernel
drivers/scsi/aic94xx/aic94xx_dev.c
5184
11428
/* * Aic94xx SAS/SATA DDB management * * Copyright (C) 2005 Adaptec, Inc. All rights reserved. * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> * * This file is licensed under GPLv2. * * This file is part of the aic94xx driver. * * The aic94xx driver is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; version 2 of the * License. * * The aic94xx driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with the aic94xx driver; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * $Id: //depot/aic94xx/aic94xx_dev.c#21 $ */ #include "aic94xx.h" #include "aic94xx_hwi.h" #include "aic94xx_reg.h" #include "aic94xx_sas.h" #define FIND_FREE_DDB(_ha) find_first_zero_bit((_ha)->hw_prof.ddb_bitmap, \ (_ha)->hw_prof.max_ddbs) #define SET_DDB(_ddb, _ha) set_bit(_ddb, (_ha)->hw_prof.ddb_bitmap) #define CLEAR_DDB(_ddb, _ha) clear_bit(_ddb, (_ha)->hw_prof.ddb_bitmap) static int asd_get_ddb(struct asd_ha_struct *asd_ha) { int ddb, i; ddb = FIND_FREE_DDB(asd_ha); if (ddb >= asd_ha->hw_prof.max_ddbs) { ddb = -ENOMEM; goto out; } SET_DDB(ddb, asd_ha); for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4) asd_ddbsite_write_dword(asd_ha, ddb, i, 0); out: return ddb; } #define INIT_CONN_TAG offsetof(struct asd_ddb_ssp_smp_target_port, init_conn_tag) #define DEST_SAS_ADDR offsetof(struct asd_ddb_ssp_smp_target_port, dest_sas_addr) #define SEND_QUEUE_HEAD offsetof(struct asd_ddb_ssp_smp_target_port, send_queue_head) #define DDB_TYPE offsetof(struct asd_ddb_ssp_smp_target_port, ddb_type) #define CONN_MASK offsetof(struct asd_ddb_ssp_smp_target_port, 
conn_mask) #define DDB_TARG_FLAGS offsetof(struct asd_ddb_ssp_smp_target_port, flags) #define DDB_TARG_FLAGS2 offsetof(struct asd_ddb_stp_sata_target_port, flags2) #define EXEC_QUEUE_TAIL offsetof(struct asd_ddb_ssp_smp_target_port, exec_queue_tail) #define SEND_QUEUE_TAIL offsetof(struct asd_ddb_ssp_smp_target_port, send_queue_tail) #define SISTER_DDB offsetof(struct asd_ddb_ssp_smp_target_port, sister_ddb) #define MAX_CCONN offsetof(struct asd_ddb_ssp_smp_target_port, max_concurrent_conn) #define NUM_CTX offsetof(struct asd_ddb_ssp_smp_target_port, num_contexts) #define ATA_CMD_SCBPTR offsetof(struct asd_ddb_stp_sata_target_port, ata_cmd_scbptr) #define SATA_TAG_ALLOC_MASK offsetof(struct asd_ddb_stp_sata_target_port, sata_tag_alloc_mask) #define NUM_SATA_TAGS offsetof(struct asd_ddb_stp_sata_target_port, num_sata_tags) #define SATA_STATUS offsetof(struct asd_ddb_stp_sata_target_port, sata_status) #define NCQ_DATA_SCB_PTR offsetof(struct asd_ddb_stp_sata_target_port, ncq_data_scb_ptr) #define ITNL_TIMEOUT offsetof(struct asd_ddb_ssp_smp_target_port, itnl_timeout) static void asd_free_ddb(struct asd_ha_struct *asd_ha, int ddb) { if (!ddb || ddb >= 0xFFFF) return; asd_ddbsite_write_byte(asd_ha, ddb, DDB_TYPE, DDB_TYPE_UNUSED); CLEAR_DDB(ddb, asd_ha); } static void asd_set_ddb_type(struct domain_device *dev) { struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; int ddb = (int) (unsigned long) dev->lldd_dev; if (dev->dev_type == SATA_PM_PORT) asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT); else if (dev->tproto) asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET); else asd_ddbsite_write_byte(asd_ha,ddb,DDB_TYPE,DDB_TYPE_INITIATOR); } static int asd_init_sata_tag_ddb(struct domain_device *dev) { struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; int ddb, i; ddb = asd_get_ddb(asd_ha); if (ddb < 0) return ddb; for (i = 0; i < sizeof(struct asd_ddb_sata_tag); i += 2) asd_ddbsite_write_word(asd_ha, ddb, i, 0xFFFF); 
asd_ddbsite_write_word(asd_ha, (int) (unsigned long) dev->lldd_dev, SISTER_DDB, ddb); return 0; } void asd_set_dmamode(struct domain_device *dev) { struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; struct ata_device *ata_dev = sas_to_ata_dev(dev); int ddb = (int) (unsigned long) dev->lldd_dev; u32 qdepth = 0; if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) { if (ata_id_has_ncq(ata_dev->id)) qdepth = ata_id_queue_depth(ata_dev->id); asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK, (1ULL<<qdepth)-1); asd_ddbsite_write_byte(asd_ha, ddb, NUM_SATA_TAGS, qdepth); } if (qdepth > 0) if (asd_init_sata_tag_ddb(dev) != 0) { unsigned long flags; spin_lock_irqsave(dev->sata_dev.ap->lock, flags); ata_dev->flags |= ATA_DFLAG_NCQ_OFF; spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags); } } static int asd_init_sata(struct domain_device *dev) { struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; int ddb = (int) (unsigned long) dev->lldd_dev; asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF); if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM || dev->dev_type == SATA_PM_PORT) { struct dev_to_host_fis *fis = (struct dev_to_host_fis *) dev->frame_rcvd; asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status); } asd_ddbsite_write_word(asd_ha, ddb, NCQ_DATA_SCB_PTR, 0xFFFF); return 0; } static int asd_init_target_ddb(struct domain_device *dev) { int ddb, i; struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; u8 flags = 0; ddb = asd_get_ddb(asd_ha); if (ddb < 0) return ddb; dev->lldd_dev = (void *) (unsigned long) ddb; asd_ddbsite_write_byte(asd_ha, ddb, 0, DDB_TP_CONN_TYPE); asd_ddbsite_write_byte(asd_ha, ddb, 1, 0); asd_ddbsite_write_word(asd_ha, ddb, INIT_CONN_TAG, 0xFFFF); for (i = 0; i < SAS_ADDR_SIZE; i++) asd_ddbsite_write_byte(asd_ha, ddb, DEST_SAS_ADDR+i, dev->sas_addr[i]); asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_HEAD, 0xFFFF); asd_set_ddb_type(dev); asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, 
dev->port->phy_mask); if (dev->port->oob_mode != SATA_OOB_MODE) { flags |= OPEN_REQUIRED; if ((dev->dev_type == SATA_DEV) || (dev->tproto & SAS_PROTOCOL_STP)) { struct smp_resp *rps_resp = &dev->sata_dev.rps_resp; if (rps_resp->frame_type == SMP_RESPONSE && rps_resp->function == SMP_REPORT_PHY_SATA && rps_resp->result == SMP_RESP_FUNC_ACC) { if (rps_resp->rps.affil_valid) flags |= STP_AFFIL_POL; if (rps_resp->rps.affil_supp) flags |= SUPPORTS_AFFIL; } } else { flags |= CONCURRENT_CONN_SUPP; if (!dev->parent && (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV)) asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN, 4); else asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN, dev->pathways); asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1); } } if (dev->dev_type == SATA_PM) flags |= SATA_MULTIPORT; asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags); flags = 0; if (dev->tproto & SAS_PROTOCOL_STP) flags |= STP_CL_POL_NO_TX; asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS2, flags); asd_ddbsite_write_word(asd_ha, ddb, EXEC_QUEUE_TAIL, 0xFFFF); asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF); asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF); if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { i = asd_init_sata(dev); if (i < 0) { asd_free_ddb(asd_ha, ddb); return i; } } if (dev->dev_type == SAS_END_DEV) { struct sas_end_device *rdev = rphy_to_end_device(dev->rphy); if (rdev->I_T_nexus_loss_timeout > 0) asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT, min(rdev->I_T_nexus_loss_timeout, (u16)ITNL_TIMEOUT_CONST)); else asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT, (u16)ITNL_TIMEOUT_CONST); } return 0; } static int asd_init_sata_pm_table_ddb(struct domain_device *dev) { struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; int ddb, i; ddb = asd_get_ddb(asd_ha); if (ddb < 0) return ddb; for (i = 0; i < 32; i += 2) asd_ddbsite_write_word(asd_ha, ddb, i, 0xFFFF); asd_ddbsite_write_word(asd_ha, (int) (unsigned long) dev->lldd_dev, 
SISTER_DDB, ddb); return 0; } #define PM_PORT_FLAGS offsetof(struct asd_ddb_sata_pm_port, pm_port_flags) #define PARENT_DDB offsetof(struct asd_ddb_sata_pm_port, parent_ddb) /** * asd_init_sata_pm_port_ddb -- SATA Port Multiplier Port * dev: pointer to domain device * * For SATA Port Multiplier Ports we need to allocate one SATA Port * Multiplier Port DDB and depending on whether the target on it * supports SATA II NCQ, one SATA Tag DDB. */ static int asd_init_sata_pm_port_ddb(struct domain_device *dev) { int ddb, i, parent_ddb, pmtable_ddb; struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; u8 flags; ddb = asd_get_ddb(asd_ha); if (ddb < 0) return ddb; asd_set_ddb_type(dev); flags = (dev->sata_dev.port_no << 4) | PM_PORT_SET; asd_ddbsite_write_byte(asd_ha, ddb, PM_PORT_FLAGS, flags); asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF); asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF); asd_init_sata(dev); parent_ddb = (int) (unsigned long) dev->parent->lldd_dev; asd_ddbsite_write_word(asd_ha, ddb, PARENT_DDB, parent_ddb); pmtable_ddb = asd_ddbsite_read_word(asd_ha, parent_ddb, SISTER_DDB); asd_ddbsite_write_word(asd_ha, pmtable_ddb, dev->sata_dev.port_no,ddb); if (asd_ddbsite_read_byte(asd_ha, ddb, NUM_SATA_TAGS) > 0) { i = asd_init_sata_tag_ddb(dev); if (i < 0) { asd_free_ddb(asd_ha, ddb); return i; } } return 0; } static int asd_init_initiator_ddb(struct domain_device *dev) { return -ENODEV; } /** * asd_init_sata_pm_ddb -- SATA Port Multiplier * dev: pointer to domain device * * For STP and direct-attached SATA Port Multipliers we need * one target port DDB entry and one SATA PM table DDB entry. 
*/ /* Allocate a target-port DDB plus its SATA PM table DDB; if the table DDB cannot be obtained, release the just-allocated target DDB so nothing leaks. */ static int asd_init_sata_pm_ddb(struct domain_device *dev) { int res = 0; res = asd_init_target_ddb(dev); if (res) goto out; res = asd_init_sata_pm_table_ddb(dev); if (res) asd_free_ddb(dev->port->ha->lldd_ha, (int) (unsigned long) dev->lldd_dev); out: return res; } /* libsas "device found" hook: set up the DDB(s) for a newly discovered device, dispatching on dev_type (PM, PM port, target, or initiator) under the ddb_lock. Returns 0 or a negative error from the type-specific initialiser. */ int asd_dev_found(struct domain_device *dev) { unsigned long flags; int res = 0; struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); switch (dev->dev_type) { case SATA_PM: res = asd_init_sata_pm_ddb(dev); break; case SATA_PM_PORT: res = asd_init_sata_pm_port_ddb(dev); break; default: if (dev->tproto) res = asd_init_target_ddb(dev); else res = asd_init_initiator_ddb(dev); } spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); return res; } /* libsas "device gone" hook: free the device's sister DDB (0xFFFF in SISTER_DDB means none), then its main DDB, and clear lldd_dev -- all under the ddb_lock. */ void asd_dev_gone(struct domain_device *dev) { int ddb, sister_ddb; unsigned long flags; struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); ddb = (int) (unsigned long) dev->lldd_dev; sister_ddb = asd_ddbsite_read_word(asd_ha, ddb, SISTER_DDB); if (sister_ddb != 0xFFFF) asd_free_ddb(asd_ha, sister_ddb); asd_free_ddb(asd_ha, ddb); dev->lldd_dev = NULL; spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); }
gpl-2.0
infected-lp/kernel_sony_msm8974
sound/firewire/isight.c
7232
18316
/* * Apple iSight audio driver * * Copyright (c) Clemens Ladisch <clemens@ladisch.de> * Licensed under the terms of the GNU General Public License, version 2. */ #include <asm/byteorder.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/firewire.h> #include <linux/firewire-constants.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/mutex.h> #include <linux/string.h> #include <sound/control.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/pcm.h> #include <sound/tlv.h> #include "lib.h" #include "iso-resources.h" #include "packets-buffer.h" #define OUI_APPLE 0x000a27 #define MODEL_APPLE_ISIGHT 0x000008 #define SW_ISIGHT_AUDIO 0x000010 #define REG_AUDIO_ENABLE 0x000 #define AUDIO_ENABLE 0x80000000 #define REG_DEF_AUDIO_GAIN 0x204 #define REG_GAIN_RAW_START 0x210 #define REG_GAIN_RAW_END 0x214 #define REG_GAIN_DB_START 0x218 #define REG_GAIN_DB_END 0x21c #define REG_SAMPLE_RATE_INQUIRY 0x280 #define REG_ISO_TX_CONFIG 0x300 #define SPEED_SHIFT 16 #define REG_SAMPLE_RATE 0x400 #define RATE_48000 0x80000000 #define REG_GAIN 0x500 #define REG_MUTE 0x504 #define MAX_FRAMES_PER_PACKET 475 #define QUEUE_LENGTH 20 struct isight { struct snd_card *card; struct fw_unit *unit; struct fw_device *device; u64 audio_base; struct snd_pcm_substream *pcm; struct mutex mutex; struct iso_packets_buffer buffer; struct fw_iso_resources resources; struct fw_iso_context *context; bool pcm_active; bool pcm_running; bool first_packet; int packet_index; u32 total_samples; unsigned int buffer_pointer; unsigned int period_counter; s32 gain_min, gain_max; unsigned int gain_tlv[4]; }; struct audio_payload { __be32 sample_count; __be32 signature; __be32 sample_total; __be32 reserved; __be16 samples[2 * MAX_FRAMES_PER_PACKET]; }; MODULE_DESCRIPTION("iSight audio driver"); MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); MODULE_LICENSE("GPL v2"); static struct fw_iso_packet audio_packet = { .payload_length = sizeof(struct 
audio_payload), .interrupt = 1, .header_length = 4, }; static void isight_update_pointers(struct isight *isight, unsigned int count) { struct snd_pcm_runtime *runtime = isight->pcm->runtime; unsigned int ptr; smp_wmb(); /* update buffer data before buffer pointer */ ptr = isight->buffer_pointer; ptr += count; if (ptr >= runtime->buffer_size) ptr -= runtime->buffer_size; ACCESS_ONCE(isight->buffer_pointer) = ptr; isight->period_counter += count; if (isight->period_counter >= runtime->period_size) { isight->period_counter -= runtime->period_size; snd_pcm_period_elapsed(isight->pcm); } } static void isight_samples(struct isight *isight, const __be16 *samples, unsigned int count) { struct snd_pcm_runtime *runtime; unsigned int count1; if (!ACCESS_ONCE(isight->pcm_running)) return; runtime = isight->pcm->runtime; if (isight->buffer_pointer + count <= runtime->buffer_size) { memcpy(runtime->dma_area + isight->buffer_pointer * 4, samples, count * 4); } else { count1 = runtime->buffer_size - isight->buffer_pointer; memcpy(runtime->dma_area + isight->buffer_pointer * 4, samples, count1 * 4); samples += count1 * 2; memcpy(runtime->dma_area, samples, (count - count1) * 4); } isight_update_pointers(isight, count); } static void isight_pcm_abort(struct isight *isight) { unsigned long flags; if (ACCESS_ONCE(isight->pcm_active)) { snd_pcm_stream_lock_irqsave(isight->pcm, flags); if (snd_pcm_running(isight->pcm)) snd_pcm_stop(isight->pcm, SNDRV_PCM_STATE_XRUN); snd_pcm_stream_unlock_irqrestore(isight->pcm, flags); } } static void isight_dropped_samples(struct isight *isight, unsigned int total) { struct snd_pcm_runtime *runtime; u32 dropped; unsigned int count1; if (!ACCESS_ONCE(isight->pcm_running)) return; runtime = isight->pcm->runtime; dropped = total - isight->total_samples; if (dropped < runtime->buffer_size) { if (isight->buffer_pointer + dropped <= runtime->buffer_size) { memset(runtime->dma_area + isight->buffer_pointer * 4, 0, dropped * 4); } else { count1 = 
runtime->buffer_size - isight->buffer_pointer; memset(runtime->dma_area + isight->buffer_pointer * 4, 0, count1 * 4); memset(runtime->dma_area, 0, (dropped - count1) * 4); } isight_update_pointers(isight, dropped); } else { isight_pcm_abort(isight); } } static void isight_packet(struct fw_iso_context *context, u32 cycle, size_t header_length, void *header, void *data) { struct isight *isight = data; const struct audio_payload *payload; unsigned int index, length, count, total; int err; if (isight->packet_index < 0) return; index = isight->packet_index; payload = isight->buffer.packets[index].buffer; length = be32_to_cpup(header) >> 16; if (likely(length >= 16 && payload->signature == cpu_to_be32(0x73676874/*"sght"*/))) { count = be32_to_cpu(payload->sample_count); if (likely(count <= (length - 16) / 4)) { total = be32_to_cpu(payload->sample_total); if (unlikely(total != isight->total_samples)) { if (!isight->first_packet) isight_dropped_samples(isight, total); isight->first_packet = false; isight->total_samples = total; } isight_samples(isight, payload->samples, count); isight->total_samples += count; } } err = fw_iso_context_queue(isight->context, &audio_packet, &isight->buffer.iso_buffer, isight->buffer.packets[index].offset); if (err < 0) { dev_err(&isight->unit->device, "queueing error: %d\n", err); isight_pcm_abort(isight); isight->packet_index = -1; return; } fw_iso_context_queue_flush(isight->context); if (++index >= QUEUE_LENGTH) index = 0; isight->packet_index = index; } static int isight_connect(struct isight *isight) { int ch, err, rcode, errors = 0; __be32 value; retry_after_bus_reset: ch = fw_iso_resources_allocate(&isight->resources, sizeof(struct audio_payload), isight->device->max_speed); if (ch < 0) { err = ch; goto error; } value = cpu_to_be32(ch | (isight->device->max_speed << SPEED_SHIFT)); for (;;) { rcode = fw_run_transaction( isight->device->card, TCODE_WRITE_QUADLET_REQUEST, isight->device->node_id, isight->resources.generation, 
isight->device->max_speed, isight->audio_base + REG_ISO_TX_CONFIG, &value, 4); if (rcode == RCODE_COMPLETE) { return 0; } else if (rcode == RCODE_GENERATION) { fw_iso_resources_free(&isight->resources); goto retry_after_bus_reset; } else if (rcode_is_permanent_error(rcode) || ++errors >= 3) { err = -EIO; goto err_resources; } msleep(5); } err_resources: fw_iso_resources_free(&isight->resources); error: return err; } static int isight_open(struct snd_pcm_substream *substream) { static const struct snd_pcm_hardware hardware = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER, .formats = SNDRV_PCM_FMTBIT_S16_BE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 4 * 1024 * 1024, .period_bytes_min = MAX_FRAMES_PER_PACKET * 4, .period_bytes_max = 1024 * 1024, .periods_min = 2, .periods_max = UINT_MAX, }; struct isight *isight = substream->private_data; substream->runtime->hw = hardware; return iso_packets_buffer_init(&isight->buffer, isight->unit, QUEUE_LENGTH, sizeof(struct audio_payload), DMA_FROM_DEVICE); } static int isight_close(struct snd_pcm_substream *substream) { struct isight *isight = substream->private_data; iso_packets_buffer_destroy(&isight->buffer, isight->unit); return 0; } static int isight_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct isight *isight = substream->private_data; int err; err = snd_pcm_lib_alloc_vmalloc_buffer(substream, params_buffer_bytes(hw_params)); if (err < 0) return err; ACCESS_ONCE(isight->pcm_active) = true; return 0; } static int reg_read(struct isight *isight, int offset, __be32 *value) { return snd_fw_transaction(isight->unit, TCODE_READ_QUADLET_REQUEST, isight->audio_base + offset, value, 4); } static int reg_write(struct isight *isight, int offset, __be32 value) { return snd_fw_transaction(isight->unit, 
TCODE_WRITE_QUADLET_REQUEST, isight->audio_base + offset, &value, 4); } static void isight_stop_streaming(struct isight *isight) { if (!isight->context) return; fw_iso_context_stop(isight->context); fw_iso_context_destroy(isight->context); isight->context = NULL; fw_iso_resources_free(&isight->resources); reg_write(isight, REG_AUDIO_ENABLE, 0); } static int isight_hw_free(struct snd_pcm_substream *substream) { struct isight *isight = substream->private_data; ACCESS_ONCE(isight->pcm_active) = false; mutex_lock(&isight->mutex); isight_stop_streaming(isight); mutex_unlock(&isight->mutex); return snd_pcm_lib_free_vmalloc_buffer(substream); } static int isight_start_streaming(struct isight *isight) { unsigned int i; int err; if (isight->context) { if (isight->packet_index < 0) isight_stop_streaming(isight); else return 0; } err = reg_write(isight, REG_SAMPLE_RATE, cpu_to_be32(RATE_48000)); if (err < 0) goto error; err = isight_connect(isight); if (err < 0) goto error; err = reg_write(isight, REG_AUDIO_ENABLE, cpu_to_be32(AUDIO_ENABLE)); if (err < 0) goto err_resources; isight->context = fw_iso_context_create(isight->device->card, FW_ISO_CONTEXT_RECEIVE, isight->resources.channel, isight->device->max_speed, 4, isight_packet, isight); if (IS_ERR(isight->context)) { err = PTR_ERR(isight->context); isight->context = NULL; goto err_resources; } for (i = 0; i < QUEUE_LENGTH; ++i) { err = fw_iso_context_queue(isight->context, &audio_packet, &isight->buffer.iso_buffer, isight->buffer.packets[i].offset); if (err < 0) goto err_context; } isight->first_packet = true; isight->packet_index = 0; err = fw_iso_context_start(isight->context, -1, 0, FW_ISO_CONTEXT_MATCH_ALL_TAGS/*?*/); if (err < 0) goto err_context; return 0; err_context: fw_iso_context_destroy(isight->context); isight->context = NULL; err_resources: fw_iso_resources_free(&isight->resources); reg_write(isight, REG_AUDIO_ENABLE, 0); error: return err; } static int isight_prepare(struct snd_pcm_substream *substream) { 
struct isight *isight = substream->private_data; int err; isight->buffer_pointer = 0; isight->period_counter = 0; mutex_lock(&isight->mutex); err = isight_start_streaming(isight); mutex_unlock(&isight->mutex); return err; } static int isight_trigger(struct snd_pcm_substream *substream, int cmd) { struct isight *isight = substream->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: ACCESS_ONCE(isight->pcm_running) = true; break; case SNDRV_PCM_TRIGGER_STOP: ACCESS_ONCE(isight->pcm_running) = false; break; default: return -EINVAL; } return 0; } static snd_pcm_uframes_t isight_pointer(struct snd_pcm_substream *substream) { struct isight *isight = substream->private_data; return ACCESS_ONCE(isight->buffer_pointer); } static int isight_create_pcm(struct isight *isight) { static struct snd_pcm_ops ops = { .open = isight_open, .close = isight_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = isight_hw_params, .hw_free = isight_hw_free, .prepare = isight_prepare, .trigger = isight_trigger, .pointer = isight_pointer, .page = snd_pcm_lib_get_vmalloc_page, .mmap = snd_pcm_lib_mmap_vmalloc, }; struct snd_pcm *pcm; int err; err = snd_pcm_new(isight->card, "iSight", 0, 0, 1, &pcm); if (err < 0) return err; pcm->private_data = isight; strcpy(pcm->name, "iSight"); isight->pcm = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream; isight->pcm->ops = &ops; return 0; } static int isight_gain_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { struct isight *isight = ctl->private_data; info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = 1; info->value.integer.min = isight->gain_min; info->value.integer.max = isight->gain_max; return 0; } static int isight_gain_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct isight *isight = ctl->private_data; __be32 gain; int err; err = reg_read(isight, REG_GAIN, &gain); if (err < 0) return err; value->value.integer.value[0] = (s32)be32_to_cpu(gain); return 0; } static int isight_gain_put(struct 
snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct isight *isight = ctl->private_data; if (value->value.integer.value[0] < isight->gain_min || value->value.integer.value[0] > isight->gain_max) return -EINVAL; return reg_write(isight, REG_GAIN, cpu_to_be32(value->value.integer.value[0])); } static int isight_mute_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct isight *isight = ctl->private_data; __be32 mute; int err; err = reg_read(isight, REG_MUTE, &mute); if (err < 0) return err; value->value.integer.value[0] = !mute; return 0; } static int isight_mute_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct isight *isight = ctl->private_data; return reg_write(isight, REG_MUTE, (__force __be32)!value->value.integer.value[0]); } static int isight_create_mixer(struct isight *isight) { static const struct snd_kcontrol_new gain_control = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Mic Capture Volume", .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, .info = isight_gain_info, .get = isight_gain_get, .put = isight_gain_put, }; static const struct snd_kcontrol_new mute_control = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Mic Capture Switch", .info = snd_ctl_boolean_mono_info, .get = isight_mute_get, .put = isight_mute_put, }; __be32 value; struct snd_kcontrol *ctl; int err; err = reg_read(isight, REG_GAIN_RAW_START, &value); if (err < 0) return err; isight->gain_min = be32_to_cpu(value); err = reg_read(isight, REG_GAIN_RAW_END, &value); if (err < 0) return err; isight->gain_max = be32_to_cpu(value); isight->gain_tlv[0] = SNDRV_CTL_TLVT_DB_MINMAX; isight->gain_tlv[1] = 2 * sizeof(unsigned int); err = reg_read(isight, REG_GAIN_DB_START, &value); if (err < 0) return err; isight->gain_tlv[2] = (s32)be32_to_cpu(value) * 100; err = reg_read(isight, REG_GAIN_DB_END, &value); if (err < 0) return err; isight->gain_tlv[3] = (s32)be32_to_cpu(value) * 100; ctl = snd_ctl_new1(&gain_control, 
isight); if (ctl) ctl->tlv.p = isight->gain_tlv; err = snd_ctl_add(isight->card, ctl); if (err < 0) return err; err = snd_ctl_add(isight->card, snd_ctl_new1(&mute_control, isight)); if (err < 0) return err; return 0; } static void isight_card_free(struct snd_card *card) { struct isight *isight = card->private_data; fw_iso_resources_destroy(&isight->resources); fw_unit_put(isight->unit); mutex_destroy(&isight->mutex); } static u64 get_unit_base(struct fw_unit *unit) { struct fw_csr_iterator i; int key, value; fw_csr_iterator_init(&i, unit->directory); while (fw_csr_iterator_next(&i, &key, &value)) if (key == CSR_OFFSET) return CSR_REGISTER_BASE + value * 4; return 0; } static int isight_probe(struct device *unit_dev) { struct fw_unit *unit = fw_unit(unit_dev); struct fw_device *fw_dev = fw_parent_device(unit); struct snd_card *card; struct isight *isight; int err; err = snd_card_create(-1, NULL, THIS_MODULE, sizeof(*isight), &card); if (err < 0) return err; snd_card_set_dev(card, unit_dev); isight = card->private_data; isight->card = card; mutex_init(&isight->mutex); isight->unit = fw_unit_get(unit); isight->device = fw_dev; isight->audio_base = get_unit_base(unit); if (!isight->audio_base) { dev_err(&unit->device, "audio unit base not found\n"); err = -ENXIO; goto err_unit; } fw_iso_resources_init(&isight->resources, unit); card->private_free = isight_card_free; strcpy(card->driver, "iSight"); strcpy(card->shortname, "Apple iSight"); snprintf(card->longname, sizeof(card->longname), "Apple iSight (GUID %08x%08x) at %s, S%d", fw_dev->config_rom[3], fw_dev->config_rom[4], dev_name(&unit->device), 100 << fw_dev->max_speed); strcpy(card->mixername, "iSight"); err = isight_create_pcm(isight); if (err < 0) goto error; err = isight_create_mixer(isight); if (err < 0) goto error; err = snd_card_register(card); if (err < 0) goto error; dev_set_drvdata(unit_dev, isight); return 0; err_unit: fw_unit_put(isight->unit); mutex_destroy(&isight->mutex); error: snd_card_free(card); 
return err; } static int isight_remove(struct device *dev) { struct isight *isight = dev_get_drvdata(dev); isight_pcm_abort(isight); snd_card_disconnect(isight->card); mutex_lock(&isight->mutex); isight_stop_streaming(isight); mutex_unlock(&isight->mutex); snd_card_free_when_closed(isight->card); return 0; } static void isight_bus_reset(struct fw_unit *unit) { struct isight *isight = dev_get_drvdata(&unit->device); if (fw_iso_resources_update(&isight->resources) < 0) { isight_pcm_abort(isight); mutex_lock(&isight->mutex); isight_stop_streaming(isight); mutex_unlock(&isight->mutex); } } static const struct ieee1394_device_id isight_id_table[] = { { .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, .specifier_id = OUI_APPLE, .version = SW_ISIGHT_AUDIO, }, { } }; MODULE_DEVICE_TABLE(ieee1394, isight_id_table); static struct fw_driver isight_driver = { .driver = { .owner = THIS_MODULE, .name = KBUILD_MODNAME, .bus = &fw_bus_type, .probe = isight_probe, .remove = isight_remove, }, .update = isight_bus_reset, .id_table = isight_id_table, }; static int __init alsa_isight_init(void) { return driver_register(&isight_driver.driver); } static void __exit alsa_isight_exit(void) { driver_unregister(&isight_driver.driver); } module_init(alsa_isight_init); module_exit(alsa_isight_exit);
gpl-2.0
kv193/buildroot
linux/linux-kernel/drivers/crypto/amcc/crypto4xx_alg.c
12096
8415
/** * AMCC SoC PPC4xx Crypto Driver * * Copyright (c) 2008 Applied Micro Circuits Corporation. * All rights reserved. James Hsiao <jhsiao@amcc.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * This file implements the Linux crypto algorithms. */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/spinlock_types.h> #include <linux/scatterlist.h> #include <linux/crypto.h> #include <linux/hash.h> #include <crypto/internal/hash.h> #include <linux/dma-mapping.h> #include <crypto/algapi.h> #include <crypto/aes.h> #include <crypto/sha.h> #include "crypto4xx_reg_def.h" #include "crypto4xx_sa.h" #include "crypto4xx_core.h" void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h, u32 save_iv, u32 ld_h, u32 ld_iv, u32 hdr_proc, u32 h, u32 c, u32 pad_type, u32 op_grp, u32 op, u32 dir) { sa->sa_command_0.w = 0; sa->sa_command_0.bf.save_hash_state = save_h; sa->sa_command_0.bf.save_iv = save_iv; sa->sa_command_0.bf.load_hash_state = ld_h; sa->sa_command_0.bf.load_iv = ld_iv; sa->sa_command_0.bf.hdr_proc = hdr_proc; sa->sa_command_0.bf.hash_alg = h; sa->sa_command_0.bf.cipher_alg = c; sa->sa_command_0.bf.pad_type = pad_type & 3; sa->sa_command_0.bf.extend_pad = pad_type >> 2; sa->sa_command_0.bf.op_group = op_grp; sa->sa_command_0.bf.opcode = op; sa->sa_command_0.bf.dir = dir; } void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, u32 hmac_mc, u32 cfb, u32 esn, u32 sn_mask, u32 mute, u32 cp_pad, u32 cp_pay, u32 cp_hdr) { sa->sa_command_1.w = 0; sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2; 
sa->sa_command_1.bf.crypto_mode9_8 = cm & 3; sa->sa_command_1.bf.feedback_mode = cfb, sa->sa_command_1.bf.sa_rev = 1; sa->sa_command_1.bf.extended_seq_num = esn; sa->sa_command_1.bf.seq_num_mask = sn_mask; sa->sa_command_1.bf.mutable_bit_proc = mute; sa->sa_command_1.bf.copy_pad = cp_pad; sa->sa_command_1.bf.copy_payload = cp_pay; sa->sa_command_1.bf.copy_hdr = cp_hdr; } int crypto4xx_encrypt(struct ablkcipher_request *req) { struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); ctx->direction = DIR_OUTBOUND; ctx->hash_final = 0; ctx->is_hash = 0; ctx->pd_ctl = 0x1; return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, req->nbytes, req->info, get_dynamic_sa_iv_size(ctx)); } int crypto4xx_decrypt(struct ablkcipher_request *req) { struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); ctx->direction = DIR_INBOUND; ctx->hash_final = 0; ctx->is_hash = 0; ctx->pd_ctl = 1; return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, req->nbytes, req->info, get_dynamic_sa_iv_size(ctx)); } /** * AES Functions */ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen, unsigned char cm, u8 fb) { struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); struct dynamic_sa_ctl *sa; int rc; if (keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) { crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } /* Create SA */ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr) crypto4xx_free_sa(ctx); rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4); if (rc) return rc; if (ctx->state_record_dma_addr == 0) { rc = crypto4xx_alloc_state_record(ctx); if (rc) { crypto4xx_free_sa(ctx); return rc; } } /* Setup SA */ sa = (struct dynamic_sa_ctl *) ctx->sa_in; ctx->hash_final = 0; set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV, SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE, SA_NO_HEADER_PROC, 
SA_HASH_ALG_NULL, SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT, DIR_INBOUND); set_dynamic_sa_command_1(sa, cm, SA_HASH_MODE_HASH, fb, SA_EXTENDED_SN_OFF, SA_SEQ_MASK_OFF, SA_MC_ENABLE, SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD, SA_NOT_COPY_HDR); crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx), key, keylen); sa->sa_contents = SA_AES_CONTENTS | (keylen << 2); sa->sa_command_1.bf.key_len = keylen >> 3; ctx->is_hash = 0; ctx->direction = DIR_INBOUND; memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx), (void *)&ctx->state_record_dma_addr, 4); ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx); memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4); sa = (struct dynamic_sa_ctl *) ctx->sa_out; sa->sa_command_0.bf.dir = DIR_OUTBOUND; return 0; } int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CBC, CRYPTO_FEEDBACK_MODE_NO_FB); } /** * HASH SHA1 Functions */ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm, unsigned int sa_len, unsigned char ha, unsigned char hm) { struct crypto_alg *alg = tfm->__crt_alg; struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg); struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); struct dynamic_sa_ctl *sa; struct dynamic_sa_hash160 *sa_in; int rc; ctx->dev = my_alg->dev; ctx->is_hash = 1; ctx->hash_final = 0; /* Create SA */ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr) crypto4xx_free_sa(ctx); rc = crypto4xx_alloc_sa(ctx, sa_len); if (rc) return rc; if (ctx->state_record_dma_addr == 0) { crypto4xx_alloc_state_record(ctx); if (!ctx->state_record_dma_addr) { crypto4xx_free_sa(ctx); return -ENOMEM; } } crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct crypto4xx_ctx)); sa = (struct dynamic_sa_ctl *) ctx->sa_in; set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV, SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA, SA_NO_HEADER_PROC, ha, 
SA_CIPHER_ALG_NULL, SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC, SA_OPCODE_HASH, DIR_INBOUND); set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH, CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF, SA_SEQ_MASK_OFF, SA_MC_ENABLE, SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD, SA_NOT_COPY_HDR); ctx->direction = DIR_INBOUND; sa->sa_contents = SA_HASH160_CONTENTS; sa_in = (struct dynamic_sa_hash160 *) ctx->sa_in; /* Need to zero hash digest in SA */ memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest)); memset(sa_in->outer_digest, 0, sizeof(sa_in->outer_digest)); sa_in->state_ptr = ctx->state_record_dma_addr; ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx); return 0; } int crypto4xx_hash_init(struct ahash_request *req) { struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); int ds; struct dynamic_sa_ctl *sa; sa = (struct dynamic_sa_ctl *) ctx->sa_in; ds = crypto_ahash_digestsize( __crypto_ahash_cast(req->base.tfm)); sa->sa_command_0.bf.digest_len = ds >> 2; sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA; ctx->is_hash = 1; ctx->direction = DIR_INBOUND; return 0; } int crypto4xx_hash_update(struct ahash_request *req) { struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); ctx->is_hash = 1; ctx->hash_final = 0; ctx->pd_ctl = 0x11; ctx->direction = DIR_INBOUND; return crypto4xx_build_pd(&req->base, ctx, req->src, (struct scatterlist *) req->result, req->nbytes, NULL, 0); } int crypto4xx_hash_final(struct ahash_request *req) { return 0; } int crypto4xx_hash_digest(struct ahash_request *req) { struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); ctx->hash_final = 1; ctx->pd_ctl = 0x11; ctx->direction = DIR_INBOUND; return crypto4xx_build_pd(&req->base, ctx, req->src, (struct scatterlist *) req->result, req->nbytes, NULL, 0); } /** * SHA1 Algorithm */ int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm) { return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1, SA_HASH_MODE_HASH); }
gpl-2.0
bm371613/zso3-kernel
arch/mips/vr41xx/ibm-workpad/setup.c
13888
1365
/* * setup.c, Setup for the IBM WorkPad z50. * * Copyright (C) 2002-2006 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/ioport.h> #include <asm/io.h> #define WORKPAD_ISA_IO_BASE 0x15000000 #define WORKPAD_ISA_IO_SIZE 0x03000000 #define WORKPAD_ISA_IO_START 0 #define WORKPAD_ISA_IO_END (WORKPAD_ISA_IO_SIZE - 1) #define WORKPAD_IO_PORT_BASE KSEG1ADDR(WORKPAD_ISA_IO_BASE) static int __init ibm_workpad_setup(void) { set_io_port_base(WORKPAD_IO_PORT_BASE); ioport_resource.start = WORKPAD_ISA_IO_START; ioport_resource.end = WORKPAD_ISA_IO_END; return 0; } arch_initcall(ibm_workpad_setup);
gpl-2.0
klquicksall/EVO3D-CDMA
drivers/video/matrox/g450_pll.c
14656
13720
/* * * Hardware accelerated Matrox PCI cards - G450/G550 PLL control. * * (c) 2001-2002 Petr Vandrovec <vandrove@vc.cvut.cz> * * Portions Copyright (c) 2001 Matrox Graphics Inc. * * Version: 1.64 2002/06/10 * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * */ #include "g450_pll.h" #include "matroxfb_DAC1064.h" static inline unsigned int g450_vco2f(unsigned char p, unsigned int fvco) { return (p & 0x40) ? fvco : fvco >> ((p & 3) + 1); } static inline unsigned int g450_f2vco(unsigned char p, unsigned int fin) { return (p & 0x40) ? fin : fin << ((p & 3) + 1); } static unsigned int g450_mnp2vco(const struct matrox_fb_info *minfo, unsigned int mnp) { unsigned int m, n; m = ((mnp >> 16) & 0x0FF) + 1; n = ((mnp >> 7) & 0x1FE) + 4; return (minfo->features.pll.ref_freq * n + (m >> 1)) / m; } unsigned int g450_mnp2f(const struct matrox_fb_info *minfo, unsigned int mnp) { return g450_vco2f(mnp, g450_mnp2vco(minfo, mnp)); } static inline unsigned int pll_freq_delta(unsigned int f1, unsigned int f2) { if (f2 < f1) { f2 = f1 - f2; } else { f2 = f2 - f1; } return f2; } #define NO_MORE_MNP 0x01FFFFFF #define G450_MNP_FREQBITS (0xFFFFFF43) /* do not mask high byte so we'll catch NO_MORE_MNP */ static unsigned int g450_nextpll(const struct matrox_fb_info *minfo, const struct matrox_pll_limits *pi, unsigned int *fvco, unsigned int mnp) { unsigned int m, n, p; unsigned int tvco = *fvco; m = (mnp >> 16) & 0xFF; p = mnp & 0xFF; do { if (m == 0 || m == 0xFF) { if (m == 0) { if (p & 0x40) { return NO_MORE_MNP; } if (p & 3) { p--; } else { p = 0x40; } tvco >>= 1; if (tvco < pi->vcomin) { return NO_MORE_MNP; } *fvco = tvco; } p &= 0x43; if (tvco < 550000) { /* p |= 0x00; */ } else if (tvco < 700000) { p |= 0x08; } else if (tvco < 1000000) { p |= 0x10; } else if (tvco < 1150000) { p |= 0x18; } else { p |= 0x20; } m = 9; } else { m--; } n = ((tvco * (m+1) + 
minfo->features.pll.ref_freq) / (minfo->features.pll.ref_freq * 2)) - 2; } while (n < 0x03 || n > 0x7A); return (m << 16) | (n << 8) | p; } static unsigned int g450_firstpll(const struct matrox_fb_info *minfo, const struct matrox_pll_limits *pi, unsigned int *vco, unsigned int fout) { unsigned int p; unsigned int vcomax; vcomax = pi->vcomax; if (fout > (vcomax / 2)) { if (fout > vcomax) { *vco = vcomax; } else { *vco = fout; } p = 0x40; } else { unsigned int tvco; p = 3; tvco = g450_f2vco(p, fout); while (p && (tvco > vcomax)) { p--; tvco >>= 1; } if (tvco < pi->vcomin) { tvco = pi->vcomin; } *vco = tvco; } return g450_nextpll(minfo, pi, vco, 0xFF0000 | p); } static inline unsigned int g450_setpll(const struct matrox_fb_info *minfo, unsigned int mnp, unsigned int pll) { switch (pll) { case M_PIXEL_PLL_A: matroxfb_DAC_out(minfo, M1064_XPIXPLLAM, mnp >> 16); matroxfb_DAC_out(minfo, M1064_XPIXPLLAN, mnp >> 8); matroxfb_DAC_out(minfo, M1064_XPIXPLLAP, mnp); return M1064_XPIXPLLSTAT; case M_PIXEL_PLL_B: matroxfb_DAC_out(minfo, M1064_XPIXPLLBM, mnp >> 16); matroxfb_DAC_out(minfo, M1064_XPIXPLLBN, mnp >> 8); matroxfb_DAC_out(minfo, M1064_XPIXPLLBP, mnp); return M1064_XPIXPLLSTAT; case M_PIXEL_PLL_C: matroxfb_DAC_out(minfo, M1064_XPIXPLLCM, mnp >> 16); matroxfb_DAC_out(minfo, M1064_XPIXPLLCN, mnp >> 8); matroxfb_DAC_out(minfo, M1064_XPIXPLLCP, mnp); return M1064_XPIXPLLSTAT; case M_SYSTEM_PLL: matroxfb_DAC_out(minfo, DAC1064_XSYSPLLM, mnp >> 16); matroxfb_DAC_out(minfo, DAC1064_XSYSPLLN, mnp >> 8); matroxfb_DAC_out(minfo, DAC1064_XSYSPLLP, mnp); return DAC1064_XSYSPLLSTAT; case M_VIDEO_PLL: matroxfb_DAC_out(minfo, M1064_XVIDPLLM, mnp >> 16); matroxfb_DAC_out(minfo, M1064_XVIDPLLN, mnp >> 8); matroxfb_DAC_out(minfo, M1064_XVIDPLLP, mnp); return M1064_XVIDPLLSTAT; } return 0; } static inline unsigned int g450_cmppll(const struct matrox_fb_info *minfo, unsigned int mnp, unsigned int pll) { unsigned char m = mnp >> 16; unsigned char n = mnp >> 8; unsigned char p = mnp; switch 
(pll) { case M_PIXEL_PLL_A: return (matroxfb_DAC_in(minfo, M1064_XPIXPLLAM) != m || matroxfb_DAC_in(minfo, M1064_XPIXPLLAN) != n || matroxfb_DAC_in(minfo, M1064_XPIXPLLAP) != p); case M_PIXEL_PLL_B: return (matroxfb_DAC_in(minfo, M1064_XPIXPLLBM) != m || matroxfb_DAC_in(minfo, M1064_XPIXPLLBN) != n || matroxfb_DAC_in(minfo, M1064_XPIXPLLBP) != p); case M_PIXEL_PLL_C: return (matroxfb_DAC_in(minfo, M1064_XPIXPLLCM) != m || matroxfb_DAC_in(minfo, M1064_XPIXPLLCN) != n || matroxfb_DAC_in(minfo, M1064_XPIXPLLCP) != p); case M_SYSTEM_PLL: return (matroxfb_DAC_in(minfo, DAC1064_XSYSPLLM) != m || matroxfb_DAC_in(minfo, DAC1064_XSYSPLLN) != n || matroxfb_DAC_in(minfo, DAC1064_XSYSPLLP) != p); case M_VIDEO_PLL: return (matroxfb_DAC_in(minfo, M1064_XVIDPLLM) != m || matroxfb_DAC_in(minfo, M1064_XVIDPLLN) != n || matroxfb_DAC_in(minfo, M1064_XVIDPLLP) != p); } return 1; } static inline int g450_isplllocked(const struct matrox_fb_info *minfo, unsigned int regidx) { unsigned int j; for (j = 0; j < 1000; j++) { if (matroxfb_DAC_in(minfo, regidx) & 0x40) { unsigned int r = 0; int i; for (i = 0; i < 100; i++) { r += matroxfb_DAC_in(minfo, regidx) & 0x40; } return r >= (90 * 0x40); } /* udelay(1)... but DAC_in is much slower... 
*/ } return 0; } static int g450_testpll(const struct matrox_fb_info *minfo, unsigned int mnp, unsigned int pll) { return g450_isplllocked(minfo, g450_setpll(minfo, mnp, pll)); } static void updatehwstate_clk(struct matrox_hw_state* hw, unsigned int mnp, unsigned int pll) { switch (pll) { case M_SYSTEM_PLL: hw->DACclk[3] = mnp >> 16; hw->DACclk[4] = mnp >> 8; hw->DACclk[5] = mnp; break; } } void matroxfb_g450_setpll_cond(struct matrox_fb_info *minfo, unsigned int mnp, unsigned int pll) { if (g450_cmppll(minfo, mnp, pll)) { g450_setpll(minfo, mnp, pll); } } static inline unsigned int g450_findworkingpll(struct matrox_fb_info *minfo, unsigned int pll, unsigned int *mnparray, unsigned int mnpcount) { unsigned int found = 0; unsigned int idx; unsigned int mnpfound = mnparray[0]; for (idx = 0; idx < mnpcount; idx++) { unsigned int sarray[3]; unsigned int *sptr; { unsigned int mnp; sptr = sarray; mnp = mnparray[idx]; if (mnp & 0x38) { *sptr++ = mnp - 8; } if ((mnp & 0x38) != 0x38) { *sptr++ = mnp + 8; } *sptr = mnp; } while (sptr >= sarray) { unsigned int mnp = *sptr--; if (g450_testpll(minfo, mnp - 0x0300, pll) && g450_testpll(minfo, mnp + 0x0300, pll) && g450_testpll(minfo, mnp - 0x0200, pll) && g450_testpll(minfo, mnp + 0x0200, pll) && g450_testpll(minfo, mnp - 0x0100, pll) && g450_testpll(minfo, mnp + 0x0100, pll)) { if (g450_testpll(minfo, mnp, pll)) { return mnp; } } else if (!found && g450_testpll(minfo, mnp, pll)) { mnpfound = mnp; found = 1; } } } g450_setpll(minfo, mnpfound, pll); return mnpfound; } static void g450_addcache(struct matrox_pll_cache* ci, unsigned int mnp_key, unsigned int mnp_value) { if (++ci->valid > ARRAY_SIZE(ci->data)) { ci->valid = ARRAY_SIZE(ci->data); } memmove(ci->data + 1, ci->data, (ci->valid - 1) * sizeof(*ci->data)); ci->data[0].mnp_key = mnp_key & G450_MNP_FREQBITS; ci->data[0].mnp_value = mnp_value; } static int g450_checkcache(struct matrox_fb_info *minfo, struct matrox_pll_cache *ci, unsigned int mnp_key) { unsigned int i; 
mnp_key &= G450_MNP_FREQBITS; for (i = 0; i < ci->valid; i++) { if (ci->data[i].mnp_key == mnp_key) { unsigned int mnp; mnp = ci->data[i].mnp_value; if (i) { memmove(ci->data + 1, ci->data, i * sizeof(*ci->data)); ci->data[0].mnp_key = mnp_key; ci->data[0].mnp_value = mnp; } return mnp; } } return NO_MORE_MNP; } static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout, unsigned int pll, unsigned int *mnparray, unsigned int *deltaarray) { unsigned int mnpcount; unsigned int pixel_vco; const struct matrox_pll_limits* pi; struct matrox_pll_cache* ci; pixel_vco = 0; switch (pll) { case M_PIXEL_PLL_A: case M_PIXEL_PLL_B: case M_PIXEL_PLL_C: { u_int8_t tmp, xpwrctrl; unsigned long flags; matroxfb_DAC_lock_irqsave(flags); xpwrctrl = matroxfb_DAC_in(minfo, M1064_XPWRCTRL); matroxfb_DAC_out(minfo, M1064_XPWRCTRL, xpwrctrl & ~M1064_XPWRCTRL_PANELPDN); mga_outb(M_SEQ_INDEX, M_SEQ1); mga_outb(M_SEQ_DATA, mga_inb(M_SEQ_DATA) | M_SEQ1_SCROFF); tmp = matroxfb_DAC_in(minfo, M1064_XPIXCLKCTRL); tmp |= M1064_XPIXCLKCTRL_DIS; if (!(tmp & M1064_XPIXCLKCTRL_PLL_UP)) { tmp |= M1064_XPIXCLKCTRL_PLL_UP; } matroxfb_DAC_out(minfo, M1064_XPIXCLKCTRL, tmp); /* DVI PLL preferred for frequencies up to panel link max, standard PLL otherwise */ if (fout >= minfo->max_pixel_clock_panellink) tmp = 0; else tmp = M1064_XDVICLKCTRL_DVIDATAPATHSEL | M1064_XDVICLKCTRL_C1DVICLKSEL | M1064_XDVICLKCTRL_C1DVICLKEN | M1064_XDVICLKCTRL_DVILOOPCTL | M1064_XDVICLKCTRL_P1LOOPBWDTCTL; /* Setting this breaks PC systems so don't do it */ /* matroxfb_DAC_out(minfo, M1064_XDVICLKCTRL, tmp); */ matroxfb_DAC_out(minfo, M1064_XPWRCTRL, xpwrctrl); matroxfb_DAC_unlock_irqrestore(flags); } { u_int8_t misc; misc = mga_inb(M_MISC_REG_READ) & ~0x0C; switch (pll) { case M_PIXEL_PLL_A: break; case M_PIXEL_PLL_B: misc |= 0x04; break; default: misc |= 0x0C; break; } mga_outb(M_MISC_REG, misc); } pi = &minfo->limits.pixel; ci = &minfo->cache.pixel; break; case M_SYSTEM_PLL: { u_int32_t opt; 
pci_read_config_dword(minfo->pcidev, PCI_OPTION_REG, &opt); if (!(opt & 0x20)) { pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, opt | 0x20); } } pi = &minfo->limits.system; ci = &minfo->cache.system; break; case M_VIDEO_PLL: { u_int8_t tmp; unsigned int mnp; unsigned long flags; matroxfb_DAC_lock_irqsave(flags); tmp = matroxfb_DAC_in(minfo, M1064_XPWRCTRL); if (!(tmp & 2)) { matroxfb_DAC_out(minfo, M1064_XPWRCTRL, tmp | 2); } mnp = matroxfb_DAC_in(minfo, M1064_XPIXPLLCM) << 16; mnp |= matroxfb_DAC_in(minfo, M1064_XPIXPLLCN) << 8; pixel_vco = g450_mnp2vco(minfo, mnp); matroxfb_DAC_unlock_irqrestore(flags); } pi = &minfo->limits.video; ci = &minfo->cache.video; break; default: return -EINVAL; } mnpcount = 0; { unsigned int mnp; unsigned int xvco; for (mnp = g450_firstpll(minfo, pi, &xvco, fout); mnp != NO_MORE_MNP; mnp = g450_nextpll(minfo, pi, &xvco, mnp)) { unsigned int idx; unsigned int vco; unsigned int delta; vco = g450_mnp2vco(minfo, mnp); #if 0 if (pll == M_VIDEO_PLL) { unsigned int big, small; if (vco < pixel_vco) { small = vco; big = pixel_vco; } else { small = pixel_vco; big = vco; } while (big > small) { big >>= 1; } if (big == small) { continue; } } #endif delta = pll_freq_delta(fout, g450_vco2f(mnp, vco)); for (idx = mnpcount; idx > 0; idx--) { /* == is important; due to nextpll algorithm we get sorted equally good frequencies from lower VCO frequency to higher - with <= lowest wins, while with < highest one wins */ if (delta <= deltaarray[idx-1]) { /* all else being equal except VCO, * choose VCO not near (within 1/16th or so) VCOmin * (freqs near VCOmin aren't as stable) */ if (delta == deltaarray[idx-1] && vco != g450_mnp2vco(minfo, mnparray[idx-1]) && vco < (pi->vcomin * 17 / 16)) { break; } mnparray[idx] = mnparray[idx-1]; deltaarray[idx] = deltaarray[idx-1]; } else { break; } } mnparray[idx] = mnp; deltaarray[idx] = delta; mnpcount++; } } /* VideoPLL and PixelPLL matched: do nothing... 
In all other cases we should get at least one frequency */ if (!mnpcount) { return -EBUSY; } { unsigned long flags; unsigned int mnp; matroxfb_DAC_lock_irqsave(flags); mnp = g450_checkcache(minfo, ci, mnparray[0]); if (mnp != NO_MORE_MNP) { matroxfb_g450_setpll_cond(minfo, mnp, pll); } else { mnp = g450_findworkingpll(minfo, pll, mnparray, mnpcount); g450_addcache(ci, mnparray[0], mnp); } updatehwstate_clk(&minfo->hw, mnp, pll); matroxfb_DAC_unlock_irqrestore(flags); return mnp; } } /* It must be greater than number of possible PLL values. * Currently there is 5(p) * 10(m) = 50 possible values. */ #define MNP_TABLE_SIZE 64 int matroxfb_g450_setclk(struct matrox_fb_info *minfo, unsigned int fout, unsigned int pll) { unsigned int* arr; arr = kmalloc(sizeof(*arr) * MNP_TABLE_SIZE * 2, GFP_KERNEL); if (arr) { int r; r = __g450_setclk(minfo, fout, pll, arr, arr + MNP_TABLE_SIZE); kfree(arr); return r; } return -ENOMEM; } EXPORT_SYMBOL(matroxfb_g450_setclk); EXPORT_SYMBOL(g450_mnp2f); EXPORT_SYMBOL(matroxfb_g450_setpll_cond); MODULE_AUTHOR("(c) 2001-2002 Petr Vandrovec <vandrove@vc.cvut.cz>"); MODULE_DESCRIPTION("Matrox G450/G550 PLL driver"); MODULE_LICENSE("GPL");
gpl-2.0
tomasbw/linux-yocto-4.4
arch/tile/kernel/hardwall.c
1345
30634
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/fs.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/rwsem.h> #include <linux/kprobes.h> #include <linux/sched.h> #include <linux/hardirq.h> #include <linux/uaccess.h> #include <linux/smp.h> #include <linux/cdev.h> #include <linux/compat.h> #include <asm/hardwall.h> #include <asm/traps.h> #include <asm/siginfo.h> #include <asm/irq_regs.h> #include <arch/interrupts.h> #include <arch/spr_def.h> /* * Implement a per-cpu "hardwall" resource class such as UDN or IPI. * We use "hardwall" nomenclature throughout for historical reasons. * The lock here controls access to the list data structure as well as * to the items on the list. 
*/ struct hardwall_type { int index; int is_xdn; int is_idn; int disabled; const char *name; struct list_head list; spinlock_t lock; struct proc_dir_entry *proc_dir; }; enum hardwall_index { HARDWALL_UDN = 0, #ifndef __tilepro__ HARDWALL_IDN = 1, HARDWALL_IPI = 2, #endif _HARDWALL_TYPES }; static struct hardwall_type hardwall_types[] = { { /* user-space access to UDN */ 0, 1, 0, 0, "udn", LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list), __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock), NULL }, #ifndef __tilepro__ { /* user-space access to IDN */ 1, 1, 1, 1, /* disabled pending hypervisor support */ "idn", LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list), __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock), NULL }, { /* access to user-space IPI */ 2, 0, 0, 0, "ipi", LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list), __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock), NULL }, #endif }; /* * This data structure tracks the cpu data, etc., associated * one-to-one with a "struct file *" from opening a hardwall device file. * Note that the file's private data points back to this structure. */ struct hardwall_info { struct list_head list; /* for hardwall_types.list */ struct list_head task_head; /* head of tasks in this hardwall */ struct hardwall_type *type; /* type of this resource */ struct cpumask cpumask; /* cpus reserved */ int id; /* integer id for this hardwall */ int teardown_in_progress; /* are we tearing this one down? */ /* Remaining fields only valid for user-network resources. */ int ulhc_x; /* upper left hand corner x coord */ int ulhc_y; /* upper left hand corner y coord */ int width; /* rectangle width */ int height; /* rectangle height */ #if CHIP_HAS_REV1_XDN() atomic_t xdn_pending_count; /* cores in phase 1 of drain */ #endif }; /* /proc/tile/hardwall */ static struct proc_dir_entry *hardwall_proc_dir; /* Functions to manage files in /proc/tile/hardwall. 
*/ static void hardwall_add_proc(struct hardwall_info *); static void hardwall_remove_proc(struct hardwall_info *); /* Allow disabling UDN access. */ static int __init noudn(char *str) { pr_info("User-space UDN access is disabled\n"); hardwall_types[HARDWALL_UDN].disabled = 1; return 0; } early_param("noudn", noudn); #ifndef __tilepro__ /* Allow disabling IDN access. */ static int __init noidn(char *str) { pr_info("User-space IDN access is disabled\n"); hardwall_types[HARDWALL_IDN].disabled = 1; return 0; } early_param("noidn", noidn); /* Allow disabling IPI access. */ static int __init noipi(char *str) { pr_info("User-space IPI access is disabled\n"); hardwall_types[HARDWALL_IPI].disabled = 1; return 0; } early_param("noipi", noipi); #endif /* * Low-level primitives for UDN/IDN */ #ifdef __tilepro__ #define mtspr_XDN(hwt, name, val) \ do { (void)(hwt); __insn_mtspr(SPR_UDN_##name, (val)); } while (0) #define mtspr_MPL_XDN(hwt, name, val) \ do { (void)(hwt); __insn_mtspr(SPR_MPL_UDN_##name, (val)); } while (0) #define mfspr_XDN(hwt, name) \ ((void)(hwt), __insn_mfspr(SPR_UDN_##name)) #else #define mtspr_XDN(hwt, name, val) \ do { \ if ((hwt)->is_idn) \ __insn_mtspr(SPR_IDN_##name, (val)); \ else \ __insn_mtspr(SPR_UDN_##name, (val)); \ } while (0) #define mtspr_MPL_XDN(hwt, name, val) \ do { \ if ((hwt)->is_idn) \ __insn_mtspr(SPR_MPL_IDN_##name, (val)); \ else \ __insn_mtspr(SPR_MPL_UDN_##name, (val)); \ } while (0) #define mfspr_XDN(hwt, name) \ ((hwt)->is_idn ? __insn_mfspr(SPR_IDN_##name) : __insn_mfspr(SPR_UDN_##name)) #endif /* Set a CPU bit if the CPU is online. */ #define cpu_online_set(cpu, dst) do { \ if (cpu_online(cpu)) \ cpumask_set_cpu(cpu, dst); \ } while (0) /* Does the given rectangle contain the given x,y coordinate? */ static int contains(struct hardwall_info *r, int x, int y) { return (x >= r->ulhc_x && x < r->ulhc_x + r->width) && (y >= r->ulhc_y && y < r->ulhc_y + r->height); } /* Compute the rectangle parameters and validate the cpumask. 
*/ static int check_rectangle(struct hardwall_info *r, struct cpumask *mask) { int x, y, cpu, ulhc, lrhc; /* The first cpu is the ULHC, the last the LRHC. */ ulhc = find_first_bit(cpumask_bits(mask), nr_cpumask_bits); lrhc = find_last_bit(cpumask_bits(mask), nr_cpumask_bits); /* Compute the rectangle attributes from the cpus. */ r->ulhc_x = cpu_x(ulhc); r->ulhc_y = cpu_y(ulhc); r->width = cpu_x(lrhc) - r->ulhc_x + 1; r->height = cpu_y(lrhc) - r->ulhc_y + 1; /* Width and height must be positive */ if (r->width <= 0 || r->height <= 0) return -EINVAL; /* Confirm that the cpumask is exactly the rectangle. */ for (y = 0, cpu = 0; y < smp_height; ++y) for (x = 0; x < smp_width; ++x, ++cpu) if (cpumask_test_cpu(cpu, mask) != contains(r, x, y)) return -EINVAL; /* * Note that offline cpus can't be drained when this user network * rectangle eventually closes. We used to detect this * situation and print a warning, but it annoyed users and * they ignored it anyway, so now we just return without a * warning. */ return 0; } /* * Hardware management of hardwall setup, teardown, trapping, * and enabling/disabling PL0 access to the networks. */ /* Bit field values to mask together for writes to SPR_XDN_DIRECTION_PROTECT */ enum direction_protect { N_PROTECT = (1 << 0), E_PROTECT = (1 << 1), S_PROTECT = (1 << 2), W_PROTECT = (1 << 3), C_PROTECT = (1 << 4), }; static inline int xdn_which_interrupt(struct hardwall_type *hwt) { #ifndef __tilepro__ if (hwt->is_idn) return INT_IDN_FIREWALL; #endif return INT_UDN_FIREWALL; } static void enable_firewall_interrupts(struct hardwall_type *hwt) { arch_local_irq_unmask_now(xdn_which_interrupt(hwt)); } static void disable_firewall_interrupts(struct hardwall_type *hwt) { arch_local_irq_mask_now(xdn_which_interrupt(hwt)); } /* Set up hardwall on this cpu based on the passed hardwall_info. 
*/ static void hardwall_setup_func(void *info) { struct hardwall_info *r = info; struct hardwall_type *hwt = r->type; int cpu = smp_processor_id(); /* on_each_cpu disables preemption */ int x = cpu_x(cpu); int y = cpu_y(cpu); int bits = 0; if (x == r->ulhc_x) bits |= W_PROTECT; if (x == r->ulhc_x + r->width - 1) bits |= E_PROTECT; if (y == r->ulhc_y) bits |= N_PROTECT; if (y == r->ulhc_y + r->height - 1) bits |= S_PROTECT; BUG_ON(bits == 0); mtspr_XDN(hwt, DIRECTION_PROTECT, bits); enable_firewall_interrupts(hwt); } /* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */ static void hardwall_protect_rectangle(struct hardwall_info *r) { int x, y, cpu, delta; struct cpumask rect_cpus; cpumask_clear(&rect_cpus); /* First include the top and bottom edges */ cpu = r->ulhc_y * smp_width + r->ulhc_x; delta = (r->height - 1) * smp_width; for (x = 0; x < r->width; ++x, ++cpu) { cpu_online_set(cpu, &rect_cpus); cpu_online_set(cpu + delta, &rect_cpus); } /* Then the left and right edges */ cpu -= r->width; delta = r->width - 1; for (y = 0; y < r->height; ++y, cpu += smp_width) { cpu_online_set(cpu, &rect_cpus); cpu_online_set(cpu + delta, &rect_cpus); } /* Then tell all the cpus to set up their protection SPR */ on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1); } /* Entered from INT_xDN_FIREWALL interrupt vector with irqs disabled. */ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num) { struct hardwall_info *rect; struct hardwall_type *hwt; struct task_struct *p; struct siginfo info; int cpu = smp_processor_id(); int found_processes; struct pt_regs *old_regs = set_irq_regs(regs); irq_enter(); /* Figure out which network trapped. */ switch (fault_num) { #ifndef __tilepro__ case INT_IDN_FIREWALL: hwt = &hardwall_types[HARDWALL_IDN]; break; #endif case INT_UDN_FIREWALL: hwt = &hardwall_types[HARDWALL_UDN]; break; default: BUG(); } BUG_ON(hwt->disabled); /* This tile trapped a network access; find the rectangle. 
*/ spin_lock(&hwt->lock); list_for_each_entry(rect, &hwt->list, list) { if (cpumask_test_cpu(cpu, &rect->cpumask)) break; } /* * It shouldn't be possible not to find this cpu on the * rectangle list, since only cpus in rectangles get hardwalled. * The hardwall is only removed after the user network is drained. */ BUG_ON(&rect->list == &hwt->list); /* * If we already started teardown on this hardwall, don't worry; * the abort signal has been sent and we are just waiting for things * to quiesce. */ if (rect->teardown_in_progress) { pr_notice("cpu %d: detected %s hardwall violation %#lx while teardown already in progress\n", cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT)); goto done; } /* * Kill off any process that is activated in this rectangle. * We bypass security to deliver the signal, since it must be * one of the activated processes that generated the user network * message that caused this trap, and all the activated * processes shared a single open file so are pretty tightly * bound together from a security point of view to begin with. */ rect->teardown_in_progress = 1; wmb(); /* Ensure visibility of rectangle before notifying processes. */ pr_notice("cpu %d: detected %s hardwall violation %#lx...\n", cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT)); info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_HARDWALL; found_processes = 0; list_for_each_entry(p, &rect->task_head, thread.hardwall[hwt->index].list) { BUG_ON(p->thread.hardwall[hwt->index].info != rect); if (!(p->flags & PF_EXITING)) { found_processes = 1; pr_notice("hardwall: killing %d\n", p->pid); do_send_sig_info(info.si_signo, &info, p, false); } } if (!found_processes) pr_notice("hardwall: no associated processes!\n"); done: spin_unlock(&hwt->lock); /* * We have to disable firewall interrupts now, or else when we * return from this handler, we will simply re-interrupt back to * it. 
However, we can't clear the protection bits, since we * haven't yet drained the network, and that would allow packets * to cross out of the hardwall region. */ disable_firewall_interrupts(hwt); irq_exit(); set_irq_regs(old_regs); } /* Allow access from user space to the user network. */ void grant_hardwall_mpls(struct hardwall_type *hwt) { #ifndef __tilepro__ if (!hwt->is_xdn) { __insn_mtspr(SPR_MPL_IPI_0_SET_0, 1); return; } #endif mtspr_MPL_XDN(hwt, ACCESS_SET_0, 1); mtspr_MPL_XDN(hwt, AVAIL_SET_0, 1); mtspr_MPL_XDN(hwt, COMPLETE_SET_0, 1); mtspr_MPL_XDN(hwt, TIMER_SET_0, 1); #if !CHIP_HAS_REV1_XDN() mtspr_MPL_XDN(hwt, REFILL_SET_0, 1); mtspr_MPL_XDN(hwt, CA_SET_0, 1); #endif } /* Deny access from user space to the user network. */ void restrict_hardwall_mpls(struct hardwall_type *hwt) { #ifndef __tilepro__ if (!hwt->is_xdn) { __insn_mtspr(SPR_MPL_IPI_0_SET_1, 1); return; } #endif mtspr_MPL_XDN(hwt, ACCESS_SET_1, 1); mtspr_MPL_XDN(hwt, AVAIL_SET_1, 1); mtspr_MPL_XDN(hwt, COMPLETE_SET_1, 1); mtspr_MPL_XDN(hwt, TIMER_SET_1, 1); #if !CHIP_HAS_REV1_XDN() mtspr_MPL_XDN(hwt, REFILL_SET_1, 1); mtspr_MPL_XDN(hwt, CA_SET_1, 1); #endif } /* Restrict or deny as necessary for the task we're switching to. */ void hardwall_switch_tasks(struct task_struct *prev, struct task_struct *next) { int i; for (i = 0; i < HARDWALL_TYPES; ++i) { if (prev->thread.hardwall[i].info != NULL) { if (next->thread.hardwall[i].info == NULL) restrict_hardwall_mpls(&hardwall_types[i]); } else if (next->thread.hardwall[i].info != NULL) { grant_hardwall_mpls(&hardwall_types[i]); } } } /* Does this task have the right to IPI the given cpu? */ int hardwall_ipi_valid(int cpu) { #ifdef __tilegx__ struct hardwall_info *info = current->thread.hardwall[HARDWALL_IPI].info; return info && cpumask_test_cpu(cpu, &info->cpumask); #else return 0; #endif } /* * Code to create, activate, deactivate, and destroy hardwall resources. 
*/ /* Create a hardwall for the given resource */ static struct hardwall_info *hardwall_create(struct hardwall_type *hwt, size_t size, const unsigned char __user *bits) { struct hardwall_info *iter, *info; struct cpumask mask; unsigned long flags; int rc; /* Reject crazy sizes out of hand, a la sys_mbind(). */ if (size > PAGE_SIZE) return ERR_PTR(-EINVAL); /* Copy whatever fits into a cpumask. */ if (copy_from_user(&mask, bits, min(sizeof(struct cpumask), size))) return ERR_PTR(-EFAULT); /* * If the size was short, clear the rest of the mask; * otherwise validate that the rest of the user mask was zero * (we don't try hard to be efficient when validating huge masks). */ if (size < sizeof(struct cpumask)) { memset((char *)&mask + size, 0, sizeof(struct cpumask) - size); } else if (size > sizeof(struct cpumask)) { size_t i; for (i = sizeof(struct cpumask); i < size; ++i) { char c; if (get_user(c, &bits[i])) return ERR_PTR(-EFAULT); if (c) return ERR_PTR(-EINVAL); } } /* Allocate a new hardwall_info optimistically. */ info = kmalloc(sizeof(struct hardwall_info), GFP_KERNEL | __GFP_ZERO); if (info == NULL) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&info->task_head); info->type = hwt; /* Compute the rectangle size and validate that it's plausible. */ cpumask_copy(&info->cpumask, &mask); info->id = find_first_bit(cpumask_bits(&mask), nr_cpumask_bits); if (hwt->is_xdn) { rc = check_rectangle(info, &mask); if (rc != 0) { kfree(info); return ERR_PTR(rc); } } /* * Eliminate cpus that are not part of this Linux client. * Note that this allows for configurations that we might not want to * support, such as one client on every even cpu, another client on * every odd cpu. */ cpumask_and(&info->cpumask, &info->cpumask, cpu_online_mask); /* Confirm it doesn't overlap and add it to the list. 
*/ spin_lock_irqsave(&hwt->lock, flags); list_for_each_entry(iter, &hwt->list, list) { if (cpumask_intersects(&iter->cpumask, &info->cpumask)) { spin_unlock_irqrestore(&hwt->lock, flags); kfree(info); return ERR_PTR(-EBUSY); } } list_add_tail(&info->list, &hwt->list); spin_unlock_irqrestore(&hwt->lock, flags); /* Set up appropriate hardwalling on all affected cpus. */ if (hwt->is_xdn) hardwall_protect_rectangle(info); /* Create a /proc/tile/hardwall entry. */ hardwall_add_proc(info); return info; } /* Activate a given hardwall on this cpu for this process. */ static int hardwall_activate(struct hardwall_info *info) { int cpu; unsigned long flags; struct task_struct *p = current; struct thread_struct *ts = &p->thread; struct hardwall_type *hwt; /* Require a hardwall. */ if (info == NULL) return -ENODATA; /* Not allowed to activate a hardwall that is being torn down. */ if (info->teardown_in_progress) return -EINVAL; /* * Get our affinity; if we're not bound to this tile uniquely, * we can't access the network registers. */ if (cpumask_weight(&p->cpus_allowed) != 1) return -EPERM; /* Make sure we are bound to a cpu assigned to this resource. */ cpu = smp_processor_id(); BUG_ON(cpumask_first(&p->cpus_allowed) != cpu); if (!cpumask_test_cpu(cpu, &info->cpumask)) return -EINVAL; /* If we are already bound to this hardwall, it's a no-op. */ hwt = info->type; if (ts->hardwall[hwt->index].info) { BUG_ON(ts->hardwall[hwt->index].info != info); return 0; } /* Success! This process gets to use the resource on this cpu. */ ts->hardwall[hwt->index].info = info; spin_lock_irqsave(&hwt->lock, flags); list_add(&ts->hardwall[hwt->index].list, &info->task_head); spin_unlock_irqrestore(&hwt->lock, flags); grant_hardwall_mpls(hwt); printk(KERN_DEBUG "Pid %d (%s) activated for %s hardwall: cpu %d\n", p->pid, p->comm, hwt->name, cpu); return 0; } /* * Deactivate a task's hardwall. Must hold lock for hardwall_type. 
* This method may be called from exit_thread(), so we don't want to * rely on too many fields of struct task_struct still being valid. * We assume the cpus_allowed, pid, and comm fields are still valid. */ static void _hardwall_deactivate(struct hardwall_type *hwt, struct task_struct *task) { struct thread_struct *ts = &task->thread; if (cpumask_weight(&task->cpus_allowed) != 1) { pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n", task->pid, task->comm, hwt->name, cpumask_weight(&task->cpus_allowed)); BUG(); } BUG_ON(ts->hardwall[hwt->index].info == NULL); ts->hardwall[hwt->index].info = NULL; list_del(&ts->hardwall[hwt->index].list); if (task == current) restrict_hardwall_mpls(hwt); } /* Deactivate a task's hardwall. */ static int hardwall_deactivate(struct hardwall_type *hwt, struct task_struct *task) { unsigned long flags; int activated; spin_lock_irqsave(&hwt->lock, flags); activated = (task->thread.hardwall[hwt->index].info != NULL); if (activated) _hardwall_deactivate(hwt, task); spin_unlock_irqrestore(&hwt->lock, flags); if (!activated) return -EINVAL; printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n", task->pid, task->comm, hwt->name, raw_smp_processor_id()); return 0; } void hardwall_deactivate_all(struct task_struct *task) { int i; for (i = 0; i < HARDWALL_TYPES; ++i) if (task->thread.hardwall[i].info) hardwall_deactivate(&hardwall_types[i], task); } /* Stop the switch before draining the network. */ static void stop_xdn_switch(void *arg) { #if !CHIP_HAS_REV1_XDN() /* Freeze the switch and the demux. */ __insn_mtspr(SPR_UDN_SP_FREEZE, SPR_UDN_SP_FREEZE__SP_FRZ_MASK | SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK | SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK); #else /* * Drop all packets bound for the core or off the edge. 
* We rely on the normal hardwall protection setup code * to have set the low four bits to trigger firewall interrupts, * and shift those bits up to trigger "drop on send" semantics, * plus adding "drop on send to core" for all switches. * In practice it seems the switches latch the DIRECTION_PROTECT * SPR so they won't start dropping if they're already * delivering the last message to the core, but it doesn't * hurt to enable it here. */ struct hardwall_type *hwt = arg; unsigned long protect = mfspr_XDN(hwt, DIRECTION_PROTECT); mtspr_XDN(hwt, DIRECTION_PROTECT, (protect | C_PROTECT) << 5); #endif } static void empty_xdn_demuxes(struct hardwall_type *hwt) { #ifndef __tilepro__ if (hwt->is_idn) { while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 0)) (void) __tile_idn0_receive(); while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 1)) (void) __tile_idn1_receive(); return; } #endif while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0)) (void) __tile_udn0_receive(); while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1)) (void) __tile_udn1_receive(); while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2)) (void) __tile_udn2_receive(); while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3)) (void) __tile_udn3_receive(); } /* Drain all the state from a stopped switch. */ static void drain_xdn_switch(void *arg) { struct hardwall_info *info = arg; struct hardwall_type *hwt = info->type; #if CHIP_HAS_REV1_XDN() /* * The switches have been configured to drop any messages * destined for cores (or off the edge of the rectangle). * But the current message may continue to be delivered, * so we wait until all the cores have finished any pending * messages before we stop draining. 
*/ int pending = mfspr_XDN(hwt, PENDING); while (pending--) { empty_xdn_demuxes(hwt); if (hwt->is_idn) __tile_idn_send(0); else __tile_udn_send(0); } atomic_dec(&info->xdn_pending_count); while (atomic_read(&info->xdn_pending_count)) empty_xdn_demuxes(hwt); #else int i; int from_tile_words, ca_count; /* Empty out the 5 switch point fifos. */ for (i = 0; i < 5; i++) { int words, j; __insn_mtspr(SPR_UDN_SP_FIFO_SEL, i); words = __insn_mfspr(SPR_UDN_SP_STATE) & 0xF; for (j = 0; j < words; j++) (void) __insn_mfspr(SPR_UDN_SP_FIFO_DATA); BUG_ON((__insn_mfspr(SPR_UDN_SP_STATE) & 0xF) != 0); } /* Dump out the 3 word fifo at top. */ from_tile_words = (__insn_mfspr(SPR_UDN_DEMUX_STATUS) >> 10) & 0x3; for (i = 0; i < from_tile_words; i++) (void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO); /* Empty out demuxes. */ empty_xdn_demuxes(hwt); /* Empty out catch all. */ ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT); for (i = 0; i < ca_count; i++) (void) __insn_mfspr(SPR_UDN_CA_DATA); BUG_ON(__insn_mfspr(SPR_UDN_DEMUX_CA_COUNT) != 0); /* Clear demux logic. */ __insn_mtspr(SPR_UDN_DEMUX_CTL, 1); /* * Write switch state; experimentation indicates that 0xc3000 * is an idle switch point. */ for (i = 0; i < 5; i++) { __insn_mtspr(SPR_UDN_SP_FIFO_SEL, i); __insn_mtspr(SPR_UDN_SP_STATE, 0xc3000); } #endif } /* Reset random XDN state registers at boot up and during hardwall teardown. */ static void reset_xdn_network_state(struct hardwall_type *hwt) { if (hwt->disabled) return; /* Clear out other random registers so we have a clean slate. */ mtspr_XDN(hwt, DIRECTION_PROTECT, 0); mtspr_XDN(hwt, AVAIL_EN, 0); mtspr_XDN(hwt, DEADLOCK_TIMEOUT, 0); #if !CHIP_HAS_REV1_XDN() /* Reset UDN coordinates to their standard value */ { unsigned int cpu = smp_processor_id(); unsigned int x = cpu_x(cpu); unsigned int y = cpu_y(cpu); __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7)); } /* Set demux tags to predefined values and enable them. 
*/ __insn_mtspr(SPR_UDN_TAG_VALID, 0xf); __insn_mtspr(SPR_UDN_TAG_0, (1 << 0)); __insn_mtspr(SPR_UDN_TAG_1, (1 << 1)); __insn_mtspr(SPR_UDN_TAG_2, (1 << 2)); __insn_mtspr(SPR_UDN_TAG_3, (1 << 3)); /* Set other rev0 random registers to a clean state. */ __insn_mtspr(SPR_UDN_REFILL_EN, 0); __insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0); __insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0); /* Start the switch and demux. */ __insn_mtspr(SPR_UDN_SP_FREEZE, 0); #endif } void reset_network_state(void) { reset_xdn_network_state(&hardwall_types[HARDWALL_UDN]); #ifndef __tilepro__ reset_xdn_network_state(&hardwall_types[HARDWALL_IDN]); #endif } /* Restart an XDN switch after draining. */ static void restart_xdn_switch(void *arg) { struct hardwall_type *hwt = arg; #if CHIP_HAS_REV1_XDN() /* One last drain step to avoid races with injection and draining. */ empty_xdn_demuxes(hwt); #endif reset_xdn_network_state(hwt); /* Disable firewall interrupts. */ disable_firewall_interrupts(hwt); } /* Last reference to a hardwall is gone, so clear the network. */ static void hardwall_destroy(struct hardwall_info *info) { struct task_struct *task; struct hardwall_type *hwt; unsigned long flags; /* Make sure this file actually represents a hardwall. */ if (info == NULL) return; /* * Deactivate any remaining tasks. It's possible to race with * some other thread that is exiting and hasn't yet called * deactivate (when freeing its thread_info), so we carefully * deactivate any remaining tasks before freeing the * hardwall_info object itself. */ hwt = info->type; info->teardown_in_progress = 1; spin_lock_irqsave(&hwt->lock, flags); list_for_each_entry(task, &info->task_head, thread.hardwall[hwt->index].list) _hardwall_deactivate(hwt, task); spin_unlock_irqrestore(&hwt->lock, flags); if (hwt->is_xdn) { /* Configure the switches for draining the user network. 
*/ printk(KERN_DEBUG "Clearing %s hardwall rectangle %dx%d %d,%d\n", hwt->name, info->width, info->height, info->ulhc_x, info->ulhc_y); on_each_cpu_mask(&info->cpumask, stop_xdn_switch, hwt, 1); /* Drain the network. */ #if CHIP_HAS_REV1_XDN() atomic_set(&info->xdn_pending_count, cpumask_weight(&info->cpumask)); on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 0); #else on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 1); #endif /* Restart switch and disable firewall. */ on_each_cpu_mask(&info->cpumask, restart_xdn_switch, hwt, 1); } /* Remove the /proc/tile/hardwall entry. */ hardwall_remove_proc(info); /* Now free the hardwall from the list. */ spin_lock_irqsave(&hwt->lock, flags); BUG_ON(!list_empty(&info->task_head)); list_del(&info->list); spin_unlock_irqrestore(&hwt->lock, flags); kfree(info); } static int hardwall_proc_show(struct seq_file *sf, void *v) { struct hardwall_info *info = sf->private; seq_printf(sf, "%*pbl\n", cpumask_pr_args(&info->cpumask)); return 0; } static int hardwall_proc_open(struct inode *inode, struct file *file) { return single_open(file, hardwall_proc_show, PDE_DATA(inode)); } static const struct file_operations hardwall_proc_fops = { .open = hardwall_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void hardwall_add_proc(struct hardwall_info *info) { char buf[64]; snprintf(buf, sizeof(buf), "%d", info->id); proc_create_data(buf, 0444, info->type->proc_dir, &hardwall_proc_fops, info); } static void hardwall_remove_proc(struct hardwall_info *info) { char buf[64]; snprintf(buf, sizeof(buf), "%d", info->id); remove_proc_entry(buf, info->type->proc_dir); } int proc_pid_hardwall(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { int i; int n = 0; for (i = 0; i < HARDWALL_TYPES; ++i) { struct hardwall_info *info = task->thread.hardwall[i].info; if (info) seq_printf(m, "%s: %d\n", info->type->name, info->id); } return n; } void 
proc_tile_hardwall_init(struct proc_dir_entry *root) { int i; for (i = 0; i < HARDWALL_TYPES; ++i) { struct hardwall_type *hwt = &hardwall_types[i]; if (hwt->disabled) continue; if (hardwall_proc_dir == NULL) hardwall_proc_dir = proc_mkdir("hardwall", root); hwt->proc_dir = proc_mkdir(hwt->name, hardwall_proc_dir); } } /* * Character device support via ioctl/close. */ static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b) { struct hardwall_info *info = file->private_data; int minor = iminor(file->f_mapping->host); struct hardwall_type* hwt; if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE) return -EINVAL; BUILD_BUG_ON(HARDWALL_TYPES != _HARDWALL_TYPES); BUILD_BUG_ON(HARDWALL_TYPES != sizeof(hardwall_types)/sizeof(hardwall_types[0])); if (minor < 0 || minor >= HARDWALL_TYPES) return -EINVAL; hwt = &hardwall_types[minor]; WARN_ON(info && hwt != info->type); switch (_IOC_NR(a)) { case _HARDWALL_CREATE: if (hwt->disabled) return -ENOSYS; if (info != NULL) return -EALREADY; info = hardwall_create(hwt, _IOC_SIZE(a), (const unsigned char __user *)b); if (IS_ERR(info)) return PTR_ERR(info); file->private_data = info; return 0; case _HARDWALL_ACTIVATE: return hardwall_activate(info); case _HARDWALL_DEACTIVATE: if (current->thread.hardwall[hwt->index].info != info) return -EINVAL; return hardwall_deactivate(hwt, current); case _HARDWALL_GET_ID: return info ? info->id : -EINVAL; default: return -EINVAL; } } #ifdef CONFIG_COMPAT static long hardwall_compat_ioctl(struct file *file, unsigned int a, unsigned long b) { /* Sign-extend the argument so it can be used as a pointer. */ return hardwall_ioctl(file, a, (unsigned long)compat_ptr(b)); } #endif /* The user process closed the file; revoke access to user networks. 
*/ static int hardwall_flush(struct file *file, fl_owner_t owner) { struct hardwall_info *info = file->private_data; struct task_struct *task, *tmp; unsigned long flags; if (info) { /* * NOTE: if multiple threads are activated on this hardwall * file, the other threads will continue having access to the * user network until they are context-switched out and back * in again. * * NOTE: A NULL files pointer means the task is being torn * down, so in that case we also deactivate it. */ struct hardwall_type *hwt = info->type; spin_lock_irqsave(&hwt->lock, flags); list_for_each_entry_safe(task, tmp, &info->task_head, thread.hardwall[hwt->index].list) { if (task->files == owner || task->files == NULL) _hardwall_deactivate(hwt, task); } spin_unlock_irqrestore(&hwt->lock, flags); } return 0; } /* This hardwall is gone, so destroy it. */ static int hardwall_release(struct inode *inode, struct file *file) { hardwall_destroy(file->private_data); return 0; } static const struct file_operations dev_hardwall_fops = { .open = nonseekable_open, .unlocked_ioctl = hardwall_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = hardwall_compat_ioctl, #endif .flush = hardwall_flush, .release = hardwall_release, }; static struct cdev hardwall_dev; static int __init dev_hardwall_init(void) { int rc; dev_t dev; rc = alloc_chrdev_region(&dev, 0, HARDWALL_TYPES, "hardwall"); if (rc < 0) return rc; cdev_init(&hardwall_dev, &dev_hardwall_fops); rc = cdev_add(&hardwall_dev, dev, HARDWALL_TYPES); if (rc < 0) return rc; return 0; } late_initcall(dev_hardwall_init);
gpl-2.0
erikcas/android_kernel_samsung_msm8916-caf
drivers/mtd/nand/nand_bbt.c
2113
38756
/* * drivers/mtd/nand_bbt.c * * Overview: * Bad block table support for the NAND driver * * Copyright © 2004 Thomas Gleixner (tglx@linutronix.de) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Description: * * When nand_scan_bbt is called, then it tries to find the bad block table * depending on the options in the BBT descriptor(s). If no flash based BBT * (NAND_BBT_USE_FLASH) is specified then the device is scanned for factory * marked good / bad blocks. This information is used to create a memory BBT. * Once a new bad block is discovered then the "factory" information is updated * on the device. * If a flash based BBT is specified then the function first tries to find the * BBT on flash. If a BBT is found then the contents are read and the memory * based BBT is created. If a mirrored BBT is selected then the mirror is * searched too and the versions are compared. If the mirror has a greater * version number, then the mirror BBT is used to build the memory based BBT. * If the tables are not versioned, then we "or" the bad block information. * If one of the BBTs is out of date or does not exist it is (re)created. * If no BBT exists at all then the device is scanned for factory marked * good / bad blocks and the bad block tables are created. * * For manufacturer created BBTs like the one found on M-SYS DOC devices * the BBT is searched and read but never created * * The auto generated bad block table is located in the last good blocks * of the device. The table is mirrored, so it can be updated eventually. * The table is marked in the OOB area with an ident pattern and a version * number which indicates which of both tables is more up to date. 
If the NAND * controller needs the complete OOB area for the ECC information then the * option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of * course): it moves the ident pattern and the version byte into the data area * and the OOB area will remain untouched. * * The table uses 2 bits per block * 11b: block is good * 00b: block is factory marked bad * 01b, 10b: block is marked bad due to wear * * The memory bad block table uses the following scheme: * 00b: block is good * 01b: block is marked bad due to wear * 10b: block is reserved (to protect the bbt area) * 11b: block is factory marked bad * * Multichip devices like DOC store the bad block info per floor. * * Following assumptions are made: * - bbts start at a page boundary, if autolocated on a block boundary * - the space necessary for a bbt in FLASH does not exceed a block boundary * */ #include <linux/slab.h> #include <linux/types.h> #include <linux/mtd/mtd.h> #include <linux/mtd/bbm.h> #include <linux/mtd/nand.h> #include <linux/mtd/nand_ecc.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/vmalloc.h> #include <linux/export.h> #include <linux/string.h> static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td) { if (memcmp(buf, td->pattern, td->len)) return -1; return 0; } /** * check_pattern - [GENERIC] check if a pattern is in the buffer * @buf: the buffer to search * @len: the length of buffer to search * @paglen: the pagelength * @td: search pattern descriptor * * Check for a pattern at the given place. Used to search bad block tables and * good / bad block identifiers. If the SCAN_EMPTY option is set then check, if * all bytes except the pattern area contain 0xff. 
*/ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td) { int end = 0; uint8_t *p = buf; if (td->options & NAND_BBT_NO_OOB) return check_pattern_no_oob(buf, td); end = paglen + td->offs; if (td->options & NAND_BBT_SCANEMPTY) if (memchr_inv(p, 0xff, end)) return -1; p += end; /* Compare the pattern */ if (memcmp(p, td->pattern, td->len)) return -1; if (td->options & NAND_BBT_SCANEMPTY) { p += td->len; end += td->len; if (memchr_inv(p, 0xff, len - end)) return -1; } return 0; } /** * check_short_pattern - [GENERIC] check if a pattern is in the buffer * @buf: the buffer to search * @td: search pattern descriptor * * Check for a pattern at the given place. Used to search bad block tables and * good / bad block identifiers. Same as check_pattern, but no optional empty * check. */ static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td) { /* Compare the pattern */ if (memcmp(buf + td->offs, td->pattern, td->len)) return -1; return 0; } /** * add_marker_len - compute the length of the marker in data area * @td: BBT descriptor used for computation * * The length will be 0 if the marker is located in OOB area. */ static u32 add_marker_len(struct nand_bbt_descr *td) { u32 len; if (!(td->options & NAND_BBT_NO_OOB)) return 0; len = td->len; if (td->options & NAND_BBT_VERSION) len++; return len; } /** * read_bbt - [GENERIC] Read the bad block table starting from page * @mtd: MTD device structure * @buf: temporary buffer * @page: the starting page * @num: the number of bbt descriptors to read * @td: the bbt describtion table * @offs: offset in the memory table * * Read the bad block table starting from page. 
*/ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num, struct nand_bbt_descr *td, int offs) { int res, ret = 0, i, j, act = 0; struct nand_chip *this = mtd->priv; size_t retlen, len, totlen; loff_t from; int bits = td->options & NAND_BBT_NRBITS_MSK; uint8_t msk = (uint8_t)((1 << bits) - 1); u32 marker_len; int reserved_block_code = td->reserved_block_code; totlen = (num * bits) >> 3; marker_len = add_marker_len(td); from = ((loff_t)page) << this->page_shift; while (totlen) { len = min(totlen, (size_t)(1 << this->bbt_erase_shift)); if (marker_len) { /* * In case the BBT marker is not in the OOB area it * will be just in the first page. */ len -= marker_len; from += marker_len; marker_len = 0; } res = mtd_read(mtd, from, len, &retlen, buf); if (res < 0) { if (mtd_is_eccerr(res)) { pr_info("nand_bbt: ECC error in BBT at " "0x%012llx\n", from & ~mtd->writesize); return res; } else if (mtd_is_bitflip(res)) { pr_info("nand_bbt: corrected error in BBT at " "0x%012llx\n", from & ~mtd->writesize); ret = res; } else { pr_info("nand_bbt: error reading BBT\n"); return res; } } /* Analyse data */ for (i = 0; i < len; i++) { uint8_t dat = buf[i]; for (j = 0; j < 8; j += bits, act += 2) { uint8_t tmp = (dat >> j) & msk; if (tmp == msk) continue; if (reserved_block_code && (tmp == reserved_block_code)) { pr_info("nand_read_bbt: reserved block at 0x%012llx\n", (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06); mtd->ecc_stats.bbtblocks++; continue; } /* * Leave it for now, if it's matured we can * move this message to pr_debug. */ pr_info("nand_read_bbt: bad block at 0x%012llx\n", (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); /* Factory marked bad or worn out? 
*/ if (tmp == 0) this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06); else this->bbt[offs + (act >> 3)] |= 0x1 << (act & 0x06); mtd->ecc_stats.badblocks++; } } totlen -= len; from += len; } return ret; } /** * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page * @mtd: MTD device structure * @buf: temporary buffer * @td: descriptor for the bad block table * @chip: read the table for a specific chip, -1 read all chips; applies only if * NAND_BBT_PERCHIP option is set * * Read the bad block table for all chips starting at a given page. We assume * that the bbt bits are in consecutive order. */ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip) { struct nand_chip *this = mtd->priv; int res = 0, i; if (td->options & NAND_BBT_PERCHIP) { int offs = 0; for (i = 0; i < this->numchips; i++) { if (chip == -1 || chip == i) res = read_bbt(mtd, buf, td->pages[i], this->chipsize >> this->bbt_erase_shift, td, offs); if (res) return res; offs += this->chipsize >> (this->bbt_erase_shift + 2); } } else { res = read_bbt(mtd, buf, td->pages[0], mtd->size >> this->bbt_erase_shift, td, 0); if (res) return res; } return 0; } /* BBT marker is in the first page, no OOB */ static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs, struct nand_bbt_descr *td) { size_t retlen; size_t len; len = td->len; if (td->options & NAND_BBT_VERSION) len++; return mtd_read(mtd, offs, len, &retlen, buf); } /** * scan_read_oob - [GENERIC] Scan data+OOB region to buffer * @mtd: MTD device structure * @buf: temporary buffer * @offs: offset at which to scan * @len: length of data region to read * * Scan read data from data+OOB. May traverse multiple pages, interleaving * page,OOB,page,OOB,... in buf. Completes transfer and returns the "strongest" * ECC condition (error or bitflip). May quit on the first (non-ECC) error. 
*/ static int scan_read_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs, size_t len) { struct mtd_oob_ops ops; int res, ret = 0; ops.mode = MTD_OPS_PLACE_OOB; ops.ooboffs = 0; ops.ooblen = mtd->oobsize; while (len > 0) { ops.datbuf = buf; ops.len = min(len, (size_t)mtd->writesize); ops.oobbuf = buf + ops.len; res = mtd_read_oob(mtd, offs, &ops); if (res) { if (!mtd_is_bitflip_or_eccerr(res)) return res; else if (mtd_is_eccerr(res) || !ret) ret = res; } buf += mtd->oobsize + mtd->writesize; len -= mtd->writesize; offs += mtd->writesize; } return ret; } static int scan_read(struct mtd_info *mtd, uint8_t *buf, loff_t offs, size_t len, struct nand_bbt_descr *td) { if (td->options & NAND_BBT_NO_OOB) return scan_read_data(mtd, buf, offs, td); else return scan_read_oob(mtd, buf, offs, len); } /* Scan write data with oob to flash */ static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len, uint8_t *buf, uint8_t *oob) { struct mtd_oob_ops ops; ops.mode = MTD_OPS_PLACE_OOB; ops.ooboffs = 0; ops.ooblen = mtd->oobsize; ops.datbuf = buf; ops.oobbuf = oob; ops.len = len; return mtd_write_oob(mtd, offs, &ops); } static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td) { u32 ver_offs = td->veroffs; if (!(td->options & NAND_BBT_NO_OOB)) ver_offs += mtd->writesize; return ver_offs; } /** * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page * @mtd: MTD device structure * @buf: temporary buffer * @td: descriptor for the bad block table * @md: descriptor for the bad block table mirror * * Read the bad block table(s) for all chips starting at a given page. We * assume that the bbt bits are in consecutive order. 
*/ static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md) { struct nand_chip *this = mtd->priv; /* Read the primary version, if available */ if (td->options & NAND_BBT_VERSION) { scan_read(mtd, buf, (loff_t)td->pages[0] << this->page_shift, mtd->writesize, td); td->version[0] = buf[bbt_get_ver_offs(mtd, td)]; pr_info("Bad block table at page %d, version 0x%02X\n", td->pages[0], td->version[0]); } /* Read the mirror version, if available */ if (md && (md->options & NAND_BBT_VERSION)) { scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift, mtd->writesize, md); md->version[0] = buf[bbt_get_ver_offs(mtd, md)]; pr_info("Bad block table at page %d, version 0x%02X\n", md->pages[0], md->version[0]); } } /* Scan a given block full */ static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd, loff_t offs, uint8_t *buf, size_t readlen, int scanlen, int numpages) { int ret, j; ret = scan_read_oob(mtd, buf, offs, readlen); /* Ignore ECC errors when checking for BBM */ if (ret && !mtd_is_bitflip_or_eccerr(ret)) return ret; for (j = 0; j < numpages; j++, buf += scanlen) { if (check_pattern(buf, scanlen, mtd->writesize, bd)) return 1; } return 0; } /* Scan a given block partially */ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd, loff_t offs, uint8_t *buf, int numpages) { struct mtd_oob_ops ops; int j, ret; ops.ooblen = mtd->oobsize; ops.oobbuf = buf; ops.ooboffs = 0; ops.datbuf = NULL; ops.mode = MTD_OPS_PLACE_OOB; for (j = 0; j < numpages; j++) { /* * Read the full oob until read_oob is fixed to handle single * byte reads for 16 bit buswidth. 
*/ ret = mtd_read_oob(mtd, offs, &ops); /* Ignore ECC errors when checking for BBM */ if (ret && !mtd_is_bitflip_or_eccerr(ret)) return ret; if (check_short_pattern(buf, bd)) return 1; offs += mtd->writesize; } return 0; } /** * create_bbt - [GENERIC] Create a bad block table by scanning the device * @mtd: MTD device structure * @buf: temporary buffer * @bd: descriptor for the good/bad block search pattern * @chip: create the table for a specific chip, -1 read all chips; applies only * if NAND_BBT_PERCHIP option is set * * Create a bad block table by scanning the device for the given good/bad block * identify pattern. */ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd, int chip) { struct nand_chip *this = mtd->priv; int i, numblocks, numpages, scanlen; int startblock; loff_t from; size_t readlen; pr_info("Scanning device for bad blocks\n"); if (bd->options & NAND_BBT_SCANALLPAGES) numpages = 1 << (this->bbt_erase_shift - this->page_shift); else if (bd->options & NAND_BBT_SCAN2NDPAGE) numpages = 2; else numpages = 1; if (!(bd->options & NAND_BBT_SCANEMPTY)) { /* We need only read few bytes from the OOB area */ scanlen = 0; readlen = bd->len; } else { /* Full page content should be read */ scanlen = mtd->writesize + mtd->oobsize; readlen = numpages * mtd->writesize; } if (chip == -1) { /* * Note that numblocks is 2 * (real numblocks) here, see i+=2 * below as it makes shifting and masking less painful */ numblocks = mtd->size >> (this->bbt_erase_shift - 1); startblock = 0; from = 0; } else { if (chip >= this->numchips) { pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n", chip + 1, this->numchips); return -EINVAL; } numblocks = this->chipsize >> (this->bbt_erase_shift - 1); startblock = chip * numblocks; numblocks += startblock; from = (loff_t)startblock << (this->bbt_erase_shift - 1); } if (this->bbt_options & NAND_BBT_SCANLASTPAGE) from += mtd->erasesize - (mtd->writesize * numpages); for (i = startblock; i < 
numblocks;) { int ret; BUG_ON(bd->options & NAND_BBT_NO_OOB); if (bd->options & NAND_BBT_SCANALLPAGES) ret = scan_block_full(mtd, bd, from, buf, readlen, scanlen, numpages); else ret = scan_block_fast(mtd, bd, from, buf, numpages); if (ret < 0) return ret; if (ret) { this->bbt[i >> 3] |= 0x03 << (i & 0x6); pr_warn("Bad eraseblock %d at 0x%012llx\n", i >> 1, (unsigned long long)from); mtd->ecc_stats.badblocks++; } i += 2; from += (1 << this->bbt_erase_shift); } return 0; } /** * search_bbt - [GENERIC] scan the device for a specific bad block table * @mtd: MTD device structure * @buf: temporary buffer * @td: descriptor for the bad block table * * Read the bad block table by searching for a given ident pattern. Search is * preformed either from the beginning up or from the end of the device * downwards. The search starts always at the start of a block. If the option * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains * the bad block information of this chip. This is necessary to provide support * for certain DOC devices. * * The bbt ident pattern resides in the oob area of the first page in a block. */ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td) { struct nand_chip *this = mtd->priv; int i, chips; int bits, startblock, block, dir; int scanlen = mtd->writesize + mtd->oobsize; int bbtblocks; int blocktopage = this->bbt_erase_shift - this->page_shift; /* Search direction top -> down? */ if (td->options & NAND_BBT_LASTBLOCK) { startblock = (mtd->size >> this->bbt_erase_shift) - 1; dir = -1; } else { startblock = 0; dir = 1; } /* Do we have a bbt per chip? 
*/ if (td->options & NAND_BBT_PERCHIP) { chips = this->numchips; bbtblocks = this->chipsize >> this->bbt_erase_shift; startblock &= bbtblocks - 1; } else { chips = 1; bbtblocks = mtd->size >> this->bbt_erase_shift; } /* Number of bits for each erase block in the bbt */ bits = td->options & NAND_BBT_NRBITS_MSK; for (i = 0; i < chips; i++) { /* Reset version information */ td->version[i] = 0; td->pages[i] = -1; /* Scan the maximum number of blocks */ for (block = 0; block < td->maxblocks; block++) { int actblock = startblock + dir * block; loff_t offs = (loff_t)actblock << this->bbt_erase_shift; /* Read first page */ scan_read(mtd, buf, offs, mtd->writesize, td); if (!check_pattern(buf, scanlen, mtd->writesize, td)) { td->pages[i] = actblock << blocktopage; if (td->options & NAND_BBT_VERSION) { offs = bbt_get_ver_offs(mtd, td); td->version[i] = buf[offs]; } break; } } startblock += this->chipsize >> this->bbt_erase_shift; } /* Check, if we found a bbt for each requested chip */ for (i = 0; i < chips; i++) { if (td->pages[i] == -1) pr_warn("Bad block table not found for chip %d\n", i); else pr_info("Bad block table found at page %d, version " "0x%02X\n", td->pages[i], td->version[i]); } return 0; } /** * search_read_bbts - [GENERIC] scan the device for bad block table(s) * @mtd: MTD device structure * @buf: temporary buffer * @td: descriptor for the bad block table * @md: descriptor for the bad block table mirror * * Search and read the bad block table(s). 
*/ static void search_read_bbts(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md) { /* Search the primary table */ search_bbt(mtd, buf, td); /* Search the mirror table */ if (md) search_bbt(mtd, buf, md); } /** * write_bbt - [GENERIC] (Re)write the bad block table * @mtd: MTD device structure * @buf: temporary buffer * @td: descriptor for the bad block table * @md: descriptor for the bad block table mirror * @chipsel: selector for a specific chip, -1 for all * * (Re)write the bad block table. */ static int write_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md, int chipsel) { struct nand_chip *this = mtd->priv; struct erase_info einfo; int i, j, res, chip = 0; int bits, startblock, dir, page, offs, numblocks, sft, sftmsk; int nrchips, bbtoffs, pageoffs, ooboffs; uint8_t msk[4]; uint8_t rcode = td->reserved_block_code; size_t retlen, len = 0; loff_t to; struct mtd_oob_ops ops; ops.ooblen = mtd->oobsize; ops.ooboffs = 0; ops.datbuf = NULL; ops.mode = MTD_OPS_PLACE_OOB; if (!rcode) rcode = 0xff; /* Write bad block table per chip rather than per device? */ if (td->options & NAND_BBT_PERCHIP) { numblocks = (int)(this->chipsize >> this->bbt_erase_shift); /* Full device write or specific chip? */ if (chipsel == -1) { nrchips = this->numchips; } else { nrchips = chipsel + 1; chip = chipsel; } } else { numblocks = (int)(mtd->size >> this->bbt_erase_shift); nrchips = 1; } /* Loop through the chips */ for (; chip < nrchips; chip++) { /* * There was already a version of the table, reuse the page * This applies for absolute placement too, as we have the * page nr. in td->pages. */ if (td->pages[chip] != -1) { page = td->pages[chip]; goto write; } /* * Automatic placement of the bad block table. Search direction * top -> down? 
*/ if (td->options & NAND_BBT_LASTBLOCK) { startblock = numblocks * (chip + 1) - 1; dir = -1; } else { startblock = chip * numblocks; dir = 1; } for (i = 0; i < td->maxblocks; i++) { int block = startblock + dir * i; /* Check, if the block is bad */ switch ((this->bbt[block >> 2] >> (2 * (block & 0x03))) & 0x03) { case 0x01: case 0x03: continue; } page = block << (this->bbt_erase_shift - this->page_shift); /* Check, if the block is used by the mirror table */ if (!md || md->pages[chip] != page) goto write; } pr_err("No space left to write bad block table\n"); return -ENOSPC; write: /* Set up shift count and masks for the flash table */ bits = td->options & NAND_BBT_NRBITS_MSK; msk[2] = ~rcode; switch (bits) { case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01; msk[3] = 0x01; break; case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01; msk[3] = 0x03; break; case 4: sft = 1; sftmsk = 0x04; msk[0] = 0x00; msk[1] = 0x0C; msk[3] = 0x0f; break; case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F; msk[3] = 0xff; break; default: return -EINVAL; } bbtoffs = chip * (numblocks >> 2); to = ((loff_t)page) << this->page_shift; /* Must we save the block contents? 
*/ if (td->options & NAND_BBT_SAVECONTENT) { /* Make it block aligned */ to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1)); len = 1 << this->bbt_erase_shift; res = mtd_read(mtd, to, len, &retlen, buf); if (res < 0) { if (retlen != len) { pr_info("nand_bbt: error reading block " "for writing the bad block table\n"); return res; } pr_warn("nand_bbt: ECC error while reading " "block for writing bad block table\n"); } /* Read oob data */ ops.ooblen = (len >> this->page_shift) * mtd->oobsize; ops.oobbuf = &buf[len]; res = mtd_read_oob(mtd, to + mtd->writesize, &ops); if (res < 0 || ops.oobretlen != ops.ooblen) goto outerr; /* Calc the byte offset in the buffer */ pageoffs = page - (int)(to >> this->page_shift); offs = pageoffs << this->page_shift; /* Preset the bbt area with 0xff */ memset(&buf[offs], 0xff, (size_t)(numblocks >> sft)); ooboffs = len + (pageoffs * mtd->oobsize); } else if (td->options & NAND_BBT_NO_OOB) { ooboffs = 0; offs = td->len; /* The version byte */ if (td->options & NAND_BBT_VERSION) offs++; /* Calc length */ len = (size_t)(numblocks >> sft); len += offs; /* Make it page aligned! */ len = ALIGN(len, mtd->writesize); /* Preset the buffer with 0xff */ memset(buf, 0xff, len); /* Pattern is located at the begin of first page */ memcpy(buf, td->pattern, td->len); } else { /* Calc length */ len = (size_t)(numblocks >> sft); /* Make it page aligned! */ len = ALIGN(len, mtd->writesize); /* Preset the buffer with 0xff */ memset(buf, 0xff, len + (len >> this->page_shift)* mtd->oobsize); offs = 0; ooboffs = len; /* Pattern is located in oob area of first page */ memcpy(&buf[ooboffs + td->offs], td->pattern, td->len); } if (td->options & NAND_BBT_VERSION) buf[ooboffs + td->veroffs] = td->version[chip]; /* Walk through the memory table */ for (i = 0; i < numblocks;) { uint8_t dat; dat = this->bbt[bbtoffs + (i >> 2)]; for (j = 0; j < 4; j++, i++) { int sftcnt = (i << (3 - sft)) & sftmsk; /* Do not store the reserved bbt blocks! 
*/ buf[offs + (i >> sft)] &= ~(msk[dat & 0x03] << sftcnt); dat >>= 2; } } memset(&einfo, 0, sizeof(einfo)); einfo.mtd = mtd; einfo.addr = to; einfo.len = 1 << this->bbt_erase_shift; res = nand_erase_nand(mtd, &einfo, 1); if (res < 0) goto outerr; res = scan_write_bbt(mtd, to, len, buf, td->options & NAND_BBT_NO_OOB ? NULL : &buf[len]); if (res < 0) goto outerr; pr_info("Bad block table written to 0x%012llx, version 0x%02X\n", (unsigned long long)to, td->version[chip]); /* Mark it as used */ td->pages[chip] = page; } return 0; outerr: pr_warn("nand_bbt: error while writing bad block table %d\n", res); return res; } /** * nand_memory_bbt - [GENERIC] create a memory based bad block table * @mtd: MTD device structure * @bd: descriptor for the good/bad block search pattern * * The function creates a memory based bbt by scanning the device for * manufacturer / software marked good / bad blocks. */ static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) { struct nand_chip *this = mtd->priv; bd->options &= ~NAND_BBT_SCANEMPTY; return create_bbt(mtd, this->buffers->databuf, bd, -1); } /** * check_create - [GENERIC] create and write bbt(s) if necessary * @mtd: MTD device structure * @buf: temporary buffer * @bd: descriptor for the good/bad block search pattern * * The function checks the results of the previous call to read_bbt and creates * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found * for the chip/device. Update is necessary if one of the tables is missing or * the version nr. of one table is less than the other. */ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd) { int i, chips, writeops, create, chipsel, res, res2; struct nand_chip *this = mtd->priv; struct nand_bbt_descr *td = this->bbt_td; struct nand_bbt_descr *md = this->bbt_md; struct nand_bbt_descr *rd, *rd2; /* Do we have a bbt per chip? 
*/ if (td->options & NAND_BBT_PERCHIP) chips = this->numchips; else chips = 1; for (i = 0; i < chips; i++) { writeops = 0; create = 0; rd = NULL; rd2 = NULL; res = res2 = 0; /* Per chip or per device? */ chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1; /* Mirrored table available? */ if (md) { if (td->pages[i] == -1 && md->pages[i] == -1) { create = 1; writeops = 0x03; } else if (td->pages[i] == -1) { rd = md; writeops = 0x01; } else if (md->pages[i] == -1) { rd = td; writeops = 0x02; } else if (td->version[i] == md->version[i]) { rd = td; if (!(td->options & NAND_BBT_VERSION)) rd2 = md; } else if (((int8_t)(td->version[i] - md->version[i])) > 0) { rd = td; writeops = 0x02; } else { rd = md; writeops = 0x01; } } else { if (td->pages[i] == -1) { create = 1; writeops = 0x01; } else { rd = td; } } if (create) { /* Create the bad block table by scanning the device? */ if (!(td->options & NAND_BBT_CREATE)) continue; /* Create the table in memory by scanning the chip(s) */ if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY)) create_bbt(mtd, buf, bd, chipsel); td->version[i] = 1; if (md) md->version[i] = 1; } /* Read back first? */ if (rd) { res = read_abs_bbt(mtd, buf, rd, chipsel); if (mtd_is_eccerr(res)) { /* Mark table as invalid */ rd->pages[i] = -1; rd->version[i] = 0; i--; continue; } } /* If they weren't versioned, read both */ if (rd2) { res2 = read_abs_bbt(mtd, buf, rd2, chipsel); if (mtd_is_eccerr(res2)) { /* Mark table as invalid */ rd2->pages[i] = -1; rd2->version[i] = 0; i--; continue; } } /* Scrub the flash table(s)? */ if (mtd_is_bitflip(res) || mtd_is_bitflip(res2)) writeops = 0x03; /* Update version numbers before writing */ if (md) { td->version[i] = max(td->version[i], md->version[i]); md->version[i] = td->version[i]; } /* Write the bad block table to the device? */ if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) { res = write_bbt(mtd, buf, td, md, chipsel); if (res < 0) return res; } /* Write the mirror bad block table to the device? 
*/ if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) { res = write_bbt(mtd, buf, md, td, chipsel); if (res < 0) return res; } } return 0; } /** * mark_bbt_regions - [GENERIC] mark the bad block table regions * @mtd: MTD device structure * @td: bad block table descriptor * * The bad block table regions are marked as "bad" to prevent accidental * erasures / writes. The regions are identified by the mark 0x02. */ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td) { struct nand_chip *this = mtd->priv; int i, j, chips, block, nrblocks, update; uint8_t oldval, newval; /* Do we have a bbt per chip? */ if (td->options & NAND_BBT_PERCHIP) { chips = this->numchips; nrblocks = (int)(this->chipsize >> this->bbt_erase_shift); } else { chips = 1; nrblocks = (int)(mtd->size >> this->bbt_erase_shift); } for (i = 0; i < chips; i++) { if ((td->options & NAND_BBT_ABSPAGE) || !(td->options & NAND_BBT_WRITE)) { if (td->pages[i] == -1) continue; block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift); block <<= 1; oldval = this->bbt[(block >> 3)]; newval = oldval | (0x2 << (block & 0x06)); this->bbt[(block >> 3)] = newval; if ((oldval != newval) && td->reserved_block_code) nand_update_bbt(mtd, (loff_t)block << (this->bbt_erase_shift - 1)); continue; } update = 0; if (td->options & NAND_BBT_LASTBLOCK) block = ((i + 1) * nrblocks) - td->maxblocks; else block = i * nrblocks; block <<= 1; for (j = 0; j < td->maxblocks; j++) { oldval = this->bbt[(block >> 3)]; newval = oldval | (0x2 << (block & 0x06)); this->bbt[(block >> 3)] = newval; if (oldval != newval) update = 1; block += 2; } /* * If we want reserved blocks to be recorded to flash, and some * new ones have been marked, then we need to update the stored * bbts. This should only happen once. 
*/ if (update && td->reserved_block_code) nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1)); } } /** * verify_bbt_descr - verify the bad block description * @mtd: MTD device structure * @bd: the table to verify * * This functions performs a few sanity checks on the bad block description * table. */ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd) { struct nand_chip *this = mtd->priv; u32 pattern_len; u32 bits; u32 table_size; if (!bd) return; pattern_len = bd->len; bits = bd->options & NAND_BBT_NRBITS_MSK; BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) && !(this->bbt_options & NAND_BBT_USE_FLASH)); BUG_ON(!bits); if (bd->options & NAND_BBT_VERSION) pattern_len++; if (bd->options & NAND_BBT_NO_OOB) { BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH)); BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB)); BUG_ON(bd->offs); if (bd->options & NAND_BBT_VERSION) BUG_ON(bd->veroffs != bd->len); BUG_ON(bd->options & NAND_BBT_SAVECONTENT); } if (bd->options & NAND_BBT_PERCHIP) table_size = this->chipsize >> this->bbt_erase_shift; else table_size = mtd->size >> this->bbt_erase_shift; table_size >>= 3; table_size *= bits; if (bd->options & NAND_BBT_NO_OOB) table_size += pattern_len; BUG_ON(table_size > (1 << this->bbt_erase_shift)); } /** * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s) * @mtd: MTD device structure * @bd: descriptor for the good/bad block search pattern * * The function checks, if a bad block table(s) is/are already available. If * not it scans the device for manufacturer marked good / bad blocks and writes * the bad block table(s) to the selected place. * * The bad block table memory is allocated here. It must be freed by calling * the nand_free_bbt function. 
*/ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) { struct nand_chip *this = mtd->priv; int len, res = 0; uint8_t *buf; struct nand_bbt_descr *td = this->bbt_td; struct nand_bbt_descr *md = this->bbt_md; len = mtd->size >> (this->bbt_erase_shift + 2); /* * Allocate memory (2bit per block) and clear the memory bad block * table. */ this->bbt = kzalloc(len, GFP_KERNEL); if (!this->bbt) return -ENOMEM; /* * If no primary table decriptor is given, scan the device to build a * memory based bad block table. */ if (!td) { if ((res = nand_memory_bbt(mtd, bd))) { pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n"); kfree(this->bbt); this->bbt = NULL; } return res; } verify_bbt_descr(mtd, td); verify_bbt_descr(mtd, md); /* Allocate a temporary buffer for one eraseblock incl. oob */ len = (1 << this->bbt_erase_shift); len += (len >> this->page_shift) * mtd->oobsize; buf = vmalloc(len); if (!buf) { kfree(this->bbt); this->bbt = NULL; return -ENOMEM; } /* Is the bbt at a given page? */ if (td->options & NAND_BBT_ABSPAGE) { read_abs_bbts(mtd, buf, td, md); } else { /* Search the bad block table using a pattern in oob */ search_read_bbts(mtd, buf, td, md); } res = check_create(mtd, buf, bd); /* Prevent the bbt regions from erasing / writing */ mark_bbt_region(mtd, td); if (md) mark_bbt_region(mtd, md); vfree(buf); return res; } /** * nand_update_bbt - [NAND Interface] update bad block table(s) * @mtd: MTD device structure * @offs: the offset of the newly marked block * * The function updates the bad block table(s). */ int nand_update_bbt(struct mtd_info *mtd, loff_t offs) { struct nand_chip *this = mtd->priv; int len, res = 0; int chip, chipsel; uint8_t *buf; struct nand_bbt_descr *td = this->bbt_td; struct nand_bbt_descr *md = this->bbt_md; if (!this->bbt || !td) return -EINVAL; /* Allocate a temporary buffer for one eraseblock incl. 
oob */ len = (1 << this->bbt_erase_shift); len += (len >> this->page_shift) * mtd->oobsize; buf = kmalloc(len, GFP_KERNEL); if (!buf) return -ENOMEM; /* Do we have a bbt per chip? */ if (td->options & NAND_BBT_PERCHIP) { chip = (int)(offs >> this->chip_shift); chipsel = chip; } else { chip = 0; chipsel = -1; } td->version[chip]++; if (md) md->version[chip]++; /* Write the bad block table to the device? */ if (td->options & NAND_BBT_WRITE) { res = write_bbt(mtd, buf, td, md, chipsel); if (res < 0) goto out; } /* Write the mirror bad block table to the device? */ if (md && (md->options & NAND_BBT_WRITE)) { res = write_bbt(mtd, buf, md, td, chipsel); } out: kfree(buf); return res; } /* * Define some generic bad / good block scan pattern which are used * while scanning a device for factory marked good / bad blocks. */ static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; /* Generic flash bbt descriptors */ static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' }; static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' }; static struct nand_bbt_descr bbt_main_descr = { .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, .offs = 8, .len = 4, .veroffs = 12, .maxblocks = NAND_BBT_SCAN_MAXBLOCKS, .pattern = bbt_pattern }; static struct nand_bbt_descr bbt_mirror_descr = { .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, .offs = 8, .len = 4, .veroffs = 12, .maxblocks = NAND_BBT_SCAN_MAXBLOCKS, .pattern = mirror_pattern }; static struct nand_bbt_descr bbt_main_no_oob_descr = { .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP | NAND_BBT_NO_OOB, .len = 4, .veroffs = 4, .maxblocks = NAND_BBT_SCAN_MAXBLOCKS, .pattern = bbt_pattern }; static struct nand_bbt_descr bbt_mirror_no_oob_descr = { .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | NAND_BBT_2BIT | 
NAND_BBT_VERSION | NAND_BBT_PERCHIP | NAND_BBT_NO_OOB, .len = 4, .veroffs = 4, .maxblocks = NAND_BBT_SCAN_MAXBLOCKS, .pattern = mirror_pattern }; #define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB) /** * nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure * @this: NAND chip to create descriptor for * * This function allocates and initializes a nand_bbt_descr for BBM detection * based on the properties of @this. The new descriptor is stored in * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when * passed to this function. */ static int nand_create_badblock_pattern(struct nand_chip *this) { struct nand_bbt_descr *bd; if (this->badblock_pattern) { pr_warn("Bad block pattern already allocated; not replacing\n"); return -EINVAL; } bd = kzalloc(sizeof(*bd), GFP_KERNEL); if (!bd) return -ENOMEM; bd->options = this->bbt_options & BADBLOCK_SCAN_MASK; bd->offs = this->badblockpos; bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1; bd->pattern = scan_ff_pattern; bd->options |= NAND_BBT_DYNAMICSTRUCT; this->badblock_pattern = bd; return 0; } /** * nand_default_bbt - [NAND Interface] Select a default bad block table for the device * @mtd: MTD device structure * * This function selects the default bad block table support for the device and * calls the nand_scan_bbt function. */ int nand_default_bbt(struct mtd_info *mtd) { struct nand_chip *this = mtd->priv; /* Is a flash based bad block table requested? 
*/ if (this->bbt_options & NAND_BBT_USE_FLASH) { /* Use the default pattern descriptors */ if (!this->bbt_td) { if (this->bbt_options & NAND_BBT_NO_OOB) { this->bbt_td = &bbt_main_no_oob_descr; this->bbt_md = &bbt_mirror_no_oob_descr; } else { this->bbt_td = &bbt_main_descr; this->bbt_md = &bbt_mirror_descr; } } } else { this->bbt_td = NULL; this->bbt_md = NULL; } if (!this->badblock_pattern) nand_create_badblock_pattern(this); return nand_scan_bbt(mtd, this->badblock_pattern); } /** * nand_isbad_bbt - [NAND Interface] Check if a block is bad * @mtd: MTD device structure * @offs: offset in the device * @allowbbt: allow access to bad block table region */ int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt) { struct nand_chip *this = mtd->priv; int block; uint8_t res; /* Get block number * 2 */ block = (int)(offs >> (this->bbt_erase_shift - 1)); res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03; pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: " "(block %d) 0x%02x\n", (unsigned int)offs, block >> 1, res); switch ((int)res) { case 0x00: return 0; case 0x01: return 1; case 0x02: return allowbbt ? 0 : 1; } return 1; } EXPORT_SYMBOL(nand_scan_bbt); EXPORT_SYMBOL(nand_default_bbt); EXPORT_SYMBOL_GPL(nand_update_bbt);
gpl-2.0
qqzwc/JBX_Kernel
drivers/media/dvb/frontends/dib0090.c
2369
72633
/*
 * Linux-DVB Driver for DiBcom's DiB0090 base-band RF Tuner.
 *
 * Copyright (C) 2005-9 DiBcom (http://www.dibcom.fr/)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * This code is more or less generated from another driver, please
 * excuse some coding style oddities.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mutex.h>

#include "dvb_frontend.h"

#include "dib0090.h"
#include "dibx000_common.h"

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");

/* Debug printout helper, gated on the "debug" module parameter. */
#define dprintk(args...) do { \
	if (debug) { \
		printk(KERN_DEBUG "DiB0090: "); \
		printk(args); \
		printk("\n"); \
	} \
} while (0)

#define CONFIG_SYS_DVBT
#define CONFIG_SYS_ISDBT
#define CONFIG_BAND_CBAND
#define CONFIG_BAND_VHF
#define CONFIG_BAND_UHF
#define CONFIG_DIB0090_USE_PWM_AGC

/* Block-enable bits for the power-control registers (0x24, 0x1b). */
#define EN_LNA0      0x8000
#define EN_LNA1      0x4000
#define EN_LNA2      0x2000
#define EN_LNA3      0x1000
#define EN_MIX0      0x0800
#define EN_MIX1      0x0400
#define EN_MIX2      0x0200
#define EN_MIX3      0x0100
#define EN_IQADC     0x0040
#define EN_PLL       0x0020
#define EN_TX        0x0010
#define EN_BB        0x0008
#define EN_LO        0x0004
#define EN_BIAS      0x0001
#define EN_IQANA     0x0002
#define EN_DIGCLK    0x0080	/* not in the 0x24 reg, only in 0x1b */
#define EN_CRYSTAL   0x0002

/* Per-band enable masks (combinations of the bits above). */
#define EN_UHF	     0x22E9
#define EN_VHF	     0x44E9
#define EN_LBD	     0x11E9
#define EN_SBD	     0x44E9
#define EN_CAB	     0x88E9

/* Calibration defines */
#define DC_CAL       0x1
#define WBD_CAL      0x2
#define TEMP_CAL     0x4
#define CAPTRIM_CAL  0x8

#define KROSUS_PLL_LOCKED   0x800
#define KROSUS              0x2

/* Use those defines to identify SOC version */
#define SOC                 0x02
#define SOC_7090_P1G_11R1   0x82
#define SOC_7090_P1G_21R1   0x8a
#define SOC_8090_P1G_11R1   0x86
#define SOC_8090_P1G_21R1   0x8e

/* else use those ones to check */
#define P1A_B               0x0
#define P1C                 0x1
#define P1D_E_F             0x3
#define P1G                 0x7
#define P1G_21R2            0xf

#define MP001 0x1	/* Single 9090/8096 */
#define MP005 0x4	/* Single Sband */
#define MP008 0x6	/* Dual diversity VHF-UHF-LBAND */
#define MP009 0x7	/* Dual diversity 29098 CBAND-UHF-LBAND-SBAND */

#define pgm_read_word(w) (*w)

struct dc_calibration;

/* Per-frequency-range tuning parameters. */
struct dib0090_tuning {
	u32 max_freq;		/* for every frequency less than or equal to that field: this information is correct */
	u8 switch_trim;
	u8 lna_tune;
	u16 lna_bias;
	u16 v2i;
	u16 mix;
	u16 load;
	u16 tuner_enable;
};

/* Per-frequency-range PLL divider configuration. */
struct dib0090_pll {
	u32 max_freq;		/* for every frequency less than or equal to that field: this information is correct */
	u8 vco_band;
	u8 hfdiv_code;
	u8 hfdiv;
	u8 topresc;
};

/* Chip identification, filled in by dib0090_identify()/dib0090_fw_identify(). */
struct dib0090_identity {
	u8 version;
	u8 product;
	u8 p1g;
	u8 in_soc;
};
/* Driver state for the normal (host-driven) mode of operation. */
struct dib0090_state {
	struct i2c_adapter *i2c;
	struct dvb_frontend *fe;
	const struct dib0090_config *config;

	u8 current_band;
	enum frontend_tune_state tune_state;
	u32 current_rf;

	u16 wbd_offset;
	s16 wbd_target;		/* in dB */

	s16 rf_gain_limit;	/* take-over-point: where to split between bb and rf gain */
	s16 current_gain;	/* keeps the currently programmed gain */
	u8 agc_step;		/* new binary search */

	u16 gain[2];		/* for channel monitoring */

	const u16 *rf_ramp;
	const u16 *bb_ramp;

	/* for the software AGC ramps */
	u16 bb_1_def;
	u16 rf_lt_def;
	u16 gain_reg[4];

	/* for the captrim/dc-offset search */
	s8 step;
	s16 adc_diff;
	s16 min_adc_diff;

	s8 captrim;
	s8 fcaptrim;

	const struct dc_calibration *dc;

	u16 bb6, bb7;

	const struct dib0090_tuning *current_tune_table_index;
	const struct dib0090_pll *current_pll_table_index;

	u8 tuner_is_tuned;
	u8 agc_freeze;

	struct dib0090_identity identity;

	u32 rf_request;
	u8 current_standard;

	u8 calibrate;
	u32 rest;

	u16 bias;

	s16 temperature;

	u8 wbd_calibration_gain;
	const struct dib0090_wbd_slope *current_wbd_table;
	u16 wbdmux;

	/* for the I2C transfer */
	struct i2c_msg msg[2];
	u8 i2c_write_buffer[3];
	u8 i2c_read_buffer[2];
	struct mutex i2c_buffer_lock;	/* serializes use of the shared msg/buffers */
};

/* Reduced state used when the on-chip firmware drives the tuner. */
struct dib0090_fw_state {
	struct i2c_adapter *i2c;
	struct dvb_frontend *fe;
	struct dib0090_identity identity;
	const struct dib0090_config *config;

	/* for the I2C transfer */
	struct i2c_msg msg;
	u8 i2c_write_buffer[2];
	u8 i2c_read_buffer[2];
	struct mutex i2c_buffer_lock;	/* serializes use of the shared msg/buffers */
};

/*
 * Read a 16-bit register: one address-write message followed by a
 * 2-byte read. Returns 0 on any failure (0 is thus in-band).
 */
static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg)
{
	u16 ret;

	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
		dprintk("could not acquire lock");
		return 0;
	}

	state->i2c_write_buffer[0] = reg;

	memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
	state->msg[0].addr = state->config->i2c_address;
	state->msg[0].flags = 0;
	state->msg[0].buf = state->i2c_write_buffer;
	state->msg[0].len = 1;
	state->msg[1].addr = state->config->i2c_address;
	state->msg[1].flags = I2C_M_RD;
	state->msg[1].buf = state->i2c_read_buffer;
	state->msg[1].len = 2;

	if (i2c_transfer(state->i2c, state->msg, 2) != 2) {
		printk(KERN_WARNING "DiB0090 I2C read failed\n");
		ret = 0;
	} else
		ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];

	mutex_unlock(&state->i2c_buffer_lock);
	return ret;
}

/*
 * Write a 16-bit value (big-endian) to a register.
 * Note: reg is declared u32 but only the low byte is transmitted.
 */
static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
{
	int ret;

	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
		dprintk("could not acquire lock");
		return -EINVAL;
	}

	state->i2c_write_buffer[0] = reg & 0xff;
	state->i2c_write_buffer[1] = val >> 8;
	state->i2c_write_buffer[2] = val & 0xff;

	memset(state->msg, 0, sizeof(struct i2c_msg));
	state->msg[0].addr = state->config->i2c_address;
	state->msg[0].flags = 0;
	state->msg[0].buf = state->i2c_write_buffer;
	state->msg[0].len = 3;

	if (i2c_transfer(state->i2c, state->msg, 1) != 1) {
		printk(KERN_WARNING "DiB0090 I2C write failed\n");
		ret = -EREMOTEIO;
	} else
		ret = 0;

	mutex_unlock(&state->i2c_buffer_lock);
	return ret;
}

/*
 * Firmware-mode register read.
 * NOTE(review): the register number is used as the I2C address here
 * (msg.addr = reg) — presumably the firmware I2C gateway encodes the
 * register in the address field; confirm against the dibx000 docs.
 */
static u16 dib0090_fw_read_reg(struct dib0090_fw_state *state, u8 reg)
{
	u16 ret;

	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
		dprintk("could not acquire lock");
		return 0;
	}

	state->i2c_write_buffer[0] = reg;

	memset(&state->msg, 0, sizeof(struct i2c_msg));
	state->msg.addr = reg;
	state->msg.flags = I2C_M_RD;
	state->msg.buf = state->i2c_read_buffer;
	state->msg.len = 2;
	if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
		printk(KERN_WARNING "DiB0090 I2C read failed\n");
		ret = 0;
	} else
		ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];

	mutex_unlock(&state->i2c_buffer_lock);
	return ret;
}

/*
 * Firmware-mode register write; same addressing scheme as
 * dib0090_fw_read_reg() (register number in msg.addr).
 */
static int dib0090_fw_write_reg(struct dib0090_fw_state *state, u8 reg, u16 val)
{
	int ret;

	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
		dprintk("could not acquire lock");
		return -EINVAL;
	}

	state->i2c_write_buffer[0] = val >> 8;
	state->i2c_write_buffer[1] = val & 0xff;

	memset(&state->msg, 0, sizeof(struct i2c_msg));
	state->msg.addr = reg;
	state->msg.flags = 0;
	state->msg.buf = state->i2c_write_buffer;
	state->msg.len = 2;
	if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
		printk(KERN_WARNING "DiB0090 I2C write failed\n");
		ret = -EREMOTEIO;
	} else
		ret = 0;

	mutex_unlock(&state->i2c_buffer_lock);
	return ret;
}

/* Pulse the reset line (expects cfg/fe in scope at the expansion site). */
#define HARD_RESET(state) do { if (cfg->reset) { if (cfg->sleep) cfg->sleep(fe, 0); msleep(10); cfg->reset(fe, 1); msleep(10); cfg->reset(fe, 0); msleep(10); } } while (0)

#define ADC_TARGET -220
#define GAIN_ALPHA 5
#define WBD_ALPHA 6
#define LPF	100

/* Write c consecutive registers starting at r. Assumes c >= 1. */
static void dib0090_write_regs(struct dib0090_state *state, u8 r, const u16 * b, u8 c)
{
	do {
		dib0090_write_reg(state, r++, *b++);
	} while (--c);
}

/*
 * Identify the tuner from register 0x1a and fill state->identity.
 * Returns 0 on success, -EIO for unknown/unsupported silicon.
 */
static int dib0090_identify(struct dvb_frontend *fe)
{
	struct dib0090_state *state = fe->tuner_priv;
	u16 v;
	struct dib0090_identity *identity = &state->identity;

	v = dib0090_read_reg(state, 0x1a);

	identity->p1g = 0;
	identity->in_soc = 0;

	dprintk("Tuner identification (Version = 0x%04x)", v);

	/* without PLL lock info */
	v &= ~KROSUS_PLL_LOCKED;

	identity->version = v & 0xff;
	identity->product = (v >> 8) & 0xf;

	if (identity->product != KROSUS)
		goto identification_error;

	if ((identity->version & 0x3) == SOC) {
		identity->in_soc = 1;
		switch (identity->version) {
		case SOC_8090_P1G_11R1:
			dprintk("SOC 8090 P1-G11R1 Has been detected");
			identity->p1g = 1;
			break;
		case SOC_8090_P1G_21R1:
			dprintk("SOC 8090 P1-G21R1 Has been detected");
			identity->p1g = 1;
			break;
		case SOC_7090_P1G_11R1:
			dprintk("SOC 7090 P1-G11R1 Has been detected");
			identity->p1g = 1;
			break;
		case SOC_7090_P1G_21R1:
			dprintk("SOC 7090 P1-G21R1 Has been detected");
			identity->p1g = 1;
			break;
		default:
			goto identification_error;
		}
	} else {
		switch ((identity->version >> 5) & 0x7) {
		case MP001:
			dprintk("MP001 : 9090/8096");
			break;
		case MP005:
			dprintk("MP005 : Single Sband");
			break;
		case MP008:
			dprintk("MP008 : diversity VHF-UHF-LBAND");
			break;
		case MP009:
			dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND");
			break;
		default:
			goto identification_error;
		}

		switch
(identity->version & 0x1f) { case P1G_21R2: dprintk("P1G_21R2 detected"); identity->p1g = 1; break; case P1G: dprintk("P1G detected"); identity->p1g = 1; break; case P1D_E_F: dprintk("P1D/E/F detected"); break; case P1C: dprintk("P1C detected"); break; case P1A_B: dprintk("P1-A/B detected: driver is deactivated - not available"); goto identification_error; break; default: goto identification_error; } } return 0; identification_error: return -EIO; } static int dib0090_fw_identify(struct dvb_frontend *fe) { struct dib0090_fw_state *state = fe->tuner_priv; struct dib0090_identity *identity = &state->identity; u16 v = dib0090_fw_read_reg(state, 0x1a); identity->p1g = 0; identity->in_soc = 0; dprintk("FE: Tuner identification (Version = 0x%04x)", v); /* without PLL lock info */ v &= ~KROSUS_PLL_LOCKED; identity->version = v & 0xff; identity->product = (v >> 8) & 0xf; if (identity->product != KROSUS) goto identification_error; if ((identity->version & 0x3) == SOC) { identity->in_soc = 1; switch (identity->version) { case SOC_8090_P1G_11R1: dprintk("SOC 8090 P1-G11R1 Has been detected"); identity->p1g = 1; break; case SOC_8090_P1G_21R1: dprintk("SOC 8090 P1-G21R1 Has been detected"); identity->p1g = 1; break; case SOC_7090_P1G_11R1: dprintk("SOC 7090 P1-G11R1 Has been detected"); identity->p1g = 1; break; case SOC_7090_P1G_21R1: dprintk("SOC 7090 P1-G21R1 Has been detected"); identity->p1g = 1; break; default: goto identification_error; } } else { switch ((identity->version >> 5) & 0x7) { case MP001: dprintk("MP001 : 9090/8096"); break; case MP005: dprintk("MP005 : Single Sband"); break; case MP008: dprintk("MP008 : diversity VHF-UHF-LBAND"); break; case MP009: dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND"); break; default: goto identification_error; } switch (identity->version & 0x1f) { case P1G_21R2: dprintk("P1G_21R2 detected"); identity->p1g = 1; break; case P1G: dprintk("P1G detected"); identity->p1g = 1; break; case P1D_E_F: dprintk("P1D/E/F detected"); 
break; case P1C: dprintk("P1C detected"); break; case P1A_B: dprintk("P1-A/B detected: driver is deactivated - not available"); goto identification_error; break; default: goto identification_error; } } return 0; identification_error: return -EIO;; } static void dib0090_reset_digital(struct dvb_frontend *fe, const struct dib0090_config *cfg) { struct dib0090_state *state = fe->tuner_priv; u16 PllCfg, i, v; HARD_RESET(state); dib0090_write_reg(state, 0x24, EN_PLL | EN_CRYSTAL); dib0090_write_reg(state, 0x1b, EN_DIGCLK | EN_PLL | EN_CRYSTAL); /* PLL, DIG_CLK and CRYSTAL remain */ if (!cfg->in_soc) { /* adcClkOutRatio=8->7, release reset */ dib0090_write_reg(state, 0x20, ((cfg->io.adc_clock_ratio - 1) << 11) | (0 << 10) | (1 << 9) | (1 << 8) | (0 << 4) | 0); if (cfg->clkoutdrive != 0) dib0090_write_reg(state, 0x23, (0 << 15) | ((!cfg->analog_output) << 14) | (2 << 10) | (1 << 9) | (0 << 8) | (cfg->clkoutdrive << 5) | (cfg->clkouttobamse << 4) | (0 << 2) | (0)); else dib0090_write_reg(state, 0x23, (0 << 15) | ((!cfg->analog_output) << 14) | (2 << 10) | (1 << 9) | (0 << 8) | (7 << 5) | (cfg->clkouttobamse << 4) | (0 << 2) | (0)); } /* Read Pll current config * */ PllCfg = dib0090_read_reg(state, 0x21); /** Reconfigure PLL if current setting is different from default setting **/ if ((PllCfg & 0x1FFF) != ((cfg->io.pll_range << 12) | (cfg->io.pll_loopdiv << 6) | (cfg->io.pll_prediv)) && (!cfg->in_soc) && !cfg->io.pll_bypass) { /* Set Bypass mode */ PllCfg |= (1 << 15); dib0090_write_reg(state, 0x21, PllCfg); /* Set Reset Pll */ PllCfg &= ~(1 << 13); dib0090_write_reg(state, 0x21, PllCfg); /*** Set new Pll configuration in bypass and reset state ***/ PllCfg = (1 << 15) | (0 << 13) | (cfg->io.pll_range << 12) | (cfg->io.pll_loopdiv << 6) | (cfg->io.pll_prediv); dib0090_write_reg(state, 0x21, PllCfg); /* Remove Reset Pll */ PllCfg |= (1 << 13); dib0090_write_reg(state, 0x21, PllCfg); /*** Wait for PLL lock ***/ i = 100; do { v = !!(dib0090_read_reg(state, 0x1a) & 0x800); if 
(v) break; } while (--i); if (i == 0) { dprintk("Pll: Unable to lock Pll"); return; } /* Finally Remove Bypass mode */ PllCfg &= ~(1 << 15); dib0090_write_reg(state, 0x21, PllCfg); } if (cfg->io.pll_bypass) { PllCfg |= (cfg->io.pll_bypass << 15); dib0090_write_reg(state, 0x21, PllCfg); } } static int dib0090_fw_reset_digital(struct dvb_frontend *fe, const struct dib0090_config *cfg) { struct dib0090_fw_state *state = fe->tuner_priv; u16 PllCfg; u16 v; int i; dprintk("fw reset digital"); HARD_RESET(state); dib0090_fw_write_reg(state, 0x24, EN_PLL | EN_CRYSTAL); dib0090_fw_write_reg(state, 0x1b, EN_DIGCLK | EN_PLL | EN_CRYSTAL); /* PLL, DIG_CLK and CRYSTAL remain */ dib0090_fw_write_reg(state, 0x20, ((cfg->io.adc_clock_ratio - 1) << 11) | (0 << 10) | (1 << 9) | (1 << 8) | (cfg->data_tx_drv << 4) | cfg->ls_cfg_pad_drv); v = (0 << 15) | ((!cfg->analog_output) << 14) | (1 << 9) | (0 << 8) | (cfg->clkouttobamse << 4) | (0 << 2) | (0); if (cfg->clkoutdrive != 0) v |= cfg->clkoutdrive << 5; else v |= 7 << 5; v |= 2 << 10; dib0090_fw_write_reg(state, 0x23, v); /* Read Pll current config * */ PllCfg = dib0090_fw_read_reg(state, 0x21); /** Reconfigure PLL if current setting is different from default setting **/ if ((PllCfg & 0x1FFF) != ((cfg->io.pll_range << 12) | (cfg->io.pll_loopdiv << 6) | (cfg->io.pll_prediv)) && !cfg->io.pll_bypass) { /* Set Bypass mode */ PllCfg |= (1 << 15); dib0090_fw_write_reg(state, 0x21, PllCfg); /* Set Reset Pll */ PllCfg &= ~(1 << 13); dib0090_fw_write_reg(state, 0x21, PllCfg); /*** Set new Pll configuration in bypass and reset state ***/ PllCfg = (1 << 15) | (0 << 13) | (cfg->io.pll_range << 12) | (cfg->io.pll_loopdiv << 6) | (cfg->io.pll_prediv); dib0090_fw_write_reg(state, 0x21, PllCfg); /* Remove Reset Pll */ PllCfg |= (1 << 13); dib0090_fw_write_reg(state, 0x21, PllCfg); /*** Wait for PLL lock ***/ i = 100; do { v = !!(dib0090_fw_read_reg(state, 0x1a) & 0x800); if (v) break; } while (--i); if (i == 0) { dprintk("Pll: Unable to lock Pll"); 
return -EIO; } /* Finally Remove Bypass mode */ PllCfg &= ~(1 << 15); dib0090_fw_write_reg(state, 0x21, PllCfg); } if (cfg->io.pll_bypass) { PllCfg |= (cfg->io.pll_bypass << 15); dib0090_fw_write_reg(state, 0x21, PllCfg); } return dib0090_fw_identify(fe); } static int dib0090_wakeup(struct dvb_frontend *fe) { struct dib0090_state *state = fe->tuner_priv; if (state->config->sleep) state->config->sleep(fe, 0); /* enable dataTX in case we have been restarted in the wrong moment */ dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) | (1 << 14)); return 0; } static int dib0090_sleep(struct dvb_frontend *fe) { struct dib0090_state *state = fe->tuner_priv; if (state->config->sleep) state->config->sleep(fe, 1); return 0; } void dib0090_dcc_freq(struct dvb_frontend *fe, u8 fast) { struct dib0090_state *state = fe->tuner_priv; if (fast) dib0090_write_reg(state, 0x04, 0); else dib0090_write_reg(state, 0x04, 1); } EXPORT_SYMBOL(dib0090_dcc_freq); static const u16 bb_ramp_pwm_normal_socs[] = { 550, /* max BB gain in 10th of dB */ (1 << 9) | 8, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> BB_RAMP2 */ 440, (4 << 9) | 0, /* BB_RAMP3 = 26dB */ (0 << 9) | 208, /* BB_RAMP4 */ (4 << 9) | 208, /* BB_RAMP5 = 29dB */ (0 << 9) | 440, /* BB_RAMP6 */ }; static const u16 rf_ramp_pwm_cband_7090[] = { 280, /* max RF gain in 10th of dB */ 18, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */ 504, /* ramp_max = maximum X used on the ramp */ (29 << 10) | 364, /* RF_RAMP5, LNA 1 = 8dB */ (0 << 10) | 504, /* RF_RAMP6, LNA 1 */ (60 << 10) | 228, /* RF_RAMP7, LNA 2 = 7.7dB */ (0 << 10) | 364, /* RF_RAMP8, LNA 2 */ (34 << 10) | 109, /* GAIN_4_1, LNA 3 = 6.8dB */ (0 << 10) | 228, /* GAIN_4_2, LNA 3 */ (37 << 10) | 0, /* RF_RAMP3, LNA 4 = 6.2dB */ (0 << 10) | 109, /* RF_RAMP4, LNA 4 */ }; static const u16 rf_ramp_pwm_cband_8090[] = { 345, /* max RF gain in 10th of dB */ 29, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = 
clk_khz / ramp_slope -> RF_RAMP2 */ 1000, /* ramp_max = maximum X used on the ramp */ (35 << 10) | 772, /* RF_RAMP3, LNA 1 = 8dB */ (0 << 10) | 1000, /* RF_RAMP4, LNA 1 */ (58 << 10) | 496, /* RF_RAMP5, LNA 2 = 9.5dB */ (0 << 10) | 772, /* RF_RAMP6, LNA 2 */ (27 << 10) | 200, /* RF_RAMP7, LNA 3 = 10.5dB */ (0 << 10) | 496, /* RF_RAMP8, LNA 3 */ (40 << 10) | 0, /* GAIN_4_1, LNA 4 = 7dB */ (0 << 10) | 200, /* GAIN_4_2, LNA 4 */ }; static const u16 rf_ramp_pwm_uhf_7090[] = { 407, /* max RF gain in 10th of dB */ 13, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */ 529, /* ramp_max = maximum X used on the ramp */ (23 << 10) | 0, /* RF_RAMP3, LNA 1 = 14.7dB */ (0 << 10) | 176, /* RF_RAMP4, LNA 1 */ (63 << 10) | 400, /* RF_RAMP5, LNA 2 = 8dB */ (0 << 10) | 529, /* RF_RAMP6, LNA 2 */ (48 << 10) | 316, /* RF_RAMP7, LNA 3 = 6.8dB */ (0 << 10) | 400, /* RF_RAMP8, LNA 3 */ (29 << 10) | 176, /* GAIN_4_1, LNA 4 = 11.5dB */ (0 << 10) | 316, /* GAIN_4_2, LNA 4 */ }; static const u16 rf_ramp_pwm_uhf_8090[] = { 388, /* max RF gain in 10th of dB */ 26, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */ 1008, /* ramp_max = maximum X used on the ramp */ (11 << 10) | 0, /* RF_RAMP3, LNA 1 = 14.7dB */ (0 << 10) | 369, /* RF_RAMP4, LNA 1 */ (41 << 10) | 809, /* RF_RAMP5, LNA 2 = 8dB */ (0 << 10) | 1008, /* RF_RAMP6, LNA 2 */ (27 << 10) | 659, /* RF_RAMP7, LNA 3 = 6dB */ (0 << 10) | 809, /* RF_RAMP8, LNA 3 */ (14 << 10) | 369, /* GAIN_4_1, LNA 4 = 11.5dB */ (0 << 10) | 659, /* GAIN_4_2, LNA 4 */ }; static const u16 rf_ramp_pwm_cband[] = { 0, /* max RF gain in 10th of dB */ 0, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> 0x2b */ 0, /* ramp_max = maximum X used on the ramp */ (0 << 10) | 0, /* 0x2c, LNA 1 = 0dB */ (0 << 10) | 0, /* 0x2d, LNA 1 */ (0 << 10) | 0, /* 0x2e, LNA 2 = 0dB */ (0 << 10) | 0, /* 0x2f, LNA 2 */ (0 << 10) | 0, /* 0x30, LNA 3 = 0dB */ (0 << 10) | 0, /* 0x31, LNA 3 
*/ (0 << 10) | 0, /* GAIN_4_1, LNA 4 = 0dB */ (0 << 10) | 0, /* GAIN_4_2, LNA 4 */ }; static const u16 rf_ramp_vhf[] = { 412, /* max RF gain in 10th of dB */ 132, 307, 127, /* LNA1, 13.2dB */ 105, 412, 255, /* LNA2, 10.5dB */ 50, 50, 127, /* LNA3, 5dB */ 125, 175, 127, /* LNA4, 12.5dB */ 0, 0, 127, /* CBAND, 0dB */ }; static const u16 rf_ramp_uhf[] = { 412, /* max RF gain in 10th of dB */ 132, 307, 127, /* LNA1 : total gain = 13.2dB, point on the ramp where this amp is full gain, value to write to get full gain */ 105, 412, 255, /* LNA2 : 10.5 dB */ 50, 50, 127, /* LNA3 : 5.0 dB */ 125, 175, 127, /* LNA4 : 12.5 dB */ 0, 0, 127, /* CBAND : 0.0 dB */ }; static const u16 rf_ramp_cband_broadmatching[] = /* for p1G only */ { 314, /* Calibrated at 200MHz order has been changed g4-g3-g2-g1 */ 84, 314, 127, /* LNA1 */ 80, 230, 255, /* LNA2 */ 80, 150, 127, /* LNA3 It was measured 12dB, do not lock if 120 */ 70, 70, 127, /* LNA4 */ 0, 0, 127, /* CBAND */ }; static const u16 rf_ramp_cband[] = { 332, /* max RF gain in 10th of dB */ 132, 252, 127, /* LNA1, dB */ 80, 332, 255, /* LNA2, dB */ 0, 0, 127, /* LNA3, dB */ 0, 0, 127, /* LNA4, dB */ 120, 120, 127, /* LT1 CBAND */ }; static const u16 rf_ramp_pwm_vhf[] = { 404, /* max RF gain in 10th of dB */ 25, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> 0x2b */ 1011, /* ramp_max = maximum X used on the ramp */ (6 << 10) | 417, /* 0x2c, LNA 1 = 13.2dB */ (0 << 10) | 756, /* 0x2d, LNA 1 */ (16 << 10) | 756, /* 0x2e, LNA 2 = 10.5dB */ (0 << 10) | 1011, /* 0x2f, LNA 2 */ (16 << 10) | 290, /* 0x30, LNA 3 = 5dB */ (0 << 10) | 417, /* 0x31, LNA 3 */ (7 << 10) | 0, /* GAIN_4_1, LNA 4 = 12.5dB */ (0 << 10) | 290, /* GAIN_4_2, LNA 4 */ }; static const u16 rf_ramp_pwm_uhf[] = { 404, /* max RF gain in 10th of dB */ 25, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> 0x2b */ 1011, /* ramp_max = maximum X used on the ramp */ (6 << 10) | 417, /* 0x2c, LNA 1 = 13.2dB */ (0 << 10) | 756, /* 
0x2d, LNA 1 */ (16 << 10) | 756, /* 0x2e, LNA 2 = 10.5dB */ (0 << 10) | 1011, /* 0x2f, LNA 2 */ (16 << 10) | 0, /* 0x30, LNA 3 = 5dB */ (0 << 10) | 127, /* 0x31, LNA 3 */ (7 << 10) | 127, /* GAIN_4_1, LNA 4 = 12.5dB */ (0 << 10) | 417, /* GAIN_4_2, LNA 4 */ }; static const u16 bb_ramp_boost[] = { 550, /* max BB gain in 10th of dB */ 260, 260, 26, /* BB1, 26dB */ 290, 550, 29, /* BB2, 29dB */ }; static const u16 bb_ramp_pwm_normal[] = { 500, /* max RF gain in 10th of dB */ 8, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> 0x34 */ 400, (2 << 9) | 0, /* 0x35 = 21dB */ (0 << 9) | 168, /* 0x36 */ (2 << 9) | 168, /* 0x37 = 29dB */ (0 << 9) | 400, /* 0x38 */ }; struct slope { s16 range; s16 slope; }; static u16 slopes_to_scale(const struct slope *slopes, u8 num, s16 val) { u8 i; u16 rest; u16 ret = 0; for (i = 0; i < num; i++) { if (val > slopes[i].range) rest = slopes[i].range; else rest = val; ret += (rest * slopes[i].slope) / slopes[i].range; val -= rest; } return ret; } static const struct slope dib0090_wbd_slopes[3] = { {66, 120}, /* -64,-52: offset - 65 */ {600, 170}, /* -52,-35: 65 - 665 */ {170, 250}, /* -45,-10: 665 - 835 */ }; static s16 dib0090_wbd_to_db(struct dib0090_state *state, u16 wbd) { wbd &= 0x3ff; if (wbd < state->wbd_offset) wbd = 0; else wbd -= state->wbd_offset; /* -64dB is the floor */ return -640 + (s16) slopes_to_scale(dib0090_wbd_slopes, ARRAY_SIZE(dib0090_wbd_slopes), wbd); } static void dib0090_wbd_target(struct dib0090_state *state, u32 rf) { u16 offset = 250; /* TODO : DAB digital N+/-1 interferer perfs : offset = 10 */ if (state->current_band == BAND_VHF) offset = 650; #ifndef FIRMWARE_FIREFLY if (state->current_band == BAND_VHF) offset = state->config->wbd_vhf_offset; if (state->current_band == BAND_CBAND) offset = state->config->wbd_cband_offset; #endif state->wbd_target = dib0090_wbd_to_db(state, state->wbd_offset + offset); dprintk("wbd-target: %d dB", (u32) state->wbd_target); } static const int 
gain_reg_addr[4] = { 0x08, 0x0a, 0x0f, 0x01 }; static void dib0090_gain_apply(struct dib0090_state *state, s16 gain_delta, s16 top_delta, u8 force) { u16 rf, bb, ref; u16 i, v, gain_reg[4] = { 0 }, gain; const u16 *g; if (top_delta < -511) top_delta = -511; if (top_delta > 511) top_delta = 511; if (force) { top_delta *= (1 << WBD_ALPHA); gain_delta *= (1 << GAIN_ALPHA); } if (top_delta >= ((s16) (state->rf_ramp[0] << WBD_ALPHA) - state->rf_gain_limit)) /* overflow */ state->rf_gain_limit = state->rf_ramp[0] << WBD_ALPHA; else state->rf_gain_limit += top_delta; if (state->rf_gain_limit < 0) /*underflow */ state->rf_gain_limit = 0; /* use gain as a temporary variable and correct current_gain */ gain = ((state->rf_gain_limit >> WBD_ALPHA) + state->bb_ramp[0]) << GAIN_ALPHA; if (gain_delta >= ((s16) gain - state->current_gain)) /* overflow */ state->current_gain = gain; else state->current_gain += gain_delta; /* cannot be less than 0 (only if gain_delta is less than 0 we can have current_gain < 0) */ if (state->current_gain < 0) state->current_gain = 0; /* now split total gain to rf and bb gain */ gain = state->current_gain >> GAIN_ALPHA; /* requested gain is bigger than rf gain limit - ACI/WBD adjustment */ if (gain > (state->rf_gain_limit >> WBD_ALPHA)) { rf = state->rf_gain_limit >> WBD_ALPHA; bb = gain - rf; if (bb > state->bb_ramp[0]) bb = state->bb_ramp[0]; } else { /* high signal level -> all gains put on RF */ rf = gain; bb = 0; } state->gain[0] = rf; state->gain[1] = bb; /* software ramp */ /* Start with RF gains */ g = state->rf_ramp + 1; /* point on RF LNA1 max gain */ ref = rf; for (i = 0; i < 7; i++) { /* Go over all amplifiers => 5RF amps + 2 BB amps = 7 amps */ if (g[0] == 0 || ref < (g[1] - g[0])) /* if total gain of the current amp is null or this amp is not concerned because it starts to work from an higher gain value */ v = 0; /* force the gain to write for the current amp to be null */ else if (ref >= g[1]) /* Gain to set is higher than the high 
working point of this amp */ v = g[2]; /* force this amp to be full gain */ else /* compute the value to set to this amp because we are somewhere in his range */ v = ((ref - (g[1] - g[0])) * g[2]) / g[0]; if (i == 0) /* LNA 1 reg mapping */ gain_reg[0] = v; else if (i == 1) /* LNA 2 reg mapping */ gain_reg[0] |= v << 7; else if (i == 2) /* LNA 3 reg mapping */ gain_reg[1] = v; else if (i == 3) /* LNA 4 reg mapping */ gain_reg[1] |= v << 7; else if (i == 4) /* CBAND LNA reg mapping */ gain_reg[2] = v | state->rf_lt_def; else if (i == 5) /* BB gain 1 reg mapping */ gain_reg[3] = v << 3; else if (i == 6) /* BB gain 2 reg mapping */ gain_reg[3] |= v << 8; g += 3; /* go to next gain bloc */ /* When RF is finished, start with BB */ if (i == 4) { g = state->bb_ramp + 1; /* point on BB gain 1 max gain */ ref = bb; } } gain_reg[3] |= state->bb_1_def; gain_reg[3] |= ((bb % 10) * 100) / 125; #ifdef DEBUG_AGC dprintk("GA CALC: DB: %3d(rf) + %3d(bb) = %3d gain_reg[0]=%04x gain_reg[1]=%04x gain_reg[2]=%04x gain_reg[0]=%04x", rf, bb, rf + bb, gain_reg[0], gain_reg[1], gain_reg[2], gain_reg[3]); #endif /* Write the amplifier regs */ for (i = 0; i < 4; i++) { v = gain_reg[i]; if (force || state->gain_reg[i] != v) { state->gain_reg[i] = v; dib0090_write_reg(state, gain_reg_addr[i], v); } } } static void dib0090_set_boost(struct dib0090_state *state, int onoff) { state->bb_1_def &= 0xdfff; state->bb_1_def |= onoff << 13; } static void dib0090_set_rframp(struct dib0090_state *state, const u16 * cfg) { state->rf_ramp = cfg; } static void dib0090_set_rframp_pwm(struct dib0090_state *state, const u16 * cfg) { state->rf_ramp = cfg; dib0090_write_reg(state, 0x2a, 0xffff); dprintk("total RF gain: %ddB, step: %d", (u32) cfg[0], dib0090_read_reg(state, 0x2a)); dib0090_write_regs(state, 0x2c, cfg + 3, 6); dib0090_write_regs(state, 0x3e, cfg + 9, 2); } static void dib0090_set_bbramp(struct dib0090_state *state, const u16 * cfg) { state->bb_ramp = cfg; dib0090_set_boost(state, cfg[0] > 500); /* 
we want the boost if the gain is higher that 50dB */
}

/*
 * Program the baseband (BB) gain ramp used by the PWM-driven AGC.
 * cfg[0] holds the total BB gain (apparently in 1/10 dB, given the
 * "gain higher than 50dB" check against 500 — TODO confirm unit);
 * cfg[3..6] are written to registers 0x35..0x38.
 */
static void dib0090_set_bbramp_pwm(struct dib0090_state *state, const u16 * cfg)
{
	state->bb_ramp = cfg;

	/* we want the boost if the gain is higher than 50dB */
	dib0090_set_boost(state, cfg[0] > 500);

	dib0090_write_reg(state, 0x33, 0xffff);
	dprintk("total BB gain: %ddB, step: %d", (u32) cfg[0], dib0090_read_reg(state, 0x33));
	dib0090_write_regs(state, 0x35, cfg + 3, 4);
}

/*
 * Reset the PWM AGC: select the RF and BB gain ramps matching the current
 * band and chip flavour (SoC vs. discrete, 7090 vs. 8090), then re-arm the
 * AGC engine (registers 0x32, 0x04, 0x39).  No-op unless the configuration
 * enables PWM AGC.
 */
void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
{
	struct dib0090_state *state = fe->tuner_priv;
	/* reset the AGC */

	if (state->config->use_pwm_agc) {
#ifdef CONFIG_BAND_SBAND
		if (state->current_band == BAND_SBAND) {
			dib0090_set_rframp_pwm(state, rf_ramp_pwm_sband);
			dib0090_set_bbramp_pwm(state, bb_ramp_pwm_boost);
		} else
#endif
#ifdef CONFIG_BAND_CBAND
		if (state->current_band == BAND_CBAND) {
			if (state->identity.in_soc) {
				dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal_socs);
				if (state->identity.version == SOC_8090_P1G_11R1 || state->identity.version == SOC_8090_P1G_21R1)
					dib0090_set_rframp_pwm(state, rf_ramp_pwm_cband_8090);
				else if (state->identity.version == SOC_7090_P1G_11R1 || state->identity.version == SOC_7090_P1G_21R1)
					dib0090_set_rframp_pwm(state, rf_ramp_pwm_cband_7090);
			} else {
				dib0090_set_rframp_pwm(state, rf_ramp_pwm_cband);
				dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal);
			}
		} else
#endif
#ifdef CONFIG_BAND_VHF
		if (state->current_band == BAND_VHF) {
			if (state->identity.in_soc) {
				/* NOTE(review): no RF ramp is programmed here for the
				 * SoC/VHF case — presumably intentional; confirm. */
				dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal_socs);
			} else {
				dib0090_set_rframp_pwm(state, rf_ramp_pwm_vhf);
				dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal);
			}
		} else
#endif
		{
			/* default: UHF */
			if (state->identity.in_soc) {
				if (state->identity.version == SOC_8090_P1G_11R1 || state->identity.version == SOC_8090_P1G_21R1)
					dib0090_set_rframp_pwm(state, rf_ramp_pwm_uhf_8090);
				else if (state->identity.version == SOC_7090_P1G_11R1 || state->identity.version == SOC_7090_P1G_21R1)
					dib0090_set_rframp_pwm(state, rf_ramp_pwm_uhf_7090);
				dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal_socs);
			} else {
				dib0090_set_rframp_pwm(state, rf_ramp_pwm_uhf);
				dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal);
			}
		}

		/* enable the AGC engine only if an RF ramp is actually defined */
		if (state->rf_ramp[0] != 0)
			dib0090_write_reg(state, 0x32, (3 << 11));
		else
			dib0090_write_reg(state, 0x32, (0 << 11));

		dib0090_write_reg(state, 0x04, 0x01);
		dib0090_write_reg(state, 0x39, (1 << 10));
	}
}
EXPORT_SYMBOL(dib0090_pwm_gain_reset);

/*
 * Read the slow ADC (register 0x1d).  On SoC parts the value is
 * right-shifted by 2 — presumably to compensate a wider ADC; confirm
 * against the datasheet.
 */
static u32 dib0090_get_slow_adc_val(struct dib0090_state *state)
{
	u16 adc_val = dib0090_read_reg(state, 0x1d);
	if (state->identity.in_soc)
		adc_val >>= 2;

	return adc_val;
}

/*
 * One step of the software AGC state machine.
 *
 * CT_AGC_START: select the RF/BB gain ramps for the current band, reset the
 * AGC registers and compute the initial gain/limits.
 * Later states: measure the wideband detector (WBD) and ADC power, derive
 * wbd_error/adc_error and apply the gain correction via dib0090_gain_apply().
 *
 * Returns the delay (in units of 100us, judging by the "10ms interval"
 * comment next to ret = 100 — TODO confirm) until the caller should invoke
 * this function again.
 */
int dib0090_gain_control(struct dvb_frontend *fe)
{
	struct dib0090_state *state = fe->tuner_priv;
	enum frontend_tune_state *tune_state = &state->tune_state;
	int ret = 10;

	u16 wbd_val = 0;
	u8 apply_gain_immediatly = 1;
	s16 wbd_error = 0, adc_error = 0;

	if (*tune_state == CT_AGC_START) {
		state->agc_freeze = 0;
		dib0090_write_reg(state, 0x04, 0x0);

#ifdef CONFIG_BAND_SBAND
		if (state->current_band == BAND_SBAND) {
			dib0090_set_rframp(state, rf_ramp_sband);
			dib0090_set_bbramp(state, bb_ramp_boost);
		} else
#endif
#ifdef CONFIG_BAND_VHF
		if (state->current_band == BAND_VHF && !state->identity.p1g) {
			dib0090_set_rframp(state, rf_ramp_vhf);
			dib0090_set_bbramp(state, bb_ramp_boost);
		} else
#endif
#ifdef CONFIG_BAND_CBAND
		if (state->current_band == BAND_CBAND && !state->identity.p1g) {
			dib0090_set_rframp(state, rf_ramp_cband);
			dib0090_set_bbramp(state, bb_ramp_boost);
		} else
#endif
		if ((state->current_band == BAND_CBAND || state->current_band == BAND_VHF) && state->identity.p1g) {
			dib0090_set_rframp(state, rf_ramp_cband_broadmatching);
			dib0090_set_bbramp(state, bb_ramp_boost);
		} else {
			dib0090_set_rframp(state, rf_ramp_uhf);
			dib0090_set_bbramp(state, bb_ramp_boost);
		}

		dib0090_write_reg(state, 0x32, 0);
		dib0090_write_reg(state, 0x39, 0);

		dib0090_wbd_target(state, state->current_rf);

		state->rf_gain_limit = state->rf_ramp[0] << WBD_ALPHA;
		state->current_gain = ((state->rf_ramp[0] + state->bb_ramp[0]) / 2) << GAIN_ALPHA;

		*tune_state = CT_AGC_STEP_0;
	} else if (!state->agc_freeze) {
		s16 wbd = 0, i, cnt;

		int adc;
		wbd_val = dib0090_get_slow_adc_val(state);

		/* average 5 WBD samples on the first step, 1 afterwards */
		if (*tune_state == CT_AGC_STEP_0)
			cnt = 5;
		else
			cnt = 1;

		for (i = 0; i < cnt; i++) {
			wbd_val = dib0090_get_slow_adc_val(state);
			wbd += dib0090_wbd_to_db(state, wbd_val);
		}
		wbd /= cnt;
		wbd_error = state->wbd_target - wbd;

		if (*tune_state == CT_AGC_STEP_0) {
			if (wbd_error < 0 && state->rf_gain_limit > 0 && !state->identity.p1g) {
#ifdef CONFIG_BAND_CBAND
				/* in case of CBAND tune reduce first the lt_gain2 before adjusting the RF gain */
				u8 ltg2 = (state->rf_lt_def >> 10) & 0x7;
				if (state->current_band == BAND_CBAND && ltg2) {
					ltg2 >>= 1;
					/* NOTE(review): this AND-masks the whole register with
					 * (ltg2 << 10) instead of replacing only the 3-bit
					 * lt_gain2 field; looks suspicious — it also clears
					 * every other bit of rf_lt_def.  Confirm against the
					 * vendor driver before "fixing". */
					state->rf_lt_def &= ltg2 << 10;	/* reduce in 3 steps from 7 to 0 */
				}
#endif
			} else {
				state->agc_step = 0;
				*tune_state = CT_AGC_STEP_1;
			}
		} else {
			/* calc the adc power */
			adc = state->config->get_adc_power(fe);
			adc = (adc * ((s32) 355774) + (((s32) 1) << 20)) >> 21;	/* included in [0:-700] */

			adc_error = (s16) (((s32) ADC_TARGET) - adc);
#ifdef CONFIG_STANDARD_DAB
			if (state->fe->dtv_property_cache.delivery_system == STANDARD_DAB)
				adc_error -= 10;
#endif
#ifdef CONFIG_STANDARD_DVBT
			if (state->fe->dtv_property_cache.delivery_system == STANDARD_DVBT &&
					(state->fe->dtv_property_cache.modulation == QAM_64 || state->fe->dtv_property_cache.modulation == QAM_16))
				adc_error += 60;
#endif
#ifdef CONFIG_SYS_ISDBT
			/* raise the ADC target when any active ISDB-T layer uses
			 * QAM-16/QAM-64 */
			if ((state->fe->dtv_property_cache.delivery_system == SYS_ISDBT) && (((state->fe->dtv_property_cache.layer[0].segment_count > 0)
							&& ((state->fe->dtv_property_cache.layer[0].modulation == QAM_64)
								|| (state->fe->dtv_property_cache. layer[0].modulation == QAM_16)))
						|| ((state->fe->dtv_property_cache.layer[1].segment_count > 0)
							&& ((state->fe->dtv_property_cache.layer[1].modulation == QAM_64)
								|| (state->fe->dtv_property_cache. layer[1].modulation == QAM_16)))
						|| ((state->fe->dtv_property_cache.layer[2].segment_count > 0)
							&& ((state->fe->dtv_property_cache.layer[2].modulation == QAM_64)
								|| (state->fe->dtv_property_cache. layer[2].modulation == QAM_16)))
					)
				)
				adc_error += 60;
#endif

			if (*tune_state == CT_AGC_STEP_1) {	/* quickly go to the correct range of the ADC power */
				if (ABS(adc_error) < 50 || state->agc_step++ > 5) {

#ifdef CONFIG_STANDARD_DAB
					if (state->fe->dtv_property_cache.delivery_system == STANDARD_DAB) {
						dib0090_write_reg(state, 0x02, (1 << 15) | (15 << 11) | (31 << 6) | (63));	/* cap value = 63 : narrow BB filter : Fc = 1.8MHz */
						dib0090_write_reg(state, 0x04, 0x0);
					} else
#endif
					{
						dib0090_write_reg(state, 0x02, (1 << 15) | (3 << 11) | (6 << 6) | (32));
						dib0090_write_reg(state, 0x04, 0x01);	/*0 = 1KHz ; 1 = 150Hz ; 2 = 50Hz ; 3 = 50KHz ; 4 = servo fast */
					}

					*tune_state = CT_AGC_STOP;
				}
			} else {
				/* everything higher than or equal to CT_AGC_STOP means tracking */
				ret = 100;	/* 10ms interval */
				apply_gain_immediatly = 0;
			}
		}

#ifdef DEBUG_AGC
		dprintk
			("tune state %d, ADC = %3ddB (ADC err %3d) WBD %3ddB (WBD err %3d, WBD val SADC: %4d), RFGainLimit (TOP): %3d, signal: %3ddBm",
			 (u32) *tune_state, (u32) adc, (u32) adc_error, (u32) wbd, (u32) wbd_error, (u32) wbd_val, (u32) state->rf_gain_limit >> WBD_ALPHA,
			 (s32) 200 + adc - (state->current_gain >> GAIN_ALPHA));
#endif
	}

	/* apply gain */
	if (!state->agc_freeze)
		dib0090_gain_apply(state, adc_error, wbd_error, apply_gain_immediatly);
	return ret;
}
EXPORT_SYMBOL(dib0090_gain_control);

/*
 * Report the current AGC state to the caller.  Any of the output pointers
 * may be NULL if the caller is not interested in that value.
 */
void dib0090_get_current_gain(struct dvb_frontend *fe, u16 * rf, u16 * bb, u16 * rf_gain_limit, u16 * rflt)
{
	struct dib0090_state *state = fe->tuner_priv;
	if (rf)
		*rf = state->gain[0];
	if (bb)
		*bb = state->gain[1];
	if (rf_gain_limit)
		*rf_gain_limit = state->rf_gain_limit;
	if (rflt)
		*rflt = (state->rf_lt_def >> 10) & 0x7;
}
EXPORT_SYMBOL(dib0090_get_current_gain);

/*
 * Compute the temperature/frequency-compensated WBD offset for the current
 * RF frequency: pick the matching slope-table entry, select the WBD gain on
 * the mux (register 0x10), then interpolate between the "cold" and "hot"
 * calibration lines using the last measured temperature.
 * Returns wbd_offset plus the interpolated correction.
 */
u16 dib0090_get_wbd_offset(struct dvb_frontend *fe)
{
	struct dib0090_state *state = fe->tuner_priv;
	u32 f_MHz = state->fe->dtv_property_cache.frequency / 1000000;
	s32 current_temp = state->temperature;
	s32 wbd_thot, wbd_tcold;
	const struct dib0090_wbd_slope *wbd = state->current_wbd_table;
	/* find the table entry covering f_MHz (table ends with max_freq 0xFFFF) */
	while (f_MHz > wbd->max_freq)
		wbd++;

	dprintk("using wbd-table-entry with max freq %d", wbd->max_freq);

	/* clamp the temperature into the interpolation range [0, 128] */
	if (current_temp < 0)
		current_temp = 0;
	if (current_temp > 128)
		current_temp = 128;

	/* select the WBD gain in the mux (bits 15:13 of reg 0x10); 4 is the
	 * default when the table entry specifies none */
	state->wbdmux &= ~(7 << 13);
	if (wbd->wbd_gain != 0)
		state->wbdmux |= (wbd->wbd_gain << 13);
	else
		state->wbdmux |= (4 << 13);

	dib0090_write_reg(state, 0x10, state->wbdmux);

	/* linear frequency correction on both calibration lines, then linear
	 * interpolation between "cold" and "hot" by temperature/128 */
	wbd_thot = wbd->offset_hot - (((u32) wbd->slope_hot * f_MHz) >> 6);
	wbd_tcold = wbd->offset_cold - (((u32) wbd->slope_cold * f_MHz) >> 6);

	wbd_tcold += ((wbd_thot - wbd_tcold) * current_temp) >> 7;

	state->wbd_target = dib0090_wbd_to_db(state, state->wbd_offset + wbd_tcold);
	dprintk("wbd-target: %d dB", (u32) state->wbd_target);
	dprintk("wbd offset applied is %d", wbd_tcold);

	return state->wbd_offset + wbd_tcold;
}
EXPORT_SYMBOL(dib0090_get_wbd_offset);

/*
 * Default register contents, consumed by dib0090_set_default_config().
 * Format: repeated records of { count, start_register, count values... },
 * terminated by a count of 0.  The first record loads 25 consecutive
 * registers starting at 0x01.
 */
static const u16 dib0090_defaults[] = {

	25, 0x01,
	0x0000,
	0x99a0,
	0x6008,
	0x0000,
	0x8bcb,
	0x0000,
	0x0405,
	0x0000,
	0x0000,
	0x0000,
	0xb802,
	0x0300,
	0x2d12,
	0xbac0,
	0x7c00,
	0xdbb9,
	0x0954,
	0x0743,
	0x8000,
	0x0001,
	0x0040,
	0x0100,
	0x0000,
	0xe910,
	0x149e,

	1, 0x1c,
	0xff2d,

	1, 0x39,
	0x0000,

	2, 0x1e,
	0x07FF,
	0x0007,

	1, 0x24,
	EN_UHF | EN_CRYSTAL,

	2, 0x3c,
	0x3ff,
	0x111,

	0,
};

/* extra register overrides applied on P1G silicon only (same format) */
static const u16 dib0090_p1g_additionnal_defaults[] = {
	1, 0x05,
	0xabcd,

	1, 0x11,
	0x00b4,

	1, 0x1c,
	0xfffd,

	1, 0x40,
	0x108,

	0,
};

/*
 * Walk a { count, start_register, values... } table (see dib0090_defaults)
 * and write each value to consecutive registers.  pgm_read_word() is used
 * so the table may live in program memory on platforms that need it.
 */
static void dib0090_set_default_config(struct dib0090_state *state, const u16 * n)
{
	u16 l, r;

	l = pgm_read_word(n++);
	while (l) {
		r = pgm_read_word(n++);
		do {
			dib0090_write_reg(state, r, pgm_read_word(n++));
			r++;
		} while (--l);
		l = pgm_read_word(n++);
	}
}

/* sanity limits for the eFUSE-programmed trim fields (exclusive bounds:
 * values equal to the min/max are also rejected below) */
#define CAP_VALUE_MIN (u8)  9
#define CAP_VALUE_MAX (u8) 40
#define HR_MIN	(u8) 25
#define HR_MAX	(u8) 40
#define POLY_MIN	(u8) 0
#define POLY_MAX	(u8) 8

/*
 * Load the factory eFUSE calibration (registers 0x26/0x28) into the
 * baseband trim registers.  For P1D_E_F/P1G parts, or when the fuse is
 * blank (0xffff), a fallback value is derived from an on-chip measurement
 * via register 0x22.  Out-of-range cap/HR/poly fields are replaced by
 * safe defaults (32/34/3).
 */
void dib0090_set_EFUSE(struct dib0090_state *state)
{
	u8 c, h, n;
	u16 e2, e4;
	u16 cal;

	e2 = dib0090_read_reg(state, 0x26);
	e4 = dib0090_read_reg(state, 0x28);

	if ((state->identity.version == P1D_E_F) ||
			(state->identity.version == P1G) || (e2 == 0xffff)) {

		dib0090_write_reg(state, 0x22, 0x10);
		cal = (dib0090_read_reg(state, 0x22) >> 6) & 0x3ff;

		if ((cal < 670) || (cal == 1023))
			cal = 850;
		n = 165 - ((cal * 10)>>6) ;
		e2 = e4 = (3<<12) | (34<<6) | (n);
	}

	if (e2 != e4)
		e2 &= e4; /* Remove the redundancy  */

	if (e2 != 0xffff) {
		c = e2 & 0x3f;
		n = (e2 >> 12) & 0xf;
		h = (e2 >> 6) & 0x3f;

		if ((c >= CAP_VALUE_MAX) || (c <= CAP_VALUE_MIN))
			c = 32;
		if ((h >= HR_MAX) || (h <= HR_MIN))
			h = 34;
		if ((n >= POLY_MAX) || (n <= POLY_MIN))
			n = 3;

		dib0090_write_reg(state, 0x13, (h << 10)) ;
		e2 = (n<<11) | ((h>>2)<<6) | (c);
		dib0090_write_reg(state, 0x2, e2) ; /* Load the BB_2 */
	}
}

/*
 * Full chip reset and bring-up: reset the digital part, identify the chip,
 * power up the analog blocks (non-SoC only), load the default register
 * tables (plus P1G extras), apply the eFUSE trim where available, and set
 * the crystal divider.  Schedules DC/WBD/temperature calibration for the
 * next tune.  Returns 0 on success, -EIO if identification fails.
 */
static int dib0090_reset(struct dvb_frontend *fe)
{
	struct dib0090_state *state = fe->tuner_priv;

	dib0090_reset_digital(fe, state->config);
	if (dib0090_identify(fe) < 0)
		return -EIO;

#ifdef CONFIG_TUNER_DIB0090_P1B_SUPPORT
	if (!(state->identity.version & 0x1))	/* it is P1B - reset is already done */
		return 0;
#endif

	if (!state->identity.in_soc) {
		if ((dib0090_read_reg(state, 0x1a) >> 5) & 0x2)
			dib0090_write_reg(state, 0x1b, (EN_IQADC | EN_BB | EN_BIAS | EN_DIGCLK | EN_PLL | EN_CRYSTAL));
		else
			dib0090_write_reg(state, 0x1b, (EN_DIGCLK | EN_PLL | EN_CRYSTAL));
	}

	dib0090_set_default_config(state, dib0090_defaults);

	if (state->identity.in_soc)
		dib0090_write_reg(state, 0x18, 0x2910);	/* charge pump current = 0 */

	if (state->identity.p1g)
		dib0090_set_default_config(state, dib0090_p1g_additionnal_defaults);

	/* Update the efuse : Only available for KROSUS > P1C and SOC as well*/
	if (((state->identity.version & 0x1f) >= P1D_E_F) || (state->identity.in_soc))
		dib0090_set_EFUSE(state);

	/* Configure in function of the crystal */
	if (state->config->io.clock_khz >= 24000)
		dib0090_write_reg(state, 0x14, 1);
	else
		dib0090_write_reg(state, 0x14, 2);
	dprintk("Pll lock : %d", (dib0090_read_reg(state, 0x1a) >> 11) & 0x1);

	state->calibrate = DC_CAL | WBD_CAL | TEMP_CAL;	/* enable iq-offset-calibration and wbd-calibration when tuning next time */

	return 0;
}

/* fold the 5-bit signed trim code into its magnitude (values >15 encode
 * the opposite sign: 16..31 map to 0..15) — continued on the next line */
#define steps(u) (((u) >
15) ? ((u)-16) : (u))
#define INTERN_WAIT 10

/*
 * Sub-state-machine of the DC-offset calibration: measure the ADC
 * difference between the "positive" and "negative" polarity of the
 * comparator (register 0x1f), leaving the result in state->adc_diff.
 * Returns the wait time before the next step (INTERN_WAIT * 10, i.e.
 * presumably in 100us units — TODO confirm against the caller).
 */
static int dib0090_get_offset(struct dib0090_state *state, enum frontend_tune_state *tune_state)
{
	int ret = INTERN_WAIT * 10;

	switch (*tune_state) {
	case CT_TUNER_STEP_2:
		/* Turns to positive */
		dib0090_write_reg(state, 0x1f, 0x7);
		*tune_state = CT_TUNER_STEP_3;
		break;

	case CT_TUNER_STEP_3:
		state->adc_diff = dib0090_read_reg(state, 0x1d);

		/* Turns to negative */
		dib0090_write_reg(state, 0x1f, 0x4);
		*tune_state = CT_TUNER_STEP_4;
		break;

	case CT_TUNER_STEP_4:
		state->adc_diff -= dib0090_read_reg(state, 0x1d);
		*tune_state = CT_TUNER_STEP_5;
		ret = 0;
		break;

	default:
		break;
	}

	return ret;
}

/* one entry per trim field to calibrate: which trim register (addr), the
 * bit offset of the 5-bit trim field inside it, whether the PGA path is
 * involved, the CTRL_BB1 gain setting to use while measuring, and whether
 * this is the I (1) or Q (0) path */
struct dc_calibration {
	u8 addr;
	u8 offset;
	u8 pga:1;
	u16 bb1;
	u8 i:1;
};

static const struct dc_calibration dc_table[] = {
	/* Step1 BB gain1= 26 with boost 1, gain 2 = 0 */
	{0x06, 5, 1, (1 << 13) | (0 << 8) | (26 << 3), 1},
	{0x07, 11, 1, (1 << 13) | (0 << 8) | (26 << 3), 0},
	/* Step 2 BB gain 1 = 26 with boost = 1 & gain 2 = 29 */
	{0x06, 0, 0, (1 << 13) | (29 << 8) | (26 << 3), 1},
	{0x06, 10, 0, (1 << 13) | (29 << 8) | (26 << 3), 0},
	{0},
};

static const struct dc_calibration dc_p1g_table[] = {
	/* Step1 BB gain1= 26 with boost 1, gain 2 = 0 */
	/* addr ; trim reg offset ; pga ; CTRL_BB1 value ; i or q */
	{0x06, 5, 1, (1 << 13) | (0 << 8) | (15 << 3), 1},
	{0x07, 11, 1, (1 << 13) | (0 << 8) | (15 << 3), 0},
	/* Step 2 BB gain 1 = 26 with boost = 1 & gain 2 = 29 */
	{0x06, 0, 0, (1 << 13) | (29 << 8) | (15 << 3), 1},
	{0x06, 10, 0, (1 << 13) | (29 << 8) | (15 << 3), 0},
	{0},
};

/*
 * Write the current trim step (state->step) into the 5-bit field of the
 * trim register selected by the active dc_calibration entry, using the
 * cached shadow copies state->bb6/state->bb7.
 */
static void dib0090_set_trim(struct dib0090_state *state)
{
	u16 *val;

	if (state->dc->addr == 0x07)
		val = &state->bb7;
	else
		val = &state->bb6;

	*val &= ~(0x1f << state->dc->offset);
	*val |= state->step << state->dc->offset;

	dib0090_write_reg(state, state->dc->addr, *val);
}

/*
 * DC-offset calibration state machine: for each entry of dc_table (or
 * dc_p1g_table on P1G) search the 5-bit trim value that minimises the
 * I/Q DC offset, detecting the sign change of adc_diff between steps.
 * Clears DC_CAL in state->calibrate when every entry has been processed.
 * Returns the delay until the next invocation.
 */
static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum frontend_tune_state *tune_state)
{
	int ret = 0;
	u16 reg;

	switch (*tune_state) {
	case CT_TUNER_START:
		dprintk("Start DC offset calibration");

		/* force vcm2 = 0.8V */
		state->bb6 = 0;
		state->bb7 = 0x040d;

		/* the LNA AND LO are off */
		reg = dib0090_read_reg(state, 0x24) & 0x0ffb;	/* shutdown lna and lo */
		dib0090_write_reg(state, 0x24, reg);

		state->wbdmux = dib0090_read_reg(state, 0x10);
		dib0090_write_reg(state, 0x10, (state->wbdmux & ~(0xff << 3)) | (0x7 << 3) | 0x3);
		dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) & ~(1 << 14));

		state->dc = dc_table;
		if (state->identity.p1g)
			state->dc = dc_p1g_table;
		*tune_state = CT_TUNER_STEP_0;

		/* fall through */

	case CT_TUNER_STEP_0:
		dprintk("Sart/continue DC calibration for %s path", (state->dc->i == 1) ? "I" : "Q");
		dib0090_write_reg(state, 0x01, state->dc->bb1);
		dib0090_write_reg(state, 0x07, state->bb7 | (state->dc->i << 7));

		state->step = 0;
		state->min_adc_diff = 1023;
		*tune_state = CT_TUNER_STEP_1;
		ret = 50;
		break;

	case CT_TUNER_STEP_1:
		dib0090_set_trim(state);
		*tune_state = CT_TUNER_STEP_2;
		break;

	case CT_TUNER_STEP_2:
	case CT_TUNER_STEP_3:
	case CT_TUNER_STEP_4:
		ret = dib0090_get_offset(state, tune_state);
		break;

	case CT_TUNER_STEP_5:	/* found an offset */
		dprintk("adc_diff = %d, current step= %d", (u32) state->adc_diff, state->step);
		if (state->step == 0 && state->adc_diff < 0) {
			state->min_adc_diff = -1023;
			dprintk("Change of sign of the minimum adc diff");
		}

		dprintk("adc_diff = %d, min_adc_diff = %d current_step = %d", state->adc_diff, state->min_adc_diff, state->step);

		/* first turn for this frequency */
		if (state->step == 0) {
			if (state->dc->pga && state->adc_diff < 0)
				state->step = 0x10;
			if (state->dc->pga == 0 && state->adc_diff > 0)
				state->step = 0x10;
		}

		/* Look for a change of Sign in the Adc_diff.min_adc_diff is used to STORE the setp N-1 */
		if ((state->adc_diff & 0x8000) == (state->min_adc_diff & 0x8000) && steps(state->step) < 15) {
			/* stop search when the delta the sign is changing and Steps =15 and Step=0 is force for continuance */
			state->step++;
			state->min_adc_diff = state->adc_diff;
			*tune_state = CT_TUNER_STEP_1;
		} else {
			/* the minimum was what we have seen in the step before */
			if (ABS(state->adc_diff) > ABS(state->min_adc_diff)) {
				dprintk("Since adc_diff N = %d > adc_diff step N-1 = %d, Come back one step", state->adc_diff, state->min_adc_diff);
				state->step--;
			}

			dib0090_set_trim(state);
			dprintk("BB Offset Cal, BBreg=%hd,Offset=%hd,Value Set=%hd", state->dc->addr, state->adc_diff, state->step);

			state->dc++;
			if (state->dc->addr == 0)	/* done */
				*tune_state = CT_TUNER_STEP_6;
			else
				*tune_state = CT_TUNER_STEP_0;

		}
		break;

	case CT_TUNER_STEP_6:
		dib0090_write_reg(state, 0x07, state->bb7 & ~0x0008);
		dib0090_write_reg(state, 0x1f, 0x7);
		*tune_state = CT_TUNER_START;	/* reset done -> real tuning can now begin */
		state->calibrate &= ~DC_CAL;
		/* NOTE(review): intentional fall-through into default (no break) —
		 * harmless here since default only breaks, but worth confirming. */
	default:
		break;
	}

	return ret;
}

/*
 * WBD (wideband detector) calibration: choose the WBD gain for the current
 * RF frequency, switch the mux to it, wait, then sample the slow ADC to
 * obtain state->wbd_offset.  Skipped entirely if the same gain was already
 * calibrated.  Clears WBD_CAL when done.
 */
static int dib0090_wbd_calibration(struct dib0090_state *state, enum frontend_tune_state *tune_state)
{
	u8 wbd_gain;
	const struct dib0090_wbd_slope *wbd = state->current_wbd_table;

	switch (*tune_state) {
	case CT_TUNER_START:
		while (state->current_rf / 1000 > wbd->max_freq)
			wbd++;
		if (wbd->wbd_gain != 0)
			wbd_gain = wbd->wbd_gain;
		else {
			wbd_gain = 4;
#if defined(CONFIG_BAND_LBAND) || defined(CONFIG_BAND_SBAND)
			if ((state->current_band == BAND_LBAND) || (state->current_band == BAND_SBAND))
				wbd_gain = 2;
#endif
		}

		if (wbd_gain == state->wbd_calibration_gain) {	/* the WBD calibration has already been done */
			*tune_state = CT_TUNER_START;
			state->calibrate &= ~WBD_CAL;
			return 0;
		}

		dib0090_write_reg(state, 0x10, 0x1b81 | (1 << 10) | (wbd_gain << 13) | (1 << 3));

		dib0090_write_reg(state, 0x24, ((EN_UHF & 0x0fff) | (1 << 1)));
		*tune_state = CT_TUNER_STEP_0;
		state->wbd_calibration_gain = wbd_gain;
		return 90;	/* wait for the WBDMUX to switch and for the ADC to sample */

	case CT_TUNER_STEP_0:
		state->wbd_offset = dib0090_get_slow_adc_val(state);
		dprintk("WBD calibration offset = %d", state->wbd_offset);
		*tune_state = CT_TUNER_START;	/* reset done -> real tuning can now begin */
		state->calibrate &= ~WBD_CAL;
		break;

	default:
		break;
	}
	return 0;
}

/*
 * Select the baseband low-pass filter cut-off matching the cached channel
 * bandwidth (<=5/6/7 MHz map to codes 3/2/1 in bits 15:14 of BB_1, 8 MHz
 * to 0), then reprogram the filter trim registers.
 */
static void dib0090_set_bandwidth(struct dib0090_state *state)
{
	u16 tmp;

	if (state->fe->dtv_property_cache.bandwidth_hz / 1000 <= 5000)
		tmp = (3 << 14);
	else if (state->fe->dtv_property_cache.bandwidth_hz / 1000 <= 6000)
		tmp = (2 << 14);
	else if (state->fe->dtv_property_cache.bandwidth_hz / 1000 <= 7000)
		tmp = (1 << 14);
	else
		tmp = (0 << 14);

	state->bb_1_def &= 0x3fff;
	state->bb_1_def |= tmp;

	dib0090_write_reg(state, 0x01, state->bb_1_def);	/* be sure that we have the right bb-filter */

	dib0090_write_reg(state, 0x03, 0x6008);	/* = 0x6008 : vcm3_trim = 1 ; filter2_gm1_trim = 8 ; filter2_cutoff_freq = 0 */
	/* NOTE(review): the servo-mode legend below ("1 = 50Hz ; 2 = 150Hz")
	 * contradicts the one in dib0090_gain_control ("1 = 150Hz ; 2 = 50Hz");
	 * one of the two comments is wrong — confirm with the datasheet. */
	dib0090_write_reg(state, 0x04, 0x1);	/* 0 = 1KHz ; 1 = 50Hz ; 2 = 150Hz ; 3 = 50KHz ; 4 = servo fast */

	if (state->identity.in_soc) {
		dib0090_write_reg(state, 0x05, 0x9bcf);	/* attenuator_ibias_tri = 2 ; input_stage_ibias_tr = 1 ; nc = 11 ; ext_gm_trim = 1 ; obuf_ibias_trim = 4 ; filter13_gm2_ibias_t = 15 */
	} else {
		dib0090_write_reg(state, 0x02, (5 << 11) | (8 << 6) | (22 & 0x3f));	/* 22 = cap_value */
		dib0090_write_reg(state, 0x05, 0xabcd);	/* = 0xabcd : attenuator_ibias_tri = 2 ; input_stage_ibias_tr = 2 ; nc = 11 ; ext_gm_trim = 1 ; obuf_ibias_trim = 4 ; filter13_gm2_ibias_t = 13 */
	}
}

/* PLL settings per frequency interval; entries are { max_freq_khz, vco_band,
 * hfdiv_code, hfdiv, topresc } judging by how dib0090_tune() consumes the
 * fields — TODO confirm field order against the dib0090_pll declaration. */
static const struct dib0090_pll dib0090_pll_table[] = {
#ifdef CONFIG_BAND_CBAND
	{56000, 0, 9, 48, 6},
	{70000, 1, 9, 48, 6},
	{87000, 0, 8, 32, 4},
	{105000, 1, 8, 32, 4},
	{115000, 0, 7, 24, 6},
	{140000, 1, 7, 24, 6},
	{170000, 0, 6, 16, 4},
#endif
#ifdef CONFIG_BAND_VHF
	{200000, 1, 6, 16, 4},
	{230000, 0, 5, 12, 6},
	{280000, 1, 5, 12, 6},
	{340000, 0, 4, 8, 4},
	{380000, 1, 4, 8, 4},
	{450000, 0, 3, 6, 6},
#endif
#ifdef CONFIG_BAND_UHF
	{580000, 1, 3, 6, 6},
	{700000, 0, 2, 4, 4},
	{860000, 1, 2, 4, 4},
#endif
#ifdef CONFIG_BAND_LBAND
	{1800000, 1, 0, 2, 4},
#endif
#ifdef CONFIG_BAND_SBAND
	{2900000, 0, 14, 1, 4},
#endif
};

/* front-end settings per frequency interval when FM/VHF is received through
 * the CBAND input (pre-P1G parts with a CBAND tuner version) */
static const struct dib0090_tuning dib0090_tuning_table_fm_vhf_on_cband[] = {
#ifdef CONFIG_BAND_CBAND
	{184000, 4, 1, 15, 0x280, 0x2912, 0xb94e, EN_CAB},
	{227000, 4, 3, 15, 0x280, 0x2912, 0xb94e, EN_CAB},
	{380000, 4, 7, 15, 0x280, 0x2912, 0xb94e, EN_CAB},
#endif
#ifdef CONFIG_BAND_UHF
	{520000, 2, 0, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{550000, 2, 2, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{650000, 2, 3, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{750000, 2, 5, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{850000, 2, 6, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{900000, 2, 7, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
#endif
#ifdef CONFIG_BAND_LBAND
	{1500000, 4, 0, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
	{1600000, 4, 1, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
	{1800000, 4, 3, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
#endif
#ifdef CONFIG_BAND_SBAND
	{2300000, 1, 4, 20, 0x300, 0x2d2A, 0x82c7, EN_SBD},
	{2900000, 1, 7, 20, 0x280, 0x2deb, 0x8347, EN_SBD},
#endif
};

/* standard front-end settings per frequency interval (non-P1G parts) */
static const struct dib0090_tuning dib0090_tuning_table[] = {
#ifdef CONFIG_BAND_CBAND
	{170000, 4, 1, 15, 0x280, 0x2912, 0xb94e, EN_CAB},
#endif
#ifdef CONFIG_BAND_VHF
	{184000, 1, 1, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
	{227000, 1, 3, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
	{380000, 1, 7, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
#endif
#ifdef CONFIG_BAND_UHF
	{520000, 2, 0, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{550000, 2, 2, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{650000, 2, 3, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{750000, 2, 5, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{850000, 2, 6, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{900000, 2, 7, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
#endif
#ifdef CONFIG_BAND_LBAND
	{1500000, 4, 0, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
	{1600000, 4, 1, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
	{1800000, 4, 3, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
#endif
#ifdef CONFIG_BAND_SBAND
	{2300000, 1, 4, 20, 0x300, 0x2d2A, 0x82c7, EN_SBD},
	{2900000, 1, 7, 20, 0x280, 0x2deb, 0x8347, EN_SBD},
#endif
};

/* front-end settings for P1G silicon */
static const struct dib0090_tuning dib0090_p1g_tuning_table[] = {
#ifdef CONFIG_BAND_CBAND
	{170000, 4, 1, 0x820f, 0x300, 0x2d22, 0x82cb, EN_CAB},
#endif
#ifdef CONFIG_BAND_VHF
	{184000, 1, 1, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
	{227000, 1, 3, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
	{380000, 1, 7, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
#endif
#ifdef CONFIG_BAND_UHF
	{510000, 2, 0, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{540000, 2, 1, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{600000, 2, 3, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{630000, 2, 4, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{680000, 2, 5, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{720000, 2, 6, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{900000, 2, 7, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
#endif
#ifdef CONFIG_BAND_LBAND
	{1500000, 4, 0, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
	{1600000, 4, 1, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
	{1800000, 4, 3, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
#endif
#ifdef CONFIG_BAND_SBAND
	{2300000, 1, 4, 20, 0x300, 0x2d2A, 0x82c7, EN_SBD},
	{2900000, 1, 7, 20, 0x280, 0x2deb, 0x8347, EN_SBD},
#endif
};

/* PLL settings for P1G silicon (same layout as dib0090_pll_table) */
static const struct dib0090_pll dib0090_p1g_pll_table[] = {
#ifdef CONFIG_BAND_CBAND
	{57000, 0, 11, 48, 6},
	{70000, 1, 11, 48, 6},
	{86000, 0, 10, 32, 4},
	{105000, 1, 10, 32, 4},
	{115000, 0, 9, 24, 6},
	{140000, 1, 9, 24, 6},
	{170000, 0, 8, 16, 4},
#endif
#ifdef CONFIG_BAND_VHF
	{200000, 1, 8, 16, 4},
	{230000, 0, 7, 12, 6},
	{280000, 1, 7, 12, 6},
	{340000, 0, 6, 8, 4},
	{380000, 1, 6, 8, 4},
	{455000, 0, 5, 6, 6},
#endif
#ifdef CONFIG_BAND_UHF
	{580000, 1, 5, 6, 6},
	{680000, 0, 4, 4, 4},
	{860000, 1, 4, 4, 4},
#endif
#ifdef CONFIG_BAND_LBAND
	{1800000, 1, 2, 2, 4},
#endif
#ifdef CONFIG_BAND_SBAND
	{2900000, 0, 1, 1, 6},
#endif
};

/* P1G: FM/VHF received through the CBAND input */
static const struct dib0090_tuning dib0090_p1g_tuning_table_fm_vhf_on_cband[] = {
#ifdef CONFIG_BAND_CBAND
	{184000, 4, 3, 0x4187, 0x2c0, 0x2d22, 0x81cb, EN_CAB},
	{227000, 4, 3, 0x4187, 0x2c0, 0x2d22, 0x81cb, EN_CAB},
	{380000, 4, 3, 0x4187, 0x2c0, 0x2d22, 0x81cb, EN_CAB},
#endif
#ifdef CONFIG_BAND_UHF
	{520000, 2, 0, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{550000, 2, 2, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{650000, 2, 3, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{750000, 2, 5, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{850000, 2, 6, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{900000, 2, 7, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
#endif
#ifdef CONFIG_BAND_LBAND
	{1500000, 4, 0, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
	{1600000, 4, 1, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
	{1800000, 4, 3, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
#endif
#ifdef CONFIG_BAND_SBAND
	{2300000, 1, 4, 20, 0x300, 0x2d2A, 0x82c7, EN_SBD},
	{2900000, 1, 7, 20, 0x280, 0x2deb, 0x8347, EN_SBD},
#endif
};

/* 7090 SoC: every band routed through the CBAND input */
static const struct dib0090_tuning dib0090_tuning_table_cband_7090[] = {
#ifdef CONFIG_BAND_CBAND
	{300000, 4, 3, 0x018F, 0x2c0, 0x2d22, 0xb9ce, EN_CAB},
	{380000, 4, 10, 0x018F, 0x2c0, 0x2d22, 0xb9ce, EN_CAB},
	{570000, 4, 10, 0x8190, 0x2c0, 0x2d22, 0xb9ce, EN_CAB},
	{858000, 4, 5, 0x8190, 0x2c0, 0x2d22, 0xb9ce, EN_CAB},
#endif
};

/*
 * Captrim (VCO capacitor trim) search state machine.  On P1G parts the
 * hardware auto-search is used (register 0x40) unless force_soft_search is
 * set (8090 P1G variants); otherwise a software binary search over the
 * 7-bit captrim value minimises the distance of the slow-ADC reading from
 * the target.  Clears CAPTRIM_CAL when finished.  Returns the delay until
 * the next invocation.
 */
static int dib0090_captrim_search(struct dib0090_state *state, enum frontend_tune_state *tune_state)
{
	int ret = 0;
	u16 lo4 = 0xe900;

	s16 adc_target;
	u16 adc;
	s8 step_sign;
	u8 force_soft_search = 0;

	if (state->identity.version == SOC_8090_P1G_11R1 || state->identity.version == SOC_8090_P1G_21R1)
		force_soft_search = 1;

	if (*tune_state == CT_TUNER_START) {
		dprintk("Start Captrim search : %s", (force_soft_search == 1) ?
				"FORCE SOFT SEARCH" : "AUTO");

		dib0090_write_reg(state, 0x10, 0x2B1);
		dib0090_write_reg(state, 0x1e, 0x0032);

		if (!state->tuner_is_tuned) {
			/* prepare a complete captrim */
			if (!state->identity.p1g || force_soft_search)
				state->step = state->captrim = state->fcaptrim = 64;

			state->current_rf = state->rf_request;
		} else {	/* we are already tuned to this frequency - the configuration is correct */
			if (!state->identity.p1g || force_soft_search) {
				/* do a minimal captrim even if the frequency has not changed */
				state->step = 4;
				state->captrim = state->fcaptrim = dib0090_read_reg(state, 0x18) & 0x7f;
			}
		}
		state->adc_diff = 3000;
		*tune_state = CT_TUNER_STEP_0;

	} else if (*tune_state == CT_TUNER_STEP_0) {
		if (state->identity.p1g && !force_soft_search) {
			/* hardware auto-search */
			u8 ratio = 31;
			dib0090_write_reg(state, 0x40, (3 << 7) | (ratio << 2) | (1 << 1) | 1);
			dib0090_read_reg(state, 0x40);
			ret = 50;
		} else {
			/* software binary search: halve the step and try the
			 * current captrim candidate */
			state->step /= 2;
			dib0090_write_reg(state, 0x18, lo4 | state->captrim);

			if (state->identity.in_soc)
				ret = 25;
		}
		*tune_state = CT_TUNER_STEP_1;

	} else if (*tune_state == CT_TUNER_STEP_1) {
		if (state->identity.p1g && !force_soft_search) {
			/* stop the auto-search and read back the result */
			dib0090_write_reg(state, 0x40, 0x18c | (0 << 1) | 0);
			dib0090_read_reg(state, 0x40);

			state->fcaptrim = dib0090_read_reg(state, 0x18) & 0x7F;
			dprintk("***Final Captrim= 0x%x", state->fcaptrim);
			*tune_state = CT_TUNER_STEP_3;

		} else {
			/* MERGE for all krosus before P1G */
			adc = dib0090_get_slow_adc_val(state);
			dprintk("CAPTRIM=%d; ADC = %d (ADC) & %dmV", (u32) state->captrim, (u32) adc, (u32) (adc) * (u32) 1800 / (u32) 1024);

			if (state->rest == 0 || state->identity.in_soc) {	/* Just for 8090P SOCS where auto captrim HW bug : TO CHECK IN ACI for SOCS !!! if 400 for 8090p SOC => tune issue !!! */
				adc_target = 200;
			} else
				adc_target = 400;

			if (adc >= adc_target) {
				adc -= adc_target;
				step_sign = -1;
			} else {
				adc = adc_target - adc;
				step_sign = 1;
			}

			/* keep the captrim candidate closest to the target so far */
			if (adc < state->adc_diff) {
				dprintk("CAPTRIM=%d is closer to target (%d/%d)", (u32) state->captrim, (u32) adc, (u32) state->adc_diff);
				state->adc_diff = adc;
				state->fcaptrim = state->captrim;
			}

			state->captrim += step_sign * state->step;
			if (state->step >= 1)
				*tune_state = CT_TUNER_STEP_0;
			else
				*tune_state = CT_TUNER_STEP_2;

			ret = 25;
		}
	} else if (*tune_state == CT_TUNER_STEP_2) {	/* this step is only used by krosus < P1G */
		/*write the final cptrim config */
		dib0090_write_reg(state, 0x18, lo4 | state->fcaptrim);

		*tune_state = CT_TUNER_STEP_3;

	} else if (*tune_state == CT_TUNER_STEP_3) {
		state->calibrate &= ~CAPTRIM_CAL;
		*tune_state = CT_TUNER_STEP_0;
	}

	return ret;
}

/*
 * Measure the die temperature: route the temperature sensor through the
 * WBD mux, sample the slow ADC with two bias settings (register 0x13) and
 * derive state->temperature from the difference.  The "temperature: %d C"
 * log prints temperature - 30, so the stored value apparently carries a
 * +30 offset — TODO confirm the calibration constants (180/256, +55).
 * Restores the mux/bias and clears TEMP_CAL when done.
 */
static int dib0090_get_temperature(struct dib0090_state *state, enum frontend_tune_state *tune_state)
{
	int ret = 15;
	s16 val;

	switch (*tune_state) {
	case CT_TUNER_START:
		state->wbdmux = dib0090_read_reg(state, 0x10);
		dib0090_write_reg(state, 0x10, (state->wbdmux & ~(0xff << 3)) | (0x8 << 3));

		state->bias = dib0090_read_reg(state, 0x13);
		dib0090_write_reg(state, 0x13, state->bias | (0x3 << 8));

		*tune_state = CT_TUNER_STEP_0;
		/* wait for the WBDMUX to switch and for the ADC to sample */
		break;

	case CT_TUNER_STEP_0:
		state->adc_diff = dib0090_get_slow_adc_val(state);
		dib0090_write_reg(state, 0x13, (state->bias & ~(0x3 << 8)) | (0x2 << 8));
		*tune_state = CT_TUNER_STEP_1;
		break;

	case CT_TUNER_STEP_1:
		val = dib0090_get_slow_adc_val(state);
		state->temperature = ((s16) ((val - state->adc_diff) * 180) >> 8) + 55;

		dprintk("temperature: %d C", state->temperature - 30);

		*tune_state = CT_TUNER_STEP_2;
		break;

	case CT_TUNER_STEP_2:
		dib0090_write_reg(state, 0x13, state->bias);
		dib0090_write_reg(state, 0x10, state->wbdmux);	/* write back original WBDMUX */

		*tune_state = CT_TUNER_START;
		state->calibrate &= ~TEMP_CAL;
		if (state->config->analog_output == 0)
			dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) | (1 << 14));
		break;

	default:
		ret = 0;
		break;
	}
	return ret;
}

/* WBD mux base configuration; NOTE(review): the identical definition is
 * repeated inside dib0090_tune() below — legal (identical token sequence)
 * but redundant. */
#define WBD	0x781		/* 1 1 1 1 0000 0 0 1 */

/*
 * Main tuning state machine.
 *
 * CT_TUNER_START: run any pending calibration (DC, WBD, temperature,
 * captrim) first; otherwise compute the requested RF frequency (including
 * per-band offsets and the ISDB-T 1seg shift), choose the tuning/PLL table
 * entries for the band and chip flavour, and program the synthesizer
 * (FBDiv/Rest fractional part, lo5/lo6 bias words, tuner-enable mask).
 * CT_TUNER_STEP_0: after captrim, program the WBD mux and the LNA/V2I/
 * mixer/load front-end registers.
 * CT_TUNER_STEP_1: set the BB bandwidth, mark the tuner tuned and schedule
 * WBD + temperature calibration.
 *
 * Returns the delay until the next invocation, or FE_CALLBACK_TIME_NEVER
 * once stopped.
 */
static int dib0090_tune(struct dvb_frontend *fe)
{
	struct dib0090_state *state = fe->tuner_priv;
	const struct dib0090_tuning *tune = state->current_tune_table_index;
	const struct dib0090_pll *pll = state->current_pll_table_index;
	enum frontend_tune_state *tune_state = &state->tune_state;

	u16 lo5, lo6, Den, tmp;
	u32 FBDiv, Rest, FREF, VCOF_kHz = 0;
	int ret = 10;		/* 1ms is the default delay most of the time */
	u8 c, i;

	/************************* VCO ***************************/
	/* Default values for FG                                 */
	/* from these are needed :                               */
	/* Cp,HFdiv,VCOband,SD,Num,Den,FB and REFDiv             */

	/* in any case we first need to do a calibration if needed */
	if (*tune_state == CT_TUNER_START) {
		/* deactivate DataTX before some calibrations */
		if (state->calibrate & (DC_CAL | TEMP_CAL | WBD_CAL))
			dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) & ~(1 << 14));
		else
			/* Activate DataTX in case a calibration has been done before */
			if (state->config->analog_output == 0)
				dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) | (1 << 14));
	}

	if (state->calibrate & DC_CAL)
		return dib0090_dc_offset_calibration(state, tune_state);
	else if (state->calibrate & WBD_CAL) {
		if (state->current_rf == 0)
			state->current_rf = state->fe->dtv_property_cache.frequency / 1000;
		return dib0090_wbd_calibration(state, tune_state);
	} else if (state->calibrate & TEMP_CAL)
		return dib0090_get_temperature(state, tune_state);
	else if (state->calibrate & CAPTRIM_CAL)
		return dib0090_captrim_search(state, tune_state);

	if (*tune_state == CT_TUNER_START) {
		/* if soc and AGC pwm control, disengage mux to be able to R/W access to 0x01 register to set the right filter (cutoff_freq_select) during the tune sequence, otherwise, SOC SERPAR error when accessing to 0x01 */
		if (state->config->use_pwm_agc && state->identity.in_soc) {
			tmp = dib0090_read_reg(state, 0x39);
			if ((tmp >> 10) & 0x1)
				dib0090_write_reg(state, 0x39, tmp & ~(1 << 10));
		}

		state->current_band = (u8) BAND_OF_FREQUENCY(state->fe->dtv_property_cache.frequency / 1000);
		state->rf_request =
			state->fe->dtv_property_cache.frequency / 1000 + (state->current_band ==
					BAND_UHF ? state->config->freq_offset_khz_uhf : state->config->freq_offset_khz_vhf);

		/* in ISDB-T 1seg we shift tuning frequency */
		if ((state->fe->dtv_property_cache.delivery_system == SYS_ISDBT && state->fe->dtv_property_cache.isdbt_sb_mode == 1
					&& state->fe->dtv_property_cache.isdbt_partial_reception == 0)) {
			const struct dib0090_low_if_offset_table *LUT_offset = state->config->low_if;
			u8 found_offset = 0;
			u32 margin_khz = 100;

			if (LUT_offset != NULL) {
				while (LUT_offset->RF_freq != 0xffff) {
					if (((state->rf_request > (LUT_offset->RF_freq - margin_khz))
								&& (state->rf_request < (LUT_offset->RF_freq + margin_khz)))
							&& LUT_offset->std == state->fe->dtv_property_cache.delivery_system) {
						state->rf_request += LUT_offset->offset_khz;
						found_offset = 1;
						break;
					}
					LUT_offset++;
				}
			}

			/* default 1seg shift when no LUT entry matches */
			if (found_offset == 0)
				state->rf_request += 400;
		}

		if (state->current_rf != state->rf_request || (state->current_standard != state->fe->dtv_property_cache.delivery_system)) {
			state->tuner_is_tuned = 0;
			state->current_rf = 0;
			state->current_standard = 0;

			tune = dib0090_tuning_table;
			if (state->identity.p1g)
				tune = dib0090_p1g_tuning_table;

			tmp = (state->identity.version >> 5) & 0x7;

			if (state->identity.in_soc) {
				if (state->config->force_cband_input) {	/* Use the CBAND input for all band */
					if (state->current_band & BAND_CBAND || state->current_band & BAND_FM || state->current_band & BAND_VHF
							|| state->current_band & BAND_UHF) {
						state->current_band = BAND_CBAND;
						tune = dib0090_tuning_table_cband_7090;
					}
				} else {	/* Use the CBAND input for all band under UHF */
					if (state->current_band & BAND_CBAND || state->current_band & BAND_FM || state->current_band & BAND_VHF) {
						state->current_band = BAND_CBAND;
						tune = dib0090_tuning_table_cband_7090;
					}
				}
			} else if (tmp == 0x4 || tmp == 0x7) {
				/* CBAND tuner version for VHF */
				if (state->current_band == BAND_FM || state->current_band == BAND_CBAND || state->current_band == BAND_VHF) {
					state->current_band = BAND_CBAND;	/* Force CBAND */

					tune = dib0090_tuning_table_fm_vhf_on_cband;
					if (state->identity.p1g)
						tune = dib0090_p1g_tuning_table_fm_vhf_on_cband;
				}
			}

			pll = dib0090_pll_table;
			if (state->identity.p1g)
				pll = dib0090_p1g_pll_table;

			/* Look for the interval */
			while (state->rf_request > tune->max_freq)
				tune++;
			while (state->rf_request > pll->max_freq)
				pll++;

			state->current_tune_table_index = tune;
			state->current_pll_table_index = pll;

			dib0090_write_reg(state, 0x0b, 0xb800 | (tune->switch_trim));

			/* integer/fractional synthesizer programming */
			VCOF_kHz = (pll->hfdiv * state->rf_request) * 2;

			FREF = state->config->io.clock_khz;
			if (state->config->fref_clock_ratio != 0)
				FREF /= state->config->fref_clock_ratio;

			FBDiv = (VCOF_kHz / pll->topresc / FREF);
			Rest = (VCOF_kHz / pll->topresc) - FBDiv * FREF;

			/* keep the fractional remainder away from the loop-filter
			 * dead zones (LPF) */
			if (Rest < LPF)
				Rest = 0;
			else if (Rest < 2 * LPF)
				Rest = 2 * LPF;
			else if (Rest > (FREF - LPF)) {
				Rest = 0;
				FBDiv += 1;
			} else if (Rest > (FREF - 2 * LPF))
				Rest = FREF - 2 * LPF;
			Rest = (Rest * 6528) / (FREF / 10);
			state->rest = Rest;

			/* external loop filter, otherwise:
			 * lo5 = (0 << 15) | (0 << 12) | (0 << 11) | (3 << 9) | (4 << 6) | (3 << 4) | 4;
			 * lo6 = 0x0e34 */
			if (Rest == 0) {
				if (pll->vco_band)
					lo5 = 0x049f;
				else
					lo5 = 0x041f;
			} else {
				if (pll->vco_band)
					lo5 = 0x049e;
				else if (state->config->analog_output)
					lo5 = 0x041d;
				else
					lo5 = 0x041c;
			}

			if (state->identity.p1g) {	/* Bias is done automatically in P1G */
				if (state->identity.in_soc) {
					if (state->identity.version == SOC_8090_P1G_11R1)
						lo5 = 0x46f;
					else
						lo5 = 0x42f;
				} else
					lo5 = 0x42c;
			}

			lo5 |= (pll->hfdiv_code << 11) | (pll->vco_band << 7);	/* bit 15 is the split to the slave, we do not do it here */

			if (!state->config->io.pll_int_loop_filt) {
				if (state->identity.in_soc)
					lo6 = 0xff98;
				else if (state->identity.p1g || (Rest == 0))
					lo6 = 0xfff8;
				else
					lo6 = 0xff28;
			} else
				lo6 = (state->config->io.pll_int_loop_filt << 3);

			Den = 1;

			if (Rest > 0) {
				/* NOTE(review): all three branches below apply the same
				 * bits; the analog/SoC distinction currently has no
				 * effect — possibly a leftover from a dropped variation.
				 * Preserved as-is. */
				if (state->config->analog_output)
					lo6 |= (1 << 2) | 2;
				else {
					if (state->identity.in_soc)
						lo6 |= (1 << 2) | 2;
					else
						lo6 |= (1 << 2) | 2;
				}
				Den = 255;
			}
			dib0090_write_reg(state, 0x15, (u16) FBDiv);
			if (state->config->fref_clock_ratio != 0)
				dib0090_write_reg(state, 0x16, (Den << 8) | state->config->fref_clock_ratio);
			else
				dib0090_write_reg(state, 0x16, (Den << 8) | 1);
			dib0090_write_reg(state, 0x17, (u16) Rest);
			dib0090_write_reg(state, 0x19, lo5);
			dib0090_write_reg(state, 0x1c, lo6);

			lo6 = tune->tuner_enable;
			if (state->config->analog_output)
				lo6 = (lo6 & 0xff9f) | 0x2;

			dib0090_write_reg(state, 0x24, lo6 | EN_LO | state->config->use_pwm_agc * EN_CRYSTAL);

		}

		state->current_rf = state->rf_request;
		state->current_standard = state->fe->dtv_property_cache.delivery_system;

		ret = 20;
		state->calibrate = CAPTRIM_CAL;	/* captrim serach now */
	} else if (*tune_state == CT_TUNER_STEP_0) {	/* Warning : because of captrim cal, if you change this step, change it also in _cal.c file because it is the step following captrim cal state machine */
		const struct dib0090_wbd_slope *wbd = state->current_wbd_table;

		while (state->current_rf / 1000 > wbd->max_freq)
			wbd++;

		dib0090_write_reg(state, 0x1e, 0x07ff);
		dprintk("Final Captrim: %d", (u32) state->fcaptrim);
		dprintk("HFDIV code: %d", (u32) pll->hfdiv_code);
		dprintk("VCO = %d", (u32) pll->vco_band);
		dprintk("VCOF in kHz: %d ((%d*%d) << 1))", (u32) ((pll->hfdiv * state->rf_request) * 2), (u32) pll->hfdiv, (u32) state->rf_request);
		dprintk("REFDIV: %d, FREF: %d", (u32) 1, (u32) state->config->io.clock_khz);
		dprintk("FBDIV: %d, Rest: %d", (u32) dib0090_read_reg(state, 0x15), (u32) dib0090_read_reg(state, 0x17));
		dprintk("Num: %d, Den: %d, SD: %d", (u32) dib0090_read_reg(state, 0x17), (u32) (dib0090_read_reg(state, 0x16) >> 8),
				(u32) dib0090_read_reg(state, 0x1c) & 0x3);

/* duplicate of the file-level definition above (identical, so benign) */
#define WBD 0x781		/* 1 1 1 1 0000 0 0 1 */
		c = 4;
		i = 3;

		if (wbd->wbd_gain != 0)
			c = wbd->wbd_gain;

		state->wbdmux = (c << 13) | (i << 11) | (WBD | (state->config->use_pwm_agc << 1));
		dib0090_write_reg(state, 0x10, state->wbdmux);

		if ((tune->tuner_enable == EN_CAB) && state->identity.p1g) {
			dprintk("P1G : The cable band is selected and lna_tune = %d", tune->lna_tune);
			dib0090_write_reg(state, 0x09, tune->lna_bias);
			dib0090_write_reg(state, 0x0b, 0xb800 | (tune->lna_tune << 6) | (tune->switch_trim));
		} else
			dib0090_write_reg(state, 0x09, (tune->lna_tune << 5) | tune->lna_bias);

		dib0090_write_reg(state, 0x0c, tune->v2i);
		dib0090_write_reg(state, 0x0d, tune->mix);
		dib0090_write_reg(state, 0x0e, tune->load);
		*tune_state = CT_TUNER_STEP_1;

	} else if (*tune_state == CT_TUNER_STEP_1) {
		/* initialize the lt gain register */
		state->rf_lt_def = 0x7c00;

		dib0090_set_bandwidth(state);
		state->tuner_is_tuned = 1;

		/* schedule post-tune calibrations */
		state->calibrate |= WBD_CAL;
		state->calibrate |= TEMP_CAL;

		*tune_state = CT_TUNER_STOP;
	} else
		ret = FE_CALLBACK_TIME_NEVER;
	return ret;
}

/* free the tuner private state (dvb_tuner_ops.release) */
static int dib0090_release(struct dvb_frontend *fe)
{
	kfree(fe->tuner_priv);
	fe->tuner_priv = NULL;
	return 0;
}

/* expose the internal tune state to sibling drivers */
enum frontend_tune_state dib0090_get_tune_state(struct dvb_frontend *fe)
{
	struct dib0090_state *state = fe->tuner_priv;

	return state->tune_state;
}
EXPORT_SYMBOL(dib0090_get_tune_state);

/* force the internal tune state (used by sibling drivers) */
int dib0090_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state)
{
	struct dib0090_state *state = fe->tuner_priv;

	state->tune_state = tune_state;
	return 0;
}
EXPORT_SYMBOL(dib0090_set_tune_state);

/* report the currently tuned frequency in Hz (current_rf is kept in kHz) */
static int dib0090_get_frequency(struct dvb_frontend *fe, u32 * frequency)
{
	struct dib0090_state *state = fe->tuner_priv;

	*frequency = 1000 * state->current_rf;
	return 0;
}

/*
 * dvb_tuner_ops.set_params: run dib0090_tune() synchronously until the
 * state machine stops, sleeping the returned delay between steps
 * (ret / 10 ms, consistent with the 100us-unit assumption — TODO confirm).
 */
static int dib0090_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
{
	struct dib0090_state *state = fe->tuner_priv;
	u32 ret;

	state->tune_state = CT_TUNER_START;

	do {
		ret = dib0090_tune(fe);
		if (ret != FE_CALLBACK_TIME_NEVER)
			msleep(ret / 10);
		else
			break;
	} while (state->tune_state !=
CT_TUNER_STOP); return 0; } static const struct dvb_tuner_ops dib0090_ops = { .info = { .name = "DiBcom DiB0090", .frequency_min = 45000000, .frequency_max = 860000000, .frequency_step = 1000, }, .release = dib0090_release, .init = dib0090_wakeup, .sleep = dib0090_sleep, .set_params = dib0090_set_params, .get_frequency = dib0090_get_frequency, }; static const struct dvb_tuner_ops dib0090_fw_ops = { .info = { .name = "DiBcom DiB0090", .frequency_min = 45000000, .frequency_max = 860000000, .frequency_step = 1000, }, .release = dib0090_release, .init = NULL, .sleep = NULL, .set_params = NULL, .get_frequency = NULL, }; static const struct dib0090_wbd_slope dib0090_wbd_table_default[] = { {470, 0, 250, 0, 100, 4}, {860, 51, 866, 21, 375, 4}, {1700, 0, 800, 0, 850, 4}, {2900, 0, 250, 0, 100, 6}, {0xFFFF, 0, 0, 0, 0, 0}, }; struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config) { struct dib0090_state *st = kzalloc(sizeof(struct dib0090_state), GFP_KERNEL); if (st == NULL) return NULL; st->config = config; st->i2c = i2c; st->fe = fe; mutex_init(&st->i2c_buffer_lock); fe->tuner_priv = st; if (config->wbd == NULL) st->current_wbd_table = dib0090_wbd_table_default; else st->current_wbd_table = config->wbd; if (dib0090_reset(fe) != 0) goto free_mem; printk(KERN_INFO "DiB0090: successfully identified\n"); memcpy(&fe->ops.tuner_ops, &dib0090_ops, sizeof(struct dvb_tuner_ops)); return fe; free_mem: kfree(st); fe->tuner_priv = NULL; return NULL; } EXPORT_SYMBOL(dib0090_register); struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config) { struct dib0090_fw_state *st = kzalloc(sizeof(struct dib0090_fw_state), GFP_KERNEL); if (st == NULL) return NULL; st->config = config; st->i2c = i2c; st->fe = fe; mutex_init(&st->i2c_buffer_lock); fe->tuner_priv = st; if (dib0090_fw_reset_digital(fe, st->config) != 0) goto free_mem; dprintk("DiB0090 FW: 
successfully identified"); memcpy(&fe->ops.tuner_ops, &dib0090_fw_ops, sizeof(struct dvb_tuner_ops)); return fe; free_mem: kfree(st); fe->tuner_priv = NULL; return NULL; } EXPORT_SYMBOL(dib0090_fw_register); MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); MODULE_AUTHOR("Olivier Grenie <olivier.grenie@dibcom.fr>"); MODULE_DESCRIPTION("Driver for the DiBcom 0090 base-band RF Tuner"); MODULE_LICENSE("GPL");
gpl-2.0
garwynn/D710SPR_FL24_Kernel
drivers/media/video/et61x251/et61x251_core.c
2625
65497
/*************************************************************************** * V4L2 driver for ET61X[12]51 PC Camera Controllers * * * * Copyright (C) 2006-2007 by Luca Risolia <luca.risolia@studio.unibo.it> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the Free Software * * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ***************************************************************************/ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/delay.h> #include <linux/compiler.h> #include <linux/ioctl.h> #include <linux/poll.h> #include <linux/stat.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/page-flags.h> #include <media/v4l2-ioctl.h> #include <asm/byteorder.h> #include <asm/page.h> #include <asm/uaccess.h> #include "et61x251.h" /*****************************************************************************/ #define ET61X251_MODULE_NAME "V4L2 driver for ET61X[12]51 " \ "PC Camera Controllers" #define ET61X251_MODULE_AUTHOR "(C) 2006-2007 Luca Risolia" #define ET61X251_AUTHOR_EMAIL "<luca.risolia@studio.unibo.it>" #define ET61X251_MODULE_LICENSE "GPL" #define ET61X251_MODULE_VERSION "1:1.09" #define ET61X251_MODULE_VERSION_CODE KERNEL_VERSION(1, 1, 9) 
/*****************************************************************************/ MODULE_DEVICE_TABLE(usb, et61x251_id_table); MODULE_AUTHOR(ET61X251_MODULE_AUTHOR " " ET61X251_AUTHOR_EMAIL); MODULE_DESCRIPTION(ET61X251_MODULE_NAME); MODULE_VERSION(ET61X251_MODULE_VERSION); MODULE_LICENSE(ET61X251_MODULE_LICENSE); static short video_nr[] = {[0 ... ET61X251_MAX_DEVICES-1] = -1}; module_param_array(video_nr, short, NULL, 0444); MODULE_PARM_DESC(video_nr, "\n<-1|n[,...]> Specify V4L2 minor mode number." "\n -1 = use next available (default)" "\n n = use minor number n (integer >= 0)" "\nYou can specify up to " __MODULE_STRING(ET61X251_MAX_DEVICES) " cameras this way." "\nFor example:" "\nvideo_nr=-1,2,-1 would assign minor number 2 to" "\nthe second registered camera and use auto for the first" "\none and for every other camera." "\n"); static short force_munmap[] = {[0 ... ET61X251_MAX_DEVICES-1] = ET61X251_FORCE_MUNMAP}; module_param_array(force_munmap, bool, NULL, 0444); MODULE_PARM_DESC(force_munmap, "\n<0|1[,...]> Force the application to unmap previously" "\nmapped buffer memory before calling any VIDIOC_S_CROP or" "\nVIDIOC_S_FMT ioctl's. Not all the applications support" "\nthis feature. This parameter is specific for each" "\ndetected camera." "\n 0 = do not force memory unmapping" "\n 1 = force memory unmapping (save memory)" "\nDefault value is "__MODULE_STRING(ET61X251_FORCE_MUNMAP)"." "\n"); static unsigned int frame_timeout[] = {[0 ... ET61X251_MAX_DEVICES-1] = ET61X251_FRAME_TIMEOUT}; module_param_array(frame_timeout, uint, NULL, 0644); MODULE_PARM_DESC(frame_timeout, "\n<n[,...]> Timeout for a video frame in seconds." "\nThis parameter is specific for each detected camera." "\nDefault value is " __MODULE_STRING(ET61X251_FRAME_TIMEOUT)"." 
"\n"); #ifdef ET61X251_DEBUG static unsigned short debug = ET61X251_DEBUG_LEVEL; module_param(debug, ushort, 0644); MODULE_PARM_DESC(debug, "\n<n> Debugging information level, from 0 to 3:" "\n0 = none (use carefully)" "\n1 = critical errors" "\n2 = significant informations" "\n3 = more verbose messages" "\nLevel 3 is useful for testing only, when only " "one device is used." "\nDefault value is "__MODULE_STRING(ET61X251_DEBUG_LEVEL)"." "\n"); #endif /*****************************************************************************/ static u32 et61x251_request_buffers(struct et61x251_device* cam, u32 count, enum et61x251_io_method io) { struct v4l2_pix_format* p = &(cam->sensor.pix_format); struct v4l2_rect* r = &(cam->sensor.cropcap.bounds); const size_t imagesize = cam->module_param.force_munmap || io == IO_READ ? (p->width * p->height * p->priv) / 8 : (r->width * r->height * p->priv) / 8; void* buff = NULL; u32 i; if (count > ET61X251_MAX_FRAMES) count = ET61X251_MAX_FRAMES; cam->nbuffers = count; while (cam->nbuffers > 0) { if ((buff = vmalloc_32_user(cam->nbuffers * PAGE_ALIGN(imagesize)))) break; cam->nbuffers--; } for (i = 0; i < cam->nbuffers; i++) { cam->frame[i].bufmem = buff + i*PAGE_ALIGN(imagesize); cam->frame[i].buf.index = i; cam->frame[i].buf.m.offset = i*PAGE_ALIGN(imagesize); cam->frame[i].buf.length = imagesize; cam->frame[i].buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; cam->frame[i].buf.sequence = 0; cam->frame[i].buf.field = V4L2_FIELD_NONE; cam->frame[i].buf.memory = V4L2_MEMORY_MMAP; cam->frame[i].buf.flags = 0; } return cam->nbuffers; } static void et61x251_release_buffers(struct et61x251_device* cam) { if (cam->nbuffers) { vfree(cam->frame[0].bufmem); cam->nbuffers = 0; } cam->frame_current = NULL; } static void et61x251_empty_framequeues(struct et61x251_device* cam) { u32 i; INIT_LIST_HEAD(&cam->inqueue); INIT_LIST_HEAD(&cam->outqueue); for (i = 0; i < ET61X251_MAX_FRAMES; i++) { cam->frame[i].state = F_UNUSED; cam->frame[i].buf.bytesused = 0; } } 
static void et61x251_requeue_outqueue(struct et61x251_device* cam) { struct et61x251_frame_t *i; list_for_each_entry(i, &cam->outqueue, frame) { i->state = F_QUEUED; list_add(&i->frame, &cam->inqueue); } INIT_LIST_HEAD(&cam->outqueue); } static void et61x251_queue_unusedframes(struct et61x251_device* cam) { unsigned long lock_flags; u32 i; for (i = 0; i < cam->nbuffers; i++) if (cam->frame[i].state == F_UNUSED) { cam->frame[i].state = F_QUEUED; spin_lock_irqsave(&cam->queue_lock, lock_flags); list_add_tail(&cam->frame[i].frame, &cam->inqueue); spin_unlock_irqrestore(&cam->queue_lock, lock_flags); } } /*****************************************************************************/ int et61x251_write_reg(struct et61x251_device* cam, u8 value, u16 index) { struct usb_device* udev = cam->usbdev; u8* buff = cam->control_buffer; int res; *buff = value; res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41, 0, index, buff, 1, ET61X251_CTRL_TIMEOUT); if (res < 0) { DBG(3, "Failed to write a register (value 0x%02X, index " "0x%02X, error %d)", value, index, res); return -1; } return 0; } static int et61x251_read_reg(struct et61x251_device* cam, u16 index) { struct usb_device* udev = cam->usbdev; u8* buff = cam->control_buffer; int res; res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x00, 0xc1, 0, index, buff, 1, ET61X251_CTRL_TIMEOUT); if (res < 0) DBG(3, "Failed to read a register (index 0x%02X, error %d)", index, res); return (res >= 0) ? 
(int)(*buff) : -1; } static int et61x251_i2c_wait(struct et61x251_device* cam, const struct et61x251_sensor* sensor) { int i, r; for (i = 1; i <= 8; i++) { if (sensor->interface == ET61X251_I2C_3WIRES) { r = et61x251_read_reg(cam, 0x8e); if (!(r & 0x02) && (r >= 0)) return 0; } else { r = et61x251_read_reg(cam, 0x8b); if (!(r & 0x01) && (r >= 0)) return 0; } if (r < 0) return -EIO; udelay(8*8); /* minimum for sensors at 400kHz */ } return -EBUSY; } int et61x251_i2c_raw_write(struct et61x251_device* cam, u8 n, u8 data1, u8 data2, u8 data3, u8 data4, u8 data5, u8 data6, u8 data7, u8 data8, u8 address) { struct usb_device* udev = cam->usbdev; u8* data = cam->control_buffer; int err = 0, res; data[0] = data2; data[1] = data3; data[2] = data4; data[3] = data5; data[4] = data6; data[5] = data7; data[6] = data8; res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41, 0, 0x81, data, n-1, ET61X251_CTRL_TIMEOUT); if (res < 0) err += res; data[0] = address; data[1] = cam->sensor.i2c_slave_id; data[2] = cam->sensor.rsta | 0x02 | (n << 4); res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41, 0, 0x88, data, 3, ET61X251_CTRL_TIMEOUT); if (res < 0) err += res; /* Start writing through the serial interface */ data[0] = data1; res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41, 0, 0x80, data, 1, ET61X251_CTRL_TIMEOUT); if (res < 0) err += res; err += et61x251_i2c_wait(cam, &cam->sensor); if (err) DBG(3, "I2C raw write failed for %s image sensor", cam->sensor.name); PDBGG("I2C raw write: %u bytes, address = 0x%02X, data1 = 0x%02X, " "data2 = 0x%02X, data3 = 0x%02X, data4 = 0x%02X, data5 = 0x%02X," " data6 = 0x%02X, data7 = 0x%02X, data8 = 0x%02X", n, address, data1, data2, data3, data4, data5, data6, data7, data8); return err ? 
-1 : 0; } /*****************************************************************************/ static void et61x251_urb_complete(struct urb *urb) { struct et61x251_device* cam = urb->context; struct et61x251_frame_t** f; size_t imagesize; u8 i; int err = 0; if (urb->status == -ENOENT) return; f = &cam->frame_current; if (cam->stream == STREAM_INTERRUPT) { cam->stream = STREAM_OFF; if ((*f)) (*f)->state = F_QUEUED; DBG(3, "Stream interrupted"); wake_up(&cam->wait_stream); } if (cam->state & DEV_DISCONNECTED) return; if (cam->state & DEV_MISCONFIGURED) { wake_up_interruptible(&cam->wait_frame); return; } if (cam->stream == STREAM_OFF || list_empty(&cam->inqueue)) goto resubmit_urb; if (!(*f)) (*f) = list_entry(cam->inqueue.next, struct et61x251_frame_t, frame); imagesize = (cam->sensor.pix_format.width * cam->sensor.pix_format.height * cam->sensor.pix_format.priv) / 8; for (i = 0; i < urb->number_of_packets; i++) { unsigned int len, status; void *pos; u8* b1, * b2, sof; const u8 VOID_BYTES = 6; size_t imglen; len = urb->iso_frame_desc[i].actual_length; status = urb->iso_frame_desc[i].status; pos = urb->iso_frame_desc[i].offset + urb->transfer_buffer; if (status) { DBG(3, "Error in isochronous frame"); (*f)->state = F_ERROR; continue; } b1 = pos++; b2 = pos++; sof = ((*b1 & 0x3f) == 63); imglen = ((*b1 & 0xc0) << 2) | *b2; PDBGG("Isochrnous frame: length %u, #%u i, image length %zu", len, i, imglen); if ((*f)->state == F_QUEUED || (*f)->state == F_ERROR) start_of_frame: if (sof) { (*f)->state = F_GRABBING; (*f)->buf.bytesused = 0; do_gettimeofday(&(*f)->buf.timestamp); pos += 22; DBG(3, "SOF detected: new video frame"); } if ((*f)->state == F_GRABBING) { if (sof && (*f)->buf.bytesused) { if (cam->sensor.pix_format.pixelformat == V4L2_PIX_FMT_ET61X251) goto end_of_frame; else { DBG(3, "Not expected SOF detected " "after %lu bytes", (unsigned long)(*f)->buf.bytesused); (*f)->state = F_ERROR; continue; } } if ((*f)->buf.bytesused + imglen > imagesize) { DBG(3, "Video frame 
size exceeded"); (*f)->state = F_ERROR; continue; } pos += VOID_BYTES; memcpy((*f)->bufmem+(*f)->buf.bytesused, pos, imglen); (*f)->buf.bytesused += imglen; if ((*f)->buf.bytesused == imagesize) { u32 b; end_of_frame: b = (*f)->buf.bytesused; (*f)->state = F_DONE; (*f)->buf.sequence= ++cam->frame_count; spin_lock(&cam->queue_lock); list_move_tail(&(*f)->frame, &cam->outqueue); if (!list_empty(&cam->inqueue)) (*f) = list_entry(cam->inqueue.next, struct et61x251_frame_t, frame); else (*f) = NULL; spin_unlock(&cam->queue_lock); DBG(3, "Video frame captured: : %lu bytes", (unsigned long)(b)); if (!(*f)) goto resubmit_urb; if (sof && cam->sensor.pix_format.pixelformat == V4L2_PIX_FMT_ET61X251) goto start_of_frame; } } } resubmit_urb: urb->dev = cam->usbdev; err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0 && err != -EPERM) { cam->state |= DEV_MISCONFIGURED; DBG(1, "usb_submit_urb() failed"); } wake_up_interruptible(&cam->wait_frame); } static int et61x251_start_transfer(struct et61x251_device* cam) { struct usb_device *udev = cam->usbdev; struct urb* urb; struct usb_host_interface* altsetting = usb_altnum_to_altsetting( usb_ifnum_to_if(udev, 0), ET61X251_ALTERNATE_SETTING); const unsigned int psz = le16_to_cpu(altsetting-> endpoint[0].desc.wMaxPacketSize); s8 i, j; int err = 0; for (i = 0; i < ET61X251_URBS; i++) { cam->transfer_buffer[i] = kzalloc(ET61X251_ISO_PACKETS * psz, GFP_KERNEL); if (!cam->transfer_buffer[i]) { err = -ENOMEM; DBG(1, "Not enough memory"); goto free_buffers; } } for (i = 0; i < ET61X251_URBS; i++) { urb = usb_alloc_urb(ET61X251_ISO_PACKETS, GFP_KERNEL); cam->urb[i] = urb; if (!urb) { err = -ENOMEM; DBG(1, "usb_alloc_urb() failed"); goto free_urbs; } urb->dev = udev; urb->context = cam; urb->pipe = usb_rcvisocpipe(udev, 1); urb->transfer_flags = URB_ISO_ASAP; urb->number_of_packets = ET61X251_ISO_PACKETS; urb->complete = et61x251_urb_complete; urb->transfer_buffer = cam->transfer_buffer[i]; urb->transfer_buffer_length = psz * 
ET61X251_ISO_PACKETS; urb->interval = 1; for (j = 0; j < ET61X251_ISO_PACKETS; j++) { urb->iso_frame_desc[j].offset = psz * j; urb->iso_frame_desc[j].length = psz; } } err = et61x251_write_reg(cam, 0x01, 0x03); err = et61x251_write_reg(cam, 0x00, 0x03); err = et61x251_write_reg(cam, 0x08, 0x03); if (err) { err = -EIO; DBG(1, "I/O hardware error"); goto free_urbs; } err = usb_set_interface(udev, 0, ET61X251_ALTERNATE_SETTING); if (err) { DBG(1, "usb_set_interface() failed"); goto free_urbs; } cam->frame_current = NULL; for (i = 0; i < ET61X251_URBS; i++) { err = usb_submit_urb(cam->urb[i], GFP_KERNEL); if (err) { for (j = i-1; j >= 0; j--) usb_kill_urb(cam->urb[j]); DBG(1, "usb_submit_urb() failed, error %d", err); goto free_urbs; } } return 0; free_urbs: for (i = 0; (i < ET61X251_URBS) && cam->urb[i]; i++) usb_free_urb(cam->urb[i]); free_buffers: for (i = 0; (i < ET61X251_URBS) && cam->transfer_buffer[i]; i++) kfree(cam->transfer_buffer[i]); return err; } static int et61x251_stop_transfer(struct et61x251_device* cam) { struct usb_device *udev = cam->usbdev; s8 i; int err = 0; if (cam->state & DEV_DISCONNECTED) return 0; for (i = ET61X251_URBS-1; i >= 0; i--) { usb_kill_urb(cam->urb[i]); usb_free_urb(cam->urb[i]); kfree(cam->transfer_buffer[i]); } err = usb_set_interface(udev, 0, 0); /* 0 Mb/s */ if (err) DBG(3, "usb_set_interface() failed"); return err; } static int et61x251_stream_interrupt(struct et61x251_device* cam) { long timeout; cam->stream = STREAM_INTERRUPT; timeout = wait_event_timeout(cam->wait_stream, (cam->stream == STREAM_OFF) || (cam->state & DEV_DISCONNECTED), ET61X251_URB_TIMEOUT); if (cam->state & DEV_DISCONNECTED) return -ENODEV; else if (cam->stream != STREAM_OFF) { cam->state |= DEV_MISCONFIGURED; DBG(1, "URB timeout reached. The camera is misconfigured. 
To " "use it, close and open %s again.", video_device_node_name(cam->v4ldev)); return -EIO; } return 0; } /*****************************************************************************/ #ifdef CONFIG_VIDEO_ADV_DEBUG static int et61x251_i2c_try_read(struct et61x251_device* cam, const struct et61x251_sensor* sensor, u8 address) { struct usb_device* udev = cam->usbdev; u8* data = cam->control_buffer; int err = 0, res; data[0] = address; data[1] = cam->sensor.i2c_slave_id; data[2] = cam->sensor.rsta | 0x10; data[3] = !(et61x251_read_reg(cam, 0x8b) & 0x02); res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41, 0, 0x88, data, 4, ET61X251_CTRL_TIMEOUT); if (res < 0) err += res; err += et61x251_i2c_wait(cam, sensor); res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x00, 0xc1, 0, 0x80, data, 8, ET61X251_CTRL_TIMEOUT); if (res < 0) err += res; if (err) DBG(3, "I2C read failed for %s image sensor", sensor->name); PDBGG("I2C read: address 0x%02X, value: 0x%02X", address, data[0]); return err ? -1 : (int)data[0]; } static int et61x251_i2c_try_write(struct et61x251_device* cam, const struct et61x251_sensor* sensor, u8 address, u8 value) { struct usb_device* udev = cam->usbdev; u8* data = cam->control_buffer; int err = 0, res; data[0] = address; data[1] = cam->sensor.i2c_slave_id; data[2] = cam->sensor.rsta | 0x12; res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41, 0, 0x88, data, 3, ET61X251_CTRL_TIMEOUT); if (res < 0) err += res; data[0] = value; res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41, 0, 0x80, data, 1, ET61X251_CTRL_TIMEOUT); if (res < 0) err += res; err += et61x251_i2c_wait(cam, sensor); if (err) DBG(3, "I2C write failed for %s image sensor", sensor->name); PDBGG("I2C write: address 0x%02X, value: 0x%02X", address, value); return err ? 
-1 : 0; } static int et61x251_i2c_read(struct et61x251_device* cam, u8 address) { return et61x251_i2c_try_read(cam, &cam->sensor, address); } static int et61x251_i2c_write(struct et61x251_device* cam, u8 address, u8 value) { return et61x251_i2c_try_write(cam, &cam->sensor, address, value); } static u8 et61x251_strtou8(const char* buff, size_t len, ssize_t* count) { char str[5]; char* endp; unsigned long val; if (len < 4) { strncpy(str, buff, len); str[len] = '\0'; } else { strncpy(str, buff, 4); str[4] = '\0'; } val = simple_strtoul(str, &endp, 0); *count = 0; if (val <= 0xff) *count = (ssize_t)(endp - str); if ((*count) && (len == *count+1) && (buff[*count] == '\n')) *count += 1; return (u8)val; } /* NOTE 1: being inside one of the following methods implies that the v4l device exists for sure (see kobjects and reference counters) NOTE 2: buffers are PAGE_SIZE long */ static ssize_t et61x251_show_reg(struct device* cd, struct device_attribute *attr, char* buf) { struct et61x251_device* cam; ssize_t count; if (mutex_lock_interruptible(&et61x251_sysfs_lock)) return -ERESTARTSYS; cam = video_get_drvdata(to_video_device(cd)); if (!cam) { mutex_unlock(&et61x251_sysfs_lock); return -ENODEV; } count = sprintf(buf, "%u\n", cam->sysfs.reg); mutex_unlock(&et61x251_sysfs_lock); return count; } static ssize_t et61x251_store_reg(struct device* cd, struct device_attribute *attr, const char* buf, size_t len) { struct et61x251_device* cam; u8 index; ssize_t count; if (mutex_lock_interruptible(&et61x251_sysfs_lock)) return -ERESTARTSYS; cam = video_get_drvdata(to_video_device(cd)); if (!cam) { mutex_unlock(&et61x251_sysfs_lock); return -ENODEV; } index = et61x251_strtou8(buf, len, &count); if (index > 0x8e || !count) { mutex_unlock(&et61x251_sysfs_lock); return -EINVAL; } cam->sysfs.reg = index; DBG(2, "Moved ET61X[12]51 register index to 0x%02X", cam->sysfs.reg); DBG(3, "Written bytes: %zd", count); mutex_unlock(&et61x251_sysfs_lock); return count; } static ssize_t 
et61x251_show_val(struct device* cd, struct device_attribute *attr, char* buf) { struct et61x251_device* cam; ssize_t count; int val; if (mutex_lock_interruptible(&et61x251_sysfs_lock)) return -ERESTARTSYS; cam = video_get_drvdata(to_video_device(cd)); if (!cam) { mutex_unlock(&et61x251_sysfs_lock); return -ENODEV; } if ((val = et61x251_read_reg(cam, cam->sysfs.reg)) < 0) { mutex_unlock(&et61x251_sysfs_lock); return -EIO; } count = sprintf(buf, "%d\n", val); DBG(3, "Read bytes: %zd", count); mutex_unlock(&et61x251_sysfs_lock); return count; } static ssize_t et61x251_store_val(struct device* cd, struct device_attribute *attr, const char* buf, size_t len) { struct et61x251_device* cam; u8 value; ssize_t count; int err; if (mutex_lock_interruptible(&et61x251_sysfs_lock)) return -ERESTARTSYS; cam = video_get_drvdata(to_video_device(cd)); if (!cam) { mutex_unlock(&et61x251_sysfs_lock); return -ENODEV; } value = et61x251_strtou8(buf, len, &count); if (!count) { mutex_unlock(&et61x251_sysfs_lock); return -EINVAL; } err = et61x251_write_reg(cam, value, cam->sysfs.reg); if (err) { mutex_unlock(&et61x251_sysfs_lock); return -EIO; } DBG(2, "Written ET61X[12]51 reg. 0x%02X, val. 
0x%02X", cam->sysfs.reg, value); DBG(3, "Written bytes: %zd", count); mutex_unlock(&et61x251_sysfs_lock); return count; } static ssize_t et61x251_show_i2c_reg(struct device* cd, struct device_attribute *attr, char* buf) { struct et61x251_device* cam; ssize_t count; if (mutex_lock_interruptible(&et61x251_sysfs_lock)) return -ERESTARTSYS; cam = video_get_drvdata(to_video_device(cd)); if (!cam) { mutex_unlock(&et61x251_sysfs_lock); return -ENODEV; } count = sprintf(buf, "%u\n", cam->sysfs.i2c_reg); DBG(3, "Read bytes: %zd", count); mutex_unlock(&et61x251_sysfs_lock); return count; } static ssize_t et61x251_store_i2c_reg(struct device* cd, struct device_attribute *attr, const char* buf, size_t len) { struct et61x251_device* cam; u8 index; ssize_t count; if (mutex_lock_interruptible(&et61x251_sysfs_lock)) return -ERESTARTSYS; cam = video_get_drvdata(to_video_device(cd)); if (!cam) { mutex_unlock(&et61x251_sysfs_lock); return -ENODEV; } index = et61x251_strtou8(buf, len, &count); if (!count) { mutex_unlock(&et61x251_sysfs_lock); return -EINVAL; } cam->sysfs.i2c_reg = index; DBG(2, "Moved sensor register index to 0x%02X", cam->sysfs.i2c_reg); DBG(3, "Written bytes: %zd", count); mutex_unlock(&et61x251_sysfs_lock); return count; } static ssize_t et61x251_show_i2c_val(struct device* cd, struct device_attribute *attr, char* buf) { struct et61x251_device* cam; ssize_t count; int val; if (mutex_lock_interruptible(&et61x251_sysfs_lock)) return -ERESTARTSYS; cam = video_get_drvdata(to_video_device(cd)); if (!cam) { mutex_unlock(&et61x251_sysfs_lock); return -ENODEV; } if (!(cam->sensor.sysfs_ops & ET61X251_I2C_READ)) { mutex_unlock(&et61x251_sysfs_lock); return -ENOSYS; } if ((val = et61x251_i2c_read(cam, cam->sysfs.i2c_reg)) < 0) { mutex_unlock(&et61x251_sysfs_lock); return -EIO; } count = sprintf(buf, "%d\n", val); DBG(3, "Read bytes: %zd", count); mutex_unlock(&et61x251_sysfs_lock); return count; } static ssize_t et61x251_store_i2c_val(struct device* cd, struct 
device_attribute *attr, const char* buf, size_t len) { struct et61x251_device* cam; u8 value; ssize_t count; int err; if (mutex_lock_interruptible(&et61x251_sysfs_lock)) return -ERESTARTSYS; cam = video_get_drvdata(to_video_device(cd)); if (!cam) { mutex_unlock(&et61x251_sysfs_lock); return -ENODEV; } if (!(cam->sensor.sysfs_ops & ET61X251_I2C_READ)) { mutex_unlock(&et61x251_sysfs_lock); return -ENOSYS; } value = et61x251_strtou8(buf, len, &count); if (!count) { mutex_unlock(&et61x251_sysfs_lock); return -EINVAL; } err = et61x251_i2c_write(cam, cam->sysfs.i2c_reg, value); if (err) { mutex_unlock(&et61x251_sysfs_lock); return -EIO; } DBG(2, "Written sensor reg. 0x%02X, val. 0x%02X", cam->sysfs.i2c_reg, value); DBG(3, "Written bytes: %zd", count); mutex_unlock(&et61x251_sysfs_lock); return count; } static DEVICE_ATTR(reg, S_IRUGO | S_IWUSR, et61x251_show_reg, et61x251_store_reg); static DEVICE_ATTR(val, S_IRUGO | S_IWUSR, et61x251_show_val, et61x251_store_val); static DEVICE_ATTR(i2c_reg, S_IRUGO | S_IWUSR, et61x251_show_i2c_reg, et61x251_store_i2c_reg); static DEVICE_ATTR(i2c_val, S_IRUGO | S_IWUSR, et61x251_show_i2c_val, et61x251_store_i2c_val); static int et61x251_create_sysfs(struct et61x251_device* cam) { struct device *classdev = &(cam->v4ldev->dev); int err = 0; if ((err = device_create_file(classdev, &dev_attr_reg))) goto err_out; if ((err = device_create_file(classdev, &dev_attr_val))) goto err_reg; if (cam->sensor.sysfs_ops) { if ((err = device_create_file(classdev, &dev_attr_i2c_reg))) goto err_val; if ((err = device_create_file(classdev, &dev_attr_i2c_val))) goto err_i2c_reg; } err_i2c_reg: if (cam->sensor.sysfs_ops) device_remove_file(classdev, &dev_attr_i2c_reg); err_val: device_remove_file(classdev, &dev_attr_val); err_reg: device_remove_file(classdev, &dev_attr_reg); err_out: return err; } #endif /* CONFIG_VIDEO_ADV_DEBUG */ /*****************************************************************************/ static int et61x251_set_pix_format(struct 
et61x251_device* cam, struct v4l2_pix_format* pix) { int r, err = 0; if ((r = et61x251_read_reg(cam, 0x12)) < 0) err += r; if (pix->pixelformat == V4L2_PIX_FMT_ET61X251) err += et61x251_write_reg(cam, r & 0xfd, 0x12); else err += et61x251_write_reg(cam, r | 0x02, 0x12); return err ? -EIO : 0; } static int et61x251_set_compression(struct et61x251_device* cam, struct v4l2_jpegcompression* compression) { int r, err = 0; if ((r = et61x251_read_reg(cam, 0x12)) < 0) err += r; if (compression->quality == 0) err += et61x251_write_reg(cam, r & 0xfb, 0x12); else err += et61x251_write_reg(cam, r | 0x04, 0x12); return err ? -EIO : 0; } static int et61x251_set_scale(struct et61x251_device* cam, u8 scale) { int r = 0, err = 0; r = et61x251_read_reg(cam, 0x12); if (r < 0) err += r; if (scale == 1) err += et61x251_write_reg(cam, r & ~0x01, 0x12); else if (scale == 2) err += et61x251_write_reg(cam, r | 0x01, 0x12); if (err) return -EIO; PDBGG("Scaling factor: %u", scale); return 0; } static int et61x251_set_crop(struct et61x251_device* cam, struct v4l2_rect* rect) { struct et61x251_sensor* s = &cam->sensor; u16 fmw_sx = (u16)(rect->left - s->cropcap.bounds.left + s->active_pixel.left), fmw_sy = (u16)(rect->top - s->cropcap.bounds.top + s->active_pixel.top), fmw_length = (u16)(rect->width), fmw_height = (u16)(rect->height); int err = 0; err += et61x251_write_reg(cam, fmw_sx & 0xff, 0x69); err += et61x251_write_reg(cam, fmw_sy & 0xff, 0x6a); err += et61x251_write_reg(cam, fmw_length & 0xff, 0x6b); err += et61x251_write_reg(cam, fmw_height & 0xff, 0x6c); err += et61x251_write_reg(cam, (fmw_sx >> 8) | ((fmw_sy & 0x300) >> 6) | ((fmw_length & 0x300) >> 4) | ((fmw_height & 0x300) >> 2), 0x6d); if (err) return -EIO; PDBGG("fmw_sx, fmw_sy, fmw_length, fmw_height: %u %u %u %u", fmw_sx, fmw_sy, fmw_length, fmw_height); return 0; } static int et61x251_init(struct et61x251_device* cam) { struct et61x251_sensor* s = &cam->sensor; struct v4l2_control ctrl; struct v4l2_queryctrl *qctrl; struct 
v4l2_rect* rect; u8 i = 0; int err = 0; if (!(cam->state & DEV_INITIALIZED)) { mutex_init(&cam->open_mutex); init_waitqueue_head(&cam->wait_open); qctrl = s->qctrl; rect = &(s->cropcap.defrect); cam->compression.quality = ET61X251_COMPRESSION_QUALITY; } else { /* use current values */ qctrl = s->_qctrl; rect = &(s->_rect); } err += et61x251_set_scale(cam, rect->width / s->pix_format.width); err += et61x251_set_crop(cam, rect); if (err) return err; if (s->init) { err = s->init(cam); if (err) { DBG(3, "Sensor initialization failed"); return err; } } err += et61x251_set_compression(cam, &cam->compression); err += et61x251_set_pix_format(cam, &s->pix_format); if (s->set_pix_format) err += s->set_pix_format(cam, &s->pix_format); if (err) return err; if (s->pix_format.pixelformat == V4L2_PIX_FMT_ET61X251) DBG(3, "Compressed video format is active, quality %d", cam->compression.quality); else DBG(3, "Uncompressed video format is active"); if (s->set_crop) if ((err = s->set_crop(cam, rect))) { DBG(3, "set_crop() failed"); return err; } if (s->set_ctrl) { for (i = 0; i < ARRAY_SIZE(s->qctrl); i++) if (s->qctrl[i].id != 0 && !(s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED)) { ctrl.id = s->qctrl[i].id; ctrl.value = qctrl[i].default_value; err = s->set_ctrl(cam, &ctrl); if (err) { DBG(3, "Set %s control failed", s->qctrl[i].name); return err; } DBG(3, "Image sensor supports '%s' control", s->qctrl[i].name); } } if (!(cam->state & DEV_INITIALIZED)) { mutex_init(&cam->fileop_mutex); spin_lock_init(&cam->queue_lock); init_waitqueue_head(&cam->wait_frame); init_waitqueue_head(&cam->wait_stream); cam->nreadbuffers = 2; memcpy(s->_qctrl, s->qctrl, sizeof(s->qctrl)); memcpy(&(s->_rect), &(s->cropcap.defrect), sizeof(struct v4l2_rect)); cam->state |= DEV_INITIALIZED; } DBG(2, "Initialization succeeded"); return 0; } /*****************************************************************************/ static void et61x251_release_resources(struct kref *kref) { struct et61x251_device *cam; 
mutex_lock(&et61x251_sysfs_lock);

    cam = container_of(kref, struct et61x251_device, kref);

    DBG(2, "V4L2 device %s deregistered",
        video_device_node_name(cam->v4ldev));
    video_set_drvdata(cam->v4ldev, NULL);
    video_unregister_device(cam->v4ldev);
    usb_put_dev(cam->usbdev);
    kfree(cam->control_buffer);
    kfree(cam);

    mutex_unlock(&et61x251_sysfs_lock);
}

/*
 * open() file operation.  Only one opener is supported: a second blocking
 * open() sleeps on wait_open until the device is released or unplugged.
 * Re-runs et61x251_init() if a previous ioctl left the device
 * DEV_MISCONFIGURED, then starts the USB transfer.
 */
static int et61x251_open(struct file *filp)
{
    struct et61x251_device* cam;
    int err = 0;

    if (!down_read_trylock(&et61x251_dev_lock))
        return -ERESTARTSYS;

    cam = video_drvdata(filp);

    if (wait_for_completion_interruptible(&cam->probe)) {
        up_read(&et61x251_dev_lock);
        return -ERESTARTSYS;
    }

    kref_get(&cam->kref);

    if (mutex_lock_interruptible(&cam->open_mutex)) {
        kref_put(&cam->kref, et61x251_release_resources);
        up_read(&et61x251_dev_lock);
        return -ERESTARTSYS;
    }

    if (cam->state & DEV_DISCONNECTED) {
        DBG(1, "Device not present");
        err = -ENODEV;
        goto out;
    }

    if (cam->users) {
        DBG(2, "Device %s is already in use",
            video_device_node_name(cam->v4ldev));
        DBG(3, "Simultaneous opens are not supported");
        if ((filp->f_flags & O_NONBLOCK) ||
            (filp->f_flags & O_NDELAY)) {
            err = -EWOULDBLOCK;
            goto out;
        }
        DBG(2, "A blocking open() has been requested. Wait for the "
               "device to be released...");
        /* dev_lock is dropped while sleeping so release() can run. */
        up_read(&et61x251_dev_lock);
        err = wait_event_interruptible_exclusive(cam->wait_open,
                                        (cam->state & DEV_DISCONNECTED)
                                        || !cam->users);
        down_read(&et61x251_dev_lock);
        if (err)
            goto out;
        if (cam->state & DEV_DISCONNECTED) {
            err = -ENODEV;
            goto out;
        }
    }

    if (cam->state & DEV_MISCONFIGURED) {
        err = et61x251_init(cam);
        if (err) {
            DBG(1, "Initialization failed again. "
                   "I will retry on next open().");
            goto out;
        }
        cam->state &= ~DEV_MISCONFIGURED;
    }

    if ((err = et61x251_start_transfer(cam)))
        goto out;

    filp->private_data = cam;
    cam->users++;
    cam->io = IO_NONE;
    cam->stream = STREAM_OFF;
    cam->nbuffers = 0;
    cam->frame_count = 0;
    et61x251_empty_framequeues(cam);

    DBG(3, "Video device %s is open",
        video_device_node_name(cam->v4ldev));

out:
    mutex_unlock(&cam->open_mutex);
    if (err)
        kref_put(&cam->kref, et61x251_release_resources);
    up_read(&et61x251_dev_lock);
    return err;
}

/*
 * release() file operation: stops the transfer, frees the frame buffers
 * and wakes exactly one blocked opener.
 */
static int et61x251_release(struct file *filp)
{
    struct et61x251_device* cam;

    down_write(&et61x251_dev_lock);

    cam = video_drvdata(filp);

    et61x251_stop_transfer(cam);
    et61x251_release_buffers(cam);
    cam->users--;
    wake_up_interruptible_nr(&cam->wait_open, 1);

    DBG(3, "Video device %s closed",
        video_device_node_name(cam->v4ldev));

    kref_put(&cam->kref, et61x251_release_resources);

    up_write(&et61x251_dev_lock);

    return 0;
}

/*
 * read() file operation.  Lazily switches the device into IO_READ mode,
 * waits (up to the module frame timeout) for a completed frame, copies
 * the newest frame to userspace and recycles the whole outqueue.
 */
static ssize_t
et61x251_read(struct file* filp, char __user * buf,
              size_t count, loff_t* f_pos)
{
    struct et61x251_device *cam = video_drvdata(filp);
    struct et61x251_frame_t* f, * i;
    unsigned long lock_flags;
    long timeout;
    int err = 0;

    if (mutex_lock_interruptible(&cam->fileop_mutex))
        return -ERESTARTSYS;

    if (cam->state & DEV_DISCONNECTED) {
        DBG(1, "Device not present");
        mutex_unlock(&cam->fileop_mutex);
        return -ENODEV;
    }

    if (cam->state & DEV_MISCONFIGURED) {
        DBG(1, "The camera is misconfigured. Close and open it "
               "again.");
        mutex_unlock(&cam->fileop_mutex);
        return -EIO;
    }

    /* read() and mmap() I/O are mutually exclusive per open(). */
    if (cam->io == IO_MMAP) {
        DBG(3, "Close and open the device again to choose the read "
               "method");
        mutex_unlock(&cam->fileop_mutex);
        return -EBUSY;
    }

    if (cam->io == IO_NONE) {
        if (!et61x251_request_buffers(cam, cam->nreadbuffers,
                                      IO_READ)) {
            DBG(1, "read() failed, not enough memory");
            mutex_unlock(&cam->fileop_mutex);
            return -ENOMEM;
        }
        cam->io = IO_READ;
        cam->stream = STREAM_ON;
    }

    if (list_empty(&cam->inqueue)) {
        if (!list_empty(&cam->outqueue))
            et61x251_empty_framequeues(cam);
        et61x251_queue_unusedframes(cam);
    }

    if (!count) {
        mutex_unlock(&cam->fileop_mutex);
        return 0;
    }

    if (list_empty(&cam->outqueue)) {
        if (filp->f_flags & O_NONBLOCK) {
            mutex_unlock(&cam->fileop_mutex);
            return -EAGAIN;
        }
        timeout = wait_event_interruptible_timeout
                  ( cam->wait_frame,
                    (!list_empty(&cam->outqueue)) ||
                    (cam->state & DEV_DISCONNECTED) ||
                    (cam->state & DEV_MISCONFIGURED),
                    msecs_to_jiffies(
                        cam->module_param.frame_timeout * 1000
                    ) );
        if (timeout < 0) {
            mutex_unlock(&cam->fileop_mutex);
            return timeout;
        }
        if (cam->state & DEV_DISCONNECTED) {
            mutex_unlock(&cam->fileop_mutex);
            return -ENODEV;
        }
        if (!timeout || (cam->state & DEV_MISCONFIGURED)) {
            mutex_unlock(&cam->fileop_mutex);
            return -EIO;
        }
    }

    /* Deliver the most recent frame (tail of the outqueue). */
    f = list_entry(cam->outqueue.prev, struct et61x251_frame_t, frame);

    if (count > f->buf.bytesused)
        count = f->buf.bytesused;

    if (copy_to_user(buf, f->bufmem, count)) {
        err = -EFAULT;
        goto exit;
    }
    *f_pos += count;

exit:
    /* All delivered-or-skipped frames go back to the unused pool. */
    spin_lock_irqsave(&cam->queue_lock, lock_flags);
    list_for_each_entry(i, &cam->outqueue, frame)
        i->state = F_UNUSED;
    INIT_LIST_HEAD(&cam->outqueue);
    spin_unlock_irqrestore(&cam->queue_lock, lock_flags);

    et61x251_queue_unusedframes(cam);

    PDBGG("Frame #%lu, bytes read: %zu",
          (unsigned long)f->buf.index, count);

    mutex_unlock(&cam->fileop_mutex);

    return err ? 
err : count;
}

/*
 * poll() file operation: reports POLLIN|POLLRDNORM when a completed frame
 * is waiting.  Like read(), it lazily switches the device into IO_READ
 * mode and recycles stale frames before sleeping.
 */
static unsigned int et61x251_poll(struct file *filp, poll_table *wait)
{
    struct et61x251_device *cam = video_drvdata(filp);
    struct et61x251_frame_t* f;
    unsigned long lock_flags;
    unsigned int mask = 0;

    if (mutex_lock_interruptible(&cam->fileop_mutex))
        return POLLERR;

    if (cam->state & DEV_DISCONNECTED) {
        DBG(1, "Device not present");
        goto error;
    }

    if (cam->state & DEV_MISCONFIGURED) {
        DBG(1, "The camera is misconfigured. Close and open it "
               "again.");
        goto error;
    }

    if (cam->io == IO_NONE) {
        if (!et61x251_request_buffers(cam, cam->nreadbuffers,
                                      IO_READ)) {
            DBG(1, "poll() failed, not enough memory");
            goto error;
        }
        cam->io = IO_READ;
        cam->stream = STREAM_ON;
    }

    if (cam->io == IO_READ) {
        spin_lock_irqsave(&cam->queue_lock, lock_flags);
        list_for_each_entry(f, &cam->outqueue, frame)
            f->state = F_UNUSED;
        INIT_LIST_HEAD(&cam->outqueue);
        spin_unlock_irqrestore(&cam->queue_lock, lock_flags);
        et61x251_queue_unusedframes(cam);
    }

    poll_wait(filp, &cam->wait_frame, wait);

    if (!list_empty(&cam->outqueue))
        mask |= POLLIN | POLLRDNORM;

    mutex_unlock(&cam->fileop_mutex);

    return mask;

error:
    mutex_unlock(&cam->fileop_mutex);
    return POLLERR;
}

/* VMA open: one more userspace mapping of this frame buffer. */
static void et61x251_vm_open(struct vm_area_struct* vma)
{
    struct et61x251_frame_t* f = vma->vm_private_data;
    f->vma_use_count++;
}

static void et61x251_vm_close(struct vm_area_struct* vma)
{
    /* NOTE: buffers are not freed here */
    struct et61x251_frame_t* f = vma->vm_private_data;
    f->vma_use_count--;
}

static const struct vm_operations_struct et61x251_vm_ops = {
    .open = et61x251_vm_open,
    .close = et61x251_vm_close,
};

/*
 * mmap() file operation: maps one vmalloc'ed frame buffer (selected by
 * vm_pgoff matching the buffer's reported m.offset) page-by-page with
 * vm_insert_page().  Requires IO_MMAP mode and an exact page-aligned size.
 */
static int et61x251_mmap(struct file* filp, struct vm_area_struct *vma)
{
    struct et61x251_device *cam = video_drvdata(filp);
    unsigned long size = vma->vm_end - vma->vm_start,
                  start = vma->vm_start;
    void *pos;
    u32 i;

    if (mutex_lock_interruptible(&cam->fileop_mutex))
        return -ERESTARTSYS;

    if (cam->state & DEV_DISCONNECTED) {
        DBG(1, "Device not present");
        mutex_unlock(&cam->fileop_mutex);
        return -ENODEV;
    }

    if (cam->state & DEV_MISCONFIGURED) {
        DBG(1, "The camera is misconfigured. Close and open it "
               "again.");
        mutex_unlock(&cam->fileop_mutex);
        return -EIO;
    }

    if (!(vma->vm_flags & (VM_WRITE | VM_READ))) {
        mutex_unlock(&cam->fileop_mutex);
        return -EACCES;
    }

    if (cam->io != IO_MMAP ||
        size != PAGE_ALIGN(cam->frame[0].buf.length)) {
        mutex_unlock(&cam->fileop_mutex);
        return -EINVAL;
    }

    for (i = 0; i < cam->nbuffers; i++) {
        if ((cam->frame[i].buf.m.offset>>PAGE_SHIFT) == vma->vm_pgoff)
            break;
    }
    if (i == cam->nbuffers) {
        mutex_unlock(&cam->fileop_mutex);
        return -EINVAL;
    }

    vma->vm_flags |= VM_IO;
    vma->vm_flags |= VM_RESERVED;

    pos = cam->frame[i].bufmem;
    while (size > 0) { /* size is page-aligned */
        if (vm_insert_page(vma, start, vmalloc_to_page(pos))) {
            mutex_unlock(&cam->fileop_mutex);
            return -EAGAIN;
        }
        start += PAGE_SIZE;
        pos += PAGE_SIZE;
        size -= PAGE_SIZE;
    }

    vma->vm_ops = &et61x251_vm_ops;
    vma->vm_private_data = &cam->frame[i];
    et61x251_vm_open(vma);

    mutex_unlock(&cam->fileop_mutex);

    return 0;
}

/*****************************************************************************/

/* VIDIOC_QUERYCAP: static capability report for this single-input camera. */
static int et61x251_vidioc_querycap(struct et61x251_device* cam,
                                    void __user * arg)
{
    struct v4l2_capability cap = {
        .driver = "et61x251",
        .version = ET61X251_MODULE_VERSION_CODE,
        .capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
                        V4L2_CAP_STREAMING,
    };

    strlcpy(cap.card, cam->v4ldev->name, sizeof(cap.card));
    if (usb_make_path(cam->usbdev, cap.bus_info, sizeof(cap.bus_info)) < 0)
        strlcpy(cap.bus_info, dev_name(&cam->usbdev->dev),
                sizeof(cap.bus_info));

    if (copy_to_user(arg, &cap, sizeof(cap)))
        return -EFAULT;

    return 0;
}

/* VIDIOC_ENUMINPUT: exactly one input, the camera itself. */
static int et61x251_vidioc_enuminput(struct et61x251_device* cam,
                                     void __user * arg)
{
    struct v4l2_input i;

    if (copy_from_user(&i, arg, sizeof(i)))
        return -EFAULT;

    if (i.index)
        return -EINVAL;

    memset(&i, 0, sizeof(i));
    strcpy(i.name, "Camera");
    i.type = V4L2_INPUT_TYPE_CAMERA;
    i.capabilities = V4L2_IN_CAP_STD;

    if (copy_to_user(arg, &i, sizeof(i)))
        return -EFAULT;

    return 0;
}

/* VIDIOC_G_INPUT: the only input is index 0. */
static int
et61x251_vidioc_g_input(struct et61x251_device* cam, void __user * arg)
{
    int index = 0;

    if (copy_to_user(arg, &index, sizeof(index)))
        return -EFAULT;

    return 0;
}

/* VIDIOC_S_INPUT: accept only index 0. */
static int et61x251_vidioc_s_input(struct et61x251_device* cam,
                                   void __user * arg)
{
    int index;

    if (copy_from_user(&index, arg, sizeof(index)))
        return -EFAULT;

    if (index != 0)
        return -EINVAL;

    return 0;
}

/* VIDIOC_QUERYCTRL: look the id up in the sensor's control table. */
static int et61x251_vidioc_query_ctrl(struct et61x251_device* cam,
                                      void __user * arg)
{
    struct et61x251_sensor* s = &cam->sensor;
    struct v4l2_queryctrl qc;
    u8 i;

    if (copy_from_user(&qc, arg, sizeof(qc)))
        return -EFAULT;

    for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
        if (qc.id && qc.id == s->qctrl[i].id) {
            memcpy(&qc, &(s->qctrl[i]), sizeof(qc));
            if (copy_to_user(arg, &qc, sizeof(qc)))
                return -EFAULT;
            return 0;
        }

    return -EINVAL;
}

/*
 * VIDIOC_G_CTRL: ask the sensor if it can report the value; otherwise
 * fall back to the cached shadow value in _qctrl[].
 */
static int et61x251_vidioc_g_ctrl(struct et61x251_device* cam,
                                  void __user * arg)
{
    struct et61x251_sensor* s = &cam->sensor;
    struct v4l2_control ctrl;
    int err = 0;
    u8 i;

    if (!s->get_ctrl && !s->set_ctrl)
        return -EINVAL;

    if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
        return -EFAULT;

    if (!s->get_ctrl) {
        for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
            if (ctrl.id == s->qctrl[i].id) {
                ctrl.value = s->_qctrl[i].default_value;
                goto exit;
            }
        return -EINVAL;
    } else
        err = s->get_ctrl(cam, &ctrl);

exit:
    if (copy_to_user(arg, &ctrl, sizeof(ctrl)))
        return -EFAULT;

    return err;
}

/*
 * VIDIOC_S_CTRL: range-check and step-align the value, then forward it
 * to the sensor.  (Definition continues in the next chunk.)
 */
static int et61x251_vidioc_s_ctrl(struct et61x251_device* cam,
                                  void __user * arg)
{
    struct et61x251_sensor* s = &cam->sensor;
    struct v4l2_control ctrl;
    u8 i;
    int err = 0;

    if (!s->set_ctrl)
        return -EINVAL;

    if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
        return -EFAULT;

    for (i = 0; i < ARRAY_SIZE(s->qctrl); i++) {
        if (ctrl.id == s->qctrl[i].id) {
            if (s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED)
                return -EINVAL;
            if (ctrl.value < s->qctrl[i].minimum ||
                ctrl.value > s->qctrl[i].maximum)
                return -ERANGE;
            ctrl.value -= ctrl.value % s->qctrl[i].step;
            break;
        }
    }
    if (i == ARRAY_SIZE(s->qctrl))
        return -EINVAL;
    if ((err = s->set_ctrl(cam, 
&ctrl)))
        return err;

    /* Remember the accepted value so g_ctrl can report it later. */
    s->_qctrl[i].default_value = ctrl.value;

    return 0;
}

/* VIDIOC_CROPCAP: report the sensor's crop capabilities, 1:1 pixel aspect. */
static int et61x251_vidioc_cropcap(struct et61x251_device* cam,
                                   void __user * arg)
{
    struct v4l2_cropcap* cc = &(cam->sensor.cropcap);

    cc->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    cc->pixelaspect.numerator = 1;
    cc->pixelaspect.denominator = 1;

    if (copy_to_user(arg, cc, sizeof(*cc)))
        return -EFAULT;

    return 0;
}

/* VIDIOC_G_CROP: return the current (shadowed) crop rectangle. */
static int et61x251_vidioc_g_crop(struct et61x251_device* cam,
                                  void __user * arg)
{
    struct et61x251_sensor* s = &cam->sensor;
    struct v4l2_crop crop = {
        .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
    };

    memcpy(&(crop.c), &(s->_rect), sizeof(struct v4l2_rect));

    if (copy_to_user(arg, &crop, sizeof(crop)))
        return -EFAULT;

    return 0;
}

/*
 * VIDIOC_S_CROP: clamp the requested rectangle to the sensor bounds
 * (multiples of 16, Bayer origin parity preserved), reprogram bridge and
 * sensor, and reallocate frame buffers.  On hardware failure the device
 * is flagged DEV_MISCONFIGURED — no rollback inside the ioctl.
 */
static int et61x251_vidioc_s_crop(struct et61x251_device* cam,
                                  void __user * arg)
{
    struct et61x251_sensor* s = &cam->sensor;
    struct v4l2_crop crop;
    struct v4l2_rect* rect;
    struct v4l2_rect* bounds = &(s->cropcap.bounds);
    struct v4l2_pix_format* pix_format = &(s->pix_format);
    u8 scale;
    const enum et61x251_stream_state stream = cam->stream;
    const u32 nbuffers = cam->nbuffers;
    u32 i;
    int err = 0;

    if (copy_from_user(&crop, arg, sizeof(crop)))
        return -EFAULT;

    rect = &(crop.c);

    if (crop.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
        return -EINVAL;

    if (cam->module_param.force_munmap)
        for (i = 0; i < cam->nbuffers; i++)
            if (cam->frame[i].vma_use_count) {
                DBG(3, "VIDIOC_S_CROP failed. "
                       "Unmap the buffers first.");
                return -EBUSY;
            }

    /* Preserve R,G or B origin */
    rect->left = (s->_rect.left & 1L) ? rect->left | 1L : rect->left & ~1L;
    rect->top = (s->_rect.top & 1L) ? rect->top | 1L : rect->top & ~1L;

    if (rect->width < 16)
        rect->width = 16;
    if (rect->height < 16)
        rect->height = 16;
    if (rect->width > bounds->width)
        rect->width = bounds->width;
    if (rect->height > bounds->height)
        rect->height = bounds->height;
    if (rect->left < bounds->left)
        rect->left = bounds->left;
    if (rect->top < bounds->top)
        rect->top = bounds->top;
    if (rect->left + rect->width > bounds->left + bounds->width)
        rect->left = bounds->left+bounds->width - rect->width;
    if (rect->top + rect->height > bounds->top + bounds->height)
        rect->top = bounds->top+bounds->height - rect->height;

    rect->width &= ~15L;
    rect->height &= ~15L;

    if (ET61X251_PRESERVE_IMGSCALE) {
        /* Calculate the actual scaling factor */
        u32 a, b;
        a = rect->width * rect->height;
        b = pix_format->width * pix_format->height;
        scale = b ? (u8)((a / b) < 4 ? 1 : 2) : 1;
    } else
        scale = 1;

    if (cam->stream == STREAM_ON)
        if ((err = et61x251_stream_interrupt(cam)))
            return err;

    if (copy_to_user(arg, &crop, sizeof(crop))) {
        cam->stream = stream;
        return -EFAULT;
    }

    if (cam->module_param.force_munmap || cam->io == IO_READ)
        et61x251_release_buffers(cam);

    err = et61x251_set_crop(cam, rect);
    if (s->set_crop)
        err += s->set_crop(cam, rect);
    err += et61x251_set_scale(cam, scale);

    if (err) { /* atomic, no rollback in ioctl() */
        cam->state |= DEV_MISCONFIGURED;
        DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
               "use the camera, close and open %s again.",
            video_device_node_name(cam->v4ldev));
        return -EIO;
    }

    s->pix_format.width = rect->width/scale;
    s->pix_format.height = rect->height/scale;
    memcpy(&(s->_rect), rect, sizeof(*rect));

    if ((cam->module_param.force_munmap || cam->io == IO_READ) &&
        nbuffers != et61x251_request_buffers(cam, nbuffers, cam->io)) {
        cam->state |= DEV_MISCONFIGURED;
        DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
               "use the camera, close and open %s again.",
            video_device_node_name(cam->v4ldev));
        return -ENOMEM;
    }

    if (cam->io == IO_READ)
        et61x251_empty_framequeues(cam);
    else if (cam->module_param.force_munmap)
        et61x251_requeue_outqueue(cam);

    cam->stream = stream;

    return 0;
}

/* VIDIOC_ENUM_FRAMESIZES: single stepwise entry, 16-pixel steps. */
static int et61x251_vidioc_enum_framesizes(struct et61x251_device* cam,
                                           void __user * arg)
{
    struct v4l2_frmsizeenum frmsize;

    if (copy_from_user(&frmsize, arg, sizeof(frmsize)))
        return -EFAULT;

    if (frmsize.index != 0)
        return -EINVAL;

    if (frmsize.pixel_format != V4L2_PIX_FMT_ET61X251 &&
        frmsize.pixel_format != V4L2_PIX_FMT_SBGGR8)
        return -EINVAL;

    frmsize.type = V4L2_FRMSIZE_TYPE_STEPWISE;

    frmsize.stepwise.min_width = frmsize.stepwise.step_width = 16;
    frmsize.stepwise.min_height = frmsize.stepwise.step_height = 16;
    frmsize.stepwise.max_width = cam->sensor.cropcap.bounds.width;
    frmsize.stepwise.max_height = cam->sensor.cropcap.bounds.height;

    memset(&frmsize.reserved, 0, sizeof(frmsize.reserved));

    if (copy_to_user(arg, &frmsize, sizeof(frmsize)))
        return -EFAULT;

    return 0;
}

/* VIDIOC_ENUM_FMT: index 0 = raw Bayer, index 1 = compressed. */
static int et61x251_vidioc_enum_fmt(struct et61x251_device* cam,
                                    void __user * arg)
{
    struct v4l2_fmtdesc fmtd;

    if (copy_from_user(&fmtd, arg, sizeof(fmtd)))
        return -EFAULT;

    if (fmtd.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
        return -EINVAL;

    if (fmtd.index == 0) {
        strcpy(fmtd.description, "bayer rgb");
        fmtd.pixelformat = V4L2_PIX_FMT_SBGGR8;
    } else if (fmtd.index == 1) {
        strcpy(fmtd.description, "compressed");
        fmtd.pixelformat = V4L2_PIX_FMT_ET61X251;
        fmtd.flags = V4L2_FMT_FLAG_COMPRESSED;
    } else
        return -EINVAL;

    fmtd.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    memset(&fmtd.reserved, 0, sizeof(fmtd.reserved));

    if (copy_to_user(arg, &fmtd, sizeof(fmtd)))
        return -EFAULT;

    return 0;
}

/*
 * VIDIOC_G_FMT: refresh the derived fields of the cached pix_format
 * (compressed frames report 0 bytesperline) and return it.
 */
static int et61x251_vidioc_g_fmt(struct et61x251_device* cam,
                                 void __user * arg)
{
    struct v4l2_format format;
    struct v4l2_pix_format* pfmt = &(cam->sensor.pix_format);

    if (copy_from_user(&format, arg, sizeof(format)))
        return -EFAULT;

    if (format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
        return -EINVAL;

    pfmt->colorspace = (pfmt->pixelformat == V4L2_PIX_FMT_ET61X251) ?
                       0 : V4L2_COLORSPACE_SRGB;
    pfmt->bytesperline = (pfmt->pixelformat==V4L2_PIX_FMT_ET61X251)
                         ? 0 : (pfmt->width * pfmt->priv) / 8;
    pfmt->sizeimage = pfmt->height * ((pfmt->width*pfmt->priv)/8);
    pfmt->field = V4L2_FIELD_NONE;
    memcpy(&(format.fmt.pix), pfmt, sizeof(*pfmt));

    if (copy_to_user(arg, &format, sizeof(format)))
        return -EFAULT;

    return 0;
}

/*
 * VIDIOC_TRY_FMT / VIDIOC_S_FMT (shared handler, distinguished by cmd).
 * Derives a crop rectangle and scale factor from the requested size,
 * clamps both, and — for S_FMT only — reprograms the hardware.
 * (Definition continues in the next chunk.)
 */
static int et61x251_vidioc_try_s_fmt(struct et61x251_device* cam,
                                     unsigned int cmd,
                                     void __user * arg)
{
    struct et61x251_sensor* s = &cam->sensor;
    struct v4l2_format format;
    struct v4l2_pix_format* pix;
    struct v4l2_pix_format* pfmt = &(s->pix_format);
    struct v4l2_rect* bounds = &(s->cropcap.bounds);
    struct v4l2_rect rect;
    u8 scale;
    const enum et61x251_stream_state stream = cam->stream;
    const u32 nbuffers = cam->nbuffers;
    u32 i;
    int err = 0;

    if (copy_from_user(&format, arg, sizeof(format)))
        return -EFAULT;

    pix = &(format.fmt.pix);

    if (format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
        return -EINVAL;

    memcpy(&rect, &(s->_rect), sizeof(rect));

    { /* calculate the actual scaling factor */
        u32 a, b;
        a = rect.width * rect.height;
        b = pix->width * pix->height;
        scale = b ? (u8)((a / b) < 4 ? 1 : 2) : 1;
    }

    rect.width = scale * pix->width;
    rect.height = scale * pix->height;

    if (rect.width < 16)
        rect.width = 16;
    if (rect.height < 16)
        rect.height = 16;
    if (rect.width > bounds->left + bounds->width - rect.left)
        rect.width = bounds->left + bounds->width - rect.left;
    if (rect.height > bounds->top + bounds->height - rect.top)
        rect.height = bounds->top + bounds->height - rect.top;

    rect.width &= ~15L;
    rect.height &= ~15L;

    { /* adjust the scaling factor */
        u32 a, b;
        a = rect.width * rect.height;
        b = pix->width * pix->height;
        scale = b ? (u8)((a / b) < 4 ? 
1 : 2) : 1;
    }

    pix->width = rect.width / scale;
    pix->height = rect.height / scale;

    if (pix->pixelformat != V4L2_PIX_FMT_ET61X251 &&
        pix->pixelformat != V4L2_PIX_FMT_SBGGR8)
        pix->pixelformat = pfmt->pixelformat;
    pix->priv = pfmt->priv; /* bpp */
    pix->colorspace = (pix->pixelformat == V4L2_PIX_FMT_ET61X251) ?
                      0 : V4L2_COLORSPACE_SRGB;
    /* NOTE(review): the next assignment immediately overwrites the
       colorspace computed above with the cached one — looks suspicious;
       verify against driver history before changing. */
    pix->colorspace = pfmt->colorspace;
    pix->bytesperline = (pix->pixelformat == V4L2_PIX_FMT_ET61X251)
                        ? 0 : (pix->width * pix->priv) / 8;
    pix->sizeimage = pix->height * ((pix->width * pix->priv) / 8);
    pix->field = V4L2_FIELD_NONE;

    /* TRY_FMT stops here: report the adjusted format, touch nothing. */
    if (cmd == VIDIOC_TRY_FMT) {
        if (copy_to_user(arg, &format, sizeof(format)))
            return -EFAULT;
        return 0;
    }

    if (cam->module_param.force_munmap)
        for (i = 0; i < cam->nbuffers; i++)
            if (cam->frame[i].vma_use_count) {
                DBG(3, "VIDIOC_S_FMT failed. "
                       "Unmap the buffers first.");
                return -EBUSY;
            }

    if (cam->stream == STREAM_ON)
        if ((err = et61x251_stream_interrupt(cam)))
            return err;

    if (copy_to_user(arg, &format, sizeof(format))) {
        cam->stream = stream;
        return -EFAULT;
    }

    if (cam->module_param.force_munmap || cam->io == IO_READ)
        et61x251_release_buffers(cam);

    err += et61x251_set_pix_format(cam, pix);
    err += et61x251_set_crop(cam, &rect);
    if (s->set_pix_format)
        err += s->set_pix_format(cam, pix);
    if (s->set_crop)
        err += s->set_crop(cam, &rect);
    err += et61x251_set_scale(cam, scale);

    if (err) { /* atomic, no rollback in ioctl() */
        cam->state |= DEV_MISCONFIGURED;
        DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
               "use the camera, close and open %s again.",
            video_device_node_name(cam->v4ldev));
        return -EIO;
    }

    memcpy(pfmt, pix, sizeof(*pix));
    memcpy(&(s->_rect), &rect, sizeof(rect));

    if ((cam->module_param.force_munmap || cam->io == IO_READ) &&
        nbuffers != et61x251_request_buffers(cam, nbuffers, cam->io)) {
        cam->state |= DEV_MISCONFIGURED;
        DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
               "use the camera, close and open %s again.",
            video_device_node_name(cam->v4ldev));
        return -ENOMEM;
    }

    if (cam->io == IO_READ)
        et61x251_empty_framequeues(cam);
    else if (cam->module_param.force_munmap)
        et61x251_requeue_outqueue(cam);

    cam->stream = stream;

    return 0;
}

/* VIDIOC_G_JPEGCOMP: return the cached compression settings. */
static int et61x251_vidioc_g_jpegcomp(struct et61x251_device* cam,
                                      void __user * arg)
{
    if (copy_to_user(arg, &cam->compression, sizeof(cam->compression)))
        return -EFAULT;

    return 0;
}

/*
 * VIDIOC_S_JPEGCOMP: only qualities 0 (off) and 1 (on) are accepted;
 * the bridge compression bit is reprogrammed with the stream paused.
 */
static int et61x251_vidioc_s_jpegcomp(struct et61x251_device* cam,
                                      void __user * arg)
{
    struct v4l2_jpegcompression jc;
    const enum et61x251_stream_state stream = cam->stream;
    int err = 0;

    if (copy_from_user(&jc, arg, sizeof(jc)))
        return -EFAULT;

    if (jc.quality != 0 && jc.quality != 1)
        return -EINVAL;

    if (cam->stream == STREAM_ON)
        if ((err = et61x251_stream_interrupt(cam)))
            return err;

    err += et61x251_set_compression(cam, &jc);
    if (err) { /* atomic, no rollback in ioctl() */
        cam->state |= DEV_MISCONFIGURED;
        DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
               "problems. To use the camera, close and open "
               "%s again.", video_device_node_name(cam->v4ldev));
        return -EIO;
    }

    cam->compression.quality = jc.quality;

    cam->stream = stream;

    return 0;
}

/*
 * VIDIOC_REQBUFS: (re)allocate mmap frame buffers.  A count of 0 drops
 * back to IO_NONE; buffers still mapped by userspace block the request.
 */
static int et61x251_vidioc_reqbufs(struct et61x251_device* cam,
                                   void __user * arg)
{
    struct v4l2_requestbuffers rb;
    u32 i;
    int err;

    if (copy_from_user(&rb, arg, sizeof(rb)))
        return -EFAULT;

    if (rb.type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
        rb.memory != V4L2_MEMORY_MMAP)
        return -EINVAL;

    if (cam->io == IO_READ) {
        DBG(3, "Close and open the device again to choose the mmap "
               "I/O method");
        return -EBUSY;
    }

    for (i = 0; i < cam->nbuffers; i++)
        if (cam->frame[i].vma_use_count) {
            DBG(3, "VIDIOC_REQBUFS failed. "
                   "Previous buffers are still mapped.");
            return -EBUSY;
        }

    if (cam->stream == STREAM_ON)
        if ((err = et61x251_stream_interrupt(cam)))
            return err;

    et61x251_empty_framequeues(cam);

    et61x251_release_buffers(cam);
    if (rb.count)
        rb.count = et61x251_request_buffers(cam, rb.count, IO_MMAP);

    if (copy_to_user(arg, &rb, sizeof(rb))) {
        et61x251_release_buffers(cam);
        cam->io = IO_NONE;
        return -EFAULT;
    }

    cam->io = rb.count ? IO_MMAP : IO_NONE;

    return 0;
}

/* VIDIOC_QUERYBUF: report one buffer's state, including MAPPED/DONE/QUEUED. */
static int et61x251_vidioc_querybuf(struct et61x251_device* cam,
                                    void __user * arg)
{
    struct v4l2_buffer b;

    if (copy_from_user(&b, arg, sizeof(b)))
        return -EFAULT;

    if (b.type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
        b.index >= cam->nbuffers || cam->io != IO_MMAP)
        return -EINVAL;

    memcpy(&b, &cam->frame[b.index].buf, sizeof(b));

    if (cam->frame[b.index].vma_use_count)
        b.flags |= V4L2_BUF_FLAG_MAPPED;

    if (cam->frame[b.index].state == F_DONE)
        b.flags |= V4L2_BUF_FLAG_DONE;
    else if (cam->frame[b.index].state != F_UNUSED)
        b.flags |= V4L2_BUF_FLAG_QUEUED;

    if (copy_to_user(arg, &b, sizeof(b)))
        return -EFAULT;

    return 0;
}

/* VIDIOC_QBUF: move an F_UNUSED buffer onto the capture inqueue. */
static int et61x251_vidioc_qbuf(struct et61x251_device* cam,
                                void __user * arg)
{
    struct v4l2_buffer b;
    unsigned long lock_flags;

    if (copy_from_user(&b, arg, sizeof(b)))
        return -EFAULT;

    if (b.type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
        b.index >= cam->nbuffers || cam->io != IO_MMAP)
        return -EINVAL;

    if (cam->frame[b.index].state != F_UNUSED)
        return -EINVAL;

    cam->frame[b.index].state = F_QUEUED;

    spin_lock_irqsave(&cam->queue_lock, lock_flags);
    list_add_tail(&cam->frame[b.index].frame, &cam->inqueue);
    spin_unlock_irqrestore(&cam->queue_lock, lock_flags);

    PDBGG("Frame #%lu queued", (unsigned long)b.index);

    return 0;
}

/*
 * VIDIOC_DQBUF: hand the oldest completed frame back to userspace,
 * blocking (unless O_NONBLOCK) until one completes or the frame timeout
 * expires.  NOTE(review): the timeout here is computed as
 * frame_timeout * 1000 * msecs_to_jiffies(1), whereas read() uses
 * msecs_to_jiffies(frame_timeout * 1000) — presumably equivalent intent;
 * confirm against the original driver before touching.
 */
static int et61x251_vidioc_dqbuf(struct et61x251_device* cam,
                                 struct file* filp, void __user * arg)
{
    struct v4l2_buffer b;
    struct et61x251_frame_t *f;
    unsigned long lock_flags;
    long timeout;

    if (copy_from_user(&b, arg, sizeof(b)))
        return -EFAULT;

    if (b.type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io!= IO_MMAP)
        return -EINVAL;

    if (list_empty(&cam->outqueue)) {
        if (cam->stream == STREAM_OFF)
            return -EINVAL;
        if (filp->f_flags & O_NONBLOCK)
            return -EAGAIN;
        timeout = wait_event_interruptible_timeout
                  ( cam->wait_frame,
                    (!list_empty(&cam->outqueue)) ||
                    (cam->state & DEV_DISCONNECTED) ||
                    (cam->state & DEV_MISCONFIGURED),
                    cam->module_param.frame_timeout *
                    1000 * msecs_to_jiffies(1) );
        if (timeout < 0)
            return timeout;
        if (cam->state & DEV_DISCONNECTED)
            return -ENODEV;
        if (!timeout || (cam->state & DEV_MISCONFIGURED))
            return -EIO;
    }

    spin_lock_irqsave(&cam->queue_lock, lock_flags);
    f = list_entry(cam->outqueue.next, struct et61x251_frame_t, frame);
    list_del(cam->outqueue.next);
    spin_unlock_irqrestore(&cam->queue_lock, lock_flags);

    f->state = F_UNUSED;

    memcpy(&b, &f->buf, sizeof(b));
    if (f->vma_use_count)
        b.flags |= V4L2_BUF_FLAG_MAPPED;

    if (copy_to_user(arg, &b, sizeof(b)))
        return -EFAULT;

    PDBGG("Frame #%lu dequeued", (unsigned long)f->buf.index);

    return 0;
}

/* VIDIOC_STREAMON: only valid in IO_MMAP mode. */
static int et61x251_vidioc_streamon(struct et61x251_device* cam,
                                    void __user * arg)
{
    int type;

    if (copy_from_user(&type, arg, sizeof(type)))
        return -EFAULT;

    if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io != IO_MMAP)
        return -EINVAL;

    cam->stream = STREAM_ON;

    DBG(3, "Stream on");

    return 0;
}

/* VIDIOC_STREAMOFF: pause the stream and recycle all frame queues. */
static int et61x251_vidioc_streamoff(struct et61x251_device* cam,
                                     void __user * arg)
{
    int type, err;

    if (copy_from_user(&type, arg, sizeof(type)))
        return -EFAULT;

    if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io != IO_MMAP)
        return -EINVAL;

    if (cam->stream == STREAM_ON)
        if ((err = et61x251_stream_interrupt(cam)))
            return err;

    et61x251_empty_framequeues(cam);

    DBG(3, "Stream off");

    return 0;
}

/* VIDIOC_G_PARM: report the read() buffer count.  (Continues next chunk.) */
static int et61x251_vidioc_g_parm(struct et61x251_device* cam,
                                  void __user * arg)
{
    struct v4l2_streamparm sp;

    if (copy_from_user(&sp, arg, sizeof(sp)))
        return -EFAULT;

    if (sp.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
        return -EINVAL;

    sp.parm.capture.extendedmode = 0;
    sp.parm.capture.readbuffers = cam->nreadbuffers;

    if (copy_to_user(arg, &sp, sizeof(sp)))
        return 
-EFAULT;

    return 0;
}

/* VIDIOC_S_PARM: set the number of read() buffers, clamped to the limit. */
static int et61x251_vidioc_s_parm(struct et61x251_device* cam,
                                  void __user * arg)
{
    struct v4l2_streamparm sp;

    if (copy_from_user(&sp, arg, sizeof(sp)))
        return -EFAULT;

    if (sp.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
        return -EINVAL;

    sp.parm.capture.extendedmode = 0;

    if (sp.parm.capture.readbuffers == 0)
        sp.parm.capture.readbuffers = cam->nreadbuffers;

    if (sp.parm.capture.readbuffers > ET61X251_MAX_FRAMES)
        sp.parm.capture.readbuffers = ET61X251_MAX_FRAMES;

    if (copy_to_user(arg, &sp, sizeof(sp)))
        return -EFAULT;

    cam->nreadbuffers = sp.parm.capture.readbuffers;

    return 0;
}

/*
 * Dispatch a V4L2 ioctl to its handler.  Called with fileop_mutex held
 * by et61x251_ioctl() below.
 */
static long et61x251_ioctl_v4l2(struct file *filp,
                                unsigned int cmd, void __user *arg)
{
    struct et61x251_device *cam = video_drvdata(filp);

    switch (cmd) {

    case VIDIOC_QUERYCAP:
        return et61x251_vidioc_querycap(cam, arg);

    case VIDIOC_ENUMINPUT:
        return et61x251_vidioc_enuminput(cam, arg);

    case VIDIOC_G_INPUT:
        return et61x251_vidioc_g_input(cam, arg);

    case VIDIOC_S_INPUT:
        return et61x251_vidioc_s_input(cam, arg);

    case VIDIOC_QUERYCTRL:
        return et61x251_vidioc_query_ctrl(cam, arg);

    case VIDIOC_G_CTRL:
        return et61x251_vidioc_g_ctrl(cam, arg);

    case VIDIOC_S_CTRL:
        return et61x251_vidioc_s_ctrl(cam, arg);

    case VIDIOC_CROPCAP:
        return et61x251_vidioc_cropcap(cam, arg);

    case VIDIOC_G_CROP:
        return et61x251_vidioc_g_crop(cam, arg);

    case VIDIOC_S_CROP:
        return et61x251_vidioc_s_crop(cam, arg);

    case VIDIOC_ENUM_FMT:
        return et61x251_vidioc_enum_fmt(cam, arg);

    case VIDIOC_G_FMT:
        return et61x251_vidioc_g_fmt(cam, arg);

    case VIDIOC_TRY_FMT:
    case VIDIOC_S_FMT:
        return et61x251_vidioc_try_s_fmt(cam, cmd, arg);

    case VIDIOC_ENUM_FRAMESIZES:
        return et61x251_vidioc_enum_framesizes(cam, arg);

    case VIDIOC_G_JPEGCOMP:
        return et61x251_vidioc_g_jpegcomp(cam, arg);

    case VIDIOC_S_JPEGCOMP:
        return et61x251_vidioc_s_jpegcomp(cam, arg);

    case VIDIOC_REQBUFS:
        return et61x251_vidioc_reqbufs(cam, arg);

    case VIDIOC_QUERYBUF:
        return et61x251_vidioc_querybuf(cam, arg);

    case VIDIOC_QBUF:
        return et61x251_vidioc_qbuf(cam, arg);

    case VIDIOC_DQBUF:
        return et61x251_vidioc_dqbuf(cam, filp, arg);

    case VIDIOC_STREAMON:
        return et61x251_vidioc_streamon(cam, arg);

    case VIDIOC_STREAMOFF:
        return et61x251_vidioc_streamoff(cam, arg);

    case VIDIOC_G_PARM:
        return et61x251_vidioc_g_parm(cam, arg);

    case VIDIOC_S_PARM:
        return et61x251_vidioc_s_parm(cam, arg);

    /* Standard/menu/interval queries are not applicable to this camera. */
    case VIDIOC_G_STD:
    case VIDIOC_S_STD:
    case VIDIOC_QUERYSTD:
    case VIDIOC_ENUMSTD:
    case VIDIOC_QUERYMENU:
    case VIDIOC_ENUM_FRAMEINTERVALS:
        return -EINVAL;

    default:
        return -EINVAL;

    }
}

/*
 * unlocked_ioctl entry point: serializes all ioctls behind fileop_mutex
 * and rejects calls on disconnected or misconfigured devices.
 */
static long et61x251_ioctl(struct file *filp,
                           unsigned int cmd, unsigned long arg)
{
    struct et61x251_device *cam = video_drvdata(filp);
    long err = 0;

    if (mutex_lock_interruptible(&cam->fileop_mutex))
        return -ERESTARTSYS;

    if (cam->state & DEV_DISCONNECTED) {
        DBG(1, "Device not present");
        mutex_unlock(&cam->fileop_mutex);
        return -ENODEV;
    }

    if (cam->state & DEV_MISCONFIGURED) {
        DBG(1, "The camera is misconfigured. Close and open it "
               "again.");
        mutex_unlock(&cam->fileop_mutex);
        return -EIO;
    }

    V4LDBG(3, "et61x251", cmd);

    err = et61x251_ioctl_v4l2(filp, cmd, (void __user *)arg);

    mutex_unlock(&cam->fileop_mutex);

    return err;
}

static const struct v4l2_file_operations et61x251_fops = {
    .owner = THIS_MODULE,
    .open = et61x251_open,
    .release = et61x251_release,
    .unlocked_ioctl = et61x251_ioctl,
    .read = et61x251_read,
    .poll = et61x251_poll,
    .mmap = et61x251_mmap,
};

/*****************************************************************************/

/* It exists a single interface only. We do not need to validate anything. 
*/
/*
 * USB probe: allocate the device structure, detect the image sensor,
 * initialize the hardware, and register the V4L2 node.  probe completion
 * is signalled so that a racing open() can proceed.
 */
static int
et61x251_usb_probe(struct usb_interface* intf,
                   const struct usb_device_id* id)
{
    struct usb_device *udev = interface_to_usbdev(intf);
    struct et61x251_device* cam;
    static unsigned int dev_nr;
    unsigned int i;
    int err = 0;

    if (!(cam = kzalloc(sizeof(struct et61x251_device), GFP_KERNEL)))
        return -ENOMEM;

    cam->usbdev = udev;

    if (!(cam->control_buffer = kzalloc(8, GFP_KERNEL))) {
        DBG(1, "kmalloc() failed");
        err = -ENOMEM;
        goto fail;
    }

    if (!(cam->v4ldev = video_device_alloc())) {
        DBG(1, "video_device_alloc() failed");
        err = -ENOMEM;
        goto fail;
    }

    DBG(2, "ET61X[12]51 PC Camera Controller detected "
           "(vid/pid 0x%04X:0x%04X)",id->idVendor, id->idProduct);

    /* Walk the NULL-terminated sensor-probe table until one succeeds. */
    for (i = 0; et61x251_sensor_table[i]; i++) {
        err = et61x251_sensor_table[i](cam);
        if (!err)
            break;
    }

    if (!err)
        DBG(2, "%s image sensor detected", cam->sensor.name);
    else {
        DBG(1, "No supported image sensor detected");
        err = -ENODEV;
        goto fail;
    }

    /* Init failure is not fatal here: open() will retry. */
    if (et61x251_init(cam)) {
        DBG(1, "Initialization failed. I will retry on open().");
        cam->state |= DEV_MISCONFIGURED;
    }

    strcpy(cam->v4ldev->name, "ET61X[12]51 PC Camera");
    cam->v4ldev->fops = &et61x251_fops;
    cam->v4ldev->release = video_device_release;
    cam->v4ldev->parent = &udev->dev;
    video_set_drvdata(cam->v4ldev, cam);

    init_completion(&cam->probe);

    err = video_register_device(cam->v4ldev, VFL_TYPE_GRABBER,
                                video_nr[dev_nr]);
    if (err) {
        DBG(1, "V4L2 device registration failed");
        if (err == -ENFILE && video_nr[dev_nr] == -1)
            DBG(1, "Free /dev/videoX node not found");
        video_nr[dev_nr] = -1;
        dev_nr = (dev_nr < ET61X251_MAX_DEVICES-1) ? dev_nr+1 : 0;
        complete_all(&cam->probe);
        goto fail;
    }

    DBG(2, "V4L2 device registered as %s",
        video_device_node_name(cam->v4ldev));

    cam->module_param.force_munmap = force_munmap[dev_nr];
    cam->module_param.frame_timeout = frame_timeout[dev_nr];

    dev_nr = (dev_nr < ET61X251_MAX_DEVICES-1) ? dev_nr+1 : 0;

#ifdef CONFIG_VIDEO_ADV_DEBUG
    err = et61x251_create_sysfs(cam);
    if (!err)
        DBG(2, "Optional device control through 'sysfs' "
               "interface ready");
    else
        DBG(2, "Failed to create 'sysfs' interface for optional "
               "device controlling. Error #%d", err);
#else
    DBG(2, "Optional device control through 'sysfs' interface disabled");
    DBG(3, "Compile the kernel with the 'CONFIG_VIDEO_ADV_DEBUG' "
           "configuration option to enable it.");
#endif

    usb_set_intfdata(intf, cam);
    kref_init(&cam->kref);
    usb_get_dev(cam->usbdev);

    complete_all(&cam->probe);

    return 0;

fail:
    if (cam) {
        kfree(cam->control_buffer);
        if (cam->v4ldev)
            video_device_release(cam->v4ldev);
        kfree(cam);
    }
    return err;
}

/*
 * USB disconnect: if the device is open, defer teardown to the last
 * kref_put and wake all sleepers so they see DEV_DISCONNECTED.
 */
static void et61x251_usb_disconnect(struct usb_interface* intf)
{
    struct et61x251_device* cam;

    down_write(&et61x251_dev_lock);

    cam = usb_get_intfdata(intf);

    DBG(2, "Disconnecting %s...", cam->v4ldev->name);

    if (cam->users) {
        DBG(2, "Device %s is open! Deregistration and memory "
               "deallocation are deferred.",
            video_device_node_name(cam->v4ldev));
        cam->state |= DEV_MISCONFIGURED;
        et61x251_stop_transfer(cam);
        cam->state |= DEV_DISCONNECTED;
        wake_up_interruptible(&cam->wait_frame);
        wake_up(&cam->wait_stream);
    } else
        cam->state |= DEV_DISCONNECTED;

    wake_up_interruptible_all(&cam->wait_open);

    kref_put(&cam->kref, et61x251_release_resources);

    up_write(&et61x251_dev_lock);
}

static struct usb_driver et61x251_usb_driver = {
    .name = "et61x251",
    .id_table = et61x251_id_table,
    .probe = et61x251_usb_probe,
    .disconnect = et61x251_usb_disconnect,
};

/*****************************************************************************/

/* Module init: register the USB driver with the core. */
static int __init et61x251_module_init(void)
{
    int err = 0;

    KDBG(2, ET61X251_MODULE_NAME " v" ET61X251_MODULE_VERSION);
    KDBG(3, ET61X251_MODULE_AUTHOR);

    if ((err = usb_register(&et61x251_usb_driver)))
        KDBG(1, "usb_register() failed");

    return err;
}

static void __exit et61x251_module_exit(void)
{
    usb_deregister(&et61x251_usb_driver);
}

module_init(et61x251_module_init);
module_exit(et61x251_module_exit);
gpl-2.0
insvnx/android_kernel_htc_a11chl
arch/mips/pci/pci.c
4417
7528
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * Copyright (C) 2003, 04, 11 Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2011 Wind River Systems, * written by Ralf Baechle (ralf@linux-mips.org) */ #include <linux/bug.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/bootmem.h> #include <linux/export.h> #include <linux/init.h> #include <linux/types.h> #include <linux/pci.h> #include <asm/cpu-info.h> /* * If PCI_PROBE_ONLY in pci_flags is set, we don't change any PCI resource * assignments. */ /* * The PCI controller list. */ static struct pci_controller *hose_head, **hose_tail = &hose_head; unsigned long PCIBIOS_MIN_IO; unsigned long PCIBIOS_MIN_MEM; static int pci_initialized; /* * We need to avoid collisions with `mirrored' VGA ports * and other strange ISA hardware, so we always want the * addresses to be allocated in the 0x000-0x0ff region * modulo 0x400. * * Why? Because some silly external IO cards only decode * the low 10 bits of the IO address. The 0x00-0xff region * is reserved for motherboard devices that decode all 16 * bits, so it's ok to allocate at, say, 0x2800-0x28ff, * but we want to try to avoid allocating at 0x2900-0x2bff * which might have be mirrored at 0x0100-0x03ff.. 
*/ resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { struct pci_dev *dev = data; struct pci_controller *hose = dev->sysdata; resource_size_t start = res->start; if (res->flags & IORESOURCE_IO) { /* Make sure we start at our min on all hoses */ if (start < PCIBIOS_MIN_IO + hose->io_resource->start) start = PCIBIOS_MIN_IO + hose->io_resource->start; /* * Put everything into 0x00-0xff region modulo 0x400 */ if (start & 0x300) start = (start + 0x3ff) & ~0x3ff; } else if (res->flags & IORESOURCE_MEM) { /* Make sure we start at our min on all hoses */ if (start < PCIBIOS_MIN_MEM + hose->mem_resource->start) start = PCIBIOS_MIN_MEM + hose->mem_resource->start; } return start; } static void __devinit pcibios_scanbus(struct pci_controller *hose) { static int next_busno; static int need_domain_info; LIST_HEAD(resources); struct pci_bus *bus; if (!hose->iommu) PCI_DMA_BUS_IS_PHYS = 1; if (hose->get_busno && pci_has_flag(PCI_PROBE_ONLY)) next_busno = (*hose->get_busno)(); pci_add_resource_offset(&resources, hose->mem_resource, hose->mem_offset); pci_add_resource_offset(&resources, hose->io_resource, hose->io_offset); bus = pci_scan_root_bus(NULL, next_busno, hose->pci_ops, hose, &resources); if (!bus) pci_free_resource_list(&resources); hose->bus = bus; need_domain_info = need_domain_info || hose->index; hose->need_domain_info = need_domain_info; if (bus) { next_busno = bus->subordinate + 1; /* Don't allow 8-bit bus number overflow inside the hose - reserve some space for bridges. 
*/ if (next_busno > 224) { next_busno = 0; need_domain_info = 1; } if (!pci_has_flag(PCI_PROBE_ONLY)) { pci_bus_size_bridges(bus); pci_bus_assign_resources(bus); pci_enable_bridges(bus); } } } static DEFINE_MUTEX(pci_scan_mutex); void __devinit register_pci_controller(struct pci_controller *hose) { if (request_resource(&iomem_resource, hose->mem_resource) < 0) goto out; if (request_resource(&ioport_resource, hose->io_resource) < 0) { release_resource(hose->mem_resource); goto out; } *hose_tail = hose; hose_tail = &hose->next; /* * Do not panic here but later - this might happen before console init. */ if (!hose->io_map_base) { printk(KERN_WARNING "registering PCI controller with io_map_base unset\n"); } /* * Scan the bus if it is register after the PCI subsystem * initialization. */ if (pci_initialized) { mutex_lock(&pci_scan_mutex); pcibios_scanbus(hose); mutex_unlock(&pci_scan_mutex); } return; out: printk(KERN_WARNING "Skipping PCI bus scan due to resource conflict\n"); } static void __init pcibios_set_cache_line_size(void) { struct cpuinfo_mips *c = &current_cpu_data; unsigned int lsize; /* * Set PCI cacheline size to that of the highest level in the * cache hierarchy. */ lsize = c->dcache.linesz; lsize = c->scache.linesz ? : lsize; lsize = c->tcache.linesz ? : lsize; BUG_ON(!lsize); pci_dfl_cache_line_size = lsize >> 2; pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize); } static int __init pcibios_init(void) { struct pci_controller *hose; pcibios_set_cache_line_size(); /* Scan all of the recorded PCI controllers. 
*/ for (hose = hose_head; hose; hose = hose->next) pcibios_scanbus(hose); pci_fixup_irqs(pci_common_swizzle, pcibios_map_irq); pci_initialized = 1; return 0; } subsys_initcall(pcibios_init); static int pcibios_enable_resources(struct pci_dev *dev, int mask) { u16 cmd, old_cmd; int idx; struct resource *r; pci_read_config_word(dev, PCI_COMMAND, &cmd); old_cmd = cmd; for (idx=0; idx < PCI_NUM_RESOURCES; idx++) { /* Only set up the requested stuff */ if (!(mask & (1<<idx))) continue; r = &dev->resource[idx]; if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM))) continue; if ((idx == PCI_ROM_RESOURCE) && (!(r->flags & IORESOURCE_ROM_ENABLE))) continue; if (!r->start && r->end) { printk(KERN_ERR "PCI: Device %s not available " "because of resource collisions\n", pci_name(dev)); return -EINVAL; } if (r->flags & IORESOURCE_IO) cmd |= PCI_COMMAND_IO; if (r->flags & IORESOURCE_MEM) cmd |= PCI_COMMAND_MEMORY; } if (cmd != old_cmd) { printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd); } return 0; } unsigned int pcibios_assign_all_busses(void) { return 1; } int pcibios_enable_device(struct pci_dev *dev, int mask) { int err; if ((err = pcibios_enable_resources(dev, mask)) < 0) return err; return pcibios_plat_dev_init(dev); } void __devinit pcibios_fixup_bus(struct pci_bus *bus) { struct pci_dev *dev = bus->self; if (pci_has_flag(PCI_PROBE_ONLY) && dev && (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { pci_read_bridge_bases(bus); } } void __init pcibios_update_irq(struct pci_dev *dev, int irq) { pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); } #ifdef CONFIG_HOTPLUG EXPORT_SYMBOL(PCIBIOS_MIN_IO); EXPORT_SYMBOL(PCIBIOS_MIN_MEM); #endif int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine) { unsigned long prot; /* * I/O space can be accessed via normal processor loads and stores on * this platform but for now we elect not to do this and 
portable * drivers should not do this anyway. */ if (mmap_state == pci_mmap_io) return -EINVAL; /* * Ignore write-combine; for now only return uncached mappings. */ prot = pgprot_val(vma->vm_page_prot); prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED; vma->vm_page_prot = __pgprot(prot); return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot); } char * (*pcibios_plat_setup)(char *str) __devinitdata; char *__devinit pcibios_setup(char *str) { if (pcibios_plat_setup) return pcibios_plat_setup(str); return str; }
gpl-2.0
cameron581/lge-kernel-gproj
net/ipx/ipx_proc.c
5185
8235
/* * IPX proc routines * * Copyright(C) Arnaldo Carvalho de Melo <acme@conectiva.com.br>, 2002 */ #include <linux/init.h> #ifdef CONFIG_PROC_FS #include <linux/proc_fs.h> #include <linux/spinlock.h> #include <linux/seq_file.h> #include <linux/export.h> #include <net/net_namespace.h> #include <net/tcp_states.h> #include <net/ipx.h> static void *ipx_seq_interface_start(struct seq_file *seq, loff_t *pos) { spin_lock_bh(&ipx_interfaces_lock); return seq_list_start_head(&ipx_interfaces, *pos); } static void *ipx_seq_interface_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_list_next(v, &ipx_interfaces, pos); } static void ipx_seq_interface_stop(struct seq_file *seq, void *v) { spin_unlock_bh(&ipx_interfaces_lock); } static int ipx_seq_interface_show(struct seq_file *seq, void *v) { struct ipx_interface *i; if (v == &ipx_interfaces) { seq_puts(seq, "Network Node_Address Primary Device " "Frame_Type"); #ifdef IPX_REFCNT_DEBUG seq_puts(seq, " refcnt"); #endif seq_puts(seq, "\n"); goto out; } i = list_entry(v, struct ipx_interface, node); seq_printf(seq, "%08lX ", (unsigned long int)ntohl(i->if_netnum)); seq_printf(seq, "%02X%02X%02X%02X%02X%02X ", i->if_node[0], i->if_node[1], i->if_node[2], i->if_node[3], i->if_node[4], i->if_node[5]); seq_printf(seq, "%-9s", i == ipx_primary_net ? 
"Yes" : "No"); seq_printf(seq, "%-11s", ipx_device_name(i)); seq_printf(seq, "%-9s", ipx_frame_name(i->if_dlink_type)); #ifdef IPX_REFCNT_DEBUG seq_printf(seq, "%6d", atomic_read(&i->refcnt)); #endif seq_puts(seq, "\n"); out: return 0; } static void *ipx_seq_route_start(struct seq_file *seq, loff_t *pos) { read_lock_bh(&ipx_routes_lock); return seq_list_start_head(&ipx_routes, *pos); } static void *ipx_seq_route_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_list_next(v, &ipx_routes, pos); } static void ipx_seq_route_stop(struct seq_file *seq, void *v) { read_unlock_bh(&ipx_routes_lock); } static int ipx_seq_route_show(struct seq_file *seq, void *v) { struct ipx_route *rt; if (v == &ipx_routes) { seq_puts(seq, "Network Router_Net Router_Node\n"); goto out; } rt = list_entry(v, struct ipx_route, node); seq_printf(seq, "%08lX ", (unsigned long int)ntohl(rt->ir_net)); if (rt->ir_routed) seq_printf(seq, "%08lX %02X%02X%02X%02X%02X%02X\n", (long unsigned int)ntohl(rt->ir_intrfc->if_netnum), rt->ir_router_node[0], rt->ir_router_node[1], rt->ir_router_node[2], rt->ir_router_node[3], rt->ir_router_node[4], rt->ir_router_node[5]); else seq_puts(seq, "Directly Connected\n"); out: return 0; } static __inline__ struct sock *ipx_get_socket_idx(loff_t pos) { struct sock *s = NULL; struct hlist_node *node; struct ipx_interface *i; list_for_each_entry(i, &ipx_interfaces, node) { spin_lock_bh(&i->if_sklist_lock); sk_for_each(s, node, &i->if_sklist) { if (!pos) break; --pos; } spin_unlock_bh(&i->if_sklist_lock); if (!pos) { if (node) goto found; break; } } s = NULL; found: return s; } static void *ipx_seq_socket_start(struct seq_file *seq, loff_t *pos) { loff_t l = *pos; spin_lock_bh(&ipx_interfaces_lock); return l ? 
ipx_get_socket_idx(--l) : SEQ_START_TOKEN; } static void *ipx_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos) { struct sock* sk, *next; struct ipx_interface *i; struct ipx_sock *ipxs; ++*pos; if (v == SEQ_START_TOKEN) { sk = NULL; i = ipx_interfaces_head(); if (!i) goto out; sk = sk_head(&i->if_sklist); if (sk) spin_lock_bh(&i->if_sklist_lock); goto out; } sk = v; next = sk_next(sk); if (next) { sk = next; goto out; } ipxs = ipx_sk(sk); i = ipxs->intrfc; spin_unlock_bh(&i->if_sklist_lock); sk = NULL; for (;;) { if (i->node.next == &ipx_interfaces) break; i = list_entry(i->node.next, struct ipx_interface, node); spin_lock_bh(&i->if_sklist_lock); if (!hlist_empty(&i->if_sklist)) { sk = sk_head(&i->if_sklist); break; } spin_unlock_bh(&i->if_sklist_lock); } out: return sk; } static int ipx_seq_socket_show(struct seq_file *seq, void *v) { struct sock *s; struct ipx_sock *ipxs; if (v == SEQ_START_TOKEN) { #ifdef CONFIG_IPX_INTERN seq_puts(seq, "Local_Address " "Remote_Address Tx_Queue " "Rx_Queue State Uid\n"); #else seq_puts(seq, "Local_Address Remote_Address " "Tx_Queue Rx_Queue State Uid\n"); #endif goto out; } s = v; ipxs = ipx_sk(s); #ifdef CONFIG_IPX_INTERN seq_printf(seq, "%08lX:%02X%02X%02X%02X%02X%02X:%04X ", (unsigned long)ntohl(ipxs->intrfc->if_netnum), ipxs->node[0], ipxs->node[1], ipxs->node[2], ipxs->node[3], ipxs->node[4], ipxs->node[5], ntohs(ipxs->port)); #else seq_printf(seq, "%08lX:%04X ", (unsigned long) ntohl(ipxs->intrfc->if_netnum), ntohs(ipxs->port)); #endif /* CONFIG_IPX_INTERN */ if (s->sk_state != TCP_ESTABLISHED) seq_printf(seq, "%-28s", "Not_Connected"); else { seq_printf(seq, "%08lX:%02X%02X%02X%02X%02X%02X:%04X ", (unsigned long)ntohl(ipxs->dest_addr.net), ipxs->dest_addr.node[0], ipxs->dest_addr.node[1], ipxs->dest_addr.node[2], ipxs->dest_addr.node[3], ipxs->dest_addr.node[4], ipxs->dest_addr.node[5], ntohs(ipxs->dest_addr.sock)); } seq_printf(seq, "%08X %08X %02X %03d\n", sk_wmem_alloc_get(s), sk_rmem_alloc_get(s), 
s->sk_state, SOCK_INODE(s->sk_socket)->i_uid); out: return 0; } static const struct seq_operations ipx_seq_interface_ops = { .start = ipx_seq_interface_start, .next = ipx_seq_interface_next, .stop = ipx_seq_interface_stop, .show = ipx_seq_interface_show, }; static const struct seq_operations ipx_seq_route_ops = { .start = ipx_seq_route_start, .next = ipx_seq_route_next, .stop = ipx_seq_route_stop, .show = ipx_seq_route_show, }; static const struct seq_operations ipx_seq_socket_ops = { .start = ipx_seq_socket_start, .next = ipx_seq_socket_next, .stop = ipx_seq_interface_stop, .show = ipx_seq_socket_show, }; static int ipx_seq_route_open(struct inode *inode, struct file *file) { return seq_open(file, &ipx_seq_route_ops); } static int ipx_seq_interface_open(struct inode *inode, struct file *file) { return seq_open(file, &ipx_seq_interface_ops); } static int ipx_seq_socket_open(struct inode *inode, struct file *file) { return seq_open(file, &ipx_seq_socket_ops); } static const struct file_operations ipx_seq_interface_fops = { .owner = THIS_MODULE, .open = ipx_seq_interface_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct file_operations ipx_seq_route_fops = { .owner = THIS_MODULE, .open = ipx_seq_route_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct file_operations ipx_seq_socket_fops = { .owner = THIS_MODULE, .open = ipx_seq_socket_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static struct proc_dir_entry *ipx_proc_dir; int __init ipx_proc_init(void) { struct proc_dir_entry *p; int rc = -ENOMEM; ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net); if (!ipx_proc_dir) goto out; p = proc_create("interface", S_IRUGO, ipx_proc_dir, &ipx_seq_interface_fops); if (!p) goto out_interface; p = proc_create("route", S_IRUGO, ipx_proc_dir, &ipx_seq_route_fops); if (!p) goto out_route; p = proc_create("socket", S_IRUGO, ipx_proc_dir, &ipx_seq_socket_fops); if (!p) goto 
out_socket; rc = 0; out: return rc; out_socket: remove_proc_entry("route", ipx_proc_dir); out_route: remove_proc_entry("interface", ipx_proc_dir); out_interface: remove_proc_entry("ipx", init_net.proc_net); goto out; } void __exit ipx_proc_exit(void) { remove_proc_entry("interface", ipx_proc_dir); remove_proc_entry("route", ipx_proc_dir); remove_proc_entry("socket", ipx_proc_dir); remove_proc_entry("ipx", init_net.proc_net); } #else /* CONFIG_PROC_FS */ int __init ipx_proc_init(void) { return 0; } void __exit ipx_proc_exit(void) { } #endif /* CONFIG_PROC_FS */
gpl-2.0
CyanogenMod/android_kernel_htc_m7
drivers/watchdog/wdrtas.c
7233
16518
/* * FIXME: add wdrtas_get_status and wdrtas_get_boot_status as soon as * RTAS calls are available */ /* * RTAS watchdog driver * * (C) Copyright IBM Corp. 2005 * device driver to exploit watchdog RTAS functions * * Authors : Utz Bacher <utz.bacher@de.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/types.h> #include <linux/watchdog.h> #include <linux/uaccess.h> #include <asm/rtas.h> #define WDRTAS_MAGIC_CHAR 42 #define WDRTAS_SUPPORTED_MASK (WDIOF_SETTIMEOUT | \ WDIOF_MAGICCLOSE) MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>"); MODULE_DESCRIPTION("RTAS watchdog driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS_MISCDEV(TEMP_MINOR); static bool wdrtas_nowayout = WATCHDOG_NOWAYOUT; static atomic_t wdrtas_miscdev_open = ATOMIC_INIT(0); static char wdrtas_expect_close; static int wdrtas_interval; #define WDRTAS_THERMAL_SENSOR 3 static int wdrtas_token_get_sensor_state; #define WDRTAS_SURVEILLANCE_IND 9000 static int wdrtas_token_set_indicator; #define WDRTAS_SP_SPI 28 static int wdrtas_token_get_sp; static int wdrtas_token_event_scan; #define WDRTAS_DEFAULT_INTERVAL 300 
#define WDRTAS_LOGBUFFER_LEN 128 static char wdrtas_logbuffer[WDRTAS_LOGBUFFER_LEN]; /*** watchdog access functions */ /** * wdrtas_set_interval - sets the watchdog interval * @interval: new interval * * returns 0 on success, <0 on failures * * wdrtas_set_interval sets the watchdog keepalive interval by calling the * RTAS function set-indicator (surveillance). The unit of interval is * seconds. */ static int wdrtas_set_interval(int interval) { long result; static int print_msg = 10; /* rtas uses minutes */ interval = (interval + 59) / 60; result = rtas_call(wdrtas_token_set_indicator, 3, 1, NULL, WDRTAS_SURVEILLANCE_IND, 0, interval); if (result < 0 && print_msg) { pr_err("setting the watchdog to %i timeout failed: %li\n", interval, result); print_msg--; } return result; } #define WDRTAS_SP_SPI_LEN 4 /** * wdrtas_get_interval - returns the current watchdog interval * @fallback_value: value (in seconds) to use, if the RTAS call fails * * returns the interval * * wdrtas_get_interval returns the current watchdog keepalive interval * as reported by the RTAS function ibm,get-system-parameter. The unit * of the return value is seconds. */ static int wdrtas_get_interval(int fallback_value) { long result; char value[WDRTAS_SP_SPI_LEN]; spin_lock(&rtas_data_buf_lock); memset(rtas_data_buf, 0, WDRTAS_SP_SPI_LEN); result = rtas_call(wdrtas_token_get_sp, 3, 1, NULL, WDRTAS_SP_SPI, __pa(rtas_data_buf), WDRTAS_SP_SPI_LEN); memcpy(value, rtas_data_buf, WDRTAS_SP_SPI_LEN); spin_unlock(&rtas_data_buf_lock); if (value[0] != 0 || value[1] != 2 || value[3] != 0 || result < 0) { pr_warn("could not get sp_spi watchdog timeout (%li). 
Continuing\n", result); return fallback_value; } /* rtas uses minutes */ return ((int)value[2]) * 60; } /** * wdrtas_timer_start - starts watchdog * * wdrtas_timer_start starts the watchdog by calling the RTAS function * set-interval (surveillance) */ static void wdrtas_timer_start(void) { wdrtas_set_interval(wdrtas_interval); } /** * wdrtas_timer_stop - stops watchdog * * wdrtas_timer_stop stops the watchdog timer by calling the RTAS function * set-interval (surveillance) */ static void wdrtas_timer_stop(void) { wdrtas_set_interval(0); } /** * wdrtas_log_scanned_event - logs an event we received during keepalive * * wdrtas_log_scanned_event prints a message to the log buffer dumping * the results of the last event-scan call */ static void wdrtas_log_scanned_event(void) { int i; for (i = 0; i < WDRTAS_LOGBUFFER_LEN; i += 16) pr_info("dumping event (line %i/%i), data = " "%02x %02x %02x %02x %02x %02x %02x %02x " "%02x %02x %02x %02x %02x %02x %02x %02x\n", (i / 16) + 1, (WDRTAS_LOGBUFFER_LEN / 16), wdrtas_logbuffer[i + 0], wdrtas_logbuffer[i + 1], wdrtas_logbuffer[i + 2], wdrtas_logbuffer[i + 3], wdrtas_logbuffer[i + 4], wdrtas_logbuffer[i + 5], wdrtas_logbuffer[i + 6], wdrtas_logbuffer[i + 7], wdrtas_logbuffer[i + 8], wdrtas_logbuffer[i + 9], wdrtas_logbuffer[i + 10], wdrtas_logbuffer[i + 11], wdrtas_logbuffer[i + 12], wdrtas_logbuffer[i + 13], wdrtas_logbuffer[i + 14], wdrtas_logbuffer[i + 15]); } /** * wdrtas_timer_keepalive - resets watchdog timer to keep system alive * * wdrtas_timer_keepalive restarts the watchdog timer by calling the * RTAS function event-scan and repeats these calls as long as there are * events available. All events will be dumped. 
*/ static void wdrtas_timer_keepalive(void) { long result; do { result = rtas_call(wdrtas_token_event_scan, 4, 1, NULL, RTAS_EVENT_SCAN_ALL_EVENTS, 0, (void *)__pa(wdrtas_logbuffer), WDRTAS_LOGBUFFER_LEN); if (result < 0) pr_err("event-scan failed: %li\n", result); if (result == 0) wdrtas_log_scanned_event(); } while (result == 0); } /** * wdrtas_get_temperature - returns current temperature * * returns temperature or <0 on failures * * wdrtas_get_temperature returns the current temperature in Fahrenheit. It * uses the RTAS call get-sensor-state, token 3 to do so */ static int wdrtas_get_temperature(void) { int result; int temperature = 0; result = rtas_get_sensor(WDRTAS_THERMAL_SENSOR, 0, &temperature); if (result < 0) pr_warn("reading the thermal sensor failed: %i\n", result); else temperature = ((temperature * 9) / 5) + 32; /* fahrenheit */ return temperature; } /** * wdrtas_get_status - returns the status of the watchdog * * returns a bitmask of defines WDIOF_... as defined in * include/linux/watchdog.h */ static int wdrtas_get_status(void) { return 0; /* TODO */ } /** * wdrtas_get_boot_status - returns the reason for the last boot * * returns a bitmask of defines WDIOF_... as defined in * include/linux/watchdog.h, indicating why the watchdog rebooted the system */ static int wdrtas_get_boot_status(void) { return 0; /* TODO */ } /*** watchdog API and operations stuff */ /* wdrtas_write - called when watchdog device is written to * @file: file structure * @buf: user buffer with data * @len: amount to data written * @ppos: position in file * * returns the number of successfully processed characters, which is always * the number of bytes passed to this function * * wdrtas_write processes all the data given to it and looks for the magic * character 'V'. This character allows the watchdog device to be closed * properly. 
*/ static ssize_t wdrtas_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { int i; char c; if (!len) goto out; if (!wdrtas_nowayout) { wdrtas_expect_close = 0; /* look for 'V' */ for (i = 0; i < len; i++) { if (get_user(c, buf + i)) return -EFAULT; /* allow to close device */ if (c == 'V') wdrtas_expect_close = WDRTAS_MAGIC_CHAR; } } wdrtas_timer_keepalive(); out: return len; } /** * wdrtas_ioctl - ioctl function for the watchdog device * @file: file structure * @cmd: command for ioctl * @arg: argument pointer * * returns 0 on success, <0 on failure * * wdrtas_ioctl implements the watchdog API ioctls */ static long wdrtas_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int __user *argp = (void __user *)arg; int i; static const struct watchdog_info wdinfo = { .options = WDRTAS_SUPPORTED_MASK, .firmware_version = 0, .identity = "wdrtas", }; switch (cmd) { case WDIOC_GETSUPPORT: if (copy_to_user(argp, &wdinfo, sizeof(wdinfo))) return -EFAULT; return 0; case WDIOC_GETSTATUS: i = wdrtas_get_status(); return put_user(i, argp); case WDIOC_GETBOOTSTATUS: i = wdrtas_get_boot_status(); return put_user(i, argp); case WDIOC_GETTEMP: if (wdrtas_token_get_sensor_state == RTAS_UNKNOWN_SERVICE) return -EOPNOTSUPP; i = wdrtas_get_temperature(); return put_user(i, argp); case WDIOC_SETOPTIONS: if (get_user(i, argp)) return -EFAULT; if (i & WDIOS_DISABLECARD) wdrtas_timer_stop(); if (i & WDIOS_ENABLECARD) { wdrtas_timer_keepalive(); wdrtas_timer_start(); } /* not implemented. 
Done by H8 if (i & WDIOS_TEMPPANIC) { } */ return 0; case WDIOC_KEEPALIVE: wdrtas_timer_keepalive(); return 0; case WDIOC_SETTIMEOUT: if (get_user(i, argp)) return -EFAULT; if (wdrtas_set_interval(i)) return -EINVAL; wdrtas_timer_keepalive(); if (wdrtas_token_get_sp == RTAS_UNKNOWN_SERVICE) wdrtas_interval = i; else wdrtas_interval = wdrtas_get_interval(i); /* fallthrough */ case WDIOC_GETTIMEOUT: return put_user(wdrtas_interval, argp); default: return -ENOTTY; } } /** * wdrtas_open - open function of watchdog device * @inode: inode structure * @file: file structure * * returns 0 on success, -EBUSY if the file has been opened already, <0 on * other failures * * function called when watchdog device is opened */ static int wdrtas_open(struct inode *inode, struct file *file) { /* only open once */ if (atomic_inc_return(&wdrtas_miscdev_open) > 1) { atomic_dec(&wdrtas_miscdev_open); return -EBUSY; } wdrtas_timer_start(); wdrtas_timer_keepalive(); return nonseekable_open(inode, file); } /** * wdrtas_close - close function of watchdog device * @inode: inode structure * @file: file structure * * returns 0 on success * * close function. Always succeeds */ static int wdrtas_close(struct inode *inode, struct file *file) { /* only stop watchdog, if this was announced using 'V' before */ if (wdrtas_expect_close == WDRTAS_MAGIC_CHAR) wdrtas_timer_stop(); else { pr_warn("got unexpected close. Watchdog not stopped.\n"); wdrtas_timer_keepalive(); } wdrtas_expect_close = 0; atomic_dec(&wdrtas_miscdev_open); return 0; } /** * wdrtas_temp_read - gives back the temperature in fahrenheit * @file: file structure * @buf: user buffer * @count: number of bytes to be read * @ppos: position in file * * returns always 1 or -EFAULT in case of user space copy failures, <0 on * other failures * * wdrtas_temp_read gives the temperature to the users by copying this * value as one byte into the user space buffer. The unit is Fahrenheit... 
*/ static ssize_t wdrtas_temp_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int temperature = 0; temperature = wdrtas_get_temperature(); if (temperature < 0) return temperature; if (copy_to_user(buf, &temperature, 1)) return -EFAULT; return 1; } /** * wdrtas_temp_open - open function of temperature device * @inode: inode structure * @file: file structure * * returns 0 on success, <0 on failure * * function called when temperature device is opened */ static int wdrtas_temp_open(struct inode *inode, struct file *file) { return nonseekable_open(inode, file); } /** * wdrtas_temp_close - close function of temperature device * @inode: inode structure * @file: file structure * * returns 0 on success * * close function. Always succeeds */ static int wdrtas_temp_close(struct inode *inode, struct file *file) { return 0; } /** * wdrtas_reboot - reboot notifier function * @nb: notifier block structure * @code: reboot code * @ptr: unused * * returns NOTIFY_DONE * * wdrtas_reboot stops the watchdog in case of a reboot */ static int wdrtas_reboot(struct notifier_block *this, unsigned long code, void *ptr) { if (code == SYS_DOWN || code == SYS_HALT) wdrtas_timer_stop(); return NOTIFY_DONE; } /*** initialization stuff */ static const struct file_operations wdrtas_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = wdrtas_write, .unlocked_ioctl = wdrtas_ioctl, .open = wdrtas_open, .release = wdrtas_close, }; static struct miscdevice wdrtas_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &wdrtas_fops, }; static const struct file_operations wdrtas_temp_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = wdrtas_temp_read, .open = wdrtas_temp_open, .release = wdrtas_temp_close, }; static struct miscdevice wdrtas_tempdev = { .minor = TEMP_MINOR, .name = "temperature", .fops = &wdrtas_temp_fops, }; static struct notifier_block wdrtas_notifier = { .notifier_call = wdrtas_reboot, }; /** * wdrtas_get_tokens - reads in RTAS tokens * 
* returns 0 on success, <0 on failure * * wdrtas_get_tokens reads in the tokens for the RTAS calls used in * this watchdog driver. It tolerates, if "get-sensor-state" and * "ibm,get-system-parameter" are not available. */ static int wdrtas_get_tokens(void) { wdrtas_token_get_sensor_state = rtas_token("get-sensor-state"); if (wdrtas_token_get_sensor_state == RTAS_UNKNOWN_SERVICE) { pr_warn("couldn't get token for get-sensor-state. Trying to continue without temperature support.\n"); } wdrtas_token_get_sp = rtas_token("ibm,get-system-parameter"); if (wdrtas_token_get_sp == RTAS_UNKNOWN_SERVICE) { pr_warn("couldn't get token for ibm,get-system-parameter. Trying to continue with a default timeout value of %i seconds.\n", WDRTAS_DEFAULT_INTERVAL); } wdrtas_token_set_indicator = rtas_token("set-indicator"); if (wdrtas_token_set_indicator == RTAS_UNKNOWN_SERVICE) { pr_err("couldn't get token for set-indicator. Terminating watchdog code.\n"); return -EIO; } wdrtas_token_event_scan = rtas_token("event-scan"); if (wdrtas_token_event_scan == RTAS_UNKNOWN_SERVICE) { pr_err("couldn't get token for event-scan. Terminating watchdog code.\n"); return -EIO; } return 0; } /** * wdrtas_unregister_devs - unregisters the misc dev handlers * * wdrtas_register_devs unregisters the watchdog and temperature watchdog * misc devs */ static void wdrtas_unregister_devs(void) { misc_deregister(&wdrtas_miscdev); if (wdrtas_token_get_sensor_state != RTAS_UNKNOWN_SERVICE) misc_deregister(&wdrtas_tempdev); } /** * wdrtas_register_devs - registers the misc dev handlers * * returns 0 on success, <0 on failure * * wdrtas_register_devs registers the watchdog and temperature watchdog * misc devs */ static int wdrtas_register_devs(void) { int result; result = misc_register(&wdrtas_miscdev); if (result) { pr_err("couldn't register watchdog misc device. 
Terminating watchdog code.\n"); return result; } if (wdrtas_token_get_sensor_state != RTAS_UNKNOWN_SERVICE) { result = misc_register(&wdrtas_tempdev); if (result) { pr_warn("couldn't register watchdog temperature misc device. Continuing without temperature support.\n"); wdrtas_token_get_sensor_state = RTAS_UNKNOWN_SERVICE; } } return 0; } /** * wdrtas_init - init function of the watchdog driver * * returns 0 on success, <0 on failure * * registers the file handlers and the reboot notifier */ static int __init wdrtas_init(void) { if (wdrtas_get_tokens()) return -ENODEV; if (wdrtas_register_devs()) return -ENODEV; if (register_reboot_notifier(&wdrtas_notifier)) { pr_err("could not register reboot notifier. Terminating watchdog code.\n"); wdrtas_unregister_devs(); return -ENODEV; } if (wdrtas_token_get_sp == RTAS_UNKNOWN_SERVICE) wdrtas_interval = WDRTAS_DEFAULT_INTERVAL; else wdrtas_interval = wdrtas_get_interval(WDRTAS_DEFAULT_INTERVAL); return 0; } /** * wdrtas_exit - exit function of the watchdog driver * * unregisters the file handlers and the reboot notifier */ static void __exit wdrtas_exit(void) { if (!wdrtas_nowayout) wdrtas_timer_stop(); wdrtas_unregister_devs(); unregister_reboot_notifier(&wdrtas_notifier); } module_init(wdrtas_init); module_exit(wdrtas_exit);
gpl-2.0
tidatida/linux-stable-grsec
drivers/watchdog/sb_wdog.c
7233
8770
/* * Watchdog driver for SiByte SB1 SoCs * * Copyright (C) 2007 OnStor, Inc. * Andrew Sharp <andy.sharp@lsi.com> * * This driver is intended to make the second of two hardware watchdogs * on the Sibyte 12XX and 11XX SoCs available to the user. There are two * such devices available on the SoC, but it seems that there isn't an * enumeration class for watchdogs in Linux like there is for RTCs. * The second is used rather than the first because it uses IRQ 1, * thereby avoiding all that IRQ 0 problematic nonsense. * * I have not tried this driver on a 1480 processor; it might work * just well enough to really screw things up. * * It is a simple timer, and there is an interrupt that is raised the * first time the timer expires. The second time it expires, the chip * is reset and there is no way to redirect that NMI. Which could * be problematic in some cases where this chip is sitting on the HT * bus and has just taken responsibility for providing a cache block. * Since the reset can't be redirected to the external reset pin, it is * possible that other HT connected processors might hang and not reset. * For Linux, a soft reset would probably be even worse than a hard reset. * There you have it. * * The timer takes 23 bits of a 64 bit register (?) as a count value, * and decrements the count every microsecond, for a max value of * 0x7fffff usec or about 8.3ish seconds. * * This watchdog borrows some user semantics from the softdog driver, * in that if you close the fd, it leaves the watchdog running, unless * you previously wrote a 'V' to the fd, in which case it disables * the watchdog when you close the fd like some other drivers. * * Based on various other watchdog drivers, which are probably all * loosely based on something Alan Cox wrote years ago. * * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>, * All Rights Reserved. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 1 or 2 as published by the Free Software Foundation. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/fs.h> #include <linux/reboot.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/interrupt.h> #include <asm/sibyte/sb1250.h> #include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/sb1250_int.h> #include <asm/sibyte/sb1250_scd.h> static DEFINE_SPINLOCK(sbwd_lock); /* * set the initial count value of a timer * * wdog is the iomem address of the cfg register */ void sbwdog_set(char __iomem *wdog, unsigned long t) { spin_lock(&sbwd_lock); __raw_writeb(0, wdog); __raw_writeq(t & 0x7fffffUL, wdog - 0x10); spin_unlock(&sbwd_lock); } /* * cause the timer to [re]load it's initial count and start counting * all over again * * wdog is the iomem address of the cfg register */ void sbwdog_pet(char __iomem *wdog) { spin_lock(&sbwd_lock); __raw_writeb(__raw_readb(wdog) | 1, wdog); spin_unlock(&sbwd_lock); } static unsigned long sbwdog_gate; /* keeps it to one thread only */ static char __iomem *kern_dog = (char __iomem *)(IO_BASE + (A_SCD_WDOG_CFG_0)); static char __iomem *user_dog = (char __iomem *)(IO_BASE + (A_SCD_WDOG_CFG_1)); static unsigned long timeout = 0x7fffffUL; /* useconds: 8.3ish secs. 
*/ static int expect_close; static const struct watchdog_info ident = { .options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .identity = "SiByte Watchdog", }; /* * Allow only a single thread to walk the dog */ static int sbwdog_open(struct inode *inode, struct file *file) { nonseekable_open(inode, file); if (test_and_set_bit(0, &sbwdog_gate)) return -EBUSY; __module_get(THIS_MODULE); /* * Activate the timer */ sbwdog_set(user_dog, timeout); __raw_writeb(1, user_dog); return 0; } /* * Put the dog back in the kennel. */ static int sbwdog_release(struct inode *inode, struct file *file) { if (expect_close == 42) { __raw_writeb(0, user_dog); module_put(THIS_MODULE); } else { pr_crit("%s: Unexpected close, not stopping watchdog!\n", ident.identity); sbwdog_pet(user_dog); } clear_bit(0, &sbwdog_gate); expect_close = 0; return 0; } /* * 42 - the answer */ static ssize_t sbwdog_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { int i; if (len) { /* * restart the timer */ expect_close = 0; for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') expect_close = 42; } sbwdog_pet(user_dog); } return len; } static long sbwdog_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret = -ENOTTY; unsigned long time; void __user *argp = (void __user *)arg; int __user *p = argp; switch (cmd) { case WDIOC_GETSUPPORT: ret = copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; break; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: ret = put_user(0, p); break; case WDIOC_KEEPALIVE: sbwdog_pet(user_dog); ret = 0; break; case WDIOC_SETTIMEOUT: ret = get_user(time, p); if (ret) break; time *= 1000000; if (time > 0x7fffffUL) { ret = -EINVAL; break; } timeout = time; sbwdog_set(user_dog, timeout); sbwdog_pet(user_dog); case WDIOC_GETTIMEOUT: /* * get the remaining count from the ... 
count register * which is 1*8 before the config register */ ret = put_user(__raw_readq(user_dog - 8) / 1000000, p); break; } return ret; } /* * Notifier for system down */ static int sbwdog_notify_sys(struct notifier_block *this, unsigned long code, void *erf) { if (code == SYS_DOWN || code == SYS_HALT) { /* * sit and sit */ __raw_writeb(0, user_dog); __raw_writeb(0, kern_dog); } return NOTIFY_DONE; } static const struct file_operations sbwdog_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = sbwdog_write, .unlocked_ioctl = sbwdog_ioctl, .open = sbwdog_open, .release = sbwdog_release, }; static struct miscdevice sbwdog_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &sbwdog_fops, }; static struct notifier_block sbwdog_notifier = { .notifier_call = sbwdog_notify_sys, }; /* * interrupt handler * * doesn't do a whole lot for user, but oh so cleverly written so kernel * code can use it to re-up the watchdog, thereby saving the kernel from * having to create and maintain a timer, just to tickle another timer, * which is just so wrong. 
*/ irqreturn_t sbwdog_interrupt(int irq, void *addr) { unsigned long wd_init; char *wd_cfg_reg = (char *)addr; u8 cfg; cfg = __raw_readb(wd_cfg_reg); wd_init = __raw_readq(wd_cfg_reg - 8) & 0x7fffff; /* * if it's the second watchdog timer, it's for those users */ if (wd_cfg_reg == user_dog) pr_crit("%s in danger of initiating system reset " "in %ld.%01ld seconds\n", ident.identity, wd_init / 1000000, (wd_init / 100000) % 10); else cfg |= 1; __raw_writeb(cfg, wd_cfg_reg); return IRQ_HANDLED; } static int __init sbwdog_init(void) { int ret; /* * register a reboot notifier */ ret = register_reboot_notifier(&sbwdog_notifier); if (ret) { pr_err("%s: cannot register reboot notifier (err=%d)\n", ident.identity, ret); return ret; } /* * get the resources */ ret = request_irq(1, sbwdog_interrupt, IRQF_SHARED, ident.identity, (void *)user_dog); if (ret) { pr_err("%s: failed to request irq 1 - %d\n", ident.identity, ret); goto out; } ret = misc_register(&sbwdog_miscdev); if (ret == 0) { pr_info("%s: timeout is %ld.%ld secs\n", ident.identity, timeout / 1000000, (timeout / 100000) % 10); return 0; } free_irq(1, (void *)user_dog); out: unregister_reboot_notifier(&sbwdog_notifier); return ret; } static void __exit sbwdog_exit(void) { misc_deregister(&sbwdog_miscdev); free_irq(1, (void *)user_dog); unregister_reboot_notifier(&sbwdog_notifier); } module_init(sbwdog_init); module_exit(sbwdog_exit); MODULE_AUTHOR("Andrew Sharp <andy.sharp@lsi.com>"); MODULE_DESCRIPTION("SiByte Watchdog"); module_param(timeout, ulong, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in microseconds (max/default 8388607 or 8.3ish secs)"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); /* * example code that can be put in a platform code area to utilize the * first watchdog timer for the kernels own purpose. 
void platform_wd_setup(void) { int ret; ret = request_irq(1, sbwdog_interrupt, IRQF_SHARED, "Kernel Watchdog", IOADDR(A_SCD_WDOG_CFG_0)); if (ret) { pr_crit("Watchdog IRQ zero(0) failed to be requested - %d\n", ret); } } */
gpl-2.0
fus1on/3.4.xx_LG_kernel
drivers/scsi/dmx3191d.c
9281
4548
/* dmx3191d.c - driver for the Domex DMX3191D SCSI card. Copyright (C) 2000 by Massimo Piccioni <dafastidio@libero.it> Portions Copyright (C) 2004 by Christoph Hellwig <hch@lst.de> Based on the generic NCR5380 driver by Drew Eckhardt et al. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <asm/io.h> #include <scsi/scsi_host.h> /* * Definitions for the generic 5380 driver. 
*/ #define AUTOSENSE #define NCR5380_read(reg) inb(port + reg) #define NCR5380_write(reg, value) outb(value, port + reg) #define NCR5380_implementation_fields unsigned int port #define NCR5380_local_declare() NCR5380_implementation_fields #define NCR5380_setup(instance) port = instance->io_port /* * Includes needed for NCR5380.[ch] (XXX: Move them to NCR5380.h) */ #include <linux/delay.h> #include "scsi.h" #include "NCR5380.h" #include "NCR5380.c" #define DMX3191D_DRIVER_NAME "dmx3191d" #define DMX3191D_REGION_LEN 8 static struct scsi_host_template dmx3191d_driver_template = { .proc_name = DMX3191D_DRIVER_NAME, .name = "Domex DMX3191D", .queuecommand = NCR5380_queue_command, .eh_abort_handler = NCR5380_abort, .eh_bus_reset_handler = NCR5380_bus_reset, .can_queue = 32, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = 2, .use_clustering = DISABLE_CLUSTERING, }; static int __devinit dmx3191d_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct Scsi_Host *shost; unsigned long io; int error = -ENODEV; if (pci_enable_device(pdev)) goto out; io = pci_resource_start(pdev, 0); if (!request_region(io, DMX3191D_REGION_LEN, DMX3191D_DRIVER_NAME)) { printk(KERN_ERR "dmx3191: region 0x%lx-0x%lx already reserved\n", io, io + DMX3191D_REGION_LEN); goto out_disable_device; } shost = scsi_host_alloc(&dmx3191d_driver_template, sizeof(struct NCR5380_hostdata)); if (!shost) goto out_release_region; shost->io_port = io; shost->irq = pdev->irq; NCR5380_init(shost, FLAG_NO_PSEUDO_DMA | FLAG_DTC3181E); if (request_irq(pdev->irq, NCR5380_intr, IRQF_SHARED, DMX3191D_DRIVER_NAME, shost)) { /* * Steam powered scsi controllers run without an IRQ anyway */ printk(KERN_WARNING "dmx3191: IRQ %d not available - " "switching to polled mode.\n", pdev->irq); shost->irq = SCSI_IRQ_NONE; } pci_set_drvdata(pdev, shost); error = scsi_add_host(shost, &pdev->dev); if (error) goto out_free_irq; scsi_scan_host(shost); return 0; out_free_irq: free_irq(shost->irq, shost); 
out_release_region: release_region(io, DMX3191D_REGION_LEN); out_disable_device: pci_disable_device(pdev); out: return error; } static void __devexit dmx3191d_remove_one(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); scsi_remove_host(shost); NCR5380_exit(shost); if (shost->irq != SCSI_IRQ_NONE) free_irq(shost->irq, shost); release_region(shost->io_port, DMX3191D_REGION_LEN); pci_disable_device(pdev); scsi_host_put(shost); } static struct pci_device_id dmx3191d_pci_tbl[] = { {PCI_VENDOR_ID_DOMEX, PCI_DEVICE_ID_DOMEX_DMX3191D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4}, { } }; MODULE_DEVICE_TABLE(pci, dmx3191d_pci_tbl); static struct pci_driver dmx3191d_pci_driver = { .name = DMX3191D_DRIVER_NAME, .id_table = dmx3191d_pci_tbl, .probe = dmx3191d_probe_one, .remove = __devexit_p(dmx3191d_remove_one), }; static int __init dmx3191d_init(void) { return pci_register_driver(&dmx3191d_pci_driver); } static void __exit dmx3191d_exit(void) { pci_unregister_driver(&dmx3191d_pci_driver); } module_init(dmx3191d_init); module_exit(dmx3191d_exit); MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>"); MODULE_DESCRIPTION("Domex DMX3191D SCSI driver"); MODULE_LICENSE("GPL");
gpl-2.0
Bi-Turbo/VPK_ALE-L21
arch/arm/mm/cache-xsc3l2.c
9793
5060
/* * arch/arm/mm/cache-xsc3l2.c - XScale3 L2 cache controller support * * Copyright (C) 2007 ARM Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/highmem.h> #include <asm/cp15.h> #include <asm/cputype.h> #include <asm/cacheflush.h> #define CR_L2 (1 << 26) #define CACHE_LINE_SIZE 32 #define CACHE_LINE_SHIFT 5 #define CACHE_WAY_PER_SET 8 #define CACHE_WAY_SIZE(l2ctype) (8192 << (((l2ctype) >> 8) & 0xf)) #define CACHE_SET_SIZE(l2ctype) (CACHE_WAY_SIZE(l2ctype) >> CACHE_LINE_SHIFT) static inline int xsc3_l2_present(void) { unsigned long l2ctype; __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype)); return !!(l2ctype & 0xf8); } static inline void xsc3_l2_clean_mva(unsigned long addr) { __asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr)); } static inline void xsc3_l2_inv_mva(unsigned long addr) { __asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr)); } static inline void xsc3_l2_inv_all(void) { unsigned long l2ctype, set_way; int set, way; __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype)); for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) { for (way = 0; way < CACHE_WAY_PER_SET; way++) { set_way = (way << 29) | (set << 5); __asm__("mcr p15, 1, %0, c7, c11, 2" : : "r"(set_way)); } } dsb(); } static inline void l2_unmap_va(unsigned long va) { #ifdef CONFIG_HIGHMEM if (va != -1) kunmap_atomic((void *)va); #endif } static inline unsigned long 
l2_map_va(unsigned long pa, unsigned long prev_va) { #ifdef CONFIG_HIGHMEM unsigned long va = prev_va & PAGE_MASK; unsigned long pa_offset = pa << (32 - PAGE_SHIFT); if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) { /* * Switching to a new page. Because cache ops are * using virtual addresses only, we must put a mapping * in place for it. */ l2_unmap_va(prev_va); va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT); } return va + (pa_offset >> (32 - PAGE_SHIFT)); #else return __phys_to_virt(pa); #endif } static void xsc3_l2_inv_range(unsigned long start, unsigned long end) { unsigned long vaddr; if (start == 0 && end == -1ul) { xsc3_l2_inv_all(); return; } vaddr = -1; /* to force the first mapping */ /* * Clean and invalidate partial first cache line. */ if (start & (CACHE_LINE_SIZE - 1)) { vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr); xsc3_l2_clean_mva(vaddr); xsc3_l2_inv_mva(vaddr); start = (start | (CACHE_LINE_SIZE - 1)) + 1; } /* * Invalidate all full cache lines between 'start' and 'end'. */ while (start < (end & ~(CACHE_LINE_SIZE - 1))) { vaddr = l2_map_va(start, vaddr); xsc3_l2_inv_mva(vaddr); start += CACHE_LINE_SIZE; } /* * Clean and invalidate partial last cache line. 
*/ if (start < end) { vaddr = l2_map_va(start, vaddr); xsc3_l2_clean_mva(vaddr); xsc3_l2_inv_mva(vaddr); } l2_unmap_va(vaddr); dsb(); } static void xsc3_l2_clean_range(unsigned long start, unsigned long end) { unsigned long vaddr; vaddr = -1; /* to force the first mapping */ start &= ~(CACHE_LINE_SIZE - 1); while (start < end) { vaddr = l2_map_va(start, vaddr); xsc3_l2_clean_mva(vaddr); start += CACHE_LINE_SIZE; } l2_unmap_va(vaddr); dsb(); } /* * optimize L2 flush all operation by set/way format */ static inline void xsc3_l2_flush_all(void) { unsigned long l2ctype, set_way; int set, way; __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype)); for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) { for (way = 0; way < CACHE_WAY_PER_SET; way++) { set_way = (way << 29) | (set << 5); __asm__("mcr p15, 1, %0, c7, c15, 2" : : "r"(set_way)); } } dsb(); } static void xsc3_l2_flush_range(unsigned long start, unsigned long end) { unsigned long vaddr; if (start == 0 && end == -1ul) { xsc3_l2_flush_all(); return; } vaddr = -1; /* to force the first mapping */ start &= ~(CACHE_LINE_SIZE - 1); while (start < end) { vaddr = l2_map_va(start, vaddr); xsc3_l2_clean_mva(vaddr); xsc3_l2_inv_mva(vaddr); start += CACHE_LINE_SIZE; } l2_unmap_va(vaddr); dsb(); } static int __init xsc3_l2_init(void) { if (!cpu_is_xsc3() || !xsc3_l2_present()) return 0; if (get_cr() & CR_L2) { pr_info("XScale3 L2 cache enabled.\n"); xsc3_l2_inv_all(); outer_cache.inv_range = xsc3_l2_inv_range; outer_cache.clean_range = xsc3_l2_clean_range; outer_cache.flush_range = xsc3_l2_flush_range; } return 0; } core_initcall(xsc3_l2_init);
gpl-2.0
maz-1/android_kernel_lge_msm8974
crypto/blowfish_common.c
10049
16124
/* * Cryptographic API. * * Common Blowfish algorithm parts shared between the c and assembler * implementations. * * Blowfish Cipher Algorithm, by Bruce Schneier. * http://www.counterpane.com/blowfish.html * * Adapted from Kerneli implementation. * * Copyright (c) Herbert Valerio Riedel <hvr@hvrlab.org> * Copyright (c) Kyle McMartin <kyle@debian.org> * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <asm/byteorder.h> #include <linux/crypto.h> #include <linux/types.h> #include <crypto/blowfish.h> static const u32 bf_pbox[16 + 2] = { 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, }; static const u32 bf_sbox[256 * 4] = { 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 
0x83f44239, 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, 0x11c81968, 0x4e734a41, 0xb3472dca, 
0x7b14a94a, 0x1b510052, 0x9a532915, 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, 0x1521b628, 0x29076170, 0xecdd4775, 
0x619f1510, 0x13cca830, 0xeb61bd96, 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, 0x6841e7f7, 0xca7820fb, 0xfb0af54e, 
0xd8feb397, 0x454056ac, 0xba489527, 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, 0x006058aa, 
0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, 0x740e0d8d, 
0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, }; /* * Round loop unrolling macros, S is a pointer to a S-Box array * organized in 4 unsigned longs at a row. */ #define GET32_3(x) (((x) & 0xff)) #define GET32_2(x) (((x) >> (8)) & (0xff)) #define GET32_1(x) (((x) >> (16)) & (0xff)) #define GET32_0(x) (((x) >> (24)) & (0xff)) #define bf_F(x) (((S[GET32_0(x)] + S[256 + GET32_1(x)]) ^ \ S[512 + GET32_2(x)]) + S[768 + GET32_3(x)]) #define ROUND(a, b, n) ({ b ^= P[n]; a ^= bf_F(b); }) /* * The blowfish encipher, processes 64-bit blocks. 
* NOTE: This function MUSTN'T respect endianess */ static void encrypt_block(struct bf_ctx *bctx, u32 *dst, u32 *src) { const u32 *P = bctx->p; const u32 *S = bctx->s; u32 yl = src[0]; u32 yr = src[1]; ROUND(yr, yl, 0); ROUND(yl, yr, 1); ROUND(yr, yl, 2); ROUND(yl, yr, 3); ROUND(yr, yl, 4); ROUND(yl, yr, 5); ROUND(yr, yl, 6); ROUND(yl, yr, 7); ROUND(yr, yl, 8); ROUND(yl, yr, 9); ROUND(yr, yl, 10); ROUND(yl, yr, 11); ROUND(yr, yl, 12); ROUND(yl, yr, 13); ROUND(yr, yl, 14); ROUND(yl, yr, 15); yl ^= P[16]; yr ^= P[17]; dst[0] = yr; dst[1] = yl; } /* * Calculates the blowfish S and P boxes for encryption and decryption. */ int blowfish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct bf_ctx *ctx = crypto_tfm_ctx(tfm); u32 *P = ctx->p; u32 *S = ctx->s; short i, j, count; u32 data[2], temp; /* Copy the initialization s-boxes */ for (i = 0, count = 0; i < 256; i++) for (j = 0; j < 4; j++, count++) S[count] = bf_sbox[count]; /* Set the p-boxes */ for (i = 0; i < 16 + 2; i++) P[i] = bf_pbox[i]; /* Actual subkey generation */ for (j = 0, i = 0; i < 16 + 2; i++) { temp = (((u32)key[j] << 24) | ((u32)key[(j + 1) % keylen] << 16) | ((u32)key[(j + 2) % keylen] << 8) | ((u32)key[(j + 3) % keylen])); P[i] = P[i] ^ temp; j = (j + 4) % keylen; } data[0] = 0x00000000; data[1] = 0x00000000; for (i = 0; i < 16 + 2; i += 2) { encrypt_block((struct bf_ctx *)ctx, data, data); P[i] = data[0]; P[i + 1] = data[1]; } for (i = 0; i < 4; i++) { for (j = 0, count = i * 256; j < 256; j += 2, count += 2) { encrypt_block((struct bf_ctx *)ctx, data, data); S[count] = data[0]; S[count + 1] = data[1]; } } /* Bruce says not to bother with the weak key check. */ return 0; } EXPORT_SYMBOL_GPL(blowfish_setkey); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Blowfish Cipher common functions");
gpl-2.0
ghmajx/asuswrt-merlin
release/src-rt/linux/linux-2.6/sound/oss/swarm_cs4297a.c
66
89094
/******************************************************************************* * * "swarm_cs4297a.c" -- Cirrus Logic-Crystal CS4297a linux audio driver. * * Copyright (C) 2001 Broadcom Corporation. * Copyright (C) 2000,2001 Cirrus Logic Corp. * -- adapted from drivers by Thomas Sailer, * -- but don't bug him; Problems should go to: * -- tom woller (twoller@crystal.cirrus.com) or * (audio@crystal.cirrus.com). * -- adapted from cs4281 PCI driver for cs4297a on * BCM1250 Synchronous Serial interface * (Kip Walker, Broadcom Corp.) * Copyright (C) 2004 Maciej W. Rozycki * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Module command line parameters: * none * * Supported devices: * /dev/dsp standard /dev/dsp device, (mostly) OSS compatible * /dev/mixer standard /dev/mixer device, (mostly) OSS compatible * /dev/midi simple MIDI UART interface, no ioctl * * Modification History * 08/20/00 trw - silence and no stopping DAC until release * 08/23/00 trw - added CS_DBG statements, fix interrupt hang issue on DAC stop. 
* 09/18/00 trw - added 16bit only record with conversion * 09/24/00 trw - added Enhanced Full duplex (separate simultaneous * capture/playback rates) * 10/03/00 trw - fixed mmap (fixed GRECORD and the XMMS mmap test plugin * libOSSm.so) * 10/11/00 trw - modified for 2.4.0-test9 kernel enhancements (NR_MAP removal) * 11/03/00 trw - fixed interrupt loss/stutter, added debug. * 11/10/00 bkz - added __devinit to cs4297a_hw_init() * 11/10/00 trw - fixed SMP and capture spinlock hang. * 12/04/00 trw - cleaned up CSDEBUG flags and added "defaultorder" moduleparm. * 12/05/00 trw - fixed polling (myth2), and added underrun swptr fix. * 12/08/00 trw - added PM support. * 12/14/00 trw - added wrapper code, builds under 2.4.0, 2.2.17-20, 2.2.17-8 * (RH/Dell base), 2.2.18, 2.2.12. cleaned up code mods by ident. * 12/19/00 trw - added PM support for 2.2 base (apm_callback). other PM cleanup. * 12/21/00 trw - added fractional "defaultorder" inputs. if >100 then use * defaultorder-100 as power of 2 for the buffer size. example: * 106 = 2^(106-100) = 2^6 = 64 bytes for the buffer size. 
* *******************************************************************************/ #include <linux/list.h> #include <linux/module.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/sound.h> #include <linux/slab.h> #include <linux/soundcard.h> #include <linux/ac97_codec.h> #include <linux/pci.h> #include <linux/bitops.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/mutex.h> #include <linux/kernel.h> #include <asm/byteorder.h> #include <asm/dma.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/sb1250_int.h> #include <asm/sibyte/sb1250_dma.h> #include <asm/sibyte/sb1250_scd.h> #include <asm/sibyte/sb1250_syncser.h> #include <asm/sibyte/sb1250_mac.h> #include <asm/sibyte/sb1250.h> struct cs4297a_state; static void stop_dac(struct cs4297a_state *s); static void stop_adc(struct cs4297a_state *s); static void start_dac(struct cs4297a_state *s); static void start_adc(struct cs4297a_state *s); #undef OSS_DOCUMENTED_MIXER_SEMANTICS // --------------------------------------------------------------------- #define CS4297a_MAGIC 0xf00beef1 // buffer order determines the size of the dma buffer for the driver. // under Linux, a smaller buffer allows more responsiveness from many of the // applications (e.g. games). A larger buffer allows some of the apps (esound) // to not underrun the dma buffer as easily. As default, use 32k (order=3) // rather than 64k as some of the games work more responsively. // log base 2( buff sz = 32k). 
//static unsigned long defaultorder = 3; //MODULE_PARM(defaultorder, "i"); // // Turn on/off debugging compilation by commenting out "#define CSDEBUG" // #define CSDEBUG 0 #if CSDEBUG #define CSDEBUG_INTERFACE 1 #else #undef CSDEBUG_INTERFACE #endif // // cs_debugmask areas // #define CS_INIT 0x00000001 // initialization and probe functions #define CS_ERROR 0x00000002 // tmp debugging bit placeholder #define CS_INTERRUPT 0x00000004 // interrupt handler (separate from all other) #define CS_FUNCTION 0x00000008 // enter/leave functions #define CS_WAVE_WRITE 0x00000010 // write information for wave #define CS_WAVE_READ 0x00000020 // read information for wave #define CS_AC97 0x00000040 // AC97 register access #define CS_DESCR 0x00000080 // descriptor management #define CS_OPEN 0x00000400 // all open functions in the driver #define CS_RELEASE 0x00000800 // all release functions in the driver #define CS_PARMS 0x00001000 // functional and operational parameters #define CS_IOCTL 0x00002000 // ioctl (non-mixer) #define CS_TMP 0x10000000 // tmp debug mask bit // // CSDEBUG is usual mode is set to 1, then use the // cs_debuglevel and cs_debugmask to turn on or off debugging. // Debug level of 1 has been defined to be kernel errors and info // that should be printed on any released driver. 
// #if CSDEBUG #define CS_DBGOUT(mask,level,x) if((cs_debuglevel >= (level)) && ((mask) & cs_debugmask) ) {x;} #else #define CS_DBGOUT(mask,level,x) #endif #if CSDEBUG static unsigned long cs_debuglevel = 4; // levels range from 1-9 static unsigned long cs_debugmask = CS_INIT /*| CS_IOCTL*/; module_param(cs_debuglevel, int, 0); module_param(cs_debugmask, int, 0); #endif #define CS_TRUE 1 #define CS_FALSE 0 #define CS_TYPE_ADC 0 #define CS_TYPE_DAC 1 #define SER_BASE (A_SER_BASE_1 + KSEG1) #define SS_CSR(t) (SER_BASE+t) #define SS_TXTBL(t) (SER_BASE+R_SER_TX_TABLE_BASE+(t*8)) #define SS_RXTBL(t) (SER_BASE+R_SER_RX_TABLE_BASE+(t*8)) #define FRAME_BYTES 32 #define FRAME_SAMPLE_BYTES 4 /* Should this be variable? */ #define SAMPLE_BUF_SIZE (16*1024) #define SAMPLE_FRAME_COUNT (SAMPLE_BUF_SIZE / FRAME_SAMPLE_BYTES) /* The driver can explode/shrink the frames to/from a smaller sample buffer */ #define DMA_BLOAT_FACTOR 1 #define DMA_DESCR (SAMPLE_FRAME_COUNT / DMA_BLOAT_FACTOR) #define DMA_BUF_SIZE (DMA_DESCR * FRAME_BYTES) /* Use the maxmium count (255 == 5.1 ms between interrupts) */ #define DMA_INT_CNT ((1 << S_DMA_INT_PKTCNT) - 1) /* Figure this out: how many TX DMAs ahead to schedule a reg access */ #define REG_LATENCY 150 #define FRAME_TX_US 20 #define SERDMA_NEXTBUF(d,f) (((d)->f+1) % (d)->ringsz) static const char invalid_magic[] = KERN_CRIT "cs4297a: invalid magic value\n"; #define VALIDATE_STATE(s) \ ({ \ if (!(s) || (s)->magic != CS4297a_MAGIC) { \ printk(invalid_magic); \ return -ENXIO; \ } \ }) struct list_head cs4297a_devs = { &cs4297a_devs, &cs4297a_devs }; typedef struct serdma_descr_s { u64 descr_a; u64 descr_b; } serdma_descr_t; typedef unsigned long paddr_t; typedef struct serdma_s { unsigned ringsz; serdma_descr_t *descrtab; serdma_descr_t *descrtab_end; paddr_t descrtab_phys; serdma_descr_t *descr_add; serdma_descr_t *descr_rem; u64 *dma_buf; // buffer for DMA contents (frames) paddr_t dma_buf_phys; u16 *sample_buf; // tmp buffer for sample 
conversions u16 *sb_swptr; u16 *sb_hwptr; u16 *sb_end; dma_addr_t dmaaddr; // unsigned buforder; // Log base 2 of 'dma_buf' size in bytes.. unsigned numfrag; // # of 'fragments' in the buffer. unsigned fragshift; // Log base 2 of fragment size. unsigned hwptr, swptr; unsigned total_bytes; // # bytes process since open. unsigned blocks; // last returned blocks value GETOPTR unsigned wakeup; // interrupt occurred on block int count; unsigned underrun; // underrun flag unsigned error; // over/underrun wait_queue_head_t wait; wait_queue_head_t reg_wait; // redundant, but makes calculations easier unsigned fragsize; // 2**fragshift.. unsigned sbufsz; // 2**buforder. unsigned fragsamples; // OSS stuff unsigned mapped:1; // Buffer mapped in cs4297a_mmap()? unsigned ready:1; // prog_dmabuf_dac()/adc() successful? unsigned endcleared:1; unsigned type:1; // adc or dac buffer (CS_TYPE_XXX) unsigned ossfragshift; int ossmaxfrags; unsigned subdivision; } serdma_t; struct cs4297a_state { // magic unsigned int magic; struct list_head list; // soundcore stuff int dev_audio; int dev_mixer; // hardware resources unsigned int irq; struct { unsigned int rx_ovrrn; /* FIFO */ unsigned int rx_overflow; /* staging buffer */ unsigned int tx_underrun; unsigned int rx_bad; unsigned int rx_good; } stats; // mixer registers struct { unsigned short vol[10]; unsigned int recsrc; unsigned int modcnt; unsigned short micpreamp; } mix; // wave stuff struct properties { unsigned fmt; unsigned fmt_original; // original requested format unsigned channels; unsigned rate; } prop_dac, prop_adc; unsigned conversion:1; // conversion from 16 to 8 bit in progress unsigned ena; spinlock_t lock; struct mutex open_mutex; struct mutex open_sem_adc; struct mutex open_sem_dac; mode_t open_mode; wait_queue_head_t open_wait; wait_queue_head_t open_wait_adc; wait_queue_head_t open_wait_dac; dma_addr_t dmaaddr_sample_buf; unsigned buforder_sample_buf; // Log base 2 of 'dma_buf' size in bytes.. 
serdma_t dma_dac, dma_adc; volatile u16 read_value; volatile u16 read_reg; volatile u64 reg_request; }; #if 1 #define prog_codec(a,b) #define dealloc_dmabuf(a,b); #endif static int prog_dmabuf_adc(struct cs4297a_state *s) { s->dma_adc.ready = 1; return 0; } static int prog_dmabuf_dac(struct cs4297a_state *s) { s->dma_dac.ready = 1; return 0; } static void clear_advance(void *buf, unsigned bsize, unsigned bptr, unsigned len, unsigned char c) { if (bptr + len > bsize) { unsigned x = bsize - bptr; memset(((char *) buf) + bptr, c, x); bptr = 0; len -= x; } CS_DBGOUT(CS_WAVE_WRITE, 4, printk(KERN_INFO "cs4297a: clear_advance(): memset %d at 0x%.8x for %d size \n", (unsigned)c, (unsigned)((char *) buf) + bptr, len)); memset(((char *) buf) + bptr, c, len); } #if CSDEBUG // DEBUG ROUTINES #define SOUND_MIXER_CS_GETDBGLEVEL _SIOWR('M',120, int) #define SOUND_MIXER_CS_SETDBGLEVEL _SIOWR('M',121, int) #define SOUND_MIXER_CS_GETDBGMASK _SIOWR('M',122, int) #define SOUND_MIXER_CS_SETDBGMASK _SIOWR('M',123, int) static void cs_printioctl(unsigned int x) { unsigned int i; unsigned char vidx; // Index of mixtable1[] member is Device ID // and must be <= SOUND_MIXER_NRDEVICES. 
// Value of array member is index into s->mix.vol[] static const unsigned char mixtable1[SOUND_MIXER_NRDEVICES] = { [SOUND_MIXER_PCM] = 1, // voice [SOUND_MIXER_LINE1] = 2, // AUX [SOUND_MIXER_CD] = 3, // CD [SOUND_MIXER_LINE] = 4, // Line [SOUND_MIXER_SYNTH] = 5, // FM [SOUND_MIXER_MIC] = 6, // Mic [SOUND_MIXER_SPEAKER] = 7, // Speaker [SOUND_MIXER_RECLEV] = 8, // Recording level [SOUND_MIXER_VOLUME] = 9 // Master Volume }; switch (x) { case SOUND_MIXER_CS_GETDBGMASK: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_CS_GETDBGMASK:\n")); break; case SOUND_MIXER_CS_GETDBGLEVEL: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_CS_GETDBGLEVEL:\n")); break; case SOUND_MIXER_CS_SETDBGMASK: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_CS_SETDBGMASK:\n")); break; case SOUND_MIXER_CS_SETDBGLEVEL: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_CS_SETDBGLEVEL:\n")); break; case OSS_GETVERSION: CS_DBGOUT(CS_IOCTL, 4, printk("OSS_GETVERSION:\n")); break; case SNDCTL_DSP_SYNC: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SYNC:\n")); break; case SNDCTL_DSP_SETDUPLEX: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SETDUPLEX:\n")); break; case SNDCTL_DSP_GETCAPS: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETCAPS:\n")); break; case SNDCTL_DSP_RESET: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_RESET:\n")); break; case SNDCTL_DSP_SPEED: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SPEED:\n")); break; case SNDCTL_DSP_STEREO: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_STEREO:\n")); break; case SNDCTL_DSP_CHANNELS: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_CHANNELS:\n")); break; case SNDCTL_DSP_GETFMTS: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETFMTS:\n")); break; case SNDCTL_DSP_SETFMT: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SETFMT:\n")); break; case SNDCTL_DSP_POST: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_POST:\n")); break; case SNDCTL_DSP_GETTRIGGER: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETTRIGGER:\n")); break; case SNDCTL_DSP_SETTRIGGER: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SETTRIGGER:\n")); 
break; case SNDCTL_DSP_GETOSPACE: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETOSPACE:\n")); break; case SNDCTL_DSP_GETISPACE: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETISPACE:\n")); break; case SNDCTL_DSP_NONBLOCK: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_NONBLOCK:\n")); break; case SNDCTL_DSP_GETODELAY: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETODELAY:\n")); break; case SNDCTL_DSP_GETIPTR: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETIPTR:\n")); break; case SNDCTL_DSP_GETOPTR: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETOPTR:\n")); break; case SNDCTL_DSP_GETBLKSIZE: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETBLKSIZE:\n")); break; case SNDCTL_DSP_SETFRAGMENT: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SETFRAGMENT:\n")); break; case SNDCTL_DSP_SUBDIVIDE: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SUBDIVIDE:\n")); break; case SOUND_PCM_READ_RATE: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_PCM_READ_RATE:\n")); break; case SOUND_PCM_READ_CHANNELS: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_PCM_READ_CHANNELS:\n")); break; case SOUND_PCM_READ_BITS: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_PCM_READ_BITS:\n")); break; case SOUND_PCM_WRITE_FILTER: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_PCM_WRITE_FILTER:\n")); break; case SNDCTL_DSP_SETSYNCRO: CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SETSYNCRO:\n")); break; case SOUND_PCM_READ_FILTER: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_PCM_READ_FILTER:\n")); break; case SOUND_MIXER_PRIVATE1: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_PRIVATE1:\n")); break; case SOUND_MIXER_PRIVATE2: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_PRIVATE2:\n")); break; case SOUND_MIXER_PRIVATE3: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_PRIVATE3:\n")); break; case SOUND_MIXER_PRIVATE4: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_PRIVATE4:\n")); break; case SOUND_MIXER_PRIVATE5: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_PRIVATE5:\n")); break; case SOUND_MIXER_INFO: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_INFO:\n")); break; case SOUND_OLD_MIXER_INFO: 
CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_OLD_MIXER_INFO:\n")); break; default: switch (_IOC_NR(x)) { case SOUND_MIXER_VOLUME: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_VOLUME:\n")); break; case SOUND_MIXER_SPEAKER: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_SPEAKER:\n")); break; case SOUND_MIXER_RECLEV: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_RECLEV:\n")); break; case SOUND_MIXER_MIC: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_MIC:\n")); break; case SOUND_MIXER_SYNTH: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_SYNTH:\n")); break; case SOUND_MIXER_RECSRC: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_RECSRC:\n")); break; case SOUND_MIXER_DEVMASK: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_DEVMASK:\n")); break; case SOUND_MIXER_RECMASK: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_RECMASK:\n")); break; case SOUND_MIXER_STEREODEVS: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_STEREODEVS:\n")); break; case SOUND_MIXER_CAPS: CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_CAPS:\n")); break; default: i = _IOC_NR(x); if (i >= SOUND_MIXER_NRDEVICES || !(vidx = mixtable1[i])) { CS_DBGOUT(CS_IOCTL, 4, printk ("UNKNOWN IOCTL: 0x%.8x NR=%d\n", x, i)); } else { CS_DBGOUT(CS_IOCTL, 4, printk ("SOUND_MIXER_IOCTL AC9x: 0x%.8x NR=%d\n", x, i)); } break; } } } #endif static int ser_init(struct cs4297a_state *s) { int i; CS_DBGOUT(CS_INIT, 2, printk(KERN_INFO "cs4297a: Setting up serial parameters\n")); __raw_writeq(M_SYNCSER_CMD_RX_RESET | M_SYNCSER_CMD_TX_RESET, SS_CSR(R_SER_CMD)); __raw_writeq(M_SYNCSER_MSB_FIRST, SS_CSR(R_SER_MODE)); __raw_writeq(32, SS_CSR(R_SER_MINFRM_SZ)); __raw_writeq(32, SS_CSR(R_SER_MAXFRM_SZ)); __raw_writeq(1, SS_CSR(R_SER_TX_RD_THRSH)); __raw_writeq(4, SS_CSR(R_SER_TX_WR_THRSH)); __raw_writeq(8, SS_CSR(R_SER_RX_RD_THRSH)); /* This looks good from experimentation */ __raw_writeq((M_SYNCSER_TXSYNC_INT | V_SYNCSER_TXSYNC_DLY(0) | M_SYNCSER_TXCLK_EXT | M_SYNCSER_RXSYNC_INT | V_SYNCSER_RXSYNC_DLY(1) | M_SYNCSER_RXCLK_EXT | M_SYNCSER_RXSYNC_EDGE), SS_CSR(R_SER_LINE_MODE)); 
/* This looks good from experimentation */ __raw_writeq(V_SYNCSER_SEQ_COUNT(14) | M_SYNCSER_SEQ_ENABLE | M_SYNCSER_SEQ_STROBE, SS_TXTBL(0)); __raw_writeq(V_SYNCSER_SEQ_COUNT(15) | M_SYNCSER_SEQ_ENABLE | M_SYNCSER_SEQ_BYTE, SS_TXTBL(1)); __raw_writeq(V_SYNCSER_SEQ_COUNT(13) | M_SYNCSER_SEQ_ENABLE | M_SYNCSER_SEQ_BYTE, SS_TXTBL(2)); __raw_writeq(V_SYNCSER_SEQ_COUNT( 0) | M_SYNCSER_SEQ_ENABLE | M_SYNCSER_SEQ_STROBE | M_SYNCSER_SEQ_LAST, SS_TXTBL(3)); __raw_writeq(V_SYNCSER_SEQ_COUNT(14) | M_SYNCSER_SEQ_ENABLE | M_SYNCSER_SEQ_STROBE, SS_RXTBL(0)); __raw_writeq(V_SYNCSER_SEQ_COUNT(15) | M_SYNCSER_SEQ_ENABLE | M_SYNCSER_SEQ_BYTE, SS_RXTBL(1)); __raw_writeq(V_SYNCSER_SEQ_COUNT(13) | M_SYNCSER_SEQ_ENABLE | M_SYNCSER_SEQ_BYTE, SS_RXTBL(2)); __raw_writeq(V_SYNCSER_SEQ_COUNT( 0) | M_SYNCSER_SEQ_ENABLE | M_SYNCSER_SEQ_STROBE | M_SYNCSER_SEQ_LAST, SS_RXTBL(3)); for (i=4; i<16; i++) { /* Just in case... */ __raw_writeq(M_SYNCSER_SEQ_LAST, SS_TXTBL(i)); __raw_writeq(M_SYNCSER_SEQ_LAST, SS_RXTBL(i)); } return 0; } static int init_serdma(serdma_t *dma) { CS_DBGOUT(CS_INIT, 2, printk(KERN_ERR "cs4297a: desc - %d sbufsize - %d dbufsize - %d\n", DMA_DESCR, SAMPLE_BUF_SIZE, DMA_BUF_SIZE)); /* Descriptors */ dma->ringsz = DMA_DESCR; dma->descrtab = kzalloc(dma->ringsz * sizeof(serdma_descr_t), GFP_KERNEL); if (!dma->descrtab) { printk(KERN_ERR "cs4297a: kzalloc descrtab failed\n"); return -1; } dma->descrtab_end = dma->descrtab + dma->ringsz; /* XXX bloddy mess, use proper DMA API here ... 
*/ dma->descrtab_phys = CPHYSADDR((long)dma->descrtab); dma->descr_add = dma->descr_rem = dma->descrtab; /* Frame buffer area */ dma->dma_buf = kzalloc(DMA_BUF_SIZE, GFP_KERNEL); if (!dma->dma_buf) { printk(KERN_ERR "cs4297a: kzalloc dma_buf failed\n"); kfree(dma->descrtab); return -1; } dma->dma_buf_phys = CPHYSADDR((long)dma->dma_buf); /* Samples buffer area */ dma->sbufsz = SAMPLE_BUF_SIZE; dma->sample_buf = kmalloc(dma->sbufsz, GFP_KERNEL); if (!dma->sample_buf) { printk(KERN_ERR "cs4297a: kmalloc sample_buf failed\n"); kfree(dma->descrtab); kfree(dma->dma_buf); return -1; } dma->sb_swptr = dma->sb_hwptr = dma->sample_buf; dma->sb_end = (u16 *)((void *)dma->sample_buf + dma->sbufsz); dma->fragsize = dma->sbufsz >> 1; CS_DBGOUT(CS_INIT, 4, printk(KERN_ERR "cs4297a: descrtab - %08x dma_buf - %x sample_buf - %x\n", (int)dma->descrtab, (int)dma->dma_buf, (int)dma->sample_buf)); return 0; } static int dma_init(struct cs4297a_state *s) { int i; CS_DBGOUT(CS_INIT, 2, printk(KERN_INFO "cs4297a: Setting up DMA\n")); if (init_serdma(&s->dma_adc) || init_serdma(&s->dma_dac)) return -1; if (__raw_readq(SS_CSR(R_SER_DMA_DSCR_COUNT_RX))|| __raw_readq(SS_CSR(R_SER_DMA_DSCR_COUNT_TX))) { panic("DMA state corrupted?!"); } /* Initialize now - the descr/buffer pairings will never change... 
*/ for (i=0; i<DMA_DESCR; i++) { s->dma_dac.descrtab[i].descr_a = M_DMA_SERRX_SOP | V_DMA_DSCRA_A_SIZE(1) | (s->dma_dac.dma_buf_phys + i*FRAME_BYTES); s->dma_dac.descrtab[i].descr_b = V_DMA_DSCRB_PKT_SIZE(FRAME_BYTES); s->dma_adc.descrtab[i].descr_a = V_DMA_DSCRA_A_SIZE(1) | (s->dma_adc.dma_buf_phys + i*FRAME_BYTES); s->dma_adc.descrtab[i].descr_b = 0; } __raw_writeq((M_DMA_EOP_INT_EN | V_DMA_INT_PKTCNT(DMA_INT_CNT) | V_DMA_RINGSZ(DMA_DESCR) | M_DMA_TDX_EN), SS_CSR(R_SER_DMA_CONFIG0_RX)); __raw_writeq(M_DMA_L2CA, SS_CSR(R_SER_DMA_CONFIG1_RX)); __raw_writeq(s->dma_adc.descrtab_phys, SS_CSR(R_SER_DMA_DSCR_BASE_RX)); __raw_writeq(V_DMA_RINGSZ(DMA_DESCR), SS_CSR(R_SER_DMA_CONFIG0_TX)); __raw_writeq(M_DMA_L2CA | M_DMA_NO_DSCR_UPDT, SS_CSR(R_SER_DMA_CONFIG1_TX)); __raw_writeq(s->dma_dac.descrtab_phys, SS_CSR(R_SER_DMA_DSCR_BASE_TX)); /* Prep the receive DMA descriptor ring */ __raw_writeq(DMA_DESCR, SS_CSR(R_SER_DMA_DSCR_COUNT_RX)); __raw_writeq(M_SYNCSER_DMA_RX_EN | M_SYNCSER_DMA_TX_EN, SS_CSR(R_SER_DMA_ENABLE)); __raw_writeq((M_SYNCSER_RX_SYNC_ERR | M_SYNCSER_RX_OVERRUN | M_SYNCSER_RX_EOP_COUNT), SS_CSR(R_SER_INT_MASK)); /* Enable the rx/tx; let the codec warm up to the sync and start sending good frames before the receive FIFO is enabled */ __raw_writeq(M_SYNCSER_CMD_TX_EN, SS_CSR(R_SER_CMD)); udelay(1000); __raw_writeq(M_SYNCSER_CMD_RX_EN | M_SYNCSER_CMD_TX_EN, SS_CSR(R_SER_CMD)); /* XXXKW is this magic? 
(the "1" part) */ while ((__raw_readq(SS_CSR(R_SER_STATUS)) & 0xf1) != 1) ; CS_DBGOUT(CS_INIT, 4, printk(KERN_INFO "cs4297a: status: %08x\n", (unsigned int)(__raw_readq(SS_CSR(R_SER_STATUS)) & 0xffffffff))); return 0; } static int serdma_reg_access(struct cs4297a_state *s, u64 data) { serdma_t *d = &s->dma_dac; u64 *data_p; unsigned swptr; unsigned long flags; serdma_descr_t *descr; if (s->reg_request) { printk(KERN_ERR "cs4297a: attempt to issue multiple reg_access\n"); return -1; } if (s->ena & FMODE_WRITE) { /* Since a writer has the DSP open, we have to mux the request in */ s->reg_request = data; interruptible_sleep_on(&s->dma_dac.reg_wait); /* XXXKW how can I deal with the starvation case where the opener isn't writing? */ } else { /* Be safe when changing ring pointers */ spin_lock_irqsave(&s->lock, flags); if (d->hwptr != d->swptr) { printk(KERN_ERR "cs4297a: reg access found bookkeeping error (hw/sw = %d/%d\n", d->hwptr, d->swptr); spin_unlock_irqrestore(&s->lock, flags); return -1; } swptr = d->swptr; d->hwptr = d->swptr = (d->swptr + 1) % d->ringsz; spin_unlock_irqrestore(&s->lock, flags); descr = &d->descrtab[swptr]; data_p = &d->dma_buf[swptr * 4]; *data_p = cpu_to_be64(data); __raw_writeq(1, SS_CSR(R_SER_DMA_DSCR_COUNT_TX)); CS_DBGOUT(CS_DESCR, 4, printk(KERN_INFO "cs4297a: add_tx %p (%x -> %x)\n", data_p, swptr, d->hwptr)); } CS_DBGOUT(CS_FUNCTION, 6, printk(KERN_INFO "cs4297a: serdma_reg_access()-\n")); return 0; } //**************************************************************************** // "cs4297a_read_ac97" -- Reads an AC97 register //**************************************************************************** static int cs4297a_read_ac97(struct cs4297a_state *s, u32 offset, u32 * value) { CS_DBGOUT(CS_AC97, 1, printk(KERN_INFO "cs4297a: read reg %2x\n", offset)); if (serdma_reg_access(s, (0xCLL << 60) | (1LL << 47) | ((u64)(offset & 0x7F) << 40))) return -1; interruptible_sleep_on(&s->dma_adc.reg_wait); *value = s->read_value; 
CS_DBGOUT(CS_AC97, 2, printk(KERN_INFO "cs4297a: rdr reg %x -> %x\n", s->read_reg, s->read_value)); return 0; } //**************************************************************************** // "cs4297a_write_ac97()"-- writes an AC97 register //**************************************************************************** static int cs4297a_write_ac97(struct cs4297a_state *s, u32 offset, u32 value) { CS_DBGOUT(CS_AC97, 1, printk(KERN_INFO "cs4297a: write reg %2x -> %04x\n", offset, value)); return (serdma_reg_access(s, (0xELL << 60) | ((u64)(offset & 0x7F) << 40) | ((value & 0xffff) << 12))); } static void stop_dac(struct cs4297a_state *s) { unsigned long flags; CS_DBGOUT(CS_WAVE_WRITE, 3, printk(KERN_INFO "cs4297a: stop_dac():\n")); spin_lock_irqsave(&s->lock, flags); s->ena &= ~FMODE_WRITE; #if 0 /* XXXKW what do I really want here? My theory for now is that I just flip the "ena" bit, and the interrupt handler will stop processing the xmit channel */ __raw_writeq((s->ena & FMODE_READ) ? M_SYNCSER_DMA_RX_EN : 0, SS_CSR(R_SER_DMA_ENABLE)); #endif spin_unlock_irqrestore(&s->lock, flags); } static void start_dac(struct cs4297a_state *s) { unsigned long flags; CS_DBGOUT(CS_FUNCTION, 3, printk(KERN_INFO "cs4297a: start_dac()+\n")); spin_lock_irqsave(&s->lock, flags); if (!(s->ena & FMODE_WRITE) && (s->dma_dac.mapped || (s->dma_dac.count > 0 && s->dma_dac.ready))) { s->ena |= FMODE_WRITE; /* XXXKW what do I really want here? 
My theory for now is that I just flip the "ena" bit, and the interrupt handler will start processing the xmit channel */ CS_DBGOUT(CS_WAVE_WRITE | CS_PARMS, 8, printk(KERN_INFO "cs4297a: start_dac(): start dma\n")); } spin_unlock_irqrestore(&s->lock, flags); CS_DBGOUT(CS_FUNCTION, 3, printk(KERN_INFO "cs4297a: start_dac()-\n")); } static void stop_adc(struct cs4297a_state *s) { unsigned long flags; CS_DBGOUT(CS_FUNCTION, 3, printk(KERN_INFO "cs4297a: stop_adc()+\n")); spin_lock_irqsave(&s->lock, flags); s->ena &= ~FMODE_READ; if (s->conversion == 1) { s->conversion = 0; s->prop_adc.fmt = s->prop_adc.fmt_original; } /* Nothing to do really, I need to keep the DMA going XXXKW when do I get here, and is there more I should do? */ spin_unlock_irqrestore(&s->lock, flags); CS_DBGOUT(CS_FUNCTION, 3, printk(KERN_INFO "cs4297a: stop_adc()-\n")); } static void start_adc(struct cs4297a_state *s) { unsigned long flags; CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_INFO "cs4297a: start_adc()+\n")); if (!(s->ena & FMODE_READ) && (s->dma_adc.mapped || s->dma_adc.count <= (signed) (s->dma_adc.sbufsz - 2 * s->dma_adc.fragsize)) && s->dma_adc.ready) { if (s->prop_adc.fmt & AFMT_S8 || s->prop_adc.fmt & AFMT_U8) { // // now only use 16 bit capture, due to truncation issue // in the chip, noticable distortion occurs. // allocate buffer and then convert from 16 bit to // 8 bit for the user buffer. // s->prop_adc.fmt_original = s->prop_adc.fmt; if (s->prop_adc.fmt & AFMT_S8) { s->prop_adc.fmt &= ~AFMT_S8; s->prop_adc.fmt |= AFMT_S16_LE; } if (s->prop_adc.fmt & AFMT_U8) { s->prop_adc.fmt &= ~AFMT_U8; s->prop_adc.fmt |= AFMT_U16_LE; } // // prog_dmabuf_adc performs a stop_adc() but that is // ok since we really haven't started the DMA yet. // prog_codec(s, CS_TYPE_ADC); prog_dmabuf_adc(s); s->conversion = 1; } spin_lock_irqsave(&s->lock, flags); s->ena |= FMODE_READ; /* Nothing to do really, I am probably already DMAing... XXXKW when do I get here, and is there more I should do? 
*/ spin_unlock_irqrestore(&s->lock, flags); CS_DBGOUT(CS_PARMS, 6, printk(KERN_INFO "cs4297a: start_adc(): start adc\n")); } CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_INFO "cs4297a: start_adc()-\n")); } // call with spinlock held! static void cs4297a_update_ptr(struct cs4297a_state *s, int intflag) { int good_diff, diff, diff2; u64 *data_p, data; u32 *s_ptr; unsigned hwptr; u32 status; serdma_t *d; serdma_descr_t *descr; // update ADC pointer status = intflag ? __raw_readq(SS_CSR(R_SER_STATUS)) : 0; if ((s->ena & FMODE_READ) || (status & (M_SYNCSER_RX_EOP_COUNT))) { d = &s->dma_adc; hwptr = (unsigned) (((__raw_readq(SS_CSR(R_SER_DMA_CUR_DSCR_ADDR_RX)) & M_DMA_CURDSCR_ADDR) - d->descrtab_phys) / sizeof(serdma_descr_t)); if (s->ena & FMODE_READ) { CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_INFO "cs4297a: upd_rcv sw->hw->hw %x/%x/%x (int-%d)n", d->swptr, d->hwptr, hwptr, intflag)); /* Number of DMA buffers available for software: */ diff2 = diff = (d->ringsz + hwptr - d->hwptr) % d->ringsz; d->hwptr = hwptr; good_diff = 0; s_ptr = (u32 *)&(d->dma_buf[d->swptr*4]); descr = &d->descrtab[d->swptr]; while (diff2--) { u64 data = be64_to_cpu(*(u64 *)s_ptr); u64 descr_a; u16 left, right; descr_a = descr->descr_a; descr->descr_a &= ~M_DMA_SERRX_SOP; if ((descr_a & M_DMA_DSCRA_A_ADDR) != CPHYSADDR((long)s_ptr)) { printk(KERN_ERR "cs4297a: RX Bad address (read)\n"); } if (((data & 0x9800000000000000) != 0x9800000000000000) || (!(descr_a & M_DMA_SERRX_SOP)) || (G_DMA_DSCRB_PKT_SIZE(descr->descr_b) != FRAME_BYTES)) { s->stats.rx_bad++; printk(KERN_DEBUG "cs4297a: RX Bad attributes (read)\n"); continue; } s->stats.rx_good++; if ((data >> 61) == 7) { s->read_value = (data >> 12) & 0xffff; s->read_reg = (data >> 40) & 0x7f; wake_up(&d->reg_wait); } if (d->count && (d->sb_hwptr == d->sb_swptr)) { s->stats.rx_overflow++; printk(KERN_DEBUG "cs4297a: RX overflow\n"); continue; } good_diff++; left = ((be32_to_cpu(s_ptr[1]) & 0xff) << 8) | ((be32_to_cpu(s_ptr[2]) >> 24) & 0xff); right = 
(be32_to_cpu(s_ptr[2]) >> 4) & 0xffff; *d->sb_hwptr++ = cpu_to_be16(left); *d->sb_hwptr++ = cpu_to_be16(right); if (d->sb_hwptr == d->sb_end) d->sb_hwptr = d->sample_buf; descr++; if (descr == d->descrtab_end) { descr = d->descrtab; s_ptr = (u32 *)s->dma_adc.dma_buf; } else { s_ptr += 8; } } d->total_bytes += good_diff * FRAME_SAMPLE_BYTES; d->count += good_diff * FRAME_SAMPLE_BYTES; if (d->count > d->sbufsz) { printk(KERN_ERR "cs4297a: bogus receive overflow!!\n"); } d->swptr = (d->swptr + diff) % d->ringsz; __raw_writeq(diff, SS_CSR(R_SER_DMA_DSCR_COUNT_RX)); if (d->mapped) { if (d->count >= (signed) d->fragsize) wake_up(&d->wait); } else { if (d->count > 0) { CS_DBGOUT(CS_WAVE_READ, 4, printk(KERN_INFO "cs4297a: update count -> %d\n", d->count)); wake_up(&d->wait); } } } else { /* Receive is going even if no one is listening (for register accesses and to avoid FIFO overrun) */ diff2 = diff = (hwptr + d->ringsz - d->hwptr) % d->ringsz; if (!diff) { printk(KERN_ERR "cs4297a: RX full or empty?\n"); } descr = &d->descrtab[d->swptr]; data_p = &d->dma_buf[d->swptr*4]; /* Force this to happen at least once; I got here because of an interrupt, so there must be a buffer to process. 
*/ do { data = be64_to_cpu(*data_p); if ((descr->descr_a & M_DMA_DSCRA_A_ADDR) != CPHYSADDR((long)data_p)) { printk(KERN_ERR "cs4297a: RX Bad address %d (%llx %lx)\n", d->swptr, (long long)(descr->descr_a & M_DMA_DSCRA_A_ADDR), (long)CPHYSADDR((long)data_p)); } if (!(data & (1LL << 63)) || !(descr->descr_a & M_DMA_SERRX_SOP) || (G_DMA_DSCRB_PKT_SIZE(descr->descr_b) != FRAME_BYTES)) { s->stats.rx_bad++; printk(KERN_DEBUG "cs4297a: RX Bad attributes\n"); } else { s->stats.rx_good++; if ((data >> 61) == 7) { s->read_value = (data >> 12) & 0xffff; s->read_reg = (data >> 40) & 0x7f; wake_up(&d->reg_wait); } } descr->descr_a &= ~M_DMA_SERRX_SOP; descr++; d->swptr++; data_p += 4; if (descr == d->descrtab_end) { descr = d->descrtab; d->swptr = 0; data_p = d->dma_buf; } __raw_writeq(1, SS_CSR(R_SER_DMA_DSCR_COUNT_RX)); } while (--diff); d->hwptr = hwptr; CS_DBGOUT(CS_DESCR, 6, printk(KERN_INFO "cs4297a: hw/sw %x/%x\n", d->hwptr, d->swptr)); } CS_DBGOUT(CS_PARMS, 8, printk(KERN_INFO "cs4297a: cs4297a_update_ptr(): s=0x%.8x hwptr=%d total_bytes=%d count=%d \n", (unsigned)s, d->hwptr, d->total_bytes, d->count)); } /* XXXKW worry about s->reg_request -- there is a starvation case if s->ena has FMODE_WRITE on, but the client isn't doing writes */ // update DAC pointer // // check for end of buffer, means that we are going to wait for another interrupt // to allow silence to fill the fifos on the part, to keep pops down to a minimum. // if (s->ena & FMODE_WRITE) { serdma_t *d = &s->dma_dac; hwptr = (unsigned) (((__raw_readq(SS_CSR(R_SER_DMA_CUR_DSCR_ADDR_TX)) & M_DMA_CURDSCR_ADDR) - d->descrtab_phys) / sizeof(serdma_descr_t)); diff = (d->ringsz + hwptr - d->hwptr) % d->ringsz; CS_DBGOUT(CS_WAVE_WRITE, 4, printk(KERN_INFO "cs4297a: cs4297a_update_ptr(): hw/hw/sw %x/%x/%x diff %d count %d\n", d->hwptr, hwptr, d->swptr, diff, d->count)); d->hwptr = hwptr; /* XXXKW stereo? conversion? 
Just assume 2 16-bit samples for now */
		d->total_bytes += diff * FRAME_SAMPLE_BYTES;

		if (d->mapped) {
			/* mmap'ed buffer: count accumulates and wraps; consumer polls */
			d->count += diff * FRAME_SAMPLE_BYTES;
			if (d->count >= d->fragsize) {
				d->wakeup = 1;
				wake_up(&d->wait);
				if (d->count > d->sbufsz)
					d->count &= d->sbufsz - 1;
			}
		} else {
			/* normal write path: consume what the hardware played */
			d->count -= diff * FRAME_SAMPLE_BYTES;
			if (d->count <= 0) {
				//
				// fill with silence, and do not shut down the DAC.
				// Continue to play silence until the _release.
				//
				CS_DBGOUT(CS_WAVE_WRITE, 6, printk(KERN_INFO
					"cs4297a: cs4297a_update_ptr(): memset %d at 0x%.8x for %d size \n",
						(unsigned)(s->prop_dac.fmt &
							   (AFMT_U8 | AFMT_U16_LE)) ? 0x80 : 0,
						(unsigned)d->dma_buf, d->ringsz));
				memset(d->dma_buf, 0, d->ringsz * FRAME_BYTES);
				if (d->count < 0) {
					d->underrun = 1;
					s->stats.tx_underrun++;
					d->count = 0;
					CS_DBGOUT(CS_ERROR, 9, printk(KERN_INFO
						"cs4297a: cs4297a_update_ptr(): underrun\n"));
				}
			} else if (d->count <= (signed) d->fragsize &&
				   !d->endcleared) {
				/* XXXKW what is this for? */
				clear_advance(d->dma_buf, d->sbufsz, d->swptr,
					      d->fragsize, 0);
				d->endcleared = 1;
			}
			if ( (d->count <= (signed) d->sbufsz/2) || intflag)
			{
				CS_DBGOUT(CS_WAVE_WRITE, 4, printk(KERN_INFO
					"cs4297a: update count -> %d\n", d->count));
				wake_up(&d->wait);
			}
		}
		CS_DBGOUT(CS_PARMS, 8, printk(KERN_INFO
			"cs4297a: cs4297a_update_ptr(): s=0x%.8x hwptr=%d total_bytes=%d count=%d \n",
				(unsigned) s, d->hwptr, d->total_bytes, d->count));
	}
}

/*
 * mixer_ioctl - handle OSS mixer ioctls for the CS4297a AC'97 codec.
 *
 * Translates the OSS 0-100 volume scale into the codec's attenuation
 * registers (written via cs4297a_write_ac97) and keeps a software copy
 * of each channel's volume in s->mix.vol[].  Read-only commands report
 * the supported device/recording masks; write commands (_SIOC_READ |
 * _SIOC_WRITE) program the hardware.  Returns 0 or a negative errno.
 */
static int mixer_ioctl(struct cs4297a_state *s, unsigned int cmd,
		       unsigned long arg)
{
	// Index to mixer_src[] is value of AC97 Input Mux Select Reg.
	// Value of array member is recording source Device ID Mask.
	static const unsigned int mixer_src[8] = {
		SOUND_MASK_MIC, SOUND_MASK_CD, 0, SOUND_MASK_LINE1,
		SOUND_MASK_LINE, SOUND_MASK_VOLUME, 0, 0
	};

	// Index of mixtable1[] member is Device ID
	// and must be <= SOUND_MIXER_NRDEVICES.
	// Value of array member is index into s->mix.vol[]
	static const unsigned char mixtable1[SOUND_MIXER_NRDEVICES] = {
		[SOUND_MIXER_PCM] = 1,		// voice
		[SOUND_MIXER_LINE1] = 2,	// AUX
		[SOUND_MIXER_CD] = 3,		// CD
		[SOUND_MIXER_LINE] = 4,		// Line
		[SOUND_MIXER_SYNTH] = 5,	// FM
		[SOUND_MIXER_MIC] = 6,		// Mic
		[SOUND_MIXER_SPEAKER] = 7,	// Speaker
		[SOUND_MIXER_RECLEV] = 8,	// Recording level
		[SOUND_MIXER_VOLUME] = 9	// Master Volume
	};

	// AC'97 register for each writable stereo channel (indexed by
	// mixtable1[] value - 1, used by the default: write case below).
	static const unsigned mixreg[] = {
		AC97_PCMOUT_VOL,
		AC97_AUX_VOL,
		AC97_CD_VOL,
		AC97_LINEIN_VOL
	};
	unsigned char l, r, rl, rr, vidx;
	// 0-100 OSS volume (in steps of 10) -> codec attenuation value
	unsigned char attentbl[11] =
	    { 63, 42, 26, 17, 14, 11, 8, 6, 4, 2, 0 };
	unsigned temp1;
	int i, val;

	VALIDATE_STATE(s);
	CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO
		"cs4297a: mixer_ioctl(): s=0x%.8x cmd=0x%.8x\n",
			(unsigned) s, cmd));
#if CSDEBUG
	cs_printioctl(cmd);
#endif
#if CSDEBUG_INTERFACE
	// Private debug-control ioctls: get/set the driver debug mask/level.
	if ((cmd == SOUND_MIXER_CS_GETDBGMASK) ||
	    (cmd == SOUND_MIXER_CS_SETDBGMASK) ||
	    (cmd == SOUND_MIXER_CS_GETDBGLEVEL) ||
	    (cmd == SOUND_MIXER_CS_SETDBGLEVEL))
	{
		switch (cmd) {
		case SOUND_MIXER_CS_GETDBGMASK:
			return put_user(cs_debugmask,
					(unsigned long *) arg);
		case SOUND_MIXER_CS_GETDBGLEVEL:
			return put_user(cs_debuglevel,
					(unsigned long *) arg);
		case SOUND_MIXER_CS_SETDBGMASK:
			if (get_user(val, (unsigned long *) arg))
				return -EFAULT;
			cs_debugmask = val;
			return 0;
		case SOUND_MIXER_CS_SETDBGLEVEL:
			if (get_user(val, (unsigned long *) arg))
				return -EFAULT;
			cs_debuglevel = val;
			return 0;
		default:
			CS_DBGOUT(CS_ERROR, 1, printk(KERN_INFO
				"cs4297a: mixer_ioctl(): ERROR unknown debug cmd\n"));
			return 0;
		}
	}
#endif
	if (cmd == SOUND_MIXER_PRIVATE1) {
		return -EINVAL;
	}
	if (cmd == SOUND_MIXER_PRIVATE2) {
		// enable/disable/query spatializer
		if (get_user(val, (int *) arg))
			return -EFAULT;
		if (val != -1) {
			temp1 = (val & 0x3f) >> 2;
			cs4297a_write_ac97(s, AC97_3D_CONTROL, temp1);
			cs4297a_read_ac97(s, AC97_GENERAL_PURPOSE, &temp1);
			cs4297a_write_ac97(s, AC97_GENERAL_PURPOSE,
					   temp1 | 0x2000);
		}
		cs4297a_read_ac97(s, AC97_3D_CONTROL, &temp1);
		return put_user((temp1 << 2) | 3, (int *) arg);
	}
	if (cmd == SOUND_MIXER_INFO) {
		mixer_info info;
		memset(&info, 0, sizeof(info));
		strlcpy(info.id, "CS4297a", sizeof(info.id));
		strlcpy(info.name, "Crystal CS4297a", sizeof(info.name));
		info.modify_counter = s->mix.modcnt;
		if (copy_to_user((void *) arg, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}
	if (cmd == SOUND_OLD_MIXER_INFO) {
		_old_mixer_info info;
		memset(&info, 0, sizeof(info));
		strlcpy(info.id, "CS4297a", sizeof(info.id));
		strlcpy(info.name, "Crystal CS4297a", sizeof(info.name));
		if (copy_to_user((void *) arg, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}
	if (cmd == OSS_GETVERSION)
		return put_user(SOUND_VERSION, (int *) arg);

	if (_IOC_TYPE(cmd) != 'M' || _SIOC_SIZE(cmd) != sizeof(int))
		return -EINVAL;

	// If ioctl has only the SIOC_READ bit(bit 31)
	// on, process the only-read commands.
	if (_SIOC_DIR(cmd) == _SIOC_READ) {
		switch (_IOC_NR(cmd)) {
		case SOUND_MIXER_RECSRC:	// Arg contains a bit for each recording source
			cs4297a_read_ac97(s, AC97_RECORD_SELECT, &temp1);
			return put_user(mixer_src[temp1 & 7], (int *) arg);

		case SOUND_MIXER_DEVMASK:	// Arg contains a bit for each supported device
			return put_user(SOUND_MASK_PCM | SOUND_MASK_LINE |
					SOUND_MASK_VOLUME | SOUND_MASK_RECLEV,
					(int *) arg);

		case SOUND_MIXER_RECMASK:	// Arg contains a bit for each supported recording source
			return put_user(SOUND_MASK_LINE | SOUND_MASK_VOLUME,
					(int *) arg);

		case SOUND_MIXER_STEREODEVS:	// Mixer channels supporting stereo
			return put_user(SOUND_MASK_PCM | SOUND_MASK_LINE |
					SOUND_MASK_VOLUME | SOUND_MASK_RECLEV,
					(int *) arg);

		case SOUND_MIXER_CAPS:
			return put_user(SOUND_CAP_EXCL_INPUT, (int *) arg);

		default:
			// Report the cached software volume for the channel.
			i = _IOC_NR(cmd);
			if (i >= SOUND_MIXER_NRDEVICES || !(vidx = mixtable1[i]))
				return -EINVAL;
			return put_user(s->mix.vol[vidx - 1], (int *) arg);
		}
	}
	// If ioctl doesn't have both the SIOC_READ and
	// the SIOC_WRITE bit set, return invalid.
	if (_SIOC_DIR(cmd) != (_SIOC_READ | _SIOC_WRITE))
		return -EINVAL;

	// Increment the count of volume writes.
	s->mix.modcnt++;

	// Isolate the command; it must be a write.
	switch (_IOC_NR(cmd)) {

	case SOUND_MIXER_RECSRC:	// Arg contains a bit for each recording source
		if (get_user(val, (int *) arg))
			return -EFAULT;
		i = hweight32(val);	// i = # bits on in val.
		if (i != 1)		// One & only 1 bit must be on.
			return 0;
		// NOTE(review): sizeof(int) happens to equal
		// sizeof(mixer_src[0]) (unsigned int) here, but the
		// idiomatic divisor would be sizeof(mixer_src[0]).
		for (i = 0; i < sizeof(mixer_src) / sizeof(int); i++) {
			if (val == mixer_src[i]) {
				temp1 = (i << 8) | i;
				cs4297a_write_ac97(s, AC97_RECORD_SELECT,
						   temp1);
				return 0;
			}
		}
		return 0;

	case SOUND_MIXER_VOLUME:
		if (get_user(val, (int *) arg))
			return -EFAULT;
		l = val & 0xff;
		if (l > 100)
			l = 100;	// Max soundcard.h vol is 100.
		if (l < 6) {
			rl = 63;
			l = 0;
		} else
			rl = attentbl[(10 * l) / 100];	// Convert 0-100 vol to 63-0 atten.

		r = (val >> 8) & 0xff;
		if (r > 100)
			r = 100;	// Max right volume is 100, too
		if (r < 6) {
			rr = 63;
			r = 0;
		} else
			rr = attentbl[(10 * r) / 100];	// Convert volume to attenuation.

		if ((rl > 60) && (rr > 60))	// If both l & r are 'low',
			temp1 = 0x8000;		//  turn on the mute bit.
		else
			temp1 = 0;

		temp1 |= (rl << 8) | rr;
		cs4297a_write_ac97(s, AC97_MASTER_VOL_STEREO, temp1);
		cs4297a_write_ac97(s, AC97_PHONE_VOL, temp1);

#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
		s->mix.vol[8] = ((unsigned int) r << 8) | l;
#else
		s->mix.vol[8] = val;
#endif
		return put_user(s->mix.vol[8], (int *) arg);

	case SOUND_MIXER_SPEAKER:
		if (get_user(val, (int *) arg))
			return -EFAULT;
		l = val & 0xff;
		if (l > 100)
			l = 100;
		if (l < 3) {
			rl = 0;
			l = 0;
		} else {
			rl = (l * 2 - 5) / 13;	// Convert 0-100 range to 0-15.
			l = (rl * 13 + 5) / 2;
		}

		if (rl < 3) {
			temp1 = 0x8000;
			rl = 0;
		} else
			temp1 = 0;
		rl = 15 - rl;		// Convert volume to attenuation.
		temp1 |= rl << 1;
		cs4297a_write_ac97(s, AC97_PCBEEP_VOL, temp1);

#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
		s->mix.vol[6] = l << 8;
#else
		s->mix.vol[6] = val;
#endif
		return put_user(s->mix.vol[6], (int *) arg);

	case SOUND_MIXER_RECLEV:
		if (get_user(val, (int *) arg))
			return -EFAULT;
		l = val & 0xff;
		if (l > 100)
			l = 100;
		r = (val >> 8) & 0xff;
		if (r > 100)
			r = 100;
		rl = (l * 2 - 5) / 13;	// Convert 0-100 scale to 0-15.
		rr = (r * 2 - 5) / 13;
		if (rl < 3 && rr < 3)
			temp1 = 0x8000;
		else
			temp1 = 0;
		temp1 = temp1 | (rl << 8) | rr;
		cs4297a_write_ac97(s, AC97_RECORD_GAIN, temp1);

#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
		s->mix.vol[7] = ((unsigned int) r << 8) | l;
#else
		s->mix.vol[7] = val;
#endif
		return put_user(s->mix.vol[7], (int *) arg);

	case SOUND_MIXER_MIC:
		if (get_user(val, (int *) arg))
			return -EFAULT;
		l = val & 0xff;
		if (l > 100)
			l = 100;
		if (l < 1) {
			l = 0;
			rl = 0;
		} else {
			rl = ((unsigned) l * 5 - 4) / 16;	// Convert 0-100 range to 0-31.
			l = (rl * 16 + 4) / 5;
		}
		cs4297a_read_ac97(s, AC97_MIC_VOL, &temp1);
		temp1 &= 0x40;		// Isolate 20db gain bit.
		if (rl < 3) {
			temp1 |= 0x8000;
			rl = 0;
		}
		rl = 31 - rl;		// Convert volume to attenuation.
		temp1 |= rl;
		cs4297a_write_ac97(s, AC97_MIC_VOL, temp1);

#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
		s->mix.vol[5] = val << 8;
#else
		s->mix.vol[5] = val;
#endif
		return put_user(s->mix.vol[5], (int *) arg);

	case SOUND_MIXER_SYNTH:
		if (get_user(val, (int *) arg))
			return -EFAULT;
		l = val & 0xff;
		if (l > 100)
			l = 100;
		if (get_user(val, (int *) arg))
			return -EFAULT;
		r = (val >> 8) & 0xff;
		if (r > 100)
			r = 100;
		rl = (l * 2 - 11) / 3;	// Convert 0-100 range to 0-63.
		rr = (r * 2 - 11) / 3;
		if (rl < 3)		// If l is low, turn on
			temp1 = 0x0080;	//  the mute bit.
		else
			temp1 = 0;

		rl = 63 - rl;		// Convert vol to attenuation.
		// FM synth register writes are not wired up on this part:
//		writel(temp1 | rl, s->pBA0 + FMLVC);
		if (rr < 3)		//  If rr is low, turn on
			temp1 = 0x0080;	//   the mute bit.
		else
			temp1 = 0;
		rr = 63 - rr;		// Convert vol to attenuation.
//		writel(temp1 | rr, s->pBA0 + FMRVC);

#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
		s->mix.vol[4] = (r << 8) | l;
#else
		s->mix.vol[4] = val;
#endif
		return put_user(s->mix.vol[4], (int *) arg);

	default:
		// Generic stereo channel write via mixreg[].
		CS_DBGOUT(CS_IOCTL, 4, printk(KERN_INFO
			"cs4297a: mixer_ioctl(): default\n"));

		i = _IOC_NR(cmd);
		if (i >= SOUND_MIXER_NRDEVICES || !(vidx = mixtable1[i]))
			return -EINVAL;
		if (get_user(val, (int *) arg))
			return -EFAULT;
		l = val & 0xff;
		if (l > 100)
			l = 100;
		if (l < 1) {
			l = 0;
			rl = 31;
		} else
			rl = (attentbl[(l * 10) / 100]) >> 1;
		r = (val >> 8) & 0xff;
		if (r > 100)
			r = 100;
		if (r < 1) {
			r = 0;
			rr = 31;
		} else
			rr = (attentbl[(r * 10) / 100]) >> 1;
		if ((rl > 30) && (rr > 30))
			temp1 = 0x8000;
		else
			temp1 = 0;
		temp1 = temp1 | (rl << 8) | rr;
		cs4297a_write_ac97(s, mixreg[vidx - 1], temp1);

#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
		s->mix.vol[vidx - 1] = ((unsigned int) r << 8) | l;
#else
		s->mix.vol[vidx - 1] = val;
#endif
		return put_user(s->mix.vol[vidx - 1], (int *) arg);
	}
}

// ---------------------------------------------------------------------

/*
 * cs4297a_open_mixdev - open() for the mixer device node.
 * Finds the state struct whose dev_mixer minor matches and stashes it
 * in file->private_data for later ioctls.
 */
static int cs4297a_open_mixdev(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	struct cs4297a_state *s=NULL;
	struct list_head *entry;

	CS_DBGOUT(CS_FUNCTION | CS_OPEN, 4,
		  printk(KERN_INFO "cs4297a: cs4297a_open_mixdev()+\n"));

	list_for_each(entry, &cs4297a_devs)
	{
		s = list_entry(entry, struct cs4297a_state, list);
		if(s->dev_mixer == minor)
			break;
	}
	if (!s)
	{
		CS_DBGOUT(CS_FUNCTION | CS_OPEN | CS_ERROR, 2,
			printk(KERN_INFO "cs4297a: cs4297a_open_mixdev()- -ENODEV\n"));
		return -ENODEV;
	}
	VALIDATE_STATE(s);
	file->private_data = s;

	CS_DBGOUT(CS_FUNCTION | CS_OPEN, 4,
		  printk(KERN_INFO "cs4297a: cs4297a_open_mixdev()- 0\n"));

	return nonseekable_open(inode, file);
}

/* release() for the mixer node - nothing to tear down. */
static int cs4297a_release_mixdev(struct inode *inode, struct file *file)
{
	struct cs4297a_state *s =
	    (struct cs4297a_state *) file->private_data;

	VALIDATE_STATE(s);
	return 0;
}

/* Thin ioctl wrapper: forward mixer node ioctls to mixer_ioctl(). */
static int cs4297a_ioctl_mixdev(struct inode *inode,
				struct file *file, unsigned int
				cmd, unsigned long arg)
{
	return mixer_ioctl((struct cs4297a_state *) file->private_data, cmd,
			   arg);
}

// ******************************************************************************************
// Mixer file operations struct.
// ******************************************************************************************
static /*const */ struct file_operations cs4297a_mixer_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.ioctl		= cs4297a_ioctl_mixdev,
	.open		= cs4297a_open_mixdev,
	.release	= cs4297a_release_mixdev,
};

// ---------------------------------------------------------------------

/* Drain pending capture data before close; intentionally a no-op (see
   comment below), kept for symmetry with drain_dac(). */
static int drain_adc(struct cs4297a_state *s, int nonblock)
{
	/* This routine serves no purpose currently - any samples
	   sitting in the receive queue will just be processed by the
	   background consumer.  This would be different if DMA
	   actually stopped when there were no clients. */
	return 0;
}

/*
 * drain_dac - block until the TX DMA ring and software count empty out,
 * then resynchronize the software ring pointers with the hardware's
 * current descriptor.  Returns 0, or -EBUSY for a nonblocking caller.
 */
static int drain_dac(struct cs4297a_state *s, int nonblock)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	unsigned hwptr;
	unsigned tmo;
	int count;

	if (s->dma_dac.mapped)
		return 0;
	if (nonblock)
		return -EBUSY;
	add_wait_queue(&s->dma_dac.wait, &wait);
	while ((count = __raw_readq(SS_CSR(R_SER_DMA_DSCR_COUNT_TX))) ||
	       (s->dma_dac.count > 0)) {
		if (!signal_pending(current)) {
			set_current_state(TASK_INTERRUPTIBLE);
			/* XXXKW is this calculation working? */
			tmo = ((count * FRAME_TX_US) * HZ) / 1000000;
			schedule_timeout(tmo + 1);
		} else {
			/* XXXKW do I care if there is a signal pending? */
		}
	}
	spin_lock_irqsave(&s->lock, flags);
	/* Reset the bookkeeping */
	hwptr = (int)(((__raw_readq(SS_CSR(R_SER_DMA_CUR_DSCR_ADDR_TX)) &
			M_DMA_CURDSCR_ADDR) -
		       s->dma_dac.descrtab_phys) / sizeof(serdma_descr_t));
	s->dma_dac.hwptr = s->dma_dac.swptr = hwptr;
	spin_unlock_irqrestore(&s->lock, flags);
	remove_wait_queue(&s->dma_dac.wait, &wait);
	current->state = TASK_RUNNING;
	return 0;
}

// ---------------------------------------------------------------------

/*
 * cs4297a_read - read() for the audio device: copy captured 16-bit
 * stereo frames from the driver's sample buffer to user space, sleeping
 * (unless O_NONBLOCK) when no data is available.  Returns bytes copied
 * or a negative errno.
 */
static ssize_t cs4297a_read(struct file *file, char *buffer, size_t count,
			    loff_t * ppos)
{
	struct cs4297a_state *s =
	    (struct cs4297a_state *) file->private_data;
	ssize_t ret;
	unsigned long flags;
	int cnt, count_fr, cnt_by;
	unsigned copied = 0;

	CS_DBGOUT(CS_FUNCTION | CS_WAVE_READ, 2,
		  printk(KERN_INFO "cs4297a: cs4297a_read()+ %d \n", count));
	VALIDATE_STATE(s);
	if (s->dma_adc.mapped)
		return -ENXIO;
	if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s)))
		return ret;
	if (!access_ok(VERIFY_WRITE, buffer, count))
		return -EFAULT;
	ret = 0;
//
// "count" is the amount of bytes to read (from app), is decremented each loop
// by the amount of bytes that have been returned to the user buffer.
// "cnt" is the running total of each read from the buffer (changes each loop)
// "buffer" points to the app's buffer
// "ret" keeps a running total of the amount of bytes that have been copied
// to the user buffer.
// "copied" is the total bytes copied into the user buffer for each loop.
//
	while (count > 0) {
		CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO
			"_read() count>0 count=%d .count=%d .swptr=%d .hwptr=%d \n",
				count, s->dma_adc.count,
				s->dma_adc.swptr, s->dma_adc.hwptr));
		spin_lock_irqsave(&s->lock, flags);

		/* cnt will be the number of available samples (16-bit
		   stereo); it starts out as the maxmimum consequetive
		   samples */
		cnt = (s->dma_adc.sb_end - s->dma_adc.sb_swptr) / 2;
		count_fr = s->dma_adc.count / FRAME_SAMPLE_BYTES;

// dma_adc.count is the current total bytes that have not been read.
// if the amount of unread bytes from the current sw pointer to the
// end of the buffer is greater than the current total bytes that
// have not been read, then set the "cnt" (unread bytes) to the
// amount of unread bytes.

		if (count_fr < cnt)
			cnt = count_fr;
		cnt_by = cnt * FRAME_SAMPLE_BYTES;
		spin_unlock_irqrestore(&s->lock, flags);
		//
		// if we are converting from 8/16 then we need to copy
		// twice the number of 16 bit bytes then 8 bit bytes.
		//
		if (s->conversion) {
			if (cnt_by > (count * 2)) {
				cnt = (count * 2) / FRAME_SAMPLE_BYTES;
				cnt_by = count * 2;
			}
		} else {
			if (cnt_by > count) {
				cnt = count / FRAME_SAMPLE_BYTES;
				cnt_by = count;
			}
		}
		//
		// "cnt" NOW is the smaller of the amount that will be read,
		// and the amount that is requested in this read (or partial).
		// if there are no bytes in the buffer to read, then start the
		// ADC and wait for the interrupt handler to wake us up.
		//
		if (cnt <= 0) {

			// start up the dma engine and then continue back to the top of
			// the loop when wake up occurs.
			start_adc(s);
			if (file->f_flags & O_NONBLOCK)
				return ret ? ret : -EAGAIN;
			interruptible_sleep_on(&s->dma_adc.wait);
			if (signal_pending(current))
				return ret ? ret : -ERESTARTSYS;
			continue;
		}
		// there are bytes in the buffer to read.
		// copy from the hw buffer over to the user buffer.
		// user buffer is designated by "buffer"
		// virtual address to copy from is dma_buf+swptr
		// the "cnt" is the number of bytes to read.

		CS_DBGOUT(CS_WAVE_READ, 2, printk(KERN_INFO
			"_read() copy_to cnt=%d count=%d ", cnt_by, count));
		CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO
			" .sbufsz=%d .count=%d buffer=0x%.8x ret=%d\n",
				s->dma_adc.sbufsz, s->dma_adc.count,
				(unsigned) buffer, ret));

		if (copy_to_user
		    (buffer, ((void *) s->dma_adc.sb_swptr), cnt_by))
			return ret ? ret : -EFAULT;
		copied = cnt_by;

		/* Return the descriptors */
		spin_lock_irqsave(&s->lock, flags);
		CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_INFO
			"cs4297a: upd_rcv sw->hw %x/%x\n",
				s->dma_adc.swptr, s->dma_adc.hwptr));
		s->dma_adc.count -= cnt_by;
		s->dma_adc.sb_swptr += cnt * 2;
		if (s->dma_adc.sb_swptr == s->dma_adc.sb_end)
			s->dma_adc.sb_swptr = s->dma_adc.sample_buf;
		spin_unlock_irqrestore(&s->lock, flags);
		count -= copied;
		buffer += copied;
		ret += copied;
		start_adc(s);
	}
	CS_DBGOUT(CS_FUNCTION | CS_WAVE_READ, 2,
		  printk(KERN_INFO "cs4297a: cs4297a_read()- %d\n", ret));
	return ret;
}

/*
 * cs4297a_write - write() for the audio device: stage user PCM samples
 * into the TX descriptor ring, converting each 16-bit stereo frame into
 * the serializer's frame layout, then kick the DAC DMA.  Returns bytes
 * consumed or a negative errno.
 */
static ssize_t cs4297a_write(struct file *file, const char *buffer,
			     size_t count, loff_t * ppos)
{
	struct cs4297a_state *s =
	    (struct cs4297a_state *) file->private_data;
	ssize_t ret;
	unsigned long flags;
	unsigned swptr, hwptr;
	int cnt;

	CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE, 2,
		  printk(KERN_INFO "cs4297a: cs4297a_write()+ count=%d\n",
			 count));
	VALIDATE_STATE(s);

	if (s->dma_dac.mapped)
		return -ENXIO;
	if (!s->dma_dac.ready && (ret = prog_dmabuf_dac(s)))
		return ret;
	if (!access_ok(VERIFY_READ, buffer, count))
		return -EFAULT;
	ret = 0;
	while (count > 0) {
		serdma_t *d = &s->dma_dac;
		int copy_cnt;
		u32 *s_tmpl;
		u32 *t_tmpl;
		u32 left, right;
		// Little-endian formats need byte-swapping on this
		// big-endian serializer path.
		int swap = (s->prop_dac.fmt == AFMT_S16_LE) ||
			   (s->prop_dac.fmt == AFMT_U16_LE);

		/* XXXXXX this is broken for BLOAT_FACTOR */
		spin_lock_irqsave(&s->lock, flags);
		if (d->count < 0) {
			d->count = 0;
			d->swptr = d->hwptr;
		}
		if (d->underrun) {
			d->underrun = 0;
			hwptr = (unsigned) (((__raw_readq(SS_CSR(R_SER_DMA_CUR_DSCR_ADDR_TX)) &
					      M_DMA_CURDSCR_ADDR) -
					     d->descrtab_phys) / sizeof(serdma_descr_t));
			d->swptr = d->hwptr = hwptr;
		}
		swptr = d->swptr;
		cnt = d->sbufsz - (swptr * FRAME_SAMPLE_BYTES);
		/* Will this write fill up the buffer? */
		if (d->count + cnt > d->sbufsz)
			cnt = d->sbufsz - d->count;
		spin_unlock_irqrestore(&s->lock, flags);
		if (cnt > count)
			cnt = count;
		if (cnt <= 0) {
			// Ring full: start the DAC and wait for space.
			start_dac(s);
			if (file->f_flags & O_NONBLOCK)
				return ret ?
ret : -EAGAIN; interruptible_sleep_on(&d->wait); if (signal_pending(current)) return ret ? ret : -ERESTARTSYS; continue; } if (copy_from_user(d->sample_buf, buffer, cnt)) return ret ? ret : -EFAULT; copy_cnt = cnt; s_tmpl = (u32 *)d->sample_buf; t_tmpl = (u32 *)(d->dma_buf + (swptr * 4)); /* XXXKW assuming 16-bit stereo! */ do { u32 tmp; t_tmpl[0] = cpu_to_be32(0x98000000); tmp = be32_to_cpu(s_tmpl[0]); left = tmp & 0xffff; right = tmp >> 16; if (swap) { left = swab16(left); right = swab16(right); } t_tmpl[1] = cpu_to_be32(left >> 8); t_tmpl[2] = cpu_to_be32(((left & 0xff) << 24) | (right << 4)); s_tmpl++; t_tmpl += 8; copy_cnt -= 4; } while (copy_cnt); /* Mux in any pending read/write accesses */ if (s->reg_request) { *(u64 *)(d->dma_buf + (swptr * 4)) |= cpu_to_be64(s->reg_request); s->reg_request = 0; wake_up(&s->dma_dac.reg_wait); } CS_DBGOUT(CS_WAVE_WRITE, 4, printk(KERN_INFO "cs4297a: copy in %d to swptr %x\n", cnt, swptr)); swptr = (swptr + (cnt/FRAME_SAMPLE_BYTES)) % d->ringsz; __raw_writeq(cnt/FRAME_SAMPLE_BYTES, SS_CSR(R_SER_DMA_DSCR_COUNT_TX)); spin_lock_irqsave(&s->lock, flags); d->swptr = swptr; d->count += cnt; d->endcleared = 0; spin_unlock_irqrestore(&s->lock, flags); count -= cnt; buffer += cnt; ret += cnt; start_dac(s); } CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE, 2, printk(KERN_INFO "cs4297a: cs4297a_write()- %d\n", ret)); return ret; } static unsigned int cs4297a_poll(struct file *file, struct poll_table_struct *wait) { struct cs4297a_state *s = (struct cs4297a_state *) file->private_data; unsigned long flags; unsigned int mask = 0; CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE | CS_WAVE_READ, 4, printk(KERN_INFO "cs4297a: cs4297a_poll()+\n")); VALIDATE_STATE(s); if (file->f_mode & FMODE_WRITE) { CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE | CS_WAVE_READ, 4, printk(KERN_INFO "cs4297a: cs4297a_poll() wait on FMODE_WRITE\n")); if(!s->dma_dac.ready && prog_dmabuf_dac(s)) return 0; poll_wait(file, &s->dma_dac.wait, wait); } if (file->f_mode & FMODE_READ) { 
CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE | CS_WAVE_READ, 4, printk(KERN_INFO "cs4297a: cs4297a_poll() wait on FMODE_READ\n")); if(!s->dma_dac.ready && prog_dmabuf_adc(s)) return 0; poll_wait(file, &s->dma_adc.wait, wait); } spin_lock_irqsave(&s->lock, flags); cs4297a_update_ptr(s,CS_FALSE); if (file->f_mode & FMODE_WRITE) { if (s->dma_dac.mapped) { if (s->dma_dac.count >= (signed) s->dma_dac.fragsize) { if (s->dma_dac.wakeup) mask |= POLLOUT | POLLWRNORM; else mask = 0; s->dma_dac.wakeup = 0; } } else { if ((signed) (s->dma_dac.sbufsz/2) >= s->dma_dac.count) mask |= POLLOUT | POLLWRNORM; } } else if (file->f_mode & FMODE_READ) { if (s->dma_adc.mapped) { if (s->dma_adc.count >= (signed) s->dma_adc.fragsize) mask |= POLLIN | POLLRDNORM; } else { if (s->dma_adc.count > 0) mask |= POLLIN | POLLRDNORM; } } spin_unlock_irqrestore(&s->lock, flags); CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE | CS_WAVE_READ, 4, printk(KERN_INFO "cs4297a: cs4297a_poll()- 0x%.8x\n", mask)); return mask; } static int cs4297a_mmap(struct file *file, struct vm_area_struct *vma) { /* XXXKW currently no mmap support */ return -EINVAL; return 0; } static int cs4297a_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { struct cs4297a_state *s = (struct cs4297a_state *) file->private_data; unsigned long flags; audio_buf_info abinfo; count_info cinfo; int val, mapped, ret; CS_DBGOUT(CS_FUNCTION|CS_IOCTL, 4, printk(KERN_INFO "cs4297a: cs4297a_ioctl(): file=0x%.8x cmd=0x%.8x\n", (unsigned) file, cmd)); #if CSDEBUG cs_printioctl(cmd); #endif VALIDATE_STATE(s); mapped = ((file->f_mode & FMODE_WRITE) && s->dma_dac.mapped) || ((file->f_mode & FMODE_READ) && s->dma_adc.mapped); switch (cmd) { case OSS_GETVERSION: CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO "cs4297a: cs4297a_ioctl(): SOUND_VERSION=0x%.8x\n", SOUND_VERSION)); return put_user(SOUND_VERSION, (int *) arg); case SNDCTL_DSP_SYNC: CS_DBGOUT(CS_IOCTL, 4, printk(KERN_INFO "cs4297a: cs4297a_ioctl(): DSP_SYNC\n")); if 
 (file->f_mode & FMODE_WRITE)
			return drain_dac(s,
					 0 /*file->f_flags & O_NONBLOCK */ );
		return 0;

	case SNDCTL_DSP_SETDUPLEX:
		return 0;

	case SNDCTL_DSP_GETCAPS:
		return put_user(DSP_CAP_DUPLEX | DSP_CAP_REALTIME |
				DSP_CAP_TRIGGER | DSP_CAP_MMAP, (int *) arg);

	case SNDCTL_DSP_RESET:
		// Stop the engines and resync the software ring pointers
		// with the hardware's current descriptor address.
		CS_DBGOUT(CS_IOCTL, 4, printk(KERN_INFO
			"cs4297a: cs4297a_ioctl(): DSP_RESET\n"));
		if (file->f_mode & FMODE_WRITE) {
			stop_dac(s);
			synchronize_irq(s->irq);
			s->dma_dac.count = s->dma_dac.total_bytes =
			    s->dma_dac.blocks = s->dma_dac.wakeup = 0;
			s->dma_dac.swptr = s->dma_dac.hwptr =
			    (int) (((__raw_readq(SS_CSR(R_SER_DMA_CUR_DSCR_ADDR_TX)) &
				     M_DMA_CURDSCR_ADDR) -
				    s->dma_dac.descrtab_phys) /
				   sizeof(serdma_descr_t));
		}
		if (file->f_mode & FMODE_READ) {
			stop_adc(s);
			synchronize_irq(s->irq);
			// NOTE(review): this ADC branch clears
			// s->dma_dac.wakeup - looks like a copy/paste slip
			// (expected s->dma_adc.wakeup); confirm before changing.
			s->dma_adc.count = s->dma_adc.total_bytes =
			    s->dma_adc.blocks = s->dma_dac.wakeup = 0;
			s->dma_adc.swptr = s->dma_adc.hwptr =
			    (int) (((__raw_readq(SS_CSR(R_SER_DMA_CUR_DSCR_ADDR_RX)) &
				     M_DMA_CURDSCR_ADDR) -
				    s->dma_adc.descrtab_phys) /
				   sizeof(serdma_descr_t));
		}
		return 0;

	case SNDCTL_DSP_SPEED:
		// Hardware runs at a fixed 48 kHz; report that regardless
		// of what the caller requested.
		if (get_user(val, (int *) arg))
			return -EFAULT;
		CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
			"cs4297a: cs4297a_ioctl(): DSP_SPEED val=%d -> 48000\n", val));
		val = 48000;
		return put_user(val, (int *) arg);

	case SNDCTL_DSP_STEREO:
		if (get_user(val, (int *) arg))
			return -EFAULT;
		CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
			"cs4297a: cs4297a_ioctl(): DSP_STEREO val=%d\n", val));
		if (file->f_mode & FMODE_READ) {
			stop_adc(s);
			s->dma_adc.ready = 0;
			s->prop_adc.channels = val ? 2 : 1;
		}
		if (file->f_mode & FMODE_WRITE) {
			stop_dac(s);
			s->dma_dac.ready = 0;
			s->prop_dac.channels = val ? 2 : 1;
		}
		return 0;

	case SNDCTL_DSP_CHANNELS:
		if (get_user(val, (int *) arg))
			return -EFAULT;
		CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
			"cs4297a: cs4297a_ioctl(): DSP_CHANNELS val=%d\n",
				val));
		if (val != 0) {
			if (file->f_mode & FMODE_READ) {
				stop_adc(s);
				s->dma_adc.ready = 0;
				if (val >= 2)
					s->prop_adc.channels = 2;
				else
					s->prop_adc.channels = 1;
			}
			if (file->f_mode & FMODE_WRITE) {
				stop_dac(s);
				s->dma_dac.ready = 0;
				if (val >= 2)
					s->prop_dac.channels = 2;
				else
					s->prop_dac.channels = 1;
			}
		}
		if (file->f_mode & FMODE_WRITE)
			val = s->prop_dac.channels;
		else if (file->f_mode & FMODE_READ)
			val = s->prop_adc.channels;
		return put_user(val, (int *) arg);

	case SNDCTL_DSP_GETFMTS:	// Returns a mask
		CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
			"cs4297a: cs4297a_ioctl(): DSP_GETFMT val=0x%.8x\n",
				AFMT_S16_LE | AFMT_U16_LE | AFMT_S8 |
				AFMT_U8));
		return put_user(AFMT_S16_LE | AFMT_U16_LE | AFMT_S8 |
				AFMT_U8, (int *) arg);

	case SNDCTL_DSP_SETFMT:
		// Unsupported formats silently degrade to AFMT_U8.
		if (get_user(val, (int *) arg))
			return -EFAULT;
		CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
			"cs4297a: cs4297a_ioctl(): DSP_SETFMT val=0x%.8x\n",
				val));
		if (val != AFMT_QUERY) {
			if (file->f_mode & FMODE_READ) {
				stop_adc(s);
				s->dma_adc.ready = 0;
				if (val != AFMT_S16_LE &&
				    val != AFMT_U16_LE &&
				    val != AFMT_S8 && val != AFMT_U8)
					val = AFMT_U8;
				s->prop_adc.fmt = val;
				s->prop_adc.fmt_original = s->prop_adc.fmt;
			}
			if (file->f_mode & FMODE_WRITE) {
				stop_dac(s);
				s->dma_dac.ready = 0;
				if (val != AFMT_S16_LE &&
				    val != AFMT_U16_LE &&
				    val != AFMT_S8 && val != AFMT_U8)
					val = AFMT_U8;
				s->prop_dac.fmt = val;
				s->prop_dac.fmt_original = s->prop_dac.fmt;
			}
		} else {
			if (file->f_mode & FMODE_WRITE)
				val = s->prop_dac.fmt_original;
			else if (file->f_mode & FMODE_READ)
				val = s->prop_adc.fmt_original;
		}
		CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
		  "cs4297a: cs4297a_ioctl(): DSP_SETFMT return val=0x%.8x\n",
			val));
		return put_user(val, (int *) arg);

	case SNDCTL_DSP_POST:
		CS_DBGOUT(CS_IOCTL, 4, printk(KERN_INFO
			"cs4297a: cs4297a_ioctl(): DSP_POST\n"));
		return 0;

	case SNDCTL_DSP_GETTRIGGER:
		val = 0;
		if (file->f_mode & s->ena & FMODE_READ)
			val |= PCM_ENABLE_INPUT;
		if (file->f_mode & s->ena & FMODE_WRITE)
			val |= PCM_ENABLE_OUTPUT;
		return put_user(val, (int *) arg);

	case SNDCTL_DSP_SETTRIGGER:
		if (get_user(val, (int *) arg))
			return -EFAULT;
		if (file->f_mode & FMODE_READ) {
			if (val & PCM_ENABLE_INPUT) {
				if (!s->dma_adc.ready &&
				    (ret = prog_dmabuf_adc(s)))
					return ret;
				start_adc(s);
			} else
				stop_adc(s);
		}
		if (file->f_mode & FMODE_WRITE) {
			if (val & PCM_ENABLE_OUTPUT) {
				if (!s->dma_dac.ready &&
				    (ret = prog_dmabuf_dac(s)))
					return ret;
				start_dac(s);
			} else
				stop_dac(s);
		}
		return 0;

	case SNDCTL_DSP_GETOSPACE:
		if (!(file->f_mode & FMODE_WRITE))
			return -EINVAL;
		if (!s->dma_dac.ready && (val = prog_dmabuf_dac(s)))
			return val;
		spin_lock_irqsave(&s->lock, flags);
		cs4297a_update_ptr(s, CS_FALSE);
		abinfo.fragsize = s->dma_dac.fragsize;
		if (s->dma_dac.mapped)
			abinfo.bytes = s->dma_dac.sbufsz;
		else
			abinfo.bytes =
			    s->dma_dac.sbufsz - s->dma_dac.count;
		abinfo.fragstotal = s->dma_dac.numfrag;
		abinfo.fragments = abinfo.bytes >> s->dma_dac.fragshift;
		CS_DBGOUT(CS_FUNCTION | CS_PARMS, 4, printk(KERN_INFO
			"cs4297a: cs4297a_ioctl(): GETOSPACE .fragsize=%d .bytes=%d .fragstotal=%d .fragments=%d\n",
				abinfo.fragsize, abinfo.bytes,
				abinfo.fragstotal, abinfo.fragments));
		spin_unlock_irqrestore(&s->lock, flags);
		return copy_to_user((void *) arg, &abinfo,
				    sizeof(abinfo)) ? -EFAULT : 0;

	case SNDCTL_DSP_GETISPACE:
		// Halve fragment/byte counts when 8-bit conversion is active.
		if (!(file->f_mode & FMODE_READ))
			return -EINVAL;
		if (!s->dma_adc.ready && (val = prog_dmabuf_adc(s)))
			return val;
		spin_lock_irqsave(&s->lock, flags);
		cs4297a_update_ptr(s, CS_FALSE);
		if (s->conversion) {
			abinfo.fragsize = s->dma_adc.fragsize / 2;
			abinfo.bytes = s->dma_adc.count / 2;
			abinfo.fragstotal = s->dma_adc.numfrag;
			abinfo.fragments =
			    abinfo.bytes >> (s->dma_adc.fragshift - 1);
		} else {
			abinfo.fragsize = s->dma_adc.fragsize;
			abinfo.bytes = s->dma_adc.count;
			abinfo.fragstotal = s->dma_adc.numfrag;
			abinfo.fragments =
			    abinfo.bytes >> s->dma_adc.fragshift;
		}
		spin_unlock_irqrestore(&s->lock, flags);
		return copy_to_user((void *) arg, &abinfo,
				    sizeof(abinfo)) ? -EFAULT : 0;

	case SNDCTL_DSP_NONBLOCK:
		file->f_flags |= O_NONBLOCK;
		return 0;

	case SNDCTL_DSP_GETODELAY:
		if (!(file->f_mode & FMODE_WRITE))
			return -EINVAL;
		if (!s->dma_dac.ready && prog_dmabuf_dac(s))
			return 0;
		spin_lock_irqsave(&s->lock, flags);
		cs4297a_update_ptr(s, CS_FALSE);
		val = s->dma_dac.count;
		spin_unlock_irqrestore(&s->lock, flags);
		return put_user(val, (int *) arg);

	case SNDCTL_DSP_GETIPTR:
		if (!(file->f_mode & FMODE_READ))
			return -EINVAL;
		if (!s->dma_adc.ready && prog_dmabuf_adc(s))
			return 0;
		spin_lock_irqsave(&s->lock, flags);
		cs4297a_update_ptr(s, CS_FALSE);
		cinfo.bytes = s->dma_adc.total_bytes;
		if (s->dma_adc.mapped) {
			cinfo.blocks =
			    (cinfo.bytes >> s->dma_adc.fragshift) -
			    s->dma_adc.blocks;
			s->dma_adc.blocks =
			    cinfo.bytes >> s->dma_adc.fragshift;
		} else {
			if (s->conversion) {
				cinfo.blocks =
				    s->dma_adc.count /
				    2 >> (s->dma_adc.fragshift - 1);
			} else
				cinfo.blocks =
				    s->dma_adc.count >> s->dma_adc.
				    fragshift;
		}
		if (s->conversion)
			cinfo.ptr = s->dma_adc.hwptr / 2;
		else
			cinfo.ptr = s->dma_adc.hwptr;
		if (s->dma_adc.mapped)
			s->dma_adc.count &= s->dma_adc.fragsize - 1;
		spin_unlock_irqrestore(&s->lock, flags);
		return copy_to_user((void *) arg, &cinfo,
				    sizeof(cinfo)) ? -EFAULT : 0;

	case SNDCTL_DSP_GETOPTR:
		if (!(file->f_mode & FMODE_WRITE))
			return -EINVAL;
		if (!s->dma_dac.ready && prog_dmabuf_dac(s))
			return 0;
		spin_lock_irqsave(&s->lock, flags);
		cs4297a_update_ptr(s, CS_FALSE);
		cinfo.bytes = s->dma_dac.total_bytes;
		if (s->dma_dac.mapped) {
			cinfo.blocks =
			    (cinfo.bytes >> s->dma_dac.fragshift) -
			    s->dma_dac.blocks;
			s->dma_dac.blocks =
			    cinfo.bytes >> s->dma_dac.fragshift;
		} else {
			cinfo.blocks =
			    s->dma_dac.count >> s->dma_dac.fragshift;
		}
		cinfo.ptr = s->dma_dac.hwptr;
		if (s->dma_dac.mapped)
			s->dma_dac.count &= s->dma_dac.fragsize - 1;
		spin_unlock_irqrestore(&s->lock, flags);
		return copy_to_user((void *) arg, &cinfo,
				    sizeof(cinfo)) ? -EFAULT : 0;

	case SNDCTL_DSP_GETBLKSIZE:
		if (file->f_mode & FMODE_WRITE) {
			if ((val = prog_dmabuf_dac(s)))
				return val;
			return put_user(s->dma_dac.fragsize, (int *) arg);
		}
		if ((val = prog_dmabuf_adc(s)))
			return val;
		if (s->conversion)
			return put_user(s->dma_adc.fragsize / 2,
					(int *) arg);
		else
			return put_user(s->dma_adc.fragsize, (int *) arg);

	case SNDCTL_DSP_SETFRAGMENT:
		if (get_user(val, (int *) arg))
			return -EFAULT;
		return 0;	// Say OK, but do nothing.

	case SNDCTL_DSP_SUBDIVIDE:
		if ((file->f_mode & FMODE_READ && s->dma_adc.subdivision)
		    || (file->f_mode & FMODE_WRITE
			&& s->dma_dac.subdivision))
			return -EINVAL;
		if (get_user(val, (int *) arg))
			return -EFAULT;
		if (val != 1 && val != 2 && val != 4)
			return -EINVAL;
		if (file->f_mode & FMODE_READ)
			s->dma_adc.subdivision = val;
		else if (file->f_mode & FMODE_WRITE)
			s->dma_dac.subdivision = val;
		return 0;

	// NOTE(review): the three SOUND_PCM_READ_* cases below fall
	// through to the next case when neither FMODE bit is set;
	// open() guarantees at least one mode, so this is unreachable
	// in practice -- confirm before "fixing".
	case SOUND_PCM_READ_RATE:
		if (file->f_mode & FMODE_READ)
			return put_user(s->prop_adc.rate, (int *) arg);
		else if (file->f_mode & FMODE_WRITE)
			return put_user(s->prop_dac.rate, (int *) arg);

	case SOUND_PCM_READ_CHANNELS:
		if (file->f_mode & FMODE_READ)
			return put_user(s->prop_adc.channels,
					(int *) arg);
		else if (file->f_mode & FMODE_WRITE)
			return put_user(s->prop_dac.channels,
					(int *) arg);

	case SOUND_PCM_READ_BITS:
		if (file->f_mode & FMODE_READ)
			return put_user(
				(s->prop_adc.fmt & (AFMT_S8 | AFMT_U8)) ?
					8 : 16, (int *) arg);
		else if (file->f_mode & FMODE_WRITE)
			return put_user(
				(s->prop_dac.fmt & (AFMT_S8 | AFMT_U8)) ?
					8 : 16, (int *) arg);

	case SOUND_PCM_WRITE_FILTER:
	case SNDCTL_DSP_SETSYNCRO:
	case SOUND_PCM_READ_FILTER:
		return -EINVAL;
	}
	// Not a DSP command: hand off to the mixer handler.
	return mixer_ioctl(s, cmd, arg);
}

/*
 * cs4297a_release - release() for the audio device.  Drains whatever is
 * still queued, stops the engines, frees the DMA buffers and wakes any
 * opener blocked in cs4297a_open().
 */
static int cs4297a_release(struct inode *inode, struct file *file)
{
	struct cs4297a_state *s =
	    (struct cs4297a_state *) file->private_data;

	CS_DBGOUT(CS_FUNCTION | CS_RELEASE, 2, printk(KERN_INFO
		"cs4297a: cs4297a_release(): inode=0x%.8x file=0x%.8x f_mode=0x%x\n",
			(unsigned) inode, (unsigned) file, file->f_mode));
	VALIDATE_STATE(s);

	if (file->f_mode & FMODE_WRITE) {
		drain_dac(s, file->f_flags & O_NONBLOCK);
		mutex_lock(&s->open_sem_dac);
		stop_dac(s);
		dealloc_dmabuf(s, &s->dma_dac);
		s->open_mode &= ~FMODE_WRITE;
		mutex_unlock(&s->open_sem_dac);
		wake_up(&s->open_wait_dac);
	}
	if (file->f_mode & FMODE_READ) {
		drain_adc(s, file->f_flags & O_NONBLOCK);
		mutex_lock(&s->open_sem_adc);
		stop_adc(s);
		dealloc_dmabuf(s, &s->dma_adc);
		s->open_mode &= ~FMODE_READ;
		mutex_unlock(&s->open_sem_adc);
		wake_up(&s->open_wait_adc);
	}
	return 0;
}

/*
 * cs4297a_open - open() for the audio device.  Looks up the state by
 * minor number, enforces exclusive access per direction (sleeping until
 * a previous opener releases, unless O_NONBLOCK), initializes the
 * default 48 kHz stereo S16_BE properties and programs the DMA buffers.
 */
static int cs4297a_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	struct cs4297a_state *s=NULL;
	struct list_head *entry;

	CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2, printk(KERN_INFO
		"cs4297a: cs4297a_open(): inode=0x%.8x file=0x%.8x f_mode=0x%x\n",
			(unsigned) inode, (unsigned) file, file->f_mode));
	CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2, printk(KERN_INFO
		"cs4297a: status = %08x\n",
			(int)__raw_readq(SS_CSR(R_SER_STATUS_DEBUG))));

	list_for_each(entry, &cs4297a_devs)
	{
		s = list_entry(entry, struct cs4297a_state, list);

		if (!((s->dev_audio ^ minor) & ~0xf))
			break;
	}
	if (entry == &cs4297a_devs)
		return -ENODEV;
	if (!s) {
		CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2, printk(KERN_INFO
			"cs4297a: cs4297a_open(): Error - unable to find audio state struct\n"));
		return -ENODEV;
	}
	VALIDATE_STATE(s);
	file->private_data = s;

	// wait for device to become free
	if (!(file->f_mode & (FMODE_WRITE | FMODE_READ))) {
		CS_DBGOUT(CS_FUNCTION | CS_OPEN | CS_ERROR, 2, printk(KERN_INFO
			"cs4297a: cs4297a_open(): Error - must open READ and/or WRITE\n"));
		return -ENODEV;
	}
	if (file->f_mode & FMODE_WRITE) {
		// Busy-wait for any in-flight TX descriptors to retire
		// before taking over the DAC.
		if (__raw_readq(SS_CSR(R_SER_DMA_DSCR_COUNT_TX)) != 0) {
			printk(KERN_ERR "cs4297a: TX pipe needs to drain\n");
			while (__raw_readq(SS_CSR(R_SER_DMA_DSCR_COUNT_TX)))
				;
		}

		mutex_lock(&s->open_sem_dac);
		while (s->open_mode & FMODE_WRITE) {
			if (file->f_flags & O_NONBLOCK) {
				mutex_unlock(&s->open_sem_dac);
				return -EBUSY;
			}
			mutex_unlock(&s->open_sem_dac);
			interruptible_sleep_on(&s->open_wait_dac);

			if (signal_pending(current)) {
				printk("open - sig pending\n");
				return -ERESTARTSYS;
			}
			mutex_lock(&s->open_sem_dac);
		}
	}
	if (file->f_mode & FMODE_READ) {
		mutex_lock(&s->open_sem_adc);
		while (s->open_mode & FMODE_READ) {
			if (file->f_flags & O_NONBLOCK) {
				mutex_unlock(&s->open_sem_adc);
				return -EBUSY;
			}
			mutex_unlock(&s->open_sem_adc);
			interruptible_sleep_on(&s->open_wait_adc);

			if (signal_pending(current)) {
				printk("open - sig pending\n");
				return -ERESTARTSYS;
			}
			mutex_lock(&s->open_sem_adc);
		}
	}
	s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
	if (file->f_mode & FMODE_READ) {
		s->prop_adc.fmt = AFMT_S16_BE;
		s->prop_adc.fmt_original = s->prop_adc.fmt;
		s->prop_adc.channels = 2;
		s->prop_adc.rate = 48000;
		s->conversion = 0;
		s->ena &= ~FMODE_READ;
		s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags =
		    s->dma_adc.subdivision = 0;
		mutex_unlock(&s->open_sem_adc);

		if (prog_dmabuf_adc(s)) {
			CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR
				"cs4297a: adc Program dmabufs failed.\n"));
			cs4297a_release(inode, file);
			return -ENOMEM;
		}
	}
	if (file->f_mode & FMODE_WRITE) {
		s->prop_dac.fmt = AFMT_S16_BE;
		s->prop_dac.fmt_original = s->prop_dac.fmt;
		s->prop_dac.channels = 2;
		s->prop_dac.rate = 48000;
		s->conversion = 0;
		s->ena &= ~FMODE_WRITE;
		s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags =
		    s->dma_dac.subdivision = 0;
		mutex_unlock(&s->open_sem_dac);

		if (prog_dmabuf_dac(s)) {
			CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR
				"cs4297a: dac Program dmabufs failed.\n"));
			cs4297a_release(inode, file);
			return -ENOMEM;
		}
	}
	CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2, printk(KERN_INFO
		"cs4297a: cs4297a_open()- 0\n"));
	return nonseekable_open(inode, file);
}

// ******************************************************************************************
// Wave (audio) file operations struct.
// ******************************************************************************************
static /*const */ struct file_operations cs4297a_audio_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cs4297a_read,
	.write		= cs4297a_write,
	.poll		= cs4297a_poll,
	.ioctl		= cs4297a_ioctl,
	.mmap		= cs4297a_mmap,
	.open		= cs4297a_open,
	.release	= cs4297a_release,
};

/*
 * cs4297a_interrupt - serializer interrupt handler.  Recovers from RX
 * sync errors and FIFO overruns (rebuilding the receive descriptor
 * ring), then updates the ring-pointer bookkeeping.
 */
static void cs4297a_interrupt(int irq, void *dev_id)
{
	struct cs4297a_state *s = (struct cs4297a_state *) dev_id;
	u32 status;

	status = __raw_readq(SS_CSR(R_SER_STATUS_DEBUG));

	CS_DBGOUT(CS_INTERRUPT, 6, printk(KERN_INFO
		"cs4297a: cs4297a_interrupt() HISR=0x%.8x\n", status));

#if 0
	/* XXXKW what check *should* be done here? */
	if (!(status & (M_SYNCSER_RX_EOP_COUNT | M_SYNCSER_RX_OVERRUN |
			M_SYNCSER_RX_SYNC_ERR))) {
		status = __raw_readq(SS_CSR(R_SER_STATUS));
		printk(KERN_ERR "cs4297a: unexpected interrupt (status %08x)\n",
		       status);
		return;
	}
#endif

	if (status & M_SYNCSER_RX_SYNC_ERR) {
		status = __raw_readq(SS_CSR(R_SER_STATUS));
		printk(KERN_ERR "cs4297a: rx sync error (status %08x)\n",
		       status);
		return;
	}

	if (status & M_SYNCSER_RX_OVERRUN) {
		int newptr, i;
		s->stats.rx_ovrrn++;
		printk(KERN_ERR "cs4297a: receive FIFO overrun\n");

		/* Fix things up: get the receive descriptor pool
		   clean and give them back to the hardware */
		while (__raw_readq(SS_CSR(R_SER_DMA_DSCR_COUNT_RX)))
			;
		newptr = (unsigned) (((__raw_readq(SS_CSR(R_SER_DMA_CUR_DSCR_ADDR_RX)) &
				       M_DMA_CURDSCR_ADDR) -
				      s->dma_adc.descrtab_phys) /
				     sizeof(serdma_descr_t));
		for (i=0; i<DMA_DESCR; i++) {
			s->dma_adc.descrtab[i].descr_a &= ~M_DMA_SERRX_SOP;
		}
		s->dma_adc.swptr = s->dma_adc.hwptr = newptr;
		s->dma_adc.count = 0;
		s->dma_adc.sb_swptr = s->dma_adc.sb_hwptr =
		    s->dma_adc.sample_buf;
		__raw_writeq(DMA_DESCR, SS_CSR(R_SER_DMA_DSCR_COUNT_RX));
	}

	spin_lock(&s->lock);
	cs4297a_update_ptr(s,CS_TRUE);
	spin_unlock(&s->lock);

	CS_DBGOUT(CS_INTERRUPT, 6, printk(KERN_INFO
		"cs4297a: cs4297a_interrupt()-\n"));
}

#if 0
// Default mixer volumes; dead code kept under #if 0 by the author.
static struct initvol {
	int mixch;
	int vol;
} initvol[] __initdata = {

	{SOUND_MIXER_WRITE_VOLUME, 0x4040},
	{SOUND_MIXER_WRITE_PCM, 0x4040},
	{SOUND_MIXER_WRITE_SYNTH, 0x4040},
	{SOUND_MIXER_WRITE_CD, 0x4040},
	{SOUND_MIXER_WRITE_LINE, 0x4040},
	{SOUND_MIXER_WRITE_LINE1, 0x4040},
	{SOUND_MIXER_WRITE_RECLEV, 0x0000},
	{SOUND_MIXER_WRITE_SPEAKER, 0x4040},
	{SOUND_MIXER_WRITE_MIC, 0x0000}
};
#endif

/*
 * cs4297a_init - module init: enable synchronous serial port 1 (on
 * non-CSWARM boards), reset the codec via the MDIO/GENO bits, then
 * allocate and register the driver state.  (Continues past this chunk.)
 */
static int __init cs4297a_init(void)
{
	struct cs4297a_state *s;
	u32 pwr, id;
	mm_segment_t fs;
	int rval;
#ifndef CONFIG_BCM_CS4297A_CSWARM
	u64 cfg;
	int mdio_val;
#endif

	CS_DBGOUT(CS_INIT | CS_FUNCTION, 2, printk(KERN_INFO
		"cs4297a: cs4297a_init_module()+ \n"));

#ifndef CONFIG_BCM_CS4297A_CSWARM
	mdio_val = __raw_readq(KSEG1 + A_MAC_REGISTER(2, R_MAC_MDIO)) &
		(M_MAC_MDIO_DIR|M_MAC_MDIO_OUT);

	/* Check syscfg for synchronous serial on port 1 */
	cfg = __raw_readq(KSEG1 + A_SCD_SYSTEM_CFG);
	if (!(cfg & M_SYS_SER1_ENABLE)) {
		__raw_writeq(cfg | M_SYS_SER1_ENABLE, KSEG1+A_SCD_SYSTEM_CFG);
		cfg = __raw_readq(KSEG1 + A_SCD_SYSTEM_CFG);
		if (!(cfg & M_SYS_SER1_ENABLE)) {
			printk(KERN_INFO "cs4297a: serial port 1 not configured for synchronous operation\n");
			return -1;
		}

		printk(KERN_INFO "cs4297a: serial port 1 switching to synchronous operation\n");

		/* Force the codec (on SWARM) to reset by clearing
		   GENO, preserving MDIO (no effect on CSWARM) */
		__raw_writeq(mdio_val, KSEG1+A_MAC_REGISTER(2, R_MAC_MDIO));
		udelay(10);
	}

	/* Now set GENO */
	__raw_writeq(mdio_val | M_MAC_GENC, KSEG1+A_MAC_REGISTER(2, R_MAC_MDIO));
	/* Give the codec some time to finish resetting (start the bit clock) */
	udelay(100);
#endif

	if (!(s = kzalloc(sizeof(struct cs4297a_state), GFP_KERNEL))) {
		CS_DBGOUT(CS_ERROR, 1, printk(KERN_ERR
			"cs4297a: probe() no memory for state struct.\n"));
		return -1;
	}
	s->magic =
CS4297a_MAGIC; init_waitqueue_head(&s->dma_adc.wait); init_waitqueue_head(&s->dma_dac.wait); init_waitqueue_head(&s->dma_adc.reg_wait); init_waitqueue_head(&s->dma_dac.reg_wait); init_waitqueue_head(&s->open_wait); init_waitqueue_head(&s->open_wait_adc); init_waitqueue_head(&s->open_wait_dac); mutex_init(&s->open_sem_adc); mutex_init(&s->open_sem_dac); spin_lock_init(&s->lock); s->irq = K_INT_SER_1; if (request_irq (s->irq, cs4297a_interrupt, 0, "Crystal CS4297a", s)) { CS_DBGOUT(CS_INIT | CS_ERROR, 1, printk(KERN_ERR "cs4297a: irq %u in use\n", s->irq)); goto err_irq; } if ((s->dev_audio = register_sound_dsp(&cs4297a_audio_fops, -1)) < 0) { CS_DBGOUT(CS_INIT | CS_ERROR, 1, printk(KERN_ERR "cs4297a: probe() register_sound_dsp() failed.\n")); goto err_dev1; } if ((s->dev_mixer = register_sound_mixer(&cs4297a_mixer_fops, -1)) < 0) { CS_DBGOUT(CS_INIT | CS_ERROR, 1, printk(KERN_ERR "cs4297a: probe() register_sound_mixer() failed.\n")); goto err_dev2; } if (ser_init(s) || dma_init(s)) { CS_DBGOUT(CS_INIT | CS_ERROR, 1, printk(KERN_ERR "cs4297a: ser_init failed.\n")); goto err_dev3; } do { udelay(4000); rval = cs4297a_read_ac97(s, AC97_POWER_CONTROL, &pwr); } while (!rval && (pwr != 0xf)); if (!rval) { char *sb1250_duart_present; fs = get_fs(); set_fs(KERNEL_DS); #if 0 val = SOUND_MASK_LINE; mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val); for (i = 0; i < ARRAY_SIZE(initvol); i++) { val = initvol[i].vol; mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val); } // cs4297a_write_ac97(s, 0x18, 0x0808); #else // cs4297a_write_ac97(s, 0x5e, 0x180); cs4297a_write_ac97(s, 0x02, 0x0808); cs4297a_write_ac97(s, 0x18, 0x0808); #endif set_fs(fs); list_add(&s->list, &cs4297a_devs); cs4297a_read_ac97(s, AC97_VENDOR_ID1, &id); sb1250_duart_present = symbol_get(sb1250_duart_present); if (sb1250_duart_present) sb1250_duart_present[1] = 0; printk(KERN_INFO "cs4297a: initialized (vendor id = %x)\n", id); CS_DBGOUT(CS_INIT | CS_FUNCTION, 2, printk(KERN_INFO "cs4297a: 
cs4297a_init_module()-\n")); return 0; } err_dev3: unregister_sound_mixer(s->dev_mixer); err_dev2: unregister_sound_dsp(s->dev_audio); err_dev1: free_irq(s->irq, s); err_irq: kfree(s); printk(KERN_INFO "cs4297a: initialization failed\n"); return -1; } static void __exit cs4297a_cleanup(void) { /* XXXKW disable_irq, free_irq drain DMA queue disable DMA disable TX/RX free memory */ CS_DBGOUT(CS_INIT | CS_FUNCTION, 2, printk(KERN_INFO "cs4297a: cleanup_cs4297a() finished\n")); } // --------------------------------------------------------------------- MODULE_AUTHOR("Kip Walker, Broadcom Corp."); MODULE_DESCRIPTION("Cirrus Logic CS4297a Driver for Broadcom SWARM board"); // --------------------------------------------------------------------- module_init(cs4297a_init); module_exit(cs4297a_cleanup);
gpl-2.0
1N4148/android_kernel_samsung_msm7x27a
kernel/exit.c
66
45548
/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h>	/* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

static void exit_mm(struct task_struct * tsk);

/*
 * Drop @tsk from the global process bookkeeping.
 *
 * Called from __exit_signal() with ->siglock held, under the
 * write-locked tasklist_lock taken by release_task().  When
 * @group_dead is true this was the last thread of the group, so the
 * group-wide pid links (PGID/SID), the global ->tasks list entry and
 * the sibling link are torn down too; otherwise only the per-thread
 * PID and thread-group linkage go away.
 */
static void __unhash_process(struct task_struct *tsk, bool group_dead)
{
	nr_threads--;
	detach_pid(tsk, PIDTYPE_PID);

	if (group_dead) {
		/* Whole group is gone: drop the group-wide identifiers. */
		detach_pid(tsk, PIDTYPE_PGID);
		detach_pid(tsk, PIDTYPE_SID);

		list_del_rcu(&tsk->tasks);
		list_del_init(&tsk->sibling);
		__this_cpu_dec(process_counts);
	}

	list_del_rcu(&tsk->thread_group);
}

/*
 * This function expects the tasklist_lock write-locked.
*/
/*
 * Tear down the exiting thread's signal-handling state and unhash it.
 *
 * Runs under the write-locked tasklist_lock (taken by release_task());
 * takes ->siglock itself.  For a non-leader thread it folds the dying
 * thread's accounting counters into the process-wide totals.  For the
 * last thread of the group it also detaches the controlling tty.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	bool group_dead = thread_group_leader(tsk);
	struct sighand_struct *sighand;
	struct tty_struct *uninitialized_var(tty);

	sighand = rcu_dereference_check(tsk->sighand,
					rcu_read_lock_held() ||
					lockdep_tasklist_lock_is_held());
	spin_lock(&sighand->siglock);

	posix_cpu_timers_exit(tsk);
	if (group_dead) {
		posix_cpu_timers_exit_group(tsk);
		/* Detach the tty under ->siglock; it is released below. */
		tty = sig->tty;
		sig->tty = NULL;
	} else {
		/*
		 * This can only happen if the caller is de_thread().
		 * FIXME: this is the temporary hack, we should teach
		 * posix-cpu-timers to handle this case correctly.
		 */
		if (unlikely(has_group_leader_pid(tsk)))
			posix_cpu_timers_exit_group(tsk);

		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->gtime = cputime_add(sig->gtime, tsk->gtime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->inblock += task_io_get_inblock(tsk);
		sig->oublock += task_io_get_oublock(tsk);
		task_io_accounting_add(&sig->ioac, &tsk->ioac);
		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	}

	sig->nr_threads--;
	__unhash_process(tsk, group_dead);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	if (group_dead) {
		flush_sigqueue(&sig->shared_pending);
		tty_kref_put(tty);
	}
}

/*
 * RCU callback: final put of the task_struct, deferred past a grace
 * period by release_task() so lock-free walkers never see it freed.
 */
static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	perf_event_delayed_put(tsk);
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}

/*
 * Final reaping of a dead task: drop its credentials' process count,
 * flush /proc entries, unhash it and free it after an RCU grace
 * period.  May loop once more to also reap a zombie group leader that
 * became self-reaping when this last non-leader thread died.
 */
void release_task(struct task_struct * p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	tracehook_prepare_release_task(p);
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials. But shut RCU-lockdep up */
	rcu_read_lock();
	atomic_dec(&__task_cred(p)->user->processes);
	rcu_read_unlock();

	proc_flush_task(p);

	write_lock_irq(&tasklist_lock);
	tracehook_finish_release_task(p);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(task_detached(leader));
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = task_detached(leader);

		/*
		 * This maintains the invariant that release_task()
		 * only runs on a task in EXIT_DEAD, just for sanity.
		 */
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
	struct task_struct *p;
	struct pid *sid = NULL;

	p = pid_task(pgrp, PIDTYPE_PGID);
	if (p == NULL)
		p = pid_task(pgrp, PIDTYPE_PID);
	if (p != NULL)
		sid = task_session(p);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
*/ static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task) { struct task_struct *p; do_each_pid_task(pgrp, PIDTYPE_PGID, p) { if ((p == ignored_task) || (p->exit_state && thread_group_empty(p)) || is_global_init(p->real_parent)) continue; if (task_pgrp(p->real_parent) != pgrp && task_session(p->real_parent) == task_session(p)) return 0; } while_each_pid_task(pgrp, PIDTYPE_PGID, p); return 1; } int is_current_pgrp_orphaned(void) { int retval; read_lock(&tasklist_lock); retval = will_become_orphaned_pgrp(task_pgrp(current), NULL); read_unlock(&tasklist_lock); return retval; } static int has_stopped_jobs(struct pid *pgrp) { int retval = 0; struct task_struct *p; do_each_pid_task(pgrp, PIDTYPE_PGID, p) { if (!task_is_stopped(p)) continue; retval = 1; break; } while_each_pid_task(pgrp, PIDTYPE_PGID, p); return retval; } /* * Check to see if any process groups have become orphaned as * a result of our exiting, and if they have any stopped jobs, * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) */ static void kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent) { struct pid *pgrp = task_pgrp(tsk); struct task_struct *ignored_task = tsk; if (!parent) /* exit: our father is in a different pgrp than * we are and we were the only connection outside. */ parent = tsk->real_parent; else /* reparent: our child is in a different pgrp than * we are, and it was the only connection outside. 
*/ ignored_task = NULL; if (task_pgrp(parent) != pgrp && task_session(parent) == task_session(tsk) && will_become_orphaned_pgrp(pgrp, ignored_task) && has_stopped_jobs(pgrp)) { __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp); __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp); } } /** * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd * * If a kernel thread is launched as a result of a system call, or if * it ever exits, it should generally reparent itself to kthreadd so it * isn't in the way of other processes and is correctly cleaned up on exit. * * The various task state such as scheduling policy and priority may have * been inherited from a user process, so we reset them to sane values here. * * NOTE that reparent_to_kthreadd() gives the caller full capabilities. */ static void reparent_to_kthreadd(void) { write_lock_irq(&tasklist_lock); ptrace_unlink(current); /* Reparent to init */ current->real_parent = current->parent = kthreadd_task; list_move_tail(&current->sibling, &current->real_parent->children); /* Set the exit signal to SIGCHLD so we signal init on exit */ current->exit_signal = SIGCHLD; if (task_nice(current) < 0) set_user_nice(current, 0); /* cpus_allowed? */ /* rt_priority? */ /* signals? */ memcpy(current->signal->rlim, init_task.signal->rlim, sizeof(current->signal->rlim)); atomic_inc(&init_cred.usage); commit_creds(&init_cred); write_unlock_irq(&tasklist_lock); } void __set_special_pids(struct pid *pid) { struct task_struct *curr = current->group_leader; if (task_session(curr) != pid) change_pid(curr, PIDTYPE_SID, pid); if (task_pgrp(curr) != pid) change_pid(curr, PIDTYPE_PGID, pid); } static void set_special_pids(struct pid *pid) { write_lock_irq(&tasklist_lock); __set_special_pids(pid); write_unlock_irq(&tasklist_lock); } /* * Let kernel threads use this to say that they allow a certain signal. * Must not be used if kthread was cloned with CLONE_SIGHAND. 
*/
/*
 * Unblock @sig for the calling kernel thread and mark it as handled in
 * kernel, so the signal code delivers it instead of converting it to
 * SIGKILL or dropping it.  Returns 0 or -EINVAL for a bad signal number.
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	/* This is only needed for daemonize()'ed kthreads */
	sigdelset(&current->blocked, sig);
	/*
	 * Kernel threads handle their own signals. Let the signal code
	 * know it'll be handled, so that they don't get converted to
	 * SIGKILL or just silently dropped.
	 */
	current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);

/*
 * Revert allow_signal(): set @sig back to SIG_IGN for the caller.
 * Returns 0 or -EINVAL for a bad signal number.
 */
int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 *	Put all the gunge required to become a kernel thread without
 *	attached user resources in one place where it belongs.
 */

void daemonize(const char *name, ...)
{
	va_list args;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);
	/*
	 * We don't want to have TIF_FREEZE set if the system-wide hibernation
	 * or suspend transition begins right now.
	 */
	current->flags |= (PF_NOFREEZE | PF_KTHREAD);

	if (current->nsproxy != &init_nsproxy) {
		get_nsproxy(&init_nsproxy);
		switch_task_namespaces(current, &init_nsproxy);
	}
	set_special_pids(&init_struct_pid);
	proc_clear_tty(current);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	daemonize_fs_struct();
	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);

/*
 * Close every open fd in @files.  Only called from put_files_struct()
 * when the last reference is gone, so no locking is needed to walk
 * the fdtable.
 */
static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.  But use RCU to shut RCU-lockdep up.
	 */
	rcu_read_lock();
	fdt = files_fdtable(files);
	rcu_read_unlock();
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}

/* Take a counted reference on @task's files_struct (NULL if it has none). */
struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

/*
 * Drop a reference on @files; on the last put, close all fds and free
 * the fdtable (RCU-deferred when it was expanded beyond the embedded one).
 */
void put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		rcu_read_lock();
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
		rcu_read_unlock();
	}
}

/* Install @files as the caller's files_struct, dropping the old one. */
void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

/* Detach and drop @tsk's files_struct, if any. */
void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

#ifdef CONFIG_MM_OWNER
/*
 * Task p is exiting and it owned mm, lets find a new owner for it
 */
static inline int
mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
{
	/*
	 * If there are other users of the mm and the owner (us) is exiting
	 * we need to find a new owner to take on the responsibility.
	 */
	if (atomic_read(&mm->mm_users) <= 1)
		return 0;
	if (mm->owner != p)
		return 0;
	return 1;
}

/*
 * Pick a new ->owner for @mm when the current owner exits: prefer a
 * child, then a sibling, then any task using the mm; NULL if none.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	if (!mm_need_new_owner(mm, p))
		return;

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->real_parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else. We should not get
	 * here often
	 */
	do_each_thread(g, c) {
		if (c->mm == mm)
			goto assign_new_owner;
	} while_each_thread(g, c);
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
	 */
	mm->owner = NULL;
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		/* Lost the race: c released the mm meanwhile; start over. */
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	mm->owner = c;
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;
	struct core_state *core_state;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_state
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	core_state = mm->core_state;
	if (core_state) {
		struct core_thread self;
		up_read(&mm->mmap_sem);

		self.task = tsk;
		self.next = xchg(&core_state->dumper.next, &self);
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		/* Park here until the coredumper wakes us (clears self.task). */
		for (;;) {
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			if (!self.task) /* see coredump_finish() */
				break;
			schedule();
		}
		__set_task_state(tsk, TASK_RUNNING);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	/* We don't want this task to be frozen prematurely */
	clear_freeze_flag(tsk);
	if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
		atomic_dec(&mm->oom_disable_count);
	task_unlock(tsk);
	mm_update_next_owner(mm);
	mmput(mm);
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
*/
/*
 * Choose who inherits the exiting @father's children: another live
 * thread in his group if one exists, else the pid namespace's child
 * reaper.  If @father *is* the reaper of a non-init namespace, the
 * whole namespace is zapped (dropping and retaking tasklist_lock).
 */
static struct task_struct *find_new_reaper(struct task_struct *father)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *thread;

	thread = father;
	while_each_thread(father, thread) {
		if (thread->flags & PF_EXITING)
			continue;
		if (unlikely(pid_ns->child_reaper == father))
			pid_ns->child_reaper = thread;
		return thread;
	}

	if (unlikely(pid_ns->child_reaper == father)) {
		write_unlock_irq(&tasklist_lock);
		if (unlikely(pid_ns == &init_pid_ns))
			panic("Attempted to kill init!");

		zap_pid_ns_processes(pid_ns);
		write_lock_irq(&tasklist_lock);
		/*
		 * We can not clear ->child_reaper or leave it alone.
		 * There may by stealth EXIT_DEAD tasks on ->children,
		 * forget_original_parent() must move them somewhere.
		 */
		pid_ns->child_reaper = init_pid_ns.child_reaper;
	}

	return pid_ns->child_reaper;
}

/*
* Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	list_move_tail(&p->sibling, &p->real_parent->children);

	if (task_detached(p))
		return;
	/*
	 * If this is a threaded reparent there is no need to
	 * notify anyone anything has happened.
	 */
	if (same_thread_group(p->real_parent, father))
		return;

	/* We don't want people slaying init.  */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!task_ptrace(p) &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		do_notify_parent(p, p->exit_signal);
		if (task_detached(p)) {
			p->exit_state = EXIT_DEAD;
			list_move_tail(&p->sibling, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

/*
 * Hand every child of the exiting @father over to the reaper chosen by
 * find_new_reaper(), then release any children that became self-reaping
 * zombies in the process.
 */
static void forget_original_parent(struct task_struct *father)
{
	struct task_struct *p, *n, *reaper;
	LIST_HEAD(dead_children);

	write_lock_irq(&tasklist_lock);
	/*
	 * Note that exit_ptrace() and find_new_reaper() might
	 * drop tasklist_lock and reacquire it.
	 */
	exit_ptrace(father);
	reaper = find_new_reaper(father);

	list_for_each_entry_safe(p, n, &father->children, sibling) {
		struct task_struct *t = p;
		do {
			t->real_parent = reaper;
			if (t->parent == father) {
				BUG_ON(task_ptrace(t));
				t->parent = t->real_parent;
			}
			if (t->pdeath_signal)
				group_send_sig_info(t->pdeath_signal,
						    SEND_SIG_NOINFO, t);
		} while_each_thread(p, t);
		reparent_leader(father, p, &dead_children);
	}
	write_unlock_irq(&tasklist_lock);

	BUG_ON(!list_empty(&father->children));

	list_for_each_entry_safe(p, n, &dead_children, sibling) {
		list_del_init(&p->sibling);
		release_task(p);
	}
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	int signal;
	void *cookie;

	/*
	 * This does two things:
	 *
  	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */
	forget_original_parent(tsk);
	exit_task_namespaces(tsk);

	write_lock_irq(&tasklist_lock);
	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */
	if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
	    (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
	     tsk->self_exec_id != tsk->parent_exec_id))
		tsk->exit_signal = SIGCHLD;

	signal = tracehook_notify_death(tsk, &cookie, group_dead);
	if (signal >= 0)
		signal = do_notify_parent(tsk, signal);

	tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;

	/* mt-exec, de_thread() is waiting for group leader */
	if (unlikely(tsk->signal->notify_count < 0))
		wake_up_process(tsk->signal->group_exit_task);
	write_unlock_irq(&tasklist_lock);

	tracehook_report_death(tsk, signal, cookie, group_dead);

	/* If the process is dead, release it - nobody will wait for it */
	if (signal == DEATH_REAP)
		release_task(tsk);
}

#ifdef CONFIG_DEBUG_STACK_USAGE
/* Record and report a new low-water mark of kernel stack headroom. */
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
				"left\n",
				current->comm, free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

/*
 * Terminate the calling task: release every resource it owns (mm,
 * files, fs, namespaces, timers, ...), notify the parent via
 * exit_notify(), and schedule away for the last time.  Never returns.
 */
NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	/*
	 * If do_exit is called because this processes oopsed, it's possible
	 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
	 * continuing. Amongst other possible reasons, this is to prevent
	 * mm_release()->clear_child_tid() from writing to a user-controlled
	 * kernel address.
	 */
	set_fs(USER_DS);

	tracehook_report_exit(&code);

	validate_creds_for_do_exit(tsk);

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		/*
		 * We can do this unlocked here. The futex code uses
		 * this flag just to verify whether the pi state
		 * cleanup has been done or not. In the worst case it
		 * loops once more. We pretend that the cleanup was
		 * done as there is no way to return. Either the
		 * OWNER_DIED bit is set by now or we push the blocked
		 * task into the wait for ever nirwana as well.
		 */
		tsk->flags |= PF_EXITPIDONE;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	exit_irq_thread();

	exit_signals(tsk);  /* sets PF_EXITING */

	/*
	 * tsk->flags are checked in the futex code to protect against
	 * an exiting task cleaning up the robust pi futexes.
	 */
	smp_mb();
	raw_spin_unlock_wait(&tsk->pi_lock);

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, task_pid_nr(current),
				preempt_count());

	acct_update_integrals(tsk);
	/* sync mm's RSS info before statistics gathering */
	if (tsk->mm)
		sync_mm_rss(tsk, tsk->mm);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		/* Last thread of the group: stop group-wide timers. */
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	if (unlikely(tsk->audit_context))
		audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	check_stack_usage();
	exit_thread();

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 *
	 * because of cgroup mode, must be called before cgroup_exit()
	 */
	perf_event_exit_task(tsk);

	cgroup_exit(tsk, 1);

	if (group_dead)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);

	proc_exit_connector(tsk);

	/*
	 * FIXME: do that only when needed, using sched_exit tracepoint
	 */
	flush_ptrace_hw_breakpoint(tsk);

	exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
	task_lock(tsk);
	mpol_put(tsk->mempolicy);
	tsk->mempolicy = NULL;
	task_unlock(tsk);
#endif
#ifdef CONFIG_FUTEX
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);
	/*
	 * We can do this unlocked here. The futex code uses this flag
	 * just to verify whether the pi state cleanup has been done
	 * or not. In the worst case it loops once more.
	 */
	tsk->flags |= PF_EXITPIDONE;

	if (tsk->io_context)
		exit_io_context(tsk);

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	validate_creds_for_do_exit(tsk);

	preempt_disable();
	exit_rcu();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;
	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

/* Signal @comp (if any) and then exit with @code.  Never returns. */
NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

/* exit(2): terminate the calling thread with the given status byte. */
SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
*/ NORET_TYPE void do_group_exit(int exit_code) { struct signal_struct *sig = current->signal; BUG_ON(exit_code & 0x80); /* core dumps don't get here */ if (signal_group_exit(sig)) exit_code = sig->group_exit_code; else if (!thread_group_empty(current)) { struct sighand_struct *const sighand = current->sighand; spin_lock_irq(&sighand->siglock); if (signal_group_exit(sig)) /* Another thread got here before we took the lock. */ exit_code = sig->group_exit_code; else { sig->group_exit_code = exit_code; sig->flags = SIGNAL_GROUP_EXIT; zap_other_threads(current); } spin_unlock_irq(&sighand->siglock); } do_exit(exit_code); /* NOTREACHED */ } /* * this kills every thread in the thread group. Note that any externally * wait4()-ing process will get the correct exit code - even if this * thread is not the thread group leader. */ SYSCALL_DEFINE1(exit_group, int, error_code) { do_group_exit((error_code & 0xff) << 8); /* NOTREACHED */ return 0; } struct wait_opts { enum pid_type wo_type; int wo_flags; struct pid *wo_pid; struct siginfo __user *wo_info; int __user *wo_stat; struct rusage __user *wo_rusage; wait_queue_t child_wait; int notask_error; }; static inline struct pid *task_pid_type(struct task_struct *task, enum pid_type type) { if (type != PIDTYPE_PID) task = task->group_leader; return task->pids[type].pid; } static int eligible_pid(struct wait_opts *wo, struct task_struct *p) { return wo->wo_type == PIDTYPE_MAX || task_pid_type(p, wo->wo_type) == wo->wo_pid; } static int eligible_child(struct wait_opts *wo, struct task_struct *p) { if (!eligible_pid(wo, p)) return 0; /* Wait for all children (clone and not) if __WALL is set; * otherwise, wait for clone children *only* if __WCLONE is * set; otherwise, wait for non-clone children *only*. (Note: * A "clone" child here is one that reports to its parent * using a signal other than SIGCHLD.) 
*/ if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE)) && !(wo->wo_flags & __WALL)) return 0; return 1; } static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p, pid_t pid, uid_t uid, int why, int status) { struct siginfo __user *infop; int retval = wo->wo_rusage ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; put_task_struct(p); infop = wo->wo_info; if (infop) { if (!retval) retval = put_user(SIGCHLD, &infop->si_signo); if (!retval) retval = put_user(0, &infop->si_errno); if (!retval) retval = put_user((short)why, &infop->si_code); if (!retval) retval = put_user(pid, &infop->si_pid); if (!retval) retval = put_user(uid, &infop->si_uid); if (!retval) retval = put_user(status, &infop->si_status); } if (!retval) retval = pid; return retval; } /* * Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold * read_lock(&tasklist_lock) on entry. If we return zero, we still hold * the lock and this task is uninteresting. If we return nonzero, we have * released the lock and the system call should return. */ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) { unsigned long state; int retval, status, traced; pid_t pid = task_pid_vnr(p); uid_t uid = __task_cred(p)->uid; struct siginfo __user *infop; if (!likely(wo->wo_flags & WEXITED)) return 0; if (unlikely(wo->wo_flags & WNOWAIT)) { int exit_code = p->exit_code; int why; get_task_struct(p); read_unlock(&tasklist_lock); if ((exit_code & 0x7f) == 0) { why = CLD_EXITED; status = exit_code >> 8; } else { why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED; status = exit_code & 0x7f; } return wait_noreap_copyout(wo, p, pid, uid, why, status); } /* * Try to move the task's state to DEAD * only one thread is allowed to do this: */ state = xchg(&p->exit_state, EXIT_DEAD); if (state != EXIT_ZOMBIE) { BUG_ON(state != EXIT_DEAD); return 0; } traced = ptrace_reparented(p); /* * It can be ptraced but not reparented, check * !task_detached() to filter out sub-threads. 
*/ if (likely(!traced) && likely(!task_detached(p))) { struct signal_struct *psig; struct signal_struct *sig; unsigned long maxrss; cputime_t tgutime, tgstime; /* * The resource counters for the group leader are in its * own task_struct. Those for dead threads in the group * are in its signal_struct, as are those for the child * processes it has previously reaped. All these * accumulate in the parent's signal_struct c* fields. * * We don't bother to take a lock here to protect these * p->signal fields, because they are only touched by * __exit_signal, which runs with tasklist_lock * write-locked anyway, and so is excluded here. We do * need to protect the access to parent->signal fields, * as other threads in the parent group can be right * here reaping other children at the same time. * * We use thread_group_times() to get times for the thread * group, which consolidates times for all threads in the * group including the group leader. */ thread_group_times(p, &tgutime, &tgstime); spin_lock_irq(&p->real_parent->sighand->siglock); psig = p->real_parent->signal; sig = p->signal; psig->cutime = cputime_add(psig->cutime, cputime_add(tgutime, sig->cutime)); psig->cstime = cputime_add(psig->cstime, cputime_add(tgstime, sig->cstime)); psig->cgtime = cputime_add(psig->cgtime, cputime_add(p->gtime, cputime_add(sig->gtime, sig->cgtime))); psig->cmin_flt += p->min_flt + sig->min_flt + sig->cmin_flt; psig->cmaj_flt += p->maj_flt + sig->maj_flt + sig->cmaj_flt; psig->cnvcsw += p->nvcsw + sig->nvcsw + sig->cnvcsw; psig->cnivcsw += p->nivcsw + sig->nivcsw + sig->cnivcsw; psig->cinblock += task_io_get_inblock(p) + sig->inblock + sig->cinblock; psig->coublock += task_io_get_oublock(p) + sig->oublock + sig->coublock; maxrss = max(sig->maxrss, sig->cmaxrss); if (psig->cmaxrss < maxrss) psig->cmaxrss = maxrss; task_io_accounting_add(&psig->ioac, &p->ioac); task_io_accounting_add(&psig->ioac, &sig->ioac); spin_unlock_irq(&p->real_parent->sighand->siglock); } /* * Now we are sure this 
task is interesting, and no other * thread can reap it because we set its state to EXIT_DEAD. */ read_unlock(&tasklist_lock); retval = wo->wo_rusage ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; status = (p->signal->flags & SIGNAL_GROUP_EXIT) ? p->signal->group_exit_code : p->exit_code; if (!retval && wo->wo_stat) retval = put_user(status, wo->wo_stat); infop = wo->wo_info; if (!retval && infop) retval = put_user(SIGCHLD, &infop->si_signo); if (!retval && infop) retval = put_user(0, &infop->si_errno); if (!retval && infop) { int why; if ((status & 0x7f) == 0) { why = CLD_EXITED; status >>= 8; } else { why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED; status &= 0x7f; } retval = put_user((short)why, &infop->si_code); if (!retval) retval = put_user(status, &infop->si_status); } if (!retval && infop) retval = put_user(pid, &infop->si_pid); if (!retval && infop) retval = put_user(uid, &infop->si_uid); if (!retval) retval = pid; if (traced) { write_lock_irq(&tasklist_lock); /* We dropped tasklist, ptracer could die and untrace */ ptrace_unlink(p); /* * If this is not a detached task, notify the parent. * If it's still not detached after that, don't release * it now. */ if (!task_detached(p)) { do_notify_parent(p, p->exit_signal); if (!task_detached(p)) { p->exit_state = EXIT_ZOMBIE; p = NULL; } } write_unlock_irq(&tasklist_lock); } if (p != NULL) release_task(p); return retval; } static int *task_stopped_code(struct task_struct *p, bool ptrace) { if (ptrace) { if (task_is_stopped_or_traced(p)) return &p->exit_code; } else { if (p->signal->flags & SIGNAL_STOP_STOPPED) return &p->signal->group_exit_code; } return NULL; } /* * Handle sys_wait4 work for one task in state TASK_STOPPED. We hold * read_lock(&tasklist_lock) on entry. If we return zero, we still hold * the lock and this task is uninteresting. If we return nonzero, we have * released the lock and the system call should return. 
 */
static int wait_task_stopped(struct wait_opts *wo,
				int ptrace, struct task_struct *p)
{
	struct siginfo __user *infop;
	int retval, exit_code, *p_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	if (!ptrace && !(wo->wo_flags & WUNTRACED))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	/* NULL when the task is not in a (group-)stop we may report. */
	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	/*
	 * Unless WNOWAIT was given, consume the stop code so the same
	 * stop is not reported twice.
	 */
	if (!unlikely(wo->wo_flags & WNOWAIT))
		*p_code = 0;

	uid = task_uid(p);
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	/* Caller took tasklist_lock for reading; we drop it on success. */
	read_unlock(&tasklist_lock);

	if (unlikely(wo->wo_flags & WNOWAIT))
		return wait_noreap_copyout(wo, p, pid, uid, why, exit_code);

	retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
	/* wait4-style status word: stop signal in bits 8-15, 0x7f below. */
	if (!retval && wo->wo_stat)
		retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat);

	/* waitid-style siginfo fill-in; each put_user can fault/fail. */
	infop = wo->wo_info;
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)why, &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;
	put_task_struct(p);

	/* Nonzero return tells the caller the lock was released. */
	BUG_ON(!retval);
	return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.
 If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
	int retval;
	pid_t pid;
	uid_t uid;

	/* Only report SIGCONT resumption if the caller asked for it. */
	if (!unlikely(wo->wo_flags & WCONTINUED))
		return 0;

	/* Cheap lockless pre-check; re-validated under siglock below. */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held. */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	/* Unless WNOWAIT, consume the event so it is reported only once. */
	if (!unlikely(wo->wo_flags & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = task_uid(p);
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	/* Caller took tasklist_lock for reading; we drop it on success. */
	read_unlock(&tasklist_lock);

	if (!wo->wo_info) {
		retval = wo->wo_rusage
			? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
		put_task_struct(p);
		/* 0xffff is the wait4 status word meaning "continued". */
		if (!retval && wo->wo_stat)
			retval = put_user(0xffff, wo->wo_stat);
		if (!retval)
			retval = pid;
	} else {
		/* wait_noreap_copyout() drops our task reference. */
		retval = wait_noreap_copyout(wo, p, pid, uid,
					     CLD_CONTINUED, SIGCONT);
		BUG_ON(retval == 0);
	}

	return retval;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
				struct task_struct *p)
{
	int ret = eligible_child(wo, p);
	if (!ret)
		return ret;

	ret = security_task_wait(p);
	if (unlikely(ret < 0)) {
		/*
		 * If we have not yet seen any eligible child,
		 * then let this error code replace -ECHILD.
		 * A permission error will give the user a clue
		 * to look for security policy problems, rather
		 * than for mysterious wait bugs.
*/ if (wo->notask_error) wo->notask_error = ret; return 0; } if (likely(!ptrace) && unlikely(task_ptrace(p))) { /* * This child is hidden by ptrace. * We aren't allowed to see it now, but eventually we will. */ wo->notask_error = 0; return 0; } if (p->exit_state == EXIT_DEAD) return 0; /* * We don't reap group leaders with subthreads. */ if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p)) return wait_task_zombie(wo, p); /* * It's stopped or running now, so it might * later continue, exit, or stop again. */ wo->notask_error = 0; if (task_stopped_code(p, ptrace)) return wait_task_stopped(wo, ptrace, p); return wait_task_continued(wo, p); } /* * Do the work of do_wait() for one thread in the group, @tsk. * * -ECHILD should be in ->notask_error before the first call. * Returns nonzero for a final return, when we have unlocked tasklist_lock. * Returns zero if the search for a child should continue; then * ->notask_error is 0 if there were any eligible children, * or another error from security_task_wait(), or still -ECHILD. 
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	/* Scan @tsk's natural children for a reportable event. */
	list_for_each_entry(p, &tsk->children, sibling) {
		int ret = wait_consider_task(wo, 0, p);
		if (ret)
			return ret;
	}

	return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	/* Same scan, but over tasks @tsk is ptracing. */
	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(wo, 1, p);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Wait-queue callback: wake the waiter only if the exiting/stopping
 * child @key is one this wait_opts could actually report.
 */
static int child_wait_callback(wait_queue_t *wait, unsigned mode,
				int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts,
						child_wait);
	struct task_struct *p = key;

	if (!eligible_pid(wo, p))
		return 0;

	/* __WNOTHREAD: only the direct parent thread may be woken. */
	if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
		return 0;

	return default_wake_function(wait, mode, sync, key);
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	__wake_up_sync_key(&parent->signal->wait_chldexit,
				TASK_INTERRUPTIBLE, 1, p);
}

/*
 * Core of sys_wait4/sys_waitid: block until a matching child event is
 * reaped, or return immediately with WNOHANG.
 */
static long do_wait(struct wait_opts *wo)
{
	struct task_struct *tsk;
	int retval;

	trace_sched_process_wait(wo->wo_pid);

	/* Register on the parent's wait queue before scanning children. */
	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
	/*
	 * If there is nothing that can match our criteria just get out.
	 * We will clear ->notask_error to zero if we see any child that
	 * might later match our criteria, even if we are not able to reap
	 * it yet.
*/ wo->notask_error = -ECHILD; if ((wo->wo_type < PIDTYPE_MAX) && (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type]))) goto notask; set_current_state(TASK_INTERRUPTIBLE); read_lock(&tasklist_lock); tsk = current; do { retval = do_wait_thread(wo, tsk); if (retval) goto end; retval = ptrace_do_wait(wo, tsk); if (retval) goto end; if (wo->wo_flags & __WNOTHREAD) break; } while_each_thread(current, tsk); read_unlock(&tasklist_lock); notask: retval = wo->notask_error; if (!retval && !(wo->wo_flags & WNOHANG)) { retval = -ERESTARTSYS; if (!signal_pending(current)) { schedule(); goto repeat; } } end: __set_current_state(TASK_RUNNING); remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait); return retval; } SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, infop, int, options, struct rusage __user *, ru) { struct wait_opts wo; struct pid *pid = NULL; enum pid_type type; long ret; if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED)) return -EINVAL; if (!(options & (WEXITED|WSTOPPED|WCONTINUED))) return -EINVAL; switch (which) { case P_ALL: type = PIDTYPE_MAX; break; case P_PID: type = PIDTYPE_PID; if (upid <= 0) return -EINVAL; break; case P_PGID: type = PIDTYPE_PGID; if (upid <= 0) return -EINVAL; break; default: return -EINVAL; } if (type < PIDTYPE_MAX) pid = find_get_pid(upid); wo.wo_type = type; wo.wo_pid = pid; wo.wo_flags = options; wo.wo_info = infop; wo.wo_stat = NULL; wo.wo_rusage = ru; ret = do_wait(&wo); if (ret > 0) { ret = 0; } else if (infop) { /* * For a WNOHANG return, clear out all the fields * we would set so the user can easily tell the * difference. 
*/ if (!ret) ret = put_user(0, &infop->si_signo); if (!ret) ret = put_user(0, &infop->si_errno); if (!ret) ret = put_user(0, &infop->si_code); if (!ret) ret = put_user(0, &infop->si_pid); if (!ret) ret = put_user(0, &infop->si_uid); if (!ret) ret = put_user(0, &infop->si_status); } put_pid(pid); /* avoid REGPARM breakage on x86: */ asmlinkage_protect(5, ret, which, upid, infop, options, ru); return ret; } SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr, int, options, struct rusage __user *, ru) { struct wait_opts wo; struct pid *pid = NULL; enum pid_type type; long ret; if (options & ~(WNOHANG|WUNTRACED|WCONTINUED| __WNOTHREAD|__WCLONE|__WALL)) return -EINVAL; if (upid == -1) type = PIDTYPE_MAX; else if (upid < 0) { type = PIDTYPE_PGID; pid = find_get_pid(-upid); } else if (upid == 0) { type = PIDTYPE_PGID; pid = get_task_pid(current, PIDTYPE_PGID); } else /* upid > 0 */ { type = PIDTYPE_PID; pid = find_get_pid(upid); } wo.wo_type = type; wo.wo_pid = pid; wo.wo_flags = options | WEXITED; wo.wo_info = NULL; wo.wo_stat = stat_addr; wo.wo_rusage = ru; ret = do_wait(&wo); put_pid(pid); /* avoid REGPARM breakage on x86: */ asmlinkage_protect(4, ret, upid, stat_addr, options, ru); return ret; } #ifdef __ARCH_WANT_SYS_WAITPID /* * sys_waitpid() remains for compatibility. waitpid() should be * implemented by calling sys_wait4() from libc.a. */ SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options) { return sys_wait4(pid, stat_addr, options, NULL); } #endif
gpl-2.0
puppybane/linux-cyrus
arch/x86/kernel/apic/io_apic.c
66
78968
/* * Intel IO-APIC support for multi-Pentium hosts. * * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo * * Many thanks to Stig Venaas for trying out countless experimental * patches and reporting/debugging problems patiently! * * (c) 1999, Multiple IO-APIC support, developed by * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>, * further tested and cleaned up by Zach Brown <zab@redhat.com> * and Ingo Molnar <mingo@redhat.com> * * Fixes * Maciej W. Rozycki : Bits for genuine 82489DX APICs; * thanks to Eric Gilmore * and Rolf G. Tews * for testing these extensively * Paul Diefenbaugh : Added full ACPI support * * Historical information which is worth to be preserved: * * - SiS APIC rmw bug: * * We used to have a workaround for a bug in SiS chips which * required to rewrite the index register for a read-modify-write * operation as the chip lost the index information which was * setup for the read already. We cache the data now, so that * workaround has been removed. 
*/ #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/mc146818rtc.h> #include <linux/compiler.h> #include <linux/acpi.h> #include <linux/export.h> #include <linux/syscore_ops.h> #include <linux/freezer.h> #include <linux/kthread.h> #include <linux/jiffies.h> /* time_after() */ #include <linux/slab.h> #include <linux/bootmem.h> #include <asm/irqdomain.h> #include <asm/idle.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/cpu.h> #include <asm/desc.h> #include <asm/proto.h> #include <asm/acpi.h> #include <asm/dma.h> #include <asm/timer.h> #include <asm/i8259.h> #include <asm/setup.h> #include <asm/irq_remapping.h> #include <asm/hw_irq.h> #include <asm/apic.h> #define for_each_ioapic(idx) \ for ((idx) = 0; (idx) < nr_ioapics; (idx)++) #define for_each_ioapic_reverse(idx) \ for ((idx) = nr_ioapics - 1; (idx) >= 0; (idx)--) #define for_each_pin(idx, pin) \ for ((pin) = 0; (pin) < ioapics[(idx)].nr_registers; (pin)++) #define for_each_ioapic_pin(idx, pin) \ for_each_ioapic((idx)) \ for_each_pin((idx), (pin)) #define for_each_irq_pin(entry, head) \ list_for_each_entry(entry, &head, list) static DEFINE_RAW_SPINLOCK(ioapic_lock); static DEFINE_MUTEX(ioapic_mutex); static unsigned int ioapic_dynirq_base; static int ioapic_initialized; struct irq_pin_list { struct list_head list; int apic, pin; }; struct mp_chip_data { struct list_head irq_2_pin; struct IO_APIC_route_entry entry; int trigger; int polarity; u32 count; bool isa_irq; }; struct mp_ioapic_gsi { u32 gsi_base; u32 gsi_end; }; static struct ioapic { /* * # of IRQ routing registers */ int nr_registers; /* * Saved state during suspend/resume, or while enabling intr-remap. 
*/ struct IO_APIC_route_entry *saved_registers; /* I/O APIC config */ struct mpc_ioapic mp_config; /* IO APIC gsi routing info */ struct mp_ioapic_gsi gsi_config; struct ioapic_domain_cfg irqdomain_cfg; struct irq_domain *irqdomain; struct resource *iomem_res; } ioapics[MAX_IO_APICS]; #define mpc_ioapic_ver(ioapic_idx) ioapics[ioapic_idx].mp_config.apicver int mpc_ioapic_id(int ioapic_idx) { return ioapics[ioapic_idx].mp_config.apicid; } unsigned int mpc_ioapic_addr(int ioapic_idx) { return ioapics[ioapic_idx].mp_config.apicaddr; } static inline struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic_idx) { return &ioapics[ioapic_idx].gsi_config; } static inline int mp_ioapic_pin_count(int ioapic) { struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(ioapic); return gsi_cfg->gsi_end - gsi_cfg->gsi_base + 1; } static inline u32 mp_pin_to_gsi(int ioapic, int pin) { return mp_ioapic_gsi_routing(ioapic)->gsi_base + pin; } static inline bool mp_is_legacy_irq(int irq) { return irq >= 0 && irq < nr_legacy_irqs(); } /* * Initialize all legacy IRQs and all pins on the first IOAPIC * if we have legacy interrupt controller. Kernel boot option "pirq=" * may rely on non-legacy pins on the first IOAPIC. 
*/ static inline int mp_init_irq_at_boot(int ioapic, int irq) { if (!nr_legacy_irqs()) return 0; return ioapic == 0 || mp_is_legacy_irq(irq); } static inline struct irq_domain *mp_ioapic_irqdomain(int ioapic) { return ioapics[ioapic].irqdomain; } int nr_ioapics; /* The one past the highest gsi number used */ u32 gsi_top; /* MP IRQ source entries */ struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; /* # of MP IRQ source entries */ int mp_irq_entries; #ifdef CONFIG_EISA int mp_bus_id_to_type[MAX_MP_BUSSES]; #endif DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); int skip_ioapic_setup; /** * disable_ioapic_support() - disables ioapic support at runtime */ void disable_ioapic_support(void) { #ifdef CONFIG_PCI noioapicquirk = 1; noioapicreroute = -1; #endif skip_ioapic_setup = 1; } static int __init parse_noapic(char *str) { /* disable IO-APIC */ disable_ioapic_support(); return 0; } early_param("noapic", parse_noapic); /* Will be called in mpparse/acpi/sfi codes for saving IRQ info */ void mp_save_irq(struct mpc_intsrc *m) { int i; apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," " IRQ %02x, APIC ID %x, APIC INT %02x\n", m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus, m->srcbusirq, m->dstapic, m->dstirq); for (i = 0; i < mp_irq_entries; i++) { if (!memcmp(&mp_irqs[i], m, sizeof(*m))) return; } memcpy(&mp_irqs[mp_irq_entries], m, sizeof(*m)); if (++mp_irq_entries == MAX_IRQ_SOURCES) panic("Max # of irq sources exceeded!!\n"); } static void alloc_ioapic_saved_registers(int idx) { size_t size; if (ioapics[idx].saved_registers) return; size = sizeof(struct IO_APIC_route_entry) * ioapics[idx].nr_registers; ioapics[idx].saved_registers = kzalloc(size, GFP_KERNEL); if (!ioapics[idx].saved_registers) pr_err("IOAPIC %d: suspend/resume impossible!\n", idx); } static void free_ioapic_saved_registers(int idx) { kfree(ioapics[idx].saved_registers); ioapics[idx].saved_registers = NULL; } int __init arch_early_ioapic_init(void) { int i; if 
(!nr_legacy_irqs()) io_apic_irqs = ~0UL; for_each_ioapic(i) alloc_ioapic_saved_registers(i); return 0; } struct io_apic { unsigned int index; unsigned int unused[3]; unsigned int data; unsigned int unused2[11]; unsigned int eoi; }; static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx) { return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx) + (mpc_ioapic_addr(idx) & ~PAGE_MASK); } static inline void io_apic_eoi(unsigned int apic, unsigned int vector) { struct io_apic __iomem *io_apic = io_apic_base(apic); writel(vector, &io_apic->eoi); } unsigned int native_io_apic_read(unsigned int apic, unsigned int reg) { struct io_apic __iomem *io_apic = io_apic_base(apic); writel(reg, &io_apic->index); return readl(&io_apic->data); } static void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) { struct io_apic __iomem *io_apic = io_apic_base(apic); writel(reg, &io_apic->index); writel(value, &io_apic->data); } union entry_union { struct { u32 w1, w2; }; struct IO_APIC_route_entry entry; }; static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin) { union entry_union eu; eu.w1 = io_apic_read(apic, 0x10 + 2 * pin); eu.w2 = io_apic_read(apic, 0x11 + 2 * pin); return eu.entry; } static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin) { union entry_union eu; unsigned long flags; raw_spin_lock_irqsave(&ioapic_lock, flags); eu.entry = __ioapic_read_entry(apic, pin); raw_spin_unlock_irqrestore(&ioapic_lock, flags); return eu.entry; } /* * When we write a new IO APIC routing entry, we need to write the high * word first! If the mask bit in the low word is clear, we will enable * the interrupt, and we need to make sure the entry is fully populated * before that happens. 
 */
static void __ioapic_write_entry(int apic, int pin,
				 struct IO_APIC_route_entry e)
{
	union entry_union eu = {{0, 0}};

	eu.entry = e;
	/*
	 * High word (0x11) first: the mask bit lives in the low word, so
	 * the entry must be fully populated before the low word can
	 * unmask it (see the ordering comment above this function).
	 */
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

/* Locked wrapper around __ioapic_write_entry(). */
static void ioapic_write_entry(int apic, int pin,
			       struct IO_APIC_route_entry e)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	/* All-zero entry except the mask bit: masks and clears the RTE. */
	union entry_union eu = { .entry.mask = IOAPIC_MASKED };

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	/* Low word (0x10) first here -- opposite order to the write path. */
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static int __add_pin_to_irq_node(struct mp_chip_data *data,
				 int node, int apic, int pin)
{
	struct irq_pin_list *entry;

	/* don't allow duplicates */
	for_each_irq_pin(entry, data->irq_2_pin)
		if (entry->apic == apic && entry->pin == pin)
			return 0;

	/* GFP_ATOMIC: may be called with interrupts disabled/locks held. */
	entry = kzalloc_node(sizeof(struct irq_pin_list), GFP_ATOMIC, node);
	if (!entry) {
		pr_err("can not alloc irq_pin_list (%d,%d,%d)\n",
		       node, apic, pin);
		return -ENOMEM;
	}
	entry->apic = apic;
	entry->pin = pin;

	list_add_tail(&entry->list, &data->irq_2_pin);
	return 0;
}

/* Remove one (apic, pin) mapping from @data, freeing its list node. */
static void __remove_pin_from_irq(struct mp_chip_data *data, int apic, int pin)
{
	struct irq_pin_list *tmp, *entry;

	list_for_each_entry_safe(entry, tmp, &data->irq_2_pin, list)
		if (entry->apic == apic && entry->pin == pin) {
			list_del(&entry->list);
			kfree(entry);
			return;
		}
}

/* Panicking wrapper: pin setup failure at boot is unrecoverable. */
static void add_pin_to_irq_node(struct mp_chip_data *data,
				int node, int apic, int pin)
{
	if (__add_pin_to_irq_node(data, node, apic, pin))
		panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
}

/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq_node(struct mp_chip_data *data, int node,
					   int oldapic, int oldpin,
					   int newapic, int newpin)
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, data->irq_2_pin) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			/* every one is different, right?
*/ return; } } /* old apic/pin didn't exist, so just add new ones */ add_pin_to_irq_node(data, node, newapic, newpin); } static void io_apic_modify_irq(struct mp_chip_data *data, int mask_and, int mask_or, void (*final)(struct irq_pin_list *entry)) { union entry_union eu; struct irq_pin_list *entry; eu.entry = data->entry; eu.w1 &= mask_and; eu.w1 |= mask_or; data->entry = eu.entry; for_each_irq_pin(entry, data->irq_2_pin) { io_apic_write(entry->apic, 0x10 + 2 * entry->pin, eu.w1); if (final) final(entry); } } static void io_apic_sync(struct irq_pin_list *entry) { /* * Synchronize the IO-APIC and the CPU by doing * a dummy read from the IO-APIC */ struct io_apic __iomem *io_apic; io_apic = io_apic_base(entry->apic); readl(&io_apic->data); } static void mask_ioapic_irq(struct irq_data *irq_data) { struct mp_chip_data *data = irq_data->chip_data; unsigned long flags; raw_spin_lock_irqsave(&ioapic_lock, flags); io_apic_modify_irq(data, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); raw_spin_unlock_irqrestore(&ioapic_lock, flags); } static void __unmask_ioapic(struct mp_chip_data *data) { io_apic_modify_irq(data, ~IO_APIC_REDIR_MASKED, 0, NULL); } static void unmask_ioapic_irq(struct irq_data *irq_data) { struct mp_chip_data *data = irq_data->chip_data; unsigned long flags; raw_spin_lock_irqsave(&ioapic_lock, flags); __unmask_ioapic(data); raw_spin_unlock_irqrestore(&ioapic_lock, flags); } /* * IO-APIC versions below 0x20 don't support EOI register. * For the record, here is the information about various versions: * 0Xh 82489DX * 1Xh I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant * 2Xh I/O(x)APIC which is PCI 2.2 Compliant * 30h-FFh Reserved * * Some of the Intel ICH Specs (ICH2 to ICH5) documents the io-apic * version as 0x2. This is an error with documentation and these ICH chips * use io-apic's of version 0x20. * * For IO-APIC's with EOI register, we use that to do an explicit EOI. 
* Otherwise, we simulate the EOI message manually by changing the trigger * mode to edge and then back to level, with RTE being masked during this. */ static void __eoi_ioapic_pin(int apic, int pin, int vector) { if (mpc_ioapic_ver(apic) >= 0x20) { io_apic_eoi(apic, vector); } else { struct IO_APIC_route_entry entry, entry1; entry = entry1 = __ioapic_read_entry(apic, pin); /* * Mask the entry and change the trigger mode to edge. */ entry1.mask = IOAPIC_MASKED; entry1.trigger = IOAPIC_EDGE; __ioapic_write_entry(apic, pin, entry1); /* * Restore the previous level triggered entry. */ __ioapic_write_entry(apic, pin, entry); } } static void eoi_ioapic_pin(int vector, struct mp_chip_data *data) { unsigned long flags; struct irq_pin_list *entry; raw_spin_lock_irqsave(&ioapic_lock, flags); for_each_irq_pin(entry, data->irq_2_pin) __eoi_ioapic_pin(entry->apic, entry->pin, vector); raw_spin_unlock_irqrestore(&ioapic_lock, flags); } static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) { struct IO_APIC_route_entry entry; /* Check delivery_mode to be sure we're not clearing an SMI pin */ entry = ioapic_read_entry(apic, pin); if (entry.delivery_mode == dest_SMI) return; /* * Make sure the entry is masked and re-read the contents to check * if it is a level triggered pin and if the remote-IRR is set. */ if (entry.mask == IOAPIC_UNMASKED) { entry.mask = IOAPIC_MASKED; ioapic_write_entry(apic, pin, entry); entry = ioapic_read_entry(apic, pin); } if (entry.irr) { unsigned long flags; /* * Make sure the trigger mode is set to level. Explicit EOI * doesn't clear the remote-IRR if the trigger mode is not * set to level. */ if (entry.trigger == IOAPIC_EDGE) { entry.trigger = IOAPIC_LEVEL; ioapic_write_entry(apic, pin, entry); } raw_spin_lock_irqsave(&ioapic_lock, flags); __eoi_ioapic_pin(apic, pin, entry.vector); raw_spin_unlock_irqrestore(&ioapic_lock, flags); } /* * Clear the rest of the bits in the IO-APIC RTE except for the mask * bit. 
*/ ioapic_mask_entry(apic, pin); entry = ioapic_read_entry(apic, pin); if (entry.irr) pr_err("Unable to reset IRR for apic: %d, pin :%d\n", mpc_ioapic_id(apic), pin); } static void clear_IO_APIC (void) { int apic, pin; for_each_ioapic_pin(apic, pin) clear_IO_APIC_pin(apic, pin); } #ifdef CONFIG_X86_32 /* * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to * specific CPU-side IRQs. */ #define MAX_PIRQS 8 static int pirq_entries[MAX_PIRQS] = { [0 ... MAX_PIRQS - 1] = -1 }; static int __init ioapic_pirq_setup(char *str) { int i, max; int ints[MAX_PIRQS+1]; get_options(str, ARRAY_SIZE(ints), ints); apic_printk(APIC_VERBOSE, KERN_INFO "PIRQ redirection, working around broken MP-BIOS.\n"); max = MAX_PIRQS; if (ints[0] < MAX_PIRQS) max = ints[0]; for (i = 0; i < max; i++) { apic_printk(APIC_VERBOSE, KERN_DEBUG "... PIRQ%d -> IRQ %d\n", i, ints[i+1]); /* * PIRQs are mapped upside down, usually. */ pirq_entries[MAX_PIRQS-i-1] = ints[i+1]; } return 1; } __setup("pirq=", ioapic_pirq_setup); #endif /* CONFIG_X86_32 */ /* * Saves all the IO-APIC RTE's */ int save_ioapic_entries(void) { int apic, pin; int err = 0; for_each_ioapic(apic) { if (!ioapics[apic].saved_registers) { err = -ENOMEM; continue; } for_each_pin(apic, pin) ioapics[apic].saved_registers[pin] = ioapic_read_entry(apic, pin); } return err; } /* * Mask all IO APIC entries. */ void mask_ioapic_entries(void) { int apic, pin; for_each_ioapic(apic) { if (!ioapics[apic].saved_registers) continue; for_each_pin(apic, pin) { struct IO_APIC_route_entry entry; entry = ioapics[apic].saved_registers[pin]; if (entry.mask == IOAPIC_UNMASKED) { entry.mask = IOAPIC_MASKED; ioapic_write_entry(apic, pin, entry); } } } } /* * Restore IO APIC entries which was saved in the ioapic structure. 
 */
int restore_ioapic_entries(void)
{
	int apic, pin;

	for_each_ioapic(apic) {
		if (!ioapics[apic].saved_registers)
			continue;

		for_each_pin(apic, pin)
			ioapic_write_entry(apic, pin,
					   ioapics[apic].saved_registers[pin]);
	}
	return 0;
}

/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int ioapic_idx, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].irqtype == type &&
		    (mp_irqs[i].dstapic == mpc_ioapic_id(ioapic_idx) ||
		     mp_irqs[i].dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].dstirq == pin)
			return i;

	return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
			return mp_irqs[i].dstirq;
	}
	return -1;
}

/* Find the IO-APIC index serving ISA IRQ @irq for the given MP entry type. */
static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
			break;
	}

	if (i < mp_irq_entries) {
		int ioapic_idx;

		for_each_ioapic(ioapic_idx)
			if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic)
				return ioapic_idx;
	}

	return -1;
}

#ifdef CONFIG_EISA
/*
 * EISA Edge/Level control register, ELCR
 */
static int EISA_ELCR(unsigned int irq)
{
	if (irq < nr_legacy_irqs()) {
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}

#endif

/* ISA interrupts are always active high edge triggered,
 * when listed as conforming in the MP table. */
#define default_ISA_trigger(idx)	(IOAPIC_EDGE)
#define default_ISA_polarity(idx)	(IOAPIC_POL_HIGH)

/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value.  If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR */
#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].srcbusirq))
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)

/* PCI interrupts are always active low level triggered,
 * when listed as conforming in the MP table. */
#define default_PCI_trigger(idx)	(IOAPIC_LEVEL)
#define default_PCI_polarity(idx)	(IOAPIC_POL_LOW)

/* Decode the polarity bits of MP table entry @idx. */
static int irq_polarity(int idx)
{
	int bus = mp_irqs[idx].srcbus;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].irqflag & 0x03) {
	case 0:
		/* conforms to spec, ie. bus-type dependent polarity */
		if (test_bit(bus, mp_bus_not_pci))
			return default_ISA_polarity(idx);
		else
			return default_PCI_polarity(idx);
	case 1:
		return IOAPIC_POL_HIGH;
	case 2:
		pr_warn("IOAPIC: Invalid polarity: 2, defaulting to low\n");
		/* fall through */
	case 3:
	default: /* Pointless default required due to gcc stupidity */
		return IOAPIC_POL_LOW;
	}
}

#ifdef CONFIG_EISA
/* Resolve the trigger mode for a (possibly EISA) bus via the ELCR. */
static int eisa_irq_trigger(int idx, int bus, int trigger)
{
	switch (mp_bus_id_to_type[bus]) {
	case MP_BUS_PCI:
	case MP_BUS_ISA:
		return trigger;
	case MP_BUS_EISA:
		return default_EISA_trigger(idx);
	}
	pr_warn("IOAPIC: Invalid srcbus: %d defaulting to level\n", bus);
	return IOAPIC_LEVEL;
}
#else
static inline int eisa_irq_trigger(int idx, int bus, int trigger)
{
	return trigger;
}
#endif

/* Decode the trigger-mode bits of MP table entry @idx. */
static int irq_trigger(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].irqflag >> 2) & 0x03) {
	case 0:
		/* conforms to spec, ie.
		   bus-type dependent trigger mode */
		if (test_bit(bus, mp_bus_not_pci))
			trigger = default_ISA_trigger(idx);
		else
			trigger = default_PCI_trigger(idx);
		/* Take EISA into account */
		return eisa_irq_trigger(idx, bus, trigger);
	case 1:
		return IOAPIC_EDGE;
	case 2:
		pr_warn("IOAPIC: Invalid trigger mode 2 defaulting to level\n");
		/* fall through */
	case 3:
	default: /* Pointless default required due to gcc stupidity */
		return IOAPIC_LEVEL;
	}
}

/* Fill @info with IO-APIC allocation attributes for irqdomain allocation. */
void ioapic_set_alloc_attr(struct irq_alloc_info *info, int node,
			   int trigger, int polarity)
{
	init_irq_alloc_info(info, NULL);
	info->type = X86_IRQ_ALLOC_TYPE_IOAPIC;
	info->ioapic_node = node;
	info->ioapic_trigger = trigger;
	info->ioapic_polarity = polarity;
	info->ioapic_valid = 1;
}

#ifndef CONFIG_ACPI
int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
#endif

/*
 * Copy allocation attributes from @src (may be NULL) into @dst.  When the
 * caller supplied no attributes, fall back to ACPI interrupt overrides and
 * finally to PCI defaults (level/low).
 */
static void ioapic_copy_alloc_attr(struct irq_alloc_info *dst,
				   struct irq_alloc_info *src,
				   u32 gsi, int ioapic_idx, int pin)
{
	int trigger, polarity;

	copy_irq_alloc_info(dst, src);
	dst->type = X86_IRQ_ALLOC_TYPE_IOAPIC;
	dst->ioapic_id = mpc_ioapic_id(ioapic_idx);
	dst->ioapic_pin = pin;
	dst->ioapic_valid = 1;
	if (src && src->ioapic_valid) {
		dst->ioapic_node = src->ioapic_node;
		dst->ioapic_trigger = src->ioapic_trigger;
		dst->ioapic_polarity = src->ioapic_polarity;
	} else {
		dst->ioapic_node = NUMA_NO_NODE;
		if (acpi_get_override_irq(gsi, &trigger, &polarity) >= 0) {
			dst->ioapic_trigger = trigger;
			dst->ioapic_polarity = polarity;
		} else {
			/*
			 * PCI interrupts are always active low level
			 * triggered.
			 */
			dst->ioapic_trigger = IOAPIC_LEVEL;
			dst->ioapic_polarity = IOAPIC_POL_LOW;
		}
	}
}

/* NUMA node requested by @info, or NUMA_NO_NODE when unspecified. */
static int ioapic_alloc_attr_node(struct irq_alloc_info *info)
{
	return (info && info->ioapic_valid) ? info->ioapic_node : NUMA_NO_NODE;
}

/* Install the flow handler matching the trigger mode (level -> fasteoi). */
static void mp_register_handler(unsigned int irq, unsigned long trigger)
{
	irq_flow_handler_t hdl;
	bool fasteoi;

	if (trigger) {
		irq_set_status_flags(irq, IRQ_LEVEL);
		fasteoi = true;
	} else {
		irq_clear_status_flags(irq, IRQ_LEVEL);
		fasteoi = false;
	}

	hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
	__irq_set_handler(irq, hdl, 0, fasteoi ? "fasteoi" : "edge");
}

/*
 * Check whether @info's trigger/polarity matches what is already programmed
 * for @irq; the first real user of a legacy IRQ may still reprogram it.
 */
static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
{
	struct mp_chip_data *data = irq_get_chip_data(irq);

	/*
	 * setup_IO_APIC_irqs() programs all legacy IRQs with default trigger
	 * and polarity attributes. So allow the first user to reprogram the
	 * pin with real trigger and polarity attributes.
	 */
	if (irq < nr_legacy_irqs() && data->count == 1) {
		if (info->ioapic_trigger != data->trigger)
			mp_register_handler(irq, info->ioapic_trigger);
		data->entry.trigger = data->trigger = info->ioapic_trigger;
		data->entry.polarity = data->polarity = info->ioapic_polarity;
	}

	return data->trigger == info->ioapic_trigger &&
	       data->polarity == info->ioapic_polarity;
}

/* Allocate a Linux IRQ for @gsi from @domain per the domain's mapping type. */
static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi,
				 struct irq_alloc_info *info)
{
	bool legacy = false;
	int irq = -1;
	int type = ioapics[ioapic].irqdomain_cfg.type;

	switch (type) {
	case IOAPIC_DOMAIN_LEGACY:
		/*
		 * Dynamically allocate IRQ number for non-ISA IRQs in the first
		 * 16 GSIs on some weird platforms.
		 */
		if (!ioapic_initialized || gsi >= nr_legacy_irqs())
			irq = gsi;
		legacy = mp_is_legacy_irq(irq);
		break;
	case IOAPIC_DOMAIN_STRICT:
		irq = gsi;
		break;
	case IOAPIC_DOMAIN_DYNAMIC:
		break;
	default:
		WARN(1, "ioapic: unknown irqdomain type %d\n", type);
		return -1;
	}

	return __irq_domain_alloc_irqs(domain, irq, 1,
				       ioapic_alloc_attr_node(info),
				       info, legacy, NULL);
}

/*
 * Need special handling for ISA IRQs because there may be multiple IOAPIC pins
 * sharing the same ISA IRQ number and irqdomain only supports 1:1 mapping
 * between IOAPIC pin and IRQ number. A typical IOAPIC has 24 pins, pin 0-15 are
 * used for legacy IRQs and pin 16-23 are used for PCI IRQs (PIRQ A-H).
 * When ACPI is disabled, only legacy IRQ numbers (IRQ0-15) are available, and
 * some BIOSes may use MP Interrupt Source records to override IRQ numbers for
 * PIRQs instead of reprogramming the interrupt routing logic.
 * Thus there may be
 * multiple pins sharing the same legacy IRQ number when ACPI is disabled.
 */
static int alloc_isa_irq_from_domain(struct irq_domain *domain, int irq,
				     int ioapic, int pin,
				     struct irq_alloc_info *info)
{
	struct mp_chip_data *data;
	struct irq_data *irq_data = irq_get_irq_data(irq);
	int node = ioapic_alloc_attr_node(info);

	/*
	 * Legacy ISA IRQ has already been allocated, just add pin to
	 * the pin list associated with this IRQ and program the IOAPIC
	 * entry.
	 */
	if (irq_data && irq_data->parent_data) {
		if (!mp_check_pin_attr(irq, info))
			return -EBUSY;
		if (__add_pin_to_irq_node(irq_data->chip_data, node, ioapic,
					  info->ioapic_pin))
			return -ENOMEM;
	} else {
		irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true,
					      NULL);
		if (irq >= 0) {
			irq_data = irq_domain_get_irq_data(domain, irq);
			data = irq_data->chip_data;
			data->isa_irq = true;
		}
	}

	return irq;
}

/*
 * Translate (ioapic, pin) into a Linux IRQ number, allocating one when
 * IOAPIC_MAP_ALLOC is set.  @idx is the MP-table entry index (or -1) used
 * to recognise ISA IRQs which need the shared-pin handling above.
 */
static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin,
			     unsigned int flags, struct irq_alloc_info *info)
{
	int irq;
	bool legacy = false;
	struct irq_alloc_info tmp;
	struct mp_chip_data *data;
	struct irq_domain *domain = mp_ioapic_irqdomain(ioapic);

	if (!domain)
		return -ENOSYS;

	if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].srcbusirq;
		legacy = mp_is_legacy_irq(irq);
	}

	mutex_lock(&ioapic_mutex);
	if (!(flags & IOAPIC_MAP_ALLOC)) {
		if (!legacy) {
			irq = irq_find_mapping(domain, pin);
			if (irq == 0)
				irq = -ENOENT;
		}
	} else {
		ioapic_copy_alloc_attr(&tmp, info, gsi, ioapic, pin);
		if (legacy)
			irq = alloc_isa_irq_from_domain(domain, irq,
							ioapic, pin, &tmp);
		else if ((irq = irq_find_mapping(domain, pin)) == 0)
			irq = alloc_irq_from_domain(domain, ioapic, gsi, &tmp);
		else if (!mp_check_pin_attr(irq, &tmp))
			irq = -EBUSY;
		if (irq >= 0) {
			data = irq_get_chip_data(irq);
			data->count++;
		}
	}
	mutex_unlock(&ioapic_mutex);

	return irq;
}

static int pin_2_irq(int idx, int ioapic, int pin, unsigned int flags)
{
	u32 gsi = mp_pin_to_gsi(ioapic, pin);

	/*
	 * Debugging
 * Not an __init, possibly needed by modules
 */
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
	int irq, i, best_ioapic = -1, best_idx = -1;

	apic_printk(APIC_DEBUG,
		    "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE,
			    "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;
		int ioapic_idx, found = 0;

		/* srcbusirq encodes slot in bits 2-6 and INTx pin in bits 0-1 */
		if (bus != lbus || mp_irqs[i].irqtype != mp_INT ||
		    slot != ((mp_irqs[i].srcbusirq >> 2) & 0x1f))
			continue;

		for_each_ioapic(ioapic_idx)
			if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic ||
			    mp_irqs[i].dstapic == MP_APIC_ALL) {
				found = 1;
				break;
			}
		if (!found)
			continue;

		/* Skip ISA IRQs */
		irq = pin_2_irq(i, ioapic_idx, mp_irqs[i].dstirq, 0);
		if (irq > 0 && !IO_APIC_IRQ(irq))
			continue;

		if (pin == (mp_irqs[i].srcbusirq & 3)) {
			best_idx = i;
			best_ioapic = ioapic_idx;
			goto out;
		}

		/*
		 * Use the first all-but-pin matching entry as a
		 * best-guess fuzzy result for broken mptables.
		 */
		if (best_idx < 0) {
			best_idx = i;
			best_ioapic = ioapic_idx;
		}
	}
	if (best_idx < 0)
		return -1;

out:
	return pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq,
			 IOAPIC_MAP_ALLOC);
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);

static struct irq_chip ioapic_chip, ioapic_ir_chip;

#ifdef CONFIG_X86_32
/* Return the trigger mode for @irq from the MP table; 0 (edge) if unknown. */
static inline int IO_APIC_irq_trigger(int irq)
{
	int apic, idx, pin;

	for_each_ioapic_pin(apic, pin) {
		idx = find_irq_entry(apic, pin, mp_INT);
		if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin, 0)))
			return irq_trigger(idx);
	}
	/*
	 * nonexistent IRQs are edge default
	 */
	return 0;
}
#else
static inline int IO_APIC_irq_trigger(int irq)
{
	return 1;
}
#endif

/* Walk all IO-APIC pins and establish pin->IRQ mappings from the MP table. */
static void __init setup_IO_APIC_irqs(void)
{
	unsigned int ioapic, pin;
	int idx;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for_each_ioapic_pin(ioapic, pin) {
		idx = find_irq_entry(ioapic, pin, mp_INT);
		if (idx < 0)
			apic_printk(APIC_VERBOSE,
				    KERN_DEBUG " apic %d pin %d not connected\n",
				    mpc_ioapic_id(ioapic), pin);
		else
			pin_2_irq(idx, ioapic, pin,
				  ioapic ? 0 : IOAPIC_MAP_ALLOC);
	}
}

/* Re-initialize ioapic_lock; used when the lock state may be garbage (kexec/crash paths). */
void ioapic_zap_locks(void)
{
	raw_spin_lock_init(&ioapic_lock);
}

/* Dump the redirection table entries 0..@nr_entries (inclusive: @nr_entries is the max index). */
static void io_apic_print_entries(unsigned int apic, unsigned int nr_entries)
{
	int i;
	char buf[256];
	struct IO_APIC_route_entry entry;
	struct IR_IO_APIC_route_entry *ir_entry = (void *)&entry;

	printk(KERN_DEBUG "IOAPIC %d:\n", apic);
	for (i = 0; i <= nr_entries; i++) {
		entry = ioapic_read_entry(apic, i);
		snprintf(buf, sizeof(buf),
			 " pin%02x, %s, %s, %s, V(%02X), IRR(%1d), S(%1d)",
			 i,
			 entry.mask == IOAPIC_MASKED ? "disabled" : "enabled ",
			 entry.trigger == IOAPIC_LEVEL ? "level" : "edge ",
			 entry.polarity == IOAPIC_POL_LOW ? "low " : "high",
			 entry.vector, entry.irr, entry.delivery_status);
		if (ir_entry->format)
			/*
			 * NOTE(review): 'index' appears in both halves of the
			 * printed value here; upstream combines index2 and
			 * index — verify against the IR_IO_APIC_route_entry
			 * definition.
			 */
			printk(KERN_DEBUG "%s, remapped, I(%04X), Z(%X)\n",
			       buf,
			       (ir_entry->index << 15) | ir_entry->index,
			       ir_entry->zero);
		else
			printk(KERN_DEBUG "%s, %s, D(%02X), M(%1d)\n",
			       buf,
			       entry.dest_mode == IOAPIC_DEST_MODE_LOGICAL ?
			       "logical " : "physical",
			       entry.dest, entry.delivery_mode);
	}
}

/* Dump one IO-APIC's ID/version registers and its redirection table. */
static void __init print_IO_APIC(int ioapic_idx)
{
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	union IO_APIC_reg_03 reg_03;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic_idx, 0);
	reg_01.raw = io_apic_read(ioapic_idx, 1);
	if (reg_01.bits.version >= 0x10)
		reg_02.raw = io_apic_read(ioapic_idx, 2);
	if (reg_01.bits.version >= 0x20)
		reg_03.raw = io_apic_read(ioapic_idx, 3);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx));
	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
	printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
	printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
	printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);

	printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
	printk(KERN_DEBUG "....... : max redirection entries: %02X\n", reg_01.bits.entries);
	printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
	printk(KERN_DEBUG "....... : IO APIC version: %02X\n", reg_01.bits.version);

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
	 * but the value of reg_02 is read as the previous read register
	 * value, so ignore it if reg_02 == reg_01.
	 */
	if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
		printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
	}

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
	 * or reg_03, but the value of reg_0[23] is read as the previous read
	 * register value, so ignore it if reg_03 == reg_0[12].
	 */
	if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
	    reg_03.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
		printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
	}

	printk(KERN_DEBUG ".... IRQ redirection table:\n");
	io_apic_print_entries(ioapic_idx, reg_01.bits.entries);
}

/* Dump every IO-APIC plus the IRQ-to-pin mapping of every active IRQ. */
void __init print_IO_APICs(void)
{
	int ioapic_idx;
	unsigned int irq;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for_each_ioapic(ioapic_idx)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mpc_ioapic_id(ioapic_idx),
		       ioapics[ioapic_idx].nr_registers);

	/*
	 * We are a bit conservative about what we expect. We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for_each_ioapic(ioapic_idx)
		print_IO_APIC(ioapic_idx);

	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for_each_active_irq(irq) {
		struct irq_pin_list *entry;
		struct irq_chip *chip;
		struct mp_chip_data *data;

		chip = irq_get_chip(irq);
		if (chip != &ioapic_chip && chip != &ioapic_ir_chip)
			continue;
		data = irq_get_chip_data(irq);
		if (!data)
			continue;
		if (list_empty(&data->irq_2_pin))
			continue;

		printk(KERN_DEBUG "IRQ%d ", irq);
		for_each_irq_pin(entry, data->irq_2_pin)
			pr_cont("-> %d:%d", entry->apic, entry->pin);
		pr_cont("\n");
	}

	printk(KERN_INFO ".................................... done.\n");
}

/* Where if anywhere is the i8259 connect in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

/* Locate the i8259 ExtINT pin (hardware first, MP table fallback) and reset all RTEs. */
void __init enable_IO_APIC(void)
{
	int i8259_apic, i8259_pin;
	int apic, pin;

	if (skip_ioapic_setup)
		nr_ioapics = 0;

	if (!nr_legacy_irqs() || !nr_ioapics)
		return;

	for_each_ioapic_pin(apic, pin) {
		/* See if any of the pins is in ExtINT mode */
		struct IO_APIC_route_entry entry = ioapic_read_entry(apic, pin);

		/* If the interrupt line is enabled and in ExtInt mode
		 * I have found the pin where the i8259 is connected.
		 */
		if ((entry.mask == 0) &&
		    (entry.delivery_mode == dest_ExtINT)) {
			ioapic_i8259.apic = apic;
			ioapic_i8259.pin = pin;
			goto found_i8259;
		}
	}
found_i8259:

	/* Look to see what if the MP table has reported the ExtINT */
	/* If we could not find the appropriate pin by looking at the ioapic
	 * the i8259 probably is not connected the ioapic but give the
	 * mptable a chance anyway.
	 */
	i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) ||
	     (ioapic_i8259.pin != i8259_pin)) &&
	    (i8259_pin >= 0) && (ioapic_i8259.pin >= 0)) {
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}

void native_disable_io_apic(void)
{
	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 */
	if (ioapic_i8259.pin != -1) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask = IOAPIC_UNMASKED;
		entry.trigger = IOAPIC_EDGE;
		entry.polarity = IOAPIC_POL_HIGH;
		entry.dest_mode = IOAPIC_DEST_MODE_PHYSICAL;
		entry.delivery_mode = dest_ExtINT;
		entry.dest = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	if (boot_cpu_has(X86_FEATURE_APIC) || apic_from_smp_config())
		disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}

/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	if (!nr_legacy_irqs())
		return;

	x86_io_apic_ops.disable();
}

#ifdef CONFIG_X86_32
/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
 */
void __init setup_ioapic_ids_from_mpc_nocheck(void)
{
	union IO_APIC_reg_00 reg_00;
	physid_mask_t phys_id_present_map;
	int ioapic_idx;
	int i;
	unsigned char old_id;
	unsigned long flags;

	/*
	 * This is broken; anything with a real cpu count has to
	 * circumvent this idiocy regardless.
	 */
	apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);

	/*
	 * Set the IOAPIC ID to the value stored in the MPC table.
	 */
	for_each_ioapic(ioapic_idx) {
		/* Read the register 0 value */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(ioapic_idx, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		old_id = mpc_ioapic_id(ioapic_idx);

		if (mpc_ioapic_id(ioapic_idx) >= get_physical_broadcast()) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
				ioapic_idx, mpc_ioapic_id(ioapic_idx));
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				reg_00.bits.ID);
			ioapics[ioapic_idx].mp_config.apicid = reg_00.bits.ID;
		}

		/*
		 * Sanity check, is the ID really free?
		 * Every APIC in a
		 * system must have a unique ID or we get lots of nice
		 * 'stuck on smp_invalidate_needed IPI wait' messages.
		 */
		if (apic->check_apicid_used(&phys_id_present_map,
					    mpc_ioapic_id(ioapic_idx))) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
				ioapic_idx, mpc_ioapic_id(ioapic_idx));
			/* pick the lowest free physical APIC ID instead */
			for (i = 0; i < get_physical_broadcast(); i++)
				if (!physid_isset(i, phys_id_present_map))
					break;
			if (i >= get_physical_broadcast())
				panic("Max APIC ID exceeded!\n");
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", i);
			physid_set(i, phys_id_present_map);
			ioapics[ioapic_idx].mp_config.apicid = i;
		} else {
			physid_mask_t tmp;

			apic->apicid_to_cpu_present(mpc_ioapic_id(ioapic_idx),
						    &tmp);
			apic_printk(APIC_VERBOSE, "Setting %d in the "
					"phys_id_present_map\n",
					mpc_ioapic_id(ioapic_idx));
			physids_or(phys_id_present_map, phys_id_present_map, tmp);
		}

		/*
		 * We need to adjust the IRQ routing table
		 * if the ID changed.
		 */
		if (old_id != mpc_ioapic_id(ioapic_idx))
			for (i = 0; i < mp_irq_entries; i++)
				if (mp_irqs[i].dstapic == old_id)
					mp_irqs[i].dstapic
						= mpc_ioapic_id(ioapic_idx);

		/*
		 * Update the ID register according to the right value
		 * from the MPC table if they are different.
		 */
		if (mpc_ioapic_id(ioapic_idx) == reg_00.bits.ID)
			continue;

		apic_printk(APIC_VERBOSE, KERN_INFO
			"...changing IO-APIC physical APIC ID to %d ...",
			mpc_ioapic_id(ioapic_idx));

		reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic_idx, 0, reg_00.raw);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/*
		 * Sanity check
		 */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(ioapic_idx, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
		if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx))
			pr_cont("could not set ID!\n");
		else
			apic_printk(APIC_VERBOSE, " ok.\n");
	}
}

void __init setup_ioapic_ids_from_mpc(void)
{
	if (acpi_ioapic)
		return;
	/*
	 * Don't check I/O APIC IDs for xAPIC systems. They have
	 * no meaning without the serial APIC bus.
	 */
	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		|| APIC_XAPIC(boot_cpu_apic_version))
		return;
	setup_ioapic_ids_from_mpc_nocheck();
}
#endif

int no_timer_check __initdata;

/* Parse "no_timer_check": skip the timer_irq_works() verification below. */
static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);

/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	if (no_timer_check)
		return 1;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode. Also the local APIC
	 * might have cached one ExtINT interrupt. Finally, at
	 * least one tick may be lost due to delays.
	 */

	/* jiffies wrap? */
	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}

/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed but this is now handled in the device
 * independent code.
 */

/*
 * Starting up a edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need
 * return 1 to indicate that is was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */
static unsigned int startup_ioapic_irq(struct irq_data *data)
{
	int was_pending = 0, irq = data->irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < nr_legacy_irqs()) {
		legacy_pic->mask(irq);
		if (legacy_pic->irq_pending(irq))
			was_pending = 1;
	}
	__unmask_ioapic(data->chip_data);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}

/* Count of spurious level-as-edge deliveries, exported via /proc. */
atomic_t irq_mis_count;

#ifdef CONFIG_GENERIC_PENDING_IRQ
/* True if any pin of @data still has its Remote IRR bit set. */
static bool io_apic_level_ack_pending(struct mp_chip_data *data)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, data->irq_2_pin) {
		unsigned int reg;
		int pin;

		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			raw_spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}

static inline bool ioapic_irqd_mask(struct irq_data *data)
{
	/* If we are moving the irq we need to mask it */
	if (unlikely(irqd_is_setaffinity_pending(data))) {
		mask_ioapic_irq(data);
		return true;
	}
	return false;
}

static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
{
	if (unlikely(masked)) {
		/* Only migrate the irq if the ack has been received.
		 *
		 * On rare occasions the broadcast level triggered ack gets
		 * delayed going to ioapics, and if we reprogram the
		 * vector while Remote IRR is still set the irq will never
		 * fire again.
		 *
		 * To prevent this scenario we read the Remote IRR bit
		 * of the ioapic. This has two effects.
		 * - On any sane system the read of the ioapic will
		 *   flush writes (and acks) going to the ioapic from
		 *   this cpu.
		 * - We get to see if the ACK has actually been delivered.
		 *
		 * Based on failed experiments of reprogramming the
		 * ioapic entry from outside of irq context starting
		 * with masking the ioapic entry and then polling until
		 * Remote IRR was clear before reprogramming the
		 * ioapic I don't trust the Remote IRR bit to be
		 * completely accurate.
		 *
		 * However there appears to be no other way to plug
		 * this race, so if the Remote IRR bit is not
		 * accurate and is causing problems then it is a hardware bug
		 * and you can go talk to the chipset vendor about it.
		 */
		if (!io_apic_level_ack_pending(data->chip_data))
			irq_move_masked_irq(data);
		unmask_ioapic_irq(data);
	}
}
#else
static inline bool ioapic_irqd_mask(struct irq_data *data)
{
	return false;
}
static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
{
}
#endif

static void ioapic_ack_level(struct irq_data *irq_data)
{
	struct irq_cfg *cfg = irqd_cfg(irq_data);
	unsigned long v;
	bool masked;
	int i;

	irq_complete_move(cfg);
	masked = ioapic_irqd_mask(irq_data);

	/*
	 * It appears there is an erratum which affects at least version 0x11
	 * of I/O APIC (that's the 82093AA and cores integrated into various
	 * chipsets). Under certain conditions a level-triggered interrupt is
	 * erroneously delivered as edge-triggered one but the respective IRR
	 * bit gets set nevertheless. As a result the I/O unit expects an EOI
	 * message but it will never arrive and further interrupts are blocked
	 * from the source. The exact reason is so far unknown, but the
	 * phenomenon was observed when two consecutive interrupt requests
	 * from a given source get delivered to the same CPU and the source is
	 * temporarily disabled in between.
	 *
	 * A workaround is to simulate an EOI message manually. We achieve it
	 * by setting the trigger mode to edge and then to level when the edge
	 * trigger mode gets detected in the TMR of a local APIC for a
	 * level-triggered interrupt. We mask the source for the time of the
	 * operation to prevent an edge-triggered interrupt escaping meanwhile.
	 * The idea is from Manfred Spraul. --macro
	 *
	 * Also in the case when cpu goes offline, fixup_irqs() will forward
	 * any unhandled interrupt on the offlined cpu to the new cpu
	 * destination that is handling the corresponding interrupt. This
	 * interrupt forwarding is done via IPI's. Hence, in this case also
	 * level-triggered io-apic interrupt will be seen as an edge
	 * interrupt in the IRR. And we can't rely on the cpu's EOI
	 * to be broadcasted to the IO-APIC's which will clear the remoteIRR
	 * corresponding to the level-triggered interrupt. Hence on IO-APIC's
	 * supporting EOI register, we do an explicit EOI to clear the
	 * remote IRR and on IO-APIC's which don't have an EOI register,
	 * we use the above logic (mask+edge followed by unmask+level) from
	 * Manfred Spraul to clear the remote IRR.
	 */
	i = cfg->vector;
	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));

	/*
	 * We must acknowledge the irq before we move it or the acknowledge will
	 * not propagate properly.
	 */
	ack_APIC_irq();

	/*
	 * Tail end of clearing remote IRR bit (either by delivering the EOI
	 * message via io-apic EOI register write or simulating it using
	 * mask+edge followed by unmask+level logic) manually when the
	 * level triggered interrupt is seen as the edge triggered interrupt
	 * at the cpu.
	 */
	if (!(v & (1 << (i & 0x1f)))) {
		atomic_inc(&irq_mis_count);
		eoi_ioapic_pin(cfg->vector, irq_data->chip_data);
	}

	ioapic_irqd_unmask(irq_data, masked);
}

static void ioapic_ir_ack_level(struct irq_data *irq_data)
{
	struct mp_chip_data *data = irq_data->chip_data;

	/*
	 * Intr-remapping uses pin number as the virtual vector
	 * in the RTE. Actual vector is programmed in
	 * intr-remapping table entry. Hence for the io-apic
	 * EOI we use the pin number.
	 */
	ack_APIC_irq();
	eoi_ioapic_pin(data->entry.vector, data);
}

/*
 * Set the affinity via the parent (vector) domain and rewrite every RTE
 * with the new destination/vector on success.
 */
static int ioapic_set_affinity(struct irq_data *irq_data,
			       const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	struct mp_chip_data *data = irq_data->chip_data;
	struct irq_pin_list *entry;
	struct irq_cfg *cfg;
	unsigned long flags;
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	raw_spin_lock_irqsave(&ioapic_lock, flags);
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
		cfg = irqd_cfg(irq_data);
		data->entry.dest = cfg->dest_apicid;
		data->entry.vector = cfg->vector;
		for_each_irq_pin(entry, data->irq_2_pin)
			__ioapic_write_entry(entry->apic, entry->pin,
					     data->entry);
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return ret;
}

static struct irq_chip ioapic_chip __read_mostly = {
	.name			= "IO-APIC",
	.irq_startup		= startup_ioapic_irq,
	.irq_mask		= mask_ioapic_irq,
	.irq_unmask		= unmask_ioapic_irq,
	.irq_ack		= irq_chip_ack_parent,
	.irq_eoi		= ioapic_ack_level,
	.irq_set_affinity	= ioapic_set_affinity,
	.flags			= IRQCHIP_SKIP_SET_WAKE,
};

static struct irq_chip ioapic_ir_chip __read_mostly = {
	.name			= "IR-IO-APIC",
	.irq_startup		= startup_ioapic_irq,
	.irq_mask		= mask_ioapic_irq,
	.irq_unmask		= unmask_ioapic_irq,
	.irq_ack		= irq_chip_ack_parent,
	.irq_eoi		= ioapic_ir_ack_level,
	.irq_set_affinity	= ioapic_set_affinity,
	.flags			= IRQCHIP_SKIP_SET_WAKE,
};

/* Route IO-APIC capable IRQs without a vector to the 8259 (or nothing). */
static inline void init_IO_APIC_traps(void)
{
	struct irq_cfg *cfg;
	unsigned int irq;

	for_each_active_irq(irq) {
		cfg = irq_cfg(irq);
		if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < nr_legacy_irqs())
				legacy_pic->make_irq(irq);
			else
				/* Strange. Oh, well.. */
				irq_set_chip(irq, &no_irq_chip);
		}
	}
}

/*
 * The local APIC irq-chip implementation:
 */

static void mask_lapic_irq(struct irq_data *data)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}

static void unmask_lapic_irq(struct irq_data *data)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}

static void ack_lapic_irq(struct irq_data *data)
{
	ack_APIC_irq();
}

static struct irq_chip lapic_chip __read_mostly = {
	.name		= "local-APIC",
	.irq_mask	= mask_lapic_irq,
	.irq_unmask	= unmask_lapic_irq,
	.irq_ack	= ack_lapic_irq,
};

/* Attach the local-APIC chip with an edge handler to @irq. */
static void lapic_register_intr(int irq)
{
	irq_clear_status_flags(irq, IRQ_LEVEL);
	irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
}

/*
 * This looks a bit hackish but it's about the only one way of sending
 * a few INTA cycles to 8259As and any associated glue logic. ICR does
 * not support the ExtINT mode, unfortunately. We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA.
--macro */
static inline void __init unlock_ExtINT_logic(void)
{
	int apic, pin, i;
	struct IO_APIC_route_entry entry0, entry1;
	unsigned char save_control, save_freq_select;

	/* The RTC interrupt (ISA IRQ 8) is used as the INTA generator. */
	pin = find_isa_irq_pin(8, mp_INT);
	if (pin == -1) {
		WARN_ON_ONCE(1);
		return;
	}
	apic = find_isa_irq_apic(8, mp_INT);
	if (apic == -1) {
		WARN_ON_ONCE(1);
		return;
	}

	/* Save the pin's routing entry and park the pin while we fiddle. */
	entry0 = ioapic_read_entry(apic, pin);
	clear_IO_APIC_pin(apic, pin);

	/* Temporarily route the RTC pin as ExtINT to this CPU. */
	memset(&entry1, 0, sizeof(entry1));

	entry1.dest_mode = IOAPIC_DEST_MODE_PHYSICAL;
	entry1.mask = IOAPIC_UNMASKED;
	entry1.dest = hard_smp_processor_id();
	entry1.delivery_mode = dest_ExtINT;
	entry1.polarity = entry0.polarity;
	entry1.trigger = IOAPIC_EDGE;
	entry1.vector = 0;

	ioapic_write_entry(apic, pin, entry1);

	/* Program the RTC to fire periodic interrupts for a short while. */
	save_control = CMOS_READ(RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
		   RTC_FREQ_SELECT);
	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

	i = 100;
	while (i-- > 0) {
		mdelay(10);
		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
			i -= 10;
	}

	/* Restore the RTC and the original IO-APIC routing entry. */
	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	clear_IO_APIC_pin(apic, pin);

	ioapic_write_entry(apic, pin, entry0);
}

static int disable_timer_pin_1 __initdata;
/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
static int __init disable_timer_pin_setup(char *arg)
{
	disable_timer_pin_1 = 1;
	return 0;
}
early_param("disable_timer_pin_1", disable_timer_pin_setup);

/*
 * Allocate an irqdomain mapping for the timer pin; returns the Linux
 * IRQ number or -1 when the IO-APIC has no irqdomain.
 */
static int mp_alloc_timer_irq(int ioapic, int pin)
{
	int irq = -1;
	struct irq_domain *domain = mp_ioapic_irqdomain(ioapic);

	if (domain) {
		struct irq_alloc_info info;

		ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 0, 0);
		info.ioapic_id = mpc_ioapic_id(ioapic);
		info.ioapic_pin = pin;
		mutex_lock(&ioapic_mutex);
		irq = alloc_isa_irq_from_domain(domain, 0, ioapic, pin, &info);
		mutex_unlock(&ioapic_mutex);
	}

	return irq;
}

/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs.
Fortunately only the timer IRQ
 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
 * fanatically on his truly buggy board.
 *
 * FIXME: really need to revamp this for all platforms.
 */
static inline void __init check_timer(void)
{
	struct irq_data *irq_data = irq_get_irq_data(0);
	struct mp_chip_data *data = irq_data->chip_data;
	struct irq_cfg *cfg = irqd_cfg(irq_data);
	int node = cpu_to_node(0);
	int apic1, pin1, apic2, pin2;
	unsigned long flags;
	int no_pin1 = 0;

	local_irq_save(flags);

	/*
	 * get/set the timer IRQ vector:
	 */
	legacy_pic->mask(0);

	/*
	 * As IRQ0 is to be enabled in the 8259A, the virtual
	 * wire has to be disabled in the local APIC.  Also
	 * timer interrupts need to be acknowledged manually in
	 * the 8259A for the i82489DX when using the NMI
	 * watchdog as that APIC treats NMIs as level-triggered.
	 * The AEOI mode will finish them in the 8259A
	 * automatically.
	 */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
	legacy_pic->init(1);

	pin1  = find_isa_irq_pin(0, mp_INT);
	apic1 = find_isa_irq_apic(0, mp_INT);
	pin2  = ioapic_i8259.pin;
	apic2 = ioapic_i8259.apic;

	apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
		    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
		    cfg->vector, apic1, pin1, apic2, pin2);

	/*
	 * Some BIOS writers are clueless and report the ExtINTA
	 * I/O APIC input from the cascaded 8259A as the timer
	 * interrupt input.  So just in case, if only one pin
	 * was found above, try it both directly and through the
	 * 8259A.
	 */
	if (pin1 == -1) {
		panic_if_irq_remap("BIOS bug: timer not connected to IO-APIC");
		pin1 = pin2;
		apic1 = apic2;
		no_pin1 = 1;
	} else if (pin2 == -1) {
		pin2 = pin1;
		apic2 = apic1;
	}

	if (pin1 != -1) {
		/* Ok, does IRQ0 through the IOAPIC work? */
		if (no_pin1) {
			mp_alloc_timer_irq(apic1, pin1);
		} else {
			/*
			 * for edge trigger, it's already unmasked,
			 * so only need to unmask if it is level-trigger
			 * do we really have level trigger timer?
			 */
			int idx;

			idx = find_irq_entry(apic1, pin1, mp_INT);
			if (idx != -1 && irq_trigger(idx))
				unmask_ioapic_irq(irq_get_chip_data(0));
		}
		irq_domain_activate_irq(irq_data);
		if (timer_irq_works()) {
			if (disable_timer_pin_1 > 0)
				clear_IO_APIC_pin(0, pin1);
			goto out;
		}
		panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC");
		local_irq_disable();
		clear_IO_APIC_pin(apic1, pin1);
		if (!no_pin1)
			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
				    "8254 timer not connected to IO-APIC\n");

		/* Second attempt: route the timer through the 8259A cascade. */
		apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
			    "(IRQ0) through the 8259A ...\n");
		apic_printk(APIC_QUIET, KERN_INFO
			    "..... (found apic %d pin %d) ...\n", apic2, pin2);
		/*
		 * legacy devices should be connected to IO APIC #0
		 */
		replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
		irq_domain_activate_irq(irq_data);
		legacy_pic->unmask(0);
		if (timer_irq_works()) {
			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
			goto out;
		}
		/*
		 * Cleanup, just in case ...
		 */
		local_irq_disable();
		legacy_pic->mask(0);
		clear_IO_APIC_pin(apic2, pin2);
		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
	}

	/* Third attempt: deliver the timer via the local APIC LVT0 entry. */
	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as Virtual Wire IRQ...\n");

	lapic_register_intr(0);
	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
	legacy_pic->unmask(0);

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	local_irq_disable();
	legacy_pic->mask(0);
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
	apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");

	/* Last resort: old-style ExtINT delivery through the 8259A. */
	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as ExtINT IRQ...\n");

	legacy_pic->init(0);
	legacy_pic->make_irq(0);
	apic_write(APIC_LVT0, APIC_DM_EXTINT);

	unlock_ExtINT_logic();

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	local_irq_disable();
	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
	if (apic_is_x2apic_enabled())
		apic_printk(APIC_QUIET, KERN_INFO
			    "Perhaps problem with the pre-enabled x2apic mode\n"
			    "Try booting with x2apic and interrupt-remapping disabled in the bios.\n");
	panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
	      "report.  Then try booting with the 'noapic' option.\n");
out:
	local_irq_restore(flags);
}

/*
 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
 * to devices.  However there may be an I/O APIC pin available for
 * this interrupt regardless.  The pin may be left unconnected, but
 * typically it will be reused as an ExtINT cascade interrupt for
 * the master 8259A.  In the MPS case such a pin will normally be
 * reported as an ExtINT interrupt in the MP table.  With ACPI
 * there is no provision for ExtINT interrupts, and in the absence
 * of an override it would be treated as an ordinary ISA I/O APIC
 * interrupt, that is edge-triggered and unmasked by default.  We
 * used to do this, but it caused problems on some systems because
 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
 * the same ExtINT cascade interrupt to drive the local APIC of the
 * bootstrap processor.  Therefore we refrain from routing IRQ2 to
 * the I/O APIC in all cases now.  No actual device should request
 * it anyway.
--macro */

#define PIC_IRQS	(1UL << PIC_CASCADE_IR)

/*
 * Create the linear irqdomain for one IO-APIC, parented to the
 * interrupt-remapping domain when available, otherwise to the x86
 * vector domain.  Returns 0 on success or when the IO-APIC has no
 * domain configuration, -ENOMEM on allocation failure.
 */
static int mp_irqdomain_create(int ioapic)
{
	struct irq_alloc_info info;
	struct irq_domain *parent;
	int hwirqs = mp_ioapic_pin_count(ioapic);
	struct ioapic *ip = &ioapics[ioapic];
	struct ioapic_domain_cfg *cfg = &ip->irqdomain_cfg;
	struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(ioapic);

	if (cfg->type == IOAPIC_DOMAIN_INVALID)
		return 0;

	init_irq_alloc_info(&info, NULL);
	info.type = X86_IRQ_ALLOC_TYPE_IOAPIC;
	info.ioapic_id = mpc_ioapic_id(ioapic);
	parent = irq_remapping_get_ir_irq_domain(&info);
	if (!parent)
		parent = x86_vector_domain;

	ip->irqdomain = irq_domain_add_linear(cfg->dev, hwirqs, cfg->ops,
					      (void *)(long)ioapic);
	if (!ip->irqdomain)
		return -ENOMEM;

	ip->irqdomain->parent = parent;

	/* Reserve the GSI range of legacy/strict domains for dynamic IRQs. */
	if (cfg->type == IOAPIC_DOMAIN_LEGACY ||
	    cfg->type == IOAPIC_DOMAIN_STRICT)
		ioapic_dynirq_base = max(ioapic_dynirq_base,
					 gsi_cfg->gsi_end + 1);

	return 0;
}

/* Tear down and forget the irqdomain of the given IO-APIC slot. */
static void ioapic_destroy_irqdomain(int idx)
{
	if (ioapics[idx].irqdomain) {
		irq_domain_remove(ioapics[idx].irqdomain);
		ioapics[idx].irqdomain = NULL;
	}
}

void __init setup_IO_APIC(void)
{
	int ioapic;

	if (skip_ioapic_setup || !nr_ioapics)
		return;

	io_apic_irqs = nr_legacy_irqs() ? ~PIC_IRQS : ~0UL;

	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
	for_each_ioapic(ioapic)
		BUG_ON(mp_irqdomain_create(ioapic));

	/*
	 * Set up IO-APIC IRQ routing.
	 */
	x86_init.mpparse.setup_ioapic_ids();
	sync_Arb_IDs();
	setup_IO_APIC_irqs();
	init_IO_APIC_traps();
	if (nr_legacy_irqs())
		check_timer();

	ioapic_initialized = 1;
}

/* Restore this IO-APIC's physical ID after resume, if it changed. */
static void resume_ioapic_id(int ioapic_idx)
{
	unsigned long flags;
	union IO_APIC_reg_00 reg_00;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic_idx, 0);
	if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) {
		reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
		io_apic_write(ioapic_idx, 0, reg_00.raw);
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void ioapic_resume(void)
{
	int ioapic_idx;

	for_each_ioapic_reverse(ioapic_idx)
		resume_ioapic_id(ioapic_idx);

	restore_ioapic_entries();
}

static struct syscore_ops ioapic_syscore_ops = {
	.suspend = save_ioapic_entries,
	.resume = ioapic_resume,
};

static int __init ioapic_init_ops(void)
{
	register_syscore_ops(&ioapic_syscore_ops);

	return 0;
}

device_initcall(ioapic_init_ops);

static int io_apic_get_redir_entries(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	/* The register returns the maximum index redir index
	 * supported, which is one less than the total number of redir
	 * entries.
	 */
	return reg_01.bits.entries + 1;
}

unsigned int arch_dynirq_lower_bound(unsigned int from)
{
	/*
	 * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
	 * gsi_top if ioapic_dynirq_base hasn't been initialized yet.
	 */
	return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
}

#ifdef CONFIG_X86_32
static int io_apic_get_unique_id(int ioapic, int apic_id)
{
	union IO_APIC_reg_00 reg_00;
	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
	physid_mask_t tmp;
	unsigned long flags;
	int i = 0;

	/*
	 * The P4 platform supports up to 256 APIC IDs on two separate APIC
	 * buses (one for LAPICs, one for IOAPICs), where predecessors only
	 * supports up to 16 on one shared APIC bus.
	 *
	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
	 *      advantage of new APIC bus architecture.
	 */

	if (physids_empty(apic_id_map))
		apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic, 0);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	if (apic_id >= get_physical_broadcast()) {
		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
			"%d\n", ioapic, apic_id, reg_00.bits.ID);
		apic_id = reg_00.bits.ID;
	}

	/*
	 * Every APIC in a system must have a unique ID or we get lots of nice
	 * 'stuck on smp_invalidate_needed IPI wait' messages.
	 */
	if (apic->check_apicid_used(&apic_id_map, apic_id)) {

		/* Requested ID is taken: scan for the first free one. */
		for (i = 0; i < get_physical_broadcast(); i++) {
			if (!apic->check_apicid_used(&apic_id_map, i))
				break;
		}

		if (i == get_physical_broadcast())
			panic("Max apic_id exceeded!\n");

		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
			"trying %d\n", ioapic, apic_id, i);

		apic_id = i;
	}

	apic->apicid_to_cpu_present(apic_id, &tmp);
	physids_or(apic_id_map, apic_id_map, tmp);

	if (reg_00.bits.ID != apic_id) {
		reg_00.bits.ID = apic_id;

		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic, 0, reg_00.raw);
		reg_00.raw = io_apic_read(ioapic, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/* Sanity check */
		if (reg_00.bits.ID != apic_id) {
			pr_err("IOAPIC[%d]: Unable to change apic_id!\n",
			       ioapic);
			return -1;
		}
	}

	apic_printk(APIC_VERBOSE, KERN_INFO
			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);

	return apic_id;
}

static u8 io_apic_unique_id(int idx, u8 id)
{
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
	    !APIC_XAPIC(boot_cpu_apic_version))
		return io_apic_get_unique_id(idx, id);
	else
		return id;
}
#else
/*
 * 64-bit variant: pick a free APIC ID from a 256-entry bitmap, trying
 * the requested id first, then the id currently programmed into the
 * chip, then the first free one.
 */
static u8 io_apic_unique_id(int idx, u8 id)
{
	union IO_APIC_reg_00 reg_00;
	DECLARE_BITMAP(used, 256);
	unsigned long flags;
	u8 new_id;
	int i;

	bitmap_zero(used, 256);
	for_each_ioapic(i)
		__set_bit(mpc_ioapic_id(i), used);

	/* Hand out the requested id if available */
	if (!test_bit(id, used))
		return id;

	/*
	 * Read the current id from the ioapic and keep it if
	 * available.
	 */
	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(idx, 0);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	new_id = reg_00.bits.ID;
	if (!test_bit(new_id, used)) {
		apic_printk(APIC_VERBOSE, KERN_INFO
			"IOAPIC[%d]: Using reg apic_id %d instead of %d\n",
			 idx, new_id, id);
		return new_id;
	}

	/*
	 * Get the next free id and write it to the ioapic.
	 */
	new_id = find_first_zero_bit(used, 256);
	reg_00.bits.ID = new_id;
	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(idx, 0, reg_00.raw);
	reg_00.raw = io_apic_read(idx, 0);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	/* Sanity check */
	BUG_ON(reg_00.bits.ID != new_id);

	return new_id;
}
#endif

static int io_apic_get_version(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.version;
}

/*
 * Look up the trigger mode and polarity override for a GSI from the
 * MP/ACPI interrupt source tables.  Returns 0 on success, -1 when no
 * entry exists or IO-APIC setup is disabled.
 */
int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
{
	int ioapic, pin, idx;

	if (skip_ioapic_setup)
		return -1;

	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0)
		return -1;

	pin = mp_find_ioapic_pin(ioapic, gsi);
	if (pin < 0)
		return -1;

	idx = find_irq_entry(ioapic, pin, mp_INT);
	if (idx < 0)
		return -1;

	*trigger = irq_trigger(idx);
	*polarity = irq_polarity(idx);
	return 0;
}

/*
 * This function currently is only a helper for the i386 smp boot process where
 * we need to reprogram the ioredtbls to cater for the cpus which have come online
 * so mask in all cases should simply be apic->target_cpus()
 */
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;
	const struct cpumask *mask;
	struct irq_desc *desc;
	struct irq_data *idata;
	struct irq_chip *chip;

	if (skip_ioapic_setup == 1)
		return;

	for_each_ioapic_pin(ioapic, pin) {
		irq_entry = find_irq_entry(ioapic, pin, mp_INT);
		if (irq_entry == -1)
			continue;

		irq =
pin_2_irq(irq_entry, ioapic, pin, 0);
		if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq))
			continue;

		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		idata = irq_desc_get_irq_data(desc);

		/*
		 * Honour affinities which have been set in early boot
		 */
		if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata))
			mask = irq_data_get_affinity_mask(idata);
		else
			mask = apic->target_cpus();

		chip = irq_data_get_irq_chip(idata);
		/* Might be lapic_chip for irq 0 */
		if (chip->irq_set_affinity)
			chip->irq_set_affinity(idata, mask, false);
		raw_spin_unlock_irq(&desc->lock);
	}
}
#endif

#define IOAPIC_RESOURCE_NAME_SIZE 11

static struct resource *ioapic_resources;

/*
 * Allocate one iomem resource descriptor (plus its name storage) per
 * IO-APIC from bootmem.  Returns NULL when no IO-APICs exist.
 */
static struct resource * __init ioapic_setup_resources(void)
{
	unsigned long n;
	struct resource *res;
	char *mem;
	int i;

	if (nr_ioapics == 0)
		return NULL;

	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
	n *= nr_ioapics;

	mem = alloc_bootmem(n);
	res = (void *)mem;

	mem += sizeof(struct resource) * nr_ioapics;

	for_each_ioapic(i) {
		res[i].name = mem;
		res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
		mem += IOAPIC_RESOURCE_NAME_SIZE;
		ioapics[i].iomem_res = &res[i];
	}

	ioapic_resources = res;

	return res;
}

/*
 * Map every IO-APIC's register window into a fixmap slot.  On 32-bit,
 * a bogus zero address in the MP table disables IO-APIC support and a
 * dummy page is mapped instead.
 */
void __init io_apic_init_mappings(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	struct resource *ioapic_res;
	int i;

	ioapic_res = ioapic_setup_resources();
	for_each_ioapic(i) {
		if (smp_found_config) {
			ioapic_phys = mpc_ioapic_addr(i);
#ifdef CONFIG_X86_32
			if (!ioapic_phys) {
				printk(KERN_ERR
				       "WARNING: bogus zero IO-APIC "
				       "address found in MPTABLE, "
				       "disabling IO/APIC support!\n");
				smp_found_config = 0;
				skip_ioapic_setup = 1;
				goto fake_ioapic_page;
			}
#endif
		} else {
#ifdef CONFIG_X86_32
fake_ioapic_page:
#endif
			ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
			ioapic_phys = __pa(ioapic_phys);
		}
		set_fixmap_nocache(idx, ioapic_phys);
		apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
			__fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK),
			ioapic_phys);
		idx++;

		ioapic_res->start = ioapic_phys;
		ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1;
		ioapic_res++;
	}
}

/* Publish the IO-APIC register windows in the iomem resource tree. */
void __init ioapic_insert_resources(void)
{
	int i;
	struct resource *r = ioapic_resources;

	if (!r) {
		if (nr_ioapics > 0)
			printk(KERN_ERR
				"IO APIC resources couldn't be allocated.\n");
		return;
	}

	for_each_ioapic(i) {
		insert_resource(&iomem_resource, r);
		r++;
	}
}

/* Map a GSI to the index of the IO-APIC that services it, or -1. */
int mp_find_ioapic(u32 gsi)
{
	int i;

	if (nr_ioapics == 0)
		return -1;

	/* Find the IOAPIC that manages this GSI. */
	for_each_ioapic(i) {
		struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(i);
		if (gsi >= gsi_cfg->gsi_base && gsi <= gsi_cfg->gsi_end)
			return i;
	}

	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
	return -1;
}

/* Translate a GSI to a pin number on the given IO-APIC, or -1. */
int mp_find_ioapic_pin(int ioapic, u32 gsi)
{
	struct mp_ioapic_gsi *gsi_cfg;

	if (WARN_ON(ioapic < 0))
		return -1;

	gsi_cfg = mp_ioapic_gsi_routing(ioapic);
	if (WARN_ON(gsi > gsi_cfg->gsi_end))
		return -1;

	return gsi - gsi_cfg->gsi_base;
}

/* Detect a dead/absent IO-APIC: all three ID registers read as ~0. */
static int bad_ioapic_register(int idx)
{
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;

	reg_00.raw = io_apic_read(idx, 0);
	reg_01.raw = io_apic_read(idx, 1);
	reg_02.raw = io_apic_read(idx, 2);

	if (reg_00.raw == -1 && reg_01.raw == -1 && reg_02.raw == -1) {
		pr_warn("I/O APIC 0x%x registers return all ones, skipping!\n",
			mpc_ioapic_addr(idx));
		return 1;
	}

	return 0;
}

/* Find the first unused slot in ioapics[]; MAX_IO_APICS means full. */
static int find_free_ioapic_entry(void)
{
	int idx;

	for (idx = 0; idx < MAX_IO_APICS; idx++)
		if (ioapics[idx].nr_registers == 0)
			return idx;

	return MAX_IO_APICS;
}

/**
 * mp_register_ioapic - Register an IOAPIC device
 * @id:		hardware IOAPIC ID
 * @address:	physical address of IOAPIC register area
 * @gsi_base:	base of GSI associated with the IOAPIC
 * @cfg:	configuration information for the IOAPIC
 */
int mp_register_ioapic(int id, u32 address, u32 gsi_base,
		       struct ioapic_domain_cfg *cfg)
{
	bool hotplug = !!ioapic_initialized;
	struct mp_ioapic_gsi *gsi_cfg;
	int idx, ioapic, entries;
	u32 gsi_end;

	if (!address)
	{
		pr_warn("Bogus (zero) I/O APIC address found, skipping!\n");
		return -EINVAL;
	}

	/* Reject a double registration of the same register window. */
	for_each_ioapic(ioapic)
		if (ioapics[ioapic].mp_config.apicaddr == address) {
			pr_warn("address 0x%x conflicts with IOAPIC%d\n",
				address, ioapic);
			return -EEXIST;
		}

	idx = find_free_ioapic_entry();
	if (idx >= MAX_IO_APICS) {
		pr_warn("Max # of I/O APICs (%d) exceeded (found %d), skipping\n",
			MAX_IO_APICS, idx);
		return -ENOSPC;
	}

	ioapics[idx].mp_config.type = MP_IOAPIC;
	ioapics[idx].mp_config.flags = MPC_APIC_USABLE;
	ioapics[idx].mp_config.apicaddr = address;

	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
	if (bad_ioapic_register(idx)) {
		clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
		return -ENODEV;
	}

	ioapics[idx].mp_config.apicid = io_apic_unique_id(idx, id);
	ioapics[idx].mp_config.apicver = io_apic_get_version(idx);

	/*
	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
	 */
	entries = io_apic_get_redir_entries(idx);
	gsi_end = gsi_base + entries - 1;
	for_each_ioapic(ioapic) {
		gsi_cfg = mp_ioapic_gsi_routing(ioapic);
		if ((gsi_base >= gsi_cfg->gsi_base &&
		     gsi_base <= gsi_cfg->gsi_end) ||
		    (gsi_end >= gsi_cfg->gsi_base &&
		     gsi_end <= gsi_cfg->gsi_end)) {
			pr_warn("GSI range [%u-%u] for new IOAPIC conflicts with GSI[%u-%u]\n",
				gsi_base, gsi_end,
				gsi_cfg->gsi_base, gsi_cfg->gsi_end);
			clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
			return -ENOSPC;
		}
	}
	gsi_cfg = mp_ioapic_gsi_routing(idx);
	gsi_cfg->gsi_base = gsi_base;
	gsi_cfg->gsi_end = gsi_end;

	ioapics[idx].irqdomain = NULL;
	ioapics[idx].irqdomain_cfg = *cfg;

	/*
	 * If mp_register_ioapic() is called during early boot stage when
	 * walking ACPI/SFI/DT tables, it's too early to create irqdomain,
	 * we are still using bootmem allocator. So delay it to setup_IO_APIC().
	 */
	if (hotplug) {
		if (mp_irqdomain_create(idx)) {
			clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
			return -ENOMEM;
		}
		alloc_ioapic_saved_registers(idx);
	}

	if (gsi_cfg->gsi_end >= gsi_top)
		gsi_top = gsi_cfg->gsi_end + 1;
	if (nr_ioapics <= idx)
		nr_ioapics = idx + 1;

	/* Set nr_registers to mark entry present */
	ioapics[idx].nr_registers = entries;

	pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n",
		idx, mpc_ioapic_id(idx),
		mpc_ioapic_ver(idx), mpc_ioapic_addr(idx),
		gsi_cfg->gsi_base, gsi_cfg->gsi_end);

	return 0;
}

/*
 * Unregister a (hot-removable) IO-APIC identified by its GSI base.
 * Fails with -EBUSY while any of its pins is still in use.
 */
int mp_unregister_ioapic(u32 gsi_base)
{
	int ioapic, pin;
	int found = 0;

	for_each_ioapic(ioapic)
		if (ioapics[ioapic].gsi_config.gsi_base == gsi_base) {
			found = 1;
			break;
		}
	if (!found) {
		pr_warn("can't find IOAPIC for GSI %d\n", gsi_base);
		return -ENODEV;
	}

	for_each_pin(ioapic, pin) {
		u32 gsi = mp_pin_to_gsi(ioapic, pin);
		int irq = mp_map_gsi_to_irq(gsi, 0, NULL);
		struct mp_chip_data *data;

		if (irq >= 0) {
			data = irq_get_chip_data(irq);
			if (data && data->count) {
				pr_warn("pin%d on IOAPIC%d is still in use.\n",
					pin, ioapic);
				return -EBUSY;
			}
		}
	}

	/* Mark entry not present */
	ioapics[ioapic].nr_registers  = 0;
	ioapic_destroy_irqdomain(ioapic);
	free_ioapic_saved_registers(ioapic);
	if (ioapics[ioapic].iomem_res)
		release_resource(ioapics[ioapic].iomem_res);
	clear_fixmap(FIX_IO_APIC_BASE_0 + ioapic);
	memset(&ioapics[ioapic], 0, sizeof(ioapics[ioapic]));

	return 0;
}

/* Return 1 when an IO-APIC with this GSI base is registered. */
int mp_ioapic_registered(u32 gsi_base)
{
	int ioapic;

	for_each_ioapic(ioapic)
		if (ioapics[ioapic].gsi_config.gsi_base == gsi_base)
			return 1;

	return 0;
}

/*
 * Fill in trigger/polarity for a pin: prefer explicit allocation info,
 * then the ACPI/MP override tables, and finally the PCI default.
 */
static void mp_irqdomain_get_attr(u32 gsi, struct mp_chip_data *data,
				  struct irq_alloc_info *info)
{
	if (info && info->ioapic_valid) {
		data->trigger = info->ioapic_trigger;
		data->polarity = info->ioapic_polarity;
	} else if (acpi_get_override_irq(gsi, &data->trigger,
					 &data->polarity) < 0) {
		/* PCI interrupts are always active low level triggered.
		 */
		data->trigger = IOAPIC_LEVEL;
		data->polarity = IOAPIC_POL_LOW;
	}
}

/*
 * Build the IO-APIC redirection table entry for a pin from the vector
 * configuration and the cached trigger/polarity attributes.
 */
static void mp_setup_entry(struct irq_cfg *cfg, struct mp_chip_data *data,
			   struct IO_APIC_route_entry *entry)
{
	memset(entry, 0, sizeof(*entry));
	entry->delivery_mode = apic->irq_delivery_mode;
	entry->dest_mode     = apic->irq_dest_mode;
	entry->dest	     = cfg->dest_apicid;
	entry->vector	     = cfg->vector;
	entry->trigger	     = data->trigger;
	entry->polarity	     = data->polarity;
	/*
	 * Mask level triggered irqs. Edge triggered irqs are masked
	 * by the irq core code in case they fire.
	 */
	if (data->trigger == IOAPIC_LEVEL)
		entry->mask = IOAPIC_MASKED;
	else
		entry->mask = IOAPIC_UNMASKED;
}

/*
 * irqdomain .alloc callback: allocate one interrupt for an IO-APIC pin,
 * set up its chip data, routing entry and flow handler.  Only single
 * (nr_irqs == 1) allocations are supported.
 */
int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
		       unsigned int nr_irqs, void *arg)
{
	int ret, ioapic, pin;
	struct irq_cfg *cfg;
	struct irq_data *irq_data;
	struct mp_chip_data *data;
	struct irq_alloc_info *info = arg;
	unsigned long flags;

	if (!info || nr_irqs > 1)
		return -EINVAL;
	irq_data = irq_domain_get_irq_data(domain, virq);
	if (!irq_data)
		return -EINVAL;

	ioapic = mp_irqdomain_ioapic_idx(domain);
	pin = info->ioapic_pin;
	if (irq_find_mapping(domain, (irq_hw_number_t)pin) > 0)
		return -EEXIST;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	info->ioapic_entry = &data->entry;
	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info);
	if (ret < 0) {
		kfree(data);
		return ret;
	}

	INIT_LIST_HEAD(&data->irq_2_pin);
	irq_data->hwirq = info->ioapic_pin;
	/* Pick the remapping-aware chip when not parented to the vector domain. */
	irq_data->chip = (domain->parent == x86_vector_domain) ?
			  &ioapic_chip : &ioapic_ir_chip;
	irq_data->chip_data = data;
	mp_irqdomain_get_attr(mp_pin_to_gsi(ioapic, pin), data, info);

	cfg = irqd_cfg(irq_data);
	add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin);

	local_irq_save(flags);
	if (info->ioapic_entry)
		mp_setup_entry(cfg, data, info->ioapic_entry);
	mp_register_handler(virq, data->trigger);
	if (virq < nr_legacy_irqs())
		legacy_pic->mask(virq);
	local_irq_restore(flags);

	apic_printk(APIC_VERBOSE, KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i Dest:%d)\n",
		    ioapic, mpc_ioapic_id(ioapic), pin, cfg->vector,
		    virq, data->trigger, data->polarity, cfg->dest_apicid);

	return 0;
}

/* irqdomain .free callback: undo mp_irqdomain_alloc() for one IRQ. */
void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
		       unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	struct mp_chip_data *data;

	BUG_ON(nr_irqs != 1);
	irq_data = irq_domain_get_irq_data(domain, virq);
	if (irq_data && irq_data->chip_data) {
		data = irq_data->chip_data;
		__remove_pin_from_irq(data, mp_irqdomain_ioapic_idx(domain),
				      (int)irq_data->hwirq);
		WARN_ON(!list_empty(&data->irq_2_pin));
		kfree(irq_data->chip_data);
	}
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

/* Write the cached routing entry into every pin attached to the IRQ. */
void mp_irqdomain_activate(struct irq_domain *domain,
			   struct irq_data *irq_data)
{
	unsigned long flags;
	struct irq_pin_list *entry;
	struct mp_chip_data *data = irq_data->chip_data;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, data->irq_2_pin)
		__ioapic_write_entry(entry->apic, entry->pin, data->entry);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

void mp_irqdomain_deactivate(struct irq_domain *domain,
			     struct irq_data *irq_data)
{
	/* It won't be called for IRQ with multiple IOAPIC pins associated */
	ioapic_mask_entry(mp_irqdomain_ioapic_idx(domain),
			  (int)irq_data->hwirq);
}

/* Recover the IO-APIC index stored as the domain's host_data. */
int mp_irqdomain_ioapic_idx(struct irq_domain *domain)
{
	return (int)(long)domain->host_data;
}

const struct irq_domain_ops mp_ioapic_irqdomain_ops = {
	.alloc		= mp_irqdomain_alloc,
	.free		= mp_irqdomain_free,
	.activate	= mp_irqdomain_activate,
	.deactivate	= mp_irqdomain_deactivate,
};
gpl-2.0
NeverLEX/linux
sound/soc/soc-dapm.c
66
107508
/*
 * soc-dapm.c  --  ALSA SoC Dynamic Audio Power Management
 *
 * Copyright 2005 Wolfson Microelectronics PLC.
 * Author: Liam Girdwood <lrg@slimlogic.co.uk>
 *
 *  This program is free software; you can redistribute  it and/or modify it
 *  under  the terms of  the GNU General  Public License as published by the
 *  Free Software Foundation;  either version 2 of the  License, or (at your
 *  option) any later version.
 *
 *  Features:
 *    o Changes power status of internal codec blocks depending on the
 *      dynamic configuration of codec internal audio paths and active
 *      DACs/ADCs.
 *    o Platform power domain - can support external components i.e. amps and
 *      mic/headphone insertion events.
 *    o Automatic Mic Bias support
 *    o Jack insertion power event initiation - e.g. hp insertion will enable
 *      sinks, dacs, etc
 *    o Delayed power down of audio subsystem to reduce pops between a quick
 *      device reopen.
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/async.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/jiffies.h>
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/initval.h>

#include <trace/events/asoc.h>

/* Bump a per-card DAPM statistics counter. */
#define DAPM_UPDATE_STAT(widget, val) widget->dapm->card->dapm_stats.val++;

/* Forward declarations for helpers defined later in this file. */
static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
	struct snd_soc_dapm_widget *wsource, struct snd_soc_dapm_widget *wsink,
	const char *control,
	int (*connected)(struct snd_soc_dapm_widget *source,
			 struct snd_soc_dapm_widget *sink));
struct snd_soc_dapm_widget *
snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
			 const struct snd_soc_dapm_widget *widget);
struct snd_soc_dapm_widget *
snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
			 const struct snd_soc_dapm_widget *widget);

/* dapm power sequences - make this per codec in the future */
/* Power-up ordering: supplies first, then muxes/DACs, outputs last. */
static int dapm_up_seq[] = {
	[snd_soc_dapm_pre] = 0,
	[snd_soc_dapm_regulator_supply] = 1,
	[snd_soc_dapm_clock_supply] = 1,
	[snd_soc_dapm_supply] = 2,
	[snd_soc_dapm_micbias] = 3,
	[snd_soc_dapm_dai_link] = 2,
	[snd_soc_dapm_dai_in] = 4,
	[snd_soc_dapm_dai_out] = 4,
	[snd_soc_dapm_aif_in] = 4,
	[snd_soc_dapm_aif_out] = 4,
	[snd_soc_dapm_mic] = 5,
	[snd_soc_dapm_mux] = 6,
	[snd_soc_dapm_demux] = 6,
	[snd_soc_dapm_dac] = 7,
	[snd_soc_dapm_switch] = 8,
	[snd_soc_dapm_mixer] = 8,
	[snd_soc_dapm_mixer_named_ctl] = 8,
	[snd_soc_dapm_pga] = 9,
	[snd_soc_dapm_adc] = 10,
	[snd_soc_dapm_out_drv] = 11,
	[snd_soc_dapm_hp] = 11,
	[snd_soc_dapm_spk] = 11,
	[snd_soc_dapm_line] = 11,
	[snd_soc_dapm_kcontrol] = 12,
	[snd_soc_dapm_post] = 13,
};

/* Power-down ordering: roughly the reverse of dapm_up_seq. */
static int dapm_down_seq[] = {
	[snd_soc_dapm_pre] = 0,
	[snd_soc_dapm_kcontrol] = 1,
	[snd_soc_dapm_adc] = 2,
	[snd_soc_dapm_hp] = 3,
	[snd_soc_dapm_spk] = 3,
	[snd_soc_dapm_line] = 3,
	[snd_soc_dapm_out_drv] = 3,
	[snd_soc_dapm_pga] = 4,
	[snd_soc_dapm_switch] = 5,
	[snd_soc_dapm_mixer_named_ctl] = 5,
	[snd_soc_dapm_mixer] = 5,
	[snd_soc_dapm_dac] = 6,
	[snd_soc_dapm_mic] = 7,
	[snd_soc_dapm_micbias] = 8,
	[snd_soc_dapm_mux] = 9,
	[snd_soc_dapm_demux] = 9,
	[snd_soc_dapm_aif_in] = 10,
	[snd_soc_dapm_aif_out] = 10,
	[snd_soc_dapm_dai_in] = 10,
	[snd_soc_dapm_dai_out] = 10,
	[snd_soc_dapm_dai_link] = 11,
	[snd_soc_dapm_supply] = 12,
	[snd_soc_dapm_clock_supply] = 13,
	[snd_soc_dapm_regulator_supply] = 13,
	[snd_soc_dapm_post] = 14,
};

/* Assert the card's dapm_mutex is held once the card is instantiated. */
static void dapm_assert_locked(struct snd_soc_dapm_context *dapm)
{
	if (dapm->card && dapm->card->instantiated)
		lockdep_assert_held(&dapm->card->dapm_mutex);
}

/* Sleep for the configured anti-pop delay, if any. */
static void pop_wait(u32 pop_time)
{
	if (pop_time)
		schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time));
}

/* printf-style debug helper, active only when pop_time is non-zero. */
static void pop_dbg(struct device *dev, u32 pop_time, const char *fmt, ...)
{
	va_list args;
	char *buf;

	if (!pop_time)
		return;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (buf == NULL)
		return;

	va_start(args, fmt);
	vsnprintf(buf, PAGE_SIZE, fmt, args);
	dev_info(dev, "%s", buf);
	va_end(args);

	kfree(buf);
}

/* A widget is "dirty" when it is queued on the card's dapm_dirty list. */
static bool dapm_dirty_widget(struct snd_soc_dapm_widget *w)
{
	return !list_empty(&w->dirty);
}

/* Queue a widget for re-evaluation during the next power run. */
static void dapm_mark_dirty(struct snd_soc_dapm_widget *w, const char *reason)
{
	dapm_assert_locked(w->dapm);

	if (!dapm_dirty_widget(w)) {
		dev_vdbg(w->dapm->dev, "Marking %s dirty due to %s\n",
			 w->name, reason);
		list_add_tail(&w->dirty, &w->dapm->card->dapm_dirty);
	}
}

/*
 * dapm_widget_invalidate_input_paths() - Invalidate the cached number of input
 * paths
 * @w: The widget for which to invalidate the cached number of input paths
 *
 * The function resets the cached number of inputs for the specified widget and
 * all widgets that can be reached via outgoing paths from the widget.
 *
 * This function must be called if the number of input paths for a widget might
 * have changed. E.g. if the source state of a widget changes or a path is added
 * or activated with the widget as the sink.
 */
static void dapm_widget_invalidate_input_paths(struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_widget *sink;
	struct snd_soc_dapm_path *p;
	LIST_HEAD(list);

	dapm_assert_locked(w->dapm);

	if (w->inputs == -1)
		return;

	/* Breadth-first walk along outgoing paths; -1 marks "unknown". */
	w->inputs = -1;
	list_add_tail(&w->work_list, &list);
	list_for_each_entry(w, &list, work_list) {
		list_for_each_entry(p, &w->sinks, list_source) {
			if (p->is_supply || p->weak || !p->connect)
				continue;
			sink = p->sink;
			if (sink->inputs != -1) {
				sink->inputs = -1;
				list_add_tail(&sink->work_list, &list);
			}
		}
	}
}

/*
 * dapm_widget_invalidate_output_paths() - Invalidate the cached number of
 * output paths
 * @w: The widget for which to invalidate the cached number of output paths
 *
 * Resets the cached number of outputs for the specified widget and all widgets
 * that can be reached via incoming paths from the widget.
 *
 * This function must be called if the number of output paths for a widget might
 * have changed. E.g. if the sink state of a widget changes or a path is added
 * or activated with the widget as the source.
 */
static void dapm_widget_invalidate_output_paths(struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_widget *source;
	struct snd_soc_dapm_path *p;
	LIST_HEAD(list);

	dapm_assert_locked(w->dapm);

	if (w->outputs == -1)
		return;

	/* Mirror of the input-path walk, following incoming paths. */
	w->outputs = -1;
	list_add_tail(&w->work_list, &list);
	list_for_each_entry(w, &list, work_list) {
		list_for_each_entry(p, &w->sources, list_sink) {
			if (p->is_supply || p->weak || !p->connect)
				continue;
			source = p->source;
			if (source->outputs != -1) {
				source->outputs = -1;
				list_add_tail(&source->work_list, &list);
			}
		}
	}
}

/*
 * dapm_path_invalidate() - Invalidates the cached number of inputs and outputs
 *  for the widgets connected to a path
 * @p: The path to invalidate
 *
 * Resets the cached number of inputs for the sink of the path and the cached
 * number of outputs for the source of the path.
 *
 * This function must be called when a path is added, removed or the connected
 * state changes.
 */
static void dapm_path_invalidate(struct snd_soc_dapm_path *p)
{
	/*
	 * Weak paths or supply paths do not influence the number of input or
	 * output paths of their neighbors.
	 */
	if (p->weak || p->is_supply)
		return;

	/*
	 * The number of connected endpoints is the sum of the number of
	 * connected endpoints of all neighbors. If a node with 0 connected
	 * endpoints is either connected or disconnected that sum won't change,
	 * so there is no need to re-check the path.
*/ if (p->source->inputs != 0) dapm_widget_invalidate_input_paths(p->sink); if (p->sink->outputs != 0) dapm_widget_invalidate_output_paths(p->source); } void dapm_mark_endpoints_dirty(struct snd_soc_card *card) { struct snd_soc_dapm_widget *w; mutex_lock(&card->dapm_mutex); list_for_each_entry(w, &card->widgets, list) { if (w->is_sink || w->is_source) { dapm_mark_dirty(w, "Rechecking endpoints"); if (w->is_sink) dapm_widget_invalidate_output_paths(w); if (w->is_source) dapm_widget_invalidate_input_paths(w); } } mutex_unlock(&card->dapm_mutex); } EXPORT_SYMBOL_GPL(dapm_mark_endpoints_dirty); /* create a new dapm widget */ static inline struct snd_soc_dapm_widget *dapm_cnew_widget( const struct snd_soc_dapm_widget *_widget) { return kmemdup(_widget, sizeof(*_widget), GFP_KERNEL); } struct dapm_kcontrol_data { unsigned int value; struct snd_soc_dapm_widget *widget; struct list_head paths; struct snd_soc_dapm_widget_list *wlist; }; static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget, struct snd_kcontrol *kcontrol) { struct dapm_kcontrol_data *data; struct soc_mixer_control *mc; struct soc_enum *e; const char *name; int ret; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; INIT_LIST_HEAD(&data->paths); switch (widget->id) { case snd_soc_dapm_switch: case snd_soc_dapm_mixer: case snd_soc_dapm_mixer_named_ctl: mc = (struct soc_mixer_control *)kcontrol->private_value; if (mc->autodisable) { struct snd_soc_dapm_widget template; name = kasprintf(GFP_KERNEL, "%s %s", kcontrol->id.name, "Autodisable"); if (!name) { ret = -ENOMEM; goto err_data; } memset(&template, 0, sizeof(template)); template.reg = mc->reg; template.mask = (1 << fls(mc->max)) - 1; template.shift = mc->shift; if (mc->invert) template.off_val = mc->max; else template.off_val = 0; template.on_val = template.off_val; template.id = snd_soc_dapm_kcontrol; template.name = name; data->value = template.on_val; data->widget = snd_soc_dapm_new_control_unlocked(widget->dapm, 
&template); if (!data->widget) { ret = -ENOMEM; goto err_name; } } break; case snd_soc_dapm_demux: case snd_soc_dapm_mux: e = (struct soc_enum *)kcontrol->private_value; if (e->autodisable) { struct snd_soc_dapm_widget template; name = kasprintf(GFP_KERNEL, "%s %s", kcontrol->id.name, "Autodisable"); if (!name) { ret = -ENOMEM; goto err_data; } memset(&template, 0, sizeof(template)); template.reg = e->reg; template.mask = e->mask << e->shift_l; template.shift = e->shift_l; template.off_val = snd_soc_enum_item_to_val(e, 0); template.on_val = template.off_val; template.id = snd_soc_dapm_kcontrol; template.name = name; data->value = template.on_val; data->widget = snd_soc_dapm_new_control(widget->dapm, &template); if (!data->widget) { ret = -ENOMEM; goto err_name; } snd_soc_dapm_add_path(widget->dapm, data->widget, widget, NULL, NULL); } break; default: break; } kcontrol->private_data = data; return 0; err_name: kfree(name); err_data: kfree(data); return ret; } static void dapm_kcontrol_free(struct snd_kcontrol *kctl) { struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl); if (data->widget) kfree(data->widget->name); kfree(data->wlist); kfree(data); } static struct snd_soc_dapm_widget_list *dapm_kcontrol_get_wlist( const struct snd_kcontrol *kcontrol) { struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol); return data->wlist; } static int dapm_kcontrol_add_widget(struct snd_kcontrol *kcontrol, struct snd_soc_dapm_widget *widget) { struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol); struct snd_soc_dapm_widget_list *new_wlist; unsigned int n; if (data->wlist) n = data->wlist->num_widgets + 1; else n = 1; new_wlist = krealloc(data->wlist, sizeof(*new_wlist) + sizeof(widget) * n, GFP_KERNEL); if (!new_wlist) return -ENOMEM; new_wlist->widgets[n - 1] = widget; new_wlist->num_widgets = n; data->wlist = new_wlist; return 0; } static void dapm_kcontrol_add_path(const struct snd_kcontrol *kcontrol, struct snd_soc_dapm_path *path) { struct 
dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol); list_add_tail(&path->list_kcontrol, &data->paths); } static bool dapm_kcontrol_is_powered(const struct snd_kcontrol *kcontrol) { struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol); if (!data->widget) return true; return data->widget->power; } static struct list_head *dapm_kcontrol_get_path_list( const struct snd_kcontrol *kcontrol) { struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol); return &data->paths; } #define dapm_kcontrol_for_each_path(path, kcontrol) \ list_for_each_entry(path, dapm_kcontrol_get_path_list(kcontrol), \ list_kcontrol) unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol) { struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol); return data->value; } EXPORT_SYMBOL_GPL(dapm_kcontrol_get_value); static bool dapm_kcontrol_set_value(const struct snd_kcontrol *kcontrol, unsigned int value) { struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol); if (data->value == value) return false; if (data->widget) data->widget->on_val = value; data->value = value; return true; } /** * snd_soc_dapm_kcontrol_dapm() - Returns the dapm context associated to a * kcontrol * @kcontrol: The kcontrol * * Note: This function must only be used on kcontrols that are known to have * been registered for a CODEC. Otherwise the behaviour is undefined. 
 */
struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm(
	struct snd_kcontrol *kcontrol)
{
	return dapm_kcontrol_get_wlist(kcontrol)->widgets[0]->dapm;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_kcontrol_dapm);

/*
 * Reset the per-run power bookkeeping before a DAPM power walk: clear the
 * walk statistics and seed each widget's target state (new_power) from its
 * current state so the walk only records genuine changes.
 */
static void dapm_reset(struct snd_soc_card *card)
{
	struct snd_soc_dapm_widget *w;

	lockdep_assert_held(&card->dapm_mutex);

	memset(&card->dapm_stats, 0, sizeof(card->dapm_stats));

	list_for_each_entry(w, &card->widgets, list) {
		w->new_power = w->power;
		w->power_checked = false;
	}
}

/* Name prefix of the context's component, or NULL if there is no component */
static const char *soc_dapm_prefix(struct snd_soc_dapm_context *dapm)
{
	if (!dapm->component)
		return NULL;

	return dapm->component->name_prefix;
}

/* Register read routed through the context's component; -EIO without one */
static int soc_dapm_read(struct snd_soc_dapm_context *dapm, int reg,
	unsigned int *value)
{
	if (!dapm->component)
		return -EIO;
	return snd_soc_component_read(dapm->component, reg, value);
}

/* Read-modify-write of a register through the context's component */
static int soc_dapm_update_bits(struct snd_soc_dapm_context *dapm,
	int reg, unsigned int mask, unsigned int value)
{
	if (!dapm->component)
		return -EIO;
	return snd_soc_component_update_bits(dapm->component, reg, mask, value);
}

/* Test whether an update would change the register's current contents */
static int soc_dapm_test_bits(struct snd_soc_dapm_context *dapm,
	int reg, unsigned int mask, unsigned int value)
{
	if (!dapm->component)
		return -EIO;
	return snd_soc_component_test_bits(dapm->component, reg, mask, value);
}

/* Wait for any outstanding asynchronous I/O issued via the component */
static void soc_dapm_async_complete(struct snd_soc_dapm_context *dapm)
{
	if (dapm->component)
		snd_soc_component_async_complete(dapm->component);
}

/*
 * Look up a widget by name starting at the cached widget, scanning at most
 * 'depth' entries forward in the card's widget list.  Routes are typically
 * registered in the order the widgets were created, so the wanted widget
 * tends to sit at or just after the cached position.  Returns NULL on a
 * cache miss (the caller then falls back to a full search).
 */
static struct snd_soc_dapm_widget *
dapm_wcache_lookup(struct snd_soc_dapm_wcache *wcache, const char *name)
{
	struct snd_soc_dapm_widget *w = wcache->widget;
	struct list_head *wlist;
	const int depth = 2;
	int i = 0;

	if (w) {
		wlist = &w->dapm->card->widgets;

		list_for_each_entry_from(w, wlist, list) {
			if (!strcmp(name, w->name))
				return w;

			if (++i == depth)
				break;
		}
	}

	return NULL;
}

/* Remember the most recently used widget as the new cache position */
static inline void dapm_wcache_update(struct snd_soc_dapm_wcache *wcache,
				      struct snd_soc_dapm_widget *w)
{
	wcache->widget = w;
}

/**
 * snd_soc_dapm_force_bias_level() - Sets the DAPM bias level
 * @dapm: The DAPM context for which to set the level
 * @level: The level to set
 *
 * Forces the DAPM bias level to a specific state. It will call the bias level
 * callback of DAPM context with the specified level. This will even happen if
 * the context is already at the same level. Furthermore it will not go through
 * the normal bias level sequencing, meaning any intermediate states between the
 * current and the target state will not be entered.
 *
 * Note that the change in bias level is only temporary and the next time
 * snd_soc_dapm_sync() is called the state will be set to the level as
 * determined by the DAPM core. The function is mainly intended to be used
 * during probe or resume from suspend to power up the device so
 * initialization can be done, before the DAPM core takes over.
 */
int snd_soc_dapm_force_bias_level(struct snd_soc_dapm_context *dapm,
				  enum snd_soc_bias_level level)
{
	int ret = 0;

	if (dapm->set_bias_level)
		ret = dapm->set_bias_level(dapm, level);

	/* Only record the new level if the callback accepted it */
	if (ret == 0)
		dapm->bias_level = level;

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_force_bias_level);

/**
 * snd_soc_dapm_set_bias_level - set the bias level for the system
 * @dapm: DAPM context
 * @level: level to configure
 *
 * Configure the bias (power) levels for the SoC audio device.
 *
 * Returns 0 for success else error.
 */
static int snd_soc_dapm_set_bias_level(struct snd_soc_dapm_context *dapm,
				       enum snd_soc_bias_level level)
{
	struct snd_soc_card *card = dapm->card;
	int ret = 0;

	trace_snd_soc_bias_level_start(card, level);

	/* Card-level hook first, then the context's own transition (skipped
	 * for the card's own DAPM context), then the card-level post hook;
	 * bail out on the first failure. */
	if (card && card->set_bias_level)
		ret = card->set_bias_level(card, dapm, level);
	if (ret != 0)
		goto out;

	if (!card || dapm != &card->dapm)
		ret = snd_soc_dapm_force_bias_level(dapm, level);

	if (ret != 0)
		goto out;

	if (card && card->set_bias_level_post)
		ret = card->set_bias_level_post(card, dapm, level);
out:
	trace_snd_soc_bias_level_done(card, level);

	return ret;
}

/* connect mux widget to its interconnecting audio paths */
static int dapm_connect_mux(struct snd_soc_dapm_context *dapm,
	struct snd_soc_dapm_path *path, const char *control_name,
	struct snd_soc_dapm_widget *w)
{
	const struct snd_kcontrol_new *kcontrol = &w->kcontrol_news[0];
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	unsigned int val, item;
	int i;

	if (e->reg != SND_SOC_NOPM) {
		/* NOTE(review): soc_dapm_read() result is ignored; if the
		 * read failed 'val' would be used uninitialized - confirm
		 * callers guarantee a readable backing register here. */
		soc_dapm_read(dapm, e->reg, &val);
		val = (val >> e->shift_l) & e->mask;
		item = snd_soc_enum_val_to_item(e, val);
	} else {
		/* since a virtual mux has no backing registers to
		 * decide which path to connect, it will try to match
		 * with the first enumeration.  This is to ensure
		 * that the default mux choice (the first) will be
		 * correctly powered up during initialization.
		 */
		item = 0;
	}

	/* Connect the path iff its name matches the currently selected item */
	for (i = 0; i < e->items; i++) {
		if (!(strcmp(control_name, e->texts[i]))) {
			path->name = e->texts[i];
			if (i == item)
				path->connect = 1;
			else
				path->connect = 0;
			return 0;
		}
	}

	return -ENODEV;
}

/* set up initial codec paths */
static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i)
{
	struct soc_mixer_control *mc = (struct soc_mixer_control *)
		p->sink->kcontrol_news[i].private_value;
	unsigned int reg = mc->reg;
	unsigned int shift = mc->shift;
	unsigned int max = mc->max;
	unsigned int mask = (1 << fls(max)) - 1;
	unsigned int invert = mc->invert;
	unsigned int val;

	if (reg != SND_SOC_NOPM) {
		/* Initial connect state mirrors the hardware mixer switch */
		soc_dapm_read(p->sink->dapm, reg, &val);
		val = (val >> shift) & mask;
		if (invert)
			val = max - val;
		p->connect = !!val;
	} else {
		/* Virtual mixer inputs default to disconnected */
		p->connect = 0;
	}
}

/* connect mixer widget to its interconnecting audio paths */
static int dapm_connect_mixer(struct snd_soc_dapm_context *dapm,
	struct snd_soc_dapm_path *path, const char *control_name)
{
	int i;

	/* search for mixer kcontrol */
	for (i = 0; i < path->sink->num_kcontrols; i++) {
		if (!strcmp(control_name, path->sink->kcontrol_news[i].name)) {
			path->name = path->sink->kcontrol_news[i].name;
			dapm_set_mixer_path_status(path, i);
			return 0;
		}
	}
	return -ENODEV;
}

/*
 * Check whether a kcontrol template is also used by another widget.  Returns
 * 1 when shared (and, if the control was already instantiated, passes it back
 * through *kcontrol), 0 otherwise.
 */
static int dapm_is_shared_kcontrol(struct snd_soc_dapm_context *dapm,
	struct snd_soc_dapm_widget *kcontrolw,
	const struct snd_kcontrol_new *kcontrol_new,
	struct snd_kcontrol **kcontrol)
{
	struct snd_soc_dapm_widget *w;
	int i;

	*kcontrol = NULL;

	list_for_each_entry(w, &dapm->card->widgets, list) {
		/* Only widgets in the same DAPM context can share a control */
		if (w == kcontrolw || w->dapm != kcontrolw->dapm)
			continue;
		for (i = 0; i < w->num_kcontrols; i++) {
			if (&w->kcontrol_news[i] == kcontrol_new) {
				if (w->kcontrols)
					*kcontrol = w->kcontrols[i];
				return 1;
			}
		}
	}

	return 0;
}

/*
 * Determine if a kcontrol is shared. If it is, look it up. If it isn't,
 * create it.
Either way, add the widget into the control's widget list */ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w, int kci) { struct snd_soc_dapm_context *dapm = w->dapm; struct snd_card *card = dapm->card->snd_card; const char *prefix; size_t prefix_len; int shared; struct snd_kcontrol *kcontrol; bool wname_in_long_name, kcname_in_long_name; char *long_name = NULL; const char *name; int ret = 0; prefix = soc_dapm_prefix(dapm); if (prefix) prefix_len = strlen(prefix) + 1; else prefix_len = 0; shared = dapm_is_shared_kcontrol(dapm, w, &w->kcontrol_news[kci], &kcontrol); if (!kcontrol) { if (shared) { wname_in_long_name = false; kcname_in_long_name = true; } else { switch (w->id) { case snd_soc_dapm_switch: case snd_soc_dapm_mixer: wname_in_long_name = true; kcname_in_long_name = true; break; case snd_soc_dapm_mixer_named_ctl: wname_in_long_name = false; kcname_in_long_name = true; break; case snd_soc_dapm_demux: case snd_soc_dapm_mux: wname_in_long_name = true; kcname_in_long_name = false; break; default: return -EINVAL; } } if (wname_in_long_name && kcname_in_long_name) { /* * The control will get a prefix from the control * creation process but we're also using the same * prefix for widgets so cut the prefix off the * front of the widget name. 
*/ long_name = kasprintf(GFP_KERNEL, "%s %s", w->name + prefix_len, w->kcontrol_news[kci].name); if (long_name == NULL) return -ENOMEM; name = long_name; } else if (wname_in_long_name) { long_name = NULL; name = w->name + prefix_len; } else { long_name = NULL; name = w->kcontrol_news[kci].name; } kcontrol = snd_soc_cnew(&w->kcontrol_news[kci], NULL, name, prefix); if (!kcontrol) { ret = -ENOMEM; goto exit_free; } kcontrol->private_free = dapm_kcontrol_free; ret = dapm_kcontrol_data_alloc(w, kcontrol); if (ret) { snd_ctl_free_one(kcontrol); goto exit_free; } ret = snd_ctl_add(card, kcontrol); if (ret < 0) { dev_err(dapm->dev, "ASoC: failed to add widget %s dapm kcontrol %s: %d\n", w->name, name, ret); goto exit_free; } } ret = dapm_kcontrol_add_widget(kcontrol, w); if (ret == 0) w->kcontrols[kci] = kcontrol; exit_free: kfree(long_name); return ret; } /* create new dapm mixer control */ static int dapm_new_mixer(struct snd_soc_dapm_widget *w) { int i, ret; struct snd_soc_dapm_path *path; struct dapm_kcontrol_data *data; /* add kcontrol */ for (i = 0; i < w->num_kcontrols; i++) { /* match name */ list_for_each_entry(path, &w->sources, list_sink) { /* mixer/mux paths name must match control name */ if (path->name != (char *)w->kcontrol_news[i].name) continue; if (!w->kcontrols[i]) { ret = dapm_create_or_share_mixmux_kcontrol(w, i); if (ret < 0) return ret; } dapm_kcontrol_add_path(w->kcontrols[i], path); data = snd_kcontrol_chip(w->kcontrols[i]); if (data->widget) snd_soc_dapm_add_path(data->widget->dapm, data->widget, path->source, NULL, NULL); } } return 0; } /* create new dapm mux control */ static int dapm_new_mux(struct snd_soc_dapm_widget *w) { struct snd_soc_dapm_context *dapm = w->dapm; struct snd_soc_dapm_path *path; struct list_head *paths; const char *type; int ret; switch (w->id) { case snd_soc_dapm_mux: paths = &w->sources; type = "mux"; break; case snd_soc_dapm_demux: paths = &w->sinks; type = "demux"; break; default: return -EINVAL; } if 
(w->num_kcontrols != 1) { dev_err(dapm->dev, "ASoC: %s %s has incorrect number of controls\n", type, w->name); return -EINVAL; } if (list_empty(paths)) { dev_err(dapm->dev, "ASoC: %s %s has no paths\n", type, w->name); return -EINVAL; } ret = dapm_create_or_share_mixmux_kcontrol(w, 0); if (ret < 0) return ret; if (w->id == snd_soc_dapm_mux) { list_for_each_entry(path, &w->sources, list_sink) { if (path->name) dapm_kcontrol_add_path(w->kcontrols[0], path); } } else { list_for_each_entry(path, &w->sinks, list_source) { if (path->name) dapm_kcontrol_add_path(w->kcontrols[0], path); } } return 0; } /* create new dapm volume control */ static int dapm_new_pga(struct snd_soc_dapm_widget *w) { if (w->num_kcontrols) dev_err(w->dapm->dev, "ASoC: PGA controls not supported: '%s'\n", w->name); return 0; } /* create new dapm dai link control */ static int dapm_new_dai_link(struct snd_soc_dapm_widget *w) { int i, ret; struct snd_kcontrol *kcontrol; struct snd_soc_dapm_context *dapm = w->dapm; struct snd_card *card = dapm->card->snd_card; /* create control for links with > 1 config */ if (w->num_params <= 1) return 0; /* add kcontrol */ for (i = 0; i < w->num_kcontrols; i++) { kcontrol = snd_soc_cnew(&w->kcontrol_news[i], w, w->name, NULL); ret = snd_ctl_add(card, kcontrol); if (ret < 0) { dev_err(dapm->dev, "ASoC: failed to add widget %s dapm kcontrol %s: %d\n", w->name, w->kcontrol_news[i].name, ret); return ret; } kcontrol->private_data = w; w->kcontrols[i] = kcontrol; } return 0; } /* We implement power down on suspend by checking the power state of * the ALSA card - when we are suspending the ALSA state for the card * is set to D3. 
*/ static int snd_soc_dapm_suspend_check(struct snd_soc_dapm_widget *widget) { int level = snd_power_get_state(widget->dapm->card->snd_card); switch (level) { case SNDRV_CTL_POWER_D3hot: case SNDRV_CTL_POWER_D3cold: if (widget->ignore_suspend) dev_dbg(widget->dapm->dev, "ASoC: %s ignoring suspend\n", widget->name); return widget->ignore_suspend; default: return 1; } } /* add widget to list if it's not already in the list */ static int dapm_list_add_widget(struct snd_soc_dapm_widget_list **list, struct snd_soc_dapm_widget *w) { struct snd_soc_dapm_widget_list *wlist; int wlistsize, wlistentries, i; if (*list == NULL) return -EINVAL; wlist = *list; /* is this widget already in the list */ for (i = 0; i < wlist->num_widgets; i++) { if (wlist->widgets[i] == w) return 0; } /* allocate some new space */ wlistentries = wlist->num_widgets + 1; wlistsize = sizeof(struct snd_soc_dapm_widget_list) + wlistentries * sizeof(struct snd_soc_dapm_widget *); *list = krealloc(wlist, wlistsize, GFP_KERNEL); if (*list == NULL) { dev_err(w->dapm->dev, "ASoC: can't allocate widget list for %s\n", w->name); return -ENOMEM; } wlist = *list; /* insert the widget */ dev_dbg(w->dapm->dev, "ASoC: added %s in widget list pos %d\n", w->name, wlist->num_widgets); wlist->widgets[wlist->num_widgets] = w; wlist->num_widgets++; return 1; } /* * Recursively check for a completed path to an active or physically connected * output widget. Returns number of complete paths. 
 */
static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
	struct snd_soc_dapm_widget_list **list)
{
	struct snd_soc_dapm_path *path;
	int con = 0;

	/* Cached result from an earlier walk (reset to -1 before each run) */
	if (widget->outputs >= 0)
		return widget->outputs;

	DAPM_UPDATE_STAT(widget, path_checks);

	/* An active, physically connected sink terminates the walk */
	if (widget->is_sink && widget->connected) {
		widget->outputs = snd_soc_dapm_suspend_check(widget);
		return widget->outputs;
	}

	list_for_each_entry(path, &widget->sinks, list_source) {
		DAPM_UPDATE_STAT(widget, neighbour_checks);

		if (path->weak || path->is_supply)
			continue;

		/* 'walking' breaks cycles: a path already on the recursion
		 * stack counts as one connected route */
		if (path->walking)
			return 1;

		trace_snd_soc_dapm_output_path(widget, path);

		if (path->connect) {
			path->walking = 1;

			/* do we need to add this widget to the list ? */
			if (list) {
				int err;
				err = dapm_list_add_widget(list, path->sink);
				if (err < 0) {
					dev_err(widget->dapm->dev,
						"ASoC: could not add widget %s\n",
						widget->name);
					path->walking = 0;
					return con;
				}
			}

			con += is_connected_output_ep(path->sink, list);

			path->walking = 0;
		}
	}

	widget->outputs = con;

	return con;
}

/*
 * Recursively check for a completed path to an active or physically connected
 * input widget. Returns number of complete paths.
 */
static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
	struct snd_soc_dapm_widget_list **list)
{
	struct snd_soc_dapm_path *path;
	int con = 0;

	/* Cached result from an earlier walk (reset to -1 before each run) */
	if (widget->inputs >= 0)
		return widget->inputs;

	DAPM_UPDATE_STAT(widget, path_checks);

	/* An active, physically connected source terminates the walk */
	if (widget->is_source && widget->connected) {
		widget->inputs = snd_soc_dapm_suspend_check(widget);
		return widget->inputs;
	}

	list_for_each_entry(path, &widget->sources, list_sink) {
		DAPM_UPDATE_STAT(widget, neighbour_checks);

		if (path->weak || path->is_supply)
			continue;

		/* 'walking' breaks cycles: a path already on the recursion
		 * stack counts as one connected route */
		if (path->walking)
			return 1;

		trace_snd_soc_dapm_input_path(widget, path);

		if (path->connect) {
			path->walking = 1;

			/* do we need to add this widget to the list ? */
			if (list) {
				int err;
				err = dapm_list_add_widget(list, path->source);
				if (err < 0) {
					dev_err(widget->dapm->dev,
						"ASoC: could not add widget %s\n",
						widget->name);
					path->walking = 0;
					return con;
				}
			}

			con += is_connected_input_ep(path->source, list);

			path->walking = 0;
		}
	}

	widget->inputs = con;

	return con;
}

/**
 * snd_soc_dapm_dai_get_connected_widgets - query audio path and it's widgets.
 * @dai: the soc DAI.
 * @stream: stream direction.
 * @list: list of active widgets for this stream.
 *
 * Queries DAPM graph as to whether a valid audio stream path exists for
 * the initial stream specified by name. This takes into account
 * current mixer and mux kcontrol settings. Creates list of valid widgets.
 *
 * Returns the number of valid paths or negative error.
 */
int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
	struct snd_soc_dapm_widget_list **list)
{
	struct snd_soc_card *card = dai->component->card;
	struct snd_soc_dapm_widget *w;
	int paths;

	mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);

	/*
	 * For is_connected_{output,input}_ep fully discover the graph we need
	 * to reset the cached number of inputs and outputs.
	 */
	list_for_each_entry(w, &card->widgets, list) {
		w->inputs = -1;
		w->outputs = -1;
	}

	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
		paths = is_connected_output_ep(dai->playback_widget, list);
	else
		paths = is_connected_input_ep(dai->capture_widget, list);

	trace_snd_soc_dapm_connected(paths, stream);
	mutex_unlock(&card->dapm_mutex);

	return paths;
}

/*
 * Handler for regulator supply widget.
 */
int dapm_regulator_event(struct snd_soc_dapm_widget *w,
			 struct snd_kcontrol *kcontrol, int event)
{
	int ret;

	soc_dapm_async_complete(w->dapm);

	if (SND_SOC_DAPM_EVENT_ON(event)) {
		/* Drop bypass (if configured) before enabling so the supply
		 * is genuinely regulated while the widget is powered */
		if (w->on_val & SND_SOC_DAPM_REGULATOR_BYPASS) {
			ret = regulator_allow_bypass(w->regulator, false);
			if (ret != 0)
				dev_warn(w->dapm->dev,
					 "ASoC: Failed to unbypass %s: %d\n",
					 w->name, ret);
		}

		return regulator_enable(w->regulator);
	} else {
		if (w->on_val & SND_SOC_DAPM_REGULATOR_BYPASS) {
			ret = regulator_allow_bypass(w->regulator, true);
			if (ret != 0)
				dev_warn(w->dapm->dev,
					 "ASoC: Failed to bypass %s: %d\n",
					 w->name, ret);
		}

		/* Deferred disable (w->shift ms) avoids needless off/on
		 * cycles across quick stream restarts */
		return regulator_disable_deferred(w->regulator, w->shift);
	}
}
EXPORT_SYMBOL_GPL(dapm_regulator_event);

/*
 * Handler for clock supply widget.
 */
int dapm_clock_event(struct snd_soc_dapm_widget *w,
		     struct snd_kcontrol *kcontrol, int event)
{
	if (!w->clk)
		return -EIO;

	soc_dapm_async_complete(w->dapm);

#ifdef CONFIG_HAVE_CLK
	if (SND_SOC_DAPM_EVENT_ON(event)) {
		return clk_prepare_enable(w->clk);
	} else {
		clk_disable_unprepare(w->clk);
		return 0;
	}
#endif
	/* No clock framework: report success without touching the clock */
	return 0;
}
EXPORT_SYMBOL_GPL(dapm_clock_event);

/* Cached power check: computed at most once per power run (see dapm_reset) */
static int dapm_widget_power_check(struct snd_soc_dapm_widget *w)
{
	if (w->power_checked)
		return w->new_power;

	/* A forced widget is always powered regardless of its power_check */
	if (w->force)
		w->new_power = 1;
	else
		w->new_power = w->power_check(w);

	w->power_checked = true;

	return w->new_power;
}

/* Generic check to see if a widget should be powered. */
static int dapm_generic_check_power(struct snd_soc_dapm_widget *w)
{
	int in, out;

	DAPM_UPDATE_STAT(w, power_checks);

	/* Powered only when the widget sits on at least one complete
	 * input route AND one complete output route */
	in = is_connected_input_ep(w, NULL);
	out = is_connected_output_ep(w, NULL);
	return out != 0 && in != 0;
}

/* Check to see if a power supply is needed */
static int dapm_supply_check_power(struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *path;

	DAPM_UPDATE_STAT(w, power_checks);

	/* Check if one of our outputs is connected */
	list_for_each_entry(path, &w->sinks, list_source) {
		DAPM_UPDATE_STAT(w, neighbour_checks);

		if (path->weak)
			continue;

		if (path->connected &&
		    !path->connected(path->source, path->sink))
			continue;

		if (dapm_widget_power_check(path->sink))
			return 1;
	}

	return 0;
}

/* Always-on widgets are unconditionally powered */
static int dapm_always_on_check_power(struct snd_soc_dapm_widget *w)
{
	return 1;
}

/*
 * Ordering comparator for power sequences: sort by sequence class, then
 * subsequence (reversed on power-down), then register, then DAPM context,
 * so that adjacent writes to the same register can be coalesced.
 */
static int dapm_seq_compare(struct snd_soc_dapm_widget *a,
			    struct snd_soc_dapm_widget *b,
			    bool power_up)
{
	int *sort;

	if (power_up)
		sort = dapm_up_seq;
	else
		sort = dapm_down_seq;

	if (sort[a->id] != sort[b->id])
		return sort[a->id] - sort[b->id];
	if (a->subseq != b->subseq) {
		if (power_up)
			return a->subseq - b->subseq;
		else
			return b->subseq - a->subseq;
	}
	if (a->reg != b->reg)
		return a->reg - b->reg;
	if (a->dapm != b->dapm)
		return (unsigned long)a->dapm - (unsigned long)b->dapm;

	return 0;
}

/* Insert a widget in order into a DAPM power sequence.
*/ static void dapm_seq_insert(struct snd_soc_dapm_widget *new_widget, struct list_head *list, bool power_up) { struct snd_soc_dapm_widget *w; list_for_each_entry(w, list, power_list) if (dapm_seq_compare(new_widget, w, power_up) < 0) { list_add_tail(&new_widget->power_list, &w->power_list); return; } list_add_tail(&new_widget->power_list, list); } static void dapm_seq_check_event(struct snd_soc_card *card, struct snd_soc_dapm_widget *w, int event) { const char *ev_name; int power, ret; switch (event) { case SND_SOC_DAPM_PRE_PMU: ev_name = "PRE_PMU"; power = 1; break; case SND_SOC_DAPM_POST_PMU: ev_name = "POST_PMU"; power = 1; break; case SND_SOC_DAPM_PRE_PMD: ev_name = "PRE_PMD"; power = 0; break; case SND_SOC_DAPM_POST_PMD: ev_name = "POST_PMD"; power = 0; break; case SND_SOC_DAPM_WILL_PMU: ev_name = "WILL_PMU"; power = 1; break; case SND_SOC_DAPM_WILL_PMD: ev_name = "WILL_PMD"; power = 0; break; default: WARN(1, "Unknown event %d\n", event); return; } if (w->new_power != power) return; if (w->event && (w->event_flags & event)) { pop_dbg(w->dapm->dev, card->pop_time, "pop test : %s %s\n", w->name, ev_name); soc_dapm_async_complete(w->dapm); trace_snd_soc_dapm_widget_event_start(w, event); ret = w->event(w, NULL, event); trace_snd_soc_dapm_widget_event_done(w, event); if (ret < 0) dev_err(w->dapm->dev, "ASoC: %s: %s event failed: %d\n", ev_name, w->name, ret); } } /* Apply the coalesced changes from a DAPM sequence */ static void dapm_seq_run_coalesced(struct snd_soc_card *card, struct list_head *pending) { struct snd_soc_dapm_context *dapm; struct snd_soc_dapm_widget *w; int reg; unsigned int value = 0; unsigned int mask = 0; w = list_first_entry(pending, struct snd_soc_dapm_widget, power_list); reg = w->reg; dapm = w->dapm; list_for_each_entry(w, pending, power_list) { WARN_ON(reg != w->reg || dapm != w->dapm); w->power = w->new_power; mask |= w->mask << w->shift; if (w->power) value |= w->on_val << w->shift; else value |= w->off_val << w->shift; 
pop_dbg(dapm->dev, card->pop_time, "pop test : Queue %s: reg=0x%x, 0x%x/0x%x\n", w->name, reg, value, mask); /* Check for events */ dapm_seq_check_event(card, w, SND_SOC_DAPM_PRE_PMU); dapm_seq_check_event(card, w, SND_SOC_DAPM_PRE_PMD); } if (reg >= 0) { /* Any widget will do, they should all be updating the * same register. */ pop_dbg(dapm->dev, card->pop_time, "pop test : Applying 0x%x/0x%x to %x in %dms\n", value, mask, reg, card->pop_time); pop_wait(card->pop_time); soc_dapm_update_bits(dapm, reg, mask, value); } list_for_each_entry(w, pending, power_list) { dapm_seq_check_event(card, w, SND_SOC_DAPM_POST_PMU); dapm_seq_check_event(card, w, SND_SOC_DAPM_POST_PMD); } } /* Apply a DAPM power sequence. * * We walk over a pre-sorted list of widgets to apply power to. In * order to minimise the number of writes to the device required * multiple widgets will be updated in a single write where possible. * Currently anything that requires more than a single write is not * handled. */ static void dapm_seq_run(struct snd_soc_card *card, struct list_head *list, int event, bool power_up) { struct snd_soc_dapm_widget *w, *n; struct snd_soc_dapm_context *d; LIST_HEAD(pending); int cur_sort = -1; int cur_subseq = -1; int cur_reg = SND_SOC_NOPM; struct snd_soc_dapm_context *cur_dapm = NULL; int ret, i; int *sort; if (power_up) sort = dapm_up_seq; else sort = dapm_down_seq; list_for_each_entry_safe(w, n, list, power_list) { ret = 0; /* Do we need to apply any queued changes? 
*/ if (sort[w->id] != cur_sort || w->reg != cur_reg || w->dapm != cur_dapm || w->subseq != cur_subseq) { if (!list_empty(&pending)) dapm_seq_run_coalesced(card, &pending); if (cur_dapm && cur_dapm->seq_notifier) { for (i = 0; i < ARRAY_SIZE(dapm_up_seq); i++) if (sort[i] == cur_sort) cur_dapm->seq_notifier(cur_dapm, i, cur_subseq); } if (cur_dapm && w->dapm != cur_dapm) soc_dapm_async_complete(cur_dapm); INIT_LIST_HEAD(&pending); cur_sort = -1; cur_subseq = INT_MIN; cur_reg = SND_SOC_NOPM; cur_dapm = NULL; } switch (w->id) { case snd_soc_dapm_pre: if (!w->event) list_for_each_entry_safe_continue(w, n, list, power_list); if (event == SND_SOC_DAPM_STREAM_START) ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMU); else if (event == SND_SOC_DAPM_STREAM_STOP) ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMD); break; case snd_soc_dapm_post: if (!w->event) list_for_each_entry_safe_continue(w, n, list, power_list); if (event == SND_SOC_DAPM_STREAM_START) ret = w->event(w, NULL, SND_SOC_DAPM_POST_PMU); else if (event == SND_SOC_DAPM_STREAM_STOP) ret = w->event(w, NULL, SND_SOC_DAPM_POST_PMD); break; default: /* Queue it up for application */ cur_sort = sort[w->id]; cur_subseq = w->subseq; cur_reg = w->reg; cur_dapm = w->dapm; list_move(&w->power_list, &pending); break; } if (ret < 0) dev_err(w->dapm->dev, "ASoC: Failed to apply widget power: %d\n", ret); } if (!list_empty(&pending)) dapm_seq_run_coalesced(card, &pending); if (cur_dapm && cur_dapm->seq_notifier) { for (i = 0; i < ARRAY_SIZE(dapm_up_seq); i++) if (sort[i] == cur_sort) cur_dapm->seq_notifier(cur_dapm, i, cur_subseq); } list_for_each_entry(d, &card->dapm_list, list) { soc_dapm_async_complete(d); } } static void dapm_widget_update(struct snd_soc_card *card) { struct snd_soc_dapm_update *update = card->update; struct snd_soc_dapm_widget_list *wlist; struct snd_soc_dapm_widget *w = NULL; unsigned int wi; int ret; if (!update || !dapm_kcontrol_is_powered(update->kcontrol)) return; wlist = 
dapm_kcontrol_get_wlist(update->kcontrol); for (wi = 0; wi < wlist->num_widgets; wi++) { w = wlist->widgets[wi]; if (w->event && (w->event_flags & SND_SOC_DAPM_PRE_REG)) { ret = w->event(w, update->kcontrol, SND_SOC_DAPM_PRE_REG); if (ret != 0) dev_err(w->dapm->dev, "ASoC: %s DAPM pre-event failed: %d\n", w->name, ret); } } if (!w) return; ret = soc_dapm_update_bits(w->dapm, update->reg, update->mask, update->val); if (ret < 0) dev_err(w->dapm->dev, "ASoC: %s DAPM update failed: %d\n", w->name, ret); for (wi = 0; wi < wlist->num_widgets; wi++) { w = wlist->widgets[wi]; if (w->event && (w->event_flags & SND_SOC_DAPM_POST_REG)) { ret = w->event(w, update->kcontrol, SND_SOC_DAPM_POST_REG); if (ret != 0) dev_err(w->dapm->dev, "ASoC: %s DAPM post-event failed: %d\n", w->name, ret); } } } /* Async callback run prior to DAPM sequences - brings to _PREPARE if * they're changing state. */ static void dapm_pre_sequence_async(void *data, async_cookie_t cookie) { struct snd_soc_dapm_context *d = data; int ret; /* If we're off and we're not supposed to be go into STANDBY */ if (d->bias_level == SND_SOC_BIAS_OFF && d->target_bias_level != SND_SOC_BIAS_OFF) { if (d->dev) pm_runtime_get_sync(d->dev); ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_STANDBY); if (ret != 0) dev_err(d->dev, "ASoC: Failed to turn on bias: %d\n", ret); } /* Prepare for a transition to ON or away from ON */ if ((d->target_bias_level == SND_SOC_BIAS_ON && d->bias_level != SND_SOC_BIAS_ON) || (d->target_bias_level != SND_SOC_BIAS_ON && d->bias_level == SND_SOC_BIAS_ON)) { ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_PREPARE); if (ret != 0) dev_err(d->dev, "ASoC: Failed to prepare bias: %d\n", ret); } } /* Async callback run prior to DAPM sequences - brings to their final * state. 
*/ static void dapm_post_sequence_async(void *data, async_cookie_t cookie) { struct snd_soc_dapm_context *d = data; int ret; /* If we just powered the last thing off drop to standby bias */ if (d->bias_level == SND_SOC_BIAS_PREPARE && (d->target_bias_level == SND_SOC_BIAS_STANDBY || d->target_bias_level == SND_SOC_BIAS_OFF)) { ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_STANDBY); if (ret != 0) dev_err(d->dev, "ASoC: Failed to apply standby bias: %d\n", ret); } /* If we're in standby and can support bias off then do that */ if (d->bias_level == SND_SOC_BIAS_STANDBY && d->target_bias_level == SND_SOC_BIAS_OFF) { ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_OFF); if (ret != 0) dev_err(d->dev, "ASoC: Failed to turn off bias: %d\n", ret); if (d->dev) pm_runtime_put(d->dev); } /* If we just powered up then move to active bias */ if (d->bias_level == SND_SOC_BIAS_PREPARE && d->target_bias_level == SND_SOC_BIAS_ON) { ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_ON); if (ret != 0) dev_err(d->dev, "ASoC: Failed to apply active bias: %d\n", ret); } } static void dapm_widget_set_peer_power(struct snd_soc_dapm_widget *peer, bool power, bool connect) { /* If a connection is being made or broken then that update * will have marked the peer dirty, otherwise the widgets are * not connected and this update has no impact. */ if (!connect) return; /* If the peer is already in the state we're moving to then we * won't have an impact on it. */ if (power != peer->power) dapm_mark_dirty(peer, "peer state change"); } static void dapm_widget_set_power(struct snd_soc_dapm_widget *w, bool power, struct list_head *up_list, struct list_head *down_list) { struct snd_soc_dapm_path *path; if (w->power == power) return; trace_snd_soc_dapm_widget_power(w, power); /* If we changed our power state perhaps our neigbours changed * also. 
*/
	list_for_each_entry(path, &w->sources, list_sink)
		dapm_widget_set_peer_power(path->source, power, path->connect);

	/* Supplies can't affect their outputs, only their inputs */
	if (!w->is_supply) {
		list_for_each_entry(path, &w->sinks, list_source)
			dapm_widget_set_peer_power(path->sink, power,
						   path->connect);
	}

	if (power)
		dapm_seq_insert(w, up_list, true);
	else
		dapm_seq_insert(w, down_list, false);
}

/* Decide a single widget's power state and queue it on the up or down
 * sequence list.  PRE/POST widgets are always sequenced so their events
 * bracket every run. */
static void dapm_power_one_widget(struct snd_soc_dapm_widget *w,
				  struct list_head *up_list,
				  struct list_head *down_list)
{
	int power;

	switch (w->id) {
	case snd_soc_dapm_pre:
		dapm_seq_insert(w, down_list, false);
		break;
	case snd_soc_dapm_post:
		dapm_seq_insert(w, up_list, true);
		break;

	default:
		power = dapm_widget_power_check(w);

		dapm_widget_set_power(w, power, up_list, down_list);
		break;
	}
}

/* Should this context idle at BIAS_OFF rather than BIAS_STANDBY?
 * Always true if idle_bias_off is set; during system suspend the
 * per-context suspend_bias_off flag decides. */
static bool dapm_idle_bias_off(struct snd_soc_dapm_context *dapm)
{
	if (dapm->idle_bias_off)
		return true;

	switch (snd_power_get_state(dapm->card->snd_card)) {
	case SNDRV_CTL_POWER_D3hot:
	case SNDRV_CTL_POWER_D3cold:
		return dapm->suspend_bias_off;
	default:
		break;
	}

	return false;
}

/*
 * Scan each dapm widget for complete audio path.
 * A complete path is a route that has valid endpoints i.e.:-
 *
 * o DAC to output pin.
 * o Input Pin to ADC.
 * o Input pin to Output pin (bypass, sidetone)
 * o DAC to ADC (loopback).
 */
static int dapm_power_widgets(struct snd_soc_card *card, int event)
{
	struct snd_soc_dapm_widget *w;
	struct snd_soc_dapm_context *d;
	LIST_HEAD(up_list);
	LIST_HEAD(down_list);
	ASYNC_DOMAIN_EXCLUSIVE(async_domain);
	enum snd_soc_bias_level bias;

	lockdep_assert_held(&card->dapm_mutex);

	trace_snd_soc_dapm_start(card);

	/* Start every context at its idle bias target; widgets found to be
	 * powered below will raise this. */
	list_for_each_entry(d, &card->dapm_list, list) {
		if (dapm_idle_bias_off(d))
			d->target_bias_level = SND_SOC_BIAS_OFF;
		else
			d->target_bias_level = SND_SOC_BIAS_STANDBY;
	}

	dapm_reset(card);

	/* Check which widgets we need to power and store them in
	 * lists indicating if they should be powered up or down.  We
	 * only check widgets that have been flagged as dirty but note
	 * that new widgets may be added to the dirty list while we
	 * iterate.
	 */
	list_for_each_entry(w, &card->dapm_dirty, dirty) {
		dapm_power_one_widget(w, &up_list, &down_list);
	}

	list_for_each_entry(w, &card->widgets, list) {
		switch (w->id) {
		case snd_soc_dapm_pre:
		case snd_soc_dapm_post:
			/* These widgets always need to be powered */
			break;
		default:
			list_del_init(&w->dirty);
			break;
		}

		if (w->new_power) {
			d = w->dapm;

			/* Supplies and micbiases only bring the
			 * context up to STANDBY as unless something
			 * else is active and passing audio they
			 * generally don't require full power.  Signal
			 * generators are virtual pins and have no
			 * power impact themselves.
			 */
			switch (w->id) {
			case snd_soc_dapm_siggen:
			case snd_soc_dapm_vmid:
				break;
			case snd_soc_dapm_supply:
			case snd_soc_dapm_regulator_supply:
			case snd_soc_dapm_clock_supply:
			case snd_soc_dapm_micbias:
				if (d->target_bias_level < SND_SOC_BIAS_STANDBY)
					d->target_bias_level = SND_SOC_BIAS_STANDBY;
				break;
			default:
				d->target_bias_level = SND_SOC_BIAS_ON;
				break;
			}
		}
	}

	/* Force all contexts in the card to the same bias state if
	 * they're not ground referenced.
	 */
	bias = SND_SOC_BIAS_OFF;
	list_for_each_entry(d, &card->dapm_list, list)
		if (d->target_bias_level > bias)
			bias = d->target_bias_level;
	list_for_each_entry(d, &card->dapm_list, list)
		if (!dapm_idle_bias_off(d))
			d->target_bias_level = bias;

	trace_snd_soc_dapm_walk_done(card);

	/* Run card bias changes at first */
	dapm_pre_sequence_async(&card->dapm, 0);
	/* Run other bias changes in parallel */
	list_for_each_entry(d, &card->dapm_list, list) {
		if (d != &card->dapm)
			async_schedule_domain(dapm_pre_sequence_async, d,
						&async_domain);
	}
	async_synchronize_full_domain(&async_domain);

	/* Give widgets a chance to veto/observe the coming transitions */
	list_for_each_entry(w, &down_list, power_list) {
		dapm_seq_check_event(card, w, SND_SOC_DAPM_WILL_PMD);
	}

	list_for_each_entry(w, &up_list, power_list) {
		dapm_seq_check_event(card, w, SND_SOC_DAPM_WILL_PMU);
	}

	/* Power down widgets first; try to avoid amplifying pops. */
	dapm_seq_run(card, &down_list, event, false);

	dapm_widget_update(card);

	/* Now power up. */
	dapm_seq_run(card, &up_list, event, true);

	/* Run all the bias changes in parallel */
	list_for_each_entry(d, &card->dapm_list, list) {
		if (d != &card->dapm)
			async_schedule_domain(dapm_post_sequence_async, d,
						&async_domain);
	}
	async_synchronize_full_domain(&async_domain);
	/* Run card bias changes at last */
	dapm_post_sequence_async(&card->dapm, 0);

	/* do we need to notify any clients that DAPM event is complete */
	list_for_each_entry(d, &card->dapm_list, list) {
		if (d->stream_event)
			d->stream_event(d, event);
	}

	pop_dbg(card->dev, card->pop_time,
		"DAPM sequencing finished, waiting %dms\n", card->pop_time);
	pop_wait(card->pop_time);

	trace_snd_soc_dapm_done(card);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
/* debugfs read handler: dump one widget's power state, register and
 * connected input/output paths into a PAGE_SIZE buffer. */
static ssize_t dapm_widget_power_read_file(struct file *file,
					   char __user *user_buf,
					   size_t count, loff_t *ppos)
{
	struct snd_soc_dapm_widget *w = file->private_data;
	char *buf;
	int in, out;
	ssize_t ret;
	struct snd_soc_dapm_path *p = NULL;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Supply widgets are not handled by
is_connected_{input,output}_ep() */
	if (w->is_supply) {
		in = 0;
		out = 0;
	} else {
		in = is_connected_input_ep(w, NULL);
		out = is_connected_output_ep(w, NULL);
	}

	/* NOTE(review): snprintf returns the would-be length, so if output
	 * ever approached PAGE_SIZE these increments could overrun the
	 * accounting; scnprintf would be the safer choice — confirm before
	 * changing behavior. */
	ret = snprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d",
		       w->name, w->power ? "On" : "Off",
		       w->force ? " (forced)" : "", in, out);

	if (w->reg >= 0)
		ret += snprintf(buf + ret, PAGE_SIZE - ret,
				" - R%d(0x%x) mask 0x%x",
				w->reg, w->reg, w->mask << w->shift);

	ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");

	if (w->sname)
		ret += snprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
				w->sname,
				w->active ? "active" : "inactive");

	list_for_each_entry(p, &w->sources, list_sink) {
		if (p->connected && !p->connected(w, p->source))
			continue;

		if (p->connect)
			ret += snprintf(buf + ret, PAGE_SIZE - ret,
					" in \"%s\" \"%s\"\n",
					p->name ? p->name : "static",
					p->source->name);
	}
	list_for_each_entry(p, &w->sinks, list_source) {
		if (p->connected && !p->connected(w, p->sink))
			continue;

		if (p->connect)
			ret += snprintf(buf + ret, PAGE_SIZE - ret,
					" out \"%s\" \"%s\"\n",
					p->name ? p->name : "static",
					p->sink->name);
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);

	kfree(buf);
	return ret;
}

static const struct file_operations dapm_widget_power_fops = {
	.open = simple_open,
	.read = dapm_widget_power_read_file,
	.llseek = default_llseek,
};

/* debugfs read handler: report the context's current bias level */
static ssize_t dapm_bias_read_file(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct snd_soc_dapm_context *dapm = file->private_data;
	char *level;

	switch (dapm->bias_level) {
	case SND_SOC_BIAS_ON:
		level = "On\n";
		break;
	case SND_SOC_BIAS_PREPARE:
		level = "Prepare\n";
		break;
	case SND_SOC_BIAS_STANDBY:
		level = "Standby\n";
		break;
	case SND_SOC_BIAS_OFF:
		level = "Off\n";
		break;
	default:
		WARN(1, "Unknown bias_level %d\n", dapm->bias_level);
		level = "Unknown\n";
		break;
	}

	return simple_read_from_buffer(user_buf, count, ppos, level,
				       strlen(level));
}

static const struct file_operations dapm_bias_fops = {
	.open = simple_open,
	.read = dapm_bias_read_file,
	.llseek = default_llseek,
};

/* Create the per-context "dapm" debugfs directory and its bias_level
 * file; individual widget files are added by dapm_debugfs_add_widget(). */
void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm,
	struct dentry *parent)
{
	struct dentry *d;

	if (!parent)
		return;

	dapm->debugfs_dapm = debugfs_create_dir("dapm", parent);

	if (!dapm->debugfs_dapm) {
		dev_warn(dapm->dev,
		       "ASoC: Failed to create DAPM debugfs directory\n");
		return;
	}

	d = debugfs_create_file("bias_level", 0444,
				dapm->debugfs_dapm, dapm,
				&dapm_bias_fops);
	if (!d)
		dev_warn(dapm->dev,
			 "ASoC: Failed to create bias level debugfs file\n");
}

/* Expose one widget's power state via a read-only debugfs file */
static void dapm_debugfs_add_widget(struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct dentry *d;

	if (!dapm->debugfs_dapm || !w->name)
		return;

	d = debugfs_create_file(w->name, 0444,
				dapm->debugfs_dapm, w,
				&dapm_widget_power_fops);
	if (!d)
		dev_warn(w->dapm->dev,
			"ASoC: Failed to create %s debugfs file\n",
			w->name);
}

static void dapm_debugfs_cleanup(struct snd_soc_dapm_context *dapm)
{
	debugfs_remove_recursive(dapm->debugfs_dapm);
}

#else
void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm,
	struct dentry *parent)
{
}

static inline void dapm_debugfs_add_widget(struct snd_soc_dapm_widget *w)
{
}

static inline void dapm_debugfs_cleanup(struct snd_soc_dapm_context *dapm)
{
}

#endif

/*
 * soc_dapm_connect_path() - Connects or disconnects a path
 * @path: The path to update
 * @connect: The new connect state of the path. True if the path is connected,
 *  false if it is disconnected.
 * @reason: The reason why the path changed (for debugging only)
 */
static void soc_dapm_connect_path(struct snd_soc_dapm_path *path,
	bool connect, const char *reason)
{
	if (path->connect == connect)
		return;

	path->connect = connect;
	dapm_mark_dirty(path->source, reason);
	dapm_mark_dirty(path->sink, reason);

	dapm_path_invalidate(path);
}

/* test and update the power status of a mux widget */
static int soc_dapm_mux_update_power(struct snd_soc_card *card,
				 struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e)
{
	struct snd_soc_dapm_path *path;
	int found = 0;
	bool connect;

	lockdep_assert_held(&card->dapm_mutex);

	/* find dapm widget path assoc with kcontrol */
	dapm_kcontrol_for_each_path(path, kcontrol) {
		found = 1;
		/* we now need to match the string in the enum to the path */
		if (!(strcmp(path->name, e->texts[mux])))
			connect = true;
		else
			connect = false;

		soc_dapm_connect_path(path, connect, "mux update");
	}

	if (found)
		dapm_power_widgets(card, SND_SOC_DAPM_STREAM_NOP);

	return found;
}

/* Locked wrapper around soc_dapm_mux_update_power(); also kicks DPCM
 * runtime updates when any path changed. */
int snd_soc_dapm_mux_update_power(struct snd_soc_dapm_context *dapm,
	struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e,
	struct snd_soc_dapm_update *update)
{
	struct snd_soc_card *card = dapm->card;
	int ret;

	mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
	card->update = update;
	ret = soc_dapm_mux_update_power(card, kcontrol, mux, e);
	card->update = NULL;
	mutex_unlock(&card->dapm_mutex);
	if (ret > 0)
		soc_dpcm_runtime_update(card);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_mux_update_power);

/* test and update the power status of a mixer or switch widget */
static int
soc_dapm_mixer_update_power(struct snd_soc_card *card,
	struct snd_kcontrol *kcontrol, int connect)
{
	struct snd_soc_dapm_path *path;
	int found = 0;

	lockdep_assert_held(&card->dapm_mutex);

	/* find dapm widget path assoc with kcontrol */
	dapm_kcontrol_for_each_path(path, kcontrol) {
		found = 1;
		soc_dapm_connect_path(path, connect, "mixer update");
	}

	if (found)
		dapm_power_widgets(card, SND_SOC_DAPM_STREAM_NOP);

	return found;
}

/* Locked wrapper around soc_dapm_mixer_update_power(); also kicks DPCM
 * runtime updates when any path changed. */
int snd_soc_dapm_mixer_update_power(struct snd_soc_dapm_context *dapm,
	struct snd_kcontrol *kcontrol, int connect,
	struct snd_soc_dapm_update *update)
{
	struct snd_soc_card *card = dapm->card;
	int ret;

	mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
	card->update = update;
	ret = soc_dapm_mixer_update_power(card, kcontrol, connect);
	card->update = NULL;
	mutex_unlock(&card->dapm_mutex);
	if (ret > 0)
		soc_dpcm_runtime_update(card);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_mixer_update_power);

/* Format one codec's widget power states and bias level into buf for the
 * dapm_widget sysfs attribute; returns the number of bytes written. */
static ssize_t dapm_widget_show_codec(struct snd_soc_codec *codec, char *buf)
{
	struct snd_soc_dapm_widget *w;
	int count = 0;
	char *state = "not set";

	list_for_each_entry(w, &codec->component.card->widgets, list) {
		if (w->dapm != &codec->dapm)
			continue;

		/* only display widgets that burn power */
		switch (w->id) {
		case snd_soc_dapm_hp:
		case snd_soc_dapm_mic:
		case snd_soc_dapm_spk:
		case snd_soc_dapm_line:
		case snd_soc_dapm_micbias:
		case snd_soc_dapm_dac:
		case snd_soc_dapm_adc:
		case snd_soc_dapm_pga:
		case snd_soc_dapm_out_drv:
		case snd_soc_dapm_mixer:
		case snd_soc_dapm_mixer_named_ctl:
		case snd_soc_dapm_supply:
		case snd_soc_dapm_regulator_supply:
		case snd_soc_dapm_clock_supply:
			if (w->name)
				count += sprintf(buf + count, "%s: %s\n",
					w->name, w->power ? "On":"Off");
			break;
		default:
			break;
		}
	}

	switch (codec->dapm.bias_level) {
	case SND_SOC_BIAS_ON:
		state = "On";
		break;
	case SND_SOC_BIAS_PREPARE:
		state = "Prepare";
		break;
	case SND_SOC_BIAS_STANDBY:
		state = "Standby";
		break;
	case SND_SOC_BIAS_OFF:
		state = "Off";
		break;
	}
	count += sprintf(buf + count, "PM State: %s\n", state);

	return count;
}

/* show dapm widget status in sys fs */
static ssize_t dapm_widget_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
	int i, count = 0;

	for (i = 0; i < rtd->num_codecs; i++) {
		struct snd_soc_codec *codec = rtd->codec_dais[i]->codec;
		count += dapm_widget_show_codec(codec, buf + count);
	}

	return count;
}

static DEVICE_ATTR(dapm_widget, 0444, dapm_widget_show, NULL);

struct attribute *soc_dapm_dev_attrs[] = {
	&dev_attr_dapm_widget.attr,
	NULL
};

/* Unlink a path from all four lists it lives on and free it */
static void dapm_free_path(struct snd_soc_dapm_path *path)
{
	list_del(&path->list_sink);
	list_del(&path->list_source);
	list_del(&path->list_kcontrol);
	list_del(&path->list);
	kfree(path);
}

/* free all dapm widgets and resources */
static void dapm_free_widgets(struct snd_soc_dapm_context *dapm)
{
	struct snd_soc_dapm_widget *w, *next_w;
	struct snd_soc_dapm_path *p, *next_p;

	list_for_each_entry_safe(w, next_w, &dapm->card->widgets, list) {
		if (w->dapm != dapm)
			continue;
		list_del(&w->list);
		/*
		 * remove source and sink paths associated to this widget.
		 * While removing the path, remove reference to it from both
		 * source and sink widgets so that path is removed only once.
		 */
		list_for_each_entry_safe(p, next_p, &w->sources, list_sink)
			dapm_free_path(p);

		list_for_each_entry_safe(p, next_p, &w->sinks, list_source)
			dapm_free_path(p);

		kfree(w->kcontrols);
		kfree(w->name);
		kfree(w);
	}
}

/* Look a widget up by name; a match from this context wins, otherwise a
 * same-named widget from another context is returned only when
 * search_other_contexts is set. */
static struct snd_soc_dapm_widget *dapm_find_widget(
			struct snd_soc_dapm_context *dapm, const char *pin,
			bool search_other_contexts)
{
	struct snd_soc_dapm_widget *w;
	struct snd_soc_dapm_widget *fallback = NULL;

	list_for_each_entry(w, &dapm->card->widgets, list) {
		if (!strcmp(w->name, pin)) {
			if (w->dapm == dapm)
				return w;
			else
				fallback = w;
		}
	}

	if (search_other_contexts)
		return fallback;

	return NULL;
}

/* Set a pin's connected status, marking it dirty and invalidating cached
 * path information when the status actually changes.  Clearing a pin also
 * drops any force flag. */
static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
				const char *pin, int status)
{
	struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);

	dapm_assert_locked(dapm);

	if (!w) {
		dev_err(dapm->dev, "ASoC: DAPM unknown pin %s\n", pin);
		return -EINVAL;
	}

	if (w->connected != status) {
		dapm_mark_dirty(w, "pin configuration");
		dapm_widget_invalidate_input_paths(w);
		dapm_widget_invalidate_output_paths(w);
	}

	w->connected = status;
	if (status == 0)
		w->force = 0;

	return 0;
}

/**
 * snd_soc_dapm_sync_unlocked - scan and power dapm paths
 * @dapm: DAPM context
 *
 * Walks all dapm audio paths and powers widgets according to their
 * stream or path usage.
 *
 * Requires external locking.
 *
 * Returns 0 for success.
 */
int snd_soc_dapm_sync_unlocked(struct snd_soc_dapm_context *dapm)
{
	/*
	 * Suppress early reports (eg, jacks syncing their state) to avoid
	 * silly DAPM runs during card startup.
	 */
	if (!dapm->card || !dapm->card->instantiated)
		return 0;

	return dapm_power_widgets(dapm->card, SND_SOC_DAPM_STREAM_NOP);
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_sync_unlocked);

/**
 * snd_soc_dapm_sync - scan and power dapm paths
 * @dapm: DAPM context
 *
 * Walks all dapm audio paths and powers widgets according to their
 * stream or path usage.
 *
 * Returns 0 for success.
*/
int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm)
{
	int ret;

	mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
	ret = snd_soc_dapm_sync_unlocked(dapm);
	mutex_unlock(&dapm->card->dapm_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_sync);

/*
 * dapm_update_widget_flags() - Re-compute widget sink and source flags
 * @w: The widget for which to update the flags
 *
 * Some widgets have a dynamic category which depends on which neighbors they
 * are connected to. This function update the category for these widgets.
 *
 * This function must be called whenever a path is added or removed to a widget.
 */
static void dapm_update_widget_flags(struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;

	switch (w->id) {
	case snd_soc_dapm_input:
		/* On a fully routed card a input is never a source */
		if (w->dapm->card->fully_routed)
			break;
		w->is_source = 1;
		list_for_each_entry(p, &w->sources, list_sink) {
			if (p->source->id == snd_soc_dapm_micbias ||
				p->source->id == snd_soc_dapm_mic ||
				p->source->id == snd_soc_dapm_line ||
				p->source->id == snd_soc_dapm_output) {
					w->is_source = 0;
					break;
			}
		}
		break;
	case snd_soc_dapm_output:
		/* On a fully routed card a output is never a sink */
		if (w->dapm->card->fully_routed)
			break;
		w->is_sink = 1;
		list_for_each_entry(p, &w->sinks, list_source) {
			if (p->sink->id == snd_soc_dapm_spk ||
				p->sink->id == snd_soc_dapm_hp ||
				p->sink->id == snd_soc_dapm_line ||
				p->sink->id == snd_soc_dapm_input) {
					w->is_sink = 0;
					break;
			}
		}
		break;
	case snd_soc_dapm_line:
		w->is_sink = !list_empty(&w->sources);
		w->is_source = !list_empty(&w->sinks);
		break;
	default:
		break;
	}
}

/* Validate that a controlled (named) path has exactly one dynamic end:
 * either a demux source or a mux/switch/mixer sink, but not both and
 * not neither. */
static int snd_soc_dapm_check_dynamic_path(struct snd_soc_dapm_context *dapm,
	struct snd_soc_dapm_widget *source, struct snd_soc_dapm_widget *sink,
	const char *control)
{
	bool dynamic_source = false;
	bool dynamic_sink = false;

	if (!control)
		return 0;

	switch (source->id) {
	case snd_soc_dapm_demux:
		dynamic_source = true;
		break;
	default:
		break;
	}

	switch (sink->id) {
	case snd_soc_dapm_mux:
	case snd_soc_dapm_switch:
	case snd_soc_dapm_mixer:
	case snd_soc_dapm_mixer_named_ctl:
		dynamic_sink = true;
		break;
	default:
		break;
	}

	if (dynamic_source && dynamic_sink) {
		dev_err(dapm->dev,
			"Direct connection between demux and mixer/mux not supported for path %s -> [%s] -> %s\n",
			source->name, control, sink->name);
		return -EINVAL;
	} else if (!dynamic_source && !dynamic_sink) {
		dev_err(dapm->dev,
			"Control not supported for path %s -> [%s] -> %s\n",
			source->name, control, sink->name);
		return -EINVAL;
	}

	return 0;
}

/* Allocate and wire up a single path between two widgets, connecting it
 * to a control when one is named.  On success the path is linked into the
 * card's path list and both widgets' source/sink lists. */
static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
	struct snd_soc_dapm_widget *wsource, struct snd_soc_dapm_widget *wsink,
	const char *control,
	int (*connected)(struct snd_soc_dapm_widget *source,
			 struct snd_soc_dapm_widget *sink))
{
	struct snd_soc_dapm_path *path;
	int ret;

	if (wsink->is_supply && !wsource->is_supply) {
		dev_err(dapm->dev,
			"Connecting non-supply widget to supply widget is not supported (%s -> %s)\n",
			wsource->name, wsink->name);
		return -EINVAL;
	}

	if (connected && !wsource->is_supply) {
		dev_err(dapm->dev,
			"connected() callback only supported for supply widgets (%s -> %s)\n",
			wsource->name, wsink->name);
		return -EINVAL;
	}

	if (wsource->is_supply && control) {
		dev_err(dapm->dev,
			"Conditional paths are not supported for supply widgets (%s -> [%s] -> %s)\n",
			wsource->name, control, wsink->name);
		return -EINVAL;
	}

	ret = snd_soc_dapm_check_dynamic_path(dapm, wsource, wsink, control);
	if (ret)
		return ret;

	path = kzalloc(sizeof(struct snd_soc_dapm_path), GFP_KERNEL);
	if (!path)
		return -ENOMEM;

	path->source = wsource;
	path->sink = wsink;
	path->connected = connected;
	INIT_LIST_HEAD(&path->list);
	INIT_LIST_HEAD(&path->list_kcontrol);
	INIT_LIST_HEAD(&path->list_source);
	INIT_LIST_HEAD(&path->list_sink);

	if (wsource->is_supply || wsink->is_supply)
		path->is_supply = 1;

	/* connect static paths */
	if (control == NULL) {
		path->connect = 1;
	} else {
		switch (wsource->id) {
		case snd_soc_dapm_demux:
			ret = dapm_connect_mux(dapm, path, control, wsource);
			if (ret)
				goto err;
			break;
		default:
			break;
		}

		switch (wsink->id) {
		case snd_soc_dapm_mux:
			ret = dapm_connect_mux(dapm, path, control, wsink);
			if (ret != 0)
				goto err;
			break;
		case snd_soc_dapm_switch:
		case snd_soc_dapm_mixer:
		case snd_soc_dapm_mixer_named_ctl:
			ret = dapm_connect_mixer(dapm, path, control);
			if (ret != 0)
				goto err;
			break;
		default:
			break;
		}
	}

	list_add(&path->list, &dapm->card->paths);
	list_add(&path->list_sink, &wsink->sources);
	list_add(&path->list_source, &wsource->sinks);

	dapm_update_widget_flags(wsource);
	dapm_update_widget_flags(wsink);

	dapm_mark_dirty(wsource, "Route added");
	dapm_mark_dirty(wsink, "Route added");

	if (dapm->card->instantiated && path->connect)
		dapm_path_invalidate(path);

	return 0;
err:
	kfree(path);
	return ret;
}

/* Resolve a route's source and sink names (with any context prefix) to
 * widgets and add the path between them. */
static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
				  const struct snd_soc_dapm_route *route)
{
	struct snd_soc_dapm_widget *wsource = NULL, *wsink = NULL, *w;
	struct snd_soc_dapm_widget *wtsource = NULL, *wtsink = NULL;
	const char *sink;
	const char *source;
	char prefixed_sink[80];
	char prefixed_source[80];
	const char *prefix;
	int ret;

	prefix = soc_dapm_prefix(dapm);
	if (prefix) {
		snprintf(prefixed_sink, sizeof(prefixed_sink), "%s %s",
			 prefix, route->sink);
		sink = prefixed_sink;
		snprintf(prefixed_source, sizeof(prefixed_source), "%s %s",
			 prefix, route->source);
		source = prefixed_source;
	} else {
		sink = route->sink;
		source = route->source;
	}

	wsource = dapm_wcache_lookup(&dapm->path_source_cache, source);
	wsink = dapm_wcache_lookup(&dapm->path_sink_cache, sink);

	if (wsink && wsource)
		goto skip_search;

	/*
	 * find src and dest widgets over all widgets but favor a widget from
	 * current DAPM context
	 */
	list_for_each_entry(w, &dapm->card->widgets, list) {
		if (!wsink && !(strcmp(w->name, sink))) {
			wtsink = w;
			if (w->dapm == dapm) {
				wsink = w;
				if (wsource)
					break;
			}
			continue;
		}
		if (!wsource && !(strcmp(w->name, source))) {
			wtsource = w;
			if (w->dapm == dapm) {
				wsource = w;
				if (wsink)
					break;
			}
		}
	}
/* use widget from another DAPM context if not found from this */
	if (!wsink)
		wsink = wtsink;
	if (!wsource)
		wsource = wtsource;

	if (wsource == NULL) {
		dev_err(dapm->dev, "ASoC: no source widget found for %s\n",
			route->source);
		return -ENODEV;
	}
	if (wsink == NULL) {
		dev_err(dapm->dev, "ASoC: no sink widget found for %s\n",
			route->sink);
		return -ENODEV;
	}

skip_search:
	/* Remember these endpoints; consecutive routes often share them */
	dapm_wcache_update(&dapm->path_sink_cache, wsink);
	dapm_wcache_update(&dapm->path_source_cache, wsource);

	ret = snd_soc_dapm_add_path(dapm, wsource, wsink, route->control,
		route->connected);
	if (ret)
		goto err;

	return 0;
err:
	dev_warn(dapm->dev, "ASoC: no dapm match for %s --> %s --> %s\n",
		 source, route->control, sink);
	return ret;
}

/* Remove the first path whose source and sink names match the route.
 * Routes with controls cannot be removed. */
static int snd_soc_dapm_del_route(struct snd_soc_dapm_context *dapm,
				  const struct snd_soc_dapm_route *route)
{
	struct snd_soc_dapm_widget *wsource, *wsink;
	struct snd_soc_dapm_path *path, *p;
	const char *sink;
	const char *source;
	char prefixed_sink[80];
	char prefixed_source[80];
	const char *prefix;

	if (route->control) {
		dev_err(dapm->dev,
			"ASoC: Removal of routes with controls not supported\n");
		return -EINVAL;
	}

	prefix = soc_dapm_prefix(dapm);
	if (prefix) {
		snprintf(prefixed_sink, sizeof(prefixed_sink), "%s %s",
			 prefix, route->sink);
		sink = prefixed_sink;
		snprintf(prefixed_source, sizeof(prefixed_source), "%s %s",
			 prefix, route->source);
		source = prefixed_source;
	} else {
		sink = route->sink;
		source = route->source;
	}

	path = NULL;
	list_for_each_entry(p, &dapm->card->paths, list) {
		if (strcmp(p->source->name, source) != 0)
			continue;
		if (strcmp(p->sink->name, sink) != 0)
			continue;
		path = p;
		break;
	}

	if (path) {
		wsource = path->source;
		wsink = path->sink;

		dapm_mark_dirty(wsource, "Route removed");
		dapm_mark_dirty(wsink, "Route removed");
		if (path->connect)
			dapm_path_invalidate(path);

		dapm_free_path(path);

		/* Update any path related flags */
		dapm_update_widget_flags(wsource);
		dapm_update_widget_flags(wsink);
	} else {
		dev_warn(dapm->dev, "ASoC: Route %s->%s does not exist\n",
			 source, sink);
	}

	return 0;
}

/**
 * snd_soc_dapm_add_routes - Add routes between DAPM widgets
 * @dapm: DAPM context
 * @route: audio routes
 * @num: number of routes
 *
 * Connects 2 dapm widgets together via a named audio path. The sink is
 * the widget receiving the audio signal, whilst the source is the sender
 * of the audio signal.
 *
 * Returns 0 for success else error. On error all resources can be freed
 * with a call to snd_soc_card_free().
 */
int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm,
			    const struct snd_soc_dapm_route *route, int num)
{
	int i, r, ret = 0;

	mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_INIT);
	for (i = 0; i < num; i++) {
		/* Keep going after a failure so all addable routes are
		 * added; report the last error. */
		r = snd_soc_dapm_add_route(dapm, route);
		if (r < 0) {
			dev_err(dapm->dev, "ASoC: Failed to add route %s -> %s -> %s\n",
				route->source,
				route->control ? route->control : "direct",
				route->sink);
			ret = r;
		}
		route++;
	}
	mutex_unlock(&dapm->card->dapm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_add_routes);

/**
 * snd_soc_dapm_del_routes - Remove routes between DAPM widgets
 * @dapm: DAPM context
 * @route: audio routes
 * @num: number of routes
 *
 * Removes routes from the DAPM context.
*/
int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm,
			    const struct snd_soc_dapm_route *route, int num)
{
	int i, ret = 0;

	mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_INIT);
	for (i = 0; i < num; i++) {
		snd_soc_dapm_del_route(dapm, route);
		route++;
	}
	mutex_unlock(&dapm->card->dapm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_del_routes);

/* Mark every existing path matching the route as weak; see
 * snd_soc_dapm_weak_routes() below for the semantics. */
static int snd_soc_dapm_weak_route(struct snd_soc_dapm_context *dapm,
				   const struct snd_soc_dapm_route *route)
{
	struct snd_soc_dapm_widget *source = dapm_find_widget(dapm,
							      route->source,
							      true);
	struct snd_soc_dapm_widget *sink = dapm_find_widget(dapm,
							    route->sink,
							    true);
	struct snd_soc_dapm_path *path;
	int count = 0;

	if (!source) {
		dev_err(dapm->dev, "ASoC: Unable to find source %s for weak route\n",
			route->source);
		return -ENODEV;
	}
	if (!sink) {
		dev_err(dapm->dev, "ASoC: Unable to find sink %s for weak route\n",
			route->sink);
		return -ENODEV;
	}

	if (route->control || route->connected)
		dev_warn(dapm->dev, "ASoC: Ignoring control for weak route %s->%s\n",
			 route->source, route->sink);

	list_for_each_entry(path, &source->sinks, list_source) {
		if (path->sink == sink) {
			path->weak = 1;
			count++;
		}
	}

	if (count == 0)
		dev_err(dapm->dev, "ASoC: No path found for weak route %s->%s\n",
			route->source, route->sink);
	if (count > 1)
		dev_warn(dapm->dev, "ASoC: %d paths found for weak route %s->%s\n",
			 count, route->source, route->sink);

	return 0;
}

/**
 * snd_soc_dapm_weak_routes - Mark routes between DAPM widgets as weak
 * @dapm: DAPM context
 * @route: audio routes
 * @num: number of routes
 *
 * Mark existing routes matching those specified in the passed array
 * as being weak, meaning that they are ignored for the purpose of
 * power decisions.  The main intended use case is for sidetone paths
 * which couple audio between other independent paths if they are both
 * active in order to make the combination work better at the user
 * level but which aren't intended to be "used".
 *
 * Note that CODEC drivers should not use this as sidetone type paths
 * can frequently also be used as bypass paths.
 */
int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm,
			     const struct snd_soc_dapm_route *route, int num)
{
	int i, err;
	int ret = 0;

	mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_INIT);
	for (i = 0; i < num; i++) {
		err = snd_soc_dapm_weak_route(dapm, route);
		if (err)
			ret = err;
		route++;
	}
	mutex_unlock(&dapm->card->dapm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_weak_routes);

/**
 * snd_soc_dapm_new_widgets - add new dapm widgets
 * @card: The card whose uninstantiated widgets should be created
 *
 * Checks the codec for any new dapm widgets and creates them if found.
 *
 * Returns 0 for success.
 */
int snd_soc_dapm_new_widgets(struct snd_soc_card *card)
{
	struct snd_soc_dapm_widget *w;
	unsigned int val;

	mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_INIT);

	list_for_each_entry(w, &card->widgets, list) {
		if (w->new)
			continue;

		if (w->num_kcontrols) {
			w->kcontrols = kzalloc(w->num_kcontrols *
						sizeof(struct snd_kcontrol *),
						GFP_KERNEL);
			if (!w->kcontrols) {
				mutex_unlock(&card->dapm_mutex);
				return -ENOMEM;
			}
		}

		/* Instantiate the kcontrols appropriate to the widget type */
		switch (w->id) {
		case snd_soc_dapm_switch:
		case snd_soc_dapm_mixer:
		case snd_soc_dapm_mixer_named_ctl:
			dapm_new_mixer(w);
			break;
		case snd_soc_dapm_mux:
		case snd_soc_dapm_demux:
			dapm_new_mux(w);
			break;
		case snd_soc_dapm_pga:
		case snd_soc_dapm_out_drv:
			dapm_new_pga(w);
			break;
		case snd_soc_dapm_dai_link:
			dapm_new_dai_link(w);
			break;
		default:
			break;
		}

		/* Read the initial power state from the device */
		if (w->reg >= 0) {
			soc_dapm_read(w->dapm, w->reg, &val);
			val = val >> w->shift;
			val &= w->mask;
			if (val == w->on_val)
				w->power = 1;
		}

		w->new = 1;

		dapm_mark_dirty(w, "new widget");
		dapm_debugfs_add_widget(w);
	}

	dapm_power_widgets(card, SND_SOC_DAPM_STREAM_NOP);
	mutex_unlock(&card->dapm_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_new_widgets);

/**
 * snd_soc_dapm_get_volsw - dapm mixer get callback
 * @kcontrol: mixer control
 * @ucontrol: control element
information
 *
 * Callback to get the value of a dapm mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
	struct snd_soc_card *card = dapm->card;
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	int reg = mc->reg;
	unsigned int shift = mc->shift;
	int max = mc->max;
	unsigned int mask = (1 << fls(max)) - 1;
	unsigned int invert = mc->invert;
	unsigned int val;
	int ret = 0;

	if (snd_soc_volsw_is_stereo(mc))
		dev_warn(dapm->dev,
			 "ASoC: Control '%s' is stereo, which is not supported\n",
			 kcontrol->id.name);

	mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
	/* Read from hardware only while the control is powered; otherwise
	 * report the cached value. */
	if (dapm_kcontrol_is_powered(kcontrol) && reg != SND_SOC_NOPM) {
		ret = soc_dapm_read(dapm, reg, &val);
		val = (val >> shift) & mask;
	} else {
		val = dapm_kcontrol_get_value(kcontrol);
	}
	mutex_unlock(&card->dapm_mutex);

	if (invert)
		ucontrol->value.integer.value[0] = max - val;
	else
		ucontrol->value.integer.value[0] = val;

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_get_volsw);

/**
 * snd_soc_dapm_put_volsw - dapm mixer set callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to set the value of a dapm mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
	struct snd_soc_card *card = dapm->card;
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	int reg = mc->reg;
	unsigned int shift = mc->shift;
	int max = mc->max;
	unsigned int mask = (1 << fls(max)) - 1;
	unsigned int invert = mc->invert;
	unsigned int val;
	int connect, change, reg_change = 0;
	struct snd_soc_dapm_update update;
	int ret = 0;

	if (snd_soc_volsw_is_stereo(mc))
		dev_warn(dapm->dev,
			 "ASoC: Control '%s' is stereo, which is not supported\n",
			 kcontrol->id.name);

	val = (ucontrol->value.integer.value[0] & mask);
	/* A non-zero user value means the path is connected */
	connect = !!val;

	if (invert)
		val = max - val;

	mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);

	change = dapm_kcontrol_set_value(kcontrol, val);

	if (reg != SND_SOC_NOPM) {
		mask = mask << shift;
		val = val << shift;

		reg_change = soc_dapm_test_bits(dapm, reg, mask, val);
	}

	if (change || reg_change) {
		if (reg_change) {
			update.kcontrol = kcontrol;
			update.reg = reg;
			update.mask = mask;
			update.val = val;
			card->update = &update;
		}
		change |= reg_change;

		ret = soc_dapm_mixer_update_power(card, kcontrol, connect);

		card->update = NULL;
	}

	mutex_unlock(&card->dapm_mutex);

	if (ret > 0)
		soc_dpcm_runtime_update(card);

	return change;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_put_volsw);

/**
 * snd_soc_dapm_get_enum_double - dapm enumerated double mixer get callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to get the value of a dapm enumerated double mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_dapm_get_enum_double(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
	struct snd_soc_card *card = dapm->card;
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	unsigned int reg_val, val;

	mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
	/* Read from hardware only while the control is powered; otherwise
	 * report the cached value. */
	if (e->reg != SND_SOC_NOPM && dapm_kcontrol_is_powered(kcontrol)) {
		int ret = soc_dapm_read(dapm, e->reg, &reg_val);
		if (ret) {
			mutex_unlock(&card->dapm_mutex);
			return ret;
		}
	} else {
		reg_val = dapm_kcontrol_get_value(kcontrol);
	}
	mutex_unlock(&card->dapm_mutex);

	val = (reg_val >> e->shift_l) & e->mask;
	ucontrol->value.enumerated.item[0] = snd_soc_enum_val_to_item(e, val);
	if (e->shift_l != e->shift_r) {
		val = (reg_val >> e->shift_r) & e->mask;
		val = snd_soc_enum_val_to_item(e, val);
		ucontrol->value.enumerated.item[1] = val;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_get_enum_double);

/**
 * snd_soc_dapm_put_enum_double - dapm enumerated double mixer set callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to set the value of a dapm enumerated double mixer control.
 *
 * Returns 0 for success.
*/ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); struct snd_soc_card *card = dapm->card; struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; unsigned int *item = ucontrol->value.enumerated.item; unsigned int val, change, reg_change = 0; unsigned int mask; struct snd_soc_dapm_update update; int ret = 0; if (item[0] >= e->items) return -EINVAL; val = snd_soc_enum_item_to_val(e, item[0]) << e->shift_l; mask = e->mask << e->shift_l; if (e->shift_l != e->shift_r) { if (item[1] > e->items) return -EINVAL; val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_l; mask |= e->mask << e->shift_r; } mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); change = dapm_kcontrol_set_value(kcontrol, val); if (e->reg != SND_SOC_NOPM) reg_change = soc_dapm_test_bits(dapm, e->reg, mask, val); if (change || reg_change) { if (reg_change) { update.kcontrol = kcontrol; update.reg = e->reg; update.mask = mask; update.val = val; card->update = &update; } change |= reg_change; ret = soc_dapm_mux_update_power(card, kcontrol, item[0], e); card->update = NULL; } mutex_unlock(&card->dapm_mutex); if (ret > 0) soc_dpcm_runtime_update(card); return change; } EXPORT_SYMBOL_GPL(snd_soc_dapm_put_enum_double); /** * snd_soc_dapm_info_pin_switch - Info for a pin switch * * @kcontrol: mixer control * @uinfo: control element information * * Callback to provide information about a pin switch control. 
 */
int snd_soc_dapm_info_pin_switch(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_info *uinfo)
{
	/* A pin switch is a simple boolean (0 = disabled, 1 = enabled). */
	uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_info_pin_switch);

/**
 * snd_soc_dapm_get_pin_switch - Get information for a pin switch
 *
 * @kcontrol: mixer control
 * @ucontrol: Value
 */
int snd_soc_dapm_get_pin_switch(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
	/* private_value holds the pin name string for pin-switch controls. */
	const char *pin = (const char *)kcontrol->private_value;

	mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);

	ucontrol->value.integer.value[0] =
		snd_soc_dapm_get_pin_status(&card->dapm, pin);

	mutex_unlock(&card->dapm_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_get_pin_switch);

/**
 * snd_soc_dapm_put_pin_switch - Set information for a pin switch
 *
 * @kcontrol: mixer control
 * @ucontrol: Value
 */
int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
	const char *pin = (const char *)kcontrol->private_value;

	if (ucontrol->value.integer.value[0])
		snd_soc_dapm_enable_pin(&card->dapm, pin);
	else
		snd_soc_dapm_disable_pin(&card->dapm, pin);

	/* Apply the new pin state to widget power immediately. */
	snd_soc_dapm_sync(&card->dapm);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_put_pin_switch);

/*
 * Locked wrapper around snd_soc_dapm_new_control_unlocked(): creates one
 * widget from the template under the card's dapm_mutex.  Returns the new
 * widget or NULL on failure.
 */
struct snd_soc_dapm_widget *
snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
			 const struct snd_soc_dapm_widget *widget)
{
	struct snd_soc_dapm_widget *w;

	mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
	w = snd_soc_dapm_new_control_unlocked(dapm, widget);
	if (!w)
		dev_err(dapm->dev,
			"ASoC: Failed to create DAPM control %s\n",
			widget->name);
	mutex_unlock(&dapm->card->dapm_mutex);
	return w;
}

/*
 * Create a DAPM widget from @widget (a template that is copied, not kept):
 * acquire any regulator/clock the widget type needs, build the (optionally
 * prefix-qualified) name, select the power-check callback by widget id, and
 * link the widget into the card's widget list.  Caller holds dapm_mutex.
 * Returns NULL on failure.
 */
struct snd_soc_dapm_widget *
snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
				  const struct snd_soc_dapm_widget *widget)
{
	struct snd_soc_dapm_widget *w;
	const char *prefix;
	int ret;

	/* Duplicate the caller's template so it can live in static data. */
	if ((w = dapm_cnew_widget(widget)) == NULL)
		return NULL;

	switch (w->id) {
	case snd_soc_dapm_regulator_supply:
		/* devm-managed, so no explicit release on the error paths. */
		w->regulator = devm_regulator_get(dapm->dev, w->name);
		if (IS_ERR(w->regulator)) {
			ret = PTR_ERR(w->regulator);
			dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n",
				w->name, ret);
			return NULL;
		}

		if (w->on_val & SND_SOC_DAPM_REGULATOR_BYPASS) {
			ret = regulator_allow_bypass(w->regulator, true);
			if (ret != 0)
				dev_warn(w->dapm->dev,
					 "ASoC: Failed to bypass %s: %d\n",
					 w->name, ret);
		}
		break;
	case snd_soc_dapm_clock_supply:
#ifdef CONFIG_CLKDEV_LOOKUP
		w->clk = devm_clk_get(dapm->dev, w->name);
		if (IS_ERR(w->clk)) {
			ret = PTR_ERR(w->clk);
			dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n",
				w->name, ret);
			return NULL;
		}
#else
		/* Clock supplies need clkdev; unsupported without it. */
		return NULL;
#endif
		break;
	default:
		break;
	}

	/* Qualify widget (and stream) names with the component prefix. */
	prefix = soc_dapm_prefix(dapm);
	if (prefix) {
		w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
		if (widget->sname)
			w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
					     widget->sname);
	} else {
		w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
		if (widget->sname)
			w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
	}
	if (w->name == NULL) {
		/* NOTE(review): w->sname is not freed here — possible leak
		 * on this (allocation-failure) path; confirm upstream. */
		kfree(w);
		return NULL;
	}

	/* Pick source/sink role and power-check policy per widget type. */
	switch (w->id) {
	case snd_soc_dapm_mic:
		w->is_source = 1;
		w->power_check = dapm_generic_check_power;
		break;
	case snd_soc_dapm_input:
		/* On fully-routed cards bare inputs are not sources. */
		if (!dapm->card->fully_routed)
			w->is_source = 1;
		w->power_check = dapm_generic_check_power;
		break;
	case snd_soc_dapm_spk:
	case snd_soc_dapm_hp:
		w->is_sink = 1;
		w->power_check = dapm_generic_check_power;
		break;
	case snd_soc_dapm_output:
		if (!dapm->card->fully_routed)
			w->is_sink = 1;
		w->power_check = dapm_generic_check_power;
		break;
	case snd_soc_dapm_vmid:
	case snd_soc_dapm_siggen:
		w->is_source = 1;
		w->power_check = dapm_always_on_check_power;
		break;
	case snd_soc_dapm_mux:
	case snd_soc_dapm_demux:
	case snd_soc_dapm_switch:
	case snd_soc_dapm_mixer:
	case snd_soc_dapm_mixer_named_ctl:
	case snd_soc_dapm_adc:
	case snd_soc_dapm_aif_out:
	case snd_soc_dapm_dac:
	case snd_soc_dapm_aif_in:
	case snd_soc_dapm_pga:
	case snd_soc_dapm_out_drv:
	case snd_soc_dapm_micbias:
	case snd_soc_dapm_line:
	case snd_soc_dapm_dai_link:
	case snd_soc_dapm_dai_out:
	case snd_soc_dapm_dai_in:
		w->power_check = dapm_generic_check_power;
		break;
	case snd_soc_dapm_supply:
	case snd_soc_dapm_regulator_supply:
	case snd_soc_dapm_clock_supply:
	case snd_soc_dapm_kcontrol:
		w->is_supply = 1;
		w->power_check = dapm_supply_check_power;
		break;
	default:
		w->power_check = dapm_always_on_check_power;
		break;
	}

	w->dapm = dapm;
	INIT_LIST_HEAD(&w->sources);
	INIT_LIST_HEAD(&w->sinks);
	INIT_LIST_HEAD(&w->list);
	INIT_LIST_HEAD(&w->dirty);
	list_add_tail(&w->list, &dapm->card->widgets);

	/* -1 marks the input/output path counts as not yet computed. */
	w->inputs = -1;
	w->outputs = -1;

	/* machine layer set ups unconnected pins and insertions */
	w->connected = 1;
	return w;
}

/**
 * snd_soc_dapm_new_controls - create new dapm controls
 * @dapm: DAPM context
 * @widget: widget array
 * @num: number of widgets
 *
 * Creates new DAPM controls based upon the templates.
 *
 * Returns 0 for success else error.
*/ int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_widget *widget, int num) { struct snd_soc_dapm_widget *w; int i; int ret = 0; mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_INIT); for (i = 0; i < num; i++) { w = snd_soc_dapm_new_control_unlocked(dapm, widget); if (!w) { dev_err(dapm->dev, "ASoC: Failed to create DAPM control %s\n", widget->name); ret = -ENOMEM; break; } widget++; } mutex_unlock(&dapm->card->dapm_mutex); return ret; } EXPORT_SYMBOL_GPL(snd_soc_dapm_new_controls); static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_dapm_path *source_p, *sink_p; struct snd_soc_dai *source, *sink; const struct snd_soc_pcm_stream *config = w->params + w->params_select; struct snd_pcm_substream substream; struct snd_pcm_hw_params *params = NULL; u64 fmt; int ret; if (WARN_ON(!config) || WARN_ON(list_empty(&w->sources) || list_empty(&w->sinks))) return -EINVAL; /* We only support a single source and sink, pick the first */ source_p = list_first_entry(&w->sources, struct snd_soc_dapm_path, list_sink); sink_p = list_first_entry(&w->sinks, struct snd_soc_dapm_path, list_source); if (WARN_ON(!source_p || !sink_p) || WARN_ON(!sink_p->source || !source_p->sink) || WARN_ON(!source_p->source || !sink_p->sink)) return -EINVAL; source = source_p->source->priv; sink = sink_p->sink->priv; /* Be a little careful as we don't want to overflow the mask array */ if (config->formats) { fmt = ffs(config->formats) - 1; } else { dev_warn(w->dapm->dev, "ASoC: Invalid format %llx specified\n", config->formats); fmt = 0; } /* Currently very limited parameter selection */ params = kzalloc(sizeof(*params), GFP_KERNEL); if (!params) { ret = -ENOMEM; goto out; } snd_mask_set(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), fmt); hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE)->min = config->rate_min; hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE)->max = 
config->rate_max; hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS)->min = config->channels_min; hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS)->max = config->channels_max; memset(&substream, 0, sizeof(substream)); switch (event) { case SND_SOC_DAPM_PRE_PMU: substream.stream = SNDRV_PCM_STREAM_CAPTURE; ret = soc_dai_hw_params(&substream, params, source); if (ret < 0) goto out; substream.stream = SNDRV_PCM_STREAM_PLAYBACK; ret = soc_dai_hw_params(&substream, params, sink); if (ret < 0) goto out; break; case SND_SOC_DAPM_POST_PMU: ret = snd_soc_dai_digital_mute(sink, 0, SNDRV_PCM_STREAM_PLAYBACK); if (ret != 0 && ret != -ENOTSUPP) dev_warn(sink->dev, "ASoC: Failed to unmute: %d\n", ret); ret = 0; break; case SND_SOC_DAPM_PRE_PMD: ret = snd_soc_dai_digital_mute(sink, 1, SNDRV_PCM_STREAM_PLAYBACK); if (ret != 0 && ret != -ENOTSUPP) dev_warn(sink->dev, "ASoC: Failed to mute: %d\n", ret); ret = 0; break; default: WARN(1, "Unknown event %d\n", event); return -EINVAL; } out: kfree(params); return ret; } static int snd_soc_dapm_dai_link_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dapm_widget *w = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = w->params_select; return 0; } static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dapm_widget *w = snd_kcontrol_chip(kcontrol); /* Can't change the config when widget is already powered */ if (w->power) return -EBUSY; if (ucontrol->value.integer.value[0] == w->params_select) return 0; if (ucontrol->value.integer.value[0] >= w->num_params) return -EINVAL; w->params_select = ucontrol->value.integer.value[0]; return 0; } int snd_soc_dapm_new_pcm(struct snd_soc_card *card, const struct snd_soc_pcm_stream *params, unsigned int num_params, struct snd_soc_dapm_widget *source, struct snd_soc_dapm_widget *sink) { struct snd_soc_dapm_widget template; struct snd_soc_dapm_widget *w; char *link_name; int 
ret, count; unsigned long private_value; const char **w_param_text; struct soc_enum w_param_enum[] = { SOC_ENUM_SINGLE(0, 0, 0, NULL), }; struct snd_kcontrol_new kcontrol_dai_link[] = { SOC_ENUM_EXT(NULL, w_param_enum[0], snd_soc_dapm_dai_link_get, snd_soc_dapm_dai_link_put), }; const struct snd_soc_pcm_stream *config = params; w_param_text = devm_kcalloc(card->dev, num_params, sizeof(char *), GFP_KERNEL); if (!w_param_text) return -ENOMEM; link_name = devm_kasprintf(card->dev, GFP_KERNEL, "%s-%s", source->name, sink->name); if (!link_name) { ret = -ENOMEM; goto outfree_w_param; } for (count = 0 ; count < num_params; count++) { if (!config->stream_name) { dev_warn(card->dapm.dev, "ASoC: anonymous config %d for dai link %s\n", count, link_name); w_param_text[count] = devm_kasprintf(card->dev, GFP_KERNEL, "Anonymous Configuration %d", count); if (!w_param_text[count]) { ret = -ENOMEM; goto outfree_link_name; } } else { w_param_text[count] = devm_kmemdup(card->dev, config->stream_name, strlen(config->stream_name) + 1, GFP_KERNEL); if (!w_param_text[count]) { ret = -ENOMEM; goto outfree_link_name; } } config++; } w_param_enum[0].items = num_params; w_param_enum[0].texts = w_param_text; memset(&template, 0, sizeof(template)); template.reg = SND_SOC_NOPM; template.id = snd_soc_dapm_dai_link; template.name = link_name; template.event = snd_soc_dai_link_event; template.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD; template.num_kcontrols = 1; /* duplicate w_param_enum on heap so that memory persists */ private_value = (unsigned long) devm_kmemdup(card->dev, (void *)(kcontrol_dai_link[0].private_value), sizeof(struct soc_enum), GFP_KERNEL); if (!private_value) { dev_err(card->dev, "ASoC: Failed to create control for %s widget\n", link_name); ret = -ENOMEM; goto outfree_link_name; } kcontrol_dai_link[0].private_value = private_value; /* duplicate kcontrol_dai_link on heap so that memory persists */ template.kcontrol_news = 
devm_kmemdup(card->dev, &kcontrol_dai_link[0], sizeof(struct snd_kcontrol_new), GFP_KERNEL); if (!template.kcontrol_news) { dev_err(card->dev, "ASoC: Failed to create control for %s widget\n", link_name); ret = -ENOMEM; goto outfree_private_value; } dev_dbg(card->dev, "ASoC: adding %s widget\n", link_name); w = snd_soc_dapm_new_control_unlocked(&card->dapm, &template); if (!w) { dev_err(card->dev, "ASoC: Failed to create %s widget\n", link_name); ret = -ENOMEM; goto outfree_kcontrol_news; } w->params = params; w->num_params = num_params; ret = snd_soc_dapm_add_path(&card->dapm, source, w, NULL, NULL); if (ret) goto outfree_w; return snd_soc_dapm_add_path(&card->dapm, w, sink, NULL, NULL); outfree_w: devm_kfree(card->dev, w); outfree_kcontrol_news: devm_kfree(card->dev, (void *)template.kcontrol_news); outfree_private_value: devm_kfree(card->dev, (void *)private_value); outfree_link_name: devm_kfree(card->dev, link_name); outfree_w_param: for (count = 0 ; count < num_params; count++) devm_kfree(card->dev, (void *)w_param_text[count]); devm_kfree(card->dev, w_param_text); return ret; } int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm, struct snd_soc_dai *dai) { struct snd_soc_dapm_widget template; struct snd_soc_dapm_widget *w; WARN_ON(dapm->dev != dai->dev); memset(&template, 0, sizeof(template)); template.reg = SND_SOC_NOPM; if (dai->driver->playback.stream_name) { template.id = snd_soc_dapm_dai_in; template.name = dai->driver->playback.stream_name; template.sname = dai->driver->playback.stream_name; dev_dbg(dai->dev, "ASoC: adding %s widget\n", template.name); w = snd_soc_dapm_new_control_unlocked(dapm, &template); if (!w) { dev_err(dapm->dev, "ASoC: Failed to create %s widget\n", dai->driver->playback.stream_name); return -ENOMEM; } w->priv = dai; dai->playback_widget = w; } if (dai->driver->capture.stream_name) { template.id = snd_soc_dapm_dai_out; template.name = dai->driver->capture.stream_name; template.sname = 
dai->driver->capture.stream_name; dev_dbg(dai->dev, "ASoC: adding %s widget\n", template.name); w = snd_soc_dapm_new_control_unlocked(dapm, &template); if (!w) { dev_err(dapm->dev, "ASoC: Failed to create %s widget\n", dai->driver->capture.stream_name); return -ENOMEM; } w->priv = dai; dai->capture_widget = w; } return 0; } int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card) { struct snd_soc_dapm_widget *dai_w, *w; struct snd_soc_dapm_widget *src, *sink; struct snd_soc_dai *dai; /* For each DAI widget... */ list_for_each_entry(dai_w, &card->widgets, list) { switch (dai_w->id) { case snd_soc_dapm_dai_in: case snd_soc_dapm_dai_out: break; default: continue; } dai = dai_w->priv; /* ...find all widgets with the same stream and link them */ list_for_each_entry(w, &card->widgets, list) { if (w->dapm != dai_w->dapm) continue; switch (w->id) { case snd_soc_dapm_dai_in: case snd_soc_dapm_dai_out: continue; default: break; } if (!w->sname || !strstr(w->sname, dai_w->name)) continue; if (dai_w->id == snd_soc_dapm_dai_in) { src = dai_w; sink = w; } else { src = w; sink = dai_w; } dev_dbg(dai->dev, "%s -> %s\n", src->name, sink->name); snd_soc_dapm_add_path(w->dapm, src, sink, NULL, NULL); } } return 0; } static void dapm_connect_dai_link_widgets(struct snd_soc_card *card, struct snd_soc_pcm_runtime *rtd) { struct snd_soc_dai *cpu_dai = rtd->cpu_dai; struct snd_soc_dapm_widget *sink, *source; int i; for (i = 0; i < rtd->num_codecs; i++) { struct snd_soc_dai *codec_dai = rtd->codec_dais[i]; /* there is no point in connecting BE DAI links with dummies */ if (snd_soc_dai_is_dummy(codec_dai) || snd_soc_dai_is_dummy(cpu_dai)) continue; /* connect BE DAI playback if widgets are valid */ if (codec_dai->playback_widget && cpu_dai->playback_widget) { source = cpu_dai->playback_widget; sink = codec_dai->playback_widget; dev_dbg(rtd->dev, "connected DAI link %s:%s -> %s:%s\n", cpu_dai->component->name, source->name, codec_dai->component->name, sink->name); 
snd_soc_dapm_add_path(&card->dapm, source, sink, NULL, NULL); } /* connect BE DAI capture if widgets are valid */ if (codec_dai->capture_widget && cpu_dai->capture_widget) { source = codec_dai->capture_widget; sink = cpu_dai->capture_widget; dev_dbg(rtd->dev, "connected DAI link %s:%s -> %s:%s\n", codec_dai->component->name, source->name, cpu_dai->component->name, sink->name); snd_soc_dapm_add_path(&card->dapm, source, sink, NULL, NULL); } } } static void soc_dapm_dai_stream_event(struct snd_soc_dai *dai, int stream, int event) { struct snd_soc_dapm_widget *w; if (stream == SNDRV_PCM_STREAM_PLAYBACK) w = dai->playback_widget; else w = dai->capture_widget; if (w) { dapm_mark_dirty(w, "stream event"); switch (event) { case SND_SOC_DAPM_STREAM_START: w->active = 1; break; case SND_SOC_DAPM_STREAM_STOP: w->active = 0; break; case SND_SOC_DAPM_STREAM_SUSPEND: case SND_SOC_DAPM_STREAM_RESUME: case SND_SOC_DAPM_STREAM_PAUSE_PUSH: case SND_SOC_DAPM_STREAM_PAUSE_RELEASE: break; } if (w->id == snd_soc_dapm_dai_in) { w->is_source = w->active; dapm_widget_invalidate_input_paths(w); } else { w->is_sink = w->active; dapm_widget_invalidate_output_paths(w); } } } void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card) { struct snd_soc_pcm_runtime *rtd = card->rtd; int i; /* for each BE DAI link... */ for (i = 0; i < card->num_rtd; i++) { rtd = &card->rtd[i]; /* * dynamic FE links have no fixed DAI mapping. * CODEC<->CODEC links have no direct connection. 
*/ if (rtd->dai_link->dynamic || rtd->dai_link->params) continue; dapm_connect_dai_link_widgets(card, rtd); } } static void soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream, int event) { int i; soc_dapm_dai_stream_event(rtd->cpu_dai, stream, event); for (i = 0; i < rtd->num_codecs; i++) soc_dapm_dai_stream_event(rtd->codec_dais[i], stream, event); dapm_power_widgets(rtd->card, event); } /** * snd_soc_dapm_stream_event - send a stream event to the dapm core * @rtd: PCM runtime data * @stream: stream name * @event: stream event * * Sends a stream event to the dapm core. The core then makes any * necessary widget power changes. * * Returns 0 for success else error. */ void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream, int event) { struct snd_soc_card *card = rtd->card; mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); soc_dapm_stream_event(rtd, stream, event); mutex_unlock(&card->dapm_mutex); } /** * snd_soc_dapm_enable_pin_unlocked - enable pin. * @dapm: DAPM context * @pin: pin name * * Enables input/output pin and its parents or children widgets iff there is * a valid audio route and active audio stream. * * Requires external locking. * * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to * do any widget power switching. */ int snd_soc_dapm_enable_pin_unlocked(struct snd_soc_dapm_context *dapm, const char *pin) { return snd_soc_dapm_set_pin(dapm, pin, 1); } EXPORT_SYMBOL_GPL(snd_soc_dapm_enable_pin_unlocked); /** * snd_soc_dapm_enable_pin - enable pin. * @dapm: DAPM context * @pin: pin name * * Enables input/output pin and its parents or children widgets iff there is * a valid audio route and active audio stream. * * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to * do any widget power switching. 
*/ int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm, const char *pin) { int ret; mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); ret = snd_soc_dapm_set_pin(dapm, pin, 1); mutex_unlock(&dapm->card->dapm_mutex); return ret; } EXPORT_SYMBOL_GPL(snd_soc_dapm_enable_pin); /** * snd_soc_dapm_force_enable_pin_unlocked - force a pin to be enabled * @dapm: DAPM context * @pin: pin name * * Enables input/output pin regardless of any other state. This is * intended for use with microphone bias supplies used in microphone * jack detection. * * Requires external locking. * * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to * do any widget power switching. */ int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm, const char *pin) { struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true); if (!w) { dev_err(dapm->dev, "ASoC: unknown pin %s\n", pin); return -EINVAL; } dev_dbg(w->dapm->dev, "ASoC: force enable pin %s\n", pin); if (!w->connected) { /* * w->force does not affect the number of input or output paths, * so we only have to recheck if w->connected is changed */ dapm_widget_invalidate_input_paths(w); dapm_widget_invalidate_output_paths(w); w->connected = 1; } w->force = 1; dapm_mark_dirty(w, "force enable"); return 0; } EXPORT_SYMBOL_GPL(snd_soc_dapm_force_enable_pin_unlocked); /** * snd_soc_dapm_force_enable_pin - force a pin to be enabled * @dapm: DAPM context * @pin: pin name * * Enables input/output pin regardless of any other state. This is * intended for use with microphone bias supplies used in microphone * jack detection. * * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to * do any widget power switching. 
*/ int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm, const char *pin) { int ret; mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); ret = snd_soc_dapm_force_enable_pin_unlocked(dapm, pin); mutex_unlock(&dapm->card->dapm_mutex); return ret; } EXPORT_SYMBOL_GPL(snd_soc_dapm_force_enable_pin); /** * snd_soc_dapm_disable_pin_unlocked - disable pin. * @dapm: DAPM context * @pin: pin name * * Disables input/output pin and its parents or children widgets. * * Requires external locking. * * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to * do any widget power switching. */ int snd_soc_dapm_disable_pin_unlocked(struct snd_soc_dapm_context *dapm, const char *pin) { return snd_soc_dapm_set_pin(dapm, pin, 0); } EXPORT_SYMBOL_GPL(snd_soc_dapm_disable_pin_unlocked); /** * snd_soc_dapm_disable_pin - disable pin. * @dapm: DAPM context * @pin: pin name * * Disables input/output pin and its parents or children widgets. * * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to * do any widget power switching. */ int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm, const char *pin) { int ret; mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); ret = snd_soc_dapm_set_pin(dapm, pin, 0); mutex_unlock(&dapm->card->dapm_mutex); return ret; } EXPORT_SYMBOL_GPL(snd_soc_dapm_disable_pin); /** * snd_soc_dapm_nc_pin_unlocked - permanently disable pin. * @dapm: DAPM context * @pin: pin name * * Marks the specified pin as being not connected, disabling it along * any parent or child widgets. At present this is identical to * snd_soc_dapm_disable_pin() but in future it will be extended to do * additional things such as disabling controls which only affect * paths through the pin. * * Requires external locking. * * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to * do any widget power switching. 
*/ int snd_soc_dapm_nc_pin_unlocked(struct snd_soc_dapm_context *dapm, const char *pin) { return snd_soc_dapm_set_pin(dapm, pin, 0); } EXPORT_SYMBOL_GPL(snd_soc_dapm_nc_pin_unlocked); /** * snd_soc_dapm_nc_pin - permanently disable pin. * @dapm: DAPM context * @pin: pin name * * Marks the specified pin as being not connected, disabling it along * any parent or child widgets. At present this is identical to * snd_soc_dapm_disable_pin() but in future it will be extended to do * additional things such as disabling controls which only affect * paths through the pin. * * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to * do any widget power switching. */ int snd_soc_dapm_nc_pin(struct snd_soc_dapm_context *dapm, const char *pin) { int ret; mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); ret = snd_soc_dapm_set_pin(dapm, pin, 0); mutex_unlock(&dapm->card->dapm_mutex); return ret; } EXPORT_SYMBOL_GPL(snd_soc_dapm_nc_pin); /** * snd_soc_dapm_get_pin_status - get audio pin status * @dapm: DAPM context * @pin: audio signal pin endpoint (or start point) * * Get audio pin status - connected or disconnected. * * Returns 1 for connected otherwise 0. */ int snd_soc_dapm_get_pin_status(struct snd_soc_dapm_context *dapm, const char *pin) { struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true); if (w) return w->connected; return 0; } EXPORT_SYMBOL_GPL(snd_soc_dapm_get_pin_status); /** * snd_soc_dapm_ignore_suspend - ignore suspend status for DAPM endpoint * @dapm: DAPM context * @pin: audio signal pin endpoint (or start point) * * Mark the given endpoint or pin as ignoring suspend. When the * system is disabled a path between two endpoints flagged as ignoring * suspend will not be disabled. The path must already be enabled via * normal means at suspend time, it will not be turned on if it was not * already enabled. 
*/ int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm, const char *pin) { struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, false); if (!w) { dev_err(dapm->dev, "ASoC: unknown pin %s\n", pin); return -EINVAL; } w->ignore_suspend = 1; return 0; } EXPORT_SYMBOL_GPL(snd_soc_dapm_ignore_suspend); /** * snd_soc_dapm_free - free dapm resources * @dapm: DAPM context * * Free all dapm widgets and resources. */ void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm) { dapm_debugfs_cleanup(dapm); dapm_free_widgets(dapm); list_del(&dapm->list); } EXPORT_SYMBOL_GPL(snd_soc_dapm_free); static void soc_dapm_shutdown_dapm(struct snd_soc_dapm_context *dapm) { struct snd_soc_card *card = dapm->card; struct snd_soc_dapm_widget *w; LIST_HEAD(down_list); int powerdown = 0; mutex_lock(&card->dapm_mutex); list_for_each_entry(w, &dapm->card->widgets, list) { if (w->dapm != dapm) continue; if (w->power) { dapm_seq_insert(w, &down_list, false); w->power = 0; powerdown = 1; } } /* If there were no widgets to power down we're already in * standby. 
*/ if (powerdown) { if (dapm->bias_level == SND_SOC_BIAS_ON) snd_soc_dapm_set_bias_level(dapm, SND_SOC_BIAS_PREPARE); dapm_seq_run(card, &down_list, 0, false); if (dapm->bias_level == SND_SOC_BIAS_PREPARE) snd_soc_dapm_set_bias_level(dapm, SND_SOC_BIAS_STANDBY); } mutex_unlock(&card->dapm_mutex); } /* * snd_soc_dapm_shutdown - callback for system shutdown */ void snd_soc_dapm_shutdown(struct snd_soc_card *card) { struct snd_soc_dapm_context *dapm; list_for_each_entry(dapm, &card->dapm_list, list) { if (dapm != &card->dapm) { soc_dapm_shutdown_dapm(dapm); if (dapm->bias_level == SND_SOC_BIAS_STANDBY) snd_soc_dapm_set_bias_level(dapm, SND_SOC_BIAS_OFF); } } soc_dapm_shutdown_dapm(&card->dapm); if (card->dapm.bias_level == SND_SOC_BIAS_STANDBY) snd_soc_dapm_set_bias_level(&card->dapm, SND_SOC_BIAS_OFF); } /* Module information */ MODULE_AUTHOR("Liam Girdwood, lrg@slimlogic.co.uk"); MODULE_DESCRIPTION("Dynamic Audio Power Management core for ALSA SoC"); MODULE_LICENSE("GPL");
gpl-2.0
lollipop-og/F93_LGE975_KK_Kernel
drivers/staging/prima_kk_2_7/CORE/SME/src/pmc/pmcApi.c
66
120396
/* * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /****************************************************************************** * * Name: pmcApi.c * * Description: Routines that make up the Power Management Control (PMC) API. 
 *
 ******************************************************************************/

#include "palTypes.h"
#include "aniGlobal.h"
#include "palTimer.h"
#include "csrLinkList.h"
#include "smsDebug.h"
#include "pmcApi.h"
#include "pmc.h"
#include "cfgApi.h"
#include "smeInside.h"
#include "csrInsideApi.h"
#include "wlan_ps_wow_diag.h"
#include "wlan_qct_wda.h"
#include "limSessionUtils.h"
/* NOTE(review): duplicate include — csrInsideApi.h is already included
   above; harmless (header guards) but could be dropped. */
#include "csrInsideApi.h"

/* Defined in the SME command layer; returns a command to its free pool. */
extern void pmcReleaseCommand( tpAniSirGlobal pMac, tSmeCmd *pCommand );

/* Forward declarations for the list-teardown helpers used by pmcClose. */
void pmcCloseDeferredMsgList(tpAniSirGlobal pMac);
void pmcCloseDeviceStateUpdateList(tpAniSirGlobal pMac);
void pmcCloseRequestStartUapsdList(tpAniSirGlobal pMac);
void pmcCloseRequestBmpsList(tpAniSirGlobal pMac);
void pmcCloseRequestFullPowerList(tpAniSirGlobal pMac);
void pmcClosePowerSaveCheckList(tpAniSirGlobal pMac);

/******************************************************************************
*
* Name:  pmcOpen
*
* Description:
*    Does a PMC open operation on the device: sets the initial PMC state and
*    power-save-mode defaults, allocates the IMPS/traffic/exit-power-save
*    (and, with diag support, DIAG event) timers, and opens the callback and
*    deferred-message lists used by the other PMC routines.
*
* Parameters:
*    hHal - HAL handle for device
*
* Returns:
*    eHAL_STATUS_SUCCESS - open successful
*    eHAL_STATUS_FAILURE - open not successful
*
* NOTE(review): on a mid-function failure the earlier timer/list allocations
* are not torn down here — presumably pmcClose() handles cleanup; confirm.
*
******************************************************************************/
eHalStatus pmcOpen (tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    pmcLog(pMac, LOG2, FL("Entering pmcOpen"));

    /* Initialize basic PMC information about device.
 */
    pMac->pmc.powerSource = BATTERY_POWER;
    pMac->pmc.pmcState = STOPPED;
    pMac->pmc.pmcReady = FALSE;

    /* Initialize Power Save Modes */
    pMac->pmc.impsEnabled = FALSE;
    pMac->pmc.autoBmpsEntryEnabled = FALSE;
    pMac->pmc.smpsEnabled = FALSE;
    pMac->pmc.uapsdEnabled = TRUE;
    pMac->pmc.bmpsEnabled = TRUE;
    pMac->pmc.standbyEnabled = TRUE;
    pMac->pmc.wowlEnabled = TRUE;
    pMac->pmc.rfSuppliesVotedOff= FALSE;

    /* Zero the per-mode configuration blocks before defaults are applied. */
    palZeroMemory(pMac->hHdd, &(pMac->pmc.bmpsConfig),
                  sizeof(tPmcBmpsConfigParams));
    palZeroMemory(pMac->hHdd, &(pMac->pmc.impsConfig),
                  sizeof(tPmcImpsConfigParams));
    palZeroMemory(pMac->hHdd, &(pMac->pmc.smpsConfig),
                  sizeof(tPmcSmpsConfigParams));

    /* Allocate a timer to use with IMPS. */
    if (palTimerAlloc(pMac->hHdd, &pMac->pmc.hImpsTimer, pmcImpsTimerExpired,
                      hHal) != eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, FL("Cannot allocate timer for IMPS"));
        return eHAL_STATUS_FAILURE;
    }

    /* Allocate a timer used in Full Power State to measure traffic
       levels and determine when to enter BMPS. */
    if (!VOS_IS_STATUS_SUCCESS(vos_timer_init(&pMac->pmc.hTrafficTimer,
                                VOS_TIMER_TYPE_SW, pmcTrafficTimerExpired,
                                hHal)))
    {
        pmcLog(pMac, LOGE, FL("Cannot allocate timer for traffic measurement"));
        return eHAL_STATUS_FAILURE;
    }

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    /* Allocate a timer used to report current PMC state through periodic
       DIAG event */
    if (palTimerAlloc(pMac->hHdd, &pMac->pmc.hDiagEvtTimer,
                      pmcDiagEvtTimerExpired, hHal) != eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, FL("Cannot allocate timer for diag event reporting"));
        return eHAL_STATUS_FAILURE;
    }
#endif

    //Initialize the default value for Bmps related config.
    pMac->pmc.bmpsConfig.trafficMeasurePeriod = BMPS_TRAFFIC_TIMER_DEFAULT;
    pMac->pmc.bmpsConfig.bmpsPeriod = WNI_CFG_LISTEN_INTERVAL_STADEF;

    /* Allocate a timer used to schedule a deferred power save mode exit.
 */
    if (palTimerAlloc(pMac->hHdd, &pMac->pmc.hExitPowerSaveTimer,
                      pmcExitPowerSaveTimerExpired, hHal) !=
                      eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, FL("Cannot allocate exit power save mode timer"));
        PMC_ABORT;
        return eHAL_STATUS_FAILURE;
    }

    /* Initialize lists for power save check routines and request full
       power callback routines. */
    if (csrLLOpen(pMac->hHdd, &pMac->pmc.powerSaveCheckList) !=
        eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, FL("Cannot initialize power save check routine list"));
        PMC_ABORT;
        return eHAL_STATUS_FAILURE;
    }
    if (csrLLOpen(pMac->hHdd, &pMac->pmc.requestFullPowerList) !=
        eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, FL("Cannot initialize request full power callback routine list"));
        PMC_ABORT;
        return eHAL_STATUS_FAILURE;
    }

    /* Initialize lists for request BMPS callback routines. */
    if (csrLLOpen(pMac->hHdd, &pMac->pmc.requestBmpsList) !=
        eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE,
               "PMC: cannot initialize request BMPS callback routine list");
        return eHAL_STATUS_FAILURE;
    }

    /* Initialize lists for request start UAPSD callback routines. */
    if (csrLLOpen(pMac->hHdd, &pMac->pmc.requestStartUapsdList) !=
        eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE,
               "PMC: cannot initialize request start UAPSD callback routine list");
        return eHAL_STATUS_FAILURE;
    }

    /* Initialize lists for device state update indication callback
       routines. */
    if (csrLLOpen(pMac->hHdd, &pMac->pmc.deviceStateUpdateIndList) !=
        eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE,
               "PMC: cannot initialize device state update indication callback list");
        return eHAL_STATUS_FAILURE;
    }

    if (csrLLOpen(pMac->hHdd, &pMac->pmc.deferredMsgList) !=
        eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, FL("Cannot initialize deferred msg list"));
        PMC_ABORT;
        return eHAL_STATUS_FAILURE;
    }

    return eHAL_STATUS_SUCCESS;
}

/******************************************************************************
*
* Name:  pmcStart
*
* Description:
*    Does a PMC start operation on the device.
* * Parameters: * hHal - HAL handle for device * * Returns: * eHAL_STATUS_SUCCESS - start successful * eHAL_STATUS_FAILURE - start not successful * ******************************************************************************/ eHalStatus pmcStart (tHalHandle hHal) { tpAniSirGlobal pMac = PMAC_STRUCT(hHal); tSirMacHTMIMOPowerSaveState htMimoPowerSaveState; pmcLog(pMac, LOG2, FL("Entering pmcStart")); /* Initialize basic PMC information about device. */ pMac->pmc.pmcState = FULL_POWER; pMac->pmc.requestFullPowerPending = FALSE; pMac->pmc.uapsdSessionRequired = FALSE; pMac->pmc.wowlModeRequired = FALSE; pMac->pmc.bmpsRequestedByHdd = FALSE; pMac->pmc.remainInPowerActiveTillDHCP = FALSE; pMac->pmc.remainInPowerActiveThreshold = 0; /* WLAN Switch initial states. */ pMac->pmc.hwWlanSwitchState = ePMC_SWITCH_ON; pMac->pmc.swWlanSwitchState = ePMC_SWITCH_ON; /* No IMPS callback routine yet. */ pMac->pmc.impsCallbackRoutine = NULL; /* No STANDBY callback routine yet. */ pMac->pmc.standbyCallbackRoutine = NULL; /* No WOWL callback routine yet. */ pMac->pmc.enterWowlCallbackRoutine = NULL; /* Initialize BMPS traffic counts. */ pMac->pmc.cLastTxUnicastFrames = 0; pMac->pmc.cLastRxUnicastFrames = 0; pMac->pmc.ImpsReqFailed = VOS_FALSE; pMac->pmc.ImpsReqFailCnt = 0; pMac->pmc.ImpsReqTimerFailed = 0; pMac->pmc.ImpsReqTimerfailCnt = 0; /* Configure SMPS. 
*/ if (pMac->pmc.smpsEnabled && (pMac->pmc.powerSource != AC_POWER || pMac->pmc.smpsConfig.enterOnAc)) { if (pMac->pmc.smpsConfig.mode == ePMC_DYNAMIC_SMPS) htMimoPowerSaveState = eSIR_HT_MIMO_PS_DYNAMIC; if (pMac->pmc.smpsConfig.mode == ePMC_STATIC_SMPS) htMimoPowerSaveState = eSIR_HT_MIMO_PS_STATIC; } else htMimoPowerSaveState = eSIR_HT_MIMO_PS_NO_LIMIT; if (pmcSendMessage(hHal, eWNI_PMC_SMPS_STATE_IND, &htMimoPowerSaveState, sizeof(tSirMacHTMIMOPowerSaveState)) != eHAL_STATUS_SUCCESS) return eHAL_STATUS_FAILURE; #ifdef FEATURE_WLAN_DIAG_SUPPORT if (pmcStartDiagEvtTimer(hHal) != eHAL_STATUS_SUCCESS) { return eHAL_STATUS_FAILURE; } #endif #if defined(ANI_LOGDUMP) pmcDumpInit(hHal); #endif return eHAL_STATUS_SUCCESS; } /****************************************************************************** * * Name: pmcStop * * Description: * Does a PMC stop operation on the device. * * Parameters: * hHal - HAL handle for device * * Returns: * eHAL_STATUS_SUCCESS - stop successful * eHAL_STATUS_FAILURE - stop not successful * ******************************************************************************/ eHalStatus pmcStop (tHalHandle hHal) { tpAniSirGlobal pMac = PMAC_STRUCT(hHal); tListElem *pEntry; tPmcDeferredMsg *pDeferredMsg; pmcLog(pMac, LOG2, FL("Entering pmcStop")); /* Cancel any running timers. */ if (palTimerStop(pMac->hHdd, pMac->pmc.hImpsTimer) != eHAL_STATUS_SUCCESS) { pmcLog(pMac, LOGE, FL("Cannot cancel IMPS timer")); } pmcStopTrafficTimer(hHal); #ifdef FEATURE_WLAN_DIAG_SUPPORT pmcStopDiagEvtTimer(hHal); #endif if (palTimerStop(pMac->hHdd, pMac->pmc.hExitPowerSaveTimer) != eHAL_STATUS_SUCCESS) { pmcLog(pMac, LOGE, FL("Cannot cancel exit power save mode timer")); } /* Do all the callbacks. 
*/ pmcDoCallbacks(hHal, eHAL_STATUS_FAILURE); pmcDoBmpsCallbacks(hHal, eHAL_STATUS_FAILURE); pMac->pmc.uapsdSessionRequired = FALSE; pmcDoStartUapsdCallbacks(hHal, eHAL_STATUS_FAILURE); pmcDoStandbyCallbacks(hHal, eHAL_STATUS_FAILURE); //purge the deferred msg list csrLLLock( &pMac->pmc.deferredMsgList ); while( NULL != ( pEntry = csrLLRemoveHead( &pMac->pmc.deferredMsgList, eANI_BOOLEAN_FALSE ) ) ) { pDeferredMsg = GET_BASE_ADDR( pEntry, tPmcDeferredMsg, link ); palFreeMemory( pMac->hHdd, pDeferredMsg ); } csrLLUnlock( &pMac->pmc.deferredMsgList ); /* PMC is stopped. */ pMac->pmc.pmcState = STOPPED; pMac->pmc.pmcReady = FALSE; return eHAL_STATUS_SUCCESS; } /****************************************************************************** * * Name: pmcClose * * Description: * Does a PMC close operation on the device. * * Parameters: * hHal - HAL handle for device * * Returns: * eHAL_STATUS_SUCCESS - close successful * eHAL_STATUS_FAILURE - close not successful * ******************************************************************************/ eHalStatus pmcClose (tHalHandle hHal) { tpAniSirGlobal pMac = PMAC_STRUCT(hHal); pmcLog(pMac, LOG2, FL("Entering pmcClose")); /* Free up allocated resources. 
*/ if (palTimerFree(pMac->hHdd, pMac->pmc.hImpsTimer) != eHAL_STATUS_SUCCESS) { pmcLog(pMac, LOGE, FL("Cannot deallocate IMPS timer")); } if (!VOS_IS_STATUS_SUCCESS(vos_timer_destroy(&pMac->pmc.hTrafficTimer))) { pmcLog(pMac, LOGE, FL("Cannot deallocate traffic timer")); } #ifdef FEATURE_WLAN_DIAG_SUPPORT if (palTimerFree(pMac->hHdd, pMac->pmc.hDiagEvtTimer) != eHAL_STATUS_SUCCESS) { pmcLog(pMac, LOGE, FL("Cannot deallocate timer for diag event reporting")); } #endif if (palTimerFree(pMac->hHdd, pMac->pmc.hExitPowerSaveTimer) != eHAL_STATUS_SUCCESS) { pmcLog(pMac, LOGE, FL("Cannot deallocate exit power save mode timer")); } /* The following list's entries are dynamically allocated so they need their own cleanup function */ pmcClosePowerSaveCheckList(pMac); pmcCloseRequestFullPowerList(pMac); pmcCloseRequestBmpsList(pMac); pmcCloseRequestStartUapsdList(pMac); pmcCloseDeviceStateUpdateList(pMac); pmcCloseDeferredMsgList(pMac); return eHAL_STATUS_SUCCESS; } /****************************************************************************** * * Name: pmcSignalPowerEvent * * Description: * Signals to PMC that a power event has occurred. * * Parameters: * hHal - HAL handle for device * event - the event that has occurred * * Returns: * eHAL_STATUS_SUCCESS - signaling successful * eHAL_STATUS_FAILURE - signaling not successful * ******************************************************************************/ eHalStatus pmcSignalPowerEvent (tHalHandle hHal, tPmcPowerEvent event) { tpAniSirGlobal pMac = PMAC_STRUCT(hHal); #ifndef GEN6_ONWARDS tSirMacHTMIMOPowerSaveState htMimoPowerSaveState; #endif pmcLog(pMac, LOG2, FL("Entering pmcSignalPowerEvent, event %d"), event); /* Take action based on the event being signaled. 
*/ switch (event) { #ifndef GEN6_ONWARDS case ePMC_SYSTEM_HIBERNATE: return pmcEnterLowPowerState(hHal); case ePMC_SYSTEM_RESUME: return pmcExitLowPowerState(hHal); case ePMC_HW_WLAN_SWITCH_OFF: pMac->pmc.hwWlanSwitchState = ePMC_SWITCH_OFF; return pmcEnterLowPowerState(hHal); case ePMC_HW_WLAN_SWITCH_ON: pMac->pmc.hwWlanSwitchState = ePMC_SWITCH_ON; return pmcExitLowPowerState(hHal); case ePMC_SW_WLAN_SWITCH_OFF: pMac->pmc.swWlanSwitchState = ePMC_SWITCH_OFF; return pmcEnterLowPowerState(hHal); case ePMC_SW_WLAN_SWITCH_ON: pMac->pmc.swWlanSwitchState = ePMC_SWITCH_ON; return pmcExitLowPowerState(hHal); case ePMC_BATTERY_OPERATION: pMac->pmc.powerSource = BATTERY_POWER; /* Turn on SMPS. */ if (pMac->pmc.smpsEnabled) { if (pMac->pmc.smpsConfig.mode == ePMC_DYNAMIC_SMPS) htMimoPowerSaveState = eSIR_HT_MIMO_PS_DYNAMIC; if (pMac->pmc.smpsConfig.mode == ePMC_STATIC_SMPS) htMimoPowerSaveState = eSIR_HT_MIMO_PS_STATIC; if (pmcSendMessage(hHal, eWNI_PMC_SMPS_STATE_IND, &htMimoPowerSaveState, sizeof(tSirMacHTMIMOPowerSaveState)) != eHAL_STATUS_SUCCESS) return eHAL_STATUS_FAILURE; } return eHAL_STATUS_SUCCESS; case ePMC_AC_OPERATION: pMac->pmc.powerSource = AC_POWER; /* Turn off SMPS. */ if (!pMac->pmc.smpsConfig.enterOnAc) { htMimoPowerSaveState = eSIR_HT_MIMO_PS_NO_LIMIT; if (pmcSendMessage(hHal, eWNI_PMC_SMPS_STATE_IND, &htMimoPowerSaveState, sizeof(tSirMacHTMIMOPowerSaveState)) != eHAL_STATUS_SUCCESS) return eHAL_STATUS_FAILURE; } return eHAL_STATUS_SUCCESS; #endif //GEN6_ONWARDS default: pmcLog(pMac, LOGE, FL("Invalid event %d"), event); PMC_ABORT; return eHAL_STATUS_FAILURE; } } /****************************************************************************** * * Name: pmcSetConfigPowerSave * * Description: * Configures one of the power saving modes. 
* * Parameters: * hHal - HAL handle for device * psMode - the power saving mode to configure * pConfigParams - pointer to configuration parameters specific to the * power saving mode * * Returns: * eHAL_STATUS_SUCCESS - configuration successful * eHAL_STATUS_FAILURE - configuration not successful * ******************************************************************************/ eHalStatus pmcSetConfigPowerSave (tHalHandle hHal, tPmcPowerSavingMode psMode, void *pConfigParams) { tpAniSirGlobal pMac = PMAC_STRUCT(hHal); #ifdef FEATURE_WLAN_DIAG_SUPPORT WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type); #endif pmcLog(pMac, LOG2, FL("Entering pmcSetConfigPowerSave, power save mode %d"), psMode); /* Configure the specified power saving mode. */ switch (psMode) { case ePMC_IDLE_MODE_POWER_SAVE: pMac->pmc.impsConfig = *(tpPmcImpsConfigParams)pConfigParams; pmcLog(pMac, LOG3, FL("IMPS configuration")); pmcLog(pMac, LOG3, " enter on AC: %d", pMac->pmc.impsConfig.enterOnAc); break; case ePMC_BEACON_MODE_POWER_SAVE: pMac->pmc.bmpsConfig = *(tpPmcBmpsConfigParams)pConfigParams; pmcLog(pMac, LOG3, FL("BMPS configuration")); pmcLog(pMac, LOG3, " enter on AC: %d", pMac->pmc.bmpsConfig.enterOnAc); pmcLog(pMac, LOG3, " TX threshold: %d", pMac->pmc.bmpsConfig.txThreshold); pmcLog(pMac, LOG3, " RX threshold: %d", pMac->pmc.bmpsConfig.rxThreshold); pmcLog(pMac, LOG3, " traffic measurement period (ms): %d", pMac->pmc.bmpsConfig.trafficMeasurePeriod); pmcLog(pMac, LOG3, " BMPS period: %d", pMac->pmc.bmpsConfig.bmpsPeriod); pmcLog(pMac, LOG3, " beacons to forward code: %d", pMac->pmc.bmpsConfig.forwardBeacons); pmcLog(pMac, LOG3, " value of N: %d", pMac->pmc.bmpsConfig.valueOfN); pmcLog(pMac, LOG3, " use PS poll: %d", pMac->pmc.bmpsConfig.usePsPoll); pmcLog(pMac, LOG3, " set PM on last frame: %d", pMac->pmc.bmpsConfig.setPmOnLastFrame); pmcLog(pMac, LOG3, " value of enableBeaconEarlyTermination: %d", pMac->pmc.bmpsConfig.enableBeaconEarlyTermination); pmcLog(pMac, 
LOG3, " value of bcnEarlyTermWakeInterval: %d", pMac->pmc.bmpsConfig.bcnEarlyTermWakeInterval); #ifdef FEATURE_WLAN_DIAG_SUPPORT vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type)); psRequest.event_subtype = WLAN_BMPS_SET_CONFIG; /* possible loss of data due to mismatch but expectation is that values can reasonably be expected to fit in target widths */ psRequest.bmps_auto_timer_duration = (v_U16_t)pMac->pmc.bmpsConfig.trafficMeasurePeriod; psRequest.bmps_period = (v_U16_t)pMac->pmc.bmpsConfig.bmpsPeriod; WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC); #endif break; case ePMC_SPATIAL_MULTIPLEX_POWER_SAVE: pMac->pmc.smpsConfig = *(tpPmcSmpsConfigParams)pConfigParams; pmcLog(pMac, LOG3, FL("SMPS configuration")); pmcLog(pMac, LOG3, " mode: %d", pMac->pmc.smpsConfig.mode); pmcLog(pMac, LOG3, " enter on AC: %d", pMac->pmc.smpsConfig.enterOnAc); break; default: pmcLog(pMac, LOGE, FL("Invalid power save mode %d"), psMode); PMC_ABORT; return eHAL_STATUS_FAILURE; } //Send the power save config down to PE/HAL/FW if BMPS mode is being configured //and pmcReady has been invoked if(PMC_IS_READY(pMac) && psMode == ePMC_BEACON_MODE_POWER_SAVE) { if (pmcSendPowerSaveConfigMessage(hHal) != eHAL_STATUS_SUCCESS) return eHAL_STATUS_FAILURE; } return eHAL_STATUS_SUCCESS; } /****************************************************************************** * * Name: pmcGetConfigPowerSave * * Description: * Get the config for the specified power save mode * * Parameters: * hHal - HAL handle for device * psMode - the power saving mode to configure * pConfigParams - pointer to configuration parameters specific to the * power saving mode * * Returns: * eHAL_STATUS_SUCCESS - configuration successful * eHAL_STATUS_FAILURE - configuration not successful * ******************************************************************************/ eHalStatus pmcGetConfigPowerSave (tHalHandle hHal, tPmcPowerSavingMode psMode, void *pConfigParams) { tpAniSirGlobal pMac = 
PMAC_STRUCT(hHal); pmcLog(pMac, LOG2, FL("Entering pmcGetConfigPowerSave, power save mode %d"), psMode); /* Configure the specified power saving mode. */ switch (psMode) { case ePMC_IDLE_MODE_POWER_SAVE: *(tpPmcImpsConfigParams)pConfigParams = pMac->pmc.impsConfig; break; case ePMC_BEACON_MODE_POWER_SAVE: *(tpPmcBmpsConfigParams)pConfigParams = pMac->pmc.bmpsConfig; break; case ePMC_SPATIAL_MULTIPLEX_POWER_SAVE: *(tpPmcSmpsConfigParams)pConfigParams = pMac->pmc.smpsConfig; break; default: pmcLog(pMac, LOGE, FL("Invalid power save mode %d"), psMode); return eHAL_STATUS_FAILURE; } return eHAL_STATUS_SUCCESS; } /****************************************************************************** * * Name: pmcEnablePowerSave * * Description: * Enables one of the power saving modes. * * Parameters: * hHal - HAL handle for device * psMode - the power saving mode to enable * * Returns: * eHAL_STATUS_SUCCESS - successfully enabled * eHAL_STATUS_FAILURE - not successfully enabled * ******************************************************************************/ eHalStatus pmcEnablePowerSave (tHalHandle hHal, tPmcPowerSavingMode psMode) { tpAniSirGlobal pMac = PMAC_STRUCT(hHal); tSirMacHTMIMOPowerSaveState htMimoPowerSaveState; #ifdef FEATURE_WLAN_DIAG_SUPPORT WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type); vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type)); psRequest.event_subtype = WLAN_PS_MODE_ENABLE_REQ; psRequest.enable_disable_powersave_mode = psMode; WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC); #endif pmcLog(pMac, LOG2, FL("Entering pmcEnablePowerSave, power save mode %d"), psMode); /* Enable the specified power saving mode. */ switch (psMode) { case ePMC_IDLE_MODE_POWER_SAVE: pMac->pmc.impsEnabled = TRUE; break; case ePMC_BEACON_MODE_POWER_SAVE: pMac->pmc.bmpsEnabled = TRUE; break; case ePMC_SPATIAL_MULTIPLEX_POWER_SAVE: pMac->pmc.smpsEnabled = TRUE; /* If PMC already started, then turn on SMPS. 
*/ if (pMac->pmc.pmcState != STOPPED) if (pMac->pmc.powerSource != AC_POWER || pMac->pmc.smpsConfig.enterOnAc) { if (pMac->pmc.smpsConfig.mode == ePMC_DYNAMIC_SMPS) htMimoPowerSaveState = eSIR_HT_MIMO_PS_DYNAMIC; if (pMac->pmc.smpsConfig.mode == ePMC_STATIC_SMPS) htMimoPowerSaveState = eSIR_HT_MIMO_PS_STATIC; if (pmcSendMessage(hHal, eWNI_PMC_SMPS_STATE_IND, &htMimoPowerSaveState, sizeof(tSirMacHTMIMOPowerSaveState)) != eHAL_STATUS_SUCCESS) return eHAL_STATUS_FAILURE; } break; case ePMC_UAPSD_MODE_POWER_SAVE: pMac->pmc.uapsdEnabled = TRUE; break; case ePMC_STANDBY_MODE_POWER_SAVE: pMac->pmc.standbyEnabled = TRUE; break; case ePMC_WOWL_MODE_POWER_SAVE: pMac->pmc.wowlEnabled = TRUE; break; default: pmcLog(pMac, LOGE, FL("Invalid power save mode %d"), psMode); PMC_ABORT; return eHAL_STATUS_FAILURE; } return eHAL_STATUS_SUCCESS; } /* --------------------------------------------------------------------------- \fn pmcStartAutoBmpsTimer \brief Starts a timer that periodically polls all the registered module for entry into Bmps mode. This timer is started only if BMPS is enabled and whenever the device is in full power. \param hHal - The handle returned by macOpen. \return eHalStatus ---------------------------------------------------------------------------*/ eHalStatus pmcStartAutoBmpsTimer (tHalHandle hHal) { tpAniSirGlobal pMac = PMAC_STRUCT(hHal); #ifdef FEATURE_WLAN_DIAG_SUPPORT WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type); vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type)); psRequest.event_subtype = WLAN_START_BMPS_AUTO_TIMER_REQ; WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC); #endif pmcLog(pMac, LOG2, FL("Entering pmcStartAutoBmpsTimer")); /* Check if BMPS is enabled. */ if (!pMac->pmc.bmpsEnabled) { pmcLog(pMac, LOGE, "PMC: Cannot enable BMPS timer. BMPS is disabled"); return eHAL_STATUS_FAILURE; } pMac->pmc.autoBmpsEntryEnabled = TRUE; /* Check if there is an Infra session. 
If there is no Infra session, timer will be started when STA associates to AP */ if (pmcShouldBmpsTimerRun(pMac)) { if (pmcStartTrafficTimer(hHal, pMac->pmc.bmpsConfig.trafficMeasurePeriod) != eHAL_STATUS_SUCCESS) return eHAL_STATUS_FAILURE; } return eHAL_STATUS_SUCCESS; } /* --------------------------------------------------------------------------- \fn pmcStopAutoBmpsTimer \brief Stops the Auto BMPS Timer that was started using sme_startAutoBmpsTimer Stopping the timer does not cause a device state change. Only the timer is stopped. If "Full Power" is desired, use the pmcRequestFullPower API \param hHal - The handle returned by macOpen. \return eHalStatus ---------------------------------------------------------------------------*/ eHalStatus pmcStopAutoBmpsTimer (tHalHandle hHal) { tpAniSirGlobal pMac = PMAC_STRUCT(hHal); #ifdef FEATURE_WLAN_DIAG_SUPPORT WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type); vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type)); psRequest.event_subtype = WLAN_STOP_BMPS_AUTO_TIMER_REQ; WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC); #endif pmcLog(pMac, LOG2, FL("Entering pmcStopAutoBmpsTimer")); pMac->pmc.autoBmpsEntryEnabled = FALSE; /* If uapsd session is not required or HDD has not requested BMPS, stop the auto bmps timer.*/ if (!pMac->pmc.uapsdSessionRequired && !pMac->pmc.bmpsRequestedByHdd) pmcStopTrafficTimer(hHal); return eHAL_STATUS_SUCCESS; } /****************************************************************************** * * Name: pmcDisablePowerSave * * Description: * Disables one of the power saving modes. 
* * Parameters: * hHal - HAL handle for device * psMode - the power saving mode to disable * * Returns: * eHAL_STATUS_SUCCESS - successfully disabled * eHAL_STATUS_FAILURE - not successfully disabled * ******************************************************************************/ eHalStatus pmcDisablePowerSave (tHalHandle hHal, tPmcPowerSavingMode psMode) { tpAniSirGlobal pMac = PMAC_STRUCT(hHal); tSirMacHTMIMOPowerSaveState htMimoPowerSaveState; #ifdef FEATURE_WLAN_DIAG_SUPPORT WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type); vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type)); psRequest.event_subtype = WLAN_PS_MODE_DISABLE_REQ; psRequest.enable_disable_powersave_mode = psMode; WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC); #endif pmcLog(pMac, LOG2, FL("Entering pmcDisablePowerSave, power save mode %d"), psMode); /* Disable the specified power saving mode. */ switch (psMode) { case ePMC_IDLE_MODE_POWER_SAVE: pMac->pmc.impsEnabled = FALSE; break; case ePMC_BEACON_MODE_POWER_SAVE: pMac->pmc.bmpsEnabled = FALSE; break; case ePMC_SPATIAL_MULTIPLEX_POWER_SAVE: pMac->pmc.smpsEnabled = FALSE; /* Turn off SMPS. */ htMimoPowerSaveState = eSIR_HT_MIMO_PS_NO_LIMIT; if (pmcSendMessage(hHal, eWNI_PMC_SMPS_STATE_IND, &htMimoPowerSaveState, sizeof(tSirMacHTMIMOPowerSaveState)) != eHAL_STATUS_SUCCESS) return eHAL_STATUS_FAILURE; break; case ePMC_UAPSD_MODE_POWER_SAVE: pMac->pmc.uapsdEnabled = FALSE; break; case ePMC_STANDBY_MODE_POWER_SAVE: pMac->pmc.standbyEnabled = FALSE; break; case ePMC_WOWL_MODE_POWER_SAVE: pMac->pmc.wowlEnabled = FALSE; break; default: pmcLog(pMac, LOGE, FL("Invalid power save mode %d"), psMode); PMC_ABORT; return eHAL_STATUS_FAILURE; } return eHAL_STATUS_SUCCESS; } /****************************************************************************** * * Name: pmcQueryPowerState * * Description: * Returns the current power state of the device. 
* * Parameters: * hHal - HAL handle for device * pPowerState - pointer to location to return power state * pHwWlanSwitchState - pointer to location to return Hardware WLAN * Switch state * pSwWlanSwitchState - pointer to location to return Software WLAN * Switch state * * Returns: * eHAL_STATUS_SUCCESS - power state successfully returned * eHAL_STATUS_FAILURE - power state not successfully returned * ******************************************************************************/ eHalStatus pmcQueryPowerState (tHalHandle hHal, tPmcPowerState *pPowerState, tPmcSwitchState *pHwWlanSwitchState, tPmcSwitchState *pSwWlanSwitchState) { tpAniSirGlobal pMac = PMAC_STRUCT(hHal); pmcLog(pMac, LOG2, FL("Entering pmcQueryPowerState")); /* Return current power state based on PMC state. */ if(pPowerState != NULL) { /* Return current power state based on PMC state. */ switch (pMac->pmc.pmcState) { case FULL_POWER: *pPowerState = ePMC_FULL_POWER; break; default: *pPowerState = ePMC_LOW_POWER; break; } } /* Return current switch settings. */ if(pHwWlanSwitchState != NULL) *pHwWlanSwitchState = pMac->pmc.hwWlanSwitchState; if(pSwWlanSwitchState != NULL) *pSwWlanSwitchState = pMac->pmc.swWlanSwitchState; return eHAL_STATUS_SUCCESS; } /****************************************************************************** * * Name: pmcIsPowerSaveEnabled * * Description: * Checks if the device is able to enter one of the power save modes. * "Able to enter" means the power save mode is enabled for the device * and the host is using the correct power source for entry into the * power save mode. This routine does not indicate whether the device * is actually in the power save mode at a particular point in time. 
* * Parameters: * hHal - HAL handle for device * psMode - the power saving mode * * Returns: * TRUE if device is able to enter the power save mode, FALSE otherwise * ******************************************************************************/ tANI_BOOLEAN pmcIsPowerSaveEnabled (tHalHandle hHal, tPmcPowerSavingMode psMode) { tpAniSirGlobal pMac = PMAC_STRUCT(hHal); pmcLog(pMac, LOG2, FL("Entering pmcIsPowerSaveEnabled, power save mode %d"), psMode); /* Check ability to enter based on the specified power saving mode. */ switch (psMode) { case ePMC_IDLE_MODE_POWER_SAVE: return pMac->pmc.impsEnabled && (pMac->pmc.powerSource != AC_POWER || pMac->pmc.impsConfig.enterOnAc); case ePMC_BEACON_MODE_POWER_SAVE: return pMac->pmc.bmpsEnabled; case ePMC_SPATIAL_MULTIPLEX_POWER_SAVE: return pMac->pmc.smpsEnabled && (pMac->pmc.powerSource != AC_POWER || pMac->pmc.smpsConfig.enterOnAc); case ePMC_UAPSD_MODE_POWER_SAVE: return pMac->pmc.uapsdEnabled; case ePMC_STANDBY_MODE_POWER_SAVE: return pMac->pmc.standbyEnabled; case ePMC_WOWL_MODE_POWER_SAVE: return pMac->pmc.wowlEnabled; break; default: pmcLog(pMac, LOGE, FL("Invalid power save mode %d"), psMode); PMC_ABORT; return FALSE; } } /****************************************************************************** * * Name: pmcRequestFullPower * * Description: * Request that the device be brought to full power state. * * Parameters: * hHal - HAL handle for device * callbackRoutine - routine to call when device actually achieves full * power state if "eHAL_STATUS_PMC_PENDING" is returned * callbackContext - value to be passed as parameter to routine specified * above * fullPowerReason - Reason for requesting full power mode. This is used * by PE to decide whether data null should be sent to * AP when exiting BMPS mode. Caller should use the * eSME_LINK_DISCONNECTED reason if link is disconnected * and there is no need to tell the AP that we are going * out of power save. 
* * Returns: * eHAL_STATUS_SUCCESS - device brought to full power state * eHAL_STATUS_FAILURE - device cannot be brought to full power state * eHAL_STATUS_PMC_PENDING - device is being brought to full power state, * callbackRoutine will be called when completed * ******************************************************************************/ eHalStatus pmcRequestFullPower (tHalHandle hHal, void (*callbackRoutine) (void *callbackContext, eHalStatus status), void *callbackContext, tRequestFullPowerReason fullPowerReason) { tpAniSirGlobal pMac = PMAC_STRUCT(hHal); tpRequestFullPowerEntry pEntry; #ifdef FEATURE_WLAN_DIAG_SUPPORT WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type); vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type)); psRequest.event_subtype = WLAN_ENTER_FULL_POWER_REQ; psRequest.full_power_request_reason = fullPowerReason; WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC); #endif pmcLog(pMac, LOG2, FL("Entering pmcRequestFullPower")); if( !PMC_IS_READY(pMac) ) { pmcLog(pMac, LOGE, FL("Requesting Full Power when PMC not ready")); pmcLog(pMac, LOGE, FL("pmcReady = %d pmcState = %s"), pMac->pmc.pmcReady, pmcGetPmcStateStr(pMac->pmc.pmcState)); return eHAL_STATUS_FAILURE; } /* If HDD is requesting full power, clear any buffered requests for WOWL and BMPS that were requested by HDD previously */ if(SIR_IS_FULL_POWER_NEEDED_BY_HDD(fullPowerReason)) { pMac->pmc.bmpsRequestedByHdd = FALSE; pMac->pmc.wowlModeRequired = FALSE; } /* If already in full power, just return. */ if (pMac->pmc.pmcState == FULL_POWER) return eHAL_STATUS_SUCCESS; /* If in IMPS State, then cancel the timer. */ if (pMac->pmc.pmcState == IMPS) if (palTimerStop(pMac->hHdd, pMac->pmc.hImpsTimer) != eHAL_STATUS_SUCCESS) { pmcLog(pMac, LOGE, FL("Cannot cancel IMPS timer")); return eHAL_STATUS_FAILURE; } /* Enter Request Full Power State. 
*/ if (pmcEnterRequestFullPowerState(hHal, fullPowerReason) != eHAL_STATUS_SUCCESS) return eHAL_STATUS_FAILURE; /* If able to enter Request Full Power State, then request is pending. Allocate entry for request full power callback routine list. */ //If caller doesn't need a callback, simply waits up the chip. if( callbackRoutine ) { if (palAllocateMemory(pMac->hHdd, (void **)&pEntry, sizeof(tRequestFullPowerEntry)) != eHAL_STATUS_SUCCESS) { pmcLog(pMac, LOGE, FL("Cannot allocate memory for request full power routine list entry")); PMC_ABORT; return eHAL_STATUS_FAILURE; } /* Store routine and context in entry. */ pEntry->callbackRoutine = callbackRoutine; pEntry->callbackContext = callbackContext; /* Add entry to list. */ csrLLInsertTail(&pMac->pmc.requestFullPowerList, &pEntry->link, TRUE); } return eHAL_STATUS_PMC_PENDING; } /****************************************************************************** * * Name: pmcRequestImps * * Description: * Request that the device be placed in Idle Mode Power Save (IMPS). * The Common Scan/Roam Module makes this request. The device will be * placed into IMPS for the specified amount of time, and then returned * to full power. 
* * Parameters: * hHal - HAL handle for device * impsPeriod - amount of time to remain in IMPS (milliseconds) * callbackRoutine - routine to call when IMPS period has finished and * the device has been brought to full power * callbackContext - value to be passed as parameter to routine specified * above * * Returns: * eHAL_STATUS_SUCCESS - device will enter IMPS * eHAL_STATUS_PMC_DISABLED - IMPS is disabled * eHAL_STATUS_PMC_NOT_NOW - another module is prohibiting entering IMPS * at this time * eHAL_STATUS_PMC_AC_POWER - IMPS is disabled when host operating from * AC power * eHAL_STATUS_PMC_ALREADY_IN_IMPS - device is already in IMPS * eHAL_STATUS_PMC_SYS_ERROR - system error that prohibits entering IMPS * ******************************************************************************/ eHalStatus pmcRequestImps (tHalHandle hHal, tANI_U32 impsPeriod, void (*callbackRoutine) (void *callbackContext, eHalStatus status), void *callbackContext) { tpAniSirGlobal pMac = PMAC_STRUCT(hHal); eHalStatus status; #ifdef FEATURE_WLAN_DIAG_SUPPORT WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type); vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type)); psRequest.event_subtype = WLAN_IMPS_ENTER_REQ; psRequest.imps_period = impsPeriod; WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC); #endif pmcLog(pMac, LOG2, FL("Entering pmcRequestImps")); status = pmcEnterImpsCheck( pMac ); if( HAL_STATUS_SUCCESS( status ) ) { /* Enter Request IMPS State. */ status = pmcEnterRequestImpsState( hHal ); if (HAL_STATUS_SUCCESS( status )) { /* Save the period and callback routine for when we need it. 
*/ pMac->pmc.impsPeriod = impsPeriod; pMac->pmc.impsCallbackRoutine = callbackRoutine; pMac->pmc.impsCallbackContext = callbackContext; } else { status = eHAL_STATUS_PMC_SYS_ERROR; } } return status; } /****************************************************************************** * * Name: pmcRegisterPowerSaveCheck * * Description: * Allows a routine to be registered so that the routine is called whenever * the device is about to enter one of the power save modes. This routine * will say whether the device is allowed to enter the power save mode at * the time of the call. * * Parameters: * hHal - HAL handle for device * checkRoutine - routine to call before entering a power save mode, should * return TRUE if the device is allowed to enter the power * save mode, FALSE otherwise * checkContext - value to be passed as parameter to routine specified above * * Returns: * eHAL_STATUS_SUCCESS - successfully registered * eHAL_STATUS_FAILURE - not successfully registered * ******************************************************************************/ eHalStatus pmcRegisterPowerSaveCheck (tHalHandle hHal, tANI_BOOLEAN (*checkRoutine) (void *checkContext), void *checkContext) { tpAniSirGlobal pMac = PMAC_STRUCT(hHal); tpPowerSaveCheckEntry pEntry; pmcLog(pMac, LOG2, FL("Entering pmcRegisterPowerSaveCheck")); /* Allocate entry for power save check routine list. */ if (palAllocateMemory(pMac->hHdd, (void **)&pEntry, sizeof(tPowerSaveCheckEntry)) != eHAL_STATUS_SUCCESS) { pmcLog(pMac, LOGE, FL("Cannot allocate memory for power save check routine list entry")); PMC_ABORT; return eHAL_STATUS_FAILURE; } /* Store routine and context in entry. */ pEntry->checkRoutine = checkRoutine; pEntry->checkContext = checkContext; /* Add entry to list. 
*/ csrLLInsertTail(&pMac->pmc.powerSaveCheckList, &pEntry->link, FALSE); return eHAL_STATUS_SUCCESS; } /****************************************************************************** * * Name: pmcDeregisterPowerSaveCheck * * Description: * Reregisters a routine that was previously registered with * pmcRegisterPowerSaveCheck. * * Parameters: * hHal - HAL handle for device * checkRoutine - routine to deregister * * Returns: * eHAL_STATUS_SUCCESS - successfully deregistered * eHAL_STATUS_FAILURE - not successfully deregistered * ******************************************************************************/ eHalStatus pmcDeregisterPowerSaveCheck (tHalHandle hHal, tANI_BOOLEAN (*checkRoutine) (void *checkContext)) { tpAniSirGlobal pMac = PMAC_STRUCT(hHal); tListElem *pEntry; tpPowerSaveCheckEntry pPowerSaveCheckEntry; pmcLog(pMac, LOG2, FL("Entering pmcDeregisterPowerSaveCheck")); /* Find entry in the power save check routine list that matches the specified routine and remove it. */ pEntry = csrLLPeekHead(&pMac->pmc.powerSaveCheckList, FALSE); while (pEntry != NULL) { pPowerSaveCheckEntry = GET_BASE_ADDR(pEntry, tPowerSaveCheckEntry, link); if (pPowerSaveCheckEntry->checkRoutine == checkRoutine) { if (csrLLRemoveEntry(&pMac->pmc.powerSaveCheckList, pEntry, FALSE)) { if (palFreeMemory(pMac->hHdd, pPowerSaveCheckEntry) != eHAL_STATUS_SUCCESS) { pmcLog(pMac, LOGE, FL("Cannot free memory for power save check routine list entry")); PMC_ABORT; return eHAL_STATUS_FAILURE; } } else { pmcLog(pMac, LOGE, FL("Cannot remove power save check routine list entry")); return eHAL_STATUS_FAILURE; } return eHAL_STATUS_SUCCESS; } pEntry = csrLLNext(&pMac->pmc.powerSaveCheckList, pEntry, FALSE); } /* Could not find matching entry. 
*/ return eHAL_STATUS_FAILURE; } static void pmcProcessResponse( tpAniSirGlobal pMac, tSirSmeRsp *pMsg ) { tListElem *pEntry = NULL; tSmeCmd *pCommand = NULL; tANI_BOOLEAN fRemoveCommand = eANI_BOOLEAN_TRUE; pEntry = csrLLPeekHead(&pMac->sme.smeCmdActiveList, LL_ACCESS_LOCK); if(pEntry) { pCommand = GET_BASE_ADDR(pEntry, tSmeCmd, Link); pmcLog(pMac, LOG2, FL("process message = %d"), pMsg->messageType); /* Process each different type of message. */ switch (pMsg->messageType) { /* We got a response to our IMPS request. */ case eWNI_PMC_ENTER_IMPS_RSP: pmcLog(pMac, LOG2, FL("Rcvd eWNI_PMC_ENTER_IMPS_RSP with status = %d"), pMsg->statusCode); if( (eSmeCommandEnterImps != pCommand->command) && (eSmeCommandEnterStandby != pCommand->command) ) { pmcLog(pMac, LOGW, FL("Rcvd eWNI_PMC_ENTER_IMPS_RSP without request")); fRemoveCommand = eANI_BOOLEAN_FALSE; break; } if(pMac->pmc.pmcState == REQUEST_IMPS) { /* Enter IMPS State if response indicates success. */ if (pMsg->statusCode == eSIR_SME_SUCCESS) { pMac->pmc.ImpsReqFailed = VOS_FALSE; pmcEnterImpsState(pMac); if (!(pMac->pmc.ImpsReqFailed || pMac->pmc.ImpsReqTimerFailed) && pMac->pmc.ImpsReqFailCnt) { pmcLog(pMac, LOGE, FL("Response message to request to enter IMPS was failed %d times before success"), pMac->pmc.ImpsReqFailCnt); pMac->pmc.ImpsReqFailCnt = 0; } } /* If response is failure, then we stay in Full Power State and tell everyone that we aren't going into IMPS. */ else { pMac->pmc.ImpsReqFailed = VOS_TRUE; if (!(pMac->pmc.ImpsReqFailCnt & 0xF)) { pmcLog(pMac, LOGE, FL("Response message to request to enter IMPS indicates failure, status %x, FailCnt - %d"), pMsg->statusCode, ++pMac->pmc.ImpsReqFailCnt); } else { pMac->pmc.ImpsReqFailCnt++; } pmcEnterFullPowerState(pMac); } } else if (pMac->pmc.pmcState == REQUEST_STANDBY) { /* Enter STANDBY State if response indicates success. 
*/
                if (pMsg->statusCode == eSIR_SME_SUCCESS)
                {
                    pmcEnterStandbyState(pMac);
                    pmcDoStandbyCallbacks(pMac, eHAL_STATUS_SUCCESS);
                }
                /* If response is failure, then we stay in Full Power State
                   and tell everyone that we aren't going into STANDBY. */
                else
                {
                    pmcLog(pMac, LOGE, "PMC: response message to request to enter "
                           "standby indicates failure, status %x", pMsg->statusCode);
                    pmcEnterFullPowerState(pMac);
                    pmcDoStandbyCallbacks(pMac, eHAL_STATUS_FAILURE);
                }
            }
            else
            {
                pmcLog(pMac, LOGE, "PMC: Enter IMPS rsp rcvd when device is "
                       "in %d state", pMac->pmc.pmcState);
            }
            break;

        /* We got a response to our wake from IMPS request. */
        case eWNI_PMC_EXIT_IMPS_RSP:
            pmcLog(pMac, LOG2, FL("Rcvd eWNI_PMC_EXIT_IMPS_RSP with status = %d"), pMsg->statusCode);
            if( eSmeCommandExitImps != pCommand->command )
            {
                pmcLog(pMac, LOGW, FL("Rcvd eWNI_PMC_EXIT_IMPS_RSP without request"));
                fRemoveCommand = eANI_BOOLEAN_FALSE;
                break;
            }
            /* Check that we are in the correct state for this message. */
            if (pMac->pmc.pmcState != REQUEST_FULL_POWER)
            {
                pmcLog(pMac, LOGE, FL("Got Exit IMPS Response Message while "
                       "in state %d"), pMac->pmc.pmcState);
                break;
            }
            /* Enter Full Power State.  Note: full power is entered even when
               the response indicates failure (only logged). */
            if (pMsg->statusCode != eSIR_SME_SUCCESS)
            {
                pmcLog(pMac, LOGE, FL("Response message to request to exit "
                       "IMPS indicates failure, status %x"), pMsg->statusCode);
            }
            pmcEnterFullPowerState(pMac);
            break;

        /* We got a response to our BMPS request. */
        case eWNI_PMC_ENTER_BMPS_RSP:
            pmcLog(pMac, LOG2, FL("Rcvd eWNI_PMC_ENTER_BMPS_RSP with status = %d"), pMsg->statusCode);
            if( eSmeCommandEnterBmps != pCommand->command )
            {
                pmcLog(pMac, LOGW, FL("Rcvd eWNI_PMC_ENTER_BMPS_RSP without request"));
                fRemoveCommand = eANI_BOOLEAN_FALSE;
                break;
            }
            /* The queued BMPS request is now being answered. */
            pMac->pmc.bmpsRequestQueued = eANI_BOOLEAN_FALSE;
            /* Check that we are in the correct state for this message. */
            if (pMac->pmc.pmcState != REQUEST_BMPS)
            {
                pmcLog(pMac, LOGE, FL("Got Enter BMPS Response Message while in state %d"), pMac->pmc.pmcState);
                break;
            }
            /* Enter BMPS State if response indicates success. */
            if (pMsg->statusCode == eSIR_SME_SUCCESS)
            {
                pmcEnterBmpsState(pMac);
                /* Note: If BMPS was requested because of start UAPSD,
                   there will no entries for BMPS callback routines and
                   pmcDoBmpsCallbacks will be a No-Op */
                pmcDoBmpsCallbacks(pMac, eHAL_STATUS_SUCCESS);
            }
            /* If response is failure, then we stay in Full Power State
               and tell everyone that we aren't going into BMPS. */
            else
            {
                pmcLog(pMac, LOGE, FL("Response message to request to enter BMPS indicates failure, status %x"),
                       pMsg->statusCode);
                pmcEnterFullPowerState(pMac);
                //Do not call UAPSD callback here since it may be re-entered
                pmcDoBmpsCallbacks(pMac, eHAL_STATUS_FAILURE);
            }
            break;

        /* We got a response to our wake from BMPS request. */
        case eWNI_PMC_EXIT_BMPS_RSP:
            pmcLog(pMac, LOG2, FL("Rcvd eWNI_PMC_EXIT_BMPS_RSP with status = %d"), pMsg->statusCode);
            if( eSmeCommandExitBmps != pCommand->command )
            {
                pmcLog(pMac, LOGW, FL("Rcvd eWNI_PMC_EXIT_BMPS_RSP without request"));
                fRemoveCommand = eANI_BOOLEAN_FALSE;
                break;
            }
            /* Check that we are in the correct state for this message. */
            if (pMac->pmc.pmcState != REQUEST_FULL_POWER)
            {
                pmcLog(pMac, LOGE, FL("Got Exit BMPS Response Message while in state %d"), pMac->pmc.pmcState);
                break;
            }
            /* Enter Full Power State.  A failed exit is fatal (LOGP). */
            if (pMsg->statusCode != eSIR_SME_SUCCESS)
            {
                pmcLog(pMac, LOGP, FL("Response message to request to exit BMPS indicates failure, status %x"),
                       pMsg->statusCode);
            }
            pmcEnterFullPowerState(pMac);
            break;

        /* We got a response to our Start UAPSD request. */
        case eWNI_PMC_ENTER_UAPSD_RSP:
            pmcLog(pMac, LOG2, FL("Rcvd eWNI_PMC_ENTER_UAPSD_RSP with status = %d"), pMsg->statusCode);
            if( eSmeCommandEnterUapsd != pCommand->command )
            {
                pmcLog(pMac, LOGW, FL("Rcvd eWNI_PMC_ENTER_UAPSD_RSP without request"));
                fRemoveCommand = eANI_BOOLEAN_FALSE;
                break;
            }
            /* Check that we are in the correct state for this message. */
            if (pMac->pmc.pmcState != REQUEST_START_UAPSD)
            {
                pmcLog(pMac, LOGE, FL("Got Enter Uapsd rsp Message while in state %d"), pMac->pmc.pmcState);
                break;
            }
            /* Enter UAPSD State if response indicates success. */
            if (pMsg->statusCode == eSIR_SME_SUCCESS)
            {
                pmcEnterUapsdState(pMac);
                pmcDoStartUapsdCallbacks(pMac, eHAL_STATUS_SUCCESS);
            }
            /* If response is failure, then we try to put the chip back in
               BMPS mode */
            else
            {
                pmcLog(pMac, LOGE, "PMC: response message to request to enter "
                       "UAPSD indicates failure, status %x", pMsg->statusCode);
                //Need to reset the UAPSD flag so pmcEnterBmpsState won't try to enter UAPSD.
                pMac->pmc.uapsdSessionRequired = FALSE;
                pmcEnterBmpsState(pMac);
                //UAPSD will not be retried in this case so tell requester we are done with failure
                pmcDoStartUapsdCallbacks(pMac, eHAL_STATUS_FAILURE);
            }
            break;

        /* We got a response to our Stop UAPSD request. */
        case eWNI_PMC_EXIT_UAPSD_RSP:
            pmcLog(pMac, LOG2, FL("Rcvd eWNI_PMC_EXIT_UAPSD_RSP with status = %d"), pMsg->statusCode);
            if( eSmeCommandExitUapsd != pCommand->command )
            {
                pmcLog(pMac, LOGW, FL("Rcvd eWNI_PMC_EXIT_UAPSD_RSP without request"));
                fRemoveCommand = eANI_BOOLEAN_FALSE;
                break;
            }
            /* Check that we are in the correct state for this message. */
            if (pMac->pmc.pmcState != REQUEST_STOP_UAPSD)
            {
                pmcLog(pMac, LOGE, FL("Got Exit Uapsd rsp Message while in state %d"), pMac->pmc.pmcState);
                break;
            }
            /* Enter BMPS State */
            if (pMsg->statusCode != eSIR_SME_SUCCESS)
            {
                pmcLog(pMac, LOGP, "PMC: response message to request to exit "
                       "UAPSD indicates failure, status %x", pMsg->statusCode);
            }
            pmcEnterBmpsState(pMac);
            break;

        /* We got a response to our enter WOWL request. */
        case eWNI_PMC_ENTER_WOWL_RSP:
            if( eSmeCommandEnterWowl != pCommand->command )
            {
                pmcLog(pMac, LOGW, FL("Rcvd eWNI_PMC_ENTER_WOWL_RSP without request"));
                fRemoveCommand = eANI_BOOLEAN_FALSE;
                break;
            }
            /* Check that we are in the correct state for this message. */
            if (pMac->pmc.pmcState != REQUEST_ENTER_WOWL)
            {
                pmcLog(pMac, LOGE, FL("Got eWNI_PMC_ENTER_WOWL_RSP while in state %s"),
                       pmcGetPmcStateStr(pMac->pmc.pmcState));
                break;
            }
            /* Enter WOWL State if response indicates success. */
            if (pMsg->statusCode == eSIR_SME_SUCCESS)
            {
                pmcEnterWowlState(pMac);
                pmcDoEnterWowlCallbacks(pMac, eHAL_STATUS_SUCCESS);
            }
            /* If response is failure, then we try to put the chip back in
               BMPS mode */
            else
            {
                pmcLog(pMac, LOGE, "PMC: response message to request to enter "
                       "WOWL indicates failure, status %x", pMsg->statusCode);
                pmcEnterBmpsState(pMac);
                pmcDoEnterWowlCallbacks(pMac, eHAL_STATUS_FAILURE);
            }
            break;

        /* We got a response to our exit WOWL request. */
        case eWNI_PMC_EXIT_WOWL_RSP:
            if( eSmeCommandExitWowl != pCommand->command )
            {
                pmcLog(pMac, LOGW, FL("Rcvd eWNI_PMC_EXIT_WOWL_RSP without request"));
                fRemoveCommand = eANI_BOOLEAN_FALSE;
                break;
            }
            /* Check that we are in the correct state for this message. */
            if (pMac->pmc.pmcState != REQUEST_EXIT_WOWL)
            {
                pmcLog(pMac, LOGE, FL("Got Exit WOWL rsp Message while in state %d"), pMac->pmc.pmcState);
                break;
            }
            /* Enter BMPS State */
            if (pMsg->statusCode != eSIR_SME_SUCCESS)
            {
                pmcLog(pMac, LOGP, "PMC: response message to request to exit "
                       "WOWL indicates failure, status %x", pMsg->statusCode);
            }
            pmcEnterBmpsState(pMac);
            break;

        default:
            pmcLog(pMac, LOGE, FL("Invalid message type %d received"), pMsg->messageType);
            PMC_ABORT;
            break;
        }//switch

        /* Pop the matched command off the active queue and kick the pending
           queue; skipped when the response did not match the command. */
        if( fRemoveCommand )
        {
            if( csrLLRemoveEntry( &pMac->sme.smeCmdActiveList, pEntry, LL_ACCESS_LOCK ) )
            {
                pmcReleaseCommand( pMac, pCommand );
                smeProcessPendingQueue( pMac );
            }
        }
    }
    else
    {
        pmcLog(pMac, LOGE, FL("message type %d received but no request is found"), pMsg->messageType);
    }
}


/******************************************************************************
*
* Name:  pmcMessageProcessor
*
* Description:
*    Process a message received by PMC.  Unsolicited indications from PE are
*    handled here directly; everything else is treated as a response to the
*    active SME command and forwarded to pmcProcessResponse.
*
* Parameters:
*    hHal - HAL handle for device
*    pMsg - pointer to received message
*
* Returns:
*    nothing
*
******************************************************************************/
void pmcMessageProcessor (tHalHandle hHal, tSirSmeRsp *pMsg)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    pmcLog(pMac, LOG2, FL("Entering pmcMessageProcessor, message type %d"), pMsg->messageType);

    switch( pMsg->messageType )
    {
    case eWNI_PMC_EXIT_BMPS_IND:
    //When PMC needs to handle more indication from PE, they need to be added here.
    {
        /* Device left BMPS on its own. */
        pmcLog(pMac, LOGW, FL("Rcvd eWNI_PMC_EXIT_BMPS_IND with status = %d"), pMsg->statusCode);
        /* Check that we are in the correct state for this message.
*/
        switch(pMac->pmc.pmcState)
        {
        /* All power-save-related states are plausible sources of an
           unsolicited exit-BMPS indication; only a warning is logged. */
        case BMPS:
        case REQUEST_START_UAPSD:
        case UAPSD:
        case REQUEST_STOP_UAPSD:
        case REQUEST_ENTER_WOWL:
        case WOWL:
        case REQUEST_EXIT_WOWL:
        case REQUEST_FULL_POWER:
            pmcLog(pMac, LOGW, FL("Got eWNI_PMC_EXIT_BMPS_IND while in state %d"), pMac->pmc.pmcState);
            break;
        default:
            /* Any other state is a logic error. */
            pmcLog(pMac, LOGE, FL("Got eWNI_PMC_EXIT_BMPS_IND while in state %d"), pMac->pmc.pmcState);
            PMC_ABORT;
            break;
        }

        /* Enter Full Power State. */
        if (pMsg->statusCode != eSIR_SME_SUCCESS)
        {
            pmcLog(pMac, LOGP, FL("Exit BMPS indication indicates failure, status %x"), pMsg->statusCode);
        }
        else
        {
            tpSirSmeExitBmpsInd pExitBmpsInd = (tpSirSmeExitBmpsInd)pMsg;
            pmcEnterRequestFullPowerState(hHal, pExitBmpsInd->exitBmpsReason);
        }
        break;
    }

    default:
        /* Not an indication handled above: treat as a command response. */
        pmcProcessResponse( pMac, pMsg );
        break;
    }
}


/* Return TRUE when the current connection topology permits BMPS: infra STA
   associated, no IBSS, no BT-AMP, and no concurrent infra/SAP/P2P-GO
   session. */
tANI_BOOLEAN pmcValidateConnectState( tHalHandle hHal )
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    if ( !csrIsInfraConnected( pMac ) )
    {
        pmcLog(pMac, LOGW, "PMC: STA not associated. BMPS cannot be entered");
        return eANI_BOOLEAN_FALSE;
    }

    //Cannot have other session
    if ( csrIsIBSSStarted( pMac ) )
    {
        pmcLog(pMac, LOGW, "PMC: IBSS started. BMPS cannot be entered");
        return eANI_BOOLEAN_FALSE;
    }
    if ( csrIsBTAMPStarted( pMac ) )
    {
        pmcLog(pMac, LOGW, "PMC: BT-AMP exists. BMPS cannot be entered");
        return eANI_BOOLEAN_FALSE;
    }
    if ((vos_concurrent_sessions_running()) &&
        (csrIsConcurrentInfraConnected( pMac ) ||
        (vos_get_concurrency_mode()& VOS_SAP) ||
        (vos_get_concurrency_mode()& VOS_P2P_GO)))
    {
        pmcLog(pMac, LOGW, "PMC: Multiple active sessions exists. BMPS cannot be entered");
        return eANI_BOOLEAN_FALSE;
    }

    return eANI_BOOLEAN_TRUE;
}


/* Return TRUE when IMPS is permitted: no IBSS, no BT-AMP, and every session
   disconnected. */
tANI_BOOLEAN pmcAllowImps( tHalHandle hHal )
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    //Cannot have other session like IBSS or BT AMP running
    if ( csrIsIBSSStarted( pMac ) )
    {
        pmcLog(pMac, LOGW, "PMC: IBSS started. IMPS cannot be entered");
        return eANI_BOOLEAN_FALSE;
    }
    if ( csrIsBTAMPStarted( pMac ) )
    {
        pmcLog(pMac, LOGW, "PMC: BT-AMP exists. IMPS cannot be entered");
        return eANI_BOOLEAN_FALSE;
    }

    //All sessions must be disconnected to allow IMPS
    if ( !csrIsAllSessionDisconnected( pMac ) )
    {
        pmcLog(pMac, LOGW, "PMC: Atleast one connected session. IMPS cannot be entered");
        return eANI_BOOLEAN_FALSE;
    }

    return eANI_BOOLEAN_TRUE;
}


/******************************************************************************
*
* Name:  pmcRequestBmps
*
* Description:
*    Request that the device be put in BMPS state.
*
* Parameters:
*    hHal - HAL handle for device
*    callbackRoutine - Callback routine invoked in case of success/failure
*    callbackContext - value to be passed as parameter to routine specified
*                      above
*
* Returns:
*    eHAL_STATUS_SUCCESS - device is in BMPS state
*    eHAL_STATUS_FAILURE - device cannot be brought to BMPS state
*    eHAL_STATUS_PMC_PENDING - device is being brought to BMPS state,
*
******************************************************************************/
eHalStatus pmcRequestBmps (
    tHalHandle hHal,
    void (*callbackRoutine) (void *callbackContext, eHalStatus status),
    void *callbackContext)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tpRequestBmpsEntry pEntry;
    eHalStatus status;

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type);

    vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type));
    psRequest.event_subtype = WLAN_BMPS_ENTER_REQ;
    WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC);
#endif

    pmcLog(pMac, LOG2, "PMC: entering pmcRequestBmps");

    /* If already in BMPS, just return.  UAPSD (and its request state) count
       as BMPS for this purpose. */
    if (pMac->pmc.pmcState == BMPS || REQUEST_START_UAPSD == pMac->pmc.pmcState || UAPSD == pMac->pmc.pmcState)
    {
        pmcLog(pMac, LOG2, "PMC: Device already in BMPS pmcState %d", pMac->pmc.pmcState);
        pMac->pmc.bmpsRequestedByHdd = TRUE;
        return eHAL_STATUS_SUCCESS;
    }

    status = pmcEnterBmpsCheck( pMac );
    if(HAL_STATUS_SUCCESS( status ))
    {
        status = pmcEnterRequestBmpsState(hHal);
        /* Enter Request BMPS State. */
        if ( HAL_STATUS_SUCCESS( status ) )
        {
            /* Remember that HDD requested BMPS. This flag will be used to put the
               device back into BMPS if any module other than HDD (e.g. CSR, QoS, or BAP)
               requests full power for any reason */
            pMac->pmc.bmpsRequestedByHdd = TRUE;

            /* If able to enter Request BMPS State, then request is pending.
               Allocate entry for request BMPS callback routine list. */
            if (palAllocateMemory( pMac->hHdd, (void **)&pEntry, sizeof(tRequestBmpsEntry)) != eHAL_STATUS_SUCCESS)
            {
                pmcLog(pMac, LOGE, "PMC: cannot allocate memory for request "
                       "BMPS routine list entry");
                return eHAL_STATUS_FAILURE;
            }

            /* Store routine and context in entry. */
            pEntry->callbackRoutine = callbackRoutine;
            pEntry->callbackContext = callbackContext;

            /* Add entry to list. */
            csrLLInsertTail(&pMac->pmc.requestBmpsList, &pEntry->link, FALSE);

            status = eHAL_STATUS_PMC_PENDING;
        }
        else
        {
            status = eHAL_STATUS_FAILURE;
        }
    }
    /* Retry to enter the BMPS if the status = eHAL_STATUS_PMC_NOT_NOW */
    else if (status == eHAL_STATUS_PMC_NOT_NOW)
    {
        pmcStopTrafficTimer(hHal);
        pmcLog(pMac, LOG1, FL("Can't enter BMPS+++"));
        if (pmcShouldBmpsTimerRun(pMac))
        {
            if (pmcStartTrafficTimer(pMac, pMac->pmc.bmpsConfig.trafficMeasurePeriod) != eHAL_STATUS_SUCCESS)
            {
                pmcLog(pMac, LOG1, FL("Cannot start BMPS Retry timer"));
            }
            /* NOTE(review): this log prints even when the start above just
               failed — the two messages can contradict each other. */
            pmcLog(pMac, LOG1, FL("BMPS Retry Timer already running or started"));
        }
    }

    return status;
}


/******************************************************************************
*
* Name:  pmcStartUapsd
*
* Description:
*    Request that the device be put in UAPSD state.
*
* Parameters:
*    hHal - HAL handle for device
*    callbackRoutine - Callback routine invoked in case of success/failure
*    callbackContext - value to be passed as parameter to routine specified
*                      above
*
* Returns:
*    eHAL_STATUS_SUCCESS - device is in UAPSD state
*    eHAL_STATUS_FAILURE - device cannot be brought to UAPSD state
*    eHAL_STATUS_PMC_PENDING - device is being brought to UAPSD state
*    eHAL_STATUS_PMC_DISABLED - UAPSD is disabled or BMPS mode is disabled
*
******************************************************************************/
eHalStatus pmcStartUapsd (
    tHalHandle hHal,
    void (*callbackRoutine) (void *callbackContext, eHalStatus status),
    void *callbackContext)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tpStartUapsdEntry pEntry;

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type);

    vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type));
    psRequest.event_subtype = WLAN_UAPSD_START_REQ;
    WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC);
#endif

    pmcLog(pMac, LOG2, "PMC: entering pmcStartUapsd");

    if( !PMC_IS_READY(pMac) )
    {
        pmcLog(pMac, LOGE, FL("Requesting UAPSD when PMC not ready"));
        pmcLog(pMac, LOGE, FL("pmcReady = %d pmcState = %s"),
               pMac->pmc.pmcReady, pmcGetPmcStateStr(pMac->pmc.pmcState));
        return eHAL_STATUS_FAILURE;
    }

    /* Check if BMPS is enabled.  UAPSD rides on top of BMPS. */
    if (!pMac->pmc.bmpsEnabled)
    {
        pmcLog(pMac, LOGE, "PMC: Cannot enter UAPSD. BMPS is disabled");
        return eHAL_STATUS_PMC_DISABLED;
    }

    /* Check if UAPSD is enabled. */
    if (!pMac->pmc.uapsdEnabled)
    {
        pmcLog(pMac, LOGE, "PMC: Cannot enter UAPSD. UAPSD is disabled");
        return eHAL_STATUS_PMC_DISABLED;
    }

    /* If already in UAPSD, just return. */
    if (pMac->pmc.pmcState == UAPSD)
        return eHAL_STATUS_SUCCESS;

    /* Check that we are associated. */
    if (!pmcValidateConnectState( pMac ))
    {
        pmcLog(pMac, LOGE, "PMC: STA not associated with an AP. UAPSD cannot be entered");
        return eHAL_STATUS_FAILURE;
    }

    /* Enter REQUEST_START_UAPSD State. */
    if (pmcEnterRequestStartUapsdState(hHal) != eHAL_STATUS_SUCCESS)
        return eHAL_STATUS_FAILURE;

    /* The callback is optional; without one the request simply proceeds. */
    if( NULL != callbackRoutine )
    {
        /* If success then request is pending. Allocate entry for callback routine list. */
        if (palAllocateMemory(pMac->hHdd, (void **)&pEntry, sizeof(tStartUapsdEntry)) != eHAL_STATUS_SUCCESS)
        {
            pmcLog(pMac, LOGE, "PMC: cannot allocate memory for request "
                   "start UAPSD routine list entry");
            return eHAL_STATUS_FAILURE;
        }

        /* Store routine and context in entry. */
        pEntry->callbackRoutine = callbackRoutine;
        pEntry->callbackContext = callbackContext;

        /* Add entry to list. */
        csrLLInsertTail(&pMac->pmc.requestStartUapsdList, &pEntry->link, FALSE);
    }

    return eHAL_STATUS_PMC_PENDING;
}


/******************************************************************************
*
* Name:  pmcStopUapsd
*
* Description:
*    Request that the device be put out of UAPSD state.
*
* Parameters:
*    hHal - HAL handle for device
*
* Returns:
*    eHAL_STATUS_SUCCESS - device is put out of UAPSD and back in BMPS state
*    eHAL_STATUS_FAILURE - device cannot be brought out of UAPSD state
*
******************************************************************************/
eHalStatus pmcStopUapsd (tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type);

    vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type));
    psRequest.event_subtype = WLAN_UAPSD_STOP_REQ;
    WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC);
#endif

    pmcLog(pMac, LOG2, "PMC: entering pmcStopUapsd");

    /* Clear any buffered command for entering UAPSD */
    pMac->pmc.uapsdSessionRequired = FALSE;

    /* Nothing to be done if we are already out of UAPSD. This can happen if
       some other module (HDD, BT-AMP) requested Full Power. */
    if (pMac->pmc.pmcState != UAPSD && pMac->pmc.pmcState != REQUEST_STOP_UAPSD)
    {
        pmcLog(pMac, LOGW, "PMC: Device is already out of UAPSD "
               "state. Current state is %d", pMac->pmc.pmcState);
        return eHAL_STATUS_SUCCESS;
    }

    /* Enter REQUEST_STOP_UAPSD State */
    if (pmcEnterRequestStopUapsdState(hHal) != eHAL_STATUS_SUCCESS)
        return eHAL_STATUS_FAILURE;

    return eHAL_STATUS_SUCCESS;
}


/* ---------------------------------------------------------------------------
    \fn pmcRequestStandby
    \brief  Request that the device be put in standby.
    \param  hHal - The handle returned by macOpen.
    \param  callbackRoutine - Callback routine invoked in case of
                              success/failure
    \param  callbackContext - value to be passed as parameter to callback
    \return eHalStatus
      eHAL_STATUS_SUCCESS - device is in Standby mode
      eHAL_STATUS_FAILURE - device cannot be put in standby mode
      eHAL_STATUS_PMC_PENDING - device is being put in standby mode
  ---------------------------------------------------------------------------*/
extern eHalStatus pmcRequestStandby (
    tHalHandle hHal,
    void (*callbackRoutine) (void *callbackContext, eHalStatus status),
    void *callbackContext)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    WLAN_VOS_DIAG_EVENT_DEF(psRequest, vos_event_wlan_powersave_payload_type);

    vos_mem_zero(&psRequest, sizeof(vos_event_wlan_powersave_payload_type));
    psRequest.event_subtype = WLAN_ENTER_STANDBY_REQ;
    WLAN_VOS_DIAG_EVENT_REPORT(&psRequest, EVENT_WLAN_POWERSAVE_GENERIC);
#endif

    pmcLog(pMac, LOG2, "PMC: entering pmcRequestStandby");

    /* Check if standby is enabled. */
    if (!pMac->pmc.standbyEnabled)
    {
        pmcLog(pMac, LOGE, "PMC: Cannot enter standby. Standby is disabled");
        return eHAL_STATUS_PMC_DISABLED;
    }

    if( !PMC_IS_READY(pMac) )
    {
        pmcLog(pMac, LOGE, FL("Requesting standby when PMC not ready"));
        pmcLog(pMac, LOGE, FL("pmcReady = %d pmcState = %s"),
               pMac->pmc.pmcReady, pmcGetPmcStateStr(pMac->pmc.pmcState));
        return eHAL_STATUS_FAILURE;
    }

    /* If already in STANDBY, just return.
*/
    if (pMac->pmc.pmcState == STANDBY)
        return eHAL_STATUS_SUCCESS;

    /* Standby cannot coexist with an IBSS or BT-AMP session. */
    if (csrIsIBSSStarted(pMac) || csrIsBTAMPStarted(pMac))
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_FATAL,
            "WLAN: IBSS or BT-AMP session present. Cannot honor standby request");
        return eHAL_STATUS_PMC_NOT_NOW;
    }

    /* Enter Request Standby State. */
    if (pmcEnterRequestStandbyState(hHal) != eHAL_STATUS_SUCCESS)
        return eHAL_STATUS_FAILURE;

    /* Save the callback routine for when we need it. */
    pMac->pmc.standbyCallbackRoutine = callbackRoutine;
    pMac->pmc.standbyCallbackContext = callbackContext;

    return eHAL_STATUS_PMC_PENDING;
}


/* ---------------------------------------------------------------------------
    \fn pmcRegisterDeviceStateUpdateInd
    \brief  Register a callback routine that is called whenever
            the device enters a new device state (Full Power, BMPS, UAPSD)
    \param  hHal - The handle returned by macOpen.
    \param  callbackRoutine - Callback routine to be registered
    \param  callbackContext - Cookie to be passed back during callback
    \return eHalStatus
      eHAL_STATUS_SUCCESS - successfully registered
      eHAL_STATUS_FAILURE - not successfully registered
  ---------------------------------------------------------------------------*/
extern eHalStatus pmcRegisterDeviceStateUpdateInd (tHalHandle hHal,
    void (*callbackRoutine) (void *callbackContext, tPmcState pmcState),
    void *callbackContext)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tpDeviceStateUpdateIndEntry pEntry;

    pmcLog(pMac, LOG2, FL("Entering pmcRegisterDeviceStateUpdateInd"));

    /* Allocate entry for device power state update indication. */
    if (palAllocateMemory(pMac->hHdd, (void **)&pEntry, sizeof(tDeviceStateUpdateIndEntry)) != eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, FL("Cannot allocate memory for device power state update indication"));
        PMC_ABORT;
        return eHAL_STATUS_FAILURE;
    }

    /* Store routine in entry. */
    pEntry->callbackRoutine = callbackRoutine;
    pEntry->callbackContext = callbackContext;

    /* Add entry to list. */
    csrLLInsertTail(&pMac->pmc.deviceStateUpdateIndList, &pEntry->link, FALSE);

    return eHAL_STATUS_SUCCESS;
}


/* ---------------------------------------------------------------------------
    \fn pmcDeregisterDeviceStateUpdateInd
    \brief  Deregister a routine that was registered for device state changes
    \param  hHal - The handle returned by macOpen.
    \param  callbackRoutine - Callback routine to be deregistered
    \return eHalStatus
      eHAL_STATUS_SUCCESS - successfully deregistered
      eHAL_STATUS_FAILURE - not successfully deregistered
  ---------------------------------------------------------------------------*/
eHalStatus pmcDeregisterDeviceStateUpdateInd (tHalHandle hHal,
    void (*callbackRoutine) (void *callbackContext, tPmcState pmcState))
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tListElem *pEntry;
    tpDeviceStateUpdateIndEntry pDeviceStateUpdateIndEntry;

    pmcLog(pMac, LOG2, FL("Entering pmcDeregisterDeviceStateUpdateInd"));

    /* Find entry in the power save update routine list that matches
       the specified routine and remove it.  Matching is by function
       pointer; the first match is removed and freed. */
    pEntry = csrLLPeekHead(&pMac->pmc.deviceStateUpdateIndList, FALSE);
    while (pEntry != NULL)
    {
        pDeviceStateUpdateIndEntry = GET_BASE_ADDR(pEntry, tDeviceStateUpdateIndEntry, link);
        if (pDeviceStateUpdateIndEntry->callbackRoutine == callbackRoutine)
        {
            if (!csrLLRemoveEntry(&pMac->pmc.deviceStateUpdateIndList, pEntry, FALSE))
            {
                pmcLog(pMac, LOGE, FL("Cannot remove device state update ind entry from list"));
                return eHAL_STATUS_FAILURE;
            }
            if (palFreeMemory(pMac->hHdd, pDeviceStateUpdateIndEntry) != eHAL_STATUS_SUCCESS)
            {
                pmcLog(pMac, LOGE, FL("Cannot free memory for device state update ind routine list entry"));
                PMC_ABORT;
                return eHAL_STATUS_FAILURE;
            }
            return eHAL_STATUS_SUCCESS;
        }
        pEntry = csrLLNext(&pMac->pmc.deviceStateUpdateIndList, pEntry, FALSE);
    }

    /* Could not find matching entry. */
    return eHAL_STATUS_FAILURE;
}


/* ---------------------------------------------------------------------------
    \fn pmcReady
    \brief  fn to inform PMC that eWNI_SME_SYS_READY_IND has been sent to PE.
            This acts as a trigger to send a message to PE to update the power
            save related config to FW. Note that if HDD configures any power
            save related stuff before this API is invoked, PMC will buffer all
            the configuration.
    \param  hHal - The handle returned by macOpen.
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus pmcReady(tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    pmcLog(pMac, LOG2, FL("Entering pmcReady"));

    /* pmcStart must run first; STOPPED here is a fatal ordering error. */
    if(pMac->pmc.pmcState == STOPPED)
    {
        pmcLog(pMac, LOGP, FL("pmcReady is invoked even before pmcStart"));
        return eHAL_STATUS_FAILURE;
    }

    pMac->pmc.pmcReady = TRUE;
    /* Flush any buffered power-save configuration down to PE/FW. */
    if (pmcSendPowerSaveConfigMessage(hHal) != eHAL_STATUS_SUCCESS)
    {
        return eHAL_STATUS_FAILURE;
    }

    return eHAL_STATUS_SUCCESS;
}


/* ---------------------------------------------------------------------------
    \fn pmcWowlAddBcastPattern
    \brief  Add a pattern for Pattern Byte Matching in Wowl mode. Firmware will
            do a pattern match on these patterns when Wowl is enabled during
            BMPS mode. Note that Firmware performs the pattern matching only on
            broadcast frames and while Libra is in BMPS mode.
    \param  hHal - The handle returned by macOpen.
    \param  pattern - Pointer to the pattern to be added
    \return eHalStatus
      eHAL_STATUS_FAILURE  Cannot add pattern
      eHAL_STATUS_SUCCESS  Request accepted.
  ---------------------------------------------------------------------------*/
eHalStatus pmcWowlAddBcastPattern (
    tHalHandle hHal,
    tpSirWowlAddBcastPtrn pattern,
    tANI_U8 sessionId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId );

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    vos_log_powersave_wow_add_ptrn_pkt_type *log_ptr = NULL;
    WLAN_VOS_DIAG_LOG_ALLOC(log_ptr, vos_log_powersave_wow_add_ptrn_pkt_type, LOG_WLAN_POWERSAVE_WOW_ADD_PTRN_C);
#endif //#ifdef FEATURE_WLAN_DIAG_SUPPORT

    pmcLog(pMac, LOG2, "PMC: entering pmcWowlAddBcastPattern");

    if(pattern == NULL)
    {
        pmcLog(pMac, LOGE, FL("Null broadcast pattern being passed"));
        return eHAL_STATUS_FAILURE;
    }

    if( pSession == NULL)
    {
        pmcLog(pMac, LOGE, FL("Session not found "));
        return eHAL_STATUS_FAILURE;
    }

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    if( log_ptr )
    {
        log_ptr->pattern_id = pattern->ucPatternId;
        log_ptr->pattern_byte_offset = pattern->ucPatternByteOffset;
        log_ptr->pattern_size = pattern->ucPatternSize;
        log_ptr->pattern_mask_size = pattern->ucPatternMaskSize;
        vos_mem_copy(log_ptr->pattern, pattern->ucPattern, SIR_WOWL_BCAST_PATTERN_MAX_SIZE);
        /* 1 bit in the pattern mask denotes 1 byte of pattern hence pattern mask size is 1/8 */
        vos_mem_copy(log_ptr->pattern_mask, pattern->ucPatternMask, SIR_WOWL_BCAST_PATTERN_MAX_SIZE >> 3);
    }
    WLAN_VOS_DIAG_LOG_REPORT(log_ptr);
#endif

    if(pattern->ucPatternId >= SIR_WOWL_BCAST_MAX_NUM_PATTERNS )
    {
        pmcLog(pMac, LOGE, FL("Pattern Id must range from 0 to %d"), SIR_WOWL_BCAST_MAX_NUM_PATTERNS-1);
        return eHAL_STATUS_FAILURE;
    }

    /* Patterns cannot be programmed while in (or entering) standby. */
    if( pMac->pmc.pmcState == STANDBY || pMac->pmc.pmcState == REQUEST_STANDBY )
    {
        pmcLog(pMac, LOGE, FL("Cannot add WoWL Pattern as chip is in %s state"),
               pmcGetPmcStateStr(pMac->pmc.pmcState));
        return eHAL_STATUS_FAILURE;
    }

    if( pMac->pmc.pmcState == IMPS || pMac->pmc.pmcState == REQUEST_IMPS )
    {
        eHalStatus status;
        vos_mem_copy(pattern->bssId, pSession->connectedProfile.bssid, sizeof(tSirMacAddr));
        //Wake up the chip first
        status = pmcDeferMsg( pMac, eWNI_PMC_WOWL_ADD_BCAST_PTRN,
                              pattern, sizeof(tSirWowlAddBcastPtrn) );
        if( eHAL_STATUS_PMC_PENDING == status )
        {
            /* Message is buffered and will be sent once full power is
               reached; report success to the caller. */
            return eHAL_STATUS_SUCCESS;
        }
        else
        {
            //either fail or already in full power
            if( !HAL_STATUS_SUCCESS( status ) )
            {
                return ( status );
            }
            //else let it through because it is in full power state
        }
    }

    if (pmcSendMessage(hHal, eWNI_PMC_WOWL_ADD_BCAST_PTRN, pattern, sizeof(tSirWowlAddBcastPtrn))
        != eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, FL("Send of eWNI_PMC_WOWL_ADD_BCAST_PTRN to PE failed"));
        return eHAL_STATUS_FAILURE;
    }

    return eHAL_STATUS_SUCCESS;
}


/* ---------------------------------------------------------------------------
    \fn pmcWowlDelBcastPattern
    \brief  Delete a pattern that was added for Pattern Byte Matching.
    \param  hHal - The handle returned by macOpen.
    \param  pattern - Pattern to be deleted
    \return eHalStatus
      eHAL_STATUS_FAILURE  Cannot delete pattern
      eHAL_STATUS_SUCCESS  Request accepted.
  ---------------------------------------------------------------------------*/
eHalStatus pmcWowlDelBcastPattern (
    tHalHandle hHal,
    tpSirWowlDelBcastPtrn pattern,
    tANI_U8 sessionId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId );

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    WLAN_VOS_DIAG_EVENT_DEF(wowRequest, vos_event_wlan_powersave_wow_payload_type);

    vos_mem_zero(&wowRequest, sizeof(vos_event_wlan_powersave_wow_payload_type));
    wowRequest.event_subtype = WLAN_WOW_DEL_PTRN_REQ;
    wowRequest.wow_del_ptrn_id = pattern->ucPatternId;
    WLAN_VOS_DIAG_EVENT_REPORT(&wowRequest, EVENT_WLAN_POWERSAVE_WOW);
#endif

    pmcLog(pMac, LOG2, "PMC: entering pmcWowlDelBcastPattern");

    if( NULL == pSession )
    {
        pmcLog(pMac, LOGE, FL("Session not found "));
        return eHAL_STATUS_FAILURE;
    }

    if(pattern->ucPatternId >= SIR_WOWL_BCAST_MAX_NUM_PATTERNS )
    {
        pmcLog(pMac, LOGE, FL("Pattern Id must range from 0 to %d"), SIR_WOWL_BCAST_MAX_NUM_PATTERNS-1);
        return eHAL_STATUS_FAILURE;
    }

    /* Patterns cannot be removed while in (or entering) standby. */
    if(pMac->pmc.pmcState == STANDBY || pMac->pmc.pmcState == REQUEST_STANDBY)
    {
        pmcLog(pMac, LOGE, FL("Cannot delete WoWL Pattern as chip is in %s state"),
               pmcGetPmcStateStr(pMac->pmc.pmcState));
        return eHAL_STATUS_FAILURE;
    }

    if( pMac->pmc.pmcState == IMPS || pMac->pmc.pmcState == REQUEST_IMPS )
    {
        eHalStatus status;
        vos_mem_copy(pattern->bssId, pSession->connectedProfile.bssid, sizeof(tSirMacAddr));
        //Wake up the chip first
        status = pmcDeferMsg( pMac, eWNI_PMC_WOWL_DEL_BCAST_PTRN,
                              pattern, sizeof(tSirWowlDelBcastPtrn) );
        if( eHAL_STATUS_PMC_PENDING == status )
        {
            /* Message is buffered and will be sent once full power is
               reached; report success to the caller. */
            return eHAL_STATUS_SUCCESS;
        }
        else
        {
            //either fail or already in full power
            if( !HAL_STATUS_SUCCESS( status ) )
            {
                return ( status );
            }
            //else let it through because it is in full power state
        }
    }

    if (pmcSendMessage(hHal, eWNI_PMC_WOWL_DEL_BCAST_PTRN, pattern, sizeof(tSirWowlDelBcastPtrn))
        != eHAL_STATUS_SUCCESS)
    {
        pmcLog(pMac, LOGE, FL("Send of eWNI_PMC_WOWL_DEL_BCAST_PTRN to PE failed"));
        return eHAL_STATUS_FAILURE;
    }

    return eHAL_STATUS_SUCCESS;
}


/* ---------------------------------------------------------------------------
    \fn pmcEnterWowl
    \brief  Request that the device be brought to full power state.
            Note 1: If "fullPowerReason" specified in this API is set to
            eSME_FULL_PWR_NEEDED_BY_HDD, PMC will clear any "buffered wowl"
            requests and also clear any "buffered BMPS requests by HDD".
            Assumption is that since HDD is requesting full power, we need to
            undo any previous HDD requests for BMPS (using sme_RequestBmps) or
            WoWL (using sme_EnterWoWL). If the reason is specified anything
            other than above, the buffered requests for BMPS and WoWL will not
            be cleared.
            Note 2: Requesting full power (no matter what the fullPowerReason
            is) doesn't disable the "auto bmps timer" (if it is enabled) or
            clear any "buffered uapsd request".
            Note 3: When the device finally enters Full Power PMC will start
            a timer if any of the following holds true:
            - Auto BMPS mode is enabled
            - Uapsd request is pending
            - HDD's request for BMPS is pending
            - HDD's request for WoWL is pending
            On timer expiry PMC will attempt to put the device in BMPS mode
            if following (in addition to those listed above) holds true:
            - Polling of all modules through the Power Save Check routine
              passes
            - STA is associated to an access point
    \param  hHal - The handle returned by macOpen.
    \param  - enterWowlCallbackRoutine Callback routine invoked in case of
              success/failure
    \param  - enterWowlCallbackContext - Cookie to be passed back during
              callback
    \param  - wakeReasonIndCB Callback routine invoked for Wake Reason
              Indication
    \param  - wakeReasonIndCBContext - Cookie to be passed back during callback
    \param  - fullPowerReason - Reason why this API is being invoked. SME needs
              to distinguish between BAP and HDD requests
    \return eHalStatus - status
     eHAL_STATUS_SUCCESS - device brought to full power state
     eHAL_STATUS_FAILURE - device cannot be brought to full power state
     eHAL_STATUS_PMC_PENDING - device is being brought to full power state,
  ---------------------------------------------------------------------------*/
eHalStatus pmcEnterWowl (
    tHalHandle hHal,
    void (*enterWowlCallbackRoutine) (void *callbackContext, eHalStatus status),
    void *enterWowlCallbackContext,
#ifdef WLAN_WAKEUP_EVENTS
    void (*wakeReasonIndCB) (void *callbackContext, tpSirWakeReasonInd pWakeReasonInd),
    void *wakeReasonIndCBContext,
#endif // WLAN_WAKEUP_EVENTS
    tpSirSmeWowlEnterParams wowlEnterParams, tANI_U8 sessionId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId );

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    WLAN_VOS_DIAG_EVENT_DEF(wowRequest, vos_event_wlan_powersave_wow_payload_type);

    vos_mem_zero(&wowRequest, sizeof(vos_event_wlan_powersave_wow_payload_type));
    wowRequest.event_subtype = WLAN_WOW_ENTER_REQ;
    /* Diag wow_type bitmask: bit0 = magic packet, bit1 = pattern filtering. */
    wowRequest.wow_type = 0;

    if(wowlEnterParams->ucMagicPktEnable)
    {
        wowRequest.wow_type |= 1;
        vos_mem_copy(wowRequest.wow_magic_pattern, (tANI_U8 *)wowlEnterParams->magicPtrn, 6);
    }

    if(wowlEnterParams->ucPatternFilteringEnable)
    {
        wowRequest.wow_type |= 2;
    }
    WLAN_VOS_DIAG_EVENT_REPORT(&wowRequest, EVENT_WLAN_POWERSAVE_WOW);
#endif

    pmcLog(pMac, LOG2, FL("PMC: entering pmcEnterWowl"));

    if( NULL == pSession )
    {
        pmcLog(pMac, LOGE, FL("Session not found "));
        return eHAL_STATUS_FAILURE;
    }

    vos_mem_copy(wowlEnterParams->bssId, pSession->connectedProfile.bssid,
                 sizeof(tSirMacAddr));

    if( !PMC_IS_READY(pMac) )
    {
        pmcLog(pMac, LOGE, FL("Requesting WoWL when PMC not ready"));
        pmcLog(pMac, LOGE, FL("pmcReady = %d pmcState = %s"),
               pMac->pmc.pmcReady, pmcGetPmcStateStr(pMac->pmc.pmcState));
        return eHAL_STATUS_FAILURE;
    }

    /* Check if BMPS is enabled.  WoWL rides on top of BMPS. */
    if (!pMac->pmc.bmpsEnabled)
    {
        pmcLog(pMac, LOGE, "PMC: Cannot enter WoWL. BMPS is disabled");
        return eHAL_STATUS_PMC_DISABLED;
    }

    /* Check if WoWL is enabled. */
    if (!pMac->pmc.wowlEnabled)
    {
        pmcLog(pMac, LOGE, "PMC: Cannot enter WoWL. WoWL is disabled");
        return eHAL_STATUS_PMC_DISABLED;
    }

    /* Check that we are associated with single Session. */
    if (!pmcValidateConnectState( pMac ))
    {
        pmcLog(pMac, LOGE, "PMC: Cannot enable WOWL. STA not associated "
               "with an Access Point in Infra Mode with single active session");
        return eHAL_STATUS_FAILURE;
    }

    /* Is there a pending UAPSD request? HDD should have triggered QoS
       module to do the necessary cleanup before triggering WOWL */
    if(pMac->pmc.uapsdSessionRequired)
    {
        pmcLog(pMac, LOGE, "PMC: Cannot request WOWL. Pending UAPSD request");
        return eHAL_STATUS_FAILURE;
    }

    /* Check that entry into a power save mode is allowed at this time. */
    if (pMac->pmc.pmcState == FULL_POWER && !pmcPowerSaveCheck(hHal))
    {
        pmcLog(pMac, LOGE, "PMC: Power save check failed. WOWL request "
               "will not be accepted");
        return eHAL_STATUS_FAILURE;
    }

    // To avoid race condition, set callback routines before sending message.
    /* cache the WOWL information */
    pMac->pmc.wowlEnterParams = *wowlEnterParams;
    pMac->pmc.enterWowlCallbackRoutine = enterWowlCallbackRoutine;
    pMac->pmc.enterWowlCallbackContext = enterWowlCallbackContext;
#ifdef WLAN_WAKEUP_EVENTS
    /* Cache the Wake Reason Indication callback information */
    pMac->pmc.wakeReasonIndCB = wakeReasonIndCB;
    pMac->pmc.wakeReasonIndCBContext = wakeReasonIndCBContext;
#endif // WLAN_WAKEUP_EVENTS

    /* Enter Request WOWL State. */
    if (pmcRequestEnterWowlState(hHal, wowlEnterParams) != eHAL_STATUS_SUCCESS)
        return eHAL_STATUS_FAILURE;

    pMac->pmc.wowlModeRequired = TRUE;

    return eHAL_STATUS_PMC_PENDING;
}


/* ---------------------------------------------------------------------------
    \fn pmcExitWowl
    \brief  This is the SME API exposed to HDD to request exit from WoWLAN
            mode. SME will initiate exit from WoWLAN mode and device will be
            put in BMPS mode.
    \param  hHal - The handle returned by macOpen.
    \return eHalStatus
      eHAL_STATUS_FAILURE  Device cannot exit WoWLAN mode.
      eHAL_STATUS_SUCCESS  Request accepted to exit WoWLAN mode.
---------------------------------------------------------------------------*/
eHalStatus pmcExitWowl (tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

#ifdef FEATURE_WLAN_DIAG_SUPPORT
    /* Report the WoW-exit request to the diag/event framework before acting on it. */
    WLAN_VOS_DIAG_EVENT_DEF(wowRequest, vos_event_wlan_powersave_wow_payload_type);

    vos_mem_zero(&wowRequest, sizeof(vos_event_wlan_powersave_wow_payload_type));
    wowRequest.event_subtype = WLAN_WOW_EXIT_REQ;
    WLAN_VOS_DIAG_EVENT_REPORT(&wowRequest, EVENT_WLAN_POWERSAVE_WOW);
#endif

    pmcLog(pMac, LOG2, "PMC: entering pmcExitWowl");

    /* Clear any buffered command for entering WOWL, so a deferred enter
       request cannot re-arm WoWL after this exit completes. */
    pMac->pmc.wowlModeRequired = FALSE;

    /* Enter REQUEST_EXIT_WOWL State. On failure the cached enter-WoWL
       callbacks below are intentionally left untouched. */
    if (pmcRequestExitWowlState(hHal) != eHAL_STATUS_SUCCESS)
        return eHAL_STATUS_FAILURE;

    /* Clear the callback routines cached by pmcEnterWowl */
    pMac->pmc.enterWowlCallbackRoutine = NULL;
    pMac->pmc.enterWowlCallbackContext = NULL;

    return eHAL_STATUS_SUCCESS;
}

/* ---------------------------------------------------------------------------
    \fn pmcSetHostOffload
    \brief  Set the host offload feature.
    \param  hHal - The handle returned by macOpen.
    \param  pRequest - Pointer to the offload request.
    \return eHalStatus
            eHAL_STATUS_FAILURE  Cannot set the offload.
            eHAL_STATUS_SUCCESS  Request accepted. 
---------------------------------------------------------------------------*/
eHalStatus pmcSetHostOffload (tHalHandle hHal, tpSirHostOffloadReq pRequest,
                                  tANI_U8 sessionId)
{
    tpSirHostOffloadReq pRequestBuf;
    vos_msg_t msg;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId );

    VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "%s: IP address = %d.%d.%d.%d", __func__,
        pRequest->params.hostIpv4Addr[0], pRequest->params.hostIpv4Addr[1],
        pRequest->params.hostIpv4Addr[2], pRequest->params.hostIpv4Addr[3]);

    if(NULL == pSession )
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
            "%s: SESSION not Found\n", __func__);
        return eHAL_STATUS_FAILURE;
    }

    /* Heap copy of the request; ownership passes to WDA on successful post. */
    pRequestBuf = vos_mem_malloc(sizeof(tSirHostOffloadReq));
    if (NULL == pRequestBuf)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate memory for host offload request", __func__);
        return eHAL_STATUS_FAILED_ALLOC;
    }

    /* NOTE(review): the caller-supplied pRequest is mutated here (bssId is
       filled in from the connected profile BEFORE the copy) — confirm callers
       expect their struct to be written. */
    vos_mem_copy(pRequest->bssId, pSession->connectedProfile.bssid,
                 sizeof(tSirMacAddr));
    vos_mem_copy(pRequestBuf, pRequest, sizeof(tSirHostOffloadReq));

    msg.type = WDA_SET_HOST_OFFLOAD;
    msg.reserved = 0;
    msg.bodyptr = pRequestBuf;
    if(VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MODULE_ID_WDA, &msg))
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post WDA_SET_HOST_OFFLOAD message to WDA", __func__);
        /* Post failed: ownership was not transferred, free our copy. */
        vos_mem_free(pRequestBuf);
        return eHAL_STATUS_FAILURE;
    }

    return eHAL_STATUS_SUCCESS;
}

/* ---------------------------------------------------------------------------
    \fn pmcSetKeepAlive
    \brief  Set the Keep Alive feature.
    \param  hHal - The handle returned by macOpen.
    \param  pRequest - Pointer to the Keep Alive.
    \return eHalStatus
            eHAL_STATUS_FAILURE  Cannot set the keepalive.
            eHAL_STATUS_SUCCESS  Request accepted. 
---------------------------------------------------------------------------*/
eHalStatus pmcSetKeepAlive (tHalHandle hHal, tpSirKeepAliveReq pRequest, tANI_U8 sessionId)
{
    tpSirKeepAliveReq pRequestBuf;
    vos_msg_t msg;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId );

    VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO_LOW, "%s: "
                  "WDA_SET_KEEP_ALIVE message", __func__);

    if(pSession == NULL )
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
            " Session not Found", __func__);
        return eHAL_STATUS_FAILURE;
    }

    /* Heap copy of the request; ownership passes to WDA on successful post. */
    pRequestBuf = vos_mem_malloc(sizeof(tSirKeepAliveReq));
    if (NULL == pRequestBuf)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
                  "Not able to allocate memory for keep alive request",
                  __func__);
        return eHAL_STATUS_FAILED_ALLOC;
    }

    /* NOTE(review): caller's pRequest is mutated (bssId filled from the
       connected profile before the copy) — same pattern as pmcSetHostOffload. */
    vos_mem_copy(pRequest->bssId, pSession->connectedProfile.bssid,
                 sizeof(tSirMacAddr));
    vos_mem_copy(pRequestBuf, pRequest, sizeof(tSirKeepAliveReq));

    VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO_LOW, "buff TP %d "
              "input TP %d ", pRequestBuf->timePeriod, pRequest->timePeriod);

    msg.type = WDA_SET_KEEP_ALIVE;
    msg.reserved = 0;
    msg.bodyptr = pRequestBuf;
    if(VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MODULE_ID_WDA, &msg))
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
                  "Not able to post WDA_SET_KEEP_ALIVE message to WDA",
                  __func__);
        /* Post failed: ownership not transferred, free our copy. */
        vos_mem_free(pRequestBuf);
        return eHAL_STATUS_FAILURE;
    }

    return eHAL_STATUS_SUCCESS;
}

#ifdef WLAN_NS_OFFLOAD

/* ---------------------------------------------------------------------------
    \fn pmcSetNSOffload
    \brief  Set the host offload feature.
    \param  hHal - The handle returned by macOpen.
    \param  pRequest - Pointer to the offload request.
    \return eHalStatus
            eHAL_STATUS_FAILURE  Cannot set the offload.
            eHAL_STATUS_SUCCESS  Request accepted. 
---------------------------------------------------------------------------*/ eHalStatus pmcSetNSOffload (tHalHandle hHal, tpSirHostOffloadReq pRequest, tANI_U8 sessionId) { tpAniSirGlobal pMac = PMAC_STRUCT(hHal); tpSirHostOffloadReq pRequestBuf; vos_msg_t msg; tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId ); if( NULL == pSession ) { pmcLog(pMac, LOGE, FL("Session not found ")); return eHAL_STATUS_FAILURE; } vos_mem_copy(pRequest->bssId, pSession->connectedProfile.bssid, sizeof(tSirMacAddr)); pRequestBuf = vos_mem_malloc(sizeof(tSirHostOffloadReq)); if (NULL == pRequestBuf) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate memory for NS offload request", __func__); return eHAL_STATUS_FAILED_ALLOC; } vos_mem_copy(pRequestBuf, pRequest, sizeof(tSirHostOffloadReq)); msg.type = WDA_SET_NS_OFFLOAD; msg.reserved = 0; msg.bodyptr = pRequestBuf; if(VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MODULE_ID_WDA, &msg)) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post SIR_HAL_SET_HOST_OFFLOAD message to HAL", __func__); vos_mem_free(pRequestBuf); return eHAL_STATUS_FAILURE; } return eHAL_STATUS_SUCCESS; } #endif //WLAN_NS_OFFLOAD void pmcClosePowerSaveCheckList(tpAniSirGlobal pMac) { tListElem *pEntry; tpPowerSaveCheckEntry pPowerSaveCheckEntry; csrLLLock(&pMac->pmc.powerSaveCheckList); while ( (pEntry = csrLLRemoveHead(&pMac->pmc.powerSaveCheckList, FALSE)) ) { pPowerSaveCheckEntry = GET_BASE_ADDR(pEntry, tPowerSaveCheckEntry, link); if (palFreeMemory(pMac->hHdd, pPowerSaveCheckEntry) != eHAL_STATUS_SUCCESS) { pmcLog(pMac, LOGE, FL("Cannot free memory ")); PMC_ABORT; break; } } csrLLUnlock(&pMac->pmc.powerSaveCheckList); csrLLClose(&pMac->pmc.powerSaveCheckList); } void pmcCloseRequestFullPowerList(tpAniSirGlobal pMac) { tListElem *pEntry; tpRequestFullPowerEntry pRequestFullPowerEntry; csrLLLock(&pMac->pmc.requestFullPowerList); while ( (pEntry = csrLLRemoveHead(&pMac->pmc.requestFullPowerList, FALSE)) ) 
{ pRequestFullPowerEntry = GET_BASE_ADDR(pEntry, tRequestFullPowerEntry, link); if (palFreeMemory(pMac->hHdd, pRequestFullPowerEntry) != eHAL_STATUS_SUCCESS) { pmcLog(pMac, LOGE, FL("Cannot free memory ")); PMC_ABORT; break; } } csrLLUnlock(&pMac->pmc.requestFullPowerList); csrLLClose(&pMac->pmc.requestFullPowerList); } void pmcCloseRequestBmpsList(tpAniSirGlobal pMac) { tListElem *pEntry; tpRequestBmpsEntry pRequestBmpsEntry; csrLLLock(&pMac->pmc.requestBmpsList); while ( (pEntry = csrLLRemoveHead(&pMac->pmc.requestBmpsList, FALSE)) ) { pRequestBmpsEntry = GET_BASE_ADDR(pEntry, tRequestBmpsEntry, link); if (palFreeMemory(pMac->hHdd, pRequestBmpsEntry) != eHAL_STATUS_SUCCESS) { pmcLog(pMac, LOGE, FL("Cannot free memory ")); PMC_ABORT; break; } } csrLLUnlock(&pMac->pmc.requestBmpsList); csrLLClose(&pMac->pmc.requestBmpsList); } void pmcCloseRequestStartUapsdList(tpAniSirGlobal pMac) { tListElem *pEntry; tpStartUapsdEntry pStartUapsdEntry; csrLLLock(&pMac->pmc.requestStartUapsdList); while ( (pEntry = csrLLRemoveHead(&pMac->pmc.requestStartUapsdList, FALSE)) ) { pStartUapsdEntry = GET_BASE_ADDR(pEntry, tStartUapsdEntry, link); if (palFreeMemory(pMac->hHdd, pStartUapsdEntry) != eHAL_STATUS_SUCCESS) { pmcLog(pMac, LOGE, FL("Cannot free memory ")); PMC_ABORT; break; } } csrLLUnlock(&pMac->pmc.requestStartUapsdList); csrLLClose(&pMac->pmc.requestStartUapsdList); } void pmcCloseDeviceStateUpdateList(tpAniSirGlobal pMac) { tListElem *pEntry; tpDeviceStateUpdateIndEntry pDeviceStateUpdateIndEntry; csrLLLock(&pMac->pmc.deviceStateUpdateIndList); while ( (pEntry = csrLLRemoveHead(&pMac->pmc.deviceStateUpdateIndList, FALSE)) ) { pDeviceStateUpdateIndEntry = GET_BASE_ADDR(pEntry, tDeviceStateUpdateIndEntry, link); if (palFreeMemory(pMac->hHdd, pDeviceStateUpdateIndEntry) != eHAL_STATUS_SUCCESS) { pmcLog(pMac, LOGE, FL("Cannot free memory ")); PMC_ABORT; break; } } csrLLUnlock(&pMac->pmc.deviceStateUpdateIndList); csrLLClose(&pMac->pmc.deviceStateUpdateIndList); } void 
pmcCloseDeferredMsgList(tpAniSirGlobal pMac) { tListElem *pEntry; tPmcDeferredMsg *pDeferredMsg; csrLLLock(&pMac->pmc.deferredMsgList); while ( (pEntry = csrLLRemoveHead(&pMac->pmc.deferredMsgList, FALSE)) ) { pDeferredMsg = GET_BASE_ADDR(pEntry, tPmcDeferredMsg, link); if (palFreeMemory(pMac->hHdd, pDeferredMsg) != eHAL_STATUS_SUCCESS) { pmcLog(pMac, LOGE, FL("Cannot free memory ")); PMC_ABORT; break; } } csrLLUnlock(&pMac->pmc.deferredMsgList); csrLLClose(&pMac->pmc.deferredMsgList); } #ifdef FEATURE_WLAN_SCAN_PNO static tSirRetStatus pmcPopulateMacHeader( tpAniSirGlobal pMac, tANI_U8* pBD, tANI_U8 type, tANI_U8 subType, tSirMacAddr peerAddr , tSirMacAddr selfMacAddr) { tSirRetStatus statusCode = eSIR_SUCCESS; tpSirMacMgmtHdr pMacHdr; /// Prepare MAC management header pMacHdr = (tpSirMacMgmtHdr) (pBD); // Prepare FC pMacHdr->fc.protVer = SIR_MAC_PROTOCOL_VERSION; pMacHdr->fc.type = type; pMacHdr->fc.subType = subType; // Prepare Address 1 palCopyMemory( pMac->hHdd, (tANI_U8 *) pMacHdr->da, (tANI_U8 *) peerAddr, sizeof( tSirMacAddr )); sirCopyMacAddr(pMacHdr->sa,selfMacAddr); // Prepare Address 3 palCopyMemory( pMac->hHdd, (tANI_U8 *) pMacHdr->bssId, (tANI_U8 *) peerAddr, sizeof( tSirMacAddr )); return statusCode; } /*** pmcPopulateMacHeader() ***/ static tSirRetStatus pmcPrepareProbeReqTemplate(tpAniSirGlobal pMac, tANI_U8 nChannelNum, tANI_U32 dot11mode, tSirMacAddr selfMacAddr, tANI_U8 *pFrame, tANI_U16 *pusLen) { tDot11fProbeRequest pr; tANI_U32 nStatus, nBytes, nPayload; tSirRetStatus nSirStatus; /*Bcast tx*/ tSirMacAddr bssId = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ // The scheme here is to fill out a 'tDot11fProbeRequest' structure // and then hand it off to 'dot11fPackProbeRequest' (for // serialization). 
We start by zero-initializing the structure: palZeroMemory( pMac->hHdd, ( tANI_U8* )&pr, sizeof( pr ) ); PopulateDot11fSuppRates( pMac, nChannelNum, &pr.SuppRates,NULL); if ( WNI_CFG_DOT11_MODE_11B != dot11mode ) { PopulateDot11fExtSuppRates1( pMac, nChannelNum, &pr.ExtSuppRates ); } if (IS_DOT11_MODE_HT(dot11mode)) { PopulateDot11fHTCaps( pMac, NULL, &pr.HTCaps ); } // That's it-- now we pack it. First, how much space are we going to // need? nStatus = dot11fGetPackedProbeRequestSize( pMac, &pr, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "Failed to calculate the packed size f" "or a Probe Request (0x%08x).", nStatus ); // We'll fall back on the worst case scenario: nPayload = sizeof( tDot11fProbeRequest ); } else if ( DOT11F_WARNED( nStatus ) ) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "There were warnings while calculating" "the packed size for a Probe Request (" "0x%08x).", nStatus ); } nBytes = nPayload + sizeof( tSirMacMgmtHdr ); /* Prepare outgoing frame*/ palZeroMemory( pMac->hHdd, pFrame, nBytes ); // Next, we fill out the buffer descriptor: nSirStatus = pmcPopulateMacHeader( pMac, pFrame, SIR_MAC_MGMT_FRAME, SIR_MAC_MGMT_PROBE_REQ, bssId ,selfMacAddr); if ( eSIR_SUCCESS != nSirStatus ) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "Failed to populate the buffer descriptor for a Probe Request (%d).", nSirStatus ); return nSirStatus; // allocated! } // That done, pack the Probe Request: nStatus = dot11fPackProbeRequest( pMac, &pr, pFrame + sizeof( tSirMacMgmtHdr ), nPayload, &nPayload ); if ( DOT11F_FAILED( nStatus ) ) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "Failed to pack a Probe Request (0x%08x).", nStatus ); return eSIR_FAILURE; // allocated! 
} else if ( DOT11F_WARNED( nStatus ) ) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "There were warnings while packing a Probe Request" ); } *pusLen = nPayload + sizeof(tSirMacMgmtHdr); return eSIR_SUCCESS; } // End pmcPrepareProbeReqTemplate. eHalStatus pmcSetPreferredNetworkList ( tHalHandle hHal, tpSirPNOScanReq pRequest, tANI_U8 sessionId, preferredNetworkFoundIndCallback callbackRoutine, void *callbackContext ) { tpSirPNOScanReq pRequestBuf; vos_msg_t msg; tpAniSirGlobal pMac = PMAC_STRUCT(hHal); tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId ); tANI_U8 ucDot11Mode; VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "%s: SSID = 0x%08lx%08lx%08lx%08lx%08lx%08lx%08lx%08lx, " "0x%08lx%08lx%08lx%08lx%08lx%08lx%08lx%08lx", __func__, *((v_U32_t *) &pRequest->aNetworks[0].ssId.ssId[0]), *((v_U32_t *) &pRequest->aNetworks[0].ssId.ssId[4]), *((v_U32_t *) &pRequest->aNetworks[0].ssId.ssId[8]), *((v_U32_t *) &pRequest->aNetworks[0].ssId.ssId[12]), *((v_U32_t *) &pRequest->aNetworks[0].ssId.ssId[16]), *((v_U32_t *) &pRequest->aNetworks[0].ssId.ssId[20]), *((v_U32_t *) &pRequest->aNetworks[0].ssId.ssId[24]), *((v_U32_t *) &pRequest->aNetworks[0].ssId.ssId[28]), *((v_U32_t *) &pRequest->aNetworks[1].ssId.ssId[0]), *((v_U32_t *) &pRequest->aNetworks[1].ssId.ssId[4]), *((v_U32_t *) &pRequest->aNetworks[1].ssId.ssId[8]), *((v_U32_t *) &pRequest->aNetworks[1].ssId.ssId[12]), *((v_U32_t *) &pRequest->aNetworks[1].ssId.ssId[16]), *((v_U32_t *) &pRequest->aNetworks[1].ssId.ssId[20]), *((v_U32_t *) &pRequest->aNetworks[1].ssId.ssId[24]), *((v_U32_t *) &pRequest->aNetworks[1].ssId.ssId[28])); pRequestBuf = vos_mem_malloc(sizeof(tSirPNOScanReq)); if (NULL == pRequestBuf) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate memory for PNO request", __func__); return eHAL_STATUS_FAILED_ALLOC; } vos_mem_copy(pRequestBuf, pRequest, sizeof(tSirPNOScanReq)); /*Must translate the mode first*/ ucDot11Mode = (tANI_U8) 
csrTranslateToWNICfgDot11Mode(pMac, csrFindBestPhyMode( pMac, pMac->roam.configParam.phyMode )); /*Prepare a probe request for 2.4GHz band and one for 5GHz band*/ if (eSIR_SUCCESS == pmcPrepareProbeReqTemplate(pMac, SIR_PNO_24G_DEFAULT_CH, ucDot11Mode, pSession->selfMacAddr, pRequestBuf->p24GProbeTemplate, &pRequestBuf->us24GProbeTemplateLen)) { /* Append IE passed by supplicant(if any) to probe request */ if ((0 < pRequest->us24GProbeTemplateLen) && ((pRequestBuf->us24GProbeTemplateLen + pRequest->us24GProbeTemplateLen) < SIR_PNO_MAX_PB_REQ_SIZE )) { vos_mem_copy((tANI_U8 *)&pRequestBuf->p24GProbeTemplate + pRequestBuf->us24GProbeTemplateLen, (tANI_U8 *)&pRequest->p24GProbeTemplate, pRequest->us24GProbeTemplateLen); pRequestBuf->us24GProbeTemplateLen += pRequest->us24GProbeTemplateLen; VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "%s: pRequest->us24GProbeTemplateLen = %d", __func__, pRequest->us24GProbeTemplateLen); } else { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "%s: Extra ie discarded on 2.4G, IE length = %d", __func__, pRequest->us24GProbeTemplateLen); } } if (eSIR_SUCCESS == pmcPrepareProbeReqTemplate(pMac, SIR_PNO_5G_DEFAULT_CH, ucDot11Mode, pSession->selfMacAddr, pRequestBuf->p5GProbeTemplate, &pRequestBuf->us5GProbeTemplateLen)) { /* Append IE passed by supplicant(if any) to probe request */ if ((0 < pRequest->us5GProbeTemplateLen ) && ((pRequestBuf->us5GProbeTemplateLen + pRequest->us5GProbeTemplateLen) < SIR_PNO_MAX_PB_REQ_SIZE )) { vos_mem_copy((tANI_U8 *)&pRequestBuf->p5GProbeTemplate + pRequestBuf->us5GProbeTemplateLen, (tANI_U8 *)&pRequest->p5GProbeTemplate, pRequest->us5GProbeTemplateLen); pRequestBuf->us5GProbeTemplateLen += pRequest->us5GProbeTemplateLen; VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "%s: pRequestBuf->us5GProbeTemplateLen = %d", __func__, pRequest->us5GProbeTemplateLen); } else { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "%s: Extra IE discarded on 5G, IE length = %d", __func__, 
pRequest->us5GProbeTemplateLen); } } msg.type = WDA_SET_PNO_REQ; msg.reserved = 0; msg.bodyptr = pRequestBuf; if (!VOS_IS_STATUS_SUCCESS(vos_mq_post_message(VOS_MODULE_ID_WDA, &msg))) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post WDA_SET_PNO_REQ message to WDA", __func__); vos_mem_free(pRequestBuf); return eHAL_STATUS_FAILURE; } /* Cache the Preferred Network Found Indication callback information */ pMac->pmc.prefNetwFoundCB = callbackRoutine; pMac->pmc.preferredNetworkFoundIndCallbackContext = callbackContext; VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "-%s", __func__); return eHAL_STATUS_SUCCESS; } eHalStatus pmcSetRssiFilter(tHalHandle hHal, v_U8_t rssiThreshold) { tpSirSetRSSIFilterReq pRequestBuf; vos_msg_t msg; pRequestBuf = vos_mem_malloc(sizeof(tpSirSetRSSIFilterReq)); if (NULL == pRequestBuf) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate memory for PNO request", __func__); return eHAL_STATUS_FAILED_ALLOC; } pRequestBuf->rssiThreshold = rssiThreshold; msg.type = WDA_SET_RSSI_FILTER_REQ; msg.reserved = 0; msg.bodyptr = pRequestBuf; if (!VOS_IS_STATUS_SUCCESS(vos_mq_post_message(VOS_MODULE_ID_WDA, &msg))) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post WDA_SET_PNO_REQ message to WDA", __func__); vos_mem_free(pRequestBuf); return eHAL_STATUS_FAILURE; } return eHAL_STATUS_SUCCESS; } eHalStatus pmcUpdateScanParams(tHalHandle hHal, tCsrConfig *pRequest, tCsrChannel *pChannelList, tANI_U8 b11dResolved) { tpSirUpdateScanParams pRequestBuf; vos_msg_t msg; int i; VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "%s started", __func__); pRequestBuf = vos_mem_malloc(sizeof(tSirUpdateScanParams)); if (NULL == pRequestBuf) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate memory for UpdateScanParams request", __func__); return eHAL_STATUS_FAILED_ALLOC; } // // Fill pRequestBuf structure from pRequest // pRequestBuf->b11dEnabled = 
pRequest->Is11eSupportEnabled; pRequestBuf->b11dResolved = b11dResolved; pRequestBuf->ucChannelCount = ( pChannelList->numChannels < SIR_PNO_MAX_NETW_CHANNELS_EX )? pChannelList->numChannels:SIR_PNO_MAX_NETW_CHANNELS_EX; for (i=0; i < pRequestBuf->ucChannelCount; i++) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "%s: Channel List %d: %d", __FUNCTION__, i, pChannelList->channelList[i] ); pRequestBuf->aChannels[i] = pChannelList->channelList[i]; } pRequestBuf->usPassiveMinChTime = pRequest->nPassiveMinChnTime; pRequestBuf->usPassiveMaxChTime = pRequest->nPassiveMaxChnTime; pRequestBuf->usActiveMinChTime = pRequest->nActiveMinChnTime; pRequestBuf->usActiveMaxChTime = pRequest->nActiveMaxChnTime; pRequestBuf->ucCBState = PHY_SINGLE_CHANNEL_CENTERED; msg.type = WDA_UPDATE_SCAN_PARAMS_REQ; msg.reserved = 0; msg.bodyptr = pRequestBuf; if (!VOS_IS_STATUS_SUCCESS(vos_mq_post_message(VOS_MODULE_ID_WDA, &msg))) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post WDA_UPDATE_SCAN_PARAMS message to WDA", __func__); vos_mem_free(pRequestBuf); return eHAL_STATUS_FAILURE; } return eHAL_STATUS_SUCCESS; } #endif // FEATURE_WLAN_SCAN_PNO eHalStatus pmcSetPowerParams(tHalHandle hHal, tSirSetPowerParamsReq* pwParams, tANI_BOOLEAN forced) { tSirSetPowerParamsReq* pRequestBuf; vos_msg_t msg; tpAniSirGlobal pMac = PMAC_STRUCT(hHal); tpPESession psessionEntry; psessionEntry = peGetValidPowerSaveSession(pMac); if (!forced && (psessionEntry == NULL)) { return eHAL_STATUS_NOT_INITIALIZED; } pRequestBuf = vos_mem_malloc(sizeof(tSirSetPowerParamsReq)); if (NULL == pRequestBuf) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate memory for Power Paramrequest", __func__); return eHAL_STATUS_FAILED_ALLOC; } vos_mem_copy(pRequestBuf, pwParams, sizeof(*pRequestBuf)); msg.type = WDA_SET_POWER_PARAMS_REQ; msg.reserved = 0; msg.bodyptr = pRequestBuf; if(VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MODULE_ID_WDA, &msg)) { 
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post WDA_SET_POWER_PARAMS_REQ message to WDA", __func__); vos_mem_free(pRequestBuf); return eHAL_STATUS_FAILURE; } return eHAL_STATUS_SUCCESS; } #ifdef WLAN_FEATURE_PACKET_FILTERING eHalStatus pmcGetFilterMatchCount ( tHalHandle hHal, FilterMatchCountCallback callbackRoutine, void *callbackContext, tANI_U8 sessionId ) { tpSirRcvFltPktMatchRsp pRequestBuf; vos_msg_t msg; tpAniSirGlobal pMac = PMAC_STRUCT(hHal); tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId ); VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "%s", __func__); if(NULL == pSession ) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Session not found ", __func__); return eHAL_STATUS_FAILURE; } pRequestBuf = vos_mem_malloc(sizeof(tSirRcvFltPktMatchRsp)); if (NULL == pRequestBuf) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate " "memory for Get PC Filter Match Count request", __func__); return eHAL_STATUS_FAILED_ALLOC; } vos_mem_copy(pRequestBuf->bssId, pSession->connectedProfile.bssid, sizeof(tSirMacAddr)); msg.type = WDA_PACKET_COALESCING_FILTER_MATCH_COUNT_REQ; msg.reserved = 0; msg.bodyptr = pRequestBuf; /* Cache the Packet Coalescing Filter Match Count callback information */ if (NULL != pMac->pmc.FilterMatchCountCB) { // Do we need to check if the callback is in use? // Because we are not sending the same message again when it is pending, // the only case when the callback is not NULL is that the previous message //was timed out or failed. // So, it will be safe to set the callback in this case. 
} pMac->pmc.FilterMatchCountCB = callbackRoutine; pMac->pmc.FilterMatchCountCBContext = callbackContext; if(VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MODULE_ID_WDA, &msg)) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post WDA_PACKET_COALESCING_FILTER_MATCH_COUNT_REQ " "message to WDA", __func__); vos_mem_free(pRequestBuf); return eHAL_STATUS_FAILURE; } return eHAL_STATUS_SUCCESS; } #endif // WLAN_FEATURE_PACKET_FILTERING #ifdef WLAN_FEATURE_GTK_OFFLOAD /* --------------------------------------------------------------------------- \fn pmcSetGTKOffload \brief Set GTK offload feature. \param hHal - The handle returned by macOpen. \param pGtkOffload - Pointer to the GTK offload request. \return eHalStatus eHAL_STATUS_FAILURE Cannot set the offload. eHAL_STATUS_SUCCESS Request accepted. ---------------------------------------------------------------------------*/ eHalStatus pmcSetGTKOffload (tHalHandle hHal, tpSirGtkOffloadParams pGtkOffload, tANI_U8 sessionId) { tpSirGtkOffloadParams pRequestBuf; vos_msg_t msg; tpAniSirGlobal pMac = PMAC_STRUCT(hHal); tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId ); VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "%s: KeyReplayCounter: %d", __func__, pGtkOffload->ullKeyReplayCounter); if(NULL == pSession ) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Session not found ", __func__); return eHAL_STATUS_FAILURE; } pRequestBuf = (tpSirGtkOffloadParams)vos_mem_malloc(sizeof(tSirGtkOffloadParams)); if (NULL == pRequestBuf) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate " "memory for GTK offload request", __func__); return eHAL_STATUS_FAILED_ALLOC; } vos_mem_copy(pGtkOffload->bssId, pSession->connectedProfile.bssid, sizeof(tSirMacAddr)); vos_mem_copy(pRequestBuf, pGtkOffload, sizeof(tSirGtkOffloadParams)); msg.type = WDA_GTK_OFFLOAD_REQ; msg.reserved = 0; msg.bodyptr = pRequestBuf; if (!VOS_IS_STATUS_SUCCESS(vos_mq_post_message(VOS_MODULE_ID_WDA, 
&msg))) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post " "SIR_HAL_SET_GTK_OFFLOAD message to HAL", __func__); vos_mem_free(pRequestBuf); return eHAL_STATUS_FAILURE; } return eHAL_STATUS_SUCCESS; } /* --------------------------------------------------------------------------- \fn pmcGetGTKOffload \brief Get GTK offload information. \param hHal - The handle returned by macOpen. \param callbackRoutine - Pointer to the GTK Offload Get Info response callback routine. \return eHalStatus eHAL_STATUS_FAILURE Cannot set the offload. eHAL_STATUS_SUCCESS Request accepted. ---------------------------------------------------------------------------*/ eHalStatus pmcGetGTKOffload(tHalHandle hHal, GTKOffloadGetInfoCallback callbackRoutine, void *callbackContext, tANI_U8 sessionId) { tpSirGtkOffloadGetInfoRspParams pRequestBuf; vos_msg_t msg; tpAniSirGlobal pMac = PMAC_STRUCT(hHal); tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId ); VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "%s: filterId = %d", __func__); if(NULL == pSession ) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Session not found ", __func__); return eHAL_STATUS_FAILURE; } pRequestBuf = (tpSirGtkOffloadGetInfoRspParams) vos_mem_malloc(sizeof (tSirGtkOffloadGetInfoRspParams)); if (NULL == pRequestBuf) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate " "memory for Get GTK offload request", __func__); return eHAL_STATUS_FAILED_ALLOC; } vos_mem_copy(pRequestBuf->bssId, pSession->connectedProfile.bssid, sizeof(tSirMacAddr)); msg.type = WDA_GTK_OFFLOAD_GETINFO_REQ; msg.reserved = 0; msg.bodyptr = pRequestBuf; /* Cache the Get GTK Offload callback information */ if (NULL != pMac->pmc.GtkOffloadGetInfoCB) { // Do we need to check if the callback is in use? // Because we are not sending the same message again when it is pending, // the only case when the callback is not NULL is that the previous message was timed out or failed. 
// So, it will be safe to set the callback in this case. } pMac->pmc.GtkOffloadGetInfoCB = callbackRoutine; pMac->pmc.GtkOffloadGetInfoCBContext = callbackContext; if (!VOS_IS_STATUS_SUCCESS(vos_mq_post_message(VOS_MODULE_ID_WDA, &msg))) { VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post WDA_GTK_OFFLOAD_GETINFO_REQ message to WDA", __func__); vos_mem_free(pRequestBuf); return eHAL_STATUS_FAILURE; } return eHAL_STATUS_SUCCESS; } #endif // WLAN_FEATURE_GTK_OFFLOAD v_BOOL_t IsPmcImpsReqFailed (tHalHandle hHal) { tpAniSirGlobal pMac = PMAC_STRUCT(hHal); v_BOOL_t impsReqFailStatus; impsReqFailStatus = (pMac->pmc.ImpsReqFailed || pMac->pmc.ImpsReqTimerFailed); return impsReqFailStatus; } void pmcResetImpsFailStatus (tHalHandle hHal) { tpAniSirGlobal pMac = PMAC_STRUCT(hHal); pMac->pmc.ImpsReqFailed = VOS_FALSE; pMac->pmc.ImpsReqTimerFailed = VOS_FALSE; }
gpl-2.0
kaylorchen/Linux_for_mini2440
drivers/acpi/acpica/exstorob.c
578
7221
/******************************************************************************
 *
 * Module Name: exstorob - AML Interpreter object store support, store to object
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2008, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"

#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exstorob")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_store_buffer_to_buffer
 *
 * PARAMETERS:  source_desc         - Source object to copy
 *              target_desc         - Destination object of the copy
 *
 * RETURN:      Status (AE_OK on success, AE_NO_MEMORY if a new target buffer
 *              cannot be allocated)
 *
 * DESCRIPTION: Copy a buffer object to another buffer object. If the target
 *              is zero-length or points at static (table) data, a new buffer
 *              of the source length is allocated; otherwise the source is
 *              copied into the existing target, truncating if it is larger
 *              than the target.
 *
 ******************************************************************************/

acpi_status
acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
			       union acpi_operand_object *target_desc)
{
	u32 length;
	u8 *buffer;

	ACPI_FUNCTION_TRACE_PTR(ex_store_buffer_to_buffer, source_desc);

	/* If Source and Target are the same, just return */

	if (source_desc == target_desc) {
		return_ACPI_STATUS(AE_OK);
	}

	/* We know that source_desc is a buffer by now */

	buffer = ACPI_CAST_PTR(u8, source_desc->buffer.pointer);
	length = source_desc->buffer.length;

	/*
	 * If target is a buffer of length zero or is a static buffer,
	 * allocate a new buffer of the proper length
	 */
	if ((target_desc->buffer.length == 0) ||
	    (target_desc->common.flags & AOPOBJ_STATIC_POINTER)) {
		/*
		 * NOTE(review): if Length is 0 here, ACPI_ALLOCATE(0) may
		 * return NULL on some hosts and this path would report
		 * AE_NO_MEMORY for a legitimate zero-length store — confirm
		 * against the host OSL allocator semantics.
		 */
		target_desc->buffer.pointer = ACPI_ALLOCATE(length);
		if (!target_desc->buffer.pointer) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		target_desc->buffer.length = length;
	}

	/* Copy source buffer to target buffer */

	if (length <= target_desc->buffer.length) {

		/* Clear existing buffer and copy in the new one */

		ACPI_MEMSET(target_desc->buffer.pointer, 0,
			    target_desc->buffer.length);
		ACPI_MEMCPY(target_desc->buffer.pointer, buffer, length);

#ifdef ACPI_OBSOLETE_BEHAVIOR
		/*
		 * NOTE: ACPI versions up to 3.0 specified that the buffer must be
		 * truncated if the string is smaller than the buffer. However, "other"
		 * implementations of ACPI never did this and thus became the defacto
		 * standard. ACPI 3.0_a changes this behavior such that the buffer
		 * is no longer truncated.
		 */

		/*
		 * OBSOLETE BEHAVIOR:
		 * If the original source was a string, we must truncate the buffer,
		 * according to the ACPI spec. Integer-to-Buffer and Buffer-to-Buffer
		 * copy must not truncate the original buffer.
		 */
		if (original_src_type == ACPI_TYPE_STRING) {

			/* Set the new length of the target */

			target_desc->buffer.length = length;
		}
#endif
	} else {
		/* Truncate the source, copy only what will fit */

		ACPI_MEMCPY(target_desc->buffer.pointer, buffer,
			    target_desc->buffer.length);
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Truncating source buffer from %X to %X\n",
				  length, target_desc->buffer.length));
	}

	/* Copy flags */

	target_desc->buffer.flags = source_desc->buffer.flags;
	target_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_store_string_to_string
 *
 * PARAMETERS:  source_desc         - Source object to copy
 *              target_desc         - Destination object of the copy
 *
 * RETURN:      Status (AE_OK on success, AE_NO_MEMORY if a new target string
 *              cannot be allocated)
 *
 * DESCRIPTION: Copy a String object to another String object. Reuses the
 *              existing target storage when the source fits and the target is
 *              not static table data; otherwise frees the old (non-static)
 *              storage and allocates a fresh NUL-terminated buffer.
 *
 ******************************************************************************/

acpi_status
acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
			       union acpi_operand_object *target_desc)
{
	u32 length;
	u8 *buffer;

	ACPI_FUNCTION_TRACE_PTR(ex_store_string_to_string, source_desc);

	/* If Source and Target are the same, just return */

	if (source_desc == target_desc) {
		return_ACPI_STATUS(AE_OK);
	}

	/* We know that source_desc is a string by now */

	buffer = ACPI_CAST_PTR(u8, source_desc->string.pointer);
	length = source_desc->string.length;

	/*
	 * Replace existing string value if it will fit and the string
	 * pointer is not a static pointer (part of an ACPI table)
	 */
	if ((length < target_desc->string.length) &&
	    (!(target_desc->common.flags & AOPOBJ_STATIC_POINTER))) {
		/*
		 * String will fit in existing non-static buffer.
		 * Clear old string and copy in the new one
		 */
		ACPI_MEMSET(target_desc->string.pointer, 0,
			    (acpi_size) target_desc->string.length + 1);
		ACPI_MEMCPY(target_desc->string.pointer, buffer, length);
	} else {
		/*
		 * Free the current buffer, then allocate a new buffer
		 * large enough to hold the value
		 */
		if (target_desc->string.pointer &&
		    (!(target_desc->common.flags & AOPOBJ_STATIC_POINTER))) {

			/* Only free if not a pointer into the DSDT */

			ACPI_FREE(target_desc->string.pointer);
		}

		/* +1 for the terminating NUL; buffer is pre-zeroed */

		target_desc->string.pointer =
		    ACPI_ALLOCATE_ZEROED((acpi_size) length + 1);

		if (!target_desc->string.pointer) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		target_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;
		ACPI_MEMCPY(target_desc->string.pointer, buffer, length);
	}

	/* Set the new target length */

	target_desc->string.length = length;
	return_ACPI_STATUS(AE_OK);
}
gpl-2.0
qkdxorjs1002/android_kernel_samsung_slteskt
drivers/net/wireless/bcmdhd/aiutils.c
578
25543
/*
 * Misc utility routines for accessing chip-specific features
 * of the SiliconBackplane-based Broadcom chips.
 *
 * Copyright (C) 1999-2014, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 * $Id: aiutils.c 432226 2013-10-26 04:34:36Z $
 */

#include <bcm_cfg.h>
#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <pcicfg.h>

#include "siutils_priv.h"

/* Chip-quirk predicates: compiled out (always false) in this build */
#define BCM47162_DMP() (0)
#define BCM5357_DMP() (0)
#define BCM4707_DMP() (0)
/* Core id/rev remapping: identity in this build */
#define remap_coreid(sih, coreid) (coreid)
#define remap_corerev(sih, corerev) (corerev)

/* EROM parsing */

/*
 * Read enumeration-ROM entries via *eromptr (advancing it) until one matches
 * (entry & mask) == match, an END marker is hit, or mask == 0 (unconditional
 * single read). Invalid entries are skipped and counted for debug output.
 */
static uint32
get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
{
	uint32 ent;
	uint inv = 0, nom = 0;

	while (TRUE) {
		ent = R_REG(si_osh(sih), *eromptr);
		(*eromptr)++;

		if (mask == 0)
			break;

		if ((ent & ER_VALID) == 0) {
			inv++;
			continue;
		}

		if (ent == (ER_END | ER_VALID))
			break;

		if ((ent & mask) == match)
			break;

		nom++;
	}

	SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
	if (inv + nom) {
		SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom));
	}
	return ent;
}

/*
 * Parse one Address Space Descriptor for slave port 'sp', address space 'ad',
 * of type 'st' (slave/bridge/wrapper). Fills in the 64-bit address and size.
 * Returns the raw ASD word, or 0 (with the EROM pointer pushed back) if the
 * next entry is not the requested descriptor.
 */
static uint32
get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
        uint32 *sizel, uint32 *sizeh)
{
	uint32 asd, sz, szd;

	asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
	if (((asd & ER_TAG1) != ER_ADD) ||
	    (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
	    ((asd & AD_ST_MASK) != st)) {
		/* This is not what we want, "push" it back */
		(*eromptr)--;
		return 0;
	}
	*addrl = asd & AD_ADDR_MASK;
	if (asd & AD_AG32)
		*addrh = get_erom_ent(sih, eromptr, 0, 0);
	else
		*addrh = 0;
	*sizeh = 0;
	sz = asd & AD_SZ_MASK;
	if (sz == AD_SZ_SZD) {
		/* Explicit size descriptor follows, possibly with a high word */
		szd = get_erom_ent(sih, eromptr, 0, 0);
		*sizel = szd & SD_SZ_MASK;
		if (szd & SD_SG32)
			*sizeh = get_erom_ent(sih, eromptr, 0, 0);
	} else
		*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);

	SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
	         sp, ad, st, *sizeh, *sizel, *addrh, *addrl));

	return asd;
}

/* Post-scan hardware fixups: none required for supported chips */
static void
ai_hwfixup(si_info_t *sii)
{
}

/* parse the enumeration rom to identify all cores */
void
ai_scan(si_t *sih, void *regs, uint devid)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	chipcregs_t *cc = (chipcregs_t *)regs;
	uint32 erombase, *eromptr, *eromlim;

	erombase = R_REG(sii->osh, &cc->eromptr);

	/* Map (or window) the EROM according to the host bus type */
	switch (BUSTYPE(sih->bustype)) {
	case SI_BUS:
		eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
		break;

	case PCI_BUS:
		/* Set wrappers address */
		sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);

		/* Now point the window at the erom */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
		eromptr = regs;
		break;

	case SPI_BUS:
	case SDIO_BUS:
		eromptr = (uint32 *)(uintptr)erombase;
		break;

	case PCMCIA_BUS:
	default:
		SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype));
		ASSERT(0);
		return;
	}
	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));

	SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
	         regs, erombase, eromptr, eromlim));
	while (eromptr < eromlim) {
		uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
		uint32 mpd, asd, addrl, addrh, sizel, sizeh;
		uint i, j, idx;
		bool br;

		br = FALSE;

		/* Grok a component */
		cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
		if (cia == (ER_END | ER_VALID)) {
			SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
			ai_hwfixup(sii);
			return;
		}

		cib = get_erom_ent(sih, &eromptr, 0, 0);

		if ((cib & ER_TAG) != ER_CI) {
			SI_ERROR(("CIA not followed by CIB\n"));
			goto error;
		}

		/* Decode component id, manufacturer, revision and port counts */
		cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
		mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
		crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
		nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
		nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
		nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
		nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;

#ifdef BCMDBG_SI
		SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
		         "nsw = %d, nmp = %d & nsp = %d\n",
		         mfg, cid, crev, eromptr - 1, nmw, nsw, nmp, nsp));
#else
		BCM_REFERENCE(crev);
#endif

		if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
			continue;
		if ((nmw + nsw == 0)) {
			/* A component which is not a core */
			if (cid == OOB_ROUTER_CORE_ID) {
				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
				              &addrl, &addrh, &sizel, &sizeh);
				if (asd != 0) {
					sii->oob_router = addrl;
				}
			}
			if (cid != GMAC_COMMON_4706_CORE_ID && cid != NS_CCB_CORE_ID)
				continue;
		}

		idx = sii->numcores;

		cores_info->cia[idx] = cia;
		cores_info->cib[idx] = cib;
		cores_info->coreid[idx] = remap_coreid(sih, cid);

		/* Consume (and debug-print) all master port descriptors */
		for (i = 0; i < nmp; i++) {
			mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
			if ((mpd & ER_TAG) != ER_MP) {
				SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
				goto error;
			}
			SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
			         (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
			         (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
		}

		/* First Slave Address Descriptor should be port 0:
		 * the main register space for the core
		 */
		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
		if (asd == 0) {
			do {
				/* Try again to see if it is a bridge */
				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
				              &sizel, &sizeh);
				if (asd != 0)
					br = TRUE;
				else {
					/*
					 * NOTE(review): if no bridge ASD is found and br is
					 * still FALSE while the (stale) addr/size checks pass,
					 * this loop re-reads the same pushed-back entry and
					 * does not terminate — confirm against upstream aiutils.
					 */
					if (br == TRUE) {
						break;
					} else if ((addrh != 0) || (sizeh != 0) ||
					           (sizel != SI_CORE_SIZE)) {
						SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t size1 ="
						          "0x%x\n", addrh, sizeh, sizel));
						SI_ERROR(("First Slave ASD for"
						          "core 0x%04x malformed "
						          "(0x%08x)\n", cid, asd));
						goto error;
					}
				}
			} while (1);
		}
		cores_info->coresba[idx] = addrl;
		cores_info->coresba_size[idx] = sizel;
		/* Get any more ASDs in port 0 */
		j = 1;
		do {
			asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
			              &sizel, &sizeh);
			if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
				cores_info->coresba2[idx] = addrl;
				cores_info->coresba2_size[idx] = sizel;
			}
			j++;
		} while (asd != 0);

		/* Go through the ASDs for other slave ports */
		for (i = 1; i < nsp; i++) {
			j = 0;
			do {
				asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
				              &sizel, &sizeh);
				if (asd == 0)
					break;
				j++;
			} while (1);
			if (j == 0) {
				SI_ERROR((" SP %d has no address descriptors\n", i));
				goto error;
			}
		}

		/* Now get master wrappers */
		for (i = 0; i < nmw; i++) {
			asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
			              &sizel, &sizeh);
			if (asd == 0) {
				SI_ERROR(("Missing descriptor for MW %d\n", i));
				goto error;
			}
			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("Master wrapper %d is not 4KB\n", i));
				goto error;
			}
			if (i == 0)
				cores_info->wrapba[idx] = addrl;
		}

		/* And finally slave wrappers */
		for (i = 0; i < nsw; i++) {
			uint fwp = (nsp == 1) ? 0 : 1;
			asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
			              &sizel, &sizeh);
			if (asd == 0) {
				SI_ERROR(("Missing descriptor for SW %d\n", i));
				goto error;
			}
			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
				goto error;
			}
			if ((nmw == 0) && (i == 0))
				cores_info->wrapba[idx] = addrl;
		}

		/* Don't record bridges */
		if (br)
			continue;

		/* Done with core */
		sii->numcores++;
	}

	SI_ERROR(("Reached end of erom without finding END"));

	/* Fall through: scan failed, forget everything we found */
error:
	sii->numcores = 0;
	return;
}

/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address.
 */
void *
ai_setcoreidx(si_t *sih, uint coreidx)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint32 addr, wrap;
	void *regs;

	if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
		return (NULL);

	addr = cores_info->coresba[coreidx];
	wrap = cores_info->wrapba[coreidx];

	/*
	 * If the user has provided an interrupt mask enabled function,
	 * then assert interrupts are disabled before switching the core.
	 */
	ASSERT((sii->intrsenabled_fn == NULL) ||
	       !(*(sii)->intrsenabled_fn)((sii)->intr_arg));

	switch (BUSTYPE(sih->bustype)) {
	case SI_BUS:
		/* map new one */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		sii->curmap = regs = cores_info->regs[coreidx];
		if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
			cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
		}
		sii->curwrap = cores_info->wrappers[coreidx];
		break;

	case PCI_BUS:
		/* point bar0 window */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
		regs = sii->curmap;
		/* point bar0 2nd 4KB window to the primary wrapper */
		if (PCIE_GEN2(sii))
			OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
		else
			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
		break;

	case SPI_BUS:
	case SDIO_BUS:
		sii->curmap = regs = (void *)((uintptr)addr);
		sii->curwrap = (void *)((uintptr)wrap);
		break;

	case PCMCIA_BUS:
	default:
		ASSERT(0);
		regs = NULL;
		break;
	}

	sii->curmap = regs;
	sii->curidx = coreidx;

	return regs;
}

/*
 * Re-walk the EROM to find the address/size of the asidx'th extra slave
 * address space (beyond port 0) of the currently-focused core. On any
 * failure *size is set to 0.
 */
void
ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	chipcregs_t *cc = NULL;
	uint32 erombase, *eromptr, *eromlim;
	uint i, j, cidx;
	uint32 cia, cib, nmp, nsp;
	uint32 asd, addrl, addrh, sizel, sizeh;

	/* Locate chipcommon to read the EROM base address */
	for (i = 0; i < sii->numcores; i++) {
		if (cores_info->coreid[i] == CC_CORE_ID) {
			cc = (chipcregs_t *)cores_info->regs[i];
			break;
		}
	}
	if (cc == NULL)
		goto error;

	erombase = R_REG(sii->osh, &cc->eromptr);
	eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));

	cidx = sii->curidx;
	cia = cores_info->cia[cidx];
	cib = cores_info->cib[cidx];

	nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
	nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;

	/* scan for cores */
	while (eromptr < eromlim) {
		if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
		    (get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
			break;
		}
	}

	/* skip master ports */
	for (i = 0; i < nmp; i++)
		get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);

	/* Skip ASDs in port 0 */
	asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
	if (asd == 0) {
		/* Try again to see if it is a bridge */
		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
		              &sizel, &sizeh);
	}

	j = 1;
	do {
		asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
		              &sizel, &sizeh);
		j++;
	} while (asd != 0);

	/* Go through the ASDs for other slave ports */
	for (i = 1; i < nsp; i++) {
		j = 0;
		do {
			asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
			              &sizel, &sizeh);
			if (asd == 0)
				break;

			/* Count down to the requested address space */
			if (!asidx--) {
				*addr = addrl;
				*size = sizel;
				return;
			}
			j++;
		} while (1);

		if (j == 0) {
			SI_ERROR((" SP %d has no address descriptors\n", i));
			break;
		}
	}

error:
	*size = 0;
	return;
}

/* Return the number of address spaces in current core */
int
ai_numaddrspaces(si_t *sih)
{
	return 2;
}

/* Return the address of the nth address space in the current core */
uint32
ai_addrspace(si_t *sih, uint asidx)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint cidx;

	cidx = sii->curidx;

	if (asidx == 0)
		return cores_info->coresba[cidx];
	else if (asidx == 1)
		return cores_info->coresba2[cidx];
	else {
		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
		          __FUNCTION__, asidx));
		return 0;
	}
}

/* Return the size of the nth address space in the current core */
uint32
ai_addrspacesize(si_t *sih, uint asidx)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint cidx;

	cidx = sii->curidx;

	if (asidx == 0)
		return cores_info->coresba_size[cidx];
	else if (asidx == 1)
		return cores_info->coresba2_size[cidx];
	else {
		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
		          __FUNCTION__, asidx));
		return 0;
	}
}

/*
 * Return the "flag" (OOB select) of the current core from its AI wrapper.
 * Chips whose wrapper registers cannot be read return the core index instead.
 */
uint
ai_flag(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;

	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0",
		          __FUNCTION__));
		return sii->curidx;
	}
	if (BCM5357_DMP()) {
		SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n",
		          __FUNCTION__));
		return sii->curidx;
	}
	if (BCM4707_DMP()) {
		SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
		          __FUNCTION__));
		return sii->curidx;
	}

	ai = sii->curwrap;

	return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
}

/* As ai_flag(), but returns the second OOB select field of the wrapper */
uint
ai_flag_alt(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;

	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0",
		          __FUNCTION__));
		return sii->curidx;
	}
	if (BCM5357_DMP()) {
		SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n",
		          __FUNCTION__));
		return sii->curidx;
	}
	if (BCM4707_DMP()) {
		SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
		          __FUNCTION__));
		return sii->curidx;
	}

	ai = sii->curwrap;

	return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK);
}

/* No-op on AI backplanes: interrupt routing is fixed by the wrapper */
void
ai_setint(si_t *sih, int siflag)
{
}

/* Read-modify-write a register in the current core's wrapper by offset */
uint
ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
{
	si_info_t *sii = SI_INFO(sih);
	uint32 *map = (uint32 *) sii->curwrap;

	if (mask || val) {
		uint32 w = R_REG(sii->osh, map+(offset/4));
		w &= ~mask;
		w |= val;
		W_REG(sii->osh, map+(offset/4), w);
	}

	return (R_REG(sii->osh, map+(offset/4)));
}

/* Return the manufacturer id of the current core */
uint
ai_corevendor(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint32 cia;

	cia = cores_info->cia[sii->curidx];
	return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
}

/* Return the (possibly remapped) revision of the current core */
uint
ai_corerev(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint32 cib;

	cib = cores_info->cib[sii->curidx];
	return remap_corerev(sih, (cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
}

/* TRUE iff the current core is clocked and out of reset */
bool
ai_iscoreup(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;

	ai = sii->curwrap;

	return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
	         SICF_CLOCK_EN) &&
	        ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
}

/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
 * switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
 * and (on newer pci cores) chipcommon registers.
 */
uint
ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	uint32 *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = FALSE;
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sih->bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
			                                    SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */
			fast = TRUE;
			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are at either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13 at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (uint32 *)((char *)sii->curmap +
				               PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (uint32 *)((char *)sii->curmap +
				               ((regoff >= SBCONFIGOFF) ?
				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
				               regoff);
		}
	}

	if (!fast) {
		/* Slow path: mask interrupts and physically switch cores */
		INTR_OFF(sii, intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		w = (R_REG(sii->osh, r) & ~mask) | val;
		W_REG(sii->osh, r, w);
	}

	/* readback */
	w = R_REG(sii->osh, r);

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			ai_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, intr_val);
	}

	return (w);
}

/*
 * If there is no need for fiddling with interrupts or core switches (typically silicon
 * back plane registers, pci registers and chipcommon registers), this function
 * returns the register offset on this core to a mapped address. This address can
 * be used for W_REG/R_REG directly.
 *
 * For accessing registers that would need a core switch, this function will return
 * NULL.
 */
uint32 *
ai_corereg_addr(si_t *sih, uint coreidx, uint regoff)
{
	uint32 *r = NULL;
	bool fast = FALSE;
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sih->bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
			                                    SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */
			fast = TRUE;
			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are at either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13 at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (uint32 *)((char *)sii->curmap +
				               PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (uint32 *)((char *)sii->curmap +
				               ((regoff >= SBCONFIGOFF) ?
				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
				               regoff);
		}
	}

	if (!fast)
		return 0;

	return (r);
}

/*
 * Put the current core into reset, waiting for pending backplane
 * transactions to drain first, then program its ioctrl with 'bits'.
 */
void
ai_core_disable(si_t *sih, uint32 bits)
{
	si_info_t *sii = SI_INFO(sih);
	volatile uint32 dummy;
	uint32 status;
	aidmp_t *ai;

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/* if core is already in reset, just return */
	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
		return;

	/* ensure there are no pending backplane operations */
	SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

	/* if pending backplane ops still, try waiting longer */
	if (status != 0) {
		/* 300usecs was sufficient to allow backplane ops to clear for big hammer */
		/* during driver load we may need more time */
		SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
		/* if still pending ops, continue on and try disable anyway */
		/* this is in big hammer path, so don't call wl_reinit in this case... */
	}

	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
	dummy = R_REG(sii->osh, &ai->resetctrl);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	W_REG(sii->osh, &ai->ioctrl, bits);
	dummy = R_REG(sii->osh, &ai->ioctrl);
	BCM_REFERENCE(dummy);
	OSL_DELAY(10);
}

/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
void
ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	volatile uint32 dummy;
	uint loop_counter = 10;

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/* ensure there are no pending backplane operations */
	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

	/* put core into reset state */
	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
	OSL_DELAY(10);

	/* ensure there are no pending backplane operations */
	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);

	/* Force-clock the core (FGC) while it comes out of reset */
	W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
	dummy = R_REG(sii->osh, &ai->ioctrl);
	BCM_REFERENCE(dummy);

	/* ensure there are no pending backplane operations */
	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

	/* Retry taking the core out of reset until resetctrl reads back 0 */
	while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
		/* ensure there are no pending backplane operations */
		SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

		/* take core out of reset */
		W_REG(sii->osh, &ai->resetctrl, 0);

		/* ensure there are no pending backplane operations */
		SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
	}

	/* Drop the force-clock; leave the core clocked with caller's bits */
	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
	dummy = R_REG(sii->osh, &ai->ioctrl);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
}

/* Write-only mask&set of the current core's ioctrl (no readback) */
void
ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	uint32 w;

	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
		          __FUNCTION__));
		return;
	}
	if (BCM5357_DMP()) {
		SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
		          __FUNCTION__));
		return;
	}
	if (BCM4707_DMP()) {
		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
		          __FUNCTION__));
		return;
	}

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	ASSERT((val & ~mask) == 0);

	if (mask || val) {
		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
		W_REG(sii->osh, &ai->ioctrl, w);
	}
}

/* Mask&set the current core's ioctrl; returns the resulting value */
uint32
ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	uint32 w;

	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
		          __FUNCTION__));
		return 0;
	}
	if (BCM5357_DMP()) {
		SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
		          __FUNCTION__));
		return 0;
	}
	if (BCM4707_DMP()) {
		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
		          __FUNCTION__));
		return 0;
	}

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	ASSERT((val & ~mask) == 0);

	if (mask || val) {
		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
		W_REG(sii->osh, &ai->ioctrl, w);
	}

	return R_REG(sii->osh, &ai->ioctrl);
}

/* Mask&set the current core's iostatus; returns the resulting value */
uint32
ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	uint32 w;

	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0",
		          __FUNCTION__));
		return 0;
	}
	if (BCM5357_DMP()) {
		SI_ERROR(("%s: Accessing USB20H DMP register (iostatus) on 5357\n",
		          __FUNCTION__));
		return 0;
	}
	if (BCM4707_DMP()) {
		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
		          __FUNCTION__));
		return 0;
	}

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	ASSERT((val & ~mask) == 0);
	ASSERT((mask & ~SISF_CORE_BITS) == 0);

	if (mask || val) {
		w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
		W_REG(sii->osh, &ai->iostatus, w);
	}

	return R_REG(sii->osh, &ai->iostatus);
}
gpl-2.0
deepjyotisaran/android_kernel_samsung_exynos5410
net/bluetooth/bnep/netdev.c
1602
6147
/* BNEP implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2001-2002 Inventel Systemes Written 2001-2002 by Clément Moreau <clement.moreau@inventel.fr> David Libault <david.libault@inventel.fr> Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. 
*/ #include <linux/module.h> #include <linux/slab.h> #include <linux/socket.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/wait.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include "bnep.h" #define BNEP_TX_QUEUE_LEN 20 static int bnep_net_open(struct net_device *dev) { netif_start_queue(dev); return 0; } static int bnep_net_close(struct net_device *dev) { netif_stop_queue(dev); return 0; } static void bnep_net_set_mc_list(struct net_device *dev) { #ifdef CONFIG_BT_BNEP_MC_FILTER struct bnep_session *s = netdev_priv(dev); struct sock *sk = s->sock->sk; struct bnep_set_filter_req *r; struct sk_buff *skb; int size; BT_DBG("%s mc_count %d", dev->name, netdev_mc_count(dev)); size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2; skb = alloc_skb(size, GFP_ATOMIC); if (!skb) { BT_ERR("%s Multicast list allocation failed", dev->name); return; } r = (void *) skb->data; __skb_put(skb, sizeof(*r)); r->type = BNEP_CONTROL; r->ctrl = BNEP_FILTER_MULTI_ADDR_SET; if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { u8 start[ETH_ALEN] = { 0x01 }; /* Request all addresses */ memcpy(__skb_put(skb, ETH_ALEN), start, ETH_ALEN); memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); r->len = htons(ETH_ALEN * 2); } else { struct netdev_hw_addr *ha; int i, len = skb->len; if (dev->flags & IFF_BROADCAST) { memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); } /* FIXME: We should group addresses here. 
*/ i = 0; netdev_for_each_mc_addr(ha, dev) { if (i == BNEP_MAX_MULTICAST_FILTERS) break; memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN); memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN); i++; } r->len = htons(skb->len - len); } skb_queue_tail(&sk->sk_write_queue, skb); wake_up_interruptible(sk_sleep(sk)); #endif } static int bnep_net_set_mac_addr(struct net_device *dev, void *arg) { BT_DBG("%s", dev->name); return 0; } static void bnep_net_timeout(struct net_device *dev) { BT_DBG("net_timeout"); netif_wake_queue(dev); } #ifdef CONFIG_BT_BNEP_MC_FILTER static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) { struct ethhdr *eh = (void *) skb->data; if ((eh->h_dest[0] & 1) && !test_bit(bnep_mc_hash(eh->h_dest), (ulong *) &s->mc_filter)) return 1; return 0; } #endif #ifdef CONFIG_BT_BNEP_PROTO_FILTER /* Determine ether protocol. Based on eth_type_trans. */ static inline u16 bnep_net_eth_proto(struct sk_buff *skb) { struct ethhdr *eh = (void *) skb->data; u16 proto = ntohs(eh->h_proto); if (proto >= 1536) return proto; if (get_unaligned((__be16 *) skb->data) == htons(0xFFFF)) return ETH_P_802_3; return ETH_P_802_2; } static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s) { u16 proto = bnep_net_eth_proto(skb); struct bnep_proto_filter *f = s->proto_filter; int i; for (i = 0; i < BNEP_MAX_PROTO_FILTERS && f[i].end; i++) { if (proto >= f[i].start && proto <= f[i].end) return 0; } BT_DBG("BNEP: filtered skb %p, proto 0x%.4x", skb, proto); return 1; } #endif static netdev_tx_t bnep_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct bnep_session *s = netdev_priv(dev); struct sock *sk = s->sock->sk; BT_DBG("skb %p, dev %p", skb, dev); #ifdef CONFIG_BT_BNEP_MC_FILTER if (bnep_net_mc_filter(skb, s)) { kfree_skb(skb); return NETDEV_TX_OK; } #endif #ifdef CONFIG_BT_BNEP_PROTO_FILTER if (bnep_net_proto_filter(skb, s)) { kfree_skb(skb); return NETDEV_TX_OK; } #endif /* * We cannot send L2CAP packets from 
here as we are potentially in a bh. * So we have to queue them and wake up session thread which is sleeping * on the sk_sleep(sk). */ dev->trans_start = jiffies; skb_queue_tail(&sk->sk_write_queue, skb); wake_up_interruptible(sk_sleep(sk)); if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) { BT_DBG("tx queue is full"); /* Stop queuing. * Session thread will do netif_wake_queue() */ netif_stop_queue(dev); } return NETDEV_TX_OK; } static const struct net_device_ops bnep_netdev_ops = { .ndo_open = bnep_net_open, .ndo_stop = bnep_net_close, .ndo_start_xmit = bnep_net_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = bnep_net_set_mc_list, .ndo_set_mac_address = bnep_net_set_mac_addr, .ndo_tx_timeout = bnep_net_timeout, .ndo_change_mtu = eth_change_mtu, }; void bnep_net_setup(struct net_device *dev) { memset(dev->broadcast, 0xff, ETH_ALEN); dev->addr_len = ETH_ALEN; ether_setup(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &bnep_netdev_ops; dev->watchdog_timeo = HZ * 2; }
gpl-2.0
kennysgithub/toshp75a
arch/arm/mach-imx/iomux-v3.c
2882
2401
/* * Copyright 2004-2006 Freescale Semiconductor, Inc. All Rights Reserved. * Copyright (C) 2008 by Sascha Hauer <kernel@pengutronix.de> * Copyright (C) 2009 by Jan Weitzel Phytec Messtechnik GmbH, * <armlinux@phytec.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. */ #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <linux/gpio.h> #include <asm/mach/map.h> #include "hardware.h" #include "iomux-v3.h" static void __iomem *base; /* * configures a single pad in the iomuxer */ int mxc_iomux_v3_setup_pad(iomux_v3_cfg_t pad) { u32 mux_ctrl_ofs = (pad & MUX_CTRL_OFS_MASK) >> MUX_CTRL_OFS_SHIFT; u32 mux_mode = (pad & MUX_MODE_MASK) >> MUX_MODE_SHIFT; u32 sel_input_ofs = (pad & MUX_SEL_INPUT_OFS_MASK) >> MUX_SEL_INPUT_OFS_SHIFT; u32 sel_input = (pad & MUX_SEL_INPUT_MASK) >> MUX_SEL_INPUT_SHIFT; u32 pad_ctrl_ofs = (pad & MUX_PAD_CTRL_OFS_MASK) >> MUX_PAD_CTRL_OFS_SHIFT; u32 pad_ctrl = (pad & MUX_PAD_CTRL_MASK) >> MUX_PAD_CTRL_SHIFT; if (mux_ctrl_ofs) __raw_writel(mux_mode, base + mux_ctrl_ofs); if (sel_input_ofs) __raw_writel(sel_input, base + sel_input_ofs); if (!(pad_ctrl & NO_PAD_CTRL) && pad_ctrl_ofs) __raw_writel(pad_ctrl, base + pad_ctrl_ofs); return 0; } EXPORT_SYMBOL(mxc_iomux_v3_setup_pad); int 
mxc_iomux_v3_setup_multiple_pads(iomux_v3_cfg_t *pad_list, unsigned count) { iomux_v3_cfg_t *p = pad_list; int i; int ret; for (i = 0; i < count; i++) { ret = mxc_iomux_v3_setup_pad(*p); if (ret) return ret; p++; } return 0; } EXPORT_SYMBOL(mxc_iomux_v3_setup_multiple_pads); void mxc_iomux_v3_init(void __iomem *iomux_v3_base) { base = iomux_v3_base; }
gpl-2.0
boa19861105/android_kernel_htc_b3uhl-JP
arch/arm/mach-imx/iomux-v3.c
2882
2401
/*
 * Copyright 2004-2006 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright (C) 2008 by Sascha Hauer <kernel@pengutronix.de>
 * Copyright (C) 2009 by Jan Weitzel Phytec Messtechnik GmbH,
 *                       <armlinux@phytec.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/gpio.h>

#include <asm/mach/map.h>

#include "hardware.h"
#include "iomux-v3.h"

/* Virtual base address of the IOMUXC block, set by mxc_iomux_v3_init(). */
static void __iomem *base;

/*
 * configures a single pad in the iomuxer
 *
 * The iomux_v3_cfg_t value packs three register offsets (mux control,
 * input select, pad control) together with the values to write into
 * them.  A zero offset means the pad has no such register.
 */
int mxc_iomux_v3_setup_pad(iomux_v3_cfg_t pad)
{
	u32 mux_ctrl_ofs = (pad & MUX_CTRL_OFS_MASK) >> MUX_CTRL_OFS_SHIFT;
	u32 mux_mode = (pad & MUX_MODE_MASK) >> MUX_MODE_SHIFT;
	u32 sel_input_ofs =
		(pad & MUX_SEL_INPUT_OFS_MASK) >> MUX_SEL_INPUT_OFS_SHIFT;
	u32 sel_input = (pad & MUX_SEL_INPUT_MASK) >> MUX_SEL_INPUT_SHIFT;
	u32 pad_ctrl_ofs =
		(pad & MUX_PAD_CTRL_OFS_MASK) >> MUX_PAD_CTRL_OFS_SHIFT;
	u32 pad_ctrl = (pad & MUX_PAD_CTRL_MASK) >> MUX_PAD_CTRL_SHIFT;

	if (mux_ctrl_ofs)
		__raw_writel(mux_mode, base + mux_ctrl_ofs);

	if (sel_input_ofs)
		__raw_writel(sel_input, base + sel_input_ofs);

	/* NO_PAD_CTRL requests that the pad control register be skipped */
	if (!(pad_ctrl & NO_PAD_CTRL) && pad_ctrl_ofs)
		__raw_writel(pad_ctrl, base + pad_ctrl_ofs);

	return 0;
}
EXPORT_SYMBOL(mxc_iomux_v3_setup_pad);

/*
 * Configure @count pads from @pad_list.  Stops at the first failing pad
 * and returns its error (earlier pads stay configured); 0 on success.
 */
int mxc_iomux_v3_setup_multiple_pads(iomux_v3_cfg_t *pad_list, unsigned count)
{
	iomux_v3_cfg_t *p = pad_list;
	int i;
	int ret;

	for (i = 0; i < count; i++) {
		ret = mxc_iomux_v3_setup_pad(*p);
		if (ret)
			return ret;
		p++;
	}
	return 0;
}
EXPORT_SYMBOL(mxc_iomux_v3_setup_multiple_pads);

/* Record the IOMUXC base address; must be called before any pad setup. */
void mxc_iomux_v3_init(void __iomem *iomux_v3_base)
{
	base = iomux_v3_base;
}
gpl-2.0
loglud/acclaim_kernel
arch/mips/jazz/setup.c
3906
4552
/*
 * Setup pointers to hardware-dependent routines.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 2001, 07, 08 by Ralf Baechle
 * Copyright (C) 2001 MIPS Technologies, Inc.
 * Copyright (C) 2007 by Thomas Bogendoerfer
 */
#include <linux/eisa.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>

#include <asm/jazz.h>
#include <asm/jazzdma.h>
#include <asm/reboot.h>
#include <asm/pgtable.h>

extern asmlinkage void jazz_handle_int(void);

extern void jazz_machine_restart(char *command);

/* Legacy PC-style I/O regions reserved up front so drivers can't claim them */
static struct resource jazz_io_resources[] = {
	{
		.start	= 0x00,
		.end	= 0x1f,
		.name	= "dma1",
		.flags	= IORESOURCE_BUSY
	}, {
		.start	= 0x40,
		.end	= 0x5f,
		.name	= "timer",
		.flags	= IORESOURCE_BUSY
	}, {
		.start	= 0x80,
		.end	= 0x8f,
		.name	= "dma page reg",
		.flags	= IORESOURCE_BUSY
	}, {
		.start	= 0xc0,
		.end	= 0xdf,
		.name	= "dma2",
		.flags	= IORESOURCE_BUSY
	}
};

/*
 * Early platform setup for MIPS Jazz machines: install the fixed TLB
 * mappings for the on-board device windows, reserve legacy I/O regions,
 * and hook up the restart handler and default console.
 */
void __init plat_mem_setup(void)
{
	int i;

	/* Fixed (wired) TLB entries mapping the Jazz device windows;
	 * the magic values are EntryLo0/EntryLo1 pairs for each window. */
	/* Map 0xe0000000 -> 0x0:800005C0, 0xe0010000 -> 0x1:30000580 */
	add_wired_entry(0x02000017, 0x03c00017, 0xe0000000, PM_64K);
	/* Map 0xe2000000 -> 0x0:900005C0, 0xe3010000 -> 0x0:910005C0 */
	add_wired_entry(0x02400017, 0x02440017, 0xe2000000, PM_16M);
	/* Map 0xe4000000 -> 0x0:600005C0, 0xe4100000 -> 400005C0 */
	add_wired_entry(0x01800017, 0x01000017, 0xe4000000, PM_4M);

	set_io_port_base(JAZZ_PORT_BASE);
#ifdef CONFIG_EISA
	EISA_bus = 1;
#endif

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(jazz_io_resources); i++)
		request_resource(&ioport_resource, jazz_io_resources + i);

	/* The RTC is outside the port address space */

	_machine_restart = jazz_machine_restart;

#ifdef CONFIG_VT
	screen_info = (struct screen_info) {
		.orig_video_cols	= 160,
		.orig_video_lines	= 64,
		.orig_video_points	= 16,
	};
#endif

	add_preferred_console("ttyS", 0, "9600");
}

#ifdef CONFIG_OLIVETTI_M700
#define UART_CLK  1843200
#else
/* Some Jazz machines seem to have an 8MHz crystal clock but I don't know
   exactly which ones ... XXX */
#define UART_CLK (8000000 / 16) /* ( 3072000 / 16) */
#endif

/* Builds one memory-mapped 8250 port entry for jazz_serial_data[] */
#define MEMPORT(_base, _irq)				\
	{						\
		.mapbase	= (_base),		\
		.membase	= (void *)(_base),	\
		.irq		= (_irq),		\
		.uartclk	= UART_CLK,		\
		.iotype		= UPIO_MEM,		\
		.flags		= UPF_BOOT_AUTOCONF,	\
	}

static struct plat_serial8250_port jazz_serial_data[] = {
	MEMPORT(JAZZ_SERIAL1_BASE, JAZZ_SERIAL1_IRQ),
	MEMPORT(JAZZ_SERIAL2_BASE, JAZZ_SERIAL2_IRQ),
	{ },	/* zero entry terminates the list */
};

static struct platform_device jazz_serial8250_device = {
	.name			= "serial8250",
	.id			= PLAT8250_DEV_PLATFORM,
	.dev			= {
		.platform_data	= jazz_serial_data,
	},
};

/* On-board ESP SCSI controller: registers, DMA register, interrupt */
static struct resource jazz_esp_rsrc[] = {
	{
		.start = JAZZ_SCSI_BASE,
		.end   = JAZZ_SCSI_BASE + 31,
		.flags = IORESOURCE_MEM
	},
	{
		.start = JAZZ_SCSI_DMA,
		.end   = JAZZ_SCSI_DMA,
		.flags = IORESOURCE_MEM
	},
	{
		.start = JAZZ_SCSI_IRQ,
		.end   = JAZZ_SCSI_IRQ,
		.flags = IORESOURCE_IRQ
	}
};

static struct platform_device jazz_esp_pdev = {
	.name		= "jazz_esp",
	.num_resources	= ARRAY_SIZE(jazz_esp_rsrc),
	.resource	= jazz_esp_rsrc
};

/* On-board SONIC Ethernet controller */
static struct resource jazz_sonic_rsrc[] = {
	{
		.start = JAZZ_ETHERNET_BASE,
		.end   = JAZZ_ETHERNET_BASE + 0xff,
		.flags = IORESOURCE_MEM
	},
	{
		.start = JAZZ_ETHERNET_IRQ,
		.end   = JAZZ_ETHERNET_IRQ,
		.flags = IORESOURCE_IRQ
	}
};

static struct platform_device jazz_sonic_pdev = {
	.name		= "jazzsonic",
	.num_resources	= ARRAY_SIZE(jazz_sonic_rsrc),
	.resource	= jazz_sonic_rsrc
};

/* PC-compatible CMOS RTC at I/O ports 0x70/0x71, IRQ 8 */
static struct resource jazz_cmos_rsrc[] = {
	{
		.start = 0x70,
		.end   = 0x71,
		.flags = IORESOURCE_IO
	},
	{
		.start = 8,
		.end   = 8,
		.flags = IORESOURCE_IRQ
	}
};

static struct platform_device jazz_cmos_pdev = {
	.name		= "rtc_cmos",
	.num_resources	= ARRAY_SIZE(jazz_cmos_rsrc),
	.resource	= jazz_cmos_rsrc
};

static struct platform_device pcspeaker_pdev = {
	.name		= "pcspkr",
	.id		= -1,
};

/* Register all on-board platform devices; runs at device_initcall time. */
static int __init jazz_setup_devinit(void)
{
	platform_device_register(&jazz_serial8250_device);
	platform_device_register(&jazz_esp_pdev);
	platform_device_register(&jazz_sonic_pdev);
	platform_device_register(&jazz_cmos_pdev);
	platform_device_register(&pcspeaker_pdev);

	return 0;
}

device_initcall(jazz_setup_devinit);
gpl-2.0
InfinitiveOS-Devices/kernel_xiaomi_armani
mm/mremap.c
4418
14300
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

/* Walk the existing page-table hierarchy for @addr; NULL if any level
 * is not present (nothing to move for that range). */
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

/* Allocate (if necessary) the pud/pmd covering @addr in the destination
 * area; NULL on allocation failure. */
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

/* Move the ptes in [old_addr, old_end) from old_pmd to new_pmd, taking
 * the pte locks (and i_mmap_mutex for file mappings) as needed. */
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr)
{
	struct address_space *mapping = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;

	if (vma->vm_file) {
		/*
		 * Subtle point from Rajesh Venkatasubramanian: before
		 * moving file-based ptes, we must lock truncate_pagecache
		 * out, since it might clean the dst vma before the src vma,
		 * and we propagate stale pages into the dst afterward.
		 */
		mapping = vma->vm_file->f_mapping;
		mutex_lock(&mapping->i_mmap_mutex);
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (mapping)
		mutex_unlock(&mapping->i_mmap_mutex);
}

/* Cap on how many bytes move_ptes() handles per chunk, to bound latency */
#define LATENCY_LIMIT	(64 * PAGE_SIZE)

/* Move up to @len bytes of page tables from @old_addr in @vma to
 * @new_addr in @new_vma, a pmd-sized chunk at a time.  Returns how many
 * bytes were actually moved (may be less than @len on pmd allocation
 * failure). */
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;
	bool need_flush = false;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmu_notifier_invalidate_range_start(vma->vm_mm, old_addr, old_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (pmd_trans_huge(*old_pmd)) {
			int err = 0;
			/* whole huge pmds can be moved directly... */
			if (extent == HPAGE_PMD_SIZE)
				err = move_huge_pmd(vma, new_vma, old_addr,
						    new_addr, old_end,
						    old_pmd, new_pmd);
			if (err > 0) {
				need_flush = true;
				continue;
			} else if (!err) {
				/* ...otherwise split and fall back to ptes */
				split_huge_page_pmd(vma->vm_mm, old_pmd);
			}
			VM_BUG_ON(pmd_trans_huge(*old_pmd));
		}
		if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
						      new_pmd, new_addr))
			break;
		/* clamp the chunk so it doesn't cross the new pmd either */
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
				new_vma, new_pmd, new_addr);
		need_flush = true;
	}
	if (likely(need_flush))
		flush_tlb_range(vma, old_end-len, old_addr);

	mmu_notifier_invalidate_range_end(vma->vm_mm, old_end-len, old_end);

	return len + old_addr - old_end;	/* how much done */
}

/* Relocate @vma's [old_addr, old_addr+old_len) to @new_addr with the new
 * size @new_len: copy the vma, move the page tables, fix up accounting
 * and mlock state, and unmap the old range.  Returns the new address or
 * a negative errno. */
static unsigned long move_vma(struct vm_area_struct *vma,
	unsigned long old_addr, unsigned long old_len,
	unsigned long new_len, unsigned long new_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped. But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
	if (moved_len < old_len) {
		/*
		 * Before moving the page tables from the new vma to
		 * the old vma, we need to be sure the old vma is
		 * queued after new vma in the same_anon_vma list to
		 * prevent SMP races with rmap_walk (that could lead
		 * rmap_walk to miss some page table).
		 */
		anon_vma_moveto_tail(vma);

		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	mm->total_vm += new_len >> PAGE_SHIFT;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		if (new_len > old_len)
			mlock_vma_pages_range(new_vma, new_addr + old_len,
						       new_addr + new_len);
	}

	return new_addr;
}

/* Validate that [addr, addr+old_len) can be resized to @new_len: find the
 * vma, check boundaries, mlock and commit limits, and charge accounting
 * (returned via *p).  Returns the vma or an ERR_PTR. */
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (!vma || vma->vm_start > addr)
		goto Efault;

	if (is_vm_hugetlb_page(vma))
		goto Einval;

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto Efault;

	/* Need to be careful about a growing mapping */
	if (new_len > old_len) {
		unsigned long pgoff;

		if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
			goto Efault;
		/* reject pgoff overflow of the grown mapping */
		pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
		pgoff += vma->vm_pgoff;
		if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
			goto Einval;
	}

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			goto Eagain;
	}

	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
		goto Enomem;

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			goto Efault;
		*p = charged;
	}

	return vma;

Efault:	/* very odd choice for most of the cases, but... */
	return ERR_PTR(-EFAULT);
Einval:
	return ERR_PTR(-EINVAL);
Enomem:
	return ERR_PTR(-ENOMEM);
Eagain:
	return ERR_PTR(-EAGAIN);
}

/* Implement mremap(MREMAP_FIXED): move/resize [addr, addr+old_len) to
 * the exact destination @new_addr, unmapping whatever was there. */
static unsigned long mremap_to(unsigned long addr,
	unsigned long old_len, unsigned long new_addr,
	unsigned long new_len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (new_addr & ~PAGE_MASK)
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Check if the location we're moving into overlaps the
	 * old location at all, and fail if it does.
	 */
	if ((new_addr <= addr) && (new_addr+new_len) > addr)
		goto out;

	if ((addr <= new_addr) && (addr+old_len) > new_addr)
		goto out;

	ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
	if (ret)
		goto out;

	/* make room at the destination */
	ret = do_munmap(mm, new_addr, new_len);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (ret & ~PAGE_MASK)
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr);
	if (!(ret & ~PAGE_MASK))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

/* Can @vma grow in place by @delta bytes without overflowing or hitting
 * the next vma?  1 if yes, 0 otherwise. */
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		goto out;

	if (addr & ~PAGE_MASK)
		goto out;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		goto out;

	if (flags & MREMAP_FIXED) {
		if (flags & MREMAP_MAYMOVE)
			ret = mremap_to(addr, old_len, new_addr, new_len);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area.. */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			mm->total_vm += pages;
			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				mlock_vma_pages_range(vma, addr + old_len,
						   addr + new_len);
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (new_addr & ~PAGE_MASK) {
			ret = new_addr;
			goto out;
		}

		ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
		if (ret)
			goto out;
		ret = move_vma(vma, addr, old_len, new_len, new_addr);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
	return ret;
}

/* mremap(2) entry point: serialize on mmap_sem and delegate. */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}
gpl-2.0
LDAP/android_kernel_motorola_msm8226
arch/s390/kvm/diag.c
4418
2656
/*
 * diag.c - handling diagnose instructions
 *
 * Copyright IBM Corp. 2008,2011
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include "kvm-s390.h"

/* DIAG 0x10: guest releases the page range named by the two registers
 * encoded in the instruction; discard it from the guest address space,
 * carefully skipping the 2-page prefix area. */
static int diag_release_pages(struct kvm_vcpu *vcpu)
{
	unsigned long start, end;
	unsigned long prefix  = vcpu->arch.sie_block->prefix;

	/* start/end register numbers come from the instruction's ipa field;
	 * end is inclusive in the instruction, hence the +4096 */
	start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;

	/* both bounds must be page aligned, ordered, and above page 1 */
	if (start & ~PAGE_MASK || end & ~PAGE_MASK || start > end
	    || start < 2 * PAGE_SIZE)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
	vcpu->stat.diagnose_10++;

	/* we checked for start > end above */
	if (end < prefix || start >= prefix + 2 * PAGE_SIZE) {
		/* range does not touch the prefix pages at all */
		gmap_discard(start, end, vcpu->arch.gmap);
	} else {
		/* discard around the 2-page prefix area, never inside it */
		if (start < prefix)
			gmap_discard(start, prefix, vcpu->arch.gmap);
		if (end >= prefix)
			gmap_discard(prefix + 2 * PAGE_SIZE,
				     end, vcpu->arch.gmap);
	}
	return 0;
}

/* DIAG 0x44: voluntary time-slice end; drop the cpu and yield. */
static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
	vcpu->stat.diagnose_44++;
	vcpu_put(vcpu);
	yield();
	vcpu_load(vcpu);
	return 0;
}

/* DIAG 0x308: IPL (re-boot) functions.  Supported subcodes 3 and 4 stop
 * the vcpu and hand a reset request to userspace via KVM_EXIT_S390_RESET;
 * returns -EREMOTE to signal the exit, -EOPNOTSUPP otherwise. */
static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
{
	unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
	unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;

	VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode);
	switch (subcode) {
	case 3:
		/* clear-reset IPL */
		vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
		break;
	case 4:
		/* normal re-IPL */
		vcpu->run->s390_reset_flags = 0;
		break;
	default:
		return -EOPNOTSUPP;
	}

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
	vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
	VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
		   vcpu->run->s390_reset_flags);
	return -EREMOTE;
}

/* Dispatch an intercepted DIAGNOSE instruction by its function code
 * (taken from the instruction's ipb field). */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
	int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16;

	switch (code) {
	case 0x10:
		return diag_release_pages(vcpu);
	case 0x44:
		return __diag_time_slice_end(vcpu);
	case 0x308:
		return __diag_ipl_functions(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}
gpl-2.0
MattCrystal/oneXL
drivers/net/wireless/p54/eeprom.c
5186
23930
/*
 * EEPROM parser code for mac80211 Prism54 drivers
 *
 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
 * Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de>
 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
 *
 * Based on:
 * - the islsm (softmac prism54) driver, which is:
 *   Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
 * - stlc45xx driver
 *   Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include <linux/crc-ccitt.h>
#include <linux/export.h>

#include "p54.h"
#include "eeprom.h"
#include "lmac.h"

/* 2.4 GHz rate table: CCK rates (short-preamble capable) + OFDM rates.
 * Bitrates are in units of 100 kbit/s. */
static struct ieee80211_rate p54_bgrates[] = {
	{ .bitrate = 10, .hw_value = 0, },
	{ .bitrate = 20, .hw_value = 1, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55, .hw_value = 2, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110, .hw_value = 3, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60, .hw_value = 4, },
	{ .bitrate = 90, .hw_value = 5, },
	{ .bitrate = 120, .hw_value = 6, },
	{ .bitrate = 180, .hw_value = 7, },
	{ .bitrate = 240, .hw_value = 8, },
	{ .bitrate = 360, .hw_value = 9, },
	{ .bitrate = 480, .hw_value = 10, },
	{ .bitrate = 540, .hw_value = 11, },
};

/* 5 GHz rate table: OFDM rates only */
static struct ieee80211_rate p54_arates[] = {
	{ .bitrate = 60, .hw_value = 4, },
	{ .bitrate = 90, .hw_value = 5, },
	{ .bitrate = 120, .hw_value = 6, },
	{ .bitrate = 180, .hw_value = 7, },
	{ .bitrate = 240, .hw_value = 8, },
	{ .bitrate = 360, .hw_value = 9, },
	{ .bitrate = 480, .hw_value = 10, },
	{ .bitrate = 540, .hw_value = 11, },
};

static struct p54_rssi_db_entry p54_rssi_default = {
	/*
	 * The defaults are taken from usb-logs of the
	 * vendor driver. So, they should be safe to
	 * use in case we can't get a match from the
	 * rssi <-> dBm conversion database.
	 */
	.mul = 130,
	.add = -398,
};

/* Per-channel EEPROM data availability flags */
#define CHAN_HAS_CAL		BIT(0)
#define CHAN_HAS_LIMIT		BIT(1)
#define CHAN_HAS_CURVE		BIT(2)
#define CHAN_HAS_ALL		(CHAN_HAS_CAL | CHAN_HAS_LIMIT | CHAN_HAS_CURVE)

struct p54_channel_entry {
	u16 freq;			/* center frequency in MHz */
	u16 data;			/* CHAN_HAS_* flags seen so far */
	int index;			/* IEEE channel number */
	enum ieee80211_band band;
};

struct p54_channel_list {
	struct p54_channel_entry *channels;
	size_t entries;
	size_t max_entries;
	size_t band_channel_num[IEEE80211_NUM_BANDS];
};

/* Map a center frequency (MHz) to a band index, or -1 if unsupported. */
static int p54_get_band_from_freq(u16 freq)
{
	/* FIXME: sync these values with the 802.11 spec */
	if ((freq >= 2412) && (freq <= 2484))
		return IEEE80211_BAND_2GHZ;

	if ((freq >= 4920) && (freq <= 5825))
		return IEEE80211_BAND_5GHZ;

	return -1;
}

static int same_band(u16 freq, u16 freq2)
{
	return p54_get_band_from_freq(freq) == p54_get_band_from_freq(freq2);
}

/* sort() comparator: ascending by frequency */
static int p54_compare_channels(const void *_a,
				const void *_b)
{
	const struct p54_channel_entry *a = _a;
	const struct p54_channel_entry *b = _b;

	return a->freq - b->freq;
}

/* sort() comparator for the rssi database: ascending by frequency */
static int p54_compare_rssichan(const void *_a,
				const void *_b)
{
	const struct p54_rssi_db_entry *a = _a;
	const struct p54_rssi_db_entry *b = _b;

	return a->freq - b->freq;
}

/* Attach the static bitrate table for @band to @band_entry. */
static int p54_fill_band_bitrates(struct ieee80211_hw *dev,
				  struct ieee80211_supported_band *band_entry,
				  enum ieee80211_band band)
{
	/* TODO: generate rate array dynamically */
	switch (band) {
	case IEEE80211_BAND_2GHZ:
		band_entry->bitrates = p54_bgrates;
		band_entry->n_bitrates = ARRAY_SIZE(p54_bgrates);
		break;
	case IEEE80211_BAND_5GHZ:
		band_entry->bitrates = p54_arates;
		band_entry->n_bitrates = ARRAY_SIZE(p54_arates);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Build the ieee80211_supported_band for @band from the collected channel
 * list, keeping only channels that have all three EEPROM data sets, and
 * install it in priv->band_table (replacing any previous table). */
static int p54_generate_band(struct ieee80211_hw *dev,
			     struct p54_channel_list *list,
			     unsigned int *chan_num,
			     enum ieee80211_band band)
{
	struct p54_common *priv = dev->priv;
	struct ieee80211_supported_band *tmp, *old;
	unsigned int i, j;
	int ret = -ENOMEM;

	if ((!list->entries) || (!list->band_channel_num[band]))
		return -EINVAL;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		goto err_out;

	tmp->channels = kzalloc(sizeof(struct ieee80211_channel) *
				list->band_channel_num[band], GFP_KERNEL);
	if (!tmp->channels)
		goto err_out;

	ret = p54_fill_band_bitrates(dev, tmp, band);
	if (ret)
		goto err_out;

	for (i = 0, j = 0; (j < list->band_channel_num[band]) &&
			   (i < list->entries); i++) {
		struct p54_channel_entry *chan = &list->channels[i];

		if (chan->band != band)
			continue;

		if (chan->data != CHAN_HAS_ALL) {
			/* incomplete channel: report what is missing, skip it */
			wiphy_err(dev->wiphy, "%s%s%s is/are missing for "
				  "channel:%d [%d MHz].\n",
				  (chan->data & CHAN_HAS_CAL ? "" :
				   " [iqauto calibration data]"),
				  (chan->data & CHAN_HAS_LIMIT ? "" :
				   " [output power limits]"),
				  (chan->data & CHAN_HAS_CURVE ? "" :
				   " [curve data]"),
				  chan->index, chan->freq);
			continue;
		}

		tmp->channels[j].band = chan->band;
		tmp->channels[j].center_freq = chan->freq;
		priv->survey[*chan_num].channel = &tmp->channels[j];
		priv->survey[*chan_num].filled = SURVEY_INFO_NOISE_DBM |
			SURVEY_INFO_CHANNEL_TIME |
			SURVEY_INFO_CHANNEL_TIME_BUSY |
			SURVEY_INFO_CHANNEL_TIME_TX;
		/* hw_value doubles as the index into priv->survey */
		tmp->channels[j].hw_value = (*chan_num);
		j++;
		(*chan_num)++;
	}

	if (j == 0) {
		wiphy_err(dev->wiphy, "Disabling totally damaged %d GHz band\n",
			  (band == IEEE80211_BAND_2GHZ) ? 2 : 5);

		ret = -ENODATA;
		goto err_out;
	}

	tmp->n_channels = j;
	old = priv->band_table[band];
	priv->band_table[band] = tmp;
	if (old) {
		kfree(old->channels);
		kfree(old);
	}

	return 0;

err_out:
	if (tmp) {
		kfree(tmp->channels);
		kfree(tmp);
	}

	return ret;
}

/* OR @data's CHAN_HAS_* flag into the entry for @freq, creating the
 * entry if it does not exist yet and there is room. */
static void p54_update_channel_param(struct p54_channel_list *list,
				     u16 freq, u16 data)
{
	int band, i;

	/*
	 * usually all lists in the eeprom are mostly sorted.
	 * so it's very likely that the entry we are looking for
	 * is right at the end of the list
	 *
	 * NOTE(review): the first iteration reads channels[list->entries],
	 * one past the last valid element; upstream later changed this to
	 * start at list->entries - 1 — confirm against a newer tree.
	 */
	for (i = list->entries; i >= 0; i--) {
		if (freq == list->channels[i].freq) {
			list->channels[i].data |= data;
			break;
		}
	}

	if ((i < 0) && (list->entries < list->max_entries)) {
		/* entry does not exist yet. Initialize a new one. */
		band = p54_get_band_from_freq(freq);

		/*
		 * filter out frequencies which don't belong into
		 * any supported band.
		 */
		if (band < 0)
			return ;

		i = list->entries++;
		list->band_channel_num[band]++;

		list->channels[i].freq = freq;
		list->channels[i].data = data;
		list->channels[i].band = band;
		list->channels[i].index = ieee80211_frequency_to_channel(freq);
		/* TODO: parse output_limit and fill max_power */
	}
}

/* Merge the three per-channel EEPROM tables (iq autocal, output limits,
 * curve data) into one sorted channel list and generate the supported
 * band structures from it.  Returns 0 if at least one band is usable. */
static int p54_generate_channel_lists(struct ieee80211_hw *dev)
{
	struct p54_common *priv = dev->priv;
	struct p54_channel_list *list;
	unsigned int i, j, k, max_channel_num;
	int ret = 0;
	u16 freq;

	if ((priv->iq_autocal_len != priv->curve_data->entries) ||
	    (priv->iq_autocal_len != priv->output_limit->entries))
		wiphy_err(dev->wiphy,
			  "Unsupported or damaged EEPROM detected. "
			  "You may not be able to use all channels.\n");

	/* size the merged list for the largest of the three tables */
	max_channel_num = max_t(unsigned int, priv->output_limit->entries,
				priv->iq_autocal_len);
	max_channel_num = max_t(unsigned int, max_channel_num,
				priv->curve_data->entries);

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		ret = -ENOMEM;
		goto free;
	}
	priv->chan_num = max_channel_num;
	priv->survey = kzalloc(sizeof(struct survey_info) * max_channel_num,
			       GFP_KERNEL);
	if (!priv->survey) {
		ret = -ENOMEM;
		goto free;
	}

	list->max_entries = max_channel_num;
	list->channels = kzalloc(sizeof(struct p54_channel_entry) *
				 max_channel_num, GFP_KERNEL);
	if (!list->channels) {
		ret = -ENOMEM;
		goto free;
	}

	for (i = 0; i < max_channel_num; i++) {
		if (i < priv->iq_autocal_len) {
			freq = le16_to_cpu(priv->iq_autocal[i].freq);
			p54_update_channel_param(list, freq, CHAN_HAS_CAL);
		}

		if (i < priv->output_limit->entries) {
			freq = le16_to_cpup((__le16 *) (i *
					    priv->output_limit->entry_size +
					    priv->output_limit->offset +
					    priv->output_limit->data));

			p54_update_channel_param(list, freq, CHAN_HAS_LIMIT);
		}

		if (i < priv->curve_data->entries) {
			freq = le16_to_cpup((__le16 *) (i *
					    priv->curve_data->entry_size +
					    priv->curve_data->offset +
					    priv->curve_data->data));

			p54_update_channel_param(list, freq, CHAN_HAS_CURVE);
		}
	}

	/* sort the channel list by frequency */
	sort(list->channels, list->entries, sizeof(struct p54_channel_entry),
	     p54_compare_channels, NULL);

	k = 0;
	for (i = 0, j = 0; i < IEEE80211_NUM_BANDS; i++) {
		if (p54_generate_band(dev, list, &k, i) == 0)
			j++;
	}
	if (j == 0) {
		/* no useable band available. */
		ret = -EINVAL;
	}

free:
	if (list) {
		kfree(list->channels);
		kfree(list);
	}
	if (ret) {
		kfree(priv->survey);
		priv->survey = NULL;
	}

	return ret;
}

/* Convert a rev0 PA curve blob into the driver's internal curve format:
 * rev0 stores only one pcv point per sample, so the values for the other
 * modulations are derived from it. */
static int p54_convert_rev0(struct ieee80211_hw *dev,
			    struct pda_pa_curve_data *curve_data)
{
	struct p54_common *priv = dev->priv;
	struct p54_pa_curve_data_sample *dst;
	struct pda_pa_curve_data_sample_rev0 *src;
	size_t cd_len = sizeof(*curve_data) +
		(curve_data->points_per_channel*sizeof(*dst) + 2) *
		 curve_data->channels;
	unsigned int i, j;
	void *source, *target;

	priv->curve_data = kmalloc(sizeof(*priv->curve_data) + cd_len,
				   GFP_KERNEL);
	if (!priv->curve_data)
		return -ENOMEM;

	priv->curve_data->entries = curve_data->channels;
	priv->curve_data->entry_size = sizeof(__le16) +
		sizeof(*dst) * curve_data->points_per_channel;
	priv->curve_data->offset = offsetof(struct pda_pa_curve_data, data);
	priv->curve_data->len = cd_len;
	memcpy(priv->curve_data->data, curve_data, sizeof(*curve_data));
	source = curve_data->data;
	target = ((struct pda_pa_curve_data *) priv->curve_data->data)->data;
	for (i = 0; i < curve_data->channels; i++) {
		/* each channel record starts with its frequency */
		__le16 *freq = source;
		source += sizeof(__le16);
		*((__le16 *)target) = *freq;
		target += sizeof(__le16);
		for (j = 0; j < curve_data->points_per_channel; j++) {
			dst = target;
			src = source;

			dst->rf_power = src->rf_power;
			dst->pa_detector = src->pa_detector;
			dst->data_64qam = src->pcv;
			/* "invent" the points for the other modulations */
#define SUB(x, y) (u8)(((x) - (y)) > (x) ? 0 : (x) - (y))
			dst->data_16qam = SUB(src->pcv, 12);
			dst->data_qpsk = SUB(dst->data_16qam, 12);
			dst->data_bpsk = SUB(dst->data_qpsk, 12);
			dst->data_barker = SUB(dst->data_bpsk, 14);
#undef SUB
			target += sizeof(*dst);
			source += sizeof(*src);
		}
	}

	return 0;
}

/* Convert a rev1 PA curve blob: samples are already in the internal
 * layout and can be copied verbatim (plus one padding byte per channel). */
static int p54_convert_rev1(struct ieee80211_hw *dev,
			    struct pda_pa_curve_data *curve_data)
{
	struct p54_common *priv = dev->priv;
	struct p54_pa_curve_data_sample *dst;
	struct pda_pa_curve_data_sample_rev1 *src;
	size_t cd_len = sizeof(*curve_data) +
		(curve_data->points_per_channel*sizeof(*dst) + 2) *
		 curve_data->channels;
	unsigned int i, j;
	void *source, *target;

	priv->curve_data = kzalloc(cd_len + sizeof(*priv->curve_data),
				   GFP_KERNEL);
	if (!priv->curve_data)
		return -ENOMEM;

	priv->curve_data->entries = curve_data->channels;
	priv->curve_data->entry_size = sizeof(__le16) +
		sizeof(*dst) * curve_data->points_per_channel;
	priv->curve_data->offset = offsetof(struct pda_pa_curve_data, data);
	priv->curve_data->len = cd_len;
	memcpy(priv->curve_data->data, curve_data, sizeof(*curve_data));
	source = curve_data->data;
	target = ((struct pda_pa_curve_data *) priv->curve_data->data)->data;
	for (i = 0; i < curve_data->channels; i++) {
		__le16 *freq = source;
		source += sizeof(__le16);
		*((__le16 *)target) = *freq;
		target += sizeof(__le16);
		for (j = 0; j < curve_data->points_per_channel; j++) {
			memcpy(target, source, sizeof(*src));

			target += sizeof(*dst);
			source += sizeof(*src);
		}
		source++;
	}

	return 0;
}

/* Human-readable RF chip names, indexed by the 3-bit chip id */
static const char *p54_rf_chips[] = { "INVALID-0", "Duette3", "Duette2",
	"Frisbee", "Xbow", "Longbow", "INVALID-6", "INVALID-7" };

static int p54_parse_rssical(struct ieee80211_hw *dev,
			     u8 *data, int len, u16 type)
{
	struct p54_common *priv = dev->priv;
	struct p54_rssi_db_entry *entry;
	size_t db_len, entries;
	int offset = 0, i;

	if (type != PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) {
		entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ?
1 : 2; if (len != sizeof(struct pda_rssi_cal_entry) * entries) { wiphy_err(dev->wiphy, "rssical size mismatch.\n"); goto err_data; } } else { /* * Some devices (Dell 1450 USB, Xbow 5GHz card, etc...) * have an empty two byte header. */ if (*((__le16 *)&data[offset]) == cpu_to_le16(0)) offset += 2; entries = (len - offset) / sizeof(struct pda_rssi_cal_ext_entry); if ((len - offset) % sizeof(struct pda_rssi_cal_ext_entry) || entries <= 0) { wiphy_err(dev->wiphy, "invalid rssi database.\n"); goto err_data; } } db_len = sizeof(*entry) * entries; priv->rssi_db = kzalloc(db_len + sizeof(*priv->rssi_db), GFP_KERNEL); if (!priv->rssi_db) return -ENOMEM; priv->rssi_db->offset = 0; priv->rssi_db->entries = entries; priv->rssi_db->entry_size = sizeof(*entry); priv->rssi_db->len = db_len; entry = (void *)((unsigned long)priv->rssi_db->data + priv->rssi_db->offset); if (type == PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) { struct pda_rssi_cal_ext_entry *cal = (void *) &data[offset]; for (i = 0; i < entries; i++) { entry[i].freq = le16_to_cpu(cal[i].freq); entry[i].mul = (s16) le16_to_cpu(cal[i].mul); entry[i].add = (s16) le16_to_cpu(cal[i].add); } } else { struct pda_rssi_cal_entry *cal = (void *) &data[offset]; for (i = 0; i < entries; i++) { u16 freq = 0; switch (i) { case IEEE80211_BAND_2GHZ: freq = 2437; break; case IEEE80211_BAND_5GHZ: freq = 5240; break; } entry[i].freq = freq; entry[i].mul = (s16) le16_to_cpu(cal[i].mul); entry[i].add = (s16) le16_to_cpu(cal[i].add); } } /* sort the list by channel frequency */ sort(entry, entries, sizeof(*entry), p54_compare_rssichan, NULL); return 0; err_data: wiphy_err(dev->wiphy, "rssi calibration data packing type:(%x) len:%d.\n", type, len); print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE, data, len); wiphy_err(dev->wiphy, "please report this issue.\n"); return -EINVAL; } struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *priv, const u16 freq) { struct p54_rssi_db_entry *entry; int i, found = -1; if (!priv->rssi_db) return 
&p54_rssi_default; entry = (void *)(priv->rssi_db->data + priv->rssi_db->offset); for (i = 0; i < priv->rssi_db->entries; i++) { if (!same_band(freq, entry[i].freq)) continue; if (found == -1) { found = i; continue; } /* nearest match */ if (abs(freq - entry[i].freq) < abs(freq - entry[found].freq)) { found = i; continue; } else { break; } } return found < 0 ? &p54_rssi_default : &entry[found]; } static void p54_parse_default_country(struct ieee80211_hw *dev, void *data, int len) { struct pda_country *country; if (len != sizeof(*country)) { wiphy_err(dev->wiphy, "found possible invalid default country eeprom entry. (entry size: %d)\n", len); print_hex_dump_bytes("country:", DUMP_PREFIX_NONE, data, len); wiphy_err(dev->wiphy, "please report this issue.\n"); return; } country = (struct pda_country *) data; if (country->flags == PDR_COUNTRY_CERT_CODE_PSEUDO) regulatory_hint(dev->wiphy, country->alpha2); else { /* TODO: * write a shared/common function that converts * "Regulatory domain codes" (802.11-2007 14.8.2.2) * into ISO/IEC 3166-1 alpha2 for regulatory_hint. 
*/ } } static int p54_convert_output_limits(struct ieee80211_hw *dev, u8 *data, size_t len) { struct p54_common *priv = dev->priv; if (len < 2) return -EINVAL; if (data[0] != 0) { wiphy_err(dev->wiphy, "unknown output power db revision:%x\n", data[0]); return -EINVAL; } if (2 + data[1] * sizeof(struct pda_channel_output_limit) > len) return -EINVAL; priv->output_limit = kmalloc(data[1] * sizeof(struct pda_channel_output_limit) + sizeof(*priv->output_limit), GFP_KERNEL); if (!priv->output_limit) return -ENOMEM; priv->output_limit->offset = 0; priv->output_limit->entries = data[1]; priv->output_limit->entry_size = sizeof(struct pda_channel_output_limit); priv->output_limit->len = priv->output_limit->entry_size * priv->output_limit->entries + priv->output_limit->offset; memcpy(priv->output_limit->data, &data[2], data[1] * sizeof(struct pda_channel_output_limit)); return 0; } static struct p54_cal_database *p54_convert_db(struct pda_custom_wrapper *src, size_t total_len) { struct p54_cal_database *dst; size_t payload_len, entries, entry_size, offset; payload_len = le16_to_cpu(src->len); entries = le16_to_cpu(src->entries); entry_size = le16_to_cpu(src->entry_size); offset = le16_to_cpu(src->offset); if (((entries * entry_size + offset) != payload_len) || (payload_len + sizeof(*src) != total_len)) return NULL; dst = kmalloc(sizeof(*dst) + payload_len, GFP_KERNEL); if (!dst) return NULL; dst->entries = entries; dst->entry_size = entry_size; dst->offset = offset; dst->len = payload_len; memcpy(dst->data, src->data, payload_len); return dst; } int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) { struct p54_common *priv = dev->priv; struct eeprom_pda_wrap *wrap; struct pda_entry *entry; unsigned int data_len, entry_len; void *tmp; int err; u8 *end = (u8 *)eeprom + len; u16 synth = 0; u16 crc16 = ~0; wrap = (struct eeprom_pda_wrap *) eeprom; entry = (void *)wrap->data + le16_to_cpu(wrap->len); /* verify that at least the entry length/code fits */ while 
((u8 *)entry <= end - sizeof(*entry)) { entry_len = le16_to_cpu(entry->len); data_len = ((entry_len - 1) << 1); /* abort if entry exceeds whole structure */ if ((u8 *)entry + sizeof(*entry) + data_len > end) break; switch (le16_to_cpu(entry->code)) { case PDR_MAC_ADDRESS: if (data_len != ETH_ALEN) break; SET_IEEE80211_PERM_ADDR(dev, entry->data); break; case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS: if (priv->output_limit) break; err = p54_convert_output_limits(dev, entry->data, data_len); if (err) goto err; break; case PDR_PRISM_PA_CAL_CURVE_DATA: { struct pda_pa_curve_data *curve_data = (struct pda_pa_curve_data *)entry->data; if (data_len < sizeof(*curve_data)) { err = -EINVAL; goto err; } switch (curve_data->cal_method_rev) { case 0: err = p54_convert_rev0(dev, curve_data); break; case 1: err = p54_convert_rev1(dev, curve_data); break; default: wiphy_err(dev->wiphy, "unknown curve data revision %d\n", curve_data->cal_method_rev); err = -ENODEV; break; } if (err) goto err; } break; case PDR_PRISM_ZIF_TX_IQ_CALIBRATION: priv->iq_autocal = kmemdup(entry->data, data_len, GFP_KERNEL); if (!priv->iq_autocal) { err = -ENOMEM; goto err; } priv->iq_autocal_len = data_len / sizeof(struct pda_iq_autocal_entry); break; case PDR_DEFAULT_COUNTRY: p54_parse_default_country(dev, entry->data, data_len); break; case PDR_INTERFACE_LIST: tmp = entry->data; while ((u8 *)tmp < entry->data + data_len) { struct exp_if *exp_if = tmp; if (exp_if->if_id == cpu_to_le16(IF_ID_ISL39000)) synth = le16_to_cpu(exp_if->variant); tmp += sizeof(*exp_if); } break; case PDR_HARDWARE_PLATFORM_COMPONENT_ID: if (data_len < 2) break; priv->version = *(u8 *)(entry->data + 1); break; case PDR_RSSI_LINEAR_APPROXIMATION: case PDR_RSSI_LINEAR_APPROXIMATION_DUAL_BAND: case PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED: err = p54_parse_rssical(dev, entry->data, data_len, le16_to_cpu(entry->code)); if (err) goto err; break; case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2: { struct pda_custom_wrapper *pda = (void *) 
entry->data; __le16 *src; u16 *dst; int i; if (priv->rssi_db || data_len < sizeof(*pda)) break; priv->rssi_db = p54_convert_db(pda, data_len); if (!priv->rssi_db) break; src = (void *) priv->rssi_db->data; dst = (void *) priv->rssi_db->data; for (i = 0; i < priv->rssi_db->entries; i++) *(dst++) = (s16) le16_to_cpu(*(src++)); } break; case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM: { struct pda_custom_wrapper *pda = (void *) entry->data; if (priv->output_limit || data_len < sizeof(*pda)) break; priv->output_limit = p54_convert_db(pda, data_len); } break; case PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM: { struct pda_custom_wrapper *pda = (void *) entry->data; if (priv->curve_data || data_len < sizeof(*pda)) break; priv->curve_data = p54_convert_db(pda, data_len); } break; case PDR_END: crc16 = ~crc_ccitt(crc16, (u8 *) entry, sizeof(*entry)); if (crc16 != le16_to_cpup((__le16 *)entry->data)) { wiphy_err(dev->wiphy, "eeprom failed checksum " "test!\n"); err = -ENOMSG; goto err; } else { goto good_eeprom; } break; default: break; } crc16 = crc_ccitt(crc16, (u8 *)entry, (entry_len + 1) * 2); entry = (void *)entry + (entry_len + 1) * 2; } wiphy_err(dev->wiphy, "unexpected end of eeprom data.\n"); err = -ENODATA; goto err; good_eeprom: if (!synth || !priv->iq_autocal || !priv->output_limit || !priv->curve_data) { wiphy_err(dev->wiphy, "not all required entries found in eeprom!\n"); err = -EINVAL; goto err; } err = p54_generate_channel_lists(dev); if (err) goto err; priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK; if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW) p54_init_xbow_synth(priv); if (!(synth & PDR_SYNTH_24_GHZ_DISABLED)) dev->wiphy->bands[IEEE80211_BAND_2GHZ] = priv->band_table[IEEE80211_BAND_2GHZ]; if (!(synth & PDR_SYNTH_5_GHZ_DISABLED)) dev->wiphy->bands[IEEE80211_BAND_5GHZ] = priv->band_table[IEEE80211_BAND_5GHZ]; if ((synth & PDR_SYNTH_RX_DIV_MASK) == PDR_SYNTH_RX_DIV_SUPPORTED) priv->rx_diversity_mask = 3; if ((synth & PDR_SYNTH_TX_DIV_MASK) == PDR_SYNTH_TX_DIV_SUPPORTED) 
priv->tx_diversity_mask = 3; if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { u8 perm_addr[ETH_ALEN]; wiphy_warn(dev->wiphy, "Invalid hwaddr! Using randomly generated MAC addr\n"); random_ether_addr(perm_addr); SET_IEEE80211_PERM_ADDR(dev, perm_addr); } priv->cur_rssi = &p54_rssi_default; wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n", dev->wiphy->perm_addr, priv->version, p54_rf_chips[priv->rxhw]); return 0; err: kfree(priv->iq_autocal); kfree(priv->output_limit); kfree(priv->curve_data); kfree(priv->rssi_db); kfree(priv->survey); priv->iq_autocal = NULL; priv->output_limit = NULL; priv->curve_data = NULL; priv->rssi_db = NULL; priv->survey = NULL; wiphy_err(dev->wiphy, "eeprom parse failed!\n"); return err; } EXPORT_SYMBOL_GPL(p54_parse_eeprom); int p54_read_eeprom(struct ieee80211_hw *dev) { struct p54_common *priv = dev->priv; size_t eeprom_size = 0x2020, offset = 0, blocksize, maxblocksize; int ret = -ENOMEM; void *eeprom; maxblocksize = EEPROM_READBACK_LEN; if (priv->fw_var >= 0x509) maxblocksize -= 0xc; else maxblocksize -= 0x4; eeprom = kzalloc(eeprom_size, GFP_KERNEL); if (unlikely(!eeprom)) goto free; while (eeprom_size) { blocksize = min(eeprom_size, maxblocksize); ret = p54_download_eeprom(priv, (void *) (eeprom + offset), offset, blocksize); if (unlikely(ret)) goto free; offset += blocksize; eeprom_size -= blocksize; } ret = p54_parse_eeprom(dev, eeprom, offset); free: kfree(eeprom); return ret; } EXPORT_SYMBOL_GPL(p54_read_eeprom);
gpl-2.0
Tommy-Geenexus/android_kernel_sony_msm8974_togari_5.x
drivers/mfd/tps65911-comparator.c
5442
4637
/* * tps65910.c -- TI TPS6591x * * Copyright 2010 Texas Instruments Inc. * * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/debugfs.h> #include <linux/gpio.h> #include <linux/mfd/tps65910.h> #define COMP 0 #define COMP1 1 #define COMP2 2 /* Comparator 1 voltage selection table in milivolts */ static const u16 COMP_VSEL_TABLE[] = { 0, 2500, 2500, 2500, 2500, 2550, 2600, 2650, 2700, 2750, 2800, 2850, 2900, 2950, 3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350, 3400, 3450, 3500, }; struct comparator { const char *name; int reg; int uV_max; const u16 *vsel_table; }; static struct comparator tps_comparators[] = { { .name = "COMP1", .reg = TPS65911_VMBCH, .uV_max = 3500, .vsel_table = COMP_VSEL_TABLE, }, { .name = "COMP2", .reg = TPS65911_VMBCH2, .uV_max = 3500, .vsel_table = COMP_VSEL_TABLE, }, }; static int comp_threshold_set(struct tps65910 *tps65910, int id, int voltage) { struct comparator tps_comp = tps_comparators[id]; int curr_voltage = 0; int ret; u8 index = 0, val; if (id == COMP) return 0; while (curr_voltage < tps_comp.uV_max) { curr_voltage = tps_comp.vsel_table[index]; if (curr_voltage >= voltage) break; else if (curr_voltage < voltage) index ++; } if (curr_voltage > tps_comp.uV_max) return -EINVAL; val = index << 1; ret = tps65910->write(tps65910, tps_comp.reg, 1, &val); return ret; } static int comp_threshold_get(struct tps65910 *tps65910, int id) { struct comparator tps_comp = tps_comparators[id]; int ret; u8 val; if (id == COMP) return 0; ret = tps65910->read(tps65910, tps_comp.reg, 1, &val); if (ret < 0) return ret; val >>= 
1; return tps_comp.vsel_table[val]; } static ssize_t comp_threshold_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tps65910 *tps65910 = dev_get_drvdata(dev->parent); struct attribute comp_attr = attr->attr; int id, uVolt; if (!strcmp(comp_attr.name, "comp1_threshold")) id = COMP1; else if (!strcmp(comp_attr.name, "comp2_threshold")) id = COMP2; else return -EINVAL; uVolt = comp_threshold_get(tps65910, id); return sprintf(buf, "%d\n", uVolt); } static DEVICE_ATTR(comp1_threshold, S_IRUGO, comp_threshold_show, NULL); static DEVICE_ATTR(comp2_threshold, S_IRUGO, comp_threshold_show, NULL); static __devinit int tps65911_comparator_probe(struct platform_device *pdev) { struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent); struct tps65910_board *pdata = dev_get_platdata(tps65910->dev); int ret; ret = comp_threshold_set(tps65910, COMP1, pdata->vmbch_threshold); if (ret < 0) { dev_err(&pdev->dev, "cannot set COMP1 threshold\n"); return ret; } ret = comp_threshold_set(tps65910, COMP2, pdata->vmbch2_threshold); if (ret < 0) { dev_err(&pdev->dev, "cannot set COMP2 theshold\n"); return ret; } /* Create sysfs entry */ ret = device_create_file(&pdev->dev, &dev_attr_comp1_threshold); if (ret < 0) dev_err(&pdev->dev, "failed to add COMP1 sysfs file\n"); ret = device_create_file(&pdev->dev, &dev_attr_comp2_threshold); if (ret < 0) dev_err(&pdev->dev, "failed to add COMP2 sysfs file\n"); return ret; } static __devexit int tps65911_comparator_remove(struct platform_device *pdev) { struct tps65910 *tps65910; tps65910 = dev_get_drvdata(pdev->dev.parent); device_remove_file(&pdev->dev, &dev_attr_comp2_threshold); device_remove_file(&pdev->dev, &dev_attr_comp1_threshold); return 0; } static struct platform_driver tps65911_comparator_driver = { .driver = { .name = "tps65911-comparator", .owner = THIS_MODULE, }, .probe = tps65911_comparator_probe, .remove = __devexit_p(tps65911_comparator_remove), }; static int __init tps65911_comparator_init(void) { 
return platform_driver_register(&tps65911_comparator_driver); } subsys_initcall(tps65911_comparator_init); static void __exit tps65911_comparator_exit(void) { platform_driver_unregister(&tps65911_comparator_driver); } module_exit(tps65911_comparator_exit); MODULE_AUTHOR("Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>"); MODULE_DESCRIPTION("TPS65911 comparator driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:tps65911-comparator");
gpl-2.0
brymaster5000/m7_4.1
drivers/media/video/omap3isp/ispcsiphy.c
8002
6316
/* * ispcsiphy.c * * TI OMAP3 ISP - CSI PHY module * * Copyright (C) 2010 Nokia Corporation * Copyright (C) 2009 Texas Instruments, Inc. * * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * Sakari Ailus <sakari.ailus@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/delay.h> #include <linux/device.h> #include <linux/regulator/consumer.h> #include "isp.h" #include "ispreg.h" #include "ispcsiphy.h" /* * csiphy_lanes_config - Configuration of CSIPHY lanes. * * Updates HW configuration. * Called with phy->mutex taken. */ static void csiphy_lanes_config(struct isp_csiphy *phy) { unsigned int i; u32 reg; reg = isp_reg_readl(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG); for (i = 0; i < phy->num_data_lanes; i++) { reg &= ~(ISPCSI2_PHY_CFG_DATA_POL_MASK(i + 1) | ISPCSI2_PHY_CFG_DATA_POSITION_MASK(i + 1)); reg |= (phy->lanes.data[i].pol << ISPCSI2_PHY_CFG_DATA_POL_SHIFT(i + 1)); reg |= (phy->lanes.data[i].pos << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(i + 1)); } reg &= ~(ISPCSI2_PHY_CFG_CLOCK_POL_MASK | ISPCSI2_PHY_CFG_CLOCK_POSITION_MASK); reg |= phy->lanes.clk.pol << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT; reg |= phy->lanes.clk.pos << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT; isp_reg_writel(phy->isp, reg, phy->cfg_regs, ISPCSI2_PHY_CFG); } /* * csiphy_power_autoswitch_enable * @enable: Sets or clears the autoswitch function enable flag. 
*/ static void csiphy_power_autoswitch_enable(struct isp_csiphy *phy, bool enable) { isp_reg_clr_set(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG, ISPCSI2_PHY_CFG_PWR_AUTO, enable ? ISPCSI2_PHY_CFG_PWR_AUTO : 0); } /* * csiphy_set_power * @power: Power state to be set. * * Returns 0 if successful, or -EBUSY if the retry count is exceeded. */ static int csiphy_set_power(struct isp_csiphy *phy, u32 power) { u32 reg; u8 retry_count; isp_reg_clr_set(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG, ISPCSI2_PHY_CFG_PWR_CMD_MASK, power); retry_count = 0; do { udelay(50); reg = isp_reg_readl(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG) & ISPCSI2_PHY_CFG_PWR_STATUS_MASK; if (reg != power >> 2) retry_count++; } while ((reg != power >> 2) && (retry_count < 100)); if (retry_count == 100) { printk(KERN_ERR "CSI2 CIO set power failed!\n"); return -EBUSY; } return 0; } /* * csiphy_dphy_config - Configure CSI2 D-PHY parameters. * * Called with phy->mutex taken. */ static void csiphy_dphy_config(struct isp_csiphy *phy) { u32 reg; /* Set up ISPCSIPHY_REG0 */ reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG0); reg &= ~(ISPCSIPHY_REG0_THS_TERM_MASK | ISPCSIPHY_REG0_THS_SETTLE_MASK); reg |= phy->dphy.ths_term << ISPCSIPHY_REG0_THS_TERM_SHIFT; reg |= phy->dphy.ths_settle << ISPCSIPHY_REG0_THS_SETTLE_SHIFT; isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG0); /* Set up ISPCSIPHY_REG1 */ reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG1); reg &= ~(ISPCSIPHY_REG1_TCLK_TERM_MASK | ISPCSIPHY_REG1_TCLK_MISS_MASK | ISPCSIPHY_REG1_TCLK_SETTLE_MASK); reg |= phy->dphy.tclk_term << ISPCSIPHY_REG1_TCLK_TERM_SHIFT; reg |= phy->dphy.tclk_miss << ISPCSIPHY_REG1_TCLK_MISS_SHIFT; reg |= phy->dphy.tclk_settle << ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT; isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG1); } static int csiphy_config(struct isp_csiphy *phy, struct isp_csiphy_dphy_cfg *dphy, struct isp_csiphy_lanes_cfg *lanes) { unsigned int used_lanes = 0; unsigned int i; /* Clock and data 
lanes verification */ for (i = 0; i < phy->num_data_lanes; i++) { if (lanes->data[i].pol > 1 || lanes->data[i].pos > 3) return -EINVAL; if (used_lanes & (1 << lanes->data[i].pos)) return -EINVAL; used_lanes |= 1 << lanes->data[i].pos; } if (lanes->clk.pol > 1 || lanes->clk.pos > 3) return -EINVAL; if (lanes->clk.pos == 0 || used_lanes & (1 << lanes->clk.pos)) return -EINVAL; mutex_lock(&phy->mutex); phy->dphy = *dphy; phy->lanes = *lanes; mutex_unlock(&phy->mutex); return 0; } int omap3isp_csiphy_acquire(struct isp_csiphy *phy) { int rval; if (phy->vdd == NULL) { dev_err(phy->isp->dev, "Power regulator for CSI PHY not " "available\n"); return -ENODEV; } mutex_lock(&phy->mutex); rval = regulator_enable(phy->vdd); if (rval < 0) goto done; omap3isp_csi2_reset(phy->csi2); csiphy_dphy_config(phy); csiphy_lanes_config(phy); rval = csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_ON); if (rval) { regulator_disable(phy->vdd); goto done; } csiphy_power_autoswitch_enable(phy, true); phy->phy_in_use = 1; done: mutex_unlock(&phy->mutex); return rval; } void omap3isp_csiphy_release(struct isp_csiphy *phy) { mutex_lock(&phy->mutex); if (phy->phy_in_use) { csiphy_power_autoswitch_enable(phy, false); csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_OFF); regulator_disable(phy->vdd); phy->phy_in_use = 0; } mutex_unlock(&phy->mutex); } /* * omap3isp_csiphy_init - Initialize the CSI PHY frontends */ int omap3isp_csiphy_init(struct isp_device *isp) { struct isp_csiphy *phy1 = &isp->isp_csiphy1; struct isp_csiphy *phy2 = &isp->isp_csiphy2; isp->platform_cb.csiphy_config = csiphy_config; phy2->isp = isp; phy2->csi2 = &isp->isp_csi2a; phy2->num_data_lanes = ISP_CSIPHY2_NUM_DATA_LANES; phy2->cfg_regs = OMAP3_ISP_IOMEM_CSI2A_REGS1; phy2->phy_regs = OMAP3_ISP_IOMEM_CSIPHY2; mutex_init(&phy2->mutex); if (isp->revision == ISP_REVISION_15_0) { phy1->isp = isp; phy1->csi2 = &isp->isp_csi2c; phy1->num_data_lanes = ISP_CSIPHY1_NUM_DATA_LANES; phy1->cfg_regs = OMAP3_ISP_IOMEM_CSI2C_REGS1; phy1->phy_regs 
= OMAP3_ISP_IOMEM_CSIPHY1; mutex_init(&phy1->mutex); } return 0; }
gpl-2.0
hendrik42/odroid-linux
drivers/infiniband/hw/mthca/mthca_mad.c
8514
10038
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/string.h> #include <linux/slab.h> #include <rdma/ib_verbs.h> #include <rdma/ib_mad.h> #include <rdma/ib_smi.h> #include "mthca_dev.h" #include "mthca_cmd.h" enum { MTHCA_VENDOR_CLASS1 = 0x9, MTHCA_VENDOR_CLASS2 = 0xa }; static int mthca_update_rate(struct mthca_dev *dev, u8 port_num) { struct ib_port_attr *tprops = NULL; int ret; tprops = kmalloc(sizeof *tprops, GFP_KERNEL); if (!tprops) return -ENOMEM; ret = ib_query_port(&dev->ib_dev, port_num, tprops); if (ret) { printk(KERN_WARNING "ib_query_port failed (%d) for %s port %d\n", ret, dev->ib_dev.name, port_num); goto out; } dev->rate[port_num - 1] = tprops->active_speed * ib_width_enum_to_int(tprops->active_width); out: kfree(tprops); return ret; } static void update_sm_ah(struct mthca_dev *dev, u8 port_num, u16 lid, u8 sl) { struct ib_ah *new_ah; struct ib_ah_attr ah_attr; unsigned long flags; if (!dev->send_agent[port_num - 1][0]) return; memset(&ah_attr, 0, sizeof ah_attr); ah_attr.dlid = lid; ah_attr.sl = sl; ah_attr.port_num = port_num; new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd, &ah_attr); if (IS_ERR(new_ah)) return; spin_lock_irqsave(&dev->sm_lock, flags); if (dev->sm_ah[port_num - 1]) ib_destroy_ah(dev->sm_ah[port_num - 1]); dev->sm_ah[port_num - 1] = new_ah; spin_unlock_irqrestore(&dev->sm_lock, flags); } /* * Snoop SM MADs for port info and P_Key table sets, so we can * synthesize LID change and P_Key change events. 
*/ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad, u16 prev_lid) { struct ib_event event; if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && mad->mad_hdr.method == IB_MGMT_METHOD_SET) { if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) { struct ib_port_info *pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data; u16 lid = be16_to_cpu(pinfo->lid); mthca_update_rate(to_mdev(ibdev), port_num); update_sm_ah(to_mdev(ibdev), port_num, be16_to_cpu(pinfo->sm_lid), pinfo->neighbormtu_mastersmsl & 0xf); event.device = ibdev; event.element.port_num = port_num; if (pinfo->clientrereg_resv_subnetto & 0x80) { event.event = IB_EVENT_CLIENT_REREGISTER; ib_dispatch_event(&event); } if (prev_lid != lid) { event.event = IB_EVENT_LID_CHANGE; ib_dispatch_event(&event); } } if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) { event.device = ibdev; event.event = IB_EVENT_PKEY_CHANGE; event.element.port_num = port_num; ib_dispatch_event(&event); } } } static void node_desc_override(struct ib_device *dev, struct ib_mad *mad) { if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP && mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) { mutex_lock(&to_mdev(dev)->cap_mask_mutex); memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64); mutex_unlock(&to_mdev(dev)->cap_mask_mutex); } } static void forward_trap(struct mthca_dev *dev, u8 port_num, struct ib_mad *mad) { int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; struct ib_mad_send_buf *send_buf; struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; int ret; unsigned long flags; if (agent) { send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC); if (IS_ERR(send_buf)) return; /* * We rely here on the fact that MLX QPs don't use the * 
address handle after the send is posted (this is * wrong following the IB spec strictly, but we know * it's OK for our devices). */
	spin_lock_irqsave(&dev->sm_lock, flags);
	memcpy(send_buf->mad, mad, sizeof *mad);
	/* Reuse the cached SM address handle for this port; if none has
	 * been learned yet there is nobody to send the trap to. */
	if ((send_buf->ah = dev->sm_ah[port_num - 1]))
		ret = ib_post_send_mad(send_buf, NULL);
	else
		ret = -EINVAL;
	spin_unlock_irqrestore(&dev->sm_lock, flags);

	if (ret)
		ib_free_send_mad(send_buf);
	}
}

/*
 * Process an incoming MAD for this device.
 *
 * Locally generated traps (method TRAP with source LID 0) are forwarded
 * to the SM and consumed.  For the SM classes only GET/SET/TRAP_REPRESS
 * are handled; for the PMA and Mellanox vendor classes only GET/SET.
 * Everything else is accepted without generating a reply.  The real
 * work is delegated to firmware through mthca_MAD_IFC().
 */
int mthca_process_mad(struct ib_device *ibdev,
		      int mad_flags,
		      u8 port_num,
		      struct ib_wc *in_wc,
		      struct ib_grh *in_grh,
		      struct ib_mad *in_mad,
		      struct ib_mad *out_mad)
{
	int err;
	u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
	u16 prev_lid = 0;
	struct ib_port_attr pattr;

	/* Forward locally generated traps to the SM */
	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	/*
	 * Only handle SM gets, sets and trap represses for SM class
	 *
	 * Only handle PMA and Mellanox vendor-specific class gets and
	 * sets for other classes.
	 */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries or vendor-specific
		 * MADs -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
		    ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
		     IB_SMP_ATTR_VENDOR_MASK))
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 ||
		   in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;

	/* Remember the port's current LID before a PortInfo SET may
	 * change it, so snooping below can detect the change. */
	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		prev_lid = pattr.lid;

	err = mthca_MAD_IFC(to_mdev(ibdev),
			    mad_flags & IB_MAD_IGNORE_MKEY,
			    mad_flags & IB_MAD_IGNORE_BKEY,
			    port_num, in_wc, in_grh, in_mad, out_mad);
	if (err == -EBADMSG)
		return IB_MAD_RESULT_SUCCESS;
	else if (err) {
		mthca_err(to_mdev(ibdev), "MAD_IFC returned %d\n", err);
		return IB_MAD_RESULT_FAILURE;
	}

	if (!out_mad->mad_hdr.status) {
		smp_snoop(ibdev, port_num, in_mad, prev_lid);
		node_desc_override(ibdev, out_mad);
	}

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

/* Completion handler for our send-only MAD agents: just release the
 * send buffer that was posted. */
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	ib_free_send_mad(mad_send_wc->send_buf);
}

/*
 * Register one send-only MAD agent per (port, SMI/GSI) pair and cache
 * each port's rate.  On any failure all agents registered so far are
 * unregistered and the error is returned.
 */
int mthca_create_agents(struct mthca_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;
	int ret;

	spin_lock_init(&dev->sm_lock);

	/* q == 0 -> SMI (QP0), q == 1 -> GSI (QP1) */
	for (p = 0; p < dev->limits.num_ports; ++p)
		for (q = 0; q <= 1; ++q) {
			agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
						      q ? IB_QPT_GSI : IB_QPT_SMI,
						      NULL, 0, send_handler,
						      NULL, NULL);
			if (IS_ERR(agent)) {
				ret = PTR_ERR(agent);
				goto err;
			}
			dev->send_agent[p][q] = agent;
		}

	for (p = 1; p <= dev->limits.num_ports; ++p) {
		ret = mthca_update_rate(dev, p);
		if (ret) {
			mthca_err(dev, "Failed to obtain port %d rate."
				  " aborting.\n", p);
			goto err;
		}
	}

	return 0;

err:
	for (p = 0; p < dev->limits.num_ports; ++p)
		for (q = 0; q <= 1; ++q)
			if (dev->send_agent[p][q])
				ib_unregister_mad_agent(dev->send_agent[p][q]);

	return ret;
}

/*
 * Unregister every send agent and destroy any cached SM address
 * handles.  Counterpart of mthca_create_agents().
 */
void mthca_free_agents(struct mthca_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;

	for (p = 0; p < dev->limits.num_ports; ++p) {
		for (q = 0; q <= 1; ++q) {
			agent = dev->send_agent[p][q];
			dev->send_agent[p][q] = NULL;
			ib_unregister_mad_agent(agent);
		}

		if (dev->sm_ah[p])
			ib_destroy_ah(dev->sm_ah[p]);
	}
}
gpl-2.0
milodky/tegra_git
arch/mips/loongson/common/setup.c
8770
1197
/* * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <asm/wbflush.h> #include <loongson.h> #ifdef CONFIG_VT #include <linux/console.h> #include <linux/screen_info.h> #endif void (*__wbflush)(void); EXPORT_SYMBOL(__wbflush); static void wbflush_loongson(void) { asm(".set\tpush\n\t" ".set\tnoreorder\n\t" ".set mips3\n\t" "sync\n\t" "nop\n\t" ".set\tpop\n\t" ".set mips0\n\t"); } void __init plat_mem_setup(void) { __wbflush = wbflush_loongson; #ifdef CONFIG_VT #if defined(CONFIG_VGA_CONSOLE) conswitchp = &vga_con; screen_info = (struct screen_info) { .orig_x = 0, .orig_y = 25, .orig_video_cols = 80, .orig_video_lines = 25, .orig_video_isVGA = VIDEO_TYPE_VGAC, .orig_video_points = 16, }; #elif defined(CONFIG_DUMMY_CONSOLE) conswitchp = &dummy_con; #endif #endif }
gpl-2.0
fire855/android_kernel_wiko_l5510
net/irda/irproc.c
12354
2589
/*********************************************************************
 *
 * Filename:      irproc.c
 * Version:       1.0
 * Description:   Various entries in the /proc file system
 * Status:        Experimental.
 * Author:        Thomas Davis, <ratbert@radiks.net>
 * Created at:    Sat Feb 21 21:33:24 1998
 * Modified at:   Sun Nov 14 08:54:54 1999
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 *     Copyright (c) 1998-1999, Dag Brattli <dagb@cs.uit.no>
 *     Copyright (c) 1998, Thomas Davis, <ratbert@radiks.net>,
 *     All Rights Reserved.
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     I, Thomas Davis, provide no warranty for any of this software.
 *     This material is provided "AS-IS" and at no charge.
 *
 ********************************************************************/

#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/init.h>

#include <net/net_namespace.h>
#include <net/irda/irda.h>
#include <net/irda/irlap.h>
#include <net/irda/irlmp.h>

/* seq_file operations implemented by the individual IrDA modules. */
extern const struct file_operations discovery_seq_fops;
extern const struct file_operations irlap_seq_fops;
extern const struct file_operations irlmp_seq_fops;
extern const struct file_operations irttp_seq_fops;
extern const struct file_operations irias_seq_fops;

/* One /proc/net/irda entry: its name and backing file operations. */
struct irda_entry {
	const char *name;
	const struct file_operations *fops;
};

/* Directory handle for /proc/net/irda; NULL when not registered. */
struct proc_dir_entry *proc_irda;
EXPORT_SYMBOL(proc_irda);

static const struct irda_entry irda_dirs[] = {
	{"discovery",	&discovery_seq_fops},
	{"irttp",	&irttp_seq_fops},
	{"irlmp",	&irlmp_seq_fops},
	{"irlap",	&irlap_seq_fops},
	{"irias",	&irias_seq_fops},
};

/*
 * Function irda_proc_register (void)
 *
 *    Register irda entry in /proc file system
 *
 *    If the directory cannot be created, proc_irda stays NULL, which
 *    irda_proc_unregister() checks before removing anything.
 */
void __init irda_proc_register(void)
{
	int i;

	proc_irda = proc_mkdir("irda", init_net.proc_net);
	if (proc_irda == NULL)
		return;

	/* Failure to create an individual entry is deliberately ignored:
	 * the remaining entries are still useful. */
	for (i = 0; i < ARRAY_SIZE(irda_dirs); i++)
		(void) proc_create(irda_dirs[i].name, 0, proc_irda,
				   irda_dirs[i].fops);
}

/*
 * Function irda_proc_unregister (void)
 *
 *    Unregister irda entry in /proc file system
 *
 */
void irda_proc_unregister(void)
{
	int i;

	if (proc_irda) {
		for (i=0; i<ARRAY_SIZE(irda_dirs); i++)
			remove_proc_entry(irda_dirs[i].name, proc_irda);

		remove_proc_entry("irda", init_net.proc_net);
		proc_irda = NULL;
	}
}
gpl-2.0
Ahmed-Hady/android_kernel_nokia_normandy
net/irda/irproc.c
12354
2589
/*********************************************************************
 *
 * Filename:      irproc.c
 * Version:       1.0
 * Description:   Various entries in the /proc file system
 * Status:        Experimental.
 * Author:        Thomas Davis, <ratbert@radiks.net>
 * Created at:    Sat Feb 21 21:33:24 1998
 * Modified at:   Sun Nov 14 08:54:54 1999
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 *     Copyright (c) 1998-1999, Dag Brattli <dagb@cs.uit.no>
 *     Copyright (c) 1998, Thomas Davis, <ratbert@radiks.net>,
 *     All Rights Reserved.
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     I, Thomas Davis, provide no warranty for any of this software.
 *     This material is provided "AS-IS" and at no charge.
 *
 ********************************************************************/

#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/init.h>

#include <net/net_namespace.h>
#include <net/irda/irda.h>
#include <net/irda/irlap.h>
#include <net/irda/irlmp.h>

/* Per-module seq_file operations; defined by the respective modules. */
extern const struct file_operations discovery_seq_fops;
extern const struct file_operations irlap_seq_fops;
extern const struct file_operations irlmp_seq_fops;
extern const struct file_operations irttp_seq_fops;
extern const struct file_operations irias_seq_fops;

/* Name plus file operations for one /proc/net/irda entry. */
struct irda_entry {
	const char *name;
	const struct file_operations *fops;
};

/* /proc/net/irda directory; NULL until registered. */
struct proc_dir_entry *proc_irda;
EXPORT_SYMBOL(proc_irda);

static const struct irda_entry irda_dirs[] = {
	{"discovery",	&discovery_seq_fops},
	{"irttp",	&irttp_seq_fops},
	{"irlmp",	&irlmp_seq_fops},
	{"irlap",	&irlap_seq_fops},
	{"irias",	&irias_seq_fops},
};

/*
 * Create /proc/net/irda and populate it with the entries listed in
 * irda_dirs[].  If the directory itself cannot be created we give up
 * silently; individual entry-creation failures are also ignored.
 */
void __init irda_proc_register(void)
{
	int idx;

	proc_irda = proc_mkdir("irda", init_net.proc_net);
	if (!proc_irda)
		return;

	for (idx = 0; idx < ARRAY_SIZE(irda_dirs); idx++)
		(void) proc_create(irda_dirs[idx].name, 0, proc_irda,
				   irda_dirs[idx].fops);
}

/*
 * Tear down /proc/net/irda: remove every entry, then the directory,
 * and mark the module as unregistered.  A no-op when registration
 * never happened.
 */
void irda_proc_unregister(void)
{
	int idx;

	if (!proc_irda)
		return;

	for (idx = 0; idx < ARRAY_SIZE(irda_dirs); idx++)
		remove_proc_entry(irda_dirs[idx].name, proc_irda);

	remove_proc_entry("irda", init_net.proc_net);
	proc_irda = NULL;
}
gpl-2.0
bilalliberty/android_kernel_htc_villec2
arch/arm/mach-msm/rpm.c
67
33347
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/bug.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/semaphore.h> #include <linux/spinlock.h> #include <linux/device.h> #ifdef CONFIG_ARCH_MSM8960 #include <linux/slab.h> #endif #include <linux/platform_device.h> #include <asm/hardware/gic.h> #include <mach/msm_iomap.h> #include <mach/rpm.h> #include <mach/board_htc.h> #include <linux/rtc.h> /****************************************************************************** * Data type and structure definitions *****************************************************************************/ struct msm_rpm_request { struct msm_rpm_iv_pair *req; int count; uint32_t *ctx_mask_ack; uint32_t *sel_masks_ack; struct completion *done; }; struct msm_rpm_notif_config { struct msm_rpm_iv_pair iv[MSM_RPM_SEL_MASK_SIZE * 2]; }; #define configured_iv(notif_cfg) ((notif_cfg)->iv) #define registered_iv(notif_cfg) ((notif_cfg)->iv + MSM_RPM_SEL_MASK_SIZE) #ifdef CONFIG_ARCH_MSM8960 static int rpm_debug_enable = 0; #endif static struct msm_rpm_platform_data *msm_rpm_platform; static uint32_t msm_rpm_map[MSM_RPM_ID_LAST + 1]; static stats_blob *msm_rpm_stat_data; static DEFINE_MUTEX(msm_rpm_mutex); static DEFINE_SPINLOCK(msm_rpm_lock); static 
DEFINE_SPINLOCK(msm_rpm_irq_lock); static struct msm_rpm_request *msm_rpm_request; static struct msm_rpm_request msm_rpm_request_irq_mode; static struct msm_rpm_request msm_rpm_request_poll_mode; static LIST_HEAD(msm_rpm_notifications); static struct msm_rpm_notif_config msm_rpm_notif_cfgs[MSM_RPM_CTX_SET_COUNT]; static bool msm_rpm_init_notif_done; /****************************************************************************** * Internal functions *****************************************************************************/ static inline uint32_t msm_rpm_read(unsigned int page, unsigned int reg) { return __raw_readl(msm_rpm_platform->reg_base_addrs[page] + reg * 4); } static inline void msm_rpm_write( unsigned int page, unsigned int reg, uint32_t value) { __raw_writel(value, msm_rpm_platform->reg_base_addrs[page] + reg * 4); } static inline void msm_rpm_read_contiguous( unsigned int page, unsigned int reg, uint32_t *values, int count) { int i; for (i = 0; i < count; i++) values[i] = msm_rpm_read(page, reg + i); } static inline void msm_rpm_write_contiguous( unsigned int page, unsigned int reg, uint32_t *values, int count) { int i; for (i = 0; i < count; i++) msm_rpm_write(page, reg + i, values[i]); } static inline void msm_rpm_write_contiguous_zeros( unsigned int page, unsigned int reg, int count) { int i; for (i = 0; i < count; i++) msm_rpm_write(page, reg + i, 0); } static inline uint32_t msm_rpm_map_id_to_sel(uint32_t id) { return (id > MSM_RPM_ID_LAST) ? MSM_RPM_SEL_LAST + 1 : msm_rpm_map[id]; } /* * Note: the function does not clear the masks before filling them. 
* * Return value: * 0: success * -EINVAL: invalid id in <req> array */ static int msm_rpm_fill_sel_masks( uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count) { uint32_t sel; int i; for (i = 0; i < count; i++) { sel = msm_rpm_map_id_to_sel(req[i].id); if (sel > MSM_RPM_SEL_LAST) return -EINVAL; sel_masks[msm_rpm_get_sel_mask_reg(sel)] |= msm_rpm_get_sel_mask(sel); } return 0; } static inline void msm_rpm_send_req_interrupt(void) { __raw_writel(msm_rpm_platform->msm_apps_ipc_rpm_val, msm_rpm_platform->msm_apps_ipc_rpm_reg); } /* * Note: assumes caller has acquired <msm_rpm_irq_lock>. * * Return value: * 0: request acknowledgement * 1: notification * 2: spurious interrupt */ static int msm_rpm_process_ack_interrupt(void) { uint32_t ctx_mask_ack; uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE]; ctx_mask_ack = msm_rpm_read(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_CTX_0); msm_rpm_read_contiguous(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_SEL_0, sel_masks_ack, MSM_RPM_SEL_MASK_SIZE); if (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_NOTIFICATION)) { struct msm_rpm_notification *n; int i; list_for_each_entry(n, &msm_rpm_notifications, list) for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++) if (sel_masks_ack[i] & n->sel_masks[i]) { up(&n->sem); break; } msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_SEL_0, MSM_RPM_SEL_MASK_SIZE); msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_CTX_0, 0); /* Ensure the write is complete before return */ mb(); return 1; } if (msm_rpm_request) { int i; *(msm_rpm_request->ctx_mask_ack) = ctx_mask_ack; memcpy(msm_rpm_request->sel_masks_ack, sel_masks_ack, sizeof(sel_masks_ack)); for (i = 0; i < msm_rpm_request->count; i++) msm_rpm_request->req[i].value = msm_rpm_read(MSM_RPM_PAGE_ACK, msm_rpm_request->req[i].id); msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_SEL_0, MSM_RPM_SEL_MASK_SIZE); msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_CTX_0, 0); /* Ensure the write is complete before return */ mb(); if 
(msm_rpm_request->done) complete_all(msm_rpm_request->done); msm_rpm_request = NULL; return 0; } return 2; } static irqreturn_t msm_rpm_ack_interrupt(int irq, void *dev_id) { unsigned long flags; int rc; if (dev_id != &msm_rpm_ack_interrupt) return IRQ_NONE; spin_lock_irqsave(&msm_rpm_irq_lock, flags); rc = msm_rpm_process_ack_interrupt(); spin_unlock_irqrestore(&msm_rpm_irq_lock, flags); return IRQ_HANDLED; } /* * Note: assumes caller has acquired <msm_rpm_irq_lock>. */ static void msm_rpm_busy_wait_for_request_completion( bool allow_async_completion) { int rc; do { while (!gic_is_spi_pending(msm_rpm_platform->irq_ack) && msm_rpm_request) { if (allow_async_completion) spin_unlock(&msm_rpm_irq_lock); udelay(1); if (allow_async_completion) spin_lock(&msm_rpm_irq_lock); } if (!msm_rpm_request) break; rc = msm_rpm_process_ack_interrupt(); gic_clear_spi_pending(msm_rpm_platform->irq_ack); } while (rc); } /* Upon return, the <req> array will contain values from the ack page. * * Note: assumes caller has acquired <msm_rpm_mutex>. 
* * Return value: * 0: success * -ENOSPC: request rejected */ static int msm_rpm_set_exclusive(int ctx, uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count) { DECLARE_COMPLETION_ONSTACK(ack); unsigned long flags; uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx); uint32_t ctx_mask_ack = 0; uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE]; int i; msm_rpm_request_irq_mode.req = req; msm_rpm_request_irq_mode.count = count; msm_rpm_request_irq_mode.ctx_mask_ack = &ctx_mask_ack; msm_rpm_request_irq_mode.sel_masks_ack = sel_masks_ack; msm_rpm_request_irq_mode.done = &ack; spin_lock_irqsave(&msm_rpm_lock, flags); spin_lock(&msm_rpm_irq_lock); BUG_ON(msm_rpm_request); msm_rpm_request = &msm_rpm_request_irq_mode; for (i = 0; i < count; i++) { BUG_ON(req[i].id > MSM_RPM_ID_LAST); msm_rpm_write(MSM_RPM_PAGE_REQ, req[i].id, req[i].value); } msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_REQ_SEL_0, sel_masks, MSM_RPM_SEL_MASK_SIZE); msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_REQ_CTX_0, ctx_mask); /* Ensure RPM data is written before sending the interrupt */ mb(); msm_rpm_send_req_interrupt(); spin_unlock(&msm_rpm_irq_lock); spin_unlock_irqrestore(&msm_rpm_lock, flags); wait_for_completion(&ack); BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))) != ctx_mask); BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack))); return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)) ? -ENOSPC : 0; } /* Upon return, the <req> array will contain values from the ack page. * * Note: assumes caller has acquired <msm_rpm_lock>. 
* * Return value: * 0: success * -ENOSPC: request rejected */ static int msm_rpm_set_exclusive_noirq(int ctx, uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count) { unsigned int irq = msm_rpm_platform->irq_ack; unsigned long flags; uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx); uint32_t ctx_mask_ack = 0; uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE]; struct irq_chip *irq_chip = NULL; int i; msm_rpm_request_poll_mode.req = req; msm_rpm_request_poll_mode.count = count; msm_rpm_request_poll_mode.ctx_mask_ack = &ctx_mask_ack; msm_rpm_request_poll_mode.sel_masks_ack = sel_masks_ack; msm_rpm_request_poll_mode.done = NULL; spin_lock_irqsave(&msm_rpm_irq_lock, flags); irq_chip = irq_get_chip(irq); if (!irq_chip) { spin_unlock_irqrestore(&msm_rpm_irq_lock, flags); return -ENOSPC; } irq_chip->irq_mask(irq_get_irq_data(irq)); if (msm_rpm_request) { msm_rpm_busy_wait_for_request_completion(true); BUG_ON(msm_rpm_request); } msm_rpm_request = &msm_rpm_request_poll_mode; for (i = 0; i < count; i++) { BUG_ON(req[i].id > MSM_RPM_ID_LAST); msm_rpm_write(MSM_RPM_PAGE_REQ, req[i].id, req[i].value); } msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_REQ_SEL_0, sel_masks, MSM_RPM_SEL_MASK_SIZE); msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_REQ_CTX_0, ctx_mask); /* Ensure RPM data is written before sending the interrupt */ mb(); msm_rpm_send_req_interrupt(); msm_rpm_busy_wait_for_request_completion(false); BUG_ON(msm_rpm_request); irq_chip->irq_unmask(irq_get_irq_data(irq)); spin_unlock_irqrestore(&msm_rpm_irq_lock, flags); BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))) != ctx_mask); BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack))); return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)) ? -ENOSPC : 0; } /* Upon return, the <req> array will contain values from the ack page. 
 *
 * Common worker for msm_rpm_set() / msm_rpm_set_noirq().
 *
 * Return value:
 *   0: success
 *  -EINTR: interrupted
 *  -EINVAL: invalid <ctx> or invalid id in <req> array
 *  -ENOSPC: request rejected
 */
static int msm_rpm_set_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {};
	int rc;

	if (ctx >= MSM_RPM_CTX_SET_COUNT) {
		rc = -EINVAL;
		goto set_common_exit;
	}

	rc = msm_rpm_fill_sel_masks(sel_masks, req, count);
	if (rc)
		goto set_common_exit;

	if (noirq) {
		/* Caller runs with irqs disabled: serialize with the
		 * spinlock and use the polling transport. */
		unsigned long flags;

		spin_lock_irqsave(&msm_rpm_lock, flags);
		rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, req, count);
		spin_unlock_irqrestore(&msm_rpm_lock, flags);
	} else {
		/* Task context: sleepable lock, interrupt-driven ack. */
		rc = mutex_lock_interruptible(&msm_rpm_mutex);
		if (rc)
			goto set_common_exit;

		rc = msm_rpm_set_exclusive(ctx, sel_masks, req, count);
		mutex_unlock(&msm_rpm_mutex);
	}

set_common_exit:
	return rc;
}

/*
 * Common worker for msm_rpm_clear() / msm_rpm_clear_noirq(): builds an
 * INVALIDATE request covering the selectors referenced by <req> and
 * sends it to the RPM.
 *
 * Return value:
 *   0: success
 *  -EINTR: interrupted
 *  -EINVAL: invalid <ctx> or invalid id in <req> array
 */
static int msm_rpm_clear_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {};
	struct msm_rpm_iv_pair r[MSM_RPM_SEL_MASK_SIZE];
	int rc;
	int i;

	if (ctx >= MSM_RPM_CTX_SET_COUNT) {
		rc = -EINVAL;
		goto clear_common_exit;
	}

	rc = msm_rpm_fill_sel_masks(sel_masks, req, count);
	if (rc)
		goto clear_common_exit;

	/* Turn the selector masks into INVALIDATE_n register writes. */
	for (i = 0; i < ARRAY_SIZE(r); i++) {
		r[i].id = MSM_RPM_ID_INVALIDATE_0 + i;
		r[i].value = sel_masks[i];
	}

	/* The request itself is selected via the INVALIDATE selector. */
	memset(sel_masks, 0, sizeof(sel_masks));
	sel_masks[msm_rpm_get_sel_mask_reg(MSM_RPM_SEL_INVALIDATE)] |=
		msm_rpm_get_sel_mask(MSM_RPM_SEL_INVALIDATE);

	if (noirq) {
		unsigned long flags;

		spin_lock_irqsave(&msm_rpm_lock, flags);
		rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, r,
						 ARRAY_SIZE(r));
		spin_unlock_irqrestore(&msm_rpm_lock, flags);
		/* Invalidate requests are never expected to be rejected. */
		BUG_ON(rc);
	} else {
		rc = mutex_lock_interruptible(&msm_rpm_mutex);
		if (rc)
			goto clear_common_exit;

		rc = msm_rpm_set_exclusive(ctx, sel_masks, r, ARRAY_SIZE(r));
		mutex_unlock(&msm_rpm_mutex);
		/* Invalidate requests are never expected to be rejected. */
		BUG_ON(rc);
	}

clear_common_exit:
	return rc;
}

/*
 * 
Note: assumes caller has acquired <msm_rpm_mutex>. */ static void msm_rpm_update_notification(uint32_t ctx, struct msm_rpm_notif_config *curr_cfg, struct msm_rpm_notif_config *new_cfg) { if (memcmp(curr_cfg, new_cfg, sizeof(*new_cfg))) { uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {}; int rc; sel_masks[msm_rpm_get_sel_mask_reg(MSM_RPM_SEL_NOTIFICATION)] |= msm_rpm_get_sel_mask(MSM_RPM_SEL_NOTIFICATION); rc = msm_rpm_set_exclusive(ctx, sel_masks, new_cfg->iv, ARRAY_SIZE(new_cfg->iv)); BUG_ON(rc); memcpy(curr_cfg, new_cfg, sizeof(*new_cfg)); } } /* * Note: assumes caller has acquired <msm_rpm_mutex>. */ static void msm_rpm_initialize_notification(void) { struct msm_rpm_notif_config cfg; unsigned int ctx; int i; for (ctx = MSM_RPM_CTX_SET_0; ctx <= MSM_RPM_CTX_SET_SLEEP; ctx++) { cfg = msm_rpm_notif_cfgs[ctx]; for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++) { configured_iv(&cfg)[i].id = MSM_RPM_ID_NOTIFICATION_CONFIGURED_0 + i; configured_iv(&cfg)[i].value = ~0UL; registered_iv(&cfg)[i].id = MSM_RPM_ID_NOTIFICATION_REGISTERED_0 + i; registered_iv(&cfg)[i].value = 0; } msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg); } } /****************************************************************************** * Public functions *****************************************************************************/ void msm_rpm_print_sleep_tick(void) { uint32_t *mpm_sleep_tick = (void *) (MSM_RPM_MPM_BASE + 0x24); pr_info("MPM_SLEEP_TICK: %llums\n", ((uint64_t)(*mpm_sleep_tick) * 1000) >> 15); } EXPORT_SYMBOL(msm_rpm_print_sleep_tick); void msm_rpm_check_rtc(void) { uint32_t *mpm_sleep_tick = (void *) (MSM_RPM_MPM_BASE + 0x24); static ulong rpm_time_record = 0, rtc_time_record = 0; ulong rpm_time_diff, rtc_time_diff; struct timespec ts; getnstimeofday(&ts); pr_info("[RTC_DEBUG] MPM_RPM_TICK: %llums\n", ((uint64_t)(*mpm_sleep_tick) * 1000) >> 15); if (likely(rpm_time_record)) { if (ts.tv_sec > rtc_time_record) { rpm_time_diff = (ulong)(((uint64_t)(*mpm_sleep_tick) * 1000) >> 
15) / 1000 - rpm_time_record; rtc_time_diff = ts.tv_sec - rtc_time_record; if ((rpm_time_diff > rtc_time_diff && (rpm_time_diff - rtc_time_diff) > 5) || (rpm_time_diff < rtc_time_diff && (rtc_time_diff - rpm_time_diff) > 5)) { pr_info("[K][RTC_DEBUG] RTC TIME Change!!!\n"); pr_info("[K][RTC_DEBUG] Last RTC[%lu], Now RTC[%lu]\n", rtc_time_record, ts.tv_sec); pr_info("[K][RTC_DEBUG] Last RPM[%lu], Diff[%lu]\n", rpm_time_record, rpm_time_diff); } else { pr_info("[K][RTC_DEBUG] RTC TIME OK!\n"); pr_info("[K][RTC_DEBUG] Last RTC[%lu], Now RTC[%lu]\n", rtc_time_record, ts.tv_sec); pr_info("[K][RTC_DEBUG] Last RPM[%lu], Diff[%lu]\n", rpm_time_record, rpm_time_diff); } } else { pr_info("[K][RTC_DEBUG] RTC TIME Changes!!!\n"); pr_info("[K][RTC_DEBUG] Last RTC[%lu], Now RTC[%lu]\n", rtc_time_record, ts.tv_sec); } } rpm_time_record = (ulong)(((uint64_t)(*mpm_sleep_tick) * 1000) >> 15) / 1000; rtc_time_record = ts.tv_sec; } EXPORT_SYMBOL(msm_rpm_check_rtc); int msm_rpm_local_request_is_outstanding(void) { unsigned long flags; int outstanding = 0; if (!spin_trylock_irqsave(&msm_rpm_lock, flags)) goto local_request_is_outstanding_exit; if (!spin_trylock(&msm_rpm_irq_lock)) goto local_request_is_outstanding_unlock; outstanding = (msm_rpm_request != NULL); spin_unlock(&msm_rpm_irq_lock); local_request_is_outstanding_unlock: spin_unlock_irqrestore(&msm_rpm_lock, flags); local_request_is_outstanding_exit: return outstanding; } /* * Read the specified status registers and return their values. * * status: array of id-value pairs. Each <id> specifies a status register, * i.e, one of MSM_RPM_STATUS_ID_xxxx. Upon return, each <value> will * contain the value of the status register. 
* count: number of id-value pairs in the array * * Return value: * 0: success * -EBUSY: RPM is updating the status page; values across different registers * may not be consistent * -EINVAL: invalid id in <status> array */ int msm_rpm_get_status(struct msm_rpm_iv_pair *status, int count) { uint32_t seq_begin; uint32_t seq_end; int rc; int i; seq_begin = msm_rpm_read(MSM_RPM_PAGE_STATUS, MSM_RPM_STATUS_ID_SEQUENCE); for (i = 0; i < count; i++) { if (status[i].id > MSM_RPM_STATUS_ID_LAST) { rc = -EINVAL; goto get_status_exit; } status[i].value = msm_rpm_read(MSM_RPM_PAGE_STATUS, status[i].id); } seq_end = msm_rpm_read(MSM_RPM_PAGE_STATUS, MSM_RPM_STATUS_ID_SEQUENCE); rc = (seq_begin != seq_end || (seq_begin & 0x01)) ? -EBUSY : 0; get_status_exit: return rc; } EXPORT_SYMBOL(msm_rpm_get_status); /* * Issue a resource request to RPM to set resource values. * * Note: the function may sleep and must be called in a task context. * * ctx: the request's context. * There two contexts that a RPM driver client can use: * MSM_RPM_CTX_SET_0 and MSM_RPM_CTX_SET_SLEEP. For resource values * that are intended to take effect when the CPU is active, * MSM_RPM_CTX_SET_0 should be used. For resource values that are * intended to take effect when the CPU is not active, * MSM_RPM_CTX_SET_SLEEP should be used. * req: array of id-value pairs. Each <id> specifies a RPM resource, * i.e, one of MSM_RPM_ID_xxxx. Each <value> specifies the requested * resource value. * count: number of id-value pairs in the array * * Return value: * 0: success * -EINTR: interrupted * -EINVAL: invalid <ctx> or invalid id in <req> array * -ENOSPC: request rejected */ int msm_rpm_set(int ctx, struct msm_rpm_iv_pair *req, int count) { return msm_rpm_set_common(ctx, req, count, false); } EXPORT_SYMBOL(msm_rpm_set); /* * Issue a resource request to RPM to set resource values. * * Note: the function is similar to msm_rpm_set() except that it must be * called with interrupts masked. 
If possible, use msm_rpm_set() * instead, to maximize CPU throughput. */ int msm_rpm_set_noirq(int ctx, struct msm_rpm_iv_pair *req, int count) { WARN(!irqs_disabled(), "msm_rpm_set_noirq can only be called " "safely when local irqs are disabled. Consider using " "msm_rpm_set or msm_rpm_set_nosleep instead."); return msm_rpm_set_common(ctx, req, count, true); } EXPORT_SYMBOL(msm_rpm_set_noirq); /* * Issue a resource request to RPM to clear resource values. Once the * values are cleared, the resources revert back to their default values * for this RPM master. * * Note: the function may sleep and must be called in a task context. * * ctx: the request's context. * req: array of id-value pairs. Each <id> specifies a RPM resource, * i.e, one of MSM_RPM_ID_xxxx. <value>'s are ignored. * count: number of id-value pairs in the array * * Return value: * 0: success * -EINTR: interrupted * -EINVAL: invalid <ctx> or invalid id in <req> array */ int msm_rpm_clear(int ctx, struct msm_rpm_iv_pair *req, int count) { return msm_rpm_clear_common(ctx, req, count, false); } EXPORT_SYMBOL(msm_rpm_clear); /* * Issue a resource request to RPM to clear resource values. * * Note: the function is similar to msm_rpm_clear() except that it must be * called with interrupts masked. If possible, use msm_rpm_clear() * instead, to maximize CPU throughput. */ int msm_rpm_clear_noirq(int ctx, struct msm_rpm_iv_pair *req, int count) { WARN(!irqs_disabled(), "msm_rpm_clear_noirq can only be called " "safely when local irqs are disabled. Consider using " "msm_rpm_clear or msm_rpm_clear_nosleep instead."); return msm_rpm_clear_common(ctx, req, count, true); } EXPORT_SYMBOL(msm_rpm_clear_noirq); /* * Register for RPM notification. When the specified resources * change their status on RPM, RPM sends out notifications and the * driver will "up" the semaphore in struct msm_rpm_notification. * * Note: the function may sleep and must be called in a task context. 
* * Memory for <n> must not be freed until the notification is * unregistered. Memory for <req> can be freed after this * function returns. * * n: the notifcation object. Caller should initialize only the * semaphore field. When a notification arrives later, the * semaphore will be "up"ed. * req: array of id-value pairs. Each <id> specifies a status register, * i.e, one of MSM_RPM_STATUS_ID_xxxx. <value>'s are ignored. * count: number of id-value pairs in the array * * Return value: * 0: success * -EINTR: interrupted * -EINVAL: invalid id in <req> array */ int msm_rpm_register_notification(struct msm_rpm_notification *n, struct msm_rpm_iv_pair *req, int count) { unsigned long flags; unsigned int ctx; struct msm_rpm_notif_config cfg; int rc; int i; INIT_LIST_HEAD(&n->list); rc = msm_rpm_fill_sel_masks(n->sel_masks, req, count); if (rc) goto register_notification_exit; rc = mutex_lock_interruptible(&msm_rpm_mutex); if (rc) goto register_notification_exit; if (!msm_rpm_init_notif_done) { msm_rpm_initialize_notification(); msm_rpm_init_notif_done = true; } spin_lock_irqsave(&msm_rpm_irq_lock, flags); list_add(&n->list, &msm_rpm_notifications); spin_unlock_irqrestore(&msm_rpm_irq_lock, flags); ctx = MSM_RPM_CTX_SET_0; cfg = msm_rpm_notif_cfgs[ctx]; for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++) registered_iv(&cfg)[i].value |= n->sel_masks[i]; msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg); mutex_unlock(&msm_rpm_mutex); register_notification_exit: return rc; } EXPORT_SYMBOL(msm_rpm_register_notification); /* * Unregister a notification. * * Note: the function may sleep and must be called in a task context. * * n: the notifcation object that was registered previously. 
* * Return value: * 0: success * -EINTR: interrupted */ int msm_rpm_unregister_notification(struct msm_rpm_notification *n) { unsigned long flags; unsigned int ctx; struct msm_rpm_notif_config cfg; int rc; int i; rc = mutex_lock_interruptible(&msm_rpm_mutex); if (rc) goto unregister_notification_exit; ctx = MSM_RPM_CTX_SET_0; cfg = msm_rpm_notif_cfgs[ctx]; for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++) registered_iv(&cfg)[i].value = 0; spin_lock_irqsave(&msm_rpm_irq_lock, flags); list_del(&n->list); list_for_each_entry(n, &msm_rpm_notifications, list) for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++) registered_iv(&cfg)[i].value |= n->sel_masks[i]; spin_unlock_irqrestore(&msm_rpm_irq_lock, flags); msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg); mutex_unlock(&msm_rpm_mutex); unregister_notification_exit: return rc; } EXPORT_SYMBOL(msm_rpm_unregister_notification); static uint32_t fw_major, fw_minor, fw_build; static ssize_t driver_version_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%u.%u.%u\n", RPM_MAJOR_VER, RPM_MINOR_VER, RPM_BUILD_VER); } static ssize_t fw_version_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%u.%u.%u\n", fw_major, fw_minor, fw_build); } static struct kobj_attribute driver_version_attr = __ATTR_RO(driver_version); static struct kobj_attribute fw_version_attr = __ATTR_RO(fw_version); static struct attribute *driver_attributes[] = { &driver_version_attr.attr, &fw_version_attr.attr, NULL }; static struct attribute_group driver_attr_group = { .attrs = driver_attributes, }; static int __devinit msm_rpm_probe(struct platform_device *pdev) { return sysfs_create_group(&pdev->dev.kobj, &driver_attr_group); } static int __devexit msm_rpm_remove(struct platform_device *pdev) { sysfs_remove_group(&pdev->dev.kobj, &driver_attr_group); return 0; } static struct platform_driver msm_rpm_platform_driver = { .probe = msm_rpm_probe, .remove = 
__devexit_p(msm_rpm_remove), .driver = { .name = "msm_rpm", .owner = THIS_MODULE, }, }; static void __init msm_rpm_populate_map(void) { int i, k; for (i = 0; i < ARRAY_SIZE(msm_rpm_map); i++) msm_rpm_map[i] = MSM_RPM_SEL_LAST + 1; for (i = 0; i < rpm_map_data_size; i++) { struct msm_rpm_map_data *raw_data = &rpm_map_data[i]; for (k = 0; k < raw_data->count; k++) msm_rpm_map[raw_data->id + k] = raw_data->sel; } } #ifdef CONFIG_ARCH_MSM8960 #define IMEM_DEBUG_LOC (0x2A03F7F0) unsigned int pa_memtest_rpm; #endif int __init msm_rpm_init(struct msm_rpm_platform_data *data) { unsigned int irq; int rc; #ifdef CONFIG_ARCH_MSM8960 int i; #endif msm_rpm_platform = data; msm_rpm_stat_data = (stats_blob *)msm_rpm_platform->reg_base_addrs[MSM_RPM_PAGE_STAT]; #ifdef CONFIG_ARCH_MSM8960 if (rpm_debug_enable != 0) { unsigned int *rpm_memtest; void *imem_loc = ioremap_nocache(IMEM_DEBUG_LOC, 4); rpm_memtest = kmalloc(1024*4, GFP_KERNEL); pa_memtest_rpm = __pa(rpm_memtest); pr_info("RPMTest address: %x\n", pa_memtest_rpm); for(i = 0; i < 1024; i++) { rpm_memtest[i] = 0xEFBEADDE; } writel(pa_memtest_rpm, imem_loc); iounmap(imem_loc); msm_rpm_stat_data->rpm_debug_mode |= RPM_DEBUG_RAM_DEBUG; } if ((get_radio_flag() & 0x8) && msm_rpm_stat_data) msm_rpm_stat_data->rpm_debug_mode |= RPM_DEBUG_RAM_DUMP; pr_info("%s : rpm_debug_mode : 0x%x\n", __func__, msm_rpm_stat_data->rpm_debug_mode); #endif fw_major = msm_rpm_read(MSM_RPM_PAGE_STATUS, MSM_RPM_STATUS_ID_VERSION_MAJOR); fw_minor = msm_rpm_read(MSM_RPM_PAGE_STATUS, MSM_RPM_STATUS_ID_VERSION_MINOR); fw_build = msm_rpm_read(MSM_RPM_PAGE_STATUS, MSM_RPM_STATUS_ID_VERSION_BUILD); pr_info("%s: RPM firmware %u.%u.%u\n", __func__, fw_major, fw_minor, fw_build); if (fw_major != RPM_MAJOR_VER) { pr_err("%s: RPM version %u.%u.%u incompatible with " "this driver version %u.%u.%u\n", __func__, fw_major, fw_minor, fw_build, RPM_MAJOR_VER, RPM_MINOR_VER, RPM_BUILD_VER); return -EFAULT; } msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_VERSION_MAJOR, 
RPM_MAJOR_VER); msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_VERSION_MINOR, RPM_MINOR_VER); msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_VERSION_BUILD, RPM_BUILD_VER); irq = msm_rpm_platform->irq_ack; rc = request_irq(irq, msm_rpm_ack_interrupt, IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND, "rpm_drv", msm_rpm_ack_interrupt); if (rc) { pr_err("%s: failed to request irq %d: %d\n", __func__, irq, rc); return rc; } rc = irq_set_irq_wake(irq, 1); if (rc) { pr_err("%s: failed to set wakeup irq %u: %d\n", __func__, irq, rc); return rc; } msm_rpm_populate_map(); msm_rpm_print_sleep_tick(); return platform_driver_register(&msm_rpm_platform_driver); } #if defined(CONFIG_ARCH_MSM8X60) void msm_rpm_dump_stat(void) { int i = 0, j = 0; if (msm_rpm_stat_data) { pr_info("%s: %u, %llums, %u, %llums, 0x%x, 0x%x\n", __func__, msm_rpm_stat_data->stats[RPM_STAT_XO_SHUTDOWN_COUNT].value, ((uint64_t)msm_rpm_stat_data->stats[RPM_STAT_XO_SHUTDOWN_TIME].value * 1000) >> 15, msm_rpm_stat_data->stats[RPM_STAT_VDD_MIN_COUNT].value, ((uint64_t)msm_rpm_stat_data->stats[RPM_STAT_VDD_MIN_TIME].value * 1000) >> 15, msm_rpm_stat_data->mpm_int_status[0], msm_rpm_stat_data->mpm_int_status[1]); for (i = 0; i < RPM_MASTER_COUNT; i++) { #ifdef CONFIG_ARCH_MSM8X60_LTE pr_info("sleep_info_m.%d - %u (%d), %llums, %d %d %d %d, 0x%x 0x%x\n", i, msm_rpm_stat_data->sleep_info_ex[i].count, msm_rpm_stat_data->sleep_info_ex[i].is_in_sleep_mode, ((uint64_t)msm_rpm_stat_data->sleep_info_ex[i].total_sleep_duration * 1000) >> 15, msm_rpm_stat_data->sleep_info[i].cxo, msm_rpm_stat_data->sleep_info[i].pxo, msm_rpm_stat_data->sleep_info[i].vdd_mem, msm_rpm_stat_data->sleep_info[i].vdd_dig, msm_rpm_stat_data->mpm_trigger[i][0], msm_rpm_stat_data->mpm_trigger[i][1]); #else pr_info("sleep_info_m.%d - %llums, %llums, %d %d %d %d, 0x%x 0x%x\n", i, ((uint64_t)msm_rpm_stat_data->wake_info[i].timestamp * 1000) >> 15, ((uint64_t)msm_rpm_stat_data->sleep_info[i].timestamp * 1000) >> 15, msm_rpm_stat_data->sleep_info[i].cxo, 
msm_rpm_stat_data->sleep_info[i].pxo, msm_rpm_stat_data->sleep_info[i].vdd_mem, msm_rpm_stat_data->sleep_info[i].vdd_dig, msm_rpm_stat_data->mpm_trigger[i][0], msm_rpm_stat_data->mpm_trigger[i][1]); #endif } for (i = 0; i < 2; i++) { msm_rpm_stat_data->mpm_int_status[i] = 0; for (j = 0; j < RPM_MASTER_COUNT; j++) msm_rpm_stat_data->mpm_trigger[j][i] = 0; } } } void msm_rpm_set_suspend_flag(bool app_from_suspend) { if (msm_rpm_stat_data) msm_rpm_stat_data->app_from_suspend = (!!app_from_suspend); } void __init msm_rpm_lpm_init(uint32_t *lpm_setting, uint32_t num) { uint32_t i = 0; for (i = 0; i < num; i++) msm_rpm_write(MSM_RPM_PAGE_STAT, RPM_LPM_PM8058 + i, lpm_setting[i]); } #elif defined(CONFIG_ARCH_MSM8960) void msm_rpm_dump_stat(void) { int i = 0, j = 0; if (msm_rpm_stat_data) { pr_info("%s: %u, %llums, %u, %llums, 0x%x, 0x%x\n", __func__, msm_rpm_stat_data->stats[RPM_STAT_XO_SHUTDOWN_COUNT].value, ((uint64_t)msm_rpm_stat_data->stats[RPM_STAT_XO_SHUTDOWN_TIME].value * 1000) >> 15, msm_rpm_stat_data->stats[RPM_STAT_VDD_MIN_COUNT].value, ((uint64_t)msm_rpm_stat_data->stats[RPM_STAT_VDD_MIN_TIME].value * 1000) >> 15, msm_rpm_stat_data->mpm_int_status[0], msm_rpm_stat_data->mpm_int_status[1]); for (i = 0; i < RPM_MASTER_COUNT; i++) { pr_info("sleep_info_m.%d - %u (%d), %llums, %d %d %d %d, 0x%x 0x%x\n", i, msm_rpm_stat_data->sleep_info[i].count, msm_rpm_stat_data->sleep_info[i].is_sleep_mode, ((uint64_t)msm_rpm_stat_data->sleep_info[i].total_duration * 1000) >> 15, msm_rpm_stat_data->sleep_info[i].cxo, msm_rpm_stat_data->sleep_info[i].pxo, msm_rpm_stat_data->sleep_info[i].vdd_mem, msm_rpm_stat_data->sleep_info[i].vdd_dig, msm_rpm_stat_data->mpm_trigger[i][0], msm_rpm_stat_data->mpm_trigger[i][1]); } for (i = 0; i < 2; i++) { msm_rpm_stat_data->mpm_int_status[i] = 0; for (j = 0; j < RPM_MASTER_COUNT; j++) msm_rpm_stat_data->mpm_trigger[j][i] = 0; } } } void msm_rpm_set_suspend_flag(bool app_from_suspend) { if (msm_rpm_stat_data) { if (app_from_suspend) 
msm_rpm_stat_data->rpm_debug_mode |= RPM_DEBUG_APP_FROM_SUSPEND; else msm_rpm_stat_data->rpm_debug_mode &= !RPM_DEBUG_APP_FROM_SUSPEND; } } static int __init htc_rpm_debug_parser(char *str) { int val; val = simple_strtoul(str, NULL, 0); rpm_debug_enable = val; return 1; } __setup("rpm_debug.enable=", htc_rpm_debug_parser); #else void msm_rpm_dump_stat(void) { } #endif #ifdef CONFIG_ARCH_MSM8X60_LTE int htc_get_XO_Vdd_min_info(uint32_t* XO_count, uint64_t* XO_time, uint32_t* Vddmin_count, uint64_t* Vddmin_time ) { if(!msm_rpm_stat_data) return 0; *XO_count = msm_rpm_stat_data->stats[RPM_STAT_XO_SHUTDOWN_COUNT].value; *XO_time = ((uint64_t)msm_rpm_stat_data->stats[RPM_STAT_XO_SHUTDOWN_TIME].value * 1000) >> 15; *Vddmin_count = msm_rpm_stat_data->stats[RPM_STAT_VDD_MIN_COUNT].value; *Vddmin_time = ((uint64_t)msm_rpm_stat_data->stats[RPM_STAT_VDD_MIN_TIME].value * 1000) >> 15; return 1; } void htc_get_MPSS_timestamp(uint64_t* active_set_time, uint64_t* sleep_set_time) { if(!msm_rpm_stat_data) return; *active_set_time = ((uint64_t)msm_rpm_stat_data->wake_info[1].timestamp * 1000) >> 15; *sleep_set_time = ((uint64_t)msm_rpm_stat_data->sleep_info[1].timestamp * 1000) >> 15; } uint64_t htc_get_MPSS_total_sleep_time(void) { uint32_t *mpm_sleep_tick; uint32_t total; if (!msm_rpm_stat_data) return 0; if (msm_rpm_stat_data->sleep_info_ex[1].is_in_sleep_mode == 1) { mpm_sleep_tick = (void *) (MSM_RPM_MPM_BASE + 0x24); total = ((*mpm_sleep_tick) - (msm_rpm_stat_data->sleep_info[1].timestamp)) + msm_rpm_stat_data->sleep_info_ex[1].total_sleep_duration; return ((uint64_t)total * 1000) >> 15; } else { return ((uint64_t)(msm_rpm_stat_data->sleep_info_ex[1].total_sleep_duration) * 1000) >> 15; } } void htc_get_LPASS_timestamp(uint64_t* active_set_time, uint64_t* sleep_set_time) { if(!msm_rpm_stat_data) return; *active_set_time = ((uint64_t)msm_rpm_stat_data->wake_info[2].timestamp * 1000) >> 15; *sleep_set_time = ((uint64_t)msm_rpm_stat_data->sleep_info[2].timestamp * 1000) >> 15; } 
uint64_t htc_get_LPASS_total_sleep_time(void) { uint32_t *mpm_sleep_tick; uint32_t total; if (!msm_rpm_stat_data) return 0; if (msm_rpm_stat_data->sleep_info_ex[2].is_in_sleep_mode == 1) { mpm_sleep_tick = (void *) (MSM_RPM_MPM_BASE + 0x24); total = ((*mpm_sleep_tick) - (msm_rpm_stat_data->sleep_info[2].timestamp)) + msm_rpm_stat_data->sleep_info_ex[2].total_sleep_duration; return ((uint64_t)total * 1000) >> 15; } else { return ((uint64_t)(msm_rpm_stat_data->sleep_info_ex[2].total_sleep_duration) * 1000) >> 15; } } #endif
gpl-2.0
meizuosc/m75
kernel/fs/btrfs/compression.c
1603
27636
/* * Copyright (C) 2008 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/kernel.h> #include <linux/bio.h> #include <linux/buffer_head.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include <linux/time.h> #include <linux/init.h> #include <linux/string.h> #include <linux/backing-dev.h> #include <linux/mpage.h> #include <linux/swap.h> #include <linux/writeback.h> #include <linux/bit_spinlock.h> #include <linux/slab.h> #include "compat.h" #include "ctree.h" #include "disk-io.h" #include "transaction.h" #include "btrfs_inode.h" #include "volumes.h" #include "ordered-data.h" #include "compression.h" #include "extent_io.h" #include "extent_map.h" struct compressed_bio { /* number of bios pending for this compressed extent */ atomic_t pending_bios; /* the pages with the compressed data on them */ struct page **compressed_pages; /* inode that owns this data */ struct inode *inode; /* starting offset in the inode for our pages */ u64 start; /* number of bytes in the inode we're working on */ unsigned long len; /* number of bytes on disk */ unsigned long compressed_len; /* the compression algorithm for this bio */ int compress_type; /* number of compressed pages in the array */ unsigned long nr_pages; /* IO errors */ int errors; int mirror_num; /* for reads, this is the bio we are copying the data 
into */ struct bio *orig_bio; /* * the start of a variable length array of checksums only * used by reads */ u32 sums; }; static int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start, struct bio_vec *bvec, int vcnt, size_t srclen); static inline int compressed_bio_size(struct btrfs_root *root, unsigned long disk_size) { u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); return sizeof(struct compressed_bio) + ((disk_size + root->sectorsize - 1) / root->sectorsize) * csum_size; } static struct bio *compressed_bio_alloc(struct block_device *bdev, u64 first_byte, gfp_t gfp_flags) { int nr_vecs; nr_vecs = bio_get_nr_vecs(bdev); return btrfs_bio_alloc(bdev, first_byte >> 9, nr_vecs, gfp_flags); } static int check_compressed_csum(struct inode *inode, struct compressed_bio *cb, u64 disk_start) { int ret; struct page *page; unsigned long i; char *kaddr; u32 csum; u32 *cb_sum = &cb->sums; if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) return 0; for (i = 0; i < cb->nr_pages; i++) { page = cb->compressed_pages[i]; csum = ~(u32)0; kaddr = kmap_atomic(page); csum = btrfs_csum_data(kaddr, csum, PAGE_CACHE_SIZE); btrfs_csum_final(csum, (char *)&csum); kunmap_atomic(kaddr); if (csum != *cb_sum) { printk(KERN_INFO "btrfs csum failed ino %llu " "extent %llu csum %u " "wanted %u mirror %d\n", (unsigned long long)btrfs_ino(inode), (unsigned long long)disk_start, csum, *cb_sum, cb->mirror_num); ret = -EIO; goto fail; } cb_sum++; } ret = 0; fail: return ret; } /* when we finish reading compressed pages from the disk, we * decompress them and then run the bio end_io routines on the * decompressed pages (in the inode address space). 
* * This allows the checksumming and other IO error handling routines * to work normally * * The compressed pages are freed here, and it must be run * in process context */ static void end_compressed_bio_read(struct bio *bio, int err) { struct compressed_bio *cb = bio->bi_private; struct inode *inode; struct page *page; unsigned long index; int ret; if (err) cb->errors = 1; /* if there are more bios still pending for this compressed * extent, just exit */ if (!atomic_dec_and_test(&cb->pending_bios)) goto out; inode = cb->inode; ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9); if (ret) goto csum_failed; /* ok, we're the last bio for this extent, lets start * the decompression. */ ret = btrfs_decompress_biovec(cb->compress_type, cb->compressed_pages, cb->start, cb->orig_bio->bi_io_vec, cb->orig_bio->bi_vcnt, cb->compressed_len); csum_failed: if (ret) cb->errors = 1; /* release the compressed pages */ index = 0; for (index = 0; index < cb->nr_pages; index++) { page = cb->compressed_pages[index]; page->mapping = NULL; page_cache_release(page); } /* do io completion on the original bio */ if (cb->errors) { bio_io_error(cb->orig_bio); } else { int bio_index = 0; struct bio_vec *bvec = cb->orig_bio->bi_io_vec; /* * we have verified the checksum already, set page * checked so the end_io handlers know about it */ while (bio_index < cb->orig_bio->bi_vcnt) { SetPageChecked(bvec->bv_page); bvec++; bio_index++; } bio_endio(cb->orig_bio, 0); } /* finally free the cb struct */ kfree(cb->compressed_pages); kfree(cb); out: bio_put(bio); } /* * Clear the writeback bits on all of the file * pages for a compressed write */ static noinline void end_compressed_writeback(struct inode *inode, u64 start, unsigned long ram_size) { unsigned long index = start >> PAGE_CACHE_SHIFT; unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT; struct page *pages[16]; unsigned long nr_pages = end_index - index + 1; int i; int ret; while (nr_pages > 0) { ret = 
find_get_pages_contig(inode->i_mapping, index, min_t(unsigned long, nr_pages, ARRAY_SIZE(pages)), pages); if (ret == 0) { nr_pages -= 1; index += 1; continue; } for (i = 0; i < ret; i++) { end_page_writeback(pages[i]); page_cache_release(pages[i]); } nr_pages -= ret; index += ret; } /* the inode may be gone now */ } /* * do the cleanup once all the compressed pages hit the disk. * This will clear writeback on the file pages and free the compressed * pages. * * This also calls the writeback end hooks for the file pages so that * metadata and checksums can be updated in the file. */ static void end_compressed_bio_write(struct bio *bio, int err) { struct extent_io_tree *tree; struct compressed_bio *cb = bio->bi_private; struct inode *inode; struct page *page; unsigned long index; if (err) cb->errors = 1; /* if there are more bios still pending for this compressed * extent, just exit */ if (!atomic_dec_and_test(&cb->pending_bios)) goto out; /* ok, we're the last bio for this extent, step one is to * call back into the FS and do all the end_io operations */ inode = cb->inode; tree = &BTRFS_I(inode)->io_tree; cb->compressed_pages[0]->mapping = cb->inode->i_mapping; tree->ops->writepage_end_io_hook(cb->compressed_pages[0], cb->start, cb->start + cb->len - 1, NULL, 1); cb->compressed_pages[0]->mapping = NULL; end_compressed_writeback(inode, cb->start, cb->len); /* note, our inode could be gone now */ /* * release the compressed pages, these came from alloc_page and * are not attached to the inode at all */ index = 0; for (index = 0; index < cb->nr_pages; index++) { page = cb->compressed_pages[index]; page->mapping = NULL; page_cache_release(page); } /* finally free the cb struct */ kfree(cb->compressed_pages); kfree(cb); out: bio_put(bio); } /* * worker function to build and submit bios for previously compressed pages. 
* The corresponding pages in the inode should be marked for writeback * and the compressed pages should have a reference on them for dropping * when the IO is complete. * * This also checksums the file bytes and gets things ready for * the end io hooks. */ int btrfs_submit_compressed_write(struct inode *inode, u64 start, unsigned long len, u64 disk_start, unsigned long compressed_len, struct page **compressed_pages, unsigned long nr_pages) { struct bio *bio = NULL; struct btrfs_root *root = BTRFS_I(inode)->root; struct compressed_bio *cb; unsigned long bytes_left; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; int pg_index = 0; struct page *page; u64 first_byte = disk_start; struct block_device *bdev; int ret; int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1)); cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); if (!cb) return -ENOMEM; atomic_set(&cb->pending_bios, 0); cb->errors = 0; cb->inode = inode; cb->start = start; cb->len = len; cb->mirror_num = 0; cb->compressed_pages = compressed_pages; cb->compressed_len = compressed_len; cb->orig_bio = NULL; cb->nr_pages = nr_pages; bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); if(!bio) { kfree(cb); return -ENOMEM; } bio->bi_private = cb; bio->bi_end_io = end_compressed_bio_write; atomic_inc(&cb->pending_bios); /* create and submit bios for the compressed pages */ bytes_left = compressed_len; for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) { page = compressed_pages[pg_index]; page->mapping = inode->i_mapping; if (bio->bi_size) ret = io_tree->ops->merge_bio_hook(WRITE, page, 0, PAGE_CACHE_SIZE, bio, 0); else ret = 0; page->mapping = NULL; if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { bio_get(bio); /* * inc the count before we submit the bio so * we know the end IO handler won't happen before * we inc the count. 
Otherwise, the cb might get * freed before we're done setting it up */ atomic_inc(&cb->pending_bios); ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); BUG_ON(ret); /* -ENOMEM */ if (!skip_sum) { ret = btrfs_csum_one_bio(root, inode, bio, start, 1); BUG_ON(ret); /* -ENOMEM */ } ret = btrfs_map_bio(root, WRITE, bio, 0, 1); BUG_ON(ret); /* -ENOMEM */ bio_put(bio); bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); BUG_ON(!bio); bio->bi_private = cb; bio->bi_end_io = end_compressed_bio_write; bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); } if (bytes_left < PAGE_CACHE_SIZE) { printk("bytes left %lu compress len %lu nr %lu\n", bytes_left, cb->compressed_len, cb->nr_pages); } bytes_left -= PAGE_CACHE_SIZE; first_byte += PAGE_CACHE_SIZE; cond_resched(); } bio_get(bio); ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); BUG_ON(ret); /* -ENOMEM */ if (!skip_sum) { ret = btrfs_csum_one_bio(root, inode, bio, start, 1); BUG_ON(ret); /* -ENOMEM */ } ret = btrfs_map_bio(root, WRITE, bio, 0, 1); BUG_ON(ret); /* -ENOMEM */ bio_put(bio); return 0; } static noinline int add_ra_bio_pages(struct inode *inode, u64 compressed_end, struct compressed_bio *cb) { unsigned long end_index; unsigned long pg_index; u64 last_offset; u64 isize = i_size_read(inode); int ret; struct page *page; unsigned long nr_pages = 0; struct extent_map *em; struct address_space *mapping = inode->i_mapping; struct extent_map_tree *em_tree; struct extent_io_tree *tree; u64 end; int misses = 0; page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page; last_offset = (page_offset(page) + PAGE_CACHE_SIZE); em_tree = &BTRFS_I(inode)->extent_tree; tree = &BTRFS_I(inode)->io_tree; if (isize == 0) return 0; end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; while (last_offset < compressed_end) { pg_index = last_offset >> PAGE_CACHE_SHIFT; if (pg_index > end_index) break; rcu_read_lock(); page = radix_tree_lookup(&mapping->page_tree, pg_index); rcu_read_unlock(); if (page) { misses++; if (misses > 4) 
break; goto next; } page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS); if (!page) break; if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) { page_cache_release(page); goto next; } end = last_offset + PAGE_CACHE_SIZE - 1; /* * at this point, we have a locked page in the page cache * for these bytes in the file. But, we have to make * sure they map to this compressed extent on disk. */ set_page_extent_mapped(page); lock_extent(tree, last_offset, end); read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, last_offset, PAGE_CACHE_SIZE); read_unlock(&em_tree->lock); if (!em || last_offset < em->start || (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) || (em->block_start >> 9) != cb->orig_bio->bi_sector) { free_extent_map(em); unlock_extent(tree, last_offset, end); unlock_page(page); page_cache_release(page); break; } free_extent_map(em); if (page->index == end_index) { char *userpage; size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1); if (zero_offset) { int zeros; zeros = PAGE_CACHE_SIZE - zero_offset; userpage = kmap_atomic(page); memset(userpage + zero_offset, 0, zeros); flush_dcache_page(page); kunmap_atomic(userpage); } } ret = bio_add_page(cb->orig_bio, page, PAGE_CACHE_SIZE, 0); if (ret == PAGE_CACHE_SIZE) { nr_pages++; page_cache_release(page); } else { unlock_extent(tree, last_offset, end); unlock_page(page); page_cache_release(page); break; } next: last_offset += PAGE_CACHE_SIZE; } return 0; } /* * for a compressed read, the bio we get passed has all the inode pages * in it. We don't actually do IO on those pages but allocate new ones * to hold the compressed pages on disk. 
* * bio->bi_sector points to the compressed extent on disk * bio->bi_io_vec points to all of the inode pages * bio->bi_vcnt is a count of pages * * After the compressed pages are read, we copy the bytes into the * bio we were passed and then call the bio end_io calls */ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, int mirror_num, unsigned long bio_flags) { struct extent_io_tree *tree; struct extent_map_tree *em_tree; struct compressed_bio *cb; struct btrfs_root *root = BTRFS_I(inode)->root; unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; unsigned long compressed_len; unsigned long nr_pages; unsigned long pg_index; struct page *page; struct block_device *bdev; struct bio *comp_bio; u64 cur_disk_byte = (u64)bio->bi_sector << 9; u64 em_len; u64 em_start; struct extent_map *em; int ret = -ENOMEM; int faili = 0; u32 *sums; tree = &BTRFS_I(inode)->io_tree; em_tree = &BTRFS_I(inode)->extent_tree; /* we need the actual starting offset of this extent in the file */ read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, page_offset(bio->bi_io_vec->bv_page), PAGE_CACHE_SIZE); read_unlock(&em_tree->lock); if (!em) return -EIO; compressed_len = em->block_len; cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); if (!cb) goto out; atomic_set(&cb->pending_bios, 0); cb->errors = 0; cb->inode = inode; cb->mirror_num = mirror_num; sums = &cb->sums; cb->start = em->orig_start; em_len = em->len; em_start = em->start; free_extent_map(em); em = NULL; cb->len = uncompressed_len; cb->compressed_len = compressed_len; cb->compress_type = extent_compress_type(bio_flags); cb->orig_bio = bio; nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE; cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); if (!cb->compressed_pages) goto fail1; bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; for (pg_index = 0; pg_index < nr_pages; pg_index++) { cb->compressed_pages[pg_index] = 
alloc_page(GFP_NOFS | __GFP_HIGHMEM); if (!cb->compressed_pages[pg_index]) { faili = pg_index - 1; ret = -ENOMEM; goto fail2; } } faili = nr_pages - 1; cb->nr_pages = nr_pages; add_ra_bio_pages(inode, em_start + em_len, cb); /* include any pages we added in add_ra-bio_pages */ uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; cb->len = uncompressed_len; comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); if (!comp_bio) goto fail2; comp_bio->bi_private = cb; comp_bio->bi_end_io = end_compressed_bio_read; atomic_inc(&cb->pending_bios); for (pg_index = 0; pg_index < nr_pages; pg_index++) { page = cb->compressed_pages[pg_index]; page->mapping = inode->i_mapping; page->index = em_start >> PAGE_CACHE_SHIFT; if (comp_bio->bi_size) ret = tree->ops->merge_bio_hook(READ, page, 0, PAGE_CACHE_SIZE, comp_bio, 0); else ret = 0; page->mapping = NULL; if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { bio_get(comp_bio); ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0); BUG_ON(ret); /* -ENOMEM */ /* * inc the count before we submit the bio so * we know the end IO handler won't happen before * we inc the count. 
Otherwise, the cb might get * freed before we're done setting it up */ atomic_inc(&cb->pending_bios); if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums); BUG_ON(ret); /* -ENOMEM */ } sums += (comp_bio->bi_size + root->sectorsize - 1) / root->sectorsize; ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); if (ret) bio_endio(comp_bio, ret); bio_put(comp_bio); comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); BUG_ON(!comp_bio); comp_bio->bi_private = cb; comp_bio->bi_end_io = end_compressed_bio_read; bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0); } cur_disk_byte += PAGE_CACHE_SIZE; } bio_get(comp_bio); ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0); BUG_ON(ret); /* -ENOMEM */ if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums); BUG_ON(ret); /* -ENOMEM */ } ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); if (ret) bio_endio(comp_bio, ret); bio_put(comp_bio); return 0; fail2: while (faili >= 0) { __free_page(cb->compressed_pages[faili]); faili--; } kfree(cb->compressed_pages); fail1: kfree(cb); out: free_extent_map(em); return ret; } static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES]; static spinlock_t comp_workspace_lock[BTRFS_COMPRESS_TYPES]; static int comp_num_workspace[BTRFS_COMPRESS_TYPES]; static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES]; static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES]; static struct btrfs_compress_op *btrfs_compress_op[] = { &btrfs_zlib_compress, &btrfs_lzo_compress, }; void __init btrfs_init_compress(void) { int i; for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) { INIT_LIST_HEAD(&comp_idle_workspace[i]); spin_lock_init(&comp_workspace_lock[i]); atomic_set(&comp_alloc_workspace[i], 0); init_waitqueue_head(&comp_workspace_wait[i]); } } /* * this finds an available workspace or allocates a new one * ERR_PTR is returned if things go bad. 
*/ static struct list_head *find_workspace(int type) { struct list_head *workspace; int cpus = num_online_cpus(); int idx = type - 1; struct list_head *idle_workspace = &comp_idle_workspace[idx]; spinlock_t *workspace_lock = &comp_workspace_lock[idx]; atomic_t *alloc_workspace = &comp_alloc_workspace[idx]; wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx]; int *num_workspace = &comp_num_workspace[idx]; again: spin_lock(workspace_lock); if (!list_empty(idle_workspace)) { workspace = idle_workspace->next; list_del(workspace); (*num_workspace)--; spin_unlock(workspace_lock); return workspace; } if (atomic_read(alloc_workspace) > cpus) { DEFINE_WAIT(wait); spin_unlock(workspace_lock); prepare_to_wait(workspace_wait, &wait, TASK_UNINTERRUPTIBLE); if (atomic_read(alloc_workspace) > cpus && !*num_workspace) schedule(); finish_wait(workspace_wait, &wait); goto again; } atomic_inc(alloc_workspace); spin_unlock(workspace_lock); workspace = btrfs_compress_op[idx]->alloc_workspace(); if (IS_ERR(workspace)) { atomic_dec(alloc_workspace); wake_up(workspace_wait); } return workspace; } /* * put a workspace struct back on the list or free it if we have enough * idle ones sitting around */ static void free_workspace(int type, struct list_head *workspace) { int idx = type - 1; struct list_head *idle_workspace = &comp_idle_workspace[idx]; spinlock_t *workspace_lock = &comp_workspace_lock[idx]; atomic_t *alloc_workspace = &comp_alloc_workspace[idx]; wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx]; int *num_workspace = &comp_num_workspace[idx]; spin_lock(workspace_lock); if (*num_workspace < num_online_cpus()) { list_add_tail(workspace, idle_workspace); (*num_workspace)++; spin_unlock(workspace_lock); goto wake; } spin_unlock(workspace_lock); btrfs_compress_op[idx]->free_workspace(workspace); atomic_dec(alloc_workspace); wake: smp_mb(); if (waitqueue_active(workspace_wait)) wake_up(workspace_wait); } /* * cleanup function for module exit */ static void 
free_workspaces(void) { struct list_head *workspace; int i; for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) { while (!list_empty(&comp_idle_workspace[i])) { workspace = comp_idle_workspace[i].next; list_del(workspace); btrfs_compress_op[i]->free_workspace(workspace); atomic_dec(&comp_alloc_workspace[i]); } } } /* * given an address space and start/len, compress the bytes. * * pages are allocated to hold the compressed result and stored * in 'pages' * * out_pages is used to return the number of pages allocated. There * may be pages allocated even if we return an error * * total_in is used to return the number of bytes actually read. It * may be smaller then len if we had to exit early because we * ran out of room in the pages array or because we cross the * max_out threshold. * * total_out is used to return the total number of compressed bytes * * max_out tells us the max number of bytes that we're allowed to * stuff into pages */ int btrfs_compress_pages(int type, struct address_space *mapping, u64 start, unsigned long len, struct page **pages, unsigned long nr_dest_pages, unsigned long *out_pages, unsigned long *total_in, unsigned long *total_out, unsigned long max_out) { struct list_head *workspace; int ret; workspace = find_workspace(type); if (IS_ERR(workspace)) return -1; ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping, start, len, pages, nr_dest_pages, out_pages, total_in, total_out, max_out); free_workspace(type, workspace); return ret; } /* * pages_in is an array of pages with compressed data. * * disk_start is the starting logical offset of this array in the file * * bvec is a bio_vec of pages from the file that we want to decompress into * * vcnt is the count of pages in the biovec * * srclen is the number of bytes in pages_in * * The basic idea is that we have a bio that was created by readpages. * The pages in the bio are for the uncompressed data, and they may not * be contiguous. 
They all correspond to the range of bytes covered by * the compressed extent. */ static int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start, struct bio_vec *bvec, int vcnt, size_t srclen) { struct list_head *workspace; int ret; workspace = find_workspace(type); if (IS_ERR(workspace)) return -ENOMEM; ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in, disk_start, bvec, vcnt, srclen); free_workspace(type, workspace); return ret; } /* * a less complex decompression routine. Our compressed data fits in a * single page, and we want to read a single page out of it. * start_byte tells us the offset into the compressed data we're interested in */ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, unsigned long start_byte, size_t srclen, size_t destlen) { struct list_head *workspace; int ret; workspace = find_workspace(type); if (IS_ERR(workspace)) return -ENOMEM; ret = btrfs_compress_op[type-1]->decompress(workspace, data_in, dest_page, start_byte, srclen, destlen); free_workspace(type, workspace); return ret; } void btrfs_exit_compress(void) { free_workspaces(); } /* * Copy uncompressed data from working buffer to pages. * * buf_start is the byte offset we're of the start of our workspace buffer. * * total_out is the last byte of the buffer */ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, unsigned long total_out, u64 disk_start, struct bio_vec *bvec, int vcnt, unsigned long *pg_index, unsigned long *pg_offset) { unsigned long buf_offset; unsigned long current_buf_start; unsigned long start_byte; unsigned long working_bytes = total_out - buf_start; unsigned long bytes; char *kaddr; struct page *page_out = bvec[*pg_index].bv_page; /* * start byte is the first byte of the page we're currently * copying into relative to the start of the compressed data. 
*/ start_byte = page_offset(page_out) - disk_start; /* we haven't yet hit data corresponding to this page */ if (total_out <= start_byte) return 1; /* * the start of the data we care about is offset into * the middle of our working buffer */ if (total_out > start_byte && buf_start < start_byte) { buf_offset = start_byte - buf_start; working_bytes -= buf_offset; } else { buf_offset = 0; } current_buf_start = buf_start; /* copy bytes from the working buffer into the pages */ while (working_bytes > 0) { bytes = min(PAGE_CACHE_SIZE - *pg_offset, PAGE_CACHE_SIZE - buf_offset); bytes = min(bytes, working_bytes); kaddr = kmap_atomic(page_out); memcpy(kaddr + *pg_offset, buf + buf_offset, bytes); if (*pg_index == (vcnt - 1) && *pg_offset == 0) memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes); kunmap_atomic(kaddr); flush_dcache_page(page_out); *pg_offset += bytes; buf_offset += bytes; working_bytes -= bytes; current_buf_start += bytes; /* check if we need to pick another page */ if (*pg_offset == PAGE_CACHE_SIZE) { (*pg_index)++; if (*pg_index >= vcnt) return 0; page_out = bvec[*pg_index].bv_page; *pg_offset = 0; start_byte = page_offset(page_out) - disk_start; /* * make sure our new page is covered by this * working buffer */ if (total_out <= start_byte) return 1; /* * the next page in the biovec might not be adjacent * to the last page, but it might still be found * inside this working buffer. bump our offset pointer */ if (total_out > start_byte && current_buf_start < start_byte) { buf_offset = start_byte - buf_start; working_bytes = total_out - start_byte; current_buf_start = buf_start + buf_offset; } } } return 1; }
gpl-2.0
TheScarastic/kenzo
arch/arm/mach-exynos/dev-audio.c
2115
6177
/* linux/arch/arm/mach-exynos4/dev-audio.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (c) 2010 Samsung Electronics Co. Ltd
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
#include <linux/platform_data/asoc-s3c.h>

#include <plat/gpio-cfg.h>

#include <mach/map.h>
#include <mach/dma.h>
#include <mach/irqs.h>

/* Base of the internal SRAM in the audio subsystem, used for IDMA. */
#define EXYNOS4_AUDSS_INT_MEM	(0x03000000)

/*
 * Mux the GPIO bank belonging to the given I2S controller to its
 * I2S special function.  Returns 0 on success, -EINVAL for an
 * unknown controller id.
 */
static int exynos4_cfg_i2s(struct platform_device *pdev)
{
	/* configure GPIO for i2s port */
	switch (pdev->id) {
	case 0:
		s3c_gpio_cfgpin_range(EXYNOS4_GPZ(0), 7, S3C_GPIO_SFN(2));
		break;
	case 1:
		s3c_gpio_cfgpin_range(EXYNOS4_GPC0(0), 5, S3C_GPIO_SFN(2));
		break;
	case 2:
		s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 5, S3C_GPIO_SFN(4));
		break;
	default:
		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
		return -EINVAL;
	}

	return 0;
}

/* I2Sv5 (primary, 6-channel capable) platform data for I2S0. */
static struct s3c_audio_pdata i2sv5_pdata = {
	.cfg_gpio = exynos4_cfg_i2s,
	.type = {
		.i2s = {
			.quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI
					 | QUIRK_NEED_RSTCLR,
			.idma_addr = EXYNOS4_AUDSS_INT_MEM,
		},
	},
};

static struct resource exynos4_i2s0_resource[] = {
	[0] = DEFINE_RES_MEM(EXYNOS4_PA_I2S0, SZ_256),
	[1] = DEFINE_RES_DMA(DMACH_I2S0_TX),
	[2] = DEFINE_RES_DMA(DMACH_I2S0_RX),
	[3] = DEFINE_RES_DMA(DMACH_I2S0S_TX),
};

struct platform_device exynos4_device_i2s0 = {
	.name = "samsung-i2s",
	.id = 0,
	.num_resources = ARRAY_SIZE(exynos4_i2s0_resource),
	.resource = exynos4_i2s0_resource,
	.dev = {
		.platform_data = &i2sv5_pdata,
	},
};

/* I2Sv3 platform data shared by the secondary I2S1/I2S2 controllers. */
static struct s3c_audio_pdata i2sv3_pdata = {
	.cfg_gpio = exynos4_cfg_i2s,
	.type = {
		.i2s = {
			.quirks = QUIRK_NO_MUXPSR,
		},
	},
};

static struct resource exynos4_i2s1_resource[] = {
	[0] = DEFINE_RES_MEM(EXYNOS4_PA_I2S1, SZ_256),
	[1] = DEFINE_RES_DMA(DMACH_I2S1_TX),
	[2] = DEFINE_RES_DMA(DMACH_I2S1_RX),
};

struct platform_device exynos4_device_i2s1 = {
	.name = "samsung-i2s",
	.id = 1,
	.num_resources = ARRAY_SIZE(exynos4_i2s1_resource),
	.resource = exynos4_i2s1_resource,
	.dev = {
		.platform_data = &i2sv3_pdata,
	},
};

static struct resource exynos4_i2s2_resource[] = {
	[0] = DEFINE_RES_MEM(EXYNOS4_PA_I2S2, SZ_256),
	[1] = DEFINE_RES_DMA(DMACH_I2S2_TX),
	[2] = DEFINE_RES_DMA(DMACH_I2S2_RX),
};

struct platform_device exynos4_device_i2s2 = {
	.name = "samsung-i2s",
	.id = 2,
	.num_resources = ARRAY_SIZE(exynos4_i2s2_resource),
	.resource = exynos4_i2s2_resource,
	.dev = {
		.platform_data = &i2sv3_pdata,
	},
};

/* PCM Controller platform_devices */

/*
 * Mux the GPIO bank of the given PCM controller to its PCM special
 * function.  Returns 0 on success, -EINVAL for an unknown id.
 */
static int exynos4_pcm_cfg_gpio(struct platform_device *pdev)
{
	switch (pdev->id) {
	case 0:
		s3c_gpio_cfgpin_range(EXYNOS4_GPZ(0), 5, S3C_GPIO_SFN(3));
		break;
	case 1:
		s3c_gpio_cfgpin_range(EXYNOS4_GPC0(0), 5, S3C_GPIO_SFN(3));
		break;
	case 2:
		s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 5, S3C_GPIO_SFN(3));
		break;
	default:
		/* was missing the trailing newline, which glued the next
		 * kernel log line onto this message */
		printk(KERN_DEBUG "Invalid PCM Controller number!\n");
		return -EINVAL;
	}

	return 0;
}

static struct s3c_audio_pdata s3c_pcm_pdata = {
	.cfg_gpio = exynos4_pcm_cfg_gpio,
};

static struct resource exynos4_pcm0_resource[] = {
	[0] = DEFINE_RES_MEM(EXYNOS4_PA_PCM0, SZ_256),
	[1] = DEFINE_RES_DMA(DMACH_PCM0_TX),
	[2] = DEFINE_RES_DMA(DMACH_PCM0_RX),
};

struct platform_device exynos4_device_pcm0 = {
	.name = "samsung-pcm",
	.id = 0,
	.num_resources = ARRAY_SIZE(exynos4_pcm0_resource),
	.resource = exynos4_pcm0_resource,
	.dev = {
		.platform_data = &s3c_pcm_pdata,
	},
};

static struct resource exynos4_pcm1_resource[] = {
	[0] = DEFINE_RES_MEM(EXYNOS4_PA_PCM1, SZ_256),
	[1] = DEFINE_RES_DMA(DMACH_PCM1_TX),
	[2] = DEFINE_RES_DMA(DMACH_PCM1_RX),
};

struct platform_device exynos4_device_pcm1 = {
	.name = "samsung-pcm",
	.id = 1,
	.num_resources = ARRAY_SIZE(exynos4_pcm1_resource),
	.resource = exynos4_pcm1_resource,
	.dev = {
		.platform_data = &s3c_pcm_pdata,
	},
};

static struct resource exynos4_pcm2_resource[] = {
	[0] = DEFINE_RES_MEM(EXYNOS4_PA_PCM2, SZ_256),
	[1] = DEFINE_RES_DMA(DMACH_PCM2_TX),
	[2] = DEFINE_RES_DMA(DMACH_PCM2_RX),
};

struct platform_device exynos4_device_pcm2 = {
	.name = "samsung-pcm",
	.id = 2,
	.num_resources = ARRAY_SIZE(exynos4_pcm2_resource),
	.resource = exynos4_pcm2_resource,
	.dev = {
		.platform_data = &s3c_pcm_pdata,
	},
};

/* AC97 Controller platform devices */

/* AC97 shares the GPC0 bank; single fixed pin-mux, no per-id switch. */
static int exynos4_ac97_cfg_gpio(struct platform_device *pdev)
{
	return s3c_gpio_cfgpin_range(EXYNOS4_GPC0(0), 5, S3C_GPIO_SFN(4));
}

static struct resource exynos4_ac97_resource[] = {
	[0] = DEFINE_RES_MEM(EXYNOS4_PA_AC97, SZ_256),
	[1] = DEFINE_RES_DMA(DMACH_AC97_PCMOUT),
	[2] = DEFINE_RES_DMA(DMACH_AC97_PCMIN),
	[3] = DEFINE_RES_DMA(DMACH_AC97_MICIN),
	[4] = DEFINE_RES_IRQ(EXYNOS4_IRQ_AC97),
};

static struct s3c_audio_pdata s3c_ac97_pdata = {
	.cfg_gpio = exynos4_ac97_cfg_gpio,
};

static u64 exynos4_ac97_dmamask = DMA_BIT_MASK(32);

struct platform_device exynos4_device_ac97 = {
	.name = "samsung-ac97",
	.id = -1,
	.num_resources = ARRAY_SIZE(exynos4_ac97_resource),
	.resource = exynos4_ac97_resource,
	.dev = {
		.platform_data = &s3c_ac97_pdata,
		.dma_mask = &exynos4_ac97_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

/* S/PDIF Controller platform_device */

/* S/PDIF uses two pins of the GPC1 bank. */
static int exynos4_spdif_cfg_gpio(struct platform_device *pdev)
{
	s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 2, S3C_GPIO_SFN(4));

	return 0;
}

static struct resource exynos4_spdif_resource[] = {
	[0] = DEFINE_RES_MEM(EXYNOS4_PA_SPDIF, SZ_256),
	[1] = DEFINE_RES_DMA(DMACH_SPDIF),
};

static struct s3c_audio_pdata samsung_spdif_pdata = {
	.cfg_gpio = exynos4_spdif_cfg_gpio,
};

static u64 exynos4_spdif_dmamask = DMA_BIT_MASK(32);

struct platform_device exynos4_device_spdif = {
	.name = "samsung-spdif",
	.id = -1,
	.num_resources = ARRAY_SIZE(exynos4_spdif_resource),
	.resource = exynos4_spdif_resource,
	.dev = {
		.platform_data = &samsung_spdif_pdata,
		.dma_mask = &exynos4_spdif_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};
gpl-2.0
dzo/android_kernel_huawei_u8800-1
drivers/input/touchscreen/eeti_ts.c
2883
7887
/*
 * Touch Screen driver for EETI's I2C connected touch screen panels
 *   Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
 *
 * See EETI's software guide for the protocol specification:
 *   http://home.eeti.com.tw/web20/eg/guide.htm
 *
 * Based on migor_ts.c
 *   Copyright (c) 2008 Magnus Damm
 *   Copyright (c) 2007 Ujjwal Pande <ujjwal@kenati.com>
 *
 * This file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This file is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/timer.h>
#include <linux/gpio.h>
#include <linux/input/eeti_ts.h>
#include <linux/slab.h>

/* Module parameters to mirror reported coordinates for rotated panels. */
static bool flip_x;
module_param(flip_x, bool, 0644);
MODULE_PARM_DESC(flip_x, "flip x coordinate");

static bool flip_y;
module_param(flip_y, bool, 0644);
MODULE_PARM_DESC(flip_y, "flip y coordinate");

/* Per-device state; work deferred from the hard IRQ does the I2C reads. */
struct eeti_ts_priv {
	struct i2c_client *client;
	struct input_dev *input;
	struct work_struct work;
	struct mutex mutex;	/* serializes eeti_ts_read() */
	int irq, irq_active_high;
};

#define EETI_TS_BITDEPTH	(11)
#define EETI_MAXVAL		((1 << (EETI_TS_BITDEPTH + 1)) - 1)

/* Bit layout of the first byte of a report packet. */
#define REPORT_BIT_PRESSED	(1 << 0)
#define REPORT_BIT_AD0		(1 << 1)
#define REPORT_BIT_AD1		(1 << 2)
#define REPORT_BIT_HAS_PRESSURE	(1 << 6)
#define REPORT_RES_BITS(v)	(((v) >> 1) + EETI_TS_BITDEPTH)

/*
 * The controller keeps its IRQ line asserted while it has pending data;
 * sample the GPIO behind the IRQ to see if more packets must be drained.
 * NOTE(review): relies on irq_to_gpio(), which only exists on platforms
 * providing that mapping.
 */
static inline int eeti_ts_irq_active(struct eeti_ts_priv *priv)
{
	return gpio_get_value(irq_to_gpio(priv->irq)) == priv->irq_active_high;
}

/*
 * Workqueue handler: drain report packets over I2C while the IRQ line is
 * active (bounded by 100 iterations), decode the last one and push it to
 * the input layer.
 */
static void eeti_ts_read(struct work_struct *work)
{
	char buf[6];
	unsigned int x, y, res, pressed, to = 100;
	struct eeti_ts_priv *priv =
		container_of(work, struct eeti_ts_priv, work);

	mutex_lock(&priv->mutex);

	while (eeti_ts_irq_active(priv) && --to)
		i2c_master_recv(priv->client, buf, sizeof(buf));

	if (!to) {
		dev_err(&priv->client->dev,
			"unable to clear IRQ - line stuck?\n");
		goto out;
	}

	/* drop non-report packets */
	if (!(buf[0] & 0x80))
		goto out;

	pressed = buf[0] & REPORT_BIT_PRESSED;
	res = REPORT_RES_BITS(buf[0] & (REPORT_BIT_AD0 | REPORT_BIT_AD1));
	x = buf[2] | (buf[1] << 8);
	y = buf[4] | (buf[3] << 8);

	/* fix the range to 11 bits */
	x >>= res - EETI_TS_BITDEPTH;
	y >>= res - EETI_TS_BITDEPTH;

	if (flip_x)
		x = EETI_MAXVAL - x;

	if (flip_y)
		y = EETI_MAXVAL - y;

	if (buf[0] & REPORT_BIT_HAS_PRESSURE)
		input_report_abs(priv->input, ABS_PRESSURE, buf[5]);

	input_report_abs(priv->input, ABS_X, x);
	input_report_abs(priv->input, ABS_Y, y);
	input_report_key(priv->input, BTN_TOUCH, !!pressed);
	input_sync(priv->input);

out:
	mutex_unlock(&priv->mutex);
}

/* Hard IRQ handler: defer to process context, I2C cannot run atomically. */
static irqreturn_t eeti_ts_isr(int irq, void *dev_id)
{
	struct eeti_ts_priv *priv = dev_id;

	/* postpone I2C transactions as we are atomic */
	schedule_work(&priv->work);

	return IRQ_HANDLED;
}

static void eeti_ts_start(struct eeti_ts_priv *priv)
{
	enable_irq(priv->irq);

	/* Read the events once to arm the IRQ */
	eeti_ts_read(&priv->work);
}

static void eeti_ts_stop(struct eeti_ts_priv *priv)
{
	disable_irq(priv->irq);
	cancel_work_sync(&priv->work);
}

/* input core open(): first user enables the device. */
static int eeti_ts_open(struct input_dev *dev)
{
	struct eeti_ts_priv *priv = input_get_drvdata(dev);

	eeti_ts_start(priv);

	return 0;
}

/* input core close(): last user disables the device. */
static void eeti_ts_close(struct input_dev *dev)
{
	struct eeti_ts_priv *priv = input_get_drvdata(dev);

	eeti_ts_stop(priv);
}

/*
 * Bind to the I2C client: allocate state and input device, register the
 * input device, then request the IRQ.  The device offers no way to probe
 * its presence, so registration is done blindly.
 */
static int __devinit eeti_ts_probe(struct i2c_client *client,
				   const struct i2c_device_id *idp)
{
	struct eeti_ts_platform_data *pdata;
	struct eeti_ts_priv *priv;
	struct input_dev *input;
	unsigned int irq_flags;
	int err = -ENOMEM;

	/*
	 * In contrast to what's described in the datasheet, there seems
	 * to be no way of probing the presence of that device using I2C
	 * commands. So we need to blindly believe it is there, and wait
	 * for interrupts to occur.
	 */

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&client->dev, "failed to allocate driver data\n");
		goto err0;
	}

	mutex_init(&priv->mutex);
	input = input_allocate_device();

	if (!input) {
		dev_err(&client->dev, "Failed to allocate input device.\n");
		goto err1;
	}

	input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
	input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);

	input_set_abs_params(input, ABS_X, 0, EETI_MAXVAL, 0, 0);
	input_set_abs_params(input, ABS_Y, 0, EETI_MAXVAL, 0, 0);
	input_set_abs_params(input, ABS_PRESSURE, 0, 0xff, 0, 0);

	input->name = client->name;
	input->id.bustype = BUS_I2C;
	input->dev.parent = &client->dev;
	input->open = eeti_ts_open;
	input->close = eeti_ts_close;

	priv->client = client;
	priv->input = input;
	priv->irq = client->irq;

	pdata = client->dev.platform_data;

	if (pdata)
		priv->irq_active_high = pdata->irq_active_high;

	irq_flags = priv->irq_active_high ?
		IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;

	INIT_WORK(&priv->work, eeti_ts_read);
	i2c_set_clientdata(client, priv);
	input_set_drvdata(input, priv);

	err = input_register_device(input);
	if (err)
		goto err1;

	err = request_irq(priv->irq, eeti_ts_isr, irq_flags,
			  client->name, priv);
	if (err) {
		dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
		goto err2;
	}

	/*
	 * Disable the device for now. It will be enabled once the
	 * input device is opened.
	 */
	eeti_ts_stop(priv);

	device_init_wakeup(&client->dev, 0);
	return 0;

err2:
	input_unregister_device(input);
	input = NULL; /* so we don't try to free it below */
err1:
	input_free_device(input);
	kfree(priv);
err0:
	return err;
}

/* Unbind: release the IRQ, unregister the input device, free state. */
static int __devexit eeti_ts_remove(struct i2c_client *client)
{
	struct eeti_ts_priv *priv = i2c_get_clientdata(client);

	free_irq(priv->irq, priv);
	/*
	 * eeti_ts_stop() leaves IRQ disabled. We need to re-enable it
	 * so that device still works if we reload the driver.
	 */
	enable_irq(priv->irq);

	input_unregister_device(priv->input);
	kfree(priv);

	return 0;
}

#ifdef CONFIG_PM
/* Stop the device if open; optionally arm the IRQ as a wakeup source. */
static int eeti_ts_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct eeti_ts_priv *priv = i2c_get_clientdata(client);
	struct input_dev *input_dev = priv->input;

	mutex_lock(&input_dev->mutex);

	if (input_dev->users)
		eeti_ts_stop(priv);

	mutex_unlock(&input_dev->mutex);

	if (device_may_wakeup(&client->dev))
		enable_irq_wake(priv->irq);

	return 0;
}

/* Undo suspend: disarm wakeup and restart the device if it was open. */
static int eeti_ts_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct eeti_ts_priv *priv = i2c_get_clientdata(client);
	struct input_dev *input_dev = priv->input;

	if (device_may_wakeup(&client->dev))
		disable_irq_wake(priv->irq);

	mutex_lock(&input_dev->mutex);

	if (input_dev->users)
		eeti_ts_start(priv);

	mutex_unlock(&input_dev->mutex);

	return 0;
}

static SIMPLE_DEV_PM_OPS(eeti_ts_pm, eeti_ts_suspend, eeti_ts_resume);
#endif

static const struct i2c_device_id eeti_ts_id[] = {
	{ "eeti_ts", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, eeti_ts_id);

static struct i2c_driver eeti_ts_driver = {
	.driver = {
		.name = "eeti_ts",
#ifdef CONFIG_PM
		.pm = &eeti_ts_pm,
#endif
	},
	.probe = eeti_ts_probe,
	.remove = __devexit_p(eeti_ts_remove),
	.id_table = eeti_ts_id,
};

module_i2c_driver(eeti_ts_driver);

MODULE_DESCRIPTION("EETI Touchscreen driver");
MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
MODULE_LICENSE("GPL");
gpl-2.0
zaclimon/impulse_ypg1
arch/arm/mach-ux500/devices-db8500.c
2883
5801
/*
 * Copyright (C) ST-Ericsson SA 2010
 *
 * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl022.h>

#include <plat/ste_dma40.h>

#include <mach/hardware.h>
#include <mach/setup.h>

#include "ste-dma40-db8500.h"

/* Register block, LCPA memory and IRQ of the DMA40 controller. */
static struct resource dma40_resources[] = {
	[0] = {
		.start = U8500_DMA_BASE,
		.end   = U8500_DMA_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
		.name  = "base",
	},
	[1] = {
		.start = U8500_DMA_LCPA_BASE,
		.end   = U8500_DMA_LCPA_BASE + 2 * SZ_1K - 1,
		.flags = IORESOURCE_MEM,
		.name  = "lcpa",
	},
	[2] = {
		.start = IRQ_DB8500_DMA,
		.end   = IRQ_DB8500_DMA,
		.flags = IORESOURCE_IRQ,
	}
};

/* Default configuration for physical memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
	.mode = STEDMA40_MODE_PHYSICAL,
	.dir = STEDMA40_MEM_TO_MEM,

	.src_info.data_width = STEDMA40_BYTE_WIDTH,
	.src_info.psize = STEDMA40_PSIZE_PHY_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
	.dst_info.psize = STEDMA40_PSIZE_PHY_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/* Default configuration for logical memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_log = {
	.dir = STEDMA40_MEM_TO_MEM,

	.src_info.data_width = STEDMA40_BYTE_WIDTH,
	.src_info.psize = STEDMA40_PSIZE_LOG_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/*
 * Mapping between destination event lines and physical device address.
 * The event line is tied to a device and therefore the address is constant.
 * When the address comes from a primecell it will be configured in runtime
 * and we set the address to -1 as a placeholder.
 */
static const dma_addr_t dma40_tx_map[DB8500_DMA_NR_DEV] = {
	/* MUSB - these will be runtime-reconfigured */
	[DB8500_DMA_DEV39_USB_OTG_OEP_8] = -1,
	[DB8500_DMA_DEV16_USB_OTG_OEP_7_15] = -1,
	[DB8500_DMA_DEV17_USB_OTG_OEP_6_14] = -1,
	[DB8500_DMA_DEV18_USB_OTG_OEP_5_13] = -1,
	[DB8500_DMA_DEV19_USB_OTG_OEP_4_12] = -1,
	[DB8500_DMA_DEV36_USB_OTG_OEP_3_11] = -1,
	[DB8500_DMA_DEV37_USB_OTG_OEP_2_10] = -1,
	[DB8500_DMA_DEV38_USB_OTG_OEP_1_9] = -1,
	/* PrimeCells - run-time configured */
	[DB8500_DMA_DEV0_SPI0_TX] = -1,
	[DB8500_DMA_DEV1_SD_MMC0_TX] = -1,
	[DB8500_DMA_DEV2_SD_MMC1_TX] = -1,
	[DB8500_DMA_DEV3_SD_MMC2_TX] = -1,
	[DB8500_DMA_DEV8_SSP0_TX] = -1,
	[DB8500_DMA_DEV9_SSP1_TX] = -1,
	[DB8500_DMA_DEV11_UART2_TX] = -1,
	[DB8500_DMA_DEV12_UART1_TX] = -1,
	[DB8500_DMA_DEV13_UART0_TX] = -1,
	[DB8500_DMA_DEV28_SD_MM2_TX] = -1,
	[DB8500_DMA_DEV29_SD_MM0_TX] = -1,
	[DB8500_DMA_DEV32_SD_MM1_TX] = -1,
	[DB8500_DMA_DEV33_SPI2_TX] = -1,
	[DB8500_DMA_DEV35_SPI1_TX] = -1,
	[DB8500_DMA_DEV40_SPI3_TX] = -1,
	[DB8500_DMA_DEV41_SD_MM3_TX] = -1,
	[DB8500_DMA_DEV42_SD_MM4_TX] = -1,
	[DB8500_DMA_DEV43_SD_MM5_TX] = -1,
};

/* Mapping between source event lines and physical device address */
static const dma_addr_t dma40_rx_map[DB8500_DMA_NR_DEV] = {
	/* MUSB - these will be runtime-reconfigured */
	[DB8500_DMA_DEV39_USB_OTG_IEP_8] = -1,
	[DB8500_DMA_DEV16_USB_OTG_IEP_7_15] = -1,
	[DB8500_DMA_DEV17_USB_OTG_IEP_6_14] = -1,
	[DB8500_DMA_DEV18_USB_OTG_IEP_5_13] = -1,
	[DB8500_DMA_DEV19_USB_OTG_IEP_4_12] = -1,
	[DB8500_DMA_DEV36_USB_OTG_IEP_3_11] = -1,
	[DB8500_DMA_DEV37_USB_OTG_IEP_2_10] = -1,
	[DB8500_DMA_DEV38_USB_OTG_IEP_1_9] = -1,
	/* PrimeCells */
	[DB8500_DMA_DEV0_SPI0_RX] = -1,
	[DB8500_DMA_DEV1_SD_MMC0_RX] = -1,
	[DB8500_DMA_DEV2_SD_MMC1_RX] = -1,
	[DB8500_DMA_DEV3_SD_MMC2_RX] = -1,
	[DB8500_DMA_DEV8_SSP0_RX] = -1,
	[DB8500_DMA_DEV9_SSP1_RX] = -1,
	[DB8500_DMA_DEV11_UART2_RX] = -1,
	[DB8500_DMA_DEV12_UART1_RX] = -1,
	[DB8500_DMA_DEV13_UART0_RX] = -1,
	[DB8500_DMA_DEV28_SD_MM2_RX] = -1,
	[DB8500_DMA_DEV29_SD_MM0_RX] = -1,
	[DB8500_DMA_DEV32_SD_MM1_RX] = -1,
	[DB8500_DMA_DEV33_SPI2_RX] = -1,
	[DB8500_DMA_DEV35_SPI1_RX] = -1,
	[DB8500_DMA_DEV40_SPI3_RX] = -1,
	[DB8500_DMA_DEV41_SD_MM3_RX] = -1,
	[DB8500_DMA_DEV42_SD_MM4_RX] = -1,
	[DB8500_DMA_DEV43_SD_MM5_RX] = -1,
};

/* Reserved event lines for memcpy only */
static int dma40_memcpy_event[] = {
	DB8500_DMA_MEMCPY_TX_0,
	DB8500_DMA_MEMCPY_TX_1,
	DB8500_DMA_MEMCPY_TX_2,
	DB8500_DMA_MEMCPY_TX_3,
	DB8500_DMA_MEMCPY_TX_4,
	DB8500_DMA_MEMCPY_TX_5,
};

static struct stedma40_platform_data dma40_plat_data = {
	.dev_len = DB8500_DMA_NR_DEV,
	.dev_rx = dma40_rx_map,
	.dev_tx = dma40_tx_map,
	.memcpy = dma40_memcpy_event,
	.memcpy_len = ARRAY_SIZE(dma40_memcpy_event),
	.memcpy_conf_phy = &dma40_memcpy_conf_phy,
	.memcpy_conf_log = &dma40_memcpy_conf_log,
	.disabled_channels = {-1},
};

struct platform_device u8500_dma40_device = {
	.dev = {
		.platform_data = &dma40_plat_data,
	},
	.name = "dma40",
	.id = 0,
	.num_resources = ARRAY_SIZE(dma40_resources),
	.resource = dma40_resources
};

/*
 * On U8500 ED silicon the DMA40 lives at a different address and has no
 * memcpy event lines; patch the static tables accordingly at boot.
 */
void dma40_u8500ed_fixup(void)
{
	dma40_plat_data.memcpy = NULL;
	dma40_plat_data.memcpy_len = 0;
	dma40_resources[0].start = U8500_DMA_BASE_ED;
	dma40_resources[0].end = U8500_DMA_BASE_ED + SZ_4K - 1;
	dma40_resources[1].start = U8500_DMA_LCPA_BASE_ED;
	dma40_resources[1].end = U8500_DMA_LCPA_BASE_ED + 2 * SZ_1K - 1;
}

/* SKE keypad controller resources and device. */
struct resource keypad_resources[] = {
	[0] = {
		.start = U8500_SKE_BASE,
		.end = U8500_SKE_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_DB8500_KB,
		.end = IRQ_DB8500_KB,
		.flags = IORESOURCE_IRQ,
	},
};

struct platform_device u8500_ske_keypad_device = {
	.name = "nmk-ske-keypad",
	.id = -1,
	.num_resources = ARRAY_SIZE(keypad_resources),
	.resource = keypad_resources,
};
gpl-2.0
ktoonsez/MIUIv4-I777
arch/x86/mm/init_32.c
2883
25398
/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/olpc_ofw.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/page_types.h>
#include <asm/init.h>

unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

bool __read_mostly __vmalloc_start_set = false;

/*
 * Hand out one zeroed page from the early page-table allocation window
 * (pgt_buf_end..pgt_buf_top); panics when the window is exhausted.
 */
static __init void *alloc_low_page(void)
{
	unsigned long pfn = pgt_buf_end++;
	void *adr;

	if (pfn >= pgt_buf_top)
		panic("alloc_low_page: ran out of memory");

	adr = __va(pfn * PAGE_SIZE);
	clear_page(adr);
	return adr;
}

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		/* before/after bootmem determines which allocator to use */
		if (after_bootmem)
			pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
		else
			pmd_table = (pmd_t *)alloc_low_page();
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

		if (after_bootmem) {
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
			if (!page_table)
				page_table =
				(pte_t *)alloc_bootmem_pages(PAGE_SIZE);
		} else
			page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

/* Ensure the pmd covering vaddr exists in swapper_pg_dir and return it. */
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	int pgd_idx = pgd_index(vaddr);
	int pmd_idx = pmd_index(vaddr);

	return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

/* Ensure the pte covering vaddr exists and return it. */
pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	int pte_idx = pte_index(vaddr);
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return one_page_table_init(pmd) + pte_idx;
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					   unsigned long vaddr, pte_t *lastpte)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Something (early fixmap) may already have put a pte
	 * page here, which causes the page table allocation
	 * to become nonlinear. Attempt to fix it, and if it
	 * is still nonlinear then we have to bug.
	 */
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
	    && ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start
		|| (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) {
		pte_t *newpte;
		int i;

		BUG_ON(after_bootmem);
		/* re-allocate from the linear window and migrate the ptes */
		newpte = alloc_low_page();
		for (i = 0; i < PTRS_PER_PTE; i++)
			set_pte(newpte + i, pte[i]);

		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
		__flush_tlb_all();

		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
		pte = newpte;
	}
	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
	       && vaddr > fix_to_virt(FIX_KMAP_END)
	       && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
	return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
							pmd++, pmd_idx++) {
			pte = page_table_kmap_check(one_page_table_init(pmd),
			                            pmd, vaddr, pte);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

/* True when addr lies in the kernel text (incl. init text) range. */
static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask)
{
	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
	unsigned long last_map_addr = end;
	unsigned long start_pfn, end_pfn;
	pgd_t *pgd_base = swapper_pg_dir;
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m, pages_4k;
	int mapping_iter;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	/*
	 * First iteration will setup identity mapping using large/small pages
	 * based on use_pse, with other attributes same as set by
	 * the early code in head_32.S
	 *
	 * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
	 * as desired for the kernel identity mapping.
	 *
	 * This two pass mechanism conforms to the TLB app note which says:
	 *
	 *     "Software should not write to a paging-structure entry in a way
	 *      that would change, for any linear address, both the page size
	 *      and either the page frame or attributes."
	 */
	mapping_iter = 1;

	if (!cpu_has_pse)
		use_pse = 0;

repeat:
	pages_2m = pages_4k = 0;
	pfn = start_pfn;
	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);

		if (pfn >= end_pfn)
			continue;
#ifdef CONFIG_X86_PAE
		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
		pmd += pmd_idx;
#else
		pmd_idx = 0;
#endif
		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (use_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute + _PAGE_PSE.
				 */
				pgprot_t init_prot =
					__pgprot(PTE_IDENT_ATTR |
						 _PAGE_PSE);

				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				if (mapping_iter == 1)
					set_pmd(pmd, pfn_pmd(pfn, init_prot));
				else
					set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
			pte += pte_ofs;
			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute.
				 */
				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				if (mapping_iter == 1) {
					set_pte(pte, pfn_pte(pfn, init_prot));
					last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
				} else
					set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
	if (mapping_iter == 1) {
		/*
		 * update direct mapping page count only in the first
		 * iteration.
		 */
		update_page_count(PG_LEVEL_2M, pages_2m);
		update_page_count(PG_LEVEL_4K, pages_4k);

		/*
		 * local global flush tlb, which will flush the previous
		 * mappings present in both small and large page TLB's.
		 */
		__flush_tlb_all();

		/*
		 * Second iteration will set the actual desired PTE attributes.
		 */
		mapping_iter = 2;
		goto repeat;
	}
	return last_map_addr;
}

pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

#ifdef CONFIG_HIGHMEM
/* Build the page tables for the persistent kmap area and cache its pte. */
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

/* Release one highmem page to the buddy allocator. */
static void __init add_one_highpage_init(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

void __init add_highpages_with_active_regions(int nid,
			 unsigned long start_pfn, unsigned long end_pfn)
{
	struct range *range;
	int nr_range;
	int i;

	nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn);

	for (i = 0; i < nr_range; i++) {
		struct page *page;
		int node_pfn;

		for (node_pfn = range[i].start; node_pfn < range[i].end;
		     node_pfn++) {
			if (!pfn_valid(node_pfn))
				continue;
			page = pfn_to_page(node_pfn);
			add_one_highpage_init(page);
		}
	}
}
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_setup_start(pgd_t *base)
{
	unsigned long pfn, va;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table:
	 */
	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S.  The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();
}

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
	if (!arg)
		return -EINVAL;

	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
	return 0;
}
early_param("highmem", parse_highmem);

#define MSG_HIGHMEM_TOO_BIG \
	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
void __init lowmem_pfn_init(void)
{
	/* max_low_pfn is 0, we already have early_res support */
	max_low_pfn = max_pfn;

	if (highmem_pages == -1)
		highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
	if (highmem_pages >= max_pfn) {
		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
		highmem_pages = 0;
	}
	if (highmem_pages) {
		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn -= highmem_pages;
	}
#else
	if (highmem_pages)
		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
	"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
void __init highmem_pfn_init(void)
{
	max_low_pfn = MAXMEM_PFN;

	if (highmem_pages == -1)
		highmem_pages = max_pfn - MAXMEM_PFN;

	if (highmem_pages + MAXMEM_PFN < max_pfn)
		max_pfn = MAXMEM_PFN + highmem_pages;

	if (highmem_pages + MAXMEM_PFN > max_pfn) {
		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
			pages_to_mb(max_pfn - MAXMEM_PFN),
			pages_to_mb(highmem_pages));
		highmem_pages = 0;
	}
#ifndef CONFIG_HIGHMEM
	/* Maximum memory usable is what is directly addressable */
	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
	if (max_pfn > MAX_NONPAE_PFN)
		printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
	else
		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
	max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
	if (max_pfn > MAX_NONPAE_PFN) {
		max_pfn = MAX_NONPAE_PFN;
		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
	}
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
	/* it could update max_pfn */
	if (max_pfn <= MAXMEM_PFN)
		lowmem_pfn_init();
	else
		highmem_pfn_init();
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	memblock_x86_register_active_regions(0, 0, highend_pfn);
	sparse_memory_present_with_active_regions(0);
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
	num_physpages = highend_pfn;
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	memblock_x86_register_active_regions(0, 0, max_low_pfn);
	sparse_memory_present_with_active_regions(0);
	num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
	max_mapnr = 
num_physpages; #endif __vmalloc_start_set = true; printk(KERN_NOTICE "%ldMB LOWMEM available.\n", pages_to_mb(max_low_pfn)); setup_bootmem_allocator(); } #endif /* !CONFIG_NEED_MULTIPLE_NODES */ static void __init zone_sizes_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); #ifdef CONFIG_ZONE_DMA max_zone_pfns[ZONE_DMA] = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; #endif max_zone_pfns[ZONE_NORMAL] = max_low_pfn; #ifdef CONFIG_HIGHMEM max_zone_pfns[ZONE_HIGHMEM] = highend_pfn; #endif free_area_init_nodes(max_zone_pfns); } void __init setup_bootmem_allocator(void) { printk(KERN_INFO " mapped low ram: 0 - %08lx\n", max_pfn_mapped<<PAGE_SHIFT); printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT); after_bootmem = 1; } /* * paging_init() sets up the page tables - note that the first 8MB are * already mapped by head.S. * * This routines also unmaps the page at virtual kernel address 0, so * that we can trap those pesky NULL-reference errors in the kernel. */ void __init paging_init(void) { pagetable_init(); __flush_tlb_all(); kmap_init(); /* * NOTE: at this point the bootmem allocator is fully available. */ olpc_dt_build_devicetree(); sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); zone_sizes_init(); } /* * Test if the WP bit works in supervisor mode. It isn't supported on 386's * and also on some strange 486's. All 586+'s are OK. This used to involve * black magic jumps to work around some nasty CPU bugs, but fortunately the * switch to using exceptions got rid of all that. 
*/ static void __init test_wp_bit(void) { printk(KERN_INFO "Checking if this processor honours the WP bit even in supervisor mode..."); /* Any page-aligned address will do, the test is non-destructive */ __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY); boot_cpu_data.wp_works_ok = do_test_wp_bit(); clear_fixmap(FIX_WP_TEST); if (!boot_cpu_data.wp_works_ok) { printk(KERN_CONT "No.\n"); #ifdef CONFIG_X86_WP_WORKS_OK panic( "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!"); #endif } else { printk(KERN_CONT "Ok.\n"); } } void __init mem_init(void) { int codesize, reservedpages, datasize, initsize; int tmp; pci_iommu_alloc(); #ifdef CONFIG_FLATMEM BUG_ON(!mem_map); #endif /* this will put all low memory onto the freelists */ totalram_pages += free_all_bootmem(); reservedpages = 0; for (tmp = 0; tmp < max_low_pfn; tmp++) /* * Only count reserved RAM pages: */ if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp))) reservedpages++; set_highmem_pages_init(); codesize = (unsigned long) &_etext - (unsigned long) &_text; datasize = (unsigned long) &_edata - (unsigned long) &_etext; initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " "%dk reserved, %dk data, %dk init, %ldk highmem)\n", nr_free_pages() << (PAGE_SHIFT-10), num_physpages << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), datasize >> 10, initsize >> 10, totalhigh_pages << (PAGE_SHIFT-10)); printk(KERN_INFO "virtual kernel memory layout:\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_HIGHMEM " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" #endif " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" " .init : 0x%08lx - 0x%08lx (%4ld kB)\n" " .data : 0x%08lx - 0x%08lx (%4ld kB)\n" " .text : 0x%08lx - 0x%08lx (%4ld kB)\n", FIXADDR_START, FIXADDR_TOP, (FIXADDR_TOP - FIXADDR_START) >> 10, #ifdef CONFIG_HIGHMEM PKMAP_BASE, 
PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, (LAST_PKMAP*PAGE_SIZE) >> 10, #endif VMALLOC_START, VMALLOC_END, (VMALLOC_END - VMALLOC_START) >> 20, (unsigned long)__va(0), (unsigned long)high_memory, ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20, (unsigned long)&__init_begin, (unsigned long)&__init_end, ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10, (unsigned long)&_etext, (unsigned long)&_edata, ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, (unsigned long)&_text, (unsigned long)&_etext, ((unsigned long)&_etext - (unsigned long)&_text) >> 10); /* * Check boundaries twice: Some fundamental inconsistencies can * be detected at build time already. */ #define __FIXADDR_TOP (-PAGE_SIZE) #ifdef CONFIG_HIGHMEM BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE); #endif #define high_memory (-128UL << 20) BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END); #undef high_memory #undef __FIXADDR_TOP #ifdef CONFIG_HIGHMEM BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); BUG_ON(VMALLOC_END > PKMAP_BASE); #endif BUG_ON(VMALLOC_START >= VMALLOC_END); BUG_ON((unsigned long)high_memory > VMALLOC_START); if (boot_cpu_data.wp_works_ok < 0) test_wp_bit(); } #ifdef CONFIG_MEMORY_HOTPLUG int arch_add_memory(int nid, u64 start, u64 size) { struct pglist_data *pgdata = NODE_DATA(nid); struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM; unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; return __add_pages(nid, zone, start_pfn, nr_pages); } #endif /* * This function cannot be __init, since exceptions don't work in that * section. Put this after the callers, so that it cannot be inlined. 
*/ static noinline int do_test_wp_bit(void) { char tmp_reg; int flag; __asm__ __volatile__( " movb %0, %1 \n" "1: movb %1, %0 \n" " xorl %2, %2 \n" "2: \n" _ASM_EXTABLE(1b,2b) :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)), "=q" (tmp_reg), "=r" (flag) :"2" (1) :"memory"); return flag; } #ifdef CONFIG_DEBUG_RODATA const int rodata_test_data = 0xC3; EXPORT_SYMBOL_GPL(rodata_test_data); int kernel_set_to_readonly __read_mostly; void set_kernel_text_rw(void) { unsigned long start = PFN_ALIGN(_text); unsigned long size = PFN_ALIGN(_etext) - start; if (!kernel_set_to_readonly) return; pr_debug("Set kernel text: %lx - %lx for read write\n", start, start+size); set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT); } void set_kernel_text_ro(void) { unsigned long start = PFN_ALIGN(_text); unsigned long size = PFN_ALIGN(_etext) - start; if (!kernel_set_to_readonly) return; pr_debug("Set kernel text: %lx - %lx for read only\n", start, start+size); set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); } static void mark_nxdata_nx(void) { /* * When this called, init has already been executed and released, * so everything past _etext should be NX. */ unsigned long start = PFN_ALIGN(_etext); /* * This comes from is_kernel_text upper limit. 
Also HPAGE where used: */ unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start; if (__supported_pte_mask & _PAGE_NX) printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10); set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT); } void mark_rodata_ro(void) { unsigned long start = PFN_ALIGN(_text); unsigned long size = PFN_ALIGN(_etext) - start; set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); printk(KERN_INFO "Write protecting the kernel text: %luk\n", size >> 10); kernel_set_to_readonly = 1; #ifdef CONFIG_CPA_DEBUG printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", start, start+size); set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT); printk(KERN_INFO "Testing CPA: write protecting again\n"); set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT); #endif start += size; size = (unsigned long)__end_rodata - start; set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", size >> 10); rodata_test(); #ifdef CONFIG_CPA_DEBUG printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size); set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT); printk(KERN_INFO "Testing CPA: write protecting again\n"); set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); #endif mark_nxdata_nx(); } #endif
gpl-2.0
aapav01/samsung_ms013g_SWA
drivers/net/wireless/libra/qcomwlan7x27a_pwrif.c
3395
5467
/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio.h>
#include <mach/rpc_pmapp.h>
#include <linux/err.h>
#include <linux/qcomwlan7x27a_pwrif.h>
#include <linux/module.h>

/* GPIO driving the WLAN chip's external power-on-reset / deep-sleep line */
#define WLAN_GPIO_EXT_POR_N 134

/* Client identifier passed to the pmapp RPC voting services */
static const char *id = "WLAN";

/*
 * Indices into vreg_info[].  The array order below defines the rail
 * power-up sequence; WLAN_VREG_L6 is the last rail, after which the A0
 * clock is switched to pin control (see chip_power_qrf6285()).
 */
enum {
	WLAN_VREG_L17 = 0,
	WLAN_VREG_S3,
	WLAN_VREG_TCXO_L11,
	WLAN_VREG_L19,
	WLAN_VREG_L5,
	WLAN_VREG_L6
};

/* Per-rail bookkeeping for the WLAN power supplies. */
struct wlan_vreg_info {
	const char *vreg_id;	/* regulator supply name */
	unsigned int level_min;	/* minimum voltage, in uV */
	unsigned int level_max;	/* maximum voltage, in uV */
	unsigned int pmapp_id;	/* id used for pmapp pin-control votes */
	unsigned int is_vreg_pin_controlled; /* nonzero: vote rail into pin ctrl */
	struct regulator *reg;	/* handle cached by qrf6285_init_regs() */
};

static struct wlan_vreg_info vreg_info[] = {
	{"bt", 3050000, 3050000, 21, 1, NULL},
	{"msme1", 1800000, 1800000, 2, 0, NULL},
	{"wlan_tcx0", 1800000, 1800000, 53, 0, NULL},
	{"wlan4", 1200000, 1200000, 23, 0, NULL},
	{"wlan2", 1350000, 1350000, 9, 1, NULL},
	{"wlan3", 1200000, 1200000, 10, 1, NULL},
};

/*
 * Look up every WLAN regulator in one bulk operation and cache the
 * consumer handles in vreg_info[].reg.
 *
 * Returns 0 on success or the regulator_bulk_get() error code.  On
 * failure no handles are cached (regulator_bulk_get releases any
 * partially acquired supplies itself).
 */
static int qrf6285_init_regs(void)
{
	struct regulator_bulk_data regs[ARRAY_SIZE(vreg_info)];
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		regs[i].supply = vreg_info[i].vreg_id;
		regs[i].min_uV = vreg_info[i].level_min;
		regs[i].max_uV = vreg_info[i].level_max;
	}

	rc = regulator_bulk_get(NULL, ARRAY_SIZE(regs), regs);
	if (rc) {
		pr_err("%s: could not get regulators: %d\n", __func__, rc);
		goto out;
	}

	for (i = 0; i < ARRAY_SIZE(regs); i++)
		vreg_info[i].reg = regs[i].consumer;

	return 0;
out:
	return rc;
}

/*
 * Power the QRF6285 WLAN chip on or off.
 *
 * @on: true to power up, false to power down.
 *
 * Power-up order: acquire and assert the external POR GPIO, vote the A0
 * clock always-on, then walk vreg_info[] setting voltage, enabling each
 * rail and (where flagged) voting it into pin control; after the last
 * rail (WLAN_VREG_L6) the A0 clock vote is downgraded to pin control so
 * it follows WLAN_CLK_PWR_REQ.  Power-down reverses the GPIO/clock
 * steps and then disables every rail, treating individual failures as
 * best-effort (logged, not fatal).
 *
 * On a power-up failure the goto chain below unwinds in reverse order:
 * already-enabled rails are disabled, the clock vote is dropped and the
 * GPIO is released.  Returns 0 on success or a negative errno.
 */
int chip_power_qrf6285(bool on)
{
	/* One-time lazy regulator lookup, kept across calls. */
	static bool init_done;
	int rc = 0, index = 0;

	if (unlikely(!init_done)) {
		rc = qrf6285_init_regs();
		if (rc)
			return rc;
		else
			init_done = true;
	}

	if (on) {
		rc = gpio_request(WLAN_GPIO_EXT_POR_N, "WLAN_DEEP_SLEEP_N");
		if (rc) {
			pr_err("WLAN reset GPIO %d request failed %d\n",
					WLAN_GPIO_EXT_POR_N, rc);
			goto fail;
		}
		/* Drive POR high: take the chip out of deep sleep/reset. */
		rc = gpio_direction_output(WLAN_GPIO_EXT_POR_N, 1);
		if (rc < 0) {
			pr_err("WLAN reset GPIO %d set direction failed %d\n",
					WLAN_GPIO_EXT_POR_N, rc);
			goto fail_gpio_dir_out;
		}
		/* Keep the A0 reference clock running during bring-up. */
		rc = pmapp_clock_vote(id, PMAPP_CLOCK_ID_A0,
					PMAPP_CLOCK_VOTE_ON);
		if (rc) {
			pr_err("%s: Configuring A0 to always"
					" on failed %d\n", __func__, rc);
			goto clock_vote_fail;
		}
	} else {
		/* Put the chip back into reset before dropping rails. */
		gpio_set_value_cansleep(WLAN_GPIO_EXT_POR_N, 0);
		rc = gpio_direction_input(WLAN_GPIO_EXT_POR_N);
		if (rc) {
			pr_err("WLAN reset GPIO %d set direction failed %d\n",
					WLAN_GPIO_EXT_POR_N, rc);
		}
		gpio_free(WLAN_GPIO_EXT_POR_N);
		rc = pmapp_clock_vote(id, PMAPP_CLOCK_ID_A0,
					PMAPP_CLOCK_VOTE_OFF);
		if (rc) {
			pr_err("%s: Configuring A0 to turn OFF"
					" failed %d\n", __func__, rc);
		}
	}

	/* Walk the rails in declaration order (power-up sequence). */
	for (index = 0; index < ARRAY_SIZE(vreg_info); index++) {
		if (on) {
			rc = regulator_set_voltage(vreg_info[index].reg,
						vreg_info[index].level_min,
						vreg_info[index].level_max);
			if (rc) {
				pr_err("%s:%s set voltage failed %d\n",
					__func__, vreg_info[index].vreg_id, rc);
				goto vreg_fail;
			}
			rc = regulator_enable(vreg_info[index].reg);
			if (rc) {
				pr_err("%s:%s vreg enable failed %d\n",
					__func__, vreg_info[index].vreg_id, rc);
				goto vreg_fail;
			}
			if (vreg_info[index].is_vreg_pin_controlled) {
				rc = pmapp_vreg_lpm_pincntrl_vote(id,
						 vreg_info[index].pmapp_id,
						 PMAPP_CLOCK_ID_A0, 1);
				if (rc) {
					pr_err("%s:%s pmapp_vreg_lpm_pincntrl"
						" for enable failed %d\n",
						__func__,
						vreg_info[index].vreg_id, rc);
					goto vreg_clock_vote_fail;
				}
			}
			/*At this point CLK_PWR_REQ is high*/
			if (WLAN_VREG_L6 == index) {
				/*
				 * Configure A0 clock to be slave to
				 * WLAN_CLK_PWR_REQ `
				 */
				rc = pmapp_clock_vote(id, PMAPP_CLOCK_ID_A0,
						PMAPP_CLOCK_VOTE_PIN_CTRL);
				if (rc) {
					pr_err("%s: Configuring A0 to Pin"
						" controllable failed %d\n",
							 __func__, rc);
					goto vreg_clock_vote_fail;
				}
			}
		} else {
			/* Power-down: best-effort, failures only logged. */
			if (vreg_info[index].is_vreg_pin_controlled) {
				rc = pmapp_vreg_lpm_pincntrl_vote(id,
						 vreg_info[index].pmapp_id,
						 PMAPP_CLOCK_ID_A0, 0);
				if (rc) {
					pr_err("%s:%s pmapp_vreg_lpm_pincntrl"
						" for disable failed %d\n",
						__func__,
						vreg_info[index].vreg_id, rc);
				}
			}
			rc = regulator_disable(vreg_info[index].reg);
			if (rc) {
				pr_err("%s:%s vreg disable failed %d\n",
					__func__, vreg_info[index].vreg_id, rc);
			}
		}
	}
	return 0;

vreg_fail:
	/* Rail 'index' itself never came up; start unwinding below it. */
	index--;
vreg_clock_vote_fail:
	while (index >= 0) {
		rc = regulator_disable(vreg_info[index].reg);
		if (rc) {
			pr_err("%s:%s vreg disable failed %d\n",
				__func__, vreg_info[index].vreg_id, rc);
		}
		index--;
	}
	/*
	 * NOTE(review): in practice this unwind path is only reached when
	 * on == true; the !on check presumably guards against skipping the
	 * GPIO/clock teardown that the power-down branch already did.
	 */
	if (!on)
		goto fail;
clock_vote_fail:
	gpio_set_value_cansleep(WLAN_GPIO_EXT_POR_N, 0);
	rc = gpio_direction_input(WLAN_GPIO_EXT_POR_N);
	if (rc) {
		pr_err("WLAN reset GPIO %d set direction failed %d\n",
				WLAN_GPIO_EXT_POR_N, rc);
	}
fail_gpio_dir_out:
	gpio_free(WLAN_GPIO_EXT_POR_N);
fail:
	return rc;
}
EXPORT_SYMBOL(chip_power_qrf6285);
gpl-2.0
DDiaz007/shooter-gb-mr-cdma
arch/mips/loongson/lemote-2f/ec_kb3310b.c
3395
3019
/* * Basic KB3310B Embedded Controller support for the YeeLoong 2F netbook * * Copyright (C) 2008 Lemote Inc. * Author: liujl <liujl@lemote.com>, 2008-04-20 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/module.h> #include <linux/spinlock.h> #include <linux/delay.h> #include "ec_kb3310b.h" static DEFINE_SPINLOCK(index_access_lock); static DEFINE_SPINLOCK(port_access_lock); unsigned char ec_read(unsigned short addr) { unsigned char value; unsigned long flags; spin_lock_irqsave(&index_access_lock, flags); outb((addr & 0xff00) >> 8, EC_IO_PORT_HIGH); outb((addr & 0x00ff), EC_IO_PORT_LOW); value = inb(EC_IO_PORT_DATA); spin_unlock_irqrestore(&index_access_lock, flags); return value; } EXPORT_SYMBOL_GPL(ec_read); void ec_write(unsigned short addr, unsigned char val) { unsigned long flags; spin_lock_irqsave(&index_access_lock, flags); outb((addr & 0xff00) >> 8, EC_IO_PORT_HIGH); outb((addr & 0x00ff), EC_IO_PORT_LOW); outb(val, EC_IO_PORT_DATA); /* flush the write action */ inb(EC_IO_PORT_DATA); spin_unlock_irqrestore(&index_access_lock, flags); return; } EXPORT_SYMBOL_GPL(ec_write); /* * This function is used for EC command writes and corresponding status queries. 
*/ int ec_query_seq(unsigned char cmd) { int timeout; unsigned char status; unsigned long flags; int ret = 0; spin_lock_irqsave(&port_access_lock, flags); /* make chip goto reset mode */ udelay(EC_REG_DELAY); outb(cmd, EC_CMD_PORT); udelay(EC_REG_DELAY); /* check if the command is received by ec */ timeout = EC_CMD_TIMEOUT; status = inb(EC_STS_PORT); while (timeout-- && (status & (1 << 1))) { status = inb(EC_STS_PORT); udelay(EC_REG_DELAY); } spin_unlock_irqrestore(&port_access_lock, flags); if (timeout <= 0) { printk(KERN_ERR "%s: deadable error : timeout...\n", __func__); ret = -EINVAL; } else printk(KERN_INFO "(%x/%d)ec issued command %d status : 0x%x\n", timeout, EC_CMD_TIMEOUT - timeout, cmd, status); return ret; } EXPORT_SYMBOL_GPL(ec_query_seq); /* * Send query command to EC to get the proper event number */ int ec_query_event_num(void) { return ec_query_seq(CMD_GET_EVENT_NUM); } EXPORT_SYMBOL(ec_query_event_num); /* * Get event number from EC * * NOTE: This routine must follow the query_event_num function in the * interrupt. */ int ec_get_event_num(void) { int timeout = 100; unsigned char value; unsigned char status; udelay(EC_REG_DELAY); status = inb(EC_STS_PORT); udelay(EC_REG_DELAY); while (timeout-- && !(status & (1 << 0))) { status = inb(EC_STS_PORT); udelay(EC_REG_DELAY); } if (timeout <= 0) { pr_info("%s: get event number timeout.\n", __func__); return -EINVAL; } value = inb(EC_DAT_PORT); udelay(EC_REG_DELAY); return value; } EXPORT_SYMBOL(ec_get_event_num);
gpl-2.0
sudosurootdev/kernel_lge_msm8974
net/atm/common.c
3395
21922
/* net/atm/common.c - ATM sockets (common part for PVC and SVC) */ /* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include <linux/module.h> #include <linux/kmod.h> #include <linux/net.h> /* struct socket, struct proto_ops */ #include <linux/atm.h> /* ATM stuff */ #include <linux/atmdev.h> #include <linux/socket.h> /* SOL_SOCKET */ #include <linux/errno.h> /* error codes */ #include <linux/capability.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/time.h> /* struct timeval */ #include <linux/skbuff.h> #include <linux/bitops.h> #include <linux/init.h> #include <linux/slab.h> #include <net/sock.h> /* struct sock */ #include <linux/uaccess.h> #include <linux/poll.h> #include <linux/atomic.h> #include "resources.h" /* atm_find_dev */ #include "common.h" /* prototypes */ #include "protocols.h" /* atm_init_<transport> */ #include "addr.h" /* address registry */ #include "signaling.h" /* for WAITING and sigd_attach */ struct hlist_head vcc_hash[VCC_HTABLE_SIZE]; EXPORT_SYMBOL(vcc_hash); DEFINE_RWLOCK(vcc_sklist_lock); EXPORT_SYMBOL(vcc_sklist_lock); static ATOMIC_NOTIFIER_HEAD(atm_dev_notify_chain); static void __vcc_insert_socket(struct sock *sk) { struct atm_vcc *vcc = atm_sk(sk); struct hlist_head *head = &vcc_hash[vcc->vci & (VCC_HTABLE_SIZE - 1)]; sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1); sk_add_node(sk, head); } void vcc_insert_socket(struct sock *sk) { write_lock_irq(&vcc_sklist_lock); __vcc_insert_socket(sk); write_unlock_irq(&vcc_sklist_lock); } EXPORT_SYMBOL(vcc_insert_socket); static void vcc_remove_socket(struct sock *sk) { write_lock_irq(&vcc_sklist_lock); sk_del_node_init(sk); write_unlock_irq(&vcc_sklist_lock); } static struct sk_buff *alloc_tx(struct atm_vcc *vcc, unsigned int size) { struct sk_buff *skb; struct sock *sk = sk_atm(vcc); if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) { pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n", 
sk_wmem_alloc_get(sk), size, sk->sk_sndbuf); return NULL; } while (!(skb = alloc_skb(size, GFP_KERNEL))) schedule(); pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize); atomic_add(skb->truesize, &sk->sk_wmem_alloc); return skb; } static void vcc_sock_destruct(struct sock *sk) { if (atomic_read(&sk->sk_rmem_alloc)) printk(KERN_DEBUG "%s: rmem leakage (%d bytes) detected.\n", __func__, atomic_read(&sk->sk_rmem_alloc)); if (atomic_read(&sk->sk_wmem_alloc)) printk(KERN_DEBUG "%s: wmem leakage (%d bytes) detected.\n", __func__, atomic_read(&sk->sk_wmem_alloc)); } static void vcc_def_wakeup(struct sock *sk) { struct socket_wq *wq; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) wake_up(&wq->wait); rcu_read_unlock(); } static inline int vcc_writable(struct sock *sk) { struct atm_vcc *vcc = atm_sk(sk); return (vcc->qos.txtp.max_sdu + atomic_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf; } static void vcc_write_space(struct sock *sk) { struct socket_wq *wq; rcu_read_lock(); if (vcc_writable(sk)) { wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) wake_up_interruptible(&wq->wait); sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); } rcu_read_unlock(); } static struct proto vcc_proto = { .name = "VCC", .owner = THIS_MODULE, .obj_size = sizeof(struct atm_vcc), }; int vcc_create(struct net *net, struct socket *sock, int protocol, int family) { struct sock *sk; struct atm_vcc *vcc; sock->sk = NULL; if (sock->type == SOCK_STREAM) return -EINVAL; sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sk->sk_state_change = vcc_def_wakeup; sk->sk_write_space = vcc_write_space; vcc = atm_sk(sk); vcc->dev = NULL; memset(&vcc->local, 0, sizeof(struct sockaddr_atmsvc)); memset(&vcc->remote, 0, sizeof(struct sockaddr_atmsvc)); vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */ atomic_set(&sk->sk_wmem_alloc, 1); atomic_set(&sk->sk_rmem_alloc, 0); vcc->push = NULL; vcc->pop = NULL; vcc->push_oam = NULL; 
vcc->vpi = vcc->vci = 0; /* no VCI/VPI yet */ vcc->atm_options = vcc->aal_options = 0; sk->sk_destruct = vcc_sock_destruct; return 0; } static void vcc_destroy_socket(struct sock *sk) { struct atm_vcc *vcc = atm_sk(sk); struct sk_buff *skb; set_bit(ATM_VF_CLOSE, &vcc->flags); clear_bit(ATM_VF_READY, &vcc->flags); if (vcc->dev) { if (vcc->dev->ops->close) vcc->dev->ops->close(vcc); if (vcc->push) vcc->push(vcc, NULL); /* atmarpd has no push */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { atm_return(vcc, skb->truesize); kfree_skb(skb); } module_put(vcc->dev->ops->owner); atm_dev_put(vcc->dev); } vcc_remove_socket(sk); } int vcc_release(struct socket *sock) { struct sock *sk = sock->sk; if (sk) { lock_sock(sk); vcc_destroy_socket(sock->sk); release_sock(sk); sock_put(sk); } return 0; } void vcc_release_async(struct atm_vcc *vcc, int reply) { struct sock *sk = sk_atm(vcc); set_bit(ATM_VF_CLOSE, &vcc->flags); sk->sk_shutdown |= RCV_SHUTDOWN; sk->sk_err = -reply; clear_bit(ATM_VF_WAITING, &vcc->flags); sk->sk_state_change(sk); } EXPORT_SYMBOL(vcc_release_async); void vcc_process_recv_queue(struct atm_vcc *vcc) { struct sk_buff_head queue, *rq; struct sk_buff *skb, *tmp; unsigned long flags; __skb_queue_head_init(&queue); rq = &sk_atm(vcc)->sk_receive_queue; spin_lock_irqsave(&rq->lock, flags); skb_queue_splice_init(rq, &queue); spin_unlock_irqrestore(&rq->lock, flags); skb_queue_walk_safe(&queue, skb, tmp) { __skb_unlink(skb, &queue); vcc->push(vcc, skb); } } EXPORT_SYMBOL(vcc_process_recv_queue); void atm_dev_signal_change(struct atm_dev *dev, char signal) { pr_debug("%s signal=%d dev=%p number=%d dev->signal=%d\n", __func__, signal, dev, dev->number, dev->signal); /* atm driver sending invalid signal */ WARN_ON(signal < ATM_PHY_SIG_LOST || signal > ATM_PHY_SIG_FOUND); if (dev->signal == signal) return; /* no change */ dev->signal = signal; atomic_notifier_call_chain(&atm_dev_notify_chain, signal, dev); } EXPORT_SYMBOL(atm_dev_signal_change); void 
atm_dev_release_vccs(struct atm_dev *dev) { int i; write_lock_irq(&vcc_sklist_lock); for (i = 0; i < VCC_HTABLE_SIZE; i++) { struct hlist_head *head = &vcc_hash[i]; struct hlist_node *node, *tmp; struct sock *s; struct atm_vcc *vcc; sk_for_each_safe(s, node, tmp, head) { vcc = atm_sk(s); if (vcc->dev == dev) { vcc_release_async(vcc, -EPIPE); sk_del_node_init(s); } } } write_unlock_irq(&vcc_sklist_lock); } EXPORT_SYMBOL(atm_dev_release_vccs); static int adjust_tp(struct atm_trafprm *tp, unsigned char aal) { int max_sdu; if (!tp->traffic_class) return 0; switch (aal) { case ATM_AAL0: max_sdu = ATM_CELL_SIZE-1; break; case ATM_AAL34: max_sdu = ATM_MAX_AAL34_PDU; break; default: pr_warning("AAL problems ... (%d)\n", aal); /* fall through */ case ATM_AAL5: max_sdu = ATM_MAX_AAL5_PDU; } if (!tp->max_sdu) tp->max_sdu = max_sdu; else if (tp->max_sdu > max_sdu) return -EINVAL; if (!tp->max_cdv) tp->max_cdv = ATM_MAX_CDV; return 0; } static int check_ci(const struct atm_vcc *vcc, short vpi, int vci) { struct hlist_head *head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)]; struct hlist_node *node; struct sock *s; struct atm_vcc *walk; sk_for_each(s, node, head) { walk = atm_sk(s); if (walk->dev != vcc->dev) continue; if (test_bit(ATM_VF_ADDR, &walk->flags) && walk->vpi == vpi && walk->vci == vci && ((walk->qos.txtp.traffic_class != ATM_NONE && vcc->qos.txtp.traffic_class != ATM_NONE) || (walk->qos.rxtp.traffic_class != ATM_NONE && vcc->qos.rxtp.traffic_class != ATM_NONE))) return -EADDRINUSE; } /* allow VCCs with same VPI/VCI iff they don't collide on TX/RX (but we may refuse such sharing for other reasons, e.g. 
if protocol requires to have both channels) */ return 0; } static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci) { static short p; /* poor man's per-device cache */ static int c; short old_p; int old_c; int err; if (*vpi != ATM_VPI_ANY && *vci != ATM_VCI_ANY) { err = check_ci(vcc, *vpi, *vci); return err; } /* last scan may have left values out of bounds for current device */ if (*vpi != ATM_VPI_ANY) p = *vpi; else if (p >= 1 << vcc->dev->ci_range.vpi_bits) p = 0; if (*vci != ATM_VCI_ANY) c = *vci; else if (c < ATM_NOT_RSV_VCI || c >= 1 << vcc->dev->ci_range.vci_bits) c = ATM_NOT_RSV_VCI; old_p = p; old_c = c; do { if (!check_ci(vcc, p, c)) { *vpi = p; *vci = c; return 0; } if (*vci == ATM_VCI_ANY) { c++; if (c >= 1 << vcc->dev->ci_range.vci_bits) c = ATM_NOT_RSV_VCI; } if ((c == ATM_NOT_RSV_VCI || *vci != ATM_VCI_ANY) && *vpi == ATM_VPI_ANY) { p++; if (p >= 1 << vcc->dev->ci_range.vpi_bits) p = 0; } } while (old_p != p || old_c != c); return -EADDRINUSE; } static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi, int vci) { struct sock *sk = sk_atm(vcc); int error; if ((vpi != ATM_VPI_UNSPEC && vpi != ATM_VPI_ANY && vpi >> dev->ci_range.vpi_bits) || (vci != ATM_VCI_UNSPEC && vci != ATM_VCI_ANY && vci >> dev->ci_range.vci_bits)) return -EINVAL; if (vci > 0 && vci < ATM_NOT_RSV_VCI && !capable(CAP_NET_BIND_SERVICE)) return -EPERM; error = -ENODEV; if (!try_module_get(dev->ops->owner)) return error; vcc->dev = dev; write_lock_irq(&vcc_sklist_lock); if (test_bit(ATM_DF_REMOVED, &dev->flags) || (error = find_ci(vcc, &vpi, &vci))) { write_unlock_irq(&vcc_sklist_lock); goto fail_module_put; } vcc->vpi = vpi; vcc->vci = vci; __vcc_insert_socket(sk); write_unlock_irq(&vcc_sklist_lock); switch (vcc->qos.aal) { case ATM_AAL0: error = atm_init_aal0(vcc); vcc->stats = &dev->stats.aal0; break; case ATM_AAL34: error = atm_init_aal34(vcc); vcc->stats = &dev->stats.aal34; break; case ATM_NO_AAL: /* ATM_AAL5 is also used in the "0 for default" case 
*/ vcc->qos.aal = ATM_AAL5; /* fall through */ case ATM_AAL5: error = atm_init_aal5(vcc); vcc->stats = &dev->stats.aal5; break; default: error = -EPROTOTYPE; } if (!error) error = adjust_tp(&vcc->qos.txtp, vcc->qos.aal); if (!error) error = adjust_tp(&vcc->qos.rxtp, vcc->qos.aal); if (error) goto fail; pr_debug("VCC %d.%d, AAL %d\n", vpi, vci, vcc->qos.aal); pr_debug(" TX: %d, PCR %d..%d, SDU %d\n", vcc->qos.txtp.traffic_class, vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_sdu); pr_debug(" RX: %d, PCR %d..%d, SDU %d\n", vcc->qos.rxtp.traffic_class, vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_sdu); if (dev->ops->open) { error = dev->ops->open(vcc); if (error) goto fail; } return 0; fail: vcc_remove_socket(sk); fail_module_put: module_put(dev->ops->owner); /* ensure we get dev module ref count correct */ vcc->dev = NULL; return error; } int vcc_connect(struct socket *sock, int itf, short vpi, int vci) { struct atm_dev *dev; struct atm_vcc *vcc = ATM_SD(sock); int error; pr_debug("(vpi %d, vci %d)\n", vpi, vci); if (sock->state == SS_CONNECTED) return -EISCONN; if (sock->state != SS_UNCONNECTED) return -EINVAL; if (!(vpi || vci)) return -EINVAL; if (vpi != ATM_VPI_UNSPEC && vci != ATM_VCI_UNSPEC) clear_bit(ATM_VF_PARTIAL, &vcc->flags); else if (test_bit(ATM_VF_PARTIAL, &vcc->flags)) return -EINVAL; pr_debug("(TX: cl %d,bw %d-%d,sdu %d; " "RX: cl %d,bw %d-%d,sdu %d,AAL %s%d)\n", vcc->qos.txtp.traffic_class, vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_sdu, vcc->qos.rxtp.traffic_class, vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_sdu, vcc->qos.aal == ATM_AAL5 ? "" : vcc->qos.aal == ATM_AAL0 ? "" : " ??? code ", vcc->qos.aal == ATM_AAL0 ? 
0 : vcc->qos.aal); if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) return -EBADFD; if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS || vcc->qos.rxtp.traffic_class == ATM_ANYCLASS) return -EINVAL; if (likely(itf != ATM_ITF_ANY)) { dev = try_then_request_module(atm_dev_lookup(itf), "atm-device-%d", itf); } else { dev = NULL; mutex_lock(&atm_dev_mutex); if (!list_empty(&atm_devs)) { dev = list_entry(atm_devs.next, struct atm_dev, dev_list); atm_dev_hold(dev); } mutex_unlock(&atm_dev_mutex); } if (!dev) return -ENODEV; error = __vcc_connect(vcc, dev, vpi, vci); if (error) { atm_dev_put(dev); return error; } if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) set_bit(ATM_VF_PARTIAL, &vcc->flags); if (test_bit(ATM_VF_READY, &ATM_SD(sock)->flags)) sock->state = SS_CONNECTED; return 0; } int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct atm_vcc *vcc; struct sk_buff *skb; int copied, error = -EINVAL; if (sock->state != SS_CONNECTED) return -ENOTCONN; /* only handle MSG_DONTWAIT and MSG_PEEK */ if (flags & ~(MSG_DONTWAIT | MSG_PEEK)) return -EOPNOTSUPP; vcc = ATM_SD(sock); if (test_bit(ATM_VF_RELEASED, &vcc->flags) || test_bit(ATM_VF_CLOSE, &vcc->flags) || !test_bit(ATM_VF_READY, &vcc->flags)) return 0; skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error); if (!skb) return error; copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (error) return error; sock_recv_ts_and_drops(msg, sk, skb); if (!(flags & MSG_PEEK)) { pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize); atm_return(vcc, skb->truesize); } skb_free_datagram(sk, skb); return copied; } int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len) { struct sock *sk = sock->sk; DEFINE_WAIT(wait); struct atm_vcc *vcc; struct sk_buff *skb; int eff, error; const void __user *buff; 
int size; lock_sock(sk); if (sock->state != SS_CONNECTED) { error = -ENOTCONN; goto out; } if (m->msg_name) { error = -EISCONN; goto out; } if (m->msg_iovlen != 1) { error = -ENOSYS; /* fix this later @@@ */ goto out; } buff = m->msg_iov->iov_base; size = m->msg_iov->iov_len; vcc = ATM_SD(sock); if (test_bit(ATM_VF_RELEASED, &vcc->flags) || test_bit(ATM_VF_CLOSE, &vcc->flags) || !test_bit(ATM_VF_READY, &vcc->flags)) { error = -EPIPE; send_sig(SIGPIPE, current, 0); goto out; } if (!size) { error = 0; goto out; } if (size < 0 || size > vcc->qos.txtp.max_sdu) { error = -EMSGSIZE; goto out; } eff = (size+3) & ~3; /* align to word boundary */ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); error = 0; while (!(skb = alloc_tx(vcc, eff))) { if (m->msg_flags & MSG_DONTWAIT) { error = -EAGAIN; break; } schedule(); if (signal_pending(current)) { error = -ERESTARTSYS; break; } if (test_bit(ATM_VF_RELEASED, &vcc->flags) || test_bit(ATM_VF_CLOSE, &vcc->flags) || !test_bit(ATM_VF_READY, &vcc->flags)) { error = -EPIPE; send_sig(SIGPIPE, current, 0); break; } prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } finish_wait(sk_sleep(sk), &wait); if (error) goto out; skb->dev = NULL; /* for paths shared with net_device interfaces */ ATM_SKB(skb)->atm_options = vcc->atm_options; if (copy_from_user(skb_put(skb, size), buff, size)) { kfree_skb(skb); error = -EFAULT; goto out; } if (eff != size) memset(skb->data + size, 0, eff-size); error = vcc->dev->ops->send(vcc, skb); error = error ? error : size; out: release_sock(sk); return error; } unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct atm_vcc *vcc; unsigned int mask; sock_poll_wait(file, sk_sleep(sk), wait); mask = 0; vcc = ATM_SD(sock); /* exceptional events */ if (sk->sk_err) mask = POLLERR; if (test_bit(ATM_VF_RELEASED, &vcc->flags) || test_bit(ATM_VF_CLOSE, &vcc->flags)) mask |= POLLHUP; /* readable? 
*/ if (!skb_queue_empty(&sk->sk_receive_queue)) mask |= POLLIN | POLLRDNORM; /* writable? */ if (sock->state == SS_CONNECTING && test_bit(ATM_VF_WAITING, &vcc->flags)) return mask; if (vcc->qos.txtp.traffic_class != ATM_NONE && vcc_writable(sk)) mask |= POLLOUT | POLLWRNORM | POLLWRBAND; return mask; } static int atm_change_qos(struct atm_vcc *vcc, struct atm_qos *qos) { int error; /* * Don't let the QoS change the already connected AAL type nor the * traffic class. */ if (qos->aal != vcc->qos.aal || qos->rxtp.traffic_class != vcc->qos.rxtp.traffic_class || qos->txtp.traffic_class != vcc->qos.txtp.traffic_class) return -EINVAL; error = adjust_tp(&qos->txtp, qos->aal); if (!error) error = adjust_tp(&qos->rxtp, qos->aal); if (error) return error; if (!vcc->dev->ops->change_qos) return -EOPNOTSUPP; if (sk_atm(vcc)->sk_family == AF_ATMPVC) return vcc->dev->ops->change_qos(vcc, qos, ATM_MF_SET); return svc_change_qos(vcc, qos); } static int check_tp(const struct atm_trafprm *tp) { /* @@@ Should be merged with adjust_tp */ if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS) return 0; if (tp->traffic_class != ATM_UBR && !tp->min_pcr && !tp->pcr && !tp->max_pcr) return -EINVAL; if (tp->min_pcr == ATM_MAX_PCR) return -EINVAL; if (tp->min_pcr && tp->max_pcr && tp->max_pcr != ATM_MAX_PCR && tp->min_pcr > tp->max_pcr) return -EINVAL; /* * We allow pcr to be outside [min_pcr,max_pcr], because later * adjustment may still push it in the valid range. 
*/ return 0; } static int check_qos(const struct atm_qos *qos) { int error; if (!qos->txtp.traffic_class && !qos->rxtp.traffic_class) return -EINVAL; if (qos->txtp.traffic_class != qos->rxtp.traffic_class && qos->txtp.traffic_class && qos->rxtp.traffic_class && qos->txtp.traffic_class != ATM_ANYCLASS && qos->rxtp.traffic_class != ATM_ANYCLASS) return -EINVAL; error = check_tp(&qos->txtp); if (error) return error; return check_tp(&qos->rxtp); } int vcc_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct atm_vcc *vcc; unsigned long value; int error; if (__SO_LEVEL_MATCH(optname, level) && optlen != __SO_SIZE(optname)) return -EINVAL; vcc = ATM_SD(sock); switch (optname) { case SO_ATMQOS: { struct atm_qos qos; if (copy_from_user(&qos, optval, sizeof(qos))) return -EFAULT; error = check_qos(&qos); if (error) return error; if (sock->state == SS_CONNECTED) return atm_change_qos(vcc, &qos); if (sock->state != SS_UNCONNECTED) return -EBADFD; vcc->qos = qos; set_bit(ATM_VF_HASQOS, &vcc->flags); return 0; } case SO_SETCLP: if (get_user(value, (unsigned long __user *)optval)) return -EFAULT; if (value) vcc->atm_options |= ATM_ATMOPT_CLP; else vcc->atm_options &= ~ATM_ATMOPT_CLP; return 0; default: if (level == SOL_SOCKET) return -EINVAL; break; } if (!vcc->dev || !vcc->dev->ops->setsockopt) return -EINVAL; return vcc->dev->ops->setsockopt(vcc, level, optname, optval, optlen); } int vcc_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct atm_vcc *vcc; int len; if (get_user(len, optlen)) return -EFAULT; if (__SO_LEVEL_MATCH(optname, level) && len != __SO_SIZE(optname)) return -EINVAL; vcc = ATM_SD(sock); switch (optname) { case SO_ATMQOS: if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) return -EINVAL; return copy_to_user(optval, &vcc->qos, sizeof(vcc->qos)) ? -EFAULT : 0; case SO_SETCLP: return put_user(vcc->atm_options & ATM_ATMOPT_CLP ? 
1 : 0, (unsigned long __user *)optval) ? -EFAULT : 0; case SO_ATMPVC: { struct sockaddr_atmpvc pvc; if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags)) return -ENOTCONN; pvc.sap_family = AF_ATMPVC; pvc.sap_addr.itf = vcc->dev->number; pvc.sap_addr.vpi = vcc->vpi; pvc.sap_addr.vci = vcc->vci; return copy_to_user(optval, &pvc, sizeof(pvc)) ? -EFAULT : 0; } default: if (level == SOL_SOCKET) return -EINVAL; break; } if (!vcc->dev || !vcc->dev->ops->getsockopt) return -EINVAL; return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len); } int register_atmdevice_notifier(struct notifier_block *nb) { return atomic_notifier_chain_register(&atm_dev_notify_chain, nb); } EXPORT_SYMBOL_GPL(register_atmdevice_notifier); void unregister_atmdevice_notifier(struct notifier_block *nb) { atomic_notifier_chain_unregister(&atm_dev_notify_chain, nb); } EXPORT_SYMBOL_GPL(unregister_atmdevice_notifier); static int __init atm_init(void) { int error; error = proto_register(&vcc_proto, 0); if (error < 0) goto out; error = atmpvc_init(); if (error < 0) { pr_err("atmpvc_init() failed with %d\n", error); goto out_unregister_vcc_proto; } error = atmsvc_init(); if (error < 0) { pr_err("atmsvc_init() failed with %d\n", error); goto out_atmpvc_exit; } error = atm_proc_init(); if (error < 0) { pr_err("atm_proc_init() failed with %d\n", error); goto out_atmsvc_exit; } error = atm_sysfs_init(); if (error < 0) { pr_err("atm_sysfs_init() failed with %d\n", error); goto out_atmproc_exit; } out: return error; out_atmproc_exit: atm_proc_exit(); out_atmsvc_exit: atmsvc_exit(); out_atmpvc_exit: atmsvc_exit(); out_unregister_vcc_proto: proto_unregister(&vcc_proto); goto out; } static void __exit atm_exit(void) { atm_proc_exit(); atm_sysfs_exit(); atmsvc_exit(); atmpvc_exit(); proto_unregister(&vcc_proto); } subsys_initcall(atm_init); module_exit(atm_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_ATMPVC); MODULE_ALIAS_NETPROTO(PF_ATMSVC);
gpl-2.0
TeamSXL/htc-cm-kernel-doubleshot-34_old
arch/blackfin/mach-bf518/boards/tcm-bf518.c
4419
17122
/* * Copyright 2004-2009 Analog Devices Inc. * 2005 National ICT Australia (NICTA) * Aidan Williams <aidan@nicta.com.au> * * Licensed under the GPL-2 or later. */ #include <linux/device.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/i2c.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> #include <asm/reboot.h> #include <asm/portmux.h> #include <asm/dpmc.h> #include <asm/bfin_sdh.h> #include <linux/spi/ad7877.h> #include <net/dsa.h> /* * Name the Board for the /proc/cpuinfo */ const char bfin_board_name[] = "Bluetechnix TCM-BF518"; /* * Driver needs to know address, irq and flag pin. */ #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) static struct mtd_partition tcm_partitions[] = { { .name = "bootloader(nor)", .size = 0x40000, .offset = 0, }, { .name = "linux(nor)", .size = 0x1C0000, .offset = MTDPART_OFS_APPEND, } }; static struct physmap_flash_data tcm_flash_data = { .width = 2, .parts = tcm_partitions, .nr_parts = ARRAY_SIZE(tcm_partitions), }; static struct resource tcm_flash_resource = { .start = 0x20000000, .end = 0x201fffff, .flags = IORESOURCE_MEM, }; static struct platform_device tcm_flash_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &tcm_flash_data, }, .num_resources = 1, .resource = &tcm_flash_resource, }; #endif #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) static struct platform_device rtc_device = { .name = "rtc-bfin", .id = -1, }; #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) #include <linux/bfin_mac.h> static const unsigned short bfin_mac_peripherals[] = P_MII0; static struct bfin_phydev_platform_data bfin_phydev_data[] = { { .addr = 1, .irq = IRQ_MAC_PHYINT, }, }; static struct bfin_mii_bus_platform_data 
bfin_mii_bus_data = { .phydev_number = 1, .phydev_data = bfin_phydev_data, .phy_mode = PHY_INTERFACE_MODE_MII, .mac_peripherals = bfin_mac_peripherals, }; static struct platform_device bfin_mii_bus = { .name = "bfin_mii_bus", .dev = { .platform_data = &bfin_mii_bus_data, } }; static struct platform_device bfin_mac_device = { .name = "bfin_mac", .dev = { .platform_data = &bfin_mii_bus, } }; #endif #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) static struct mtd_partition bfin_spi_flash_partitions[] = { { .name = "bootloader(spi)", .size = 0x00040000, .offset = 0, .mask_flags = MTD_CAP_ROM }, { .name = "linux kernel(spi)", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; static struct flash_platform_data bfin_spi_flash_data = { .name = "m25p80", .parts = bfin_spi_flash_partitions, .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), .type = "m25p16", }; /* SPI flash chip (m25p64) */ static struct bfin5xx_spi_chip spi_flash_chip_info = { .enable_dma = 0, /* use dma transfer with this chip*/ }; #endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) static struct bfin5xx_spi_chip mmc_spi_chip_info = { .enable_dma = 0, }; #endif #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) static const struct ad7877_platform_data bfin_ad7877_ts_info = { .model = 7877, .vref_delay_usecs = 50, /* internal, no capacitor */ .x_plate_ohms = 419, .y_plate_ohms = 486, .pressure_max = 1000, .pressure_min = 0, .stopacq_polarity = 1, .first_conversion_delay = 3, .acquisition_time = 1, .averaging = 1, .pen_down_acc_interval = 1, }; #endif static struct spi_board_info bfin_spi_board_info[] __initdata = { #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) { /* the modalias must be the same as spi device driver name */ .modalias = "m25p80", /* Name of spi_driver for this device */ .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, /* Framework bus number */ .chip_select = 
2, /* SPI0_SSEL2 */ .platform_data = &bfin_spi_flash_data, .controller_data = &spi_flash_chip_info, .mode = SPI_MODE_3, }, #endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) { .modalias = "mmc_spi", .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 5, .controller_data = &mmc_spi_chip_info, .mode = SPI_MODE_3, }, #endif #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) { .modalias = "ad7877", .platform_data = &bfin_ad7877_ts_info, .irq = IRQ_PF8, .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 2, }, #endif #if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \ && defined(CONFIG_SND_SOC_WM8731_SPI) { .modalias = "wm8731", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 5, .mode = SPI_MODE_0, }, #endif #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) { .modalias = "spidev", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 1, }, #endif #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) { .modalias = "bfin-lq035q1-spi", .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 1, .mode = SPI_CPHA | SPI_CPOL, }, #endif }; /* SPI controller data */ #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) /* SPI (0) */ static struct bfin5xx_spi_master bfin_spi0_info = { .num_chipselect = 6, .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0}, }; static struct resource bfin_spi0_resource[] = { [0] = { .start = SPI0_REGBASE, .end = SPI0_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = CH_SPI0, .end = CH_SPI0, .flags = IORESOURCE_DMA, }, [2] = { .start = IRQ_SPI0, .end = IRQ_SPI0, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_spi0_device = { 
.name = "bfin-spi", .id = 0, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi0_resource), .resource = bfin_spi0_resource, .dev = { .platform_data = &bfin_spi0_info, /* Passed to driver */ }, }; /* SPI (1) */ static struct bfin5xx_spi_master bfin_spi1_info = { .num_chipselect = 6, .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI1_SCK, P_SPI1_MISO, P_SPI1_MOSI, 0}, }; static struct resource bfin_spi1_resource[] = { [0] = { .start = SPI1_REGBASE, .end = SPI1_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = CH_SPI1, .end = CH_SPI1, .flags = IORESOURCE_DMA, }, [2] = { .start = IRQ_SPI1, .end = IRQ_SPI1, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_spi1_device = { .name = "bfin-spi", .id = 1, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi1_resource), .resource = bfin_spi1_resource, .dev = { .platform_data = &bfin_spi1_info, /* Passed to driver */ }, }; #endif /* spi master and devices */ #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 static struct resource bfin_uart0_resources[] = { { .start = UART0_THR, .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_TX, .end = IRQ_UART0_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_ERROR, .end = IRQ_UART0_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_TX, .end = CH_UART0_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART0_RX, .end = CH_UART0_RX, .flags = IORESOURCE_DMA, }, }; static unsigned short bfin_uart0_peripherals[] = { P_UART0_TX, P_UART0_RX, 0 }; static struct platform_device bfin_uart0_device = { .name = "bfin-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_uart0_resources), .resource = bfin_uart0_resources, .dev = { .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 static struct resource bfin_uart1_resources[] = { { .start = 
UART1_THR, .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_TX, .end = IRQ_UART1_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_ERROR, .end = IRQ_UART1_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_TX, .end = CH_UART1_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART1_RX, .end = CH_UART1_RX, .flags = IORESOURCE_DMA, }, }; static unsigned short bfin_uart1_peripherals[] = { P_UART1_TX, P_UART1_RX, 0 }; static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_uart1_resources), .resource = bfin_uart1_resources, .dev = { .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ }, }; #endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 static struct resource bfin_sir0_resources[] = { { .start = 0xFFC00400, .end = 0xFFC004FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_RX, .end = CH_UART0_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir0_device = { .name = "bfin_sir", .id = 0, .num_resources = ARRAY_SIZE(bfin_sir0_resources), .resource = bfin_sir0_resources, }; #endif #ifdef CONFIG_BFIN_SIR1 static struct resource bfin_sir1_resources[] = { { .start = 0xFFC02000, .end = 0xFFC020FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_RX, .end = CH_UART1_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir1_device = { .name = "bfin_sir", .id = 1, .num_resources = ARRAY_SIZE(bfin_sir1_resources), .resource = bfin_sir1_resources, }; #endif #endif #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) static struct resource bfin_twi0_resource[] = { [0] = { .start = TWI0_REGBASE, .end = TWI0_REGBASE, .flags = IORESOURCE_MEM, }, [1] = 
{ .start = IRQ_TWI, .end = IRQ_TWI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device i2c_bfin_twi_device = { .name = "i2c-bfin-twi", .id = 0, .num_resources = ARRAY_SIZE(bfin_twi0_resource), .resource = bfin_twi0_resource, }; #endif static struct i2c_board_info __initdata bfin_i2c_board_info[] = { #if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) { I2C_BOARD_INFO("pcf8574_lcd", 0x22), }, #endif #if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE) { I2C_BOARD_INFO("pcf8574_keypad", 0x27), .irq = IRQ_PF8, }, #endif }; #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART static struct resource bfin_sport0_uart_resources[] = { { .start = SPORT0_TCR1, .end = SPORT0_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT0_RX, .end = IRQ_SPORT0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT0_ERROR, .end = IRQ_SPORT0_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport0_peripherals[] = { P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0 }; static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), .resource = bfin_sport0_uart_resources, .dev = { .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART static struct resource bfin_sport1_uart_resources[] = { { .start = SPORT1_TCR1, .end = SPORT1_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT1_RX, .end = IRQ_SPORT1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT1_ERROR, .end = IRQ_SPORT1_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport1_peripherals[] = { P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, 
.num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), .resource = bfin_sport1_uart_resources, .dev = { .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ }, }; #endif #endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) #include <linux/input.h> #include <linux/gpio_keys.h> static struct gpio_keys_button bfin_gpio_keys_table[] = { {BTN_0, GPIO_PG0, 1, "gpio-keys: BTN0"}, {BTN_1, GPIO_PG13, 1, "gpio-keys: BTN1"}, }; static struct gpio_keys_platform_data bfin_gpio_keys_data = { .buttons = bfin_gpio_keys_table, .nbuttons = ARRAY_SIZE(bfin_gpio_keys_table), }; static struct platform_device bfin_device_gpiokeys = { .name = "gpio-keys", .dev = { .platform_data = &bfin_gpio_keys_data, }, }; #endif #if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE) static struct bfin_sd_host bfin_sdh_data = { .dma_chan = CH_RSI, .irq_int0 = IRQ_RSI_INT0, .pin_req = {P_RSI_DATA0, P_RSI_DATA1, P_RSI_DATA2, P_RSI_DATA3, P_RSI_CMD, P_RSI_CLK, 0}, }; static struct platform_device bf51x_sdh_device = { .name = "bfin-sdh", .id = 0, .dev = { .platform_data = &bfin_sdh_data, }, }; #endif static const unsigned int cclk_vlev_datasheet[] = { VRPAIR(VLEV_100, 400000000), VRPAIR(VLEV_105, 426000000), VRPAIR(VLEV_110, 500000000), VRPAIR(VLEV_115, 533000000), VRPAIR(VLEV_120, 600000000), }; static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { .tuple_tab = cclk_vlev_datasheet, .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), .vr_settling_time = 25 /* us */, }; static struct platform_device bfin_dpmc = { .name = "bfin dpmc", .dev = { .platform_data = &bfin_dmpc_vreg_data, }, }; static struct platform_device *tcm_devices[] __initdata = { &bfin_dpmc, #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) &rtc_device, #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) &bfin_mii_bus, &bfin_mac_device, #endif #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) &bfin_spi0_device, &bfin_spi1_device, 
#endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 &bfin_sir0_device, #endif #ifdef CONFIG_BFIN_SIR1 &bfin_sir1_device, #endif #endif #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) &i2c_bfin_twi_device, #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) &bfin_device_gpiokeys, #endif #if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE) &bf51x_sdh_device, #endif #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) &tcm_flash_device, #endif }; static int __init tcm_init(void) { printk(KERN_INFO "%s(): registering device resources\n", __func__); i2c_register_board_info(0, bfin_i2c_board_info, ARRAY_SIZE(bfin_i2c_board_info)); platform_add_devices(tcm_devices, ARRAY_SIZE(tcm_devices)); spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); return 0; } arch_initcall(tcm_init); static struct platform_device *tcm_early_devices[] __initdata = { #if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif }; void __init native_machine_early_platform_add_devices(void) { printk(KERN_INFO "register early platform devices\n"); early_platform_add_devices(tcm_early_devices, 
ARRAY_SIZE(tcm_early_devices)); } void native_machine_restart(char *cmd) { /* workaround reboot hang when booting from SPI */ if ((bfin_read_SYSCR() & 0x7) == 0x3) bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); } int bfin_get_ether_addr(char *addr) { return 1; } EXPORT_SYMBOL(bfin_get_ether_addr);
gpl-2.0
padovan/bluetooth-next
arch/sh/kernel/sh_ksyms_64.c
8515
1555
/* * arch/sh/kernel/sh_ksyms_64.c * * Copyright (C) 2000, 2001 Paolo Alberelli * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/rwsem.h> #include <linux/module.h> #include <linux/smp.h> #include <linux/user.h> #include <linux/elfcore.h> #include <linux/sched.h> #include <linux/in6.h> #include <linux/interrupt.h> #include <linux/screen_info.h> #include <asm/cacheflush.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/checksum.h> #include <asm/io.h> #include <asm/delay.h> #include <asm/irq.h> EXPORT_SYMBOL(__put_user_asm_b); EXPORT_SYMBOL(__put_user_asm_w); EXPORT_SYMBOL(__put_user_asm_l); EXPORT_SYMBOL(__put_user_asm_q); EXPORT_SYMBOL(__get_user_asm_b); EXPORT_SYMBOL(__get_user_asm_w); EXPORT_SYMBOL(__get_user_asm_l); EXPORT_SYMBOL(__get_user_asm_q); EXPORT_SYMBOL(__strnlen_user); EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(__copy_user); EXPORT_SYMBOL(empty_zero_page); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(__udelay); EXPORT_SYMBOL(__ndelay); EXPORT_SYMBOL(__const_udelay); EXPORT_SYMBOL(strlen); EXPORT_SYMBOL(strcpy); /* Ugh. These come in from libgcc.a at link time. */ #define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name) DECLARE_EXPORT(__sdivsi3); DECLARE_EXPORT(__sdivsi3_1); DECLARE_EXPORT(__sdivsi3_2); DECLARE_EXPORT(__udivsi3); DECLARE_EXPORT(__div_table);
gpl-2.0
kozmikkick/KozmiKKernel
arch/mn10300/unit-asb2305/pci-irq.c
8771
1300
/* PCI IRQ routing on the MN103E010 based ASB2305 * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. * * This is simple: All PCI interrupts route through the CPU's XIRQ1 pin [IRQ 35] */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <asm/io.h> #include <asm/smp.h> #include "pci-asb2305.h" void __init pcibios_irq_init(void) { } void __init pcibios_fixup_irqs(void) { struct pci_dev *dev = NULL; u8 line, pin; while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (pin) { dev->irq = XIRQ1; pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); } pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &line); } } void __init pcibios_penalize_isa_irq(int irq) { } void pcibios_enable_irq(struct pci_dev *dev) { pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); }
gpl-2.0
kbc-developers/android_kernel_samsung_msm8660
net/llc/llc_s_ev.c
15683
3613
/* * llc_s_ev.c - Defines SAP component events * * The followed event functions are SAP component events which are described * in 802.2 LLC protocol standard document. * * Copyright (c) 1997 by Procom Technology, Inc. * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program can be redistributed or modified under the terms of the * GNU General Public License as published by the Free Software Foundation. * This program is distributed without any warranty or implied warranty * of merchantability or fitness for a particular purpose. * * See the GNU General Public License for more details. */ #include <linux/socket.h> #include <net/sock.h> #include <net/llc_if.h> #include <net/llc_s_ev.h> #include <net/llc_pdu.h> int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); return ev->type == LLC_SAP_EV_TYPE_SIMPLE && ev->prim_type == LLC_SAP_EV_ACTIVATION_REQ ? 0 : 1; } int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return ev->type == LLC_SAP_EV_TYPE_PDU && LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_UI ? 0 : 1; } int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); return ev->type == LLC_SAP_EV_TYPE_PRIM && ev->prim == LLC_DATAUNIT_PRIM && ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1; } int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); return ev->type == LLC_SAP_EV_TYPE_PRIM && ev->prim == LLC_XID_PRIM && ev->prim_type == LLC_PRIM_TYPE_REQ ? 
0 : 1; } int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return ev->type == LLC_SAP_EV_TYPE_PDU && LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID ? 0 : 1; } int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return ev->type == LLC_SAP_EV_TYPE_PDU && LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_RSP(pdu) == LLC_1_PDU_CMD_XID ? 0 : 1; } int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); return ev->type == LLC_SAP_EV_TYPE_PRIM && ev->prim == LLC_TEST_PRIM && ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1; } int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return ev->type == LLC_SAP_EV_TYPE_PDU && LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST ? 0 : 1; } int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return ev->type == LLC_SAP_EV_TYPE_PDU && LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_RSP(pdu) == LLC_1_PDU_CMD_TEST ? 0 : 1; } int llc_sap_ev_deactivation_req(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); return ev->type == LLC_SAP_EV_TYPE_SIMPLE && ev->prim_type == LLC_SAP_EV_DEACTIVATION_REQ ? 0 : 1; }
gpl-2.0
Pingmin/linux
arch/powerpc/kernel/idle.c
68
2292
// SPDX-License-Identifier: GPL-2.0-or-later /* * Idle daemon for PowerPC. Idle daemon will handle any action * that needs to be taken when the system becomes idle. * * Originally written by Cort Dougan (cort@cs.nmt.edu). * Subsequent 32-bit hacking by Tom Rini, Armin Kuster, * Paul Mackerras and others. * * iSeries supported added by Mike Corrigan <mikejc@us.ibm.com> * * Additional shared processor, SMT, and firmware support * Copyright (c) 2003 Dave Engebretsen <engebret@us.ibm.com> * * 32-bit and 64-bit versions merged by Paul Mackerras <paulus@samba.org> */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/smp.h> #include <linux/cpu.h> #include <linux/sysctl.h> #include <linux/tick.h> #include <asm/processor.h> #include <asm/cputable.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/runlatch.h> #include <asm/smp.h> unsigned long cpuidle_disable = IDLE_NO_OVERRIDE; EXPORT_SYMBOL(cpuidle_disable); static int __init powersave_off(char *arg) { ppc_md.power_save = NULL; cpuidle_disable = IDLE_POWERSAVE_OFF; return 0; } __setup("powersave=off", powersave_off); #ifdef CONFIG_HOTPLUG_CPU void arch_cpu_idle_dead(void) { sched_preempt_enable_no_resched(); cpu_die(); } #endif void arch_cpu_idle(void) { ppc64_runlatch_off(); if (ppc_md.power_save) { ppc_md.power_save(); /* * Some power_save functions return with * interrupts enabled, some don't. */ if (irqs_disabled()) local_irq_enable(); } else { local_irq_enable(); /* * Go into low thread priority and possibly * low power mode. */ HMT_low(); HMT_very_low(); } HMT_medium(); ppc64_runlatch_on(); } int powersave_nap; #ifdef CONFIG_SYSCTL /* * Register the sysctl to set/clear powersave_nap. 
*/ static struct ctl_table powersave_nap_ctl_table[] = { { .procname = "powersave-nap", .data = &powersave_nap, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, {} }; static struct ctl_table powersave_nap_sysctl_root[] = { { .procname = "kernel", .mode = 0555, .child = powersave_nap_ctl_table, }, {} }; static int __init register_powersave_nap_sysctl(void) { register_sysctl_table(powersave_nap_sysctl_root); return 0; } __initcall(register_powersave_nap_sysctl); #endif
gpl-2.0
johndai1984/platform_external_qemu
distrib/sdl-1.2.12/src/video/fbcon/SDL_fbevents.c
68
32336
/* SDL - Simple DirectMedia Layer Copyright (C) 1997-2006 Sam Lantinga This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Sam Lantinga slouken@libsdl.org */ #include "SDL_config.h" /* Handle the event stream, converting console events into SDL events */ #include <stdio.h> #include <sys/types.h> #include <sys/time.h> #include <sys/ioctl.h> #include <unistd.h> #include <fcntl.h> #include <errno.h> #include <limits.h> /* For parsing /proc */ #include <dirent.h> #include <ctype.h> #include <linux/vt.h> #include <linux/kd.h> #include <linux/keyboard.h> #include "SDL_timer.h" #include "SDL_mutex.h" #include "../SDL_sysvideo.h" #include "../../events/SDL_sysevents.h" #include "../../events/SDL_events_c.h" #include "SDL_fbvideo.h" #include "SDL_fbevents_c.h" #include "SDL_fbkeys.h" #include "SDL_fbelo.h" #ifndef GPM_NODE_FIFO #define GPM_NODE_FIFO "/dev/gpmdata" #endif /*#define DEBUG_KEYBOARD*/ /*#define DEBUG_MOUSE*/ /* The translation tables from a console scancode to a SDL keysym */ #define NUM_VGAKEYMAPS (1<<KG_CAPSSHIFT) static Uint16 vga_keymap[NUM_VGAKEYMAPS][NR_KEYS]; static SDLKey keymap[128]; static Uint16 keymap_temp[128]; /* only used at startup */ static SDL_keysym *TranslateKey(int scancode, SDL_keysym *keysym); /* Ugh, we have to duplicate the kernel's keysym mapping code... Oh, it's not so bad. 
:-) FIXME: Add keyboard LED handling code */ static void FB_vgainitkeymaps(int fd) { struct kbentry entry; int map, i; /* Don't do anything if we are passed a closed keyboard */ if ( fd < 0 ) { return; } /* Load all the keysym mappings */ for ( map=0; map<NUM_VGAKEYMAPS; ++map ) { SDL_memset(vga_keymap[map], 0, NR_KEYS*sizeof(Uint16)); for ( i=0; i<NR_KEYS; ++i ) { entry.kb_table = map; entry.kb_index = i; if ( ioctl(fd, KDGKBENT, &entry) == 0 ) { /* fill keytemp. This replaces SDL_fbkeys.h */ if ( (map == 0) && (i<128) ) { keymap_temp[i] = entry.kb_value; } /* The "Enter" key is a special case */ if ( entry.kb_value == K_ENTER ) { entry.kb_value = K(KT_ASCII,13); } /* Handle numpad specially as well */ if ( KTYP(entry.kb_value) == KT_PAD ) { switch ( entry.kb_value ) { case K_P0: case K_P1: case K_P2: case K_P3: case K_P4: case K_P5: case K_P6: case K_P7: case K_P8: case K_P9: vga_keymap[map][i]=entry.kb_value; vga_keymap[map][i]+= '0'; break; case K_PPLUS: vga_keymap[map][i]=K(KT_ASCII,'+'); break; case K_PMINUS: vga_keymap[map][i]=K(KT_ASCII,'-'); break; case K_PSTAR: vga_keymap[map][i]=K(KT_ASCII,'*'); break; case K_PSLASH: vga_keymap[map][i]=K(KT_ASCII,'/'); break; case K_PENTER: vga_keymap[map][i]=K(KT_ASCII,'\r'); break; case K_PCOMMA: vga_keymap[map][i]=K(KT_ASCII,','); break; case K_PDOT: vga_keymap[map][i]=K(KT_ASCII,'.'); break; default: break; } } /* Do the normal key translation */ if ( (KTYP(entry.kb_value) == KT_LATIN) || (KTYP(entry.kb_value) == KT_ASCII) || (KTYP(entry.kb_value) == KT_LETTER) ) { vga_keymap[map][i] = entry.kb_value; } } } } } int FB_InGraphicsMode(_THIS) { return((keyboard_fd >= 0) && (saved_kbd_mode >= 0)); } int FB_EnterGraphicsMode(_THIS) { struct termios keyboard_termios; /* Set medium-raw keyboard mode */ if ( (keyboard_fd >= 0) && !FB_InGraphicsMode(this) ) { /* Switch to the correct virtual terminal */ if ( current_vt > 0 ) { struct vt_stat vtstate; if ( ioctl(keyboard_fd, VT_GETSTATE, &vtstate) == 0 ) { saved_vt = 
vtstate.v_active; } if ( ioctl(keyboard_fd, VT_ACTIVATE, current_vt) == 0 ) { ioctl(keyboard_fd, VT_WAITACTIVE, current_vt); } } /* Set the terminal input mode */ if ( tcgetattr(keyboard_fd, &saved_kbd_termios) < 0 ) { SDL_SetError("Unable to get terminal attributes"); if ( keyboard_fd > 0 ) { close(keyboard_fd); } keyboard_fd = -1; return(-1); } if ( ioctl(keyboard_fd, KDGKBMODE, &saved_kbd_mode) < 0 ) { SDL_SetError("Unable to get current keyboard mode"); if ( keyboard_fd > 0 ) { close(keyboard_fd); } keyboard_fd = -1; return(-1); } keyboard_termios = saved_kbd_termios; keyboard_termios.c_lflag &= ~(ICANON | ECHO | ISIG); keyboard_termios.c_iflag &= ~(ISTRIP | IGNCR | ICRNL | INLCR | IXOFF | IXON); keyboard_termios.c_cc[VMIN] = 0; keyboard_termios.c_cc[VTIME] = 0; if (tcsetattr(keyboard_fd, TCSAFLUSH, &keyboard_termios) < 0) { FB_CloseKeyboard(this); SDL_SetError("Unable to set terminal attributes"); return(-1); } /* This will fail if we aren't root or this isn't our tty */ if ( ioctl(keyboard_fd, KDSKBMODE, K_MEDIUMRAW) < 0 ) { FB_CloseKeyboard(this); SDL_SetError("Unable to set keyboard in raw mode"); return(-1); } if ( ioctl(keyboard_fd, KDSETMODE, KD_GRAPHICS) < 0 ) { FB_CloseKeyboard(this); SDL_SetError("Unable to set keyboard in graphics mode"); return(-1); } /* Prevent switching the virtual terminal */ ioctl(keyboard_fd, VT_LOCKSWITCH, 1); } return(keyboard_fd); } void FB_LeaveGraphicsMode(_THIS) { if ( FB_InGraphicsMode(this) ) { ioctl(keyboard_fd, KDSETMODE, KD_TEXT); ioctl(keyboard_fd, KDSKBMODE, saved_kbd_mode); tcsetattr(keyboard_fd, TCSAFLUSH, &saved_kbd_termios); saved_kbd_mode = -1; /* Head back over to the original virtual terminal */ ioctl(keyboard_fd, VT_UNLOCKSWITCH, 1); if ( saved_vt > 0 ) { ioctl(keyboard_fd, VT_ACTIVATE, saved_vt); } } } void FB_CloseKeyboard(_THIS) { if ( keyboard_fd >= 0 ) { FB_LeaveGraphicsMode(this); if ( keyboard_fd > 0 ) { close(keyboard_fd); } } keyboard_fd = -1; } int FB_OpenKeyboard(_THIS) { /* Open only if not 
already opened */ if ( keyboard_fd < 0 ) { static const char * const tty0[] = { "/dev/tty0", "/dev/vc/0", NULL }; static const char * const vcs[] = { "/dev/vc/%d", "/dev/tty%d", NULL }; int i, tty0_fd; /* Try to query for a free virtual terminal */ tty0_fd = -1; for ( i=0; tty0[i] && (tty0_fd < 0); ++i ) { tty0_fd = open(tty0[i], O_WRONLY, 0); } if ( tty0_fd < 0 ) { tty0_fd = dup(0); /* Maybe stdin is a VT? */ } ioctl(tty0_fd, VT_OPENQRY, &current_vt); close(tty0_fd); if ( (geteuid() == 0) && (current_vt > 0) ) { for ( i=0; vcs[i] && (keyboard_fd < 0); ++i ) { char vtpath[12]; SDL_snprintf(vtpath, SDL_arraysize(vtpath), vcs[i], current_vt); keyboard_fd = open(vtpath, O_RDWR, 0); #ifdef DEBUG_KEYBOARD fprintf(stderr, "vtpath = %s, fd = %d\n", vtpath, keyboard_fd); #endif /* DEBUG_KEYBOARD */ /* This needs to be our controlling tty so that the kernel ioctl() calls work */ if ( keyboard_fd >= 0 ) { tty0_fd = open("/dev/tty", O_RDWR, 0); if ( tty0_fd >= 0 ) { ioctl(tty0_fd, TIOCNOTTY, 0); close(tty0_fd); } } } } if ( keyboard_fd < 0 ) { /* Last resort, maybe our tty is a usable VT */ struct vt_stat vtstate; keyboard_fd = open("/dev/tty", O_RDWR); if ( ioctl(keyboard_fd, VT_GETSTATE, &vtstate) == 0 ) { current_vt = vtstate.v_active; } else { current_vt = 0; } } #ifdef DEBUG_KEYBOARD fprintf(stderr, "Current VT: %d\n", current_vt); #endif saved_kbd_mode = -1; /* Make sure that our input is a console terminal */ { int dummy; if ( ioctl(keyboard_fd, KDGKBMODE, &dummy) < 0 ) { close(keyboard_fd); keyboard_fd = -1; SDL_SetError("Unable to open a console terminal"); } } /* Set up keymap */ FB_vgainitkeymaps(keyboard_fd); } return(keyboard_fd); } static enum { MOUSE_NONE = -1, MOUSE_MSC, /* Note: GPM uses the MSC protocol */ MOUSE_PS2, MOUSE_IMPS2, MOUSE_MS, MOUSE_BM, MOUSE_ELO, MOUSE_TSLIB, NUM_MOUSE_DRVS } mouse_drv = MOUSE_NONE; void FB_CloseMouse(_THIS) { #if SDL_INPUT_TSLIB if (ts_dev != NULL) { ts_close(ts_dev); ts_dev = NULL; mouse_fd = -1; } #endif /* SDL_INPUT_TSLIB 
*/ if ( mouse_fd > 0 ) { close(mouse_fd); } mouse_fd = -1; } /* Returns processes listed in /proc with the desired name */ static int find_pid(DIR *proc, const char *wanted_name) { struct dirent *entry; int pid; /* First scan proc for the gpm process */ pid = 0; while ( (pid == 0) && ((entry=readdir(proc)) != NULL) ) { if ( isdigit(entry->d_name[0]) ) { FILE *status; char path[PATH_MAX]; char name[PATH_MAX]; SDL_snprintf(path, SDL_arraysize(path), "/proc/%s/status", entry->d_name); status=fopen(path, "r"); if ( status ) { name[0] = '\0'; fscanf(status, "Name: %s", name); if ( SDL_strcmp(name, wanted_name) == 0 ) { pid = SDL_atoi(entry->d_name); } fclose(status); } } } return pid; } /* Returns true if /dev/gpmdata is being written to by gpm */ static int gpm_available(char *proto, size_t protolen) { int available; DIR *proc; int pid; int cmdline, len, arglen; char path[PATH_MAX]; char args[PATH_MAX], *arg; /* Don't bother looking if the fifo isn't there */ #ifdef DEBUG_MOUSE fprintf(stderr,"testing gpm\n"); #endif if ( access(GPM_NODE_FIFO, F_OK) < 0 ) { return(0); } available = 0; proc = opendir("/proc"); if ( proc ) { char raw_proto[10] = { '\0' }; char repeat_proto[10] = { '\0' }; while ( !available && (pid=find_pid(proc, "gpm")) > 0 ) { SDL_snprintf(path, SDL_arraysize(path), "/proc/%d/cmdline", pid); cmdline = open(path, O_RDONLY, 0); if ( cmdline >= 0 ) { len = read(cmdline, args, sizeof(args)); arg = args; while ( len > 0 ) { arglen = SDL_strlen(arg)+1; #ifdef DEBUG_MOUSE fprintf(stderr,"gpm arg %s len %d\n",arg,arglen); #endif if ( SDL_strcmp(arg, "-t") == 0) { /* protocol string, keep it for later */ char *t, *s; t = arg + arglen; s = SDL_strchr(t, ' '); if (s) *s = 0; SDL_strlcpy(raw_proto, t, SDL_arraysize(raw_proto)); if (s) *s = ' '; } if ( SDL_strncmp(arg, "-R", 2) == 0 ) { char *t, *s; available = 1; t = arg + 2; s = SDL_strchr(t, ' '); if (s) *s = 0; SDL_strlcpy(repeat_proto, t, SDL_arraysize(repeat_proto)); if (s) *s = ' '; } len -= arglen; arg += 
arglen; } close(cmdline); } } closedir(proc); if ( available ) { if ( SDL_strcmp(repeat_proto, "raw") == 0 ) { SDL_strlcpy(proto, raw_proto, protolen); } else if ( *repeat_proto ) { SDL_strlcpy(proto, repeat_proto, protolen); } else { SDL_strlcpy(proto, "msc", protolen); } } } return available; } /* rcg06112001 Set up IMPS/2 mode, if possible. This gives * us access to the mousewheel, etc. Returns zero if * writes to device failed, but you still need to query the * device to see which mode it's actually in. */ static int set_imps2_mode(int fd) { /* If you wanted to control the mouse mode (and we do :) ) ... Set IMPS/2 protocol: {0xf3,200,0xf3,100,0xf3,80} Reset mouse device: {0xFF} */ Uint8 set_imps2[] = {0xf3, 200, 0xf3, 100, 0xf3, 80}; /*Uint8 reset = 0xff;*/ fd_set fdset; struct timeval tv; int retval = 0; if ( write(fd, &set_imps2, sizeof(set_imps2)) == sizeof(set_imps2) ) { /* Don't reset it, that'll clear IMPS/2 mode on some mice if (write(fd, &reset, sizeof (reset)) == sizeof (reset) ) { retval = 1; } */ } /* Get rid of any chatter from the above */ FD_ZERO(&fdset); FD_SET(fd, &fdset); tv.tv_sec = 0; tv.tv_usec = 0; while ( select(fd+1, &fdset, 0, 0, &tv) > 0 ) { char temp[32]; read(fd, temp, sizeof(temp)); } return retval; } /* Returns true if the mouse uses the IMPS/2 protocol */ static int detect_imps2(int fd) { int imps2; imps2 = 0; if ( SDL_getenv("SDL_MOUSEDEV_IMPS2") ) { imps2 = 1; } if ( ! 
imps2 ) { Uint8 query_ps2 = 0xF2; fd_set fdset; struct timeval tv; /* Get rid of any mouse motion noise */ FD_ZERO(&fdset); FD_SET(fd, &fdset); tv.tv_sec = 0; tv.tv_usec = 0; while ( select(fd+1, &fdset, 0, 0, &tv) > 0 ) { char temp[32]; read(fd, temp, sizeof(temp)); } /* Query for the type of mouse protocol */ if ( write(fd, &query_ps2, sizeof (query_ps2)) == sizeof (query_ps2)) { Uint8 ch = 0; /* Get the mouse protocol response */ do { FD_ZERO(&fdset); FD_SET(fd, &fdset); tv.tv_sec = 1; tv.tv_usec = 0; if ( select(fd+1, &fdset, 0, 0, &tv) < 1 ) { break; } } while ( (read(fd, &ch, sizeof (ch)) == sizeof (ch)) && ((ch == 0xFA) || (ch == 0xAA)) ); /* Experimental values (Logitech wheelmouse) */ #ifdef DEBUG_MOUSE fprintf(stderr, "Last mouse mode: 0x%x\n", ch); #endif if ( (ch == 3) || (ch == 4) ) { imps2 = 1; } } } return imps2; } int FB_OpenMouse(_THIS) { int i; const char *mousedev; const char *mousedrv; mousedrv = SDL_getenv("SDL_MOUSEDRV"); mousedev = SDL_getenv("SDL_MOUSEDEV"); mouse_fd = -1; #if SDL_INPUT_TSLIB if ( mousedrv && (SDL_strcmp(mousedrv, "TSLIB") == 0) ) { if (mousedev == NULL) mousedev = SDL_getenv("TSLIB_TSDEVICE"); if (mousedev != NULL) { ts_dev = ts_open(mousedev, 1); if ((ts_dev != NULL) && (ts_config(ts_dev) >= 0)) { #ifdef DEBUG_MOUSE fprintf(stderr, "Using tslib touchscreen\n"); #endif mouse_drv = MOUSE_TSLIB; mouse_fd = ts_fd(ts_dev); return mouse_fd; } } mouse_drv = MOUSE_NONE; return mouse_fd; } #endif /* SDL_INPUT_TSLIB */ /* ELO TOUCHSCREEN SUPPORT */ if ( mousedrv && (SDL_strcmp(mousedrv, "ELO") == 0) ) { mouse_fd = open(mousedev, O_RDWR); if ( mouse_fd >= 0 ) { if(eloInitController(mouse_fd)) { #ifdef DEBUG_MOUSE fprintf(stderr, "Using ELO touchscreen\n"); #endif mouse_drv = MOUSE_ELO; } } else if ( mouse_fd < 0 ) { mouse_drv = MOUSE_NONE; } return(mouse_fd); } /* STD MICE */ if ( mousedev == NULL ) { /* FIXME someday... 
allow multiple mice in this driver */ static const char *ps2mice[] = { "/dev/input/mice", "/dev/usbmouse", "/dev/psaux", NULL }; /* First try to use GPM in repeater mode */ if ( mouse_fd < 0 ) { char proto[10]; if ( gpm_available(proto, SDL_arraysize(proto)) ) { mouse_fd = open(GPM_NODE_FIFO, O_RDONLY, 0); if ( mouse_fd >= 0 ) { if ( SDL_strcmp(proto, "msc") == 0 ) { mouse_drv = MOUSE_MSC; } else if ( SDL_strcmp(proto, "ps2") == 0 ) { mouse_drv = MOUSE_PS2; } else if ( SDL_strcmp(proto, "imps2") == 0 ) { mouse_drv = MOUSE_IMPS2; } else if ( SDL_strcmp(proto, "ms") == 0 || SDL_strcmp(proto, "bare") == 0 ) { mouse_drv = MOUSE_MS; } else if ( SDL_strcmp(proto, "bm") == 0 ) { mouse_drv = MOUSE_BM; } else { /* Unknown protocol... */ #ifdef DEBUG_MOUSE fprintf(stderr, "GPM mouse using unknown protocol = %s\n", proto); #endif close(mouse_fd); mouse_fd = -1; } } #ifdef DEBUG_MOUSE if ( mouse_fd >= 0 ) { fprintf(stderr, "Using GPM mouse, protocol = %s\n", proto); } #endif /* DEBUG_MOUSE */ } } /* Now try to use a modern PS/2 mouse */ for ( i=0; (mouse_fd < 0) && ps2mice[i]; ++i ) { mouse_fd = open(ps2mice[i], O_RDWR, 0); if (mouse_fd < 0) { mouse_fd = open(ps2mice[i], O_RDONLY, 0); } if (mouse_fd >= 0) { /* rcg06112001 Attempt to set IMPS/2 mode */ set_imps2_mode(mouse_fd); if (detect_imps2(mouse_fd)) { #ifdef DEBUG_MOUSE fprintf(stderr, "Using IMPS2 mouse\n"); #endif mouse_drv = MOUSE_IMPS2; } else { #ifdef DEBUG_MOUSE fprintf(stderr, "Using PS2 mouse\n"); #endif mouse_drv = MOUSE_PS2; } } } /* Next try to use a PPC ADB port mouse */ if ( mouse_fd < 0 ) { mouse_fd = open("/dev/adbmouse", O_RDONLY, 0); if ( mouse_fd >= 0 ) { #ifdef DEBUG_MOUSE fprintf(stderr, "Using ADB mouse\n"); #endif mouse_drv = MOUSE_BM; } } } /* Default to a serial Microsoft mouse */ if ( mouse_fd < 0 ) { if ( mousedev == NULL ) { mousedev = "/dev/mouse"; } mouse_fd = open(mousedev, O_RDONLY, 0); if ( mouse_fd >= 0 ) { struct termios mouse_termios; /* Set the sampling speed to 1200 baud */ 
tcgetattr(mouse_fd, &mouse_termios); mouse_termios.c_iflag = IGNBRK | IGNPAR; mouse_termios.c_oflag = 0; mouse_termios.c_lflag = 0; mouse_termios.c_line = 0; mouse_termios.c_cc[VTIME] = 0; mouse_termios.c_cc[VMIN] = 1; mouse_termios.c_cflag = CREAD | CLOCAL | HUPCL; mouse_termios.c_cflag |= CS8; mouse_termios.c_cflag |= B1200; tcsetattr(mouse_fd, TCSAFLUSH, &mouse_termios); if ( mousedrv && (SDL_strcmp(mousedrv, "PS2") == 0) ) { #ifdef DEBUG_MOUSE fprintf(stderr, "Using (user specified) PS2 mouse on %s\n", mousedev); #endif mouse_drv = MOUSE_PS2; } else { #ifdef DEBUG_MOUSE fprintf(stderr, "Using (default) MS mouse on %s\n", mousedev); #endif mouse_drv = MOUSE_MS; } } } if ( mouse_fd < 0 ) { mouse_drv = MOUSE_NONE; } return(mouse_fd); } static int posted = 0; void FB_vgamousecallback(int button, int relative, int dx, int dy) { int button_1, button_3; int button_state; int state_changed; int i; Uint8 state; if ( dx || dy ) { posted += SDL_PrivateMouseMotion(0, relative, dx, dy); } /* Swap button 1 and 3 */ button_1 = (button & 0x04) >> 2; button_3 = (button & 0x01) << 2; button &= ~0x05; button |= (button_1|button_3); /* See what changed */ button_state = SDL_GetMouseState(NULL, NULL); state_changed = button_state ^ button; for ( i=0; i<8; ++i ) { if ( state_changed & (1<<i) ) { if ( button & (1<<i) ) { state = SDL_PRESSED; } else { state = SDL_RELEASED; } posted += SDL_PrivateMouseButton(state, i+1, 0, 0); } } } /* Handle input from tslib */ #if SDL_INPUT_TSLIB static void handle_tslib(_THIS) { struct ts_sample sample; int button; while (ts_read(ts_dev, &sample, 1) > 0) { button = (sample.pressure > 0) ? 1 : 0; button <<= 2; /* must report it as button 3 */ FB_vgamousecallback(button, 0, sample.x, sample.y); } return; } #endif /* SDL_INPUT_TSLIB */ /* For now, use MSC, PS/2, and MS protocols Driver adapted from the SVGAlib mouse driver code (taken from gpm, etc.) 
*/ static void handle_mouse(_THIS) { static int start = 0; static unsigned char mousebuf[BUFSIZ]; static int relative = 1; int i, nread; int button = 0; int dx = 0, dy = 0; int packetsize = 0; int realx, realy; /* Figure out the mouse packet size */ switch (mouse_drv) { case MOUSE_NONE: /* Ack! */ read(mouse_fd, mousebuf, BUFSIZ); return; case MOUSE_MSC: packetsize = 5; break; case MOUSE_IMPS2: packetsize = 4; break; case MOUSE_PS2: case MOUSE_MS: case MOUSE_BM: packetsize = 3; break; case MOUSE_ELO: /* try to read the next packet */ if(eloReadPosition(this, mouse_fd, &dx, &dy, &button, &realx, &realy)) { button = (button & 0x01) << 2; FB_vgamousecallback(button, 0, dx, dy); } return; /* nothing left to do */ case MOUSE_TSLIB: #if SDL_INPUT_TSLIB handle_tslib(this); #endif return; /* nothing left to do */ default: /* Uh oh.. */ packetsize = 0; break; } /* Special handling for the quite sensitive ELO controller */ if (mouse_drv == MOUSE_ELO) { } /* Read as many packets as possible */ nread = read(mouse_fd, &mousebuf[start], BUFSIZ-start); if ( nread < 0 ) { return; } nread += start; #ifdef DEBUG_MOUSE fprintf(stderr, "Read %d bytes from mouse, start = %d\n", nread, start); #endif for ( i=0; i<(nread-(packetsize-1)); i += packetsize ) { switch (mouse_drv) { case MOUSE_NONE: break; case MOUSE_MSC: /* MSC protocol has 0x80 in high byte */ if ( (mousebuf[i] & 0xF8) != 0x80 ) { /* Go to next byte */ i -= (packetsize-1); continue; } /* Get current mouse state */ button = (~mousebuf[i]) & 0x07; dx = (signed char)(mousebuf[i+1]) + (signed char)(mousebuf[i+3]); dy = -((signed char)(mousebuf[i+2]) + (signed char)(mousebuf[i+4])); break; case MOUSE_PS2: /* PS/2 protocol has nothing in high byte */ if ( (mousebuf[i] & 0xC0) != 0 ) { /* Go to next byte */ i -= (packetsize-1); continue; } /* Get current mouse state */ button = (mousebuf[i] & 0x04) >> 1 | /*Middle*/ (mousebuf[i] & 0x02) >> 1 | /*Right*/ (mousebuf[i] & 0x01) << 2; /*Left*/ dx = (mousebuf[i] & 0x10) ? 
mousebuf[i+1] - 256 : mousebuf[i+1]; dy = (mousebuf[i] & 0x20) ? -(mousebuf[i+2] - 256) : -mousebuf[i+2]; break; case MOUSE_IMPS2: /* Get current mouse state */ button = (mousebuf[i] & 0x04) >> 1 | /*Middle*/ (mousebuf[i] & 0x02) >> 1 | /*Right*/ (mousebuf[i] & 0x01) << 2 | /*Left*/ (mousebuf[i] & 0x40) >> 3 | /* 4 */ (mousebuf[i] & 0x80) >> 3; /* 5 */ dx = (mousebuf[i] & 0x10) ? mousebuf[i+1] - 256 : mousebuf[i+1]; dy = (mousebuf[i] & 0x20) ? -(mousebuf[i+2] - 256) : -mousebuf[i+2]; switch (mousebuf[i+3]&0x0F) { case 0x0E: /* DX = +1 */ case 0x02: /* DX = -1 */ break; case 0x0F: /* DY = +1 (map button 4) */ FB_vgamousecallback(button | (1<<3), 1, 0, 0); break; case 0x01: /* DY = -1 (map button 5) */ FB_vgamousecallback(button | (1<<4), 1, 0, 0); break; } break; case MOUSE_MS: /* Microsoft protocol has 0x40 in high byte */ if ( (mousebuf[i] & 0x40) != 0x40 ) { /* Go to next byte */ i -= (packetsize-1); continue; } /* Get current mouse state */ button = ((mousebuf[i] & 0x20) >> 3) | ((mousebuf[i] & 0x10) >> 4); dx = (signed char)(((mousebuf[i] & 0x03) << 6) | (mousebuf[i + 1] & 0x3F)); dy = (signed char)(((mousebuf[i] & 0x0C) << 4) | (mousebuf[i + 2] & 0x3F)); break; case MOUSE_BM: /* BusMouse protocol has 0xF8 in high byte */ if ( (mousebuf[i] & 0xF8) != 0x80 ) { /* Go to next byte */ i -= (packetsize-1); continue; } /* Get current mouse state */ button = (~mousebuf[i]) & 0x07; dx = (signed char)mousebuf[i+1]; dy = -(signed char)mousebuf[i+2]; break; default: /* Uh oh.. 
*/ dx = 0; dy = 0; break; } FB_vgamousecallback(button, relative, dx, dy); } if ( i < nread ) { SDL_memcpy(mousebuf, &mousebuf[i], (nread-i)); start = (nread-i); } else { start = 0; } return; } /* Handle switching to another VC, returns when our VC is back */ static void switch_vt_prep(_THIS) { SDL_Surface *screen = SDL_VideoSurface; SDL_PrivateAppActive(0, (SDL_APPACTIVE|SDL_APPINPUTFOCUS|SDL_APPMOUSEFOCUS)); /* Save the contents of the screen, and go to text mode */ wait_idle(this); screen_arealen = ((screen->h + (2*this->offset_y)) * screen->pitch); screen_contents = (Uint8 *)SDL_malloc(screen_arealen); if ( screen_contents ) { SDL_memcpy(screen_contents, screen->pixels, screen_arealen); } FB_SavePaletteTo(this, 256, screen_palette); ioctl(console_fd, FBIOGET_VSCREENINFO, &screen_vinfo); ioctl(keyboard_fd, KDSETMODE, KD_TEXT); ioctl(keyboard_fd, VT_UNLOCKSWITCH, 1); } static void switch_vt_done(_THIS) { SDL_Surface *screen = SDL_VideoSurface; /* Restore graphics mode and the contents of the screen */ ioctl(keyboard_fd, VT_LOCKSWITCH, 1); ioctl(keyboard_fd, KDSETMODE, KD_GRAPHICS); ioctl(console_fd, FBIOPUT_VSCREENINFO, &screen_vinfo); FB_RestorePaletteFrom(this, 256, screen_palette); if ( screen_contents ) { SDL_memcpy(screen->pixels, screen_contents, screen_arealen); SDL_free(screen_contents); screen_contents = NULL; } /* Get updates to the shadow surface while switched away */ if ( SDL_ShadowSurface ) { SDL_UpdateRect(SDL_ShadowSurface, 0, 0, 0, 0); } SDL_PrivateAppActive(1, (SDL_APPACTIVE|SDL_APPINPUTFOCUS|SDL_APPMOUSEFOCUS)); } static void switch_vt(_THIS, unsigned short which) { struct vt_stat vtstate; /* Figure out whether or not we're switching to a new console */ if ( (ioctl(keyboard_fd, VT_GETSTATE, &vtstate) < 0) || (which == vtstate.v_active) ) { return; } /* New console, switch to it */ SDL_mutexP(hw_lock); switch_vt_prep(this); if ( ioctl(keyboard_fd, VT_ACTIVATE, which) == 0 ) { ioctl(keyboard_fd, VT_WAITACTIVE, which); switched_away = 1; } else { 
switch_vt_done(this); } SDL_mutexV(hw_lock); } static void handle_keyboard(_THIS) { unsigned char keybuf[BUFSIZ]; int i, nread; int pressed; int scancode; SDL_keysym keysym; nread = read(keyboard_fd, keybuf, BUFSIZ); for ( i=0; i<nread; ++i ) { scancode = keybuf[i] & 0x7F; if ( keybuf[i] & 0x80 ) { pressed = SDL_RELEASED; } else { pressed = SDL_PRESSED; } TranslateKey(scancode, &keysym); /* Handle Ctrl-Alt-FN for vt switch */ switch (keysym.sym) { case SDLK_F1: case SDLK_F2: case SDLK_F3: case SDLK_F4: case SDLK_F5: case SDLK_F6: case SDLK_F7: case SDLK_F8: case SDLK_F9: case SDLK_F10: case SDLK_F11: case SDLK_F12: if ( (SDL_GetModState() & KMOD_CTRL) && (SDL_GetModState() & KMOD_ALT) ) { if ( pressed ) { switch_vt(this, (keysym.sym-SDLK_F1)+1); } break; } /* Fall through to normal processing */ default: posted += SDL_PrivateKeyboard(pressed, &keysym); break; } } } void FB_PumpEvents(_THIS) { fd_set fdset; int max_fd; static struct timeval zero; do { if ( switched_away ) { struct vt_stat vtstate; SDL_mutexP(hw_lock); if ( (ioctl(keyboard_fd, VT_GETSTATE, &vtstate) == 0) && vtstate.v_active == current_vt ) { switched_away = 0; switch_vt_done(this); } SDL_mutexV(hw_lock); } posted = 0; FD_ZERO(&fdset); max_fd = 0; if ( keyboard_fd >= 0 ) { FD_SET(keyboard_fd, &fdset); if ( max_fd < keyboard_fd ) { max_fd = keyboard_fd; } } if ( mouse_fd >= 0 ) { FD_SET(mouse_fd, &fdset); if ( max_fd < mouse_fd ) { max_fd = mouse_fd; } } if ( select(max_fd+1, &fdset, NULL, NULL, &zero) > 0 ) { if ( keyboard_fd >= 0 ) { if ( FD_ISSET(keyboard_fd, &fdset) ) { handle_keyboard(this); } } if ( mouse_fd >= 0 ) { if ( FD_ISSET(mouse_fd, &fdset) ) { handle_mouse(this); } } } } while ( posted ); } void FB_InitOSKeymap(_THIS) { int i; /* Initialize the Linux key translation table */ /* First get the ascii keys and others not well handled */ for (i=0; i<SDL_arraysize(keymap); ++i) { switch(i) { /* These aren't handled by the x86 kernel keymapping (?) 
*/ case SCANCODE_PRINTSCREEN: keymap[i] = SDLK_PRINT; break; case SCANCODE_BREAK: keymap[i] = SDLK_BREAK; break; case SCANCODE_BREAK_ALTERNATIVE: keymap[i] = SDLK_PAUSE; break; case SCANCODE_LEFTSHIFT: keymap[i] = SDLK_LSHIFT; break; case SCANCODE_RIGHTSHIFT: keymap[i] = SDLK_RSHIFT; break; case SCANCODE_LEFTCONTROL: keymap[i] = SDLK_LCTRL; break; case SCANCODE_RIGHTCONTROL: keymap[i] = SDLK_RCTRL; break; case SCANCODE_RIGHTWIN: keymap[i] = SDLK_RSUPER; break; case SCANCODE_LEFTWIN: keymap[i] = SDLK_LSUPER; break; case SCANCODE_LEFTALT: keymap[i] = SDLK_LALT; break; case SCANCODE_RIGHTALT: keymap[i] = SDLK_RALT; break; case 127: keymap[i] = SDLK_MENU; break; /* this should take care of all standard ascii keys */ default: keymap[i] = KVAL(vga_keymap[0][i]); break; } } for (i=0; i<SDL_arraysize(keymap); ++i) { switch(keymap_temp[i]) { case K_F1: keymap[i] = SDLK_F1; break; case K_F2: keymap[i] = SDLK_F2; break; case K_F3: keymap[i] = SDLK_F3; break; case K_F4: keymap[i] = SDLK_F4; break; case K_F5: keymap[i] = SDLK_F5; break; case K_F6: keymap[i] = SDLK_F6; break; case K_F7: keymap[i] = SDLK_F7; break; case K_F8: keymap[i] = SDLK_F8; break; case K_F9: keymap[i] = SDLK_F9; break; case K_F10: keymap[i] = SDLK_F10; break; case K_F11: keymap[i] = SDLK_F11; break; case K_F12: keymap[i] = SDLK_F12; break; case K_DOWN: keymap[i] = SDLK_DOWN; break; case K_LEFT: keymap[i] = SDLK_LEFT; break; case K_RIGHT: keymap[i] = SDLK_RIGHT; break; case K_UP: keymap[i] = SDLK_UP; break; case K_P0: keymap[i] = SDLK_KP0; break; case K_P1: keymap[i] = SDLK_KP1; break; case K_P2: keymap[i] = SDLK_KP2; break; case K_P3: keymap[i] = SDLK_KP3; break; case K_P4: keymap[i] = SDLK_KP4; break; case K_P5: keymap[i] = SDLK_KP5; break; case K_P6: keymap[i] = SDLK_KP6; break; case K_P7: keymap[i] = SDLK_KP7; break; case K_P8: keymap[i] = SDLK_KP8; break; case K_P9: keymap[i] = SDLK_KP9; break; case K_PPLUS: keymap[i] = SDLK_KP_PLUS; break; case K_PMINUS: keymap[i] = SDLK_KP_MINUS; break; case K_PSTAR: 
keymap[i] = SDLK_KP_MULTIPLY; break; case K_PSLASH: keymap[i] = SDLK_KP_DIVIDE; break; case K_PENTER: keymap[i] = SDLK_KP_ENTER; break; case K_PDOT: keymap[i] = SDLK_KP_PERIOD; break; case K_SHIFT: if ( keymap[i] != SDLK_RSHIFT ) keymap[i] = SDLK_LSHIFT; break; case K_SHIFTL: keymap[i] = SDLK_LSHIFT; break; case K_SHIFTR: keymap[i] = SDLK_RSHIFT; break; case K_CTRL: if ( keymap[i] != SDLK_RCTRL ) keymap[i] = SDLK_LCTRL; break; case K_CTRLL: keymap[i] = SDLK_LCTRL; break; case K_CTRLR: keymap[i] = SDLK_RCTRL; break; case K_ALT: keymap[i] = SDLK_LALT; break; case K_ALTGR: keymap[i] = SDLK_RALT; break; case K_INSERT: keymap[i] = SDLK_INSERT; break; case K_REMOVE: keymap[i] = SDLK_DELETE; break; case K_PGUP: keymap[i] = SDLK_PAGEUP; break; case K_PGDN: keymap[i] = SDLK_PAGEDOWN; break; case K_FIND: keymap[i] = SDLK_HOME; break; case K_SELECT: keymap[i] = SDLK_END; break; case K_NUM: keymap[i] = SDLK_NUMLOCK; break; case K_CAPS: keymap[i] = SDLK_CAPSLOCK; break; case K_F13: keymap[i] = SDLK_PRINT; break; case K_HOLD: keymap[i] = SDLK_SCROLLOCK; break; case K_PAUSE: keymap[i] = SDLK_PAUSE; break; case 127: keymap[i] = SDLK_BACKSPACE; break; default: break; } } } static SDL_keysym *TranslateKey(int scancode, SDL_keysym *keysym) { /* Set the keysym information */ keysym->scancode = scancode; keysym->sym = keymap[scancode]; keysym->mod = KMOD_NONE; /* If UNICODE is on, get the UNICODE value for the key */ keysym->unicode = 0; if ( SDL_TranslateUNICODE ) { int map; SDLMod modstate; modstate = SDL_GetModState(); map = 0; if ( modstate & KMOD_SHIFT ) { map |= (1<<KG_SHIFT); } if ( modstate & KMOD_CTRL ) { map |= (1<<KG_CTRL); } if ( modstate & KMOD_LALT ) { map |= (1<<KG_ALT); } if ( modstate & KMOD_RALT ) { map |= (1<<KG_ALTGR); } if ( KTYP(vga_keymap[map][scancode]) == KT_LETTER ) { if ( modstate & KMOD_CAPS ) { map ^= (1<<KG_SHIFT); } } if ( KTYP(vga_keymap[map][scancode]) == KT_PAD ) { if ( modstate & KMOD_NUM ) { keysym->unicode=KVAL(vga_keymap[map][scancode]); } } else { 
keysym->unicode = KVAL(vga_keymap[map][scancode]); } } return(keysym); }
gpl-2.0
cdleonard/valgrind-vex
test/rounderr.c
68
2169
/* peach (7400, altivec supported, 450MHz, gcc -O) m1 = 1.20000000000000018, exp = 1.19999999999999996 m2 = 1.19999999999998885, exp = 1.19999999999999996 */ /* peach (7400, altivec supported, 450MHz, gcc) m1 = 1.20000000000000018, exp = 1.19999999999999996 m2 = 1.19999999999998885, exp = 1.19999999999999996 */ /* phoenix, gcc -O m1 = 1.19999999999999996, exp = 1.19999999999999996 m2 = 1.19999999999999996, exp = 1.19999999999999996 */ /* phoenix, icc -O m1 = 1.19999999999999996, exp = 1.19999999999999996 m2 = 1.19999999999999996, exp = 1.19999999999999996 */ /* phoenix, gcc -O, iropt-level=2 m1 = 1.20000000000000040, exp = 1.19999999999999996 m2 = 1.19999999999999440, exp = 1.19999999999999996 */ /* phoenix, gcc -O, iropt-level=1/0 m1 = 1.20000000000000018, exp = 1.19999999999999996 m2 = 1.19999999999998885, exp = 1.19999999999999996 */ #include <stdlib.h> #include <stdio.h> #include <math.h> #define NNN 1000 double my_mean1 (const double data[], size_t stride, const size_t size) { /* Compute the arithmetic mean of a dataset using the recurrence relation mean_(n) = mean(n-1) + (data[n] - mean(n-1))/(n+1) */ long double mean = 0; size_t i; for (i = 0; i < size; i++) { mean += (data[i * stride] - mean) / (i + 1); } return mean; } double my_mean2 (const double data[], size_t stride, const size_t size) { /* Compute the arithmetic mean of a dataset using the obvious scheme. */ int i; long double sum = 0; for (i = 0; i < size; i++) sum += data[i * stride]; return sum / (double)size; } int main (void) { int i; const size_t nacc2 = NNN+1; double numacc2[NNN+1] ; numacc2[0] = 1.2 ; for (i = 1 ; i < NNN; i += 2) numacc2[i] = 1.1 ; for (i = 1 ; i < NNN; i += 2) numacc2[i+1] = 1.3 ; #if 1 asm __volatile__("fninit"); #endif { double m1 = my_mean1 (numacc2, 1, nacc2); double m2 = my_mean2 (numacc2, 1, nacc2); double expected_mean = 1.2; printf("m1 = %19.17f, exp = %19.17f\n", m1, expected_mean); printf("m2 = %19.17f, exp = %19.17f\n", m2, expected_mean); } return 0; }
gpl-2.0
CyanogenMod/android_kernel_samsung_n1
net/ipv6/ip6_output.c
324
41936
/* * IPv6 output functions * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Based on linux/net/ipv4/ip_output.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Changes: * A.N.Kuznetsov : airthmetics in fragmentation. * extension headers are implemented. * route changes now work. * ip6_forward does not confuse sniffers. * etc. * * H. von Brand : Added missing #include <linux/string.h> * Imran Patel : frag id should be in NBO * Kazunori MIYAZAWA @USAGI * : add ip6_append_data and related functions * for datagram xmit */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/in6.h> #include <linux/tcp.h> #include <linux/route.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv6.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/ndisc.h> #include <net/protocol.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/rawv6.h> #include <net/icmp.h> #include <net/xfrm.h> #include <net/checksum.h> #include <linux/mroute6.h> int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); int __ip6_local_out(struct sk_buff *skb) { int len; len = skb->len - sizeof(struct ipv6hdr); if (len > IPV6_MAXPLEN) len = 0; ipv6_hdr(skb)->payload_len = htons(len); return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev, dst_output); } int ip6_local_out(struct sk_buff *skb) { int err; err = __ip6_local_out(skb); if (likely(err == 1)) err = dst_output(skb); return err; } EXPORT_SYMBOL_GPL(ip6_local_out); /* dev_loopback_xmit for use with netfilter. 
*/ static int ip6_dev_loopback_xmit(struct sk_buff *newskb) { skb_reset_mac_header(newskb); __skb_pull(newskb, skb_network_offset(newskb)); newskb->pkt_type = PACKET_LOOPBACK; newskb->ip_summed = CHECKSUM_UNNECESSARY; WARN_ON(!skb_dst(newskb)); netif_rx_ni(newskb); return 0; } static int ip6_finish_output2(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct net_device *dev = dst->dev; struct neighbour *neigh; skb->protocol = htons(ETH_P_IPV6); skb->dev = dev; if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) && ((mroute6_socket(dev_net(dev), skb) && !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, &ipv6_hdr(skb)->saddr))) { struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); /* Do not check for IFF_ALLMULTI; multicast routing is not supported in any case. */ if (newskb) NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, newskb, NULL, newskb->dev, ip6_dev_loopback_xmit); if (ipv6_hdr(skb)->hop_limit == 0) { IP6_INC_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return 0; } } IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST, skb->len); } rcu_read_lock(); neigh = dst_get_neighbour(dst); if (neigh) { int res = neigh_output(neigh, skb); rcu_read_unlock(); return res; } rcu_read_unlock(); IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); kfree_skb(skb); return -EINVAL; } static int ip6_finish_output(struct sk_buff *skb) { if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || dst_allfrag(skb_dst(skb))) return ip6_fragment(skb, ip6_finish_output2); else return ip6_finish_output2(skb); } int ip6_output(struct sk_buff *skb) { struct net_device *dev = skb_dst(skb)->dev; struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); if (unlikely(idev->cnf.disable_ipv6)) { IP6_INC_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return 0; } 
return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev, ip6_finish_output, !(IP6CB(skb)->flags & IP6SKB_REROUTED)); } /* * xmit an sk_buff (used by TCP, SCTP and DCCP) */ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, struct ipv6_txoptions *opt) { struct net *net = sock_net(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct in6_addr *first_hop = &fl6->daddr; struct dst_entry *dst = skb_dst(skb); struct ipv6hdr *hdr; u8 proto = fl6->flowi6_proto; int seg_len = skb->len; int hlimit = -1; int tclass = 0; u32 mtu; if (opt) { unsigned int head_room; /* First: exthdrs may take lots of space (~8K for now) MAX_HEADER is not enough. */ head_room = opt->opt_nflen + opt->opt_flen; seg_len += head_room; head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev); if (skb_headroom(skb) < head_room) { struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room); if (skb2 == NULL) { IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return -ENOBUFS; } kfree_skb(skb); skb = skb2; skb_set_owner_w(skb, sk); } if (opt->opt_flen) ipv6_push_frag_opts(skb, opt, &proto); if (opt->opt_nflen) ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop); } skb_push(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); hdr = ipv6_hdr(skb); /* * Fill in the IPv6 header */ if (np) { tclass = np->tclass; hlimit = np->hop_limit; } if (hlimit < 0) hlimit = ip6_dst_hoplimit(dst); *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl6->flowlabel; hdr->payload_len = htons(seg_len); hdr->nexthdr = proto; hdr->hop_limit = hlimit; ipv6_addr_copy(&hdr->saddr, &fl6->saddr); ipv6_addr_copy(&hdr->daddr, first_hop); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; mtu = dst_mtu(dst); if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) { IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUT, skb->len); return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, dst_output); } if 
(net_ratelimit()) printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n"); skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return -EMSGSIZE; } EXPORT_SYMBOL(ip6_xmit); /* * To avoid extra problems ND packets are send through this * routine. It's code duplication but I really want to avoid * extra checks since ipv6_build_header is used by TCP (which * is for us performance critical) */ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev, const struct in6_addr *saddr, const struct in6_addr *daddr, int proto, int len) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6hdr *hdr; skb->protocol = htons(ETH_P_IPV6); skb->dev = dev; skb_reset_network_header(skb); skb_put(skb, sizeof(struct ipv6hdr)); hdr = ipv6_hdr(skb); *(__be32*)hdr = htonl(0x60000000); hdr->payload_len = htons(len); hdr->nexthdr = proto; hdr->hop_limit = np->hop_limit; ipv6_addr_copy(&hdr->saddr, saddr); ipv6_addr_copy(&hdr->daddr, daddr); return 0; } static int ip6_call_ra_chain(struct sk_buff *skb, int sel) { struct ip6_ra_chain *ra; struct sock *last = NULL; read_lock(&ip6_ra_lock); for (ra = ip6_ra_chain; ra; ra = ra->next) { struct sock *sk = ra->sk; if (sk && ra->sel == sel && (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == skb->dev->ifindex)) { if (last) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) rawv6_rcv(last, skb2); } last = sk; } } if (last) { rawv6_rcv(last, skb); read_unlock(&ip6_ra_lock); return 1; } read_unlock(&ip6_ra_lock); return 0; } static int ip6_forward_proxy_check(struct sk_buff *skb) { struct ipv6hdr *hdr = ipv6_hdr(skb); u8 nexthdr = hdr->nexthdr; int offset; if (ipv6_ext_hdr(nexthdr)) { offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr); if (offset < 0) return 0; } else offset = sizeof(struct ipv6hdr); if (nexthdr == IPPROTO_ICMPV6) { struct icmp6hdr *icmp6; if (!pskb_may_pull(skb, (skb_network_header(skb) + offset + 1 - 
skb->data))) return 0; icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset); switch (icmp6->icmp6_type) { case NDISC_ROUTER_SOLICITATION: case NDISC_ROUTER_ADVERTISEMENT: case NDISC_NEIGHBOUR_SOLICITATION: case NDISC_NEIGHBOUR_ADVERTISEMENT: case NDISC_REDIRECT: /* For reaction involving unicast neighbor discovery * message destined to the proxied address, pass it to * input function. */ return 1; default: break; } } /* * The proxying router can't forward traffic sent to a link-local * address, so signal the sender and discard the packet. This * behavior is clarified by the MIPv6 specification. */ if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) { dst_link_failure(skb); return -1; } return 0; } static inline int ip6_forward_finish(struct sk_buff *skb) { return dst_output(skb); } int ip6_forward(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct ipv6hdr *hdr = ipv6_hdr(skb); struct inet6_skb_parm *opt = IP6CB(skb); struct net *net = dev_net(dst->dev); struct neighbour *n; u32 mtu; if (net->ipv6.devconf_all->forwarding == 0) goto error; if (skb_warn_if_lro(skb)) goto drop; if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } if (skb->pkt_type != PACKET_HOST) goto drop; skb_forward_csum(skb); /* * We DO NOT make any processing on * RA packets, pushing them to user level AS IS * without ane WARRANTY that application will be able * to interpret them. The reason is that we * cannot make anything clever here. * * We are not end-node, so that if packet contains * AH/ESP, we cannot make anything. * Defragmentation also would be mistake, RA packets * cannot be fragmented, because there is no warranty * that different fragments will go along one path. 
--ANK */ if (opt->ra) { u8 *ptr = skb_network_header(skb) + opt->ra; if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3])) return 0; } /* * check and decrement ttl */ if (hdr->hop_limit <= 1) { /* Force OUTPUT device used as source address */ skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0); IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -ETIMEDOUT; } /* XXX: idev->cnf.proxy_ndp? */ if (net->ipv6.devconf_all->proxy_ndp && pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) { int proxied = ip6_forward_proxy_check(skb); if (proxied > 0) return ip6_input(skb); else if (proxied < 0) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } } if (!xfrm6_route_forward(skb)) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } dst = skb_dst(skb); /* IPv6 specs say nothing about it, but it is clear that we cannot send redirects to source routed frames. We don't send redirects to frames decapsulated from IPsec. */ n = dst_get_neighbour(dst); if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) { struct in6_addr *target = NULL; struct rt6_info *rt; /* * incoming and outgoing devices are the same * send a redirect. */ rt = (struct rt6_info *) dst; if ((rt->rt6i_flags & RTF_GATEWAY)) target = (struct in6_addr*)&n->primary_key; else target = &hdr->daddr; if (!rt->rt6i_peer) rt6_bind_peer(rt, 1); /* Limit redirects both by destination (here) and by source (inside ndisc_send_redirect) */ if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ)) ndisc_send_redirect(skb, n, target); } else { int addrtype = ipv6_addr_type(&hdr->saddr); /* This check is security critical. 
*/ if (addrtype == IPV6_ADDR_ANY || addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK)) goto error; if (addrtype & IPV6_ADDR_LINKLOCAL) { icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOT_NEIGHBOUR, 0); goto error; } } mtu = dst_mtu(dst); if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; if (skb->len > mtu && !skb_is_gso(skb)) { /* Again, force OUTPUT device used as source address */ skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS); IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return -EMSGSIZE; } if (skb_cow(skb, dst->dev->hard_header_len)) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS); goto drop; } hdr = ipv6_hdr(skb); /* Mangling hops number delayed to point after skb COW */ hdr->hop_limit--; IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish); error: IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS); drop: kfree_skb(skb); return -EINVAL; } static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) { to->pkt_type = from->pkt_type; to->priority = from->priority; to->protocol = from->protocol; skb_dst_drop(to); skb_dst_set(to, dst_clone(skb_dst(from))); to->dev = from->dev; to->mark = from->mark; #ifdef CONFIG_NET_SCHED to->tc_index = from->tc_index; #endif nf_copy(to, from); #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) to->nf_trace = from->nf_trace; #endif skb_copy_secmark(to, from); } int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) { u16 offset = sizeof(struct ipv6hdr); struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); unsigned int packet_len = skb->tail - skb->network_header; int found_rhdr = 0; *nexthdr = &ipv6_hdr(skb)->nexthdr; while (offset + 1 <= packet_len) { switch (**nexthdr) { case 
NEXTHDR_HOP: break; case NEXTHDR_ROUTING: found_rhdr = 1; break; case NEXTHDR_DEST: #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) break; #endif if (found_rhdr) return offset; break; default : return offset; } offset += ipv6_optlen(exthdr); *nexthdr = &exthdr->nexthdr; exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + offset); } return offset; } void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt) { static atomic_t ipv6_fragmentation_id; int old, new; if (rt && !(rt->dst.flags & DST_NOPEER)) { struct inet_peer *peer; if (!rt->rt6i_peer) rt6_bind_peer(rt, 1); peer = rt->rt6i_peer; if (peer) { fhdr->identification = htonl(inet_getid(peer, 0)); return; } } do { old = atomic_read(&ipv6_fragmentation_id); new = old + 1; if (!new) new = 1; } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old); fhdr->identification = htonl(new); } int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) { struct sk_buff *frag; struct rt6_info *rt = (struct rt6_info*)skb_dst(skb); struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; struct ipv6hdr *tmp_hdr; struct frag_hdr *fh; unsigned int mtu, hlen, left, len; __be32 frag_id = 0; int ptr, offset = 0, err=0; u8 *prevhdr, nexthdr = 0; struct net *net = dev_net(skb_dst(skb)->dev); hlen = ip6_find_1stfragopt(skb, &prevhdr); nexthdr = *prevhdr; mtu = ip6_skb_dst_mtu(skb); /* We must not fragment if the socket is set to force MTU discovery * or if the skb it not generated by a local socket. 
*/ if (!skb->local_df && skb->len > mtu) { skb->dev = skb_dst(skb)->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return -EMSGSIZE; } if (np && np->frag_size < mtu) { if (np->frag_size) mtu = np->frag_size; } mtu -= hlen + sizeof(struct frag_hdr); if (skb_has_frag_list(skb)) { int first_len = skb_pagelen(skb); struct sk_buff *frag2; if (first_len - hlen > mtu || ((first_len - hlen) & 7) || skb_cloned(skb)) goto slow_path; skb_walk_frags(skb, frag) { /* Correct geometry. */ if (frag->len > mtu || ((frag->len & 7) && frag->next) || skb_headroom(frag) < hlen) goto slow_path_clean; /* Partially cloned skb? */ if (skb_shared(frag)) goto slow_path_clean; BUG_ON(frag->sk); if (skb->sk) { frag->sk = skb->sk; frag->destructor = sock_wfree; } skb->truesize -= frag->truesize; } err = 0; offset = 0; frag = skb_shinfo(skb)->frag_list; skb_frag_list_init(skb); /* BUILD HEADER */ *prevhdr = NEXTHDR_FRAGMENT; tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); if (!tmp_hdr) { IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); return -ENOMEM; } __skb_pull(skb, hlen); fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr)); __skb_push(skb, hlen); skb_reset_network_header(skb); memcpy(skb_network_header(skb), tmp_hdr, hlen); ipv6_select_ident(fh, rt); fh->nexthdr = nexthdr; fh->reserved = 0; fh->frag_off = htons(IP6_MF); frag_id = fh->identification; first_len = skb_pagelen(skb); skb->data_len = first_len - skb_headlen(skb); skb->len = first_len; ipv6_hdr(skb)->payload_len = htons(first_len - sizeof(struct ipv6hdr)); dst_hold(&rt->dst); for (;;) { /* Prepare header of the next frame, * before previous one went down. 
*/ if (frag) { frag->ip_summed = CHECKSUM_NONE; skb_reset_transport_header(frag); fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr)); __skb_push(frag, hlen); skb_reset_network_header(frag); memcpy(skb_network_header(frag), tmp_hdr, hlen); offset += skb->len - hlen - sizeof(struct frag_hdr); fh->nexthdr = nexthdr; fh->reserved = 0; fh->frag_off = htons(offset); if (frag->next != NULL) fh->frag_off |= htons(IP6_MF); fh->identification = frag_id; ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); ip6_copy_metadata(frag, skb); } err = output(skb); if(!err) IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGCREATES); if (err || !frag) break; skb = frag; frag = skb->next; skb->next = NULL; } kfree(tmp_hdr); if (err == 0) { IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGOKS); dst_release(&rt->dst); return 0; } while (frag) { skb = frag->next; kfree_skb(frag); frag = skb; } IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGFAILS); dst_release(&rt->dst); return err; slow_path_clean: skb_walk_frags(skb, frag2) { if (frag2 == frag) break; frag2->sk = NULL; frag2->destructor = NULL; skb->truesize += frag2->truesize; } } slow_path: left = skb->len - hlen; /* Space per frame */ ptr = hlen; /* Where to start from */ /* * Fragment the datagram. */ *prevhdr = NEXTHDR_FRAGMENT; /* * Keep copying data until we run out. */ while(left > 0) { len = left; /* IF: it doesn't fit, use 'mtu' - the data space left */ if (len > mtu) len = mtu; /* IF: we are not sending up to and including the packet end then align the next start on an eight byte boundary */ if (len < left) { len &= ~7; } /* * Allocate buffer. 
*/ if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->dst.dev), GFP_ATOMIC)) == NULL) { NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); err = -ENOMEM; goto fail; } /* * Set up data on packet */ ip6_copy_metadata(frag, skb); skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev)); skb_put(frag, len + hlen + sizeof(struct frag_hdr)); skb_reset_network_header(frag); fh = (struct frag_hdr *)(skb_network_header(frag) + hlen); frag->transport_header = (frag->network_header + hlen + sizeof(struct frag_hdr)); /* * Charge the memory for the fragment to any owner * it might possess */ if (skb->sk) skb_set_owner_w(frag, skb->sk); /* * Copy the packet header into the new buffer. */ skb_copy_from_linear_data(skb, skb_network_header(frag), hlen); /* * Build fragment header. */ fh->nexthdr = nexthdr; fh->reserved = 0; if (!frag_id) { ipv6_select_ident(fh, rt); frag_id = fh->identification; } else fh->identification = frag_id; /* * Copy a block of the IP datagram. */ if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len)) BUG(); left -= len; fh->frag_off = htons(offset); if (left > 0) fh->frag_off |= htons(IP6_MF); ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); ptr += len; offset += len; /* * Put this fragment into the sending queue. 
*/ err = output(frag); if (err) goto fail; IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGCREATES); } IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGOKS); kfree_skb(skb); return err; fail: IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return err; } static inline int ip6_rt_check(const struct rt6key *rt_key, const struct in6_addr *fl_addr, const struct in6_addr *addr_cache) { return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) && (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)); } static struct dst_entry *ip6_sk_dst_check(struct sock *sk, struct dst_entry *dst, const struct flowi6 *fl6) { struct ipv6_pinfo *np = inet6_sk(sk); struct rt6_info *rt = (struct rt6_info *)dst; if (!dst) goto out; /* Yes, checking route validity in not connected * case is not very simple. Take into account, * that we do not support routing by source, TOS, * and MSG_DONTROUTE --ANK (980726) * * 1. ip6_rt_check(): If route was host route, * check that cached destination is current. * If it is network route, we still may * check its validity using saved pointer * to the last used address: daddr_cache. * We do not want to save whole address now, * (because main consumer of this service * is tcp, which has not this problem), * so that the last trick works only on connected * sockets. * 2. oif also should be the same. 
*/ if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) || #ifdef CONFIG_IPV6_SUBTREES ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) || #endif (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) { dst_release(dst); dst = NULL; } out: return dst; } static int ip6_dst_lookup_tail(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6) { struct net *net = sock_net(sk); #ifdef CONFIG_IPV6_OPTIMISTIC_DAD struct neighbour *n; #endif int err; if (*dst == NULL) *dst = ip6_route_output(net, sk, fl6); if ((err = (*dst)->error)) goto out_err_release; if (ipv6_addr_any(&fl6->saddr)) { struct rt6_info *rt = (struct rt6_info *) *dst; err = ip6_route_get_saddr(net, rt, &fl6->daddr, sk ? inet6_sk(sk)->srcprefs : 0, &fl6->saddr); if (err) goto out_err_release; } #ifdef CONFIG_IPV6_OPTIMISTIC_DAD /* * Here if the dst entry we've looked up * has a neighbour entry that is in the INCOMPLETE * state and the src address from the flow is * marked as OPTIMISTIC, we release the found * dst entry and replace it instead with the * dst entry of the nexthop router */ rcu_read_lock(); n = dst_get_neighbour(*dst); if (n && !(n->nud_state & NUD_VALID)) { struct inet6_ifaddr *ifp; struct flowi6 fl_gw6; int redirect; rcu_read_unlock(); ifp = ipv6_get_ifaddr(net, &fl6->saddr, (*dst)->dev, 1); redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC); if (ifp) in6_ifa_put(ifp); if (redirect) { /* * We need to get the dst entry for the * default router instead */ dst_release(*dst); memcpy(&fl_gw6, fl6, sizeof(struct flowi6)); memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr)); *dst = ip6_route_output(net, sk, &fl_gw6); if ((err = (*dst)->error)) goto out_err_release; } } else { rcu_read_unlock(); } #endif return 0; out_err_release: if (err == -ENETUNREACH) IP6_INC_STATS_BH(net, NULL, IPSTATS_MIB_OUTNOROUTES); dst_release(*dst); *dst = NULL; return err; } /** * ip6_dst_lookup - perform route lookup on flow * @sk: socket which provides route info * @dst: pointer to dst_entry * for 
result * @fl6: flow to lookup * * This function performs a route lookup on the given flow. * * It returns zero on success, or a standard errno code on error. */ int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6) { *dst = NULL; return ip6_dst_lookup_tail(sk, dst, fl6); } EXPORT_SYMBOL_GPL(ip6_dst_lookup); /** * ip6_dst_lookup_flow - perform route lookup on flow with ipsec * @sk: socket which provides route info * @fl6: flow to lookup * @final_dst: final destination address for ipsec lookup * @can_sleep: we are in a sleepable context * * This function performs a route lookup on the given flow. * * It returns a valid dst pointer on success, or a pointer encoded * error code. */ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst, bool can_sleep) { struct dst_entry *dst = NULL; int err; err = ip6_dst_lookup_tail(sk, &dst, fl6); if (err) return ERR_PTR(err); if (final_dst) ipv6_addr_copy(&fl6->daddr, final_dst); if (can_sleep) fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP; return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); } EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow); /** * ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow * @sk: socket which provides the dst cache and route info * @fl6: flow to lookup * @final_dst: final destination address for ipsec lookup * @can_sleep: we are in a sleepable context * * This function performs a route lookup on the given flow with the * possibility of using the cached route in the socket if it is valid. * It will take the socket dst lock when operating on the dst cache. * As a result, this function can only be used in process context. * * It returns a valid dst pointer on success, or a pointer encoded * error code. 
*/ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst, bool can_sleep) { struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie); int err; dst = ip6_sk_dst_check(sk, dst, fl6); err = ip6_dst_lookup_tail(sk, &dst, fl6); if (err) return ERR_PTR(err); if (final_dst) ipv6_addr_copy(&fl6->daddr, final_dst); if (can_sleep) fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP; return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); } EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow); static inline int ip6_ufo_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int hh_len, int fragheaderlen, int transhdrlen, int mtu,unsigned int flags, struct rt6_info *rt) { struct sk_buff *skb; int err; /* There is support for UDP large send offload by network * device, so create one single skb packet containing complete * udp datagram */ if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { skb = sock_alloc_send_skb(sk, hh_len + fragheaderlen + transhdrlen + 20, (flags & MSG_DONTWAIT), &err); if (skb == NULL) return -ENOMEM; /* reserve space for Hardware header */ skb_reserve(skb, hh_len); /* create space for UDP/IP header */ skb_put(skb,fragheaderlen + transhdrlen); /* initialize network header pointer */ skb_reset_network_header(skb); /* initialize protocol header pointer */ skb->transport_header = skb->network_header + fragheaderlen; skb->ip_summed = CHECKSUM_PARTIAL; skb->csum = 0; } err = skb_append_datato_frags(sk,skb, getfrag, from, (length - transhdrlen)); if (!err) { struct frag_hdr fhdr; /* Specify the length of each IPv6 datagram fragment. * It has to be a multiple of 8. 
*/ skb_shinfo(skb)->gso_size = (mtu - fragheaderlen - sizeof(struct frag_hdr)) & ~7; skb_shinfo(skb)->gso_type = SKB_GSO_UDP; ipv6_select_ident(&fhdr, rt); skb_shinfo(skb)->ip6_frag_id = fhdr.identification; __skb_queue_tail(&sk->sk_write_queue, skb); return 0; } /* There is not enough support do UPD LSO, * so follow normal path */ kfree_skb(skb); return err; } static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src, gfp_t gfp) { return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL; } static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src, gfp_t gfp) { return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL; } int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags, int dontfrag) { struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct inet_cork *cork; struct sk_buff *skb; unsigned int maxfraglen, fragheaderlen; int exthdrlen; int hh_len; int mtu; int copy; int err; int offset = 0; int csummode = CHECKSUM_NONE; __u8 tx_flags = 0; if (flags&MSG_PROBE) return 0; cork = &inet->cork.base; if (skb_queue_empty(&sk->sk_write_queue)) { /* * setup for corking */ if (opt) { if (WARN_ON(np->cork.opt)) return -EINVAL; np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation); if (unlikely(np->cork.opt == NULL)) return -ENOBUFS; np->cork.opt->tot_len = opt->tot_len; np->cork.opt->opt_flen = opt->opt_flen; np->cork.opt->opt_nflen = opt->opt_nflen; np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt, sk->sk_allocation); if (opt->dst0opt && !np->cork.opt->dst0opt) return -ENOBUFS; np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt, sk->sk_allocation); if (opt->dst1opt && !np->cork.opt->dst1opt) return -ENOBUFS; np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt, sk->sk_allocation); if (opt->hopopt && 
!np->cork.opt->hopopt) return -ENOBUFS; np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt, sk->sk_allocation); if (opt->srcrt && !np->cork.opt->srcrt) return -ENOBUFS; /* need source address above miyazawa*/ } dst_hold(&rt->dst); cork->dst = &rt->dst; inet->cork.fl.u.ip6 = *fl6; np->cork.hop_limit = hlimit; np->cork.tclass = tclass; mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ? rt->dst.dev->mtu : dst_mtu(rt->dst.path); if (np->frag_size < mtu) { if (np->frag_size) mtu = np->frag_size; } cork->fragsize = mtu; if (dst_allfrag(rt->dst.path)) cork->flags |= IPCORK_ALLFRAG; cork->length = 0; sk->sk_sndmsg_page = NULL; sk->sk_sndmsg_off = 0; exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len; length += exthdrlen; transhdrlen += exthdrlen; } else { rt = (struct rt6_info *)cork->dst; fl6 = &inet->cork.fl.u.ip6; opt = np->cork.opt; transhdrlen = 0; exthdrlen = 0; mtu = cork->fragsize; } hh_len = LL_RESERVED_SPACE(rt->dst.dev); fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + (opt ? opt->opt_nflen : 0); maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr); if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) { if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) { ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen); return -EMSGSIZE; } } /* For UDP, check if TX timestamp is enabled */ if (sk->sk_type == SOCK_DGRAM) { err = sock_tx_timestamp(sk, &tx_flags); if (err) goto error; } /* * Let's try using as much space as possible. * Use MTU if total length of the message fits into the MTU. * Otherwise, we need to reserve fragment header and * fragment alignment (= 8-15 octects, in total). * * Note that we may need to "move" the data from the tail of * of the buffer to the new fragment when we split * the message. * * FIXME: It may be fragmented into multiple chunks * at once if non-fragmentable extension headers * are too large. 
* --yoshfuji */ cork->length += length; if (length > mtu) { int proto = sk->sk_protocol; if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){ ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen); return -EMSGSIZE; } if (proto == IPPROTO_UDP && (rt->dst.dev->features & NETIF_F_UFO)) { err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len, fragheaderlen, transhdrlen, mtu, flags, rt); if (err) goto error; return 0; } } if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) goto alloc_new_skb; while (length > 0) { /* Check if the remaining data fits into current packet. */ copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len; if (copy < length) copy = maxfraglen - skb->len; if (copy <= 0) { char *data; unsigned int datalen; unsigned int fraglen; unsigned int fraggap; unsigned int alloclen; struct sk_buff *skb_prev; alloc_new_skb: skb_prev = skb; /* There's no room in the current skb */ if (skb_prev) fraggap = skb_prev->len - maxfraglen; else fraggap = 0; /* * If remaining data exceeds the mtu, * we know we need more fragment(s). */ datalen = length + fraggap; if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen) datalen = maxfraglen - fragheaderlen; fraglen = datalen + fragheaderlen; if ((flags & MSG_MORE) && !(rt->dst.dev->features&NETIF_F_SG)) alloclen = mtu; else alloclen = datalen + fragheaderlen; /* * The last fragment gets additional space at tail. * Note: we overallocate on fragments with MSG_MODE * because we have no idea if we're the last one. */ if (datalen == length + fraggap) alloclen += rt->dst.trailer_len; /* * We just reserve space for fragment header. * Note: this may be overallocation if the message * (without MSG_MORE) fits into the MTU. 
*/ alloclen += sizeof(struct frag_hdr); if (transhdrlen) { skb = sock_alloc_send_skb(sk, alloclen + hh_len, (flags & MSG_DONTWAIT), &err); } else { skb = NULL; if (atomic_read(&sk->sk_wmem_alloc) <= 2 * sk->sk_sndbuf) skb = sock_wmalloc(sk, alloclen + hh_len, 1, sk->sk_allocation); if (unlikely(skb == NULL)) err = -ENOBUFS; else { /* Only the initial fragment * is time stamped. */ tx_flags = 0; } } if (skb == NULL) goto error; /* * Fill in the control structures */ skb->ip_summed = csummode; skb->csum = 0; /* reserve for fragmentation */ skb_reserve(skb, hh_len+sizeof(struct frag_hdr)); if (sk->sk_type == SOCK_DGRAM) skb_shinfo(skb)->tx_flags = tx_flags; /* * Find where to start putting bytes */ data = skb_put(skb, fraglen); skb_set_network_header(skb, exthdrlen); data += fragheaderlen; skb->transport_header = (skb->network_header + fragheaderlen); if (fraggap) { skb->csum = skb_copy_and_csum_bits( skb_prev, maxfraglen, data + transhdrlen, fraggap, 0); skb_prev->csum = csum_sub(skb_prev->csum, skb->csum); data += fraggap; pskb_trim_unique(skb_prev, maxfraglen); } copy = datalen - transhdrlen - fraggap; if (copy < 0) { err = -EINVAL; kfree_skb(skb); goto error; } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) { err = -EFAULT; kfree_skb(skb); goto error; } offset += copy; length -= datalen - fraggap; transhdrlen = 0; exthdrlen = 0; csummode = CHECKSUM_NONE; /* * Put the packet on the pending queue */ __skb_queue_tail(&sk->sk_write_queue, skb); continue; } if (copy > length) copy = length; if (!(rt->dst.dev->features&NETIF_F_SG)) { unsigned int off; off = skb->len; if (getfrag(from, skb_put(skb, copy), offset, copy, off, skb) < 0) { __skb_trim(skb, off); err = -EFAULT; goto error; } } else { int i = skb_shinfo(skb)->nr_frags; skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1]; struct page *page = sk->sk_sndmsg_page; int off = sk->sk_sndmsg_off; unsigned int left; if (page && (left = PAGE_SIZE - off) > 0) { if (copy >= left) copy = 
left; if (page != frag->page) { if (i == MAX_SKB_FRAGS) { err = -EMSGSIZE; goto error; } get_page(page); skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0); frag = &skb_shinfo(skb)->frags[i]; } } else if(i < MAX_SKB_FRAGS) { if (copy > PAGE_SIZE) copy = PAGE_SIZE; page = alloc_pages(sk->sk_allocation, 0); if (page == NULL) { err = -ENOMEM; goto error; } sk->sk_sndmsg_page = page; sk->sk_sndmsg_off = 0; skb_fill_page_desc(skb, i, page, 0, 0); frag = &skb_shinfo(skb)->frags[i]; } else { err = -EMSGSIZE; goto error; } if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) { err = -EFAULT; goto error; } sk->sk_sndmsg_off += copy; frag->size += copy; skb->len += copy; skb->data_len += copy; skb->truesize += copy; atomic_add(copy, &sk->sk_wmem_alloc); } offset += copy; length -= copy; } return 0; error: cork->length -= length; IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); return err; } static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np) { if (np->cork.opt) { kfree(np->cork.opt->dst0opt); kfree(np->cork.opt->dst1opt); kfree(np->cork.opt->hopopt); kfree(np->cork.opt->srcrt); kfree(np->cork.opt); np->cork.opt = NULL; } if (inet->cork.base.dst) { dst_release(inet->cork.base.dst); inet->cork.base.dst = NULL; inet->cork.base.flags &= ~IPCORK_ALLFRAG; } memset(&inet->cork.fl, 0, sizeof(inet->cork.fl)); } int ip6_push_pending_frames(struct sock *sk) { struct sk_buff *skb, *tmp_skb; struct sk_buff **tail_skb; struct in6_addr final_dst_buf, *final_dst = &final_dst_buf; struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct net *net = sock_net(sk); struct ipv6hdr *hdr; struct ipv6_txoptions *opt = np->cork.opt; struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst; struct flowi6 *fl6 = &inet->cork.fl.u.ip6; unsigned char proto = fl6->flowi6_proto; int err = 0; if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL) goto out; tail_skb = 
&(skb_shinfo(skb)->frag_list); /* move skb->data to ip header from ext header */ if (skb->data < skb_network_header(skb)) __skb_pull(skb, skb_network_offset(skb)); while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { __skb_pull(tmp_skb, skb_network_header_len(skb)); *tail_skb = tmp_skb; tail_skb = &(tmp_skb->next); skb->len += tmp_skb->len; skb->data_len += tmp_skb->len; skb->truesize += tmp_skb->truesize; tmp_skb->destructor = NULL; tmp_skb->sk = NULL; } /* Allow local fragmentation. */ if (np->pmtudisc < IPV6_PMTUDISC_DO) skb->local_df = 1; ipv6_addr_copy(final_dst, &fl6->daddr); __skb_pull(skb, skb_network_header_len(skb)); if (opt && opt->opt_flen) ipv6_push_frag_opts(skb, opt, &proto); if (opt && opt->opt_nflen) ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst); skb_push(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); hdr = ipv6_hdr(skb); *(__be32*)hdr = fl6->flowlabel | htonl(0x60000000 | ((int)np->cork.tclass << 20)); hdr->hop_limit = np->cork.hop_limit; hdr->nexthdr = proto; ipv6_addr_copy(&hdr->saddr, &fl6->saddr); ipv6_addr_copy(&hdr->daddr, final_dst); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; skb_dst_set(skb, dst_clone(&rt->dst)); IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); if (proto == IPPROTO_ICMPV6) { struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type); ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); } err = ip6_local_out(skb); if (err) { if (err > 0) err = net_xmit_errno(err); if (err) goto error; } out: ip6_cork_release(inet, np); return err; error: IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); goto out; } void ip6_flush_pending_frames(struct sock *sk) { struct sk_buff *skb; while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) { if (skb_dst(skb)) IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); } ip6_cork_release(inet_sk(sk), inet6_sk(sk)); }
gpl-2.0
1HLtd/linux
sound/soc/codecs/ak4104.c
324
7652
/* * AK4104 ALSA SoC (ASoC) driver * * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/soc.h> #include <sound/initval.h> #include <linux/spi/spi.h> #include <linux/of_device.h> #include <linux/of_gpio.h> #include <sound/asoundef.h> /* AK4104 registers addresses */ #define AK4104_REG_CONTROL1 0x00 #define AK4104_REG_RESERVED 0x01 #define AK4104_REG_CONTROL2 0x02 #define AK4104_REG_TX 0x03 #define AK4104_REG_CHN_STATUS(x) ((x) + 0x04) #define AK4104_NUM_REGS 10 #define AK4104_REG_MASK 0x1f #define AK4104_READ 0xc0 #define AK4104_WRITE 0xe0 #define AK4104_RESERVED_VAL 0x5b /* Bit masks for AK4104 registers */ #define AK4104_CONTROL1_RSTN (1 << 0) #define AK4104_CONTROL1_PW (1 << 1) #define AK4104_CONTROL1_DIF0 (1 << 2) #define AK4104_CONTROL1_DIF1 (1 << 3) #define AK4104_CONTROL2_SEL0 (1 << 0) #define AK4104_CONTROL2_SEL1 (1 << 1) #define AK4104_CONTROL2_MODE (1 << 2) #define AK4104_TX_TXE (1 << 0) #define AK4104_TX_V (1 << 1) struct ak4104_private { struct regmap *regmap; }; static const struct snd_soc_dapm_widget ak4104_dapm_widgets[] = { SND_SOC_DAPM_PGA("TXE", AK4104_REG_TX, AK4104_TX_TXE, 0, NULL, 0), SND_SOC_DAPM_OUTPUT("TX"), }; static const struct snd_soc_dapm_route ak4104_dapm_routes[] = { { "TXE", NULL, "Playback" }, { "TX", NULL, "TXE" }, }; static int ak4104_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int format) { struct snd_soc_codec *codec = codec_dai->codec; struct ak4104_private *ak4104 = snd_soc_codec_get_drvdata(codec); int val = 0; int ret; /* set DAI format */ switch (format & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_RIGHT_J: break; case SND_SOC_DAIFMT_LEFT_J: val |= AK4104_CONTROL1_DIF0; 
break; case SND_SOC_DAIFMT_I2S: val |= AK4104_CONTROL1_DIF0 | AK4104_CONTROL1_DIF1; break; default: dev_err(codec->dev, "invalid dai format\n"); return -EINVAL; } /* This device can only be slave */ if ((format & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) return -EINVAL; ret = regmap_update_bits(ak4104->regmap, AK4104_REG_CONTROL1, AK4104_CONTROL1_DIF0 | AK4104_CONTROL1_DIF1, val); if (ret < 0) return ret; return 0; } static int ak4104_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct ak4104_private *ak4104 = snd_soc_codec_get_drvdata(codec); int ret, val = 0; /* set the IEC958 bits: consumer mode, no copyright bit */ val |= IEC958_AES0_CON_NOT_COPYRIGHT; regmap_write(ak4104->regmap, AK4104_REG_CHN_STATUS(0), val); val = 0; switch (params_rate(params)) { case 22050: val |= IEC958_AES3_CON_FS_22050; break; case 24000: val |= IEC958_AES3_CON_FS_24000; break; case 32000: val |= IEC958_AES3_CON_FS_32000; break; case 44100: val |= IEC958_AES3_CON_FS_44100; break; case 48000: val |= IEC958_AES3_CON_FS_48000; break; case 88200: val |= IEC958_AES3_CON_FS_88200; break; case 96000: val |= IEC958_AES3_CON_FS_96000; break; case 176400: val |= IEC958_AES3_CON_FS_176400; break; case 192000: val |= IEC958_AES3_CON_FS_192000; break; default: dev_err(codec->dev, "unsupported sampling rate\n"); return -EINVAL; } ret = regmap_write(ak4104->regmap, AK4104_REG_CHN_STATUS(3), val); if (ret < 0) return ret; return 0; } static const struct snd_soc_dai_ops ak4101_dai_ops = { .hw_params = ak4104_hw_params, .set_fmt = ak4104_set_dai_fmt, }; static struct snd_soc_dai_driver ak4104_dai = { .name = "ak4104-hifi", .playback = { .stream_name = "Playback", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_192000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE }, .ops = &ak4101_dai_ops, }; static int ak4104_probe(struct 
snd_soc_codec *codec) { struct ak4104_private *ak4104 = snd_soc_codec_get_drvdata(codec); int ret; codec->control_data = ak4104->regmap; /* set power-up and non-reset bits */ ret = regmap_update_bits(ak4104->regmap, AK4104_REG_CONTROL1, AK4104_CONTROL1_PW | AK4104_CONTROL1_RSTN, AK4104_CONTROL1_PW | AK4104_CONTROL1_RSTN); if (ret < 0) return ret; /* enable transmitter */ ret = regmap_update_bits(ak4104->regmap, AK4104_REG_TX, AK4104_TX_TXE, AK4104_TX_TXE); if (ret < 0) return ret; return 0; } static int ak4104_remove(struct snd_soc_codec *codec) { struct ak4104_private *ak4104 = snd_soc_codec_get_drvdata(codec); regmap_update_bits(ak4104->regmap, AK4104_REG_CONTROL1, AK4104_CONTROL1_PW | AK4104_CONTROL1_RSTN, 0); return 0; } static struct snd_soc_codec_driver soc_codec_device_ak4104 = { .probe = ak4104_probe, .remove = ak4104_remove, .dapm_widgets = ak4104_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(ak4104_dapm_widgets), .dapm_routes = ak4104_dapm_routes, .num_dapm_routes = ARRAY_SIZE(ak4104_dapm_routes), }; static const struct regmap_config ak4104_regmap = { .reg_bits = 8, .val_bits = 8, .max_register = AK4104_NUM_REGS - 1, .read_flag_mask = AK4104_READ, .write_flag_mask = AK4104_WRITE, .cache_type = REGCACHE_RBTREE, }; static int ak4104_spi_probe(struct spi_device *spi) { struct device_node *np = spi->dev.of_node; struct ak4104_private *ak4104; unsigned int val; int ret; spi->bits_per_word = 8; spi->mode = SPI_MODE_0; ret = spi_setup(spi); if (ret < 0) return ret; ak4104 = devm_kzalloc(&spi->dev, sizeof(struct ak4104_private), GFP_KERNEL); if (ak4104 == NULL) return -ENOMEM; ak4104->regmap = devm_regmap_init_spi(spi, &ak4104_regmap); if (IS_ERR(ak4104->regmap)) { ret = PTR_ERR(ak4104->regmap); return ret; } if (np) { enum of_gpio_flags flags; int gpio = of_get_named_gpio_flags(np, "reset-gpio", 0, &flags); if (gpio_is_valid(gpio)) { ret = devm_gpio_request_one(&spi->dev, gpio, flags & OF_GPIO_ACTIVE_LOW ? 
GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH, "ak4104 reset"); if (ret < 0) return ret; } } /* read the 'reserved' register - according to the datasheet, it * should contain 0x5b. Not a good way to verify the presence of * the device, but there is no hardware ID register. */ ret = regmap_read(ak4104->regmap, AK4104_REG_RESERVED, &val); if (ret != 0) return ret; if (val != AK4104_RESERVED_VAL) return -ENODEV; spi_set_drvdata(spi, ak4104); ret = snd_soc_register_codec(&spi->dev, &soc_codec_device_ak4104, &ak4104_dai, 1); return ret; } static int ak4104_spi_remove(struct spi_device *spi) { snd_soc_unregister_codec(&spi->dev); return 0; } static const struct of_device_id ak4104_of_match[] = { { .compatible = "asahi-kasei,ak4104", }, { } }; MODULE_DEVICE_TABLE(of, ak4104_of_match); static const struct spi_device_id ak4104_id_table[] = { { "ak4104", 0 }, { } }; MODULE_DEVICE_TABLE(spi, ak4104_id_table); static struct spi_driver ak4104_spi_driver = { .driver = { .name = "ak4104", .owner = THIS_MODULE, .of_match_table = ak4104_of_match, }, .id_table = ak4104_id_table, .probe = ak4104_spi_probe, .remove = ak4104_spi_remove, }; module_spi_driver(ak4104_spi_driver); MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); MODULE_DESCRIPTION("Asahi Kasei AK4104 ALSA SoC driver"); MODULE_LICENSE("GPL");
gpl-2.0
xenord/TrebonKernel-3.0.101
arch/arm/mm/vcm_alloc.c
836
12198
/* Copyright (c) 2010, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/vcm.h> #include <linux/vcm_alloc.h> #include <linux/string.h> #include <asm/sizes.h> int basicalloc_init; #define vcm_alloc_err(a, ...) \ pr_err("ERROR %s %i " a, __func__, __LINE__, ##__VA_ARGS__) struct phys_chunk_head { struct list_head head; int num; }; struct phys_pool { int size; int chunk_size; struct phys_chunk_head head; }; static int vcm_num_phys_pools; static int vcm_num_memtypes; static struct phys_pool *vcm_phys_pool; static struct vcm_memtype_map *memtype_map; static int num_pools(enum memtype_t memtype) { if (memtype >= vcm_num_memtypes) { vcm_alloc_err("Bad memtype: %d\n", memtype); return -EINVAL; } return memtype_map[memtype].num_pools; } static int pool_chunk_size(enum memtype_t memtype, int prio_idx) { int pool_idx; if (memtype >= vcm_num_memtypes) { vcm_alloc_err("Bad memtype: %d\n", memtype); return -EINVAL; } if (prio_idx >= num_pools(memtype)) { vcm_alloc_err("Bad prio index: %d, max=%d, mt=%d\n", prio_idx, num_pools(memtype), memtype); return -EINVAL; } pool_idx = memtype_map[memtype].pool_id[prio_idx]; return vcm_phys_pool[pool_idx].chunk_size; } int vcm_alloc_pool_idx_to_size(int pool_idx) { if (pool_idx >= vcm_num_phys_pools) { vcm_alloc_err("Bad pool index: %d\n, max=%d\n", pool_idx, vcm_num_phys_pools); return -EINVAL; } return vcm_phys_pool[pool_idx].chunk_size; } static struct phys_chunk_head *get_chunk_list(enum memtype_t memtype, int 
prio_idx) { unsigned int pool_idx; if (memtype >= vcm_num_memtypes) { vcm_alloc_err("Bad memtype: %d\n", memtype); return NULL; } if (prio_idx >= num_pools(memtype)) { vcm_alloc_err("bad chunk size: mt=%d, prioidx=%d, np=%d\n", memtype, prio_idx, num_pools(memtype)); BUG(); return NULL; } if (!vcm_phys_pool) { vcm_alloc_err("phys_pool is null\n"); return NULL; } /* We don't have a "pool count" anywhere but this is coming * strictly from data in a board file */ pool_idx = memtype_map[memtype].pool_id[prio_idx]; return &vcm_phys_pool[pool_idx].head; } static int is_allocated(struct list_head *allocated) { /* This should not happen under normal conditions */ if (!allocated) { vcm_alloc_err("no allocated\n"); return 0; } if (!basicalloc_init) { vcm_alloc_err("no basicalloc_init\n"); return 0; } return !list_empty(allocated); } static int count_allocated_size(enum memtype_t memtype, int idx) { int cnt = 0; struct phys_chunk *chunk, *tmp; struct phys_chunk_head *pch; if (!basicalloc_init) { vcm_alloc_err("no basicalloc_init\n"); return 0; } pch = get_chunk_list(memtype, idx); if (!pch) { vcm_alloc_err("null pch\n"); return -EINVAL; } list_for_each_entry_safe(chunk, tmp, &pch->head, list) { if (is_allocated(&chunk->allocated)) cnt++; } return cnt; } int vcm_alloc_get_mem_size(void) { if (!vcm_phys_pool) { vcm_alloc_err("No physical pool set up!\n"); return -ENODEV; } return vcm_phys_pool[0].size; } EXPORT_SYMBOL(vcm_alloc_get_mem_size); void vcm_alloc_print_list(enum memtype_t memtype, int just_allocated) { int i; struct phys_chunk *chunk, *tmp; struct phys_chunk_head *pch; if (!basicalloc_init) { vcm_alloc_err("no basicalloc_init\n"); return; } for (i = 0; i < num_pools(memtype); ++i) { pch = get_chunk_list(memtype, i); if (!pch) { vcm_alloc_err("pch is null\n"); return; } if (list_empty(&pch->head)) continue; list_for_each_entry_safe(chunk, tmp, &pch->head, list) { if (just_allocated && !is_allocated(&chunk->allocated)) continue; printk(KERN_INFO "pa = %#x, size = 
%#x\n", chunk->pa, vcm_phys_pool[chunk->pool_idx].chunk_size); } } } EXPORT_SYMBOL(vcm_alloc_print_list); int vcm_alloc_blocks_avail(enum memtype_t memtype, int idx) { struct phys_chunk_head *pch; if (!basicalloc_init) { vcm_alloc_err("no basicalloc_init\n"); return 0; } pch = get_chunk_list(memtype, idx); if (!pch) { vcm_alloc_err("pch is null\n"); return 0; } return pch->num; } EXPORT_SYMBOL(vcm_alloc_blocks_avail); int vcm_alloc_get_num_chunks(enum memtype_t memtype) { return num_pools(memtype); } EXPORT_SYMBOL(vcm_alloc_get_num_chunks); int vcm_alloc_all_blocks_avail(enum memtarget_t memtype) { int i; int cnt = 0; if (!basicalloc_init) { vcm_alloc_err("no basicalloc_init\n"); return 0; } for (i = 0; i < num_pools(memtype); ++i) cnt += vcm_alloc_blocks_avail(memtype, i); return cnt; } EXPORT_SYMBOL(vcm_alloc_all_blocks_avail); int vcm_alloc_count_allocated(enum memtype_t memtype) { int i; int cnt = 0; if (!basicalloc_init) { vcm_alloc_err("no basicalloc_init\n"); return 0; } for (i = 0; i < num_pools(memtype); ++i) cnt += count_allocated_size(memtype, i); return cnt; } EXPORT_SYMBOL(vcm_alloc_count_allocated); int vcm_alloc_destroy(void) { int i, mt; struct phys_chunk *chunk, *tmp; if (!basicalloc_init) { vcm_alloc_err("no basicalloc_init\n"); return -ENODEV; } /* can't destroy a space that has allocations */ for (mt = 0; mt < vcm_num_memtypes; mt++) if (vcm_alloc_count_allocated(mt)) { vcm_alloc_err("allocations still present\n"); return -EBUSY; } for (i = 0; i < vcm_num_phys_pools; i++) { struct phys_chunk_head *pch = &vcm_phys_pool[i].head; if (list_empty(&pch->head)) continue; list_for_each_entry_safe(chunk, tmp, &pch->head, list) { list_del(&chunk->list); memset(chunk, 0, sizeof(*chunk)); kfree(chunk); } vcm_phys_pool[i].head.num = 0; } kfree(vcm_phys_pool); kfree(memtype_map); vcm_phys_pool = NULL; memtype_map = NULL; basicalloc_init = 0; vcm_num_phys_pools = 0; return 0; } EXPORT_SYMBOL(vcm_alloc_destroy); int vcm_alloc_init(struct physmem_region *mem, 
int n_regions, struct vcm_memtype_map *mt_map, int n_mt) { int i = 0, j = 0, r = 0, num_chunks; struct phys_chunk *chunk; struct phys_chunk_head *pch = NULL; unsigned long pa; /* no double inits */ if (basicalloc_init) { vcm_alloc_err("double basicalloc_init\n"); BUG(); goto fail; } memtype_map = kzalloc(sizeof(*mt_map) * n_mt, GFP_KERNEL); if (!memtype_map) { vcm_alloc_err("Could not copy memtype map\n"); goto fail; } memcpy(memtype_map, mt_map, sizeof(*mt_map) * n_mt); vcm_phys_pool = kzalloc(sizeof(*vcm_phys_pool) * n_regions, GFP_KERNEL); vcm_num_phys_pools = n_regions; vcm_num_memtypes = n_mt; if (!vcm_phys_pool) { vcm_alloc_err("Could not allocate physical pool structure\n"); goto fail; } /* separate out to ensure good cleanup */ for (i = 0; i < n_regions; i++) { pch = &vcm_phys_pool[i].head; INIT_LIST_HEAD(&pch->head); pch->num = 0; } for (r = 0; r < n_regions; r++) { pa = mem[r].addr; vcm_phys_pool[r].size = mem[r].size; vcm_phys_pool[r].chunk_size = mem[r].chunk_size; pch = &vcm_phys_pool[r].head; num_chunks = mem[r].size / mem[r].chunk_size; printk(KERN_INFO "VCM Init: region %d, chunk size=%d, " "num=%d, pa=%p\n", r, mem[r].chunk_size, num_chunks, (void *)pa); for (j = 0; j < num_chunks; ++j) { chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); if (!chunk) { vcm_alloc_err("null chunk\n"); goto fail; } chunk->pa = pa; chunk->size = mem[r].chunk_size; pa += mem[r].chunk_size; chunk->pool_idx = r; INIT_LIST_HEAD(&chunk->allocated); list_add_tail(&chunk->list, &pch->head); pch->num++; } } basicalloc_init = 1; return 0; fail: vcm_alloc_destroy(); return -EINVAL; } EXPORT_SYMBOL(vcm_alloc_init); int vcm_alloc_free_blocks(enum memtype_t memtype, struct phys_chunk *alloc_head) { struct phys_chunk *chunk, *tmp; struct phys_chunk_head *pch = NULL; if (!basicalloc_init) { vcm_alloc_err("no basicalloc_init\n"); goto fail; } if (!alloc_head) { vcm_alloc_err("no alloc_head\n"); goto fail; } list_for_each_entry_safe(chunk, tmp, &alloc_head->allocated, allocated) { 
list_del_init(&chunk->allocated); pch = &vcm_phys_pool[chunk->pool_idx].head; if (!pch) { vcm_alloc_err("null pch\n"); goto fail; } pch->num++; } return 0; fail: return -ENODEV; } EXPORT_SYMBOL(vcm_alloc_free_blocks); int vcm_alloc_num_blocks(int num, enum memtype_t memtype, int idx, struct phys_chunk *alloc_head) { struct phys_chunk *chunk; struct phys_chunk_head *pch = NULL; int num_allocated = 0; if (!basicalloc_init) { vcm_alloc_err("no basicalloc_init\n"); goto fail; } if (!alloc_head) { vcm_alloc_err("no alloc_head\n"); goto fail; } pch = get_chunk_list(memtype, idx); if (!pch) { vcm_alloc_err("null pch\n"); goto fail; } if (list_empty(&pch->head)) { vcm_alloc_err("list is empty\n"); goto fail; } if (vcm_alloc_blocks_avail(memtype, idx) < num) { vcm_alloc_err("not enough blocks? num=%d\n", num); goto fail; } list_for_each_entry(chunk, &pch->head, list) { if (num_allocated == num) break; if (is_allocated(&chunk->allocated)) continue; list_add_tail(&chunk->allocated, &alloc_head->allocated); pch->num--; num_allocated++; } return num_allocated; fail: return 0; } EXPORT_SYMBOL(vcm_alloc_num_blocks); int vcm_alloc_max_munch(int len, enum memtype_t memtype, struct phys_chunk *alloc_head) { int i; int blocks_req = 0; int block_residual = 0; int blocks_allocated = 0; int cur_chunk_size = 0; int ba = 0; if (!basicalloc_init) { vcm_alloc_err("basicalloc_init is 0\n"); goto fail; } if (!alloc_head) { vcm_alloc_err("alloc_head is NULL\n"); goto fail; } if (num_pools(memtype) <= 0) { vcm_alloc_err("Memtype %d has improper mempool configuration\n", memtype); goto fail; } for (i = 0; i < num_pools(memtype); ++i) { cur_chunk_size = pool_chunk_size(memtype, i); if (cur_chunk_size <= 0) { vcm_alloc_err("Bad chunk size: %d\n", cur_chunk_size); goto fail; } blocks_req = len / cur_chunk_size; block_residual = len % cur_chunk_size; len = block_residual; /* len left */ if (blocks_req) { int blocks_available = 0; int blocks_diff = 0; int bytes_diff = 0; blocks_available = 
vcm_alloc_blocks_avail(memtype, i); if (blocks_available < blocks_req) { blocks_diff = (blocks_req - blocks_available); bytes_diff = blocks_diff * cur_chunk_size; /* add back in the rest */ len += bytes_diff; } else { /* got all the blocks I need */ blocks_available = (blocks_available > blocks_req) ? blocks_req : blocks_available; } ba = vcm_alloc_num_blocks(blocks_available, memtype, i, alloc_head); if (ba != blocks_available) { vcm_alloc_err("blocks allocated (%i) !=" " blocks_available (%i):" " chunk size = %#x," " alloc_head = %p\n", ba, blocks_available, i, (void *) alloc_head); goto fail; } blocks_allocated += blocks_available; } } if (len) { int blocks_available = 0; int last_sz = num_pools(memtype) - 1; blocks_available = vcm_alloc_blocks_avail(memtype, last_sz); if (blocks_available > 0) { ba = vcm_alloc_num_blocks(1, memtype, last_sz, alloc_head); if (ba != 1) { vcm_alloc_err("blocks allocated (%i) !=" " blocks_available (%i):" " chunk size = %#x," " alloc_head = %p\n", ba, 1, last_sz, (void *) alloc_head); goto fail; } blocks_allocated += 1; } else { vcm_alloc_err("blocks_available (%#x) <= 1\n", blocks_available); goto fail; } } return blocks_allocated; fail: vcm_alloc_free_blocks(memtype, alloc_head); return 0; } EXPORT_SYMBOL(vcm_alloc_max_munch);
gpl-2.0
htc-mirror/shooteru-ics-crc-3.0.16-e733189
sound/soc/pxa/pxa-ssp.c
1604
19952
/* * pxa-ssp.c -- ALSA Soc Audio Layer * * Copyright 2005,2008 Wolfson Microelectronics PLC. * Author: Liam Girdwood * Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * TODO: * o Test network mode for > 16bit sample size */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/pxa2xx_ssp.h> #include <asm/irq.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/initval.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/pxa2xx-lib.h> #include <mach/hardware.h> #include <mach/dma.h> #include <mach/audio.h> #include "../../arm/pxa2xx-pcm.h" #include "pxa-ssp.h" /* * SSP audio private data */ struct ssp_priv { struct ssp_device *ssp; unsigned int sysclk; int dai_fmt; #ifdef CONFIG_PM uint32_t cr0; uint32_t cr1; uint32_t to; uint32_t psp; #endif }; static void dump_registers(struct ssp_device *ssp) { dev_dbg(&ssp->pdev->dev, "SSCR0 0x%08x SSCR1 0x%08x SSTO 0x%08x\n", pxa_ssp_read_reg(ssp, SSCR0), pxa_ssp_read_reg(ssp, SSCR1), pxa_ssp_read_reg(ssp, SSTO)); dev_dbg(&ssp->pdev->dev, "SSPSP 0x%08x SSSR 0x%08x SSACD 0x%08x\n", pxa_ssp_read_reg(ssp, SSPSP), pxa_ssp_read_reg(ssp, SSSR), pxa_ssp_read_reg(ssp, SSACD)); } static void pxa_ssp_enable(struct ssp_device *ssp) { uint32_t sscr0; sscr0 = __raw_readl(ssp->mmio_base + SSCR0) | SSCR0_SSE; __raw_writel(sscr0, ssp->mmio_base + SSCR0); } static void pxa_ssp_disable(struct ssp_device *ssp) { uint32_t sscr0; sscr0 = __raw_readl(ssp->mmio_base + SSCR0) & ~SSCR0_SSE; __raw_writel(sscr0, ssp->mmio_base + SSCR0); } struct pxa2xx_pcm_dma_data { struct pxa2xx_pcm_dma_params params; char name[20]; }; static struct pxa2xx_pcm_dma_params * 
pxa_ssp_get_dma_params(struct ssp_device *ssp, int width4, int out) { struct pxa2xx_pcm_dma_data *dma; dma = kzalloc(sizeof(struct pxa2xx_pcm_dma_data), GFP_KERNEL); if (dma == NULL) return NULL; snprintf(dma->name, 20, "SSP%d PCM %s %s", ssp->port_id, width4 ? "32-bit" : "16-bit", out ? "out" : "in"); dma->params.name = dma->name; dma->params.drcmr = &DRCMR(out ? ssp->drcmr_tx : ssp->drcmr_rx); dma->params.dcmd = (out ? (DCMD_INCSRCADDR | DCMD_FLOWTRG) : (DCMD_INCTRGADDR | DCMD_FLOWSRC)) | (width4 ? DCMD_WIDTH4 : DCMD_WIDTH2) | DCMD_BURST16; dma->params.dev_addr = ssp->phys_base + SSDR; return &dma->params; } static int pxa_ssp_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; int ret = 0; if (!cpu_dai->active) { clk_enable(ssp->clk); pxa_ssp_disable(ssp); } kfree(snd_soc_dai_get_dma_data(cpu_dai, substream)); snd_soc_dai_set_dma_data(cpu_dai, substream, NULL); return ret; } static void pxa_ssp_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; if (!cpu_dai->active) { pxa_ssp_disable(ssp); clk_disable(ssp->clk); } kfree(snd_soc_dai_get_dma_data(cpu_dai, substream)); snd_soc_dai_set_dma_data(cpu_dai, substream, NULL); } #ifdef CONFIG_PM static int pxa_ssp_suspend(struct snd_soc_dai *cpu_dai) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; if (!cpu_dai->active) clk_enable(ssp->clk); priv->cr0 = __raw_readl(ssp->mmio_base + SSCR0); priv->cr1 = __raw_readl(ssp->mmio_base + SSCR1); priv->to = __raw_readl(ssp->mmio_base + SSTO); priv->psp = __raw_readl(ssp->mmio_base + SSPSP); pxa_ssp_disable(ssp); clk_disable(ssp->clk); return 0; } static int pxa_ssp_resume(struct snd_soc_dai *cpu_dai) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; 
uint32_t sssr = SSSR_ROR | SSSR_TUR | SSSR_BCE; clk_enable(ssp->clk); __raw_writel(sssr, ssp->mmio_base + SSSR); __raw_writel(priv->cr0 & ~SSCR0_SSE, ssp->mmio_base + SSCR0); __raw_writel(priv->cr1, ssp->mmio_base + SSCR1); __raw_writel(priv->to, ssp->mmio_base + SSTO); __raw_writel(priv->psp, ssp->mmio_base + SSPSP); if (cpu_dai->active) pxa_ssp_enable(ssp); else clk_disable(ssp->clk); return 0; } #else #define pxa_ssp_suspend NULL #define pxa_ssp_resume NULL #endif /** * ssp_set_clkdiv - set SSP clock divider * @div: serial clock rate divider */ static void pxa_ssp_set_scr(struct ssp_device *ssp, u32 div) { u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0); if (cpu_is_pxa25x() && ssp->type == PXA25x_SSP) { sscr0 &= ~0x0000ff00; sscr0 |= ((div - 2)/2) << 8; /* 2..512 */ } else { sscr0 &= ~0x000fff00; sscr0 |= (div - 1) << 8; /* 1..4096 */ } pxa_ssp_write_reg(ssp, SSCR0, sscr0); } /** * pxa_ssp_get_clkdiv - get SSP clock divider */ static u32 pxa_ssp_get_scr(struct ssp_device *ssp) { u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0); u32 div; if (cpu_is_pxa25x() && ssp->type == PXA25x_SSP) div = ((sscr0 >> 8) & 0xff) * 2 + 2; else div = ((sscr0 >> 8) & 0xfff) + 1; return div; } /* * Set the SSP ports SYSCLK. 
*/ static int pxa_ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai, int clk_id, unsigned int freq, int dir) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; int val; u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0) & ~(SSCR0_ECS | SSCR0_NCS | SSCR0_MOD | SSCR0_ACS); dev_dbg(&ssp->pdev->dev, "pxa_ssp_set_dai_sysclk id: %d, clk_id %d, freq %u\n", cpu_dai->id, clk_id, freq); switch (clk_id) { case PXA_SSP_CLK_NET_PLL: sscr0 |= SSCR0_MOD; break; case PXA_SSP_CLK_PLL: /* Internal PLL is fixed */ if (cpu_is_pxa25x()) priv->sysclk = 1843200; else priv->sysclk = 13000000; break; case PXA_SSP_CLK_EXT: priv->sysclk = freq; sscr0 |= SSCR0_ECS; break; case PXA_SSP_CLK_NET: priv->sysclk = freq; sscr0 |= SSCR0_NCS | SSCR0_MOD; break; case PXA_SSP_CLK_AUDIO: priv->sysclk = 0; pxa_ssp_set_scr(ssp, 1); sscr0 |= SSCR0_ACS; break; default: return -ENODEV; } /* The SSP clock must be disabled when changing SSP clock mode * on PXA2xx. On PXA3xx it must be enabled when doing so. */ if (!cpu_is_pxa3xx()) clk_disable(ssp->clk); val = pxa_ssp_read_reg(ssp, SSCR0) | sscr0; pxa_ssp_write_reg(ssp, SSCR0, val); if (!cpu_is_pxa3xx()) clk_enable(ssp->clk); return 0; } /* * Set the SSP clock dividers. 
*/ static int pxa_ssp_set_dai_clkdiv(struct snd_soc_dai *cpu_dai, int div_id, int div) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; int val; switch (div_id) { case PXA_SSP_AUDIO_DIV_ACDS: val = (pxa_ssp_read_reg(ssp, SSACD) & ~0x7) | SSACD_ACDS(div); pxa_ssp_write_reg(ssp, SSACD, val); break; case PXA_SSP_AUDIO_DIV_SCDB: val = pxa_ssp_read_reg(ssp, SSACD); val &= ~SSACD_SCDB; #if defined(CONFIG_PXA3xx) if (cpu_is_pxa3xx()) val &= ~SSACD_SCDX8; #endif switch (div) { case PXA_SSP_CLK_SCDB_1: val |= SSACD_SCDB; break; case PXA_SSP_CLK_SCDB_4: break; #if defined(CONFIG_PXA3xx) case PXA_SSP_CLK_SCDB_8: if (cpu_is_pxa3xx()) val |= SSACD_SCDX8; else return -EINVAL; break; #endif default: return -EINVAL; } pxa_ssp_write_reg(ssp, SSACD, val); break; case PXA_SSP_DIV_SCR: pxa_ssp_set_scr(ssp, div); break; default: return -ENODEV; } return 0; } /* * Configure the PLL frequency pxa27x and (afaik - pxa320 only) */ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id, int source, unsigned int freq_in, unsigned int freq_out) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; u32 ssacd = pxa_ssp_read_reg(ssp, SSACD) & ~0x70; #if defined(CONFIG_PXA3xx) if (cpu_is_pxa3xx()) pxa_ssp_write_reg(ssp, SSACDD, 0); #endif switch (freq_out) { case 5622000: break; case 11345000: ssacd |= (0x1 << 4); break; case 12235000: ssacd |= (0x2 << 4); break; case 14857000: ssacd |= (0x3 << 4); break; case 32842000: ssacd |= (0x4 << 4); break; case 48000000: ssacd |= (0x5 << 4); break; case 0: /* Disable */ break; default: #ifdef CONFIG_PXA3xx /* PXA3xx has a clock ditherer which can be used to generate * a wider range of frequencies - calculate a value for it. 
*/ if (cpu_is_pxa3xx()) { u32 val; u64 tmp = 19968; tmp *= 1000000; do_div(tmp, freq_out); val = tmp; val = (val << 16) | 64; pxa_ssp_write_reg(ssp, SSACDD, val); ssacd |= (0x6 << 4); dev_dbg(&ssp->pdev->dev, "Using SSACDD %x to supply %uHz\n", val, freq_out); break; } #endif return -EINVAL; } pxa_ssp_write_reg(ssp, SSACD, ssacd); return 0; } /* * Set the active slots in TDM/Network mode */ static int pxa_ssp_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; u32 sscr0; sscr0 = pxa_ssp_read_reg(ssp, SSCR0); sscr0 &= ~(SSCR0_MOD | SSCR0_SlotsPerFrm(8) | SSCR0_EDSS | SSCR0_DSS); /* set slot width */ if (slot_width > 16) sscr0 |= SSCR0_EDSS | SSCR0_DataSize(slot_width - 16); else sscr0 |= SSCR0_DataSize(slot_width); if (slots > 1) { /* enable network mode */ sscr0 |= SSCR0_MOD; /* set number of active slots */ sscr0 |= SSCR0_SlotsPerFrm(slots); /* set active slot mask */ pxa_ssp_write_reg(ssp, SSTSA, tx_mask); pxa_ssp_write_reg(ssp, SSRSA, rx_mask); } pxa_ssp_write_reg(ssp, SSCR0, sscr0); return 0; } /* * Tristate the SSP DAI lines */ static int pxa_ssp_set_dai_tristate(struct snd_soc_dai *cpu_dai, int tristate) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; u32 sscr1; sscr1 = pxa_ssp_read_reg(ssp, SSCR1); if (tristate) sscr1 &= ~SSCR1_TTE; else sscr1 |= SSCR1_TTE; pxa_ssp_write_reg(ssp, SSCR1, sscr1); return 0; } /* * Set up the SSP DAI format. * The SSP Port must be inactive before calling this function as the * physical interface format is changed. 
 */
static int pxa_ssp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
		unsigned int fmt)
{
	struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
	struct ssp_device *ssp = priv->ssp;
	u32 sscr0, sscr1, sspsp, scfr;

	/* check if we need to change anything at all */
	if (priv->dai_fmt == fmt)
		return 0;

	/* we can only change the settings if the port is not in use */
	if (pxa_ssp_read_reg(ssp, SSCR0) & SSCR0_SSE) {
		dev_err(&ssp->pdev->dev,
			"can't change hardware dai format: stream is in use");
		return -EINVAL;
	}

	/* reset port settings */
	sscr0 = pxa_ssp_read_reg(ssp, SSCR0) &
		~(SSCR0_ECS |  SSCR0_NCS | SSCR0_MOD | SSCR0_ACS);
	sscr1 = SSCR1_RxTresh(8) | SSCR1_TxTresh(7);
	sspsp = 0;

	/* master/slave: who drives bit clock and frame sync */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:
		sscr1 |= SSCR1_SCLKDIR | SSCR1_SFRMDIR | SSCR1_SCFR;
		break;
	case SND_SOC_DAIFMT_CBM_CFS:
		sscr1 |= SSCR1_SCLKDIR | SSCR1_SCFR;
		break;
	case SND_SOC_DAIFMT_CBS_CFS:
		break;
	default:
		return -EINVAL;
	}

	/* clock/frame polarity */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		sspsp |= SSPSP_SFRMP;
		break;
	case SND_SOC_DAIFMT_NB_IF:
		break;
	case SND_SOC_DAIFMT_IB_IF:
		sspsp |= SSPSP_SCMODE(2);
		break;
	case SND_SOC_DAIFMT_IB_NF:
		sspsp |= SSPSP_SCMODE(2) | SSPSP_SFRMP;
		break;
	default:
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		sscr0 |= SSCR0_PSP;
		sscr1 |= SSCR1_RWOT | SSCR1_TRAIL; /* See hw_params() */
		break;
	case SND_SOC_DAIFMT_DSP_A:
		sspsp |= SSPSP_FSRT;
		/* fall through - DSP_A is DSP_B with a delayed frame sync */
	case SND_SOC_DAIFMT_DSP_B:
		sscr0 |= SSCR0_MOD | SSCR0_PSP;
		sscr1 |= SSCR1_TRAIL | SSCR1_RWOT;
		break;
	default:
		return -EINVAL;
	}

	pxa_ssp_write_reg(ssp, SSCR0, sscr0);
	pxa_ssp_write_reg(ssp, SSCR1, sscr1);
	pxa_ssp_write_reg(ssp, SSPSP, sspsp);

	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:
	case SND_SOC_DAIFMT_CBM_CFS:
		/* in slave-clock modes wait for the port to go idle before
		 * latching the external-clock selection */
		scfr = pxa_ssp_read_reg(ssp, SSCR1) | SSCR1_SCFR;
		pxa_ssp_write_reg(ssp, SSCR1, scfr);

		while (pxa_ssp_read_reg(ssp, SSSR) & SSSR_BSY)
			cpu_relax();
		break;
	}

	dump_registers(ssp);

	/* Since we are configuring the timings for the format by hand
	 * we have to defer some things until hw_params() where we
	 * know parameters like the sample size.
	 */
	priv->dai_fmt = fmt;

	return 0;
}

/*
 * Set the SSP audio DMA parameters and sample size.
 * Can be called multiple times by oss emulation.
 */
static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params,
				struct snd_soc_dai *cpu_dai)
{
	struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
	struct ssp_device *ssp = priv->ssp;
	int chn = params_channels(params);
	u32 sscr0;
	u32 sspsp;
	int width = snd_pcm_format_physical_width(params_format(params));
	int ttsa = pxa_ssp_read_reg(ssp, SSTSA) & 0xf;
	struct pxa2xx_pcm_dma_params *dma_data;

	dma_data = snd_soc_dai_get_dma_data(cpu_dai, substream);

	/* generate correct DMA params */
	/* NOTE(review): this frees the previous dma_data before replacing
	 * it - assumes it was heap-allocated by pxa_ssp_get_dma_params on
	 * the prior call (or is NULL on the first call); verify against
	 * pxa_ssp_get_dma_params, which is defined outside this chunk. */
	kfree(dma_data);

	/* Network mode with one active slot (ttsa == 1) can be used
	 * to force 16-bit frame width on the wire (for S16_LE), even
	 * with two channels. Use 16-bit DMA transfers for this case.
	 */
	dma_data = pxa_ssp_get_dma_params(ssp,
			((chn == 2) && (ttsa != 1)) || (width == 32),
			substream->stream == SNDRV_PCM_STREAM_PLAYBACK);

	snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);

	/* we can only change the settings if the port is not in use */
	if (pxa_ssp_read_reg(ssp, SSCR0) & SSCR0_SSE)
		return 0;

	/* clear selected SSP bits */
	sscr0 = pxa_ssp_read_reg(ssp, SSCR0) & ~(SSCR0_DSS | SSCR0_EDSS);

	/* bit size */
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
#ifdef CONFIG_PXA3xx
		if (cpu_is_pxa3xx())
			sscr0 |= SSCR0_FPCKE;
#endif
		sscr0 |= SSCR0_DataSize(16);
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		sscr0 |= (SSCR0_EDSS | SSCR0_DataSize(8));
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		sscr0 |= (SSCR0_EDSS | SSCR0_DataSize(16));
		break;
	}
	pxa_ssp_write_reg(ssp, SSCR0, sscr0);

	switch (priv->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		sspsp = pxa_ssp_read_reg(ssp, SSPSP);

		if ((pxa_ssp_get_scr(ssp) == 4) && (width == 16)) {
			/* This is a special case where the bitclk is 64fs
			 * and we're not dealing with 2*32 bits of audio
			 * samples.
			 *
			 * The SSP values used for that are all found out by
			 * trying and failing a lot; some of the registers
			 * needed for that mode are only available on PXA3xx.
			 */
#ifdef CONFIG_PXA3xx
			if (!cpu_is_pxa3xx())
				return -EINVAL;

			sspsp |= SSPSP_SFRMWDTH(width * 2);
			sspsp |= SSPSP_SFRMDLY(width * 4);
			sspsp |= SSPSP_EDMYSTOP(3);
			sspsp |= SSPSP_DMYSTOP(3);
			sspsp |= SSPSP_DMYSTRT(1);
#else
			return -EINVAL;
#endif
		} else {
			/* The frame width is the width the LRCLK is
			 * asserted for; the delay is expressed in
			 * half cycle units.  We need the extra cycle
			 * because the data starts clocking out one BCLK
			 * after LRCLK changes polarity.
			 */
			sspsp |= SSPSP_SFRMWDTH(width + 1);
			sspsp |= SSPSP_SFRMDLY((width + 1) * 2);
			sspsp |= SSPSP_DMYSTRT(1);
		}
		pxa_ssp_write_reg(ssp, SSPSP, sspsp);
		break;
	default:
		break;
	}

	/* When we use a network mode, we always require TDM slots
	 * - complain loudly and fail if they've not been set up yet.
	 */
	if ((sscr0 & SSCR0_MOD) && !ttsa) {
		dev_err(&ssp->pdev->dev, "No TDM timeslot configured\n");
		return -EINVAL;
	}

	dump_registers(ssp);

	return 0;
}

/*
 * Start/stop/pause the stream: gate the TX/RX service-request bits in
 * SSCR1 and enable/disable the whole port as required by each command.
 */
static int pxa_ssp_trigger(struct snd_pcm_substream *substream, int cmd,
			   struct snd_soc_dai *cpu_dai)
{
	int ret = 0;
	struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
	struct ssp_device *ssp = priv->ssp;
	int val;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_RESUME:
		pxa_ssp_enable(ssp);
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		val = pxa_ssp_read_reg(ssp, SSCR1);
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			val |= SSCR1_TSRE;
		else
			val |= SSCR1_RSRE;
		pxa_ssp_write_reg(ssp, SSCR1, val);
		/* write SSSR back to itself to clear pending status bits */
		val = pxa_ssp_read_reg(ssp, SSSR);
		pxa_ssp_write_reg(ssp, SSSR, val);
		break;
	case SNDRV_PCM_TRIGGER_START:
		val = pxa_ssp_read_reg(ssp, SSCR1);
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			val |= SSCR1_TSRE;
		else
			val |= SSCR1_RSRE;
		pxa_ssp_write_reg(ssp, SSCR1, val);
		pxa_ssp_enable(ssp);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		val = pxa_ssp_read_reg(ssp, SSCR1);
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			val &= ~SSCR1_TSRE;
		else
			val &= ~SSCR1_RSRE;
		pxa_ssp_write_reg(ssp, SSCR1, val);
		break;
	case SNDRV_PCM_TRIGGER_SUSPEND:
		pxa_ssp_disable(ssp);
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		val = pxa_ssp_read_reg(ssp, SSCR1);
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			val &= ~SSCR1_TSRE;
		else
			val &= ~SSCR1_RSRE;
		pxa_ssp_write_reg(ssp, SSCR1, val);
		break;

	default:
		ret = -EINVAL;
	}

	dump_registers(ssp);

	return ret;
}

/*
 * DAI probe: allocate per-port state and claim the SSP hardware.
 * dai->id is zero-based, pxa_ssp_request() expects a one-based port.
 */
static int pxa_ssp_probe(struct snd_soc_dai *dai)
{
	struct ssp_priv *priv;
	int ret;

	priv = kzalloc(sizeof(struct ssp_priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->ssp = pxa_ssp_request(dai->id + 1, "SoC audio");
	if (priv->ssp == NULL) {
		ret = -ENODEV;
		goto err_priv;
	}

	/* sentinel: no format configured yet (see pxa_ssp_set_dai_fmt) */
	priv->dai_fmt = (unsigned int) -1;
	snd_soc_dai_set_drvdata(dai, priv);

	return 0;

err_priv:
	kfree(priv);
	return ret;
}

/* DAI remove: release the SSP port and free per-port state. */
static int pxa_ssp_remove(struct snd_soc_dai *dai)
{
	struct ssp_priv *priv = snd_soc_dai_get_drvdata(dai);

	pxa_ssp_free(priv->ssp);
	kfree(priv);
	return 0;
}

#define PXA_SSP_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\
			  SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |	\
			  SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |	\
			  SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)

#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
			    SNDRV_PCM_FMTBIT_S24_LE |	\
			    SNDRV_PCM_FMTBIT_S32_LE)

static struct snd_soc_dai_ops pxa_ssp_dai_ops = {
	.startup	= pxa_ssp_startup,
	.shutdown	= pxa_ssp_shutdown,
	.trigger	= pxa_ssp_trigger,
	.hw_params	= pxa_ssp_hw_params,
	.set_sysclk	= pxa_ssp_set_dai_sysclk,
	.set_clkdiv	= pxa_ssp_set_dai_clkdiv,
	.set_pll	= pxa_ssp_set_dai_pll,
	.set_fmt	= pxa_ssp_set_dai_fmt,
	.set_tdm_slot	= pxa_ssp_set_dai_tdm_slot,
	.set_tristate	= pxa_ssp_set_dai_tristate,
};

static struct snd_soc_dai_driver pxa_ssp_dai = {
		.probe = pxa_ssp_probe,
		.remove = pxa_ssp_remove,
		.suspend = pxa_ssp_suspend,
		.resume = pxa_ssp_resume,
		.playback = {
			.channels_min = 1,
			.channels_max = 8,
			.rates = PXA_SSP_RATES,
			.formats = PXA_SSP_FORMATS,
		},
		.capture = {
			.channels_min = 1,
			.channels_max = 8,
			.rates = PXA_SSP_RATES,
			.formats = PXA_SSP_FORMATS,
		},
		.ops = &pxa_ssp_dai_ops,
};

/* Platform glue: register/unregister the DAI with the ASoC core. */
static __devinit int asoc_ssp_probe(struct platform_device *pdev)
{
	return snd_soc_register_dai(&pdev->dev, &pxa_ssp_dai);
}

static int __devexit asoc_ssp_remove(struct platform_device *pdev)
{
	snd_soc_unregister_dai(&pdev->dev);
	return 0;
}

static struct platform_driver asoc_ssp_driver = {
	.driver = {
		.name = "pxa-ssp-dai",
		.owner = THIS_MODULE,
	},

	.probe = asoc_ssp_probe,
	.remove = __devexit_p(asoc_ssp_remove),
};

static int __init pxa_ssp_init(void)
{
	return platform_driver_register(&asoc_ssp_driver);
}
module_init(pxa_ssp_init);

static void __exit pxa_ssp_exit(void)
{
	platform_driver_unregister(&asoc_ssp_driver);
}
module_exit(pxa_ssp_exit);

/* Module information */
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("PXA SSP/PCM SoC Interface");
MODULE_LICENSE("GPL");
gpl-2.0
AndroidGX/SimpleGX-MM-6.0_H815
drivers/net/wireless/iwlwifi/mvm/rx.c
2116
12251
/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, * USA * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ #include "iwl-trans.h" #include "mvm.h" #include "fw-api.h" /* * iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler * * Copies the phy information in mvm->last_phy_info, it will be used when the * actual data will come from the fw in the next packet. */ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd) { struct iwl_rx_packet *pkt = rxb_addr(rxb); memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info)); mvm->ampdu_ref++; return 0; } /* * iwl_mvm_pass_packet_to_mac80211 - builds the packet for mac80211 * * Adds the rxb to a new skb and give it to mac80211 */ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, u16 len, u32 ampdu_status, struct iwl_rx_cmd_buffer *rxb, struct ieee80211_rx_status *stats) { struct sk_buff *skb; unsigned int hdrlen, fraglen; /* Dont use dev_alloc_skb(), we'll have enough headroom once * ieee80211_hdr pulled. */ skb = alloc_skb(128, GFP_ATOMIC); if (!skb) { IWL_ERR(mvm, "alloc_skb failed\n"); return; } /* If frame is small enough to fit in skb->head, pull it completely. 
* If not, only pull ieee80211_hdr so that splice() or TCP coalesce * are more efficient. */ hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr); memcpy(skb_put(skb, hdrlen), hdr, hdrlen); fraglen = len - hdrlen; if (fraglen) { int offset = (void *)hdr + hdrlen - rxb_addr(rxb) + rxb_offset(rxb); skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, fraglen, rxb->truesize); } memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); ieee80211_rx_ni(mvm->hw, skb); } /* * iwl_mvm_calc_rssi - calculate the rssi in dBm * @phy_info: the phy information for the coming packet */ static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm, struct iwl_rx_phy_info *phy_info) { int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm; int rssi_all_band_a, rssi_all_band_b; u32 agc_a, agc_b, max_agc; u32 val; /* Find max rssi among 2 possible receivers. * These values are measured by the Digital Signal Processor (DSP). * They should stay fairly constant even as the signal strength varies, * if the radio's Automatic Gain Control (AGC) is working right. * AGC value (see below) will provide the "interesting" info. */ val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]); agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS; agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS; max_agc = max_t(u32, agc_a, agc_b); val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]); rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS; rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS; rssi_all_band_a = (val & IWL_OFDM_RSSI_ALLBAND_A_MSK) >> IWL_OFDM_RSSI_ALLBAND_A_POS; rssi_all_band_b = (val & IWL_OFDM_RSSI_ALLBAND_B_MSK) >> IWL_OFDM_RSSI_ALLBAND_B_POS; /* * dBm = rssi dB - agc dB - constant. * Higher AGC (higher radio gain) means lower signal. 
*/ rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a; rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b; max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm); IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n", rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b); return max_rssi_dbm; } /* * iwl_mvm_set_mac80211_rx_flag - translate fw status to mac80211 format * @mvm: the mvm object * @hdr: 80211 header * @stats: status in mac80211's format * @rx_pkt_status: status coming from fw * * returns non 0 value if the packet should be dropped */ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_rx_status *stats, u32 rx_pkt_status) { if (!ieee80211_has_protected(hdr->frame_control) || (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == RX_MPDU_RES_STATUS_SEC_NO_ENC) return 0; /* packet was encrypted with unknown alg */ if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == RX_MPDU_RES_STATUS_SEC_ENC_ERR) return 0; switch (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) { case RX_MPDU_RES_STATUS_SEC_CCM_ENC: /* alg is CCM: check MIC only */ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK)) return -1; stats->flag |= RX_FLAG_DECRYPTED; IWL_DEBUG_WEP(mvm, "hw decrypted CCMP successfully\n"); return 0; case RX_MPDU_RES_STATUS_SEC_TKIP_ENC: /* Don't drop the frame and decrypt it in SW */ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK)) return 0; /* fall through if TTAK OK */ case RX_MPDU_RES_STATUS_SEC_WEP_ENC: if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK)) return -1; stats->flag |= RX_FLAG_DECRYPTED; return 0; default: IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status); } return 0; } /* * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler * * Handles the actual data of the Rx packet from the fw */ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd) { struct ieee80211_hdr *hdr; struct ieee80211_rx_status rx_status = {}; struct iwl_rx_packet *pkt = rxb_addr(rxb); struct 
iwl_rx_phy_info *phy_info; struct iwl_rx_mpdu_res_start *rx_res; u32 len; u32 ampdu_status; u32 rate_n_flags; u32 rx_pkt_status; phy_info = &mvm->last_phy_info; rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data; hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res)); len = le16_to_cpu(rx_res->byte_count); rx_pkt_status = le32_to_cpup((__le32 *) (pkt->data + sizeof(*rx_res) + len)); memset(&rx_status, 0, sizeof(rx_status)); /* * drop the packet if it has failed being decrypted by HW */ if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, &rx_status, rx_pkt_status)) { IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n", rx_pkt_status); return 0; } if ((unlikely(phy_info->cfg_phy_cnt > 20))) { IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n", phy_info->cfg_phy_cnt); return 0; } if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) || !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) { IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status); return 0; } /* This will be used in several places later */ rate_n_flags = le32_to_cpu(phy_info->rate_n_flags); /* rx_status carries information about the packet to mac80211 */ rx_status.mactime = le64_to_cpu(phy_info->timestamp); rx_status.device_timestamp = le32_to_cpu(phy_info->system_timestamp); rx_status.band = (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; rx_status.freq = ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel), rx_status.band); /* * TSF as indicated by the fw is at INA time, but mac80211 expects the * TSF at the beginning of the MPDU. */ /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/ /* Find max signal strength (dBm) among 3 antenna/receiver chains */ rx_status.signal = iwl_mvm_calc_rssi(mvm, phy_info); IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal, (unsigned long long)rx_status.mactime); /* * "antenna number" * * It seems that the antenna field in the phy flags value * is actually a bit field. 
This is undefined by radiotap, * it wants an actual antenna number but I always get "7" * for most legacy frames I receive indicating that the * same frame was received on all three RX chains. * * I think this field should be removed in favor of a * new 802.11n radiotap field "RX chains" that is defined * as a bitmask. */ rx_status.antenna = (le16_to_cpu(phy_info->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA) >> RX_RES_PHY_FLAGS_ANTENNA_POS; /* set the preamble flag if appropriate */ if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE)) rx_status.flag |= RX_FLAG_SHORTPRE; if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) { /* * We know which subframes of an A-MPDU belong * together since we get a single PHY response * from the firmware for all of them */ rx_status.flag |= RX_FLAG_AMPDU_DETAILS; rx_status.ampdu_reference = mvm->ampdu_ref; } /* Set up the HT phy flags */ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { case RATE_MCS_CHAN_WIDTH_20: break; case RATE_MCS_CHAN_WIDTH_40: rx_status.flag |= RX_FLAG_40MHZ; break; case RATE_MCS_CHAN_WIDTH_80: rx_status.flag |= RX_FLAG_80MHZ; break; case RATE_MCS_CHAN_WIDTH_160: rx_status.flag |= RX_FLAG_160MHZ; break; } if (rate_n_flags & RATE_MCS_SGI_MSK) rx_status.flag |= RX_FLAG_SHORT_GI; if (rate_n_flags & RATE_HT_MCS_GF_MSK) rx_status.flag |= RX_FLAG_HT_GF; if (rate_n_flags & RATE_MCS_HT_MSK) { rx_status.flag |= RX_FLAG_HT; rx_status.rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; } else if (rate_n_flags & RATE_MCS_VHT_MSK) { rx_status.vht_nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; rx_status.flag |= RX_FLAG_VHT; } else { rx_status.rate_idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, rx_status.band); } iwl_mvm_pass_packet_to_mac80211(mvm, hdr, len, ampdu_status, rxb, &rx_status); return 0; }
gpl-2.0
donkeykang/donkeyk
drivers/bcma/driver_chipcommon_pmu.c
2116
7980
/*
 * Broadcom specific AMBA
 * ChipCommon Power Management Unit driver
 *
 * Copyright 2009, Michael Buesch <m@bues.ch>
 * Copyright 2007, Broadcom Corporation
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/export.h>
#include <linux/bcma/bcma.h>

/* Indirect PLL register read: latch the offset, flush with a dummy read,
 * then read the data register. */
static u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
{
	bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
	bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
	return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
}

/* Indirect PLL register write (same latch-then-access scheme as the read). */
void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value)
{
	bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
	bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
	bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
}
EXPORT_SYMBOL_GPL(bcma_chipco_pll_write);

/* Read-modify-write of an indirect PLL register: new = (old & mask) | set. */
void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
			     u32 set)
{
	bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
	bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
	bcma_cc_maskset32(cc, BCMA_CC_PLLCTL_DATA, mask, set);
}
EXPORT_SYMBOL_GPL(bcma_chipco_pll_maskset);

/* Read-modify-write of an indirect chip-control register. */
void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
				 u32 offset, u32 mask, u32 set)
{
	bcma_cc_write32(cc, BCMA_CC_CHIPCTL_ADDR, offset);
	bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR);
	bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL_DATA, mask, set);
}
EXPORT_SYMBOL_GPL(bcma_chipco_chipctl_maskset);

/* Read-modify-write of an indirect regulator-control register. */
void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
				u32 set)
{
	bcma_cc_write32(cc, BCMA_CC_REGCTL_ADDR, offset);
	bcma_cc_read32(cc, BCMA_CC_REGCTL_ADDR);
	bcma_cc_maskset32(cc, BCMA_CC_REGCTL_DATA, mask, set);
}
EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset);

/* Per-chip PLL setup; the listed chips need no extra work here. */
static void bcma_pmu_pll_init(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;

	switch (bus->chipinfo.id) {
	case 0x4313:
	case 0x4331:
	case 43224:
	case 43225:
		break;
	default:
		pr_err("PLL init unknown for device 0x%04X\n",
			bus->chipinfo.id);
	}
}

/* Program the PMU min/max resource masks for chips that need them. */
static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;
	u32 min_msk = 0, max_msk = 0;

	switch (bus->chipinfo.id) {
	case 0x4313:
		min_msk = 0x200D;
		max_msk = 0xFFFF;
		break;
	case 0x4331:
	case 43224:
	case 43225:
		break;
	default:
		pr_err("PMU resource config unknown for device 0x%04X\n",
			bus->chipinfo.id);
	}

	/* Set the resource masks. */
	if (min_msk)
		bcma_cc_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk);
	if (max_msk)
		bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
}

/* Per-chip switcher/regulator setup; currently a no-op for known chips. */
void bcma_pmu_swreg_init(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;

	switch (bus->chipinfo.id) {
	case 0x4313:
	case 0x4331:
	case 43224:
	case 43225:
		break;
	default:
		pr_err("PMU switch/regulators init unknown for device "
			"0x%04X\n", bus->chipinfo.id);
	}
}

/* Disable to allow reading SPROM. Don't know the adventages of enabling it. */
void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable)
{
	struct bcma_bus *bus = cc->core->bus;
	u32 val;

	val = bcma_cc_read32(cc, BCMA_CC_CHIPCTL);
	if (enable) {
		val |= BCMA_CHIPCTL_4331_EXTPA_EN;
		/* packages 9 and 11 route the PA over GPIO 2..5 */
		if (bus->chipinfo.pkg == 9 || bus->chipinfo.pkg == 11)
			val |= BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
	} else {
		val &= ~BCMA_CHIPCTL_4331_EXTPA_EN;
		val &= ~BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
	}
	bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val);
}

/* Apply known chip-specific PMU errata workarounds. */
void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;

	switch (bus->chipinfo.id) {
	case 0x4313:
		bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7);
		break;
	case 0x4331:
	case 43431:
		/* Ext PA lines must be enabled for tx on BCM4331 */
		bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true);
		break;
	case 43224:
		if (bus->chipinfo.rev == 0) {
			pr_err("Workarounds for 43224 rev 0 not fully "
				"implemented\n");
			bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x00F000F0);
		} else {
			bcma_chipco_chipctl_maskset(cc, 0, ~0, 0xF0);
		}
		break;
	case 43225:
		break;
	default:
		pr_err("Workarounds unknown for device 0x%04X\n",
			bus->chipinfo.id);
	}
}

/* Top-level PMU bring-up: capability probe, ILP control, then the
 * per-chip init/workaround helpers above. */
void bcma_pmu_init(struct bcma_drv_cc *cc)
{
	u32 pmucap;

	pmucap = bcma_cc_read32(cc, BCMA_CC_PMU_CAP);
	cc->pmu.rev = (pmucap & BCMA_CC_PMU_CAP_REVISION);

	pr_debug("Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev,
		 pmucap);

	if (cc->pmu.rev == 1)
		bcma_cc_mask32(cc, BCMA_CC_PMU_CTL,
			      ~BCMA_CC_PMU_CTL_NOILPONW);
	else
		bcma_cc_set32(cc, BCMA_CC_PMU_CTL,
			     BCMA_CC_PMU_CTL_NOILPONW);

	if (cc->core->id.id == 0x4329 && cc->core->id.rev == 2)
		pr_err("Fix for 4329b0 bad LPOM state not implemented!\n");

	bcma_pmu_pll_init(cc);
	bcma_pmu_resources_init(cc);
	bcma_pmu_swreg_init(cc);
	bcma_pmu_workarounds(cc);
}

/* Return the ALP (always-on low power) clock rate in Hz for this chip. */
u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;

	switch (bus->chipinfo.id) {
	case 0x4716:
	case 0x4748:
	case 47162:
	case 0x4313:
	case 0x5357:
	case 0x4749:
	case 53572:
		/* always 20Mhz */
		return 20000 * 1000;
	case 0x5356:
	case 0x5300:
		/* always 25Mhz */
		return 25000 * 1000;
	default:
		pr_warn("No ALP clock specified for %04X device, "
			"pmu rev. %d, using default %d Hz\n",
			bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK);
	}
	return BCMA_CC_PMU_ALP_CLOCK;
}

/* Find the output of the "m" pll divider given pll controls that start with
 * pllreg "pll0" i.e. 12 for main 6 for phy, 0 for misc.
 */
static u32 bcma_pmu_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
{
	u32 tmp, div, ndiv, p1, p2, fc;
	struct bcma_bus *bus = cc->core->bus;

	BUG_ON((pll0 & 3) || (pll0 > BCMA_CC_PMU4716_MAINPLL_PLL0));

	BUG_ON(!m || m > 4);

	if (bus->chipinfo.id == 0x5357 || bus->chipinfo.id == 0x4749) {
		/* Detect failure in clock setting */
		tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
		if (tmp & 0x40000)
			return 133 * 1000000;
	}

	tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PPL_P1P2_OFF);
	p1 = (tmp & BCMA_CC_PPL_P1_MASK) >> BCMA_CC_PPL_P1_SHIFT;
	p2 = (tmp & BCMA_CC_PPL_P2_MASK) >> BCMA_CC_PPL_P2_SHIFT;

	tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PPL_M14_OFF);
	div = (tmp >> ((m - 1) * BCMA_CC_PPL_MDIV_WIDTH)) &
		BCMA_CC_PPL_MDIV_MASK;

	tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PPL_NM5_OFF);
	ndiv = (tmp & BCMA_CC_PPL_NDIV_MASK) >> BCMA_CC_PPL_NDIV_SHIFT;

	/* Do calculation in Mhz */
	fc = bcma_pmu_alp_clock(cc) / 1000000;
	fc = (p1 * ndiv * fc) / p2;

	/* Return clock in Hertz */
	return (fc / div) * 1000000;
}

/* query bus clock frequency for PMU-enabled chipcommon */
u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;

	switch (bus->chipinfo.id) {
	case 0x4716:
	case 0x4748:
	case 47162:
		return bcma_pmu_clock(cc, BCMA_CC_PMU4716_MAINPLL_PLL0,
				      BCMA_CC_PMU5_MAINPLL_SSB);
	case 0x5356:
		return bcma_pmu_clock(cc, BCMA_CC_PMU5356_MAINPLL_PLL0,
				      BCMA_CC_PMU5_MAINPLL_SSB);
	case 0x5357:
	case 0x4749:
		return bcma_pmu_clock(cc, BCMA_CC_PMU5357_MAINPLL_PLL0,
				      BCMA_CC_PMU5_MAINPLL_SSB);
	case 0x5300:
		return bcma_pmu_clock(cc, BCMA_CC_PMU4706_MAINPLL_PLL0,
				      BCMA_CC_PMU5_MAINPLL_SSB);
	case 53572:
		return 75000000;
	default:
		pr_warn("No backplane clock specified for %04X device, "
			"pmu rev. %d, using default %d Hz\n",
			bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
	}
	return BCMA_CC_PMU_HT_CLOCK;
}

/* query cpu clock frequency for PMU-enabled chipcommon */
u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;

	if (bus->chipinfo.id == 53572)
		return 300000000;

	if (cc->pmu.rev >= 5) {
		u32 pll;
		switch (bus->chipinfo.id) {
		case 0x5356:
			pll = BCMA_CC_PMU5356_MAINPLL_PLL0;
			break;
		case 0x5357:
		case 0x4749:
			pll = BCMA_CC_PMU5357_MAINPLL_PLL0;
			break;
		default:
			pll = BCMA_CC_PMU4716_MAINPLL_PLL0;
			break;
		}

		/* TODO: if (bus->chipinfo.id == 0x5300)
		     return si_4706_pmu_clock(sih, osh, cc, PMU4706_MAINPLL_PLL0, PMU5_MAINPLL_CPU); */

		return bcma_pmu_clock(cc, pll, BCMA_CC_PMU5_MAINPLL_CPU);
	}

	return bcma_pmu_get_clockcontrol(cc);
}
gpl-2.0
sagigrimberg/linux
drivers/input/touchscreen/dynapro.c
2116
4605
/* * Dynapro serial touchscreen driver * * Copyright (c) 2009 Tias Guns * Based on the inexio driver (c) Vojtech Pavlik and Dan Streetman and * Richard Lemon * */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ /* * 2009/09/19 Tias Guns <tias@ulyssis.org> * Copied inexio.c and edited for Dynapro protocol (from retired Xorg module) */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/serio.h> #define DRIVER_DESC "Dynapro serial touchscreen driver" MODULE_AUTHOR("Tias Guns <tias@ulyssis.org>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* * Definitions & global arrays. */ #define DYNAPRO_FORMAT_TOUCH_BIT 0x40 #define DYNAPRO_FORMAT_LENGTH 3 #define DYNAPRO_RESPONSE_BEGIN_BYTE 0x80 #define DYNAPRO_MIN_XC 0 #define DYNAPRO_MAX_XC 0x3ff #define DYNAPRO_MIN_YC 0 #define DYNAPRO_MAX_YC 0x3ff #define DYNAPRO_GET_XC(data) (data[1] | ((data[0] & 0x38) << 4)) #define DYNAPRO_GET_YC(data) (data[2] | ((data[0] & 0x07) << 7)) #define DYNAPRO_GET_TOUCHED(data) (DYNAPRO_FORMAT_TOUCH_BIT & data[0]) /* * Per-touchscreen data. 
*/ struct dynapro { struct input_dev *dev; struct serio *serio; int idx; unsigned char data[DYNAPRO_FORMAT_LENGTH]; char phys[32]; }; static void dynapro_process_data(struct dynapro *pdynapro) { struct input_dev *dev = pdynapro->dev; if (DYNAPRO_FORMAT_LENGTH == ++pdynapro->idx) { input_report_abs(dev, ABS_X, DYNAPRO_GET_XC(pdynapro->data)); input_report_abs(dev, ABS_Y, DYNAPRO_GET_YC(pdynapro->data)); input_report_key(dev, BTN_TOUCH, DYNAPRO_GET_TOUCHED(pdynapro->data)); input_sync(dev); pdynapro->idx = 0; } } static irqreturn_t dynapro_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct dynapro *pdynapro = serio_get_drvdata(serio); pdynapro->data[pdynapro->idx] = data; if (DYNAPRO_RESPONSE_BEGIN_BYTE & pdynapro->data[0]) dynapro_process_data(pdynapro); else dev_dbg(&serio->dev, "unknown/unsynchronized data: %x\n", pdynapro->data[0]); return IRQ_HANDLED; } static void dynapro_disconnect(struct serio *serio) { struct dynapro *pdynapro = serio_get_drvdata(serio); input_get_device(pdynapro->dev); input_unregister_device(pdynapro->dev); serio_close(serio); serio_set_drvdata(serio, NULL); input_put_device(pdynapro->dev); kfree(pdynapro); } /* * dynapro_connect() is the routine that is called when someone adds a * new serio device that supports dynapro protocol and registers it as * an input device. This is usually accomplished using inputattach. 
*/ static int dynapro_connect(struct serio *serio, struct serio_driver *drv) { struct dynapro *pdynapro; struct input_dev *input_dev; int err; pdynapro = kzalloc(sizeof(struct dynapro), GFP_KERNEL); input_dev = input_allocate_device(); if (!pdynapro || !input_dev) { err = -ENOMEM; goto fail1; } pdynapro->serio = serio; pdynapro->dev = input_dev; snprintf(pdynapro->phys, sizeof(pdynapro->phys), "%s/input0", serio->phys); input_dev->name = "Dynapro Serial TouchScreen"; input_dev->phys = pdynapro->phys; input_dev->id.bustype = BUS_RS232; input_dev->id.vendor = SERIO_DYNAPRO; input_dev->id.product = 0; input_dev->id.version = 0x0001; input_dev->dev.parent = &serio->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(pdynapro->dev, ABS_X, DYNAPRO_MIN_XC, DYNAPRO_MAX_XC, 0, 0); input_set_abs_params(pdynapro->dev, ABS_Y, DYNAPRO_MIN_YC, DYNAPRO_MAX_YC, 0, 0); serio_set_drvdata(serio, pdynapro); err = serio_open(serio, drv); if (err) goto fail2; err = input_register_device(pdynapro->dev); if (err) goto fail3; return 0; fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(pdynapro); return err; } /* * The serio driver structure. */ static struct serio_device_id dynapro_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_DYNAPRO, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, dynapro_serio_ids); static struct serio_driver dynapro_drv = { .driver = { .name = "dynapro", }, .description = DRIVER_DESC, .id_table = dynapro_serio_ids, .interrupt = dynapro_interrupt, .connect = dynapro_connect, .disconnect = dynapro_disconnect, }; module_serio_driver(dynapro_drv);
gpl-2.0
TeamWin/android_kernel_oppo_r7f
drivers/i2c/busses/i2c-xlr.c
2372
6988
/*
 * Copyright 2011, Netlogic Microsystems Inc.
 * Copyright 2004, Matt Porter <mporter@kernel.crashing.org>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* XLR I2C REGISTERS (word offsets from the block base) */
#define XLR_I2C_CFG		0x00
#define XLR_I2C_CLKDIV		0x01
#define XLR_I2C_DEVADDR		0x02
#define XLR_I2C_ADDR		0x03
#define XLR_I2C_DATAOUT		0x04
#define XLR_I2C_DATAIN		0x05
#define XLR_I2C_STATUS		0x06
#define XLR_I2C_STARTXFR	0x07
#define XLR_I2C_BYTECNT		0x08
#define XLR_I2C_HDSTATIM	0x09

/* XLR I2C REGISTERS FLAGS (bits of XLR_I2C_STATUS) */
#define XLR_I2C_BUS_BUSY	0x01
#define XLR_I2C_SDOEMPTY	0x02
#define XLR_I2C_RXRDY		0x04
#define XLR_I2C_ACK_ERR		0x08
#define XLR_I2C_ARB_STARTERR	0x30

/* Register Values */
#define XLR_I2C_CFG_ADDR	0xF8
#define XLR_I2C_CFG_NOADDR	0xFA
#define XLR_I2C_STARTXFR_ND	0x02	/* No Data */
#define XLR_I2C_STARTXFR_RD	0x01	/* Read */
#define XLR_I2C_STARTXFR_WR	0x00	/* Write */

#define XLR_I2C_TIMEOUT		10	/* timeout per byte in msec */

/*
 * On XLR/XLS, we need to use __raw_ IO to read the I2C registers
 * because they are in the big-endian MMIO area on the SoC.
 *
 * The readl/writel implementation on XLR/XLS byteswaps, because
 * those are for its little-endian PCI space (see arch/mips/Kconfig).
 */
static inline void xlr_i2c_wreg(u32 __iomem *base, unsigned int reg, u32 val)
{
	__raw_writel(val, base + reg);
}

static inline u32 xlr_i2c_rdreg(u32 __iomem *base, unsigned int reg)
{
	return __raw_readl(base + reg);
}

/* Per-adapter state: the i2c core adapter plus the mapped register block */
struct xlr_i2c_private {
	struct i2c_adapter adap;
	u32 __iomem *iobase;
};

/*
 * Transmit a write message.  buf[0] is treated as the target register
 * offset (written to XLR_I2C_ADDR); the remaining len-1 bytes are
 * streamed out through XLR_I2C_DATAOUT as SDOEMPTY is polled.  The
 * per-byte timeout is restarted after each successfully queued byte.
 * Returns 0 on success, -EIO on NAK, -ETIMEDOUT on timeout.
 */
static int xlr_i2c_tx(struct xlr_i2c_private *priv,  u16 len,
	u8 *buf, u16 addr)
{
	struct i2c_adapter *adap = &priv->adap;
	unsigned long timeout, stoptime, checktime;
	u32 i2c_status;
	int pos, timedout;
	u8 offset, byte;

	offset = buf[0];
	xlr_i2c_wreg(priv->iobase, XLR_I2C_ADDR, offset);
	xlr_i2c_wreg(priv->iobase, XLR_I2C_DEVADDR, addr);
	xlr_i2c_wreg(priv->iobase, XLR_I2C_CFG, XLR_I2C_CFG_ADDR);
	xlr_i2c_wreg(priv->iobase, XLR_I2C_BYTECNT, len - 1);

	timeout = msecs_to_jiffies(XLR_I2C_TIMEOUT);
	stoptime = jiffies + timeout;
	timedout = 0;
	pos = 1;
retry:
	/* A 1-byte message carries only the offset: start a no-data cycle */
	if (len == 1) {
		xlr_i2c_wreg(priv->iobase, XLR_I2C_STARTXFR,
				XLR_I2C_STARTXFR_ND);
	} else {
		xlr_i2c_wreg(priv->iobase, XLR_I2C_DATAOUT, buf[pos]);
		xlr_i2c_wreg(priv->iobase, XLR_I2C_STARTXFR,
				XLR_I2C_STARTXFR_WR);
	}

	while (!timedout) {
		checktime = jiffies;
		i2c_status = xlr_i2c_rdreg(priv->iobase, XLR_I2C_STATUS);

		if (i2c_status & XLR_I2C_SDOEMPTY) {
			pos++;
			/* need to do a empty dataout after the last byte */
			byte = (pos < len) ? buf[pos] : 0;
			xlr_i2c_wreg(priv->iobase, XLR_I2C_DATAOUT, byte);

			/* reset timeout on successful xmit */
			stoptime = jiffies + timeout;
		}
		timedout = time_after(checktime, stoptime);

		/* lost arbitration or bad start: re-issue the transfer */
		if (i2c_status & XLR_I2C_ARB_STARTERR) {
			if (timedout)
				break;
			goto retry;
		}

		if (i2c_status & XLR_I2C_ACK_ERR)
			return -EIO;

		if ((i2c_status & XLR_I2C_BUS_BUSY) == 0 && pos >= len)
			return 0;
	}
	dev_err(&adap->dev, "I2C transmit timeout\n");
	return -ETIMEDOUT;
}

/*
 * Receive a read message of len bytes into buf by polling RXRDY.
 * The controller delivers one extra byte at the end, which is read
 * and discarded.  Error returns mirror xlr_i2c_tx().
 */
static int xlr_i2c_rx(struct xlr_i2c_private *priv, u16 len, u8 *buf, u16 addr)
{
	struct i2c_adapter *adap = &priv->adap;
	u32 i2c_status;
	unsigned long timeout, stoptime, checktime;
	int nbytes, timedout;
	u8 byte;

	xlr_i2c_wreg(priv->iobase, XLR_I2C_CFG, XLR_I2C_CFG_NOADDR);
	xlr_i2c_wreg(priv->iobase, XLR_I2C_BYTECNT, len);
	xlr_i2c_wreg(priv->iobase, XLR_I2C_DEVADDR, addr);

	timeout = msecs_to_jiffies(XLR_I2C_TIMEOUT);
	stoptime = jiffies + timeout;
	timedout = 0;
	nbytes = 0;
retry:
	xlr_i2c_wreg(priv->iobase, XLR_I2C_STARTXFR, XLR_I2C_STARTXFR_RD);

	while (!timedout) {
		checktime = jiffies;
		i2c_status = xlr_i2c_rdreg(priv->iobase, XLR_I2C_STATUS);
		if (i2c_status & XLR_I2C_RXRDY) {
			if (nbytes > len)
				return -EIO;	/* should not happen */

			/* we need to do a dummy datain when nbytes == len */
			byte = xlr_i2c_rdreg(priv->iobase, XLR_I2C_DATAIN);
			if (nbytes < len)
				buf[nbytes] = byte;
			nbytes++;

			/* reset timeout on successful read */
			stoptime = jiffies + timeout;
		}

		timedout = time_after(checktime, stoptime);
		/* lost arbitration or bad start: re-issue the transfer */
		if (i2c_status & XLR_I2C_ARB_STARTERR) {
			if (timedout)
				break;
			goto retry;
		}

		if (i2c_status & XLR_I2C_ACK_ERR)
			return -EIO;

		if ((i2c_status & XLR_I2C_BUS_BUSY) == 0)
			return 0;
	}

	dev_err(&adap->dev, "I2C receive timeout\n");
	return -ETIMEDOUT;
}

/*
 * i2c core master_xfer hook: dispatch each message to rx or tx and
 * stop at the first failure.  Returns the number of messages
 * processed on success, or the first negative error code.
 */
static int xlr_i2c_xfer(struct i2c_adapter *adap,
	struct i2c_msg *msgs, int num)
{
	struct i2c_msg *msg;
	int i;
	int ret = 0;
	struct xlr_i2c_private *priv = i2c_get_adapdata(adap);

	for (i = 0; ret == 0 && i < num; i++) {
		msg = &msgs[i];
		if (msg->flags & I2C_M_RD)
			ret = xlr_i2c_rx(priv, msg->len, &msg->buf[0],
					msg->addr);
		else
			ret = xlr_i2c_tx(priv, msg->len, &msg->buf[0],
					msg->addr);
	}

	return (ret != 0) ? ret : num;
}

static u32 xlr_func(struct i2c_adapter *adap)
{
	/* Emulate SMBUS over I2C */
	return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C;
}

static struct i2c_algorithm xlr_i2c_algo = {
	.master_xfer	= xlr_i2c_xfer,
	.functionality	= xlr_func,
};

/*
 * Map the register block (managed, freed automatically on detach),
 * fill in the adapter and register it under the platform device id.
 */
static int xlr_i2c_probe(struct platform_device *pdev)
{
	struct xlr_i2c_private  *priv;
	struct resource *res;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->iobase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->iobase))
		return PTR_ERR(priv->iobase);

	priv->adap.dev.parent = &pdev->dev;
	priv->adap.owner	= THIS_MODULE;
	priv->adap.algo_data	= priv;
	priv->adap.algo		= &xlr_i2c_algo;
	priv->adap.nr		= pdev->id;
	priv->adap.class	= I2C_CLASS_HWMON;
	snprintf(priv->adap.name, sizeof(priv->adap.name), "xlr-i2c");

	i2c_set_adapdata(&priv->adap, priv);
	ret = i2c_add_numbered_adapter(&priv->adap);
	if (ret < 0) {
		dev_err(&priv->adap.dev, "Failed to add i2c bus.\n");
		return ret;
	}

	platform_set_drvdata(pdev, priv);
	dev_info(&priv->adap.dev, "Added I2C Bus.\n");
	return 0;
}

static int xlr_i2c_remove(struct platform_device *pdev)
{
	struct xlr_i2c_private *priv;

	priv = platform_get_drvdata(pdev);
	i2c_del_adapter(&priv->adap);
	return 0;
}

static struct platform_driver xlr_i2c_driver = {
	.probe  = xlr_i2c_probe,
	.remove = xlr_i2c_remove,
	.driver = {
		.name   = "xlr-i2cbus",
		.owner  = THIS_MODULE,
	},
};

module_platform_driver(xlr_i2c_driver);

MODULE_AUTHOR("Ganesan Ramalingam <ganesanr@netlogicmicro.com>");
MODULE_DESCRIPTION("XLR/XLS SoC I2C Controller driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:xlr-i2cbus");
gpl-2.0
Elite-Kernels/HTC-10
drivers/net/ethernet/sfc/mtd.c
2372
3120
/**************************************************************************** * Driver for Solarflare network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. * Copyright 2006-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <linux/module.h> #include <linux/mtd/mtd.h> #include <linux/slab.h> #include <linux/rtnetlink.h> #include "net_driver.h" #include "efx.h" #define to_efx_mtd_partition(mtd) \ container_of(mtd, struct efx_mtd_partition, mtd) /* MTD interface */ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase) { struct efx_nic *efx = mtd->priv; int rc; rc = efx->type->mtd_erase(mtd, erase->addr, erase->len); if (rc == 0) { erase->state = MTD_ERASE_DONE; } else { erase->state = MTD_ERASE_FAILED; erase->fail_addr = MTD_FAIL_ADDR_UNKNOWN; } mtd_erase_callback(erase); return rc; } static void efx_mtd_sync(struct mtd_info *mtd) { struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); struct efx_nic *efx = mtd->priv; int rc; rc = efx->type->mtd_sync(mtd); if (rc) pr_err("%s: %s sync failed (%d)\n", part->name, part->dev_type_name, rc); } static void efx_mtd_remove_partition(struct efx_mtd_partition *part) { int rc; for (;;) { rc = mtd_device_unregister(&part->mtd); if (rc != -EBUSY) break; ssleep(1); } WARN_ON(rc); list_del(&part->node); } int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, size_t n_parts, size_t sizeof_part) { struct efx_mtd_partition *part; size_t i; for (i = 0; i < n_parts; i++) { part = (struct efx_mtd_partition *)((char *)parts + i * sizeof_part); part->mtd.writesize = 1; part->mtd.owner = THIS_MODULE; part->mtd.priv = efx; part->mtd.name = part->name; part->mtd._erase = efx_mtd_erase; part->mtd._read = efx->type->mtd_read; part->mtd._write = efx->type->mtd_write; 
part->mtd._sync = efx_mtd_sync; efx->type->mtd_rename(part); if (mtd_device_register(&part->mtd, NULL, 0)) goto fail; /* Add to list in order - efx_mtd_remove() depends on this */ list_add_tail(&part->node, &efx->mtd_list); } return 0; fail: while (i--) { part = (struct efx_mtd_partition *)((char *)parts + i * sizeof_part); efx_mtd_remove_partition(part); } /* Failure is unlikely here, but probably means we're out of memory */ return -ENOMEM; } void efx_mtd_remove(struct efx_nic *efx) { struct efx_mtd_partition *parts, *part, *next; WARN_ON(efx_dev_registered(efx)); if (list_empty(&efx->mtd_list)) return; parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition, node); list_for_each_entry_safe(part, next, &efx->mtd_list, node) efx_mtd_remove_partition(part); kfree(parts); } void efx_mtd_rename(struct efx_nic *efx) { struct efx_mtd_partition *part; ASSERT_RTNL(); list_for_each_entry(part, &efx->mtd_list, node) efx->type->mtd_rename(part); }
gpl-2.0
manfromnn/msm8909_tp-link_c5l_kernel
drivers/watchdog/iTCO_vendor_support.c
4932
11089
/*
 *	intel TCO vendor specific watchdog driver support
 *
 *	(c) Copyright 2006-2009 Wim Van Sebroeck <wim@iguana.be>.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Neither Wim Van Sebroeck nor Iguana vzw. admit liability nor
 *	provide warranty for any of this software. This material is
 *	provided "AS-IS" and at no charge.
 */

/*
 *	Includes, defines, variables, module parameters, ...
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* Module and version information */
#define DRV_NAME	"iTCO_vendor_support"
#define DRV_VERSION	"1.04"

/* Includes */
#include <linux/module.h>		/* For module specific items */
#include <linux/moduleparam.h>		/* For new moduleparam's */
#include <linux/types.h>		/* For standard types (like size_t) */
#include <linux/errno.h>		/* For the -ENODEV/... values */
#include <linux/kernel.h>		/* For printk/panic/... */
#include <linux/init.h>			/* For __init/__exit/... */
#include <linux/ioport.h>		/* For io-port access */
#include <linux/io.h>			/* For inb/outb/... */

#include "iTCO_vendor.h"

/* List of vendor support modes */
/* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */
#define SUPERMICRO_OLD_BOARD	1
/* SuperMicro Pentium 4 / Xeon 4 / EMT64T Era Systems */
#define SUPERMICRO_NEW_BOARD	2
/* Broken BIOS */
#define BROKEN_BIOS		911

/* Selected support mode; 0 (none) unless set on the module command line */
static int vendorsupport;
module_param(vendorsupport, int, 0);
MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default="
			"0 (none), 1=SuperMicro Pent3, 2=SuperMicro Pent4+, "
			"911=Broken SMI BIOS");

/*
 *	Vendor Specific Support
 */

/*
 *	Vendor Support: 1
 *	Board: Super Micro Computer Inc. 370SSE+-OEM1/P3TSSE
 *	iTCO chipset: ICH2
 *
 *	Code contributed by: R. Seretny <lkpatches@paypc.com>
 *	Documentation obtained by R. Seretny from SuperMicro Technical Support
 *
 *	To enable Watchdog function:
 *	    BIOS setup -> Power -> TCO Logic SMI Enable -> Within5Minutes
 *	    This setting enables SMI to clear the watchdog expired flag.
 *	    If BIOS or CPU fail which may cause SMI hang, then system will
 *	    reboot. When application starts to use watchdog function,
 *	    application has to take over the control from SMI.
 *
 *	For P3TSSE, J36 jumper needs to be removed to enable the Watchdog
 *	function.
 *
 *	Note: The system will reboot when Expire Flag is set TWICE.
 *	So, if the watchdog timer is 20 seconds, then the maximum hang
 *	time is about 40 seconds, and the minimum hang time is about
 *	20.6 seconds.
 */

/* Hand control of the TCO timer from the SMI handler to the driver */
static void supermicro_old_pre_start(struct resource *smires)
{
	unsigned long val32;

	/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
	val32 = inl(smires->start);
	val32 &= 0xffffdfff;	/* Turn off SMI clearing watchdog */
	outl(val32, smires->start);	/* Needed to activate watchdog */
}

/* Hand control of the TCO timer back to the SMI handler */
static void supermicro_old_pre_stop(struct resource *smires)
{
	unsigned long val32;

	/* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
	val32 = inl(smires->start);
	val32 |= 0x00002000;	/* Turn on SMI clearing watchdog */
	outl(val32, smires->start);	/* Needed to deactivate watchdog */
}

/*
 *	Vendor Support: 2
 *	Board: Super Micro Computer Inc. P4SBx, P4DPx
 *	iTCO chipset: ICH4
 *
 *	Code contributed by: R. Seretny <lkpatches@paypc.com>
 *	Documentation obtained by R. Seretny from SuperMicro Technical Support
 *
 *	To enable Watchdog function:
 *	 1. BIOS
 *	  For P4SBx:
 *	  BIOS setup -> Advanced -> Integrated Peripherals -> Watch Dog Feature
 *	  For P4DPx:
 *	  BIOS setup -> Advanced -> I/O Device Configuration -> Watch Dog
 *	  This setting enables or disables Watchdog function. When enabled, the
 *	  default watchdog timer is set to be 5 minutes (about 4m35s). It is
 *	  enough to load and run the OS. The application (service or driver) has
 *	  to take over the control once OS is running up and before watchdog
 *	  expires.
 *
 *	 2. JUMPER
 *	  For P4SBx: JP39
 *	  For P4DPx: JP37
 *	  This jumper is used for safety.  Closed is enabled. This jumper
 *	  prevents user enables watchdog in BIOS by accident.
 *
 *	 To enable Watch Dog function, both BIOS and JUMPER must be enabled.
 *
 *	The documentation lists motherboards P4SBx and P4DPx series as of
 *	20-March-2002. However, this code works flawlessly with much newer
 *	motherboards, such as my X6DHR-8G2 (SuperServer 6014H-82).
 *
 *	The original iTCO driver as written does not actually reset the
 *	watchdog timer on these machines, as a result they reboot after five
 *	minutes.
 *
 *	NOTE: You may leave the Watchdog function disabled in the SuperMicro
 *	BIOS to avoid a "boot-race"...  This driver will enable watchdog
 *	functionality even if it's disabled in the BIOS once the /dev/watchdog
 *	file is opened.
 */

/* I/O Port's */
#define SM_REGINDEX	0x2e	/* SuperMicro ICH4+ Register Index */
#define SM_DATAIO	0x2f	/* SuperMicro ICH4+ Register Data I/O */

/* Control Register's */
#define SM_CTLPAGESW	0x07	/* SuperMicro ICH4+ Control Page Switch */
#define SM_CTLPAGE	0x08	/* SuperMicro ICH4+ Control Page Num */

#define SM_WATCHENABLE	0x30	/* Watchdog enable: Bit 0: 0=off, 1=on */

#define SM_WATCHPAGE	0x87	/* Watchdog unlock control page */

#define SM_ENDWATCH	0xAA	/* Watchdog lock control page */

#define SM_COUNTMODE	0xf5	/* Watchdog count mode select */
				/* (Bit 3: 0 = seconds, 1 = minutes */

#define SM_WATCHTIMER	0xf6	/* 8-bits, Watchdog timer counter (RW) */

#define SM_RESETCONTROL	0xf7	/* Watchdog reset control */
				/* Bit 6: timer is reset by kbd interrupt */
				/* Bit 7: timer is reset by mouse interrupt */

/* Unlock the Super-I/O watchdog page via the 0x2e/0x2f index/data pair */
static void supermicro_new_unlock_watchdog(void)
{
	/* Write 0x87 to port 0x2e twice */
	outb(SM_WATCHPAGE, SM_REGINDEX);
	outb(SM_WATCHPAGE, SM_REGINDEX);
	/* Switch to watchdog control page */
	outb(SM_CTLPAGESW, SM_REGINDEX);
	outb(SM_CTLPAGE, SM_DATAIO);
}

/* Re-lock the watchdog control page */
static void supermicro_new_lock_watchdog(void)
{
	outb(SM_ENDWATCH, SM_REGINDEX);
}

/* Program count mode, timeout and enable bit, then re-lock */
static void supermicro_new_pre_start(unsigned int heartbeat)
{
	unsigned int val;

	supermicro_new_unlock_watchdog();

	/* Watchdog timer setting needs to be in seconds*/
	outb(SM_COUNTMODE, SM_REGINDEX);
	val = inb(SM_DATAIO);
	val &= 0xF7;
	outb(val, SM_DATAIO);

	/* Write heartbeat interval to WDOG */
	outb(SM_WATCHTIMER, SM_REGINDEX);
	outb((heartbeat & 255), SM_DATAIO);

	/* Make sure keyboard/mouse interrupts don't interfere */
	outb(SM_RESETCONTROL, SM_REGINDEX);
	val = inb(SM_DATAIO);
	val &= 0x3f;
	outb(val, SM_DATAIO);

	/* enable watchdog by setting bit 0 of Watchdog Enable to 1 */
	outb(SM_WATCHENABLE, SM_REGINDEX);
	val = inb(SM_DATAIO);
	val |= 0x01;
	outb(val, SM_DATAIO);

	supermicro_new_lock_watchdog();
}

/* Clear the enable bit, then re-lock */
static void supermicro_new_pre_stop(void)
{
	unsigned int val;

	supermicro_new_unlock_watchdog();

	/* disable watchdog by setting bit 0 of Watchdog Enable to 0 */
	outb(SM_WATCHENABLE, SM_REGINDEX);
	val = inb(SM_DATAIO);
	val &= 0xFE;
	outb(val, SM_DATAIO);

	supermicro_new_lock_watchdog();
}

/* Reload the timer counter; used for both keepalive and timeout change */
static void supermicro_new_pre_set_heartbeat(unsigned int heartbeat)
{
	supermicro_new_unlock_watchdog();

	/* reset watchdog timeout to heartbeat value */
	outb(SM_WATCHTIMER, SM_REGINDEX);
	outb((heartbeat & 255), SM_DATAIO);

	supermicro_new_lock_watchdog();
}

/*
 *	Vendor Support: 911
 *	Board: Some Intel ICHx based motherboards
 *	iTCO chipset: ICH7+
 *
 *	Some Intel motherboards have a broken BIOS implementation:  i.e.
 *	the SMI handler clears the TIMEOUT bit in the TCO1_STS register
 *	and does not reload the time. Thus the TCO watchdog does not reboot
 *	the system.
 *
 *	These are the conclusions of Andriy Gapon <avg@icyb.net.ua> after
 *	debugging: the SMI handler is quite simple - it tests value in
 *	TCO1_CNT against 0x800, i.e. checks TCO_TMR_HLT. If the bit is set
 *	the handler goes into an infinite loop, apparently to allow the
 *	second timeout and reboot. Otherwise it simply clears TIMEOUT bit
 *	in TCO1_STS and that's it.
 *	So the logic seems to be reversed, because it is hard to see how
 *	TIMEOUT can get set to 1 and SMI generated when TCO_TMR_HLT is set
 *	(other than a transitional effect).
 *
 *	The only fix found to get the motherboard(s) to reboot is to put
 *	the glb_smi_en bit to 0. This is a dirty hack that bypasses the
 *	broken code by disabling Global SMI.
 *
 *	WARNING: globally disabling SMI could possibly lead to dramatic
 *	problems, especially on laptops! I.e. various ACPI things where
 *	SMI is used for communication between OS and firmware.
 *
 *	Don't use this fix if you don't need to!!!
 */

static void broken_bios_start(struct resource *smires)
{
	unsigned long val32;

	val32 = inl(smires->start);
	/* Bit 13: TCO_EN     -> 0 = Disables TCO logic generating an SMI#
	   Bit  0: GBL_SMI_EN -> 0 = No SMI# will be generated by ICH. */
	val32 &= 0xffffdffe;
	outl(val32, smires->start);
}

static void broken_bios_stop(struct resource *smires)
{
	unsigned long val32;

	val32 = inl(smires->start);
	/* Bit 13: TCO_EN     -> 1 = Enables TCO logic generating an SMI#
	   Bit  0: GBL_SMI_EN -> 1 = Turn global SMI on again. */
	val32 |= 0x00002001;
	outl(val32, smires->start);
}

/*
 *	Generic Support Functions
 *
 *	Dispatchers called from the iTCO_wdt core around its own start/
 *	stop/keepalive/set-timeout operations, keyed on vendorsupport.
 */

void iTCO_vendor_pre_start(struct resource *smires,
			   unsigned int heartbeat)
{
	switch (vendorsupport) {
	case SUPERMICRO_OLD_BOARD:
		supermicro_old_pre_start(smires);
		break;
	case SUPERMICRO_NEW_BOARD:
		supermicro_new_pre_start(heartbeat);
		break;
	case BROKEN_BIOS:
		broken_bios_start(smires);
		break;
	}
}
EXPORT_SYMBOL(iTCO_vendor_pre_start);

void iTCO_vendor_pre_stop(struct resource *smires)
{
	switch (vendorsupport) {
	case SUPERMICRO_OLD_BOARD:
		supermicro_old_pre_stop(smires);
		break;
	case SUPERMICRO_NEW_BOARD:
		supermicro_new_pre_stop();
		break;
	case BROKEN_BIOS:
		broken_bios_stop(smires);
		break;
	}
}
EXPORT_SYMBOL(iTCO_vendor_pre_stop);

void iTCO_vendor_pre_keepalive(struct resource *smires, unsigned int heartbeat)
{
	if (vendorsupport == SUPERMICRO_NEW_BOARD)
		supermicro_new_pre_set_heartbeat(heartbeat);
}
EXPORT_SYMBOL(iTCO_vendor_pre_keepalive);

void iTCO_vendor_pre_set_heartbeat(unsigned int heartbeat)
{
	if (vendorsupport == SUPERMICRO_NEW_BOARD)
		supermicro_new_pre_set_heartbeat(heartbeat);
}
EXPORT_SYMBOL(iTCO_vendor_pre_set_heartbeat);

/* Only the old SuperMicro boards can clear the NO_REBOOT flag */
int iTCO_vendor_check_noreboot_on(void)
{
	switch (vendorsupport) {
	case SUPERMICRO_OLD_BOARD:
		return 0;
	default:
		return 1;
	}
}
EXPORT_SYMBOL(iTCO_vendor_check_noreboot_on);

static int __init iTCO_vendor_init_module(void)
{
	pr_info("vendor-support=%d\n", vendorsupport);
	return 0;
}

static void __exit iTCO_vendor_exit_module(void)
{
	pr_info("Module Unloaded\n");
}

module_init(iTCO_vendor_init_module);
module_exit(iTCO_vendor_exit_module);

MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>, "
		"R. Seretny <lkpatches@paypc.com>");
MODULE_DESCRIPTION("Intel TCO Vendor Specific WatchDog Timer Driver Support");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
gpl-2.0
n3ocort3x/android_kernel_htc_m7
fs/logfs/journal.c
4932
24271
/* * fs/logfs/journal.c - journal handling code * * As should be obvious for Linux kernel code, license is GPLv2 * * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org> */ #include "logfs.h" #include <linux/slab.h> static void logfs_calc_free(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); u64 reserve, no_segs = super->s_no_segs; s64 free; int i; /* superblock segments */ no_segs -= 2; super->s_no_journal_segs = 0; /* journal */ journal_for_each(i) if (super->s_journal_seg[i]) { no_segs--; super->s_no_journal_segs++; } /* open segments plus one extra per level for GC */ no_segs -= 2 * super->s_total_levels; free = no_segs * (super->s_segsize - LOGFS_SEGMENT_RESERVE); free -= super->s_used_bytes; /* just a bit extra */ free -= super->s_total_levels * 4096; /* Bad blocks are 'paid' for with speed reserve - the filesystem * simply gets slower as bad blocks accumulate. Until the bad blocks * exceed the speed reserve - then the filesystem gets smaller. */ reserve = super->s_bad_segments + super->s_bad_seg_reserve; reserve *= super->s_segsize - LOGFS_SEGMENT_RESERVE; reserve = max(reserve, super->s_speed_reserve); free -= reserve; if (free < 0) free = 0; super->s_free_bytes = free; } static void reserve_sb_and_journal(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); struct btree_head32 *head = &super->s_reserved_segments; int i, err; err = btree_insert32(head, seg_no(sb, super->s_sb_ofs[0]), (void *)1, GFP_KERNEL); BUG_ON(err); err = btree_insert32(head, seg_no(sb, super->s_sb_ofs[1]), (void *)1, GFP_KERNEL); BUG_ON(err); journal_for_each(i) { if (!super->s_journal_seg[i]) continue; err = btree_insert32(head, super->s_journal_seg[i], (void *)1, GFP_KERNEL); BUG_ON(err); } } static void read_dynsb(struct super_block *sb, struct logfs_je_dynsb *dynsb) { struct logfs_super *super = logfs_super(sb); super->s_gec = be64_to_cpu(dynsb->ds_gec); super->s_sweeper = be64_to_cpu(dynsb->ds_sweeper); super->s_victim_ino = 
be64_to_cpu(dynsb->ds_victim_ino); super->s_rename_dir = be64_to_cpu(dynsb->ds_rename_dir); super->s_rename_pos = be64_to_cpu(dynsb->ds_rename_pos); super->s_used_bytes = be64_to_cpu(dynsb->ds_used_bytes); super->s_generation = be32_to_cpu(dynsb->ds_generation); } static void read_anchor(struct super_block *sb, struct logfs_je_anchor *da) { struct logfs_super *super = logfs_super(sb); struct inode *inode = super->s_master_inode; struct logfs_inode *li = logfs_inode(inode); int i; super->s_last_ino = be64_to_cpu(da->da_last_ino); li->li_flags = 0; li->li_height = da->da_height; i_size_write(inode, be64_to_cpu(da->da_size)); li->li_used_bytes = be64_to_cpu(da->da_used_bytes); for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++) li->li_data[i] = be64_to_cpu(da->da_data[i]); } static void read_erasecount(struct super_block *sb, struct logfs_je_journal_ec *ec) { struct logfs_super *super = logfs_super(sb); int i; journal_for_each(i) super->s_journal_ec[i] = be32_to_cpu(ec->ec[i]); } static int read_area(struct super_block *sb, struct logfs_je_area *a) { struct logfs_super *super = logfs_super(sb); struct logfs_area *area = super->s_area[a->gc_level]; u64 ofs; u32 writemask = ~(super->s_writesize - 1); if (a->gc_level >= LOGFS_NO_AREAS) return -EIO; if (a->vim != VIM_DEFAULT) return -EIO; /* TODO: close area and continue */ area->a_used_bytes = be32_to_cpu(a->used_bytes); area->a_written_bytes = area->a_used_bytes & writemask; area->a_segno = be32_to_cpu(a->segno); if (area->a_segno) area->a_is_open = 1; ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes); if (super->s_writesize > 1) return logfs_buf_recover(area, ofs, a + 1, super->s_writesize); else return logfs_buf_recover(area, ofs, NULL, 0); } static void *unpack(void *from, void *to) { struct logfs_journal_header *jh = from; void *data = from + sizeof(struct logfs_journal_header); int err; size_t inlen, outlen; inlen = be16_to_cpu(jh->h_len); outlen = be16_to_cpu(jh->h_datalen); if (jh->h_compr == COMPR_NONE) memcpy(to, 
data, inlen); else { err = logfs_uncompress(data, to, inlen, outlen); BUG_ON(err); } return to; } static int __read_je_header(struct super_block *sb, u64 ofs, struct logfs_journal_header *jh) { struct logfs_super *super = logfs_super(sb); size_t bufsize = max_t(size_t, sb->s_blocksize, super->s_writesize) + MAX_JOURNAL_HEADER; u16 type, len, datalen; int err; /* read header only */ err = wbuf_read(sb, ofs, sizeof(*jh), jh); if (err) return err; type = be16_to_cpu(jh->h_type); len = be16_to_cpu(jh->h_len); datalen = be16_to_cpu(jh->h_datalen); if (len > sb->s_blocksize) return -EIO; if ((type < JE_FIRST) || (type > JE_LAST)) return -EIO; if (datalen > bufsize) return -EIO; return 0; } static int __read_je_payload(struct super_block *sb, u64 ofs, struct logfs_journal_header *jh) { u16 len; int err; len = be16_to_cpu(jh->h_len); err = wbuf_read(sb, ofs + sizeof(*jh), len, jh + 1); if (err) return err; if (jh->h_crc != logfs_crc32(jh, len + sizeof(*jh), 4)) { /* Old code was confused. It forgot about the header length * and stopped calculating the crc 16 bytes before the end * of data - ick! * FIXME: Remove this hack once the old code is fixed. 
*/ if (jh->h_crc == logfs_crc32(jh, len, 4)) WARN_ON_ONCE(1); else return -EIO; } return 0; } /* * jh needs to be large enough to hold the complete entry, not just the header */ static int __read_je(struct super_block *sb, u64 ofs, struct logfs_journal_header *jh) { int err; err = __read_je_header(sb, ofs, jh); if (err) return err; return __read_je_payload(sb, ofs, jh); } static int read_je(struct super_block *sb, u64 ofs) { struct logfs_super *super = logfs_super(sb); struct logfs_journal_header *jh = super->s_compressed_je; void *scratch = super->s_je; u16 type, datalen; int err; err = __read_je(sb, ofs, jh); if (err) return err; type = be16_to_cpu(jh->h_type); datalen = be16_to_cpu(jh->h_datalen); switch (type) { case JE_DYNSB: read_dynsb(sb, unpack(jh, scratch)); break; case JE_ANCHOR: read_anchor(sb, unpack(jh, scratch)); break; case JE_ERASECOUNT: read_erasecount(sb, unpack(jh, scratch)); break; case JE_AREA: err = read_area(sb, unpack(jh, scratch)); break; case JE_OBJ_ALIAS: err = logfs_load_object_aliases(sb, unpack(jh, scratch), datalen); break; default: WARN_ON_ONCE(1); return -EIO; } return err; } static int logfs_read_segment(struct super_block *sb, u32 segno) { struct logfs_super *super = logfs_super(sb); struct logfs_journal_header *jh = super->s_compressed_je; u64 ofs, seg_ofs = dev_ofs(sb, segno, 0); u32 h_ofs, last_ofs = 0; u16 len, datalen, last_len = 0; int i, err; /* search for most recent commit */ for (h_ofs = 0; h_ofs < super->s_segsize; h_ofs += sizeof(*jh)) { ofs = seg_ofs + h_ofs; err = __read_je_header(sb, ofs, jh); if (err) continue; if (jh->h_type != cpu_to_be16(JE_COMMIT)) continue; err = __read_je_payload(sb, ofs, jh); if (err) continue; len = be16_to_cpu(jh->h_len); datalen = be16_to_cpu(jh->h_datalen); if ((datalen > sizeof(super->s_je_array)) || (datalen % sizeof(__be64))) continue; last_ofs = h_ofs; last_len = datalen; h_ofs += ALIGN(len, sizeof(*jh)) - sizeof(*jh); } /* read commit */ if (last_ofs == 0) return -ENOENT; ofs = 
	seg_ofs + last_ofs;
	log_journal("Read commit from %llx\n", ofs);
	err = __read_je(sb, ofs, jh);
	BUG_ON(err); /* We should have caught it in the scan loop already */
	if (err)
		return err;
	/* uncompress */
	unpack(jh, super->s_je_array);
	super->s_no_je = last_len / sizeof(__be64);
	/* iterate over array */
	for (i = 0; i < super->s_no_je; i++) {
		err = read_je(sb, be64_to_cpu(super->s_je_array[i]));
		if (err)
			return err;
	}
	super->s_journal_area->a_segno = segno;
	return 0;
}

/*
 * Return the global erase count stored in the segment header of @segno,
 * or 0 if the segment is unreadable, unused, or its header CRC is bad
 * (most likely because the segment was just erased).
 */
static u64 read_gec(struct super_block *sb, u32 segno)
{
	struct logfs_segment_header sh;
	__be32 crc;
	int err;

	if (!segno)
		return 0;
	err = wbuf_read(sb, dev_ofs(sb, segno, 0), sizeof(sh), &sh);
	if (err)
		return 0;
	crc = logfs_crc32(&sh, sizeof(sh), 4);
	if (crc != sh.crc) {
		WARN_ON(sh.gec != cpu_to_be64(0xffffffffffffffffull));
		/* Most likely it was just erased */
		return 0;
	}
	return be64_to_cpu(sh.gec);
}

/*
 * Find the journal segment with the highest global erase count (i.e. the
 * one containing the most recent commit) and replay it.  Returns -EIO if
 * no valid journal segment exists.
 */
static int logfs_read_journal(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);
	u64 gec[LOGFS_JOURNAL_SEGS], max;
	u32 segno;
	int i, max_i;

	max = 0;
	max_i = -1;
	journal_for_each(i) {
		segno = super->s_journal_seg[i];
		gec[i] = read_gec(sb, super->s_journal_seg[i]);
		if (gec[i] > max) {
			max = gec[i];
			max_i = i;
		}
	}
	if (max_i == -1)
		return -EIO;
	/* FIXME: Try older segments in case of error */
	return logfs_read_segment(sb, super->s_journal_seg[max_i]);
}

/*
 * First search the current segment (outer loop), then pick the next segment
 * in the array, skipping any zero entries (inner loop).
 */
static void journal_get_free_segment(struct logfs_area *area)
{
	struct logfs_super *super = logfs_super(area->a_sb);
	int i;

	journal_for_each(i) {
		if (area->a_segno != super->s_journal_seg[i])
			continue;

		do {
			i++;
			if (i == LOGFS_JOURNAL_SEGS)
				i = 0;
		} while (!super->s_journal_seg[i]);

		area->a_segno = super->s_journal_seg[i];
		area->a_erase_count = ++(super->s_journal_ec[i]);
		log_journal("Journal now at %x (ec %x)\n", area->a_segno,
				area->a_erase_count);
		return;
	}
	/* Current segment must be a member of the journal array */
	BUG();
}

static void journal_get_erase_count(struct logfs_area *area)
{
	/* erase count is stored globally and incremented in
	 * journal_get_free_segment() - nothing to do here */
}

/*
 * Erase the area's current segment and write a fresh segment header,
 * padded to a 16-byte boundary, at its start.
 */
static int journal_erase_segment(struct logfs_area *area)
{
	struct super_block *sb = area->a_sb;
	union {
		struct logfs_segment_header sh;
		unsigned char c[ALIGN(sizeof(struct logfs_segment_header), 16)];
	} u;
	u64 ofs;
	int err;

	err = logfs_erase_segment(sb, area->a_segno, 1);
	if (err)
		return err;

	memset(&u, 0, sizeof(u));
	u.sh.pad = 0;
	u.sh.type = SEG_JOURNAL;
	u.sh.level = 0;
	u.sh.segno = cpu_to_be32(area->a_segno);
	u.sh.ec = cpu_to_be32(area->a_erase_count);
	u.sh.gec = cpu_to_be64(logfs_super(sb)->s_gec);
	u.sh.crc = logfs_crc32(&u.sh, sizeof(u.sh), 4);

	/* This causes a bug in segment.c. Not yet. */
	//logfs_set_segment_erased(sb, area->a_segno, area->a_erase_count, 0);

	ofs = dev_ofs(sb, area->a_segno, 0);
	area->a_used_bytes = sizeof(u);
	logfs_buf_write(area, ofs, &u, sizeof(u));
	return 0;
}

/*
 * Fill in a journal entry header.  @len is the on-medium (possibly
 * compressed) payload length, @datalen the uncompressed length.  The
 * pad bytes spell "HEADR" as a human-readable marker.  Returns the
 * total aligned size of header plus payload.
 */
static size_t __logfs_write_header(struct logfs_super *super,
		struct logfs_journal_header *jh, size_t len, size_t datalen,
		u16 type, u8 compr)
{
	jh->h_len = cpu_to_be16(len);
	jh->h_type = cpu_to_be16(type);
	jh->h_datalen = cpu_to_be16(datalen);
	jh->h_compr = compr;
	jh->h_pad[0] = 'H';
	jh->h_pad[1] = 'E';
	jh->h_pad[2] = 'A';
	jh->h_pad[3] = 'D';
	jh->h_pad[4] = 'R';
	jh->h_crc = logfs_crc32(jh, len + sizeof(*jh), 4);
	return ALIGN(len, 16) + sizeof(*jh);
}

/* Convenience wrapper for an uncompressed journal entry header. */
static size_t logfs_write_header(struct logfs_super *super,
		struct logfs_journal_header *jh, size_t datalen, u16 type)
{
	size_t len = datalen;

	return __logfs_write_header(super, jh, len, datalen, type, COMPR_NONE);
}

static inline size_t logfs_journal_erasecount_size(struct logfs_super *super)
{
	return LOGFS_JOURNAL_SEGS * sizeof(__be32);
}

/* Serialize the per-journal-segment erase counts into a JE_ERASECOUNT entry. */
static void *logfs_write_erasecount(struct super_block *sb, void *_ec,
		u16 *type, size_t *len)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_je_journal_ec *ec = _ec;
	int i;

	journal_for_each(i)
		ec->ec[i] = cpu_to_be32(super->s_journal_ec[i]);
	*type = JE_ERASECOUNT;
	*len = logfs_journal_erasecount_size(super);
	return ec;
}

/*
 * Btree visitor: commit one shadow entry to the superblock accounting
 * (new space consumed, old space freed) and release it to the mempool.
 * @_sb smuggles the super_block through the unsigned long visitor argument.
 */
static void account_shadow(void *_shadow, unsigned long _sb, u64 ignore,
		size_t ignore2)
{
	struct logfs_shadow *shadow = _shadow;
	struct super_block *sb = (void *)_sb;
	struct logfs_super *super = logfs_super(sb);

	/* consume new space */
	super->s_free_bytes	  -= shadow->new_len;
	super->s_used_bytes	  += shadow->new_len;
	super->s_dirty_used_bytes -= shadow->new_len;

	/* free up old space */
	super->s_free_bytes	  += shadow->old_len;
	super->s_used_bytes	  -= shadow->old_len;
	super->s_dirty_free_bytes -= shadow->old_len;

	logfs_set_segment_used(sb, shadow->old_ofs, -shadow->old_len);
	logfs_set_segment_used(sb, shadow->new_ofs, shadow->new_len);

	log_journal("account_shadow(%llx, %llx, %x) %llx->%llx %x->%x\n",
			shadow->ino, shadow->bix, shadow->gc_level,
			shadow->old_ofs, shadow->new_ofs,
			shadow->old_len, shadow->new_len);
	mempool_free(shadow, super->s_shadow_pool);
}

/* Flush the whole shadow tree into the accounting and reset it. */
static void account_shadows(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);
	struct inode *inode = super->s_master_inode;
	struct logfs_inode *li = logfs_inode(inode);
	struct shadow_tree *tree = &super->s_shadow_tree;

	btree_grim_visitor64(&tree->new, (unsigned long)sb, account_shadow);
	btree_grim_visitor64(&tree->old, (unsigned long)sb, account_shadow);
	btree_grim_visitor32(&tree->segment_map, 0, NULL);
	tree->no_shadowed_segments = 0;

	if (li->li_block) {
		/*
		 * We never actually use the structure, when attached to the
		 * master inode. But it is easier to always free it here than
		 * to have checks in several places elsewhere when allocating
		 * it.
		 */
		li->li_block->ops->free_block(sb, li->li_block);
	}
	BUG_ON((s64)li->li_used_bytes < 0);
}

/* Serialize the master inode state into a JE_ANCHOR entry. */
static void *__logfs_write_anchor(struct super_block *sb, void *_da,
		u16 *type, size_t *len)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_je_anchor *da = _da;
	struct inode *inode = super->s_master_inode;
	struct logfs_inode *li = logfs_inode(inode);
	int i;

	da->da_height = li->li_height;
	da->da_last_ino = cpu_to_be64(super->s_last_ino);
	da->da_size = cpu_to_be64(i_size_read(inode));
	da->da_used_bytes = cpu_to_be64(li->li_used_bytes);
	for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++)
		da->da_data[i] = cpu_to_be64(li->li_data[i]);
	*type = JE_ANCHOR;
	*len = sizeof(*da);
	return da;
}

/* Serialize the dynamic superblock fields into a JE_DYNSB entry. */
static void *logfs_write_dynsb(struct super_block *sb, void *_dynsb,
		u16 *type, size_t *len)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_je_dynsb *dynsb = _dynsb;

	dynsb->ds_gec = cpu_to_be64(super->s_gec);
	dynsb->ds_sweeper = cpu_to_be64(super->s_sweeper);
	dynsb->ds_victim_ino = cpu_to_be64(super->s_victim_ino);
	dynsb->ds_rename_dir = cpu_to_be64(super->s_rename_dir);
	dynsb->ds_rename_pos = cpu_to_be64(super->s_rename_pos);
	dynsb->ds_used_bytes = cpu_to_be64(super->s_used_bytes);
	dynsb->ds_generation = cpu_to_be32(super->s_generation);
	*type = JE_DYNSB;
	*len = sizeof(*dynsb);
	return dynsb;
}

/*
 * Copy the partially written tail of @area's write buffer (the portion
 * since the last writesize boundary) out of the page cache into @wbuf,
 * so it can be preserved across a remount.
 */
static void write_wbuf(struct super_block *sb, struct logfs_area *area,
		void *wbuf)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	u64 ofs;
	pgoff_t index;
	int page_ofs;
	struct page *page;

	ofs = dev_ofs(sb, area->a_segno,
			area->a_used_bytes & ~(super->s_writesize - 1));
	index = ofs >> PAGE_SHIFT;
	page_ofs = ofs & (PAGE_SIZE - 1);

	page = find_lock_page(mapping, index);
	BUG_ON(!page);
	memcpy(wbuf, page_address(page) + page_ofs, super->s_writesize);
	unlock_page(page);
}

/*
 * Serialize the open area selected by super->s_sum_index into a JE_AREA
 * entry; the wbuf tail is appended after the fixed header when the device
 * has a writesize > 1.
 */
static void *logfs_write_area(struct super_block *sb, void *_a,
		u16 *type, size_t *len)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_area *area = super->s_area[super->s_sum_index];
	struct logfs_je_area *a = _a;

	a->vim = VIM_DEFAULT;
	a->gc_level = super->s_sum_index;
	a->used_bytes = cpu_to_be32(area->a_used_bytes);
	a->segno = cpu_to_be32(area->a_segno);
	if (super->s_writesize > 1)
		write_wbuf(sb, area, a + 1);

	*type = JE_AREA;
	*len = sizeof(*a) + super->s_writesize;
	return a;
}

/* The commit entry is simply the array of offsets of all prior entries. */
static void *logfs_write_commit(struct super_block *sb, void *h,
		u16 *type, size_t *len)
{
	struct logfs_super *super = logfs_super(sb);

	*type = JE_COMMIT;
	*len = super->s_no_je * sizeof(__be64);
	return super->s_je_array;
}

/*
 * Compress @buf into the shared header buffer and prepend a journal entry
 * header.  Falls back to an uncompressed copy when compression fails or
 * for JE_ANCHOR entries.  Returns the total bytes to write.
 */
static size_t __logfs_write_je(struct super_block *sb, void *buf, u16 type,
		size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	void *header = super->s_compressed_je;
	void *data = header + sizeof(struct logfs_journal_header);
	ssize_t compr_len, pad_len;
	u8 compr = COMPR_ZLIB;

	if (len == 0)
		return logfs_write_header(super, header, 0, type);

	compr_len = logfs_compress(buf, data, len, sb->s_blocksize);
	if (compr_len < 0 || type == JE_ANCHOR) {
		memcpy(data, buf, len);
		compr_len = len;
		compr = COMPR_NONE;
	}

	pad_len = ALIGN(compr_len, 16);
	memset(data + compr_len, 0, pad_len - compr_len);

	return __logfs_write_header(super, header, compr_len, len, type, compr);
}

/*
 * Reserve @*bytes in the journal area and return the device offset of the
 * reservation.  With @must_pad set the reservation is extended to the next
 * writesize boundary and @*bytes updated accordingly.
 */
static s64 logfs_get_free_bytes(struct logfs_area *area, size_t *bytes,
		int must_pad)
{
	u32 writesize = logfs_super(area->a_sb)->s_writesize;
	s32 ofs;
	int ret;

	ret = logfs_open_area(area, *bytes);
	if (ret)
		return -EAGAIN;

	ofs = area->a_used_bytes;
	area->a_used_bytes += *bytes;

	if (must_pad) {
		area->a_used_bytes = ALIGN(area->a_used_bytes, writesize);
		*bytes = area->a_used_bytes - ofs;
	}

	return dev_ofs(area->a_sb, area->a_segno, ofs);
}

/*
 * Write one journal entry from @buf and record its offset in the commit
 * array.  Commit entries are padded to the writesize so they are flushed
 * to the medium immediately.
 */
static int logfs_write_je_buf(struct super_block *sb, void *buf, u16 type,
		size_t buf_len)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_area *area = super->s_journal_area;
	struct logfs_journal_header *jh = super->s_compressed_je;
	size_t len;
	int must_pad = 0;
	s64 ofs;

	len = __logfs_write_je(sb, buf, type, buf_len);
	if (jh->h_type == cpu_to_be16(JE_COMMIT))
		must_pad = 1;

	ofs = logfs_get_free_bytes(area, &len, must_pad);
	if (ofs < 0)
		return ofs;
	logfs_buf_write(area, ofs, super->s_compressed_je, len);
	BUG_ON(super->s_no_je >= MAX_JOURNAL_ENTRIES);
	super->s_je_array[super->s_no_je++] = cpu_to_be64(ofs);
	return 0;
}

/* Generate a journal entry via @write into the scratch buffer and write it. */
static int logfs_write_je(struct super_block *sb,
		void* (*write)(struct super_block *sb, void *scratch,
			u16 *type, size_t *len))
{
	void *buf;
	size_t len;
	u16 type;

	buf = write(sb, logfs_super(sb)->s_je, &type, &len);
	return logfs_write_je_buf(sb, buf, type, len);
}

/*
 * Append one object alias to the batch buffer; a full batch (one block)
 * is flushed as a JE_OBJ_ALIAS entry.  The fill level persists in
 * super->s_je_fill between calls.
 */
int write_alias_journal(struct super_block *sb, u64 ino, u64 bix,
		level_t level, int child_no, __be64 val)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_obj_alias *oa = super->s_je;
	int err = 0, fill = super->s_je_fill;

	log_aliases("logfs_write_obj_aliases #%x(%llx, %llx, %x, %x) %llx\n",
			fill, ino, bix, level, child_no, be64_to_cpu(val));
	oa[fill].ino = cpu_to_be64(ino);
	oa[fill].bix = cpu_to_be64(bix);
	oa[fill].val = val;
	oa[fill].level = (__force u8)level;
	oa[fill].child_no = cpu_to_be16(child_no);
	fill++;
	if (fill >= sb->s_blocksize / sizeof(*oa)) {
		err = logfs_write_je_buf(sb, oa, JE_OBJ_ALIAS, sb->s_blocksize);
		fill = 0;
	}

	super->s_je_fill = fill;
	return err;
}

/* Write all pending object aliases, including any partially filled batch. */
static int logfs_write_obj_aliases(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);
	int err;

	log_journal("logfs_write_obj_aliases: %d aliases to write\n",
			super->s_no_object_aliases);
	super->s_je_fill = 0;
	err = logfs_write_obj_aliases_pagecache(sb);
	if (err)
		return err;

	if (super->s_je_fill)
		err = logfs_write_je_buf(sb, super->s_je, JE_OBJ_ALIAS,
				super->s_je_fill
				* sizeof(struct logfs_obj_alias));
	return err;
}

/*
 * Write all journal entries.  The goto logic ensures that all journal entries
 * are written whenever a new segment is used.  It is ugly and potentially a
 * bit wasteful, but robustness is more important.  With this we can *always*
 * erase all journal segments except the one containing the most recent commit.
 */
void logfs_write_anchor(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_area *area = super->s_journal_area;
	int i, err;

	if (!(super->s_flags & LOGFS_SB_FLAG_DIRTY))
		return;
	super->s_flags &= ~LOGFS_SB_FLAG_DIRTY;

	BUG_ON(super->s_flags & LOGFS_SB_FLAG_SHUTDOWN);
	mutex_lock(&super->s_journal_mutex);

	/* Do this first or suffer corruption */
	logfs_sync_segments(sb);
	account_shadows(sb);

again:
	super->s_no_je = 0;
	for_each_area(i) {
		if (!super->s_area[i]->a_is_open)
			continue;
		super->s_sum_index = i;
		err = logfs_write_je(sb, logfs_write_area);
		if (err)
			goto again;
	}
	err = logfs_write_obj_aliases(sb);
	if (err)
		goto again;
	err = logfs_write_je(sb, logfs_write_erasecount);
	if (err)
		goto again;
	err = logfs_write_je(sb, __logfs_write_anchor);
	if (err)
		goto again;
	err = logfs_write_je(sb, logfs_write_dynsb);
	if (err)
		goto again;
	/*
	 * Order is imperative.  First we sync all writes, including the
	 * non-committed journal writes.  Then we write the final commit and
	 * sync the current journal segment.
	 * There is a theoretical bug here.  Syncing the journal segment will
	 * write a number of journal entries and the final commit.  All these
	 * are written in a single operation.  If the device layer writes the
	 * data back-to-front, the commit will precede the other journal
	 * entries, leaving a race window.
	 * Two fixes are possible.  Preferred is to fix the device layer to
	 * ensure writes happen front-to-back.  Alternatively we can insert
	 * another logfs_sync_area() super->s_devops->sync() combo before
	 * writing the commit.
	 */
	/*
	 * On another subject, super->s_devops->sync is usually not necessary.
	 * Unless called from sys_sync or friends, a barrier would suffice.
	 */
	super->s_devops->sync(sb);
	err = logfs_write_je(sb, logfs_write_commit);
	if (err)
		goto again;
	log_journal("Write commit to %llx\n",
			be64_to_cpu(super->s_je_array[super->s_no_je - 1]));
	logfs_sync_area(area);
	BUG_ON(area->a_used_bytes != area->a_written_bytes);
	super->s_devops->sync(sb);

	mutex_unlock(&super->s_journal_mutex);
	return;
}

/*
 * Wear-leveling pass: release all current journal segments, pick a fresh
 * set from the reserve list, move the journal area to the first new
 * segment and rewrite journal plus superblocks.
 */
void do_logfs_journal_wl_pass(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_area *area = super->s_journal_area;
	struct btree_head32 *head = &super->s_reserved_segments;
	u32 segno, ec;
	int i, err;

	log_journal("Journal requires wear-leveling.\n");
	/* Drop old segments */
	journal_for_each(i)
		if (super->s_journal_seg[i]) {
			btree_remove32(head, super->s_journal_seg[i]);
			logfs_set_segment_unreserved(sb,
					super->s_journal_seg[i],
					super->s_journal_ec[i]);
			super->s_journal_seg[i] = 0;
			super->s_journal_ec[i] = 0;
		}
	/* Get new segments */
	for (i = 0; i < super->s_no_journal_segs; i++) {
		segno = get_best_cand(sb, &super->s_reserve_list, &ec);
		super->s_journal_seg[i] = segno;
		super->s_journal_ec[i] = ec;
		logfs_set_segment_reserved(sb, segno);
		err = btree_insert32(head, segno, (void *)1, GFP_NOFS);
		BUG_ON(err); /* mempool should prevent this */
		err = logfs_erase_segment(sb, segno, 1);
		BUG_ON(err); /* FIXME: remount-ro would be nicer */
	}
	/* Manually move journal_area */
	freeseg(sb, area->a_segno);
	area->a_segno = super->s_journal_seg[0];
	area->a_is_open = 0;
	area->a_used_bytes = 0;
	/* Write journal */
	logfs_write_anchor(sb);
	/* Write superblocks */
	err = logfs_write_sb(sb);
	BUG_ON(err);
}

static const struct logfs_area_ops journal_area_ops = {
	.get_free_segment	= journal_get_free_segment,
	.get_erase_count	= journal_get_erase_count,
	.erase_segment		= journal_erase_segment,
};

/*
 * Allocate journal buffers, read and replay the most recent journal and
 * hook up the journal area ops.
 * NOTE(review): on a failed second kzalloc or a later error, earlier
 * allocations are not freed here — presumably the caller is expected to
 * invoke logfs_cleanup_journal() on failure; verify against the caller.
 */
int logfs_init_journal(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);
	size_t bufsize = max_t(size_t, sb->s_blocksize, super->s_writesize)
		+ MAX_JOURNAL_HEADER;
	int ret = -ENOMEM;

	mutex_init(&super->s_journal_mutex);
	btree_init_mempool32(&super->s_reserved_segments, super->s_btree_pool);

	super->s_je = kzalloc(bufsize, GFP_KERNEL);
	if (!super->s_je)
		return ret;

	super->s_compressed_je = kzalloc(bufsize, GFP_KERNEL);
	if (!super->s_compressed_je)
		return ret;

	super->s_master_inode = logfs_new_meta_inode(sb, LOGFS_INO_MASTER);
	if (IS_ERR(super->s_master_inode))
		return PTR_ERR(super->s_master_inode);

	ret = logfs_read_journal(sb);
	if (ret)
		return -EIO;

	reserve_sb_and_journal(sb);
	logfs_calc_free(sb);

	super->s_journal_area->a_ops = &journal_area_ops;
	return 0;
}

/* Release everything logfs_init_journal() allocated. */
void logfs_cleanup_journal(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);

	btree_grim_visitor32(&super->s_reserved_segments, 0, NULL);

	kfree(super->s_compressed_je);
	kfree(super->s_je);
}
gpl-2.0
corcor67/SMPL_M8_SENSE
drivers/input/misc/cm109.c
5444
23974
/*
 * Driver for the VoIP USB phones with CM109 chipsets.
 *
 * Copyright (C) 2007 - 2008 Alfred E. Heggestad <aeh@db.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 */

/*
 *   Tested devices:
 *	- Komunikate KIP1000
 *	- Genius G-talk
 *	- Allied-Telesis Corega USBPH01
 *	- ...
 *
 *   This driver is based on the yealink.c driver
 *
 *   Thanks to:
 *	- Authors of yealink.c
 *	- Thomas Reitmayr
 *	- Oliver Neukum for good review comments and code
 *	- Shaun Jackman <sjackman@gmail.com> for Genius G-talk keymap
 *	- Dmitry Torokhov for valuable input and review
 *
 *   Todo:
 *	- Read/write EEPROM
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/usb/input.h>

#define DRIVER_VERSION "20080805"
#define DRIVER_AUTHOR  "Alfred E. Heggestad"
#define DRIVER_DESC    "CM109 phone driver"

/* Selected keymap; settable only at module load time. */
static char *phone = "kip1000";
module_param(phone, charp, S_IRUSR);
MODULE_PARM_DESC(phone, "Phone name {kip1000, gtalk, usbph01, atcom}");

enum {
	/* HID Registers */
	HID_IR0 = 0x00, /* Record/Playback-mute button, Volume up/down  */
	HID_IR1 = 0x01, /* GPI, generic registers or EEPROM_DATA0       */
	HID_IR2 = 0x02, /* Generic registers or EEPROM_DATA1            */
	HID_IR3 = 0x03, /* Generic registers or EEPROM_CTRL             */
	HID_OR0 = 0x00, /* Mapping control, buzzer, SPDIF (offset 0x04) */
	HID_OR1 = 0x01, /* GPO - General Purpose Output                 */
	HID_OR2 = 0x02, /* Set GPIO to input/output mode                */
	HID_OR3 = 0x03, /* SPDIF status channel or EEPROM_CTRL          */

	/* HID_IR0 */
	RECORD_MUTE   = 1 << 3,
	PLAYBACK_MUTE = 1 << 2,
	VOLUME_DOWN   = 1 << 1,
	VOLUME_UP     = 1 << 0,

	/* HID_OR0 */
	/* bits 7-6
	   0: HID_OR1-2 are used for GPO; HID_OR0, 3 are used for buzzer
	      and SPDIF
	   1: HID_OR0-3 are used as generic HID registers
	   2: Values written to HID_OR0-3 are also mapped to MCU_CTRL,
	      EEPROM_DATA0-1, EEPROM_CTRL (see Note)
	   3: Reserved
	 */
	HID_OR_GPO_BUZ_SPDIF   = 0 << 6,
	HID_OR_GENERIC_HID_REG = 1 << 6,
	HID_OR_MAP_MCU_EEPROM  = 2 << 6,

	BUZZER_ON = 1 << 5,

	/* up to 256 normal keys, up to 16 special keys */
	KEYMAP_SIZE = 256 + 16,
};

/* CM109 protocol packet */
struct cm109_ctl_packet {
	u8 byte[4];
} __attribute__ ((packed));

enum { USB_PKT_LEN = sizeof(struct cm109_ctl_packet) };

/* CM109 device structure */
struct cm109_dev {
	struct input_dev *idev;	 /* input device */
	struct usb_device *udev; /* usb device */
	struct usb_interface *intf;

	/* irq input channel */
	struct cm109_ctl_packet *irq_data;
	dma_addr_t irq_dma;
	struct urb *urb_irq;

	/* control output channel */
	struct cm109_ctl_packet *ctl_data;
	dma_addr_t ctl_dma;
	struct usb_ctrlrequest *ctl_req;
	struct urb *urb_ctl;
	/*
	 * The 3 bitfields below are protected by ctl_submit_lock.
	 * They have to be separate since they are accessed from IRQ
	 * context.
	 */
	unsigned irq_urb_pending:1;	/* irq_urb is in flight */
	unsigned ctl_urb_pending:1;	/* ctl_urb is in flight */
	unsigned buzzer_pending:1;	/* need to issue buzz command */
	spinlock_t ctl_submit_lock;

	unsigned char buzzer_state;	/* on/off */

	/* flags */
	unsigned open:1;
	unsigned resetting:1;
	unsigned shutdown:1;

	/* This mutex protects writes to the above flags */
	struct mutex pm_mutex;

	unsigned short keymap[KEYMAP_SIZE];

	char phys[64];		/* physical device path */
	int key_code;		/* last reported key */
	int keybit;		/* 0=new scan  1,2,4,8=scan columns  */
	u8 gpi;			/* Cached value of GPI (high nibble) */
};

/******************************************************************************
 * CM109 key interface
 *****************************************************************************/

/*
 * Map the special-key bits of HID_IR0 (encoded as 0xff + bit value) to
 * input key codes; KEY_RESERVED for anything else.
 */
static unsigned short special_keymap(int code)
{
	if (code > 0xff) {
		switch (code - 0xff) {
		case RECORD_MUTE:	return KEY_MUTE;
		case PLAYBACK_MUTE:	return KEY_MUTE;
		case VOLUME_DOWN:	return KEY_VOLUMEDOWN;
		case VOLUME_UP:		return KEY_VOLUMEUP;
		}
	}
	return KEY_RESERVED;
}

/* Map device buttons to internal key events.
 *
 * The "up" and "down" keys, are symbolised by arrows on the button.
 * The "pickup" and "hangup" keys are symbolised by a green and red phone
 * on the button.

 Komunikate KIP1000 Keyboard Matrix

     -> -- 1 -- 2 -- 3  --> GPI pin 4 (0x10)
      |    |    |    |
     <- -- 4 -- 5 -- 6  --> GPI pin 5 (0x20)
      |    |    |    |
     END - 7 -- 8 -- 9  --> GPI pin 6 (0x40)
      |    |    |    |
     OK -- * -- 0 -- #  --> GPI pin 7 (0x80)
      |    |    |    |

     /|\  /|\  /|\  /|\
      |    |    |    |
GPO
pin: 3    2    1    0
     0x8  0x4  0x2  0x1

 */
static unsigned short keymap_kip1000(int scancode)
{
	switch (scancode) {				/* phone key:   */
	case 0x82: return KEY_NUMERIC_0;		/*   0          */
	case 0x14: return KEY_NUMERIC_1;		/*   1          */
	case 0x12: return KEY_NUMERIC_2;		/*   2          */
	case 0x11: return KEY_NUMERIC_3;		/*   3          */
	case 0x24: return KEY_NUMERIC_4;		/*   4          */
	case 0x22: return KEY_NUMERIC_5;		/*   5          */
	case 0x21: return KEY_NUMERIC_6;		/*   6          */
	case 0x44: return KEY_NUMERIC_7;		/*   7          */
	case 0x42: return KEY_NUMERIC_8;		/*   8          */
	case 0x41: return KEY_NUMERIC_9;		/*   9          */
	case 0x81: return KEY_NUMERIC_POUND;		/*   #          */
	case 0x84: return KEY_NUMERIC_STAR;		/*   *          */
	case 0x88: return KEY_ENTER;			/*   pickup     */
	case 0x48: return KEY_ESC;			/*   hangup     */
	case 0x28: return KEY_LEFT;			/*   IN         */
	case 0x18: return KEY_RIGHT;			/*   OUT        */
	default:   return special_keymap(scancode);
	}
}

/*
  Contributed by Shaun Jackman <sjackman@gmail.com>

  Genius G-Talk keyboard matrix
     0 1 2 3
  4: 0 4 8 Talk
  5: 1 5 9 End
  6: 2 6 # Up
  7: 3 7 * Down
*/
static unsigned short keymap_gtalk(int scancode)
{
	switch (scancode) {
	case 0x11: return KEY_NUMERIC_0;
	case 0x21: return KEY_NUMERIC_1;
	case 0x41: return KEY_NUMERIC_2;
	case 0x81: return KEY_NUMERIC_3;
	case 0x12: return KEY_NUMERIC_4;
	case 0x22: return KEY_NUMERIC_5;
	case 0x42: return KEY_NUMERIC_6;
	case 0x82: return KEY_NUMERIC_7;
	case 0x14: return KEY_NUMERIC_8;
	case 0x24: return KEY_NUMERIC_9;
	case 0x44: return KEY_NUMERIC_POUND;	/* # */
	case 0x84: return KEY_NUMERIC_STAR;	/* * */
	case 0x18: return KEY_ENTER;		/* Talk (green handset) */
	case 0x28: return KEY_ESC;		/* End (red handset) */
	case 0x48: return KEY_UP;		/* Menu up (rocker switch) */
	case 0x88: return KEY_DOWN;		/* Menu down (rocker switch) */
	default:   return special_keymap(scancode);
	}
}

/*
 * Keymap for Allied-Telesis Corega USBPH01
 * http://www.alliedtelesis-corega.com/2/1344/1437/1360/chprd.html
 *
 * Contributed by july@nat.bg
 */
static unsigned short keymap_usbph01(int scancode)
{
	switch (scancode) {
	case 0x11: return KEY_NUMERIC_0;		/*   0          */
	case 0x21: return KEY_NUMERIC_1;		/*   1          */
	case 0x41: return KEY_NUMERIC_2;		/*   2          */
	case 0x81: return KEY_NUMERIC_3;		/*   3          */
	case 0x12: return KEY_NUMERIC_4;		/*   4          */
	case 0x22: return KEY_NUMERIC_5;		/*   5          */
	case 0x42: return KEY_NUMERIC_6;		/*   6          */
	case 0x82: return KEY_NUMERIC_7;		/*   7          */
	case 0x14: return KEY_NUMERIC_8;		/*   8          */
	case 0x24: return KEY_NUMERIC_9;		/*   9          */
	case 0x44: return KEY_NUMERIC_POUND;		/*   #          */
	case 0x84: return KEY_NUMERIC_STAR;		/*   *          */
	case 0x18: return KEY_ENTER;			/*   pickup     */
	case 0x28: return KEY_ESC;			/*   hangup     */
	case 0x48: return KEY_LEFT;			/*   IN         */
	case 0x88: return KEY_RIGHT;			/*   OUT        */
	default:   return special_keymap(scancode);
	}
}

/*
 * Keymap for ATCom AU-100
 * http://www.atcom.cn/products.html
 * http://www.packetizer.com/products/au100/
 * http://www.voip-info.org/wiki/view/AU-100
 *
 * Contributed by daniel@gimpelevich.san-francisco.ca.us
 */
static unsigned short keymap_atcom(int scancode)
{
	switch (scancode) {				/* phone key:       */
	case 0x82: return KEY_NUMERIC_0;		/*   0              */
	case 0x11: return KEY_NUMERIC_1;		/*   1              */
	case 0x12: return KEY_NUMERIC_2;		/*   2              */
	case 0x14: return KEY_NUMERIC_3;		/*   3              */
	case 0x21: return KEY_NUMERIC_4;		/*   4              */
	case 0x22: return KEY_NUMERIC_5;		/*   5              */
	case 0x24: return KEY_NUMERIC_6;		/*   6              */
	case 0x41: return KEY_NUMERIC_7;		/*   7              */
	case 0x42: return KEY_NUMERIC_8;		/*   8              */
	case 0x44: return KEY_NUMERIC_9;		/*   9              */
	case 0x84: return KEY_NUMERIC_POUND;		/*   #              */
	case 0x81: return KEY_NUMERIC_STAR;		/*   *              */
	case 0x18: return KEY_ENTER;			/*   pickup         */
	case 0x28: return KEY_ESC;			/*   hangup         */
	case 0x48: return KEY_LEFT;			/*   left arrow     */
	case 0x88: return KEY_RIGHT;			/*   right arrow    */
	default:   return special_keymap(scancode);
	}
}

/* Active scancode-to-keycode mapping; chosen in cm109_select_keymap(). */
static unsigned short (*keymap)(int) = keymap_kip1000;

/*
 * Completes a request by converting the data into events for the
 * input subsystem.
 */
static void report_key(struct cm109_dev *dev, int key)
{
	struct input_dev *idev = dev->idev;

	if (dev->key_code >= 0) {
		/* old key up */
		input_report_key(idev, dev->key_code, 0);
	}

	dev->key_code = key;
	if (key >= 0) {
		/* new valid key */
		input_report_key(idev, key, 1);
	}

	input_sync(idev);
}

/******************************************************************************
 * CM109 usb communication interface
 *****************************************************************************/

/*
 * Set the buzzer bit in the control packet according to buzzer_state
 * and submit the control URB.  Caller must already hold ctl_submit_lock
 * and have set ctl_urb_pending.
 */
static void cm109_submit_buzz_toggle(struct cm109_dev *dev)
{
	int error;

	if (dev->buzzer_state)
		dev->ctl_data->byte[HID_OR0] |= BUZZER_ON;
	else
		dev->ctl_data->byte[HID_OR0] &= ~BUZZER_ON;

	error = usb_submit_urb(dev->urb_ctl, GFP_ATOMIC);
	if (error)
		err("%s: usb_submit_urb (urb_ctl) failed %d", __func__, error);
}

/*
 * IRQ handler
 */
static void cm109_urb_irq_callback(struct urb *urb)
{
	struct cm109_dev *dev = urb->context;
	const int status = urb->status;
	int error;

	dev_dbg(&urb->dev->dev, "### URB IRQ: [0x%02x 0x%02x 0x%02x 0x%02x] keybit=0x%02x\n",
	     dev->irq_data->byte[0],
	     dev->irq_data->byte[1],
	     dev->irq_data->byte[2],
	     dev->irq_data->byte[3],
	     dev->keybit);

	if (status) {
		if (status == -ESHUTDOWN)
			return;
		err("%s: urb status %d", __func__, status);
	}

	/* Special keys */
	if (dev->irq_data->byte[HID_IR0] & 0x0f) {
		const int code = (dev->irq_data->byte[HID_IR0] & 0x0f);
		report_key(dev, dev->keymap[0xff + code]);
	}

	/* Scan key column */
	if (dev->keybit == 0xf) {

		/* Any changes ? */
		if ((dev->gpi & 0xf0) == (dev->irq_data->byte[HID_IR1] & 0xf0))
			goto out;

		dev->gpi = dev->irq_data->byte[HID_IR1] & 0xf0;
		dev->keybit = 0x1;
	} else {
		report_key(dev, dev->keymap[dev->irq_data->byte[HID_IR1]]);

		dev->keybit <<= 1;
		if (dev->keybit > 0x8)
			dev->keybit = 0xf;
	}

 out:

	spin_lock(&dev->ctl_submit_lock);

	dev->irq_urb_pending = 0;

	if (likely(!dev->shutdown)) {

		if (dev->buzzer_state)
			dev->ctl_data->byte[HID_OR0] |= BUZZER_ON;
		else
			dev->ctl_data->byte[HID_OR0] &= ~BUZZER_ON;

		dev->ctl_data->byte[HID_OR1] = dev->keybit;
		dev->ctl_data->byte[HID_OR2] = dev->keybit;

		dev->buzzer_pending = 0;
		dev->ctl_urb_pending = 1;

		error = usb_submit_urb(dev->urb_ctl, GFP_ATOMIC);
		if (error)
			err("%s: usb_submit_urb (urb_ctl) failed %d",
			    __func__, error);
	}

	spin_unlock(&dev->ctl_submit_lock);
}

/*
 * Control URB completion: either re-issue a pending buzzer toggle or
 * resubmit the interrupt URB to poll for the next key scan.
 */
static void cm109_urb_ctl_callback(struct urb *urb)
{
	struct cm109_dev *dev = urb->context;
	const int status = urb->status;
	int error;

	dev_dbg(&urb->dev->dev, "### URB CTL: [0x%02x 0x%02x 0x%02x 0x%02x]\n",
	     dev->ctl_data->byte[0],
	     dev->ctl_data->byte[1],
	     dev->ctl_data->byte[2],
	     dev->ctl_data->byte[3]);

	if (status)
		err("%s: urb status %d", __func__, status);

	spin_lock(&dev->ctl_submit_lock);

	dev->ctl_urb_pending = 0;

	if (likely(!dev->shutdown)) {

		if (dev->buzzer_pending) {
			dev->buzzer_pending = 0;
			dev->ctl_urb_pending = 1;
			cm109_submit_buzz_toggle(dev);
		} else if (likely(!dev->irq_urb_pending)) {
			/* ask for key data */
			dev->irq_urb_pending = 1;
			error = usb_submit_urb(dev->urb_irq, GFP_ATOMIC);
			if (error)
				err("%s: usb_submit_urb (urb_irq) failed %d",
				    __func__, error);
		}
	}

	spin_unlock(&dev->ctl_submit_lock);
}

/*
 * Request a buzzer toggle from event context: defer it if a control URB
 * is already in flight, otherwise submit one immediately.
 */
static void cm109_toggle_buzzer_async(struct cm109_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->ctl_submit_lock, flags);

	if (dev->ctl_urb_pending) {
		/* URB completion will resubmit */
		dev->buzzer_pending = 1;
	} else {
		dev->ctl_urb_pending = 1;
		cm109_submit_buzz_toggle(dev);
	}

	spin_unlock_irqrestore(&dev->ctl_submit_lock, flags);
}

/*
 * Synchronously set the buzzer on or off via a blocking control message;
 * used when URB traffic is stopped (close/suspend/reset).
 */
static void cm109_toggle_buzzer_sync(struct cm109_dev *dev, int on)
{
	int error;

	if (on)
		dev->ctl_data->byte[HID_OR0] |= BUZZER_ON;
	else
		dev->ctl_data->byte[HID_OR0] &= ~BUZZER_ON;

	error = usb_control_msg(dev->udev,
				usb_sndctrlpipe(dev->udev, 0),
				dev->ctl_req->bRequest,
				dev->ctl_req->bRequestType,
				le16_to_cpu(dev->ctl_req->wValue),
				le16_to_cpu(dev->ctl_req->wIndex),
				dev->ctl_data,
				USB_PKT_LEN, USB_CTRL_SET_TIMEOUT);
	if (error < 0 && error != -EINTR)
		err("%s: usb_control_msg() failed %d", __func__, error);
}

/*
 * Stop all URB traffic and silence the buzzer.  The shutdown flag stops
 * the completion handlers from resubmitting while the URBs are killed.
 */
static void cm109_stop_traffic(struct cm109_dev *dev)
{
	dev->shutdown = 1;
	/*
	 * Make sure other CPUs see this
	 */
	smp_wmb();

	usb_kill_urb(dev->urb_ctl);
	usb_kill_urb(dev->urb_irq);

	cm109_toggle_buzzer_sync(dev, 0);

	dev->shutdown = 0;
	smp_wmb();
}

/* Restart traffic after resume/reset if the device was open. */
static void cm109_restore_state(struct cm109_dev *dev)
{
	if (dev->open) {
		/*
		 * Restore buzzer state.
		 * This will also kick regular URB submission
		 */
		cm109_toggle_buzzer_async(dev);
	}
}

/******************************************************************************
 * input event interface
 *****************************************************************************/

/*
 * input open handler: autoresume the interface, initialise the scan state
 * and kick off the control/interrupt URB ping-pong.
 */
static int cm109_input_open(struct input_dev *idev)
{
	struct cm109_dev *dev = input_get_drvdata(idev);
	int error;

	error = usb_autopm_get_interface(dev->intf);
	if (error < 0) {
		err("%s - cannot autoresume, result %d",
		    __func__, error);
		return error;
	}

	mutex_lock(&dev->pm_mutex);

	dev->buzzer_state = 0;
	dev->key_code = -1;	/* no keys pressed */
	dev->keybit = 0xf;

	/* issue INIT */
	dev->ctl_data->byte[HID_OR0] = HID_OR_GPO_BUZ_SPDIF;
	dev->ctl_data->byte[HID_OR1] = dev->keybit;
	dev->ctl_data->byte[HID_OR2] = dev->keybit;
	dev->ctl_data->byte[HID_OR3] = 0x00;

	error = usb_submit_urb(dev->urb_ctl, GFP_KERNEL);
	if (error)
		err("%s: usb_submit_urb (urb_ctl) failed %d", __func__, error);
	else
		dev->open = 1;

	mutex_unlock(&dev->pm_mutex);

	if (error)
		usb_autopm_put_interface(dev->intf);

	return error;
}

/* input close handler: stop URB traffic and drop the autopm reference. */
static void cm109_input_close(struct input_dev *idev)
{
	struct cm109_dev *dev = input_get_drvdata(idev);

	mutex_lock(&dev->pm_mutex);

	/*
	 * Once we are here event delivery is stopped so we
	 * don't need to worry about someone starting buzzer
	 * again
	 */
	cm109_stop_traffic(dev);
	dev->open = 0;

	mutex_unlock(&dev->pm_mutex);

	usb_autopm_put_interface(dev->intf);
}

/* input event handler: only EV_SND SND_TONE/SND_BELL toggle the buzzer. */
static int cm109_input_ev(struct input_dev *idev, unsigned int type,
			  unsigned int code, int value)
{
	struct cm109_dev *dev = input_get_drvdata(idev);

	dev_dbg(&dev->udev->dev,
		"input_ev: type=%u code=%u value=%d\n", type, code, value);

	if (type != EV_SND)
		return -EINVAL;

	switch (code) {
	case SND_TONE:
	case SND_BELL:
		dev->buzzer_state = !!value;
		if (!dev->resetting)
			cm109_toggle_buzzer_async(dev);
		return 0;

	default:
		return -EINVAL;
	}
}

/******************************************************************************
 * Linux interface and usb initialisation
 *****************************************************************************/

struct driver_info {
	char *name;
};

static const struct driver_info info_cm109 = {
	.name = "CM109 USB driver",
};

enum {
	VENDOR_ID        = 0x0d8c, /* C-Media Electronics */
	PRODUCT_ID_CM109 = 0x000e, /* CM109 defines range 0x0008 - 0x000f */
};

/* table of devices that work with this driver */
static const struct usb_device_id cm109_usb_table[] = {
	{
		.match_flags = USB_DEVICE_ID_MATCH_DEVICE |
				USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor = VENDOR_ID,
		.idProduct = PRODUCT_ID_CM109,
		.bInterfaceClass = USB_CLASS_HID,
		.bInterfaceSubClass = 0,
		.bInterfaceProtocol = 0,
		.driver_info = (kernel_ulong_t) &info_cm109
	},
	/* you can add more devices here with product ID 0x0008 - 0x000f */
	{ }
};

/* Free all resources allocated during probe; safe on partial allocation. */
static void cm109_usb_cleanup(struct cm109_dev *dev)
{
	kfree(dev->ctl_req);
	if (dev->ctl_data)
		usb_free_coherent(dev->udev, USB_PKT_LEN,
				  dev->ctl_data, dev->ctl_dma);
	if (dev->irq_data)
		usb_free_coherent(dev->udev, USB_PKT_LEN,
				  dev->irq_data, dev->irq_dma);

	usb_free_urb(dev->urb_irq);	/* parameter validation in core/urb */
	usb_free_urb(dev->urb_ctl);	/* parameter validation in core/urb */
	kfree(dev);
}

static void cm109_usb_disconnect(struct usb_interface *interface)
{
	struct cm109_dev *dev = usb_get_intfdata(interface);

	usb_set_intfdata(interface, NULL);
	input_unregister_device(dev->idev);
	cm109_usb_cleanup(dev);
}

/*
 * Probe: allocate the device structure, DMA buffers and URBs, set up
 * the interrupt-in and control-out channels, and register the input
 * device with the selected keymap.
 */
static int cm109_usb_probe(struct usb_interface *intf,
			   const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct driver_info *nfo = (struct driver_info *)id->driver_info;
	struct usb_host_interface *interface;
	struct usb_endpoint_descriptor *endpoint;
	struct cm109_dev *dev;
	struct input_dev *input_dev = NULL;
	int ret, pipe, i;
	int error = -ENOMEM;

	interface = intf->cur_altsetting;
	endpoint = &interface->endpoint[0].desc;

	if (!usb_endpoint_is_int_in(endpoint))
		return -ENODEV;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	spin_lock_init(&dev->ctl_submit_lock);
	mutex_init(&dev->pm_mutex);

	dev->udev = udev;
	dev->intf = intf;

	dev->idev = input_dev = input_allocate_device();
	if (!input_dev)
		goto err_out;

	/* allocate usb buffers */
	dev->irq_data = usb_alloc_coherent(udev, USB_PKT_LEN,
					   GFP_KERNEL, &dev->irq_dma);
	if (!dev->irq_data)
		goto err_out;

	dev->ctl_data = usb_alloc_coherent(udev, USB_PKT_LEN,
					   GFP_KERNEL, &dev->ctl_dma);
	if (!dev->ctl_data)
		goto err_out;

	dev->ctl_req = kmalloc(sizeof(*(dev->ctl_req)), GFP_KERNEL);
	if (!dev->ctl_req)
		goto err_out;

	/* allocate urb structures */
	dev->urb_irq = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->urb_irq)
		goto err_out;

	dev->urb_ctl = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->urb_ctl)
		goto err_out;

	/* get a handle to the interrupt data pipe */
	pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
	ret = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
	if (ret != USB_PKT_LEN)
		err("invalid payload size %d, expected %d",
		    ret, USB_PKT_LEN);

	/* initialise irq urb */
	usb_fill_int_urb(dev->urb_irq, udev, pipe, dev->irq_data,
			 USB_PKT_LEN,
			 cm109_urb_irq_callback, dev, endpoint->bInterval);
	dev->urb_irq->transfer_dma = dev->irq_dma;
	dev->urb_irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	dev->urb_irq->dev = udev;

	/* initialise ctl urb */
	dev->ctl_req->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE |
					USB_DIR_OUT;
	dev->ctl_req->bRequest = USB_REQ_SET_CONFIGURATION;
	dev->ctl_req->wValue = cpu_to_le16(0x200);
	dev->ctl_req->wIndex = cpu_to_le16(interface->desc.bInterfaceNumber);
	dev->ctl_req->wLength = cpu_to_le16(USB_PKT_LEN);

	usb_fill_control_urb(dev->urb_ctl, udev, usb_sndctrlpipe(udev, 0),
			     (void *)dev->ctl_req, dev->ctl_data, USB_PKT_LEN,
			     cm109_urb_ctl_callback, dev);
	dev->urb_ctl->transfer_dma = dev->ctl_dma;
	dev->urb_ctl->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	dev->urb_ctl->dev = udev;

	/* find out the physical bus location */
	usb_make_path(udev, dev->phys, sizeof(dev->phys));
	strlcat(dev->phys, "/input0", sizeof(dev->phys));

	/* register settings for the input device */
	input_dev->name = nfo->name;
	input_dev->phys = dev->phys;
	usb_to_input_id(udev, &input_dev->id);
	input_dev->dev.parent = &intf->dev;

	input_set_drvdata(input_dev, dev);
	input_dev->open = cm109_input_open;
	input_dev->close = cm109_input_close;
	input_dev->event = cm109_input_ev;

	input_dev->keycode = dev->keymap;
	input_dev->keycodesize = sizeof(unsigned char);
	input_dev->keycodemax = ARRAY_SIZE(dev->keymap);

	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_SND);
	input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE);

	/* register available key events */
	for (i = 0; i < KEYMAP_SIZE; i++) {
		unsigned short k = keymap(i);
		dev->keymap[i] = k;
		__set_bit(k, input_dev->keybit);
	}
	__clear_bit(KEY_RESERVED, input_dev->keybit);

	error = input_register_device(dev->idev);
	if (error)
		goto err_out;

	usb_set_intfdata(intf, dev);

	return 0;

 err_out:
	input_free_device(input_dev);
	cm109_usb_cleanup(dev);
	return error;
}

static int cm109_usb_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct cm109_dev *dev = usb_get_intfdata(intf);

	dev_info(&intf->dev, "cm109: usb_suspend (event=%d)\n", message.event);

	mutex_lock(&dev->pm_mutex);
	cm109_stop_traffic(dev);
	mutex_unlock(&dev->pm_mutex);

	return 0;
}

static int cm109_usb_resume(struct usb_interface *intf)
{
	struct cm109_dev *dev = usb_get_intfdata(intf);

	dev_info(&intf->dev, "cm109: usb_resume\n");

	mutex_lock(&dev->pm_mutex);
	cm109_restore_state(dev);
	mutex_unlock(&dev->pm_mutex);

	return 0;
}

/* pm_mutex is taken here and released in cm109_usb_post_reset(). */
static int cm109_usb_pre_reset(struct usb_interface *intf)
{
	struct cm109_dev *dev = usb_get_intfdata(intf);

	mutex_lock(&dev->pm_mutex);

	/*
	 * Make sure input events don't try to toggle buzzer
	 * while we are resetting
	 */
	dev->resetting = 1;
	smp_wmb();

	cm109_stop_traffic(dev);

	return 0;
}

static int cm109_usb_post_reset(struct usb_interface *intf)
{
	struct cm109_dev *dev = usb_get_intfdata(intf);

	dev->resetting = 0;
	smp_wmb();

	cm109_restore_state(dev);

	mutex_unlock(&dev->pm_mutex);

	return 0;
}

static struct usb_driver cm109_driver = {
	.name		= "cm109",
	.probe		= cm109_usb_probe,
	.disconnect	= cm109_usb_disconnect,
	.suspend	= cm109_usb_suspend,
	.resume		= cm109_usb_resume,
	.reset_resume	= cm109_usb_resume,
	.pre_reset	= cm109_usb_pre_reset,
	.post_reset	= cm109_usb_post_reset,
	.id_table	= cm109_usb_table,
	.supports_autosuspend = 1,
};

/* Resolve the "phone" module parameter to one of the keymap functions. */
static int __init cm109_select_keymap(void)
{
	/* Load the phone keymap */
	if (!strcasecmp(phone, "kip1000")) {
		keymap = keymap_kip1000;
		printk(KERN_INFO KBUILD_MODNAME ": "
			"Keymap for Komunikate KIP1000 phone loaded\n");
	} else if (!strcasecmp(phone, "gtalk")) {
		keymap = keymap_gtalk;
		printk(KERN_INFO KBUILD_MODNAME ": "
			"Keymap for Genius G-talk phone loaded\n");
	} else if (!strcasecmp(phone, "usbph01")) {
		keymap = keymap_usbph01;
		printk(KERN_INFO KBUILD_MODNAME ": "
			"Keymap for Allied-Telesis Corega USBPH01 phone loaded\n");
	} else if (!strcasecmp(phone, "atcom")) {
		keymap = keymap_atcom;
		printk(KERN_INFO KBUILD_MODNAME ": "
			"Keymap for ATCom AU-100 phone loaded\n");
	} else {
		printk(KERN_ERR KBUILD_MODNAME ": "
			"Unsupported phone: %s\n", phone);
		return -EINVAL;
	}

	return 0;
}

static int __init cm109_init(void)
{
	int err;

	err = cm109_select_keymap();
	if (err)
		return err;

	err = usb_register(&cm109_driver);
	if (err)
		return err;

	printk(KERN_INFO KBUILD_MODNAME ": "
		DRIVER_DESC ": " DRIVER_VERSION " (C) " DRIVER_AUTHOR "\n");

	return 0;
}

static void __exit cm109_exit(void)
{
	usb_deregister(&cm109_driver);
}

module_init(cm109_init);
module_exit(cm109_exit);

MODULE_DEVICE_TABLE(usb, cm109_usb_table);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
gpl-2.0
Split-Screen/android_kernel_asus_fugu
drivers/isdn/hisax/hscx.c
9796
7482
/* $Id: hscx.c,v 1.24.2.4 2004/01/24 20:47:23 keil Exp $
 *
 * HSCX specific routines
 *
 * Author       Karsten Keil
 * Copyright    by Karsten Keil      <keil@isdn4linux.de>
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 */

#include <linux/init.h>
#include "hisax.h"
#include "hscx.h"
#include "isac.h"
#include "isdnl1.h"
#include <linux/interrupt.h>
#include <linux/slab.h>

/* Printable names indexed by the low nibble of HSCX_VSTR. */
static char *HSCXVer[] =
{"A1", "?1", "A2", "?3", "A3", "V2.1", "?6", "?7",
 "?8", "?9", "?10", "?11", "?12", "?13", "?14", "???"};

/*
 * Read and print the version of both HSCX channels (A and B).
 * Returns 1 when either channel reports an implausible version
 * (0x0 or 0xf), 0 otherwise.
 */
int
HscxVersion(struct IsdnCardState *cs, char *s)
{
	int verA, verB;

	verA = cs->BC_Read_Reg(cs, 0, HSCX_VSTR) & 0xf;
	verB = cs->BC_Read_Reg(cs, 1, HSCX_VSTR) & 0xf;
	printk(KERN_INFO "%s HSCX version A: %s B: %s\n", s,
	       HSCXVer[verA], HSCXVer[verB]);
	/* logical || instead of bitwise |: same result for these 0/1
	 * comparison operands, but short-circuits and states the intent */
	if ((verA == 0) || (verA == 0xf) || (verB == 0) || (verB == 0xf))
		return (1);
	else
		return (0);
}

/*
 * Program one HSCX channel for the given layer-1 mode and B-channel.
 * @bcs:  B-channel state (selects HSCX A or B via bcs->hw.hscx.hscx)
 * @mode: L1_MODE_NULL, L1_MODE_TRANS or L1_MODE_HDLC
 * @bc:   B-channel number (0 or 1)
 */
void
modehscx(struct BCState *bcs, int mode, int bc)
{
	struct IsdnCardState *cs = bcs->cs;
	int hscx = bcs->hw.hscx.hscx;

	if (cs->debug & L1_DEB_HSCX)
		debugl1(cs, "hscx %c mode %d ichan %d",
			'A' + hscx, mode, bc);
	bcs->mode = mode;
	bcs->channel = bc;
	cs->BC_Write_Reg(cs, hscx, HSCX_XAD1, 0xFF);
	cs->BC_Write_Reg(cs, hscx, HSCX_XAD2, 0xFF);
	cs->BC_Write_Reg(cs, hscx, HSCX_RAH2, 0xFF);
	cs->BC_Write_Reg(cs, hscx, HSCX_XBCH, 0x0);
	cs->BC_Write_Reg(cs, hscx, HSCX_RLCR, 0x0);
	/* CCR1 value differs for IPAC-based hardware */
	cs->BC_Write_Reg(cs, hscx, HSCX_CCR1,
			 test_bit(HW_IPAC, &cs->HW_Flags) ? 0x82 : 0x85);
	cs->BC_Write_Reg(cs, hscx, HSCX_CCR2, 0x30);
	cs->BC_Write_Reg(cs, hscx, HSCX_XCCR, 7);
	cs->BC_Write_Reg(cs, hscx, HSCX_RCCR, 7);
	/* Switch IOM 1 SSI */
	if (test_bit(HW_IOM1, &cs->HW_Flags) && (hscx == 0))
		bc = 1 - bc;
	if (bc == 0) {
		cs->BC_Write_Reg(cs, hscx, HSCX_TSAX,
				 test_bit(HW_IOM1, &cs->HW_Flags) ?
				 0x7 : bcs->hw.hscx.tsaxr0);
		cs->BC_Write_Reg(cs, hscx, HSCX_TSAR,
				 test_bit(HW_IOM1, &cs->HW_Flags) ?
				 0x7 : bcs->hw.hscx.tsaxr0);
	} else {
		cs->BC_Write_Reg(cs, hscx, HSCX_TSAX, bcs->hw.hscx.tsaxr1);
		cs->BC_Write_Reg(cs, hscx, HSCX_TSAR, bcs->hw.hscx.tsaxr1);
	}
	switch (mode) {
	case (L1_MODE_NULL):
		cs->BC_Write_Reg(cs, hscx, HSCX_TSAX, 0x1f);
		cs->BC_Write_Reg(cs, hscx, HSCX_TSAR, 0x1f);
		cs->BC_Write_Reg(cs, hscx, HSCX_MODE, 0x84);
		break;
	case (L1_MODE_TRANS):
		cs->BC_Write_Reg(cs, hscx, HSCX_MODE, 0xe4);
		break;
	case (L1_MODE_HDLC):
		cs->BC_Write_Reg(cs, hscx, HSCX_CCR1,
				 test_bit(HW_IPAC, &cs->HW_Flags) ? 0x8a : 0x8d);
		cs->BC_Write_Reg(cs, hscx, HSCX_MODE, 0x8c);
		break;
	}
	if (mode)
		cs->BC_Write_Reg(cs, hscx, HSCX_CMDR, 0x41);
	cs->BC_Write_Reg(cs, hscx, HSCX_ISTA, 0x00);
}

/*
 * Layer-2 -> layer-1 message handler for a B-channel.
 * Queues or transmits sk_buffs for PH_DATA/PH_PULL and activates or
 * deactivates the channel for PH_ACTIVATE/PH_DEACTIVATE; hardware access
 * is serialized under cs->lock.
 */
void
hscx_l2l1(struct PStack *st, int pr, void *arg)
{
	struct BCState *bcs = st->l1.bcs;
	u_long flags;
	struct sk_buff *skb = arg;

	switch (pr) {
	case (PH_DATA | REQUEST):
		spin_lock_irqsave(&bcs->cs->lock, flags);
		if (bcs->tx_skb) {
			/* transmitter busy: queue for later */
			skb_queue_tail(&bcs->squeue, skb);
		} else {
			bcs->tx_skb = skb;
			test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
			bcs->hw.hscx.count = 0;
			bcs->cs->BC_Send_Data(bcs);
		}
		spin_unlock_irqrestore(&bcs->cs->lock, flags);
		break;
	case (PH_PULL | INDICATION):
		spin_lock_irqsave(&bcs->cs->lock, flags);
		if (bcs->tx_skb) {
			printk(KERN_WARNING "hscx_l2l1: this shouldn't happen\n");
		} else {
			test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
			bcs->tx_skb = skb;
			bcs->hw.hscx.count = 0;
			bcs->cs->BC_Send_Data(bcs);
		}
		spin_unlock_irqrestore(&bcs->cs->lock, flags);
		break;
	case (PH_PULL | REQUEST):
		if (!bcs->tx_skb) {
			test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
			st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
		} else
			test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
		break;
	case (PH_ACTIVATE | REQUEST):
		spin_lock_irqsave(&bcs->cs->lock, flags);
		test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
		modehscx(bcs, st->l1.mode, st->l1.bc);
		spin_unlock_irqrestore(&bcs->cs->lock, flags);
		l1_msg_b(st, pr, arg);
		break;
	case (PH_DEACTIVATE | REQUEST):
		l1_msg_b(st, pr, arg);
		break;
	case (PH_DEACTIVATE | CONFIRM):
		spin_lock_irqsave(&bcs->cs->lock, flags);
		test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
		test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
		modehscx(bcs, 0, st->l1.bc);
		spin_unlock_irqrestore(&bcs->cs->lock, flags);
		st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
		break;
	}
}

/*
 * Tear down a B-channel: put the HSCX in NULL mode and, if the channel
 * was initialized, free its buffers and pending sk_buffs.
 */
static void
close_hscxstate(struct BCState *bcs)
{
	modehscx(bcs, 0, bcs->channel);
	if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
		kfree(bcs->hw.hscx.rcvbuf);
		bcs->hw.hscx.rcvbuf = NULL;
		kfree(bcs->blog);
		bcs->blog = NULL;
		skb_queue_purge(&bcs->rqueue);
		skb_queue_purge(&bcs->squeue);
		if (bcs->tx_skb) {
			dev_kfree_skb_any(bcs->tx_skb);
			bcs->tx_skb = NULL;
			test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
		}
	}
}

/*
 * Initialize a B-channel state: allocate receive and log buffers the
 * first time, then reset counters/flags.  Returns 0 on success, 1 or 2
 * on allocation failure (rcvbuf resp. blog).
 */
int
open_hscxstate(struct IsdnCardState *cs, struct BCState *bcs)
{
	if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
		if (!(bcs->hw.hscx.rcvbuf = kmalloc(HSCX_BUFMAX, GFP_ATOMIC))) {
			printk(KERN_WARNING
			       "HiSax: No memory for hscx.rcvbuf\n");
			test_and_clear_bit(BC_FLG_INIT, &bcs->Flag);
			return (1);
		}
		if (!(bcs->blog = kmalloc(MAX_BLOG_SPACE, GFP_ATOMIC))) {
			printk(KERN_WARNING
			       "HiSax: No memory for bcs->blog\n");
			test_and_clear_bit(BC_FLG_INIT, &bcs->Flag);
			kfree(bcs->hw.hscx.rcvbuf);
			bcs->hw.hscx.rcvbuf = NULL;
			return (2);
		}
		skb_queue_head_init(&bcs->rqueue);
		skb_queue_head_init(&bcs->squeue);
	}
	bcs->tx_skb = NULL;
	test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
	bcs->event = 0;
	bcs->hw.hscx.rcvidx = 0;
	bcs->tx_cnt = 0;
	return (0);
}

/*
 * Bind a protocol stack to a B-channel and hook the l2->l1 handler.
 * Returns 0 on success, -1 when open_hscxstate() fails.
 */
static int
setstack_hscx(struct PStack *st, struct BCState *bcs)
{
	bcs->channel = st->l1.bc;
	if (open_hscxstate(st->l1.hardware, bcs))
		return (-1);
	st->l1.bcs = bcs;
	st->l2.l2l1 = hscx_l2l1;
	setstack_manager(st);
	bcs->st = st;
	setstack_l1_B(st);
	return (0);
}

/*
 * Read and log all pending HSCX interrupt/status registers, then mask
 * every HSCX interrupt source.
 */
void
clear_pending_hscx_ints(struct IsdnCardState *cs)
{
	int val, eval;

	val = cs->BC_Read_Reg(cs, 1, HSCX_ISTA);
	debugl1(cs, "HSCX B ISTA %x", val);
	if (val & 0x01) {
		eval = cs->BC_Read_Reg(cs, 1, HSCX_EXIR);
		debugl1(cs, "HSCX B EXIR %x", eval);
	}
	if (val & 0x02) {
		eval = cs->BC_Read_Reg(cs, 0, HSCX_EXIR);
		debugl1(cs, "HSCX A EXIR %x", eval);
	}
	val = cs->BC_Read_Reg(cs, 0, HSCX_ISTA);
	debugl1(cs, "HSCX A ISTA %x", val);
	val = cs->BC_Read_Reg(cs, 1, HSCX_STAR);
	debugl1(cs, "HSCX B STAR %x", val);
	val = cs->BC_Read_Reg(cs, 0, HSCX_STAR);
	debugl1(cs, "HSCX A STAR %x", val);
	/* disable all IRQ */
	cs->BC_Write_Reg(cs, 0, HSCX_MASK, 0xFF);
	cs->BC_Write_Reg(cs, 1, HSCX_MASK, 0xFF);
}

/*
 * Install the per-channel callbacks and default time-slot assignments
 * for both HSCX channels, then put both in NULL mode.
 */
void
inithscx(struct IsdnCardState *cs)
{
	cs->bcs[0].BC_SetStack = setstack_hscx;
	cs->bcs[1].BC_SetStack = setstack_hscx;
	cs->bcs[0].BC_Close = close_hscxstate;
	cs->bcs[1].BC_Close = close_hscxstate;
	cs->bcs[0].hw.hscx.hscx = 0;
	cs->bcs[1].hw.hscx.hscx = 1;
	cs->bcs[0].hw.hscx.tsaxr0 = 0x2f;
	cs->bcs[0].hw.hscx.tsaxr1 = 3;
	cs->bcs[1].hw.hscx.tsaxr0 = 0x2f;
	cs->bcs[1].hw.hscx.tsaxr1 = 3;
	modehscx(cs->bcs, 0, 0);
	modehscx(cs->bcs + 1, 0, 0);
}

/*
 * Combined ISAC + HSCX initialization.
 * @part: bit 0 = clear pending interrupts and init both chips,
 *        bit 1 = re-enable interrupts and reset receiver/transmitter.
 */
void
inithscxisac(struct IsdnCardState *cs, int part)
{
	if (part & 1) {
		clear_pending_isac_ints(cs);
		clear_pending_hscx_ints(cs);
		initisac(cs);
		inithscx(cs);
	}
	if (part & 2) {
		/* Reenable all IRQ */
		cs->writeisac(cs, ISAC_MASK, 0);
		cs->BC_Write_Reg(cs, 0, HSCX_MASK, 0);
		cs->BC_Write_Reg(cs, 1, HSCX_MASK, 0);
		/* RESET Receiver and Transmitter */
		cs->writeisac(cs, ISAC_CMDR, 0x41);
	}
}
gpl-2.0
v-yadli/YadliKernel
arch/blackfin/kernel/cplb-mpu/cplbinit.c
12356
2771
/* * Blackfin CPLB initialization * * Copyright 2008-2009 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <asm/blackfin.h> #include <asm/cplb.h> #include <asm/cplbinit.h> #include <asm/mem_map.h> struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS]; struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS]; int first_switched_icplb, first_switched_dcplb; int first_mask_dcplb; void __init generate_cplb_tables_cpu(unsigned int cpu) { int i_d, i_i; unsigned long addr; unsigned long d_data, i_data; unsigned long d_cache = 0, i_cache = 0; printk(KERN_INFO "MPU: setting up cplb tables with memory protection\n"); #ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE i_cache = CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND; #endif #ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE d_cache = CPLB_L1_CHBL; #ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH d_cache |= CPLB_L1_AOW | CPLB_WT; #endif #endif i_d = i_i = 0; /* Set up the zero page. */ dcplb_tbl[cpu][i_d].addr = 0; dcplb_tbl[cpu][i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB; icplb_tbl[cpu][i_i].addr = 0; icplb_tbl[cpu][i_i++].data = CPLB_VALID | i_cache | CPLB_USER_RD | PAGE_SIZE_1KB; /* Cover kernel memory with 4M pages. */ addr = 0; d_data = d_cache | CPLB_SUPV_WR | CPLB_VALID | PAGE_SIZE_4MB | CPLB_DIRTY; i_data = i_cache | CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4MB; for (; addr < memory_start; addr += 4 * 1024 * 1024) { dcplb_tbl[cpu][i_d].addr = addr; dcplb_tbl[cpu][i_d++].data = d_data; icplb_tbl[cpu][i_i].addr = addr; icplb_tbl[cpu][i_i++].data = i_data | (addr == 0 ? CPLB_USER_RD : 0); } #ifdef CONFIG_ROMKERNEL /* Cover kernel XIP flash area */ addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1); dcplb_tbl[cpu][i_d].addr = addr; dcplb_tbl[cpu][i_d++].data = d_data | CPLB_USER_RD; icplb_tbl[cpu][i_i].addr = addr; icplb_tbl[cpu][i_i++].data = i_data | CPLB_USER_RD; #endif /* Cover L1 memory. One 4M area for code and data each is enough. 
*/ #if L1_DATA_A_LENGTH > 0 || L1_DATA_B_LENGTH > 0 dcplb_tbl[cpu][i_d].addr = get_l1_data_a_start_cpu(cpu); dcplb_tbl[cpu][i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB; #endif #if L1_CODE_LENGTH > 0 icplb_tbl[cpu][i_i].addr = get_l1_code_start_cpu(cpu); icplb_tbl[cpu][i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB; #endif /* Cover L2 memory */ #if L2_LENGTH > 0 dcplb_tbl[cpu][i_d].addr = L2_START; dcplb_tbl[cpu][i_d++].data = L2_DMEMORY; icplb_tbl[cpu][i_i].addr = L2_START; icplb_tbl[cpu][i_i++].data = L2_IMEMORY; #endif first_mask_dcplb = i_d; first_switched_dcplb = i_d + (1 << page_mask_order); first_switched_icplb = i_i; while (i_d < MAX_CPLBS) dcplb_tbl[cpu][i_d++].data = 0; while (i_i < MAX_CPLBS) icplb_tbl[cpu][i_i++].data = 0; } void __init generate_cplb_tables_all(void) { }
gpl-2.0
GruesomeWolf/Slippery_Sloth
arch/parisc/math-emu/fcnvfut.c
14148
8031
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC * * File: * @(#) pa/spmath/fcnvfut.c $Revision: 1.1 $ * * Purpose: * Floating-point to Unsigned Fixed-point Converts with Truncation * * External Interfaces: * dbl_to_dbl_fcnvfut(srcptr,nullptr,dstptr,status) * dbl_to_sgl_fcnvfut(srcptr,nullptr,dstptr,status) * sgl_to_dbl_fcnvfut(srcptr,nullptr,dstptr,status) * sgl_to_sgl_fcnvfut(srcptr,nullptr,dstptr,status) * * Internal Interfaces: * * Theory: * <<please update with a overview of the operation of this file>> * * END_DESC */ #include "float.h" #include "sgl_float.h" #include "dbl_float.h" #include "cnv_float.h" /************************************************************************ * Floating-point to Unsigned Fixed-point Converts with Truncation * ************************************************************************/ /* * Convert single floating-point to single fixed-point format * with truncated result */ /*ARGSUSED*/ int sgl_to_sgl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr, unsigned int *dstptr, unsigned int *status) { register unsigned int src, result; register int src_exponent; src = *srcptr; src_exponent = Sgl_exponent(src) - SGL_BIAS; /* * 
Test for overflow */ if (src_exponent > SGL_FX_MAX_EXP + 1) { if (Sgl_isone_sign(src)) { result = 0; } else { result = 0xffffffff; } if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); *dstptr = result; return(NOEXCEPTION); } /* * Generate result */ if (src_exponent >= 0) { /* * Check sign. * If negative, trap unimplemented. */ if (Sgl_isone_sign(src)) { result = 0; if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); *dstptr = result; return(NOEXCEPTION); } Sgl_clear_signexponent_set_hidden(src); Suint_from_sgl_mantissa(src,src_exponent,result); *dstptr = result; /* check for inexact */ if (Sgl_isinexact_to_unsigned(src,src_exponent)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } else { *dstptr = 0; /* check for inexact */ if (Sgl_isnotzero_exponentmantissa(src)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } return(NOEXCEPTION); } /* * Single Floating-point to Double Unsigned Fixed */ /*ARGSUSED*/ int sgl_to_dbl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr, dbl_unsigned * dstptr, unsigned int *status) { register int src_exponent; register unsigned int src, resultp1, resultp2; src = *srcptr; src_exponent = Sgl_exponent(src) - SGL_BIAS; /* * Test for overflow */ if (src_exponent > DBL_FX_MAX_EXP + 1) { if (Sgl_isone_sign(src)) { resultp1 = resultp2 = 0; } else { resultp1 = resultp2 = 0xffffffff; } if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); Duint_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } /* * Generate result */ if (src_exponent >= 0) { /* * Check sign. * If negative, trap unimplemented. 
*/ if (Sgl_isone_sign(src)) { resultp1 = resultp2 = 0; if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); Duint_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } Sgl_clear_signexponent_set_hidden(src); Duint_from_sgl_mantissa(src,src_exponent,resultp1,resultp2); Duint_copytoptr(resultp1,resultp2,dstptr); /* check for inexact */ if (Sgl_isinexact_to_unsigned(src,src_exponent)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } else { Duint_setzero(resultp1,resultp2); Duint_copytoptr(resultp1,resultp2,dstptr); /* check for inexact */ if (Sgl_isnotzero_exponentmantissa(src)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } return(NOEXCEPTION); } /* * Double Floating-point to Single Unsigned Fixed */ /*ARGSUSED*/ int dbl_to_sgl_fcnvfut (dbl_floating_point * srcptr, unsigned int *nullptr, unsigned int *dstptr, unsigned int *status) { register unsigned int srcp1, srcp2, result; register int src_exponent; Dbl_copyfromptr(srcptr,srcp1,srcp2); src_exponent = Dbl_exponent(srcp1) - DBL_BIAS; /* * Test for overflow */ if (src_exponent > SGL_FX_MAX_EXP + 1) { if (Dbl_isone_sign(srcp1)) { result = 0; } else { result = 0xffffffff; } if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); *dstptr = result; return(NOEXCEPTION); } /* * Generate result */ if (src_exponent >= 0) { /* * Check sign. * If negative, trap unimplemented. 
*/ if (Dbl_isone_sign(srcp1)) { result = 0; if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); *dstptr = result; return(NOEXCEPTION); } Dbl_clear_signexponent_set_hidden(srcp1); Suint_from_dbl_mantissa(srcp1,srcp2,src_exponent,result); *dstptr = result; /* check for inexact */ if (Dbl_isinexact_to_unsigned(srcp1,srcp2,src_exponent)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } else { *dstptr = 0; /* check for inexact */ if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } return(NOEXCEPTION); } /* * Double Floating-point to Double Unsigned Fixed */ /*ARGSUSED*/ int dbl_to_dbl_fcnvfut (dbl_floating_point * srcptr, unsigned int *nullptr, dbl_unsigned * dstptr, unsigned int *status) { register int src_exponent; register unsigned int srcp1, srcp2, resultp1, resultp2; Dbl_copyfromptr(srcptr,srcp1,srcp2); src_exponent = Dbl_exponent(srcp1) - DBL_BIAS; /* * Test for overflow */ if (src_exponent > DBL_FX_MAX_EXP + 1) { if (Dbl_isone_sign(srcp1)) { resultp1 = resultp2 = 0; } else { resultp1 = resultp2 = 0xffffffff; } if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); Duint_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } /* * Generate result */ if (src_exponent >= 0) { /* * Check sign. * If negative, trap unimplemented. 
*/ if (Dbl_isone_sign(srcp1)) { resultp1 = resultp2 = 0; if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); Duint_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } Dbl_clear_signexponent_set_hidden(srcp1); Duint_from_dbl_mantissa(srcp1,srcp2,src_exponent, resultp1,resultp2); Duint_copytoptr(resultp1,resultp2,dstptr); /* check for inexact */ if (Dbl_isinexact_to_unsigned(srcp1,srcp2,src_exponent)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } else { Duint_setzero(resultp1,resultp2); Duint_copytoptr(resultp1,resultp2,dstptr); /* check for inexact */ if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } return(NOEXCEPTION); }
gpl-2.0
VincentEmmanuel/android_kernel_pantech_e51k
fs/afs/netdevices.c
14404
1487
/* AFS network device helpers * * Copyright (c) 2007 Patrick McHardy <kaber@trash.net> */ #include <linux/string.h> #include <linux/rtnetlink.h> #include <linux/inetdevice.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <net/net_namespace.h> #include "internal.h" /* * get a MAC address from a random ethernet interface that has a real one * - the buffer will normally be 6 bytes in size */ int afs_get_MAC_address(u8 *mac, size_t maclen) { struct net_device *dev; int ret = -ENODEV; BUG_ON(maclen != ETH_ALEN); rtnl_lock(); dev = __dev_getfirstbyhwtype(&init_net, ARPHRD_ETHER); if (dev) { memcpy(mac, dev->dev_addr, maclen); ret = 0; } rtnl_unlock(); return ret; } /* * get a list of this system's interface IPv4 addresses, netmasks and MTUs * - maxbufs must be at least 1 * - returns the number of interface records in the buffer */ int afs_get_ipv4_interfaces(struct afs_interface *bufs, size_t maxbufs, bool wantloopback) { struct net_device *dev; struct in_device *idev; int n = 0; ASSERT(maxbufs > 0); rtnl_lock(); for_each_netdev(&init_net, dev) { if (dev->type == ARPHRD_LOOPBACK && !wantloopback) continue; idev = __in_dev_get_rtnl(dev); if (!idev) continue; for_primary_ifa(idev) { bufs[n].address.s_addr = ifa->ifa_address; bufs[n].netmask.s_addr = ifa->ifa_mask; bufs[n].mtu = dev->mtu; n++; if (n >= maxbufs) goto out; } endfor_ifa(idev); } out: rtnl_unlock(); return n; }
gpl-2.0
SlimRoms/kernel_motorola_msm8992
drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limSendMessages.c
69
33989
/* * Copyright (c) 2011-2013 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * This file was originally distributed by Qualcomm Atheros, Inc. * under proprietary terms before Copyright ownership was assigned * to the Linux Foundation. */ /* * * limSendMessages.c: Provides functions to send messages or Indications to HAL. * Author: Sunit Bhatia * Date: 09/21/2006 * History:- * Date Modified by Modification Information * * -------------------------------------------------------------------------- * */ #include "limSendMessages.h" #include "cfgApi.h" #include "limTrace.h" #ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT #include "vos_diag_core_log.h" #endif //FEATURE_WLAN_DIAG_SUPPORT /* When beacon filtering is enabled, firmware will * analyze the selected beacons received during BMPS, * and monitor any changes in the IEs as listed below. 
* The format of the table is: * - EID * - Check for IE presence * - Byte offset * - Byte value * - Bit Mask * - Byte refrence */ static tBeaconFilterIe beaconFilterTable[] = { {SIR_MAC_DS_PARAM_SET_EID, 0, {0, 0, DS_PARAM_CHANNEL_MASK, 0}}, {SIR_MAC_ERP_INFO_EID, 0, {0, 0, ERP_FILTER_MASK, 0}}, {SIR_MAC_EDCA_PARAM_SET_EID, 0, {0, 0, EDCA_FILTER_MASK, 0}}, {SIR_MAC_QOS_CAPABILITY_EID, 0, {0, 0, QOS_FILTER_MASK, 0}}, {SIR_MAC_CHNL_SWITCH_ANN_EID, 1, {0, 0, 0, 0}}, {SIR_MAC_HT_INFO_EID, 0, {0, 0, HT_BYTE0_FILTER_MASK, 0}}, {SIR_MAC_HT_INFO_EID, 0, {2, 0, HT_BYTE2_FILTER_MASK, 0}}, {SIR_MAC_HT_INFO_EID, 0, {5, 0, HT_BYTE5_FILTER_MASK, 0}} #if defined WLAN_FEATURE_VOWIFI ,{SIR_MAC_PWR_CONSTRAINT_EID, 0, {0, 0, 0, 0}} #endif #ifdef WLAN_FEATURE_11AC ,{SIR_MAC_VHT_OPMODE_EID, 0, {0, 0, 0, 0}} ,{SIR_MAC_VHT_OPERATION_EID, 0, {0, 0, VHTOP_CHWIDTH_MASK, 0}} #endif }; /** * limSendCFParams() * *FUNCTION: * This function is called to send CFP Parameters to WDA, when they are changed. * *LOGIC: * *ASSUMPTIONS: * NA * *NOTE: * NA * * @param pMac pointer to Global Mac structure. * @param bssIdx Bss Index of the BSS to which STA is associated. * @param cfpCount CFP Count, if that is changed. * @param cfpPeriod CFP Period if that is changed. * * @return success if message send is ok, else false. 
*/ tSirRetStatus limSendCFParams(tpAniSirGlobal pMac, tANI_U8 bssIdx, tANI_U8 cfpCount, tANI_U8 cfpPeriod) { tpUpdateCFParams pCFParams = NULL; tSirRetStatus retCode = eSIR_SUCCESS; tSirMsgQ msgQ; pCFParams = vos_mem_malloc(sizeof( tUpdateCFParams )); if ( NULL == pCFParams ) { limLog( pMac, LOGP, FL( "Unable to allocate memory during Update CF Params" )); retCode = eSIR_MEM_ALLOC_FAILED; goto returnFailure; } vos_mem_set( (tANI_U8 *) pCFParams, sizeof(tUpdateCFParams), 0); pCFParams->cfpCount = cfpCount; pCFParams->cfpPeriod = cfpPeriod; pCFParams->bssIdx = bssIdx; msgQ.type = WDA_UPDATE_CF_IND; msgQ.reserved = 0; msgQ.bodyptr = pCFParams; msgQ.bodyval = 0; limLog( pMac, LOG3, FL( "Sending WDA_UPDATE_CF_IND..." )); MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type)); if( eSIR_SUCCESS != (retCode = wdaPostCtrlMsg( pMac, &msgQ ))) { vos_mem_free(pCFParams); limLog( pMac, LOGP, FL("Posting WDA_UPDATE_CF_IND to WDA failed, reason=%X"), retCode ); } returnFailure: return retCode; } /** * limSendBeaconParams() * *FUNCTION: * This function is called to send beacnon interval, short preamble or other * parameters to WDA, which are changed and indication is received in beacon. * *LOGIC: * *ASSUMPTIONS: * NA * *NOTE: * NA * * @param pMac pointer to Global Mac structure. * @param tpUpdateBeaconParams pointer to the structure, which contains the beacon parameters which are changed. * * @return success if message send is ok, else false. 
*/ tSirRetStatus limSendBeaconParams(tpAniSirGlobal pMac, tpUpdateBeaconParams pUpdatedBcnParams, tpPESession psessionEntry ) { tpUpdateBeaconParams pBcnParams = NULL; tSirRetStatus retCode = eSIR_SUCCESS; tSirMsgQ msgQ; pBcnParams = vos_mem_malloc(sizeof(*pBcnParams)); if ( NULL == pBcnParams ) { limLog( pMac, LOGP, FL( "Unable to allocate memory during Update Beacon Params" )); return eSIR_MEM_ALLOC_FAILED; } vos_mem_copy((tANI_U8 *) pBcnParams, pUpdatedBcnParams, sizeof(*pBcnParams)); msgQ.type = WDA_UPDATE_BEACON_IND; msgQ.reserved = 0; msgQ.bodyptr = pBcnParams; msgQ.bodyval = 0; PELOG3(limLog( pMac, LOG3, FL( "Sending WDA_UPDATE_BEACON_IND, paramChangeBitmap in hex = %x" ), pUpdatedBcnParams->paramChangeBitmap);) if(NULL == psessionEntry) { vos_mem_free(pBcnParams); MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type)); return eSIR_FAILURE; } else { MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msgQ.type)); } pBcnParams->smeSessionId = psessionEntry->smeSessionId; if( eSIR_SUCCESS != (retCode = wdaPostCtrlMsg( pMac, &msgQ ))) { vos_mem_free(pBcnParams); limLog( pMac, LOGP, FL("Posting WDA_UPDATE_BEACON_IND to WDA failed, reason=%X"), retCode ); } limSendBeaconInd(pMac, psessionEntry); return retCode; } /** * limSendSwitchChnlParams() * *FUNCTION: * This function is called to send Channel Switch Indication to WDA * *LOGIC: * *ASSUMPTIONS: * NA * *NOTE: * NA * * @param pMac pointer to Global Mac structure. * @param chnlNumber New Channel Number to be switched to. * @param secondaryChnlOffset an enum for secondary channel offset. * @param localPowerConstraint 11h local power constraint value * * @return success if message send is ok, else false. 
*/ #if !defined WLAN_FEATURE_VOWIFI tSirRetStatus limSendSwitchChnlParams(tpAniSirGlobal pMac, tANI_U8 chnlNumber, ePhyChanBondState secondaryChnlOffset, tANI_U8 localPwrConstraint, tANI_U8 peSessionId) #else tSirRetStatus limSendSwitchChnlParams(tpAniSirGlobal pMac, tANI_U8 chnlNumber, ePhyChanBondState secondaryChnlOffset, tPowerdBm maxTxPower, tANI_U8 peSessionId) #endif { tpSwitchChannelParams pChnlParams = NULL; tSirRetStatus retCode = eSIR_SUCCESS; tSirMsgQ msgQ; tpPESession pSessionEntry; if((pSessionEntry = peFindSessionBySessionId(pMac, peSessionId)) == NULL) { limLog( pMac, LOGP, FL( "Unable to get Session for session Id %d" ), peSessionId); return eSIR_FAILURE; } pChnlParams = vos_mem_malloc(sizeof( tSwitchChannelParams )); if ( NULL == pChnlParams ) { limLog( pMac, LOGP, FL( "Unable to allocate memory during Switch Channel Params" )); retCode = eSIR_MEM_ALLOC_FAILED; goto returnFailure; } vos_mem_set((tANI_U8 *) pChnlParams, sizeof(tSwitchChannelParams), 0); pChnlParams->secondaryChannelOffset = secondaryChnlOffset; pChnlParams->channelNumber= chnlNumber; vos_mem_copy( pChnlParams->selfStaMacAddr, pSessionEntry->selfMacAddr, sizeof(tSirMacAddr) ); #if defined WLAN_FEATURE_VOWIFI pChnlParams->maxTxPower = maxTxPower; #else pChnlParams->localPowerConstraint = localPwrConstraint; #endif vos_mem_copy( pChnlParams->bssId, pSessionEntry->bssId, sizeof(tSirMacAddr) ); pChnlParams->peSessionId = peSessionId; pChnlParams->vhtCapable = pSessionEntry->vhtCapability; pChnlParams->dot11_mode = pSessionEntry->dot11mode; /*Set DFS flag for DFS channel*/ if (vos_nv_getChannelEnabledState(chnlNumber) == NV_CHANNEL_DFS) pChnlParams->isDfsChannel= VOS_TRUE; else pChnlParams->isDfsChannel = VOS_FALSE; //we need to defer the message until we get the response back from WDA. 
SET_LIM_PROCESS_DEFD_MESGS(pMac, false); msgQ.type = WDA_CHNL_SWITCH_REQ; msgQ.reserved = 0; msgQ.bodyptr = pChnlParams; msgQ.bodyval = 0; #if defined WLAN_FEATURE_VOWIFI PELOG3(limLog( pMac, LOG3, FL( "Sending WDA_CHNL_SWITCH_REQ with SecondaryChnOffset - %d, ChannelNumber - %d, maxTxPower - %d"), pChnlParams->secondaryChannelOffset, pChnlParams->channelNumber, pChnlParams->maxTxPower);) #else PELOG3(limLog( pMac, LOG3, FL( "Sending WDA_CHNL_SWITCH_REQ with SecondaryChnOffset - %d, ChannelNumber - %d, LocalPowerConstraint - %d"), pChnlParams->secondaryChannelOffset, pChnlParams->channelNumber, pChnlParams->localPowerConstraint);) #endif MTRACE(macTraceMsgTx(pMac, peSessionId, msgQ.type)); limLog(pMac,LOG1,"SessionId:%d WDA_CHNL_SWITCH_REQ for SSID:%s",peSessionId, pSessionEntry->ssId.ssId); if( eSIR_SUCCESS != (retCode = wdaPostCtrlMsg( pMac, &msgQ ))) { vos_mem_free(pChnlParams); limLog(pMac, LOGP, FL("Posting WDA_CHNL_SWITCH_REQ to WDA failed, reason=%X"), retCode ); } returnFailure: return retCode; } /** * limSendEdcaParams() * *FUNCTION: * This function is called to send dynamically changing EDCA Parameters to WDA. * *LOGIC: * *ASSUMPTIONS: * NA * *NOTE: * NA * * @param pMac pointer to Global Mac structure. * @param tpUpdatedEdcaParams pointer to the structure which contains * dynamically changing EDCA parameters. * @param highPerformance If the peer is Airgo (taurus) then switch to highPerformance is true. * * @return success if message send is ok, else false. 
*/ tSirRetStatus limSendEdcaParams(tpAniSirGlobal pMac, tSirMacEdcaParamRecord *pUpdatedEdcaParams, tANI_U16 bssIdx, tANI_BOOLEAN highPerformance) { tEdcaParams *pEdcaParams = NULL; tSirRetStatus retCode = eSIR_SUCCESS; tSirMsgQ msgQ; pEdcaParams = vos_mem_malloc(sizeof(tEdcaParams)); if ( NULL == pEdcaParams ) { limLog( pMac, LOGP, FL( "Unable to allocate memory during Update EDCA Params" )); retCode = eSIR_MEM_ALLOC_FAILED; return retCode; } pEdcaParams->bssIdx = bssIdx; pEdcaParams->acbe = pUpdatedEdcaParams[EDCA_AC_BE]; pEdcaParams->acbk = pUpdatedEdcaParams[EDCA_AC_BK]; pEdcaParams->acvi = pUpdatedEdcaParams[EDCA_AC_VI]; pEdcaParams->acvo = pUpdatedEdcaParams[EDCA_AC_VO]; pEdcaParams->highPerformance = highPerformance; msgQ.type = WDA_UPDATE_EDCA_PROFILE_IND; msgQ.reserved = 0; msgQ.bodyptr = pEdcaParams; msgQ.bodyval = 0; { tANI_U8 i; PELOG1(limLog( pMac, LOG1,FL("Sending WDA_UPDATE_EDCA_PROFILE_IND with EDCA Parameters:" ));) for(i=0; i<MAX_NUM_AC; i++) { PELOG1(limLog(pMac, LOG1, FL("AC[%d]: AIFSN %d, ACM %d, CWmin %d, CWmax %d, TxOp %d "), i, pUpdatedEdcaParams[i].aci.aifsn, pUpdatedEdcaParams[i].aci.acm, pUpdatedEdcaParams[i].cw.min, pUpdatedEdcaParams[i].cw.max, pUpdatedEdcaParams[i].txoplimit);) } } MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type)); if( eSIR_SUCCESS != (retCode = wdaPostCtrlMsg( pMac, &msgQ ))) { vos_mem_free(pEdcaParams); limLog( pMac, LOGP, FL("Posting WDA_UPDATE_EDCA_PROFILE_IND to WDA failed, reason=%X"), retCode ); } return retCode; } /** * limSetActiveEdcaParams() * * FUNCTION: * This function is called to set the most up-to-date EDCA parameters * given the default local EDCA parameters. The rules are as following: * - If ACM bit is set for all ACs, then downgrade everything to Best Effort. * - If ACM is not set for any AC, then PE will use the default EDCA * parameters as advertised by AP. * - If ACM is set in any of the ACs, PE will use the EDCA parameters * from the next best AC for which ACM is not enabled. 
* * @param pMac pointer to Global Mac structure. * @param plocalEdcaParams pointer to the local EDCA parameters * @ param psessionEntry point to the session entry * @return none */ void limSetActiveEdcaParams(tpAniSirGlobal pMac, tSirMacEdcaParamRecord *plocalEdcaParams, tpPESession psessionEntry) { tANI_U8 ac, newAc, i; tANI_U8 acAdmitted; #ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT vos_log_qos_edca_pkt_type *log_ptr = NULL; #endif //FEATURE_WLAN_DIAG_SUPPORT // Initialize gLimEdcaParamsActive[] to be same as localEdcaParams psessionEntry->gLimEdcaParamsActive[EDCA_AC_BE] = plocalEdcaParams[EDCA_AC_BE]; psessionEntry->gLimEdcaParamsActive[EDCA_AC_BK] = plocalEdcaParams[EDCA_AC_BK]; psessionEntry->gLimEdcaParamsActive[EDCA_AC_VI] = plocalEdcaParams[EDCA_AC_VI]; psessionEntry->gLimEdcaParamsActive[EDCA_AC_VO] = plocalEdcaParams[EDCA_AC_VO]; /* An AC requires downgrade if the ACM bit is set, and the AC has not * yet been admitted in uplink or bi-directions. * If an AC requires downgrade, it will downgrade to the next beset AC * for which ACM is not enabled. * * - There's no need to downgrade AC_BE since it IS the lowest AC. Hence * start the for loop with AC_BK. * - If ACM bit is set for an AC, initially downgrade it to AC_BE. Then * traverse thru the AC list. If we do find the next best AC which is * better than AC_BE, then use that one. For example, if ACM bits are set * such that: BE_ACM=1, BK_ACM=1, VI_ACM=1, VO_ACM=0 * then all AC will be downgraded to AC_BE. 
*/ if(!pMac->psOffloadEnabled) { limLog(pMac, LOG1, FL("adAdmitMask[UPLINK] = 0x%x "), pMac->lim.gAcAdmitMask[SIR_MAC_DIRECTION_UPLINK] ); limLog(pMac, LOG1, FL("adAdmitMask[DOWNLINK] = 0x%x "), pMac->lim.gAcAdmitMask[SIR_MAC_DIRECTION_DNLINK] ); } else { limLog(pMac, LOG1, FL("adAdmitMask[UPLINK] = 0x%x "), psessionEntry->gAcAdmitMask[SIR_MAC_DIRECTION_UPLINK] ); limLog(pMac, LOG1, FL("adAdmitMask[DOWNLINK] = 0x%x "), psessionEntry->gAcAdmitMask[SIR_MAC_DIRECTION_DNLINK] ); } for (ac = EDCA_AC_BK; ac <= EDCA_AC_VO; ac++) { if(!pMac->psOffloadEnabled) { acAdmitted = ( (pMac->lim.gAcAdmitMask[SIR_MAC_DIRECTION_UPLINK] & (1 << ac)) >> ac ); } else { acAdmitted = ((psessionEntry->gAcAdmitMask[SIR_MAC_DIRECTION_UPLINK] & (1 << ac)) >> ac ); } limLog(pMac, LOG1, FL("For AC[%d]: acm=%d, acAdmit=%d "), ac, plocalEdcaParams[ac].aci.acm, acAdmitted); if ( (plocalEdcaParams[ac].aci.acm == 1) && (acAdmitted == 0) ) { limLog(pMac, LOG1, FL("We need to downgrade AC %d!! "), ac); newAc = EDCA_AC_BE; for (i=ac-1; i>0; i--) { if (plocalEdcaParams[i].aci.acm == 0) { newAc = i; break; } } limLog(pMac, LOGW, FL("Downgrading AC %d ---> AC %d "), ac, newAc); psessionEntry->gLimEdcaParamsActive[ac] = plocalEdcaParams[newAc]; } } //log: LOG_WLAN_QOS_EDCA_C #ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT WLAN_VOS_DIAG_LOG_ALLOC(log_ptr, vos_log_qos_edca_pkt_type, LOG_WLAN_QOS_EDCA_C); if(log_ptr) { log_ptr->aci_be = psessionEntry->gLimEdcaParamsActive[EDCA_AC_BE].aci.aci; log_ptr->cw_be = psessionEntry->gLimEdcaParamsActive[EDCA_AC_BE].cw.max << 4 | psessionEntry->gLimEdcaParamsActive[EDCA_AC_BE].cw.min; log_ptr->txoplimit_be = psessionEntry->gLimEdcaParamsActive[EDCA_AC_BE].txoplimit; log_ptr->aci_bk = psessionEntry->gLimEdcaParamsActive[EDCA_AC_BK].aci.aci; log_ptr->cw_bk = psessionEntry->gLimEdcaParamsActive[EDCA_AC_BK].cw.max << 4 | psessionEntry->gLimEdcaParamsActive[EDCA_AC_BK].cw.min; log_ptr->txoplimit_bk = 
psessionEntry->gLimEdcaParamsActive[EDCA_AC_BK].txoplimit; log_ptr->aci_vi = psessionEntry->gLimEdcaParamsActive[EDCA_AC_VI].aci.aci; log_ptr->cw_vi = psessionEntry->gLimEdcaParamsActive[EDCA_AC_VI].cw.max << 4 | psessionEntry->gLimEdcaParamsActive[EDCA_AC_VI].cw.min; log_ptr->txoplimit_vi = psessionEntry->gLimEdcaParamsActive[EDCA_AC_VI].txoplimit; log_ptr->aci_vo = psessionEntry->gLimEdcaParamsActive[EDCA_AC_VO].aci.aci; log_ptr->cw_vo = psessionEntry->gLimEdcaParamsActive[EDCA_AC_VO].cw.max << 4 | psessionEntry->gLimEdcaParamsActive[EDCA_AC_VO].cw.min; log_ptr->txoplimit_vo = psessionEntry->gLimEdcaParamsActive[EDCA_AC_VO].txoplimit; } WLAN_VOS_DIAG_LOG_REPORT(log_ptr); #endif //FEATURE_WLAN_DIAG_SUPPORT return; } /** --------------------------------------------------------- \fn limSetLinkState \brief LIM sends a message to WDA to set the link state \param tpAniSirGlobal pMac \param tSirLinkState state \return None -----------------------------------------------------------*/ tSirRetStatus limSetLinkState(tpAniSirGlobal pMac, tSirLinkState state,tSirMacAddr bssId, tSirMacAddr selfMacAddr, tpSetLinkStateCallback callback, void *callbackArg) { tSirMsgQ msgQ; tSirRetStatus retCode; tpLinkStateParams pLinkStateParams = NULL; // Allocate memory. 
pLinkStateParams = vos_mem_malloc(sizeof(tLinkStateParams)); if ( NULL == pLinkStateParams ) { limLog( pMac, LOGP, FL( "Unable to allocate memory while sending Set Link State" )); retCode = eSIR_SME_RESOURCES_UNAVAILABLE; return retCode; } vos_mem_set((tANI_U8 *) pLinkStateParams, sizeof(tLinkStateParams), 0); pLinkStateParams->state = state; pLinkStateParams->callback = callback; pLinkStateParams->callbackArg = callbackArg; /* Copy Mac address */ sirCopyMacAddr(pLinkStateParams->bssid,bssId); sirCopyMacAddr(pLinkStateParams->selfMacAddr, selfMacAddr); msgQ.type = WDA_SET_LINK_STATE; msgQ.reserved = 0; msgQ.bodyptr = pLinkStateParams; msgQ.bodyval = 0; MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type)); retCode = (tANI_U32)wdaPostCtrlMsg(pMac, &msgQ); if (retCode != eSIR_SUCCESS) { vos_mem_free(pLinkStateParams); limLog(pMac, LOGP, FL("Posting link state %d failed, reason = %x "), state, retCode); } return retCode; } #ifdef WLAN_FEATURE_VOWIFI_11R extern tSirRetStatus limSetLinkStateFT(tpAniSirGlobal pMac, tSirLinkState state,tSirMacAddr bssId, tSirMacAddr selfMacAddr, int ft, tpPESession psessionEntry) { tSirMsgQ msgQ; tSirRetStatus retCode; tpLinkStateParams pLinkStateParams = NULL; // Allocate memory. 
pLinkStateParams = vos_mem_malloc(sizeof(tLinkStateParams)); if ( NULL == pLinkStateParams ) { limLog( pMac, LOGP, FL( "Unable to allocate memory while sending Set Link State" )); retCode = eSIR_SME_RESOURCES_UNAVAILABLE; return retCode; } vos_mem_set((tANI_U8 *) pLinkStateParams, sizeof(tLinkStateParams), 0); pLinkStateParams->state = state; /* Copy Mac address */ sirCopyMacAddr(pLinkStateParams->bssid,bssId); sirCopyMacAddr(pLinkStateParams->selfMacAddr, selfMacAddr); pLinkStateParams->ft = 1; pLinkStateParams->session = psessionEntry; msgQ.type = WDA_SET_LINK_STATE; msgQ.reserved = 0; msgQ.bodyptr = pLinkStateParams; msgQ.bodyval = 0; if(NULL == psessionEntry) { MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type)); } else { MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msgQ.type)); } retCode = (tANI_U32)wdaPostCtrlMsg(pMac, &msgQ); if (retCode != eSIR_SUCCESS) { vos_mem_free(pLinkStateParams); limLog(pMac, LOGP, FL("Posting link state %d failed, reason = %x "), state, retCode); } return retCode; } #endif /** --------------------------------------------------------- \fn limSendSetTxPowerReq \brief LIM sends a WDA_SET_TX_POWER_REQ message to WDA \param tpAniSirGlobal pMac \param tpSirSetTxPowerReq request message \return None -----------------------------------------------------------*/ tSirRetStatus limSendSetTxPowerReq(tpAniSirGlobal pMac, tANI_U32 *pMsgBuf) { tSirSetTxPowerReq *txPowerReq; tSirRetStatus retCode = eSIR_SUCCESS; tSirMsgQ msgQ; tpPESession psessionEntry; tANI_U8 sessionId = 0; if (NULL == pMsgBuf) return eSIR_FAILURE; txPowerReq = vos_mem_malloc(sizeof(tSirSetTxPowerReq)); if ( NULL == txPowerReq ) { return eSIR_FAILURE; } vos_mem_copy(txPowerReq, (tSirSetTxPowerReq *)pMsgBuf, sizeof(tSirSetTxPowerReq)); /* Found corresponding seesion to find BSS IDX */ psessionEntry = peFindSessionByBssid(pMac, txPowerReq->bssId, &sessionId); if (NULL == psessionEntry) { vos_mem_free(txPowerReq); limLog(pMac, LOGE, FL("Session does not exist for given 
BSSID")); return eSIR_FAILURE; } /* FW API requests BSS IDX */ txPowerReq->bssIdx = psessionEntry->bssIdx; msgQ.type = WDA_SET_TX_POWER_REQ; msgQ.reserved = 0; msgQ.bodyptr = txPowerReq; msgQ.bodyval = 0; PELOGW(limLog(pMac, LOGW, FL("Sending WDA_SET_TX_POWER_REQ to WDA"));) MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type)); retCode = wdaPostCtrlMsg(pMac, &msgQ); if (eSIR_SUCCESS != retCode) { limLog(pMac, LOGP, FL("Posting WDA_SET_TX_POWER_REQ to WDA failed, reason=%X"), retCode); vos_mem_free(txPowerReq); return retCode; } return retCode; } /** --------------------------------------------------------- \fn limSendGetTxPowerReq \brief LIM sends a WDA_GET_TX_POWER_REQ message to WDA \param tpAniSirGlobal pMac \param tpSirGetTxPowerReq request message \return None -----------------------------------------------------------*/ tSirRetStatus limSendGetTxPowerReq(tpAniSirGlobal pMac, tpSirGetTxPowerReq pTxPowerReq) { tSirRetStatus retCode = eSIR_SUCCESS; tSirMsgQ msgQ; if (NULL == pTxPowerReq) return retCode; msgQ.type = WDA_GET_TX_POWER_REQ; msgQ.reserved = 0; msgQ.bodyptr = pTxPowerReq; msgQ.bodyval = 0; PELOGW(limLog(pMac, LOGW, FL( "Sending WDA_GET_TX_POWER_REQ to WDA"));) MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type)); if( eSIR_SUCCESS != (retCode = wdaPostCtrlMsg( pMac, &msgQ ))) { limLog( pMac, LOGP, FL("Posting WDA_GET_TX_POWER_REQ to WDA failed, reason=%X"), retCode ); if (NULL != pTxPowerReq) { vos_mem_free(pTxPowerReq); } return retCode; } return retCode; } /** --------------------------------------------------------- \fn limSendBeaconFilterInfo \brief LIM sends beacon filtering info to WDA \param tpAniSirGlobal pMac \return None -----------------------------------------------------------*/ tSirRetStatus limSendBeaconFilterInfo(tpAniSirGlobal pMac,tpPESession psessionEntry) { tpBeaconFilterMsg pBeaconFilterMsg = NULL; tSirRetStatus retCode = eSIR_SUCCESS; tSirMsgQ msgQ; tANI_U8 *ptr; tANI_U32 i; tANI_U32 msgSize; tpBeaconFilterIe pIe; if( psessionEntry == 
NULL ) { limLog( pMac, LOGE, FL("Fail to find the right session ")); retCode = eSIR_FAILURE; return retCode; } msgSize = sizeof(tBeaconFilterMsg) + sizeof(beaconFilterTable); pBeaconFilterMsg = vos_mem_malloc(msgSize); if ( NULL == pBeaconFilterMsg ) { limLog( pMac, LOGP, FL("Fail to allocate memory for beaconFiilterMsg ")); retCode = eSIR_MEM_ALLOC_FAILED; return retCode; } vos_mem_set((tANI_U8 *) pBeaconFilterMsg, msgSize, 0); // Fill in capability Info and mask //TBD-RAJESH get the BSS capability from session. //Don't send this message if no active Infra session is found. pBeaconFilterMsg->capabilityInfo = psessionEntry->limCurrentBssCaps; pBeaconFilterMsg->capabilityMask = CAPABILITY_FILTER_MASK; pBeaconFilterMsg->beaconInterval = (tANI_U16) psessionEntry->beaconParams.beaconInterval; // Fill in number of IEs in beaconFilterTable pBeaconFilterMsg->ieNum = (tANI_U16) (sizeof(beaconFilterTable) / sizeof(tBeaconFilterIe)); //Fill the BSSIDX pBeaconFilterMsg->bssIdx = psessionEntry->bssIdx; //Fill message with info contained in the beaconFilterTable ptr = (tANI_U8 *)pBeaconFilterMsg + sizeof(tBeaconFilterMsg); for(i=0; i < (pBeaconFilterMsg->ieNum); i++) { pIe = (tpBeaconFilterIe) ptr; pIe->elementId = beaconFilterTable[i].elementId; pIe->checkIePresence = beaconFilterTable[i].checkIePresence; pIe->byte.offset = beaconFilterTable[i].byte.offset; pIe->byte.value = beaconFilterTable[i].byte.value; pIe->byte.bitMask = beaconFilterTable[i].byte.bitMask; pIe->byte.ref = beaconFilterTable[i].byte.ref; ptr += sizeof(tBeaconFilterIe); } msgQ.type = WDA_BEACON_FILTER_IND; msgQ.reserved = 0; msgQ.bodyptr = pBeaconFilterMsg; msgQ.bodyval = 0; limLog( pMac, LOG3, FL( "Sending WDA_BEACON_FILTER_IND..." 
)); MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msgQ.type)); if( eSIR_SUCCESS != (retCode = wdaPostCtrlMsg( pMac, &msgQ ))) { vos_mem_free(pBeaconFilterMsg); limLog( pMac, LOGP, FL("Posting WDA_BEACON_FILTER_IND to WDA failed, reason=%X"), retCode ); return retCode; } return retCode; } #ifdef WLAN_FEATURE_11AC tSirRetStatus limSendModeUpdate(tpAniSirGlobal pMac, tUpdateVHTOpMode *pTempParam, tpPESession psessionEntry ) { tUpdateVHTOpMode *pVhtOpMode = NULL; tSirRetStatus retCode = eSIR_SUCCESS; tSirMsgQ msgQ; pVhtOpMode = vos_mem_malloc(sizeof(tUpdateVHTOpMode)); if ( NULL == pVhtOpMode ) { limLog( pMac, LOGP, FL( "Unable to allocate memory during Update Op Mode" )); return eSIR_MEM_ALLOC_FAILED; } vos_mem_copy((tANI_U8 *)pVhtOpMode, pTempParam, sizeof(tUpdateVHTOpMode)); msgQ.type = WDA_UPDATE_OP_MODE; msgQ.reserved = 0; msgQ.bodyptr = pVhtOpMode; msgQ.bodyval = 0; PELOG3(limLog( pMac, LOG3, FL( "Sending WDA_UPDATE_OP_MODE" ));) if(NULL == psessionEntry) { MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type)); } else { MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msgQ.type)); } if( eSIR_SUCCESS != (retCode = wdaPostCtrlMsg( pMac, &msgQ ))) { vos_mem_free(pVhtOpMode); limLog( pMac, LOGP, FL("Posting WDA_UPDATE_OP_MODE to WDA failed, reason=%X"), retCode ); } return retCode; } tSirRetStatus limSendRxNssUpdate(tpAniSirGlobal pMac, tUpdateRxNss *pTempParam, tpPESession psessionEntry ) { tUpdateRxNss *pRxNss = NULL; tSirRetStatus retCode = eSIR_SUCCESS; tSirMsgQ msgQ; pRxNss = vos_mem_malloc(sizeof(tUpdateRxNss)); if ( NULL == pRxNss ) { limLog( pMac, LOGP, FL( "Unable to allocate memory during Update Rx Nss" )); return eSIR_MEM_ALLOC_FAILED; } vos_mem_copy((tANI_U8 *)pRxNss, pTempParam, sizeof(tUpdateRxNss)); msgQ.type = WDA_UPDATE_RX_NSS; msgQ.reserved = 0; msgQ.bodyptr = pRxNss; msgQ.bodyval = 0; PELOG3(limLog( pMac, LOG3, FL( "Sending WDA_UPDATE_RX_NSS" ));) if(NULL == psessionEntry) { MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type)); } else { 
MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msgQ.type)); } if( eSIR_SUCCESS != (retCode = wdaPostCtrlMsg( pMac, &msgQ ))) { vos_mem_free(pRxNss); limLog( pMac, LOGP, FL("Posting WDA_UPDATE_RX_NSS to WDA failed, reason=%X"), retCode ); } return retCode; } tSirRetStatus limSetMembership(tpAniSirGlobal pMac, tUpdateMembership *pTempParam, tpPESession psessionEntry ) { tUpdateMembership *pMembership = NULL; tSirRetStatus retCode = eSIR_SUCCESS; tSirMsgQ msgQ; pMembership = vos_mem_malloc(sizeof(tUpdateMembership)); if ( NULL == pMembership ) { limLog( pMac, LOGP, FL( "Unable to allocate memory during Update Membership Mode" )); return eSIR_MEM_ALLOC_FAILED; } vos_mem_copy((tANI_U8 *)pMembership, pTempParam, sizeof(tUpdateMembership)); msgQ.type = WDA_UPDATE_MEMBERSHIP; msgQ.reserved = 0; msgQ.bodyptr = pMembership; msgQ.bodyval = 0; PELOG3(limLog( pMac, LOG3, FL( "Sending WDA_UPDATE_MEMBERSHIP" ));) if(NULL == psessionEntry) { MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type)); } else { MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msgQ.type)); } if( eSIR_SUCCESS != (retCode = wdaPostCtrlMsg( pMac, &msgQ ))) { vos_mem_free(pMembership); limLog( pMac, LOGP, FL("Posting WDA_UPDATE_MEMBERSHIP to WDA failed, reason=%X"), retCode ); } return retCode; } tSirRetStatus limSetUserPos(tpAniSirGlobal pMac, tUpdateUserPos *pTempParam, tpPESession psessionEntry ) { tUpdateUserPos *pUserPos = NULL; tSirRetStatus retCode = eSIR_SUCCESS; tSirMsgQ msgQ; pUserPos = vos_mem_malloc(sizeof(tUpdateUserPos)); if ( NULL == pUserPos ) { limLog( pMac, LOGP, FL( "Unable to allocate memory during Update User Position" )); return eSIR_MEM_ALLOC_FAILED; } vos_mem_copy((tANI_U8 *)pUserPos, pTempParam, sizeof(tUpdateUserPos)); msgQ.type = WDA_UPDATE_USERPOS; msgQ.reserved = 0; msgQ.bodyptr = pUserPos; msgQ.bodyval = 0; PELOG3(limLog( pMac, LOG3, FL( "Sending WDA_UPDATE_USERPOS" ));) if(NULL == psessionEntry) { MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type)); } else { 
MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msgQ.type)); } if( eSIR_SUCCESS != (retCode = wdaPostCtrlMsg( pMac, &msgQ ))) { vos_mem_free(pUserPos); limLog( pMac, LOGP, FL("Posting WDA_UPDATE_USERPOS to WDA failed, reason=%X"), retCode ); } return retCode; } #endif #ifdef WLAN_FEATURE_11W /** --------------------------------------------------------- \fn limSendExcludeUnencryptInd \brief LIM sends a message to HAL to indicate whether to ignore or indicate the unprotected packet error \param tpAniSirGlobal pMac \param tANI_BOOLEAN excludeUnenc - true: ignore, false: indicate \param tpPESession psessionEntry - session context \return status -----------------------------------------------------------*/ tSirRetStatus limSendExcludeUnencryptInd(tpAniSirGlobal pMac, tANI_BOOLEAN excludeUnenc, tpPESession psessionEntry) { tSirRetStatus retCode = eSIR_SUCCESS; tSirMsgQ msgQ; tSirWlanExcludeUnencryptParam * pExcludeUnencryptParam; pExcludeUnencryptParam = vos_mem_malloc(sizeof(tSirWlanExcludeUnencryptParam)); if ( NULL == pExcludeUnencryptParam ) { limLog(pMac, LOGP, FL( "Unable to allocate memory during limSendExcludeUnencryptInd")); return eSIR_MEM_ALLOC_FAILED; } pExcludeUnencryptParam->excludeUnencrypt = excludeUnenc; sirCopyMacAddr(pExcludeUnencryptParam->bssId, psessionEntry->bssId); msgQ.type = WDA_EXCLUDE_UNENCRYPTED_IND; msgQ.reserved = 0; msgQ.bodyptr = pExcludeUnencryptParam; msgQ.bodyval = 0; PELOG3(limLog(pMac, LOG3, FL("Sending WDA_EXCLUDE_UNENCRYPTED_IND"));) MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msgQ.type)); retCode = wdaPostCtrlMsg(pMac, &msgQ); if (eSIR_SUCCESS != retCode) { vos_mem_free(pExcludeUnencryptParam); limLog(pMac, LOGP, FL("Posting WDA_EXCLUDE_UNENCRYPTED_IND to WDA failed, reason=%X"), retCode); } return retCode; } #endif
gpl-2.0
wengpingbo/linux
drivers/input/rmi4/rmi_f12.c
69
12121
/* * Copyright (c) 2012-2016 Synaptics Incorporated * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/input.h> #include <linux/input/mt.h> #include <linux/rmi.h> #include "rmi_driver.h" #include "rmi_2d_sensor.h" enum rmi_f12_object_type { RMI_F12_OBJECT_NONE = 0x00, RMI_F12_OBJECT_FINGER = 0x01, RMI_F12_OBJECT_STYLUS = 0x02, RMI_F12_OBJECT_PALM = 0x03, RMI_F12_OBJECT_UNCLASSIFIED = 0x04, RMI_F12_OBJECT_GLOVED_FINGER = 0x06, RMI_F12_OBJECT_NARROW_OBJECT = 0x07, RMI_F12_OBJECT_HAND_EDGE = 0x08, RMI_F12_OBJECT_COVER = 0x0A, RMI_F12_OBJECT_STYLUS_2 = 0x0B, RMI_F12_OBJECT_ERASER = 0x0C, RMI_F12_OBJECT_SMALL_OBJECT = 0x0D, }; struct f12_data { struct rmi_function *fn; struct rmi_2d_sensor sensor; struct rmi_2d_sensor_platform_data sensor_pdata; u16 data_addr; struct rmi_register_descriptor query_reg_desc; struct rmi_register_descriptor control_reg_desc; struct rmi_register_descriptor data_reg_desc; /* F12 Data1 describes sensed objects */ const struct rmi_register_desc_item *data1; u16 data1_offset; /* F12 Data5 describes finger ACM */ const struct rmi_register_desc_item *data5; u16 data5_offset; /* F12 Data5 describes Pen */ const struct rmi_register_desc_item *data6; u16 data6_offset; /* F12 Data9 reports relative data */ const struct rmi_register_desc_item *data9; u16 data9_offset; const struct rmi_register_desc_item *data15; u16 data15_offset; }; static int rmi_f12_read_sensor_tuning(struct f12_data *f12) { const struct rmi_register_desc_item *item; struct rmi_2d_sensor *sensor = &f12->sensor; struct rmi_function *fn = sensor->fn; struct rmi_device *rmi_dev = fn->rmi_dev; int ret; int offset; u8 buf[14]; int pitch_x = 0; int pitch_y = 0; int clip_x_low = 0; int clip_x_high = 0; int clip_y_low = 0; int clip_y_high = 0; int rx_receivers = 0; int tx_receivers = 0; int sensor_flags = 0; item = 
rmi_get_register_desc_item(&f12->control_reg_desc, 8); if (!item) { dev_err(&fn->dev, "F12 does not have the sensor tuning control register\n"); return -ENODEV; } offset = rmi_register_desc_calc_reg_offset(&f12->control_reg_desc, 8); if (item->reg_size > 14) { dev_err(&fn->dev, "F12 control8 should be 14 bytes, not: %ld\n", item->reg_size); return -ENODEV; } ret = rmi_read_block(rmi_dev, fn->fd.control_base_addr + offset, buf, item->reg_size); if (ret) return ret; offset = 0; if (rmi_register_desc_has_subpacket(item, 0)) { sensor->max_x = (buf[offset + 1] << 8) | buf[offset]; sensor->max_y = (buf[offset + 3] << 8) | buf[offset + 2]; offset += 4; } rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: max_x: %d max_y: %d\n", __func__, sensor->max_x, sensor->max_y); if (rmi_register_desc_has_subpacket(item, 1)) { pitch_x = (buf[offset + 1] << 8) | buf[offset]; pitch_y = (buf[offset + 3] << 8) | buf[offset + 2]; offset += 4; } if (rmi_register_desc_has_subpacket(item, 2)) { sensor->axis_align.clip_x_low = buf[offset]; sensor->axis_align.clip_x_high = sensor->max_x - buf[offset + 1]; sensor->axis_align.clip_y_low = buf[offset + 2]; sensor->axis_align.clip_y_high = sensor->max_y - buf[offset + 3]; offset += 4; } rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: x low: %d x high: %d y low: %d y high: %d\n", __func__, clip_x_low, clip_x_high, clip_y_low, clip_y_high); if (rmi_register_desc_has_subpacket(item, 3)) { rx_receivers = buf[offset]; tx_receivers = buf[offset + 1]; offset += 2; } if (rmi_register_desc_has_subpacket(item, 4)) { sensor_flags = buf[offset]; offset += 1; } sensor->x_mm = (pitch_x * rx_receivers) >> 12; sensor->y_mm = (pitch_y * tx_receivers) >> 12; rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: x_mm: %d y_mm: %d\n", __func__, sensor->x_mm, sensor->y_mm); return 0; } static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1) { int i; struct rmi_2d_sensor *sensor = &f12->sensor; for (i = 0; i < f12->data1->num_subpackets; i++) { struct rmi_2d_sensor_abs_object *obj = 
&sensor->objs[i]; obj->type = RMI_2D_OBJECT_NONE; obj->mt_tool = MT_TOOL_FINGER; switch (data1[0]) { case RMI_F12_OBJECT_FINGER: obj->type = RMI_2D_OBJECT_FINGER; break; case RMI_F12_OBJECT_STYLUS: obj->type = RMI_2D_OBJECT_STYLUS; obj->mt_tool = MT_TOOL_PEN; break; case RMI_F12_OBJECT_PALM: obj->type = RMI_2D_OBJECT_PALM; obj->mt_tool = MT_TOOL_PALM; break; case RMI_F12_OBJECT_UNCLASSIFIED: obj->type = RMI_2D_OBJECT_UNCLASSIFIED; break; } obj->x = (data1[2] << 8) | data1[1]; obj->y = (data1[4] << 8) | data1[3]; obj->z = data1[5]; obj->wx = data1[6]; obj->wy = data1[7]; rmi_2d_sensor_abs_process(sensor, obj, i); data1 += 8; } if (sensor->kernel_tracking) input_mt_assign_slots(sensor->input, sensor->tracking_slots, sensor->tracking_pos, sensor->nbr_fingers, sensor->dmax); for (i = 0; i < sensor->nbr_fingers; i++) rmi_2d_sensor_abs_report(sensor, &sensor->objs[i], i); } static int rmi_f12_attention(struct rmi_function *fn, unsigned long *irq_nr_regs) { int retval; struct rmi_device *rmi_dev = fn->rmi_dev; struct f12_data *f12 = dev_get_drvdata(&fn->dev); struct rmi_2d_sensor *sensor = &f12->sensor; if (rmi_dev->xport->attn_data) { memcpy(sensor->data_pkt, rmi_dev->xport->attn_data, sensor->attn_size); rmi_dev->xport->attn_data += sensor->attn_size; rmi_dev->xport->attn_size -= sensor->attn_size; } else { retval = rmi_read_block(rmi_dev, f12->data_addr, sensor->data_pkt, sensor->pkt_size); if (retval < 0) { dev_err(&fn->dev, "Failed to read object data. 
Code: %d.\n", retval); return retval; } } if (f12->data1) rmi_f12_process_objects(f12, &sensor->data_pkt[f12->data1_offset]); input_mt_sync_frame(sensor->input); return 0; } static int rmi_f12_config(struct rmi_function *fn) { struct rmi_driver *drv = fn->rmi_dev->driver; drv->set_irq_bits(fn->rmi_dev, fn->irq_mask); return 0; } static int rmi_f12_probe(struct rmi_function *fn) { struct f12_data *f12; int ret; struct rmi_device *rmi_dev = fn->rmi_dev; char buf; u16 query_addr = fn->fd.query_base_addr; const struct rmi_register_desc_item *item; struct rmi_2d_sensor *sensor; struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev); struct rmi_transport_dev *xport = rmi_dev->xport; u16 data_offset = 0; rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s\n", __func__); ret = rmi_read(fn->rmi_dev, query_addr, &buf); if (ret < 0) { dev_err(&fn->dev, "Failed to read general info register: %d\n", ret); return -ENODEV; } ++query_addr; if (!(buf & 0x1)) { dev_err(&fn->dev, "Behavior of F12 without register descriptors is undefined.\n"); return -ENODEV; } f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data), GFP_KERNEL); if (!f12) return -ENOMEM; if (fn->dev.of_node) { ret = rmi_2d_sensor_of_probe(&fn->dev, &f12->sensor_pdata); if (ret) return ret; } else if (pdata->sensor_pdata) { f12->sensor_pdata = *pdata->sensor_pdata; } ret = rmi_read_register_desc(rmi_dev, query_addr, &f12->query_reg_desc); if (ret) { dev_err(&fn->dev, "Failed to read the Query Register Descriptor: %d\n", ret); return ret; } query_addr += 3; ret = rmi_read_register_desc(rmi_dev, query_addr, &f12->control_reg_desc); if (ret) { dev_err(&fn->dev, "Failed to read the Control Register Descriptor: %d\n", ret); return ret; } query_addr += 3; ret = rmi_read_register_desc(rmi_dev, query_addr, &f12->data_reg_desc); if (ret) { dev_err(&fn->dev, "Failed to read the Data Register Descriptor: %d\n", ret); return ret; } query_addr += 3; sensor = &f12->sensor; sensor->fn = fn; f12->data_addr = fn->fd.data_base_addr; 
sensor->pkt_size = rmi_register_desc_calc_size(&f12->data_reg_desc); sensor->axis_align = f12->sensor_pdata.axis_align; sensor->x_mm = f12->sensor_pdata.x_mm; sensor->y_mm = f12->sensor_pdata.y_mm; if (sensor->sensor_type == rmi_sensor_default) sensor->sensor_type = f12->sensor_pdata.sensor_type; rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: data packet size: %d\n", __func__, sensor->pkt_size); sensor->data_pkt = devm_kzalloc(&fn->dev, sensor->pkt_size, GFP_KERNEL); if (!sensor->data_pkt) return -ENOMEM; dev_set_drvdata(&fn->dev, f12); ret = rmi_f12_read_sensor_tuning(f12); if (ret) return ret; /* * Figure out what data is contained in the data registers. HID devices * may have registers defined, but their data is not reported in the * HID attention report. Registers which are not reported in the HID * attention report check to see if the device is receiving data from * HID attention reports. */ item = rmi_get_register_desc_item(&f12->data_reg_desc, 0); if (item && !xport->attn_data) data_offset += item->reg_size; item = rmi_get_register_desc_item(&f12->data_reg_desc, 1); if (item) { f12->data1 = item; f12->data1_offset = data_offset; data_offset += item->reg_size; sensor->nbr_fingers = item->num_subpackets; sensor->report_abs = 1; sensor->attn_size += item->reg_size; } item = rmi_get_register_desc_item(&f12->data_reg_desc, 2); if (item && !xport->attn_data) data_offset += item->reg_size; item = rmi_get_register_desc_item(&f12->data_reg_desc, 3); if (item && !xport->attn_data) data_offset += item->reg_size; item = rmi_get_register_desc_item(&f12->data_reg_desc, 4); if (item && !xport->attn_data) data_offset += item->reg_size; item = rmi_get_register_desc_item(&f12->data_reg_desc, 5); if (item) { f12->data5 = item; f12->data5_offset = data_offset; data_offset += item->reg_size; sensor->attn_size += item->reg_size; } item = rmi_get_register_desc_item(&f12->data_reg_desc, 6); if (item && !xport->attn_data) { f12->data6 = item; f12->data6_offset = data_offset; data_offset += 
item->reg_size; } item = rmi_get_register_desc_item(&f12->data_reg_desc, 7); if (item && !xport->attn_data) data_offset += item->reg_size; item = rmi_get_register_desc_item(&f12->data_reg_desc, 8); if (item && !xport->attn_data) data_offset += item->reg_size; item = rmi_get_register_desc_item(&f12->data_reg_desc, 9); if (item && !xport->attn_data) { f12->data9 = item; f12->data9_offset = data_offset; data_offset += item->reg_size; if (!sensor->report_abs) sensor->report_rel = 1; } item = rmi_get_register_desc_item(&f12->data_reg_desc, 10); if (item && !xport->attn_data) data_offset += item->reg_size; item = rmi_get_register_desc_item(&f12->data_reg_desc, 11); if (item && !xport->attn_data) data_offset += item->reg_size; item = rmi_get_register_desc_item(&f12->data_reg_desc, 12); if (item && !xport->attn_data) data_offset += item->reg_size; item = rmi_get_register_desc_item(&f12->data_reg_desc, 13); if (item && !xport->attn_data) data_offset += item->reg_size; item = rmi_get_register_desc_item(&f12->data_reg_desc, 14); if (item && !xport->attn_data) data_offset += item->reg_size; item = rmi_get_register_desc_item(&f12->data_reg_desc, 15); if (item && !xport->attn_data) { f12->data15 = item; f12->data15_offset = data_offset; data_offset += item->reg_size; } /* allocate the in-kernel tracking buffers */ sensor->tracking_pos = devm_kzalloc(&fn->dev, sizeof(struct input_mt_pos) * sensor->nbr_fingers, GFP_KERNEL); sensor->tracking_slots = devm_kzalloc(&fn->dev, sizeof(int) * sensor->nbr_fingers, GFP_KERNEL); sensor->objs = devm_kzalloc(&fn->dev, sizeof(struct rmi_2d_sensor_abs_object) * sensor->nbr_fingers, GFP_KERNEL); if (!sensor->tracking_pos || !sensor->tracking_slots || !sensor->objs) return -ENOMEM; ret = rmi_2d_sensor_configure_input(fn, sensor); if (ret) return ret; return 0; } struct rmi_function_handler rmi_f12_handler = { .driver = { .name = "rmi4_f12", }, .func = 0x12, .probe = rmi_f12_probe, .config = rmi_f12_config, .attention = rmi_f12_attention, };
gpl-2.0
Gamer125/limbo-android
jni/SDL_mixer/timidity/filter.c
69
5108
/* TiMidity -- Experimental MIDI to WAVE converter Copyright (C) 1995 Tuukka Toivonen <toivonen@clinet.fi> This program is free software; you can redistribute it and/or modify it under the terms of the Perl Artistic License, available in COPYING. filter.c: written by Vincent Pagel ( pagel@loria.fr ) implements fir antialiasing filter : should help when setting sample rates as low as 8Khz. April 95 - first draft 22/5/95 - modify "filter" so that it simulate leading and trailing 0 in the buffer */ #include <stdio.h> #include <string.h> #include <math.h> #include <stdlib.h> #include "config.h" #include "common.h" #include "ctrlmode.h" #include "instrum.h" #include "filter.h" /* bessel function */ static float ino(float x) { float y, de, e, sde; int i; y = x / 2; e = 1.0; de = 1.0; i = 1; do { de = de * y / (float) i; sde = de * de; e += sde; } while (!( (e * 1.0e-08 - sde > 0) || (i++ > 25) )); return(e); } /* Kaiser Window (symetric) */ static void kaiser(float *w,int n,float beta) { float xind, xi; int i; xind = (float)((2*n - 1) * (2*n - 1)); for (i =0; i<n ; i++) { xi = (float)(i + 0.5); w[i] = ino((float)(beta * sqrt((double)(1. 
- 4 * xi * xi / xind)))) / ino((float)beta); } } /* * fir coef in g, cuttoff frequency in fc */ static void designfir(float *g , float fc) { int i; float xi, omega, att, beta ; float w[ORDER2]; for (i =0; i < ORDER2 ;i++) { xi = (float) (i + 0.5); omega = (float)(PI * xi); g[i] = (float)(sin( (double) omega * fc) / omega); } att = 40.; /* attenuation in db */ beta = (float) (exp(log((double)0.58417 * (att - 20.96)) * 0.4) + 0.07886 * (att - 20.96)); kaiser( w, ORDER2, beta); /* Matrix product */ for (i =0; i < ORDER2 ; i++) g[i] = g[i] * w[i]; } /* * FIR filtering -> apply the filter given by coef[] to the data buffer * Note that we simulate leading and trailing 0 at the border of the * data buffer */ static void filter(sample_t *result,sample_t *data, int32 length,float coef[]) { int32 sample,i,sample_window; int16 peak = 0; float sum; /* Simulate leading 0 at the begining of the buffer */ for (sample = 0; sample < ORDER2 ; sample++ ) { sum = 0.0; sample_window= sample - ORDER2; for (i = 0; i < ORDER ;i++) sum += (float)(coef[i] * ((sample_window<0)? 0.0 : data[sample_window++])) ; /* Saturation ??? */ if (sum> 32767.) { sum=32767.; peak++; } if (sum< -32768.) { sum=-32768; peak++; } result[sample] = (sample_t) sum; } /* The core of the buffer */ for (sample = ORDER2; sample < length - ORDER + ORDER2 ; sample++ ) { sum = 0.0; sample_window= sample - ORDER2; for (i = 0; i < ORDER ;i++) sum += data[sample_window++] * coef[i]; /* Saturation ??? */ if (sum> 32767.) { sum=32767.; peak++; } if (sum< -32768.) { sum=-32768; peak++; } result[sample] = (sample_t) sum; } /* Simulate 0 at the end of the buffer */ for (sample = length - ORDER + ORDER2; sample < length ; sample++ ) { sum = 0.0; sample_window= sample - ORDER2; for (i = 0; i < ORDER ;i++) sum += (float)(coef[i] * ((sample_window>=length)? 0.0 : data[sample_window++])) ; /* Saturation ??? */ if (sum> 32767.) { sum=32767.; peak++; } if (sum< -32768.) 
{ sum=-32768; peak++; } result[sample] = (sample_t) sum; } if (peak) ctl->cmsg(CMSG_ERROR, VERB_NORMAL, "Saturation %2.3f %%.", 100.0*peak/ (float) length); } /***********************************************************************/ /* Prevent aliasing by filtering any freq above the output_rate */ /* */ /* I don't worry about looping point -> they will remain soft if they */ /* were already */ /***********************************************************************/ void antialiasing(Sample *sp, int32 output_rate ) { sample_t *temp; int i; float fir_symetric[ORDER]; float fir_coef[ORDER2]; float freq_cut; /* cutoff frequency [0..1.0] FREQ_CUT/SAMP_FREQ*/ ctl->cmsg(CMSG_INFO, VERB_NOISY, "Antialiasing: Fsample=%iKHz", sp->sample_rate); /* No oversampling */ if (output_rate>=sp->sample_rate) return; freq_cut= (float) output_rate / (float) sp->sample_rate; ctl->cmsg(CMSG_INFO, VERB_NOISY, "Antialiasing: cutoff=%f%%", freq_cut*100.); designfir(fir_coef,freq_cut); /* Make the filter symetric */ for (i = 0 ; i<ORDER2 ;i++) fir_symetric[ORDER-1 - i] = fir_symetric[i] = fir_coef[ORDER2-1 - i]; /* We apply the filter we have designed on a copy of the patch */ temp = safe_malloc(sp->data_length); memcpy(temp,sp->data,sp->data_length); filter(sp->data,temp,sp->data_length/sizeof(sample_t),fir_symetric); free(temp); }
gpl-2.0
Grarak/grakernel-n1
drivers/net/wireless/iwlegacy/iwl-4965-lib.c
581
35649
/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 * Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-4965-hw.h"
#include "iwl-4965.h"
#include "iwl-sta.h"

/* React to a flush command that failed on its only frame: log it and,
 * unless the driver is shutting down, schedule the Tx flush worker. */
void iwl4965_check_abort_status(struct iwl_priv *priv,
				u8 frame_count, u32 status)
{
	if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
		IWL_ERR(priv, "Tx flush command to flush out all frames\n");
		if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
			queue_work(priv->workqueue, &priv->tx_flush);
	}
}

/*
 * EEPROM
 */

/* Default module parameters for the 4965. */
struct iwl_mod_params iwl4965_mod_params = {
	.amsdu_size_8K = 1,
	.restart_fw = 1,
	/* the rest are 0 by default */
};

/* Return every Rx buffer in the pool to the rx_used list and reset the
 * queue indices, unmapping/freeing any page still attached to a slot. */
void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			__iwl_legacy_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}

/* Program the Rx DMA engine: stop it, point it at the RBD ring and the
 * status area in DRAM, then re-enable it with the configured RB size. */
int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0;

	if (priv->cfg->mod_params->amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
				  (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
				  rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs */
	iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			rb_size |
			(rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			(rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	return 0;
}

/* Select V_MAIN as the power source. */
static void iwl4965_set_pwr_vmain(struct iwl_priv *priv)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

	if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
		iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
				APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
			APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* Bring the NIC up: APM init/config, power source, then allocate-or-reset
 * and initialize the Rx queue and all Tx/command queues. */
int iwl4965_hw_nic_init(struct iwl_priv *priv)
{
	unsigned long flags;
	struct iwl_rx_queue *rxq = &priv->rxq;
	int ret;

	/* nic_init */
	spin_lock_irqsave(&priv->lock, flags);
	priv->cfg->ops->lib->apm_ops.init(priv);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&priv->lock, flags);

	iwl4965_set_pwr_vmain(priv);

	priv->cfg->ops->lib->apm_ops.config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		ret = iwl_legacy_rx_queue_alloc(priv);
		if (ret) {
			IWL_ERR(priv, "Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		iwl4965_rx_queue_reset(priv, rxq);

	iwl4965_rx_replenish(priv);

	iwl4965_rx_init(priv, rxq);

	spin_lock_irqsave(&priv->lock, flags);

	rxq->need_update = 1;
	iwl_legacy_rx_queue_update_write_ptr(priv, rxq);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Allocate or reset and init all Tx and Command queues */
	if (!priv->txq) {
		ret = iwl4965_txq_ctx_alloc(priv);
		if (ret)
			return ret;
	} else
		iwl4965_txq_ctx_reset(priv);

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}

/**
 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
					      dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

/**
 * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
void iwl4965_rx_queue_restock(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv,
							       rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(priv->workqueue, &priv->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
	}
}

/**
 * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free
 *
 * When moving to rx_free an SKB is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_rx_queue_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (priv->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
					       "order: %d\n",
					       priv->hw_params.rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(priv,
					"Failed to alloc_pages with %s. "
					"Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
						 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		/* Re-check under the lock: another path may have drained
		 * rx_used while we were allocating. */
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, priv->hw_params.rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		priv->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}

/* Allocate fresh Rx pages (may sleep) and push them to the hardware. */
void iwl4965_rx_replenish(struct iwl_priv *priv)
{
	unsigned long flags;

	iwl4965_rx_allocate(priv, GFP_KERNEL);

	spin_lock_irqsave(&priv->lock, flags);
	iwl4965_rx_queue_restock(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}

/* Atomic-context variant of iwl4965_rx_replenish (no priv->lock taken). */
void iwl4965_rx_replenish_now(struct iwl_priv *priv)
{
	iwl4965_rx_allocate(priv, GFP_ATOMIC);

	iwl4965_rx_queue_restock(priv);
}

/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
 * This free routine walks the list of POOL entries and if SKB is set to
 * non NULL it is unmapped and freed
 */
void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			__iwl_legacy_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&priv->pci_dev->dev,
			  sizeof(struct iwl_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}

/* Stop the Rx DMA channel and wait (up to 1000 us) for it to go idle. */
int iwl4965_rxq_stop(struct iwl_priv *priv)
{

	/* stop Rx DMA */
	iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);

	return 0;
}

/* Map a hardware rate_n_flags value to a mac80211 rate index;
 * returns -1 when no legacy rate matches. */
int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
{
	int idx = 0;
	int band_offset = 0;

	/* HT rate format: mac80211 wants an MCS number, which is just LSB */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = (rate_n_flags & 0xff);
		return idx;
	/* Legacy rate format, search for match in table */
	} else {
		if (band == IEEE80211_BAND_5GHZ)
			band_offset = IWL_FIRST_OFDM_RATE;
		for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
			if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx - band_offset;
	}

	return -1;
}

/* Compute the received signal strength in dBm from the PHY response. */
static int iwl4965_calc_rssi(struct iwl_priv *priv,
			     struct iwl_rx_phy_res *rx_resp)
{
	/* data from PHY/DSP regarding signal strength, etc.,
	 * contents are always there, not configurable by host. */
	struct iwl4965_rx_non_cfg_phy *ncphy =
	    (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
	u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
			>> IWL49_AGC_DB_POS;

	u32 valid_antennae =
	    (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
			>> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
	u8 max_rssi = 0;
	u32 i;

	/* Find max rssi among 3 possible receivers.
	 * These values are measured by the digital signal processor (DSP).
	 * They should stay fairly constant even as the signal strength varies,
	 * if the radio's automatic gain control (AGC) is working right.
	 * AGC value (see below) will provide the "interesting" info. */
	for (i = 0; i < 3; i++)
		if (valid_antennae & (1 << i))
			max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);

	IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
		ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
		max_rssi, agc);

	/* dBm = max_rssi dB - agc dB - constant.
	 * Higher AGC (higher radio gain) means lower signal. */
	return max_rssi - agc - IWL4965_RSSI_OFFSET;
}

/* Translate the hardware decryption status word into the canonical
 * RX_RES_STATUS_* form consumed by the rest of the Rx path. */
static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
					RX_RES_STATUS_STATION_FOUND)
		decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
				RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* packet was not encrypted */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
					RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* packet was encrypted with unknown alg */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
					RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* decryption was not done in HW */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
					RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* alg is CCM: check MIC only */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;

		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through if TTAK OK */
	default:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
					decrypt_in, decrypt_out);

	return decrypt_out;
}

/* Hand a received frame up to mac80211 (drops it if the interface is
 * closed or hardware decryption reported a failure). */
static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv,
					struct ieee80211_hdr *hdr,
					u16 len,
					u32 ampdu_status,
					struct iwl_rx_mem_buffer *rxb,
					struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
		    "Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!priv->cfg->mod_params->sw_crypto &&
	    iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

	skb = dev_alloc_skb(128);
	if (!skb) {
		IWL_ERR(priv, "dev_alloc_skb failed\n");
		return;
	}

	/* Attach the Rx page as a fragment; ownership of the page moves
	 * to the skb, so clear rxb->page below. */
	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	iwl_legacy_update_stats(priv, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
	priv->alloc_rxb_page--;
	rxb->page = NULL;
}

/* Called for REPLY_RX (legacy ABG frames), or
 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
void iwl4965_rx_reply_rx(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct ieee80211_hdr *header;
	struct ieee80211_rx_status rx_status;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_phy_res *phy_res;
	__le32 rx_pkt_status;
	struct iwl_rx_mpdu_res_start *amsdu;
	u32 len;
	u32 ampdu_status;
	u32 rate_n_flags;

	/**
	 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
	 *	REPLY_RX: physical layer info is in this buffer
	 *	REPLY_RX_MPDU_CMD: physical layer info was sent in separate
	 *		command and cached in priv->last_phy_res
	 *
	 * Here we set up local variables depending on which command is
	 * received.
	 */
	if (pkt->hdr.cmd == REPLY_RX) {
		phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
				+ phy_res->cfg_phy_cnt);

		len = le16_to_cpu(phy_res->byte_count);
		rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
				phy_res->cfg_phy_cnt + len);
		ampdu_status = le32_to_cpu(rx_pkt_status);
	} else {
		if (!priv->_4965.last_phy_res_valid) {
			IWL_ERR(priv, "MPDU frame without cached PHY data\n");
			return;
		}
		phy_res = &priv->_4965.last_phy_res;
		amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
		len = le16_to_cpu(amsdu->byte_count);
		rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
		ampdu_status = iwl4965_translate_rx_status(priv,
				le32_to_cpu(rx_pkt_status));
	}

	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
		/* NOTE(review): "/n" in this format string looks like a typo
		 * for "\n" -- left as-is since it is a runtime string. */
		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
				phy_res->cfg_phy_cnt);
		return;
	}

	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
				le32_to_cpu(rx_pkt_status));
		return;
	}

	/* This will be used in several places later */
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);

	/* rx_status carries information about the packet to mac80211 */
	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
	rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
	rx_status.freq =
		ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
					rx_status.band);
	rx_status.rate_idx =
		iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
	rx_status.flag = 0;

	/* TSF isn't reliable. In order to allow smooth user experience,
	 * this W/A doesn't propagate it to the mac80211 */
	/*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/

	priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);

	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
	rx_status.signal = iwl4965_calc_rssi(priv, phy_res);

	iwl_legacy_dbg_log_rx_data_frame(priv, len, header);
	IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
		rx_status.signal, (unsigned long long)rx_status.mactime);

	/*
	 * "antenna number"
	 *
	 * It seems that the antenna field in the phy flags value
	 * is actually a bit field. This is undefined by radiotap,
	 * it wants an actual antenna number but I always get "7"
	 * for most legacy frames I receive indicating that the
	 * same frame was received on all three RX chains.
	 *
	 * I think this field should be removed in favor of a
	 * new 802.11n radiotap field "RX chains" that is defined
	 * as a bitmask.
	 */
	rx_status.antenna =
		(le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
		>> RX_RES_PHY_FLAGS_ANTENNA_POS;

	/* set the preamble flag if appropriate */
	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.flag |= RX_FLAG_SHORTPRE;

	/* Set up the HT phy flags */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		rx_status.flag |= RX_FLAG_HT;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		rx_status.flag |= RX_FLAG_40MHZ;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status.flag |= RX_FLAG_SHORT_GI;

	iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status,
						rxb, &rx_status);
}

/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD.
 */
void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
			    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	priv->_4965.last_phy_res_valid = true;
	memcpy(&priv->_4965.last_phy_res, pkt->u.raw,
	       sizeof(struct iwl_rx_phy_res));
}

/* Fill scan_ch[] with one entry per requested channel on 'band';
 * returns the number of entries written. */
static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
				     struct ieee80211_vif *vif,
				     enum ieee80211_band band,
				     u8 is_active, u8 n_probes,
				     struct iwl_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct iwl_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = iwl_get_hw_mode(priv, band);
	if (!sband)
		return 0;

	active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
	passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);

	/* Passive dwell must exceed active dwell */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
		chan = priv->scan_request->channels[i];

		if (chan->band != band)
			continue;

		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);

		ch_info = iwl_legacy_get_channel_info(priv, band, channel);
		if (!iwl_legacy_is_channel_valid(ch_info)) {
			IWL_DEBUG_SCAN(priv,
				 "Channel %d is INVALID for this band.\n",
					channel);
			continue;
		}

		if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;

		if (n_probes)
			scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;

		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
		 * power level:
		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
		 */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));

		IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
			       channel, le32_to_cpu(scan_ch->type),
			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
				"ACTIVE" : "PASSIVE",
			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
			       active_dwell : passive_dwell);

		scan_ch++;
		added++;
	}

	IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
	return added;
}

/* Build and send the REPLY_SCAN_CMD host command for the current scan
 * request.  Caller must hold priv->mutex. */
int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_CMD,
		.len = sizeof(struct iwl_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct iwl_scan_cmd *scan;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	u32 rate_flags = 0;
	u16 cmd_len;
	u16 rx_chain = 0;
	enum ieee80211_band band;
	u8 n_probes = 0;
	u8 rx_ant = priv->hw_params.valid_rx_ant;
	u8 rate;
	bool is_active = false;
	int chan_mod;
	u8 active_chains;
	u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
	int ret;

	lockdep_assert_held(&priv->mutex);

	if (vif)
		ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	/* Lazily allocate the (huge) scan command buffer once */
	if (!priv->scan_cmd) {
		priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
					 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
		if (!priv->scan_cmd) {
			IWL_DEBUG_SCAN(priv,
				       "fail to allocate memory for scan\n");
			return -ENOMEM;
		}
	}
	scan = priv->scan_cmd;
	memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;

	if (iwl_legacy_is_any_associated(priv)) {
		u16 interval;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
		interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;

		/* Encode suspend time as beacon count (<<22) plus a
		 * remainder in microseconds-ish units (*1024). */
		extra = (suspend_time / interval) << 22;
		scan_suspend_time = (extra |
		    ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
			       scan_suspend_time, interval);
	}

	if (priv->scan_request->n_ssids) {
		int i, p = 0;
		IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
		for (i = 0; i < priv->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!priv->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
				priv->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       priv->scan_request->ssids[i].ssid,
			       priv->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		IWL_DEBUG_SCAN(priv, "Start passive scan.\n");

	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = ctx->bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	switch (priv->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod = le32_to_cpu(
			priv->contexts[IWL_RXON_CTX_BSS].active.flags &
						RXON_FLG_CHANNEL_MODE_MSK)
				       >> RXON_FLG_CHANNEL_MODE_POS;
		if (chan_mod == CHANNEL_MODE_PURE_40) {
			rate = IWL_RATE_6M_PLCP;
		} else {
			rate = IWL_RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		break;
	case IEEE80211_BAND_5GHZ:
		rate = IWL_RATE_6M_PLCP;
		break;
	default:
		IWL_WARN(priv, "Invalid scan band\n");
		return -EIO;
	}

	/*
	 * If active scanning is requested but a certain channel is
	 * marked passive, we can do active scanning if we detect
	 * transmissions.
	 *
	 * There is an issue with some firmware versions that triggers
	 * a sysassert on a "good CRC threshold" of zero (== disabled),
	 * on a radar channel even though this means that we should NOT
	 * send probes.
	 *
	 * The "good CRC threshold" is the number of frames that we
	 * need to receive during our dwell time on a channel before
	 * sending out probes -- setting this to a huge value will
	 * mean we never reach it, but at the same time work around
	 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
	 * here instead of IWL_GOOD_CRC_TH_DISABLED.
	 */
	scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
					IWL_GOOD_CRC_TH_NEVER;

	band = priv->scan_band;

	if (priv->cfg->scan_rx_antennas[band])
		rx_ant = priv->cfg->scan_rx_antennas[band];

	priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
						priv->scan_tx_ant[band],
						    scan_tx_antennas);
	rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]);
	scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
								rate_flags);

	/* In power save mode use one chain, otherwise use all chains */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* rx_ant has been set to all valid chains previously */
		active_chains = rx_ant &
				((u8)(priv->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;

		IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
				priv->chain_noise_data.active_chains);

		rx_ant = iwl4965_first_antenna(active_chains);
	}

	/* MIMO is not used here, but value is required */
	rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);

	cmd_len = iwl_legacy_fill_probe_req(priv,
					(struct ieee80211_mgmt *)scan->data,
					vif->addr,
					priv->scan_request->ie,
					priv->scan_request->ie_len,
					IWL_MAX_SCAN_SIZE - sizeof(*scan));
	scan->tx_cmd.len = cpu_to_le16(cmd_len);

	scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
			       RXON_FILTER_BCON_AWARE_MSK);

	/* Channel list follows the probe request in scan->data */
	scan->channel_count = iwl4965_get_channels_for_scan(priv, vif, band,
						is_active, n_probes,
						(void *)&scan->data[cmd_len]);
	if (scan->channel_count == 0) {
		IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
		return -EIO;
	}

	cmd.len += le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct iwl_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(STATUS_SCAN_HW, &priv->status);

	ret = iwl_legacy_send_cmd_sync(priv, &cmd);
	if (ret)
		clear_bit(STATUS_SCAN_HW, &priv->status);

	return ret;
}

int
iwl4965_manage_ibss_station(struct iwl_priv *priv,
			       struct ieee80211_vif *vif, bool add)
{
	/* Add or remove the IBSS BSSID station for this vif. */
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;

	if (add)
		return iwl4965_add_bssid_station(priv, vif_priv->ctx,
						vif->bss_conf.bssid,
						&vif_priv->ibss_bssid_sta_id);
	return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
					vif->bss_conf.bssid);
}

/* Decrement a station/TID in-flight TFD count by 'freed', clamping at
 * zero (with a debug message) if the count would underflow. */
void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
			    int sta_id, int tid, int freed)
{
	lockdep_assert_held(&priv->sta_lock);

	if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
	else {
		IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
			priv->stations[sta_id].tid[tid].tfds_in_queue,
			freed);
		priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
	}
}

#define IWL_TX_QUEUE_MSK	0xfffff

/* True when a single Rx chain suffices (static SMPS or single stream). */
static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv)
{
	return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
	       priv->current_ht_config.single_chain_sufficient;
}

#define IWL_NUM_RX_CHAINS_MULTIPLE	3
#define IWL_NUM_RX_CHAINS_SINGLE	2
#define IWL_NUM_IDLE_CHAINS_DUAL	2
#define IWL_NUM_IDLE_CHAINS_SINGLE	1

/*
 * Determine how many receiver/antenna chains to use.
 *
 * More provides better reception via diversity. Fewer saves power
 * at the expense of throughput, but only when not in powersave to
 * start with.
 *
 * MIMO (dual stream) requires at least 2, but works better with 3.
 * This does not determine *which* chains to use, just how many.
 */
static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv)
{
	/* # of Rx chains to use when expecting MIMO. */
	if (iwl4965_is_single_rx_stream(priv))
		return IWL_NUM_RX_CHAINS_SINGLE;
	else
		return IWL_NUM_RX_CHAINS_MULTIPLE;
}

/*
 * When we are in power saving mode, unless device support spatial
 * multiplexing power save, use the active count for rx chain count.
 */
static int
iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
{
	/* # Rx chains when idling, depending on SMPS mode */
	switch (priv->current_ht_config.smps) {
	case IEEE80211_SMPS_STATIC:
	case IEEE80211_SMPS_DYNAMIC:
		return IWL_NUM_IDLE_CHAINS_SINGLE;
	case IEEE80211_SMPS_OFF:
		return active_cnt;
	default:
		WARN(1, "invalid SMPS mode %d",
		     priv->current_ht_config.smps);
		return active_cnt;
	}
}

/* up to 4 chains */
static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap)
{
	/* Popcount of the low four bits of the chain bitmap. */
	u8 res;
	res = (chain_bitmap & BIT(0)) >> 0;
	res += (chain_bitmap & BIT(1)) >> 1;
	res += (chain_bitmap & BIT(2)) >> 2;
	res += (chain_bitmap & BIT(3)) >> 3;
	return res;
}

/**
 * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
 *
 * Selects how many and which Rx receivers/antennas/chains to use.
 * This should not be used for scan command ... it puts data in wrong place.
 */
void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	bool is_single = iwl4965_is_single_rx_stream(priv);
	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, iwl4965_chain_noise_calibration()
	 * checks which antennas actually *are* connected. */
	if (priv->chain_noise_data.active_chains)
		active_chains = priv->chain_noise_data.active_chains;
	else
		active_chains = priv->hw_params.valid_rx_ant;

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = iwl4965_get_active_rx_chain_count(priv);
	idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt);

	/* correct rx chain count according hw settings
	 * and chain noise calibration
	 */
	valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;

	ctx->staging.rx_chain = cpu_to_le16(rx_chain);

	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
		ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
			ctx->staging.rx_chain,
			active_rx_cnt, idle_rx_cnt);

	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}

/* Return the next valid Tx antenna after 'ant' (wrapping), or 'ant'
 * itself when no other antenna in 'valid' is usable. */
u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
{
	int i;
	u8 ind = ant;

	for (i = 0; i < RATE_ANT_NUM - 1; i++) {
		ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
		if (valid & BIT(ind))
			return ind;
	}
	return ant;
}

/* Map an FH register address to its symbolic name for debug output. */
static const char *iwl4965_get_fh_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}

/* Dump the FH registers: into a freshly kmalloc'd *buf when 'display'
 * is set (debug builds; caller frees, returns length), otherwise to
 * the error log.  Returns 0 or a negative errno. */
int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (display) {
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				"FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
				" %34s: 0X%08x\n",
				iwl4965_get_fh_string(fh_tbl[i]),
				iwl_legacy_read_direct32(priv, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(priv, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(priv, " %34s: 0X%08x\n",
			iwl4965_get_fh_string(fh_tbl[i]),
			iwl_legacy_read_direct32(priv, fh_tbl[i]));
	}
	return 0;
}
gpl-2.0
adumont/kernel_morrison
arch/um/kernel/skas/uaccess.c
837
5087
/* * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <linux/err.h> #include <linux/highmem.h> #include <linux/mm.h> #include <linux/sched.h> #include <asm/current.h> #include <asm/page.h> #include <asm/pgtable.h> #include "kern_util.h" #include "os.h" pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; if (mm == NULL) return NULL; pgd = pgd_offset(mm, addr); if (!pgd_present(*pgd)) return NULL; pud = pud_offset(pgd, addr); if (!pud_present(*pud)) return NULL; pmd = pmd_offset(pud, addr); if (!pmd_present(*pmd)) return NULL; return pte_offset_kernel(pmd, addr); } static pte_t *maybe_map(unsigned long virt, int is_write) { pte_t *pte = virt_to_pte(current->mm, virt); int err, dummy_code; if ((pte == NULL) || !pte_present(*pte) || (is_write && !pte_write(*pte))) { err = handle_page_fault(virt, 0, is_write, 1, &dummy_code); if (err) return NULL; pte = virt_to_pte(current->mm, virt); } if (!pte_present(*pte)) pte = NULL; return pte; } static int do_op_one_page(unsigned long addr, int len, int is_write, int (*op)(unsigned long addr, int len, void *arg), void *arg) { jmp_buf buf; struct page *page; pte_t *pte; int n, faulted; pte = maybe_map(addr, is_write); if (pte == NULL) return -1; page = pte_page(*pte); addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) + (addr & ~PAGE_MASK); current->thread.fault_catcher = &buf; faulted = UML_SETJMP(&buf); if (faulted == 0) n = (*op)(addr, len, arg); else n = -1; current->thread.fault_catcher = NULL; kunmap_atomic(page, KM_UML_USERCOPY); return n; } static int buffer_op(unsigned long addr, int len, int is_write, int (*op)(unsigned long, int, void *), void *arg) { int size, remain, n; size = min(PAGE_ALIGN(addr) - addr, (unsigned long) len); remain = len; n = do_op_one_page(addr, size, is_write, op, arg); if (n != 0) { remain = (n < 0 ? 
remain : 0); goto out; } addr += size; remain -= size; if (remain == 0) goto out; while (addr < ((addr + remain) & PAGE_MASK)) { n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg); if (n != 0) { remain = (n < 0 ? remain : 0); goto out; } addr += PAGE_SIZE; remain -= PAGE_SIZE; } if (remain == 0) goto out; n = do_op_one_page(addr, remain, is_write, op, arg); if (n != 0) { remain = (n < 0 ? remain : 0); goto out; } return 0; out: return remain; } static int copy_chunk_from_user(unsigned long from, int len, void *arg) { unsigned long *to_ptr = arg, to = *to_ptr; memcpy((void *) to, (void *) from, len); *to_ptr += len; return 0; } int copy_from_user(void *to, const void __user *from, int n) { if (segment_eq(get_fs(), KERNEL_DS)) { memcpy(to, (__force void*)from, n); return 0; } return access_ok(VERIFY_READ, from, n) ? buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to): n; } static int copy_chunk_to_user(unsigned long to, int len, void *arg) { unsigned long *from_ptr = arg, from = *from_ptr; memcpy((void *) to, (void *) from, len); *from_ptr += len; return 0; } int copy_to_user(void __user *to, const void *from, int n) { if (segment_eq(get_fs(), KERNEL_DS)) { memcpy((__force void *) to, from, n); return 0; } return access_ok(VERIFY_WRITE, to, n) ? 
buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from) : n; } static int strncpy_chunk_from_user(unsigned long from, int len, void *arg) { char **to_ptr = arg, *to = *to_ptr; int n; strncpy(to, (void *) from, len); n = strnlen(to, len); *to_ptr += n; if (n < len) return 1; return 0; } int strncpy_from_user(char *dst, const char __user *src, int count) { int n; char *ptr = dst; if (segment_eq(get_fs(), KERNEL_DS)) { strncpy(dst, (__force void *) src, count); return strnlen(dst, count); } if (!access_ok(VERIFY_READ, src, 1)) return -EFAULT; n = buffer_op((unsigned long) src, count, 0, strncpy_chunk_from_user, &ptr); if (n != 0) return -EFAULT; return strnlen(dst, count); } static int clear_chunk(unsigned long addr, int len, void *unused) { memset((void *) addr, 0, len); return 0; } int __clear_user(void __user *mem, int len) { return buffer_op((unsigned long) mem, len, 1, clear_chunk, NULL); } int clear_user(void __user *mem, int len) { if (segment_eq(get_fs(), KERNEL_DS)) { memset((__force void*)mem, 0, len); return 0; } return access_ok(VERIFY_WRITE, mem, len) ? buffer_op((unsigned long) mem, len, 1, clear_chunk, NULL) : len; } static int strnlen_chunk(unsigned long str, int len, void *arg) { int *len_ptr = arg, n; n = strnlen((void *) str, len); *len_ptr += n; if (n < len) return 1; return 0; } int strnlen_user(const void __user *str, int len) { int count = 0, n; if (segment_eq(get_fs(), KERNEL_DS)) return strnlen((__force char*)str, len) + 1; n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count); if (n == 0) return count + 1; return -EFAULT; }
gpl-2.0
xsilon/linux-xsilon
net/atm/svc.c
837
16380
/* net/atm/svc.c - ATM SVC sockets */ /* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include <linux/string.h> #include <linux/net.h> /* struct socket, struct proto_ops */ #include <linux/errno.h> /* error codes */ #include <linux/kernel.h> /* printk */ #include <linux/skbuff.h> #include <linux/wait.h> #include <linux/sched.h> /* jiffies and HZ */ #include <linux/fcntl.h> /* O_NONBLOCK */ #include <linux/init.h> #include <linux/atm.h> /* ATM stuff */ #include <linux/atmsap.h> #include <linux/atmsvc.h> #include <linux/atmdev.h> #include <linux/bitops.h> #include <net/sock.h> /* for sock_no_* */ #include <linux/uaccess.h> #include <linux/export.h> #include "resources.h" #include "common.h" /* common for PVCs and SVCs */ #include "signaling.h" #include "addr.h" static int svc_create(struct net *net, struct socket *sock, int protocol, int kern); /* * Note: since all this is still nicely synchronized with the signaling demon, * there's no need to protect sleep loops with clis. If signaling is * moved into the kernel, that would change. */ static int svc_shutdown(struct socket *sock, int how) { return 0; } static void svc_disconnect(struct atm_vcc *vcc) { DEFINE_WAIT(wait); struct sk_buff *skb; struct sock *sk = sk_atm(vcc); pr_debug("%p\n", vcc); if (test_bit(ATM_VF_REGIS, &vcc->flags)) { sigd_enq(vcc, as_close, NULL, NULL, NULL); for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); if (test_bit(ATM_VF_RELEASED, &vcc->flags) || !sigd) break; schedule(); } finish_wait(sk_sleep(sk), &wait); } /* beware - socket is still in use by atmsigd until the last as_indicate has been answered */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { atm_return(vcc, skb->truesize); pr_debug("LISTEN REL\n"); sigd_enq2(NULL, as_reject, vcc, NULL, NULL, &vcc->qos, 0); dev_kfree_skb(skb); } clear_bit(ATM_VF_REGIS, &vcc->flags); /* ... 
may retry later */ } static int svc_release(struct socket *sock) { struct sock *sk = sock->sk; struct atm_vcc *vcc; if (sk) { vcc = ATM_SD(sock); pr_debug("%p\n", vcc); clear_bit(ATM_VF_READY, &vcc->flags); /* * VCC pointer is used as a reference, * so we must not free it (thereby subjecting it to re-use) * before all pending connections are closed */ svc_disconnect(vcc); vcc_release(sock); } return 0; } static int svc_bind(struct socket *sock, struct sockaddr *sockaddr, int sockaddr_len) { DEFINE_WAIT(wait); struct sock *sk = sock->sk; struct sockaddr_atmsvc *addr; struct atm_vcc *vcc; int error; if (sockaddr_len != sizeof(struct sockaddr_atmsvc)) return -EINVAL; lock_sock(sk); if (sock->state == SS_CONNECTED) { error = -EISCONN; goto out; } if (sock->state != SS_UNCONNECTED) { error = -EINVAL; goto out; } vcc = ATM_SD(sock); addr = (struct sockaddr_atmsvc *) sockaddr; if (addr->sas_family != AF_ATMSVC) { error = -EAFNOSUPPORT; goto out; } clear_bit(ATM_VF_BOUND, &vcc->flags); /* failing rebind will kill old binding */ /* @@@ check memory (de)allocation on rebind */ if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) { error = -EBADFD; goto out; } vcc->local = *addr; set_bit(ATM_VF_WAITING, &vcc->flags); sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local); for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); if (!test_bit(ATM_VF_WAITING, &vcc->flags) || !sigd) break; schedule(); } finish_wait(sk_sleep(sk), &wait); clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */ if (!sigd) { error = -EUNATCH; goto out; } if (!sk->sk_err) set_bit(ATM_VF_BOUND, &vcc->flags); error = -sk->sk_err; out: release_sock(sk); return error; } static int svc_connect(struct socket *sock, struct sockaddr *sockaddr, int sockaddr_len, int flags) { DEFINE_WAIT(wait); struct sock *sk = sock->sk; struct sockaddr_atmsvc *addr; struct atm_vcc *vcc = ATM_SD(sock); int error; pr_debug("%p\n", vcc); lock_sock(sk); if (sockaddr_len != sizeof(struct sockaddr_atmsvc)) { error = -EINVAL; goto 
out; } switch (sock->state) { default: error = -EINVAL; goto out; case SS_CONNECTED: error = -EISCONN; goto out; case SS_CONNECTING: if (test_bit(ATM_VF_WAITING, &vcc->flags)) { error = -EALREADY; goto out; } sock->state = SS_UNCONNECTED; if (sk->sk_err) { error = -sk->sk_err; goto out; } break; case SS_UNCONNECTED: addr = (struct sockaddr_atmsvc *) sockaddr; if (addr->sas_family != AF_ATMSVC) { error = -EAFNOSUPPORT; goto out; } if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) { error = -EBADFD; goto out; } if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS || vcc->qos.rxtp.traffic_class == ATM_ANYCLASS) { error = -EINVAL; goto out; } if (!vcc->qos.txtp.traffic_class && !vcc->qos.rxtp.traffic_class) { error = -EINVAL; goto out; } vcc->remote = *addr; set_bit(ATM_VF_WAITING, &vcc->flags); sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote); if (flags & O_NONBLOCK) { sock->state = SS_CONNECTING; error = -EINPROGRESS; goto out; } error = 0; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { schedule(); if (!signal_pending(current)) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); continue; } pr_debug("*ABORT*\n"); /* * This is tricky: * Kernel ---close--> Demon * Kernel <--close--- Demon * or * Kernel ---close--> Demon * Kernel <--error--- Demon * or * Kernel ---close--> Demon * Kernel <--okay---- Demon * Kernel <--close--- Demon */ sigd_enq(vcc, as_close, NULL, NULL, NULL); while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); schedule(); } if (!sk->sk_err) while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); schedule(); } clear_bit(ATM_VF_REGIS, &vcc->flags); clear_bit(ATM_VF_RELEASED, &vcc->flags); clear_bit(ATM_VF_CLOSE, &vcc->flags); /* we're gone now but may connect later */ error = -EINTR; break; } finish_wait(sk_sleep(sk), &wait); if (error) goto out; if (!sigd) { error = 
-EUNATCH; goto out; } if (sk->sk_err) { error = -sk->sk_err; goto out; } } vcc->qos.txtp.max_pcr = SELECT_TOP_PCR(vcc->qos.txtp); vcc->qos.txtp.pcr = 0; vcc->qos.txtp.min_pcr = 0; error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci); if (!error) sock->state = SS_CONNECTED; else (void)svc_disconnect(vcc); out: release_sock(sk); return error; } static int svc_listen(struct socket *sock, int backlog) { DEFINE_WAIT(wait); struct sock *sk = sock->sk; struct atm_vcc *vcc = ATM_SD(sock); int error; pr_debug("%p\n", vcc); lock_sock(sk); /* let server handle listen on unbound sockets */ if (test_bit(ATM_VF_SESSION, &vcc->flags)) { error = -EINVAL; goto out; } if (test_bit(ATM_VF_LISTEN, &vcc->flags)) { error = -EADDRINUSE; goto out; } set_bit(ATM_VF_WAITING, &vcc->flags); sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local); for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); if (!test_bit(ATM_VF_WAITING, &vcc->flags) || !sigd) break; schedule(); } finish_wait(sk_sleep(sk), &wait); if (!sigd) { error = -EUNATCH; goto out; } set_bit(ATM_VF_LISTEN, &vcc->flags); vcc_insert_socket(sk); sk->sk_max_ack_backlog = backlog > 0 ? 
backlog : ATM_BACKLOG_DEFAULT; error = -sk->sk_err; out: release_sock(sk); return error; } static int svc_accept(struct socket *sock, struct socket *newsock, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; struct atmsvc_msg *msg; struct atm_vcc *old_vcc = ATM_SD(sock); struct atm_vcc *new_vcc; int error; lock_sock(sk); error = svc_create(sock_net(sk), newsock, 0, 0); if (error) goto out; new_vcc = ATM_SD(newsock); pr_debug("%p -> %p\n", old_vcc, new_vcc); while (1) { DEFINE_WAIT(wait); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); while (!(skb = skb_dequeue(&sk->sk_receive_queue)) && sigd) { if (test_bit(ATM_VF_RELEASED, &old_vcc->flags)) break; if (test_bit(ATM_VF_CLOSE, &old_vcc->flags)) { error = -sk->sk_err; break; } if (flags & O_NONBLOCK) { error = -EAGAIN; break; } release_sock(sk); schedule(); lock_sock(sk); if (signal_pending(current)) { error = -ERESTARTSYS; break; } prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } finish_wait(sk_sleep(sk), &wait); if (error) goto out; if (!skb) { error = -EUNATCH; goto out; } msg = (struct atmsvc_msg *)skb->data; new_vcc->qos = msg->qos; set_bit(ATM_VF_HASQOS, &new_vcc->flags); new_vcc->remote = msg->svc; new_vcc->local = msg->local; new_vcc->sap = msg->sap; error = vcc_connect(newsock, msg->pvc.sap_addr.itf, msg->pvc.sap_addr.vpi, msg->pvc.sap_addr.vci); dev_kfree_skb(skb); sk->sk_ack_backlog--; if (error) { sigd_enq2(NULL, as_reject, old_vcc, NULL, NULL, &old_vcc->qos, error); error = error == -EAGAIN ? 
-EBUSY : error; goto out; } /* wait should be short, so we ignore the non-blocking flag */ set_bit(ATM_VF_WAITING, &new_vcc->flags); sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL); for (;;) { prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait, TASK_UNINTERRUPTIBLE); if (!test_bit(ATM_VF_WAITING, &new_vcc->flags) || !sigd) break; release_sock(sk); schedule(); lock_sock(sk); } finish_wait(sk_sleep(sk_atm(new_vcc)), &wait); if (!sigd) { error = -EUNATCH; goto out; } if (!sk_atm(new_vcc)->sk_err) break; if (sk_atm(new_vcc)->sk_err != ERESTARTSYS) { error = -sk_atm(new_vcc)->sk_err; goto out; } } newsock->state = SS_CONNECTED; out: release_sock(sk); return error; } static int svc_getname(struct socket *sock, struct sockaddr *sockaddr, int *sockaddr_len, int peer) { struct sockaddr_atmsvc *addr; *sockaddr_len = sizeof(struct sockaddr_atmsvc); addr = (struct sockaddr_atmsvc *) sockaddr; memcpy(addr, peer ? &ATM_SD(sock)->remote : &ATM_SD(sock)->local, sizeof(struct sockaddr_atmsvc)); return 0; } int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos) { struct sock *sk = sk_atm(vcc); DEFINE_WAIT(wait); set_bit(ATM_VF_WAITING, &vcc->flags); sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0); for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); if (!test_bit(ATM_VF_WAITING, &vcc->flags) || test_bit(ATM_VF_RELEASED, &vcc->flags) || !sigd) { break; } schedule(); } finish_wait(sk_sleep(sk), &wait); if (!sigd) return -EUNATCH; return -sk->sk_err; } static int svc_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct atm_vcc *vcc = ATM_SD(sock); int value, error = 0; lock_sock(sk); switch (optname) { case SO_ATMSAP: if (level != SOL_ATM || optlen != sizeof(struct atm_sap)) { error = -EINVAL; goto out; } if (copy_from_user(&vcc->sap, optval, optlen)) { error = -EFAULT; goto out; } set_bit(ATM_VF_HASSAP, &vcc->flags); break; case SO_MULTIPOINT: if (level != SOL_ATM || 
optlen != sizeof(int)) { error = -EINVAL; goto out; } if (get_user(value, (int __user *)optval)) { error = -EFAULT; goto out; } if (value == 1) set_bit(ATM_VF_SESSION, &vcc->flags); else if (value == 0) clear_bit(ATM_VF_SESSION, &vcc->flags); else error = -EINVAL; break; default: error = vcc_setsockopt(sock, level, optname, optval, optlen); } out: release_sock(sk); return error; } static int svc_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int error = 0, len; lock_sock(sk); if (!__SO_LEVEL_MATCH(optname, level) || optname != SO_ATMSAP) { error = vcc_getsockopt(sock, level, optname, optval, optlen); goto out; } if (get_user(len, optlen)) { error = -EFAULT; goto out; } if (len != sizeof(struct atm_sap)) { error = -EINVAL; goto out; } if (copy_to_user(optval, &ATM_SD(sock)->sap, sizeof(struct atm_sap))) { error = -EFAULT; goto out; } out: release_sock(sk); return error; } static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr, int sockaddr_len, int flags) { DEFINE_WAIT(wait); struct sock *sk = sock->sk; struct atm_vcc *vcc = ATM_SD(sock); int error; lock_sock(sk); set_bit(ATM_VF_WAITING, &vcc->flags); sigd_enq(vcc, as_addparty, NULL, NULL, (struct sockaddr_atmsvc *) sockaddr); if (flags & O_NONBLOCK) { error = -EINPROGRESS; goto out; } pr_debug("added wait queue\n"); for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (!test_bit(ATM_VF_WAITING, &vcc->flags) || !sigd) break; schedule(); } finish_wait(sk_sleep(sk), &wait); error = xchg(&sk->sk_err_soft, 0); out: release_sock(sk); return error; } static int svc_dropparty(struct socket *sock, int ep_ref) { DEFINE_WAIT(wait); struct sock *sk = sock->sk; struct atm_vcc *vcc = ATM_SD(sock); int error; lock_sock(sk); set_bit(ATM_VF_WAITING, &vcc->flags); sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref); for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if 
(!test_bit(ATM_VF_WAITING, &vcc->flags) || !sigd) break; schedule(); } finish_wait(sk_sleep(sk), &wait); if (!sigd) { error = -EUNATCH; goto out; } error = xchg(&sk->sk_err_soft, 0); out: release_sock(sk); return error; } static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { int error, ep_ref; struct sockaddr_atmsvc sa; struct atm_vcc *vcc = ATM_SD(sock); switch (cmd) { case ATM_ADDPARTY: if (!test_bit(ATM_VF_SESSION, &vcc->flags)) return -EINVAL; if (copy_from_user(&sa, (void __user *) arg, sizeof(sa))) return -EFAULT; error = svc_addparty(sock, (struct sockaddr *)&sa, sizeof(sa), 0); break; case ATM_DROPPARTY: if (!test_bit(ATM_VF_SESSION, &vcc->flags)) return -EINVAL; if (copy_from_user(&ep_ref, (void __user *) arg, sizeof(int))) return -EFAULT; error = svc_dropparty(sock, ep_ref); break; default: error = vcc_ioctl(sock, cmd, arg); } return error; } #ifdef CONFIG_COMPAT static int svc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { /* The definition of ATM_ADDPARTY uses the size of struct atm_iobuf. But actually it takes a struct sockaddr_atmsvc, which doesn't need compat handling. So all we have to do is fix up cmd... 
*/ if (cmd == COMPAT_ATM_ADDPARTY) cmd = ATM_ADDPARTY; if (cmd == ATM_ADDPARTY || cmd == ATM_DROPPARTY) return svc_ioctl(sock, cmd, arg); else return vcc_compat_ioctl(sock, cmd, arg); } #endif /* CONFIG_COMPAT */ static const struct proto_ops svc_proto_ops = { .family = PF_ATMSVC, .owner = THIS_MODULE, .release = svc_release, .bind = svc_bind, .connect = svc_connect, .socketpair = sock_no_socketpair, .accept = svc_accept, .getname = svc_getname, .poll = vcc_poll, .ioctl = svc_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = svc_compat_ioctl, #endif .listen = svc_listen, .shutdown = svc_shutdown, .setsockopt = svc_setsockopt, .getsockopt = svc_getsockopt, .sendmsg = vcc_sendmsg, .recvmsg = vcc_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static int svc_create(struct net *net, struct socket *sock, int protocol, int kern) { int error; if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; sock->ops = &svc_proto_ops; error = vcc_create(net, sock, protocol, AF_ATMSVC); if (error) return error; ATM_SD(sock)->local.sas_family = AF_ATMSVC; ATM_SD(sock)->remote.sas_family = AF_ATMSVC; return 0; } static const struct net_proto_family svc_family_ops = { .family = PF_ATMSVC, .create = svc_create, .owner = THIS_MODULE, }; /* * Initialize the ATM SVC protocol family */ int __init atmsvc_init(void) { return sock_register(&svc_family_ops); } void atmsvc_exit(void) { sock_unregister(PF_ATMSVC); }
gpl-2.0
Snuzzo/PLUS_kernel
fs/ocfs2/extent_map.c
2373
21835
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * extent_map.c * * Block/Cluster mapping functions * * Copyright (C) 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License, version 2, as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/fs.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/fiemap.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "alloc.h" #include "dlmglue.h" #include "extent_map.h" #include "inode.h" #include "super.h" #include "symlink.h" #include "ocfs2_trace.h" #include "buffer_head_io.h" /* * The extent caching implementation is intentionally trivial. * * We only cache a small number of extents stored directly on the * inode, so linear order operations are acceptable. If we ever want * to increase the size of the extent map, then these algorithms must * get smarter. 
*/ void ocfs2_extent_map_init(struct inode *inode) { struct ocfs2_inode_info *oi = OCFS2_I(inode); oi->ip_extent_map.em_num_items = 0; INIT_LIST_HEAD(&oi->ip_extent_map.em_list); } static void __ocfs2_extent_map_lookup(struct ocfs2_extent_map *em, unsigned int cpos, struct ocfs2_extent_map_item **ret_emi) { unsigned int range; struct ocfs2_extent_map_item *emi; *ret_emi = NULL; list_for_each_entry(emi, &em->em_list, ei_list) { range = emi->ei_cpos + emi->ei_clusters; if (cpos >= emi->ei_cpos && cpos < range) { list_move(&emi->ei_list, &em->em_list); *ret_emi = emi; break; } } } static int ocfs2_extent_map_lookup(struct inode *inode, unsigned int cpos, unsigned int *phys, unsigned int *len, unsigned int *flags) { unsigned int coff; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_extent_map_item *emi; spin_lock(&oi->ip_lock); __ocfs2_extent_map_lookup(&oi->ip_extent_map, cpos, &emi); if (emi) { coff = cpos - emi->ei_cpos; *phys = emi->ei_phys + coff; if (len) *len = emi->ei_clusters - coff; if (flags) *flags = emi->ei_flags; } spin_unlock(&oi->ip_lock); if (emi == NULL) return -ENOENT; return 0; } /* * Forget about all clusters equal to or greater than cpos. */ void ocfs2_extent_map_trunc(struct inode *inode, unsigned int cpos) { struct ocfs2_extent_map_item *emi, *n; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_extent_map *em = &oi->ip_extent_map; LIST_HEAD(tmp_list); unsigned int range; spin_lock(&oi->ip_lock); list_for_each_entry_safe(emi, n, &em->em_list, ei_list) { if (emi->ei_cpos >= cpos) { /* Full truncate of this record. 
*/ list_move(&emi->ei_list, &tmp_list); BUG_ON(em->em_num_items == 0); em->em_num_items--; continue; } range = emi->ei_cpos + emi->ei_clusters; if (range > cpos) { /* Partial truncate */ emi->ei_clusters = cpos - emi->ei_cpos; } } spin_unlock(&oi->ip_lock); list_for_each_entry_safe(emi, n, &tmp_list, ei_list) { list_del(&emi->ei_list); kfree(emi); } } /* * Is any part of emi2 contained within emi1 */ static int ocfs2_ei_is_contained(struct ocfs2_extent_map_item *emi1, struct ocfs2_extent_map_item *emi2) { unsigned int range1, range2; /* * Check if logical start of emi2 is inside emi1 */ range1 = emi1->ei_cpos + emi1->ei_clusters; if (emi2->ei_cpos >= emi1->ei_cpos && emi2->ei_cpos < range1) return 1; /* * Check if logical end of emi2 is inside emi1 */ range2 = emi2->ei_cpos + emi2->ei_clusters; if (range2 > emi1->ei_cpos && range2 <= range1) return 1; return 0; } static void ocfs2_copy_emi_fields(struct ocfs2_extent_map_item *dest, struct ocfs2_extent_map_item *src) { dest->ei_cpos = src->ei_cpos; dest->ei_phys = src->ei_phys; dest->ei_clusters = src->ei_clusters; dest->ei_flags = src->ei_flags; } /* * Try to merge emi with ins. Returns 1 if merge succeeds, zero * otherwise. */ static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi, struct ocfs2_extent_map_item *ins) { /* * Handle contiguousness */ if (ins->ei_phys == (emi->ei_phys + emi->ei_clusters) && ins->ei_cpos == (emi->ei_cpos + emi->ei_clusters) && ins->ei_flags == emi->ei_flags) { emi->ei_clusters += ins->ei_clusters; return 1; } else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys && (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos && ins->ei_flags == emi->ei_flags) { emi->ei_phys = ins->ei_phys; emi->ei_cpos = ins->ei_cpos; emi->ei_clusters += ins->ei_clusters; return 1; } /* * Overlapping extents - this shouldn't happen unless we've * split an extent to change it's flags. That is exceedingly * rare, so there's no sense in trying to optimize it yet. 
*/ if (ocfs2_ei_is_contained(emi, ins) || ocfs2_ei_is_contained(ins, emi)) { ocfs2_copy_emi_fields(emi, ins); return 1; } /* No merge was possible. */ return 0; } /* * In order to reduce complexity on the caller, this insert function * is intentionally liberal in what it will accept. * * The only rule is that the truncate call *must* be used whenever * records have been deleted. This avoids inserting overlapping * records with different physical mappings. */ void ocfs2_extent_map_insert_rec(struct inode *inode, struct ocfs2_extent_rec *rec) { struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_extent_map *em = &oi->ip_extent_map; struct ocfs2_extent_map_item *emi, *new_emi = NULL; struct ocfs2_extent_map_item ins; ins.ei_cpos = le32_to_cpu(rec->e_cpos); ins.ei_phys = ocfs2_blocks_to_clusters(inode->i_sb, le64_to_cpu(rec->e_blkno)); ins.ei_clusters = le16_to_cpu(rec->e_leaf_clusters); ins.ei_flags = rec->e_flags; search: spin_lock(&oi->ip_lock); list_for_each_entry(emi, &em->em_list, ei_list) { if (ocfs2_try_to_merge_extent_map(emi, &ins)) { list_move(&emi->ei_list, &em->em_list); spin_unlock(&oi->ip_lock); goto out; } } /* * No item could be merged. * * Either allocate and add a new item, or overwrite the last recently * inserted. 
*/ if (em->em_num_items < OCFS2_MAX_EXTENT_MAP_ITEMS) { if (new_emi == NULL) { spin_unlock(&oi->ip_lock); new_emi = kmalloc(sizeof(*new_emi), GFP_NOFS); if (new_emi == NULL) goto out; goto search; } ocfs2_copy_emi_fields(new_emi, &ins); list_add(&new_emi->ei_list, &em->em_list); em->em_num_items++; new_emi = NULL; } else { BUG_ON(list_empty(&em->em_list) || em->em_num_items == 0); emi = list_entry(em->em_list.prev, struct ocfs2_extent_map_item, ei_list); list_move(&emi->ei_list, &em->em_list); ocfs2_copy_emi_fields(emi, &ins); } spin_unlock(&oi->ip_lock); out: if (new_emi) kfree(new_emi); } static int ocfs2_last_eb_is_empty(struct inode *inode, struct ocfs2_dinode *di) { int ret, next_free; u64 last_eb_blk = le64_to_cpu(di->i_last_eb_blk); struct buffer_head *eb_bh = NULL; struct ocfs2_extent_block *eb; struct ocfs2_extent_list *el; ret = ocfs2_read_extent_block(INODE_CACHE(inode), last_eb_blk, &eb_bh); if (ret) { mlog_errno(ret); goto out; } eb = (struct ocfs2_extent_block *) eb_bh->b_data; el = &eb->h_list; if (el->l_tree_depth) { ocfs2_error(inode->i_sb, "Inode %lu has non zero tree depth in " "leaf block %llu\n", inode->i_ino, (unsigned long long)eb_bh->b_blocknr); ret = -EROFS; goto out; } next_free = le16_to_cpu(el->l_next_free_rec); if (next_free == 0 || (next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0]))) ret = 1; out: brelse(eb_bh); return ret; } /* * Return the 1st index within el which contains an extent start * larger than v_cluster. */ static int ocfs2_search_for_hole_index(struct ocfs2_extent_list *el, u32 v_cluster) { int i; struct ocfs2_extent_rec *rec; for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) { rec = &el->l_recs[i]; if (v_cluster < le32_to_cpu(rec->e_cpos)) break; } return i; } /* * Figure out the size of a hole which starts at v_cluster within the given * extent list. * * If there is no more allocation past v_cluster, we return the maximum * cluster size minus v_cluster. 
* * If we have in-inode extents, then el points to the dinode list and * eb_bh is NULL. Otherwise, eb_bh should point to the extent block * containing el. */ int ocfs2_figure_hole_clusters(struct ocfs2_caching_info *ci, struct ocfs2_extent_list *el, struct buffer_head *eb_bh, u32 v_cluster, u32 *num_clusters) { int ret, i; struct buffer_head *next_eb_bh = NULL; struct ocfs2_extent_block *eb, *next_eb; i = ocfs2_search_for_hole_index(el, v_cluster); if (i == le16_to_cpu(el->l_next_free_rec) && eb_bh) { eb = (struct ocfs2_extent_block *)eb_bh->b_data; /* * Check the next leaf for any extents. */ if (le64_to_cpu(eb->h_next_leaf_blk) == 0ULL) goto no_more_extents; ret = ocfs2_read_extent_block(ci, le64_to_cpu(eb->h_next_leaf_blk), &next_eb_bh); if (ret) { mlog_errno(ret); goto out; } next_eb = (struct ocfs2_extent_block *)next_eb_bh->b_data; el = &next_eb->h_list; i = ocfs2_search_for_hole_index(el, v_cluster); } no_more_extents: if (i == le16_to_cpu(el->l_next_free_rec)) { /* * We're at the end of our existing allocation. Just * return the maximum number of clusters we could * possibly allocate. 
*/ *num_clusters = UINT_MAX - v_cluster; } else { *num_clusters = le32_to_cpu(el->l_recs[i].e_cpos) - v_cluster; } ret = 0; out: brelse(next_eb_bh); return ret; } static int ocfs2_get_clusters_nocache(struct inode *inode, struct buffer_head *di_bh, u32 v_cluster, unsigned int *hole_len, struct ocfs2_extent_rec *ret_rec, unsigned int *is_last) { int i, ret, tree_height, len; struct ocfs2_dinode *di; struct ocfs2_extent_block *uninitialized_var(eb); struct ocfs2_extent_list *el; struct ocfs2_extent_rec *rec; struct buffer_head *eb_bh = NULL; memset(ret_rec, 0, sizeof(*ret_rec)); if (is_last) *is_last = 0; di = (struct ocfs2_dinode *) di_bh->b_data; el = &di->id2.i_list; tree_height = le16_to_cpu(el->l_tree_depth); if (tree_height > 0) { ret = ocfs2_find_leaf(INODE_CACHE(inode), el, v_cluster, &eb_bh); if (ret) { mlog_errno(ret); goto out; } eb = (struct ocfs2_extent_block *) eb_bh->b_data; el = &eb->h_list; if (el->l_tree_depth) { ocfs2_error(inode->i_sb, "Inode %lu has non zero tree depth in " "leaf block %llu\n", inode->i_ino, (unsigned long long)eb_bh->b_blocknr); ret = -EROFS; goto out; } } i = ocfs2_search_extent_list(el, v_cluster); if (i == -1) { /* * Holes can be larger than the maximum size of an * extent, so we return their lengths in a separate * field. */ if (hole_len) { ret = ocfs2_figure_hole_clusters(INODE_CACHE(inode), el, eb_bh, v_cluster, &len); if (ret) { mlog_errno(ret); goto out; } *hole_len = len; } goto out_hole; } rec = &el->l_recs[i]; BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos)); if (!rec->e_blkno) { ocfs2_error(inode->i_sb, "Inode %lu has bad extent " "record (%u, %u, 0)", inode->i_ino, le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec)); ret = -EROFS; goto out; } *ret_rec = *rec; /* * Checking for last extent is potentially expensive - we * might have to look at the next leaf over to see if it's * empty. 
* * The first two checks are to see whether the caller even * cares for this information, and if the extent is at least * the last in it's list. * * If those hold true, then the extent is last if any of the * additional conditions hold true: * - Extent list is in-inode * - Extent list is right-most * - Extent list is 2nd to rightmost, with empty right-most */ if (is_last) { if (i == (le16_to_cpu(el->l_next_free_rec) - 1)) { if (tree_height == 0) *is_last = 1; else if (eb->h_blkno == di->i_last_eb_blk) *is_last = 1; else if (eb->h_next_leaf_blk == di->i_last_eb_blk) { ret = ocfs2_last_eb_is_empty(inode, di); if (ret < 0) { mlog_errno(ret); goto out; } if (ret == 1) *is_last = 1; } } } out_hole: ret = 0; out: brelse(eb_bh); return ret; } static void ocfs2_relative_extent_offsets(struct super_block *sb, u32 v_cluster, struct ocfs2_extent_rec *rec, u32 *p_cluster, u32 *num_clusters) { u32 coff = v_cluster - le32_to_cpu(rec->e_cpos); *p_cluster = ocfs2_blocks_to_clusters(sb, le64_to_cpu(rec->e_blkno)); *p_cluster = *p_cluster + coff; if (num_clusters) *num_clusters = le16_to_cpu(rec->e_leaf_clusters) - coff; } int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster, u32 *p_cluster, u32 *num_clusters, struct ocfs2_extent_list *el, unsigned int *extent_flags) { int ret = 0, i; struct buffer_head *eb_bh = NULL; struct ocfs2_extent_block *eb; struct ocfs2_extent_rec *rec; u32 coff; if (el->l_tree_depth) { ret = ocfs2_find_leaf(INODE_CACHE(inode), el, v_cluster, &eb_bh); if (ret) { mlog_errno(ret); goto out; } eb = (struct ocfs2_extent_block *) eb_bh->b_data; el = &eb->h_list; if (el->l_tree_depth) { ocfs2_error(inode->i_sb, "Inode %lu has non zero tree depth in " "xattr leaf block %llu\n", inode->i_ino, (unsigned long long)eb_bh->b_blocknr); ret = -EROFS; goto out; } } i = ocfs2_search_extent_list(el, v_cluster); if (i == -1) { ret = -EROFS; mlog_errno(ret); goto out; } else { rec = &el->l_recs[i]; BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos)); if (!rec->e_blkno) { 
ocfs2_error(inode->i_sb, "Inode %lu has bad extent " "record (%u, %u, 0) in xattr", inode->i_ino, le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec)); ret = -EROFS; goto out; } coff = v_cluster - le32_to_cpu(rec->e_cpos); *p_cluster = ocfs2_blocks_to_clusters(inode->i_sb, le64_to_cpu(rec->e_blkno)); *p_cluster = *p_cluster + coff; if (num_clusters) *num_clusters = ocfs2_rec_clusters(el, rec) - coff; if (extent_flags) *extent_flags = rec->e_flags; } out: if (eb_bh) brelse(eb_bh); return ret; } int ocfs2_get_clusters(struct inode *inode, u32 v_cluster, u32 *p_cluster, u32 *num_clusters, unsigned int *extent_flags) { int ret; unsigned int uninitialized_var(hole_len), flags = 0; struct buffer_head *di_bh = NULL; struct ocfs2_extent_rec rec; if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { ret = -ERANGE; mlog_errno(ret); goto out; } ret = ocfs2_extent_map_lookup(inode, v_cluster, p_cluster, num_clusters, extent_flags); if (ret == 0) goto out; ret = ocfs2_read_inode_block(inode, &di_bh); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_get_clusters_nocache(inode, di_bh, v_cluster, &hole_len, &rec, NULL); if (ret) { mlog_errno(ret); goto out; } if (rec.e_blkno == 0ULL) { /* * A hole was found. Return some canned values that * callers can key on. If asked for, num_clusters will * be populated with the size of the hole. */ *p_cluster = 0; if (num_clusters) { *num_clusters = hole_len; } } else { ocfs2_relative_extent_offsets(inode->i_sb, v_cluster, &rec, p_cluster, num_clusters); flags = rec.e_flags; ocfs2_extent_map_insert_rec(inode, &rec); } if (extent_flags) *extent_flags = flags; out: brelse(di_bh); return ret; } /* * This expects alloc_sem to be held. The allocation cannot change at * all while the map is in the process of being updated. 
*/ int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno, u64 *ret_count, unsigned int *extent_flags) { int ret; int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); u32 cpos, num_clusters, p_cluster; u64 boff = 0; cpos = ocfs2_blocks_to_clusters(inode->i_sb, v_blkno); ret = ocfs2_get_clusters(inode, cpos, &p_cluster, &num_clusters, extent_flags); if (ret) { mlog_errno(ret); goto out; } /* * p_cluster == 0 indicates a hole. */ if (p_cluster) { boff = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster); boff += (v_blkno & (u64)(bpc - 1)); } *p_blkno = boff; if (ret_count) { *ret_count = ocfs2_clusters_to_blocks(inode->i_sb, num_clusters); *ret_count -= v_blkno & (u64)(bpc - 1); } out: return ret; } /* * The ocfs2_fiemap_inline() may be a little bit misleading, since * it not only handles the fiemap for inlined files, but also deals * with the fast symlink, cause they have no difference for extent * mapping per se. */ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh, struct fiemap_extent_info *fieinfo, u64 map_start) { int ret; unsigned int id_count; struct ocfs2_dinode *di; u64 phys; u32 flags = FIEMAP_EXTENT_DATA_INLINE|FIEMAP_EXTENT_LAST; struct ocfs2_inode_info *oi = OCFS2_I(inode); di = (struct ocfs2_dinode *)di_bh->b_data; if (ocfs2_inode_is_fast_symlink(inode)) id_count = ocfs2_fast_symlink_chars(inode->i_sb); else id_count = le16_to_cpu(di->id2.i_data.id_count); if (map_start < id_count) { phys = oi->ip_blkno << inode->i_sb->s_blocksize_bits; if (ocfs2_inode_is_fast_symlink(inode)) phys += offsetof(struct ocfs2_dinode, id2.i_symlink); else phys += offsetof(struct ocfs2_dinode, id2.i_data.id_data); ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count, flags); if (ret < 0) return ret; } return 0; } #define OCFS2_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC) int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 map_start, u64 map_len) { int ret, is_last; u32 mapping_end, cpos; unsigned int 
hole_size; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); u64 len_bytes, phys_bytes, virt_bytes; struct buffer_head *di_bh = NULL; struct ocfs2_extent_rec rec; ret = fiemap_check_flags(fieinfo, OCFS2_FIEMAP_FLAGS); if (ret) return ret; ret = ocfs2_inode_lock(inode, &di_bh, 0); if (ret) { mlog_errno(ret); goto out; } down_read(&OCFS2_I(inode)->ip_alloc_sem); /* * Handle inline-data and fast symlink separately. */ if ((OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) || ocfs2_inode_is_fast_symlink(inode)) { ret = ocfs2_fiemap_inline(inode, di_bh, fieinfo, map_start); goto out_unlock; } cpos = map_start >> osb->s_clustersize_bits; mapping_end = ocfs2_clusters_for_bytes(inode->i_sb, map_start + map_len); mapping_end -= cpos; is_last = 0; while (cpos < mapping_end && !is_last) { u32 fe_flags; ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, &hole_size, &rec, &is_last); if (ret) { mlog_errno(ret); goto out; } if (rec.e_blkno == 0ULL) { cpos += hole_size; continue; } fe_flags = 0; if (rec.e_flags & OCFS2_EXT_UNWRITTEN) fe_flags |= FIEMAP_EXTENT_UNWRITTEN; if (rec.e_flags & OCFS2_EXT_REFCOUNTED) fe_flags |= FIEMAP_EXTENT_SHARED; if (is_last) fe_flags |= FIEMAP_EXTENT_LAST; len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits; phys_bytes = le64_to_cpu(rec.e_blkno) << osb->sb->s_blocksize_bits; virt_bytes = (u64)le32_to_cpu(rec.e_cpos) << osb->s_clustersize_bits; ret = fiemap_fill_next_extent(fieinfo, virt_bytes, phys_bytes, len_bytes, fe_flags); if (ret) break; cpos = le32_to_cpu(rec.e_cpos)+ le16_to_cpu(rec.e_leaf_clusters); } if (ret > 0) ret = 0; out_unlock: brelse(di_bh); up_read(&OCFS2_I(inode)->ip_alloc_sem); ocfs2_inode_unlock(inode, 0); out: return ret; } int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr, struct buffer_head *bhs[], int flags, int (*validate)(struct super_block *sb, struct buffer_head *bh)) { int rc = 0; u64 p_block, p_count; int i, count, done = 0; trace_ocfs2_read_virt_blocks( inode, (unsigned 
long long)v_block, nr, bhs, flags, validate); if (((v_block + nr - 1) << inode->i_sb->s_blocksize_bits) >= i_size_read(inode)) { BUG_ON(!(flags & OCFS2_BH_READAHEAD)); goto out; } while (done < nr) { down_read(&OCFS2_I(inode)->ip_alloc_sem); rc = ocfs2_extent_map_get_blocks(inode, v_block + done, &p_block, &p_count, NULL); up_read(&OCFS2_I(inode)->ip_alloc_sem); if (rc) { mlog_errno(rc); break; } if (!p_block) { rc = -EIO; mlog(ML_ERROR, "Inode #%llu contains a hole at offset %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)(v_block + done) << inode->i_sb->s_blocksize_bits); break; } count = nr - done; if (p_count < count) count = p_count; /* * If the caller passed us bhs, they should have come * from a previous readahead call to this function. Thus, * they should have the right b_blocknr. */ for (i = 0; i < count; i++) { if (!bhs[done + i]) continue; BUG_ON(bhs[done + i]->b_blocknr != (p_block + i)); } rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, count, bhs + done, flags, validate); if (rc) { mlog_errno(rc); break; } done += count; } out: return rc; }
gpl-2.0
thenameisnigel/android_kernel_lge_ls840
drivers/dma/mv_xor.c
2373
36307
/* * offload engine driver for the Marvell XOR engine * Copyright (C) 2007, 2008, Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/memory.h> #include <plat/mv_xor.h> #include "mv_xor.h" static void mv_xor_issue_pending(struct dma_chan *chan); #define to_mv_xor_chan(chan) \ container_of(chan, struct mv_xor_chan, common) #define to_mv_xor_device(dev) \ container_of(dev, struct mv_xor_device, common) #define to_mv_xor_slot(tx) \ container_of(tx, struct mv_xor_desc_slot, async_tx) static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags) { struct mv_xor_desc *hw_desc = desc->hw_desc; hw_desc->status = (1 << 31); hw_desc->phy_next_desc = 0; hw_desc->desc_command = (1 << 31); } static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) { struct mv_xor_desc *hw_desc = desc->hw_desc; return hw_desc->phy_dest_addr; } static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc, int src_idx) { struct mv_xor_desc *hw_desc = desc->hw_desc; return hw_desc->phy_src_addr[src_idx]; } static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, u32 byte_count) { struct mv_xor_desc *hw_desc = 
desc->hw_desc; hw_desc->byte_count = byte_count; } static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc, u32 next_desc_addr) { struct mv_xor_desc *hw_desc = desc->hw_desc; BUG_ON(hw_desc->phy_next_desc); hw_desc->phy_next_desc = next_desc_addr; } static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc) { struct mv_xor_desc *hw_desc = desc->hw_desc; hw_desc->phy_next_desc = 0; } static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val) { desc->value = val; } static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc, dma_addr_t addr) { struct mv_xor_desc *hw_desc = desc->hw_desc; hw_desc->phy_dest_addr = addr; } static int mv_chan_memset_slot_count(size_t len) { return 1; } #define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c) static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc, int index, dma_addr_t addr) { struct mv_xor_desc *hw_desc = desc->hw_desc; hw_desc->phy_src_addr[index] = addr; if (desc->type == DMA_XOR) hw_desc->desc_command |= (1 << index); } static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan) { return __raw_readl(XOR_CURR_DESC(chan)); } static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan, u32 next_desc_addr) { __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan)); } static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr) { __raw_writel(desc_addr, XOR_DEST_POINTER(chan)); } static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size) { __raw_writel(block_size, XOR_BLOCK_SIZE(chan)); } static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value) { __raw_writel(value, XOR_INIT_VALUE_LOW(chan)); __raw_writel(value, XOR_INIT_VALUE_HIGH(chan)); } static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan) { u32 val = __raw_readl(XOR_INTR_MASK(chan)); val |= XOR_INTR_MASK_VALUE << (chan->idx * 16); __raw_writel(val, XOR_INTR_MASK(chan)); } static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan) { u32 intr_cause = 
__raw_readl(XOR_INTR_CAUSE(chan)); intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF; return intr_cause; } static int mv_is_err_intr(u32 intr_cause) { if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9))) return 1; return 0; } static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) { u32 val = ~(1 << (chan->idx * 16)); dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); __raw_writel(val, XOR_INTR_CAUSE(chan)); } static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan) { u32 val = 0xFFFF0000 >> (chan->idx * 16); __raw_writel(val, XOR_INTR_CAUSE(chan)); } static int mv_can_chain(struct mv_xor_desc_slot *desc) { struct mv_xor_desc_slot *chain_old_tail = list_entry( desc->chain_node.prev, struct mv_xor_desc_slot, chain_node); if (chain_old_tail->type != desc->type) return 0; if (desc->type == DMA_MEMSET) return 0; return 1; } static void mv_set_mode(struct mv_xor_chan *chan, enum dma_transaction_type type) { u32 op_mode; u32 config = __raw_readl(XOR_CONFIG(chan)); switch (type) { case DMA_XOR: op_mode = XOR_OPERATION_MODE_XOR; break; case DMA_MEMCPY: op_mode = XOR_OPERATION_MODE_MEMCPY; break; case DMA_MEMSET: op_mode = XOR_OPERATION_MODE_MEMSET; break; default: dev_printk(KERN_ERR, chan->device->common.dev, "error: unsupported operation %d.\n", type); BUG(); return; } config &= ~0x7; config |= op_mode; __raw_writel(config, XOR_CONFIG(chan)); chan->current_type = type; } static void mv_chan_activate(struct mv_xor_chan *chan) { u32 activation; dev_dbg(chan->device->common.dev, " activate chan.\n"); activation = __raw_readl(XOR_ACTIVATION(chan)); activation |= 0x1; __raw_writel(activation, XOR_ACTIVATION(chan)); } static char mv_chan_is_busy(struct mv_xor_chan *chan) { u32 state = __raw_readl(XOR_ACTIVATION(chan)); state = (state >> 4) & 0x3; return (state == 1) ? 
1 : 0; } static int mv_chan_xor_slot_count(size_t len, int src_cnt) { return 1; } /** * mv_xor_free_slots - flags descriptor slots for reuse * @slot: Slot to free * Caller must hold &mv_chan->lock while calling this function */ static void mv_xor_free_slots(struct mv_xor_chan *mv_chan, struct mv_xor_desc_slot *slot) { dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n", __func__, __LINE__, slot); slot->slots_per_op = 0; } /* * mv_xor_start_new_chain - program the engine to operate on new chain headed by * sw_desc * Caller must hold &mv_chan->lock while calling this function */ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan, struct mv_xor_desc_slot *sw_desc) { dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n", __func__, __LINE__, sw_desc); if (sw_desc->type != mv_chan->current_type) mv_set_mode(mv_chan, sw_desc->type); if (sw_desc->type == DMA_MEMSET) { /* for memset requests we need to program the engine, no * descriptors used. */ struct mv_xor_desc *hw_desc = sw_desc->hw_desc; mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr); mv_chan_set_block_size(mv_chan, sw_desc->unmap_len); mv_chan_set_value(mv_chan, sw_desc->value); } else { /* set the hardware chain */ mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); } mv_chan->pending += sw_desc->slot_cnt; mv_xor_issue_pending(&mv_chan->common); } static dma_cookie_t mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, struct mv_xor_chan *mv_chan, dma_cookie_t cookie) { BUG_ON(desc->async_tx.cookie < 0); if (desc->async_tx.cookie > 0) { cookie = desc->async_tx.cookie; /* call the callback (must not sleep or submit new * operations to this channel) */ if (desc->async_tx.callback) desc->async_tx.callback( desc->async_tx.callback_param); /* unmap dma addresses * (unmap_single vs unmap_page?) 
*/ if (desc->group_head && desc->unmap_len) { struct mv_xor_desc_slot *unmap = desc->group_head; struct device *dev = &mv_chan->device->pdev->dev; u32 len = unmap->unmap_len; enum dma_ctrl_flags flags = desc->async_tx.flags; u32 src_cnt; dma_addr_t addr; dma_addr_t dest; src_cnt = unmap->unmap_src_cnt; dest = mv_desc_get_dest_addr(unmap); if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { enum dma_data_direction dir; if (src_cnt > 1) /* is xor ? */ dir = DMA_BIDIRECTIONAL; else dir = DMA_FROM_DEVICE; dma_unmap_page(dev, dest, len, dir); } if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { while (src_cnt--) { addr = mv_desc_get_src_addr(unmap, src_cnt); if (addr == dest) continue; dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); } } desc->group_head = NULL; } } /* run dependent operations */ dma_run_dependencies(&desc->async_tx); return cookie; } static int mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan) { struct mv_xor_desc_slot *iter, *_iter; dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__); list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, completed_node) { if (async_tx_test_ack(&iter->async_tx)) { list_del(&iter->completed_node); mv_xor_free_slots(mv_chan, iter); } } return 0; } static int mv_xor_clean_slot(struct mv_xor_desc_slot *desc, struct mv_xor_chan *mv_chan) { dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n", __func__, __LINE__, desc, desc->async_tx.flags); list_del(&desc->chain_node); /* the client is allowed to attach dependent operations * until 'ack' is set */ if (!async_tx_test_ack(&desc->async_tx)) { /* move this slot to the completed_slots */ list_add_tail(&desc->completed_node, &mv_chan->completed_slots); return 0; } mv_xor_free_slots(mv_chan, desc); return 0; } static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) { struct mv_xor_desc_slot *iter, *_iter; dma_cookie_t cookie = 0; int busy = mv_chan_is_busy(mv_chan); u32 current_desc = mv_chan_get_current_desc(mv_chan); int seen_current = 0; 
dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__); dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc); mv_xor_clean_completed_slots(mv_chan); /* free completed slots from the chain starting with * the oldest descriptor */ list_for_each_entry_safe(iter, _iter, &mv_chan->chain, chain_node) { prefetch(_iter); prefetch(&_iter->async_tx); /* do not advance past the current descriptor loaded into the * hardware channel, subsequent descriptors are either in * process or have not been submitted */ if (seen_current) break; /* stop the search if we reach the current descriptor and the * channel is busy */ if (iter->async_tx.phys == current_desc) { seen_current = 1; if (busy) break; } cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie); if (mv_xor_clean_slot(iter, mv_chan)) break; } if ((busy == 0) && !list_empty(&mv_chan->chain)) { struct mv_xor_desc_slot *chain_head; chain_head = list_entry(mv_chan->chain.next, struct mv_xor_desc_slot, chain_node); mv_xor_start_new_chain(mv_chan, chain_head); } if (cookie > 0) mv_chan->completed_cookie = cookie; } static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) { spin_lock_bh(&mv_chan->lock); __mv_xor_slot_cleanup(mv_chan); spin_unlock_bh(&mv_chan->lock); } static void mv_xor_tasklet(unsigned long data) { struct mv_xor_chan *chan = (struct mv_xor_chan *) data; mv_xor_slot_cleanup(chan); } static struct mv_xor_desc_slot * mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots, int slots_per_op) { struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL; LIST_HEAD(chain); int slots_found, retry = 0; /* start search from the last allocated descrtiptor * if a contiguous allocation can not be found start searching * from the beginning of the list */ retry: slots_found = 0; if (retry == 0) iter = mv_chan->last_used; else iter = list_entry(&mv_chan->all_slots, struct mv_xor_desc_slot, slot_node); list_for_each_entry_safe_continue( iter, _iter, &mv_chan->all_slots, slot_node) { 
prefetch(_iter); prefetch(&_iter->async_tx); if (iter->slots_per_op) { /* give up after finding the first busy slot * on the second pass through the list */ if (retry) break; slots_found = 0; continue; } /* start the allocation if the slot is correctly aligned */ if (!slots_found++) alloc_start = iter; if (slots_found == num_slots) { struct mv_xor_desc_slot *alloc_tail = NULL; struct mv_xor_desc_slot *last_used = NULL; iter = alloc_start; while (num_slots) { int i; /* pre-ack all but the last descriptor */ async_tx_ack(&iter->async_tx); list_add_tail(&iter->chain_node, &chain); alloc_tail = iter; iter->async_tx.cookie = 0; iter->slot_cnt = num_slots; iter->xor_check_result = NULL; for (i = 0; i < slots_per_op; i++) { iter->slots_per_op = slots_per_op - i; last_used = iter; iter = list_entry(iter->slot_node.next, struct mv_xor_desc_slot, slot_node); } num_slots -= slots_per_op; } alloc_tail->group_head = alloc_start; alloc_tail->async_tx.cookie = -EBUSY; list_splice(&chain, &alloc_tail->tx_list); mv_chan->last_used = last_used; mv_desc_clear_next_desc(alloc_start); mv_desc_clear_next_desc(alloc_tail); return alloc_tail; } } if (!retry++) goto retry; /* try to free some slots if the allocation fails */ tasklet_schedule(&mv_chan->irq_tasklet); return NULL; } static dma_cookie_t mv_desc_assign_cookie(struct mv_xor_chan *mv_chan, struct mv_xor_desc_slot *desc) { dma_cookie_t cookie = mv_chan->common.cookie; if (++cookie < 0) cookie = 1; mv_chan->common.cookie = desc->async_tx.cookie = cookie; return cookie; } /************************ DMA engine API functions ****************************/ static dma_cookie_t mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) { struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx); struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan); struct mv_xor_desc_slot *grp_start, *old_chain_tail; dma_cookie_t cookie; int new_hw_chain = 1; dev_dbg(mv_chan->device->common.dev, "%s sw_desc %p: async_tx %p\n", __func__, sw_desc, 
&sw_desc->async_tx); grp_start = sw_desc->group_head; spin_lock_bh(&mv_chan->lock); cookie = mv_desc_assign_cookie(mv_chan, sw_desc); if (list_empty(&mv_chan->chain)) list_splice_init(&sw_desc->tx_list, &mv_chan->chain); else { new_hw_chain = 0; old_chain_tail = list_entry(mv_chan->chain.prev, struct mv_xor_desc_slot, chain_node); list_splice_init(&grp_start->tx_list, &old_chain_tail->chain_node); if (!mv_can_chain(grp_start)) goto submit_done; dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n", old_chain_tail->async_tx.phys); /* fix up the hardware chain */ mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys); /* if the channel is not busy */ if (!mv_chan_is_busy(mv_chan)) { u32 current_desc = mv_chan_get_current_desc(mv_chan); /* * and the curren desc is the end of the chain before * the append, then we need to start the channel */ if (current_desc == old_chain_tail->async_tx.phys) new_hw_chain = 1; } } if (new_hw_chain) mv_xor_start_new_chain(mv_chan, grp_start); submit_done: spin_unlock_bh(&mv_chan->lock); return cookie; } /* returns the number of allocated descriptors */ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) { char *hw_desc; int idx; struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *slot = NULL; struct mv_xor_platform_data *plat_data = mv_chan->device->pdev->dev.platform_data; int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE; /* Allocate descriptor slots */ idx = mv_chan->slots_allocated; while (idx < num_descs_in_pool) { slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) { printk(KERN_INFO "MV XOR Channel only initialized" " %d descriptor slots", idx); break; } hw_desc = (char *) mv_chan->device->dma_desc_pool_virt; slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE]; dma_async_tx_descriptor_init(&slot->async_tx, chan); slot->async_tx.tx_submit = mv_xor_tx_submit; INIT_LIST_HEAD(&slot->chain_node); INIT_LIST_HEAD(&slot->slot_node); INIT_LIST_HEAD(&slot->tx_list); 
hw_desc = (char *) mv_chan->device->dma_desc_pool; slot->async_tx.phys = (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; slot->idx = idx++; spin_lock_bh(&mv_chan->lock); mv_chan->slots_allocated = idx; list_add_tail(&slot->slot_node, &mv_chan->all_slots); spin_unlock_bh(&mv_chan->lock); } if (mv_chan->slots_allocated && !mv_chan->last_used) mv_chan->last_used = list_entry(mv_chan->all_slots.next, struct mv_xor_desc_slot, slot_node); dev_dbg(mv_chan->device->common.dev, "allocated %d descriptor slots last_used: %p\n", mv_chan->slots_allocated, mv_chan->last_used); return mv_chan->slots_allocated ? : -ENOMEM; } static struct dma_async_tx_descriptor * mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *sw_desc, *grp_start; int slot_cnt; dev_dbg(mv_chan->device->common.dev, "%s dest: %x src %x len: %u flags: %ld\n", __func__, dest, src, len, flags); if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) return NULL; BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); spin_lock_bh(&mv_chan->lock); slot_cnt = mv_chan_memcpy_slot_count(len); sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); if (sw_desc) { sw_desc->type = DMA_MEMCPY; sw_desc->async_tx.flags = flags; grp_start = sw_desc->group_head; mv_desc_init(grp_start, flags); mv_desc_set_byte_count(grp_start, len); mv_desc_set_dest_addr(sw_desc->group_head, dest); mv_desc_set_src_addr(grp_start, 0, src); sw_desc->unmap_src_cnt = 1; sw_desc->unmap_len = len; } spin_unlock_bh(&mv_chan->lock); dev_dbg(mv_chan->device->common.dev, "%s sw_desc %p async_tx %p\n", __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0); return sw_desc ? 
&sw_desc->async_tx : NULL; } static struct dma_async_tx_descriptor * mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *sw_desc, *grp_start; int slot_cnt; dev_dbg(mv_chan->device->common.dev, "%s dest: %x len: %u flags: %ld\n", __func__, dest, len, flags); if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) return NULL; BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); spin_lock_bh(&mv_chan->lock); slot_cnt = mv_chan_memset_slot_count(len); sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); if (sw_desc) { sw_desc->type = DMA_MEMSET; sw_desc->async_tx.flags = flags; grp_start = sw_desc->group_head; mv_desc_init(grp_start, flags); mv_desc_set_byte_count(grp_start, len); mv_desc_set_dest_addr(sw_desc->group_head, dest); mv_desc_set_block_fill_val(grp_start, value); sw_desc->unmap_src_cnt = 1; sw_desc->unmap_len = len; } spin_unlock_bh(&mv_chan->lock); dev_dbg(mv_chan->device->common.dev, "%s sw_desc %p async_tx %p \n", __func__, sw_desc, &sw_desc->async_tx); return sw_desc ? 
&sw_desc->async_tx : NULL; } static struct dma_async_tx_descriptor * mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *sw_desc, *grp_start; int slot_cnt; if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) return NULL; BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); dev_dbg(mv_chan->device->common.dev, "%s src_cnt: %d len: dest %x %u flags: %ld\n", __func__, src_cnt, len, dest, flags); spin_lock_bh(&mv_chan->lock); slot_cnt = mv_chan_xor_slot_count(len, src_cnt); sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); if (sw_desc) { sw_desc->type = DMA_XOR; sw_desc->async_tx.flags = flags; grp_start = sw_desc->group_head; mv_desc_init(grp_start, flags); /* the byte count field is the same as in memcpy desc*/ mv_desc_set_byte_count(grp_start, len); mv_desc_set_dest_addr(sw_desc->group_head, dest); sw_desc->unmap_src_cnt = src_cnt; sw_desc->unmap_len = len; while (src_cnt--) mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]); } spin_unlock_bh(&mv_chan->lock); dev_dbg(mv_chan->device->common.dev, "%s sw_desc %p async_tx %p \n", __func__, sw_desc, &sw_desc->async_tx); return sw_desc ? 
&sw_desc->async_tx : NULL; } static void mv_xor_free_chan_resources(struct dma_chan *chan) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *iter, *_iter; int in_use_descs = 0; mv_xor_slot_cleanup(mv_chan); spin_lock_bh(&mv_chan->lock); list_for_each_entry_safe(iter, _iter, &mv_chan->chain, chain_node) { in_use_descs++; list_del(&iter->chain_node); } list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, completed_node) { in_use_descs++; list_del(&iter->completed_node); } list_for_each_entry_safe_reverse( iter, _iter, &mv_chan->all_slots, slot_node) { list_del(&iter->slot_node); kfree(iter); mv_chan->slots_allocated--; } mv_chan->last_used = NULL; dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n", __func__, mv_chan->slots_allocated); spin_unlock_bh(&mv_chan->lock); if (in_use_descs) dev_err(mv_chan->device->common.dev, "freeing %d in use descriptors!\n", in_use_descs); } /** * mv_xor_status - poll the status of an XOR transaction * @chan: XOR channel handle * @cookie: XOR transaction identifier * @txstate: XOR transactions state holder (or NULL) */ static enum dma_status mv_xor_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); dma_cookie_t last_used; dma_cookie_t last_complete; enum dma_status ret; last_used = chan->cookie; last_complete = mv_chan->completed_cookie; mv_chan->is_complete_cookie = cookie; dma_set_tx_state(txstate, last_complete, last_used, 0); ret = dma_async_is_complete(cookie, last_complete, last_used); if (ret == DMA_SUCCESS) { mv_xor_clean_completed_slots(mv_chan); return ret; } mv_xor_slot_cleanup(mv_chan); last_used = chan->cookie; last_complete = mv_chan->completed_cookie; dma_set_tx_state(txstate, last_complete, last_used, 0); return dma_async_is_complete(cookie, last_complete, last_used); } static void mv_dump_xor_regs(struct mv_xor_chan *chan) { u32 val; val = __raw_readl(XOR_CONFIG(chan)); 
dev_printk(KERN_ERR, chan->device->common.dev, "config 0x%08x.\n", val); val = __raw_readl(XOR_ACTIVATION(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "activation 0x%08x.\n", val); val = __raw_readl(XOR_INTR_CAUSE(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "intr cause 0x%08x.\n", val); val = __raw_readl(XOR_INTR_MASK(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "intr mask 0x%08x.\n", val); val = __raw_readl(XOR_ERROR_CAUSE(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "error cause 0x%08x.\n", val); val = __raw_readl(XOR_ERROR_ADDR(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "error addr 0x%08x.\n", val); } static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, u32 intr_cause) { if (intr_cause & (1 << 4)) { dev_dbg(chan->device->common.dev, "ignore this error\n"); return; } dev_printk(KERN_ERR, chan->device->common.dev, "error on chan %d. intr cause 0x%08x.\n", chan->idx, intr_cause); mv_dump_xor_regs(chan); BUG(); } static irqreturn_t mv_xor_interrupt_handler(int irq, void *data) { struct mv_xor_chan *chan = data; u32 intr_cause = mv_chan_get_intr_cause(chan); dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause); if (mv_is_err_intr(intr_cause)) mv_xor_err_interrupt_handler(chan, intr_cause); tasklet_schedule(&chan->irq_tasklet); mv_xor_device_clear_eoc_cause(chan); return IRQ_HANDLED; } static void mv_xor_issue_pending(struct dma_chan *chan) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); if (mv_chan->pending >= MV_XOR_THRESHOLD) { mv_chan->pending = 0; mv_chan_activate(mv_chan); } } /* * Perform a transaction to verify the HW works. 
*/ #define MV_XOR_TEST_SIZE 2000 static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device) { int i; void *src, *dest; dma_addr_t src_dma, dest_dma; struct dma_chan *dma_chan; dma_cookie_t cookie; struct dma_async_tx_descriptor *tx; int err = 0; struct mv_xor_chan *mv_chan; src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); if (!src) return -ENOMEM; dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); if (!dest) { kfree(src); return -ENOMEM; } /* Fill in src buffer */ for (i = 0; i < MV_XOR_TEST_SIZE; i++) ((u8 *) src)[i] = (u8)i; /* Start copy, using first DMA channel */ dma_chan = container_of(device->common.channels.next, struct dma_chan, device_node); if (mv_xor_alloc_chan_resources(dma_chan) < 1) { err = -ENODEV; goto out; } dest_dma = dma_map_single(dma_chan->device->dev, dest, MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); src_dma = dma_map_single(dma_chan->device->dev, src, MV_XOR_TEST_SIZE, DMA_TO_DEVICE); tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, MV_XOR_TEST_SIZE, 0); cookie = mv_xor_tx_submit(tx); mv_xor_issue_pending(dma_chan); async_tx_ack(tx); msleep(1); if (mv_xor_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; goto free_resources; } mv_chan = to_mv_xor_chan(dma_chan); dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma, MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test copy failed compare, disabling\n"); err = -ENODEV; goto free_resources; } free_resources: mv_xor_free_chan_resources(dma_chan); out: kfree(src); kfree(dest); return err; } #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */ static int __devinit mv_xor_xor_self_test(struct mv_xor_device *device) { int i, src_idx; struct page *dest; struct page *xor_srcs[MV_XOR_NUM_SRC_TEST]; dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; dma_addr_t dest_dma; struct 
dma_async_tx_descriptor *tx; struct dma_chan *dma_chan; dma_cookie_t cookie; u8 cmp_byte = 0; u32 cmp_word; int err = 0; struct mv_xor_chan *mv_chan; for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { xor_srcs[src_idx] = alloc_page(GFP_KERNEL); if (!xor_srcs[src_idx]) { while (src_idx--) __free_page(xor_srcs[src_idx]); return -ENOMEM; } } dest = alloc_page(GFP_KERNEL); if (!dest) { while (src_idx--) __free_page(xor_srcs[src_idx]); return -ENOMEM; } /* Fill in src buffers */ for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { u8 *ptr = page_address(xor_srcs[src_idx]); for (i = 0; i < PAGE_SIZE; i++) ptr[i] = (1 << src_idx); } for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) cmp_byte ^= (u8) (1 << src_idx); cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | (cmp_byte << 8) | cmp_byte; memset(page_address(dest), 0, PAGE_SIZE); dma_chan = container_of(device->common.channels.next, struct dma_chan, device_node); if (mv_xor_alloc_chan_resources(dma_chan) < 1) { err = -ENODEV; goto out; } /* test xor */ dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++) dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], 0, PAGE_SIZE, DMA_TO_DEVICE); tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0); cookie = mv_xor_tx_submit(tx); mv_xor_issue_pending(dma_chan); async_tx_ack(tx); msleep(8); if (mv_xor_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test xor timed out, disabling\n"); err = -ENODEV; goto free_resources; } mv_chan = to_mv_xor_chan(dma_chan); dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { u32 *ptr = page_address(dest); if (ptr[i] != cmp_word) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test xor failed compare, disabling." 
" index %d, data %x, expected %x\n", i, ptr[i], cmp_word); err = -ENODEV; goto free_resources; } } free_resources: mv_xor_free_chan_resources(dma_chan); out: src_idx = MV_XOR_NUM_SRC_TEST; while (src_idx--) __free_page(xor_srcs[src_idx]); __free_page(dest); return err; } static int __devexit mv_xor_remove(struct platform_device *dev) { struct mv_xor_device *device = platform_get_drvdata(dev); struct dma_chan *chan, *_chan; struct mv_xor_chan *mv_chan; struct mv_xor_platform_data *plat_data = dev->dev.platform_data; dma_async_device_unregister(&device->common); dma_free_coherent(&dev->dev, plat_data->pool_size, device->dma_desc_pool_virt, device->dma_desc_pool); list_for_each_entry_safe(chan, _chan, &device->common.channels, device_node) { mv_chan = to_mv_xor_chan(chan); list_del(&chan->device_node); } return 0; } static int __devinit mv_xor_probe(struct platform_device *pdev) { int ret = 0; int irq; struct mv_xor_device *adev; struct mv_xor_chan *mv_chan; struct dma_device *dma_dev; struct mv_xor_platform_data *plat_data = pdev->dev.platform_data; adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL); if (!adev) return -ENOMEM; dma_dev = &adev->common; /* allocate coherent memory for hardware descriptors * note: writecombine gives slightly better performance, but * requires that we explicitly flush the writes */ adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev, plat_data->pool_size, &adev->dma_desc_pool, GFP_KERNEL); if (!adev->dma_desc_pool_virt) return -ENOMEM; adev->id = plat_data->hw_id; /* discover transaction capabilites from the platform data */ dma_dev->cap_mask = plat_data->cap_mask; adev->pdev = pdev; platform_set_drvdata(pdev, adev); adev->shared = platform_get_drvdata(plat_data->shared); INIT_LIST_HEAD(&dma_dev->channels); /* set base routines */ dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources; dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; dma_dev->device_tx_status = mv_xor_status; 
dma_dev->device_issue_pending = mv_xor_issue_pending; dma_dev->dev = &pdev->dev; /* set prep routines based on capability */ if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset; if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { dma_dev->max_xor = 8; dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; } mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL); if (!mv_chan) { ret = -ENOMEM; goto err_free_dma; } mv_chan->device = adev; mv_chan->idx = plat_data->hw_id; mv_chan->mmr_base = adev->shared->xor_base; if (!mv_chan->mmr_base) { ret = -ENOMEM; goto err_free_dma; } tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long) mv_chan); /* clear errors before enabling interrupts */ mv_xor_device_clear_err_status(mv_chan); irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = irq; goto err_free_dma; } ret = devm_request_irq(&pdev->dev, irq, mv_xor_interrupt_handler, 0, dev_name(&pdev->dev), mv_chan); if (ret) goto err_free_dma; mv_chan_unmask_interrupts(mv_chan); mv_set_mode(mv_chan, DMA_MEMCPY); spin_lock_init(&mv_chan->lock); INIT_LIST_HEAD(&mv_chan->chain); INIT_LIST_HEAD(&mv_chan->completed_slots); INIT_LIST_HEAD(&mv_chan->all_slots); mv_chan->common.device = dma_dev; list_add_tail(&mv_chan->common.device_node, &dma_dev->channels); if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { ret = mv_xor_memcpy_self_test(adev); dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); if (ret) goto err_free_dma; } if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { ret = mv_xor_xor_self_test(adev); dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); if (ret) goto err_free_dma; } dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: " "( %s%s%s%s)\n", dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? 
"cpy " : "", dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); dma_async_device_register(dma_dev); goto out; err_free_dma: dma_free_coherent(&adev->pdev->dev, plat_data->pool_size, adev->dma_desc_pool_virt, adev->dma_desc_pool); out: return ret; } static void mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp, struct mbus_dram_target_info *dram) { void __iomem *base = msp->xor_base; u32 win_enable = 0; int i; for (i = 0; i < 8; i++) { writel(0, base + WINDOW_BASE(i)); writel(0, base + WINDOW_SIZE(i)); if (i < 4) writel(0, base + WINDOW_REMAP_HIGH(i)); } for (i = 0; i < dram->num_cs; i++) { struct mbus_dram_window *cs = dram->cs + i; writel((cs->base & 0xffff0000) | (cs->mbus_attr << 8) | dram->mbus_dram_target_id, base + WINDOW_BASE(i)); writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); win_enable |= (1 << i); win_enable |= 3 << (16 + (2 * i)); } writel(win_enable, base + WINDOW_BAR_ENABLE(0)); writel(win_enable, base + WINDOW_BAR_ENABLE(1)); } static struct platform_driver mv_xor_driver = { .probe = mv_xor_probe, .remove = __devexit_p(mv_xor_remove), .driver = { .owner = THIS_MODULE, .name = MV_XOR_NAME, }, }; static int mv_xor_shared_probe(struct platform_device *pdev) { struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data; struct mv_xor_shared_private *msp; struct resource *res; dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n"); msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL); if (!msp) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; msp->xor_base = devm_ioremap(&pdev->dev, res->start, res->end - res->start + 1); if (!msp->xor_base) return -EBUSY; res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res) return -ENODEV; msp->xor_high_base = devm_ioremap(&pdev->dev, res->start, res->end - res->start + 1); if (!msp->xor_high_base) return -EBUSY; platform_set_drvdata(pdev, msp); /* * (Re-)program MBUS remapping windows if we are asked to. 
*/ if (msd != NULL && msd->dram != NULL) mv_xor_conf_mbus_windows(msp, msd->dram); return 0; } static int mv_xor_shared_remove(struct platform_device *pdev) { return 0; } static struct platform_driver mv_xor_shared_driver = { .probe = mv_xor_shared_probe, .remove = mv_xor_shared_remove, .driver = { .owner = THIS_MODULE, .name = MV_XOR_SHARED_NAME, }, }; static int __init mv_xor_init(void) { int rc; rc = platform_driver_register(&mv_xor_shared_driver); if (!rc) { rc = platform_driver_register(&mv_xor_driver); if (rc) platform_driver_unregister(&mv_xor_shared_driver); } return rc; } module_init(mv_xor_init); /* it's currently unsafe to unload this module */ #if 0 static void __exit mv_xor_exit(void) { platform_driver_unregister(&mv_xor_driver); platform_driver_unregister(&mv_xor_shared_driver); return; } module_exit(mv_xor_exit); #endif MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>"); MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine"); MODULE_LICENSE("GPL");
gpl-2.0
aloksinha2001/Linux3188
arch/arm/mach-msm/hotplug.c
2373
1876
/* * Copyright (C) 2002 ARM Ltd. * All Rights Reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/smp.h> #include <asm/cacheflush.h> extern volatile int pen_release; static inline void cpu_enter_lowpower(void) { /* Just flush the cache. Changing the coherency is not yet * available on msm. */ flush_cache_all(); } static inline void cpu_leave_lowpower(void) { } static inline void platform_do_lowpower(unsigned int cpu) { /* Just enter wfi for now. TODO: Properly shut off the cpu. */ for (;;) { /* * here's the WFI */ asm("wfi" : : : "memory", "cc"); if (pen_release == cpu) { /* * OK, proper wakeup, we're done */ break; } /* * getting here, means that we have come out of WFI without * having been woken up - this shouldn't happen * * The trouble is, letting people know about this is not really * possible, since we are currently running incoherently, and * therefore cannot safely call printk() or anything else */ pr_debug("CPU%u: spurious wakeup call\n", cpu); } } int platform_cpu_kill(unsigned int cpu) { return 1; } /* * platform-specific code to shutdown a CPU * * Called with IRQs disabled */ void platform_cpu_die(unsigned int cpu) { /* * we're ready for shutdown now, so do it */ cpu_enter_lowpower(); platform_do_lowpower(cpu); /* * bring this CPU back into the world of cache * coherency, and then restore interrupts */ cpu_leave_lowpower(); } int platform_cpu_disable(unsigned int cpu) { /* * we don't allow CPU 0 to be shutdown (it is still too special * e.g. clock tick interrupts) */ return cpu == 0 ? -EPERM : 0; }
gpl-2.0